Code2Intelligences 0.1.5__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1 @@
1
+ from .utils import get_code,get_data
@@ -0,0 +1,462 @@
1
# Lookup table of ready-to-copy example programs, keyed first by subject
# ("ml") and then by program number (1-12).  The values are the literal
# source text returned by get_code(); they are data, not code executed by
# this module.
#
# NOTE(review): this dump was reconstructed from a diff rendering that
# stripped leading whitespace *inside* these string literals.  Entries whose
# programs contain indented suites (e.g. 5's if/else/for, 10's function
# bodies) will not run exactly as stored here — verify against the original
# package before relying on the stored text.
programs={"ml":{
# 1: Decision tree classifier on iris.csv, with tree visualization.
1:'''import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier, plot_tree
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
import matplotlib.pyplot as plt

# Load dataset
data = pd.read_csv("iris.csv")

print("Dataset preview:\n", data.head(), "\n")

# Features & target
X = data.drop("species", axis=1)
y, labels = pd.factorize(data["species"])

# Split data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

# Model
clf = DecisionTreeClassifier(criterion="gini", max_depth=5, random_state=42)
clf.fit(X_train, y_train)

# Prediction
y_pred = clf.predict(X_test)

# Evaluation
print("Accuracy:", accuracy_score(y_test, y_pred))
print("\nClassification Report:\n", classification_report(y_test, y_pred))
print("\nConfusion Matrix:\n", confusion_matrix(y_test, y_pred))

# New samples prediction
new_samples = pd.DataFrame([
[5.1, 3.5, 1.4, 0.2],
[6.7, 3.1, 4.7, 1.5],
[7.5, 3.6, 6.1, 2.5]
], columns=X.columns)

predictions = clf.predict(new_samples)
species_predictions = [labels[p] for p in predictions]

print("\nPredictions:", species_predictions)

# Tree Visualization
plt.figure(figsize=(20, 10))
plot_tree(clf, feature_names=X.columns, class_names=labels, filled=True)
plt.title("Decision Tree")
plt.show()''',
# 2: Scatter plot showing a linearly separable pair of iris classes.
2:'''#Linearity Visualization
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets

iris = datasets.load_iris()
X = iris.data
y = iris.target

# Plot
plt.scatter(X[:50, 0], X[:50, 2], color='black', marker='x', label='setosa')
plt.scatter(X[50:100, 0], X[50:100, 2], color='green', marker='s', label='versicolor')

plt.xlabel('Sepal Length')
plt.ylabel('Petal Length')
plt.legend()
plt.show()''',
# 3: Scatter plot showing a non-linearly separable pair of iris classes.
3:'''#Non-Linearity
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets

iris = datasets.load_iris()
X = iris.data
y = iris.target

plt.scatter(X[50:100, 0], X[50:100, 2], color='black', marker='x', label='versicolor')
plt.scatter(X[100:150, 0], X[100:150, 2], color='red', marker='o', label='virginica')

plt.xlabel('Sepal Length')
plt.ylabel('Petal Length')
plt.legend()
plt.show()''',
# 4: TF-IDF + linear SVM spam classifier; reads spam.csv as tab-separated
# (see the "spam" entry in `datasets` below and the tab caveat there).
4:'''#Spam Classification (SVM)
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import classification_report, accuracy_score

# Load dataset
data = pd.read_csv("spam.csv", delimiter='\t', encoding='latin-1')

# Keep required columns
data = data[['Type', 'Message']]
data.columns = ['label', 'email']

# Encode labels
data['label'] = data['label'].map({'ham': 0, 'spam': 1})

# Vectorization
vectorizer = TfidfVectorizer(stop_words='english')
X = vectorizer.fit_transform(data['email'])
y = data['label']

# Split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

# Model
model = SVC(kernel='linear')
model.fit(X_train, y_train)

# Prediction
y_pred = model.predict(X_test)

# Evaluation
print(classification_report(y_test, y_pred))
print("Accuracy:", accuracy_score(y_test, y_pred))''',
# 5: OpenCV Haar-cascade face detection on image.jpg.
# NOTE(review): the if/else/for suites below lost their indentation in the
# diff rendering — as stored, this text is not runnable Python.
5:'''#Face Detection
import cv2

face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades +
'haarcascade_frontalface_default.xml')

image = cv2.imread("image.jpg")

if image is None:
print("Image not found")
else:
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

faces = face_cascade.detectMultiScale(gray, 1.1, 5)

for (x, y, w, h) in faces:
cv2.rectangle(image, (x, y), (x+w, y+h), (255, 0, 0), 2)

cv2.imshow("Face Detection", image)
cv2.waitKey(0)
cv2.destroyAllWindows()
''',
# 6: k-nearest-neighbours classifier on iris.csv.
6:'''#KNN
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score

df = pd.read_csv("iris.csv")

le = LabelEncoder()
df["species"] = le.fit_transform(df["species"])

X = df.iloc[:, :-1]
y = df.iloc[:, -1]

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

model = KNeighborsClassifier(n_neighbors=5)
model.fit(X_train, y_train)

y_pred = model.predict(X_test)

print("Accuracy:", accuracy_score(y_test, y_pred))''',
# 7: Dense feed-forward network on MNIST with Keras.
7:'''#MNIST Neural Network
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt

# Load dataset
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()

# Normalize
x_train = x_train / 255.0
x_test = x_test / 255.0

# Flatten
x_train = x_train.reshape(-1, 28*28)
x_test = x_test.reshape(-1, 28*28)

# Model
model = keras.Sequential([
keras.layers.Dense(128, activation='relu', input_shape=(784,)),
keras.layers.Dense(64, activation='relu'),
keras.layers.Dense(10, activation='softmax')
])

# Compile
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])

# Train
history = model.fit(x_train, y_train, epochs=10, batch_size=32,
validation_data=(x_test, y_test))

# Evaluate
loss, acc = model.evaluate(x_test, y_test)
print("Test Accuracy:", acc)

# Plot accuracy
plt.plot(history.history['accuracy'], label='train')
plt.plot(history.history['val_accuracy'], label='validation')
plt.legend()
plt.show()

# Prediction example
predictions = model.predict(x_test)

plt.imshow(x_test[0].reshape(28, 28), cmap='gray')
plt.title(f"Predicted: {np.argmax(predictions[0])}")
plt.show()''',
# 8: K-Means on adult.csv with PCA-based 2-D visualization.
# NOTE(review): the for-loop body lost its indentation in the rendering.
8:'''#K-Means Clustering
import pandas as pd
import numpy as np
from sklearn.cluster import KMeans
from sklearn.preprocessing import LabelEncoder
from sklearn.decomposition import PCA
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt

# Load dataset
df = pd.read_csv('adult.csv')

# Clean data
df.replace(' ?', np.nan, inplace=True)
df.dropna(inplace=True)

# Encode categorical columns
le = LabelEncoder()
for col in df.select_dtypes(include=['object']).columns:
df[col] = le.fit_transform(df[col])

# Features & target
X = df.drop('income', axis=1)
y = df['income']

# Model
kmeans = KMeans(n_clusters=2, random_state=42)
df['predicted'] = kmeans.fit_predict(X)

# PCA for visualization
pca = PCA(n_components=2)
X_reduced = pca.fit_transform(X)

# Plot
plt.scatter(X_reduced[:, 0], X_reduced[:, 1], c=df['predicted'])
plt.title("K-Means Clustering")
plt.show()''',
# 9: Random forest sentiment classifier with GridSearchCV on test.csv.
9:'''#Random Forest
import pandas as pd
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, classification_report

# Load dataset
df = pd.read_csv('test.csv', encoding='ISO-8859-1')

# Encode sentiment
df['sentiment'] = df['sentiment'].apply(lambda x: 1 if x == 'positive' else 0)

X = df['text']
y = df['sentiment']

# Split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

# Vectorization
vectorizer = TfidfVectorizer(max_features=5000)
X_train = vectorizer.fit_transform(X_train)
X_test = vectorizer.transform(X_test)

# Model
model = RandomForestClassifier()

# Grid Search
params = {
'n_estimators': [50, 100],
'max_depth': [None, 10],
'min_samples_split': [2, 5]
}

grid = GridSearchCV(model, params, cv=3)
grid.fit(X_train, y_train)

# Prediction
y_pred = grid.predict(X_test)

print("Accuracy:", accuracy_score(y_test, y_pred))
print(classification_report(y_test, y_pred))
print("Best Params:", grid.best_params_)''',
# 10: Locally weighted regression vs ordinary linear regression on tips.csv.
# NOTE(review): the two def bodies lost their indentation in the rendering.
10:'''#Locally Weighted Regression vs Linear Regression
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error

# Gaussian weights
def gaussian_weights(X, x_query, tau):
return np.exp(-((X - x_query) ** 2) / (2 * tau ** 2))

# LWR
def lwr(X, y, x_query, tau):
W = np.diag(gaussian_weights(X, x_query, tau))
X_b = np.c_[np.ones(len(X)), X]
theta = np.linalg.inv(X_b.T @ W @ X_b) @ (X_b.T @ W @ y)
return np.array([1, x_query]) @ theta

# Load dataset
data = pd.read_csv('tips.csv')
X = data['total_bill'].values
y = data['tip'].values

# Predictions
X_test = np.linspace(min(X), max(X), 100)
y_lwr = np.array([lwr(X, y, x, 10) for x in X_test])

# Linear Regression
lr = LinearRegression()
lr.fit(X.reshape(-1,1), y)
y_lr = lr.predict(X_test.reshape(-1,1))

# Plot
plt.scatter(X, y)
plt.plot(X_test, y_lwr, color='red', label='LWR')
plt.plot(X_test, y_lr, linestyle='dashed', label='Linear')
plt.legend()
plt.show()''',
# 11: Bayesian network inference with pgmpy on heart.csv.
11:'''#Bayesian Network
import pandas as pd
from pgmpy.models import BayesianNetwork
from pgmpy.estimators import BayesianEstimator
from pgmpy.inference import VariableElimination

# Load dataset
df = pd.read_csv('heart.csv')
df.dropna(inplace=True)

# Model
model = BayesianNetwork([
('age', 'target'),
('chol', 'target')
])

# Train
model.fit(df, estimator=BayesianEstimator)

# Inference
infer = VariableElimination(model)

result = infer.query(variables=['target'], evidence={
'age': 63,
'chol': 233
})

print(result)''',
# 12: Keras binary classifier for credit-card fraud on credit.csv.
12:'''#Credit Card Fraud Detection
import pandas as pd
import tensorflow as tf
from tensorflow import keras
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score

# Load dataset
df = pd.read_csv("credit.csv")
df.dropna(inplace=True)

X = df.drop("Class", axis=1)
y = df["Class"]

# Scale
scaler = StandardScaler()
X = scaler.fit_transform(X)

# Split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

# Model
model = keras.Sequential([
keras.layers.Dense(64, activation='relu', input_shape=(X.shape[1],)),
keras.layers.Dense(32, activation='relu'),
keras.layers.Dense(16, activation='relu'),
keras.layers.Dense(1, activation='sigmoid')
])

# Compile
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# Train
model.fit(X_train, y_train, epochs=2, batch_size=32)

# Predict
y_pred = (model.predict(X_test) > 0.5).astype(int)

# Metrics
print("Accuracy:", accuracy_score(y_test, y_pred))
print("Precision:", precision_score(y_test, y_pred))
print("Recall:", recall_score(y_test, y_pred))
print("F1:", f1_score(y_test, y_pred))'''
}}
402
+
403
+
404
# Small inline sample datasets, keyed by the file stem that the programs in
# `programs` read (e.g. the iris programs read "iris.csv" — presumably the
# caller writes get_data("iris") to that file first; verify against the
# package's documentation).  Values are raw CSV/text, returned verbatim by
# get_data().
#
# NOTE(review): "spam" is read by program 4 with delimiter='\t', but the
# diff rendering this dump came from appears to have collapsed whitespace —
# the separators below may originally have been tabs; confirm before use.
datasets = {
"iris":'''sepal_length,sepal_width,petal_length,petal_width,species
5.1,3.5,1.4,0.2,setosa
4.9,3.0,1.4,0.2,setosa
5.0,3.6,1.4,0.2,setosa
6.0,2.2,4.0,1.0,versicolor
5.5,2.3,4.0,1.3,versicolor
6.5,2.8,4.6,1.5,versicolor
6.3,3.3,6.0,2.5,virginica
5.8,2.7,5.1,1.9,virginica
7.1,3.0,5.9,2.1,virginica''',
"spam":'''Type Message
ham Hello how are you
spam Win money now!!!
ham Are you coming today?
spam Claim your free prize
ham Let's meet tomorrow
spam Free entry in contest''',
"adult":'''age,workclass,education,marital-status,occupation,race,gender,hours-per-week,income
39,State-gov,Bachelors,Never-married,Adm-clerical,White,Male,40,<=50K
50,Self-emp,HS-grad,Married,Exec-managerial,White,Male,60,>50K
38,Private,HS-grad,Divorced,Handlers-cleaners,White,Male,40,<=50K
53,Private,11th,Married,Handlers-cleaners,Black,Male,40,<=50K
28,Private,Bachelors,Married,Prof-specialty,Black,Female,40,>50K''',
"test":'''text,sentiment
I love this product,positive
This is very bad,negative
Amazing experience,positive
Worst service ever,negative
Very happy with this,positive
Not good at all,negative''',
"tips":'''total_bill,tip
10.34,1.66
20.45,3.50
15.20,2.50
25.00,4.00
30.50,5.50
18.75,3.00''',
"heart":'''age,chol,target
63,233,1
37,250,1
41,204,1
56,236,0
57,354,1
44,263,0''',
"credit":'''Time,V1,V2,V3,Amount,Class
0,-1.35,-0.07,2.53,149.62,0
1,1.19,0.26,0.16,2.69,0
2,-1.36,-1.34,1.77,378.66,0
3,-0.97,-0.18,1.79,123.50,1
4,1.23,0.14,0.05,69.99,0'''
}
456
+
457
def get_code(sub, no):
    """Return the stored example-program source text.

    Parameters
    ----------
    sub : str
        Subject key in ``programs`` (currently only ``"ml"``).
    no : int
        Program number within that subject (1-12 for ``"ml"``).

    Returns
    -------
    str
        The program's source code, verbatim as stored.

    Raises
    ------
    KeyError
        If *sub* or *no* is unknown.  Same exception type as before, but
        the message now lists the valid choices instead of echoing only
        the missing key.
    """
    try:
        subject = programs[sub]
    except KeyError:
        # Keep KeyError for backward compatibility; just make it actionable.
        raise KeyError(
            f"unknown subject {sub!r}; available subjects: {sorted(programs)}"
        ) from None
    try:
        return subject[no]
    except KeyError:
        raise KeyError(
            f"unknown program {no!r} for subject {sub!r}; "
            f"available numbers: {sorted(subject)}"
        ) from None
459
+
460
def get_data(title):
    """Return the stored sample dataset named *title*.

    Parameters
    ----------
    title : str
        Dataset key in ``datasets`` (e.g. ``"iris"``, ``"spam"``, ``"tips"``).

    Returns
    -------
    str
        The raw CSV/text content, verbatim as stored.

    Raises
    ------
    KeyError
        If *title* is unknown.  Same exception type as before, but the
        message now lists the valid dataset names.
    """
    try:
        return datasets[title]
    except KeyError:
        # Keep KeyError for backward compatibility; just make it actionable.
        raise KeyError(
            f"unknown dataset {title!r}; available datasets: {sorted(datasets)}"
        ) from None
462
+
@@ -0,0 +1,7 @@
1
+ Metadata-Version: 2.4
2
+ Name: Code2Intelligences
3
+ Version: 0.1.5
4
+ Summary: My reusable python module
5
+ Author: Unknown
6
+ Requires-Python: >=3.7
7
+ Description-Content-Type: text/markdown
@@ -0,0 +1,8 @@
1
+ pyproject.toml
2
+ setup.py
3
+ Code2Intelligences/__init__.py
4
+ Code2Intelligences/utils.py
5
+ Code2Intelligences.egg-info/PKG-INFO
6
+ Code2Intelligences.egg-info/SOURCES.txt
7
+ Code2Intelligences.egg-info/dependency_links.txt
8
+ Code2Intelligences.egg-info/top_level.txt
@@ -0,0 +1 @@
1
+ Code2Intelligences
@@ -0,0 +1,7 @@
1
+ Metadata-Version: 2.4
2
+ Name: Code2Intelligences
3
+ Version: 0.1.5
4
+ Summary: My reusable python module
5
+ Author: Unknown
6
+ Requires-Python: >=3.7
7
+ Description-Content-Type: text/markdown
@@ -0,0 +1,11 @@
1
+ [project]
2
+ name = "Code2Intelligences"
3
+ version = "0.1.5"
4
+ description = "My reusable python module"
5
+ authors = [{name = "Unknown"}]
6
+ readme = "README.md"
7
+ requires-python = ">=3.7"
8
+
9
+ [build-system]
10
+ requires = ["setuptools"]
11
+ build-backend = "setuptools.build_meta"
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+
@@ -0,0 +1,8 @@
1
from setuptools import setup, find_packages

# Packaging entry point.  Name, version, description, etc. are declared
# once in pyproject.toml ([project] table, version = "0.1.5"); duplicating
# them here caused drift — this file previously pinned version="0.1" while
# pyproject.toml and PKG-INFO said 0.1.5.  setuptools reads pyproject.toml,
# so only the bits not expressed there are kept.
setup(
    packages=find_packages(),
    install_requires=[],
)
+ )