absd 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
absd-0.1.0/LICENSE ADDED
@@ -0,0 +1,5 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2025
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy...
absd-0.1.0/PKG-INFO ADDED
@@ -0,0 +1,37 @@
1
+ Metadata-Version: 2.4
2
+ Name: absd
3
+ Version: 0.1.0
4
+ Summary: testing python library
5
+ Home-page: https://github.com/pacyandrocash/absd
6
+ Author: blank
7
+ Author-email: pacyandrocash@gmail.com
8
+ License: MIT
9
+ Classifier: Programming Language :: Python :: 3
10
+ Classifier: Operating System :: OS Independent
11
+ Classifier: License :: OSI Approved :: MIT License
12
+ Requires-Python: >=3.7
13
+ Description-Content-Type: text/markdown
14
+ License-File: LICENSE
15
+ Dynamic: author
16
+ Dynamic: author-email
17
+ Dynamic: classifier
18
+ Dynamic: description
19
+ Dynamic: description-content-type
20
+ Dynamic: home-page
21
+ Dynamic: license
22
+ Dynamic: license-file
23
+ Dynamic: requires-python
24
+ Dynamic: summary
25
+
26
+ # absd
27
+
28
+ A simple Python library that prints 16 code snippets when functions are called.
29
+
30
+ ## Usage
31
+
32
+ ```python
33
+ import absd
34
+
35
+ absd.a1()
36
+ absd.b1()
37
+ ```
absd-0.1.0/README.md ADDED
@@ -0,0 +1,12 @@
1
+ # absd
2
+
3
+ A simple Python library that prints 16 code snippets when functions are called.
4
+
5
+ ## Usage
6
+
7
+ ```python
8
+ import absd
9
+
10
+ absd.a1()
11
+ absd.b1()
12
+ ```
@@ -0,0 +1,3 @@
1
+ from .messages import (
2
+ a1,a2,a3,a4,a5,a6,a7,a8,b1,b2,b3,b4,b5,b6,b7,b8
3
+ )
@@ -0,0 +1,556 @@
1
def a1():
    """Print a TensorFlow demo snippet (matrix product and eigen-decomposition)."""
    # Emit the snippet text directly; no need for an intermediate local.
    print('''import tensorflow as tf
x=tf.constant([1,2,3,4,5,6],shape=[2,3])
print(x)
y=tf.constant([7,8,9,10,11,12],shape=[3,2])
print(y)
z=tf.matmul(x,y)
print("Product:",z)
e_matrix_A=tf.random.uniform([2,2],minval=3,maxval=10,dtype=tf.float32,name="matrixA")
print("Matrix A:\n{}\n\n".format(e_matrix_A))
eigen_values_A,eigen_vectors_A=tf.linalg.eigh(e_matrix_A)
print("Eigen Vectors:\n{}\n\nEigen Values:\n{}\n\n".format(eigen_vectors_A,eigen_values_A))''')
14
+
15
def a2():
    """Print a Keras XOR multilayer-perceptron demo snippet."""
    # Emit the snippet text directly; no need for an intermediate local.
    print('''import numpy as np
from keras.layers import Dense
from keras.models import Sequential
model = Sequential()
model.add(Dense(units=2, activation='tanh', input_dim=2))
model.add(Dense(units=1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())
print(model.get_weights())
X=np.array([[0.,0.],[0.,1.],[1.,0.],[1.,1.]])
Y=np.array([0.,1.,1.,0.])
model.fit(X, Y, epochs=100,batch_size=4)
print(model.get_weights())
print(model.predict(X,batch_size=4))''')
31
+
32
def a3():
    """Print a Keras binary-classification-on-blobs demo snippet."""
    # NOTE(review): indentation inside the snippet was reconstructed; the diff
    # source had lost all leading whitespace — confirm against the original package.
    print('''from keras.models import Sequential
from keras.layers import Dense
from sklearn.datasets import make_blobs
from sklearn.preprocessing import MinMaxScaler
import numpy as np

X,Y=make_blobs(n_samples=100,centers=2,n_features=2,random_state=1)
scalar=MinMaxScaler()
scalar.fit(X)
X=scalar.transform(X)
model=Sequential()
model.add(Dense(4,input_dim=2,activation='relu'))
model.add(Dense(4,activation='relu'))
model.add(Dense(1,activation='sigmoid'))
model.compile(loss='binary_crossentropy',optimizer='adam')
model.fit(X,Y,epochs=500)
Xnew,Yreal=make_blobs(n_samples=3,centers=2,n_features=2,random_state=1)
Xnew=scalar.transform(Xnew)
Ynew_probs = model.predict(Xnew)
Ynew = (Ynew_probs > 0.5).astype(int)
for i in range(len(Xnew)):
    print("X=%s,Predicted=%s,Desired=%s"%(Xnew[i],Ynew[i],Yreal[i]))
print(model.summary())''')
57
+
58
def a4():
    """Print a Keras regression demo snippet (make_regression + MinMaxScaler)."""
    # Emit the snippet text directly; no need for an intermediate local.
    print('''from keras.models import Sequential
from keras.layers import Dense
from sklearn.datasets import make_regression
from sklearn.preprocessing import MinMaxScaler

X,Y=make_regression(n_samples=100,n_features=2,noise=0.1,random_state=1)
scalarX, scalarY=MinMaxScaler(),MinMaxScaler()
scalarX.fit(X)
scalarY.fit(Y.reshape(100,1))
X=scalarX.transform(X)
Y=scalarY.transform(Y.reshape(100,1))
model=Sequential()
model.add(Dense(4,input_dim=2,activation='relu'))
model.add(Dense(4,activation='relu'))
model.add(Dense(1,activation='sigmoid'))
model.compile(loss='mse',optimizer='adam')
model.fit(X,Y,epochs=1000,verbose=0)
Xnew,a=make_regression(n_samples=3,n_features=2,noise=0.1,random_state=1)
Xnew=scalarX.transform(Xnew)
Ynew = model.predict(Xnew)
for i in range(len(Xnew)):
    print("X=%s,Predicted=%s,Desired=%s"%(Xnew[i],Ynew[i],a[i]))
    #print("X=%s,Predicted=%s"%(Xnew[i],Ynew[i]))''')
83
+
84
def a5():
    """Print an LSTM time-series (synthetic stock price) demo snippet."""
    # NOTE(review): loop-body indentation inside the snippet was reconstructed;
    # the diff source had lost all leading whitespace.
    print('''import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import LSTM,Dense,Dropout
from sklearn.preprocessing import MinMaxScaler

#1)Generate synthetic stock price data using a sine wave
np.random.seed(0)
time_steps=300
x=np.linspace(0,50,time_steps)
data=np.sin(x)+np.random.normal(scale=0.2,size=time_steps) #Sine_wave+noise
data=data.reshape(-1,1)

#2)Scale the data
scaler=MinMaxScaler(feature_range=(0,1))
scaled_data=scaler.fit_transform(data)

#3)Create sequences of 60 time steps
X=[]
y=[]
sequence_length=60
for i in range(sequence_length,len(scaled_data)):
    X.append(scaled_data[i-sequence_length:i,0])
    y.append(scaled_data[i,0])


X,y=np.array(X),np.array(y)

#Reshape input to be [samples,time steps,features]
X=np.reshape(X,(X.shape[0],X.shape[1],1))

#4)Build the LSTM model
model=Sequential()
model.add(LSTM(units=50,return_sequences=True,input_shape=(X.shape[1],1)))
model.add(Dropout(0.2))
model.add(LSTM(units=50,return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(units=50))
model.add(Dropout(0.2))
model.add(Dense(units=1))

#5)Compile and train the model
model.compile(optimizer='adam',loss='mean_squared_error')
model.fit(X,y,epochs=20,batch_size=32)

#6)Predict
predicted=model.predict(X)
predicted=scaler.inverse_transform(predicted.reshape(-1,1))
actual=scaler.inverse_transform(y.reshape(-1,1))

#7)Plot results
plt.figure(figsize=(12,6))
plt.plot(actual,color="red",label="Actual (Synthetic Stock Price)")
plt.plot(predicted,color="blue",label='Predicted Price')
plt.title('Junaid Kazi SYMSDS2024011 - LSTM Stock Price Prediction (Synthetic Data)')
plt.xlabel('Time')
plt.ylabel('Price')
plt.legend()
plt.grid(True)
plt.show()''')
146
+
147
def a6():
    """Print an ARIMA seasonal-forecast (umbrella sales) demo snippet.

    Fix: the original snippet began with ``iimport numpy as np`` — a typo
    that would make the printed snippet raise a SyntaxError if pasted and
    run; corrected to ``import``.
    """
    snippet = '''import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from statsmodels.tsa.arima.model import ARIMA
from pandas.plotting import register_matplotlib_converters

register_matplotlib_converters()

#1)Generate synthetic monthly umbrella sales data for 3 years(36 months)
np.random.seed()
months=pd.date_range(start="2020-01",periods=36,freq="M")

#Simulate sales with seasonality (rainy season in June,July,August)
seasonality=10+5*np.sin(2*np.pi*(months.month-1)/12)
noise=np.random.normal(0,1,len(months))
sales=seasonality+noise

#Create DataFrame
data=pd.DataFrame({"Date":months,"Umbrella_Sales":sales})
data.set_index("Date",inplace=True)

#2)Plot the sales data
plt.figure(figsize=(10,4))
plt.plot(data,label="Umbrella_Sales")
plt.title("Junaid Kazi - SYMSDS2024011 - Synthetic Monthly Umbrella Sales")
plt.xlabel("Date")
plt.ylabel("Sales")
plt.grid(True)
plt.legend()
plt.show()

#3)Fit ARIMA model (we'll use ARIMA(1,1,1) as a simple example)
model=ARIMA(data,order=(1,1,1))
model_fit=model.fit()

#4)Forecast the next 12 months
forecast_steps=12
forecast=model_fit.forecast(steps=forecast_steps)

#5)Plot the original and forecasted values
forecast_index=pd.date_range(start=data.index[-1]+pd.offsets.MonthEnd(1),periods=forecast_steps,freq="M")
forecast_series=pd.Series(forecast,index=forecast_index)

plt.figure(figsize=(12,6))
plt.plot(data,label="Historical Sales")
plt.plot(forecast_series,label="Forecasted Sales",color="orange")
plt.title("Junaid Kazi - SYMSDS2024011 - Umbrella Sales Forecast (ARIMA Model)")
plt.xlabel("Date")
plt.ylabel("Sales")
plt.grid(True)
plt.legend()
plt.show()'''
    print(snippet)
201
+
202
def a7():
    """Print a CNN-on-MNIST classification demo snippet."""
    # Emit the snippet text directly; no need for an intermediate local.
    print('''import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, Dense, Flatten
from tensorflow.keras.utils import to_categorical

(X_train, y_train), (X_test, y_test) = mnist.load_data()

X_train = X_train.reshape(-1, 28, 28, 1).astype('float32')/ 255.0
X_test = X_test.reshape(-1, 28, 28, 1).astype('float32')/ 255.0

y_train = to_categorical(y_train,10)
y_test = to_categorical(y_test,10)

model = Sequential([
    Conv2D(32, kernel_size=3 ,activation='relu', input_shape=(28,28,1)),
    Conv2D(64, kernel_size=3 ,activation='relu'),
    Flatten(),
    Dense(10, activation='softmax')
])

model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=1,batch_size=128)

index = 0
sample_image = X_test[index]
sample_label = y_test[index]
prediction = model.predict(np.expand_dims(sample_image, axis=0))
predicted_class = np.argmax(prediction)
actual_class = np.argmax(y_test[index])

plt.figure(figsize=(3,3))
plt.imshow(sample_image.reshape(28,28), cmap='gray')
plt.title(f'Junaid Kazi - SYMSDS2024011 Predicted: {predicted_class}, Actual: {actual_class}')
plt.axis('off')
plt.show()''')
241
+
242
def a8():
    """Print a denoising-autoencoder-on-MNIST demo snippet."""
    # NOTE(review): loop-body indentation inside the snippet was reconstructed;
    # the diff source had lost all leading whitespace.
    print('''import keras
from keras.datasets import mnist
from keras import layers
import numpy as np
import matplotlib.pyplot as plt

(X_train, _), (X_test, _) = mnist.load_data()
X_train = X_train.astype('float32') / 255.
X_test = X_test.astype('float32') / 255.
X_train = X_train[..., np.newaxis]
X_test = X_test[..., np.newaxis]

noise_factor = 0.5
X_train_noisy = np.clip(X_train + noise_factor * np.random.normal(size=X_train.shape), 0., 1.)
X_test_noisy = np.clip(X_test + noise_factor * np.random.normal(size=X_test.shape), 0., 1.)

input_imp = keras.Input(shape=(28,28,1))
x = layers.Conv2D(32,3,activation='relu',padding='same')(input_imp)
x = layers.MaxPooling2D(2, padding='same')(x)
x = layers.Conv2D(32,3,activation='relu',padding='same')(x)
encoded = layers.MaxPooling2D(2, padding='same')(x)

x = layers.Conv2D(32,3,activation='relu',padding='same')(encoded)
x = layers.UpSampling2D(2)(x)
x = layers.Conv2D(32,3,activation='relu',padding='same')(x)
x = layers.UpSampling2D(2)(x)
decoded = layers.Conv2D(1,3,activation='sigmoid',padding='same')(x)

autoencoder = keras.Model(input_imp,decoded)
autoencoder.compile(optimizer='adam',loss='binary_crossentropy', metrics=['accuracy'])
autoencoder.fit(X_train_noisy,X_train,epochs=1,batch_size=128,shuffle=True,validation_data=(X_test_noisy,X_test))

plt.figure(figsize=(20, 2))
for i in range(10):
    ax = plt.subplot(1,10,i+1)
    plt.imshow(X_test_noisy[i].squeeze(),cmap='gray')
    ax.axis('off')
plt.show()

predictions = autoencoder.predict(X_test_noisy)
plt.figure(figsize=(20, 2))
for i in range(10):
    ax = plt.subplot(1,10,i+1)
    plt.imshow(predictions[i].squeeze(),cmap='gray')
    ax.axis('off')
plt.show()''')
290
+
291
def b1():
    """Print a KMeans clustering demo snippet (CGPA/ML scatter plots)."""
    # Emit the snippet text directly; no need for an intermediate local.
    print('''import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import KMeans
df=pd.read_csv("")
df.head()
sns.scatterplot(x="cgpa",y="ML",data=df)
kmeans=KMeans(n_clusters=4,random_state=0,n_init=10)
kmeans.fit(df)
centroid=kmeans.cluster_centers_
label=kmeans.labels_
sns.scatterplot(x="cgpa",y="ML",data=df,hue=label)
sns.scatterplot(x="cgpa",y="ML",data=df,hue=label,palette='coolwarm',s=50)''')
306
+
307
def b2():
    """Print a hierarchical (agglomerative) clustering demo snippet."""
    # Emit the snippet text directly; no need for an intermediate local.
    print('''from scipy.cluster import hierarchy as SCH
from sklearn.cluster import AgglomerativeClustering
import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv(r"")
df.columns = ['CustomerID', 'Gender', 'Age', 'annual_income', 'spending_score']
df = df[['annual_income', 'spending_score']]
plt.figure(figsize =(6, 6))
plt.title('Visualising the data')
Dendrogram = SCH.dendrogram((SCH.linkage (df, method ='ward')))
ac2 = AgglomerativeClustering(n_clusters = 5) # Visualizing the clustering
plt.figure(figsize =(6, 6))
plt.scatter(df['annual_income'], df['spending_score'], c = ac2.fit_predict(df))
plt.show()''')
323
+
324
def b3():
    """Print a one-line list-comprehension demo snippet."""
    # Emit the snippet text directly; no need for an intermediate local.
    print('''print([i for i in range(10) if i % 2 == 0])''')
327
+
328
def b4():
    """Print a Gaussian-mixture-model clustering demo snippet."""
    # NOTE(review): loop-body indentation inside the snippet was reconstructed;
    # the diff source had lost all leading whitespace.
    print('''import numpy as np
import matplotlib.pyplot as plt
from sklearn.mixture import GaussianMixture
from sklearn.datasets import make_blobs

# Generate synthetic data
X, y = make_blobs(n_samples=300, centers=4, cluster_std=0.60, random_state=0)

# Fit Gaussian Mixture Model
gmm = GaussianMixture(n_components=4, random_state=42)
gmm.fit(X)
labels = gmm.predict(X)
# Plot the results
plt.figure(figsize=(10, 6))

# Plot the data points
plt.scatter(X[:, 0], X[:, 1], c=labels, s=40, cmap='viridis', alpha=0.6)

# Plot the Gaussian ellipses
for i in range(gmm.n_components):
    # Get eigenvalues and eigenvectors
    covariances = gmm.covariances_[i][:2, :2]
    v, w = np.linalg.eigh(covariances)
    v = 2.0 * np.sqrt(2.0) * np.sqrt(v)

    # Calculate ellipse angle
    angle = np.arctan2(w[0][1], w[0][0])
    angle = 180.0 * angle / np.pi # Convert to degrees

    # Create ellipse
    mean = gmm.means_[i, :2]
    ell = plt.matplotlib.patches.Ellipse(mean, v[0], v[1], 180.0 + angle,color='red', alpha=0.3)
    plt.gca().add_artist(ell)

plt.title('Gaussian Mixture Model Clustering')
plt.xlabel('Feature 1')
plt.ylabel('Feature 2')
plt.grid(True)
plt.show()

# Print model parameters
print("Means:", gmm.means_)
print("Covariances:", gmm.covariances_)
print("Weights:", gmm.weights_)
''')
375
+
376
def b5():
    """Print a TF-IDF content-based movie-recommender demo snippet."""
    # NOTE(review): function-body indentation inside the snippet was
    # reconstructed; the diff source had lost all leading whitespace.
    print('''import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
movies={
'title':["The Dark Knight","Inception","Toy Story","Finding Nemo"," The Shawshank Redemption","Pulp Fiction"],
'genre':["Action,Crime,Drama","Action,Adventure,Sci-Fi","Animation,Adventure,Comedy","Animation,Adventure,Comedy",
"Drama","Crime,Drama"]
}
movies_df=pd.DataFrame(movies)
user_likes=["The Dark Knight"]
tfidf=TfidfVectorizer(stop_words="english")
tfidf_matrix=tfidf.fit_transform(movies_df["genre"])
cosine_sim=cosine_similarity(tfidf_matrix,tfidf_matrix)

def get_recommendations(title,cosine_sim=cosine_sim):
    idx=movies_df[movies_df["title"]==title].index[0]
    sim_scores=list(enumerate(cosine_sim[idx]))
    sim_scores=sorted(sim_scores,key=lambda x: x[1],reverse=True)
    sim_scores=sim_scores[1:4]
    movie_indices=[i[0] for i in sim_scores]
    return movies_df["title"].iloc[movie_indices]

print("Recommendations based on your likes:")
for liked_movie in user_likes:
    recommendations=get_recommendations(liked_movie)
    print(f"Because you liked'{liked_movie}':")
    print(recommendations.to_string(index=False))
''')
406
+
407
def b6():
    """Print a user-based collaborative-filtering recommender demo snippet."""
    # NOTE(review): function-body indentation inside the snippet was
    # reconstructed; the diff source had lost all leading whitespace.
    print('''import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
ratings = {
'User': ['Alice', 'Bob', 'Charlie', 'David', 'Eve'],
'The Dark Knight': [5, 4, 0, 0, 1],
'Inception': [4, 5, 2, 0, 0],
'Toy Story': [1, 2, 5, 4, 0],
'Finding Nemo': [0, 0, 4, 5, 3],
'The Shawshank Redemption': [5, 0, 1, 0, 4]
}

import pandas as pd
ratings_df = pd.DataFrame(ratings).set_index('User')

def recommend_movies(user_name, ratings_df, n_recommendations=3):
    user_similarity = cosine_similarity(ratings_df.fillna(0))
    user_sim_df = pd.DataFrame(
        user_similarity,
        index=ratings_df.index,
        columns=ratings_df.index
    )
    user_sim_scores = user_sim_df[user_name]
    user_sim_scores = user_sim_scores.drop(user_name)
    similar_users = user_sim_scores.sort_values(ascending=False)
    target_user_ratings = ratings_df.loc[user_name]
    unseen_movies = target_user_ratings[target_user_ratings == 0].index

    recommendations = {}
    for movie in unseen_movies:
        weighted_ratings = 0
        similarity_sum = 0

        for other_user in similar_users.index:
            if ratings_df.loc[other_user, movie] > 0:
                weighted_ratings += similar_users[other_user] * ratings_df.loc[other_user, movie]
                similarity_sum += similar_users[other_user]
        if similarity_sum > 0:
            recommendations[movie] = weighted_ratings / similarity_sum
    recommended_movies = sorted(recommendations.items(), key=lambda x: x[1],reverse=True)
    return recommended_movies[:n_recommendations]


user = 'Alice'
recommendations = recommend_movies(user, ratings_df)
print(f"Recommendations for {user}:")
for movie, predicted_rating in recommendations:
    print(f"{movie} (predicted rating: {predicted_rating:.2f})")
''')
457
+
458
def b7():
    """Print an epsilon-greedy multi-armed-bandit simulation demo snippet."""
    # NOTE(review): loop-body indentation inside the snippet was reconstructed;
    # the diff source had lost all leading whitespace.
    print('''import numpy as np
import random

# Step 1: Define the slot machines (arms) with reward probabilities
true_probs = [0.3, 0.5, 0.7] # A, B, C

# Step 2: Parameters
epsilon = 0.2 # 20% explore
n_rounds = 50 # total plays
n_arms = len(true_probs)

# Step 3: Tracking variables
counts = np.zeros(n_arms)
# how many times each arm is pulled
rewards = np.zeros(n_arms)
# total rewards for each arm

# Step 4: Run simulation
history = []

for t in range(1, n_rounds+1):
    # ε-greedy: explore or exploit
    if random.random() < epsilon:
        choice = random.randint(0, n_arms-1) # explore
    else:
        choice = np.argmax(rewards / (counts + 1e-6)) # exploit best so far

    # Generate reward (1 or 0) based on true probability
    reward = 1 if random.random() < true_probs[choice] else 0

    # Update stats
    counts[choice] += 1
    rewards[choice] += reward

    # Save history
    avg_rewards = rewards / (counts + 1e-6)
    history.append((t, choice, reward, avg_rewards.copy()))

# Step 5: Print results
print("Final average rewards per arm:", rewards / counts)
print("Total rewards collected:", sum(rewards))
print("Arm chosen most often:", np.argmax(counts))

# Show detailed round history (first 10 rounds)
for h in history[:10]:
    print(f"Round {h[0]} | Chose Arm {h[1]} | Reward={h[2]} | Estimates={h[3]}")
''')
507
+
508
def b8():
    """Print a Monte-Carlo control (FrozenLake) reinforcement-learning demo snippet."""
    # NOTE(review): loop-body indentation inside the snippet was reconstructed;
    # the diff source had lost all leading whitespace.
    print('''import gymnasium as gym
import numpy as np
import random

# Create environment
env = gym.make("FrozenLake-v1", is_slippery=False)

# Initialize Q-table
Q = np.zeros((env.observation_space.n, env.action_space.n))

# Monte Carlo parameters
episodes = 5000
epsilon, gamma = 0.3, 1.0

returns_sum, returns_count = {}, {}

for _ in range(episodes):
    state = env.reset()[0]
    episode = []
    done = False

    while not done:
        # ε-greedy policy
        action = random.choice(range(env.action_space.n)) if random.random() < epsilon else np.argmax(Q[state])
        next_state, reward, done, _, _ = env.step(action)
        episode.append((state, action, reward))
        state = next_state

    # Update Q-values
    G, visited = 0, set()
    for s, a, r in reversed(episode):
        G = gamma * G + r
        if (s, a) not in visited:
            returns_sum[(s,a)] = returns_sum.get((s,a),0) + G
            returns_count[(s,a)] = returns_count.get((s,a),0) + 1
            Q[s,a] = returns_sum[(s,a)] / returns_count[(s,a)]
            visited.add((s,a))

# ------------------------------
# Derive Policy and Value Function
# ------------------------------
policy = np.argmax(Q, axis=1).reshape(4,4)
V = np.max(Q, axis=1).reshape(4,4) # Value Function

print("Learned Policy (0=Left,1=Down,2=Right,3=Up):\n", policy)
print("\nLearned Value Function (V):\n",V)''')
556
+
@@ -0,0 +1,37 @@
1
+ Metadata-Version: 2.4
2
+ Name: absd
3
+ Version: 0.1.0
4
+ Summary: testing python library
5
+ Home-page: https://github.com/pacyandrocash/absd
6
+ Author: blank
7
+ Author-email: pacyandrocash@gmail.com
8
+ License: MIT
9
+ Classifier: Programming Language :: Python :: 3
10
+ Classifier: Operating System :: OS Independent
11
+ Classifier: License :: OSI Approved :: MIT License
12
+ Requires-Python: >=3.7
13
+ Description-Content-Type: text/markdown
14
+ License-File: LICENSE
15
+ Dynamic: author
16
+ Dynamic: author-email
17
+ Dynamic: classifier
18
+ Dynamic: description
19
+ Dynamic: description-content-type
20
+ Dynamic: home-page
21
+ Dynamic: license
22
+ Dynamic: license-file
23
+ Dynamic: requires-python
24
+ Dynamic: summary
25
+
26
+ # absd
27
+
28
+ A simple Python library that prints 16 code snippets when functions are called.
29
+
30
+ ## Usage
31
+
32
+ ```python
33
+ import absd
34
+
35
+ absd.a1()
36
+ absd.b1()
37
+ ```
@@ -0,0 +1,9 @@
1
+ LICENSE
2
+ README.md
3
+ setup.py
4
+ absd/__init__.py
5
+ absd/messages.py
6
+ absd.egg-info/PKG-INFO
7
+ absd.egg-info/SOURCES.txt
8
+ absd.egg-info/dependency_links.txt
9
+ absd.egg-info/top_level.txt
@@ -0,0 +1 @@
1
+ absd
absd-0.1.0/setup.cfg ADDED
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+
absd-0.1.0/setup.py ADDED
@@ -0,0 +1,22 @@
1
from setuptools import setup, find_packages
from pathlib import Path

# Read the long description up front. The original passed a bare
# open("README.md").read() to setup(), which leaked an open file handle;
# Path.read_text opens, reads, and closes in one call.
long_description = Path("README.md").read_text(encoding="utf-8")

setup(
    name="absd",
    version="0.1.0",
    packages=find_packages(),
    install_requires=[],
    author="blank",
    author_email="pacyandrocash@gmail.com",
    description="testing python library",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/pacyandrocash/absd",
    classifiers=[
        "Programming Language :: Python :: 3",
        "Operating System :: OS Independent",
        "License :: OSI Approved :: MIT License",
    ],
    python_requires=">=3.7",
    license="MIT",
    license_files=["LICENSE"],
)
+ )