initdl 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
initdl-0.1.0/PKG-INFO ADDED
@@ -0,0 +1,7 @@
1
+ Metadata-Version: 2.4
2
+ Name: initdl
3
+ Version: 0.1.0
4
+ Summary: sss
5
+ Author: sss
6
+ Requires-Python: >=3.7
7
+ Description-Content-Type: text/markdown
@@ -0,0 +1,3 @@
1
# Package public API: re-export every experiment-script generator.
# NOTE(review): `create_all` is imported here but no definition for it is
# visible in experiments.py — confirm it exists, otherwise `import initdl`
# fails with ImportError at import time.
from .experiments import ex1, ex2, ex3, ex4, ex5a, ex5b, ex6a, ex6b, ex6c, ex7, ex8, ex9, create_all

# Keep in sync with the version declared in pyproject.toml.
__version__ = "0.1.0"
@@ -0,0 +1,326 @@
1
+ import os
2
+
3
+ def _write(filename, code):
4
+ with open(filename, "w", encoding="utf-8") as f:
5
+ f.write(code)
6
+
7
+
8
def ex1():
    """Write ``ex1.py``: fit a single-neuron Keras model to y = 3x + 2.

    Bug fix: the two header lines inside the generated file were bare prose,
    which made the emitted script a SyntaxError; they are now ``#`` comments.
    """
    code = '''
# EX1: DEEP NEURAL NETWORK
# REQUIRED LIBRARIES: numpy, tensorflow, matplotlib

import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import matplotlib.pyplot as plt

x=np.array([0,1,2,3,4,5],dtype=float)
y=3*x+2

model=keras.Sequential([layers.Dense(1,input_shape=[1])])
model.compile(optimizer='adam',loss='mse')

history=model.fit(x,y,epochs=500,verbose=0)

weights=model.layers[0].get_weights()
print("Learned weight (slope):",weights[0][0][0])
print("Learned bias:",weights[1][0])

test_value=np.array([10.0])
prediction=model.predict(test_value)[0][0]
print("Prediction for x=10:",prediction)

plt.plot(history.history['loss'])
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.title("Training Loss Curve")
plt.show()
'''
    _write("ex1.py", code)
42
+
43
+
44
def ex2():
    """Write ``ex2.py``: transcribe ``audio.wav`` via Google speech recognition.

    Bug fix: the prose header lines in the generated file were not comments
    (SyntaxError when the emitted script ran); they are now ``#`` comments,
    and the ``with``/``try`` bodies carry proper indentation.
    """
    code = '''
# EX2: SPEECH TO TEXT
# REQUIRED LIBRARIES: speechrecognition

import speech_recognition as sr

r=sr.Recognizer()

with sr.AudioFile("audio.wav") as source:
    audio=r.record(source)

try:
    text=r.recognize_google(audio)
    print("Recognized text:",text)
except sr.UnknownValueError:
    print("Sorry, could not understand the audio")
except sr.RequestError as e:
    print("Could not request results;",e)
'''
    _write("ex2.py", code)
65
+
66
+
67
def ex3():
    """Write ``ex3.py``: speak user-entered text with pyttsx3.

    Bug fix: the prose header lines in the generated file were not comments,
    so the emitted script was a SyntaxError; they are now ``#`` comments.
    """
    code = '''
# EX3: TEXT TO SPEECH
# REQUIRED LIBRARIES: pyttsx3

import pyttsx3

engine=pyttsx3.init()
engine.setProperty('rate',170)
engine.setProperty('volume',0.9)

voices=engine.getProperty('voices')
engine.setProperty('voice',voices[1].id)

text=input("Enter the text to convert to speech: ")

engine.say(text)
engine.runAndWait()
'''
    _write("ex3.py", code)
87
+
88
+
89
def ex4():
    """Write ``ex4.py``: LSTM forecasting on a synthetic sine-wave series.

    Bug fix: prose header lines in the generated file were not comments
    (SyntaxError on run); they are now ``#`` comments, and the nested
    ``create_sequences`` function and its loop carry proper indentation.
    """
    code = '''
# EX4: TIME SERIES FORECASTING WITH LSTM
# REQUIRED LIBRARIES: numpy, pandas, matplotlib, scikit-learn, tensorflow

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM,Dense

time=np.arange(0,100,0.1)
data=np.sin(time)

df=pd.DataFrame(data,columns=['value'])

scaler=MinMaxScaler(feature_range=(0,1))
scaled_data=scaler.fit_transform(df)

def create_sequences(data,time_step=10):
    X=[]
    y=[]
    for i in range(len(data)-time_step):
        X.append(data[i:i+time_step,0])
        y.append(data[i+time_step,0])
    return np.array(X),np.array(y)

time_step=10

X,y=create_sequences(scaled_data,time_step)
X=X.reshape(X.shape[0],X.shape[1],1)

train_size=int(len(X)*0.8)

X_train=X[:train_size]
X_test=X[train_size:]

y_train=y[:train_size]
y_test=y[train_size:]

model=Sequential()
model.add(LSTM(50,input_shape=(time_step,1)))
model.add(Dense(1))

model.compile(loss='mean_squared_error',optimizer='adam')

model.fit(X_train,y_train,epochs=10,batch_size=32)

plt.plot(df.values)
plt.show()
'''
    _write("ex4.py", code)
142
+
143
+
144
def ex5a():
    """Write ``ex5a.py``: feedforward network learning XOR (single gate).

    Bug fix: prose header lines in the generated file were not comments
    (SyntaxError on run); they are now ``#`` comments, and the layer list
    is conventionally indented.
    """
    code = '''
# EX5A: FEEDFORWARD NEURAL NETWORK FOR SINGLE LOGIC GATE
# REQUIRED LIBRARIES: numpy, tensorflow

import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense,Input

inputs=np.array([[0,0],[0,1],[1,0],[1,1]])
outputs=np.array([[0],[1],[1],[0]])

model=Sequential([
    Input(shape=(2,)),
    Dense(4,activation='relu'),
    Dense(1,activation='sigmoid')
])

model.compile(optimizer='adam',loss='binary_crossentropy')
model.fit(inputs,outputs,epochs=3000,verbose=0)
'''
    _write("ex5a.py", code)
166
+
167
+
168
def ex5b():
    """Write ``ex5b.py``: one network learning AND/OR/XOR simultaneously.

    Bug fix: prose header lines in the generated file were not comments
    (SyntaxError on run); they are now ``#`` comments, and the nested
    array/layer literals are conventionally indented.
    """
    code = '''
# EX5B: FEEDFORWARD NEURAL NETWORK FOR MULTIPLE LOGIC GATES
# REQUIRED LIBRARIES: numpy, tensorflow

import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense,Input

inputs=np.array([[0,0],[0,1],[1,0],[1,1]])

outputs=np.array([
    [0,0,0],
    [0,1,1],
    [0,1,1],
    [1,1,0]
])

model=Sequential([
    Input(shape=(2,)),
    Dense(6,activation='relu'),
    Dense(3,activation='sigmoid')
])

model.compile(optimizer='adam',loss='binary_crossentropy')
model.fit(inputs,outputs,epochs=4000,verbose=0)
'''
    _write("ex5b.py", code)
196
+
197
+
198
def ex6a():
    """Write ``ex6a.py``: show the skimage coffee sample in grayscale.

    Bug fix: prose header lines in the generated file were not comments,
    so the emitted script was a SyntaxError; they are now ``#`` comments.
    """
    code = '''
# EX6A: RGB TO GRAYSCALE
# REQUIRED LIBRARIES: scikit-image, matplotlib

from skimage import data
from skimage.color import rgb2gray
import matplotlib.pyplot as plt

coffee=data.coffee()

plt.imshow(rgb2gray(coffee),cmap="gray")
plt.show()
'''
    _write("ex6a.py", code)
213
+
214
+
215
def ex6b():
    """Write ``ex6b.py``: show the skimage coffee sample converted to HSV.

    Bug fix: prose header lines in the generated file were not comments,
    so the emitted script was a SyntaxError; they are now ``#`` comments.
    """
    code = '''
# EX6B: RGB TO HSV
# REQUIRED LIBRARIES: scikit-image, matplotlib

from skimage import data
from skimage.color import rgb2hsv
import matplotlib.pyplot as plt

coffee=data.coffee()

plt.imshow(rgb2hsv(coffee))
plt.show()
'''
    _write("ex6b.py", code)
230
+
231
+
232
def ex6c():
    """Write ``ex6c.py``: threshold-based segmentation of the coffee sample.

    Bug fix: prose header lines in the generated file were not comments,
    so the emitted script was a SyntaxError; they are now ``#`` comments.
    """
    code = '''
# EX6C: SUPERVISED SEGMENTATION
# REQUIRED LIBRARIES: scikit-image, matplotlib

from skimage import data
from skimage.color import rgb2gray
import matplotlib.pyplot as plt

coffee=data.coffee()
gray=rgb2gray(coffee)

plt.imshow(gray>0.5,cmap='gray')
plt.show()
'''
    _write("ex6c.py", code)
248
+
249
+
250
def ex7():
    """Write ``ex7.py``: YOLOv8 object detection on ``nun.jpg``.

    Bug fix: prose header lines in the generated file were not comments
    (SyntaxError on run); they are now ``#`` comments, and the result
    loop body carries proper indentation.
    """
    code = '''
# EX7: OBJECT DETECTION USING YOLO
# REQUIRED LIBRARIES: ultralytics, opencv-python

from ultralytics import YOLO
import cv2

model=YOLO("yolov8n.pt")
results=model("nun.jpg")

for r in results:
    cv2.imshow("Result",r.plot())
    cv2.waitKey(0)

cv2.destroyAllWindows()
'''
    _write("ex7.py", code)
268
+
269
+
270
def ex8():
    """Write ``ex8.py``: small CNN classifier trained on MNIST digits.

    Bug fix: prose header lines in the generated file were not comments
    (SyntaxError on run); they are now ``#`` comments, and the layer list
    is conventionally indented.
    """
    code = '''
# EX8: CHARACTER RECOGNITION USING CNN
# REQUIRED LIBRARIES: tensorflow, numpy, opencv-python, matplotlib

import tensorflow as tf
from tensorflow.keras import layers,models
import numpy as np
import cv2
import matplotlib.pyplot as plt

(x_train,y_train),(x_test,y_test)=tf.keras.datasets.mnist.load_data()

x_train=x_train/255.0
x_train=x_train.reshape((-1,28,28,1))

model=models.Sequential([
    layers.Conv2D(32,(3,3),activation='relu',input_shape=(28,28,1)),
    layers.MaxPooling2D(),
    layers.Flatten(),
    layers.Dense(10,activation='softmax')
])

model.compile(optimizer='adam',loss='sparse_categorical_crossentropy')
model.fit(x_train,y_train,epochs=3)
'''
    _write("ex8.py", code)
297
+
298
+
299
def ex9():
    """Write ``ex9.py``: dense autoencoder trained on flattened MNIST images.

    Bug fix: prose header lines in the generated file were not comments,
    so the emitted script was a SyntaxError; they are now ``#`` comments.
    """
    code = '''
# EX9: AUTOENCODER USING MNIST
# REQUIRED LIBRARIES: tensorflow, numpy, matplotlib

import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.datasets import mnist
from tensorflow.keras.layers import Input,Dense
from tensorflow.keras.models import Model

(x_train,_),(x_test,_)=mnist.load_data()

x_train=x_train/255.0
x_train=x_train.reshape((len(x_train),784))

input_img=Input(shape=(784,))
encoded=Dense(32,activation='relu')(input_img)
decoded=Dense(784,activation='sigmoid')(encoded)

model=Model(input_img,decoded)
model.compile(optimizer='adam',loss='binary_crossentropy')
model.fit(x_train,x_train,epochs=5)

plt.imshow(x_train[0].reshape(28,28),cmap='gray')
plt.show()
'''
    _write("ex9.py", code)


def create_all():
    """Generate every experiment script (ex1.py … ex9.py) in the cwd.

    Bug fix: ``initdl/__init__.py`` imports ``create_all`` but no such
    function existed in this module, so ``import initdl`` raised
    ImportError; this definition restores the advertised API.
    """
    for generator in (ex1, ex2, ex3, ex4, ex5a, ex5b,
                      ex6a, ex6b, ex6c, ex7, ex8, ex9):
        generator()
@@ -0,0 +1,7 @@
1
+ Metadata-Version: 2.4
2
+ Name: initdl
3
+ Version: 0.1.0
4
+ Summary: sss
5
+ Author: sss
6
+ Requires-Python: >=3.7
7
+ Description-Content-Type: text/markdown
@@ -0,0 +1,7 @@
1
+ pyproject.toml
2
+ initdl/__init__.py
3
+ initdl/experiments.py
4
+ initdl.egg-info/PKG-INFO
5
+ initdl.egg-info/SOURCES.txt
6
+ initdl.egg-info/dependency_links.txt
7
+ initdl.egg-info/top_level.txt
@@ -0,0 +1 @@
1
+ initdl
@@ -0,0 +1,11 @@
1
+ [project]
2
+ name = "initdl"
3
+ version = "0.1.0"
4
+ description = "sss"
5
+ readme = "README.md"
6
+ authors = [{ name = "sss" }]
7
+ requires-python = ">=3.7"
8
+
9
+ dependencies = [
10
+
11
+ ]
initdl-0.1.0/setup.cfg ADDED
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+