sklip 0.4.0__tar.gz → 0.6.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {sklip-0.4.0 → sklip-0.6.0}/PKG-INFO +1 -1
- {sklip-0.4.0 → sklip-0.6.0}/pyproject.toml +1 -1
- {sklip-0.4.0 → sklip-0.6.0}/sklip/__init__.py +1 -1
- sklip-0.6.0/sklip/core.py +402 -0
- {sklip-0.4.0 → sklip-0.6.0}/sklip.egg-info/PKG-INFO +1 -1
- sklip-0.4.0/sklip/core.py +0 -42
- {sklip-0.4.0 → sklip-0.6.0}/setup.cfg +0 -0
- {sklip-0.4.0 → sklip-0.6.0}/sklip.egg-info/SOURCES.txt +0 -0
- {sklip-0.4.0 → sklip-0.6.0}/sklip.egg-info/dependency_links.txt +0 -0
- {sklip-0.4.0 → sklip-0.6.0}/sklip.egg-info/requires.txt +0 -0
- {sklip-0.4.0 → sklip-0.6.0}/sklip.egg-info/top_level.txt +0 -0
|
@@ -0,0 +1,402 @@
|
|
|
1
|
+
import pyperclip
|
|
2
|
+
|
|
3
|
+
SNIPPETS = {
|
|
4
|
+
1: """#### Задание:
|
|
5
|
+
* Обучение модели - выборку проверить на дисбаланс, если необходимо устранить. Рассмотреть не менее 3 моделей классификации. Обучить выбранную модель, оценить ее качество.
|
|
6
|
+
* Организовать непрерывное обучение
|
|
7
|
+
* Расссмотреть не менее 3 моделей регрессии, выбрать модель
|
|
8
|
+
|
|
9
|
+
#### Результат модуля:
|
|
10
|
+
* Выбрана модель классификации
|
|
11
|
+
*
|
|
12
|
+
* Аналитическая записка с данными, графиками, выводами
|
|
13
|
+
""",
|
|
14
|
+
2: """labels = cluster.labels_
|
|
15
|
+
cluster_sizes = np.bincount(labels)
|
|
16
|
+
print("Размеры кластеров:", cluster_sizes)
|
|
17
|
+
""",
|
|
18
|
+
3: """from sklearn.metrics import davies_bouldin_score
|
|
19
|
+
dbi = davies_bouldin_score(X_scaled, labels)
|
|
20
|
+
print("Индекс Дэвиса-Болдина:", dbi)""",
|
|
21
|
+
4: """df["Cluster"].value_counts()""",
|
|
22
|
+
5: """indices_to_drop = df[df['Cluster'] == 0].index[:10]
|
|
23
|
+
df = df.drop(indices_to_drop)""",
|
|
24
|
+
6: """from sklearn.metrics import accuracy_score, mean_absolute_error, mean_squared_error, precision_score, recall_score, f1_score, confusion_matrix, r2_score
|
|
25
|
+
from sklearn.metrics import classification_report
|
|
26
|
+
from sklearn.metrics import confusion_matrix
|
|
27
|
+
|
|
28
|
+
from sklearn.neighbors import KNeighborsClassifier
|
|
29
|
+
from sklearn.svm import SVC
|
|
30
|
+
from sklearn.ensemble import RandomForestClassifier
|
|
31
|
+
from sklearn.naive_bayes import GaussianNB
|
|
32
|
+
from sklearn.linear_model import LogisticRegression
|
|
33
|
+
from sklearn.model_selection import train_test_split""",
|
|
34
|
+
7: """X = df.drop(['Cluster','datetime','data','region'], axis=1)
|
|
35
|
+
y = df['Cluster']""",
|
|
36
|
+
8: """X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)""",
|
|
37
|
+
9: """knn = KNeighborsClassifier()
|
|
38
|
+
knn.fit(X_train, y_train)""",
|
|
39
|
+
10: """import joblib
|
|
40
|
+
knn_clf = KNeighborsClassifier().fit(X_train, y_train)
|
|
41
|
+
joblib.dump(knn_clf, 'classifier_model.pkl')
|
|
42
|
+
|
|
43
|
+
""",
|
|
44
|
+
11: """knn_predict = knn.predict(X_test)""",
|
|
45
|
+
12: """knn_train_predict = knn.predict(X_train)
|
|
46
|
+
print('train accuracy: ', accuracy_score(y_train, knn_train_predict, normalize=True))
|
|
47
|
+
print('test accuracy: ', accuracy_score(y_test, knn_predict, normalize=True))""",
|
|
48
|
+
13: """knn_accuracy = accuracy_score(y_test, knn_predict, normalize=True)""",
|
|
49
|
+
14: """print(classification_report(y_test, knn_predict))""",
|
|
50
|
+
15: """cm = confusion_matrix(y_test, knn_predict)
|
|
51
|
+
|
|
52
|
+
sns.heatmap(cm, annot=True, fmt='d', cmap='Reds')
|
|
53
|
+
plt.title('Confusion Matrix')
|
|
54
|
+
plt.xlabel('Predicted Label')
|
|
55
|
+
plt.ylabel('True Label')
|
|
56
|
+
plt.show()""",
|
|
57
|
+
16: """from matplotlib.pylab import rc, plot
|
|
58
|
+
from sklearn.metrics import roc_curve
|
|
59
|
+
|
|
60
|
+
sns.set(font_scale=1)
|
|
61
|
+
sns.set_color_codes("muted")
|
|
62
|
+
|
|
63
|
+
plt.figure(figsize=(6, 4))
|
|
64
|
+
fpr, tpr, thresholds = roc_curve(y_test, knn_predict, pos_label=1)
|
|
65
|
+
lw = 2
|
|
66
|
+
plt.plot(fpr, tpr, lw=lw, label='ROC curve ')
|
|
67
|
+
plt.plot([0, 1], [0, 1])
|
|
68
|
+
plt.xlim([0.0, 1.0])
|
|
69
|
+
plt.ylim([0.0, 1.05])
|
|
70
|
+
plt.xlabel('False Positive Rate')
|
|
71
|
+
plt.ylabel('True Positive Rate')
|
|
72
|
+
plt.title('ROC curve')
|
|
73
|
+
plt.savefig("ROC.png")
|
|
74
|
+
plt.show()""",
|
|
75
|
+
17: """gnb = GaussianNB()
|
|
76
|
+
gnb.fit(X_train, y_train)""",
|
|
77
|
+
18: """svc = SVC()
|
|
78
|
+
svc.fit(X_train, y_train)""",
|
|
79
|
+
19: """
|
|
80
|
+
metrics_k = ['KNN accurancy', 'SVC accurancy','GNB accuracy']
|
|
81
|
+
scores_k = [knn_accuracy, svc_accuracy,gnb_accuracy]
|
|
82
|
+
|
|
83
|
+
plt.figure(figsize=(10, 6))
|
|
84
|
+
plt.bar(metrics_k, scores_k, color=['blue', 'green'])
|
|
85
|
+
plt.ylabel('Scores')
|
|
86
|
+
plt.title('Comparison of Clustering Metrics')
|
|
87
|
+
|
|
88
|
+
for i, v in enumerate(scores_k):
|
|
89
|
+
plt.text(i, v, f'{v:.4f}', ha='center', va='bottom')
|
|
90
|
+
|
|
91
|
+
plt.show()""",
|
|
92
|
+
20: """import pandas as pd
|
|
93
|
+
import numpy as np
|
|
94
|
+
from sklearn.model_selection import train_test_split
|
|
95
|
+
from sklearn.preprocessing import StandardScaler, LabelEncoder
|
|
96
|
+
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
|
|
97
|
+
from sklearn.linear_model import LinearRegression
|
|
98
|
+
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
|
|
99
|
+
import warnings; warnings.filterwarnings('ignore')
|
|
100
|
+
""",
|
|
101
|
+
21: """X = df.drop(['Cluster','datetime','data','region'], axis=1)
|
|
102
|
+
y = df['step_frequency']
|
|
103
|
+
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
|
|
104
|
+
scaler = StandardScaler()
|
|
105
|
+
X_train_scaled = scaler.fit_transform(X_train)
|
|
106
|
+
X_test_scaled = scaler.transform(X_test)
|
|
107
|
+
|
|
108
|
+
lr = LinearRegression()
|
|
109
|
+
lr.fit(X_train_scaled, y_train)
|
|
110
|
+
lr_pred = lr.predict(X_test_scaled)
|
|
111
|
+
rf = RandomForestRegressor(n_estimators=100, random_state=42)
|
|
112
|
+
rf.fit(X_train_scaled, y_train)
|
|
113
|
+
rf_pred = rf.predict(X_test_scaled)
|
|
114
|
+
gbr = GradientBoostingRegressor(n_estimators=100, random_state=42)
|
|
115
|
+
gbr.fit(X_train_scaled, y_train)
|
|
116
|
+
gbr_pred = gbr.predict(X_test_scaled)
|
|
117
|
+
""",
|
|
118
|
+
22:"""def print_regression_report(y_true, y_pred, model_name):
|
|
119
|
+
mae = mean_absolute_error(y_true, y_pred)
|
|
120
|
+
rmse = np.sqrt(mean_squared_error(y_true, y_pred))
|
|
121
|
+
r2 = r2_score(y_true, y_pred)
|
|
122
|
+
print(f"{model_name}:\\n MAE: {mae:.2f} шаг/мин | RMSE: {rmse:.2f} | R²: {r2:.3f}")
|
|
123
|
+
|
|
124
|
+
print_regression_report(y_test, lr_pred, "LinearRegression")
|
|
125
|
+
print_regression_report(y_test, rf_pred, "RandomForest")
|
|
126
|
+
print_regression_report(y_test, gbr_pred, "GradientBoosting")""",
|
|
127
|
+
23:"""from fastapi import FastAPI, HTTPException
|
|
128
|
+
import pandas as pd
|
|
129
|
+
import joblib
|
|
130
|
+
import numpy as np
|
|
131
|
+
import warnings
|
|
132
|
+
|
|
133
|
+
warnings.filterwarnings("ignore")""",
|
|
134
|
+
24:"""app = FastAPI()
|
|
135
|
+
df = pd.read_csv('tracks.csv')
|
|
136
|
+
model = joblib.load('classifier_model.pkl')
|
|
137
|
+
n_features = model.n_features_in_""",
|
|
138
|
+
25:"""@app.get("/tracks")
|
|
139
|
+
def get_tracks():
|
|
140
|
+
return {"track_ids": sorted(df['track_id'].astype(int).unique().tolist())}
|
|
141
|
+
""",
|
|
142
|
+
26:"""@app.post("/predict/{track_id}")
|
|
143
|
+
def predict_track(track_id: int):
|
|
144
|
+
track = df[df['track_id'] == track_id]
|
|
145
|
+
if len(track) == 0:
|
|
146
|
+
raise HTTPException(404, f"Track {track_id} not found")
|
|
147
|
+
|
|
148
|
+
cols = ['track_id', 'point_id'""",
|
|
149
|
+
27:""" risks = []
|
|
150
|
+
for _, point in track.iterrows():
|
|
151
|
+
features = point[cols].fillna(0).values
|
|
152
|
+
if len(features) < n_features:
|
|
153
|
+
features = np.pad(features, (0, n_features - len(features)), 'constant')
|
|
154
|
+
risk = model.predict(features.reshape(1, -1))[0]
|
|
155
|
+
risks.append(int(risk))
|
|
156
|
+
""",
|
|
157
|
+
28:""" points_with_risk = track[['track_id', 'point_id', 'latitude', 'longitude',
|
|
158
|
+
'elevation', 'temp', 'speed', 'step_frequency']].copy()
|
|
159
|
+
points_with_risk['risk'] = risks""",
|
|
160
|
+
29:""" points_json = []
|
|
161
|
+
for _, row in points_with_risk.iterrows():
|
|
162
|
+
points_json.append({
|
|
163
|
+
"track_id": int(row['track_id']),
|
|
164
|
+
"point_id": int(row['point_id']),
|
|
165
|
+
"latitude": float(row['latitude']),
|
|
166
|
+
"longitude": float(row['longitude']),
|
|
167
|
+
"elevation": float(row['elevation']),
|
|
168
|
+
"temp": float(row['temp']),
|
|
169
|
+
"speed": float(row['speed']),
|
|
170
|
+
"step_frequency": float(row['step_frequency']),
|
|
171
|
+
"risk": int(row['risk'])
|
|
172
|
+
})""",
|
|
173
|
+
30:""" return {
|
|
174
|
+
"track_id": int(track_id),
|
|
175
|
+
"total_points": int(len(points_json)),
|
|
176
|
+
"points": points_json
|
|
177
|
+
}""",
|
|
178
|
+
31:"""import streamlit as st
|
|
179
|
+
import folium
|
|
180
|
+
import pandas as pd
|
|
181
|
+
import requests
|
|
182
|
+
import numpy as np
|
|
183
|
+
""",
|
|
184
|
+
32:"""@st.cache_data(ttl=300)
|
|
185
|
+
def get_tracks():
|
|
186
|
+
try:
|
|
187
|
+
response = requests.get("http://127.0.0.1:8000/tracks")
|
|
188
|
+
return response.json()["track_ids"]
|
|
189
|
+
except:
|
|
190
|
+
st.error("")
|
|
191
|
+
return []""",
|
|
192
|
+
33:"""track_ids = get_tracks()
|
|
193
|
+
selected_track = st.selectbox("Выбери трек:", track_ids)
|
|
194
|
+
""",
|
|
195
|
+
34:"""if selected_track is not None and selected_track != "":
|
|
196
|
+
@st.cache_data(ttl=300)
|
|
197
|
+
def get_track_data(track_id):
|
|
198
|
+
try:
|
|
199
|
+
response = requests.post(f"http://127.0.0.1:8000/predict/{track_id}")
|
|
200
|
+
return response.json()
|
|
201
|
+
except:
|
|
202
|
+
st.error("Ошибка API!")
|
|
203
|
+
return None""",
|
|
204
|
+
35:"""track_data = get_track_data(selected_track)
|
|
205
|
+
|
|
206
|
+
if track_data and "points" in track_data:
|
|
207
|
+
points = pd.DataFrame(track_data["points"])
|
|
208
|
+
|
|
209
|
+
center_lat = points['latitude'].mean()
|
|
210
|
+
center_lon = points['longitude'].mean()
|
|
211
|
+
|
|
212
|
+
m = folium.Map(
|
|
213
|
+
location=[center_lat, center_lon],
|
|
214
|
+
zoom_start=14
|
|
215
|
+
)""",
|
|
216
|
+
36:"""avg_risk = points['risk'].mean()
|
|
217
|
+
line_color = 'green' if avg_risk < 1 else 'orange' if avg_risk < 1.5 else 'red'
|
|
218
|
+
|
|
219
|
+
folium.PolyLine(
|
|
220
|
+
list(zip(points['latitude'], points['longitude'])),
|
|
221
|
+
color=line_color,
|
|
222
|
+
weight=4,
|
|
223
|
+
opacity=0.8,
|
|
224
|
+
popup=f"Трек {selected_track} (ср. риск: {avg_risk:.1f})"
|
|
225
|
+
).add_to(m)""",
|
|
226
|
+
37:""" for _, point in points.iterrows():
|
|
227
|
+
color = 'green' if point['risk'] == 0 else 'orange' if point['risk'] == 1 else 'red'
|
|
228
|
+
folium.CircleMarker(
|
|
229
|
+
location=[point['latitude'], point['longitude']],
|
|
230
|
+
radius=6,
|
|
231
|
+
color=color,
|
|
232
|
+
fill=True,
|
|
233
|
+
fillColor=color,
|
|
234
|
+
fillOpacity=0.7,
|
|
235
|
+
popup=f"Точка {point['point_id']}<br>"
|
|
236
|
+
f"Риск: {point['risk']}<br>"
|
|
237
|
+
f"Высота: {point['elevation']:.0f}м<br>"
|
|
238
|
+
f"Скорость: {point['speed']:.1f}м/с"
|
|
239
|
+
).add_to(m)""",
|
|
240
|
+
38:""" legend_html = '''
|
|
241
|
+
<div style="position: fixed; bottom: 50px; right: 50px; width: 220px; height: auto;
|
|
242
|
+
background-color: white; border:2px solid grey; z-index:9999;
|
|
243
|
+
font-size:14px; padding: 12px; border-radius: 5px; box-shadow: 0 0 10px rgba(0,0,0,0.2);">
|
|
244
|
+
<p style="margin: 0 0 10px 0;"><b>Легенда рисков</b></p>
|
|
245
|
+
<p style="margin: 8px 0;">
|
|
246
|
+
<span style="color: green; font-size: 18px;">●</span>
|
|
247
|
+
Низкий риск (0)
|
|
248
|
+
</p>
|
|
249
|
+
<p style="margin: 8px 0;">
|
|
250
|
+
<span style="color: orange; font-size: 18px;">●</span>
|
|
251
|
+
Средний риск (1)
|
|
252
|
+
</p>
|
|
253
|
+
<p style="margin: 8px 0;">
|
|
254
|
+
<span style="color: red; font-size: 18px;">●</span>
|
|
255
|
+
Высокий риск (2)
|
|
256
|
+
</p>
|
|
257
|
+
<p style="margin: 5px 0; font-size: 11px; color: #666;">
|
|
258
|
+
Линия: средний риск трека
|
|
259
|
+
</p>
|
|
260
|
+
</div>
|
|
261
|
+
'''""",
|
|
262
|
+
39:"""m.get_root().html.add_child(folium.Element(legend_html))
|
|
263
|
+
|
|
264
|
+
|
|
265
|
+
st.components.v1.html(m._repr_html_(), height=600)
|
|
266
|
+
|
|
267
|
+
|
|
268
|
+
else:
|
|
269
|
+
st.error("Трек не найден!")
|
|
270
|
+
""",
|
|
271
|
+
40:"""import streamlit as st
|
|
272
|
+
import folium
|
|
273
|
+
import pandas as pd
|
|
274
|
+
import requests
|
|
275
|
+
import numpy as np
|
|
276
|
+
|
|
277
|
+
st.title("Карта")
|
|
278
|
+
|
|
279
|
+
|
|
280
|
+
@st.cache_data(ttl=300)
|
|
281
|
+
def get_tracks():
|
|
282
|
+
try:
|
|
283
|
+
response = requests.get("http://127.0.0.1:8000/tracks")
|
|
284
|
+
return response.json()["track_ids"]
|
|
285
|
+
except:
|
|
286
|
+
st.error(" API недоступен! Запусти: python -m uvicorn api:app --reload")
|
|
287
|
+
return []
|
|
288
|
+
|
|
289
|
+
|
|
290
|
+
track_ids = get_tracks()
|
|
291
|
+
selected_track = st.selectbox("Выбери трек:", track_ids)
|
|
292
|
+
|
|
293
|
+
if selected_track is not None and selected_track != "":
|
|
294
|
+
@st.cache_data(ttl=300)
|
|
295
|
+
def get_track_data(track_id):
|
|
296
|
+
try:
|
|
297
|
+
response = requests.post(f"http://127.0.0.1:8000/predict/{track_id}")
|
|
298
|
+
return response.json()
|
|
299
|
+
except:
|
|
300
|
+
st.error("Ошибка API!")
|
|
301
|
+
return None
|
|
302
|
+
|
|
303
|
+
|
|
304
|
+
track_data = get_track_data(selected_track)
|
|
305
|
+
|
|
306
|
+
if track_data and "points" in track_data:
|
|
307
|
+
points = pd.DataFrame(track_data["points"])
|
|
308
|
+
|
|
309
|
+
center_lat = points['latitude'].mean()
|
|
310
|
+
center_lon = points['longitude'].mean()
|
|
311
|
+
|
|
312
|
+
m = folium.Map(
|
|
313
|
+
location=[center_lat, center_lon],
|
|
314
|
+
zoom_start=14
|
|
315
|
+
)
|
|
316
|
+
|
|
317
|
+
avg_risk = points['risk'].mean()
|
|
318
|
+
line_color = 'green' if avg_risk < 1 else 'orange' if avg_risk < 1.5 else 'red'
|
|
319
|
+
|
|
320
|
+
folium.PolyLine(
|
|
321
|
+
list(zip(points['latitude'], points['longitude'])),
|
|
322
|
+
color=line_color,
|
|
323
|
+
weight=4,
|
|
324
|
+
opacity=0.8,
|
|
325
|
+
popup=f"Трек {selected_track} (ср. риск: {avg_risk:.1f})"
|
|
326
|
+
).add_to(m)
|
|
327
|
+
|
|
328
|
+
for _, point in points.iterrows():
|
|
329
|
+
color = 'green' if point['risk'] == 0 else 'orange' if point['risk'] == 1 else 'red'
|
|
330
|
+
folium.CircleMarker(
|
|
331
|
+
location=[point['latitude'], point['longitude']],
|
|
332
|
+
radius=6,
|
|
333
|
+
color=color,
|
|
334
|
+
fill=True,
|
|
335
|
+
fillColor=color,
|
|
336
|
+
fillOpacity=0.7,
|
|
337
|
+
popup=f"Точка {point['point_id']}<br>"
|
|
338
|
+
f"Риск: {point['risk']}<br>"
|
|
339
|
+
f"Высота: {point['elevation']:.0f}м<br>"
|
|
340
|
+
f"Скорость: {point['speed']:.1f}м/с"
|
|
341
|
+
).add_to(m)
|
|
342
|
+
|
|
343
|
+
legend_html = '''
|
|
344
|
+
<div style="position: fixed; bottom: 50px; right: 50px; width: 220px; height: auto;
|
|
345
|
+
background-color: white; border:2px solid grey; z-index:9999;
|
|
346
|
+
font-size:14px; padding: 12px; border-radius: 5px; box-shadow: 0 0 10px rgba(0,0,0,0.2);">
|
|
347
|
+
<p style="margin: 0 0 10px 0;"><b>Легенда рисков</b></p>
|
|
348
|
+
<p style="margin: 8px 0;">
|
|
349
|
+
<span style="color: green; font-size: 18px;">●</span>
|
|
350
|
+
Низкий риск (0)
|
|
351
|
+
</p>
|
|
352
|
+
<p style="margin: 8px 0;">
|
|
353
|
+
<span style="color: orange; font-size: 18px;">●</span>
|
|
354
|
+
Средний риск (1)
|
|
355
|
+
</p>
|
|
356
|
+
<p style="margin: 8px 0;">
|
|
357
|
+
<span style="color: red; font-size: 18px;">●</span>
|
|
358
|
+
Высокий риск (2)
|
|
359
|
+
</p>
|
|
360
|
+
<p style="margin: 5px 0; font-size: 11px; color: #666;">
|
|
361
|
+
Линия: средний риск трека
|
|
362
|
+
</p>
|
|
363
|
+
</div>
|
|
364
|
+
'''
|
|
365
|
+
m.get_root().html.add_child(folium.Element(legend_html))
|
|
366
|
+
|
|
367
|
+
|
|
368
|
+
st.components.v1.html(m._repr_html_(), height=600)
|
|
369
|
+
|
|
370
|
+
|
|
371
|
+
else:
|
|
372
|
+
st.error("Трек не найден!")
|
|
373
|
+
""",
|
|
374
|
+
41:"""@echo off
|
|
375
|
+
echo Запуск API + Streamlit...
|
|
376
|
+
start "FastAPI" cmd /k "cd /d C:PycharmProjects\MapPy && .venv\Scripts\activate && python -m uvicorn api:app --reload --port 8000"
|
|
377
|
+
timeout /t 3
|
|
378
|
+
start "Streamlit" cmd /k "cd /d C:PycharmProjects\MapPy && .venv\Scripts\activate && streamlit run app.py --server.port 8501"
|
|
379
|
+
|
|
380
|
+
echo API: http://127.0.0.1:8000/docs
|
|
381
|
+
echo Streamlit: http://127.0.0.1:8501
|
|
382
|
+
pause
|
|
383
|
+
""",
|
|
384
|
+
}
|
|
385
|
+
|
|
386
|
+
def s(id: int = 1) -> bool:
    """Copy snippet *id* from the module-level SNIPPETS table to the clipboard.

    Falls back to snippet 1 when *id* is not a known key.

    Args:
        id: Key into ``SNIPPETS`` (defaults to 1). NOTE: shadows the
            builtin ``id``; the name is kept so existing keyword callers
            (``s(id=3)``) keep working.

    Returns:
        True once the text has been placed on the clipboard.
    """
    text = SNIPPETS.get(id, SNIPPETS[1])
    pyperclip.copy(text)
    # Bug fix: the signature declares ``-> bool`` but the original body
    # implicitly returned None; return True to honor the contract.
    return True
|
|
390
|
+
def ClusterKMeans(id: int = 1) -> bool:
    """Copy snippet *id* to the clipboard (alias-style duplicate of ``s``).

    Falls back to snippet 1 when *id* is not a known key.

    Args:
        id: Key into ``SNIPPETS`` (defaults to 1). NOTE: shadows the
            builtin ``id``; kept for backward compatibility with callers.

    Returns:
        True once the text has been placed on the clipboard.
    """
    text = SNIPPETS.get(id, SNIPPETS[1])
    pyperclip.copy(text)
    # Bug fix: declared ``-> bool`` but previously returned None implicitly.
    return True
|
|
394
|
+
|
|
395
|
+
def figsize(id: int = 1, id2: int = 1) -> bool:
    """Copy snippet *id* to the clipboard.

    Falls back to snippet 1 when *id* is not a known key.

    Args:
        id: Key into ``SNIPPETS`` (defaults to 1). NOTE: shadows the
            builtin ``id``; kept for backward compatibility.
        id2: Currently unused; retained so existing callers that pass a
            second argument do not break.

    Returns:
        True once the text has been placed on the clipboard.
    """
    text = SNIPPETS.get(id, SNIPPETS[1])
    pyperclip.copy(text)
    # Bug fix: declared ``-> bool`` but previously returned None implicitly.
    return True
|
|
398
|
+
|
|
399
|
+
def x(id: int = 1) -> bool:
    """Copy snippet *id* to the clipboard (alias-style duplicate of ``s``).

    Falls back to snippet 1 when *id* is not a known key.

    Args:
        id: Key into ``SNIPPETS`` (defaults to 1). NOTE: shadows the
            builtin ``id``; kept for backward compatibility with callers.

    Returns:
        True once the text has been placed on the clipboard.
    """
    text = SNIPPETS.get(id, SNIPPETS[1])
    pyperclip.copy(text)
    # Bug fix: declared ``-> bool`` but previously returned None implicitly.
    return True
|
sklip-0.4.0/sklip/core.py
DELETED
|
@@ -1,42 +0,0 @@
|
|
|
1
|
-
import pyperclip
|
|
2
|
-
|
|
3
|
-
SNIPPETS = {
|
|
4
|
-
1: "import 4333432numpy as np \n import pandas as pd \n import folium \n import seaborn as sns \n import matplotlib.pyplot as plt",
|
|
5
|
-
2: "import pandas as pd",
|
|
6
|
-
3: "import matplotlib.pyplot as plt",
|
|
7
|
-
4: "from sklearn.cluster import KMeans",
|
|
8
|
-
5: "from sklearn.decomposition import PCA",
|
|
9
|
-
6: "from sklearn.metrics import silhouette_score",
|
|
10
|
-
7: "from sklearn.cluster import AgglomerativeClustering",
|
|
11
|
-
8: "from sklearn.cluster import SpectralClustering",
|
|
12
|
-
9: "from scipy.cluster.hierarchy import dendrogram, linkage",
|
|
13
|
-
10: "import seaborn as sns",
|
|
14
|
-
11: "warnings.filterwarnings('ignore')",
|
|
15
|
-
12: "X = np.random.randn(100, 2)",
|
|
16
|
-
13: "kmeans = KMeans(n_clusters=3, random_state=42)",
|
|
17
|
-
14: "labels = kmeans.fit_predict(X)",
|
|
18
|
-
15: "pca = PCA(n_components=2)",
|
|
19
|
-
16: "plt.figure(figsize=(10, 7))",
|
|
20
|
-
17: "df = pd.read_csv('data.csv')",
|
|
21
|
-
18: "from sklearn.metrics import calinski_harabasz_score",
|
|
22
|
-
19: "import scipy.cluster.hierarchy as shc",
|
|
23
|
-
20: "np.random.seed(42)"
|
|
24
|
-
}
|
|
25
|
-
|
|
26
|
-
def s(id: int = 1) -> bool:
|
|
27
|
-
|
|
28
|
-
text = SNIPPETS.get(id, SNIPPETS[1])
|
|
29
|
-
pyperclip.copy(text)
|
|
30
|
-
def ClusterKMeans(id: int = 1) -> bool:
|
|
31
|
-
|
|
32
|
-
text = SNIPPETS.get(id, SNIPPETS[1])
|
|
33
|
-
pyperclip.copy(text)
|
|
34
|
-
|
|
35
|
-
def figsize(id: int = 1, id2: int = 1) -> bool:
|
|
36
|
-
text = SNIPPETS.get(id, SNIPPETS[1])
|
|
37
|
-
pyperclip.copy(text)
|
|
38
|
-
|
|
39
|
-
def x(id: int = 1) -> bool:
|
|
40
|
-
|
|
41
|
-
text = SNIPPETS.get(id, SNIPPETS[1])
|
|
42
|
-
pyperclip.copy(text)
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|