rgwfuncs 0.0.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,19 @@
1
+ Copyright (c) 2018 The Python Packaging Authority
2
+
3
+ Permission is hereby granted, free of charge, to any person obtaining a copy
4
+ of this software and associated documentation files (the "Software"), to deal
5
+ in the Software without restriction, including without limitation the rights
6
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7
+ copies of the Software, and to permit persons to whom the Software is
8
+ furnished to do so, subject to the following conditions:
9
+
10
+ The above copyright notice and this permission notice shall be included in all
11
+ copies or substantial portions of the Software.
12
+
13
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19
+ SOFTWARE.
@@ -0,0 +1,325 @@
1
+ Metadata-Version: 2.2
2
+ Name: rgwfuncs
3
+ Version: 0.0.2
4
+ Summary: A functional programming paradigm for mathematical modelling and data science
5
+ Home-page: https://github.com/ryangerardwilson/rgwfuncs
6
+ Author: Ryan Gerard Wilson
7
+ Author-email: Ryan Gerard Wilson <ryangerardwilson@gmail.com>
8
+ Project-URL: Homepage, https://github.com/ryangerardwilson/rgwfuncs
9
+ Project-URL: Issues, https://github.com/ryangerardwilson/rgwfuncs
10
+ Classifier: Programming Language :: Python :: 3
11
+ Classifier: License :: OSI Approved :: MIT License
12
+ Classifier: Operating System :: OS Independent
13
+ Requires-Python: >=3.12
14
+ Description-Content-Type: text/markdown
15
+ License-File: LICENSE
16
+ Requires-Dist: pandas
17
+ Requires-Dist: pymssql
18
+ Requires-Dist: mysql-connector-python
19
+ Requires-Dist: clickhouse-connect
20
+ Requires-Dist: google-cloud-bigquery
21
+ Requires-Dist: google-auth
22
+ Requires-Dist: xgboost
23
+ Requires-Dist: requests
24
+ Requires-Dist: slack-sdk
25
+ Requires-Dist: google-api-python-client
26
+
27
+ RGWML
28
+
29
+ ***By Ryan Gerard Wilson (https://ryangerardwilson.com)***
30
+
31
+ ***Manipulate data with code that is less a golden retriever, and more a Samurai's sword***
32
+
33
+ 1. Install
34
+ ----------
35
+
36
+ sudo apt update
37
+ sudo apt install ffmpeg
38
+ pip3 install --upgrade rgwfuncs
39
+
40
+ 2. Import & Load Data
41
+ ---------------------
42
+
43
+ import rgwml as r
44
+
45
+ # For 99% use cases a Pandas df is good enough
46
+ d1 = r.p()
47
+ d1.fp('/path/to/your/file')
48
+
49
+ # For the remaining 1%
50
+ d2 = r.d()
51
+ d2.fp('/path/to/your/file')
52
+
53
+ 3. Create an rgwml.config file
54
+ ------------------------------
55
+
56
+ An rgwml.config file is required for MSSQL, CLICKHOUSE, MYSQL, GOOGLE BIG QUERY, OPEN AI, NETLIFY and VERCEL integrations. It allows you to namespace your db connections, so you can query like this:
57
+
58
+ import rgwml as r
59
+ d = r.p()
60
+ d.fq('mysql_db2','SELECT * FROM your_table')
61
+
62
+ Set out below is the format of an rgwml.config file. Place it anywhere in your Desktop, Downloads or Documents directories.
63
+
64
+ {
65
+ "db_presets" : [
66
+ {
67
+ "name": "mssql_db9",
68
+ "db_type": "mssql",
69
+ "host": "",
70
+ "username": "",
71
+ "password": "",
72
+ "database": ""
73
+ },
74
+ {
75
+ "name": "clickhouse_db7",
76
+ "db_type": "clickhouse",
77
+ "host": "",
78
+ "username": "",
79
+ "password": "",
80
+ "database": ""
81
+ },
82
+ {
83
+ "name": "mysql_db2",
84
+ "db_type": "mysql",
85
+ "host": "",
86
+ "username": "",
87
+ "password": "",
88
+ "database": ""
89
+ },
90
+ {
91
+ "name": "bq_db1",
92
+ "db_type": "google_big_query",
93
+ "json_file_path": "",
94
+ "project_id": ""
95
+ }
96
+ ],
97
+ "vm_presets": [
98
+ {
99
+ "name": "main_server",
100
+ "host": "",
101
+ "ssh_user": "",
102
+ "ssh_key_path": ""
103
+ }
104
+ ],
105
+ "cloud_storage_presets": [
106
+ {
107
+ "name": "gcs_bucket_name",
108
+ "credential_path": "path/to/your/credentials.json"
109
+ }
110
+ ],
111
+ "open_ai_key": "",
112
+ "netlify_token": "",
113
+ "vercel_token": ""
114
+ }
115
+
116
+ 4. `r.p()` Class Methods
117
+ ------------------------
118
+
119
+ Instantiate this class by `d = r.p()`
120
+
121
+ ### 4.1. LOAD
122
+
123
+ # From raw data
124
+ d.frd(['col1','col2'],[[1,2,3],[4,5,6]])
125
+
126
+ # From path
127
+ d.fp('/absolute/path')
128
+
129
+ # From Directory (select from your last 7 recently modified files in your Desktop/Downloads/Documents directories)
130
+ d.fd()
131
+
132
+ # From query
133
+ d.fq('rgwml_config_db_preset_name','SELECT * FROM your_table')
134
+
135
+ # FROM chunkable query
136
+ d.fcq('rgwml_config_db_preset_name', 'SELECT * FROM your_table', chunk_size)
137
+
138
+ ### 4.2. INSPECT
139
+
140
+ # Describe
141
+ d.d()
142
+
143
+ # Print
144
+ d.pr()
145
+
146
+ # First n rows
147
+ d.fnr('n')
148
+
149
+ # Last n rows
150
+ d.lnr('n')
151
+
152
+ # Top n unique values for specified columns
153
+ d.tnuv(n, ['col1', 'col2'])
154
+
155
+ # Bottom n unique values for specified columns
156
+ d.bnuv(n, ['col1', 'col2'])
157
+
158
+ # Is empty. Returns boolean, not chainable.
159
+ d.ie()
160
+
161
+ # Memory usage print.
162
+ d.mem()
163
+
164
+ # Print correlation
165
+ d.prc([('column1','column2'), ('column3','column4')])
166
+
167
+ # Print n frequency linear. Optional: order_by (str), which has options: ASC, DESC, FREQ_ASC, FREQ_DESC (default)
168
+ d.pnfl(5,'Column1,Columns')
169
+
170
+ # Print n frequency cascading. Optional: order_by (str), which has options: ASC, DESC, FREQ_ASC, FREQ_DESC (default)
171
+ d.pnfc(5,'Column1,Columns')
172
+
173
+ ### 4.3. APPEND
174
+
175
+ # Append boolean classification column
176
+ d.abc('column1 > 30 and column2 < 50', 'new_column_name')
177
+
178
+ # Append DBSCAN cluster column. Optional: visualize (boolean)
179
+ d.adbscancc('Column1,Column2', 'new_cluster_column_name', eps=0.5, min_samples=5, visualize=True)
180
+
181
+ # Append n-cluster column. Available operations: KMEANS/ AGGLOMERATIVE/ MEAN_SHIFT/ GMM/ SPECTRAL/ BIRCH. Optional: visualize (boolean), n_clusters_finding_method (str) i.e. ELBOW/ SILHOUETTE/ FIXED:n (specify a number of n clusters).
182
+ d.ancc('Column1,Column2', 'KMEANS', 'new_cluster_column_name', n_clusters_finding_method='FIXED:5', visualize=True)
183
+
184
+ # Append percentile classification column
185
+ d.apc('0,25,50,75,100', 'column_to_be_analyzed', 'new_column_name')
186
+
187
+ # Append ranged classification column
188
+ d.arc('0,500,1000,2000,5000,10000,100000,1000000', 'column_to_be_analyzed', 'new_column_name')
189
+
190
+ # Append ranged date classification column
191
+ d.ardc('2024-01-01,2024-02-01,2024-03-01', 'date_column', 'new_date_classification')
192
+
193
+ # Append count of timestamps after reference time. Requires values in YYYY-MM-DD or YYYY-MM-DD HH:MM:SS format.
194
+ d.atcar('comma_separated_timestamps_column', 'reference_date_or_timestamps_column', 'new_column_count_after_reference')
195
+
196
+ # Append count of timestamps before reference time. Requires values in YYYY-MM-DD or YYYY-MM-DD HH:MM:SS format.
197
+ d.atcbr('comma_separated_timestamps_column', 'reference_date_or_timestamp_column', 'new_column_count_before_reference')
198
+
199
+ ### 4.4. DOCUMENTATION
200
+
201
+ # Prints docs. Optional parameter: method_type_filter (str) e.g. 'APPEND, PLOT'
202
+ d.doc()
203
+
204
+ ### 4.5. JOINS
205
+
206
+ # Union join
207
+ d.uj(d2)
208
+
209
+ # Bag union join
210
+ d.buj(d2)
211
+
212
+ # Left join
213
+ d.lj(d2,'table_a_id','table_b_id')
214
+
215
+ # Right join
216
+ d.rj(d2,'table_a_id','table_b_id')
217
+
218
+ ### 4.6. PERSIST
219
+
220
+ # Save (saves as csv (default) or h5, to desktop (default) or path)
221
+ d.s('/filename/or/path')
222
+ d.s() #If the dataframe was loaded from a source with an absolute path, calling the s method without an argument will save at the same path
223
+
224
+ ### 4.7. PLOT
225
+
226
+ # Plot correlation heatmap for the specified columns. Optional param: image_save_path (str)
227
+ d.pcr(y='Column1, Column2, Column3')
228
+
229
+ # Plot distribution histograms for the specified columns. Optional param: image_save_path (str)
230
+ d.pdist(y='Column1, Column2, Column3')
231
+
232
+ # Plot line chart. Optional param: x (str), i.e. a single column name for the x axis eg. 'Column5', image_save_path (str)
233
+ d.plc(y='Column1, Column2, Column3')
234
+
235
+ # Plot Q-Q plots for the specified columns. Optional param: image_save_path (str)
236
+ d.pqq(y='Column1, Column2, Column3')
237
+
238
+ ### 4.8. PREDICT
239
+
240
+ # Append XGB training labels based on a ratio string. Specify a ratio a:b:c to split into TRAIN, VALIDATE and TEST, or a:b to split into TRAIN and TEST.
241
+ d.axl('70:20:10')
242
+
243
+ # Append XGB regression predictions. Assumes labelling by the .axl() method. Optional params: boosting_rounds (int), model_path (str)
244
+ d.axlinr('target_column','feature1, feature2, feature3','prediction_column_name')
245
+
246
+ # Append XGB logistic regression predictions. Assumes labeling by the .axl() method. Optional params: boosting_rounds (int), model_path (str)
247
+ d.axlogr('target_column','feature1, feature2, feature3','prediction_column_name')
248
+
249
+ ### 4.9. TINKER
250
+
251
+ # Cascade sort by specified columns.
252
+ d.cs(['Column1', 'Column2'])
253
+
254
+ # Filter
255
+ d.f("col1 > 100 and Col1 == Col3 and Col5 == 'XYZ'")
256
+
257
+ # Filter Indian Mobiles
258
+ d.fim('mobile')
259
+
260
+ # Filter Indian Mobiles (complement)
261
+ d.fimc('mobile')
262
+
263
+ # Make numerically parseable by defaulting to zero for specified column
264
+ d.mnpdz(['Column1', 'Column2'])
265
+
266
+ # Rename columns
267
+ d.rnc({'old_col1': 'new_col1', 'old_col2': 'new_col2'})
268
+
269
+ ### 4.10. TRANSFORM
270
+
271
+ # Group. Permits multiple aggregations on the same column. Available agg options: sum, mean, min, max, count, size, std, var, median, css (comma-separated strings), etc.
272
+ d.g(['group_by_columns'], ['column1::sum', 'column1::count', 'column3::sum'])
273
+
274
+ # Pivot. Optional param: seg_columns. Available agg options: sum, mean, min, max, count, size, std, var, median, etc.
275
+ d.p(['group_by_cols'], 'values_to_agg_col', 'sum', ['seg_columns'])
276
+
277
+ 5. `r.d()` Methods
278
+ ------------------
279
+
280
+ Instantiate this class by `d = r.d()`
281
+
282
+ ### 5.1. LOAD
283
+
284
+ # From raw data
285
+ d.frd(['col1','col2'],[[1,2,3],[4,5,6]])
286
+
287
+ # From path
288
+ d.fp('/absolute/path')
289
+
290
+ ### 5.2. INSPECT
291
+
292
+ # Print
293
+ d.pr()
294
+
295
+ ### 5.3. DOCUMENTATION
296
+
297
+ # Prints docs. Optional parameter: method_type_filter (str) e.g. 'APPEND, PLOT'
298
+ d.doc()
299
+
300
+ ### 5.4. JOINS
301
+
302
+ # Union join
303
+ d.uj(d2)
304
+
305
+ ### 5.5. PERSIST
306
+
307
+ # Save (saves as csv (default) or h5, to desktop (default) or path)
308
+ d.s('/filename/or/path')
309
+
310
+ ### 5.6. TINKER
311
+
312
+ # Filter Indian Mobiles
313
+ d.fim('mobile')
314
+
315
+ # Filter Indian Mobiles (complement)
316
+ d.fimc('mobile')
317
+
318
+ ### 5.7. TRANSFORM
319
+
320
+ # Group. Permits multiple aggregations on the same column. Available agg options: sum, mean, min, max, count, size, std, var, median, css (comma-separated strings), etc.
321
+ d.g(['group_by_columns'], ['column1::sum', 'column1::count', 'column3::sum'])
322
+
323
+ # Pivot. Optional param: seg_columns. Available agg options: sum, mean, min, max, count, size, std, var, median, etc.
324
+ d.p(['group_by_cols'], 'values_to_agg_col', 'sum', ['seg_columns'])
325
+
@@ -0,0 +1,8 @@
1
+ rgwfuncs/__init__.py,sha256=8suLAGE7rHBY9e2ViUJuRCUyiam4PO7bjNq_l59dW8Q,24
2
+ rgwfuncs/df_lib.py,sha256=dUOlEE1J_e21K-B7zO649np8Jh1xrATkxYZIi8bN3nI,55042
3
+ rgwfuncs-0.0.2.dist-info/LICENSE,sha256=7EI8xVBu6h_7_JlVw-yPhhOZlpY9hP8wal7kHtqKT_E,1074
4
+ rgwfuncs-0.0.2.dist-info/METADATA,sha256=4BinzCXqpNId2feZm0cSjb8jSCDuz_WgBsh5DKneUa0,9845
5
+ rgwfuncs-0.0.2.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
6
+ rgwfuncs-0.0.2.dist-info/entry_points.txt,sha256=j-c5IOPIQ0252EaOV6j6STio56sbXl2C4ym_fQ0lXx0,43
7
+ rgwfuncs-0.0.2.dist-info/top_level.txt,sha256=aGuVIzWsKiV1f2gCb6mynx0zx5ma0B1EwPGFKVEMTi4,9
8
+ rgwfuncs-0.0.2.dist-info/RECORD,,
@@ -0,0 +1,5 @@
1
+ Wheel-Version: 1.0
2
+ Generator: setuptools (75.8.0)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
5
+
@@ -0,0 +1,2 @@
1
+ [console_scripts]
2
+ rgwfuncs = rgwfuncs:main
@@ -0,0 +1 @@
1
+ rgwfuncs