rgwfuncs-0.0.2.tar.gz

rgwfuncs-0.0.2/LICENSE ADDED
@@ -0,0 +1,19 @@
+ Copyright (c) 2018 The Python Packaging Authority
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
rgwfuncs-0.0.2/PKG-INFO ADDED
@@ -0,0 +1,325 @@
+ Metadata-Version: 2.2
+ Name: rgwfuncs
+ Version: 0.0.2
+ Summary: A functional programming paradigm for mathematical modelling and data science
+ Home-page: https://github.com/ryangerardwilson/rgwfunc
+ Author: Ryan Gerard Wilson
+ Author-email: Ryan Gerard Wilson <ryangerardwilson@gmail.com>
+ Project-URL: Homepage, https://github.com/ryangerardwilson/rgwfuncs
+ Project-URL: Issues, https://github.com/ryangerardwilson/rgwfuncs
+ Classifier: Programming Language :: Python :: 3
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Operating System :: OS Independent
+ Requires-Python: >=3.12
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Requires-Dist: pandas
+ Requires-Dist: pymssql
+ Requires-Dist: mysql-connector-python
+ Requires-Dist: clickhouse-connect
+ Requires-Dist: google-cloud-bigquery
+ Requires-Dist: google-auth
+ Requires-Dist: xgboost
+ Requires-Dist: requests
+ Requires-Dist: slack-sdk
+ Requires-Dist: google-api-python-client
+
+ RGWML
+
+ ***By Ryan Gerard Wilson (https://ryangerardwilson.com)***
+
+ ***Manipulate data with code that is less a golden retriever, and more a Samurai's sword***
+
+ 1. Install
+ ----------
+
+     sudo apt update
+     sudo apt install ffmpeg
+     pip3 install --upgrade rgwml
+
+ 2. Import & Load Data
+ ---------------------
+
+     import rgwml as r
+
+     # For 99% of use cases, a Pandas df is good enough
+     d1 = r.p()
+     d1.fp('/path/to/your/file')
+
+     # For the remaining 1%
+     d2 = r.d()
+     d2.fp('/path/to/your/file')
+
+ 3. Create an rgwml.config file
+ ------------------------------
+
+ An rgwml.config file is required for MSSQL, CLICKHOUSE, MYSQL, GOOGLE BIG QUERY, OPEN AI, NETLIFY and VERCEL integrations. It allows you to namespace your db connections, so you can query like this:
+
+     import rgwml as r
+     d = r.p()
+     d.fq('mysql_db2','SELECT * FROM your_table')
+
+ Set out below is the format of an rgwml.config file. Place it anywhere in your Desktop, Downloads or Documents directories.
+
+     {
+         "db_presets" : [
+             {
+                 "name": "mssql_db9",
+                 "db_type": "mssql",
+                 "host": "",
+                 "username": "",
+                 "password": "",
+                 "database": ""
+             },
+             {
+                 "name": "clickhouse_db7",
+                 "db_type": "clickhouse",
+                 "host": "",
+                 "username": "",
+                 "password": "",
+                 "database": ""
+             },
+             {
+                 "name": "mysql_db2",
+                 "db_type": "mysql",
+                 "host": "",
+                 "username": "",
+                 "password": "",
+                 "database": ""
+             },
+             {
+                 "name": "bq_db1",
+                 "db_type": "google_big_query",
+                 "json_file_path": "",
+                 "project_id": ""
+             }
+         ],
+         "vm_presets": [
+             {
+                 "name": "main_server",
+                 "host": "",
+                 "ssh_user": "",
+                 "ssh_key_path": ""
+             }
+         ],
+         "cloud_storage_presets": [
+             {
+                 "name": "gcs_bucket_name",
+                 "credential_path": "path/to/your/credentials.json"
+             }
+         ],
+         "open_ai_key": "",
+         "netlify_token": "",
+         "vercel_token": ""
+     }
+
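+ The library resolves preset names against this file when you call methods like `fq`. As a rough illustration of the discovery step (a minimal sketch under the stated convention; `find_config` is not part of the rgwml API):
+
+     import json
+     from pathlib import Path
+
+     def find_config(filename: str = "rgwml.config") -> dict:
+         """Search Desktop, Downloads and Documents for the config file."""
+         home = Path.home()
+         for folder in ("Desktop", "Downloads", "Documents"):
+             candidate = home / folder / filename
+             if candidate.exists():
+                 return json.loads(candidate.read_text())
+         raise FileNotFoundError(f"{filename} not found in any searched directory")
+
+     # Map preset names (e.g. 'mysql_db2') to their connection details
+     presets = {p["name"]: p for p in find_config()["db_presets"]}
+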
+ 4. `r.p()` Class Methods
+ ------------------------
+
+ Instantiate this class by `d = r.p()`
+
+ ### 4.1. LOAD
+
+     # From raw data
+     d.frd(['col1','col2'],[[1,2,3],[4,5,6]])
+
+     # From path
+     d.fp('/absolute/path')
+
+     # From directory (select from the 7 most recently modified files in your Desktop/Downloads/Documents directories)
+     d.fd()
+
+     # From query
+     d.fq('rgwml_config_db_preset_name','SELECT * FROM your_table')
+
+     # From chunkable query
+     d.fcq('rgwml_config_db_preset_name', 'SELECT * FROM your_table', chunk_size)
+
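+ Conceptually, the chunkable query avoids loading one huge result set in a single fetch: it pulls `chunk_size` rows at a time and stitches them together. A minimal pandas-level sketch of that idea (illustrative only, not the actual `fcq` internals):
+
+     import pandas as pd
+
+     def read_in_chunks(conn, query: str, chunk_size: int) -> pd.DataFrame:
+         """Fetch query results chunk by chunk, then concatenate."""
+         chunks = pd.read_sql(query, conn, chunksize=chunk_size)
+         return pd.concat(chunks, ignore_index=True)
+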
+ ### 4.2. INSPECT
+
+     # Describe
+     d.d()
+
+     # Print
+     d.pr()
+
+     # First n rows
+     d.fnr('n')
+
+     # Last n rows
+     d.lnr('n')
+
+     # Top n unique values for specified columns
+     d.tnuv(n, ['col1', 'col2'])
+
+     # Bottom n unique values for specified columns
+     d.bnuv(n, ['col1', 'col2'])
+
+     # Is empty. Returns a boolean; not chainable.
+     d.ie()
+
+     # Print memory usage.
+     d.mem()
+
+     # Print correlation
+     d.prc([('column1','column2'), ('column3','column4')])
+
+     # Print n frequency linear. Optional: order_by (str), with options: ASC, DESC, FREQ_ASC, FREQ_DESC (default)
+     d.pnfl(5,'Column1,Column2')
+
+     # Print n frequency cascading. Optional: order_by (str), with options: ASC, DESC, FREQ_ASC, FREQ_DESC (default)
+     d.pnfc(5,'Column1,Column2')
+
+ ### 4.3. APPEND
+
+     # Append boolean classification column
+     d.abc('column1 > 30 and column2 < 50', 'new_column_name')
+
+     # Append DBSCAN cluster column. Optional: visualize (boolean)
+     d.adbscancc('Column1,Column2', 'new_cluster_column_name', eps=0.5, min_samples=5, visualize=True)
+
+     # Append n-cluster column. Available operations: KMEANS, AGGLOMERATIVE, MEAN_SHIFT, GMM, SPECTRAL, BIRCH. Optional: visualize (boolean), n_clusters_finding_method (str), i.e. ELBOW, SILHOUETTE, or FIXED:n (specify the number of clusters)
+     d.ancc('Column1,Column2', 'KMEANS', 'new_cluster_column_name', n_clusters_finding_method='FIXED:5', visualize=True)
+
+     # Append percentile classification column
+     d.apc('0,25,50,75,100', 'column_to_be_analyzed', 'new_column_name')
+
+     # Append ranged classification column
+     d.arc('0,500,1000,2000,5000,10000,100000,1000000', 'column_to_be_analyzed', 'new_column_name')
+
+     # Append ranged date classification column
+     d.ardc('2024-01-01,2024-02-01,2024-03-01', 'date_column', 'new_date_classification')
+
+     # Append count of timestamps after reference time. Requires values in YYYY-MM-DD or YYYY-MM-DD HH:MM:SS format.
+     d.atcar('comma_separated_timestamps_column', 'reference_date_or_timestamp_column', 'new_column_count_after_reference')
+
+     # Append count of timestamps before reference time. Requires values in YYYY-MM-DD or YYYY-MM-DD HH:MM:SS format.
+     d.atcbr('comma_separated_timestamps_column', 'reference_date_or_timestamp_column', 'new_column_count_before_reference')
+
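+ For intuition, a ranged classification such as `arc` buckets each value into the interval it falls in. With plain pandas the analogous operation is `pd.cut` (a sketch of the concept, not the library's code):
+
+     import pandas as pd
+
+     df = pd.DataFrame({'spend': [120, 750, 4200, 90000]})
+     bins = [0, 500, 1000, 2000, 5000, 10000, 100000, 1000000]
+     labels = [f"{lo}-{hi}" for lo, hi in zip(bins[:-1], bins[1:])]
+     # Each row is tagged with the range its value falls into
+     df['spend_range'] = pd.cut(df['spend'], bins=bins, labels=labels)
+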
+ ### 4.4. DOCUMENTATION
+
+     # Prints docs. Optional parameter: method_type_filter (str), e.g. 'APPEND, PLOT'
+     d.doc()
+
+ ### 4.5. JOINS
+
+     # Union join
+     d.uj(d2)
+
+     # Bag union join
+     d.buj(d2)
+
+     # Left join
+     d.lj(d2,'table_a_id','table_b_id')
+
+     # Right join
+     d.rj(d2,'table_a_id','table_b_id')
+
+ ### 4.6. PERSIST
+
+     # Save (saves as csv (default) or h5, to the desktop (default) or a specified path)
+     d.s('/filename/or/path')
+     d.s()  # If the dataframe was loaded from a source with an absolute path, calling s() without an argument saves to that same path
+
+ ### 4.7. PLOT
+
+     # Plot correlation heatmap for the specified columns. Optional param: image_save_path (str)
+     d.pcr(y='Column1, Column2, Column3')
+
+     # Plot distribution histograms for the specified columns. Optional param: image_save_path (str)
+     d.pdist(y='Column1, Column2, Column3')
+
+     # Plot line chart. Optional params: x (str), a single column name for the x axis, e.g. 'Column5'; image_save_path (str)
+     d.plc(y='Column1, Column2, Column3')
+
+     # Plot Q-Q plots for the specified columns. Optional param: image_save_path (str)
+     d.pqq(y='Column1, Column2, Column3')
+
+ ### 4.8. PREDICT
+
+     # Append XGB training labels based on a ratio string. Specify a ratio a:b:c to split into TRAIN, VALIDATE and TEST, or a:b to split into TRAIN and TEST.
+     d.axl('70:20:10')
+
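+ As an illustration of how a ratio string can translate into row labels (a hypothetical sketch, not the actual `.axl()` implementation):
+
+     import numpy as np
+     import pandas as pd
+
+     def ratio_labels(n_rows: int, ratio: str = '70:20:10') -> pd.Series:
+         """Assign TRAIN/VALIDATE/TEST labels in the given proportions."""
+         parts = [int(x) for x in ratio.split(':')]
+         names = ['TRAIN', 'VALIDATE', 'TEST'] if len(parts) == 3 else ['TRAIN', 'TEST']
+         counts = [n_rows * p // sum(parts) for p in parts]
+         counts[0] += n_rows - sum(counts)  # hand the rounding remainder to TRAIN
+         labels = np.repeat(names, counts)
+         np.random.shuffle(labels)
+         return pd.Series(labels)
+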
+     # Append XGB regression predictions. Assumes labelling by the .axl() method. Optional params: boosting_rounds (int), model_path (str)
+     d.axlinr('target_column','feature1, feature2, feature3','prediction_column_name')
+
+     # Append XGB logistic regression predictions. Assumes labelling by the .axl() method. Optional params: boosting_rounds (int), model_path (str)
+     d.axlogr('target_column','feature1, feature2, feature3','prediction_column_name')
+
+ ### 4.9. TINKER
+
+     # Cascade sort by specified columns.
+     d.cs(['Column1', 'Column2'])
+
+     # Filter
+     d.f("col1 > 100 and col1 == Col3 and Col5 == 'XYZ'")
+
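+ The filter string reads like a pandas `query` expression; on a plain DataFrame the equivalent would be (a sketch, assuming pandas-style evaluation):
+
+     import pandas as pd
+
+     df = pd.DataFrame({'col1': [50, 150, 200],
+                        'Col3': [150, 150, 150],
+                        'Col5': ['XYZ', 'XYZ', 'ABC']})
+     # Keep only rows where all three conditions hold
+     filtered = df.query("col1 > 100 and col1 == Col3 and Col5 == 'XYZ'")
+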
+     # Filter Indian Mobiles
+     d.fim('mobile')
+
+     # Filter Indian Mobiles (complement)
+     d.fimc('mobile')
+
+     # Make numerically parseable by defaulting to zero for the specified columns
+     d.mnpdz(['Column1', 'Column2'])
+
+     # Rename columns
+     d.rnc({'old_col1': 'new_col1', 'old_col2': 'new_col2'})
+
+ ### 4.10. TRANSFORM
+
+     # Group. Permits multiple aggregations on the same column. Available agg options: sum, mean, min, max, count, size, std, var, median, css (comma-separated strings), etc.
+     d.g(['group_by_columns'], ['column1::sum', 'column1::count', 'column3::sum'])
+
+     # Pivot. Optional param: seg_columns. Available agg options: sum, mean, min, max, count, size, std, var, median, etc.
+     d.p(['group_by_cols'], 'values_to_agg_col', 'sum', ['seg_columns'])
+
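+ The `column::agg` tokens pair a column with an aggregation. A minimal sketch of how such specs map onto a pandas groupby (illustrative; `group_with_specs` is not part of the rgwml API):
+
+     import pandas as pd
+
+     def group_with_specs(df, group_by, specs):
+         """Parse 'column::agg' specs and apply them via named aggregation."""
+         agg_kwargs = {}
+         for spec in specs:
+             col, agg = spec.split('::')
+             agg_kwargs[f"{col}_{agg}"] = (col, agg)
+         return df.groupby(group_by).agg(**agg_kwargs).reset_index()
+
+     df = pd.DataFrame({'k': ['a', 'a', 'b'], 'v': [1, 2, 3]})
+     result = group_with_specs(df, ['k'], ['v::sum', 'v::count'])
+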
+ 5. `r.d()` Methods
+ ------------------
+
+ Instantiate this class by `d = r.d()`
+
+ ### 5.1. LOAD
+
+     # From raw data
+     d.frd(['col1','col2'],[[1,2,3],[4,5,6]])
+
+     # From path
+     d.fp('/absolute/path')
+
+ ### 5.2. INSPECT
+
+     # Print
+     d.pr()
+
+ ### 5.3. DOCUMENTATION
+
+     # Prints docs. Optional parameter: method_type_filter (str), e.g. 'APPEND, PLOT'
+     d.doc()
+
+ ### 5.4. JOINS
+
+     # Union join
+     d.uj(d2)
+
+ ### 5.5. PERSIST
+
+     # Save (saves as csv (default) or h5, to the desktop (default) or a specified path)
+     d.s('/filename/or/path')
+
+ ### 5.6. TINKER
+
+     # Filter Indian Mobiles
+     d.fim('mobile')
+
+     # Filter Indian Mobiles (complement)
+     d.fimc('mobile')
+
+ ### 5.7. TRANSFORM
+
+     # Group. Permits multiple aggregations on the same column. Available agg options: sum, mean, min, max, count, size, std, var, median, css (comma-separated strings), etc.
+     d.g(['group_by_columns'], ['column1::sum', 'column1::count', 'column3::sum'])
+
+     # Pivot. Optional param: seg_columns. Available agg options: sum, mean, min, max, count, size, std, var, median, etc.
+     d.p(['group_by_cols'], 'values_to_agg_col', 'sum', ['seg_columns'])
+
rgwfuncs-0.0.2/README.md ADDED
@@ -0,0 +1,299 @@
+ RGWML
+
+ ***By Ryan Gerard Wilson (https://ryangerardwilson.com)***
+
+ ***Manipulate data with code that is less a golden retriever, and more a Samurai's sword***
+
+ 1. Install
+ ----------
+
+     sudo apt update
+     sudo apt install ffmpeg
+     pip3 install --upgrade rgwml
+
+ 2. Import & Load Data
+ ---------------------
+
+     import rgwml as r
+
+     # For 99% of use cases, a Pandas df is good enough
+     d1 = r.p()
+     d1.fp('/path/to/your/file')
+
+     # For the remaining 1%
+     d2 = r.d()
+     d2.fp('/path/to/your/file')
+
+ 3. Create an rgwml.config file
+ ------------------------------
+
+ An rgwml.config file is required for MSSQL, CLICKHOUSE, MYSQL, GOOGLE BIG QUERY, OPEN AI, NETLIFY and VERCEL integrations. It allows you to namespace your db connections, so you can query like this:
+
+     import rgwml as r
+     d = r.p()
+     d.fq('mysql_db2','SELECT * FROM your_table')
+
+ Set out below is the format of an rgwml.config file. Place it anywhere in your Desktop, Downloads or Documents directories.
+
+     {
+         "db_presets" : [
+             {
+                 "name": "mssql_db9",
+                 "db_type": "mssql",
+                 "host": "",
+                 "username": "",
+                 "password": "",
+                 "database": ""
+             },
+             {
+                 "name": "clickhouse_db7",
+                 "db_type": "clickhouse",
+                 "host": "",
+                 "username": "",
+                 "password": "",
+                 "database": ""
+             },
+             {
+                 "name": "mysql_db2",
+                 "db_type": "mysql",
+                 "host": "",
+                 "username": "",
+                 "password": "",
+                 "database": ""
+             },
+             {
+                 "name": "bq_db1",
+                 "db_type": "google_big_query",
+                 "json_file_path": "",
+                 "project_id": ""
+             }
+         ],
+         "vm_presets": [
+             {
+                 "name": "main_server",
+                 "host": "",
+                 "ssh_user": "",
+                 "ssh_key_path": ""
+             }
+         ],
+         "cloud_storage_presets": [
+             {
+                 "name": "gcs_bucket_name",
+                 "credential_path": "path/to/your/credentials.json"
+             }
+         ],
+         "open_ai_key": "",
+         "netlify_token": "",
+         "vercel_token": ""
+     }
+
+ 4. `r.p()` Class Methods
+ ------------------------
+
+ Instantiate this class by `d = r.p()`
+
+ ### 4.1. LOAD
+
+     # From raw data
+     d.frd(['col1','col2'],[[1,2,3],[4,5,6]])
+
+     # From path
+     d.fp('/absolute/path')
+
+     # From directory (select from the 7 most recently modified files in your Desktop/Downloads/Documents directories)
+     d.fd()
+
+     # From query
+     d.fq('rgwml_config_db_preset_name','SELECT * FROM your_table')
+
+     # From chunkable query
+     d.fcq('rgwml_config_db_preset_name', 'SELECT * FROM your_table', chunk_size)
+
+ ### 4.2. INSPECT
+
+     # Describe
+     d.d()
+
+     # Print
+     d.pr()
+
+     # First n rows
+     d.fnr('n')
+
+     # Last n rows
+     d.lnr('n')
+
+     # Top n unique values for specified columns
+     d.tnuv(n, ['col1', 'col2'])
+
+     # Bottom n unique values for specified columns
+     d.bnuv(n, ['col1', 'col2'])
+
+     # Is empty. Returns a boolean; not chainable.
+     d.ie()
+
+     # Print memory usage.
+     d.mem()
+
+     # Print correlation
+     d.prc([('column1','column2'), ('column3','column4')])
+
+     # Print n frequency linear. Optional: order_by (str), with options: ASC, DESC, FREQ_ASC, FREQ_DESC (default)
+     d.pnfl(5,'Column1,Column2')
+
+     # Print n frequency cascading. Optional: order_by (str), with options: ASC, DESC, FREQ_ASC, FREQ_DESC (default)
+     d.pnfc(5,'Column1,Column2')
+
+ ### 4.3. APPEND
+
+     # Append boolean classification column
+     d.abc('column1 > 30 and column2 < 50', 'new_column_name')
+
+     # Append DBSCAN cluster column. Optional: visualize (boolean)
+     d.adbscancc('Column1,Column2', 'new_cluster_column_name', eps=0.5, min_samples=5, visualize=True)
+
+     # Append n-cluster column. Available operations: KMEANS, AGGLOMERATIVE, MEAN_SHIFT, GMM, SPECTRAL, BIRCH. Optional: visualize (boolean), n_clusters_finding_method (str), i.e. ELBOW, SILHOUETTE, or FIXED:n (specify the number of clusters)
+     d.ancc('Column1,Column2', 'KMEANS', 'new_cluster_column_name', n_clusters_finding_method='FIXED:5', visualize=True)
+
+     # Append percentile classification column
+     d.apc('0,25,50,75,100', 'column_to_be_analyzed', 'new_column_name')
+
+     # Append ranged classification column
+     d.arc('0,500,1000,2000,5000,10000,100000,1000000', 'column_to_be_analyzed', 'new_column_name')
+
+     # Append ranged date classification column
+     d.ardc('2024-01-01,2024-02-01,2024-03-01', 'date_column', 'new_date_classification')
+
+     # Append count of timestamps after reference time. Requires values in YYYY-MM-DD or YYYY-MM-DD HH:MM:SS format.
+     d.atcar('comma_separated_timestamps_column', 'reference_date_or_timestamp_column', 'new_column_count_after_reference')
+
+     # Append count of timestamps before reference time. Requires values in YYYY-MM-DD or YYYY-MM-DD HH:MM:SS format.
+     d.atcbr('comma_separated_timestamps_column', 'reference_date_or_timestamp_column', 'new_column_count_before_reference')
+
+ ### 4.4. DOCUMENTATION
+
+     # Prints docs. Optional parameter: method_type_filter (str), e.g. 'APPEND, PLOT'
+     d.doc()
+
+ ### 4.5. JOINS
+
+     # Union join
+     d.uj(d2)
+
+     # Bag union join
+     d.buj(d2)
+
+     # Left join
+     d.lj(d2,'table_a_id','table_b_id')
+
+     # Right join
+     d.rj(d2,'table_a_id','table_b_id')
+
+ ### 4.6. PERSIST
+
+     # Save (saves as csv (default) or h5, to the desktop (default) or a specified path)
+     d.s('/filename/or/path')
+     d.s()  # If the dataframe was loaded from a source with an absolute path, calling s() without an argument saves to that same path
+
+ ### 4.7. PLOT
+
+     # Plot correlation heatmap for the specified columns. Optional param: image_save_path (str)
+     d.pcr(y='Column1, Column2, Column3')
+
+     # Plot distribution histograms for the specified columns. Optional param: image_save_path (str)
+     d.pdist(y='Column1, Column2, Column3')
+
+     # Plot line chart. Optional params: x (str), a single column name for the x axis, e.g. 'Column5'; image_save_path (str)
+     d.plc(y='Column1, Column2, Column3')
+
+     # Plot Q-Q plots for the specified columns. Optional param: image_save_path (str)
+     d.pqq(y='Column1, Column2, Column3')
+
+ ### 4.8. PREDICT
+
+     # Append XGB training labels based on a ratio string. Specify a ratio a:b:c to split into TRAIN, VALIDATE and TEST, or a:b to split into TRAIN and TEST.
+     d.axl('70:20:10')
+
+     # Append XGB regression predictions. Assumes labelling by the .axl() method. Optional params: boosting_rounds (int), model_path (str)
+     d.axlinr('target_column','feature1, feature2, feature3','prediction_column_name')
+
+     # Append XGB logistic regression predictions. Assumes labelling by the .axl() method. Optional params: boosting_rounds (int), model_path (str)
+     d.axlogr('target_column','feature1, feature2, feature3','prediction_column_name')
+
+ ### 4.9. TINKER
+
+     # Cascade sort by specified columns.
+     d.cs(['Column1', 'Column2'])
+
+     # Filter
+     d.f("col1 > 100 and col1 == Col3 and Col5 == 'XYZ'")
+
+     # Filter Indian Mobiles
+     d.fim('mobile')
+
+     # Filter Indian Mobiles (complement)
+     d.fimc('mobile')
+
+     # Make numerically parseable by defaulting to zero for the specified columns
+     d.mnpdz(['Column1', 'Column2'])
+
+     # Rename columns
+     d.rnc({'old_col1': 'new_col1', 'old_col2': 'new_col2'})
+
+ ### 4.10. TRANSFORM
+
+     # Group. Permits multiple aggregations on the same column. Available agg options: sum, mean, min, max, count, size, std, var, median, css (comma-separated strings), etc.
+     d.g(['group_by_columns'], ['column1::sum', 'column1::count', 'column3::sum'])
+
+     # Pivot. Optional param: seg_columns. Available agg options: sum, mean, min, max, count, size, std, var, median, etc.
+     d.p(['group_by_cols'], 'values_to_agg_col', 'sum', ['seg_columns'])
+
+ 5. `r.d()` Methods
+ ------------------
+
+ Instantiate this class by `d = r.d()`
+
+ ### 5.1. LOAD
+
+     # From raw data
+     d.frd(['col1','col2'],[[1,2,3],[4,5,6]])
+
+     # From path
+     d.fp('/absolute/path')
+
+ ### 5.2. INSPECT
+
+     # Print
+     d.pr()
+
+ ### 5.3. DOCUMENTATION
+
+     # Prints docs. Optional parameter: method_type_filter (str), e.g. 'APPEND, PLOT'
+     d.doc()
+
+ ### 5.4. JOINS
+
+     # Union join
+     d.uj(d2)
+
+ ### 5.5. PERSIST
+
+     # Save (saves as csv (default) or h5, to the desktop (default) or a specified path)
+     d.s('/filename/or/path')
+
+ ### 5.6. TINKER
+
+     # Filter Indian Mobiles
+     d.fim('mobile')
+
+     # Filter Indian Mobiles (complement)
+     d.fimc('mobile')
+
+ ### 5.7. TRANSFORM
+
+     # Group. Permits multiple aggregations on the same column. Available agg options: sum, mean, min, max, count, size, std, var, median, css (comma-separated strings), etc.
+     d.g(['group_by_columns'], ['column1::sum', 'column1::count', 'column3::sum'])
+
+     # Pivot. Optional param: seg_columns. Available agg options: sum, mean, min, max, count, size, std, var, median, etc.
+     d.p(['group_by_cols'], 'values_to_agg_col', 'sum', ['seg_columns'])
+
rgwfuncs-0.0.2/pyproject.toml ADDED
@@ -0,0 +1,40 @@
+ [build-system]
+ requires = ["setuptools", "wheel"]
+ build-backend = "setuptools.build_meta"
+
+ [project]
+ name = "rgwfuncs"
+ version = "0.0.2"
+ authors = [
+     { name = "Ryan Gerard Wilson", email = "ryangerardwilson@gmail.com" },
+ ]
+ description = "A functional programming paradigm for mathematical modelling and data science"
+ readme = "README.md"
+ requires-python = ">=3.12"
+ classifiers = [
+     "Programming Language :: Python :: 3",
+     "License :: OSI Approved :: MIT License",
+     "Operating System :: OS Independent",
+ ]
+ dependencies = [
+     "pandas",
+     "pymssql",
+     "mysql-connector-python",
+     "clickhouse-connect",
+     "google-cloud-bigquery",
+     "google-auth",
+     "xgboost",
+     "requests",
+     "slack-sdk",
+     "google-api-python-client",
+ ]
+
+ dynamic = ["scripts"]
+
+ [project.urls]
+ Homepage = "https://github.com/ryangerardwilson/rgwfuncs"
+ Issues = "https://github.com/ryangerardwilson/rgwfuncs"
+
+ [tool.setuptools.packages.find]
+ where = ["src"]
+