reykit 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- reykit/__init__.py +41 -0
- reykit/rall.py +33 -0
- reykit/rcomm.py +431 -0
- reykit/rdata.py +395 -0
- reykit/rdll/__init__.py +17 -0
- reykit/rdll/rdll_inject.py +41 -0
- reykit/rdll/rdll_inject_core.py +202 -0
- reykit/remail.py +276 -0
- reykit/rexception.py +339 -0
- reykit/rimage.py +261 -0
- reykit/rlog.py +1061 -0
- reykit/rmonkey.py +341 -0
- reykit/rmultitask.py +871 -0
- reykit/rnumber.py +161 -0
- reykit/ros.py +1917 -0
- reykit/rrandom.py +351 -0
- reykit/rregex.py +293 -0
- reykit/rschedule.py +272 -0
- reykit/rstdout.py +356 -0
- reykit/rsystem.py +1180 -0
- reykit/rtable.py +511 -0
- reykit/rtext.py +458 -0
- reykit/rtime.py +678 -0
- reykit/rtype.py +106 -0
- reykit/rwrap.py +613 -0
- reykit/rzip.py +137 -0
- reykit-1.0.0.dist-info/METADATA +29 -0
- reykit-1.0.0.dist-info/RECORD +30 -0
- reykit-1.0.0.dist-info/WHEEL +5 -0
- reykit-1.0.0.dist-info/top_level.txt +1 -0
reykit/rtable.py
ADDED
@@ -0,0 +1,511 @@
|
|
1
|
+
# !/usr/bin/env python
|
2
|
+
# -*- coding: utf-8 -*-
|
3
|
+
|
4
|
+
"""
|
5
|
+
@Time : 2023-06-16 13:49:33
|
6
|
+
@Author : Rey
|
7
|
+
@Contact : reyxbo@163.com
|
8
|
+
@Explain : Table methods.
|
9
|
+
"""
|
10
|
+
|
11
|
+
|
12
|
+
from typing import Any, TypedDict, Optional, Union, overload
|
13
|
+
from collections.abc import Iterable
|
14
|
+
from os.path import abspath as os_abspath
|
15
|
+
from pandas import DataFrame, ExcelWriter, isnull
|
16
|
+
from sqlalchemy.engine.cursor import CursorResult
|
17
|
+
|
18
|
+
from .ros import RFile
|
19
|
+
from .rtext import to_json, to_text
|
20
|
+
from .rtime import time_to
|
21
|
+
|
22
|
+
|
23
|
+
__all__ = (
|
24
|
+
'to_table',
|
25
|
+
'to_dict',
|
26
|
+
'to_list',
|
27
|
+
'to_df',
|
28
|
+
'to_json',
|
29
|
+
'to_text',
|
30
|
+
'to_sql',
|
31
|
+
'to_html',
|
32
|
+
'to_csv',
|
33
|
+
'to_excel'
|
34
|
+
)
|
35
|
+
|
36
|
+
|
37
|
+
type Table = Union[list[dict], dict, CursorResult, DataFrame]
|
38
|
+
SheetSet = TypedDict('SheetsSet', {'name': str, 'index': int, 'fields': Union[str, list[str]]})
|
39
|
+
|
40
|
+
|
41
|
+
def to_table(
    data: Union[Table, Iterable[Iterable]],
    fields: Optional[Iterable] = None
) -> list[dict]:
    """
    Convert data to table in `list[dict]` format; every row dictionary has
    the same keys in the same order.

    Parameters
    ----------
    data : Table format data.
    fields : Table fields.
        - `None`: Infer.
        - `Iterable`: Use values in Iterable.

    Returns
    -------
    Table in `list[dict]` format.
    """

    # Database cursor result: pair the column names with each fetched row.
    if isinstance(data, CursorResult):
        keys = fields or data.keys()
        return [dict(zip(keys, row)) for row in data]

    # DataFrame: normalize dtypes first, then map NaN-like cells to None.
    if isinstance(data, DataFrame):
        frame = to_df(data, fields)
        keys = frame.columns
        rows = []
        for row in frame.values:
            # `isnull` on a list value would return an array, so lists are
            # passed through unchanged.
            values = [
                None
                if (value.__class__ != list and isnull(value))
                else value
                for value in row
            ]
            rows.append(dict(zip(keys, values)))
        return rows

    # Anything else: round-trip through a DataFrame for normalization.
    return to_table(to_df(data, fields))
|
94
|
+
|
95
|
+
|
96
|
+
@overload
def to_dict(
    data: Union[Table, Iterable[Iterable]],
    key_field: Union[int, str] = 0,
    val_field: None = None
) -> dict[Any, dict]: ...

@overload
def to_dict(
    data: Union[Table, Iterable[Iterable]],
    key_field: Union[int, str] = 0,
    val_field: Union[int, str] = ...
) -> dict: ...

def to_dict(
    data: Union[Table, Iterable[Iterable]],
    key_field: Union[int, str] = 0,
    val_field: Optional[Union[int, str]] = None
) -> Union[dict[Any, dict], dict]:
    """
    Convert data as dictionary.

    Parameters
    ----------
    data : Table format data.
    key_field : Key field of dictionary.
        - `int`: Subscript index.
        - `str`: Name index.
    val_field : Value field of dictionary.
        - `None`: All fields except key.
        - `int`: Subscript index.
        - `str`: Name index.

    Returns
    -------
    Dictionary.
    """

    # Handle parameter.
    data = to_table(data)

    ## Check parameter: no rows means no fields to resolve.
    if len(data) == 0:
        return {}

    # Resolve integer subscripts to field names.
    # `type(...) is int` deliberately excludes `bool` (a subclass of `int`),
    # matching the original exact-class comparison.
    fields = list(data[0].keys())
    if type(key_field) is int:
        key_field = fields[key_field]
    if type(val_field) is int:
        val_field = fields[val_field]

    # Convert.

    ## Value is all fields except key.
    if val_field is None:
        data_dict = {
            row[key_field]: {
                key: value
                for key, value in row.items()
                if key != key_field
            }
            for row in data
        }

    ## Value is one field.
    else:
        data_dict = {
            row[key_field]: row[val_field]
            for row in data
        }

    return data_dict
|
169
|
+
|
170
|
+
|
171
|
+
def to_list(
    data: Union[Table, Iterable[Iterable]],
    field: Union[int, str] = 0,
) -> list:
    """
    Convert data as list.

    Parameters
    ----------
    data : Table format data.
    field : Field of value.
        - `int`: Subscript index.
        - `str`: Name index.

    Returns
    -------
    List.
    """

    # Handle parameter.
    data = to_table(data)

    ## Check parameter: no rows, nothing to extract.
    if len(data) == 0:
        return []

    # Resolve an integer subscript to a field name.
    # `type(...) is int` deliberately excludes `bool` (a subclass of `int`),
    # matching the original exact-class comparison.
    fields = list(data[0].keys())
    if type(field) is int:
        field = fields[field]

    # Convert: project the chosen field from every row.
    data_list = [
        row[field]
        for row in data
    ]

    return data_list
|
209
|
+
|
210
|
+
|
211
|
+
def to_df(
    data: Union[Table, Iterable[Iterable]],
    fields: Optional[Iterable] = None
) -> DataFrame:
    """
    Convert data to table of `DataFrame` object.

    Parameters
    ----------
    data : Table format data.
    fields : Table fields.
        - `None`: Infer.
        - `Iterable`: Use values in Iterable.

    Returns
    -------
    DataFrame object.
    """

    # Database cursor result: build directly from the fetched rows.
    if isinstance(data, CursorResult):
        columns = fields or data.keys()
        return DataFrame(data, columns=columns).convert_dtypes()

    # Already a DataFrame: normalize dtypes, optionally rename columns.
    if isinstance(data, DataFrame):
        frame = data.convert_dtypes()
        if fields is not None:
            frame.columns = fields
        return frame

    # Other object: a single dict becomes a one-row table.
    if data.__class__ == dict:
        data = [data]
    return DataFrame(data, columns=fields).convert_dtypes()
|
253
|
+
|
254
|
+
|
255
|
+
def to_json(
    data: Union[Table, Iterable[Iterable]],
    fields: Optional[Iterable] = None,
    compact: bool = True
) -> str:
    """
    Convert data to JSON string.

    Parameters
    ----------
    data : Table format data.
    fields : Table fields.
        - `None`: Infer.
        - `Iterable`: Use values in Iterable.
    compact : Whether compact content.

    Returns
    -------
    JSON string.
    """

    # This definition shadows the module-level name imported with
    # `from .rtext import to_json`, so the original `to_json(data, compact)`
    # call recursed into this very function (passing `compact` as `fields`)
    # instead of serializing. Re-import the helper under an alias.
    from .rtext import to_json as _rtext_to_json

    # Handle parameter.
    data = to_table(data, fields)

    # Convert.
    string = _rtext_to_json(data, compact)

    return string
|
283
|
+
|
284
|
+
|
285
|
+
def to_text(
    data: Union[Table, Iterable[Iterable]],
    fields: Optional[Iterable] = None,
    width: int = 100
) -> str:
    """
    Convert data to text.

    Parameters
    ----------
    data : Table format data.
    fields : Table fields.
        - `None`: Infer.
        - `Iterable`: Use values in Iterable.
    width : Format width.

    Returns
    -------
    Formatted text.
    """

    # This definition shadows the module-level name imported with
    # `from .rtext import to_text`, so the original `to_text(data, width)`
    # call recursed into this very function (passing `width` as `fields`)
    # instead of formatting. Re-import the helper under an alias.
    from .rtext import to_text as _rtext_to_text

    # Handle parameter.
    data = to_table(data, fields)

    # Convert.
    text = _rtext_to_text(data, width)

    return text
|
313
|
+
|
314
|
+
|
315
|
+
def to_sql(
    data: Union[Table, Iterable[Iterable]],
    fields: Optional[Iterable] = None
) -> str:
    """
    Convert data to SQL string of `SELECT ... UNION ALL SELECT ...` form,
    with column aliases attached to the first row only.

    Parameters
    ----------
    data : Table format data.
    fields : Table fields.
        - `None`: Infer.
        - `Iterable`: Use values in Iterable.

    Returns
    -------
    SQL string.
    """

    # Get fields of table.
    # A CursorResult is consumed lazily below, so it is not converted here.
    if isinstance(data, CursorResult):
        fields = fields or data.keys()
    else:
        data = to_table(data, fields)
        # NOTE(review): empty input raises IndexError here and again at
        # `sql_rows_values[0]` below — confirm whether callers guarantee
        # non-empty data.
        fields = data[0].keys()

    # Generate SQL.
    # Each row becomes a list of literal strings; None maps to SQL NULL.
    # NOTE(review): `repr` produces Python-style quoting, which is not
    # SQL-escaped for all string contents — verify against the target SQL
    # dialect before using with untrusted values.
    sql_rows_values = [
        [
            repr(time_to(value, raising=False))
            if value is not None
            else 'NULL'
            for value in row
        ]
        for row in data
    ]
    # One SELECT per row.
    sql_rows = [
        'SELECT ' + ','.join(row_values)
        for row_values in sql_rows_values
    ]
    # Re-render the first SELECT with `AS` aliases so the union carries
    # the column names.
    sql_row_first = 'SELECT ' + ','.join(
        [
            f'{value} AS `{key}`'
            for key, value in list(zip(fields, sql_rows_values[0]))
        ]
    )
    sql_rows[0] = sql_row_first
    data_sql = ' UNION ALL '.join(sql_rows)

    return data_sql
|
365
|
+
|
366
|
+
|
367
|
+
def to_html(
    data: Union[Table, Iterable[Iterable]],
    fields: Optional[Iterable] = None
) -> str:
    """
    Convert data to HTML string.

    Parameters
    ----------
    data : Table format data.
    fields : Table fields.
        - `None`: Infer.
        - `Iterable`: Use values in Iterable.

    Returns
    -------
    HTML string.
    """

    # Normalize input, then render a centered table without the index column.
    frame = to_df(data, fields)
    return frame.to_html(col_space=50, index=False, justify='center')
|
393
|
+
|
394
|
+
|
395
|
+
def to_csv(
    data: Union[Table, Iterable[Iterable]],
    path: str = 'data.csv',
    fields: Optional[Iterable] = None
) -> str:
    """
    Convert data to save CSV format file.
    When file exist, then append data.

    Parameters
    ----------
    data : Table format data.
    path : File save path.
    fields : Table fields.
        - `None`: Infer.
        - `Iterable`: Use values in Iterable.

    Returns
    -------
    File absolute path.
    """

    # Normalize input and resolve the target file.
    frame = to_df(data, fields)
    rfile = RFile(path)

    # Only write the header row when the RFile is falsy — presumably when
    # the file does not yet exist (TODO confirm RFile truthiness semantics).
    header = not rfile

    # Save file (append mode, so existing rows are preserved).
    frame.to_csv(rfile.path, header=header, index=False, mode='a')

    return rfile.path
|
429
|
+
|
430
|
+
|
431
|
+
def to_excel(
    data: Union[Table, Iterable[Iterable]],
    path: str = 'data.xlsx',
    group_field: Optional[str] = None,
    sheets_set: dict[Union[str, int], SheetSet] = {}
) -> str:
    """
    Convert data to save Excel format file and return sheet name and sheet data.
    When file exist, then rebuild file.

    Parameters
    ----------
    data : Table format data.
    path : File save path.
    group_field : Group field.
    sheets_set : Set sheet new name and sort sheet and filter sheet fields,
        key is old name or index, value is set parameters.
        - Parameter `name` : Set sheet new name.
        - Parameter `index` : Sort sheet.
        - Parameter `fields` : Filter sheet fields.

    Returns
    -------
    File absolute path.

    Examples
    --------
    >>> data = [
    ...     {'id': 1, 'age': 21, 'group': 'one'},
    ...     {'id': 2, 'age': 22, 'group': 'one'},
    ...     {'id': 3, 'age': 23, 'group': 'two'}
    ... ]
    >>> sheets_set = {
    ...     'one': {'name': 'age', 'index': 2, 'fields': ['id', 'age']},
    ...     'two': {'name': 'id', 'index': 1, 'fields': 'id'}
    ... }
    >>> to_excel(data, 'file.xlsx', 'group', sheets_set)
    """

    # Handle parameter.
    # NOTE(review): `sheets_set={}` is a mutable default; it is only read
    # here, never mutated, so it is safe in practice.
    if data.__class__ != DataFrame:
        data = to_df(data)
    path = os_abspath(path)

    # Generate sheets.
    # Without a group field, everything goes into one default sheet.
    if group_field is None:
        data_group = (('Sheet1', data),)
    else:
        data_group = data.groupby(group_field)
    # Sheets with an explicit 'index' are ordered first; the rest keep
    # their encounter order.
    sheets_table_before = []
    sheets_table_after = []
    for index, sheet_table in enumerate(data_group):
        sheet_name, sheet_df = sheet_table
        if group_field is not None:
            # Drop the grouping column from each sheet.
            # NOTE(review): this deletes from a groupby slice — may trigger
            # a pandas SettingWithCopyWarning; confirm on the pandas version
            # in use.
            del sheet_df[group_field]
        # A sheet's settings can be keyed by its group name or its position.
        if sheet_name in sheets_set:
            sheet_set = sheets_set[sheet_name]
        elif index in sheets_set:
            sheet_set = sheets_set[index]
        else:
            sheets_table_after.append((sheet_name, sheet_df))
            continue
        if 'name' in sheet_set:
            sheet_name = sheet_set['name']
        if 'fields' in sheet_set:
            sheet_df = sheet_df[sheet_set['fields']]
        if 'index' in sheet_set:
            sheets_table_before.append((sheet_set['index'], (sheet_name, sheet_df)))
        else:
            sheets_table_after.append((sheet_name, sheet_df))
    # Sort explicitly indexed sheets by their requested position.
    sort_func = lambda item: item[0]
    sheets_table_before.sort(key=sort_func)
    sheets_table = [sheet_table for sheet_index, sheet_table in sheets_table_before] + sheets_table_after

    # Save file.
    # NOTE(review): `to_excel(excel, sheet_name, ...)` passes sheet_name
    # positionally; newer pandas requires the `sheet_name=` keyword —
    # confirm against the supported pandas version.
    excel = ExcelWriter(path)
    for sheet_name, sheet_df in sheets_table:
        sheet_df.to_excel(excel, sheet_name, index=False)
    excel.close()

    return path
|