absfuyu 3.1.1__py3-none-any.whl → 3.3.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of absfuyu might be problematic.
- absfuyu/__init__.py +3 -10
- absfuyu/__main__.py +5 -250
- absfuyu/cli/__init__.py +51 -0
- absfuyu/cli/color.py +24 -0
- absfuyu/cli/config_group.py +56 -0
- absfuyu/cli/do_group.py +76 -0
- absfuyu/cli/game_group.py +109 -0
- absfuyu/config/__init__.py +117 -100
- absfuyu/config/config.json +0 -7
- absfuyu/core.py +5 -66
- absfuyu/everything.py +7 -9
- absfuyu/extensions/beautiful.py +30 -23
- absfuyu/extensions/dev/__init__.py +11 -8
- absfuyu/extensions/dev/password_hash.py +4 -2
- absfuyu/extensions/dev/passwordlib.py +7 -5
- absfuyu/extensions/dev/project_starter.py +4 -2
- absfuyu/extensions/dev/shutdownizer.py +148 -0
- absfuyu/extensions/extra/__init__.py +1 -2
- absfuyu/extensions/extra/data_analysis.py +182 -107
- absfuyu/fun/WGS.py +50 -26
- absfuyu/fun/__init__.py +6 -7
- absfuyu/fun/tarot.py +1 -1
- absfuyu/game/__init__.py +75 -81
- absfuyu/game/game_stat.py +36 -0
- absfuyu/game/sudoku.py +41 -48
- absfuyu/game/tictactoe.py +303 -548
- absfuyu/game/wordle.py +56 -47
- absfuyu/general/__init__.py +17 -7
- absfuyu/general/content.py +16 -15
- absfuyu/general/data_extension.py +282 -90
- absfuyu/general/generator.py +67 -67
- absfuyu/general/human.py +74 -78
- absfuyu/logger.py +94 -68
- absfuyu/pkg_data/__init__.py +29 -25
- absfuyu/py.typed +0 -0
- absfuyu/sort.py +61 -47
- absfuyu/tools/__init__.py +0 -1
- absfuyu/tools/converter.py +80 -62
- absfuyu/tools/keygen.py +62 -67
- absfuyu/tools/obfuscator.py +57 -53
- absfuyu/tools/stats.py +24 -24
- absfuyu/tools/web.py +10 -9
- absfuyu/util/__init__.py +71 -33
- absfuyu/util/api.py +53 -43
- absfuyu/util/json_method.py +25 -27
- absfuyu/util/lunar.py +20 -24
- absfuyu/util/path.py +362 -241
- absfuyu/util/performance.py +217 -135
- absfuyu/util/pkl.py +8 -8
- absfuyu/util/zipped.py +17 -19
- absfuyu/version.py +160 -147
- absfuyu-3.3.3.dist-info/METADATA +124 -0
- absfuyu-3.3.3.dist-info/RECORD +59 -0
- {absfuyu-3.1.1.dist-info → absfuyu-3.3.3.dist-info}/WHEEL +1 -2
- {absfuyu-3.1.1.dist-info → absfuyu-3.3.3.dist-info}/entry_points.txt +1 -0
- {absfuyu-3.1.1.dist-info → absfuyu-3.3.3.dist-info/licenses}/LICENSE +1 -1
- absfuyu/extensions/dev/pkglib.py +0 -98
- absfuyu/game/tictactoe2.py +0 -318
- absfuyu-3.1.1.dist-info/METADATA +0 -215
- absfuyu-3.1.1.dist-info/RECORD +0 -55
- absfuyu-3.1.1.dist-info/top_level.txt +0 -1
absfuyu/extensions/extra/data_analysis.py
CHANGED
@@ -3,11 +3,10 @@ Absfuyu: Data Analysis [W.I.P]
 ------------------------------
 Extension for ``pd.DataFrame``
 
-Version: 2.
-Date updated:
+Version: 2.1.3
+Date updated: 20/03/2024 (dd/mm/yyyy)
 """
 
-
 # Module level
 ###########################################################################
 __all__ = [
@@ -26,11 +25,11 @@ __all__ = [
 
 # Library
 ###########################################################################
+import random
+import string
 from collections import deque
 from datetime import datetime
-import random
 from itertools import chain, product
-import string
 from typing import Any, Dict, List, NamedTuple, Optional, Union
 
 # import matplotlib.pyplot as plt
@@ -38,13 +37,18 @@ from typing import Any, Dict, List, NamedTuple, Optional, Union
 # from dateutil.relativedelta import relativedelta
 import numpy as np
 import pandas as pd
+from deprecated import deprecated
+from deprecated.sphinx import deprecated as sphinx_deprecated
+from deprecated.sphinx import versionadded
 
 from absfuyu.logger import logger
-from absfuyu.util import
+from absfuyu.util import set_min, set_min_max
 
 
 # Function
 ###########################################################################
+@deprecated(reason="Not needed", version="3.1.0")
+@sphinx_deprecated(reason="Not needed", version="3.1.0")
 def summary(data: Union[list, np.ndarray]):  # del this
     """
     Quick summary of data
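
The new imports above come from the third-party Deprecated package; stacking the two decorators on `summary()` both raises a runtime `DeprecationWarning` and injects a Sphinx ``.. deprecated::`` note into the docstring. A minimal sketch of that pattern, using a made-up `demo()` function rather than absfuyu's code:

    # Sketch of the decorator stack added above; `demo` is hypothetical.
    import warnings

    from deprecated import deprecated
    from deprecated.sphinx import deprecated as sphinx_deprecated


    @deprecated(reason="Not needed", version="3.1.0")
    @sphinx_deprecated(reason="Not needed", version="3.1.0")
    def demo():
        """Placeholder function."""
        return None


    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        demo()

    print(caught[0].category.__name__)  # DeprecationWarning
    print(demo.__doc__)                 # docstring now carries a ".. deprecated::" block
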
@@ -93,7 +97,7 @@ def equalize_df(data: Dict[str, list], fillna=np.nan) -> Dict[str, list]:
 ## Update 05/10
 
 
-def compare_2_list(*arr: list) -> pd.DataFrame:
+def compare_2_list(*arr) -> pd.DataFrame:
     """
     Compare 2 lists then create DataFrame
     to see which items are missing
@@ -110,7 +114,7 @@ def compare_2_list(*arr: list) -> pd.DataFrame:
     """
     # Setup
     col_name = "list"
-    arr = [sorted(x) for x in arr]  # map(sorted, arr)
+    arr = [sorted(x) for x in arr]  # type: ignore # map(sorted, arr)
 
     # Total array
     tarr = sorted(list(set(chain.from_iterable(arr))))
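
For context, `compare_2_list` (per its docstring) lines several lists up against the union of their items to show which entries each list is missing. A rough re-creation of that idea in plain pandas; the exact output layout of absfuyu's version is not shown in this diff, so the column naming below is a guess:

    # Hedged sketch only: mimics the visible setup (sorted inputs, union of items),
    # but the real compare_2_list may shape its DataFrame differently.
    from itertools import chain

    import pandas as pd


    def compare_lists_sketch(*arr: list) -> pd.DataFrame:
        arr = [sorted(x) for x in arr]
        total = sorted(set(chain.from_iterable(arr)))  # every item seen anywhere
        data = {
            f"list{i}": [item if item in lst else None for item in total]
            for i, lst in enumerate(arr)
        }
        return pd.DataFrame(data, index=total)


    print(compare_lists_sketch([1, 2, 3], [2, 3, 4]))
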
@@ -178,33 +182,33 @@ class CityData(NamedTuple):
     area: str
 
     @staticmethod
-    def _sample_city_data(size: int = 100):
+    def _sample_city_data(size: int = 100) -> List["CityData"]:
         """
         Generate sample city data (testing purpose)
         """
         sample_range = 10 ** len(str(size))
 
         # Serial list
-        serials = []
-        while len(serials) != size:
+        serials: List[str] = []
+        while len(serials) != size:  # Unique serial
             serial = random.randint(0, sample_range - 1)
-            serial = str(serial).rjust(len(str(size)), "0")
-            if serial not in serials:
-                serials.append(serial)
-
-        ss2 = deque(serials[: int(len(serials) / 2)])
+            serial = str(serial).rjust(len(str(size)), "0")  # type: ignore
+            if serial not in serials:  # type: ignore
+                serials.append(serial)  # type: ignore
+
+        ss2 = deque(serials[: int(len(serials) / 2)])  # Cut half for region
         ss2.rotate(random.randrange(1, 5))
-        [ss2.extend(ss2) for _ in range(2)]  # Extend back
+        [ss2.extend(ss2) for _ in range(2)]  # type: ignore # Extend back
 
-        ss3 = deque(serials[: int(len(serials) / 4)])
+        ss3 = deque(serials[: int(len(serials) / 4)])  # Cut forth for area
         ss3.rotate(random.randrange(1, 5))
-        [ss3.extend(ss3) for _ in range(4)]  # Extend back
+        [ss3.extend(ss3) for _ in range(4)]  # type: ignore # Extend back
 
         serials = ["city_" + x for x in serials]
-        ss2 = ["region_" + x for x in ss2]
-        ss3 = ["area_" + x for x in ss3]
+        ss2 = ["region_" + x for x in ss2]  # type: ignore
+        ss3 = ["area_" + x for x in ss3]  # type: ignore
 
-        ss = list(zip(serials, ss2, ss3))
+        ss = list(zip(serials, ss2, ss3))  # Zip back
         out = list(map(CityData._make, ss))
 
         return out
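
`_sample_city_data` ends by prefixing the serials (`city_`, `region_`, `area_`), zipping the three sequences and mapping them through `CityData._make`. A tiny sketch of the resulting records, with `CityData` re-declared and the serial values invented for illustration:

    # Illustration only: CityData is re-declared here and the values are made up;
    # absfuyu generates them randomly as shown in the diff above.
    from typing import List, NamedTuple


    class CityData(NamedTuple):
        city: str
        region: str
        area: str


    rows = [
        ("city_042", "region_021", "area_010"),
        ("city_017", "region_008", "area_004"),
    ]
    cities: List[CityData] = list(map(CityData._make, rows))  # same _make pattern
    print(cities[0].city, cities[0].region, cities[0].area)
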
@@ -265,7 +269,7 @@ class SplittedDF(NamedTuple):
         DataFrame
             Joined DataFrame
         """
-        return self.concat_df(self, join=join)
+        return self.concat_df(self, join=join)  # type: ignore
 
     @staticmethod
     def divide_dataframe(df: pd.DataFrame, by_column: str) -> List[pd.DataFrame]:
@@ -318,10 +322,10 @@ class _DictToAtrr:
         if key_as_atrribute:
             # temp = list(map(self._remove_space, self._data.keys()))
             temp = [self._remove_space(x, remove_char) for x in self._data.keys()]
-            [self.__setattr__(k, v) for k, v in zip(temp, self._data.values())]
+            [self.__setattr__(k, v) for k, v in zip(temp, self._data.values())]  # type: ignore
         else:
             temp = [self._remove_space(x, remove_char) for x in self._data.values()]
-            [self.__setattr__(k, v) for k, v in zip(temp, self._data.keys())]
+            [self.__setattr__(k, v) for k, v in zip(temp, self._data.keys())]  # type: ignore
         self._keys = temp
 
     def __str__(self) -> str:
@@ -335,7 +339,7 @@ class _DictToAtrr:
         """
         Remove special characters and replace space with underscore
         """
-        remove_char = remove_char.split(" ")
+        remove_char = remove_char.split(" ")  # type: ignore
         logger.debug(remove_char)
         for x in remove_char:
             value = value.replace(x, "")
@@ -398,15 +402,15 @@ class MatplotlibFormatString:
     @staticmethod
     def all_format_string() -> List[PLTFormatString]:
         fmt_str = [
-            __class__.MARKER_LIST,
-            __class__.LINE_STYLE_LIST,
-            __class__.COLOR_LIST,
+            __class__.MARKER_LIST,  # type: ignore
+            __class__.LINE_STYLE_LIST,  # type: ignore
+            __class__.COLOR_LIST,  # type: ignore
         ]
         return [PLTFormatString._make(x) for x in list(product(*fmt_str))]
 
     @staticmethod
     def get_random(alt: bool = False) -> str:
-        temp = random.choice(__class__.all_format_string())
+        temp = random.choice(__class__.all_format_string())  # type: ignore
         if alt:
             return f"{temp.marker}{temp.line_style}{temp.color}"
         else:
@@ -416,9 +420,9 @@ class MatplotlibFormatString:
 # Class - DA
 ###########################################################################
 class DataAnalystDataFrame(pd.DataFrame):
-    """
-
-
+    """
+    Data Analyst ``pd.DataFrame``
+    """
 
     # Support
     # ================================================================
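
The class itself extends `pd.DataFrame` directly, so the whole pandas API stays available on a `DataAnalystDataFrame`; only the docstring changes in this hunk. A minimal sketch of that subclassing pattern with a made-up class (not absfuyu's):

    # Minimal sketch of the "extend pd.DataFrame by subclassing" pattern.
    # ReportFrame and col_summary are hypothetical names.
    import pandas as pd


    class ReportFrame(pd.DataFrame):
        """pd.DataFrame plus one custom helper."""

        def col_summary(self) -> str:
            return f"{self.shape[1]} columns: {', '.join(map(str, self.columns))}"


    rf = ReportFrame({"a": [1, 2], "b": [3, 4]})
    print(rf.col_summary())  # custom method
    print(rf["a"].sum())     # everything from pandas still works

Note that, unless `_constructor` is overridden, most pandas operations hand back plain `DataFrame` objects, which is presumably why the next hunk re-wraps results as `self.__class__(self[cols])`.
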
@@ -441,14 +445,14 @@ class DataAnalystDataFrame(pd.DataFrame):
             Modified DataFrame
         """
         cols = self.columns.to_list()  # List of columns
-        num_of_cols = set_min_max(num_of_cols, min_value=1, max_value=len(cols))
+        num_of_cols = int(set_min_max(num_of_cols, min_value=1, max_value=len(cols)))
         col_index = cols.index(insert_to_col)
         cols = (
             cols[: col_index + 1]
             + cols[-num_of_cols:]
             + cols[col_index + 1 : len(cols) - num_of_cols]
         )
-        self = __class__(self[cols])
+        self = self.__class__(self[cols])
         return self
 
     # Drop a list of column
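
In plain terms, `rearrange_column` clamps `num_of_cols` into a valid range (now cast to `int`, since `set_min_max` apparently may return a float) and then moves the last `num_of_cols` columns to sit right after `insert_to_col`. A sketch of that reordering with a stand-in `clamp()` for `absfuyu.util.set_min_max`, whose exact behaviour is assumed from its call sites:

    import pandas as pd


    def clamp(value, min_value, max_value):
        # Stand-in for absfuyu.util.set_min_max (assumed behaviour).
        return max(min_value, min(value, max_value))


    df = pd.DataFrame({"a": [1], "b": [2], "c": [3], "d": [4]})
    cols = df.columns.to_list()
    num_of_cols = int(clamp(2, min_value=1, max_value=len(cols)))  # int() as in the new code
    col_index = cols.index("a")  # move the last columns right after "a"
    cols = cols[: col_index + 1] + cols[-num_of_cols:] + cols[col_index + 1 : len(cols) - num_of_cols]
    print(df[cols].columns.to_list())  # ['a', 'c', 'd', 'b']
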
@@ -469,7 +473,7 @@ class DataAnalystDataFrame(pd.DataFrame):
         for column in columns:
             try:
                 self.drop(columns=[column], inplace=True)
-            except:
+            except Exception:
                 logger.debug(f"{column} column does not exist")
                 # pass
         return self
@@ -494,7 +498,9 @@ class DataAnalystDataFrame(pd.DataFrame):
         # num_of_cols = 1
         # if num_of_cols > self.shape[1]:
         #     num_of_cols = self.shape[1]
-        num_of_cols =
+        num_of_cols = int(
+            set_min_max(num_of_cols, min_value=1, max_value=self.shape[1])
+        )
 
         # Logic
         for _ in range(num_of_cols):
@@ -528,9 +534,9 @@ class DataAnalystDataFrame(pd.DataFrame):
     def convert_city(
         self,
         city_column: str,
-        city_list: List[CityData]
+        city_list: List[CityData],
         *,
-        mode: str = "ra"
+        mode: str = "ra",
     ):
         """
         Get ``region`` and ``area`` of a city
@@ -557,19 +563,25 @@ class DataAnalystDataFrame(pd.DataFrame):
         """
 
         # Support function
-        def _convert_city_support(value: str):
+        def _convert_city_support(value: str) -> CityData:
             for x in city_list:
                 if x.city.lower().startswith(value.lower()):
                     return x
-            return CityData(city=value, region=np.nan, area=np.nan)
+            return CityData(city=value, region=np.nan, area=np.nan)  # type: ignore
 
         # Convert
         col_counter = 0
         if mode.find("r") != -1:
-
+            logger.debug("Mode: 'region'")
+            self["region"] = self[city_column].apply(
+                lambda x: _convert_city_support(x).region
+            )
             col_counter += 1
         if mode.find("a") != -1:
-
+            logger.debug("Mode: 'area'")
+            self["area"] = self[city_column].apply(
+                lambda x: _convert_city_support(x).area
+            )
             col_counter += 1
 
         # Rearrange
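
`convert_city` therefore just looks each city up in `city_list` and writes the matching `region`/`area` back as new columns. A hedged usage sketch on a plain pandas DataFrame, with `CityData` re-declared and the city data invented:

    from typing import NamedTuple

    import numpy as np
    import pandas as pd


    class CityData(NamedTuple):
        city: str
        region: str
        area: str


    # Made-up lookup data; absfuyu receives this as `city_list`.
    city_list = [
        CityData("hanoi", "north", "red_river"),
        CityData("hcmc", "south", "mekong"),
    ]


    def lookup(value: str) -> CityData:
        # Same idea as the _convert_city_support helper in the diff.
        for x in city_list:
            if x.city.lower().startswith(value.lower()):
                return x
        return CityData(city=value, region=np.nan, area=np.nan)  # type: ignore


    df = pd.DataFrame({"city": ["hanoi", "hcmc", "hue"]})
    df["region"] = df["city"].apply(lambda x: lookup(x).region)
    df["area"] = df["city"].apply(lambda x: lookup(x).area)
    print(df)
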
@@ -651,6 +663,8 @@ class DataAnalystDataFrame(pd.DataFrame):
             logger.debug("Mode: 'year'")
             self["year"] = self["date"].dt.year
             col_counter += 1
+
+        # Return
         return self.rearrange_column(date_column, col_counter)
 
     def delta_date(
@@ -725,7 +739,7 @@ class DataAnalystDataFrame(pd.DataFrame):
         """
         try:
             self[column_name] = self[column_name].fillna(fill)
-        except:
+        except Exception:
             self.add_blank_column(column_name, fill_when_not_exist)
         return self
 
@@ -753,20 +767,19 @@ class DataAnalystDataFrame(pd.DataFrame):
         return out
 
     # Threshold filter
+    # @versionchanged(version="3.2.0", reason="Optimized the code")
     def threshold_filter(
         self,
         destination_column: str,
         threshold: Union[int, float] = 10,
         *,
         top: Optional[int] = None,
-        group_by_column: Optional[str] = None,
         replace_with: Any = "Other",
     ):
         """
         Filter out percentage of data that smaller than the ``threshold``,
         replace all of the smaller data to ``replace_with``.
         As a result, pie chart is less messy.
-        Version: 1.1.0
 
         Parameters
         ----------
@@ -782,10 +795,6 @@ class DataAnalystDataFrame(pd.DataFrame):
             (replace threshold mode)
             (Default: ``None``)
 
-        group_by_column : str
-            Calculate threshold for each category in selected column [W.I.P]
-            (Default: ``None``)
-
         replace_with : Any
             Replace all of the smaller data with specified value
 
@@ -799,52 +808,11 @@ class DataAnalystDataFrame(pd.DataFrame):
             self[destination_column] = self[
                 destination_column
             ].str.strip()  # Remove trailing space
-        except:
+        except Exception:
             pass
 
         # Logic
-
-        # Get a column with no missing values
-        col_with_no_na = ""
-        for col_name in self.columns:
-            if col_name == destination_column:
-                continue
-            if self[col_name].isna().sum() == 0:
-                col_with_no_na = col_name
-                break
-        if col_with_no_na == "":
-            # CASE: every col has NA else where
-            for col_name in self.columns:
-                if col_name == destination_column:
-                    continue
-                else:
-                    col_with_no_na = col_name
-                    break
-            self[col_with_no_na].fillna("N/A")
-
-        # Calculate threshold
-        col_df = (
-            self.groupby(destination_column)
-            .count()[col_with_no_na]
-            .sort_values(ascending=False)
-            .to_frame()
-            .reset_index()
-        )
-        col_df.rename(columns={col_with_no_na: "total_count"}, inplace=True)
-        col_df["percentage"] = col_df["total_count"] / self.shape[0] * 100
-        # logger.debug(col_df)  # Show calculation result
-        else:
-            # Real logic: manually select a column to perform percentage calculation
-            # Calculate threshold for each category in selected column may be will be added in the future
-            col_df = (
-                self.groupby(destination_column)[group_by_column]
-                .count()
-                .sort_values(ascending=False)
-                / self.shape[0]
-                * 100
-            )  # percentage of destination_column
-            col_df = col_df.reset_index()
-            col_df.rename(columns={group_by_column: "percentage"}, inplace=True)
+        col_df = self.show_distribution(destination_column)
 
         # Rename
         if top is not None:
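
The forty-odd removed lines above (picking a NA-free helper column, grouping, renaming) collapse into a single call: the percentage table now comes from the new `show_distribution` method further down in this diff. A plain-pandas sketch of the resulting behaviour, assuming pandas >= 2.0 where `value_counts()` names its column ``count`` (as `show_distribution` itself relies on):

    import pandas as pd

    df = pd.DataFrame({"fruit": ["apple"] * 8 + ["banana"] + ["cherry"]})
    threshold = 15  # percent

    # show_distribution-style table: per-value count and percentage
    dist = df["fruit"].value_counts().to_frame().reset_index()
    dist["percentage"] = dist["count"] / df.shape[0] * 100

    keep = dist[dist["percentage"] >= threshold]["fruit"].to_list()
    df["fruit_filtered"] = df["fruit"].apply(lambda x: x if x in keep else "Other")
    print(df["fruit_filtered"].value_counts())  # apple: 8, Other: 2
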
@@ -855,7 +823,7 @@ class DataAnalystDataFrame(pd.DataFrame):
             )
             # logger.debug(list_of_keep)
         else:
-            list_of_keep
+            list_of_keep = col_df[col_df["percentage"] >= threshold][
                 destination_column
             ].to_list()  # values that will not be renamed
         self[f"{destination_column}_filtered"] = self[destination_column].apply(
@@ -867,6 +835,41 @@ class DataAnalystDataFrame(pd.DataFrame):
 
     # Info
     # ================================================================
+    # Total observation
+    @property
+    @versionadded(version="3.2.0")
+    def total_observation(self) -> int:
+        """Returns total observation of the DataFrame"""
+        return self.shape[0] * self.shape[1]  # type: ignore
+
+    # Quick info
+    @versionadded(version="3.2.0")
+    def qinfo(self) -> str:
+        """
+        Show quick infomation about DataFrame
+        """
+        mv = self.isnull().sum().sum()  # missing values
+        to = self.total_observation
+        info = (
+            f"Dataset Information:\n"
+            f"- Number of Rows: {self.shape[0]:,}\n"
+            f"- Number of Columns: {self.shape[1]:,}\n"
+            f"- Total observation: {to:,}\n"
+            f"- Missing value: {mv:,} ({(mv / to * 100):.2f}%)\n\n"
+            f"Column names:\n{self.columns.to_list()}"
+        )
+        return info
+
+    # Quick describe
+    @versionadded(version="3.2.0")
+    def qdescribe(self) -> pd.DataFrame:
+        """
+        Quick ``describe()`` that exclude ``object`` and ``datetime`` dtype
+        """
+        return self[
+            self.select_dtypes(exclude=["object", "datetime"]).columns
+        ].describe()
+
     # Missing values analyze
     def get_missing_values(
         self, hightlight: bool = True, *, percentage_round_up: int = 2
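
A hedged usage sketch for the three new helpers; the import path is taken from the file list at the top of this diff and is an assumption about how the class is meant to be imported:

    # Assumes absfuyu >= 3.2.0 is installed and the class is importable from this path.
    from absfuyu.extensions.extra.data_analysis import DataAnalystDataFrame

    df = DataAnalystDataFrame.sample_df()
    print(df.total_observation)  # rows * columns (a property, no parentheses)
    print(df.qinfo())            # row/column counts, missing-value share, column names
    print(df.qdescribe())        # describe() without object/datetime columns
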
@@ -896,22 +899,76 @@ class DataAnalystDataFrame(pd.DataFrame):
         else:
             out = df_na.to_frame()
             out.rename(columns={0: "Num of N/A"}, inplace=True)
-            out["Percentage"] = (out["Num of N/A"] / self.shape[0] * 100).round(
-
-            logger.debug(
-                f"Percentage of N/A over entire DF: "
-                f"{(self.isnull().sum().sum() / (self.shape[0] * self.shape[1]) * 100).round(percentage_round_up)}%"
+            out["Percentage"] = (out["Num of N/A"] / self.shape[0] * 100).round(
+                percentage_round_up
             )
+
+            # logger.debug(
+            #     f"Percentage of N/A over entire DF: "
+            #     f"{(self.isnull().sum().sum() / (self.shape[0] * self.shape[1]) * 100).round(percentage_round_up)}%"
+            # )
         return out
 
-    #
-
+    # Show distribution
+    @versionadded(version="3.2.0")
+    def show_distribution(
+        self,
+        column_name: str,
+        dropna: bool = True,
+        *,
+        show_percentage: bool = True,
+        percentage_round_up: int = 2,
+    ) -> pd.DataFrame:
         """
-
+        Show distribution of a column
+
+        Parameters
+        ----------
+        column_name : str
+            Column to show distribution
+
+        dropna : bool
+            Count N/A when ``False``
+            (Default: ``True``)
+
+        show_percentage : bool
+            Show proportion in range 0% - 100% instead of [0, 1]
+            (Default: ``True``)
+
+        percentage_round_up : int
+            Round up to which decimals
+            (Default: ``2``)
+
+        Returns
+        -------
+        DataFrame
+            Distribution DataFrame
+
+
+        Example:
+        --------
+        >>> DataAnalystDataFrame.sample_df().show_distribution("number_range")
+          number_range  count  percentage
+        0          900     16        16.0
+        1          700     15        15.0
+        2          300     12        12.0
+        3          200     12        12.0
+        4          400     11        11.0
+        5          600     11        11.0
+        6          800     10        10.0
+        7          100      9         9.0
+        8          500      4         4.0
         """
-
-
-
+        out = self[column_name].value_counts(dropna=dropna).to_frame().reset_index()
+        if show_percentage:
+            out["percentage"] = (out["count"] / self.shape[0] * 100).round(
+                percentage_round_up
+            )
+        else:
+            out["percentage"] = (out["count"] / self.shape[0]).round(
+                percentage_round_up
+            )
+        return out
 
     # Help
     @staticmethod
@@ -919,7 +976,7 @@ class DataAnalystDataFrame(pd.DataFrame):
         """
         Show all available method of DataAnalystDataFrame
         """
-        list_of_method = list(set(dir(__class__)) - set(dir(pd.DataFrame)))
+        list_of_method = list(set(dir(__class__)) - set(dir(pd.DataFrame)))  # type: ignore
         return sorted(list_of_method)
 
     # Sample DataFrame
@@ -939,9 +996,27 @@ class DataAnalystDataFrame(pd.DataFrame):
         DataAnalystDataFrame
             DataFrame with these columns:
             [number, number_big, number_range, missing_value, text, date]
+
+
+        Example:
+        --------
+        >>> DataAnalystDataFrame.sample_df()
+              number  number_big  number_range  missing_value      text       date
+        0  -2.089770         785           700            NaN  vwnlqoql 2013-11-20
+        1  -0.526689         182           100           24.0  prjjcvqc 2007-04-13
+        2  -1.596514         909           900            8.0  cbcpzlac 2023-05-24
+        3   2.982191         989           900           21.0  ivwqwuvd 2022-04-28
+        4   1.687803         878           800            NaN  aajtncum 2005-10-05
+        ..       ...         ...           ...            ...       ...        ...
+        95 -1.295145         968           900           16.0  mgqunkhi 2016-04-12
+        96  1.296795         255           200            NaN  lwvytego 2014-05-10
+        97  1.440746         297           200            5.0  lqsoykun 2010-04-03
+        98  0.327702         845           800            NaN  leadkvsy 2005-08-05
+        99  0.556720         981           900           36.0  bozmxixy 2004-02-22
+        [100 rows x 6 columns]
         """
         # Restrain
-        size = set_min(size, min_value=1)
+        size = int(set_min(size, min_value=1))
 
         # Number col
         df = pd.DataFrame(np.random.randn(size, 1), columns=["number"])
absfuyu/fun/WGS.py
CHANGED
@@ -1,3 +1,4 @@
+# flake8: noqa
 """
 Absfuyu: WGS
 ------------
@@ -15,12 +16,9 @@ Usage:
 >>> print(test.pixel_art())
 """
 
-
 # Module level
 ###########################################################################
-__all__ = [
-    "WGS"
-]
+__all__ = ["WGS"]
 
 
 # Library
@@ -40,35 +38,61 @@ class WGS:
     - This module is not affiliated with miHoYo/Hoyoverse.
     - Genshin Impact, game content and materials are trademarks and copyrights of miHoYo/Hoyoverse.
     """
+
     def __init__(self) -> None:
self._text_art_big = "ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAuLjo9KzoKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgID0qKioqKi4gCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAuPSojKisrCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAtIyslKyA6LgogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgLSMjJSsuCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIC4qIyUqLgogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICA6IyMlPQogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICA9IyMjOgogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICArIyUrICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAtIyMjLQogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICA9KyAgICA9JSMjLiAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIDojIyUrLi4tLT0tIDojIyo9KiMlJT0gICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICMlJSUlIyUjIyUlJSUlJSMjIyUlOgogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgOj0lJUAlIyMjIyUjJSUlJSUlPS0tOiAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAuJSo6JSUjJSUlIyMlJSUlJSUqPSAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICA6IyM9KysjJSMjKyolJSMlJSo6ICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIDo9KiojIyMjJSMlIyUlJSUlJSUjKyAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIC49KyoqIyUlJSMjIyUjIyUlJSUlJSsgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAqJSMjIyUjIyMjQCMlIys9LSNAJSUjKz0tICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgOisqIyVAJSUjIyUlJSMlIyUjKiouOislJSMjLiAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAtKiMjJSVAJSUlJSUlIyUtLis9ICAgICA6Ky0tLiAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIC0qIyMlQEAlIyVAQCUjJUArIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIC4tIyUlJUAlJUBAIyUlIyouICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAuPSMjJSUlJSUlJSUjJSU9LiAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAuLSojJSUlJSUlQCUlJSMqLSAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAuOiolJSUlJSUlJSUlIyorLgogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgOiojJSUlJSUlJSUlIz06ICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIC4gLSMjIyMjIyUlJSUlIy0gCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICA9IyMjIyUjIyUlIz0qLiAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgOisjIyMjIyMlJSUqLS0uIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAuLSojIyMjJSMlJSUqLTouIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICA9KysqKioqIyMqKj0tLiAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAuLSsqKiorKiojIyMqPTogCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgLSsrKysqIyMrKiMqPS4gIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgLjorKiojIyMjIyUjKj0tICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgIC4gOisqIyMjIyMjIyMqPTogIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgLiA9KyojKisjIyMjIystIAogICAgICAgICAgICAgICAgI
CAgICAgICAuIC49KiojKi0qIyMjKj0tICAKICAgICAgICAgICAgICAgICAgICAgICAgOisqKiMrPSojIyMqPToKICAgICAgICAgICAgICAgICAgICAgICAgLSsqKiotPSoqKyorPS4KICAgICAgICAgICAgICAgICAgICAgICAgLj0rKiorLSojKioqKy0uICAKICAgICAgICAgICAgICAgICAgICAuOi09KyoqLT0jIyoqKz06IAogICAgICAgICAgICAgICAgICAgIDotPSsrKiotKyMqKiorLSAKICAgICAgICAgICAgICAgICAgICA6PSsrKyAgOioqKis9OiAgICAKICAgICAgICAgICAgICAgIDo9KyoqLS09KioqKz06IAogICAgICAgICAgICAgICAgLT0rKiotKysrKz06LiAgCiAgICAgICAgICAgICAgICA6PT0rKis6PT06LiAgCiAgICAgICAgICAgIDo9KysqLQogICAgICAgICAgICAuLT0rKy0uIAogICAgICAgIC46PS06LiAgICAKICAgICAgICAuOjouICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAK"
self._text_art_small = "CiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgOjoKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAuKiorCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgLiojLi4KICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIDojKwogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgPSM6CiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIC4uICAqKgogICAgICAgICAgICAgICAgICAgICAgICAgICAgPSUrPSsrIyMqIyoKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICArKiUjJSMlJSM9CiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgKyoqIyMjJSUlPQogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICsjIyUjIyMqKiUjOgogICAgICAgICAgICAgICAgICAgICAgICAgICAgLisjJSUlJSUrKjogLSotCiAgICAgICAgICAgICAgICAgICAgICAgICAgICA9IyUlJSUlKy4KICAgICAgICAgICAgICAgICAgICAgICAgICAgIC49IyUlJSUjPQogICAgICAgICAgICAgICAgICAgICAgICAuKiUlJSUlKy4KICAgICAgICAgICAgICAgICAgICAgICAgLiojJSUlIy0KICAgICAgICAgICAgICAgICAgICAgICAgLSMjIyUjPS4KICAgICAgICAgICAgICAgICAgICAuPSoqKiMqOgogICAgICAgICAgICAgICAgICAgIDorKiMjKisuCiAgICAgICAgICAgICAgICAgICAgLSoqIyMjLQogICAgICAgICAgICAgICAgLj0qKyojKjoKICAgICAgICAgICAgICAgIDorKisqKj0uCiAgICAgICAgICAgICAgICAuLSsrKyoqLQogICAgICAgICAgICAuPSotLSorOgogICAgICAgICAgICAuPSs9PS0uCiAgICAgICAgICAgIDorPQogICAgICAgIC46Lgo="
         self._pixel_art_wgs = [
             # Art Credit: https://www.reddit.com/r/PixelArt/comments/n6xyrb/wolfs_gravestone_genshin_impact/
-            "51w",
-            "
-            "
-            "
-            "
-            "
-            "
-            "
+            "51w",
+            "46w3b1w",
+            "43w2b1w1b1R1b1w",
+            "43w1b1R1b1R2b1w",
+            "43w2b1r1b3w",
+            "41w3b1r1b1r1b2w",
+            "28w3b10w1b1g5b2w",
+            "28w1b1r1b9w1b3g1b5w",
+            "28w2b1R1b1w2b4w1b3g2b5w",
+            "30w1b1m1b1g1b1w1b1w1b3g1b7w",
+            "30w3b2g3b3g1b8w",
+            "29w1b2g1b5g1b1g1b9w",
+            "29w1b1g2b1g4b1g1b10w",
+            "29w1b3g2b1R2b1g2b9w",
+            "27w7b3r1b1g1b10w",
+            "27w1b5g1b1r1w2b2g2b8w",
+            "26w2b3g6b4g1b8w",
+            "26w1b2g2b2R1b1g1b1g4b9w",
+            "25w1b2g1b2R2r1b1g1b1g1b1g1b1m1b8w",
             "24w1b1w1m1g1b3r1b2g1b3g2b1R2b6w",
             "21w3b2w1m1b3r1w1b2g4b2w1b1r1b6w",
-            "21w1b2w3m1b1r1w2b3g1b5w3b6w",
-            "
-            "
-            "
-            "
-            "
-            "
-            "
-            "
-            "
-            "
+            "21w1b2w3m1b1r1w2b3g1b5w3b6w",
+            "21w1b1w3m1b1r2b3g3b14w",
+            "20w1b1w2R1m1b1r1b3m1g2b16w",
+            "19w1b1w3R1b1r1b2m2w1b18w",
+            "18w1b1w3R1b1r1b3m1w1b19w",
+            "17w1b1r3R1b1w1b2R1m1w1b20w",
+            "16w1b1w1R1r1R1b1r1b3R2w1b20w",
+            "15w1b1w1r1R1y1R2b3R1w3b20w",
+            "14w1b3r1y1R2r4y1b23w",
+            "13w1b1w3r4y2R1w1b24w",
+            "12w1b1w2r2R3r1R2y1b25w",
+            "10w2b1w2r1R2b1R3r1w1b26w",
+            "9w1b2w2r1R1b1w1b1R2r1w1b27w",
+            "9w1b1w2r1R1b1w1b1R2r1y1b28w",
+            "8w1b1w1r2R1b1w1b2r1y1w1b29w",
+            "7w1b1w1r1R2b1w1b4y1b30w",
+            "6w1b1w1r1R1b2w1b1R2r1w1b31w",
+            "5w1b1w2r1R1b2w1b1R1r2w1b31w",
+            "4w1b1w2r1R1b1w2b2r1w2b32w",
+            "3w1b1w1r1y1R1b1w1b3y1w1b34w",
+            "2w1b1w1r2y1b1w6b35w",
+            "1w6b43w",
+            "50w",
         ]
+
     def __str__(self) -> str:
         return f"{self.__class__.__name__}()"
+
     def __repr__(self) -> str:
         return self.__str__()
 
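
The `_pixel_art_wgs` rows read like run-length tokens (a count followed by a one-letter colour code: w, b, r, R, g, y, m), joined with "1N" and handed to `Str2Pixel(out).convert()`. Str2Pixel's real decoding lives in absfuyu and is not reproduced here; the count-plus-letter reading below is an assumption drawn from this diff (for example the new comments "Convert yellow into red" / "Convert magenta into dark red"):

    # Hedged sketch of how a row like "21w3b2w1m..." can be expanded.
    import re

    ROW = "21w3b2w1m1b3r1w1b2g4b2w1b1r1b6w"


    def expand(row: str) -> str:
        out = []
        for count, code in re.findall(r"(\d+)([A-Za-z])", row):
            out.append(code * int(count))  # repeat each colour code `count` times
        return "".join(out)


    pixels = expand(ROW)
    print(len(pixels), pixels[:30])  # the run lengths in this row add up to 50
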
@@ -97,8 +121,8 @@ class WGS:
         """
         # Make data
         out = "1N".join(self._pixel_art_wgs)
-        out = out.replace("y", "r")
-        out = out.replace("m", "R")
+        out = out.replace("y", "r")  # Convert yellow into red
+        out = out.replace("m", "R")  # Convert magenta into dark red
         return Str2Pixel(out).convert()
 
 
@@ -107,4 +131,4 @@ class WGS:
 if __name__ == "__main__":
     logger.setLevel(10)
     test = WGS()
-    print(test.pixel_art())
+    print(test.pixel_art())