xiuyutools 1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- xiuyutools-1.0/PKG-INFO +19 -0
- xiuyutools-1.0/README.md +8 -0
- xiuyutools-1.0/owntools/Common.py +146 -0
- xiuyutools-1.0/owntools/CrackCode.py +87 -0
- xiuyutools-1.0/owntools/Download.py +201 -0
- xiuyutools-1.0/owntools/ExtractFile.py +48 -0
- xiuyutools-1.0/owntools/Image.py +42 -0
- xiuyutools-1.0/owntools/Loger.py +44 -0
- xiuyutools-1.0/owntools/Mysql.py +96 -0
- xiuyutools-1.0/owntools/Subtitles.py +109 -0
- xiuyutools-1.0/owntools/Time.py +90 -0
- xiuyutools-1.0/owntools/Translation.py +135 -0
- xiuyutools-1.0/owntools/UncompressRAR.py +15 -0
- xiuyutools-1.0/owntools/_SSH.py +68 -0
- xiuyutools-1.0/owntools/__init__.py +13 -0
- xiuyutools-1.0/owntools/ownEmail.py +76 -0
- xiuyutools-1.0/setup.cfg +4 -0
- xiuyutools-1.0/setup.py +17 -0
- xiuyutools-1.0/xiuyutools.egg-info/PKG-INFO +19 -0
- xiuyutools-1.0/xiuyutools.egg-info/SOURCES.txt +20 -0
- xiuyutools-1.0/xiuyutools.egg-info/dependency_links.txt +1 -0
- xiuyutools-1.0/xiuyutools.egg-info/top_level.txt +1 -0
xiuyutools-1.0/PKG-INFO
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
Metadata-Version: 2.1
|
|
2
|
+
Name: xiuyutools
|
|
3
|
+
Version: 1.0
|
|
4
|
+
Summary: Tools for myself
|
|
5
|
+
Home-page: https://github.com/liangxiuyu/owntools
|
|
6
|
+
Author: Neely
|
|
7
|
+
Author-email: liangxiuyu@outlook.com
|
|
8
|
+
Classifier: Programming Language :: Python :: 3
|
|
9
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
10
|
+
Description-Content-Type: text/markdown
|
|
11
|
+
|
|
12
|
+
# OwnTools
|
|
13
|
+
owntools
|
|
14
|
+
2021-12-27 Created
|
|
15
|
+
|
|
16
|
+
##
|
|
17
|
+
python setup.py sdist bdist_wheel
|
|
18
|
+
python setup.py develop
|
|
19
|
+
twine upload dist/*
|
xiuyutools-1.0/README.md
ADDED
|
@@ -0,0 +1,146 @@
|
|
|
1
|
+
import datetime
|
|
2
|
+
import json
|
|
3
|
+
import requests
|
|
4
|
+
import re
|
|
5
|
+
from retry import retry
|
|
6
|
+
from pprint import pprint
|
|
7
|
+
import functools
|
|
8
|
+
import time
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class DateEncode(json.JSONEncoder):
    """JSON encoder that also serializes datetime/date/timedelta values.

    ``json.dumps`` raises ``TypeError: Object of type 'datetime' is not JSON
    serializable`` for datetime objects; this subclass renders them as
    strings (and timedeltas as a number of seconds) before falling back to
    the stock encoder.

    Usage: ``json.dumps(data, cls=DateEncode)``
    """

    def default(self, obj):
        if isinstance(obj, datetime.datetime):
            # Must be checked before datetime.date (datetime is a subclass).
            return obj.strftime("%Y-%m-%d %H:%M:%S")
        if isinstance(obj, datetime.date):
            return obj.strftime("%Y-%m-%d")
        if isinstance(obj, datetime.timedelta):
            # Bug fix: the original returned ``obj.seconds``, which ignores
            # the days component (and misbehaves for negative deltas).
            # total_seconds() covers the whole duration; int() keeps the
            # original integer output for sub-day values.
            return int(obj.total_seconds())
        return json.JSONEncoder.default(self, obj)
|
|
30
|
+
|
|
31
|
+
def creat_cookies(from_path: str, to_path: str):
    """Flatten a browser cookie export into a simple name->value JSON file.

    Reads ``from_path`` (expected schema: ``{"cookies": [{"name": ...,
    "value": ...}, ...]}`` -- e.g. an EditThisCookie-style export) and
    writes ``{name: value, ...}`` to ``to_path``.

    Args:
        from_path: path of the exported cookie JSON file.
        to_path: path of the flattened JSON file to create/overwrite.
    """
    # Explicit utf-8 avoids mojibake on platforms whose locale encoding is
    # not UTF-8 (e.g. Windows cp936); the original relied on the default.
    with open(from_path, "r", encoding="utf-8") as src:
        raw = json.load(src)

    real_cookies = {item['name']: item['value'] for item in raw['cookies']}

    with open(to_path, "w", encoding="utf-8") as dst:
        json.dump(real_cookies, dst, indent=4)
|
|
41
|
+
|
|
42
|
+
def get_run_time(info="used"):
    """Decorator factory: time each call of the wrapped function.

    Args:
        info (str): word printed between the function name and the elapsed
            seconds, e.g. ``myfunc used 0.12 second``.

    Returns:
        A decorator that prints timing information around each call and --
        bug fix versus the original -- forwards the wrapped function's
        return value instead of discarding it.
    """
    def _time_me(fn):
        print("Begin to timing:", fn.__name__)

        @functools.wraps(fn)
        def _wrapper(*args, **kwargs):
            start = time.perf_counter()
            print("Start at:", start)
            result = fn(*args, **kwargs)  # keep the result to return it
            print("%s %s %s" % (fn.__name__, info, time.perf_counter() - start), "second")
            return result

        return _wrapper

    return _time_me
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
def get_headers(copys):
    """Parse raw header text copied from browser devtools into a dict.

    Each non-empty line has the form ``Name: value``.  Splitting on the
    FIRST colon only keeps values that themselves contain colons intact
    (URLs, ports, timestamps); the original ``split(':')`` + join silently
    dropped every colon from the value and then patched ``//`` back into
    ``://``, mangling values like ``b:c``.

    Args:
        copys (str): the raw multi-line header text.

    Returns:
        dict: header name -> header value, both stripped.
    """
    headers = {}
    for line in copys.strip().split('\n'):
        name, _, value = line.partition(':')
        headers[name.strip()] = value.strip()
    return headers
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
def get_suffix(filename):
    """Return the file's extension including the dot (e.g. ``".gz"``).

    Only the last dotted component is returned (``"a.tar.gz"`` -> ``".gz"``).
    Returns an empty string when the name has no extension instead of
    raising IndexError like the original ``findall(...)[0]`` did.
    """
    match = re.search(r'\.[^.\\/:*?"<>|\r\n]+$', filename)
    return match.group() if match else ""
|
|
71
|
+
|
|
72
|
+
|
|
73
|
+
def search_torrent(content) -> str | None:
|
|
74
|
+
|
|
75
|
+
# In case name like : hhd1080
|
|
76
|
+
content = re.sub("hhd", "", content)
|
|
77
|
+
content = re.sub("chd", "", content)
|
|
78
|
+
content = re.sub("1080", "", content)
|
|
79
|
+
torrent_pattern = '[a-z]{2,5}[0-9]{3,5}|[A-Z]{2,5}-[0-9]{3,5}'
|
|
80
|
+
|
|
81
|
+
torrent = re.search(torrent_pattern, content)
|
|
82
|
+
torrent = torrent.group() if torrent else None
|
|
83
|
+
|
|
84
|
+
if torrent:
|
|
85
|
+
part_pattern = '([a-zA-Z]{2,5})-([0-9]{2,5})'
|
|
86
|
+
name_part = re.search(part_pattern, torrent)
|
|
87
|
+
|
|
88
|
+
if name_part:
|
|
89
|
+
|
|
90
|
+
first_part = name_part.group(1).lower()
|
|
91
|
+
second_part = name_part.group(2)
|
|
92
|
+
|
|
93
|
+
for _ in range(5 - len(second_part)):
|
|
94
|
+
second_part = '0' + second_part
|
|
95
|
+
|
|
96
|
+
torrent = first_part + second_part
|
|
97
|
+
|
|
98
|
+
return torrent
|
|
99
|
+
|
|
100
|
+
|
|
101
|
+
def isChinese(word):
    """Return True if *word* contains at least one CJK unified ideograph
    (U+4E00..U+9FFF), else False."""
    return any('\u4e00' <= ch <= '\u9fff' for ch in word)
|
|
107
|
+
|
|
108
|
+
|
|
109
|
+
def isJapanese(word):
    """Return True if *word* contains a character in U+0800..U+4E00.

    NOTE(review): this range is far wider than Japanese kana
    (U+3040-U+30FF) -- it also matches Thai, Devanagari, Hangul jamo and
    more.  Kept exactly as the original to preserve behaviour; confirm
    intent before narrowing.
    """
    return any('\u0800' <= ch <= '\u4e00' for ch in word)
|
|
115
|
+
|
|
116
|
+
|
|
117
|
+
def change_path_form(path: str):
    """Convert backslash path separators to forward slashes.

    Bug fix: the original ``eval(repr(path).replace('\\', '/'))`` doubled
    every separator (``a\\b`` -> ``a//b``) because repr() escapes each
    backslash before the replace runs, and it mangled control characters
    (an embedded tab became ``/t``) -- besides being an eval() smell.  A
    plain str.replace does what was intended.
    """
    return path.replace('\\', '/')
|
|
120
|
+
|
|
121
|
+
|
|
122
|
+
@retry(tries=3, delay=3)
def requests_retry(url, _header=None):
    """GET *url* for one page of content, retrying automatically.

    The @retry decorator re-runs the whole function up to 3 times with a
    3-second delay; exhausted retries let the exception propagate.

    Returns:
        The Response object on HTTP 200, otherwise False.
    """
    kwargs = {"timeout": 10}
    if _header:
        kwargs["headers"] = _header
    response = requests.get(url, **kwargs)

    if response.status_code == 200:
        return response
    # Any non-200 status (even other 2xx) counts as failure for callers.
    # print(f'Response status code wrong : {response.status_code}')
    return False
|
|
137
|
+
|
|
138
|
+
|
|
139
|
+
def write_json(all_data, file_name='test', ensure=True):
    """Dump *all_data* as pretty-printed JSON to ``<file_name>.json``.

    Args:
        all_data: any JSON-serialisable object.
        file_name (str): target path without the ``.json`` suffix.
        ensure (bool): forwarded as ``ensure_ascii`` -- True escapes
            non-ASCII characters in the output.
    """
    with open(f'{file_name}.json', 'w', encoding='utf-8') as fp:
        json.dump(all_data, fp, indent=4, ensure_ascii=ensure)
|
|
143
|
+
|
|
144
|
+
|
|
145
|
+
if __name__ == '__main__':
    # Ad-hoc manual check when the module is run directly: just print the
    # current performance counter.
    print(time.perf_counter())
|
|
@@ -0,0 +1,87 @@
|
|
|
1
|
+
from typing import TypedDict
|
|
2
|
+
import httpx
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
class CrackCode(object):
    """
    Identify verification code via the chaojiying.com captcha-solving API.

    postPic(image: bytes | url, codetype): synchronous recognition.

    postPicAsync(image: bytes | url, codetype): async variant (httpx).

    codetype : http://www.chaojiying.com/price.html

    return:

        err_no=int, err_str=str, pic_id=str, pic_str=str, md5=str
    """

    # Shape of the JSON payload the service answers with.
    ResultModel = TypedDict("ResultModel", err_no=int, err_str=str, pic_id=str, pic_str=str, md5=str)

    def __init__(self, headers: dict | None = None):
        """
        'pass2': md5(password.encode('utf8')).hexdigest(),
        """
        # SECURITY NOTE(review): the account name and md5-hashed password
        # are hard-coded and shipped with the package -- move them to
        # configuration / environment variables and rotate them.
        self.base_data = {
            'user': 'liangxiuyu',
            'pass2': "b9c23cc6007dcec86a7577770790e534",
            'softid': '927673',  # User center >> software ID: generate one to replace 96001
            'len_min': 4
        }
        # Caller-supplied headers win; otherwise use a browser-like UA.
        self.headers = headers or {
            'Connection': 'Keep-Alive',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36 Edg/97.0.1072.55',
        }

        self.base_url = "http://upload.chaojiying.net/Upload/Processing.php"

    def postPic(self, image: bytes | str, codetype: str = "1004") -> ResultModel:
        """
        image: raw picture bytes, or a URL the picture is fetched from
        codetype: question type -- see http://www.chaojiying.com/price.html
        """
        print("-> Nomal identifing verification code...")
        # Isinstance image bytes or Url link
        if isinstance(image, bytes):
            files = {'userfile': ('code.jpg', image)}
        else:
            # Treat the argument as a URL and download the image first.
            res = httpx.get(image, headers=self.headers)
            files = {'userfile': ('code.jpg', res.content)}

        self.base_data.update({'codetype': codetype})
        response = httpx.post(self.base_url, data=self.base_data, files=files, headers=self.headers)
        return response.json()

    async def postPicAsync(self, image: bytes | str, codetype: str = "1004") -> ResultModel:
        # Async variant of postPic using a dedicated AsyncClient that is
        # closed before returning.
        print("-> Async identifing verification code...")

        client = httpx.AsyncClient(headers=self.headers)

        # Isinstance image bytes or Url link
        if isinstance(image, bytes):
            files = {'userfile': ('code.jpg', image)}
        else:
            res = await client.get(image)
            files = {'userfile': ('code.jpg', res.content)}

        self.base_data.update({'codetype': codetype})

        response = await client.post(self.base_url, data=self.base_data, files=files)

        _ = await client.aclose()

        # print("Code Result:",response.text)
        return response.json()

    def reportError(self, image_id):
        """
        image_id: picture ID of the wrongly-answered captcha to report
        """
        print("-> Report to server this error")
        self.base_data.update({
            'id': image_id,
        })
        r = httpx.post('http://upload.chaojiying.net/Upload/ReportError.php', data=self.base_data, headers=self.headers)
        return r.json()
|
|
@@ -0,0 +1,201 @@
|
|
|
1
|
+
import asyncio
|
|
2
|
+
import httpx
|
|
3
|
+
from typing import List
|
|
4
|
+
from tqdm import tqdm
|
|
5
|
+
from urllib.parse import urlparse
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def getDonwloadsPath():
    """Return the current user's Downloads folder path on Windows.

    Looks up the known-folder GUID for Downloads in the registry.
    Windows-only: ``winreg`` does not exist on other platforms, hence the
    function-local import.

    NOTE: the name keeps its original typo ("Donwloads") so existing
    callers are unaffected.
    """
    import winreg
    # Use the key as a context manager so the registry handle is closed
    # even if the query raises (the original leaked the handle).
    with winreg.OpenKey(winreg.HKEY_CURRENT_USER,
                        r'Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders') as key:
        return winreg.QueryValueEx(key, "{374DE290-123F-4565-9164-39C4925E467B}")[0]
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class AsyncDownloader(object):
    # Multi-connection HTTP downloader: the file is split into ranges, each
    # range is fetched concurrently with httpx Range requests, and the part
    # files are merged afterwards.

    def __init__(self, thread_num: int, url: str, folder: str, filename: str | None = None, proxies=False):
        """
        Async Download

        Args:
            thread_num (int): async num
            url (str): download url
            folder (str): save path
            filename (str, optional): If Defaults to None it will get name from the url
            proxies (bool, optional): Chose if to use the 127.0.0.1:1080 sockets proxies. Defaults to False.
        """

        self.proxies = "http://127.0.0.1:1080" if proxies else None

        # Fall back to the last path segment of the URL as the file name.
        if not filename:
            filename = urlparse(url).path.split('/')[-1]

        self.url = url
        self.client = httpx.AsyncClient(proxies=self.proxies)
        self.thread_num = thread_num

        self.folder_path = Path(folder)  # .joinpath(Path(filename).stem)
        self.file_path = self.folder_path.joinpath(filename)

        # NOTE: both of these hit the network before the folder exists --
        # _create_folder below only runs afterwards.
        self.file_size = self._get_file_size()
        self.cut_info = self._cutting()

        # Progress Bar
        self.tqdm_obj = tqdm(total=self.file_size, unit_scale=True, unit_divisor=1024, unit="B")

        self._create_folder(self.folder_path)

    def __call__(self):
        self.main()

    def _create_folder(self, folder_path):
        """
        Create a folder path

        Args:
            folder_path (path): normal path or Path obj
        """
        p = Path(folder_path)

        # mkdir() without parents=True: only the last component is created.
        if not p.exists():
            p.mkdir()

        return

    def _get_file_size(self):
        # Determine total size from Content-Length; if the server omits it,
        # download the body once just to measure it (expensive fallback).
        with httpx.stream("GET", self.url, proxies=self.proxies) as res:
            size = res.headers.get("Content-Length")
            if not size:
                size = len(res.read())
        return int(size)

    def _cutting(self):
        """
        Split the file into thread_num byte ranges.

        Returns a list like:
            [[0, 31409080],
             [31409081, 62818160],
             ...
             [282681721, '-']]
        where '-' marks an open-ended final range (bytes=N- in the Range
        header) so rounding never loses the tail of the file.
        """
        cut_info: List[List[int | str]] = []
        cut_size = self.file_size // self.thread_num

        for num in range(self.thread_num):
            cut_info.append([cut_size*num + 1, cut_size * (num + 1)])

            if num == self.thread_num - 1:
                cut_info[-1][1] = "-"   # last range is open-ended
            elif num == 0:
                cut_info[0][0] = 0      # first range starts at byte 0

        return cut_info

    def _merge_files(self):
        """
        Concatenate the numbered part files (``0_name``, ``1_name``, ...)
        into the final file, deleting each part once appended.
        """

        with open(self.file_path.absolute(), 'ab') as f_count:
            for index in range(self.thread_num):

                sub_file = self.folder_path.joinpath(f"{index}_{self.file_path.name}")

                with open(sub_file.absolute(), 'rb') as sub_write:
                    f_count.write(sub_write.read())

                # Remove the part file after it has been merged.
                sub_file.unlink()

        return

    async def downloader(self, index, start_size, stop_size, retry=False):
        # Download one byte range into its own part file, resuming from
        # whatever is already on disk.

        sub_file = self.folder_path.joinpath(f"{index}_{self.file_path.name}")

        if sub_file.exists():
            temp_size = sub_file.stat().st_size  # bytes already downloaded locally
            if not retry:
                self.tqdm_obj.update(temp_size)  # advance the progress bar
        else:
            temp_size = 0

        # '-' (open-ended range) becomes an empty upper bound: "bytes=N-".
        stop_size = "" if stop_size == '-' else stop_size

        headers = {'Range': f'bytes={start_size + temp_size}-{stop_size}'}

        down_file = open(sub_file.absolute(), 'ab')

        try:
            async with self.client.stream("GET", self.url, headers=headers) as response:
                num_bytes_downloaded = response.num_bytes_downloaded
                async for chunk in response.aiter_bytes():
                    if chunk:
                        down_file.write(chunk)
                        # Progress by the delta of bytes received since the
                        # previous chunk.
                        self.tqdm_obj.update(response.num_bytes_downloaded - num_bytes_downloaded)
                        num_bytes_downloaded = response.num_bytes_downloaded

        except Exception as e:
            # On any error, retry this range recursively; temp_size is
            # re-read from disk so the resume offset stays correct.
            print("{}:请求超时,尝试重连\n报错信息:{}".format(index, e))
            await self.downloader(index, start_size, stop_size, retry=True)

        finally:
            down_file.close()

        return

    async def main_download(self):
        # Fan out one task per byte range, wait for all, then close the
        # shared client.

        index = 0
        tasks = []
        for info in self.cut_info:
            task = asyncio.create_task(self.downloader(index, info[0], info[1]))
            tasks.append(task)
            index += 1

        await asyncio.gather(*tasks)
        await self.client.aclose()

    async def async_main(self):
        """ Entry point when already inside an event loop (avoids calling
        asyncio.run from a running loop). """
        if self.file_path.exists():
            if self.file_path.stat().st_size >= self.file_size:
                print(f"{self.file_path.name} Already exists.")
                return

        await self.main_download()
        self._merge_files()

    def main(self):
        # Synchronous entry point; skips the download entirely when the
        # target file already exists at (or beyond) the expected size.

        if self.file_path.exists():
            if self.file_path.stat().st_size >= self.file_size:
                print(f"{self.file_path.name} Already exists.")
                return

        asyncio.run(self.main_download())
        self._merge_files()
|
|
194
|
+
|
|
195
|
+
|
|
196
|
+
if __name__ == "__main__":
    # Manual smoke test: download a sample .mobi with 20 concurrent range
    # requests into D:/Downloads (Windows path; network access required).
    folder = "D:/Downloads"
    url = r"https://d3.qinkan.net/d/mobi/26/%E3%80%8A%E5%87%A1%E4%BA%BA%E4%BF%AE%E4%BB%99%E4%BC%A0%E3%80%8B(%E7%B2%BE%E6%A0%A1%E7%89%88)_qinkan.net.mobi"
    AsyncDownloader(20, url, folder, proxies=False)()
|
|
201
|
+
|
|
@@ -0,0 +1,48 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import shutil
|
|
3
|
+
|
|
4
|
+
class ExtractFile:
    """
    Move files whose names contain *pattern* out of the immediate
    sub-folders of *path* and into *path* itself.

    NOTE(review): the original docstring claimed pattern=None extracts
    every file, but ``str.find(None)`` raises TypeError -- pass a
    substring.
    """

    def __init__(self, path, pattern):
        # path: folder whose sub-folders are scanned.
        # pattern: substring a file name must contain to be moved.
        self.path = self._changePathForm(path)
        self.pattern = pattern

    def __call__(self, *args, **kwds):
        self.main()

    def _changePathForm(self, path: str):
        # NOTE(review): eval(repr(...)) turns every backslash into "//"
        # (repr escapes '\' to '\\' before the replace runs).  The doubled
        # separators still resolve on Windows, but a plain
        # path.replace('\\', '/') was likely intended.
        new_path = eval(repr(path).replace('\\', '/'))
        return new_path

    def _getCurrentFiles(self):
        # Names (files and folders) directly inside self.path.
        return os.listdir(self.path)

    def _getFolders(self):
        # Keep only the entries that are directories.
        currentFiles = self._getCurrentFiles()

        folderList = list()
        for file in currentFiles:
            if os.path.isdir(self._changePathForm(os.path.join(self.path, file))):
                print(file)
                folderList.append(file)

        return folderList

    def _extractFile(self, folder):
        # Move every file in *folder* whose name contains self.pattern
        # up into self.path.
        forlderPath = self._changePathForm(os.path.join(self.path, folder))
        folderFiles = os.listdir(forlderPath)

        for file in folderFiles:
            if file.find(self.pattern) != -1:
                shutil.move(self._changePathForm(os.path.join(forlderPath, file)), self.path)
                print(f"Move: {file}")

    def main(self):
        # Walk each first-level sub-folder and pull matching files up.
        forlders = self._getFolders()
        for forlder in forlders:
            self._extractFile(forlder)
|
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
from .Common import requests_retry
|
|
2
|
+
from pprint import pprint
|
|
3
|
+
from PIL import Image
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
def checkOneImage(path):
    ''' Return True if PIL can open *path* as an image, False otherwise.

    The original docstring also promised to delete invalid files, but the
    os.remove call is commented out, so nothing is removed.
    NOTE(review): Image.open only reads the header (no full decode) and the
    file handle is never explicitly closed.
    '''
    try:
        Image.open(path)
        return True
    except IOError:
        # os.remove(path)
        return False
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
def downloadImage(url, name, path, _header=None):
    '''
    Download a single image to ``{path}{name}.jpg``.

    Returns True when the download succeeded AND the file is a readable
    image; False on any failure.

    NOTE(review): *path* must already include a trailing separator -- the
    target is built by plain string concatenation.

    TODO: Change the suffix
    '''

    # print(f"Donwloading Image: {name}")

    # requests_retry returns the Response on HTTP 200 or False otherwise.
    response = requests_retry(url, _header)

    if response:

        image_path = f'{path}{name}.jpg'

        with open(image_path, 'wb') as f:
            f.write(response.content)

        # Validate that the downloaded bytes are actually a decodable image.
        if checkOneImage(image_path):
            return True
        else:
            print(f"Donloaded wrong image: {name}")
            return False

    else:
        # print(f"Failed Donload: {name}")
        return False
|
|
@@ -0,0 +1,44 @@
|
|
|
1
|
+
from loguru import logger
|
|
2
|
+
from pathlib import Path
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
class Logger():
    # Thin convenience wrapper around loguru with a simple file-sink setup.
    # NOTE(review): loguru's logger is a module-level singleton, so every
    # Logger instance adds another sink to the same shared logger.

    def __init__(self, logPath: str | Path, need_log=True, serialize=False):
        # logPath: log file target; parent folders are created on demand.
        # need_log: when False no file sink is attached (console only).
        # serialize: forwarded to loguru -- emit JSON records when True.
        self.my_logger = logger

        # Only attach a file sink when writing to disk is requested.
        if need_log is True:

            log_path = Path(logPath)

            if not log_path.parent.exists():
                Path.mkdir(log_path.parent, parents=True, exist_ok=True)

            # enqueue=True makes writes go through a queue (safe across
            # processes); retention prunes files older than 30 days.
            _ = self.my_logger.add(log_path, retention="30 days", encoding='utf-8', enqueue=True, serialize=serialize)

    def info(self, content):
        # Delegate to loguru at INFO level.
        self.my_logger.info(content)

    def debug(self, content):
        self.my_logger.debug(content)

    def error(self, content):
        self.my_logger.error(content)

    def critical(self, content):
        self.my_logger.critical(content)

    def warning(self, content):
        self.my_logger.warning(content)

    def success(self, content):
        self.my_logger.success(content)

    def trace(self, content):
        self.my_logger.trace(content)

    def traceback(self, content=""):
        # Log at ERROR level with the current exception's traceback attached;
        # call from inside an ``except`` block.
        self.my_logger.exception(content)
|
|
@@ -0,0 +1,96 @@
|
|
|
1
|
+
import pymysql
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
class Mysql:
    '''
    Small pymysql convenience wrapper.

    Args (translated from the original Chinese docstring):
        db: schema to select (default 'personal').
        data_dict: when True, rows are returned as dicts (DictCursor).

    Notes:
        - insert/update/delete are identical; the separate names exist
          only for readability at call sites.
        - empty() clears a table and resets its AUTO_INCREMENT counter.
        - the connection is intentionally never closed (original author's
          note: behaviour/performance impact not yet studied).

    SECURITY NOTE(review): a production host, user and password are
    hard-coded as defaults and published with this package -- rotate the
    credentials and load them from configuration.  All methods also run
    caller-supplied SQL verbatim; build statements with parameterized
    queries upstream to avoid SQL injection.
    '''

    def __init__(self, host="152.136.113.50", db='personal', port=3306, user='neely', passwd='Neely2018', data_dict=False):

        self.my_con = pymysql.connect(
            host=host, port=port, user=user, passwd=passwd, db=db, connect_timeout=1000)

        if data_dict:
            # DictCursor: each fetched row is a column-name -> value dict.
            self.my_cousor = self.my_con.cursor(
                cursor=pymysql.cursors.DictCursor)
        else:
            self.my_cousor = self.my_con.cursor()

    def get_one(self, sql):
        ''' Execute *sql* and fetch a single row (or None). '''

        self.my_cousor.execute(sql)

        data = self.my_cousor.fetchone()

        # self.my_cousor.close()
        # self.my_con.close()

        return data

    def get_all(self, sql, singleColum=False):
        ''' Execute *sql* and fetch all rows.

        With singleColum=True, return a flat list of each row's first
        column instead of row tuples.
        '''
        self.my_cousor.execute(sql)

        data = self.my_cousor.fetchall()

        if singleColum:
            return [i[0] for i in data]

        return data

    def insert(self, sql):
        ''' Execute an INSERT and commit; returns the affected row count. '''
        msg = self.my_cousor.execute(sql)

        self.my_con.commit()

        return msg

    def insert_many(self, sql, param):
        ''' Execute *sql* once per parameter sequence in *param*, then
        commit; returns the affected row count. '''
        msg = self.my_cousor.executemany(sql, param)

        self.my_con.commit()

        return msg

    def update(self, sql):
        ''' Execute an UPDATE and commit; returns the affected row count. '''
        msg = self.my_cousor.execute(sql)

        self.my_con.commit()

        return msg

    def delete(self, sql):
        ''' Execute a DELETE and commit; returns the affected row count. '''
        msg = self.my_cousor.execute(sql)

        self.my_con.commit()

        return msg

    def empty(self, db):
        '''
        Delete every row of table *db* and reset its AUTO_INCREMENT
        counter -- used before re-writing a full data set.
        '''

        sql = (f'DELETE from {db}')
        sql2 = (f'alter table {db} AUTO_INCREMENT 1')

        msg = self.my_cousor.execute(sql)
        self.my_cousor.execute(sql2)
        self.my_con.commit()

        print(f'已删除{msg}条数据,清空{db}表')
|
|
@@ -0,0 +1,109 @@
|
|
|
1
|
+
# Imports
|
|
2
|
+
import os,re
|
|
3
|
+
from typing import TypedDict
|
|
4
|
+
from .Common import change_path_form
|
|
5
|
+
|
|
6
|
+
class SrtToVtt():
    """Convert a .srt subtitle file to .vtt.

    Args:
        inputfilename (str | None): path of the .srt file; when None the
            user is prompted interactively until an existing file is given.
            The output path is the input path with its extension replaced
            by ``.vtt``.
    """

    def __init__(self, inputfilename):

        self.inputfilename = self._getInputPath(inputfilename)
        self.outputfilename = self._getOutputPath()

    def __call__(self):
        self.main()

    def _getInputPath(self, inputfilename):
        """Return a path that exists on disk, prompting the user if needed."""

        if inputfilename == None:
            print("Please enter the name of the file to open:", end=" ")
            inputfilename = input()

        while not os.path.isfile(inputfilename):
            print("File not found, please enter the filename:", end=" ")
            inputfilename = input()

        return inputfilename

    def _getOutputPath(self):
        """ Return the outputfilename depend on the inputfilename """

        path, name = os.path.split(self.inputfilename)
        # Drop the original suffix and append .vtt.
        name = os.path.splitext(name)[0]
        return os.path.join(path, name + ".vtt")

    def _getTransformedLine(self, line):
        # SRT uses a comma as the millisecond separator, WebVTT a dot;
        # only timing lines contain "-->", so other lines pass through.
        if "-->" in line:
            return line.replace(",", ".")
        else:
            return line

    def main(self):
        """Write the WEBVTT header plus every transformed input line."""

        # utf-8-sig transparently strips a BOM if one is present.
        with open(self.inputfilename, "rb") as src:
            lines = src.read().decode("utf-8-sig").splitlines()

        # Bug fix: write with explicit utf-8 -- the original used the
        # locale encoding, which raises UnicodeEncodeError for non-ASCII
        # subtitles on Windows.  Context managers also guarantee the
        # handles are closed.
        with open(self.outputfilename, "w", encoding="utf-8") as output:
            output.write("WEBVTT")
            output.write("\n\n")
            for line in lines:
                output.write(self._getTransformedLine(line))
                output.write("\n")

        print(f"Write Done : {self.outputfilename}")
|
|
67
|
+
|
|
68
|
+
|
|
69
|
+
class SubFuncs():
    """
    Ordinary subtitle helper functions for a single folder.

    : getSubFolder() -- locate the video file and subtitle file names.

    """

    def __init__(self, path: str):
        # change_path_form normalises backslashes in the supplied path.
        self.path = change_path_form(path)

    def getSubFolder(self):

        """Get the folder's video file name and subtitle file name.

        Returns:
            [dict]: {'fileName': '', 'subName': ''} -- a key is absent when
            no matching file was found; if several match, the last one in
            directory order wins.
        """

        Type_folderData = TypedDict("Type_folderData", fileName=str, subName=str)

        videoSuffix = ['mp4', 'mkv', 'mov', 'avi']
        subSuffix = ['srt', 'ass']
        folderData: Type_folderData = dict()

        # Fixed annotation: the original ``[str]`` is not a valid type.
        fileList: list = os.listdir(self.path)
        for file in fileList:
            # NOTE(review): the unescaped '.' in the pattern matches ANY
            # character, so e.g. "clipXsrt" also matches -- rf"\.{suffix}$"
            # was probably intended.
            for suffix in videoSuffix:
                if re.findall(f".{suffix}$",file):
                    folderData["fileName"] = file
                    break
            for suffix in subSuffix:
                if re.findall(f".{suffix}$", file):
                    folderData["subName"] = file
                    break

        return folderData
|
|
105
|
+
|
|
106
|
+
|
|
107
|
+
if __name__ == "__main__":
    # Manual check only.  NOTE(review): the non-raw string below contains
    # invalid escape sequences (\V, \S, \e) that Python currently keeps
    # literally with a DeprecationWarning -- it should be a raw string.
    test = SubFuncs("E:\Videos\Friends & 老友记\S10\es1")
|
|
109
|
+
|
|
@@ -0,0 +1,90 @@
|
|
|
1
|
+
import time
|
|
2
|
+
import re
|
|
3
|
+
import datetime
|
|
4
|
+
from time import gmtime, strftime
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
def find_date(content):
    """Return the first ``YYYY-M-D`` style date found in *content*.

    Returns an empty string when no date is present.
    """
    match = re.search(r"\d{4}-\d{1,2}-\d{1,2}", content)
    return match.group() if match else ""
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def get_t_time():
    """Return the current Unix timestamp (whole seconds) as a string.

    Bug fix: the original ``str(time.time())[:10]`` sliced the float's
    string representation, which only works while timestamps happen to be
    exactly 10 digits long and would silently truncate otherwise;
    ``str(int(...))`` is correct for any epoch value.
    """
    return str(int(time.time()))
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
def get_d_time():
    """Current local time formatted as ``YYYY-MM-DD HH:MM:SS``."""
    return f"{datetime.datetime.now():%Y-%m-%d %H:%M:%S}"
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
def get_short_d_time():
    """Current local date formatted as ``YYYY-MM-DD``."""
    return f"{datetime.datetime.now():%Y-%m-%d}"
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
def translate_timestamp(timestamp: int, form: str = '%Y-%m-%d %H:%M:%S'):
    """Format a Unix *timestamp* (interpreted in local time) with the
    strftime pattern *form* (default ``%Y-%m-%d %H:%M:%S``)."""
    local_dt = datetime.datetime.fromtimestamp(timestamp)
    return local_dt.strftime(form)
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
def convert_second(second):
    """Render a duration given in seconds as ``HH:MM:SS``.

    Uses gmtime, so the result wraps around after 24 hours.
    """
    return strftime("%H:%M:%S", gmtime(second))
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
def translate_time(_time, data_type):
    """Convert between datetime strings and Unix timestamps (local time).

    Args:
        _time: a ``%Y-%m-%d %H:%M:%S`` string when data_type == 't_time',
            or a numeric timestamp when data_type == 'd_time'.
        data_type: 't_time' -> int timestamp; 'd_time' -> formatted string.

    Returns None implicitly for any other *data_type* (unchanged from the
    original behaviour).
    """
    if data_type == 't_time':
        parsed = time.strptime(_time, "%Y-%m-%d %H:%M:%S")
        return int(time.mktime(parsed))
    if data_type == 'd_time':
        return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(_time))
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
def convert_datetime(date: str):
    """Parse a loosely formatted date string into a datetime.datetime.

    Any single non-digit separator is accepted, e.g. ``"2021-5-3"``,
    ``"2021/05/03"``, ``"2021.5.3"``.

    Raises:
        ValueError: when no date pattern is found.  (The original raised
        bare BaseException, which is an anti-pattern; ValueError is still
        caught by any existing ``except BaseException`` handler.)
    """
    found = re.findall(r'([0-9]{4})\D([0-3]?[0-9])\D([0-3]?[0-9])', date)

    if not found:
        raise ValueError("Not validated date string.")

    year, month, day = (int(part) for part in found[0])
    return datetime.datetime(year, month, day)
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
def calculate_date(date: str, num: int, format: str = "%Y-%m-%d"):
    """
    Shift *date* by *num* days and format the result.

    Args:
        date (str): anything convert_datetime() accepts, e.g. "2020-01-30"
        num (int): day offset, positive or negative
        format(str) : default is %Y-%m-%d

    Returns:
        str: the shifted date rendered with *format*
    """

    shifted = convert_datetime(date) + datetime.timedelta(days=num)
    return shifted.strftime(format)
|
|
@@ -0,0 +1,135 @@
|
|
|
1
|
+
import time
|
|
2
|
+
import json
|
|
3
|
+
import requests
|
|
4
|
+
import re
|
|
5
|
+
import httpx
|
|
6
|
+
|
|
7
|
+
class Translation():
    """Youdao open-API translator (target language is always zh-CHS).

    Usage:
        t = Translation()
        t.trans('en', 'hello world')            # synchronous
        await t.trans_async('en', 'hello')      # asynchronous
    """

    def __init__(self):
        # NOTE(security): credentials are hard-coded in source; prefer loading
        # them from environment variables or a config file.
        # (The original Py2 relic ``reload(sys)`` was removed: it is a no-op
        # on Python 3.)
        self.YOUDAO_URL = 'https://openapi.youdao.com/api'
        self.APP_KEY = '55780d0d419052ba'
        self.APP_SECRET = 'cm9nQh6b4rrBfDBBEpWOD1i2eDTGkdCx'

    def encrypt(self, signStr):
        """Return the SHA-256 hex digest of *signStr* (Youdao v3 signature)."""
        import hashlib
        return hashlib.sha256(signStr.encode('utf-8')).hexdigest()

    def truncate(self, q):
        """Apply Youdao's 'input' rule: text longer than 20 chars becomes
        first-10 + str(len) + last-10; shorter text is returned unchanged.
        None passes through as None."""
        if q is None:
            return None
        size = len(q)
        return q if size <= 20 else q[0:10] + str(size) + q[size - 10:size]

    def do_request(self, data):
        """POST *data* to the Youdao API synchronously; returns the response."""
        headers = {'Content-Type': 'application/x-www-form-urlencoded'}
        return requests.post(self.YOUDAO_URL, data=data, headers=headers)

    async def do_request_async(self, data):
        """POST *data* to the Youdao API asynchronously; returns the response."""
        headers = {'Content-Type': 'application/x-www-form-urlencoded'}
        async with httpx.AsyncClient(headers=headers) as client:
            result = await client.post(self.YOUDAO_URL, data=data)
            return result

    def _prepare(self, from_language, content, useby):
        """Clean *content* and build the signed request payload.

        Shared by trans / trans_async (the two methods previously duplicated
        this block line for line).
        """
        import uuid
        if useby == 'p':
            # strip bracketed annotations: ASCII ( ), full-width (), and [ ]
            # (raw string — the original non-raw literal relied on invalid
            # escape sequences, which raise a DeprecationWarning)
            content = re.sub(r'\(.*?\)|(.*?)|\[.*?\]', '', content, 0)

        # a trailing backslash or quote breaks the form encoding — drop it
        if content.endswith('\\') or content.endswith("'"):
            content = content[:-1]

        curtime = str(int(time.time()))
        salt = str(uuid.uuid1())
        signStr = self.APP_KEY + self.truncate(content) + salt + curtime + self.APP_SECRET
        return {
            'from': from_language,
            'to': 'zh-CHS',
            'signType': 'v3',
            'curtime': curtime,
            'appKey': self.APP_KEY,
            'q': content,
            'salt': salt,
            'sign': self.encrypt(signStr),
        }

    @staticmethod
    def _parse(response):
        """Decode an API response.

        Returns (True, translation) on success or (False, errorCode) on failure.
        """
        result = json.loads(response.content.decode())
        if result['errorCode'] == '0':
            return True, result['translation'][0]
        return False, result['errorCode']

    def trans(self, from_language, content, useby='p'):
        """Translate *content* into Chinese.

        from_language : ja = japan; en = english;
        if useby = 'p' => filter bracket content

        Retries once on an API error; returns the translated string or an
        error-code message.
        """
        data = self._prepare(from_language, content, useby)

        ok, value = self._parse(self.do_request(data))
        if ok:
            return value

        print(f'Translation wrong Error Code : {value} \n Retring...')

        ok, value = self._parse(self.do_request(data))
        if ok:
            return value
        return f'Translation wrong Error Code : {value}'

    async def trans_async(self, from_language, content, useby='p'):
        """Async variant of :meth:`trans` — same cleaning, signing and
        one-retry behaviour, using the httpx client."""
        data = self._prepare(from_language, content, useby)

        response = await self.do_request_async(data)
        ok, value = self._parse(response)
        if ok:
            return value

        print(f'Translation wrong Error Code : {value} \n Retring...')

        response = await self.do_request_async(data)
        ok, value = self._parse(response)
        if ok:
            return value
        return f'Translation wrong Error Code : {value}'
|
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
import rarfile
|
|
2
|
+
|
|
3
|
+
def uncompress(src_file, dest_dir):
    """Extract a RAR archive into a target directory.

    Requires the third-party ``rarfile`` package (pip install rarfile).

    :param src_file: the RAR archive to extract (path or file object)
    :type src_file: str or file
    :param dest_dir: directory the archive contents are extracted into
    :type dest_dir: str
    """
    # Context manager guarantees the archive handle is closed even when
    # extractall() raises — the original leaked the handle on error.
    with rarfile.RarFile(src_file) as rar:
        rar.extractall(dest_dir)
|
|
@@ -0,0 +1,68 @@
|
|
|
1
|
+
import paramiko
|
|
2
|
+
from typing import cast
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
class SSHConnection:
    """SFTP/SSH helper built on a single paramiko Transport.

    The transport is opened and authenticated in the constructor; call
    :meth:`close` when done.
    """

    def __init__(self, host='127.0.0.1', port=22, user='root', pwd='root'):
        """Open the Transport channel and start an SFTP session."""
        self.host = host
        self.port = port
        self.user = user
        self.pwd = pwd
        self.__transport = paramiko.Transport((self.host, self.port))
        self.__transport.connect(username=self.user, password=self.pwd)
        self.sftp = cast(paramiko.SFTPClient, paramiko.SFTPClient.from_transport(self.__transport))

    def close(self):
        """Close the SFTP session and the underlying transport."""
        self.sftp.close()
        self.__transport.close()

    def upload(self, local_path, remote_path):
        """Upload a local file to the remote host.

        Returns the SFTPAttributes of the uploaded file (paramiko's put()).
        """
        return self.sftp.put(local_path, remote_path)

    def download(self, local_path, remote_path):
        """Download *remote_path* from the remote host to *local_path*."""
        self.sftp.get(remote_path, local_path)

    def mkdir(self, target_path, mode=0o777):
        """Create a directory on the remote host.

        Fix: the default mode is now octal 0o777 — the original decimal 777
        (== 0o1411) produced nonsensical permissions.
        """
        self.sftp.mkdir(target_path, mode)

    def rmdir(self, target_path):
        """Remove a directory on the remote host."""
        self.sftp.rmdir(target_path)

    def listdir(self, target_path):
        """List names of files and subdirectories under *target_path*
        (use :meth:`listdirattr` for per-entry details)."""
        return self.sftp.listdir(target_path)

    def remove(self, target_path):
        """Delete a file on the remote host."""
        self.sftp.remove(target_path)

    def listdirattr(self, target_path):
        """Detailed listing of *target_path* (list of SFTPAttributes,
        comparable to os.stat; inspect entries via ``__dict__``).

        Returns [] and prints the error if the listing fails.
        """
        entries = []  # renamed: the original shadowed the builtin ``list``
        try:
            entries = self.sftp.listdir_attr(target_path)
        except Exception as e:
            # was ``except BaseException`` — don't swallow KeyboardInterrupt
            # or SystemExit
            print(e)
        return entries

    def stat(self, remote_path):
        """Return SFTPAttributes for a single remote file."""
        return self.sftp.stat(remote_path)

    def cmd(self, command):
        """Run *command* on the remote host over the existing transport.

        Prints and returns the command's stdout as bytes.
        """
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        # reuse the already-authenticated transport instead of reconnecting
        ssh._transport = self.__transport  # type:ignore
        stdin, stdout, stderr = ssh.exec_command(command)
        result = stdout.read()
        print(result)
        return result
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
from .Common import *
|
|
2
|
+
from .Download import *
|
|
3
|
+
from .ExtractFile import ExtractFile
|
|
4
|
+
from .Image import *
|
|
5
|
+
from .Mysql import Mysql
|
|
6
|
+
from .Time import *
|
|
7
|
+
from .Translation import Translation
|
|
8
|
+
from .UncompressRAR import uncompress
|
|
9
|
+
from .Subtitles import SrtToVtt, SubFuncs
|
|
10
|
+
from .CrackCode import CrackCode
|
|
11
|
+
from .ownEmail import Email
|
|
12
|
+
from ._SSH import SSHConnection
|
|
13
|
+
from .Loger import Logger
|
|
@@ -0,0 +1,76 @@
|
|
|
1
|
+
import smtplib
|
|
2
|
+
from pathlib import Path
|
|
3
|
+
from email.mime.text import MIMEText
|
|
4
|
+
from email.mime.multipart import MIMEMultipart
|
|
5
|
+
from email.mime.application import MIMEApplication
|
|
6
|
+
from email.mime.image import MIMEImage
|
|
7
|
+
from email.utils import formataddr
|
|
8
|
+
from email.header import Header
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class Email():
    """Small smtplib helper for plain-text and HTML/attachment mail.

    Usage:
        my_email = ownEmail.Email(user, password, name, host)
        my_email.make_mult_mag(content="test", title='subject')
        my_email.send_msg()
    """

    def __init__(self, user='285242442@qq.com', password='tpgdlmjmfqxybgdf', name="Neely", host='smtp.qq.com'):
        """Store credentials and prepare an empty multipart message.

        NOTE(security): the defaults embed real-looking credentials —
        always pass your own and avoid committing passwords to source.
        """
        self._user = user
        self._password = password
        self._name = name  # fix: was never stored, so make_normal_msg crashed on self._name
        self._host = host
        self._msg = MIMEMultipart()  # message container
        self._msg['From'] = formataddr([name, user])  # sender header

    def make_normal_msg(self, content, title, receiver='285242442@qq.com'):
        """Build a plain-text message addressed to *receiver*."""
        self._msg = MIMEText(
            content,
            'plain',
        )

        # fix: send_msg() reads self.receiver, but this method only set
        # self._receiver — sending a normal message always raised
        # AttributeError.  Both names are kept for backward compatibility.
        self._receiver = receiver
        self.receiver = receiver
        self._msg['From'] = formataddr([self._name, self._user])
        self._msg['To'] = formataddr(['Your name', self.receiver])
        self._msg['Subject'] = title

    def make_mult_mag(self, content="", title="", file_path="", image_path="", receiver_name="", receiver='285242442@qq.com'):
        """Build a rich message: optional file attachment, optional inline
        image, HTML body.  (Method name kept for compatibility despite the
        'mag' typo.)"""
        self.receiver = receiver
        self._msg['To'] = formataddr([receiver_name, receiver])  # recipient header

        if file_path:
            _path = Path(file_path)
            # subject defaults to the attachment's stem when no title given
            self._msg['Subject'] = Header(title if title else _path.stem, 'utf-8')

            with open(_path, 'rb') as file:
                attachment = MIMEApplication(file.read())

            attachment.add_header('Content-Disposition', 'attachment', filename=_path.name)
            self._msg.attach(attachment)  # attach the file to the mail

        if image_path:
            with open(image_path, 'rb') as file:
                msgimage = MIMEImage(file.read())  # load the image

            # inline the image via its Content-ID reference
            html_img = f'<p>{content}<br><img src="cid:image1"></br></p>'
            msgimage.add_header('Content-ID', '<image1>')
            self._msg.attach(msgimage)
            self._msg.attach(MIMEText(html_img, 'html', 'utf-8'))  # HTML body

        else:
            self._msg['Subject'] = Header(title, 'utf-8')
            self._msg.attach(MIMEText(content, 'html', 'utf-8'))  # HTML body

    def send_msg(self):
        """Send the prepared message over SMTP-SSL (port 465).

        Returns True on success; prints the error and returns False otherwise.
        """
        try:
            smtp = smtplib.SMTP_SSL(self._host, 465)
            _ = smtp.login(self._user, self._password)
            _ = smtp.sendmail(self._user, [self.receiver], self._msg.as_string())
            _ = smtp.quit()
            return True
        except Exception as e:
            print(e)
            return False
|
xiuyutools-1.0/setup.cfg
ADDED
xiuyutools-1.0/setup.py
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
from setuptools import setup, find_packages

setup(
    name='xiuyutools',
    version='1.0',
    packages=find_packages(),
    description='Tools for myself',
    # explicit encoding: the default is platform-dependent, and a UTF-8
    # README breaks the build on non-UTF-8 locales (e.g. GBK Windows)
    long_description=open('README.md', encoding='utf-8').read(),
    long_description_content_type='text/markdown',
    author='Neely',
    author_email='liangxiuyu@outlook.com',
    url='https://github.com/liangxiuyu/owntools',
    classifiers=[
        'Programming Language :: Python :: 3',
        'License :: OSI Approved :: MIT License',
    ],
)
|
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
Metadata-Version: 2.1
|
|
2
|
+
Name: xiuyutools
|
|
3
|
+
Version: 1.0
|
|
4
|
+
Summary: Tools for myself
|
|
5
|
+
Home-page: https://github.com/liangxiuyu/owntools
|
|
6
|
+
Author: Neely
|
|
7
|
+
Author-email: liangxiuyu@outlook.com
|
|
8
|
+
Classifier: Programming Language :: Python :: 3
|
|
9
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
10
|
+
Description-Content-Type: text/markdown
|
|
11
|
+
|
|
12
|
+
# OwnTools
|
|
13
|
+
owntools
|
|
14
|
+
2021-12-27 Created
|
|
15
|
+
|
|
16
|
+
## Build & publish
|
|
17
|
+
python setup.py sdist bdist_wheel
|
|
18
|
+
python setup.py develop
|
|
19
|
+
twine upload dist/*
|
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
README.md
|
|
2
|
+
setup.py
|
|
3
|
+
owntools/Common.py
|
|
4
|
+
owntools/CrackCode.py
|
|
5
|
+
owntools/Download.py
|
|
6
|
+
owntools/ExtractFile.py
|
|
7
|
+
owntools/Image.py
|
|
8
|
+
owntools/Loger.py
|
|
9
|
+
owntools/Mysql.py
|
|
10
|
+
owntools/Subtitles.py
|
|
11
|
+
owntools/Time.py
|
|
12
|
+
owntools/Translation.py
|
|
13
|
+
owntools/UncompressRAR.py
|
|
14
|
+
owntools/_SSH.py
|
|
15
|
+
owntools/__init__.py
|
|
16
|
+
owntools/ownEmail.py
|
|
17
|
+
xiuyutools.egg-info/PKG-INFO
|
|
18
|
+
xiuyutools.egg-info/SOURCES.txt
|
|
19
|
+
xiuyutools.egg-info/dependency_links.txt
|
|
20
|
+
xiuyutools.egg-info/top_level.txt
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
owntools
|