gomyck-tools 1.5.4__py3-none-any.whl → 1.5.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,429 @@
1
+ #!/usr/bin/env python
2
+ # -*- coding: UTF-8 -*-
3
+ """
4
+ 授权码验证工具类
5
+ 仅用于验证授权码的有效性
6
+ """
7
+ __author__ = 'haoyang'
8
+ __date__ = '2026/1/26'
9
+
10
+ from typing import Optional
11
+ from datetime import datetime
12
+ import json
13
+ import ipaddress
14
+
15
+ from ctools.cipher import sm_util
16
+
17
+
18
class AuthCodeValidator:
    """Validator for signed authorization (license) codes.

    An authorization code is a JSON document of the form::

        {"version": "v1" | "v1_1", "body": {...}, "signature": "<SM2 sig>"}

    The SM2 signature covers a canonical string built from the sorted
    ``body`` items (see :meth:`_build_sign_string`).

    Fixes vs. the previous revision:
      * bare ``except:`` clauses narrowed to concrete exception types;
      * ``_build_sign_string`` no longer raises ``TypeError`` on lists that
        contain non-string items (previously that made verification fail);
      * the copy-pasted retry body in ``_verify_signature`` collapsed into a
        two-attempt loop;
      * duplicated membership / exact-match logic shared via private helpers.
    """

    def __init__(self, public_key: str = None):
        """Initialize the validator.

        Args:
            public_key: SM2 public key used for signature verification.

        Raises:
            Exception: if no public key is supplied.
        """
        if not public_key:
            raise Exception("未提供公钥,无法初始化验证器。请传入 public_key 参数。")
        self.public_key = public_key

    def validate(self, authcode_json: str) -> bool:
        """Quickly check that an auth code parses, has the mandatory fields
        and carries a valid signature.

        Args:
            authcode_json: authorization code as a JSON string.

        Returns:
            bool: True when structurally valid and the signature verifies.
        """
        try:
            authcode_obj = json.loads(authcode_json)
        except (ValueError, TypeError):  # malformed JSON or non-string input
            return False

        # all three top-level fields are mandatory
        if not all(key in authcode_obj for key in ('version', 'body', 'signature')):
            return False
        if not isinstance(authcode_obj.get('body'), dict):
            return False

        return self._verify_signature(authcode_json)

    def _verify_signature(self, authcode_json: str) -> bool:
        """Verify the SM2 signature over the canonical body string.

        Two attempts are made: ``sm_util.init`` is a module-level singleton,
        so a transient failure on the first pass (e.g. an init race) may
        succeed on the retry.

        Args:
            authcode_json: authorization code as a JSON string.

        Returns:
            bool: True when the signature is valid; False on any error.

        Raises:
            Exception: if the validator has no public key.
        """
        if not self.public_key:
            raise Exception("未初始化公钥,无法验证签名。请传入 public_key 参数。")

        for _attempt in range(2):
            try:
                authcode_obj = json.loads(authcode_json)
                body = authcode_obj.get('body', {})
                signature = authcode_obj.get('signature', '')
                version = authcode_obj.get('version', 'v1_1')
                # only the public key matters for verification, so it is
                # passed for both key slots
                sm_util.init(self.public_key, self.public_key)
                final_val = self._build_sign_string(body, version)
                return sm_util.verify_with_sm2(signature, final_val)
            except Exception:
                continue
        return False

    def _build_sign_string(self, body: dict, version: str) -> str:
        """Build the canonical string that was signed (must stay in sync with
        the issuing side in app.py).

        Keys are sorted; list values are comma-joined. ``v1`` concatenates
        ``key:value`` pairs directly, ``v1_1`` (and any unknown version)
        terminates each pair with a newline.

        Args:
            body: authorization code body.
            version: code format version.

        Returns:
            str: the canonical sign string.
        """
        parts = []
        for key, value in sorted(body.items()):
            if isinstance(value, list):
                # str() each item so numeric list entries cannot break signing
                value_str = ",".join(map(str, value))
            else:
                value_str = str(value)
            if version == 'v1':
                parts.append(key + ":" + value_str)
            else:
                # 'v1_1' and any unrecognized version use the v1_1 layout
                parts.append(key + ":" + value_str + '\n')
        return "".join(parts)

    def check_expired(self, authcode_json: str) -> bool:
        """Check the validity window of the code.

        Args:
            authcode_json: authorization code as a JSON string.

        Returns:
            bool: True when ``now`` lies within [effect_time, expired_time];
            missing fields are treated as unrestricted, unparsable ones as invalid.
        """
        try:
            body = json.loads(authcode_json).get('body', {})

            if 'expired_time' in body:
                expired_dt = self._parse_datetime(body['expired_time'])
                if not expired_dt or datetime.now() > expired_dt:
                    return False

            if 'effect_time' in body:
                effect_dt = self._parse_datetime(body['effect_time'])
                if not effect_dt or datetime.now() < effect_dt:
                    return False

            return True
        except Exception:
            return False

    def check_ip(self, authcode_json: str, client_ip: str) -> bool:
        """Check whether a client IP is covered by the granted ranges.

        Args:
            authcode_json: authorization code as a JSON string.
            client_ip: client IP address string.

        Returns:
            bool: True when granted. No ``ip_range`` field means unrestricted;
            an empty one denies everything; ``"*"`` allows everything.
        """
        try:
            body = json.loads(authcode_json).get('body', {})

            if 'ip_range' not in body:
                return True

            ip_ranges = body['ip_range']
            if not ip_ranges:
                return False

            # accept either a comma-separated string or a list
            if isinstance(ip_ranges, str):
                ip_list = [ip.strip() for ip in ip_ranges.split(',')]
            elif isinstance(ip_ranges, list):
                ip_list = ip_ranges
            else:
                return False

            if '*' in ip_list:
                return True

            try:
                client_ip_obj = ipaddress.ip_address(client_ip)
            except (ValueError, TypeError):
                return False

            for ip_pattern in ip_list:
                try:
                    if '/' in ip_pattern:
                        # CIDR pattern
                        if client_ip_obj in ipaddress.ip_network(ip_pattern, strict=False):
                            return True
                    elif str(client_ip_obj) == ip_pattern:
                        # exact-address pattern
                        return True
                except (ValueError, TypeError):
                    continue  # skip malformed patterns, keep checking the rest

            return False
        except Exception:
            return False

    def check_machine_code(self, authcode_json: str, machine_code: str) -> bool:
        """Check whether a machine code is granted (see _check_list_membership)."""
        return self._check_list_membership(authcode_json, 'machine_codes', machine_code)

    def check_module(self, authcode_json: str, module_name: str) -> bool:
        """Check whether a module is granted (see _check_list_membership)."""
        return self._check_list_membership(authcode_json, 'modules', module_name)

    def check_artifact(self, authcode_json: str, artifact_name: str) -> bool:
        """Check whether the artifact name matches the grant (see _check_exact_match)."""
        return self._check_exact_match(authcode_json, 'artifact', artifact_name)

    def check_version(self, authcode_json: str, version: str) -> bool:
        """Check whether the version matches the grant (see _check_exact_match)."""
        return self._check_exact_match(authcode_json, 'version', version)

    def validate_all(self, authcode_json: str, client_ip: str = None,
                     machine_code: str = None, artifact: str = None,
                     version: str = None, module: str = None) -> bool:
        """Run every applicable check; optional arguments that are None/empty
        skip their corresponding check.

        Args:
            authcode_json: authorization code as a JSON string.
            client_ip: client IP (optional).
            machine_code: machine code (optional).
            artifact: artifact name (optional).
            version: version string (optional).
            module: module name (optional).

        Returns:
            bool: True only when signature, time window and all provided
            constraints pass.
        """
        if not self.validate(authcode_json):
            return False
        if not self.check_expired(authcode_json):
            return False
        if client_ip and not self.check_ip(authcode_json, client_ip):
            return False
        if machine_code and not self.check_machine_code(authcode_json, machine_code):
            return False
        if artifact and not self.check_artifact(authcode_json, artifact):
            return False
        if version and not self.check_version(authcode_json, version):
            return False
        if module and not self.check_module(authcode_json, module):
            return False
        return True

    # ============= private helpers =============

    def _check_list_membership(self, authcode_json: str, field: str, value: str) -> bool:
        """Shared logic for list-valued grants (machine_codes / modules).

        Semantics: missing field -> unrestricted; empty grant -> deny all;
        ``"*"`` entry -> allow all; otherwise exact membership.
        """
        try:
            body = json.loads(authcode_json).get('body', {})
            if field not in body:
                return True
            granted = body[field]
            if not granted:
                return False
            if isinstance(granted, str):
                granted_list = [item.strip() for item in granted.split(',')]
            elif isinstance(granted, list):
                granted_list = granted
            else:
                return False
            return '*' in granted_list or value in granted_list
        except Exception:
            return False

    def _check_exact_match(self, authcode_json: str, field: str, value: str) -> bool:
        """Shared logic for scalar grants (artifact / version).

        Semantics: missing field -> unrestricted; ``"*"`` -> allow all;
        otherwise exact equality.
        """
        try:
            body = json.loads(authcode_json).get('body', {})
            if field not in body:
                return True
            granted = body[field]
            return granted == '*' or value == granted
        except Exception:
            return False

    def _parse_datetime(self, date_str: str) -> Optional[datetime]:
        """Parse a datetime string, trying several accepted formats.

        Returns None when the string is empty or matches no format.
        """
        if not date_str:
            return None
        formats = [
            '%Y-%m-%dT%H:%M',      # ISO: 2026-01-26T09:23
            '%Y-%m-%dT%H:%M:%S',   # ISO: 2026-01-26T09:23:45
            '%Y-%m-%d %H:%M:%S',   # standard: 2026-01-26 09:23:45
            '%Y-%m-%d %H:%M',      # standard: 2026-01-26 09:23
            '%Y-%m-%d',            # date only: 2026-01-26
        ]
        for fmt in formats:
            try:
                return datetime.strptime(date_str, fmt)
            except (ValueError, TypeError):
                continue
        return None
ctools/cipher/sm_util.py CHANGED
@@ -9,7 +9,6 @@ sm2_crypt: sm2.CryptSM2 = None
9
9
  def init(private_key: str, public_key: str):
10
10
  global sm2_crypt
11
11
  if sm2_crypt is not None:
12
- print('sm2 is already init!!!')
13
12
  return
14
13
  sm2_crypt = sm2.CryptSM2(private_key=private_key, public_key=public_key, asn1=True, mode=1)
15
14
 
@@ -0,0 +1,121 @@
1
+ import random
2
+
3
+ import numpy as np
4
+ from PIL import Image, ImageDraw, ImageFont
5
+
6
+
7
def preprocess(img, img_size):
    """Prepare an image array for Paddle inference.

    Resizes to a square, swaps the channel order, normalizes with ImageNet
    statistics and transposes to NCHW.

    Args:
        img: HWC array; assumed RGB on input — TODO confirm at call sites.
        img_size: target square resolution.

    Returns:
        float32 array of shape (1, 3, img_size, img_size).
    """
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
    resized = resize(img, img_size)
    swapped = resized[:, :, ::-1].astype('float32')  # RGB -> BGR
    normalized = normalize(swapped, mean, std)
    chw = normalized.transpose((2, 0, 1))  # HWC -> CHW
    # show_preprocess(chw, mean, std)  # debugging aid
    return chw[np.newaxis, :]
16
+
17
+
18
def resize(img, target_size):
    """Bilinear-resize an image array to a square.

    Args:
        img: numpy.ndarray (H, W, 3), BGR or RGB.
        target_size: output side length.

    Returns:
        numpy.ndarray of shape (target_size, target_size, 3).
    """
    pil_image = Image.fromarray(img)
    resized = pil_image.resize((target_size, target_size), Image.BILINEAR)
    return np.array(resized)
26
+
27
+
28
def normalize(img, mean, std):
    """Scale pixel values to [0, 1] then standardize per channel.

    Args:
        img: HWC float array with values in [0, 255].
        mean: per-channel mean (length-3 sequence).
        std: per-channel std (length-3 sequence).

    Returns:
        standardized array of the same shape (dtype preserved by the
        in-place operations).
    """
    result = img / 255.0
    # in-place subtract/divide so the input's float dtype is kept
    result -= np.array(mean)[np.newaxis, np.newaxis, :]
    result /= np.array(std)[np.newaxis, np.newaxis, :]
    return result
35
+
36
+
37
def show_preprocess(chw_img, mean, std):
    """Debug helper: undo normalization on a CHW tensor and display it.

    Args:
        chw_img: (3, H, W) float32 normalized tensor.
        mean: per-channel mean used during normalization.
        std: per-channel std used during normalization.
    """
    restored = chw_img.copy()
    restored = restored.transpose(1, 2, 0)        # CHW -> HWC
    restored = (restored * std + mean) * 255.0    # de-normalize to pixel range
    restored = np.clip(restored, 0, 255).astype(np.uint8)
    Image.fromarray(restored).show()
50
+
51
+
52
def draw_bbox(img, result, threshold=0.5, save_name='res.jpg', scale_factor=None, im_size=320, class_names=None):
    """Draw detection boxes (with labels when class_names is given) on a PIL
    image and save it to disk.

    Args:
        img: PIL Image; drawn on in place.
        result: iterable of [cat_id, score, xmin, ymin, xmax, ymax] rows,
            box coordinates normalized to the model input size.
        threshold: minimum score for a box to be drawn.
        save_name: output file path.
        scale_factor: np.array([[h_scale, w_scale]]) mapping model input to
            the original image, or None for 1:1.
        im_size: model input resolution.
        class_names: optional class-name list; enables labels and a random
            (per call) color per class.
    """
    draw = ImageDraw.Draw(img)

    if scale_factor is not None:
        h_scale, w_scale = scale_factor[0]
    else:
        h_scale = w_scale = 1.

    # one random color per class, fixed for the duration of this call
    category_colors = {}
    if class_names is not None:
        for name in class_names:
            category_colors[name] = tuple(random.randint(0, 255) for _ in range(3))

    try:
        font = ImageFont.truetype("arial.ttf", 15)
    except Exception:
        font = ImageFont.load_default()

    for detection in result:
        cat_id, score, bbox = detection[0], detection[1], detection[2:]
        if score < threshold:
            continue

        # normalized bbox -> model-input pixels -> original-image pixels
        xmin = bbox[0] * im_size / w_scale
        xmax = bbox[2] * im_size / w_scale
        ymin = bbox[1] * im_size / h_scale
        ymax = bbox[3] * im_size / h_scale

        if class_names is not None:
            label = class_names[int(cat_id)]
            color = category_colors[label]
            text = f"{label}:{score:.2f}"

            # text metrics: getsize() on older Pillow, getbbox() on 9.2+
            try:
                text_width, text_height = font.getsize(text)
            except AttributeError:
                left, top, right, bottom = font.getbbox(text)
                text_width = right - left
                text_height = bottom - top

            # label sits just above the box, clamped to the top edge
            draw.text((xmin, max(0, ymin - text_height)), text, fill=color, font=font)
        else:
            color = 'red'

        draw.rectangle([xmin, ymin, xmax, ymax], outline=color, width=2)

    img.save(save_name)
113
+
114
+
115
def image_show(base64_str):
    """Decode a base64 string into a PIL Image and display it."""
    from io import BytesIO
    import base64
    raw = base64.b64decode(base64_str)
    Image.open(BytesIO(raw)).show()
@@ -0,0 +1,59 @@
1
+ #!/usr/bin/env python
2
+ # -*- coding: UTF-8 -*-
3
+ __author__ = 'haoyang'
4
+ __date__ = '2026/1/20 15:02'
5
+
6
+ import base64
7
+ from io import BytesIO
8
+
9
class ClassRegionBase64ExtractorPIL:
    """Crops detected regions out of a PIL image and returns them as
    base64-encoded PNG strings."""

    def __init__(self, class_names, target_classes=None, threshold=0.5):
        """
        Args:
            class_names: ordered model class-name list (index == cat_id).
            target_classes: class names to keep; None keeps every class.
            threshold: minimum detection score.
        """
        self.class_names = class_names
        self.target_classes = target_classes
        self.threshold = threshold

    @staticmethod
    def image_to_base64(img, format='PNG'):
        """Serialize a PIL Image into a base64 string.

        Args:
            img: PIL Image.
            format: image format for serialization (default PNG).

        Returns:
            str: base64-encoded image bytes.
        """
        buffer = BytesIO()
        img.save(buffer, format=format)
        return base64.b64encode(buffer.getvalue()).decode('utf-8')

    def extract(self, img, results, scale_factor=None, im_size=320):
        """Crop and encode every qualifying detection.

        Args:
            img: PIL Image; a copy is cropped per detection (PIL crops are
                lazy, copying keeps this thread safe).
            results: [[cat_id, score, xmin, ymin, xmax, ymax], ...] — box
                coordinates must be in the same pixel space as ``img``.
            scale_factor: accepted for API compatibility; currently unused.
            im_size: accepted for API compatibility; currently unused.

        Returns:
            List[Dict]: [{"class": name, "score": float, "base64": str}, ...]
        """
        outputs = []
        num_classes = len(self.class_names)
        for res in results:
            cat_id, score, bbox = res[0], res[1], res[2:]
            # Skip low-confidence rows and out-of-range class ids.
            # BUGFIX: the old check (cat_id > len-1) let negative ids through
            # (PaddleDetection pads with cat_id == -1), which silently
            # indexed the LAST class name.
            if score < self.threshold or not 0 <= int(cat_id) < num_classes:
                continue
            class_name = self.class_names[int(cat_id)]
            if self.target_classes is not None and class_name not in self.target_classes:
                continue
            xmin = bbox[0]
            ymin = bbox[1]
            xmax = bbox[2]
            ymax = bbox[3]
            # crop a copy for thread safety
            pil_img_threadsafe = img.copy()
            cropped = pil_img_threadsafe.crop((xmin, ymin, xmax, ymax))
            b64_str = self.image_to_base64(cropped)
            outputs.append({
                "class": class_name,
                "score": float(score),
                "base64": b64_str
            })
        return outputs
ctools/ml/ppi.py ADDED
@@ -0,0 +1,275 @@
1
+ from io import BytesIO
2
+ from queue import Queue
3
+
4
+ import numpy as np
5
+ import yaml
6
+ from PIL import Image
7
+ from paddle.inference import Config, create_predictor
8
+
9
+ from ctools import path_info
10
+ from ctools.ml.image_process import preprocess
11
+ from ctools.ml.img_extractor import ClassRegionBase64ExtractorPIL
12
+
13
+
14
class PaddlePredictorPool:
    """Fixed-size pool of Paddle predictors for thread-safe inference.

    Each paddle ``Config`` object can back only one predictor, so a fresh
    ``Config`` is built from the same YAML file for every pool entry.
    """

    def __init__(self, config_path, pool_size: int = 4):
        """
        Args:
            config_path: path to the YAML inference configuration.
            pool_size: number of predictors to pre-create.
        """
        self.config_path = config_path
        self.pool = Queue()
        self._init_pool(pool_size)

    def _load_config_yaml(self):
        """Parse the YAML configuration file."""
        with open(self.config_path, "r", encoding="utf-8") as f:
            return yaml.safe_load(f)

    def _create_config(self):
        """Build a fresh paddle ``Config``.

        Precedence: explicit MODEL_DIR from the YAML, otherwise the
        MODEL_FILE / PARAMS_FILE pair (each falling back to the bundled
        files under ``mod/``).
        """
        cfg = self._load_config_yaml()
        model_dir = cfg.get("MODEL_DIR", "")
        model_file = cfg.get("MODEL_FILE", "")
        if not model_file:
            # BUGFIX: these fallbacks used to assign to model_dir, so an
            # empty YAML produced Config(<path to model.pdiparams>).
            model_file = path_info.get_app_path('mod/model.pdmodel')
        params_file = cfg.get("PARAMS_FILE", "")
        if not params_file:
            params_file = path_info.get_app_path('mod/model.pdiparams')
        use_gpu = cfg.get("USE_GPU", False)

        if model_dir:
            config = Config(model_dir)
        else:
            config = Config(model_file, params_file)

        config.enable_memory_optim()

        if use_gpu:
            config.enable_use_gpu(1000, 0)  # 1000 MB workspace on GPU 0
        else:
            config.set_cpu_math_library_num_threads(4)
            config.enable_mkldnn()

        return config

    def _init_pool(self, pool_size: int):
        """Create ``pool_size`` predictors and enqueue them."""
        for _ in range(pool_size):
            config = self._create_config()
            predictor = create_predictor(config)
            self.pool.put(predictor)

    def acquire(self, timeout=None):
        """Take a predictor from the pool (blocks up to ``timeout`` seconds)."""
        return self.pool.get(timeout=timeout)

    def release(self, predictor):
        """Return a predictor to the pool."""
        self.pool.put(predictor)
71
+
72
+
73
class PaddleInferenceEngine:
    """High-level, thread-safe inference wrapper around a
    :class:`PaddlePredictorPool`.

    Improvement vs. the previous revision: the bytes / PIL / ndarray input
    coercion that was copy-pasted three times is consolidated into the
    private :meth:`_as_pil` helper.
    """

    def __init__(self, config_path, pool_size=4):
        """
        Args:
            config_path: path to the YAML inference configuration.
            pool_size: number of pooled predictors.
        """
        self.config_path = config_path
        self.cfg = self._load_config(config_path)
        self.predictor_pool = PaddlePredictorPool(config_path, pool_size=pool_size)

    def _load_config(self, config_path):
        """Parse the YAML inference configuration."""
        with open(config_path, "r", encoding="utf-8") as f:
            return yaml.safe_load(f)

    @staticmethod
    def _as_pil(img):
        """Coerce bytes / PIL Image / ndarray input into an RGB PIL Image."""
        if isinstance(img, bytes):
            return Image.open(BytesIO(img)).convert("RGB")
        if isinstance(img, Image.Image):
            return img.convert("RGB")
        if isinstance(img, np.ndarray):
            return Image.fromarray(img.astype('uint8')).convert("RGB")
        raise ValueError("Unsupported image type")

    def predict(self, inputs, timeout=None):
        """Thread-safe raw prediction.

        Args:
            inputs: dict mapping model input names to numpy arrays.
            timeout: max seconds to wait for a pooled predictor.

        Returns:
            list of numpy arrays, one per model output.

        Raises:
            ValueError: when a required model input is missing.
        """
        predictor = self.predictor_pool.acquire(timeout=timeout)
        try:
            for name in predictor.get_input_names():
                if name not in inputs:
                    raise ValueError(f"缺少模型输入: {name}")
                handle = predictor.get_input_handle(name)
                data = inputs[name]
                handle.reshape(data.shape)
                handle.copy_from_cpu(data)
            predictor.run()
            outputs = []
            for name in predictor.get_output_names():
                outputs.append(predictor.get_output_handle(name).copy_to_cpu())
            return outputs
        finally:
            # always return the predictor, even on failure
            self.predictor_pool.release(predictor)

    def predict_image(self, img, im_size=320):
        """Run detection on a single image.

        Args:
            img: bytes, PIL Image, or HWC ndarray (an ndarray is assumed to
                already be RGB — TODO confirm at call sites).
            im_size: model input resolution.

        Returns:
            (outputs, scale_factor, im_size): raw model outputs, the
            input->original scale factors, and the resolution used.
        """
        if isinstance(img, bytes):
            img = Image.open(BytesIO(img)).convert("RGB")
        elif isinstance(img, Image.Image):
            img = img.convert("RGB")
        elif isinstance(img, np.ndarray):
            pass
        else:
            raise ValueError("Unsupported image type for predict_image")
        orig_img_np = np.array(img) if not isinstance(img, np.ndarray) else img
        data = preprocess(orig_img_np, im_size)
        scale_factor = np.array([im_size * 1. / orig_img_np.shape[0],
                                 im_size * 1. / orig_img_np.shape[1]]).reshape((1, 2)).astype(np.float32)
        im_shape = np.array([im_size, im_size]).reshape((1, 2)).astype(np.float32)
        outputs = self.predict({"image": data, "im_shape": im_shape, "scale_factor": scale_factor})
        return outputs, scale_factor, im_size

    def predict_image_and_extract(self, img, im_size=320, class_names=None, target_classes=None, threshold=0.3):
        """Predict and return the detected regions as base64 crops."""
        raw_outputs, scale_factor, im_size_ret = self.predict_image(img, im_size=im_size)
        pil_img = self._as_pil(img)
        extractor = ClassRegionBase64ExtractorPIL(class_names or [], target_classes=target_classes, threshold=threshold)
        detection_results = raw_outputs[0]
        return extractor.extract(pil_img, detection_results, scale_factor=scale_factor, im_size=im_size_ret)

    @staticmethod
    def _nms_detections(detections, iou_threshold=0.5):
        """Greedy, class-agnostic non-maximum suppression.

        Args:
            detections: [[cat_id, score, xmin, ymin, xmax, ymax], ...].
            iou_threshold: boxes overlapping a kept box above this IoU are dropped.

        Returns:
            surviving detections as plain lists (values go through float32),
            ordered by descending score; the input unchanged when empty.
        """
        if len(detections) == 0:
            return detections
        dets = np.array(detections, dtype=np.float32)
        order = np.argsort(-dets[:, 1])  # indices by descending score
        keep = []
        while len(order) > 0:
            current = order[0]
            keep.append(current)
            if len(order) == 1:
                break
            current_box = dets[current, 2:6]
            other_boxes = dets[order[1:], 2:6]
            x1 = np.maximum(current_box[0], other_boxes[:, 0])
            y1 = np.maximum(current_box[1], other_boxes[:, 1])
            x2 = np.minimum(current_box[2], other_boxes[:, 2])
            y2 = np.minimum(current_box[3], other_boxes[:, 3])
            inter_area = np.maximum(0, x2 - x1) * np.maximum(0, y2 - y1)
            area_current = (current_box[2] - current_box[0]) * (current_box[3] - current_box[1])
            area_others = (other_boxes[:, 2] - other_boxes[:, 0]) * (other_boxes[:, 3] - other_boxes[:, 1])
            union_area = area_current + area_others - inter_area
            iou = inter_area / (union_area + 1e-6)  # epsilon guards zero-area unions
            order = order[np.where(iou < iou_threshold)[0] + 1]
        return dets[keep].tolist()

    def predict_image_tiled_and_extract(self, img, im_size=320, tile_overlap=0.2, class_names=None, target_classes=None, threshold=0.3, nms_iou=0.5):
        """Tiled prediction for large images (2K, 4K, etc).

        Splits the image into overlapping tiles, predicts each tile, maps
        the boxes back to original coordinates, and merges duplicates via NMS.

        Args:
            img: bytes, PIL Image, or HWC ndarray.
            im_size: model training resolution (e.g. 320).
            tile_overlap: overlap ratio between tiles (0.0-1.0).
            class_names: list of class names.
            target_classes: class names to extract; None keeps all.
            threshold: confidence threshold.
            nms_iou: IoU threshold for NMS merging.

        Returns:
            extracted region dicts (coordinates already in original-image space).
        """
        if isinstance(img, bytes):
            img_np = np.array(Image.open(BytesIO(img)).convert("RGB"))
        elif isinstance(img, Image.Image):
            img_np = np.array(img.convert("RGB"))
        elif isinstance(img, np.ndarray):
            img_np = img
        else:
            raise ValueError("Unsupported image type for predict_image_tiled")
        orig_h, orig_w = img_np.shape[:2]

        # tile step: (1 - overlap) of the model resolution, at least 1 px
        stride = max(1, int(im_size * (1 - tile_overlap)))

        # enumerate tile windows; the last row/column is shifted back so it
        # always ends exactly on the image border
        tiles = []
        y_start = 0
        while y_start < orig_h:
            y_end = min(y_start + im_size, orig_h)
            if y_end == orig_h and y_start > 0:
                y_start = max(0, orig_h - im_size)
                y_end = orig_h
            x_start = 0
            while x_start < orig_w:
                x_end = min(x_start + im_size, orig_w)
                if x_end == orig_w and x_start > 0:
                    x_start = max(0, orig_w - im_size)
                    x_end = orig_w
                tiles.append((x_start, y_start, x_end, y_end))
                x_start += stride
                if x_end == orig_w:
                    break
            y_start += stride
            if y_end == orig_h:
                break

        all_detections = []
        for x_start, y_start, x_end, y_end in tiles:
            tile_img = img_np[y_start:y_end, x_start:x_end]
            # zero-pad edge tiles up to the model resolution
            if tile_img.shape[0] < im_size or tile_img.shape[1] < im_size:
                pad_h = im_size - tile_img.shape[0]
                pad_w = im_size - tile_img.shape[1]
                tile_img = np.pad(tile_img, ((0, pad_h), (0, pad_w), (0, 0)), mode='constant', constant_values=0)
            try:
                tile_outputs, _, _ = self.predict_image(tile_img, im_size=im_size)
                tile_h, tile_w = y_end - y_start, x_end - x_start
                for det in tile_outputs[0]:
                    cat_id, score = det[0], det[1]
                    xmin, ymin, xmax, ymax = det[2], det[3], det[4], det[5]
                    # model resolution -> tile pixels -> original-image pixels
                    xmin_orig = xmin * tile_w / im_size + x_start
                    ymin_orig = ymin * tile_h / im_size + y_start
                    xmax_orig = xmax * tile_w / im_size + x_start
                    ymax_orig = ymax * tile_h / im_size + y_start
                    # clamp to the image bounds
                    xmin_orig = max(0, min(xmin_orig, orig_w))
                    ymin_orig = max(0, min(ymin_orig, orig_h))
                    xmax_orig = max(0, min(xmax_orig, orig_w))
                    ymax_orig = max(0, min(ymax_orig, orig_h))
                    all_detections.append([cat_id, score, xmin_orig, ymin_orig, xmax_orig, ymax_orig])
            except Exception as e:
                # best effort: a failing tile must not abort the whole image
                print(f"Error processing tile {(x_start, y_start, x_end, y_end)}: {e}")
                continue

        # merge duplicates that straddle tile borders
        merged_detections = self._nms_detections(all_detections, iou_threshold=nms_iou)

        # detections are already in original-image coordinates -> scale 1:1
        pil_img = self._as_pil(img)
        scale_factor = np.array([[1.0, 1.0]], dtype=np.float32)
        extractor = ClassRegionBase64ExtractorPIL(class_names or [], target_classes=target_classes, threshold=threshold)
        return extractor.extract(pil_img, merged_detections, scale_factor=scale_factor, im_size=orig_h)
ctools/sys_log.py CHANGED
@@ -1,96 +1,170 @@
1
- import logging
2
- import os
3
1
  import sys
2
+ import os
4
3
  import time
5
-
6
- from ctools import call, path_info
7
-
8
- clog: logging.Logger = None
9
- flog: logging.Logger = None
10
-
11
- neglect_keywords = [
12
- "OPTIONS",
13
- ]
14
-
15
-
16
- # 文件日志
17
- @call.once
18
- def _file_log(sys_log_path: str = './', log_level: int = logging.INFO, mixin: bool = False) -> logging:
19
- try:
20
- os.mkdir(sys_log_path)
21
- except Exception:
22
- pass
23
- log_file = sys_log_path + os.path.sep + "log-" + time.strftime("%Y-%m-%d-%H", time.localtime(time.time())) + ".log"
24
- if mixin:
25
- handlers = [logging.FileHandler(filename=log_file, encoding='utf-8'), logging.StreamHandler()]
26
- else:
27
- handlers = [logging.FileHandler(filename=log_file, encoding='utf-8')]
28
- logging.basicConfig(level=log_level,
29
- format='%(asctime)s | %(levelname)-5s | T%(thread)d | %(module)s.%(funcName)s:%(lineno)d: %(message)s',
30
- datefmt='%Y%m%d%H%M%S',
31
- handlers=handlers)
32
- logger = logging.getLogger('ck-flog')
33
- return logger
34
-
35
-
36
- # 控制台日志
37
- @call.once
38
- def _console_log(log_level: int = logging.INFO) -> logging:
39
- handler = logging.StreamHandler()
40
- logging.basicConfig(level=log_level,
41
- format='%(asctime)s | %(levelname)-5s | T%(thread)d | %(name)s | %(module)s.%(funcName)s:%(lineno)d: %(message)s',
42
- datefmt='%Y%m%d%H%M%S',
43
- handlers=[handler])
44
- logger = logging.getLogger('ck-clog')
45
- return logger
46
-
47
-
48
- import io
49
4
  import logging
5
+ from typing import Protocol
50
6
 
7
+ from ctools import call
51
8
 
52
- class StreamToLogger(io.StringIO):
53
- def __init__(self, logger: logging.Logger, level: int = logging.INFO):
54
- super().__init__()
55
- self.logger = logger
56
- self.level = level
57
- self._buffer = ''
58
9
 
59
- def write(self, message: str):
60
- if not message:
10
# =========================
# Logger interface (for IDE completion)
# =========================
class LoggerProtocol(Protocol):
    """Structural type describing the logger API this module exposes."""
    def debug(self, msg: str, *args, **kwargs) -> None: ...
    def info(self, msg: str, *args, **kwargs) -> None: ...
    def warning(self, msg: str, *args, **kwargs) -> None: ...
    def error(self, msg: str, *args, **kwargs) -> None: ...
    def critical(self, msg: str, *args, **kwargs) -> None: ...
    def exception(self, msg: str, *args, **kwargs) -> None: ...


class _LazyLogger:
    """Placeholder that fails loudly until init_log() has run."""
    def __getattr__(self, item):
        raise RuntimeError("Logger not initialized, call init_log() first")


# rebound to the real logger by init_log()
flog: LoggerProtocol = _LazyLogger()
clog: LoggerProtocol = _LazyLogger()
29
+
30
+
31
# =========================
# TeeStream
# =========================
class TeeStream:
    """File-like object that fans every write out to several streams.

    Write/flush errors on any underlying stream are deliberately swallowed
    so that logging can never crash the program.
    """

    def __init__(self, *streams):
        self.streams = streams

    def write(self, data):
        """Forward data to every stream; empty writes are a no-op."""
        if not data:
            return
        for stream in self.streams:
            try:
                stream.write(data)
            except Exception:
                pass  # best effort: one broken sink must not break the rest

    def flush(self):
        """Flush every underlying stream, ignoring individual failures."""
        for stream in self.streams:
            try:
                stream.flush()
            except Exception:
                pass

    def isatty(self):
        # the tee counts as a tty if any of its sinks is one
        return any(getattr(s, "isatty", lambda: False)() for s in self.streams)

    def fileno(self):
        """Return the first usable file descriptor among the sinks.

        Raises:
            OSError: when no sink provides one (e.g. all are StringIO).
        """
        for stream in self.streams:
            try:
                return stream.fileno()
            except Exception:
                pass
        # improvement: carry an explanatory message instead of a bare OSError
        raise OSError("no underlying stream provides a file descriptor")
83
64
 
84
65
 
66
# =========================
# print -> file
# =========================
class PrintToFile:
    """File-like adapter that turns print() output into log records emitted
    on a single logging handler."""

    def __init__(self, file_handler: logging.Handler, level=logging.INFO):
        self.handler = file_handler
        self.level = level
        self._buffer = ""

    def _emit_line(self, text):
        """Send one line of text to the handler as a synthetic record."""
        record = logging.LogRecord(
            name="print",
            level=self.level,
            pathname="",
            lineno=0,
            msg=text,
            args=(),
            exc_info=None,
        )
        self.handler.emit(record)

    def write(self, msg):
        """Buffer incoming text; emit every completed (newline-terminated) line."""
        if not msg:
            return
        self._buffer += msg
        while "\n" in self._buffer:
            line, self._buffer = self._buffer.split("\n", 1)
            line = line.rstrip()
            if line:
                self._emit_line(line)

    def flush(self):
        """Emit any buffered partial line and reset the buffer."""
        leftover = self._buffer.strip()
        if leftover:
            self._emit_line(leftover)
        self._buffer = ""

    def isatty(self):
        return False
110
+
111
+
112
# =========================
# Logging bootstrap
# =========================
@call.init
def init_log():
    """Configure file + console logging and tee stdout/stderr into the log file."""
    global flog, clog

    # absolute log directory under the user's home
    home_dir = os.path.expanduser("~")
    log_dir = os.path.join(home_dir, ".ck", "ck-py-log")
    os.makedirs(log_dir, exist_ok=True)

    log_file = os.path.join(log_dir, f"log-{time.strftime('%Y-%m-%d-%H')}.log")

    formatter = logging.Formatter(
        "%(asctime)s | %(levelname)-8s | %(name)s:%(lineno)d - %(message)s"
    )

    # file handler: everything from DEBUG up
    file_handler = logging.FileHandler(log_file, encoding="utf-8")
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(formatter)

    # console handler: INFO and up, written to stderr
    console_handler = logging.StreamHandler(sys.stderr)
    console_handler.setLevel(logging.INFO)
    console_handler.setFormatter(formatter)

    logger = logging.getLogger("app")
    logger.setLevel(logging.DEBUG)
    logger.handlers.clear()
    logger.addHandler(file_handler)
    logger.addHandler(console_handler)
    logger.propagate = False

    # both module-level loggers point at the same configured logger
    flog = logger
    clog = logger

    # tee stdout/stderr so print() reaches both the terminal and the file
    original_stdout = sys.stdout
    original_stderr = sys.stderr
    sys.stdout = TeeStream(original_stdout, PrintToFile(file_handler, level=logging.INFO))
    sys.stderr = TeeStream(original_stderr, PrintToFile(file_handler, level=logging.ERROR))

    # sanity check: the log file must exist now
    if not os.path.isfile(log_file):
        raise RuntimeError(f"日志文件未创建: {log_file}")
169
+ raise RuntimeError(f"日志文件未创建: {log_file}")
93
170
 
94
- def setLevel(log_level=logging.INFO):
95
- flog.setLevel(log_level)
96
- clog.setLevel(log_level)
ctools/util/jb_cut.py CHANGED
@@ -3,7 +3,6 @@
3
3
  __author__ = 'haoyang'
4
4
  __date__ = '2025/7/15 13:08'
5
5
 
6
- import sys
7
6
  from collections import Counter
8
7
 
9
8
  import jieba
@@ -207,6 +207,7 @@ def params_resolve(func):
207
207
  dict_wrapper = DictWrapper({'body': params})
208
208
  dict_wrapper.update(query_params.dict)
209
209
  return func(params=auto_exchange(func, dict_wrapper), *args, **kwargs)
210
+ return None
210
211
  else:
211
212
  return func(*args, **kwargs)
212
213
 
@@ -19,7 +19,9 @@ from ctools.util.config_util import load_config
19
19
  from ctools.web import bottle_web_base, bottle_webserver
20
20
  from key_word_cloud.db_core.db_init import init_partitions
21
21
  from patch_manager import patch_funcs
22
+ from ctools.web.ctoken import CToken
22
23
 
24
+ CToken.token_audience = 'server-name'
23
25
  database.init_db('postgresql://postgres:123123@192.168.xx.xx:5432/xxx', default_schema='xxx', auto_gen_table=False, echo=False)
24
26
 
25
27
  config = load_config('application.ini')
@@ -31,7 +33,7 @@ app = bottle_web_base.init_app("/api", True)
31
33
 
32
34
  @bottle_web_base.before_intercept(0)
33
35
  def token_check():
34
- return bottle_web_base.common_auth_verify(config.base.secret_key)
36
+ return bottle_web_base.common_auth_verify()
35
37
 
36
38
  if __name__ == '__main__':
37
39
  main_server = bottle_webserver.init_bottle(app)
@@ -120,7 +122,7 @@ class CBottle:
120
122
 
121
123
  def run(self):
122
124
  http_server = WSGIRefServer(port=self.port)
123
- print('Click the link below to open the service homepage %s' % '\n \t\t http://localhost:%s \n \t\t http://%s:%s' % (self.port, sys_info.get_local_ipv4(), self.port), file=sys.stderr)
125
+ print('Click the link below to open the service homepage %s' % '\n \t\t http://localhost:%s \n \t\t http://%s:%s \n' % (self.port, sys_info.get_local_ipv4(), self.port), file=sys.stderr)
124
126
  cache_white_list(self.bottle)
125
127
  self.bottle.run(server=http_server, quiet=self.quiet)
126
128
 
@@ -19,7 +19,7 @@ def get_ws_modules():
19
19
  """
20
20
 
21
21
  """
22
- ws_app = bottle_web_base.init_app('/websocket_demo')
22
+ ws_app = bottle_web_base.init_app('/websocket_demo', main_app=True)
23
23
 
24
24
  @ws_app.route('/script_debug', apply=[websocket])
25
25
  @bottle_web_base.rule('DOC:DOWNLOAD')
ctools/web/ctoken.py CHANGED
@@ -13,7 +13,7 @@ from ctools.dict_wrapper import DictWrapper
13
13
 
14
14
 
15
15
  class CToken:
16
- token_audience = ["gomyck"]
16
+ token_audience = ["gomyck"] # token 受众: 颁发至哪个服务, 不同服务要修改这个值, 避免服务之间 token 泄漏
17
17
  token_secret_key = 'gomyck123'
18
18
  token_header = 'Authorization'
19
19
 
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: gomyck-tools
3
- Version: 1.5.4
3
+ Version: 1.5.6
4
4
  Summary: A tools collection for python development by hao474798383
5
5
  Author-email: gomyck <hao474798383@163.com>
6
6
  License-Expression: Apache-2.0
@@ -45,6 +45,8 @@ Requires-Dist: scikit-learn>=1.7.1; extra == "ml"
45
45
  Requires-Dist: pandas>=2.3.2; extra == "ml"
46
46
  Requires-Dist: lxml>=6.0.1; extra == "ml"
47
47
  Requires-Dist: xlrd>=2.0.2; extra == "ml"
48
+ Provides-Extra: xj-plate-server
49
+ Requires-Dist: paddlepaddle==3.2.2; extra == "xj-plate-server"
48
50
  Dynamic: license-file
49
51
 
50
52
  # Gomyck-Tools
@@ -1,6 +1,7 @@
1
1
  ctools/__init__.py,sha256=7EtnbO7mi1iYQeyU4CLusYrwO96_dC7eR7jboyWAqvg,1089
2
2
  ctools/application.py,sha256=QFLbwmjznz2HAwVyph9PhzMpJUU5R82eMaVcw5UDSMk,15899
3
3
  ctools/aspect.py,sha256=lXrpeu_F3w6v2Hu2yOwQIRGqfDN25_H-1YyW6fPt_mw,1667
4
+ ctools/authcode_validator.py,sha256=g-AFBCEVvtLaoRTXn4qZgVfMv3TUFFYFUmc4fn8Hps8,12312
4
5
  ctools/call.py,sha256=TFFC8PqvCu0PS0XelmV4QXdXezQiUsEacxg3RgKvdwE,1572
5
6
  ctools/cdate.py,sha256=OhKAaQfo2Rxd3Jx3g9AfPsaISRoLkstqZdaGT4ZZr_I,3096
6
7
  ctools/cdebug.py,sha256=_mihZRCEx_bi7Kv_QPjP4MPLNFrl-GR1Y_irTgOP7OU,4021
@@ -14,7 +15,7 @@ ctools/patch.py,sha256=2eIle4uVTIcy1QMB1LbApo47cQTJ8GnXMihp-Mkgse0,3128
14
15
  ctools/path_info.py,sha256=JoEGPeeSu1jMGTIquq2CZFMe7Ergg8gL4DPt3_Q4q0U,2688
15
16
  ctools/similar.py,sha256=ByOFaJ2AHZBe3ekoEco5miXcZP1XW7vGecH1i3dES5g,698
16
17
  ctools/sys_info.py,sha256=QynB_2nQeGUuUdqJQldtEq916gDfPBsei1OWx9GYKSI,4278
17
- ctools/sys_log.py,sha256=Sud91NQEVIiqiW6zffv-LrdmVKqva9ijUpdhTxrQhDA,2782
18
+ ctools/sys_log.py,sha256=zFXOuq6R5qimMfN0fovsyKgblLRJql9ZQLiThW1HJk4,3961
18
19
  ctools/ai/__init__.py,sha256=gTYAICILq48icnFbg0HCbsQO8PbU02EDOQ0JeMvfqTY,98
19
20
  ctools/ai/llm_chat.py,sha256=vgv1C72fv7YEGj_203L6JY_PyDszxvhrlLjLYbxuvZk,9133
20
21
  ctools/ai/llm_client.py,sha256=0xHvSQoAd4PAnunyH2a-cJ9RKTkWO1cFyrnqmA1j3g0,6178
@@ -41,12 +42,15 @@ ctools/cipher/b64.py,sha256=TLVYRCZc4BowPGk-RoSz89o9mqpDmFW8IlceP_KNZcI,196
41
42
  ctools/cipher/czip.py,sha256=7zNaafiG-O6NkaSv4cBP8jxUBqCTmCmPsd6efX0bu9Y,4679
42
43
  ctools/cipher/rsa.py,sha256=7TPwlt-JQxDVHwvL_Am9ZKI70B2CfJDqQDf473edHSQ,2264
43
44
  ctools/cipher/sign.py,sha256=i__5PVevvR2-VjJyInmyH1QJOpmuxcA9ZIM0oxxFWVo,567
44
- ctools/cipher/sm_util.py,sha256=cC58wZ9IL08SvUfWUNjX5139fal2SnXGaFQ0R9WA3SE,1675
45
+ ctools/cipher/sm_util.py,sha256=vqECjsD7nLccQqnito_AyFm3WmlXdMS3yd_vnVZdt5w,1639
45
46
  ctools/database/__init__.py,sha256=fB36UC93Pya_1YyWGMzDy3D4tMDTBQoYK20E4wgNqec,98
46
47
  ctools/database/database.py,sha256=IQzPw2fOJh0Xn_OdSMeEOXBEV9YCRDoszg-dwJPXTTU,7765
47
48
  ctools/geo/__init__.py,sha256=OkUaZv5ckkXJFNbRyFZqkX5m3GxTueEGEBU99_jJQNE,98
48
49
  ctools/geo/coord_trans.py,sha256=UWCU1wnrTDU1aLSVAwmiHOeH7Pu-Dp8lDLAngDG48NM,3761
49
50
  ctools/geo/douglas_rarefy.py,sha256=bJo6TwNxPa-7-8MOi8MULxeqnz4cvIJN-oXqBDWNAVM,4883
51
+ ctools/ml/image_process.py,sha256=keiIbcCkCZq-dp3PBpI-8tHFXNqKJKXATcrojMEpAGs,3168
52
+ ctools/ml/img_extractor.py,sha256=x_o3il6gWoKMBRgJ9m7HNt70nY3Etr5QcPzEKq76oiA,1829
53
+ ctools/ml/ppi.py,sha256=BtYam72ovii7UZ4LniM0smLv2fMmeqBUfKpfJTrHMVM,10886
50
54
  ctools/office/__init__.py,sha256=wum34b8YJg0qD7uKdDEbozSE8RIxWqTVa44CCIZyqPU,98
51
55
  ctools/office/cword.py,sha256=bIthKmf0oBqjcdkrU5hFDAPp66ZrjeMNzjIxOlMPeCc,837
52
56
  ctools/office/word_fill.py,sha256=ZoTxz0-agEy5atIRWqYmQZz82nVr2zz9JebSTMkulIo,18214
@@ -69,20 +73,20 @@ ctools/util/env_config.py,sha256=L98G9LPdpD7Yl5XbA_-KfkcA4mDunQoKiYtK5w7QR-s,179
69
73
  ctools/util/html_soup.py,sha256=rnr8M3gn3gQGo-wNaNFXDjdzp8AAkv9o4yqfIIfO-zw,1567
70
74
  ctools/util/http_util.py,sha256=cx0FRnPLFdJ0mF9UYphl40SZj68fqG30Q0udku9hZIE,769
71
75
  ctools/util/image_process.py,sha256=nqJOi2p8wLe8wRsfkH99MyEYSjE9i4fthxBJwrrZVB8,835
72
- ctools/util/jb_cut.py,sha256=2vs054_lM-0sxh70cUv7nHhLkWW1ioM3MQ5LF25sBl8,1844
76
+ ctools/util/jb_cut.py,sha256=ZMj5QV031sersGzd8xrO_LihqnyeEGud6fFwJQIGwGo,1833
73
77
  ctools/util/snow_id.py,sha256=KCuQ0zOTlmus8gZetmRA5y0jBSd8J0KXcJ33EzgCKjE,2225
74
78
  ctools/web/__init__.py,sha256=koSNYeKF5Z_xbp4Q2qbZ4ZP-3--1phbOYN9e4SJy_gk,98
75
79
  ctools/web/aio_web_server.py,sha256=p46BOU3_m4Jb57yAACeedKjhlFc1YC0QJSUe2selBgA,5693
76
80
  ctools/web/api_result.py,sha256=i1MjTnnlgkWl_q07xr-TLQeLYlXEh4DEclUFE414nSk,1568
77
- ctools/web/bottle_web_base.py,sha256=iQZJtRQH3HJ-P-48Gspvp1VEd6boAlZfiat3H8E5bBU,8763
78
- ctools/web/bottle_webserver.py,sha256=9IcmQ8dHtbR7VW29sqZ_NM954KNf-V_Z6rJZ4pnz4P4,7169
79
- ctools/web/bottle_websocket.py,sha256=xsu9fAtTuR5DsSsQjiBfaYxLjOWFyfr1sYM6cktTovI,1957
80
- ctools/web/ctoken.py,sha256=WaB29kqGlKAh21aUw5avl2h8AgLD1aESw8KCpqaN5nM,2539
81
+ ctools/web/bottle_web_base.py,sha256=DWqvS7-_BRP_ojGWmFeIGfdXVfeuhcptVb7qxl4MO6c,8781
82
+ ctools/web/bottle_webserver.py,sha256=k4p-ji3VGvU8jCO9NBx-nIbEOmAxXbBo9sYQKrOwtCM,7225
83
+ ctools/web/bottle_websocket.py,sha256=cVNGnTs1ej1iDRkLiA_EUklydfC7ZHiiU_DzL2RHgtY,1972
84
+ ctools/web/ctoken.py,sha256=qsxrQY7IWgpPe5HyiDDnf50vsRX27QXMWPzam7bGu_Y,2646
81
85
  ctools/web/download_util.py,sha256=v0JTXiED1bvoWFfwfd-LD5s7_aoRQ0lCkaGwSnSp7WI,1954
82
86
  ctools/web/params_util.py,sha256=eJDV3PSq-ZHb8UZf6xqs8kOhbyZzits1H9yPoUBIDXg,828
83
87
  ctools/web/upload_util.py,sha256=z1QQCi4SFx08jrAQH5-Y_ShiM4MghuD_5Qz6V9KK_4U,1076
84
- gomyck_tools-1.5.4.dist-info/licenses/LICENSE,sha256=X25ypfH9E6VTht2hcO8k7LCSdHUcoG_ALQt80jdYZfY,547
85
- gomyck_tools-1.5.4.dist-info/METADATA,sha256=byhNUGqMt6IFVmf5ROsxNuuK3ST4fjcPTKqZa8pzuC4,1862
86
- gomyck_tools-1.5.4.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
87
- gomyck_tools-1.5.4.dist-info/top_level.txt,sha256=-MiIH9FYRVKp1i5_SVRkaI-71WmF1sZSRrNWFU9ls3s,7
88
- gomyck_tools-1.5.4.dist-info/RECORD,,
88
+ gomyck_tools-1.5.6.dist-info/licenses/LICENSE,sha256=X25ypfH9E6VTht2hcO8k7LCSdHUcoG_ALQt80jdYZfY,547
89
+ gomyck_tools-1.5.6.dist-info/METADATA,sha256=bzC0W0VinVmWDxCSpMkw5elRZ2svLnrog1aHUehGFFA,1957
90
+ gomyck_tools-1.5.6.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
91
+ gomyck_tools-1.5.6.dist-info/top_level.txt,sha256=-MiIH9FYRVKp1i5_SVRkaI-71WmF1sZSRrNWFU9ls3s,7
92
+ gomyck_tools-1.5.6.dist-info/RECORD,,
@@ -1,5 +1,5 @@
1
1
  Wheel-Version: 1.0
2
- Generator: setuptools (80.9.0)
2
+ Generator: setuptools (80.10.2)
3
3
  Root-Is-Purelib: true
4
4
  Tag: py3-none-any
5
5