mlog-util 0.1.6__tar.gz → 0.1.7__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mlog-util might be problematic.

PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: mlog-util
- Version: 0.1.6
+ Version: 0.1.7
  Summary: Add your description here
  Requires-Python: >=3.12
  Description-Content-Type: text/markdown
pyproject.toml
@@ -1,6 +1,6 @@
  [project]
  name = "mlog-util"
- version = "0.1.6"
+ version = "0.1.7"
  description = "Add your description here"
  readme = "README.md"
  requires-python = ">=3.12"
src/mlog_util/__init__.py
@@ -1,3 +1,5 @@
  from .log_manager import LogManager, get_logger
  from .handlers import MultiProcessSafeSizeRotatingHandler, MultiProcessSafeTimeRotatingHandler

+ __version__ = "0.1.7"
+
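With __version__ now exported from the package, callers can check the installed release at runtime. A minimal sketch, assuming the package is importable as mlog_util:

    import mlog_util

    # 0.1.7 is the first release to expose its version on the package itself.
    print(mlog_util.__version__)  # "0.1.7"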
src/mlog_util/handlers.py
@@ -5,6 +5,8 @@ import glob
  import errno
  import logging
  import portalocker
+ from pathlib import Path
+ import re
  from abc import ABC, abstractmethod


@@ -234,20 +236,34 @@ class MultiProcessSafeSizeRotatingHandler(MultiProcessSafeRotatingHandlerBase):

      def _do_rollover_impl(self):
          # Rotate the backup files
+         log_path = Path(self.filename)
+
+         # 1. Rotate the existing backups (e.g. .3 -> .4, .2 -> .3, .1 -> .2)
+         # Walk in reverse order so nothing gets overwritten
          for i in range(self.backupCount - 1, 0, -1):
-             sfn = f"{self.filename}.{i}"
-             dfn = f"{self.filename}.{i+1}"
-             if os.path.exists(sfn):
-                 if os.path.exists(dfn):
-                     os.remove(dfn)
-                 os.rename(sfn, dfn)
-         if os.path.exists(self.filename):
-             dfn = f"{self.filename}.1"
-             if os.path.exists(dfn):
-                 os.remove(dfn)
-             os.rename(self.filename, dfn)
-
-         # Recreate an empty log file
+             sfn = log_path.with_suffix(f'.log.{i}')
+             dfn = log_path.with_suffix(f'.log.{i+1}')
+
+             if sfn.exists():
+                 try:
+                     # Try the rename directly; it fails if the target already exists
+                     os.rename(sfn, dfn)
+                 except FileExistsError:
+                     # The target already exists, so delete it and rename again.
+                     # This avoids the race condition of "check first, then act".
+                     dfn.unlink()  # pathlib's delete method
+                     os.rename(sfn, dfn)
+
+         # 2. Rename the current log file to the first backup, .1
+         if log_path.exists():
+             dfn = log_path.with_suffix('.log.1')
+             try:
+                 os.rename(log_path, dfn)
+             except FileExistsError:
+                 dfn.unlink()
+                 os.rename(log_path, dfn)
+
+         # Recreate an empty log file as a placeholder
          try:
              with open(self.filename, 'w', encoding='utf-8') as f:
                  pass
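The rewritten rollover uses a try/rename/except pattern (EAFP) instead of checking whether the target exists first. A standalone sketch of the pattern with hypothetical paths; note that the FileExistsError branch only fires on Windows, since POSIX os.rename silently replaces an existing destination (os.replace would overwrite atomically on both platforms):

    import os
    from pathlib import Path

    def rename_replacing(src: Path, dst: Path) -> None:
        # EAFP: attempt the rename rather than testing dst beforehand,
        # which narrows the race window between competing processes.
        try:
            os.rename(src, dst)
        except FileExistsError:
            dst.unlink()  # drop the stale target, then retry
            os.rename(src, dst)

    rename_replacing(Path("app.log"), Path("app.log.1"))  # hypothetical file names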
@@ -268,7 +284,6 @@ class MultiProcessSafeTimeRotatingHandler(MultiProcessSafeRotatingHandlerBase):
          super().__init__(filename, backupCount)
          self.when = when.upper()
          self.interval = max(1, int(interval))  # at least 1
-         self.last_rollover = int(time.time())

          # Mapping of supported units
          self.when_to_seconds = {
@@ -278,29 +293,90 @@ class MultiProcessSafeTimeRotatingHandler(MultiProcessSafeRotatingHandlerBase):
              'D': 86400,  # days
          }

-     def _should_rollover(self, record) -> bool:
-         now = int(time.time())
-
          if self.when not in self.when_to_seconds:
-             return False
+             raise ValueError(f"Invalid rollover interval specified: {self.when}")
+
+         # Compute the next rollover time right away, at initialization
+         self.rolloverAt = self._compute_next_rollover_time(int(time.time()))

-         seconds_per_unit = self.when_to_seconds[self.when]
-         total_interval_seconds = seconds_per_unit * self.interval
+     def _compute_next_rollover_time(self, current_time):
+         """
+         Compute the timestamp of the next rollover.
+         The core of this method is computing with local time, so the rollover lands at the correct local time.
+         """
+         t = time.localtime(current_time)
+
+         # Find the start of the current period, based on the rollover unit
+         if self.when == 'S':
+             current_period_start = time.mktime((t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec, t.tm_wday, t.tm_yday, t.tm_isdst))
+         elif self.when == 'M':
+             current_period_start = time.mktime((t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, 0, t.tm_wday, t.tm_yday, t.tm_isdst))
+         elif self.when == 'H':
+             current_period_start = time.mktime((t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, 0, 0, t.tm_wday, t.tm_yday, t.tm_isdst))
+         else:  # 'D' or anything else
+             current_period_start = time.mktime((t.tm_year, t.tm_mon, t.tm_mday, 0, 0, 0, t.tm_wday, t.tm_yday, t.tm_isdst))

-         # Work out which cycle we are in now (counting from 0)
-         current_cycle = now // total_interval_seconds
-         last_cycle = self.last_rollover // total_interval_seconds
+         # Next rollover time = start of the current period + N periods
+         next_rollover_time = current_period_start + (self.interval * self.when_to_seconds[self.when])

-         return current_cycle > last_cycle
+         # If that moment has already passed (e.g. the program started exactly on a boundary), push it back one more period
+         if next_rollover_time <= current_time:
+             next_rollover_time += self.interval * self.when_to_seconds[self.when]
+
+         return next_rollover_time
+
+     def _should_rollover(self, record) -> bool:
+         """
+         Fix 3: the check now compares against the next scheduled rollover time
+         """
+         # Timestamp at which the log record was created
+         record_time = int(record.created)
+
+         # Trigger a rollover once the record's time reaches the scheduled rollover point
+         return record_time >= self.rolloverAt

      def _do_rollover_impl(self):
-         # Rename by date, e.g. log.txt -> log.txt.2025-09-16
+         # 1. Perform the rollover: rename the current log file to a timestamped one
          date_str = time.strftime(self._get_rollover_format())
-         dfn = f"{self.filename}.{date_str}"
-         if os.path.exists(self.filename):
-             os.rename(self.filename, dfn)
-         with open(self.filename, 'w'): pass
-         self.last_rollover = int(time.time())
+         log_path = Path(self.filename)
+         dfn = log_path.with_name(f"{log_path.name}.{date_str}")
+
+         if log_path.exists():
+             try:
+                 log_path.rename(dfn)
+             except FileExistsError:
+                 dfn.unlink()
+                 log_path.rename(dfn)
+
+         # 2. Recreate an empty log file
+         try:
+             with open(self.filename, 'w', encoding='utf-8') as f:
+                 pass
+         except Exception as e:
+             print(f"Failed to recreate log file {self.filename}: {e}")
+
+         # 3. Update the next rollover time (the core step)
+         current_time = int(time.time())
+         self.rolloverAt = self._compute_next_rollover_time(current_time)
+
+         # --- Cleanup of old backups ---
+
+         # 4. Find all matching backup files
+         backup_pattern = log_path.with_name(f"{log_path.name}.*")
+         backup_files = glob.glob(str(backup_pattern))
+
+         # 5. If the number of backups exceeds the limit, clean them up
+         if len(backup_files) > self.backupCount:
+             # 6. Sort by filename (i.e. by timestamp) to find the oldest files
+             backup_files.sort()
+
+             # 7. Work out how many files to delete, then delete them
+             files_to_delete = backup_files[:-self.backupCount]
+             for file_to_delete in files_to_delete:
+                 try:
+                     Path(file_to_delete).unlink()
+                 except OSError as e:
+                     print(f"Error deleting old log file {file_to_delete}: {e}")

      def _get_rollover_format(self):
          """
src/mlog_util/log_manager.py
@@ -19,7 +19,7 @@ class LogManager:
          log_file: Optional[str] = None,
          add_console: bool = True,
          level: int = logging.INFO,
-         custom_handlers: Optional[List[logging.Handler]] = None,
+         custom_handlers: Optional[logging.Handler] = None,
      ) -> logging.Logger:
          """
          Get or create a configured logger.
@@ -69,10 +69,9 @@ class LogManager:
              "%(asctime)s | %(name)s | %(levelname)-8s | %(message)s",
              datefmt="%Y-%m-%d %H:%M:%S"
          )
-         for handler in custom_handlers:
-             if handler not in logger.handlers:
-                 handler.setFormatter(custom_formatter)
-                 logger.addHandler(handler)
+         if custom_handlers not in logger.handlers:
+             custom_handlers.setFormatter(custom_formatter)
+             logger.addHandler(custom_handlers)

          return logger

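The signature change means custom_handlers now takes a single handler rather than a list, matching how the new tests call it. A usage sketch based on those tests:

    from src.mlog_util import MultiProcessSafeSizeRotatingHandler, get_logger

    handler = MultiProcessSafeSizeRotatingHandler(
        filename="logs/size.log",
        maxBytes=1 * 1024 * 1024,  # rotate at 1 MB
        backupCount=5,
    )
    # 0.1.7: pass one handler, not a list of handlers.
    logger = get_logger("app", custom_handlers=handler, add_console=False)
    logger.info("hello")

Code written against 0.1.6 that still passes a list will now raise AttributeError, since a list has no setFormatter method.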
src/mlog_util.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: mlog-util
- Version: 0.1.6
+ Version: 0.1.7
  Summary: Add your description here
  Requires-Python: >=3.12
  Description-Content-Type: text/markdown
src/mlog_util.egg-info/SOURCES.txt
@@ -7,4 +7,5 @@ src/mlog_util.egg-info/SOURCES.txt
  src/mlog_util.egg-info/dependency_links.txt
  src/mlog_util.egg-info/requires.txt
  src/mlog_util.egg-info/top_level.txt
- tests/test_mlog.py
+ tests/test_size.py
+ tests/test_time.py
tests/test_size.py (new file)
@@ -0,0 +1,175 @@
+ import os
+ import time
+ import glob
+ import shutil
+ import multiprocessing
+ from src.mlog_util import MultiProcessSafeSizeRotatingHandler, get_logger
+
+ # --- Fix: define the worker functions at module top level ---
+
+ def write_logs_worker(process_id, num_messages=1000):
+     """Write logs in the given process (used by test 1)"""
+     handler = MultiProcessSafeSizeRotatingHandler(
+         filename="logs/size.log",
+         maxBytes=1 * 1024 * 1024,  # 1MB
+         backupCount=5
+     )
+     logger = get_logger(f"test_process_{process_id}", custom_handlers=handler, add_console=False)
+
+     for i in range(num_messages):
+         logger.info(f"Process {process_id} - Message {i}: " + "x" * 100)  # roughly 100 bytes per message
+
+ def concurrent_rotation_worker(worker_id):
+     """Worker process for the concurrent rotation test (used by test 3)"""
+     handler = MultiProcessSafeSizeRotatingHandler(
+         filename="logs/size.log",
+         maxBytes=500 * 1024,  # 500KB
+         backupCount=3
+     )
+     logger = get_logger(f"worker_{worker_id}", custom_handlers=handler, add_console=False)
+
+     for i in range(200):
+         logger.info(f"Worker {worker_id} - Log {i}: " + "z" * 150)
+         time.sleep(0.01)  # simulate a real workload
+
+ # --- Tests ---
+
+ def setup_test_environment():
+     """Set up the test environment, cleaning out old log files"""
+     if os.path.exists("logs"):
+         shutil.rmtree("logs")
+     os.makedirs("logs", exist_ok=True)
+
+ def test_large_backup_count():
+     """Test 1: large backupCount — verify rotation kicks in and no logs are lost"""
+     print("\n=== Test 1: large backupCount ===")
+     setup_test_environment()
+
+     # Start several processes that write logs
+     processes = []
+     for i in range(3):
+         # Use the top-level function
+         p = multiprocessing.Process(target=write_logs_worker, args=(i, 500))
+         processes.append(p)
+         p.start()
+
+     for p in processes:
+         p.join()
+
+     # Inspect the log files
+     log_files = sorted(glob.glob("logs/size.log*"))
+     print(f"Log files produced: {log_files}")
+
+     # Verify the file count does not exceed backupCount + 1
+     assert len(log_files) <= 6, f"Log file count {len(log_files)} exceeds the expected 6"
+
+     # Verify the contents of every file are intact
+     total_messages = 0
+     for log_file in log_files:
+         with open(log_file, 'r') as f:
+             lines = f.readlines()
+             total_messages += len(lines)
+
+     print(f"Total log lines: {total_messages} (expected: 1500)")
+     assert total_messages == 1500, f"Log line count {total_messages} does not equal the expected 1500"
+     print("✓ Test 1 passed: large backupCount")
+
+ def test_medium_backup_count():
+     """Test 2: medium backupCount — trigger the deletion mechanism"""
+     print("\n=== Test 2: medium backupCount ===")
+     setup_test_environment()
+
+     # Use a smaller backupCount
+     handler = MultiProcessSafeSizeRotatingHandler(
+         filename="logs/size.log",
+         maxBytes=1 * 1024 * 1024,  # 1MB
+         backupCount=2
+     )
+     logger = get_logger("test_medium", custom_handlers=handler, add_console=False)
+
+     # Write enough logs to trigger several rotations
+     for i in range(3000):
+         logger.info(f"Message {i}: " + "y" * 200)  # roughly 200 bytes per message
+
+     # Inspect the log files
+     log_files = sorted(glob.glob("logs/size.log*"))
+     print(f"Log files produced: {log_files}")
+
+     # Verify the file count does not exceed backupCount + 1
+     assert len(log_files) <= 3, f"Log file count {len(log_files)} exceeds the expected 3"
+
+     # Verify the current log file exists
+     assert os.path.exists("logs/size.log"), "Main log file is missing"
+
+     # Verify the backup files are numbered correctly
+     if len(log_files) > 1:
+         assert "size.log.1" in log_files[-1], "Backup file numbering is wrong"
+
+     print("✓ Test 2 passed: medium backupCount")
+
+ def test_concurrent_rotation():
+     """Test 3: concurrent rotation"""
+     print("\n=== Test 3: concurrent rotation ===")
+     setup_test_environment()
+
+     # Start several worker processes
+     processes = []
+     for i in range(5):
+         # Use the top-level function
+         p = multiprocessing.Process(target=concurrent_rotation_worker, args=(i,))
+         processes.append(p)
+         p.start()
+
+     for p in processes:
+         p.join()
+
+     # Inspect the log files
+     log_files = sorted(glob.glob("logs/size.log*"))
+     print(f"Log files produced by the concurrency test: {log_files}")
+
+     # Check the file count
+     assert len(log_files) <= 4, f"Concurrency test log file count {len(log_files)} exceeds the expected 4"
+
+     # Verify log integrity
+     total_lines = 0
+     for log_file in log_files:
+         with open(log_file, 'r') as f:
+             total_lines += len(f.readlines())
+
+     print(f"Total log lines in the concurrency test: {total_lines} (expected: 1000)")
+     assert total_lines == 1000, f"Concurrency test log line count {total_lines} does not equal the expected 1000"
+     print("✓ Test 3 passed: concurrent rotation")
+
+ def run_all_tests():
+     """Run all the tests"""
+     print("Starting the MultiProcessSafeSizeRotatingHandler automated tests...")
+     print(f"Current time: {time.strftime('%Y-%m-%d %H:%M:%S')}")
+
+     try:
+         test_large_backup_count()
+         test_medium_backup_count()
+         test_concurrent_rotation()
+
+         print("\n🎉 All tests passed!")
+     except AssertionError as e:
+         print(f"\n❌ Test failed: {e}")
+     except Exception as e:
+         print(f"\n💥 Test error: {e}")
+         import traceback
+         traceback.print_exc()  # print the full stack trace
+     finally:
+         # Clean up the test environment
+         if os.path.exists("logs"):
+             shutil.rmtree("logs")
+
+ if __name__ == "__main__":
+     # Set the start method so multiprocessing runs safely
+     # 'spawn' is the safest choice across platforms, but slower to start
+     # 'fork' (Unix only) starts faster, but can have side effects
+     try:
+         multiprocessing.set_start_method('spawn', force=True)
+     except RuntimeError:
+         # Some environments (e.g. Jupyter Notebook) may have already set the start method
+         pass
+
+     run_all_tests()
tests/test_time.py (new file)
@@ -0,0 +1,108 @@
+ import os
+ import time
+ import glob
+ import shutil
+ import multiprocessing
+ from pathlib import Path
+ from src.mlog_util import MultiProcessSafeTimeRotatingHandler, get_logger
+
+ def setup_test_environment():
+     """Set up the test environment, cleaning out old log files"""
+     if os.path.exists("logs"):
+         shutil.rmtree("logs")
+     os.makedirs("logs", exist_ok=True)
+
+ def test_large_backup_count():
+     """Test 1: large backupCount — verify rotation kicks in and no logs are lost"""
+     print("\n=== Test 1: large backupCount ===")
+     setup_test_environment()
+
+     handler = MultiProcessSafeTimeRotatingHandler(
+         filename="logs/time.log",
+         when="S",
+         backupCount=5  # 5 * 10 = 50s
+     )
+     logger = get_logger(f"test_time", custom_handlers=handler, add_console=False)
+
+     add_nums = 0
+     for i in range(22):
+         logger.info(f"{i=}")
+         add_nums += 1
+         time.sleep(1)
+
+     # Check the file count
+     files_list = list(Path("logs").glob("time.log*"))
+     files_num = len(files_list)
+     assert files_num == 3, f"File count {files_num} does not equal the expected 3"
+     print(f"File count = {files_num}, as expected")
+
+     # Verify the contents of every file are intact
+     total_messages = 0
+     for log_file in files_list:
+         with open(log_file, 'r') as f:
+             lines = f.readlines()
+             total_messages += len(lines)
+
+     assert total_messages == 22, f"Log line count {total_messages} does not equal the expected 22"
+     print(f"The logs contain {total_messages} lines in total")
+
+ def test_medium_backup_count():
+     """
+     Check that the number of rotated files matches expectations
+     """
+     print("\n=== Test 2: backupCount count ===")
+     setup_test_environment()
+
+     handler = MultiProcessSafeTimeRotatingHandler(
+         filename="logs/time.log",
+         when="S",
+         backupCount=3  # 2 * 10 = 20s
+     )
+     logger = get_logger(f"test_time", custom_handlers=handler, add_console=False)
+     for i in range(32):
+         logger.info(f"{i=}")
+         time.sleep(1)
+
+     # Check the file count
+     files_list = list(Path("logs").glob("time.log*"))
+     files_num = len(files_list)
+     assert files_num == 3, f"File count {files_num} does not equal the expected 3"
+     print(f"File count = {files_num}, as expected")
+
+     """
+     TODO: not sure how to write a test for the rotation order (see the sketch after this file)
+     """
+
+ def run_all_tests():
+     """Run all the tests"""
+     print("Starting the MultiProcessSafeTimeRotatingHandler automated tests...")
+     try:
+         test_large_backup_count()
+         test_medium_backup_count()
+         # test_concurrent_rotation()
+
+         print("\n🎉 All tests passed!")
+     except AssertionError as e:
+         print(f"\n❌ Test failed: {e}")
+     except Exception as e:
+         print(f"\n💥 Test error: {e}")
+         import traceback
+         traceback.print_exc()  # print the full stack trace
+     finally:
+         # Clean up the test environment
+         if os.path.exists("logs"):
+             shutil.rmtree("logs")
+         pass
+
+
+ if __name__ == "__main__":
+     # Set the start method so multiprocessing runs safely
+     # 'spawn' is the safest choice across platforms, but slower to start
+     # 'fork' (Unix only) starts faster, but can have side effects
+     try:
+         multiprocessing.set_start_method('spawn', force=True)
+     except RuntimeError:
+         # Some environments (e.g. Jupyter Notebook) may have already set the start method
+         pass
+
+     run_all_tests()
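On the TODO in test_time.py about testing rotation order: since the cleanup logic in handlers.py already relies on the timestamp suffixes sorting lexicographically, one option is to assert that lexical order agrees with modification-time order. A hedged sketch with an assumed helper name:

    from pathlib import Path

    def assert_backups_ordered(log_dir: str = "logs") -> None:
        # If rotation behaved, backups that sort lexicographically by name
        # should also be ordered oldest-first by modification time.
        backups = sorted(Path(log_dir).glob("time.log.*"))
        mtimes = [p.stat().st_mtime for p in backups]
        assert mtimes == sorted(mtimes), "backup order does not match rotation order"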
tests/test_mlog.py (deleted)
@@ -1,37 +0,0 @@
- import time
- import os
- import mlog
- from mlog import LogManager
- from multiprocessing import Pool
-
- log_file = "logs/a1.log"
-
- log_manager = LogManager()
- logger_a1 = log_manager.get_logger("a1", log_file=log_file, add_console=False)
- # logger_a2 = log_manager.get_logger("a2", log_file=log_file, add_console=False)
- # logger_a3 = log_manager.get_logger("a3", log_file=log_file, add_console=False)
-
-
- # Time how long num log records take
- def test_speed_time(num = 500):
-     import time
-     _st = time.time()
-     for i in range(num):
-         logger_a1.info(i)
-     logger_a1.info(f"{num} --- {time.time() - _st}")
-
- # Multi-process test
- # are all the log lines present?
- def test_logger(x):
-     _pid = os.getpid()
-     logger_a1.info(f"{_pid} -- {x}")
-
-
- if __name__ == "__main__":
-     with open(log_file, "w") as f:
-         pass
-     with Pool(2) as pool:
-         pool.map(test_logger, range(0, 5000))
-
-
-