roblox-studio-physical-operation-mcp 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- roblox_studio_physical_operation_mcp/__init__.py +13 -0
- roblox_studio_physical_operation_mcp/__main__.py +8 -0
- roblox_studio_physical_operation_mcp/log_filter.py +99 -0
- roblox_studio_physical_operation_mcp/log_utils.py +467 -0
- roblox_studio_physical_operation_mcp/server.py +602 -0
- roblox_studio_physical_operation_mcp/studio_manager.py +476 -0
- roblox_studio_physical_operation_mcp/toolbar_detector.py +513 -0
- roblox_studio_physical_operation_mcp/windows_utils.py +578 -0
- roblox_studio_physical_operation_mcp-0.1.0.dist-info/METADATA +273 -0
- roblox_studio_physical_operation_mcp-0.1.0.dist-info/RECORD +13 -0
- roblox_studio_physical_operation_mcp-0.1.0.dist-info/WHEEL +4 -0
- roblox_studio_physical_operation_mcp-0.1.0.dist-info/entry_points.txt +2 -0
- roblox_studio_physical_operation_mcp-0.1.0.dist-info/licenses/LICENSE +21 -0
|
"""
Roblox Studio MCP service.

Provides the following features:
- Game control: start_game, stop_game, pause_resume_game
- Log analysis: get_recent_logs, search_logs_by_pattern
- Visual capture: capture_screenshot, start_recording, stop_recording
- System tools: get_studio_status, clean_logs, open_place, close_place
"""

from .server import mcp, main

__all__ = ["mcp", "main"]
|
|
"""
Log filtering rules.

Used to filter Roblox Studio internal entries out of FLog::Output,
keeping only user-script output.
"""

from typing import List
# Exclusion prefixes: log messages starting with any of these strings are dropped.
EXCLUDE_PREFIXES: List[str] = [
    # Studio internal logs: "Info:" followed by a tab indent
    'Info: \t',

    # Studio internal logs: various "Info:"-prefixed internal messages
    'Info: RobloxScriptDoc',
    'Info: RPC:',

    # Studio version and architecture banners
    'Studio Version:',
    'Studio Architecture:',
    '*******',
    'RobloxGitHash:',

    # Assets and paths
    'setExtraAssetFolder',
    'setAssetFolder',
    'Reflection::load',

    # GPU and driver info
    'Studio D3D',
    'ESGamePerfMonitor',

    # AB tests and cloud plugins
    'ABTestFramework',
    'Web returned cloud plugins',

    # Lua Ribbon loading
    'Loading Lua Ribbon',

    # TeamCreate
    'TeamCreateWidget',

    # Settings
    'settingsUrl:',
    'Settings ',

    # Deferred crash evaluation
    'Evaluating deferred',

    # Lua flag references
    'Flag ',

    # Policy and base URLs
    'Creating PolicyContext',
    'BaseUrl:',

    # Session and machine info
    'Session GUID',
    'Machine GUID',
    'Studio Launch Intent',
    'Is Studio Configured',

    # Install path
    'isSupportedInstallLocation',

    # Locale settings
    'preferredLocale',
    'systemLocale',
]


def should_exclude(message: str) -> bool:
    """
    Decide whether a log message should be excluded.

    Args:
        message: The log message text.

    Returns:
        True if the message starts with one of EXCLUDE_PREFIXES and should
        be dropped, False if it should be kept.
    """
    # str.startswith accepts a tuple of prefixes -- one C-level call instead
    # of a Python loop. The tuple is rebuilt per call so runtime additions to
    # EXCLUDE_PREFIXES are still honoured.
    return message.startswith(tuple(EXCLUDE_PREFIXES))


def filter_logs(messages: List[str]) -> List[str]:
    """
    Filter a list of log messages.

    Args:
        messages: Raw log messages.

    Returns:
        The messages that survive the exclusion rules, in original order.
    """
    return [msg for msg in messages if not should_exclude(msg)]
|
|
"""
Log utility module: log reading, searching, etc.

Optimisations:
- Reads from the end of the file in reverse, suitable for large files
- Supports filtering to specific categories (e.g. FLog::Output)
"""

import os
import re
from typing import Optional, Generator
from dataclasses import dataclass

# Roblox Studio writes its log files here (Windows-only %LOCALAPPDATA% path).
LOG_DIR = os.path.expandvars(r"%LOCALAPPDATA%\Roblox\logs")

# Categories read by default when the caller does not specify any.
DEFAULT_CATEGORIES = ["FLog::Output"]
# Matches Studio log lines of either shape:
#   2026-02-03T08:52:02.095Z,128.095795,1996c,12 [DFLog::HttpTraceError] message
#   2026-02-03T08:52:04.244Z,130.244095,12f4,6,Info [FLog::...] message
# Compiled once at import time so per-line parsing skips the cache lookup.
_LOG_LINE_RE = re.compile(
    r'^(\d{4}-\d{2}-\d{2}T[\d:.]+Z),[\d.]+,[a-f0-9]+,\d+(?:,(\w+))?\s*\[([^\]]+)\]\s*(.*)$'
)


@dataclass
class LogEntry:
    # ISO-8601 timestamp as it appears in the log, e.g. "2026-02-03T08:52:02.095Z"
    timestamp: str
    # Log level, e.g. "Info"; defaults to "Info" when the line carries none
    level: str
    # Bracketed category tag, e.g. "FLog::Output"
    category: str
    # Message text after the category tag
    message: str
    # The original, unparsed line
    raw: str


def parse_log_line(line: str) -> Optional[LogEntry]:
    """
    Parse a single log line.

    Args:
        line: One raw log line.

    Returns:
        A LogEntry on success, or None if the line does not match the
        expected format.
    """
    match = _LOG_LINE_RE.match(line)
    if not match:
        return None
    return LogEntry(
        timestamp=match.group(1),
        level=match.group(2) or "Info",
        category=match.group(3),
        message=match.group(4),
        raw=line,
    )
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
def read_file_reverse(file_path: str, chunk_size: int = 8192) -> Generator[str, None, None]:
    """
    Yield the lines of a file from last to first.

    Reads backwards in fixed-size chunks, which is more efficient for large
    files than reading everything and reversing.

    Args:
        file_path: Path of the file to read.
        chunk_size: Number of bytes to read per backwards step.

    Yields:
        Non-empty, stripped lines, decoded as UTF-8 (undecodable bytes
        ignored), in reverse file order.
    """
    with open(file_path, 'rb') as fh:
        # Jump to the end to learn the total size.
        fh.seek(0, 2)
        pos = fh.tell()
        tail = b''

        while pos > 0:
            # Step back by one chunk (or whatever is left).
            step = chunk_size if pos >= chunk_size else pos
            pos -= step
            fh.seek(pos)

            # Prepend the new chunk to the unprocessed remainder.
            tail = fh.read(step) + tail

            parts = tail.split(b'\n')

            # parts[0] may be a partial line; keep it for the next round.
            tail = parts[0]

            # Emit the complete lines, newest first.
            for raw in parts[:0:-1]:
                text = raw.decode('utf-8', errors='ignore').strip()
                if text:
                    yield text

        # Whatever is left is the very first line of the file.
        if tail:
            text = tail.decode('utf-8', errors='ignore').strip()
            if text:
                yield text
|
90
|
+
|
|
91
|
+
from .log_filter import should_exclude
|
|
92
|
+
|
|
93
|
+
|
|
94
|
+
def get_logs_from_line(
    log_path: str,
    after_line: Optional[int] = None,
    before_line: Optional[int] = None,
    timestamps: bool = False,
    categories: Optional[list[str]] = None,
    apply_filter: bool = True
) -> dict:
    """
    Read logs from a given line range.

    Args:
        log_path: Path to the log file.
        after_line: Read only lines after this line number; None = from the start.
        before_line: Stop before this line number (exclusive); None = to the end.
        timestamps: Whether to prefix each message with its HH:MM:SS time.
        categories: Only return these categories; defaults to ["FLog::Output"].
        apply_filter: Whether to drop Studio-internal messages via should_exclude.

    Returns:
        {
            "logs": log text,
            "start_line": first returned line number,
            "last_line": last returned line number,
            "remaining": number of matching lines not returned,
            "has_more": whether more matching lines remain
        }
    """
    # Cap on the total size of the returned text, so responses stay small.
    MAX_BYTES = 32000

    if not os.path.exists(log_path):
        return {"logs": "", "start_line": 0, "last_line": 0, "remaining": 0, "has_more": False}

    if categories is None:
        categories = DEFAULT_CATEGORIES

    start_line = None
    last_line = 0
    current_bytes = 0
    log_lines = []
    remaining = 0
    bytes_exceeded = False

    try:
        with open(log_path, 'r', encoding='utf-8', errors='ignore') as f:
            for line_num, line in enumerate(f, 1):
                # Skip lines up to and including after_line
                if after_line is not None and line_num <= after_line:
                    continue

                # Stop at before_line (exclusive)
                if before_line is not None and line_num >= before_line:
                    break

                line = line.strip()
                if not line:
                    continue

                entry = parse_log_line(line)
                if not entry:
                    continue

                # Category filter
                if categories and entry.category not in categories:
                    continue

                # Exclusion rules for Studio-internal noise
                if apply_filter and should_exclude(entry.message):
                    continue

                # This is a valid log entry
                remaining += 1

                # Once over the byte budget, keep counting but stop collecting
                if bytes_exceeded:
                    continue

                # Format the output line
                if timestamps:
                    time_part = entry.timestamp[11:19]
                    output_line = f"[{time_part}] {entry.message}"
                else:
                    output_line = entry.message

                line_bytes = len(output_line.encode('utf-8')) + 1

                # Enforce the byte budget (but always return at least one line)
                if current_bytes + line_bytes > MAX_BYTES and log_lines:
                    bytes_exceeded = True
                    continue

                if start_line is None:
                    start_line = line_num

                log_lines.append(output_line)
                last_line = line_num
                current_bytes += line_bytes

    except Exception:
        # Best-effort: if reading fails midway, return what was collected.
        pass

    returned_count = len(log_lines)
    return {
        "logs": "\n".join(log_lines),
        "start_line": start_line or 0,
        "last_line": last_line,
        "remaining": remaining - returned_count,
        "has_more": remaining > returned_count
    }
|
204
|
+
|
|
205
|
+
def get_recent_logs(
    log_path: str,
    limit: int = 100,
    categories: list[str] = None,
    min_level: Optional[str] = None,
    apply_filter: bool = True
) -> list[LogEntry]:
    """
    Read the most recent entries from the end of a log file.

    Args:
        log_path: Path to the log file.
        limit: Maximum number of entries to return.
        categories: Only return these categories; defaults to ["FLog::Output"].
        min_level: Only return entries whose level equals this (case-insensitive).
        apply_filter: Drop Studio-internal messages via should_exclude (default True).

    Returns:
        Matching entries in chronological order (oldest first).
    """
    if not os.path.exists(log_path):
        return []

    wanted = DEFAULT_CATEGORIES if categories is None else categories

    collected = []
    try:
        # Walk backwards from the end so we only touch the newest lines.
        for raw_line in read_file_reverse(log_path):
            parsed = parse_log_line(raw_line)
            if parsed is None:
                continue

            # Category filter
            if wanted and parsed.category not in wanted:
                continue

            # Level filter (exact, case-insensitive match)
            if min_level and parsed.level.lower() != min_level.lower():
                continue

            # Exclusion rules for Studio-internal noise
            if apply_filter and should_exclude(parsed.message):
                continue

            collected.append(parsed)
            if len(collected) >= limit:
                break
    except Exception:
        # Best-effort: return whatever was gathered before the error.
        pass

    # Flip back to chronological order (oldest first).
    collected.reverse()
    return collected
258
|
+
|
|
259
|
+
|
|
260
|
+
def get_all_logs(
    log_path: str,
    limit: int = 100,
    min_level: Optional[str] = None
) -> list[LogEntry]:
    """
    Fetch recent logs of every category (no category filtering).

    Thin wrapper around get_recent_logs; passing an empty category list
    disables the category filter.
    """
    return get_recent_logs(log_path, limit=limit, categories=[], min_level=min_level)
|
269
|
+
|
|
270
|
+
|
|
271
|
+
def search_logs(
    log_path: str,
    pattern: str,
    limit: int = 50,
    categories: list[str] = None
) -> list[LogEntry]:
    """
    Search the log file for entries matching a pattern.

    NOTE(review): the pattern is matched (case-insensitively) against the
    whole raw line, including the timestamp prefix -- unlike
    search_logs_from_line, which matches only the parsed message.

    Args:
        log_path: Path to the log file.
        pattern: Regular-expression pattern.
        limit: Maximum number of entries to return.
        categories: Restrict the search to these categories; None = all.

    Returns:
        Matching entries in chronological order (oldest first).
    """
    if not os.path.exists(log_path):
        return []

    matcher = re.compile(pattern, re.IGNORECASE)
    hits = []

    try:
        # Walk backwards so the newest matches are found first.
        for raw_line in read_file_reverse(log_path):
            if matcher.search(raw_line) is None:
                continue

            parsed = parse_log_line(raw_line)
            if parsed is None:
                continue

            if categories and parsed.category not in categories:
                continue

            hits.append(parsed)
            if len(hits) >= limit:
                break
    except Exception:
        # Best-effort: keep whatever matched before the error.
        pass

    # Flip back to chronological order (oldest first).
    hits.reverse()
    return hits
|
311
|
+
|
|
312
|
+
|
|
313
|
+
def search_logs_from_line(
    log_path: str,
    pattern: str,
    after_line: Optional[int] = None,
    before_line: Optional[int] = None,
    timestamps: bool = False,
    categories: Optional[list[str]] = None,
    apply_filter: bool = True
) -> dict:
    """
    Search logs within a given line range.

    Args:
        log_path: Path to the log file.
        pattern: Regular-expression pattern (matched against the parsed message).
        after_line: Search only lines after this line number.
        before_line: Stop before this line number (exclusive).
        timestamps: Whether to prefix each message with its HH:MM:SS time.
        categories: Only search these categories; defaults to ["FLog::Output"].
        apply_filter: Whether to drop Studio-internal messages via should_exclude.

    Returns:
        {
            "logs": matching log text (each line prefixed "lineno|"),
            "start_line": first returned line number,
            "last_line": last returned line number,
            "match_count": number of matches returned,
            "remaining": number of matches not returned,
            "has_more": whether more matches remain
        }
        or {"error": ...} when the regex pattern is invalid.
    """
    # Cap on the total size of the returned text, so responses stay small.
    MAX_BYTES = 32000

    if not os.path.exists(log_path):
        return {"logs": "", "start_line": 0, "last_line": 0, "match_count": 0, "remaining": 0, "has_more": False}

    if categories is None:
        categories = DEFAULT_CATEGORIES

    try:
        regex = re.compile(pattern, re.IGNORECASE)
    except re.error:
        return {"error": f"Invalid regex pattern: {pattern}"}

    start_line = None
    last_line = 0
    current_bytes = 0
    log_lines = []
    match_count = 0
    bytes_exceeded = False

    try:
        with open(log_path, 'r', encoding='utf-8', errors='ignore') as f:
            for line_num, line in enumerate(f, 1):
                # Skip lines up to and including after_line
                if after_line is not None and line_num <= after_line:
                    continue

                # Stop at before_line (exclusive)
                if before_line is not None and line_num >= before_line:
                    break

                line = line.strip()
                if not line:
                    continue

                entry = parse_log_line(line)
                if not entry:
                    continue

                # Category filter
                if categories and entry.category not in categories:
                    continue

                # Exclusion rules for Studio-internal noise
                if apply_filter and should_exclude(entry.message):
                    continue

                # Regex match against the parsed message only
                if not regex.search(entry.message):
                    continue

                match_count += 1

                # Once over the byte budget, keep counting but stop collecting
                if bytes_exceeded:
                    continue

                if timestamps:
                    time_part = entry.timestamp[11:19]
                    output_line = f"{line_num}|[{time_part}] {entry.message}"
                else:
                    output_line = f"{line_num}|{entry.message}"

                line_bytes = len(output_line.encode('utf-8')) + 1

                # Enforce the byte budget (but always return at least one line)
                if current_bytes + line_bytes > MAX_BYTES and log_lines:
                    bytes_exceeded = True
                    continue

                if start_line is None:
                    start_line = line_num

                log_lines.append(output_line)
                last_line = line_num
                current_bytes += line_bytes

    except Exception:
        # Best-effort: if reading fails midway, return what was collected.
        pass

    returned_count = len(log_lines)
    return {
        "logs": "\n".join(log_lines),
        "start_line": start_line or 0,
        "last_line": last_line,
        "match_count": returned_count,
        "remaining": match_count - returned_count,
        "has_more": match_count > returned_count
    }
|
+
|
|
428
|
+
|
|
429
|
+
def find_latest_studio_log() -> Optional[str]:
    """
    Find the most recently modified Studio log file.

    Returns:
        Full path of the newest "*Studio*.log" file under LOG_DIR, or None
        when the directory does not exist or contains no such file.
    """
    if not os.path.exists(LOG_DIR):
        return None

    studio_logs = []
    for name in os.listdir(LOG_DIR):
        if "Studio" in name and name.endswith(".log"):
            path = os.path.join(LOG_DIR, name)
            try:
                studio_logs.append((path, os.path.getmtime(path)))
            except OSError:
                # File vanished between listdir() and getmtime(); skip it
                # instead of crashing (Studio rotates logs concurrently).
                continue

    if not studio_logs:
        return None

    # Only the newest entry is needed; max() is O(n) vs sorting's O(n log n).
    return max(studio_logs, key=lambda item: item[1])[0]
445
|
+
|
|
446
|
+
|
|
447
|
+
def clean_old_logs(days: int = 7) -> int:
    """
    Delete log files in LOG_DIR older than the given number of days.

    Args:
        days: Age threshold in days; files whose mtime is older are removed.

    Returns:
        The number of files deleted. Files that cannot be stat'ed or removed
        are skipped silently (best-effort cleanup).
    """
    if not os.path.exists(LOG_DIR):
        return 0

    import time

    count = 0
    # time.time() gives the same epoch seconds as datetime.now().timestamp()
    # without constructing a datetime object.
    now = time.time()
    threshold = days * 24 * 60 * 60

    for name in os.listdir(LOG_DIR):
        if not name.endswith(".log"):
            continue
        path = os.path.join(LOG_DIR, name)
        try:
            if now - os.path.getmtime(path) > threshold:
                os.remove(path)
                count += 1
        except OSError:
            # getmtime/remove raise OSError on races or permission issues;
            # narrow catch so real bugs are not swallowed.
            pass

    return count
|