iflow-mcp_galaxyxieyu_api-auto-test 0.1.0 (py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,132 @@
+"""
+MCP Metrics Tools
+MCP call metrics and statistics tools
+"""
+
+from __future__ import annotations
+
+import json
+import time
+from collections import Counter, deque
+from datetime import datetime, timedelta
+from typing import Any
+
+from mcp.server.fastmcp import FastMCP
+
+from atf.core.log_manager import log
+from atf.mcp.models import McpMetricsResponse
+from atf.mcp.utils import MCP_CALLS_LOG, build_error_payload, log_tool_call, new_request_id
+
+
+def _parse_timestamp(value: str) -> datetime | None:
+    try:
+        return datetime.fromisoformat(value)
+    except ValueError:
+        return None
+
+
+def _load_recent_records(limit: int, since_minutes: int | None) -> list[dict[str, Any]]:
+    if not MCP_CALLS_LOG.exists():
+        return []
+
+    cutoff = None
+    if since_minutes is not None and since_minutes > 0:
+        cutoff = datetime.now().astimezone() - timedelta(minutes=since_minutes)
+
+    records: deque[dict[str, Any]] = deque(maxlen=limit if limit > 0 else None)
+
+    with MCP_CALLS_LOG.open("r", encoding="utf-8") as file:
+        for line in file:
+            line = line.strip()
+            if not line:
+                continue
+            try:
+                record = json.loads(line)
+            except json.JSONDecodeError:
+                continue
+            if cutoff:
+                ts = _parse_timestamp(record.get("timestamp", ""))
+                if ts is None or ts < cutoff:
+                    continue
+            records.append(record)
+
+    return list(records)
+
+
+def register_metrics_tools(mcp: FastMCP) -> None:
+    """Register the MCP metrics tools."""
+
+    @mcp.tool(
+        name="get_mcp_metrics",
+        title="Get MCP call metrics",
+        description="Return the success rate, latency, and error-code distribution of recent MCP tool calls.",
+    )
+    def get_mcp_metrics(
+        limit: int = 500,
+        since_minutes: int | None = None,
+    ) -> McpMetricsResponse:
+        request_id = new_request_id()
+        start_time = time.perf_counter()
+        try:
+            records = _load_recent_records(limit, since_minutes)
+            total = len(records)
+            if total == 0:
+                response = McpMetricsResponse(
+                    status="ok",
+                    request_id=request_id,
+                    total=0,
+                    success=0,
+                    error=0,
+                    success_rate=1.0,
+                    avg_latency_ms=0.0,
+                    p95_latency_ms=0.0,
+                    error_codes={},
+                    window_minutes=since_minutes,
+                )
+            else:
+                success = sum(1 for r in records if r.get("status") == "ok")
+                error = total - success
+                latencies = sorted(int(r.get("latency_ms", 0)) for r in records)
+                avg_latency = round(sum(latencies) / total, 2) if total else 0.0
+                p95_index = max(0, int(total * 0.95) - 1)
+                p95_latency = latencies[p95_index] if latencies else 0.0
+                error_codes = Counter(
+                    r.get("error_code") for r in records if r.get("status") != "ok" and r.get("error_code")
+                )
+
+                response = McpMetricsResponse(
+                    status="ok",
+                    request_id=request_id,
+                    total=total,
+                    success=success,
+                    error=error,
+                    success_rate=round(success / total, 4) if total else 1.0,
+                    avg_latency_ms=avg_latency,
+                    p95_latency_ms=p95_latency,
+                    error_codes=dict(error_codes),
+                    window_minutes=since_minutes,
+                )
+        except Exception as exc:
+            log.error(f"Failed to collect MCP metrics: {exc}")
+            payload = build_error_payload(
+                code="MCP_METRICS_ERROR",
+                message=str(exc),
+                retryable=False,
+                details={"error_type": "unknown_error"},
+            )
+            response = McpMetricsResponse(
+                status="error",
+                request_id=request_id,
+                **payload,
+            )
+
+        latency_ms = int((time.perf_counter() - start_time) * 1000)
+        log_tool_call(
+            "get_mcp_metrics",
+            request_id,
+            response.status,
+            latency_ms,
+            response.error_code,
+            meta={"limit": limit, "since_minutes": since_minutes},
+        )
+        return response
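
The aggregation in get_mcp_metrics above assumes each line of MCP_CALLS_LOG is a JSON object carrying at least "timestamp" (ISO-8601, presumably timezone-aware since it is compared against an aware cutoff), "status", "latency_ms", and "error_code"; the actual schema is whatever log_tool_call in atf.mcp.utils writes. A minimal sketch, under that assumed record shape, of how two purely illustrative records would be summarised with the same nearest-rank p95 logic:

    records = [
        {"timestamp": "2025-01-01T10:00:00+00:00", "status": "ok", "latency_ms": 12, "error_code": None},
        {"timestamp": "2025-01-01T10:01:00+00:00", "status": "error", "latency_ms": 480, "error_code": "MCP_METRICS_ERROR"},
    ]
    total = len(records)                                               # 2
    success = sum(1 for r in records if r.get("status") == "ok")      # 1
    latencies = sorted(int(r.get("latency_ms", 0)) for r in records)  # [12, 480]
    p95_index = max(0, int(total * 0.95) - 1)                         # int(1.9) - 1 = 0
    print(success / total, latencies[p95_index])                      # 0.5 12
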
@@ -0,0 +1,380 @@
+"""
+Test Runner Tools
+Test execution tools (consolidated: run_tests = run_testcase + run_testcases)
+"""
+
+import time
+import uuid
+from pathlib import Path
+from typing import Literal
+
+from mcp.server.fastmcp import FastMCP
+from pydantic import ValidationError
+
+from atf.core.log_manager import log
+from atf.mcp.models import (
+    AssertionResultModel,
+    RunTestsResponse,
+    GetTestResultsResponse,
+    TestResultHistoryModel,
+    TestResultModel,
+)
+from atf.mcp.tools.testcase_tools import format_validation_error
+from atf.mcp.utils import (
+    build_error_payload,
+    get_roots,
+    load_yaml_file,
+    log_tool_call,
+    new_request_id,
+    parse_testcase_input,
+    parse_unittest_input,
+    resolve_tests_root,
+    resolve_yaml_path,
+    expected_py_path,
+    detect_testcase_type,
+)
+from atf.mcp.executor import (
+    execute_single_test,
+    run_pytest,
+    save_to_history,
+    get_history,
+)
+
+
+def register_runner_tools(mcp: FastMCP) -> None:
+    """Register the test runner tools."""
+
+    @mcp.tool(
+        name="run_tests",
+        title="Run test cases",
+        description="Run a single YAML test case or a batch of them; supports integration (testcase) and unit (unittest) tests.\n\n"
+        "[Mandatory rules for AI assistants]\n"
+        "- Before running, make sure the YAML has had its matching pytest script (tests/scripts) generated/updated via `write_testcase`. If the script is missing, call `write_testcase` first instead of asking the user to create files by hand.\n"
+        "- Every call must explicitly pass `workspace` (the project root directory).\n\n"
+        "**Execution modes (auto-detected)**:\n"
+        "- Pass `yaml_path` → run a single test case\n"
+        "- Pass `root_path` → run every test case under that directory\n\n"
+        "**Parameters**:\n"
+        "- `yaml_path`: path of a single test case (relative to workspace); mutually exclusive with root_path\n"
+        "- `root_path`: test directory path, defaults to tests\n"
+        "- `test_type`: `all` | `integration` | `unit`; only used in batch mode\n"
+        "- `workspace`: **required**, the project root directory\n"
+        "- `python_path`: optional, path to the Python interpreter, e.g. `/path/to/venv/bin/python`\n\n"
+        "**Return values**:\n"
+        "- `mode`: single = single case, batch = batch run\n"
+        "- single mode: test_name, yaml_path, py_path, result\n"
+        "- batch mode: total, passed, failed, skipped, duration, results\n\n"
+        "**Examples**:\n"
+        "```json\n"
+        "# Single test\n"
+        "{\n"
+        " \"yaml_path\": \"tests/cases/auth_integration.yaml\",\n"
+        " \"workspace\": \"/Volumes/DATABASE/code/glam-cart/backend\"\n"
+        "}\n\n"
+        "# Batch run\n"
+        "{\n"
+        " \"root_path\": \"tests/cases\",\n"
+        " \"test_type\": \"integration\",\n"
+        " \"workspace\": \"/Volumes/DATABASE/code/glam-cart/backend\",\n"
+        " \"python_path\": \"/Volumes/DATABASE/code/glam-cart/backend/venv/bin/python\"\n"
+        "}\n"
+        "```",
+    )
+    def run_tests(
+        yaml_path: str | None = None,
+        root_path: str | None = None,
+        test_type: Literal["all", "integration", "unit"] = "all",
+        workspace: str | None = None,
+        python_path: str | None = None,
+    ) -> RunTestsResponse:
+        """Run a single test case or a batch of test cases."""
+        # Determine the execution mode
+        is_single_mode = yaml_path is not None
+        request_id = new_request_id()
+        start_time = time.perf_counter()
+
+        try:
+            if is_single_mode:
+                # ========== Single-case mode ==========
+                yaml_full_path, yaml_relative_path, repo_root = resolve_yaml_path(yaml_path, workspace)
+                yaml_data = load_yaml_file(yaml_full_path)
+
+                testcase_type = detect_testcase_type(yaml_data)
+                if testcase_type == "unittest":
+                    testcase_model = parse_unittest_input(yaml_data)
+                else:
+                    testcase_model = parse_testcase_input(yaml_data)
+                test_name = testcase_model.name
+
+                py_full_path, py_relative_path = expected_py_path(
+                    yaml_full_path=yaml_full_path,
+                    testcase_name=test_name,
+                    workspace=workspace,
+                )
+
+                if not py_full_path.exists():
+                    payload = build_error_payload(
+                        code="MCP_PYTEST_NOT_FOUND",
+                        message=f"pytest file not found: {py_relative_path}",
+                        retryable=False,
+                        details={"error_type": "file_not_found", "py_path": py_relative_path},
+                    )
+                    response = RunTestsResponse(
+                        status="error",
+                        request_id=request_id,
+                        mode="single",
+                        test_name=test_name,
+                        yaml_path=yaml_relative_path,
+                        **payload,
+                    )
+                    latency_ms = int((time.perf_counter() - start_time) * 1000)
+                    log_tool_call(
+                        "run_tests",
+                        request_id,
+                        response.status,
+                        latency_ms,
+                        response.error_code,
+                        meta={"mode": "single", "yaml_path": yaml_path},
+                    )
+                    return response
+
+                result_data = run_pytest(str(py_full_path), repo_root, python_path)
+                result = TestResultModel(
+                    test_name=result_data["test_name"],
+                    status=result_data["status"],
+                    duration=result_data["duration"],
+                    assertions=[
+                        AssertionResultModel(**a) for a in result_data.get("assertions", [])
+                    ],
+                    error_message=result_data.get("error_message"),
+                    traceback=result_data.get("traceback"),
+                    report_path=result_data.get("report_path"),
+                )
+
+                response = RunTestsResponse(
+                    status="ok",
+                    request_id=request_id,
+                    mode="single",
+                    test_name=test_name,
+                    yaml_path=yaml_relative_path,
+                    py_path=py_relative_path,
+                    result=result,
+                    has_failures=result.status != "passed",
+                )
+
+            else:
+                # ========== Batch mode ==========
+                batch_start_time = time.time()
+                results = []
+
+                repo_root, _, cases_root, _ = get_roots(workspace)
+                cases_root_resolved = cases_root.resolve(strict=False)
+
+                if root_path:
+                    raw_path = Path(root_path)
+                    if raw_path.is_absolute():
+                        base_dir = raw_path.resolve(strict=False)
+                    else:
+                        base_dir = (repo_root / raw_path).resolve(strict=False)
+                else:
+                    base_dir = cases_root_resolved
+
+                if not base_dir.exists():
+                    payload = build_error_payload(
+                        code="MCP_DIRECTORY_NOT_FOUND",
+                        message=f"Directory not found: {base_dir}",
+                        retryable=False,
+                        details={"error_type": "directory_not_found", "path": str(base_dir)},
+                    )
+                    response = RunTestsResponse(
+                        status="error",
+                        request_id=request_id,
+                        mode="batch",
+                        **payload,
+                    )
+                    latency_ms = int((time.perf_counter() - start_time) * 1000)
+                    log_tool_call(
+                        "run_tests",
+                        request_id,
+                        response.status,
+                        latency_ms,
+                        response.error_code,
+                        meta={"mode": "batch", "root_path": root_path, "test_type": test_type},
+                    )
+                    return response
+
+                yaml_files = list(base_dir.rglob("*.yaml"))
+                filtered_files = []
+                for yaml_file in yaml_files:
+                    if not yaml_file.is_relative_to(cases_root_resolved):
+                        continue
+                    try:
+                        data = load_yaml_file(yaml_file)
+                        is_unit = "unittest" in data
+                        is_integration = "testcase" in data
+
+                        if test_type == "all":
+                            filtered_files.append(yaml_file)
+                        elif test_type == "unit" and is_unit:
+                            filtered_files.append(yaml_file)
+                        elif test_type == "integration" and is_integration:
+                            filtered_files.append(yaml_file)
+                    except Exception:
+                        continue
+
+                log.info(f"Found {len(filtered_files)} test cases to run")
+
+                for yaml_file in filtered_files:
+                    try:
+                        yaml_relative = yaml_file.relative_to(repo_root).as_posix()
+                        if yaml_file.is_dir():
+                            for sub_yaml in yaml_file.rglob("*.yaml"):
+                                if sub_yaml.is_relative_to(cases_root_resolved):
+                                    result = execute_single_test(str(sub_yaml), repo_root, python_path)
+                                    results.append(result)
+                        else:
+                            result = execute_single_test(yaml_relative, repo_root, python_path)
+                            results.append(result)
+                    except Exception as exc:
+                        log.error(f"Failed to run test case {yaml_file}: {exc}")
+
+                end_time = time.time()
+                duration = round(end_time - batch_start_time, 2)
+
+                passed = sum(1 for r in results if r.status == "passed")
+                failed = sum(1 for r in results if r.status == "failed")
+                skipped = sum(1 for r in results if r.status == "skipped")
+
+                run_id = str(uuid.uuid4())[:8]
+                save_to_history(
+                    run_id=run_id,
+                    total=len(results),
+                    passed=passed,
+                    failed=failed,
+                    skipped=skipped,
+                    duration=duration,
+                    test_names=[r.test_name for r in results],
+                )
+
+                response = RunTestsResponse(
+                    status="ok",
+                    request_id=request_id,
+                    mode="batch",
+                    total=len(results),
+                    passed=passed,
+                    failed=failed,
+                    skipped=skipped,
+                    duration=duration,
+                    results=results,
+                    has_failures=failed > 0,
+                )
+
+        except ValidationError as exc:
+            log.error(f"MCP run_tests argument validation failed: {exc}")
+            payload = build_error_payload(
+                code="MCP_VALIDATION_ERROR",
+                message=f"Argument validation failed: {exc}",
+                retryable=False,
+                details={"error_type": "validation_error", "details": format_validation_error(exc)},
+            )
+            response = RunTestsResponse(
+                status="error",
+                request_id=request_id,
+                mode="single" if is_single_mode else "batch",
+                **payload,
+            )
+        except Exception as exc:
+            log.error(f"MCP run_tests failed: {exc}")
+            payload = build_error_payload(
+                code="MCP_RUN_TESTS_ERROR",
+                message=f"Unexpected error: {type(exc).__name__}: {str(exc)}",
+                retryable=False,
+                details={"error_type": "unknown_error", "exception_type": type(exc).__name__},
+            )
+            response = RunTestsResponse(
+                status="error",
+                request_id=request_id,
+                mode="single" if is_single_mode else "batch",
+                **payload,
+            )
+        latency_ms = int((time.perf_counter() - start_time) * 1000)
+        log_tool_call(
+            "run_tests",
+            request_id,
+            response.status,
+            latency_ms,
+            response.error_code,
+            meta={
+                "mode": "single" if is_single_mode else "batch",
+                "yaml_path": yaml_path,
+                "root_path": root_path,
+                "test_type": test_type,
+            },
+        )
+        return response
+
+    @mcp.tool(
+        name="get_test_results",
+        title="Get test run history",
+        description="Return the test run history, including per-run statistics and the list of executed test cases.\n\n"
+        "**Parameters**:\n"
+        "- `limit`: optional, number of records to return, defaults to 10\n\n"
+        "Example:\n"
+        "```json\n"
+        "{\n"
+        " \"limit\": 20\n"
+        "}\n"
+        "```",
+    )
+    def get_test_results(
+        limit: int = 10,
+    ) -> GetTestResultsResponse:
+        """Return the test run history."""
+        request_id = new_request_id()
+        start_time = time.perf_counter()
+        try:
+            recent = get_history(limit)
+
+            results = [
+                TestResultHistoryModel(
+                    run_id=item["run_id"],
+                    timestamp=item["timestamp"],
+                    total=item["total"],
+                    passed=item["passed"],
+                    failed=item["failed"],
+                    skipped=item["skipped"],
+                    duration=item["duration"],
+                    test_names=item["test_names"],
+                )
+                for item in recent
+            ]
+
+            response = GetTestResultsResponse(
+                status="ok",
+                request_id=request_id,
+                results=results,
+            )
+        except Exception as exc:
+            log.error(f"Failed to fetch test run history: {exc}")
+            payload = build_error_payload(
+                code="MCP_GET_RESULTS_ERROR",
+                message=str(exc),
+                retryable=False,
+                details={"error_type": "unknown_error"},
+            )
+            response = GetTestResultsResponse(
+                status="error",
+                request_id=request_id,
+                results=[],
+                **payload,
+            )
+        latency_ms = int((time.perf_counter() - start_time) * 1000)
+        log_tool_call(
+            "get_test_results",
+            request_id,
+            response.status,
+            latency_ms,
+            response.error_code,
+            meta={"limit": limit},
+        )
+        return response
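
In the batch branch of run_tests above, selection keys on the top-level YAML key of each case file: `testcase` marks an integration case, `unittest` marks a unit case, and `test_type="all"` keeps every parseable YAML file under the cases root regardless of its key. A minimal sketch of that routing, with purely illustrative file contents reduced to the keys the filter actually inspects:

    integration_case = {"testcase": {"name": "login_integration"}}  # kept for "integration" and "all"
    unit_case = {"unittest": {"name": "price_calc_unit"}}           # kept for "unit" and "all"

    def keep(data: dict, test_type: str) -> bool:
        return (
            test_type == "all"
            or (test_type == "unit" and "unittest" in data)
            or (test_type == "integration" and "testcase" in data)
        )

    assert keep(unit_case, "unit") and not keep(unit_case, "integration")
    assert keep(integration_case, "all") and not keep(integration_case, "unit")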