gobby-0.2.5-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- gobby/__init__.py +3 -0
- gobby/adapters/__init__.py +30 -0
- gobby/adapters/base.py +93 -0
- gobby/adapters/claude_code.py +276 -0
- gobby/adapters/codex.py +1292 -0
- gobby/adapters/gemini.py +343 -0
- gobby/agents/__init__.py +37 -0
- gobby/agents/codex_session.py +120 -0
- gobby/agents/constants.py +112 -0
- gobby/agents/context.py +362 -0
- gobby/agents/definitions.py +133 -0
- gobby/agents/gemini_session.py +111 -0
- gobby/agents/registry.py +618 -0
- gobby/agents/runner.py +968 -0
- gobby/agents/session.py +259 -0
- gobby/agents/spawn.py +916 -0
- gobby/agents/spawners/__init__.py +77 -0
- gobby/agents/spawners/base.py +142 -0
- gobby/agents/spawners/cross_platform.py +266 -0
- gobby/agents/spawners/embedded.py +225 -0
- gobby/agents/spawners/headless.py +226 -0
- gobby/agents/spawners/linux.py +125 -0
- gobby/agents/spawners/macos.py +277 -0
- gobby/agents/spawners/windows.py +308 -0
- gobby/agents/tty_config.py +319 -0
- gobby/autonomous/__init__.py +32 -0
- gobby/autonomous/progress_tracker.py +447 -0
- gobby/autonomous/stop_registry.py +269 -0
- gobby/autonomous/stuck_detector.py +383 -0
- gobby/cli/__init__.py +67 -0
- gobby/cli/__main__.py +8 -0
- gobby/cli/agents.py +529 -0
- gobby/cli/artifacts.py +266 -0
- gobby/cli/daemon.py +329 -0
- gobby/cli/extensions.py +526 -0
- gobby/cli/github.py +263 -0
- gobby/cli/init.py +53 -0
- gobby/cli/install.py +614 -0
- gobby/cli/installers/__init__.py +37 -0
- gobby/cli/installers/antigravity.py +65 -0
- gobby/cli/installers/claude.py +363 -0
- gobby/cli/installers/codex.py +192 -0
- gobby/cli/installers/gemini.py +294 -0
- gobby/cli/installers/git_hooks.py +377 -0
- gobby/cli/installers/shared.py +737 -0
- gobby/cli/linear.py +250 -0
- gobby/cli/mcp.py +30 -0
- gobby/cli/mcp_proxy.py +698 -0
- gobby/cli/memory.py +304 -0
- gobby/cli/merge.py +384 -0
- gobby/cli/projects.py +79 -0
- gobby/cli/sessions.py +622 -0
- gobby/cli/tasks/__init__.py +30 -0
- gobby/cli/tasks/_utils.py +658 -0
- gobby/cli/tasks/ai.py +1025 -0
- gobby/cli/tasks/commits.py +169 -0
- gobby/cli/tasks/crud.py +685 -0
- gobby/cli/tasks/deps.py +135 -0
- gobby/cli/tasks/labels.py +63 -0
- gobby/cli/tasks/main.py +273 -0
- gobby/cli/tasks/search.py +178 -0
- gobby/cli/tui.py +34 -0
- gobby/cli/utils.py +513 -0
- gobby/cli/workflows.py +927 -0
- gobby/cli/worktrees.py +481 -0
- gobby/config/__init__.py +129 -0
- gobby/config/app.py +551 -0
- gobby/config/extensions.py +167 -0
- gobby/config/features.py +472 -0
- gobby/config/llm_providers.py +98 -0
- gobby/config/logging.py +66 -0
- gobby/config/mcp.py +346 -0
- gobby/config/persistence.py +247 -0
- gobby/config/servers.py +141 -0
- gobby/config/sessions.py +250 -0
- gobby/config/tasks.py +784 -0
- gobby/hooks/__init__.py +104 -0
- gobby/hooks/artifact_capture.py +213 -0
- gobby/hooks/broadcaster.py +243 -0
- gobby/hooks/event_handlers.py +723 -0
- gobby/hooks/events.py +218 -0
- gobby/hooks/git.py +169 -0
- gobby/hooks/health_monitor.py +171 -0
- gobby/hooks/hook_manager.py +856 -0
- gobby/hooks/hook_types.py +575 -0
- gobby/hooks/plugins.py +813 -0
- gobby/hooks/session_coordinator.py +396 -0
- gobby/hooks/verification_runner.py +268 -0
- gobby/hooks/webhooks.py +339 -0
- gobby/install/claude/commands/gobby/bug.md +51 -0
- gobby/install/claude/commands/gobby/chore.md +51 -0
- gobby/install/claude/commands/gobby/epic.md +52 -0
- gobby/install/claude/commands/gobby/eval.md +235 -0
- gobby/install/claude/commands/gobby/feat.md +49 -0
- gobby/install/claude/commands/gobby/nit.md +52 -0
- gobby/install/claude/commands/gobby/ref.md +52 -0
- gobby/install/claude/hooks/HOOK_SCHEMAS.md +632 -0
- gobby/install/claude/hooks/hook_dispatcher.py +364 -0
- gobby/install/claude/hooks/validate_settings.py +102 -0
- gobby/install/claude/hooks-template.json +118 -0
- gobby/install/codex/hooks/hook_dispatcher.py +153 -0
- gobby/install/codex/prompts/forget.md +7 -0
- gobby/install/codex/prompts/memories.md +7 -0
- gobby/install/codex/prompts/recall.md +7 -0
- gobby/install/codex/prompts/remember.md +13 -0
- gobby/install/gemini/hooks/hook_dispatcher.py +268 -0
- gobby/install/gemini/hooks-template.json +138 -0
- gobby/install/shared/plugins/code_guardian.py +456 -0
- gobby/install/shared/plugins/example_notify.py +331 -0
- gobby/integrations/__init__.py +10 -0
- gobby/integrations/github.py +145 -0
- gobby/integrations/linear.py +145 -0
- gobby/llm/__init__.py +40 -0
- gobby/llm/base.py +120 -0
- gobby/llm/claude.py +578 -0
- gobby/llm/claude_executor.py +503 -0
- gobby/llm/codex.py +322 -0
- gobby/llm/codex_executor.py +513 -0
- gobby/llm/executor.py +316 -0
- gobby/llm/factory.py +34 -0
- gobby/llm/gemini.py +258 -0
- gobby/llm/gemini_executor.py +339 -0
- gobby/llm/litellm.py +287 -0
- gobby/llm/litellm_executor.py +303 -0
- gobby/llm/resolver.py +499 -0
- gobby/llm/service.py +236 -0
- gobby/mcp_proxy/__init__.py +29 -0
- gobby/mcp_proxy/actions.py +175 -0
- gobby/mcp_proxy/daemon_control.py +198 -0
- gobby/mcp_proxy/importer.py +436 -0
- gobby/mcp_proxy/lazy.py +325 -0
- gobby/mcp_proxy/manager.py +798 -0
- gobby/mcp_proxy/metrics.py +609 -0
- gobby/mcp_proxy/models.py +139 -0
- gobby/mcp_proxy/registries.py +215 -0
- gobby/mcp_proxy/schema_hash.py +381 -0
- gobby/mcp_proxy/semantic_search.py +706 -0
- gobby/mcp_proxy/server.py +549 -0
- gobby/mcp_proxy/services/__init__.py +0 -0
- gobby/mcp_proxy/services/fallback.py +306 -0
- gobby/mcp_proxy/services/recommendation.py +224 -0
- gobby/mcp_proxy/services/server_mgmt.py +214 -0
- gobby/mcp_proxy/services/system.py +72 -0
- gobby/mcp_proxy/services/tool_filter.py +231 -0
- gobby/mcp_proxy/services/tool_proxy.py +309 -0
- gobby/mcp_proxy/stdio.py +565 -0
- gobby/mcp_proxy/tools/__init__.py +27 -0
- gobby/mcp_proxy/tools/agents.py +1103 -0
- gobby/mcp_proxy/tools/artifacts.py +207 -0
- gobby/mcp_proxy/tools/hub.py +335 -0
- gobby/mcp_proxy/tools/internal.py +337 -0
- gobby/mcp_proxy/tools/memory.py +543 -0
- gobby/mcp_proxy/tools/merge.py +422 -0
- gobby/mcp_proxy/tools/metrics.py +283 -0
- gobby/mcp_proxy/tools/orchestration/__init__.py +23 -0
- gobby/mcp_proxy/tools/orchestration/cleanup.py +619 -0
- gobby/mcp_proxy/tools/orchestration/monitor.py +380 -0
- gobby/mcp_proxy/tools/orchestration/orchestrate.py +746 -0
- gobby/mcp_proxy/tools/orchestration/review.py +736 -0
- gobby/mcp_proxy/tools/orchestration/utils.py +16 -0
- gobby/mcp_proxy/tools/session_messages.py +1056 -0
- gobby/mcp_proxy/tools/task_dependencies.py +219 -0
- gobby/mcp_proxy/tools/task_expansion.py +591 -0
- gobby/mcp_proxy/tools/task_github.py +393 -0
- gobby/mcp_proxy/tools/task_linear.py +379 -0
- gobby/mcp_proxy/tools/task_orchestration.py +77 -0
- gobby/mcp_proxy/tools/task_readiness.py +522 -0
- gobby/mcp_proxy/tools/task_sync.py +351 -0
- gobby/mcp_proxy/tools/task_validation.py +843 -0
- gobby/mcp_proxy/tools/tasks/__init__.py +25 -0
- gobby/mcp_proxy/tools/tasks/_context.py +112 -0
- gobby/mcp_proxy/tools/tasks/_crud.py +516 -0
- gobby/mcp_proxy/tools/tasks/_factory.py +176 -0
- gobby/mcp_proxy/tools/tasks/_helpers.py +129 -0
- gobby/mcp_proxy/tools/tasks/_lifecycle.py +517 -0
- gobby/mcp_proxy/tools/tasks/_lifecycle_validation.py +301 -0
- gobby/mcp_proxy/tools/tasks/_resolution.py +55 -0
- gobby/mcp_proxy/tools/tasks/_search.py +215 -0
- gobby/mcp_proxy/tools/tasks/_session.py +125 -0
- gobby/mcp_proxy/tools/workflows.py +973 -0
- gobby/mcp_proxy/tools/worktrees.py +1264 -0
- gobby/mcp_proxy/transports/__init__.py +0 -0
- gobby/mcp_proxy/transports/base.py +95 -0
- gobby/mcp_proxy/transports/factory.py +44 -0
- gobby/mcp_proxy/transports/http.py +139 -0
- gobby/mcp_proxy/transports/stdio.py +213 -0
- gobby/mcp_proxy/transports/websocket.py +136 -0
- gobby/memory/backends/__init__.py +116 -0
- gobby/memory/backends/mem0.py +408 -0
- gobby/memory/backends/memu.py +485 -0
- gobby/memory/backends/null.py +111 -0
- gobby/memory/backends/openmemory.py +537 -0
- gobby/memory/backends/sqlite.py +304 -0
- gobby/memory/context.py +87 -0
- gobby/memory/manager.py +1001 -0
- gobby/memory/protocol.py +451 -0
- gobby/memory/search/__init__.py +66 -0
- gobby/memory/search/text.py +127 -0
- gobby/memory/viz.py +258 -0
- gobby/prompts/__init__.py +13 -0
- gobby/prompts/defaults/expansion/system.md +119 -0
- gobby/prompts/defaults/expansion/user.md +48 -0
- gobby/prompts/defaults/external_validation/agent.md +72 -0
- gobby/prompts/defaults/external_validation/external.md +63 -0
- gobby/prompts/defaults/external_validation/spawn.md +83 -0
- gobby/prompts/defaults/external_validation/system.md +6 -0
- gobby/prompts/defaults/features/import_mcp.md +22 -0
- gobby/prompts/defaults/features/import_mcp_github.md +17 -0
- gobby/prompts/defaults/features/import_mcp_search.md +16 -0
- gobby/prompts/defaults/features/recommend_tools.md +32 -0
- gobby/prompts/defaults/features/recommend_tools_hybrid.md +35 -0
- gobby/prompts/defaults/features/recommend_tools_llm.md +30 -0
- gobby/prompts/defaults/features/server_description.md +20 -0
- gobby/prompts/defaults/features/server_description_system.md +6 -0
- gobby/prompts/defaults/features/task_description.md +31 -0
- gobby/prompts/defaults/features/task_description_system.md +6 -0
- gobby/prompts/defaults/features/tool_summary.md +17 -0
- gobby/prompts/defaults/features/tool_summary_system.md +6 -0
- gobby/prompts/defaults/research/step.md +58 -0
- gobby/prompts/defaults/validation/criteria.md +47 -0
- gobby/prompts/defaults/validation/validate.md +38 -0
- gobby/prompts/loader.py +346 -0
- gobby/prompts/models.py +113 -0
- gobby/py.typed +0 -0
- gobby/runner.py +488 -0
- gobby/search/__init__.py +23 -0
- gobby/search/protocol.py +104 -0
- gobby/search/tfidf.py +232 -0
- gobby/servers/__init__.py +7 -0
- gobby/servers/http.py +636 -0
- gobby/servers/models.py +31 -0
- gobby/servers/routes/__init__.py +23 -0
- gobby/servers/routes/admin.py +416 -0
- gobby/servers/routes/dependencies.py +118 -0
- gobby/servers/routes/mcp/__init__.py +24 -0
- gobby/servers/routes/mcp/hooks.py +135 -0
- gobby/servers/routes/mcp/plugins.py +121 -0
- gobby/servers/routes/mcp/tools.py +1337 -0
- gobby/servers/routes/mcp/webhooks.py +159 -0
- gobby/servers/routes/sessions.py +582 -0
- gobby/servers/websocket.py +766 -0
- gobby/sessions/__init__.py +13 -0
- gobby/sessions/analyzer.py +322 -0
- gobby/sessions/lifecycle.py +240 -0
- gobby/sessions/manager.py +563 -0
- gobby/sessions/processor.py +225 -0
- gobby/sessions/summary.py +532 -0
- gobby/sessions/transcripts/__init__.py +41 -0
- gobby/sessions/transcripts/base.py +125 -0
- gobby/sessions/transcripts/claude.py +386 -0
- gobby/sessions/transcripts/codex.py +143 -0
- gobby/sessions/transcripts/gemini.py +195 -0
- gobby/storage/__init__.py +21 -0
- gobby/storage/agents.py +409 -0
- gobby/storage/artifact_classifier.py +341 -0
- gobby/storage/artifacts.py +285 -0
- gobby/storage/compaction.py +67 -0
- gobby/storage/database.py +357 -0
- gobby/storage/inter_session_messages.py +194 -0
- gobby/storage/mcp.py +680 -0
- gobby/storage/memories.py +562 -0
- gobby/storage/merge_resolutions.py +550 -0
- gobby/storage/migrations.py +860 -0
- gobby/storage/migrations_legacy.py +1359 -0
- gobby/storage/projects.py +166 -0
- gobby/storage/session_messages.py +251 -0
- gobby/storage/session_tasks.py +97 -0
- gobby/storage/sessions.py +817 -0
- gobby/storage/task_dependencies.py +223 -0
- gobby/storage/tasks/__init__.py +42 -0
- gobby/storage/tasks/_aggregates.py +180 -0
- gobby/storage/tasks/_crud.py +449 -0
- gobby/storage/tasks/_id.py +104 -0
- gobby/storage/tasks/_lifecycle.py +311 -0
- gobby/storage/tasks/_manager.py +889 -0
- gobby/storage/tasks/_models.py +300 -0
- gobby/storage/tasks/_ordering.py +119 -0
- gobby/storage/tasks/_path_cache.py +110 -0
- gobby/storage/tasks/_queries.py +343 -0
- gobby/storage/tasks/_search.py +143 -0
- gobby/storage/workflow_audit.py +393 -0
- gobby/storage/worktrees.py +547 -0
- gobby/sync/__init__.py +29 -0
- gobby/sync/github.py +333 -0
- gobby/sync/linear.py +304 -0
- gobby/sync/memories.py +284 -0
- gobby/sync/tasks.py +641 -0
- gobby/tasks/__init__.py +8 -0
- gobby/tasks/build_verification.py +193 -0
- gobby/tasks/commits.py +633 -0
- gobby/tasks/context.py +747 -0
- gobby/tasks/criteria.py +342 -0
- gobby/tasks/enhanced_validator.py +226 -0
- gobby/tasks/escalation.py +263 -0
- gobby/tasks/expansion.py +626 -0
- gobby/tasks/external_validator.py +764 -0
- gobby/tasks/issue_extraction.py +171 -0
- gobby/tasks/prompts/expand.py +327 -0
- gobby/tasks/research.py +421 -0
- gobby/tasks/tdd.py +352 -0
- gobby/tasks/tree_builder.py +263 -0
- gobby/tasks/validation.py +712 -0
- gobby/tasks/validation_history.py +357 -0
- gobby/tasks/validation_models.py +89 -0
- gobby/tools/__init__.py +0 -0
- gobby/tools/summarizer.py +170 -0
- gobby/tui/__init__.py +5 -0
- gobby/tui/api_client.py +281 -0
- gobby/tui/app.py +327 -0
- gobby/tui/screens/__init__.py +25 -0
- gobby/tui/screens/agents.py +333 -0
- gobby/tui/screens/chat.py +450 -0
- gobby/tui/screens/dashboard.py +377 -0
- gobby/tui/screens/memory.py +305 -0
- gobby/tui/screens/metrics.py +231 -0
- gobby/tui/screens/orchestrator.py +904 -0
- gobby/tui/screens/sessions.py +412 -0
- gobby/tui/screens/tasks.py +442 -0
- gobby/tui/screens/workflows.py +289 -0
- gobby/tui/screens/worktrees.py +174 -0
- gobby/tui/widgets/__init__.py +21 -0
- gobby/tui/widgets/chat.py +210 -0
- gobby/tui/widgets/conductor.py +104 -0
- gobby/tui/widgets/menu.py +132 -0
- gobby/tui/widgets/message_panel.py +160 -0
- gobby/tui/widgets/review_gate.py +224 -0
- gobby/tui/widgets/task_tree.py +99 -0
- gobby/tui/widgets/token_budget.py +166 -0
- gobby/tui/ws_client.py +258 -0
- gobby/utils/__init__.py +3 -0
- gobby/utils/daemon_client.py +235 -0
- gobby/utils/git.py +222 -0
- gobby/utils/id.py +38 -0
- gobby/utils/json_helpers.py +161 -0
- gobby/utils/logging.py +376 -0
- gobby/utils/machine_id.py +135 -0
- gobby/utils/metrics.py +589 -0
- gobby/utils/project_context.py +182 -0
- gobby/utils/project_init.py +263 -0
- gobby/utils/status.py +256 -0
- gobby/utils/validation.py +80 -0
- gobby/utils/version.py +23 -0
- gobby/workflows/__init__.py +4 -0
- gobby/workflows/actions.py +1310 -0
- gobby/workflows/approval_flow.py +138 -0
- gobby/workflows/artifact_actions.py +103 -0
- gobby/workflows/audit_helpers.py +110 -0
- gobby/workflows/autonomous_actions.py +286 -0
- gobby/workflows/context_actions.py +394 -0
- gobby/workflows/definitions.py +130 -0
- gobby/workflows/detection_helpers.py +208 -0
- gobby/workflows/engine.py +485 -0
- gobby/workflows/evaluator.py +669 -0
- gobby/workflows/git_utils.py +96 -0
- gobby/workflows/hooks.py +169 -0
- gobby/workflows/lifecycle_evaluator.py +613 -0
- gobby/workflows/llm_actions.py +70 -0
- gobby/workflows/loader.py +333 -0
- gobby/workflows/mcp_actions.py +60 -0
- gobby/workflows/memory_actions.py +272 -0
- gobby/workflows/premature_stop.py +164 -0
- gobby/workflows/session_actions.py +139 -0
- gobby/workflows/state_actions.py +123 -0
- gobby/workflows/state_manager.py +104 -0
- gobby/workflows/stop_signal_actions.py +163 -0
- gobby/workflows/summary_actions.py +344 -0
- gobby/workflows/task_actions.py +249 -0
- gobby/workflows/task_enforcement_actions.py +901 -0
- gobby/workflows/templates.py +52 -0
- gobby/workflows/todo_actions.py +84 -0
- gobby/workflows/webhook.py +223 -0
- gobby/workflows/webhook_executor.py +399 -0
- gobby/worktrees/__init__.py +5 -0
- gobby/worktrees/git.py +690 -0
- gobby/worktrees/merge/__init__.py +20 -0
- gobby/worktrees/merge/conflict_parser.py +177 -0
- gobby/worktrees/merge/resolver.py +485 -0
- gobby-0.2.5.dist-info/METADATA +351 -0
- gobby-0.2.5.dist-info/RECORD +383 -0
- gobby-0.2.5.dist-info/WHEEL +5 -0
- gobby-0.2.5.dist-info/entry_points.txt +2 -0
- gobby-0.2.5.dist-info/licenses/LICENSE.md +193 -0
- gobby-0.2.5.dist-info/top_level.txt +1 -0
gobby/mcp_proxy/metrics.py
@@ -0,0 +1,609 @@
"""Tool metrics tracking for MCP proxy."""

import logging
import uuid
from dataclasses import dataclass
from datetime import UTC, datetime, timedelta
from typing import Any

from gobby.storage.database import DatabaseProtocol

logger = logging.getLogger(__name__)

# Default retention period for metrics
DEFAULT_RETENTION_DAYS = 7


@dataclass
class ToolMetrics:
    """Tool metrics data model."""

    id: str
    project_id: str
    server_name: str
    tool_name: str
    call_count: int
    success_count: int
    failure_count: int
    total_latency_ms: float
    avg_latency_ms: float | None
    last_called_at: str | None
    created_at: str
    updated_at: str

    @classmethod
    def from_row(cls, row: Any) -> "ToolMetrics":
        """Create ToolMetrics from database row."""
        return cls(
            id=row["id"],
            project_id=row["project_id"],
            server_name=row["server_name"],
            tool_name=row["tool_name"],
            call_count=row["call_count"],
            success_count=row["success_count"],
            failure_count=row["failure_count"],
            total_latency_ms=row["total_latency_ms"],
            avg_latency_ms=row["avg_latency_ms"],
            last_called_at=row["last_called_at"],
            created_at=row["created_at"],
            updated_at=row["updated_at"],
        )

    def to_dict(self) -> dict[str, Any]:
        """Convert to dictionary."""
        return {
            "id": self.id,
            "project_id": self.project_id,
            "server_name": self.server_name,
            "tool_name": self.tool_name,
            "call_count": self.call_count,
            "success_count": self.success_count,
            "failure_count": self.failure_count,
            "total_latency_ms": self.total_latency_ms,
            "avg_latency_ms": self.avg_latency_ms,
            "success_rate": (self.success_count / self.call_count if self.call_count > 0 else None),
            "last_called_at": self.last_called_at,
            "created_at": self.created_at,
            "updated_at": self.updated_at,
        }


class ToolMetricsManager:
    """
    Manager for tracking tool call metrics.

    Tracks call counts, success/failure rates, and latency for MCP tools.
    Metrics are persisted to SQLite and can be used for tool recommendations.
    """

    def __init__(self, db: DatabaseProtocol):
        """
        Initialize the metrics manager.

        Args:
            db: LocalDatabase instance for persistence
        """
        self.db = db

    def record_call(
        self,
        server_name: str,
        tool_name: str,
        project_id: str,
        latency_ms: float,
        success: bool = True,
    ) -> None:
        """
        Record a tool call with its metrics.

        Uses atomic INSERT ... ON CONFLICT DO UPDATE to prevent race conditions
        under concurrent writes.

        Args:
            server_name: Name of the MCP server
            tool_name: Name of the tool
            project_id: Project ID the call was made from
            latency_ms: Execution time in milliseconds
            success: Whether the call succeeded
        """
        now = datetime.now(UTC).isoformat()
        metrics_id = f"tm-{uuid.uuid4().hex[:6]}"
        success_inc = 1 if success else 0
        failure_inc = 0 if success else 1

        # Atomic upsert: INSERT new row or UPDATE existing with increments
        # Uses SQLite's INSERT ... ON CONFLICT DO UPDATE (upsert)
        self.db.execute(
            """
            INSERT INTO tool_metrics (
                id, project_id, server_name, tool_name,
                call_count, success_count, failure_count,
                total_latency_ms, avg_latency_ms,
                last_called_at, created_at, updated_at
            ) VALUES (?, ?, ?, ?, 1, ?, ?, ?, ?, ?, ?, ?)
            ON CONFLICT(project_id, server_name, tool_name) DO UPDATE SET
                call_count = call_count + 1,
                success_count = success_count + ?,
                failure_count = failure_count + ?,
                total_latency_ms = total_latency_ms + ?,
                avg_latency_ms = (total_latency_ms + ?) / (call_count + 1),
                last_called_at = ?,
                updated_at = ?
            """,
            (
                # INSERT values
                metrics_id,
                project_id,
                server_name,
                tool_name,
                success_inc,
                failure_inc,
                latency_ms,
                latency_ms,  # avg = total for first call
                now,
                now,
                now,
                # ON CONFLICT UPDATE values
                success_inc,
                failure_inc,
                latency_ms,
                latency_ms,
                now,
                now,
            ),
        )

    def get_metrics(
        self,
        project_id: str | None = None,
        server_name: str | None = None,
        tool_name: str | None = None,
    ) -> dict[str, Any]:
        """
        Get metrics, optionally filtered by project/server/tool.

        Args:
            project_id: Filter by project ID
            server_name: Filter by server name
            tool_name: Filter by tool name

        Returns:
            Dictionary with metrics data including per-tool stats
        """
        conditions = []
        params: list[Any] = []

        if project_id:
            conditions.append("project_id = ?")
            params.append(project_id)
        if server_name:
            conditions.append("server_name = ?")
            params.append(server_name)
        if tool_name:
            conditions.append("tool_name = ?")
            params.append(tool_name)

        where_clause = " AND ".join(conditions) if conditions else "1=1"

        # where_clause built from hardcoded condition strings, values parameterized
        # nosec B608: where_clause is hardcoded strings, values are parameterized
        rows = self.db.fetchall(
            f"SELECT * FROM tool_metrics WHERE {where_clause} ORDER BY call_count DESC",  # nosec B608
            tuple(params),
        )

        tools = [ToolMetrics.from_row(row).to_dict() for row in rows]

        # Calculate aggregates
        total_calls = sum(t["call_count"] for t in tools)
        total_success = sum(t["success_count"] for t in tools)
        total_failure = sum(t["failure_count"] for t in tools)
        total_latency = sum(t["total_latency_ms"] for t in tools)

        return {
            "tools": tools,
            "summary": {
                "total_tools": len(tools),
                "total_calls": total_calls,
                "total_success": total_success,
                "total_failure": total_failure,
                "overall_success_rate": (total_success / total_calls if total_calls > 0 else None),
                "overall_avg_latency_ms": (
                    total_latency / total_calls if total_calls > 0 else None
                ),
            },
        }

    def get_top_tools(
        self,
        project_id: str | None = None,
        limit: int = 10,
        order_by: str = "call_count",
    ) -> list[dict[str, Any]]:
        """
        Get top tools by call count or other metrics.

        Args:
            project_id: Filter by project ID
            limit: Maximum number of tools to return
            order_by: Column to sort by (call_count, success_count, avg_latency_ms)

        Returns:
            List of tool metrics sorted by the specified column
        """
        valid_order_columns = {"call_count", "success_count", "avg_latency_ms"}
        if order_by not in valid_order_columns:
            order_by = "call_count"

        if project_id:
            # nosec B608: order_by validated against allowlist above
            rows = self.db.fetchall(
                f"SELECT * FROM tool_metrics WHERE project_id = ? ORDER BY {order_by} DESC LIMIT ?",  # nosec B608
                (project_id, limit),
            )
        else:
            # nosec B608: order_by validated against allowlist above
            rows = self.db.fetchall(
                f"SELECT * FROM tool_metrics ORDER BY {order_by} DESC LIMIT ?",  # nosec B608
                (limit,),
            )

        return [ToolMetrics.from_row(row).to_dict() for row in rows]

    def get_tool_success_rate(
        self,
        server_name: str,
        tool_name: str,
        project_id: str,
    ) -> float | None:
        """
        Get success rate for a specific tool.

        Args:
            server_name: Name of the MCP server
            tool_name: Name of the tool
            project_id: Project ID

        Returns:
            Success rate as a float between 0 and 1, or None if no data
        """
        row = self.db.fetchone(
            """
            SELECT success_count, call_count
            FROM tool_metrics
            WHERE project_id = ? AND server_name = ? AND tool_name = ?
            """,
            (project_id, server_name, tool_name),
        )

        if row and row["call_count"] > 0:
            return float(row["success_count"]) / float(row["call_count"])
        return None

    def get_failing_tools(
        self,
        project_id: str | None = None,
        threshold: float = 0.5,
        limit: int = 10,
    ) -> list[dict[str, Any]]:
        """
        Get tools with failure rate above a threshold.

        Args:
            project_id: Filter by project ID
            threshold: Minimum failure rate (0.0-1.0) to include a tool (default: 0.5)
            limit: Maximum number of tools to return

        Returns:
            List of tool metrics sorted by failure rate descending
        """
        if project_id:
            rows = self.db.fetchall(
                """
                SELECT *,
                    CAST(failure_count AS REAL) / CAST(call_count AS REAL) as failure_rate
                FROM tool_metrics
                WHERE project_id = ?
                    AND call_count > 0
                    AND CAST(failure_count AS REAL) / CAST(call_count AS REAL) >= ?
                ORDER BY failure_rate DESC
                LIMIT ?
                """,
                (project_id, threshold, limit),
            )
        else:
            rows = self.db.fetchall(
                """
                SELECT *,
                    CAST(failure_count AS REAL) / CAST(call_count AS REAL) as failure_rate
                FROM tool_metrics
                WHERE call_count > 0
                    AND CAST(failure_count AS REAL) / CAST(call_count AS REAL) >= ?
                ORDER BY failure_rate DESC
                LIMIT ?
                """,
                (threshold, limit),
            )

        result = []
        for row in rows:
            tool_dict = ToolMetrics.from_row(row).to_dict()
            tool_dict["failure_rate"] = row["failure_rate"]
            result.append(tool_dict)

        return result

    def reset_metrics(
        self,
        project_id: str | None = None,
        server_name: str | None = None,
        tool_name: str | None = None,
    ) -> int:
        """
        Reset/delete metrics.

        Args:
            project_id: Reset only for this project
            server_name: Reset only for this server
            tool_name: Reset only for this specific tool

        Returns:
            Number of rows deleted
        """
        conditions = []
        params: list[Any] = []

        if project_id:
            conditions.append("project_id = ?")
            params.append(project_id)
        if server_name:
            conditions.append("server_name = ?")
            params.append(server_name)
        if tool_name:
            conditions.append("tool_name = ?")
            params.append(tool_name)

        if conditions:
            where_clause = " AND ".join(conditions)
            # nosec B608: where_clause built from hardcoded condition strings
            cursor = self.db.execute(
                f"DELETE FROM tool_metrics WHERE {where_clause}",  # nosec B608
                tuple(params),
            )
        else:
            cursor = self.db.execute("DELETE FROM tool_metrics")

        return cursor.rowcount

    def aggregate_to_daily(self, retention_days: int = DEFAULT_RETENTION_DAYS) -> int:
        """
        Aggregate old metrics into daily summaries before deletion.

        Rolls up metrics older than retention_days into tool_metrics_daily table,
        preserving historical data while keeping the main table lean.

        Args:
            retention_days: Metrics older than this are aggregated (default: 7)

        Returns:
            Number of rows aggregated
        """
        cutoff = datetime.now(UTC) - timedelta(days=retention_days)
        cutoff_str = cutoff.isoformat()

        # Get metrics to aggregate (group by project, server, tool, and date)
        rows = self.db.fetchall(
            """
            SELECT
                project_id,
                server_name,
                tool_name,
                date(last_called_at) as metric_date,
                SUM(call_count) as total_calls,
                SUM(success_count) as total_success,
                SUM(failure_count) as total_failure,
                SUM(total_latency_ms) as total_latency
            FROM tool_metrics
            WHERE last_called_at < ?
            GROUP BY project_id, server_name, tool_name, date(last_called_at)
            """,
            (cutoff_str,),
        )

        if not rows:
            return 0

        aggregated = 0
        now = datetime.now(UTC).isoformat()

        for row in rows:
            total_calls = row["total_calls"]
            avg_latency = row["total_latency"] / total_calls if total_calls > 0 else None

            # Upsert into daily table
            self.db.execute(
                """
                INSERT INTO tool_metrics_daily (
                    project_id, server_name, tool_name, date,
                    call_count, success_count, failure_count,
                    total_latency_ms, avg_latency_ms, created_at
                ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
                ON CONFLICT(project_id, server_name, tool_name, date) DO UPDATE SET
                    call_count = call_count + excluded.call_count,
                    success_count = success_count + excluded.success_count,
                    failure_count = failure_count + excluded.failure_count,
                    total_latency_ms = total_latency_ms + excluded.total_latency_ms,
                    avg_latency_ms = (total_latency_ms + excluded.total_latency_ms) /
                        (call_count + excluded.call_count)
                """,
                (
                    row["project_id"],
                    row["server_name"],
                    row["tool_name"],
                    row["metric_date"],
                    total_calls,
                    row["total_success"],
                    row["total_failure"],
                    row["total_latency"],
                    avg_latency,
                    now,
                ),
            )
            aggregated += 1

        if aggregated > 0:
            logger.info(f"Metrics aggregation: rolled up {aggregated} metric groups to daily table")

        return aggregated

    def cleanup_old_metrics(self, retention_days: int = DEFAULT_RETENTION_DAYS) -> int:
        """
        Aggregate and delete metrics older than the retention period.

        First aggregates old metrics into tool_metrics_daily, then deletes
        them from the main table. This preserves historical data while
        keeping the main table lean.

        Args:
            retention_days: Number of days to retain metrics (default: 7)

        Returns:
            Number of rows deleted
        """
        # First aggregate to daily table
        self.aggregate_to_daily(retention_days)

        # Then delete from main table
        cutoff = datetime.now(UTC) - timedelta(days=retention_days)
        cutoff_str = cutoff.isoformat()

        cursor = self.db.execute(
            """
            DELETE FROM tool_metrics
            WHERE last_called_at < ?
            """,
            (cutoff_str,),
        )

        deleted = cursor.rowcount
        if deleted > 0:
            logger.info(
                f"Metrics cleanup: deleted {deleted} stale metrics (older than {retention_days} days)"
            )
        return deleted

    def get_daily_metrics(
        self,
        project_id: str | None = None,
        server_name: str | None = None,
        tool_name: str | None = None,
        start_date: str | None = None,
        end_date: str | None = None,
    ) -> dict[str, Any]:
        """
        Get aggregated daily metrics for historical analysis.

        Args:
            project_id: Filter by project ID
            server_name: Filter by server name
            tool_name: Filter by tool name
            start_date: Start date (YYYY-MM-DD format)
            end_date: End date (YYYY-MM-DD format)

        Returns:
            Dictionary with daily metrics data
        """
        conditions = []
        params: list[Any] = []

        if project_id:
            conditions.append("project_id = ?")
            params.append(project_id)
        if server_name:
            conditions.append("server_name = ?")
            params.append(server_name)
        if tool_name:
            conditions.append("tool_name = ?")
            params.append(tool_name)
        if start_date:
            conditions.append("date >= ?")
            params.append(start_date)
        if end_date:
            conditions.append("date <= ?")
            params.append(end_date)

        where_clause = " AND ".join(conditions) if conditions else "1=1"

        # nosec B608: where_clause built from hardcoded condition strings, values parameterized
        rows = self.db.fetchall(
            f"SELECT * FROM tool_metrics_daily WHERE {where_clause} ORDER BY date DESC, call_count DESC",  # nosec B608
            tuple(params),
        )

        daily_data = [
            {
                "project_id": row["project_id"],
                "server_name": row["server_name"],
                "tool_name": row["tool_name"],
                "date": row["date"],
                "call_count": row["call_count"],
                "success_count": row["success_count"],
                "failure_count": row["failure_count"],
                "total_latency_ms": row["total_latency_ms"],
                "avg_latency_ms": row["avg_latency_ms"],
                "success_rate": (
                    row["success_count"] / row["call_count"] if row["call_count"] > 0 else None
                ),
            }
            for row in rows
        ]

        # Calculate aggregates
        total_calls = sum(d["call_count"] for d in daily_data)
        total_success = sum(d["success_count"] for d in daily_data)
        total_latency = sum(d["total_latency_ms"] for d in daily_data)

        return {
            "daily": daily_data,
            "summary": {
                "total_days": len({d["date"] for d in daily_data}),
                "total_calls": total_calls,
                "total_success": total_success,
                "overall_success_rate": (total_success / total_calls if total_calls > 0 else None),
                "overall_avg_latency_ms": (
                    total_latency / total_calls if total_calls > 0 else None
                ),
            },
        }

    def get_retention_stats(self) -> dict[str, Any]:
        """
        Get statistics about metrics retention.

        Returns:
            Dictionary with retention statistics including oldest/newest metrics
        """
        row = self.db.fetchone(
            """
            SELECT
                COUNT(*) as total_count,
                MIN(last_called_at) as oldest,
                MAX(last_called_at) as newest,
                SUM(call_count) as total_calls
            FROM tool_metrics
            """
        )

        if row:
            return {
                "total_metrics": row["total_count"],
                "oldest_metric": row["oldest"],
                "newest_metric": row["newest"],
                "total_calls_recorded": row["total_calls"],
            }
        return {
            "total_metrics": 0,
            "oldest_metric": None,
            "newest_metric": None,
            "total_calls_recorded": 0,
        }