devguard 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (60) hide show
  1. devguard/INTEGRATION_SUMMARY.md +121 -0
  2. devguard/__init__.py +3 -0
  3. devguard/__main__.py +6 -0
  4. devguard/checkers/__init__.py +41 -0
  5. devguard/checkers/api_usage.py +523 -0
  6. devguard/checkers/aws_cost.py +331 -0
  7. devguard/checkers/aws_iam.py +284 -0
  8. devguard/checkers/base.py +25 -0
  9. devguard/checkers/container.py +137 -0
  10. devguard/checkers/domain.py +189 -0
  11. devguard/checkers/firecrawl.py +117 -0
  12. devguard/checkers/fly.py +225 -0
  13. devguard/checkers/github.py +210 -0
  14. devguard/checkers/npm.py +327 -0
  15. devguard/checkers/npm_security.py +244 -0
  16. devguard/checkers/redteam.py +290 -0
  17. devguard/checkers/secret.py +279 -0
  18. devguard/checkers/swarm.py +376 -0
  19. devguard/checkers/tailscale.py +143 -0
  20. devguard/checkers/tailsnitch.py +303 -0
  21. devguard/checkers/tavily.py +179 -0
  22. devguard/checkers/vercel.py +192 -0
  23. devguard/cli.py +1510 -0
  24. devguard/cli_helpers.py +189 -0
  25. devguard/config.py +249 -0
  26. devguard/core.py +293 -0
  27. devguard/dashboard.py +715 -0
  28. devguard/discovery.py +363 -0
  29. devguard/http_client.py +142 -0
  30. devguard/llm_service.py +481 -0
  31. devguard/mcp_server.py +259 -0
  32. devguard/metrics.py +144 -0
  33. devguard/models.py +208 -0
  34. devguard/reporting.py +1571 -0
  35. devguard/sarif.py +295 -0
  36. devguard/scripts/ANALYSIS_SUMMARY.md +141 -0
  37. devguard/scripts/README.md +221 -0
  38. devguard/scripts/auto_fix_recommendations.py +145 -0
  39. devguard/scripts/generate_npmignore.py +175 -0
  40. devguard/scripts/generate_security_report.py +324 -0
  41. devguard/scripts/prepublish_check.sh +29 -0
  42. devguard/scripts/redteam_npm_packages.py +1262 -0
  43. devguard/scripts/review_all_repos.py +300 -0
  44. devguard/spec.py +617 -0
  45. devguard/sweeps/__init__.py +23 -0
  46. devguard/sweeps/ai_editor_config_audit.py +697 -0
  47. devguard/sweeps/cargo_publish_audit.py +655 -0
  48. devguard/sweeps/dependency_audit.py +419 -0
  49. devguard/sweeps/gitignore_audit.py +336 -0
  50. devguard/sweeps/local_dev.py +260 -0
  51. devguard/sweeps/local_dirty_worktree_secrets.py +521 -0
  52. devguard/sweeps/project_flaudit.py +636 -0
  53. devguard/sweeps/public_github_secrets.py +680 -0
  54. devguard/sweeps/publish_audit.py +478 -0
  55. devguard/sweeps/ssh_key_audit.py +327 -0
  56. devguard/utils.py +174 -0
  57. devguard-0.2.0.dist-info/METADATA +225 -0
  58. devguard-0.2.0.dist-info/RECORD +60 -0
  59. devguard-0.2.0.dist-info/WHEEL +4 -0
  60. devguard-0.2.0.dist-info/entry_points.txt +2 -0
@@ -0,0 +1,143 @@
1
+ """Tailscale node health checker."""
2
+
3
+ import asyncio
4
+ import json
5
+ import logging
6
+
7
+ from devguard.checkers.base import BaseChecker
8
+ from devguard.models import CheckResult, CheckStatus, DeploymentStatus, Finding, Severity
9
+
10
+ logger = logging.getLogger(__name__)
11
+
12
+
13
class TailscaleChecker(BaseChecker):
    """Check Tailscale mesh network health.

    Runs the local ``tailscale status --json`` CLI and converts each node
    (self + peers) into a DeploymentStatus. Nodes listed in
    ``settings.tailscale_expected_nodes`` that are offline or absent from
    the mesh produce HIGH-severity findings.
    """

    check_type = "tailscale"

    async def check(self) -> CheckResult:
        """Check Tailscale node status.

        Returns:
            CheckResult with one deployment per visible node, findings for
            expected nodes that are offline or missing, and node-count
            metadata. ``success`` is True only when no errors occurred and
            no expected node is offline.
        """
        deployments: list[DeploymentStatus] = []
        findings: list[Finding] = []
        errors: list[str] = []

        expected_nodes = set(self.settings.tailscale_expected_nodes)
        proc = None

        try:
            # Run tailscale status --json
            proc = await asyncio.create_subprocess_exec(
                "tailscale",
                "status",
                "--json",
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE,
            )
            stdout, stderr = await asyncio.wait_for(proc.communicate(), timeout=10.0)

            if proc.returncode != 0:
                errors.append(f"tailscale status failed: {stderr.decode()}")
                return CheckResult(
                    check_type=self.check_type,
                    success=False,
                    errors=errors,
                )

            status = json.loads(stdout.decode())
            peers = status.get("Peer", {})
            self_node = status.get("Self", {})

            # The local node is always reported healthy: if the CLI
            # answered, this node is up.
            if self_node:
                self_name = self_node.get("HostName", "unknown")
                deployments.append(
                    DeploymentStatus(
                        platform="tailscale",
                        project_name=self_name,
                        deployment_id=self_node.get("PublicKey", "")[:16],
                        status=CheckStatus.HEALTHY,
                        url=f"tailscale://{self_name}",
                        metadata={"role": "self", "online": True},
                    )
                )

            # Walk all peers, tracking hostnames so missing expected nodes
            # can be detected afterwards.
            seen_nodes = {self_node.get("HostName", "")}
            for pubkey, peer in peers.items():
                hostname = peer.get("HostName", "unknown")
                seen_nodes.add(hostname)
                online = peer.get("Online", False)

                is_expected = hostname in expected_nodes

                if online:
                    status_val = CheckStatus.HEALTHY
                elif is_expected:
                    # Offline AND expected -> unhealthy, plus a finding.
                    status_val = CheckStatus.UNHEALTHY
                    findings.append(
                        Finding(
                            severity=Severity.HIGH,
                            title=f"Expected node offline: {hostname}",
                            description=f"{hostname} is offline but listed in expected nodes",
                            resource=hostname,
                            remediation=f"Check {hostname} connectivity",
                        )
                    )
                else:
                    # Offline but not expected -> health is indeterminate.
                    status_val = CheckStatus.UNKNOWN

                deployments.append(
                    DeploymentStatus(
                        platform="tailscale",
                        project_name=hostname,
                        deployment_id=pubkey[:16],
                        status=status_val,
                        url=f"tailscale://{hostname}",
                        metadata={
                            "online": online,
                            "expected": is_expected,
                            "exit_node": peer.get("ExitNode", False),
                            "exit_node_option": peer.get("ExitNodeOption", False),
                        },
                    )
                )

            # Check for expected nodes that never appeared in the mesh.
            for node_name in expected_nodes:
                if node_name not in seen_nodes:
                    findings.append(
                        Finding(
                            severity=Severity.HIGH,
                            title=f"Expected node not in mesh: {node_name}",
                            description=f"{node_name} is not visible in Tailscale mesh",
                            resource=node_name,
                            remediation="Check if node is registered with Tailscale",
                        )
                    )

        except (TimeoutError, asyncio.TimeoutError):
            # asyncio.wait_for raised asyncio.TimeoutError (distinct from the
            # builtin) before Python 3.11 -- catch both so the timeout is
            # reported consistently across versions.
            errors.append("tailscale status timed out after 10s")
            if proc is not None:
                # wait_for cancels communicate() but does not terminate the
                # child process; kill it so it does not linger.
                try:
                    proc.kill()
                except ProcessLookupError:
                    pass
        except FileNotFoundError:
            errors.append("tailscale CLI not found")
        except json.JSONDecodeError as e:
            errors.append(f"Failed to parse tailscale status: {e}")
        except Exception as e:
            errors.append(f"Tailscale check failed: {e}")

        expected_offline = sum(
            1
            for d in deployments
            if d.metadata.get("expected") and d.status == CheckStatus.UNHEALTHY
        )

        return CheckResult(
            check_type=self.check_type,
            success=len(errors) == 0 and expected_offline == 0,
            deployments=deployments,
            findings=findings,
            errors=errors,
            metadata={
                "total_nodes": len(deployments),
                "online_nodes": sum(1 for d in deployments if d.status == CheckStatus.HEALTHY),
                "expected_offline": expected_offline,
            },
        )
@@ -0,0 +1,303 @@
1
+ """Tailsnitch security auditor for Tailscale ACLs."""
2
+
3
+ import asyncio
4
+ import json
5
+ import logging
6
+ import os
7
+ import shutil
8
+
9
+ from devguard.checkers.base import BaseChecker
10
+ from devguard.models import CheckResult, Finding, Severity
11
+
12
+ logger = logging.getLogger(__name__)
13
+
14
+
15
class TailsnitchChecker(BaseChecker):
    """Check Tailscale ACL security using Tailsnitch.

    Tailsnitch scans Tailscale ACL policies for 50+ security misconfigurations,
    overly permissive access controls, and best practice violations.

    Requires:
        - Tailsnitch binary installed (see https://github.com/Adversis/tailsnitch)
        - Tailscale authentication (TSKEY or OAuth credentials)
    """

    check_type = "tailsnitch"

    # Map Tailsnitch severity strings onto Guardian severities.
    # Class-level so it is built once, not once per suggestion.
    _SEVERITY_MAP = {
        "CRITICAL": Severity.CRITICAL,
        "HIGH": Severity.HIGH,
        "MEDIUM": Severity.MEDIUM,
        "LOW": Severity.LOW,
        "INFO": Severity.WARNING,
    }

    def __init__(self, settings):
        """Initialize Tailsnitch checker.

        Args:
            settings: application settings; ``tailsnitch_binary_path`` (or
                auto-detection) and ``tailsnitch_tailnet`` are used here.
                Authentication is read from the environment.
        """
        super().__init__(settings)
        # Use custom path if provided, otherwise auto-detect
        self.tailsnitch_path = (
            settings.tailsnitch_binary_path
            if settings.tailsnitch_binary_path
            else self._find_tailsnitch()
        )
        self.tailnet = settings.tailsnitch_tailnet

        # Read auth from environment (pydantic-settings loads .env automatically)
        # Support both TSKEY and TS_API_KEY for compatibility
        self.tskey = os.getenv("TSKEY") or os.getenv("TS_API_KEY")
        self.ts_oauth_client_id = os.getenv("TS_OAUTH_CLIENT_ID")
        self.ts_oauth_client_secret = os.getenv("TS_OAUTH_CLIENT_SECRET")

        logger.debug(
            "TailsnitchChecker initialized",
            extra={
                "tailsnitch_path": self.tailsnitch_path,
                "has_api_key": bool(self.tskey),
                "has_oauth": bool(self.ts_oauth_client_id and self.ts_oauth_client_secret),
                "tailnet": self.tailnet,
            },
        )

    def _find_tailsnitch(self) -> str | None:
        """Find tailsnitch binary in PATH or common install locations."""
        # Check PATH first
        path = shutil.which("tailsnitch")
        if path:
            logger.debug("Found tailsnitch in PATH: %s", path)
            return path

        # Check common install locations
        common_paths = [
            "/usr/local/bin/tailsnitch",
            "/opt/homebrew/bin/tailsnitch",
            os.path.expanduser("~/bin/tailsnitch"),
            os.path.expanduser("~/.local/bin/tailsnitch"),
        ]
        for path in common_paths:
            if os.path.exists(path) and os.access(path, os.X_OK):
                logger.debug("Found tailsnitch at: %s", path)
                return path

        logger.debug("Tailsnitch binary not found in PATH or common locations")
        return None

    def _finding_from_suggestion(self, suggestion: dict, tailnet_name: str) -> Finding:
        """Convert one failed Tailsnitch suggestion into a Guardian Finding."""
        check_id = suggestion.get("id", "UNKNOWN")
        title = suggestion.get("title", "Unknown issue")
        severity_str = suggestion.get("severity", "info").upper()
        description = suggestion.get("description", "")
        remediation = suggestion.get("remediation", "")
        category = suggestion.get("category", "")

        severity = self._SEVERITY_MAP.get(severity_str, Severity.WARNING)

        # Extract resource from suggestion (can be string, dict, or list)
        resource = suggestion.get("resource", tailnet_name)
        if isinstance(resource, dict):
            resource = (
                resource.get("name")
                or resource.get("id")
                or resource.get("hostname")
                or tailnet_name
            )
        elif isinstance(resource, list) and resource:
            # If resource is a list, use first item or summarize the count
            resource = str(resource[0]) if len(resource) == 1 else f"{tailnet_name} ({len(resource)} resources)"
        elif not resource or resource == "tailnet":
            resource = tailnet_name

        # Build remediation with admin URL if available
        fix_info = suggestion.get("fix", {})
        admin_url = fix_info.get("admin_url")
        full_remediation = remediation
        if admin_url:
            full_remediation = f"{remediation}\n\nFix in admin console: {admin_url}"

        return Finding(
            severity=severity,
            title=f"{check_id}: {title}",
            description=description,
            resource=str(resource),
            remediation=full_remediation,
            metadata={
                "check_id": check_id,
                "category": category,
                "tailsnitch_severity": severity_str,
                "admin_url": admin_url,
                "tailnet": tailnet_name,
                "details": suggestion.get("details"),  # Additional context
            },
        )

    async def check(self) -> CheckResult:
        """Run Tailsnitch security audit.

        Returns:
            CheckResult whose ``success`` is True only when the audit ran
            and reported zero critical and zero high findings.
        """
        findings: list[Finding] = []
        errors: list[str] = []

        if not self.tailsnitch_path:
            install_instructions = (
                "Install Tailsnitch:\n"
                " 1. Download from https://github.com/Adversis/tailsnitch/releases\n"
                " 2. Or install via Go: go install github.com/Adversis/tailsnitch@latest\n"
                " 3. Or set TAILSNITCH_BINARY_PATH in .env to custom location"
            )
            errors.append(f"Tailsnitch binary not found. {install_instructions}")
            logger.warning("Tailsnitch binary not found", extra={"check_type": self.check_type})
            return CheckResult(
                check_type=self.check_type,
                success=False,
                errors=errors,
            )

        # Check authentication: either an API key or a full OAuth pair
        if not self.tskey and not (self.ts_oauth_client_id and self.ts_oauth_client_secret):
            auth_instructions = (
                "Tailscale authentication required. Set one of:\n"
                " - TSKEY or TS_API_KEY (API key from https://login.tailscale.com/admin/settings/keys)\n"
                " - TS_OAUTH_CLIENT_ID + TS_OAUTH_CLIENT_SECRET (OAuth from https://login.tailscale.com/admin/settings/oauth)\n"
                "Add to .env file or export as environment variables"
            )
            errors.append(auth_instructions)
            logger.warning("Tailscale authentication not configured", extra={"check_type": self.check_type})
            return CheckResult(
                check_type=self.check_type,
                success=False,
                errors=errors,
            )

        proc = None
        try:
            # Build command
            cmd = [self.tailsnitch_path, "--json"]

            # Add tailnet flag if specified
            if self.tailnet:
                cmd.extend(["--tailnet", self.tailnet])
                logger.debug("Auditing specific tailnet: %s", self.tailnet)

            # Pass credentials to Tailsnitch via its environment
            env = os.environ.copy()
            if self.tskey:
                env["TSKEY"] = self.tskey
                logger.debug("Using TSKEY authentication")
            if self.ts_oauth_client_id:
                env["TS_OAUTH_CLIENT_ID"] = self.ts_oauth_client_id
                logger.debug("Using OAuth client ID")
            if self.ts_oauth_client_secret:
                env["TS_OAUTH_CLIENT_SECRET"] = self.ts_oauth_client_secret
                logger.debug("Using OAuth client secret")

            logger.info("Running Tailsnitch security audit", extra={"command": " ".join(cmd[:3])})

            # Run Tailsnitch
            proc = await asyncio.create_subprocess_exec(
                *cmd,
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE,
                env=env,
            )

            stdout, stderr = await asyncio.wait_for(proc.communicate(), timeout=60.0)

            if proc.returncode != 0:
                stderr_text = stderr.decode() if stderr else ""
                stdout_preview = stdout.decode()[:200] if stdout else ""
                error_msg = f"Tailsnitch failed (exit {proc.returncode})"
                if stderr_text:
                    error_msg += f": {stderr_text[:500]}"
                elif stdout_preview:
                    error_msg += f": {stdout_preview}"
                errors.append(error_msg)
                logger.error(
                    "Tailsnitch execution failed",
                    extra={
                        "exit_code": proc.returncode,
                        "stderr_preview": stderr_text[:200] if stderr_text else None,
                    },
                )
                return CheckResult(
                    check_type=self.check_type,
                    success=False,
                    errors=errors,
                )

            # Parse JSON output
            try:
                output = json.loads(stdout.decode())
            except json.JSONDecodeError as e:
                errors.append(f"Failed to parse Tailsnitch JSON output: {e}")
                return CheckResult(
                    check_type=self.check_type,
                    success=False,
                    errors=errors,
                )

            # Convert Tailsnitch findings to Guardian Findings
            suggestions = output.get("suggestions", [])
            summary = output.get("summary", {})
            tailnet_name = output.get("tailnet", "unknown")

            logger.info(
                "Tailsnitch audit completed",
                extra={
                    "tailnet": tailnet_name,
                    "total_checks": summary.get("total", 0),
                    "failed": summary.get("failed", 0),
                    "critical": summary.get("critical", 0),
                    "high": summary.get("high", 0),
                },
            )

            for suggestion in suggestions:
                if suggestion.get("pass", True):
                    continue  # Skip passing checks
                findings.append(self._finding_from_suggestion(suggestion, tailnet_name))

            # Determine overall success: any critical or high finding fails
            critical_count = summary.get("critical", 0)
            high_count = summary.get("high", 0)
            success = critical_count == 0 and high_count == 0

            return CheckResult(
                check_type=self.check_type,
                success=success,
                findings=findings,
                errors=errors,
                metadata={
                    "total_checks": summary.get("total", 0),
                    "passed": summary.get("passed", 0),
                    "failed": summary.get("failed", 0),
                    "critical": critical_count,
                    "high": high_count,
                    "medium": summary.get("medium", 0),
                    "low": summary.get("low", 0),
                    "info": summary.get("info", 0),
                    "tailnet": output.get("tailnet", "unknown"),
                },
            )

        except (TimeoutError, asyncio.TimeoutError):
            # asyncio.wait_for raised asyncio.TimeoutError (distinct from the
            # builtin) before Python 3.11 -- catch both. Also kill the child:
            # wait_for cancels communicate() but leaves the process running.
            errors.append("Tailsnitch timed out after 60s")
            if proc is not None:
                try:
                    proc.kill()
                except ProcessLookupError:
                    pass
            return CheckResult(
                check_type=self.check_type,
                success=False,
                errors=errors,
            )
        except FileNotFoundError:
            errors.append(f"Tailsnitch binary not found at {self.tailsnitch_path}")
            return CheckResult(
                check_type=self.check_type,
                success=False,
                errors=errors,
            )
        except Exception as e:
            errors.append(f"Tailsnitch check failed: {e}")
            logger.exception("Tailsnitch check exception")
            return CheckResult(
                check_type=self.check_type,
                success=False,
                errors=errors,
            )
303
+
@@ -0,0 +1,179 @@
1
+ """Tavily API usage checker."""
2
+
3
+ import logging
4
+
5
+ import httpx
6
+
7
+ from devguard.checkers.base import BaseChecker
8
+ from devguard.http_client import create_client, retry_with_backoff
9
+ from devguard.models import CheckResult, CostMetric
10
+
11
+ logger = logging.getLogger(__name__)
12
+
13
+
14
class TavilyChecker(BaseChecker):
    """Check Tavily API usage and estimated monthly cost."""

    check_type = "tavily"

    async def check(self) -> CheckResult:
        """Check Tavily usage.

        Queries ``https://api.tavily.com/usage`` (with retry/backoff) and
        converts key-level and account-level usage into CostMetric entries.
        Cost is estimated at $0.008/credit on paid plans and $0 on the free
        plan.

        Returns:
            CheckResult with two cost metrics on success; on failure,
            ``success`` is False and any partial usage data salvaged from
            an HTTP error body is still reported.
        """
        errors: list[str] = []

        if not self.settings.tavily_api_key:
            return CheckResult(
                check_type=self.check_type,
                success=False,
                deployments=[],
                errors=["Tavily API key not configured"],
            )

        # Handle SecretStr (pydantic) transparently
        tavily_key = self.settings.tavily_api_key
        if hasattr(tavily_key, "get_secret_value"):
            tavily_key = tavily_key.get_secret_value()

        headers = {
            "Authorization": f"Bearer {tavily_key}",
        }

        try:
            async with create_client() as client:

                async def fetch_usage():
                    response = await client.get(
                        "https://api.tavily.com/usage",
                        headers=headers,
                        timeout=10.0,
                    )
                    response.raise_for_status()
                    return response

                response = await retry_with_backoff(fetch_usage, max_retries=3)
                data = response.json()

                # Extract usage data
                key_usage = data.get("key", {})
                account_usage = data.get("account", {})

                # Safely extract numeric values, defaulting to 0 if None
                key_usage_val = float(key_usage.get("usage") or 0)
                key_limit_val = float(key_usage.get("limit") or 0)
                account_usage_val = float(account_usage.get("plan_usage") or 0)
                account_limit_val = float(account_usage.get("plan_limit") or 0)

                key_usage_pct = (key_usage_val / key_limit_val * 100) if key_limit_val > 0 else 0.0
                account_usage_pct = (
                    (account_usage_val / account_limit_val * 100) if account_limit_val > 0 else 0.0
                )

                # Use the higher usage percentage as the headline number
                usage_percent = max(key_usage_pct, account_usage_pct)

                metadata = {
                    "key_usage": key_usage_val,
                    "key_limit": key_limit_val,
                    "account_plan": account_usage.get("current_plan"),
                    "account_usage": account_usage_val,
                    "account_limit": account_limit_val,
                    "usage_percent": round(usage_percent, 2),
                }

                # Create cost metrics with estimated costs
                # Tavily pricing: $0.008 per credit (pay-as-you-go)
                # Free: 1000 credits/month = $0
                # Monthly plans reduce per-credit cost
                plan = account_usage.get("current_plan", "free")
                # Use $0.008 per request as standard estimate for paid plans
                cost_per_request = 0.0 if plan == "free" else 0.008

                key_cost = key_usage_val * cost_per_request if cost_per_request > 0 else None
                account_cost = (
                    account_usage_val * cost_per_request if cost_per_request > 0 else None
                )

                cost_metrics = [
                    CostMetric(
                        service="tavily",
                        period="monthly",
                        amount=key_cost,
                        usage=key_usage_val,
                        limit=key_limit_val,
                        usage_percent=round(key_usage_pct, 2),
                        metadata={
                            "unit": "requests",
                            "type": "key",
                            "cost_per_request": cost_per_request,
                            "estimated": cost_per_request > 0,
                        },
                    ),
                    CostMetric(
                        service="tavily",
                        period="monthly",
                        amount=account_cost,
                        usage=account_usage_val,
                        limit=account_limit_val,
                        usage_percent=round(account_usage_pct, 2),
                        metadata={
                            "unit": "requests",
                            "type": "account",
                            "plan": plan,
                            "cost_per_request": cost_per_request,
                            "estimated": cost_per_request > 0,
                        },
                    ),
                ]

                return CheckResult(
                    check_type=self.check_type,
                    success=True,
                    deployments=[],
                    errors=[],
                    cost_metrics=cost_metrics,
                    metadata=metadata,
                )

        except httpx.HTTPStatusError as e:
            status_code = e.response.status_code
            error_text = e.response.text[:100]
            errors.append(f"HTTP {status_code}: {error_text}")
            # Try to extract partial cost data from error response if available
            cost_metrics = []
            try:
                error_data = e.response.json()
                if isinstance(error_data, dict):
                    # Try to get any usage info from error response
                    key_usage = error_data.get("key", {})
                    if key_usage:
                        usage_val = float(key_usage.get("usage") or 0)
                        limit_val = float(key_usage.get("limit") or 0)
                        # Fix: report the real usage percentage when a limit
                        # is known instead of hard-coding 0.0.
                        usage_pct = (
                            round(usage_val / limit_val * 100, 2) if limit_val > 0 else 0.0
                        )
                        cost_metrics.append(
                            CostMetric(
                                service="tavily",
                                period="monthly",
                                amount=None,
                                usage=usage_val,
                                limit=limit_val,
                                usage_percent=usage_pct,
                                metadata={"error": True, "status_code": status_code},
                            )
                        )
            except Exception:
                # Best-effort only: error bodies frequently are not JSON.
                pass
            return CheckResult(
                check_type=self.check_type,
                success=False,
                deployments=[],
                errors=errors,
                cost_metrics=cost_metrics,
            )
        except httpx.RequestError as e:
            errors.append(f"Network error: {str(e)}")
        except Exception as e:
            errors.append(f"Unexpected error: {str(e)}")

        return CheckResult(
            check_type=self.check_type,
            success=False,
            deployments=[],
            errors=errors,
            cost_metrics=[],
        )