devguard 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- devguard/INTEGRATION_SUMMARY.md +121 -0
- devguard/__init__.py +3 -0
- devguard/__main__.py +6 -0
- devguard/checkers/__init__.py +41 -0
- devguard/checkers/api_usage.py +523 -0
- devguard/checkers/aws_cost.py +331 -0
- devguard/checkers/aws_iam.py +284 -0
- devguard/checkers/base.py +25 -0
- devguard/checkers/container.py +137 -0
- devguard/checkers/domain.py +189 -0
- devguard/checkers/firecrawl.py +117 -0
- devguard/checkers/fly.py +225 -0
- devguard/checkers/github.py +210 -0
- devguard/checkers/npm.py +327 -0
- devguard/checkers/npm_security.py +244 -0
- devguard/checkers/redteam.py +290 -0
- devguard/checkers/secret.py +279 -0
- devguard/checkers/swarm.py +376 -0
- devguard/checkers/tailscale.py +143 -0
- devguard/checkers/tailsnitch.py +303 -0
- devguard/checkers/tavily.py +179 -0
- devguard/checkers/vercel.py +192 -0
- devguard/cli.py +1510 -0
- devguard/cli_helpers.py +189 -0
- devguard/config.py +249 -0
- devguard/core.py +293 -0
- devguard/dashboard.py +715 -0
- devguard/discovery.py +363 -0
- devguard/http_client.py +142 -0
- devguard/llm_service.py +481 -0
- devguard/mcp_server.py +259 -0
- devguard/metrics.py +144 -0
- devguard/models.py +208 -0
- devguard/reporting.py +1571 -0
- devguard/sarif.py +295 -0
- devguard/scripts/ANALYSIS_SUMMARY.md +141 -0
- devguard/scripts/README.md +221 -0
- devguard/scripts/auto_fix_recommendations.py +145 -0
- devguard/scripts/generate_npmignore.py +175 -0
- devguard/scripts/generate_security_report.py +324 -0
- devguard/scripts/prepublish_check.sh +29 -0
- devguard/scripts/redteam_npm_packages.py +1262 -0
- devguard/scripts/review_all_repos.py +300 -0
- devguard/spec.py +617 -0
- devguard/sweeps/__init__.py +23 -0
- devguard/sweeps/ai_editor_config_audit.py +697 -0
- devguard/sweeps/cargo_publish_audit.py +655 -0
- devguard/sweeps/dependency_audit.py +419 -0
- devguard/sweeps/gitignore_audit.py +336 -0
- devguard/sweeps/local_dev.py +260 -0
- devguard/sweeps/local_dirty_worktree_secrets.py +521 -0
- devguard/sweeps/project_flaudit.py +636 -0
- devguard/sweeps/public_github_secrets.py +680 -0
- devguard/sweeps/publish_audit.py +478 -0
- devguard/sweeps/ssh_key_audit.py +327 -0
- devguard/utils.py +174 -0
- devguard-0.2.0.dist-info/METADATA +225 -0
- devguard-0.2.0.dist-info/RECORD +60 -0
- devguard-0.2.0.dist-info/WHEEL +4 -0
- devguard-0.2.0.dist-info/entry_points.txt +2 -0
|
@@ -0,0 +1,121 @@
|
|
|
1
|
+
# npm Security Analysis Integration Summary
|
|
2
|
+
|
|
3
|
+
## Overview
|
|
4
|
+
|
|
5
|
+
The deep npm package security analysis from `devguard/scripts/redteam_npm_packages.py` has been integrated into the main devguard monitoring system as a new checker.
|
|
6
|
+
|
|
7
|
+
## Changes Made
|
|
8
|
+
|
|
9
|
+
### 1. New Checker: `NpmSecurityChecker`
|
|
10
|
+
|
|
11
|
+
**File:** `devguard/checkers/npm_security.py`
|
|
12
|
+
|
|
13
|
+
- Integrates deep security analysis into devguard's checker architecture
|
|
14
|
+
- Converts security findings to devguard `Vulnerability` objects
|
|
15
|
+
- Reports findings through the standard devguard reporting system
|
|
16
|
+
- Uses analysis functions from `devguard/scripts/redteam_npm_packages.py`
|
|
17
|
+
|
|
18
|
+
**Features:**
|
|
19
|
+
- Secret detection (API keys, tokens, passwords)
|
|
20
|
+
- Obfuscated code detection
|
|
21
|
+
- Sensitive file detection
|
|
22
|
+
- Git history detection
|
|
23
|
+
- Missing .npmignore detection
|
|
24
|
+
- Suspicious install script detection
|
|
25
|
+
- Dependency vulnerability checking
|
|
26
|
+
|
|
27
|
+
### 2. Configuration
|
|
28
|
+
|
|
29
|
+
**File:** `devguard/config.py`
|
|
30
|
+
|
|
31
|
+
Added new setting:
|
|
32
|
+
```python
|
|
33
|
+
npm_security_enabled: bool = Field(
|
|
34
|
+
False, description="Enable deep security analysis of npm packages"
|
|
35
|
+
)
|
|
36
|
+
```
|
|
37
|
+
|
|
38
|
+
**Default:** `False` (disabled by default due to resource intensity)
|
|
39
|
+
|
|
40
|
+
### 3. Core Integration
|
|
41
|
+
|
|
42
|
+
**File:** `devguard/core.py`
|
|
43
|
+
|
|
44
|
+
- Added `NpmSecurityChecker` to devguard's checker initialization
|
|
45
|
+
- Runs when `npm_security_enabled=true` and packages are configured
|
|
46
|
+
- Executes alongside other checkers in parallel
|
|
47
|
+
|
|
48
|
+
### 4. CLI Updates
|
|
49
|
+
|
|
50
|
+
**File:** `devguard/cli.py`
|
|
51
|
+
|
|
52
|
+
- Updated `config` command to show npm security analysis status
|
|
53
|
+
|
|
54
|
+
### 5. Documentation
|
|
55
|
+
|
|
56
|
+
**File:** `README.md`
|
|
57
|
+
|
|
58
|
+
- Added section on "Deep npm Package Security Analysis"
|
|
59
|
+
- Documented configuration and usage
|
|
60
|
+
- Explained resource considerations
|
|
61
|
+
|
|
62
|
+
## Usage
|
|
63
|
+
|
|
64
|
+
### Enable Deep npm Security Analysis
|
|
65
|
+
|
|
66
|
+
```bash
|
|
67
|
+
# In .env file
|
|
68
|
+
NPM_SECURITY_ENABLED=true
|
|
69
|
+
NPM_PACKAGES_TO_MONITOR=package1,package2
|
|
70
|
+
```
|
|
71
|
+
|
|
72
|
+
### Run Guardian Checks
|
|
73
|
+
|
|
74
|
+
```bash
|
|
75
|
+
devguard check
|
|
76
|
+
```
|
|
77
|
+
|
|
78
|
+
The deep security analysis will run automatically and report findings as vulnerabilities in the standard devguard report format.
|
|
79
|
+
|
|
80
|
+
## Architecture
|
|
81
|
+
|
|
82
|
+
```
|
|
83
|
+
Guardian.run_checks()
|
|
84
|
+
↓
|
|
85
|
+
[NpmChecker.check()] # Basic vulnerability checking (npm audit)
|
|
86
|
+
[NpmSecurityChecker.check()] # Deep security analysis (secrets, obfuscation, etc.)
|
|
87
|
+
↓
|
|
88
|
+
GuardianReport (unified results)
|
|
89
|
+
↓
|
|
90
|
+
Reporter.report()
|
|
91
|
+
```
|
|
92
|
+
|
|
93
|
+
## Benefits
|
|
94
|
+
|
|
95
|
+
1. **Unified Reporting**: All security findings appear in the same devguard report
|
|
96
|
+
2. **Integration**: Works with existing devguard features (webhooks, email alerts, dashboard)
|
|
97
|
+
3. **Metrics**: Security findings tracked in Prometheus metrics
|
|
98
|
+
4. **Consistency**: Uses same vulnerability model as other checkers
|
|
99
|
+
|
|
100
|
+
## Differences from Standalone Scripts
|
|
101
|
+
|
|
102
|
+
- **Standalone scripts** (`devguard/scripts/redteam_npm_packages.py`):
|
|
103
|
+
- Run independently
|
|
104
|
+
- Generate detailed JSON/Markdown reports
|
|
105
|
+
- Useful for one-off deep analysis
|
|
106
|
+
|
|
107
|
+
- **Integrated checker** (`devguard/checkers/npm_security.py`):
|
|
108
|
+
- Runs as part of devguard monitoring
|
|
109
|
+
- Reports through devguard's unified system
|
|
110
|
+
- Integrates with alerts, dashboard, metrics
|
|
111
|
+
- Better for continuous monitoring
|
|
112
|
+
|
|
113
|
+
## Future Enhancements
|
|
114
|
+
|
|
115
|
+
- Add caching to avoid re-downloading packages on every check
|
|
116
|
+
- Add rate limiting for package downloads
|
|
117
|
+
- Support for analyzing specific versions
|
|
118
|
+
- Integration with CI/CD pipelines
|
|
119
|
+
- Historical tracking of security findings
|
|
120
|
+
|
|
121
|
+
|
devguard/__init__.py
ADDED
devguard/__main__.py
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
"""Checkers for various services."""
|
|
2
|
+
|
|
3
|
+
from devguard.checkers.api_usage import APIUsageChecker
|
|
4
|
+
from devguard.checkers.aws_cost import AWSCostChecker
|
|
5
|
+
from devguard.checkers.aws_iam import AWSIAMChecker
|
|
6
|
+
from devguard.checkers.base import BaseChecker
|
|
7
|
+
from devguard.checkers.container import ContainerChecker
|
|
8
|
+
from devguard.checkers.domain import DomainChecker
|
|
9
|
+
from devguard.checkers.firecrawl import FirecrawlChecker
|
|
10
|
+
from devguard.checkers.fly import FlyChecker
|
|
11
|
+
from devguard.checkers.github import GitHubChecker
|
|
12
|
+
from devguard.checkers.npm import NpmChecker
|
|
13
|
+
from devguard.checkers.npm_security import NpmSecurityChecker
|
|
14
|
+
from devguard.checkers.redteam import RedTeamChecker
|
|
15
|
+
from devguard.checkers.secret import SecretChecker
|
|
16
|
+
from devguard.checkers.swarm import SwarmChecker
|
|
17
|
+
from devguard.checkers.tailscale import TailscaleChecker
|
|
18
|
+
from devguard.checkers.tailsnitch import TailsnitchChecker
|
|
19
|
+
from devguard.checkers.tavily import TavilyChecker
|
|
20
|
+
from devguard.checkers.vercel import VercelChecker
|
|
21
|
+
|
|
22
|
+
__all__ = [
|
|
23
|
+
"APIUsageChecker",
|
|
24
|
+
"AWSCostChecker",
|
|
25
|
+
"AWSIAMChecker",
|
|
26
|
+
"BaseChecker",
|
|
27
|
+
"ContainerChecker",
|
|
28
|
+
"DomainChecker",
|
|
29
|
+
"NpmChecker",
|
|
30
|
+
"NpmSecurityChecker",
|
|
31
|
+
"GitHubChecker",
|
|
32
|
+
"FlyChecker",
|
|
33
|
+
"VercelChecker",
|
|
34
|
+
"FirecrawlChecker",
|
|
35
|
+
"SwarmChecker",
|
|
36
|
+
"TailscaleChecker",
|
|
37
|
+
"TailsnitchChecker",
|
|
38
|
+
"TavilyChecker",
|
|
39
|
+
"RedTeamChecker",
|
|
40
|
+
"SecretChecker",
|
|
41
|
+
]
|
|
@@ -0,0 +1,523 @@
|
|
|
1
|
+
"""Unified API Usage/Credits checker for LLM providers."""
|
|
2
|
+
|
|
3
|
+
import asyncio
|
|
4
|
+
import logging
|
|
5
|
+
import os
|
|
6
|
+
from datetime import UTC, datetime
|
|
7
|
+
from typing import Any
|
|
8
|
+
|
|
9
|
+
import httpx
|
|
10
|
+
|
|
11
|
+
from devguard.checkers.base import BaseChecker
|
|
12
|
+
from devguard.models import APIUsage, CheckResult, Finding, Severity
|
|
13
|
+
|
|
14
|
+
logger = logging.getLogger(__name__)
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def _get_secret(secret) -> str | None:
|
|
18
|
+
"""Extract secret value from SecretStr or return string directly."""
|
|
19
|
+
if secret is None:
|
|
20
|
+
return None
|
|
21
|
+
if hasattr(secret, "get_secret_value"):
|
|
22
|
+
return secret.get_secret_value()
|
|
23
|
+
return str(secret)
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
class APIUsageChecker(BaseChecker):
    """Check usage/credits across multiple API providers.

    Monitors:
    - Anthropic (Admin API)
    - OpenAI (Usage API)
    - OpenRouter (Credits)
    - Perplexity (undocumented)
    - Groq (undocumented)

    Alerts when:
    - Credits/balance is below threshold
    - API key is invalid or expired
    - Usage is unusually high
    """

    check_type = "api_usage"

    # Thresholds for warnings
    LOW_CREDITS_THRESHOLD_USD = 5.0
    LOW_CREDITS_PERCENT = 10  # Warn when <10% remaining

    # Budget thresholds (daily/monthly)
    DAILY_BUDGET_OPENROUTER = 5.0  # USD per day
    MONTHLY_BUDGET_OPENROUTER = 50.0  # USD per month
    BUDGET_ALERT_THRESHOLD_PCT = 80.0  # Alert when >80% of budget used

    async def check(self) -> CheckResult:
        """Check API usage across all configured providers.

        All provider probes run concurrently. A provider that raises is
        logged (and recorded in ``errors`` unless it looks like a config
        problem); a provider that returns ``None`` (key not configured,
        or not an admin key) is skipped silently.
        """
        api_usage: list[APIUsage] = []
        findings: list[Finding] = []
        errors: list[str] = []

        # Run all checks in parallel; return_exceptions=True keeps one
        # provider's failure from cancelling the others.
        results = await asyncio.gather(
            self._check_openrouter(),
            self._check_anthropic(),
            self._check_openai(),
            self._check_perplexity(),
            self._check_groq(),
            return_exceptions=True,
        )

        # Must stay in the same order as the gather() call above.
        provider_names = ["openrouter", "anthropic", "openai", "perplexity", "groq"]

        for provider, result in zip(provider_names, results):
            if isinstance(result, Exception):
                # Log all exceptions for debugging
                logger.warning(
                    f"{provider} check raised exception: {type(result).__name__}: {result}"
                )
                if not isinstance(result, (ValueError, KeyError)):
                    # Only add to errors for non-config errors
                    errors.append(f"{provider}: {result}")
                continue

            if result is None:
                logger.debug(f"{provider}: No result (key not configured or not admin key)")
                continue

            usage, provider_findings = result
            if usage:
                api_usage.append(usage)
                logger.debug(f"{provider}: Added usage data (credits_used={usage.credits_used})")
            else:
                logger.debug(f"{provider}: No usage object returned")
            findings.extend(provider_findings)

        return CheckResult(
            check_type=self.check_type,
            success=len(errors) == 0 and not any(f.severity == Severity.CRITICAL for f in findings),
            api_usage=api_usage,
            findings=findings,
            errors=errors,
            metadata={
                "providers_checked": len(api_usage),
                "providers_with_issues": len(
                    [f for f in findings if f.severity in [Severity.HIGH, Severity.CRITICAL]]
                ),
            },
        )

    async def _check_openrouter(self) -> tuple[APIUsage | None, list[Finding]]:
        """Check OpenRouter credits.

        Returns ``(None, [])`` when no key is configured or the request
        fails for a non-auth reason; a 401 produces a HIGH finding.
        """
        api_key = _get_secret(self.settings.openrouter_api_key)
        if not api_key:
            return None, []

        findings: list[Finding] = []

        try:
            async with httpx.AsyncClient() as client:
                response = await client.get(
                    "https://openrouter.ai/api/v1/credits",
                    headers={"Authorization": f"Bearer {api_key}"},
                    timeout=10.0,
                )
                response.raise_for_status()
                data = response.json()

                # Response may wrap the payload in a "data" envelope.
                credits_data = data.get("data", data)
                total = float(credits_data.get("total_credits", 0))
                used = float(credits_data.get("total_usage", 0))
                remaining = total - used

                usage = APIUsage(
                    service="openrouter",
                    credits_total=total,
                    credits_used=used,
                    credits_remaining=remaining,
                    usage_percent=(used / total * 100) if total > 0 else 0,
                )

                # Check for low credits
                if remaining < self.LOW_CREDITS_THRESHOLD_USD:
                    severity = Severity.HIGH if remaining > 0 else Severity.CRITICAL
                    findings.append(
                        Finding(
                            severity=severity,
                            title=f"OpenRouter credits low: ${remaining:.2f} remaining",
                            description=(
                                f"OpenRouter balance is ${remaining:.2f} "
                                f"(used ${used:.2f} of ${total:.2f}, {usage.usage_percent:.1f}% used)"
                            ),
                            resource="openrouter",
                            remediation="Add credits at https://openrouter.ai/credits",
                        )
                    )

                # Check for high usage percentage (even if credits remain)
                if usage.usage_percent > 90:
                    severity = Severity.CRITICAL if usage.usage_percent > 95 else Severity.HIGH
                    findings.append(
                        Finding(
                            severity=severity,
                            title=f"OpenRouter usage critical: {usage.usage_percent:.1f}% used",
                            description=(
                                f"OpenRouter has used {usage.usage_percent:.1f}% of purchased credits "
                                f"(${used:.2f} of ${total:.2f}). Only ${remaining:.2f} remaining."
                            ),
                            resource="openrouter",
                            remediation="Review usage patterns and add credits if needed",
                        )
                    )

                # Budget alerts (based on recent usage patterns)
                # Note: This requires tracking daily usage, which we'll get from shared tracker
                # For now, we estimate based on total usage vs time
                # TODO: Integrate with shared_usage_tracker for accurate daily/monthly tracking

                return usage, findings

        except httpx.HTTPStatusError as e:
            # FIX: the original had two except clauses for HTTPStatusError;
            # the second (which logged non-401 errors) was unreachable.
            if e.response.status_code == 401:
                findings.append(
                    Finding(
                        severity=Severity.HIGH,
                        title="OpenRouter API key invalid",
                        description="The OPENROUTER_API_KEY is invalid or expired",
                        resource="openrouter",
                        remediation="Regenerate API key at https://openrouter.ai/keys",
                    )
                )
                return None, findings
            logger.warning(
                f"OpenRouter HTTP error: {e.response.status_code} - {e.response.text[:200]}"
            )
            return None, []
        except httpx.RequestError as e:
            logger.warning(f"OpenRouter network error: {e}")
            return None, []
        except Exception as e:
            logger.error(f"OpenRouter unexpected error: {e}", exc_info=True)
            return None, []

    async def _check_anthropic(self) -> tuple[APIUsage | None, list[Finding]]:
        """Check Anthropic usage via Admin API.

        Requires an admin key (``sk-ant-admin...``); with a regular key
        no usage data is available and the provider is skipped.
        """
        api_key = _get_secret(self.settings.anthropic_api_key)
        if not api_key:
            return None, []

        # Admin API requires sk-ant-admin... key.
        # _get_secret already extracts the actual value, so we can check directly.
        if not api_key.startswith("sk-ant-admin"):
            return None, []  # Can't check usage without admin key

        findings: list[Finding] = []

        try:
            async with httpx.AsyncClient() as client:
                # Get this month's usage
                today = datetime.now(UTC)
                start_of_month = today.replace(day=1).strftime("%Y-%m-%d")
                end_date = today.strftime("%Y-%m-%d")

                # Use the correct Usage & Cost Admin API endpoint
                # See: https://platform.claude.com/docs/en/build-with-claude/usage-cost-api
                response = await client.get(
                    "https://api.anthropic.com/v1/organizations/usage_report/messages",
                    headers={
                        "anthropic-version": "2023-06-01",
                        "x-api-key": api_key,
                    },
                    params={
                        "starting_at": f"{start_of_month}T00:00:00Z",
                        "ending_at": f"{end_date}T23:59:59Z",
                        "bucket_width": "1d",
                    },
                    timeout=30.0,
                )
                response.raise_for_status()
                data: Any = response.json()

                # Parse Usage & Cost Admin API response. Structure:
                # {"data": [{"starting_at": ..., "ending_at": ..., "results": [...]}],
                #  "has_more": bool, "next_page": str}
                total_cost = 0.0
                total_tokens = 0
                total_requests = 0

                if isinstance(data, dict):
                    for bucket in data.get("data", []):
                        for result in bucket.get("results", []):
                            # Each result has usage and cost data.
                            bucket_usage = result.get("usage", {})
                            cost = result.get("cost", {})

                            # Aggregate tokens (fall back to input+output when
                            # no precomputed total is present).
                            if isinstance(bucket_usage, dict):
                                total_tokens += bucket_usage.get(
                                    "total_tokens",
                                    bucket_usage.get("input_tokens", 0)
                                    + bucket_usage.get("output_tokens", 0),
                                )
                                total_requests += bucket_usage.get("requests", 0)

                            # Aggregate cost; the API may return a dict or a number.
                            if isinstance(cost, dict):
                                total_cost += cost.get("total", cost.get("amount", 0))
                            elif isinstance(cost, (int, float)):
                                total_cost += cost

                # If no data found, totals stay 0 (may be no usage this month)
                usage = APIUsage(
                    service="anthropic",
                    credits_used=total_cost,
                    usage_percent=0,  # No credit system, pay-as-you-go
                    period_start=start_of_month,
                    period_end=end_date,
                    metadata={"total_tokens": total_tokens, "total_requests": total_requests},
                )
                return usage, findings

        except httpx.HTTPStatusError as e:
            # FIX: the original had a duplicate except clause for
            # HTTPStatusError whose else-branch logging never ran.
            if e.response.status_code in [401, 403]:
                # Admin key might be invalid
                findings.append(
                    Finding(
                        severity=Severity.MEDIUM,
                        title="Anthropic Admin API access failed",
                        description="Could not access Anthropic Usage API - check admin key permissions",
                        resource="anthropic",
                        remediation="Ensure ANTHROPIC_API_KEY is a valid Admin API key (sk-ant-admin...)",
                    )
                )
            else:
                logger.warning(
                    f"Anthropic HTTP error: {e.response.status_code} - {e.response.text[:200]}"
                )
            return None, findings
        except httpx.RequestError as e:
            logger.warning(f"Anthropic network error: {e}")
            return None, []
        except Exception as e:
            logger.error(f"Anthropic unexpected error: {e}", exc_info=True)
            return None, []

    async def _check_openai(self) -> tuple[APIUsage | None, list[Finding]]:
        """Check OpenAI usage via Usage API.

        Tries the legacy /v1/usage endpoint first, then the organization
        usage endpoint; cost data is fetched best-effort from
        /v1/organization/costs.
        """
        api_key = _get_secret(self.settings.openai_api_key)
        if not api_key:
            return None, []

        findings: list[Finding] = []

        try:
            async with httpx.AsyncClient() as client:
                today = datetime.now(UTC)
                start_of_month = today.replace(day=1).strftime("%Y-%m-%d")

                # Try the usage endpoint - OpenAI requires organization header for some endpoints
                headers = {"Authorization": f"Bearer {api_key}"}

                # Try with organization header if available
                org_id = os.getenv("OPENAI_ORG_ID")
                if org_id:
                    headers["OpenAI-Organization"] = org_id

                response = await client.get(
                    "https://api.openai.com/v1/usage",
                    headers=headers,
                    params={
                        "start_date": start_of_month,
                        "end_date": today.strftime("%Y-%m-%d"),
                    },
                    timeout=30.0,
                )

                # If that fails, try the organization endpoint
                if response.status_code != 200:
                    response = await client.get(
                        "https://api.openai.com/v1/organization/usage",
                        headers=headers,
                        params={
                            "start_time": f"{start_of_month}T00:00:00Z",
                            "end_time": f"{today.strftime('%Y-%m-%d')}T23:59:59Z",
                            "interval": "1d",
                        },
                        timeout=30.0,
                    )

                if response.status_code == 200:
                    data = response.json()
                    usage_data = data.get("data", [])

                    total_tokens = sum(
                        entry.get("input_tokens", 0) + entry.get("output_tokens", 0)
                        for entry in usage_data
                    )

                    # Try to get costs (best-effort; failures leave cost at 0).
                    # FIX: reuse the org-aware headers dict — the original
                    # dropped the OpenAI-Organization header here.
                    cost_response = await client.get(
                        "https://api.openai.com/v1/organization/costs",
                        headers=headers,
                        params={
                            "start_time": f"{start_of_month}T00:00:00Z",
                            "end_time": f"{today.strftime('%Y-%m-%d')}T23:59:59Z",
                            "interval": "1d",
                        },
                        timeout=30.0,
                    )

                    total_cost = 0.0
                    if cost_response.status_code == 200:
                        cost_data = cost_response.json()
                        for bucket in cost_data.get("data", {}).get("results", []):
                            total_cost += bucket.get("amount", {}).get("value", 0)

                    usage = APIUsage(
                        service="openai",
                        credits_used=total_cost,
                        usage_percent=0,
                        period_start=start_of_month,
                        period_end=today.strftime("%Y-%m-%d"),
                        metadata={"total_tokens": total_tokens},
                    )
                    return usage, findings
                else:
                    # API key might not have usage permissions
                    return None, []

        except httpx.HTTPStatusError as e:
            if e.response.status_code in [401, 403]:
                findings.append(
                    Finding(
                        severity=Severity.MEDIUM,
                        title="OpenAI Usage API access failed",
                        description="Could not access OpenAI Usage API - check API key permissions",
                        resource="openai",
                        remediation="Ensure OPENAI_API_KEY has 'api.usage.read' scope",
                    )
                )
            return None, findings
        except Exception as e:
            logger.debug(f"OpenAI check failed: {e}")
            return None, []

    async def _check_perplexity(self) -> tuple[APIUsage | None, list[Finding]]:
        """Check Perplexity API (undocumented - just validate key)."""
        api_key = _get_secret(self.settings.perplexity_api_key)
        if not api_key:
            return None, []

        findings: list[Finding] = []

        try:
            async with httpx.AsyncClient() as client:
                # Perplexity doesn't have a public usage API, just validate key works
                response = await client.get(
                    "https://api.perplexity.ai/models",
                    headers={"Authorization": f"Bearer {api_key}"},
                    timeout=10.0,
                )

                # If that fails, try chat completions endpoint
                if response.status_code != 200:
                    response = await client.post(
                        "https://api.perplexity.ai/chat/completions",
                        headers={
                            "Authorization": f"Bearer {api_key}",
                            "Content-Type": "application/json",
                        },
                        json={
                            "model": "llama-3.1-sonar-small-128k-online",
                            "messages": [{"role": "user", "content": "test"}],
                        },
                        timeout=10.0,
                    )

                if response.status_code == 200:
                    usage = APIUsage(
                        service="perplexity",
                        credits_remaining=-1,  # Unknown
                        usage_percent=0,
                        metadata={"status": "key_valid"},
                    )
                    return usage, findings
                elif response.status_code == 401:
                    findings.append(
                        Finding(
                            severity=Severity.HIGH,
                            title="Perplexity API key invalid",
                            description="The PERPLEXITY_API_KEY is invalid or expired",
                            resource="perplexity",
                            remediation="Regenerate API key at https://www.perplexity.ai/settings/api",
                        )
                    )
                    return None, findings

                # FIX: explicit return for other status codes; the original
                # fell off the end and returned a bare None instead of the
                # declared tuple.
                return None, []

        except Exception as e:
            logger.debug(f"Perplexity check failed: {e}")
            return None, []

    async def _check_groq(self) -> tuple[APIUsage | None, list[Finding]]:
        """Check Groq API (validate key, no usage API)."""
        api_key = _get_secret(self.settings.groq_api_key)
        if not api_key:
            return None, []

        findings: list[Finding] = []

        try:
            async with httpx.AsyncClient() as client:
                # Groq doesn't have a public usage API, just validate key works
                response = await client.get(
                    "https://api.groq.com/openai/v1/models",
                    headers={"Authorization": f"Bearer {api_key}"},
                    timeout=10.0,
                )

                if response.status_code == 200:
                    usage = APIUsage(
                        service="groq",
                        credits_remaining=-1,  # Unknown (free tier)
                        usage_percent=0,
                        metadata={"status": "key_valid"},
                    )
                    return usage, findings
                elif response.status_code == 401:
                    findings.append(
                        Finding(
                            severity=Severity.HIGH,
                            title="Groq API key invalid",
                            description="The GROQ_API_KEY is invalid or expired",
                            resource="groq",
                            remediation="Regenerate API key at https://console.groq.com/keys",
                        )
                    )
                    return None, findings

                # Other status codes: treat as "no data" without a finding.
                return None, []

        except httpx.HTTPStatusError as e:
            logger.warning(f"Groq HTTP error: {e.response.status_code} - {e.response.text[:200]}")
            return None, []
        except httpx.RequestError as e:
            logger.warning(f"Groq network error: {e}")
            return None, []
        except Exception as e:
            logger.error(f"Groq unexpected error: {e}", exc_info=True)
            return None, []
|