claude-mpm 4.3.22__py3-none-any.whl → 4.4.0__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- claude_mpm/VERSION +1 -1
- claude_mpm/cli/commands/doctor.py +2 -2
- claude_mpm/hooks/memory_integration_hook.py +1 -1
- claude_mpm/services/agents/memory/content_manager.py +5 -2
- claude_mpm/services/agents/memory/memory_file_service.py +1 -0
- claude_mpm/services/agents/memory/memory_limits_service.py +1 -0
- claude_mpm/services/unified/__init__.py +65 -0
- claude_mpm/services/unified/analyzer_strategies/__init__.py +44 -0
- claude_mpm/services/unified/analyzer_strategies/code_analyzer.py +473 -0
- claude_mpm/services/unified/analyzer_strategies/dependency_analyzer.py +643 -0
- claude_mpm/services/unified/analyzer_strategies/performance_analyzer.py +804 -0
- claude_mpm/services/unified/analyzer_strategies/security_analyzer.py +661 -0
- claude_mpm/services/unified/analyzer_strategies/structure_analyzer.py +696 -0
- claude_mpm/services/unified/deployment_strategies/__init__.py +97 -0
- claude_mpm/services/unified/deployment_strategies/base.py +557 -0
- claude_mpm/services/unified/deployment_strategies/cloud_strategies.py +486 -0
- claude_mpm/services/unified/deployment_strategies/local.py +594 -0
- claude_mpm/services/unified/deployment_strategies/utils.py +672 -0
- claude_mpm/services/unified/deployment_strategies/vercel.py +471 -0
- claude_mpm/services/unified/interfaces.py +499 -0
- claude_mpm/services/unified/migration.py +532 -0
- claude_mpm/services/unified/strategies.py +551 -0
- claude_mpm/services/unified/unified_analyzer.py +534 -0
- claude_mpm/services/unified/unified_config.py +688 -0
- claude_mpm/services/unified/unified_deployment.py +470 -0
- {claude_mpm-4.3.22.dist-info → claude_mpm-4.4.0.dist-info}/METADATA +1 -1
- {claude_mpm-4.3.22.dist-info → claude_mpm-4.4.0.dist-info}/RECORD +31 -12
- {claude_mpm-4.3.22.dist-info → claude_mpm-4.4.0.dist-info}/WHEEL +0 -0
- {claude_mpm-4.3.22.dist-info → claude_mpm-4.4.0.dist-info}/entry_points.txt +0 -0
- {claude_mpm-4.3.22.dist-info → claude_mpm-4.4.0.dist-info}/licenses/LICENSE +0 -0
- {claude_mpm-4.3.22.dist-info → claude_mpm-4.4.0.dist-info}/top_level.txt +0 -0
claude_mpm/services/unified/analyzer_strategies/dependency_analyzer.py
@@ -0,0 +1,643 @@
"""
Dependency Analyzer Strategy Implementation
===========================================

Analyzes project dependencies, package management, and dependency graphs.
Consolidates dependency analysis functionality from multiple services.

Author: Claude MPM Development Team
Created: 2025-01-26
"""

import json
import re
import subprocess
from pathlib import Path
from typing import Any, Dict, List, Optional, Set, Tuple

from claude_mpm.core.logging_utils import get_logger

from ..strategies import AnalyzerStrategy, StrategyContext, StrategyMetadata, StrategyPriority

logger = get_logger(__name__)


class DependencyAnalyzerStrategy(AnalyzerStrategy):
    """
    Strategy for analyzing project dependencies and package management.

    Consolidates:
    - Package manager detection
    - Dependency graph analysis
    - Version conflict detection
    - Security vulnerability scanning
    - License compliance checking
    """

    # Package manager configurations
    PACKAGE_MANAGERS = {
        "package.json": "npm",
        "yarn.lock": "yarn",
        "pnpm-lock.yaml": "pnpm",
        "package-lock.json": "npm",
        "requirements.txt": "pip",
        "Pipfile": "pipenv",
        "poetry.lock": "poetry",
        "pyproject.toml": "pip",
        "Cargo.toml": "cargo",
        "go.mod": "go",
        "pom.xml": "maven",
        "build.gradle": "gradle",
        "composer.json": "composer",
        "Gemfile": "bundler",
        "mix.exs": "mix",
        "pubspec.yaml": "pub",
    }

    # Database-related dependencies
    DATABASE_PACKAGES = {
        "postgresql": ["psycopg2", "pg", "postgres", "postgresql", "node-postgres"],
        "mysql": ["mysql", "mysql2", "mysqlclient", "mysql-connector"],
        "sqlite": ["sqlite3", "better-sqlite3"],
        "mongodb": ["mongodb", "mongoose", "pymongo", "motor"],
        "redis": ["redis", "ioredis", "redis-py"],
        "elasticsearch": ["elasticsearch", "@elastic/elasticsearch"],
        "cassandra": ["cassandra-driver"],
        "neo4j": ["neo4j", "neo4j-driver"],
    }

    # Testing framework packages
    TESTING_PACKAGES = {
        "python": ["pytest", "unittest", "nose", "nose2", "tox", "coverage"],
        "javascript": ["jest", "mocha", "chai", "jasmine", "cypress", "playwright", "vitest"],
        "java": ["junit", "testng", "mockito", "assertj"],
        "ruby": ["rspec", "minitest", "cucumber"],
        "go": ["testify", "ginkgo", "gomega"],
        "rust": ["test", "quickcheck", "proptest"],
    }

    # Web framework packages
    FRAMEWORK_PACKAGES = {
        "python": ["django", "flask", "fastapi", "pyramid", "tornado", "aiohttp"],
        "javascript": ["express", "koa", "fastify", "hapi", "nestjs", "next", "nuxt", "gatsby"],
        "ruby": ["rails", "sinatra", "hanami"],
        "java": ["spring", "spring-boot", "struts", "play"],
        "php": ["laravel", "symfony", "slim", "lumen"],
    }

    def __init__(self):
        """Initialize dependency analyzer strategy."""
        metadata = StrategyMetadata(
            name="DependencyAnalyzer",
            description="Analyzes project dependencies and package management",
            supported_types=["project", "package", "lockfile", "manifest"],
            supported_operations=["analyze", "detect", "graph", "vulnerabilities"],
            priority=StrategyPriority.HIGH,
            tags={"dependencies", "packages", "versions", "security"},
        )
        super().__init__(metadata)

    def can_handle(self, context: StrategyContext) -> bool:
        """Check if strategy can handle the given context."""
        return (
            context.target_type in self.metadata.supported_types
            and context.operation in self.metadata.supported_operations
        )

    def validate_input(self, input_data: Any) -> List[str]:
        """Validate input data for strategy."""
        errors = []

        if not input_data:
            errors.append("Input data is required")
            return errors

        if isinstance(input_data, (str, Path)):
            path = Path(input_data)
            if not path.exists():
                errors.append(f"Path does not exist: {path}")
        elif not isinstance(input_data, dict):
            errors.append(f"Invalid input type: {type(input_data).__name__}")

        return errors

    def analyze(
        self, target: Any, options: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """
        Execute dependency analysis on target.

        Args:
            target: Project directory or package manifest to analyze
            options: Analysis options (depth, check_vulnerabilities, etc.)

        Returns:
            Analysis results with dependency information
        """
        options = options or {}

        if isinstance(target, (str, Path)):
            target_path = Path(target)

            if target_path.is_dir():
                return self._analyze_project(target_path, options)
            elif target_path.is_file():
                return self._analyze_manifest(target_path, options)

        return {
            "status": "error",
            "message": f"Unsupported target type: {type(target).__name__}",
        }

    def _analyze_project(self, project_path: Path, options: Dict[str, Any]) -> Dict[str, Any]:
        """Analyze dependencies in a project directory."""
        results = {
            "status": "success",
            "type": "project",
            "path": str(project_path),
            "package_managers": [],
            "dependencies": {},
            "dev_dependencies": {},
            "frameworks": [],
            "databases": [],
            "testing_tools": [],
        }

        # Detect package managers
        detected_managers = self._detect_package_managers(project_path)
        results["package_managers"] = detected_managers

        # Analyze each package manager's dependencies
        for manager in detected_managers:
            manager_deps = self._analyze_package_manager(project_path, manager, options)
            if manager_deps:
                results["dependencies"][manager] = manager_deps.get("dependencies", {})
                results["dev_dependencies"][manager] = manager_deps.get("dev_dependencies", {})

        # Detect frameworks, databases, and testing tools
        all_deps = self._flatten_dependencies(results["dependencies"])
        all_deps.update(self._flatten_dependencies(results["dev_dependencies"]))

        results["frameworks"] = self._detect_frameworks(all_deps)
        results["databases"] = self._detect_databases(all_deps)
        results["testing_tools"] = self._detect_testing_tools(all_deps)

        # Check for security vulnerabilities if requested
        if options.get("check_vulnerabilities", False):
            results["vulnerabilities"] = self._check_vulnerabilities(project_path)

        # Generate dependency statistics
        results["statistics"] = self._calculate_statistics(results)

        return results
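
    # Illustrative result shape for a small Node project (values abridged,
    # version string hypothetical):
    #   {"status": "success", "type": "project", "package_managers": ["npm"],
    #    "dependencies": {"npm": {"express": "^4.18.2"}},
    #    "frameworks": ["express"], "statistics": {"total_dependencies": 1, ...}}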

    def _analyze_manifest(self, manifest_path: Path, options: Dict[str, Any]) -> Dict[str, Any]:
        """Analyze a specific package manifest file."""
        results = {
            "status": "success",
            "type": "manifest",
            "path": str(manifest_path),
            "dependencies": {},
            "dev_dependencies": {},
        }

        # Determine package manager from file
        manager = self.PACKAGE_MANAGERS.get(manifest_path.name)

        # Distinguish pyproject.toml variants (the table above defaults it to
        # pip, so check the file contents for a poetry section)
        if manifest_path.name == "pyproject.toml":
            content = manifest_path.read_text()
            manager = "poetry" if "[tool.poetry]" in content else "pip"

        if not manager:
            return {
                "status": "error",
                "message": f"Unknown manifest file: {manifest_path.name}",
            }

        # Parse manifest based on type
        if manifest_path.name == "package.json":
            results.update(self._parse_package_json(manifest_path))
        elif manifest_path.name == "requirements.txt":
            results.update(self._parse_requirements_txt(manifest_path))
        elif manifest_path.name == "pyproject.toml":
            results.update(self._parse_pyproject_toml(manifest_path))
        elif manifest_path.name == "Cargo.toml":
            results.update(self._parse_cargo_toml(manifest_path))
        elif manifest_path.name == "go.mod":
            results.update(self._parse_go_mod(manifest_path))

        return results

    def _detect_package_managers(self, project_path: Path) -> List[str]:
        """Detect all package managers used in the project."""
        managers = []

        # Check for lock files first (more specific)
        lock_files = {
            "yarn.lock": "yarn",
            "pnpm-lock.yaml": "pnpm",
            "package-lock.json": "npm",
            "poetry.lock": "poetry",
            "Pipfile.lock": "pipenv",
            "Cargo.lock": "cargo",
            "go.sum": "go",
            "composer.lock": "composer",
            "Gemfile.lock": "bundler",
        }

        for lock_file, manager in lock_files.items():
            if (project_path / lock_file).exists():
                if manager not in managers:
                    managers.append(manager)

        # Check for manifest files
        for manifest_file, manager in self.PACKAGE_MANAGERS.items():
            if (project_path / manifest_file).exists():
                # Special handling for pyproject.toml
                if manifest_file == "pyproject.toml":
                    content = (project_path / manifest_file).read_text()
                    if "[tool.poetry]" in content:
                        manager = "poetry"
                    elif "[tool.setuptools]" in content or "[project]" in content:
                        manager = "pip"

                if manager not in managers:
                    managers.append(manager)

        return managers
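
    # Illustrative: a project containing both yarn.lock and package.json
    # yields ["yarn", "npm"], since lock files are scanned before manifests.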

    def _analyze_package_manager(
        self, project_path: Path, manager: str, options: Dict[str, Any]
    ) -> Optional[Dict[str, Any]]:
        """Analyze dependencies for a specific package manager."""
        try:
            if manager in ["npm", "yarn", "pnpm"]:
                return self._analyze_node_dependencies(project_path, manager)
            elif manager in ["pip", "pipenv", "poetry"]:
                return self._analyze_python_dependencies(project_path, manager)
            elif manager == "cargo":
                return self._analyze_cargo_dependencies(project_path)
            elif manager == "go":
                return self._analyze_go_dependencies(project_path)
            else:
                logger.debug(f"Unsupported package manager for analysis: {manager}")
                return None
        except Exception as e:
            logger.error(f"Error analyzing {manager} dependencies: {e}")
            return None

    def _analyze_node_dependencies(self, project_path: Path, manager: str) -> Dict[str, Any]:
        """Analyze Node.js dependencies."""
        package_json_path = project_path / "package.json"
        if not package_json_path.exists():
            return {}

        return self._parse_package_json(package_json_path)

    def _analyze_python_dependencies(self, project_path: Path, manager: str) -> Dict[str, Any]:
        """Analyze Python dependencies."""
        results = {"dependencies": {}, "dev_dependencies": {}}

        if manager == "pip":
            req_file = project_path / "requirements.txt"
            if req_file.exists():
                parsed = self._parse_requirements_txt(req_file)
                results["dependencies"] = parsed.get("dependencies", {})

            # Check for dev requirements
            dev_req_file = project_path / "requirements-dev.txt"
            if dev_req_file.exists():
                parsed = self._parse_requirements_txt(dev_req_file)
                results["dev_dependencies"] = parsed.get("dependencies", {})

        elif manager == "poetry":
            pyproject_path = project_path / "pyproject.toml"
            if pyproject_path.exists():
                results = self._parse_pyproject_toml(pyproject_path)

        elif manager == "pipenv":
            pipfile_path = project_path / "Pipfile"
            if pipfile_path.exists():
                results = self._parse_pipfile(pipfile_path)

        return results

    def _analyze_cargo_dependencies(self, project_path: Path) -> Dict[str, Any]:
        """Analyze Rust/Cargo dependencies."""
        cargo_toml_path = project_path / "Cargo.toml"
        if not cargo_toml_path.exists():
            return {}

        return self._parse_cargo_toml(cargo_toml_path)

    def _analyze_go_dependencies(self, project_path: Path) -> Dict[str, Any]:
        """Analyze Go dependencies."""
        go_mod_path = project_path / "go.mod"
        if not go_mod_path.exists():
            return {}

        return self._parse_go_mod(go_mod_path)

    def _parse_package_json(self, path: Path) -> Dict[str, Any]:
        """Parse package.json file."""
        try:
            with open(path, "r") as f:
                data = json.load(f)

            return {
                "dependencies": data.get("dependencies", {}),
                "dev_dependencies": data.get("devDependencies", {}),
                "peer_dependencies": data.get("peerDependencies", {}),
            }
        except Exception as e:
            logger.error(f"Error parsing package.json: {e}")
            return {}

    def _parse_requirements_txt(self, path: Path) -> Dict[str, Any]:
        """Parse requirements.txt file."""
        dependencies = {}

        try:
            content = path.read_text()
            for line in content.splitlines():
                line = line.strip()
                if line and not line.startswith("#"):
                    # Parse package spec
                    match = re.match(r"^([a-zA-Z0-9\-_.]+)([<>=!~]+.*)?$", line)
                    if match:
                        package = match.group(1)
                        version = match.group(2) or "*"
                        dependencies[package] = version

        except Exception as e:
            logger.error(f"Error parsing requirements.txt: {e}")

        return {"dependencies": dependencies}
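
    # Illustrative inputs for _parse_requirements_txt:
    #   "flask>=2.0" -> {"flask": ">=2.0"}
    #   "requests"   -> {"requests": "*"}
    # Comment lines are skipped; specs with extras or environment markers
    # (e.g. "requests[security]") do not match the regex and are ignored.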

    def _parse_pyproject_toml(self, path: Path) -> Dict[str, Any]:
        """Parse pyproject.toml file."""
        try:
            import tomllib
        except ImportError:
            try:
                import tomli as tomllib
            except ImportError:
                logger.warning("TOML parser not available")
                return {}

        try:
            with open(path, "rb") as f:
                data = tomllib.load(f)

            dependencies = {}
            dev_dependencies = {}

            # Check for poetry dependencies
            if "tool" in data and "poetry" in data["tool"]:
                poetry_data = data["tool"]["poetry"]
                dependencies = poetry_data.get("dependencies", {})
                dev_dependencies = poetry_data.get("dev-dependencies", {})

            # Check for PEP 621 dependencies
            elif "project" in data:
                project_data = data["project"]
                deps = project_data.get("dependencies", [])
                for dep in deps:
                    # Parse dependency string
                    match = re.match(r"^([a-zA-Z0-9\-_.]+)(.*)$", dep)
                    if match:
                        dependencies[match.group(1)] = match.group(2) or "*"

            return {
                "dependencies": dependencies,
                "dev_dependencies": dev_dependencies,
            }

        except Exception as e:
            logger.error(f"Error parsing pyproject.toml: {e}")
            return {}

    def _parse_pipfile(self, path: Path) -> Dict[str, Any]:
        """Parse Pipfile."""
        try:
            import tomllib
        except ImportError:
            try:
                import tomli as tomllib
            except ImportError:
                return {}

        try:
            with open(path, "rb") as f:
                data = tomllib.load(f)

            return {
                "dependencies": data.get("packages", {}),
                "dev_dependencies": data.get("dev-packages", {}),
            }
        except Exception as e:
            logger.error(f"Error parsing Pipfile: {e}")
            return {}

    def _parse_cargo_toml(self, path: Path) -> Dict[str, Any]:
        """Parse Cargo.toml file."""
        try:
            import tomllib
        except ImportError:
            try:
                import tomli as tomllib
            except ImportError:
                return {}

        try:
            with open(path, "rb") as f:
                data = tomllib.load(f)

            return {
                "dependencies": data.get("dependencies", {}),
                "dev_dependencies": data.get("dev-dependencies", {}),
            }
        except Exception as e:
            logger.error(f"Error parsing Cargo.toml: {e}")
            return {}

    def _parse_go_mod(self, path: Path) -> Dict[str, Any]:
        """Parse go.mod file."""
        dependencies = {}

        try:
            content = path.read_text()
            in_require = False

            for line in content.splitlines():
                line = line.strip()

                if line.startswith("require ("):
                    in_require = True
                    continue
                elif line == ")":
                    in_require = False
                    continue

                if in_require or line.startswith("require "):
                    # Parse dependency line
                    parts = line.replace("require ", "").split()
                    if len(parts) >= 2:
                        dependencies[parts[0]] = parts[1]

        except Exception as e:
            logger.error(f"Error parsing go.mod: {e}")

        return {"dependencies": dependencies}
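
    # Illustrative go.mod input:
    #   require (
    #       github.com/pkg/errors v0.9.1
    #   )
    # parses to {"dependencies": {"github.com/pkg/errors": "v0.9.1"}}.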

    def _flatten_dependencies(self, deps_dict: Dict[str, Dict[str, str]]) -> Dict[str, str]:
        """Flatten nested dependency dictionaries."""
        flattened = {}
        for manager_deps in deps_dict.values():
            flattened.update(manager_deps)
        return flattened

    def _detect_frameworks(self, dependencies: Dict[str, str]) -> List[str]:
        """Detect web frameworks from dependencies."""
        detected = []

        for frameworks in self.FRAMEWORK_PACKAGES.values():
            for framework in frameworks:
                if framework in dependencies:
                    detected.append(framework)

        return detected

    def _detect_databases(self, dependencies: Dict[str, str]) -> List[str]:
        """Detect database systems from dependencies."""
        detected = []

        for db_type, packages in self.DATABASE_PACKAGES.items():
            for package in packages:
                if package in dependencies:
                    if db_type not in detected:
                        detected.append(db_type)
                    break

        return detected

    def _detect_testing_tools(self, dependencies: Dict[str, str]) -> List[str]:
        """Detect testing tools from dependencies."""
        detected = []

        for tools in self.TESTING_PACKAGES.values():
            for tool in tools:
                if tool in dependencies:
                    detected.append(tool)

        return detected

    def _check_vulnerabilities(self, project_path: Path) -> Dict[str, Any]:
        """Check for known security vulnerabilities in dependencies."""
        vulnerabilities = {
            "total": 0,
            "critical": 0,
            "high": 0,
            "medium": 0,
            "low": 0,
            "details": [],
        }

        # This is a simplified placeholder.
        # In production, you would integrate with vulnerability databases
        # like npm audit, pip-audit, or safety.

        return vulnerabilities
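
    # A real integration might shell out to an audit tool with the subprocess
    # module imported above (hypothetical sketch, not part of this release):
    #   subprocess.run(["pip-audit", "--format", "json"],
    #                  cwd=project_path, capture_output=True, text=True)
    #   subprocess.run(["npm", "audit", "--json"],
    #                  cwd=project_path, capture_output=True, text=True)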

    def _calculate_statistics(self, results: Dict[str, Any]) -> Dict[str, Any]:
        """Calculate dependency statistics."""
        all_deps = self._flatten_dependencies(results.get("dependencies", {}))
        all_dev_deps = self._flatten_dependencies(results.get("dev_dependencies", {}))

        return {
            "total_dependencies": len(all_deps),
            "total_dev_dependencies": len(all_dev_deps),
            "package_managers_count": len(results.get("package_managers", [])),
            "frameworks_count": len(results.get("frameworks", [])),
            "databases_count": len(results.get("databases", [])),
            "testing_tools_count": len(results.get("testing_tools", [])),
        }

    def extract_metrics(self, analysis_result: Dict[str, Any]) -> Dict[str, Any]:
        """Extract key metrics from analysis results."""
        metrics = {}

        if analysis_result.get("status") != "success":
            return metrics

        # Extract dependency counts
        if "statistics" in analysis_result:
            metrics.update(analysis_result["statistics"])

        # Extract vulnerability metrics
        if "vulnerabilities" in analysis_result:
            vuln = analysis_result["vulnerabilities"]
            metrics.update({
                "vulnerability_total": vuln.get("total", 0),
                "vulnerability_critical": vuln.get("critical", 0),
                "vulnerability_high": vuln.get("high", 0),
            })

        return metrics

    def compare_results(
        self, baseline: Dict[str, Any], current: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Compare two analysis results."""
        comparison = {
            "added_dependencies": [],
            "removed_dependencies": [],
            "updated_dependencies": [],
            "vulnerability_changes": {},
        }

        # Compare dependencies
        baseline_deps = self._flatten_dependencies(baseline.get("dependencies", {}))
        current_deps = self._flatten_dependencies(current.get("dependencies", {}))

        # Find added dependencies
        for dep, version in current_deps.items():
            if dep not in baseline_deps:
                comparison["added_dependencies"].append({
                    "name": dep,
                    "version": version,
                })

        # Find removed dependencies
        for dep, version in baseline_deps.items():
            if dep not in current_deps:
                comparison["removed_dependencies"].append({
                    "name": dep,
                    "version": version,
                })

        # Find updated dependencies
        for dep in baseline_deps:
            if dep in current_deps and baseline_deps[dep] != current_deps[dep]:
                comparison["updated_dependencies"].append({
                    "name": dep,
                    "old_version": baseline_deps[dep],
                    "new_version": current_deps[dep],
                })

        # Compare vulnerability counts
        if "vulnerabilities" in baseline and "vulnerabilities" in current:
            baseline_vuln = baseline["vulnerabilities"]
            current_vuln = current["vulnerabilities"]

            comparison["vulnerability_changes"] = {
                "total": current_vuln.get("total", 0) - baseline_vuln.get("total", 0),
                "critical": current_vuln.get("critical", 0) - baseline_vuln.get("critical", 0),
                "high": current_vuln.get("high", 0) - baseline_vuln.get("high", 0),
            }

        return comparison
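
Usage sketch (not part of the diff): the new strategy can be driven directly once the wheel is installed. The import path below follows the file layout listed above; the project path and options are placeholders.

    from pathlib import Path

    from claude_mpm.services.unified.analyzer_strategies.dependency_analyzer import (
        DependencyAnalyzerStrategy,
    )

    analyzer = DependencyAnalyzerStrategy()
    project = Path("./my-project")  # placeholder path

    if not analyzer.validate_input(project):
        # check_vulnerabilities currently returns zeroed placeholder counts
        result = analyzer.analyze(project, options={"check_vulnerabilities": True})
        print(result["package_managers"], result["statistics"])
        print(analyzer.extract_metrics(result))
        # compare_results(baseline, current) diffs two such result dicts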