skip-trace 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,202 @@
1
+ # skip_trace/collectors/whois.py
2
+ from __future__ import annotations
3
+
4
+ import datetime as _dt
5
+ import logging
6
+ from typing import Any, Dict, List, Optional
7
+
8
# Optional third-party lookup clients. _lookup() treats a missing client as
# "not available" (it checks these names for None), so the imports must be
# genuinely optional instead of hard failures at module import time.
try:
    import whois as python_whois  # type: ignore[import-untyped]
except ImportError:
    python_whois = None  # type: ignore[assignment]

try:
    from whoisit import domain as rdap_domain
except ImportError:
    rdap_domain = None  # type: ignore[assignment]
10
+
11
+ from ..analysis.evidence import generate_evidence_id
12
+ from ..schemas import EvidenceKind, EvidenceRecord, EvidenceSource
13
+ from ..utils.cache import get_cached_data, set_cached_data
14
+
15
+ logger = logging.getLogger(__name__)
16
+
17
+
18
+ def _normalize_org_name(name: Optional[str]) -> Optional[str]:
19
+ """Cleans up organization names from WHOIS/RDAP data."""
20
+ if not isinstance(name, str):
21
+ return None
22
+ name = name.strip()
23
+ common_suffixes = [
24
+ "LLC",
25
+ "L.L.C.",
26
+ "INC",
27
+ "INCORPORATED",
28
+ "CORP",
29
+ "CORPORATION",
30
+ "LTD",
31
+ "LIMITED",
32
+ "GMBH",
33
+ "S.A.",
34
+ "S.L.",
35
+ ]
36
+ up = name.upper()
37
+ for suf in common_suffixes:
38
+ suf_dot = f"{suf}."
39
+ if up.endswith(f" {suf}") or up.endswith(f",{suf}"):
40
+ name = name[: -(len(suf) + 1)].strip().rstrip(",")
41
+ break
42
+ if up.endswith(f" {suf_dot}") or up.endswith(f",{suf_dot}"):
43
+ name = name[: -(len(suf_dot) + 1)].strip().rstrip(",")
44
+ break
45
+ return name.title()
46
+
47
+
48
+ def _rdap_extract(w: Dict[str, Any]) -> Dict[str, Any]:
49
+ """Map RDAP JSON -> normalized fields: org, registrar, creation_date, expiration_date."""
50
+ org = None
51
+ registrar = None
52
+ creation_date = None
53
+ expiration_date = None
54
+
55
+ # Entities: find registrant/registrar
56
+ for ent in w.get("entities", []) or []:
57
+ roles = {r.lower() for r in (ent.get("roles") or [])}
58
+ v = ent.get("vcardArray")
59
+ fn = None
60
+ org_v = None
61
+ if isinstance(v, list) and len(v) == 2 and isinstance(v[1], list):
62
+ for item in v[1]:
63
+ # item like ["fn", {}, "text", "Example Corp"]
64
+ if isinstance(item, list) and len(item) >= 4:
65
+ if item[0] == "fn" and isinstance(item[3], str):
66
+ fn = item[3]
67
+ if item[0] == "org" and isinstance(item[3], str):
68
+ org_v = item[3]
69
+ if "registrant" in roles and not org:
70
+ org = org_v or fn
71
+ if "registrar" in roles and not registrar:
72
+ registrar = org_v or fn
73
+
74
+ # Some registries put registrar at top-level
75
+ registrar = registrar or w.get("registrar")
76
+
77
+ # Events: registration/expiration
78
+ for ev in w.get("events", []) or []:
79
+ action = str(ev.get("eventAction", "")).lower()
80
+ date = ev.get("eventDate")
81
+ if action in {"registration", "registered"} and not creation_date:
82
+ creation_date = date
83
+ if action in {"expiration", "expiry", "paid-through"} and not expiration_date:
84
+ expiration_date = date
85
+
86
+ # ISO8601 -> datetime with tz
87
+ def _parse_dt(x: Any) -> Optional[_dt.datetime]:
88
+ if not x:
89
+ return None
90
+ try:
91
+ # RDAP dates are ISO-8601; fromisoformat handles 'Z' only in 3.11+; fall back simple replace.
92
+ s = str(x).replace("Z", "+00:00")
93
+ return _dt.datetime.fromisoformat(s)
94
+ except Exception:
95
+ return None
96
+
97
+ return {
98
+ "org": org,
99
+ "registrar": registrar,
100
+ "creation_date": _parse_dt(creation_date),
101
+ "expiration_date": _parse_dt(expiration_date),
102
+ "source": "RDAP",
103
+ }
104
+
105
+
106
+ def _whois_extract(w: Any) -> Dict[str, Any]:
107
+ """Map python-whois result -> normalized fields."""
108
+ get = w.get if hasattr(w, "get") else lambda k, d=None: getattr(w, k, d)
109
+ return {
110
+ "org": get("org"),
111
+ "registrar": get("registrar"),
112
+ "creation_date": get("creation_date"),
113
+ "expiration_date": get("expiration_date"),
114
+ "source": "WHOIS",
115
+ }
116
+
117
+
118
def _lookup(domain: str) -> Dict[str, Any]:
    """RDAP first, WHOIS fallback. Returns normalized dict or {'error': ...}."""
    # 1) RDAP (HTTP/JSON; far more reliable)
    if rdap_domain is not None:
        try:
            rd = rdap_domain(domain, timeout=10)  # type: ignore[arg-type]
            if isinstance(rd, dict):
                data = _rdap_extract(rd)
                # Only accept the RDAP answer if it yielded something
                # usable; otherwise fall through to WHOIS.
                if data.get("org") or data.get("registrar"):
                    return data
        except Exception as e:
            # Broad catch is deliberate best-effort: any RDAP failure
            # (network, parse, library) just falls through to WHOIS.
            logger.debug("RDAP error for %s: %s", domain, e)

    # 2) WHOIS fallback (may be blocked/rate-limited)
    if python_whois is not None:
        try:
            w = python_whois.whois(domain, timeout=5)
            data = _whois_extract(w)
            if data.get("org") or data.get("registrar"):
                return data
        except Exception as e:
            # WHOIS errors are surfaced (and cached) rather than raised, so
            # one bad domain cannot abort a whole run.
            return {"error": f"WHOIS error: {e}"}

    return {"error": "No RDAP/WHOIS client available or no usable data returned."}
142
+
143
+
144
def collect_from_domain(domain: str) -> List[EvidenceRecord]:
    """
    Collect registration ownership signals for a domain using RDAP (preferred) with WHOIS fallback.
    Uses caching to avoid repeated lookups and rate limits.

    Args:
        domain: The registered domain to look up (e.g. "example.com").

    Returns:
        A list with at most one EvidenceRecord; empty when the lookup failed
        or produced no usable organization name.
    """
    logger.info("Checking %s", domain)
    now = _dt.datetime.now(_dt.timezone.utc)
    cache_key_ns = "rdap"  # new namespace; do not collide with legacy "whois"
    locator_base = "rdap://"

    cached = get_cached_data(cache_key_ns, domain)
    if cached:
        logger.debug("Using cached RDAP/WHOIS data for %s", domain)
        info = cached
    else:
        info = _lookup(domain)
        # NOTE(review): _lookup() may return datetime objects in this dict;
        # confirm the cache layer can round-trip them.
        set_cached_data(cache_key_ns, domain, info if info else {"error": "empty"})

    if not info or "error" in info:
        logger.warning(
            "RDAP/WHOIS lookup for %s failed: %s",
            domain,
            info.get("error") if info else "unknown",
        )
        return []

    org_name = _normalize_org_name(info.get("org"))
    if not org_name:
        # Lazy %-style logging for consistency with the rest of this
        # function (was an eagerly-formatted f-string).
        logger.warning("No org name for %s", domain)
        # Even without org, keep cache; just no evidence emitted.
        return []

    value = {
        "name": org_name,
        "domain": domain,
        "registrar": info.get("registrar"),
        "source": info.get("source", "RDAP"),
        "creation_date": info.get("creation_date"),
        "expiration_date": info.get("expiration_date"),
    }

    # Keep EvidenceSource.WHOIS for backward compatibility if RDAP enum doesn't exist in your schema.
    record = EvidenceRecord(
        id=generate_evidence_id(
            EvidenceSource.WHOIS,  # if you add EvidenceSource.RDAP later, switch based on info["source"]
            EvidenceKind.DOMAIN,
            f"{locator_base}{domain}",
            str(value),
            org_name,
        ),
        source=EvidenceSource.WHOIS,
        locator=f"{locator_base}{domain}",
        kind=EvidenceKind.DOMAIN,
        value=value,
        observed_at=now,
        confidence=0.30,
        notes=f"Domain '{domain}' registration entity normalized to '{org_name}' via {value['source']}.",
    )
    return [record]
skip_trace/config.py ADDED
@@ -0,0 +1,165 @@
1
+ # skip_trace/config.py
2
+ from __future__ import annotations
3
+
4
+ import os
5
+ from typing import Any, Dict, Optional, cast
6
+
7
+ # Use tomllib if available (Python 3.11+), otherwise fall back to tomli
8
+ try:
9
+ import tomllib
10
+ except ImportError:
11
+ import tomli as tomllib # type: ignore
12
+
13
+ from dotenv import load_dotenv
14
+
15
+ from .exceptions import ConfigurationError
16
+
17
+ # Load .env file at module level
18
+ load_dotenv()
19
+
20
# Application-wide defaults. User overrides from [tool.skip-trace] in
# pyproject.toml are merged on top of these by load_config().
DEFAULT_CONFIG: Dict[str, Any] = {
    # Score thresholds consulted by the CLI exit-code logic.
    "default_min_score": 0.70,
    "default_fail_under": 0.50,
    # Opt-in LLM-assisted entity resolution (off by default).
    "entity_resolution_llm": False,
    # Per-signal evidence weights for the scoring engine; "conflict" is a
    # penalty, hence negative.
    "weights": {
        "verified_release_signature": 0.50,
        "repo_org_matches_email_domain": 0.35,
        "codeowners_org_team": 0.25,
        "pypi_maintainer_corporate_domain": 0.20,
        "local_copyright_header_org": 0.25,
        "governance_doc_org": 0.20,
        "llm_ner_claim": 0.20,
        "conflict": -0.15,
    },
    # LLM provider settings; the API key itself is read at load time from
    # the environment variable named by "api_key_env_var".
    "llm": {
        "provider": "openrouter",
        "model": "mistralai/mistral-7b-instruct",
        "api_key_env_var": "OPENROUTER_API_KEY",
        "base_url": "https://openrouter.ai/api/v1",
    },
    "http": {
        "user_agent": "skip-trace/0.1.0",
        "timeout": 30,
    },
    # GitHub API configuration
    "github": {
        "api_key_env_var": "GITHUB_TOKEN",
    },
    # Cache configuration
    "cache": {
        "enabled": True,
        "dir": ".skip_trace_cache",
        "ttl_seconds": 604800,  # 7 days
    },
    # Domains to ignore for WHOIS lookups
    # (free mail providers and code-hosting sites say nothing about a
    # package's owning organization).
    "whois_ignored_domains": [
        "gmail.com",
        "googlemail.com",
        "google.com",
        "yahoo.com",
        "hotmail.com",
        "outlook.com",
        "live.com",
        "msn.com",
        "aol.com",
        "icloud.com",
        "me.com",
        "mac.com",
        "protonmail.com",
        "pm.me",
        "github.com",
        "users.noreply.github.com",
        "gitlab.com",
        "sourceforge.net",
        "readthedocs.io",
        "twitter.com",
        "mastodon.social",
        "linkedin.com",
        "googlegroups.com",
    ],
    # Hosting/tooling organizations that should never be reported as a
    # package's owner.
    "suppressed_tool_orgs": [
        "github",
        "gitlab",
        "bitbucket",
        "sourceforge",
        "readthedocs",
        "codeberg",
        "pypi",  # PSF owns PyPI, but not the packages on it
    ],
}
90
+
91
+
92
def find_pyproject_toml(start_dir: str = ".") -> Optional[str]:
    """Locate pyproject.toml by walking from *start_dir* up to the filesystem root.

    Returns the path of the first pyproject.toml found, or None when the
    root is reached without a match.
    """
    current = os.path.abspath(start_dir)
    previous = None
    # os.path.dirname() is a fixed point only at the root, so the walk
    # terminates when the directory stops changing.
    while current != previous:
        candidate = os.path.join(current, "pyproject.toml")
        if os.path.exists(candidate):
            return candidate
        previous, current = current, os.path.dirname(current)
    return None
103
+
104
+
105
def load_config(test_config: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
    """
    Loads configuration, allowing for test overrides.

    Priority order:
    1. test_config (if provided)
    2. [tool.skip-trace] in pyproject.toml
    3. DEFAULT_CONFIG

    Args:
        test_config: A dictionary to use as the config, for testing.

    Returns:
        The final configuration dictionary.

    Raises:
        ConfigurationError: If pyproject.toml exists but cannot be read or
            parsed.
    """
    # NOTE(review): an empty dict test_config is falsy and therefore
    # ignored; confirm callers never pass {} expecting an empty config.
    if test_config:
        return test_config

    # BUG FIX: DEFAULT_CONFIG.copy() is shallow, so the nested
    # config[key].update(...) merge below used to mutate DEFAULT_CONFIG
    # itself, leaking one call's overrides into every later load_config()
    # call. Copy each nested dict one level deep (the merge only ever
    # mutates at that depth; list values are replaced wholesale, never
    # mutated, so they are safe to share).
    config: Dict[str, Any] = {
        key: (dict(value) if isinstance(value, dict) else value)
        for key, value in DEFAULT_CONFIG.items()
    }
    pyproject_path = find_pyproject_toml()

    if pyproject_path:
        try:
            with open(pyproject_path, "rb") as f:
                pyproject_data = tomllib.load(f)

            if tool_config := pyproject_data.get("tool", {}).get("skip-trace", {}):
                # Deep merge user config into default (one level deep).
                for key, value in tool_config.items():
                    if isinstance(value, dict) and isinstance(config.get(key), dict):
                        config[key].update(value)
                    else:
                        config[key] = value
        except Exception as e:
            raise ConfigurationError(f"Error reading {pyproject_path}: {e}") from e

    # Load secrets from environment variables.
    # LLM API key: resolved from the env var named in config["llm"].
    llm_config = config.get("llm", {})
    api_key_env_var = llm_config.get("api_key_env_var")
    if api_key_env_var:
        api_key = os.getenv(api_key_env_var)
        # Ensure the key is nested correctly in the final config object
        config["llm"]["api_key"] = api_key

    # GitHub API key, resolved the same way.
    github_config = config.get("github", {})
    gh_api_key_env_var = github_config.get("api_key_env_var")
    if gh_api_key_env_var:
        gh_api_key = os.getenv(gh_api_key_env_var)
        config["github"]["api_key"] = gh_api_key

    # Presence of the env var (any value, even empty) enables lenient mode.
    config["lenient_mode_enabled"] = (
        os.getenv("SKIP_TRACE_INCLUDE_TOOL_ORGS") is not None
    )

    return cast(Dict[str, Any], config)
162
+
163
+
164
# Load once at module level to be imported by other parts of the app.
# NOTE: this runs at import time (it reads pyproject.toml and environment
# variables), so importing this module has filesystem side effects.
CONFIG = load_config()
@@ -0,0 +1,22 @@
1
+ # skip_trace/exceptions.py
2
+ from __future__ import annotations
3
+
4
+
5
class SkipTraceError(Exception):
    """Base exception for all application-specific errors.

    Catch this to handle any failure raised by skip-trace itself while
    letting unrelated exceptions propagate.
    """
7
+
8
+
9
class ConfigurationError(SkipTraceError):
    """Raised for invalid or missing configuration.

    For example, a pyproject.toml that exists but cannot be read or parsed.
    """
11
+
12
+
13
class NetworkError(SkipTraceError):
    """Raised for network-related issues like timeouts or connection errors."""
15
+
16
+
17
class NoEvidenceError(SkipTraceError):
    """Raised when no usable evidence can be found for a package."""
19
+
20
+
21
class CollectorError(SkipTraceError):
    """Raised when a specific data collector fails.

    Callers generally treat this as non-fatal and continue with the other
    collectors.
    """
skip_trace/main.py ADDED
@@ -0,0 +1,269 @@
1
+ # skip_trace/main.py
2
+ from __future__ import annotations
3
+
4
+ import argparse
5
+ import dataclasses
6
+ import json
7
+ import logging
8
+ import sys
9
+ from typing import Set
10
+
11
+ import tldextract
12
+ from rich.logging import RichHandler
13
+
14
+ from . import schemas
15
+ from .analysis import evidence as evidence_analyzer
16
+ from .analysis import scoring
17
+ from .collectors import github, package_files, pypi, whois
18
+ from .config import CONFIG
19
+ from .exceptions import CollectorError, NetworkError, NoEvidenceError
20
+ from .reporting import json_reporter, md_reporter
21
+ from .utils.validation import is_valid_email
22
+
23
+ # Create a logger instance for this module
24
+ logger = logging.getLogger(__name__)
25
+
26
+
27
def setup_logging(level: str = "INFO"):
    """Initialize root logging with a Rich console handler.

    Args:
        level: The minimum logging level to display (e.g., "INFO", "DEBUG").
    """
    handler = RichHandler(rich_tracebacks=True, show_path=False)
    logging.basicConfig(
        level=level,
        format="%(message)s",
        datefmt="[%X]",
        handlers=[handler],
    )
39
+
40
+
41
def _collect_whois_domains(
    evidence_records: list[schemas.EvidenceRecord],
) -> Set[str]:
    """Extract candidate registered domains from evidence for WHOIS lookups.

    Pulls domains from maintainer/author emails, organization/project URLs,
    and user-profile contact fields, then drops anything on the configured
    ignore list (free mail providers, code hosts, ...).
    """
    ignored_domains = set(CONFIG.get("whois_ignored_domains", []))
    domains_to_check: Set[str] = set()

    for record in evidence_records:
        potential_domains: Set[str] = set()

        # Case 1: Maintainer/Author email
        if record.kind in (
            schemas.EvidenceKind.EMAIL,
            schemas.EvidenceKind.MAINTAINER,
            schemas.EvidenceKind.AUTHOR_TAG,
        ):
            if email := record.value.get("email"):
                if "@" in email:
                    potential_domains.add(email.split("@")[1])

        # Case 2: URL from project_urls or org links
        elif record.kind in (
            schemas.EvidenceKind.ORGANIZATION,
            schemas.EvidenceKind.PROJECT_URL,
        ):
            if url := record.value.get("url"):
                extracted = tldextract.extract(url)
                if extracted.registered_domain:
                    potential_domains.add(extracted.registered_domain)

        # Case 3: Contacts from a user profile (email, blog, etc.)
        elif record.kind == schemas.EvidenceKind.USER_PROFILE:
            if contacts := record.value.get("contacts"):
                for contact_value in contacts.values():
                    if not contact_value:
                        continue
                    if valid_email := is_valid_email(contact_value):
                        potential_domains.add(valid_email.split("@")[1])
                    elif contact_value and "://" in contact_value:
                        extracted = tldextract.extract(contact_value)
                        if extracted.registered_domain:
                            potential_domains.add(extracted.registered_domain)

        # Keep only domains not on the ignore list.
        domains_to_check.update(
            d for d in potential_domains if d not in ignored_domains
        )

    return domains_to_check


def run_who_owns(args: argparse.Namespace) -> int:
    """Handler for the 'who-owns' command.

    Pipeline: fetch PyPI metadata -> extract evidence -> enrich from GitHub,
    WHOIS/RDAP, and package files -> score owner candidates -> render report.

    Returns:
        0 on success or indeterminate score, 101 when no usable evidence was
        found or a network problem occurred.
    """
    logger.info(f"Executing 'who-owns' for package: {args.package}")

    try:
        # 1. Collect initial data from PyPI
        metadata = pypi.fetch_package_metadata(args.package, args.version)
        package_name = metadata.get("info", {}).get("name", args.package)
        package_version = metadata.get("info", {}).get("version")
        logger.debug(
            f"Successfully fetched metadata for {package_name} v{package_version}"
        )

        # 2. Analyze primary package metadata
        evidence_records, pypi_maintainers = evidence_analyzer.extract_from_pypi(
            metadata
        )

        # 3. Cross-Reference for more PyPI evidence
        evidence_records.extend(pypi.cross_reference_by_user(package_name))

        # 4. Fetch evidence from code repositories found in PyPI evidence
        repo_urls = set()
        for record in evidence_records:
            if (
                record.source == schemas.EvidenceSource.PYPI
                and record.kind == schemas.EvidenceKind.ORGANIZATION
            ):
                url = record.value.get("url")
                if url and "github.com" in url:
                    repo_urls.add(url)

        for url in repo_urls:
            logger.info(f"Analyzing GitHub repository: {url}")
            try:
                evidence_records.extend(github.extract_from_repo_url(url))
            except CollectorError as e:
                logger.warning(f"Could not fully analyze GitHub repo {url}: {e}")

        # 5. Extract domains and perform WHOIS lookups
        domains_to_check = _collect_whois_domains(evidence_records)
        if domains_to_check:
            logger.info(
                f"Found domains for WHOIS lookup: {', '.join(sorted(domains_to_check))}"
            )
            for domain in domains_to_check:
                try:
                    evidence_records.extend(whois.collect_from_domain(domain))
                except CollectorError as e:
                    logger.warning(f"Could not get WHOIS evidence for {domain}: {e}")

        # 6. Analyze package contents for deep evidence
        try:
            evidence_records.extend(
                package_files.collect_from_package_files(metadata)
            )
        except CollectorError as e:
            logger.warning(f"Could not analyze package files for {package_name}: {e}")

        # 7. Score all collected evidence
        owner_candidates = scoring.score_owners(evidence_records)

        # 8. Assemble final result object
        package_result = schemas.PackageResult(
            package=package_name,
            version=package_version,
            owners=owner_candidates,
            maintainers=pypi_maintainers,
            evidence=evidence_records,
        )

        # 9. Report
        if args.output_format == "json":
            json_reporter.render(package_result)
        else:
            md_reporter.render(package_result)

        # PEP-specified exit codes based on score (placeholder thresholds).
        top_score = owner_candidates[0].score if owner_candidates else 0
        if top_score >= 0.7:
            return 0  # Success
        if top_score >= 0.5:
            return 0  # Indeterminate # The tool didn't fail
        return 101  # No usable evidence
    except NoEvidenceError as e:
        logger.error(f"{type(e).__name__}: {e}")
        return 101  # As per the PEP for "No usable evidence"
    except NetworkError as e:
        # NOTE(review): the other handler logs via `logger`; this one prints
        # to stderr directly. Kept as-is to preserve observable output.
        print(f"Error: A network problem occurred: {e}", file=sys.stderr)
        return 101
187
+
188
+
189
# --- Handler for the `explain` command ---
def run_explain(args: argparse.Namespace) -> int:
    """Handler for the 'explain' command.

    Prints a package's evidence records as JSON; with --id, prints only the
    first record whose id starts with that prefix.
    """
    logger.info(f"Explaining evidence for package: {args.package}")
    try:
        metadata = pypi.fetch_package_metadata(args.package)
        evidence_records, _ = evidence_analyzer.extract_from_pypi(metadata)

        if args.id:
            # Find the first record whose ID starts with the given prefix.
            for record in evidence_records:
                if record.id.startswith(args.id):
                    print(
                        json.dumps(
                            dataclasses.asdict(record), indent=2, default=str
                        )
                    )
                    return 0
            logger.error(f"Evidence ID matching '{args.id}' not found.")
            return 1

        # No ID given: dump every record.
        output: list[dict[str, str | None]] = [
            dataclasses.asdict(r) for r in evidence_records
        ]
        print(json.dumps(output, indent=2, default=str))
        return 0

    except (NoEvidenceError, NetworkError) as e:
        logger.error(f"{type(e).__name__}: {e}")
        return 101
219
+
220
+
221
def run_venv(args: argparse.Namespace) -> int:
    """Handler for the 'venv' command.

    Currently a stub: echoes the target environment path and returns the
    placeholder exit code.
    """
    target = args.path or "current environment"
    print(f"Executing 'venv' command...\n  Path: {target}")
    # TODO: Implement the actual logic
    return 200  # Placeholder for "No anonymous"
227
+
228
+
229
def run_reqs(args: argparse.Namespace) -> int:
    """Handler for the 'reqs' command.

    Currently a stub: echoes the requirements file and returns the
    placeholder exit code.
    """
    lines = [
        "Executing 'reqs' command...",
        f"  Requirements File: {args.requirements_file}",
    ]
    print("\n".join(lines))
    # TODO: Implement the actual logic
    return 200  # Placeholder for "No anonymous"
235
+
236
+
237
+ # ... Add placeholder functions for other commands ...
238
+
239
+
240
def run_command(args: argparse.Namespace) -> int:
    """
    Dispatches the parsed arguments to the appropriate handler function.

    Args:
        args: The parsed arguments from argparse.

    Returns:
        An exit code; 2 when the command is recognized but unimplemented.
    """
    # BUG FIX: the previous code computed
    #   log_level = "DEBUG" if args.log_level == "DEBUG" else args.log_level
    # which is a tautology (always args.log_level), and its "Prefer
    # --verbose if set" comment was false — no verbose flag was consulted.
    setup_logging(args.log_level)

    # Dispatch table: one handler per implemented sub-command.
    command_handlers = {
        "who-owns": run_who_owns,
        "explain": run_explain,
        "venv": run_venv,
        "reqs": run_reqs,
        # "graph": run_graph,
        # "cache": run_cache,
        # "policy": run_policy,
    }

    handler = command_handlers.get(args.command)
    if handler:
        return handler(args)
    print(f"Error: Command '{args.command}' is not yet implemented.", file=sys.stderr)
    return 2
skip_trace/py.typed.py ADDED
File without changes
File without changes
@@ -0,0 +1,22 @@
1
+ # skip_trace/reporting/json_reporter.py
2
+ from __future__ import annotations
3
+
4
+ import dataclasses
5
+ import json
6
+ import sys
7
+ from typing import IO
8
+
9
+ from ..schemas import PackageResult
10
+
11
+
12
def render(result: PackageResult, file: IO[str] = sys.stdout):
    """
    Serialize a PackageResult as JSON and write it to *file*.

    Args:
        result: The PackageResult object to render.
        file: The file object to write to (defaults to stdout).
    """
    payload = dataclasses.asdict(result)
    # default=str stringifies non-JSON-serializable types such as datetime.
    file.write(json.dumps(payload, indent=2, default=str))
    file.write("\n")