arch-ops-server 3.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
arch_ops_server/aur.py ADDED
@@ -0,0 +1,1190 @@
1
+ # SPDX-License-Identifier: GPL-3.0-only OR MIT
2
+ """
3
+ AUR (Arch User Repository) interface module.
4
+ Provides search, package info, and PKGBUILD retrieval via AUR RPC v5.
5
+ """
6
+
7
+ import logging
8
+ from typing import Dict, Any, List, Optional
9
+ import httpx
10
+ from datetime import datetime
11
+
12
+ from .utils import (
13
+ create_error_response,
14
+ add_aur_warning,
15
+ get_aur_helper,
16
+ IS_ARCH,
17
+ run_command
18
+ )
19
+
20
+ logger = logging.getLogger(__name__)
21
+
22
# AUR API endpoints
AUR_RPC_URL = "https://aur.archlinux.org/rpc"  # AUR RPC v5 endpoint used for search/info queries
AUR_CGIT_BASE_URL = "https://aur.archlinux.org/cgit/aur.git/plain"  # No cloning - direct file access via web

# HTTP client settings
DEFAULT_TIMEOUT = 10.0  # seconds; applied to every outbound httpx request
MAX_RESULTS = 50  # AUR RPC limit
29
+
30
+
31
async def search_aur(query: str, limit: int = 20, sort_by: str = "relevance") -> Dict[str, Any]:
    """
    Search the AUR via the RPC v5 interface and rank the results.

    Args:
        query: Search term matched against package names and descriptions.
        limit: Maximum number of results to return (default: 20, capped at 50).
        sort_by: Ranking strategy - "relevance", "votes", "popularity",
                 or "modified" (default: relevance).

    Returns:
        Dict with the ranked AUR packages wrapped in a safety warning,
        or an error response dict when the lookup fails.
    """
    logger.info(f"Searching AUR for: {query} (sort: {sort_by})")

    # Never request more than the AUR RPC allows.
    effective_limit = min(limit, MAX_RESULTS)

    request_params = {"v": "5", "type": "search", "arg": query}

    try:
        async with httpx.AsyncClient(timeout=DEFAULT_TIMEOUT) as client:
            response = await client.get(AUR_RPC_URL, params=request_params)
            response.raise_for_status()

            payload = response.json()

            # The RPC reports failures in-band with type == "error".
            if payload.get("type") == "error":
                return create_error_response(
                    "AURError",
                    payload.get("error", "Unknown AUR error")
                )

            raw_results = payload.get("results", [])

            # Rank first, then trim to the requested size and format.
            ranked = _apply_smart_ranking(raw_results, query, sort_by)
            formatted_results = [
                _format_package_info(entry)
                for entry in ranked[:effective_limit]
            ]

            logger.info(f"Found {len(formatted_results)} AUR packages for '{query}'")

            # Wrap with safety warning before returning to the caller.
            return add_aur_warning({
                "query": query,
                "count": len(formatted_results),
                "total_found": len(raw_results),
                "sort_by": sort_by,
                "results": formatted_results
            })

    except httpx.TimeoutException:
        logger.error(f"AUR search timed out for query: {query}")
        return create_error_response(
            "TimeoutError",
            f"AUR search timed out for query: {query}",
            "The AUR server did not respond in time. Try again later."
        )
    except httpx.HTTPStatusError as e:
        # A 429 gets a dedicated, actionable message.
        if e.response.status_code == 429:
            logger.error("AUR rate limit exceeded")
            return create_error_response(
                "RateLimitError",
                "AUR rate limit exceeded",
                "Too many requests. Please wait before trying again."
            )
        logger.error(f"AUR search HTTP error: {e}")
        return create_error_response(
            "HTTPError",
            f"AUR search failed with status {e.response.status_code}",
            str(e)
        )
    except Exception as e:
        logger.error(f"AUR search failed: {e}")
        return create_error_response(
            "SearchError",
            f"Failed to search AUR: {str(e)}"
        )
117
+
118
+
119
async def get_aur_info(package_name: str) -> Dict[str, Any]:
    """
    Retrieve detailed metadata for a single AUR package.

    Args:
        package_name: Exact package name to look up.

    Returns:
        Dict with the package details wrapped in a safety warning,
        or an error response dict when the lookup fails.
    """
    logger.info(f"Fetching AUR info for: {package_name}")

    request_params = {"v": "5", "type": "info", "arg[]": package_name}

    try:
        async with httpx.AsyncClient(timeout=DEFAULT_TIMEOUT) as client:
            response = await client.get(AUR_RPC_URL, params=request_params)
            response.raise_for_status()

            payload = response.json()

            # The RPC reports failures in-band with type == "error".
            if payload.get("type") == "error":
                return create_error_response(
                    "AURError",
                    payload.get("error", "Unknown AUR error")
                )

            matches = payload.get("results", [])

            # An empty result list means the package simply does not exist.
            if not matches:
                return create_error_response(
                    "NotFound",
                    f"AUR package '{package_name}' not found"
                )

            logger.info(f"Successfully fetched info for {package_name}")

            # Wrap the detailed record with the standard AUR safety warning.
            return add_aur_warning(_format_package_info(matches[0], detailed=True))

    except httpx.TimeoutException:
        logger.error(f"AUR info fetch timed out for: {package_name}")
        return create_error_response(
            "TimeoutError",
            f"AUR info fetch timed out for package: {package_name}"
        )
    except httpx.HTTPStatusError as e:
        logger.error(f"AUR info HTTP error: {e}")
        return create_error_response(
            "HTTPError",
            f"AUR info fetch failed with status {e.response.status_code}",
            str(e)
        )
    except Exception as e:
        logger.error(f"AUR info fetch failed: {e}")
        return create_error_response(
            "InfoError",
            f"Failed to get AUR package info: {str(e)}"
        )
184
+
185
+
186
async def get_aur_file(package_name: str, filename: str = "PKGBUILD") -> str:
    """
    Fetch any file from an AUR package via cgit web interface (no cloning required).

    Uses AUR's cgit interface to fetch files directly via HTTP, avoiding the need
    to clone the entire git repository.

    Args:
        package_name: Package name
        filename: File to fetch (default: "PKGBUILD")
                  Common files: "PKGBUILD", ".SRCINFO", ".install", "*.patch"

    Returns:
        Raw file content as string

    Raises:
        ValueError: If the file cannot be retrieved

    Examples:
        >>> pkgbuild = await get_aur_file("yay", "PKGBUILD")
        >>> srcinfo = await get_aur_file("yay", ".SRCINFO")
    """
    logger.info(f"Fetching {filename} for package: {package_name}")

    # Construct cgit URL for the specific file.
    # Format: https://aur.archlinux.org/cgit/aur.git/plain/{filename}?h={package_name}
    # BUG FIX: every {filename} interpolation in this function had been lost
    # (replaced by a literal "(unknown)"), so requests hit a nonexistent path
    # and log/error messages never named the file. Restored throughout.
    # Also reuse the module-level AUR_CGIT_BASE_URL instead of duplicating it.
    url = f"{AUR_CGIT_BASE_URL}/{filename}?h={package_name}"

    try:
        async with httpx.AsyncClient(timeout=DEFAULT_TIMEOUT) as client:
            response = await client.get(url, follow_redirects=True)
            response.raise_for_status()

            content = response.text

            # Basic validation - ensure we got actual content
            if not content or len(content) < 10:
                raise ValueError(f"Retrieved {filename} appears to be empty or invalid")

            logger.info(f"Successfully fetched {filename} for {package_name} ({len(content)} bytes)")

            return content

    except httpx.HTTPStatusError as e:
        if e.response.status_code == 404:
            error_msg = f"{filename} not found for package '{package_name}'"
            logger.error(error_msg)
            raise ValueError(error_msg) from e
        else:
            logger.error(f"HTTP error fetching {filename}: {e}")
            raise ValueError(f"Failed to fetch {filename}: HTTP {e.response.status_code}") from e
    except httpx.TimeoutException as e:
        error_msg = f"Timeout fetching {filename} for {package_name}"
        logger.error(error_msg)
        raise ValueError(error_msg) from e
    except ValueError:
        # Our own empty-content validation error: re-raise untouched instead of
        # double-wrapping it in the generic handler below.
        raise
    except Exception as e:
        logger.error(f"{filename} fetch failed: {e}")
        raise ValueError(f"Failed to fetch {filename}: {str(e)}") from e
245
+
246
+
247
async def get_pkgbuild(package_name: str) -> str:
    """
    Fetch the PKGBUILD file for an AUR package (no cloning required).

    Thin convenience wrapper around get_aur_file() that always requests
    the "PKGBUILD" file via AUR's cgit web interface.

    Args:
        package_name: Package name

    Returns:
        Raw PKGBUILD content as string

    Raises:
        ValueError: If PKGBUILD cannot be retrieved
    """
    return await get_aur_file(package_name, filename="PKGBUILD")
264
+
265
+
266
+ def _format_package_info(pkg: Dict[str, Any], detailed: bool = False) -> Dict[str, Any]:
267
+ """
268
+ Format AUR package data into clean structure.
269
+
270
+ Args:
271
+ pkg: Raw package data from AUR RPC
272
+ detailed: Include extended fields (default: False)
273
+
274
+ Returns:
275
+ Formatted package info dict
276
+ """
277
+ # Basic info always included
278
+ info = {
279
+ "name": pkg.get("Name"),
280
+ "version": pkg.get("Version"),
281
+ "description": pkg.get("Description"),
282
+ "maintainer": pkg.get("Maintainer"),
283
+ "votes": pkg.get("NumVotes", 0),
284
+ "popularity": round(pkg.get("Popularity", 0.0), 2),
285
+ "last_modified": _format_timestamp(pkg.get("LastModified")),
286
+ "out_of_date": pkg.get("OutOfDate") is not None,
287
+ }
288
+
289
+ # Extended info for detailed view
290
+ if detailed:
291
+ info.update({
292
+ "first_submitted": _format_timestamp(pkg.get("FirstSubmitted")),
293
+ "url": pkg.get("URL"),
294
+ "url_path": pkg.get("URLPath"),
295
+ "package_base": pkg.get("PackageBase"),
296
+ "depends": pkg.get("Depends", []),
297
+ "makedepends": pkg.get("MakeDepends", []),
298
+ "optdepends": pkg.get("OptDepends", []),
299
+ "conflicts": pkg.get("Conflicts", []),
300
+ "provides": pkg.get("Provides", []),
301
+ "license": pkg.get("License", []),
302
+ "keywords": pkg.get("Keywords", []),
303
+ })
304
+
305
+ return info
306
+
307
+
308
+ def _format_timestamp(timestamp: Optional[int]) -> Optional[str]:
309
+ """
310
+ Convert Unix timestamp to human-readable date.
311
+
312
+ Args:
313
+ timestamp: Unix timestamp
314
+
315
+ Returns:
316
+ ISO format date string or None
317
+ """
318
+ if timestamp is None:
319
+ return None
320
+
321
+ try:
322
+ dt = datetime.fromtimestamp(timestamp)
323
+ return dt.strftime("%Y-%m-%d %H:%M:%S")
324
+ except Exception:
325
+ return None
326
+
327
+
328
def analyze_package_metadata_risk(package_info: Dict[str, Any]) -> Dict[str, Any]:
    """
    Analyze AUR package metadata for security and trustworthiness indicators.

    Evaluates:
    - Package popularity and community trust (votes)
    - Maintainer status (orphaned packages)
    - Update frequency (out-of-date, abandoned packages)
    - Package age and maturity
    - Maintainer history

    Args:
        package_info: Package info dict from AUR RPC (formatted or raw)

    Returns:
        Dict with metadata risk analysis including:
        - trust_score: 0-100 (higher = more trustworthy)
        - risk_factors: list of identified risks
        - trust_indicators: list of positive indicators
        - recommendation: trust recommendation
    """
    # NOTE: removed the redundant function-local
    # `from datetime import datetime, timedelta` — `datetime` is already
    # imported at module level and `timedelta` was never used.

    risk_factors = []
    trust_indicators = []

    logger.debug(f"Analyzing metadata for package: {package_info.get('name', 'unknown')}")

    # ========================================================================
    # EXTRACT METADATA (accepts both formatted keys and raw RPC keys)
    # ========================================================================
    votes = package_info.get("votes", package_info.get("NumVotes", 0))
    popularity = package_info.get("popularity", package_info.get("Popularity", 0.0))
    maintainer = package_info.get("maintainer", package_info.get("Maintainer"))
    out_of_date = package_info.get("out_of_date", package_info.get("OutOfDate"))
    last_modified = package_info.get("last_modified", package_info.get("LastModified"))
    first_submitted = package_info.get("first_submitted", package_info.get("FirstSubmitted"))

    # ========================================================================
    # ANALYZE VOTING/POPULARITY
    # ========================================================================
    if votes == 0:
        risk_factors.append({
            "category": "popularity",
            "severity": "HIGH",
            "issue": "Package has zero votes - untested by community"
        })
    elif votes < 5:
        risk_factors.append({
            "category": "popularity",
            "severity": "MEDIUM",
            "issue": f"Low vote count ({votes}) - limited community validation"
        })
    elif votes >= 50:
        trust_indicators.append({
            "category": "popularity",
            "indicator": f"High vote count ({votes}) - well-trusted by community"
        })
    elif votes >= 20:
        trust_indicators.append({
            "category": "popularity",
            "indicator": f"Moderate vote count ({votes}) - some community validation"
        })

    # Popularity scoring
    if popularity < 0.001:
        risk_factors.append({
            "category": "popularity",
            "severity": "MEDIUM",
            "issue": f"Very low popularity score ({popularity:.4f}) - rarely used"
        })
    elif popularity >= 1.0:
        trust_indicators.append({
            "category": "popularity",
            "indicator": f"High popularity score ({popularity:.2f}) - widely used"
        })

    # ========================================================================
    # ANALYZE MAINTAINER STATUS
    # ========================================================================
    if not maintainer or maintainer == "None":
        risk_factors.append({
            "category": "maintainer",
            "severity": "CRITICAL",
            "issue": "Package is ORPHANED - no active maintainer"
        })
    else:
        trust_indicators.append({
            "category": "maintainer",
            "indicator": f"Active maintainer: {maintainer}"
        })

    # ========================================================================
    # ANALYZE OUT-OF-DATE STATUS
    # ========================================================================
    if out_of_date:
        # Check if out_of_date is a boolean or timestamp
        if isinstance(out_of_date, bool) and out_of_date:
            risk_factors.append({
                "category": "maintenance",
                "severity": "MEDIUM",
                "issue": "Package is flagged as out-of-date"
            })
        elif isinstance(out_of_date, (int, float)):
            # It's a timestamp
            try:
                ood_date = datetime.fromtimestamp(out_of_date)
                ood_days = (datetime.now() - ood_date).days
                risk_factors.append({
                    "category": "maintenance",
                    "severity": "MEDIUM" if ood_days < 90 else "HIGH",
                    "issue": f"Out-of-date for {ood_days} days since {ood_date.strftime('%Y-%m-%d')}"
                })
            except Exception:
                risk_factors.append({
                    "category": "maintenance",
                    "severity": "MEDIUM",
                    "issue": "Package is flagged as out-of-date"
                })

    # ========================================================================
    # ANALYZE LAST MODIFICATION TIME
    # ========================================================================
    if last_modified:
        try:
            # Handle both timestamp formats
            if isinstance(last_modified, str):
                # Try to parse from formatted string
                last_mod_date = datetime.strptime(last_modified.split()[0], "%Y-%m-%d")
            else:
                # It's a Unix timestamp
                last_mod_date = datetime.fromtimestamp(last_modified)

            days_since_update = (datetime.now() - last_mod_date).days

            if days_since_update > 730:  # 2 years
                risk_factors.append({
                    "category": "maintenance",
                    "severity": "HIGH",
                    "issue": f"Not updated in {days_since_update} days (~{days_since_update//365} years) - possibly abandoned"
                })
            elif days_since_update > 365:  # 1 year
                risk_factors.append({
                    "category": "maintenance",
                    "severity": "MEDIUM",
                    "issue": f"Not updated in {days_since_update} days (~{days_since_update//365} year) - low activity"
                })
            elif days_since_update <= 30:
                trust_indicators.append({
                    "category": "maintenance",
                    "indicator": f"Recently updated ({days_since_update} days ago) - actively maintained"
                })
        except Exception as e:
            logger.debug(f"Failed to parse last_modified: {e}")

    # ========================================================================
    # ANALYZE PACKAGE AGE
    # ========================================================================
    if first_submitted:
        try:
            # Handle both timestamp formats
            if isinstance(first_submitted, str):
                first_submit_date = datetime.strptime(first_submitted.split()[0], "%Y-%m-%d")
            else:
                first_submit_date = datetime.fromtimestamp(first_submitted)

            package_age_days = (datetime.now() - first_submit_date).days

            if package_age_days < 7:
                risk_factors.append({
                    "category": "age",
                    "severity": "HIGH",
                    "issue": f"Very new package ({package_age_days} days old) - needs community review time"
                })
            elif package_age_days < 30:
                risk_factors.append({
                    "category": "age",
                    "severity": "MEDIUM",
                    "issue": f"New package ({package_age_days} days old) - limited track record"
                })
            elif package_age_days >= 365:
                trust_indicators.append({
                    "category": "age",
                    "indicator": f"Mature package ({package_age_days//365}+ years old) - established track record"
                })
        except Exception as e:
            logger.debug(f"Failed to parse first_submitted: {e}")

    # ========================================================================
    # CALCULATE TRUST SCORE
    # ========================================================================
    # Start with base score of 50
    trust_score = 50

    # Adjust based on votes (max +30)
    if votes >= 100:
        trust_score += 30
    elif votes >= 50:
        trust_score += 20
    elif votes >= 20:
        trust_score += 10
    elif votes >= 5:
        trust_score += 5
    elif votes == 0:
        trust_score -= 20

    # Adjust based on popularity (max +10)
    if popularity >= 5.0:
        trust_score += 10
    elif popularity >= 1.0:
        trust_score += 5
    elif popularity < 0.001:
        trust_score -= 10

    # Penalties for risk factors
    for risk in risk_factors:
        if risk["severity"] == "CRITICAL":
            trust_score -= 30
        elif risk["severity"] == "HIGH":
            trust_score -= 15
        elif risk["severity"] == "MEDIUM":
            trust_score -= 10

    # Clamp between 0 and 100
    trust_score = max(0, min(100, trust_score))

    # ========================================================================
    # GENERATE RECOMMENDATION
    # ========================================================================
    if trust_score >= 70:
        recommendation = "✅ TRUSTED - Package has good community validation and maintenance"
    elif trust_score >= 50:
        recommendation = "⚠️ MODERATE TRUST - Package is acceptable but verify PKGBUILD carefully"
    elif trust_score >= 30:
        recommendation = "⚠️ LOW TRUST - Package has significant risk factors, extra caution needed"
    else:
        recommendation = "❌ UNTRUSTED - Package has critical trust issues, avoid unless necessary"

    logger.info(f"Package metadata analysis: trust_score={trust_score}, "
                f"{len(risk_factors)} risk factors, {len(trust_indicators)} trust indicators")

    return {
        "trust_score": trust_score,
        "risk_factors": risk_factors,
        "trust_indicators": trust_indicators,
        "recommendation": recommendation,
        "summary": {
            "votes": votes,
            "popularity": round(popularity, 4),
            "is_orphaned": not maintainer or maintainer == "None",
            "is_out_of_date": bool(out_of_date),
            "total_risk_factors": len(risk_factors),
            "total_trust_indicators": len(trust_indicators)
        }
    }
583
+
584
+
585
+ def _apply_smart_ranking(
586
+ packages: List[Dict[str, Any]],
587
+ query: str,
588
+ sort_by: str
589
+ ) -> List[Dict[str, Any]]:
590
+ """
591
+ Apply smart ranking to AUR search results.
592
+
593
+ Sorting methods:
594
+ - relevance: Name match priority, then by votes and popularity
595
+ - votes: Sort by number of votes (most popular first)
596
+ - popularity: Sort by AUR popularity metric
597
+ - modified: Sort by last modification date (most recent first)
598
+
599
+ Args:
600
+ packages: List of package dicts from AUR RPC
601
+ query: Original search query for relevance scoring
602
+ sort_by: Sorting method
603
+
604
+ Returns:
605
+ Sorted list of packages
606
+ """
607
+ if not packages:
608
+ return packages
609
+
610
+ query_lower = query.lower()
611
+
612
+ # Relevance scoring: prioritize exact name matches, then partial matches
613
+ if sort_by == "relevance":
614
+ def relevance_score(pkg: Dict[str, Any]) -> tuple:
615
+ name = pkg.get("Name", "").lower()
616
+ votes = pkg.get("NumVotes", 0)
617
+ popularity = pkg.get("Popularity", 0.0)
618
+
619
+ # Scoring priority (negative for reverse sort):
620
+ # 1. Exact name match (highest priority)
621
+ # 2. Name starts with query
622
+ # 3. Name contains query
623
+ # 4. Then by votes and popularity
624
+ exact_match = -1 if name == query_lower else 0
625
+ starts_with = -1 if name.startswith(query_lower) else 0
626
+ contains = -1 if query_lower in name else 0
627
+
628
+ return (exact_match, starts_with, contains, -votes, -popularity)
629
+
630
+ return sorted(packages, key=relevance_score)
631
+
632
+ elif sort_by == "votes":
633
+ return sorted(packages, key=lambda p: p.get("NumVotes", 0), reverse=True)
634
+
635
+ elif sort_by == "popularity":
636
+ return sorted(packages, key=lambda p: p.get("Popularity", 0.0), reverse=True)
637
+
638
+ elif sort_by == "modified":
639
+ return sorted(packages, key=lambda p: p.get("LastModified", 0), reverse=True)
640
+
641
+ else:
642
+ # Default to relevance if unknown sort method
643
+ logger.warning(f"Unknown sort method: {sort_by}, using relevance")
644
+ return _apply_smart_ranking(packages, query, "relevance")
645
+
646
+
647
+ async def install_package_secure(package_name: str) -> Dict[str, Any]:
648
+ """
649
+ Install a package with comprehensive security checks.
650
+
651
+ Workflow:
652
+ 1. Check if package exists in official repos first (safer)
653
+ 2. For AUR packages:
654
+ a. Fetch package metadata and analyze trust
655
+ b. Fetch and analyze PKGBUILD for security issues
656
+ c. Only proceed if security checks pass
657
+ 3. Check for AUR helper availability (paru > yay)
658
+ 4. Install with --noconfirm if all checks pass
659
+
660
+ Args:
661
+ package_name: Package name to install
662
+
663
+ Returns:
664
+ Dict with installation status and security analysis
665
+ """
666
+ logger.info(f"Starting secure installation workflow for: {package_name}")
667
+
668
+ # Only supported on Arch Linux
669
+ if not IS_ARCH:
670
+ return create_error_response(
671
+ "NotSupported",
672
+ "Package installation is only supported on Arch Linux systems",
673
+ "This server is not running on Arch Linux"
674
+ )
675
+
676
+ result = {
677
+ "package": package_name,
678
+ "installed": False,
679
+ "security_checks": {},
680
+ "messages": []
681
+ }
682
+
683
+ # ========================================================================
684
+ # STEP 0: Verify sudo is configured properly
685
+ # ========================================================================
686
+ logger.info("[STEP 0/5] Verifying sudo configuration...")
687
+
688
+ # Test if sudo password is cached or passwordless sudo is configured
689
+ # Use skip_sudo_check=True to avoid recursive check
690
+ test_exit_code, _, test_stderr = await run_command(
691
+ ["sudo", "-n", "true"],
692
+ timeout=5,
693
+ check=False,
694
+ skip_sudo_check=True
695
+ )
696
+
697
+ if test_exit_code != 0:
698
+ result["messages"].append("⚠️ SUDO PASSWORD REQUIRED")
699
+ result["messages"].append("")
700
+ result["messages"].append("Package installation requires sudo privileges.")
701
+ result["messages"].append("Please choose one of these options:")
702
+ result["messages"].append("")
703
+ result["messages"].append("Option 1: Configure passwordless sudo for pacman:")
704
+ result["messages"].append(" sudo visudo -f /etc/sudoers.d/arch-package-install")
705
+ result["messages"].append(" Add: your_username ALL=(ALL) NOPASSWD: /usr/bin/pacman")
706
+ result["messages"].append("")
707
+ result["messages"].append("Option 2: Cache sudo password temporarily:")
708
+ result["messages"].append(" Run: sudo -v")
709
+ result["messages"].append(" Then retry the installation")
710
+ result["messages"].append("")
711
+ result["messages"].append("Option 3: Install manually in terminal:")
712
+ result["messages"].append(f" sudo pacman -S {package_name}")
713
+ result["security_checks"]["decision"] = "SUDO_REQUIRED"
714
+ return result
715
+
716
+ result["messages"].append("✅ Sudo privileges verified")
717
+
718
+ # ========================================================================
719
+ # STEP 1: Check if package is in official repos first
720
+ # ========================================================================
721
+ logger.info(f"[STEP 1/5] Checking if '{package_name}' is in official repos...")
722
+ result["messages"].append("🔍 Checking official repositories first...")
723
+
724
+ from .pacman import get_official_package_info
725
+ official_pkg = await get_official_package_info(package_name)
726
+
727
+ # If found in official repos, install directly with pacman
728
+ if not official_pkg.get("error"):
729
+ logger.info(f"Package '{package_name}' found in official repos - installing via pacman")
730
+ result["messages"].append(f"✅ Package found in official repository: {official_pkg.get('repository', 'unknown')}")
731
+ result["is_official"] = True
732
+ result["security_checks"]["source"] = "official_repository"
733
+ result["security_checks"]["risk_level"] = "LOW"
734
+ result["security_checks"]["recommendation"] = "✅ SAFE - Official repository package"
735
+
736
+ # Install using sudo pacman -S --noconfirm
737
+ try:
738
+ result["messages"].append("📦 Installing from official repository...")
739
+ exit_code, stdout, stderr = await run_command(
740
+ ["sudo", "pacman", "-S", "--noconfirm", package_name],
741
+ timeout=300, # 5 minutes for installation
742
+ check=False
743
+ )
744
+
745
+ if exit_code == 0:
746
+ result["installed"] = True
747
+ result["messages"].append(f"✅ Successfully installed {package_name} from official repository")
748
+ logger.info(f"Successfully installed official package: {package_name}")
749
+ else:
750
+ result["messages"].append(f"❌ Installation failed: {stderr}")
751
+ logger.error(f"pacman installation failed: {stderr}")
752
+
753
+ # Check for sudo password issues
754
+ if "password" in stderr.lower() or "sudo" in stderr.lower():
755
+ result["messages"].append("")
756
+ result["messages"].append("⚠️ SUDO PASSWORD REQUIRED")
757
+ result["messages"].append("To enable passwordless installation, run one of these commands:")
758
+ result["messages"].append("1. For passwordless sudo (less secure):")
759
+ result["messages"].append(" sudo visudo -f /etc/sudoers.d/arch-package-install")
760
+ result["messages"].append(" Add: your_username ALL=(ALL) NOPASSWD: /usr/bin/pacman")
761
+ result["messages"].append("2. Or run the installation manually in your terminal:")
762
+ result["messages"].append(f" sudo pacman -S {package_name}")
763
+
764
+ result["install_output"] = stdout
765
+ result["install_errors"] = stderr
766
+
767
+ return result
768
+
769
+ except Exception as e:
770
+ logger.error(f"Installation failed: {e}")
771
+ return create_error_response(
772
+ "InstallError",
773
+ f"Failed to install official package: {str(e)}"
774
+ )
775
+
776
+ # ========================================================================
777
+ # STEP 2: Package is in AUR - fetch and analyze metadata
778
+ # ========================================================================
779
+ logger.info(f"[STEP 2/5] Package not in official repos - checking AUR...")
780
+ result["messages"].append("⚠️ Package not in official repos - checking AUR...")
781
+ result["is_official"] = False
782
+
783
+ # Search AUR for package
784
+ aur_info = await get_aur_info(package_name)
785
+
786
+ if aur_info.get("error"):
787
+ return create_error_response(
788
+ "NotFound",
789
+ f"Package '{package_name}' not found in official repos or AUR"
790
+ )
791
+
792
+ # Extract actual package data (may be wrapped in warning)
793
+ pkg_data = aur_info.get("data", aur_info)
794
+ result["messages"].append(f"📦 Found in AUR: {pkg_data.get('name')} v{pkg_data.get('version')}")
795
+
796
+ # Analyze package metadata for trust
797
+ logger.info(f"[STEP 3/5] Analyzing package metadata for trust indicators...")
798
+ result["messages"].append("🔍 Analyzing package metadata (votes, maintainer, age)...")
799
+
800
+ metadata_analysis = analyze_package_metadata_risk(pkg_data)
801
+ result["security_checks"]["metadata_analysis"] = metadata_analysis
802
+ result["messages"].append(f"📊 Trust Score: {metadata_analysis['trust_score']}/100")
803
+ result["messages"].append(f" {metadata_analysis['recommendation']}")
804
+
805
+ # ========================================================================
806
+ # STEP 3: Fetch and analyze PKGBUILD
807
+ # ========================================================================
808
+ logger.info(f"[STEP 4/5] Fetching and analyzing PKGBUILD for security issues...")
809
+ result["messages"].append("🔍 Fetching PKGBUILD for security analysis...")
810
+
811
+ try:
812
+ pkgbuild_content = await get_pkgbuild(package_name)
813
+ result["messages"].append(f"✅ PKGBUILD fetched ({len(pkgbuild_content)} bytes)")
814
+
815
+ # Analyze PKGBUILD for security issues
816
+ result["messages"].append("🛡️ Analyzing PKGBUILD for security threats...")
817
+ pkgbuild_analysis = analyze_pkgbuild_safety(pkgbuild_content)
818
+ result["security_checks"]["pkgbuild_analysis"] = pkgbuild_analysis
819
+ result["messages"].append(f"🛡️ Risk Score: {pkgbuild_analysis['risk_score']}/100")
820
+ result["messages"].append(f" {pkgbuild_analysis['recommendation']}")
821
+
822
+ # Log findings
823
+ if pkgbuild_analysis["red_flags"]:
824
+ result["messages"].append(f" 🚨 {len(pkgbuild_analysis['red_flags'])} CRITICAL issues found!")
825
+ for flag in pkgbuild_analysis["red_flags"][:3]: # Show first 3
826
+ result["messages"].append(f" - Line {flag['line']}: {flag['issue']}")
827
+
828
+ if pkgbuild_analysis["warnings"]:
829
+ result["messages"].append(f" ⚠️ {len(pkgbuild_analysis['warnings'])} warnings found")
830
+
831
+ # Check if package is safe to install
832
+ if not pkgbuild_analysis["safe"]:
833
+ result["messages"].append("❌ INSTALLATION BLOCKED - Security analysis failed")
834
+ result["messages"].append(" Package has critical security issues and will NOT be installed")
835
+ result["security_checks"]["decision"] = "BLOCKED"
836
+ result["security_checks"]["reason"] = "Critical security issues detected in PKGBUILD"
837
+ logger.warning(f"Installation blocked for {package_name} due to security issues")
838
+ return result
839
+
840
+ # Additional check for high-risk warnings
841
+ if len(pkgbuild_analysis["warnings"]) >= 5:
842
+ result["messages"].append("⚠️ HIGH RISK - Multiple suspicious patterns detected")
843
+ result["messages"].append(" Manual review recommended before installation")
844
+ result["security_checks"]["decision"] = "REVIEW_RECOMMENDED"
845
+
846
+ except ValueError as e:
847
+ logger.error(f"Failed to fetch PKGBUILD: {e}")
848
+ return create_error_response(
849
+ "FetchError",
850
+ f"Failed to fetch PKGBUILD for security analysis: {str(e)}"
851
+ )
852
+
853
+ # ========================================================================
854
+ # STEP 4: Check for AUR helper
855
+ # ========================================================================
856
+ logger.info(f"[STEP 5/5] Checking for AUR helper (paru/yay)...")
857
+ result["messages"].append("🔧 Checking for AUR helper...")
858
+
859
+ aur_helper = get_aur_helper()
860
+
861
+ if not aur_helper:
862
+ result["messages"].append("❌ No AUR helper found (paru or yay)")
863
+ result["messages"].append(" Please install an AUR helper:")
864
+ result["messages"].append(" - Recommended: paru (pacman -S paru)")
865
+ result["messages"].append(" - Alternative: yay")
866
+ result["security_checks"]["decision"] = "NO_HELPER"
867
+ return result
868
+
869
+ result["messages"].append(f"✅ Using AUR helper: {aur_helper}")
870
+ result["aur_helper"] = aur_helper
871
+
872
+ # ========================================================================
873
+ # STEP 5: Install package with AUR helper
874
+ # ========================================================================
875
+ result["messages"].append(f"📦 Installing {package_name} via {aur_helper} (no confirmation)...")
876
+ logger.info(f"Installing AUR package {package_name} with {aur_helper}")
877
+
878
+ try:
879
+ # Install with --noconfirm flag
880
+ exit_code, stdout, stderr = await run_command(
881
+ [aur_helper, "-S", "--noconfirm", package_name],
882
+ timeout=600, # 10 minutes for AUR package build
883
+ check=False
884
+ )
885
+
886
+ if exit_code == 0:
887
+ result["installed"] = True
888
+ result["messages"].append(f"✅ Successfully installed {package_name} from AUR")
889
+ result["security_checks"]["decision"] = "INSTALLED"
890
+ logger.info(f"Successfully installed AUR package: {package_name}")
891
+ else:
892
+ result["messages"].append(f"❌ Installation failed with exit code {exit_code}")
893
+ result["messages"].append(f" Error: {stderr}")
894
+ result["security_checks"]["decision"] = "INSTALL_FAILED"
895
+ logger.error(f"AUR installation failed for {package_name}: {stderr}")
896
+
897
+ # Check for sudo password issues
898
+ if "password" in stderr.lower() or "sudo" in stderr.lower():
899
+ result["messages"].append("")
900
+ result["messages"].append("⚠️ SUDO PASSWORD REQUIRED")
901
+ result["messages"].append("To enable passwordless installation for AUR packages:")
902
+ result["messages"].append("1. For passwordless sudo for pacman:")
903
+ result["messages"].append(" sudo visudo -f /etc/sudoers.d/arch-aur-install")
904
+ result["messages"].append(" Add: your_username ALL=(ALL) NOPASSWD: /usr/bin/pacman")
905
+ result["messages"].append("2. Or run the installation manually in your terminal:")
906
+ result["messages"].append(f" {aur_helper} -S {package_name}")
907
+
908
+ result["install_output"] = stdout
909
+ result["install_errors"] = stderr
910
+
911
+ except Exception as e:
912
+ logger.error(f"Installation failed: {e}")
913
+ result["messages"].append(f"❌ Installation exception: {str(e)}")
914
+ result["security_checks"]["decision"] = "INSTALL_ERROR"
915
+
916
+ return result
917
+
918
+
919
def analyze_pkgbuild_safety(pkgbuild_content: str) -> Dict[str, Any]:
    """
    Perform comprehensive safety analysis on PKGBUILD content.

    Checks for:
    - Dangerous commands (rm -rf /, dd, fork bombs, etc.)
    - Obfuscated code (base64, eval, encoding tricks)
    - Network activity (reverse shells, data exfiltration)
    - Binary downloads and execution
    - Privilege escalation attempts
    - Cryptocurrency mining patterns
    - Source URL validation
    - Suspicious file operations

    Args:
        pkgbuild_content: Raw PKGBUILD text

    Returns:
        Dict with detailed safety analysis results including:
        - safe: boolean
        - red_flags: critical security issues
        - warnings: suspicious patterns
        - info: informational notices
        - risk_score: 0-100 (higher = more dangerous)
        - suspicious_domains: deduplicated list of flagged source domains
        - recommendation: action recommendation
        - summary: counts of findings and lines analyzed
    """
    import logging
    import re
    from urllib.parse import urlparse

    # Same logger instance as the module-level one (getLogger is idempotent
    # per name); resolved locally so the function is self-contained.
    logger = logging.getLogger(__name__)

    red_flags = []  # Critical security issues
    warnings = []   # Suspicious but not necessarily malicious
    info = []       # Informational notices

    lines = pkgbuild_content.split('\n')
    logger.debug(f"Analyzing PKGBUILD with {len(lines)} lines")

    # ========================================================================
    # CRITICAL PATTERNS - Definitely malicious
    # ========================================================================
    dangerous_patterns = [
        # Destructive commands
        (r"rm\s+-rf\s+/[^a-zA-Z]", "CRITICAL: rm -rf / or /something detected - system destruction"),
        (r"\bdd\b.*if=/dev/(zero|random|urandom).*of=/dev/sd", "CRITICAL: dd overwriting disk detected"),
        (r":\(\)\{.*:\|:.*\}", "CRITICAL: Fork bomb detected"),
        (r"\bmkfs\.", "CRITICAL: Filesystem formatting detected"),
        (r"fdisk.*-w", "CRITICAL: Partition table modification detected"),

        # Reverse shells and backdoors
        (r"/dev/tcp/\d+\.\d+\.\d+\.\d+/\d+", "CRITICAL: Reverse shell via /dev/tcp detected"),
        (r"nc\s+-[^-]*e\s+/bin/(ba)?sh", "CRITICAL: Netcat reverse shell detected"),
        (r"bash\s+-i\s+>&\s+/dev/tcp/", "CRITICAL: Interactive reverse shell detected"),
        (r"python.*socket.*connect", "CRITICAL: Python socket connection (potential backdoor)"),
        (r"perl.*socket.*connect", "CRITICAL: Perl socket connection (potential backdoor)"),

        # Malicious downloads and execution
        (r"curl[^|]*\|\s*(ba)?sh", "CRITICAL: Piping curl to shell (remote code execution)"),
        (r"wget[^|]*\|\s*(ba)?sh", "CRITICAL: Piping wget to shell (remote code execution)"),
        (r"curl.*-o.*&&.*chmod\s+\+x.*&&\s*\./", "CRITICAL: Download, make executable, and run pattern"),

        # Crypto mining patterns
        (r"xmrig|minerd|cpuminer|ccminer", "CRITICAL: Cryptocurrency miner detected"),
        (r"stratum\+tcp://", "CRITICAL: Mining pool connection detected"),
        (r"--donate-level", "CRITICAL: XMRig miner option detected"),

        # Rootkit/malware installation
        (r"chattr\s+\+i", "CRITICAL: Making files immutable (rootkit technique)"),
        (r"/etc/ld\.so\.preload", "CRITICAL: LD_PRELOAD manipulation (rootkit technique)"),
        (r"HISTFILE=/dev/null", "CRITICAL: History clearing (covering tracks)"),
    ]

    # ========================================================================
    # SUSPICIOUS PATTERNS - Require careful review
    # ========================================================================
    suspicious_patterns = [
        # Obfuscation techniques
        (r"base64\s+-d", "Obfuscation: base64 decoding detected"),
        (r"xxd\s+-r", "Obfuscation: hex decoding detected"),
        (r"\beval\b", "Obfuscation: eval usage (can execute arbitrary code)"),
        (r"\$\(.*base64.*\)", "Obfuscation: base64 in command substitution"),
        (r"openssl\s+enc\s+-d", "Obfuscation: encrypted content decoding"),
        (r"echo.*\|.*sh", "Obfuscation: piping echo to shell"),
        (r"printf.*\|.*sh", "Obfuscation: piping printf to shell"),

        # Suspicious permissions and ownership
        (r"chmod\s+[0-7]*7[0-7]*7", "Dangerous: world-writable permissions"),
        (r"chown\s+root", "Suspicious: changing ownership to root"),
        (r"chmod\s+[u+]*s", "Suspicious: setuid/setgid (privilege escalation risk)"),

        # Suspicious file operations
        (r"mktemp.*&&.*chmod", "Suspicious: temp file creation with permission change"),
        (r">/dev/null\s+2>&1", "Suspicious: suppressing all output (hiding activity)"),
        (r"nohup.*&", "Suspicious: background process that persists"),

        # Network activity
        (r"curl.*-s.*-o", "Network: silent download detected"),
        (r"wget.*-q.*-O", "Network: quiet download detected"),
        (r"nc\s+-l", "Network: netcat listening mode (potential backdoor)"),
        (r"socat", "Network: socat usage (advanced networking tool)"),
        (r"ssh.*-R\s+\d+:", "Network: SSH reverse tunnel detected"),

        # Data exfiltration
        (r"curl.*-X\s+POST.*--data", "Data exfiltration: HTTP POST with data"),
        (r"tar.*\|.*ssh", "Data exfiltration: tar over SSH"),
        (r"scp.*-r.*\*", "Data exfiltration: recursive SCP"),

        # Systemd/init manipulation
        (r"systemctl.*enable.*\.service", "System: enabling systemd service"),
        (r"/etc/systemd/system/", "System: systemd unit file modification"),
        (r"update-rc\.d", "System: SysV init modification"),
        (r"@reboot", "System: cron job at reboot"),

        # Kernel module manipulation
        (r"modprobe", "System: kernel module loading"),
        (r"insmod", "System: kernel module insertion"),
        (r"/lib/modules/", "System: kernel module directory access"),

        # Compiler/build chain manipulation
        (r"gcc.*-fPIC.*-shared", "Build: creating shared library (could be malicious)"),
        (r"LD_PRELOAD=", "Build: LD_PRELOAD manipulation (function hijacking)"),
    ]

    # ========================================================================
    # INFORMATIONAL PATTERNS - Good to know but not necessarily bad
    # ========================================================================
    info_patterns = [
        (r"sudo\s+", "Info: sudo usage detected"),
        (r"git\s+clone", "Info: git clone detected"),
        (r"make\s+install", "Info: make install detected"),
        (r"pip\s+install", "Info: pip install detected"),
        (r"npm\s+install", "Info: npm install detected"),
        (r"cargo\s+install", "Info: cargo install detected"),
    ]

    # Pre-compile every pattern once; each is applied to every non-comment
    # line below, so compiling inside the loop would be wasted work.
    compiled_dangerous = [(re.compile(p, re.IGNORECASE), msg) for p, msg in dangerous_patterns]
    compiled_suspicious = [(re.compile(p, re.IGNORECASE), msg) for p, msg in suspicious_patterns]
    compiled_info = [(re.compile(p, re.IGNORECASE), msg) for p, msg in info_patterns]

    # ========================================================================
    # SCAN PATTERNS LINE BY LINE
    # ========================================================================
    for lineno, line in enumerate(lines, 1):
        # Skip comments and empty lines for pattern matching
        stripped_line = line.strip()
        if stripped_line.startswith('#') or not stripped_line:
            continue

        # Check dangerous patterns (red flags)
        for regex, message in compiled_dangerous:
            if regex.search(line):
                logger.warning(f"Red flag found at line {lineno}: {message}")
                red_flags.append({
                    "line": lineno,
                    "content": line.strip()[:100],  # Limit length for output
                    "issue": message,
                    "severity": "CRITICAL"
                })

        # Check suspicious patterns
        for regex, message in compiled_suspicious:
            if regex.search(line):
                logger.info(f"Warning found at line {lineno}: {message}")
                warnings.append({
                    "line": lineno,
                    "content": line.strip()[:100],
                    "issue": message,
                    "severity": "WARNING"
                })

        # Check informational patterns
        for regex, message in compiled_info:
            if regex.search(line):
                info.append({
                    "line": lineno,
                    "content": line.strip()[:100],
                    "issue": message,
                    "severity": "INFO"
                })

    # ========================================================================
    # ANALYZE SOURCE URLs
    # ========================================================================
    # NOTE(review): this only captures single-line source=(...) arrays;
    # a multi-line source array is not matched by this regex.
    source_urls = re.findall(r'source=\([^)]+\)|source_\w+=\([^)]+\)', pkgbuild_content, re.MULTILINE)
    suspicious_domains = []

    # Known suspicious TLDs and patterns
    suspicious_tlds = ['.tk', '.ml', '.ga', '.cf', '.gq', '.cn', '.ru']
    suspicious_url_patterns = [
        (r'bit\.ly|tinyurl|shorturl', "URL shortener (hides true destination)"),
        (r'pastebin|hastebin|paste\.ee', "Paste site (common for malware hosting)"),
        (r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}', "Raw IP address (suspicious)"),
    ]
    compiled_url_patterns = [(re.compile(p, re.IGNORECASE), msg) for p, msg in suspicious_url_patterns]

    for source_block in source_urls:
        # Extract URLs from source array
        urls = re.findall(r'https?://[^\s\'"]+', source_block)

        for url in urls:
            try:
                parsed = urlparse(url)
                domain = parsed.netloc.lower()

                # Check for suspicious TLDs
                if any(domain.endswith(tld) for tld in suspicious_tlds):
                    warnings.append({
                        "line": 0,
                        "content": url,
                        "issue": f"Suspicious domain TLD: {domain}",
                        "severity": "WARNING"
                    })
                    suspicious_domains.append(domain)

                # Check for suspicious URL patterns
                for regex, message in compiled_url_patterns:
                    if regex.search(url):
                        warnings.append({
                            "line": 0,
                            "content": url,
                            "issue": message,
                            "severity": "WARNING"
                        })
            except Exception as e:
                logger.debug(f"Failed to parse URL {url}: {e}")

    # ========================================================================
    # DETECT BINARY DOWNLOADS
    # ========================================================================
    binary_extensions = ['.bin', '.exe', '.AppImage', '.deb', '.rpm', '.jar', '.apk']
    content_lower = pkgbuild_content.lower()
    for ext in binary_extensions:
        # BUGFIX: lowercase BOTH sides. The original compared the mixed-case
        # extension (".AppImage") against lowered content, so ".AppImage"
        # could never match and AppImage payloads were silently missed.
        if ext.lower() in content_lower:
            warnings.append({
                "line": 0,
                "content": "",
                "issue": f"Binary file type detected: {ext}",
                "severity": "WARNING"
            })

    # ========================================================================
    # CALCULATE RISK SCORE
    # ========================================================================
    # Risk scoring: red_flags = 50 points each, warnings = 5 points each, cap at 100
    risk_score = min(100, (len(red_flags) * 50) + (len(warnings) * 5))

    # ========================================================================
    # GENERATE RECOMMENDATION
    # ========================================================================
    if len(red_flags) > 0:
        recommendation = "❌ DANGEROUS - Critical security issues detected. DO NOT INSTALL."
        safe = False
    elif len(warnings) >= 5:
        recommendation = "⚠️ HIGH RISK - Multiple suspicious patterns detected. Review carefully before installing."
        safe = False
    elif len(warnings) > 0:
        recommendation = "⚠️ CAUTION - Some suspicious patterns detected. Manual review recommended."
        safe = True  # Technically safe but needs review
    else:
        recommendation = "✅ SAFE - No critical issues detected. Standard review still recommended."
        safe = True

    logger.info(f"PKGBUILD analysis complete: {len(red_flags)} red flags, {len(warnings)} warnings, risk score: {risk_score}")

    return {
        "safe": safe,
        "red_flags": red_flags,
        "warnings": warnings,
        "info": info,
        "risk_score": risk_score,
        "suspicious_domains": list(set(suspicious_domains)),
        "recommendation": recommendation,
        "summary": {
            "total_red_flags": len(red_flags),
            "total_warnings": len(warnings),
            "total_info": len(info),
            "lines_analyzed": len(lines)
        }
    }
1190
+