patchops 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47) hide show
  1. lambdas/__init__.py +1 -0
  2. lambdas/__pycache__/__init__.cpython-311.pyc +0 -0
  3. lambdas/code_analyzer/__init__.py +1 -0
  4. lambdas/code_analyzer/__pycache__/__init__.cpython-311.pyc +0 -0
  5. lambdas/code_analyzer/__pycache__/handler.cpython-311.pyc +0 -0
  6. lambdas/code_analyzer/handler.py +87 -0
  7. lambdas/component_tester/__pycache__/handler.cpython-311.pyc +0 -0
  8. lambdas/component_tester/handler.py +47 -0
  9. lambdas/exploit_crafter/__init__.py +1 -0
  10. lambdas/exploit_crafter/__pycache__/__init__.cpython-311.pyc +0 -0
  11. lambdas/exploit_crafter/__pycache__/handler.cpython-311.pyc +0 -0
  12. lambdas/exploit_crafter/handler.py +111 -0
  13. lambdas/graph_builder/__pycache__/handler.cpython-311.pyc +0 -0
  14. lambdas/graph_builder/handler.py +52 -0
  15. lambdas/neighbor_resolver/__pycache__/handler.cpython-311.pyc +0 -0
  16. lambdas/neighbor_resolver/handler.py +23 -0
  17. lambdas/orchestrator/__init__.py +0 -0
  18. lambdas/orchestrator/handler.py +33 -0
  19. lambdas/patch_writer/__init__.py +0 -0
  20. lambdas/patch_writer/__pycache__/__init__.cpython-311.pyc +0 -0
  21. lambdas/patch_writer/__pycache__/handler.cpython-311.pyc +0 -0
  22. lambdas/patch_writer/handler.py +121 -0
  23. lambdas/pr_generator/__init__.py +0 -0
  24. lambdas/pr_generator/__pycache__/__init__.cpython-311.pyc +0 -0
  25. lambdas/pr_generator/__pycache__/handler.cpython-311.pyc +0 -0
  26. lambdas/pr_generator/__pycache__/template.cpython-311.pyc +0 -0
  27. lambdas/pr_generator/handler.py +126 -0
  28. lambdas/pr_generator/template.py +87 -0
  29. lambdas/requirements_checker/__pycache__/handler.cpython-311.pyc +0 -0
  30. lambdas/requirements_checker/handler.py +94 -0
  31. lambdas/security_reviewer/__init__.py +1 -0
  32. lambdas/security_reviewer/__pycache__/__init__.cpython-311.pyc +0 -0
  33. lambdas/security_reviewer/__pycache__/handler.cpython-311.pyc +0 -0
  34. lambdas/security_reviewer/handler.py +117 -0
  35. lambdas/shared/__init__.py +1 -0
  36. lambdas/shared/__pycache__/__init__.cpython-311.pyc +0 -0
  37. lambdas/shared/__pycache__/utils.cpython-311.pyc +0 -0
  38. lambdas/shared/utils.py +123 -0
  39. lambdas/system_tester/__pycache__/handler.cpython-311.pyc +0 -0
  40. lambdas/system_tester/handler.py +110 -0
  41. patchops/__init__.py +1 -0
  42. patchops/cli.py +68 -0
  43. patchops-1.0.0.dist-info/METADATA +17 -0
  44. patchops-1.0.0.dist-info/RECORD +47 -0
  45. patchops-1.0.0.dist-info/WHEEL +5 -0
  46. patchops-1.0.0.dist-info/entry_points.txt +2 -0
  47. patchops-1.0.0.dist-info/top_level.txt +2 -0
lambdas/__init__.py ADDED
@@ -0,0 +1 @@
1
+ # (empty — no content needed)
@@ -0,0 +1 @@
1
+ # (empty — no content needed)
@@ -0,0 +1,87 @@
1
+ import sys
2
+ import os
3
+
4
+ # Works both locally (from repo root) and in Lambda
5
+ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(
6
+ os.path.abspath(__file__)
7
+ ))))
8
+ from lambdas.shared.utils import safe_call_llm_json
9
+
10
+
11
def handler(event, context=None):
    """
    Hard Boundary Code Analyzer Lambda Handler.
    Strictly limited to Top 8 vulnerability categories with consolidation.

    Args:
        event: Lambda payload; must contain "source_code" (the Python text
            to scan). An empty string is accepted and yields no findings.
        context: Unused Lambda context (kept for the Lambda signature).

    Returns:
        {"vulnerabilities": [...]} containing only findings the model marked
        present AND that carry every required schema field, or
        {"error": ...} on bad input / LLM failure.
    """
    try:
        # Reject a missing payload early; downstream code assumes the key exists.
        if not event or "source_code" not in event:
            return {"error": "No source_code provided"}

        source_code = event.get("source_code", "")

        # The Hard Boundary Vulnerability Scanning Prompt.
        # Doubled braces ({{ / }}) are f-string escapes for literal JSON braces.
        prompt = f"""You are an Elite Static Application Security Testing (SAST) engine.
You are evaluating this Python Flask application.

SOURCE CODE:
```python
{source_code}
```

CRITICAL BOUNDARY DIRECTIVE:
You must perform an exhaustive sweep for EXACTLY these 8 vulnerability categories:

1. Hardcoded Secrets (CWE-798)
2. SQL Injection (CWE-89)
3. Command Injection (CWE-78)
4. Path Traversal (CWE-22)
5. Cross-Site Scripting / XSS (CWE-79)
6. Broken Access Control / IDOR (CWE-284)
7. Unsafe Deserialization (CWE-502)
8. Server-Side Request Forgery / SSRF (CWE-918)

RULES:

1. You MUST return an array of exactly 8 objects, one for each category above.
2. For each category, evaluate logically if it is genuinely present in the given code. Set "is_present" to true or false.
3. If "is_present" is true, provide the severity, vulnerable_lines (exact match of the code), and an attack_vector explanation.
4. If "is_present" is false, leave vulnerable_lines as an empty array and attack_vector as empty string.
5. If the same vulnerability appears multiple times, CONSOLIDATE them into the single object.

STRICT JSON SCHEMA:
{{
"analysis": [
{{
"vulnerability_type": "<Type, e.g., SQL Injection>",
"cwe": "<CWE number, e.g., CWE-89>",
"is_present": <true/false boolean value only>,
"severity": "<CRITICAL/HIGH/MEDIUM/NONE>",
"vulnerable_lines": ["<exact string 1>", "<exact string 2>"],
"attack_vector": "<Comprehensive attack explanation>"
}}
]
}}

Return ONLY the JSON object. Start with {{ and end with }}. No other text. Every field must be present."""

        # Call LLM with consolidated findings; the shared helper retries and
        # parses the model output into a dict.
        result = safe_call_llm_json(prompt, max_tokens=3000, retries=2)

        # Validate and clean the response
        if "error" in result:
            return result

        analysis = result.get("analysis", [])

        # Filter to ensure we only return genuinely present vulnerabilities.
        # The model sometimes emits "true" as a string rather than a boolean,
        # so both forms are accepted.
        validated_vulns = []
        for vuln in analysis:
            is_present = vuln.get("is_present")
            if is_present is True or str(is_present).lower() == "true":
                # Drop entries missing any required schema field.
                if all(key in vuln for key in ["vulnerability_type", "cwe", "severity", "vulnerable_lines", "attack_vector"]):
                    validated_vulns.append(vuln)

        return {"vulnerabilities": validated_vulns}

    except Exception as e:
        # Lambda boundary: surface any unexpected failure as a JSON error.
        return {"error": f"Code analyzer exception: {str(e)}"}
@@ -0,0 +1,47 @@
1
+ import json
2
+
3
+ # Mock LLM call for component testing
4
def safe_call_llm_json(prompt, max_tokens=1000):
    """Deterministic stand-in for the real LLM call, used for component testing.

    Reports an incompatibility (with a canned rewrite of auth.py) only when
    the prompt mentions both app.py and auth.py; any other prompt is judged
    compatible. ``max_tokens`` is accepted for signature parity but ignored.
    """
    # Compatible case first: anything that isn't the app.py/auth.py pairing.
    if "auth.py" not in prompt or "app.py" not in prompt:
        return {
            "is_compatible": True,
            "issues_found": [],
            "suggested_fix": ""
        }

    # Simulated verdict: patching app.py is assumed to break auth.py's
    # get_profile call site, so we hand back a suggested replacement file.
    fixed_auth_source = "import db_utils\nimport config\nimport utils\n\ndef login(username, password):\n return {\"status\": \"success\", \"token\": \"mock-token-123\"}\n\ndef get_profile(user_id):\n # FIXED IDOR\n return db_utils.safe_query(\"SELECT * FROM users WHERE id = ?\", (user_id,))[0] if db_utils.query_user(user_id) else None\n\ndef check_token(token):\n return token == \"mock-token-123\"\n"
    return {
        "is_compatible": False,
        "issues_found": ["Patched file changed get_profile call site logic"],
        "suggested_fix": fixed_auth_source
    }
21
+
22
def handler(event, context):
    """Ask the (mock) LLM whether a neighbor file still works after a patch.

    Args:
        event: expects "patched_file_name", "completed_fixes" (list),
            "neighbor_file_name" and "neighbor_code". ("original_code" and
            "patched_code" may be present in the payload but are not used
            by the prompt, so they are no longer read here.)
        context: unused Lambda context.

    Returns:
        The compatibility verdict dict from safe_call_llm_json, annotated
        with the neighbor's file name under "neighbor".
    """
    patched_file_name = event.get("patched_file_name")
    completed_fixes = event.get("completed_fixes", [])
    neighbor_file_name = event.get("neighbor_file_name")
    neighbor_code = event.get("neighbor_code")

    COMPONENT_TEST_PROMPT = f"""
You are a code compatibility analyst.

PATCHED FILE: {patched_file_name}
CHANGES MADE:
{json.dumps(completed_fixes, indent=2)}

NEIGHBOR FILE: {neighbor_file_name}
{neighbor_code}

TASK: Does the neighbor file still work correctly given the changes made to the patched file?
Return ONLY JSON.
"""

    # In demo mode, we use the mock result
    result = safe_call_llm_json(COMPONENT_TEST_PROMPT)
    result["neighbor"] = neighbor_file_name
    return result
@@ -0,0 +1 @@
1
+ # (empty — no content needed)
@@ -0,0 +1,111 @@
1
+ import sys
2
+ import os
3
+
4
+ # Works both locally (from repo root) and in Lambda
5
+ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(
6
+ os.path.abspath(__file__)
7
+ ))))
8
+ from lambdas.shared.utils import safe_call_llm_json, call_llm, extract_code_block
9
+
10
# Prompt template for generating a SQL-injection proof-of-concept script.
# Placeholders ({vulnerability_type}, {attack_vector}, {vulnerable_lines}) are
# filled via str.format() in handler(); doubled braces are format-escapes for
# literal braces in the example payloads.
EXPLOIT_PROMPT = """You are a red team security researcher writing a proof-of-concept exploit.

Vulnerability type: {vulnerability_type}
Attack vector: {attack_vector}
Vulnerable lines: {vulnerable_lines}

The target application is running at http://localhost:5000
The vulnerable endpoint is: GET /user?id=<INJECT_HERE>
The endpoint returns JSON in this format: {{"users": [[username, email, role], ...]}}

Write a Python exploit script with these EXACT requirements:

1. First line must be: import requests
2. Second line must be: import sys
3. Use the requests params argument to pass the SQL payload, like this:
response = requests.get('http://localhost:5000/user',
params={{'id': '1 OR 1=1--'}},
timeout=10)
4. Parse the JSON response with response.json()
5. SUCCESS condition: the response contains MORE THAN 1 user in the "users" list
This proves the SQL injection worked by dumping all rows
6. If more than 1 user returned: use print('EXPLOIT_SUCCESS') on its own line,
then print the full list of users extracted
7. If only 1 or 0 users returned: use print('EXPLOIT_FAILED') on its own line
8. Wrap everything in try/except Exception as e:
In the except block: print('EXPLOIT_FAILED') then print(str(e))
9. Do NOT use sys.stderr — print everything to stdout
10. Do NOT URL-encode the payload manually — pass raw SQL string to params dict
11. NEVER use a variable for the success/failure strings — always print the literal strings directly

Return ONLY the raw Python script. No markdown. No explanation.
The script must start with exactly: import requests"""
42
+
43
def validate_exploit(code: str) -> list:
    """Run lightweight substring sanity checks against a generated exploit.

    Returns a list of human-readable issue descriptions (empty when the
    script passes every check), in a fixed order suitable for feeding back
    to the LLM as retry instructions.
    """
    # Each entry pairs a "passed" predicate with the message emitted when it
    # fails; evaluation order matches the message order callers rely on.
    checks = [
        ('EXPLOIT_SUCCESS' in code, 'missing EXPLOIT_SUCCESS string'),
        ('import requests' in code, 'missing requests import'),
        ('import sys' in code, 'missing sys import'),
        ('localhost:5000' in code or '127.0.0.1:5000' in code, 'does not target localhost:5000'),
        ('timeout' in code, 'missing timeout parameter'),
        ('try' in code and 'except' in code, 'missing try/except'),
        ('params' in code or 'OR' in code or 'UNION' in code, 'no SQL injection payload visible in code'),
    ]
    return [message for passed, message in checks if not passed]
60
+
61
+
62
def handler(event, context=None):
    """Generate a proof-of-concept exploit script via the LLM.

    Formats EXPLOIT_PROMPT with the vulnerability details from the event,
    asks the model for a script, sanity-checks it with validate_exploit,
    and retries exactly once with the validator's complaints appended.

    Returns {"exploit_code": ...}, optionally with "warnings" when the
    retried script still fails validation, or {"error": ..., "exploit_code": ""}.
    """
    try:
        vulnerability_type = event.get("vulnerability_type")
        attack_vector = event.get("attack_vector")

        if not (vulnerability_type and attack_vector):
            return {
                "error": "Missing required fields: vulnerability_type or attack_vector",
                "exploit_code": ""
            }

        base_prompt = EXPLOIT_PROMPT.format(
            vulnerability_type=vulnerability_type,
            attack_vector=attack_vector,
            vulnerable_lines=event.get("vulnerable_lines", [])
        )

        def generate(prompt_text):
            # One LLM round-trip: strip markdown fences and force a leading import.
            script = extract_code_block(call_llm(prompt_text, max_tokens=1500)).strip()
            if not script.startswith("import"):
                script = "import requests\n" + script
            return script

        # First attempt.
        code = generate(base_prompt)
        issues = validate_exploit(code)
        if not issues:
            return {"exploit_code": code}

        # One retry, feeding the validator's complaints back to the model.
        final_code = generate(base_prompt + "\n\nFix these problems in your script: " + ", ".join(issues))
        result = {"exploit_code": final_code}
        final_issues = validate_exploit(final_code)
        if final_issues:
            result["warnings"] = final_issues
        return result

    except Exception as e:
        return {
            "error": str(e),
            "exploit_code": ""
        }
@@ -0,0 +1,52 @@
1
+ import os
2
+ import re
3
+
4
def handler(event, context):
    """Build a file-level import graph for every .py file under a repository.

    Args:
        event: expects "repo_path" (directory to scan; defaults to
            "PatchOps-Target").
        context: unused Lambda context.

    Returns:
        {"nodes": [...], "edges": [...]} where each node describes one
        Python file (id, path, line count, status) and each edge is an
        "imports" relation between files.

    Note: the import regex only captures a bare top-level module name
    (`import x` / `from x import y`), so edges only resolve to modules
    whose .py file sits at the repository root.
    """
    repo_path = event.get("repo_path", "PatchOps-Target")
    nodes = []
    edges = []
    found_files = set()
    contents = {}  # node id -> file text, read once and reused for edge scanning

    for root, dirs, files in os.walk(repo_path):
        for file in files:
            if not file.endswith(".py"):
                continue
            full_path = os.path.join(root, file)
            rel_path = os.path.relpath(full_path, repo_path)
            # Use forward slashes for IDs so they are stable across OSes.
            file_id = rel_path.replace("\\", "/")
            # splitext strips only the trailing extension; str.replace(".py", "")
            # would also mangle ".py" occurring anywhere else in the path.
            found_files.add(os.path.splitext(file_id)[0])

            with open(full_path, 'r', encoding='utf-8') as f:
                content = f.read()
            contents[file_id] = content
            nodes.append({
                "id": file_id,
                "path": full_path,
                "lines": len(content.splitlines()),
                "status": "neutral"
            })

    # Regex for top-level imports: 'import X' or 'from X import ...'
    import_pattern = re.compile(r'^(?:import\s+(\w+)|from\s+(\w+)\s+import)')

    for node in nodes:
        # Scan the cached text instead of re-reading each file from disk.
        for line in contents[node["id"]].splitlines():
            match = import_pattern.match(line)
            if not match:
                continue
            # Group 1 is 'import X', group 2 is 'from X import Y'.
            imported_module = match.group(1) or match.group(2)
            # Only link modules that correspond to files in this repo.
            if imported_module in found_files:
                edges.append({
                    "source": node["id"],
                    "target": f"{imported_module}.py",
                    "type": "imports"
                })

    return {"nodes": nodes, "edges": edges}
@@ -0,0 +1,23 @@
1
def handler(event, context):
    """Select the files directly connected to the patched file in the graph.

    Args:
        event: expects "patched_file" (node id) and "graph"
            ({"nodes": [...], "edges": [...]} as produced by graph_builder).
        context: unused Lambda context.

    Returns:
        {"neighbors": [...], "excluded": [...], "reasoning": str} — both
        lists sorted so output is deterministic across invocations
        (plain set iteration order is not stable).
    """
    patched_file = event.get("patched_file")
    graph = event.get("graph", {"nodes": [], "edges": []})

    neighbors = set()
    for edge in graph.get("edges", []):
        # Treat the graph as undirected: a neighbor is anything the patched
        # file imports, or anything that imports the patched file.
        if edge["source"] == patched_file:
            neighbors.add(edge["target"])
        if edge["target"] == patched_file:
            neighbors.add(edge["source"])

    all_nodes = {n["id"] for n in graph.get("nodes", [])}
    # Sorted for deterministic output and a stable reasoning string.
    excluded = sorted(all_nodes - neighbors - {patched_file})
    neighbors = sorted(neighbors)

    reasoning = f"{patched_file} directly connects to: {', '.join(neighbors)}. " \
                f"{len(excluded)} files have no direct edge and are excluded."

    return {
        "neighbors": neighbors,
        "excluded": excluded,
        "reasoning": reasoning
    }
File without changes
@@ -0,0 +1,33 @@
1
+ import json, boto3, uuid
2
+ from datetime import datetime
3
+
4
# Module-level DynamoDB handle: created once per Lambda container and reused
# across warm invocations. Region and table name are hardcoded for this
# deployment — NOTE(review): consider env vars if the stack is ever redeployed
# elsewhere.
dynamo = boto3.resource("dynamodb", region_name="eu-north-1")
table = dynamo.Table("breachloop_pipeline")
6
+
7
def lambda_handler(event, context):
    """Entry point: record an incoming pipeline request in DynamoDB.

    Accepts either an API Gateway-style event (JSON string under "body")
    or a direct invocation payload, assigns a fresh run_id, and persists
    the request with status RECEIVED.

    Returns an HTTP-style dict: 200 with the run_id on success, 500 with
    the error text on any failure.
    """
    try:
        payload = event.get("body")

        if isinstance(payload, str):
            # API Gateway delivers the request body as a JSON string.
            payload = json.loads(payload)
        elif payload is None:
            # Direct invocation: the event itself is the payload.
            payload = event

        run_id = str(uuid.uuid4())

        table.put_item(Item={
            "run_id": run_id,
            "status": "RECEIVED",
            "timestamp": datetime.utcnow().isoformat(),
            "input": payload
        })

        return {
            "statusCode": 200,
            "body": json.dumps({"run_id": run_id, "status": "RECEIVED"})
        }

    except Exception as e:
        return {"statusCode": 500, "body": json.dumps({"error": str(e)})}
File without changes
@@ -0,0 +1,121 @@
1
+ import sys
2
+ import os
3
+ import json
4
+ import difflib
5
+
6
+ # Works both locally (from repo root) and in Lambda
7
+ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(
8
+ os.path.abspath(__file__)
9
+ ))))
10
+ from lambdas.shared.utils import safe_call_llm_json, extract_code_block
11
+
12
+
13
def lambda_handler(event, context=None):
    """
    Batch Patch Writer Lambda Handler.
    Now with fuzzy matching and line-by-line fallback to ensure patches apply.

    Args:
        event: expects "source_code" (file text to patch) and
            "vulnerabilities" (list of findings from the analyzer).
        context: unused Lambda context.

    Returns:
        {"patched_code", "completed_fixes", "total_vulnerabilities",
        "successful_patches"} on success, or {"error": ...} on failure.
    """
    try:
        source_code = event.get("source_code", "")
        vulnerabilities = event.get("vulnerabilities", [])

        if not source_code:
            return {"error": "Missing source_code", "patched_code": "", "completed_fixes": []}
        if not vulnerabilities:
            # Nothing to fix: pass the original through untouched.
            return {"patched_code": source_code, "completed_fixes": []}

        vulns_str = json.dumps(vulnerabilities, indent=2)

        # Doubled braces ({{ / }}) are f-string escapes for literal JSON braces.
        prompt = f"""You are an elite Application Security Engineer.
You must fix ALL vulnerabilities in the provide source code.

ORIGINAL CODE:
```python
{source_code}
```

VULNERABILITIES TO FIX:
{vulns_str}

CRITICAL RULES:
1. USE SURGICAL SEARCH & REPLACE.
2. The 'search' block must be a SIGNIFICANT chunk of the original code (e.g., entire function or large block) to ensure uniqueness.
3. The 'search' block must match the original code EXACTLY, line for line.
4. If you fix multiple issues in one file, group them into the 'fixes' array.

JSON SCHEMA:
{{
"fixes": [
{{
"vulnerability_type": "<type>",
"cwe": "<cwe>",
"changes_made": ["Fix 1", "Fix 2"],
"modifications": [
{{
"search": "<EXACT block from original source>",
"replace": "<Secure result>"
}}
]
}}
]
}}
"""
        batch_result = safe_call_llm_json(prompt, max_tokens=4000, retries=2)
        current_code = source_code
        completed_fixes = []

        for fix in batch_result.get("fixes", []):
            vuln_type = fix.get("vulnerability_type", "Unknown")
            cwe = fix.get("cwe", "")
            modifications = fix.get("modifications", [])

            # A fix counts as successful if ANY of its modifications applied.
            fix_successful = False
            for mod in modifications:
                search_text = mod.get("search", "").strip()
                replace_text = mod.get("replace", "")

                if not search_text: continue

                # 1. Exact Match — replace only the first occurrence so a
                # repeated snippet elsewhere in the file is left alone.
                if search_text in current_code:
                    current_code = current_code.replace(search_text, replace_text, 1)
                    fix_successful = True
                else:
                    # 2. Fuzzy Match / Line-by-line fallback
                    # This is the "Root Cause" fix - being more flexible with hallucinated search strings
                    # NOTE(review): search_lines drops blank lines, but the
                    # replacement slice below spans len(search_lines) RAW source
                    # lines — if the model's search block contained blanks, the
                    # splice may cover fewer lines than intended; confirm.
                    search_lines = [l.strip() for l in search_text.split('\n') if l.strip()]
                    source_lines = current_code.split('\n')

                    best_match_start = -1
                    best_match_score = 0

                    # Look for the block with highest overlap (sliding window
                    # over every candidate start position).
                    for i in range(len(source_lines) - len(search_lines) + 1):
                        window = [l.strip() for l in source_lines[i : i+len(search_lines)] if l.strip()]
                        if not window: continue

                        # Whitespace-insensitive similarity of the two blocks.
                        score = difflib.SequenceMatcher(None, '\n'.join(search_lines), '\n'.join(window)).ratio()
                        if score > 0.85 and score > best_match_score:
                            best_match_score = score
                            best_match_start = i

                    if best_match_start != -1:
                        # Replace the block: splice the replacement in as a
                        # single element; '\n'.join restores line structure.
                        new_lines = source_lines[:best_match_start] + [replace_text] + source_lines[best_match_start + len(search_lines):]
                        current_code = '\n'.join(new_lines)
                        fix_successful = True
                    else:
                        # Deliberately non-fatal: skip this modification and
                        # continue with the remaining fixes.
                        print(f"⚠️ WARNING: Could not apply patch for {vuln_type} - Search string not found (even with 85% fuzzy match).")

            if fix_successful:
                completed_fixes.append({"type": vuln_type, "cwe": cwe, "changes_made": fix.get("changes_made", [])})

        return {
            "patched_code": current_code,
            "completed_fixes": completed_fixes,
            "total_vulnerabilities": len(vulnerabilities),
            "successful_patches": len(completed_fixes)
        }

    except Exception as e:
        return {"error": f"Exception: {str(e)}", "patched_code": "", "completed_fixes": []}
File without changes
@@ -0,0 +1,126 @@
1
+ import sys
2
+ import os
3
+ import json
4
+ import uuid
5
+
6
+ # Works both locally (from repo root) and in Lambda
7
+ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(
8
+ os.path.abspath(__file__)
9
+ ))))
10
+
11
+ from github import Github, GithubException
12
+ from .template import generate_pr_body
13
+
14
+
15
def lambda_handler(event, context=None):
    """
    PR Generator Lambda Handler.
    Creates a GitHub Pull Request with the approved security patches.

    Args:
        event: expects "repo_full_name" (e.g. "owner/repo"), "file_path",
            "final_patch" (new file content), and optionally
            "fixed_vulnerabilities", "req_check_result", "test_results".
        context: unused Lambda context.

    Returns:
        {"status": "SUCCESS", "pr_url", "branch_name"} on success, or a
        {"status": "ERROR", ...} dict describing what failed.

    Side effects: creates a branch, commits one file update, and opens a
    pull request in the target repository.
    """
    try:
        # Extract input fields
        repo_full_name = event.get("repo_full_name", "")
        file_path = event.get("file_path", "")
        final_patch = event.get("final_patch", "")
        fixed_vulnerabilities = event.get("fixed_vulnerabilities", [])

        # NEW: Extra audit results (forwarded into the PR body template).
        req_check_result = event.get("req_check_result")
        test_results = event.get("test_results", [])

        print(f"PR_GENERATOR: Targeting repo {repo_full_name}, file {file_path}")

        # Validate required fields
        if not all([repo_full_name, file_path, final_patch]):
            return {
                "status": "ERROR",
                "pr_url": "",
                "branch_name": "",
                "error_message": f"Missing required fields. Received: repo={repo_full_name}, path={file_path}"
            }

        # Fetch GitHub token from environment
        github_token = os.environ.get("GITHUB_TOKEN")
        if not github_token:
            return {
                "status": "ERROR",
                "pr_url": "",
                "branch_name": "",
                "error_message": "GITHUB_TOKEN environment variable not set"
            }

        # Initialize GitHub client
        g = Github(github_token)

        # Get the repository; a 404 is reported as a friendly error rather
        # than falling through to the generic GithubException handler.
        try:
            repo = g.get_repo(repo_full_name)
        except GithubException as ge:
            if ge.status == 404:
                return {"status": "ERROR", "error_message": f"Repository '{repo_full_name}' not found. Check name and token permissions."}
            raise ge

        # Get the default branch (base for the new branch and the PR).
        default_branch = repo.default_branch
        main_ref = repo.get_git_ref(f"heads/{default_branch}")
        main_sha = main_ref.object.sha

        # Generate unique branch name (8 hex chars is enough to avoid
        # collisions between runs).
        branch_name = f"patchops-fix-{uuid.uuid4().hex[:8]}"

        # Create new branch from main
        repo.create_git_ref(ref=f"refs/heads/{branch_name}", sha=main_sha)

        # Get the current file's SHA — required by the contents API to
        # update (rather than create) the file.
        try:
            file_contents = repo.get_contents(file_path, ref=default_branch)
            current_file_sha = file_contents.sha
        except GithubException as ge:
            if ge.status == 404:
                return {"status": "ERROR", "error_message": f"File '{file_path}' not found in repo '{repo_full_name}'."}
            raise ge

        # Update the file on the new branch with the patched content.
        vulnerability_summary = f"{len(fixed_vulnerabilities)} vulnerabilities" if fixed_vulnerabilities else "security fixes"
        commit_message = f"Auto-patch: Fix {vulnerability_summary}"
        repo.update_file(
            path=file_path,
            message=commit_message,
            content=final_patch,
            sha=current_file_sha,
            branch=branch_name
        )

        # Generate the PR body markdown - PASS NEW RESULTS
        pr_body = generate_pr_body(
            fixed_vulnerabilities_list=fixed_vulnerabilities,
            req_check_result=req_check_result,
            test_results=test_results
        )

        # Create the Pull Request title; the generic title is kept for
        # dependency-consistency-only runs.
        pr_title = "🔒 Security Patch: Autonomous Remediation & Verification"
        if fixed_vulnerabilities and fixed_vulnerabilities[0].get('vulnerability_type') != "Dependency Consistency Fix":
            pr_title = f"🔒 Security Patch: {len(fixed_vulnerabilities)} Vulnerabilities Fixed"

        # Create the Pull Request
        pr = repo.create_pull(
            title=pr_title,
            body=pr_body,
            head=branch_name,
            base=default_branch
        )

        return {
            "status": "SUCCESS",
            "pr_url": pr.html_url,
            "branch_name": branch_name
        }

    except GithubException as e:
        # Surface the GitHub status code and API message for debugging.
        error_msg = f"GitHub API error: {e.status} {e.data.get('message', str(e))}"
        return {"status": "ERROR", "pr_url": "", "branch_name": "", "error_message": error_msg}

    except Exception as e:
        error_msg = f"PR generation exception: {str(e)}"
        return {"status": "ERROR", "pr_url": "", "branch_name": "", "error_message": error_msg}