agentops-cockpit 0.9.5__py3-none-any.whl → 0.9.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agent_ops_cockpit/agent.py +44 -77
- agent_ops_cockpit/cache/semantic_cache.py +10 -21
- agent_ops_cockpit/cli/main.py +105 -153
- agent_ops_cockpit/eval/load_test.py +33 -50
- agent_ops_cockpit/eval/quality_climber.py +88 -93
- agent_ops_cockpit/eval/red_team.py +84 -25
- agent_ops_cockpit/mcp_server.py +26 -93
- agent_ops_cockpit/ops/arch_review.py +221 -147
- agent_ops_cockpit/ops/auditors/base.py +50 -0
- agent_ops_cockpit/ops/auditors/behavioral.py +31 -0
- agent_ops_cockpit/ops/auditors/compliance.py +35 -0
- agent_ops_cockpit/ops/auditors/dependency.py +48 -0
- agent_ops_cockpit/ops/auditors/finops.py +48 -0
- agent_ops_cockpit/ops/auditors/graph.py +49 -0
- agent_ops_cockpit/ops/auditors/pivot.py +51 -0
- agent_ops_cockpit/ops/auditors/reasoning.py +67 -0
- agent_ops_cockpit/ops/auditors/reliability.py +53 -0
- agent_ops_cockpit/ops/auditors/security.py +87 -0
- agent_ops_cockpit/ops/auditors/sme_v12.py +76 -0
- agent_ops_cockpit/ops/auditors/sovereignty.py +74 -0
- agent_ops_cockpit/ops/auditors/sre_a2a.py +179 -0
- agent_ops_cockpit/ops/benchmarker.py +97 -0
- agent_ops_cockpit/ops/cost_optimizer.py +15 -24
- agent_ops_cockpit/ops/discovery.py +214 -0
- agent_ops_cockpit/ops/evidence_bridge.py +30 -63
- agent_ops_cockpit/ops/frameworks.py +124 -1
- agent_ops_cockpit/ops/git_portal.py +74 -0
- agent_ops_cockpit/ops/mcp_hub.py +19 -42
- agent_ops_cockpit/ops/orchestrator.py +477 -277
- agent_ops_cockpit/ops/policy_engine.py +38 -38
- agent_ops_cockpit/ops/reliability.py +121 -52
- agent_ops_cockpit/ops/remediator.py +54 -0
- agent_ops_cockpit/ops/secret_scanner.py +34 -22
- agent_ops_cockpit/ops/swarm.py +17 -27
- agent_ops_cockpit/ops/ui_auditor.py +67 -6
- agent_ops_cockpit/ops/watcher.py +41 -70
- agent_ops_cockpit/ops/watchlist.json +30 -0
- agent_ops_cockpit/optimizer.py +161 -384
- agent_ops_cockpit/tests/test_arch_review.py +6 -6
- agent_ops_cockpit/tests/test_discovery.py +96 -0
- agent_ops_cockpit/tests/test_ops_core.py +56 -0
- agent_ops_cockpit/tests/test_orchestrator_fleet.py +73 -0
- agent_ops_cockpit/tests/test_persona_architect.py +75 -0
- agent_ops_cockpit/tests/test_persona_finops.py +31 -0
- agent_ops_cockpit/tests/test_persona_security.py +55 -0
- agent_ops_cockpit/tests/test_persona_sre.py +43 -0
- agent_ops_cockpit/tests/test_persona_ux.py +42 -0
- agent_ops_cockpit/tests/test_quality_climber.py +2 -2
- agent_ops_cockpit/tests/test_remediator.py +75 -0
- agent_ops_cockpit/tests/test_ui_auditor.py +52 -0
- agentops_cockpit-0.9.8.dist-info/METADATA +172 -0
- agentops_cockpit-0.9.8.dist-info/RECORD +71 -0
- agent_ops_cockpit/tests/test_optimizer.py +0 -68
- agent_ops_cockpit/tests/test_red_team.py +0 -35
- agent_ops_cockpit/tests/test_secret_scanner.py +0 -24
- agentops_cockpit-0.9.5.dist-info/METADATA +0 -246
- agentops_cockpit-0.9.5.dist-info/RECORD +0 -47
- {agentops_cockpit-0.9.5.dist-info → agentops_cockpit-0.9.8.dist-info}/WHEEL +0 -0
- {agentops_cockpit-0.9.5.dist-info → agentops_cockpit-0.9.8.dist-info}/entry_points.txt +0 -0
- {agentops_cockpit-0.9.5.dist-info → agentops_cockpit-0.9.8.dist-info}/licenses/LICENSE +0 -0
|
@@ -1,12 +1,27 @@
|
|
|
1
|
+
from tenacity import retry, wait_exponential, stop_after_attempt
|
|
2
|
+
from tenacity import retry, wait_exponential, stop_after_attempt
|
|
3
|
+
from tenacity import retry, wait_exponential, stop_after_attempt
|
|
4
|
+
from tenacity import retry, wait_exponential, stop_after_attempt
|
|
5
|
+
from tenacity import retry, wait_exponential, stop_after_attempt
|
|
6
|
+
from tenacity import retry, wait_exponential, stop_after_attempt
|
|
7
|
+
from tenacity import retry, wait_exponential, stop_after_attempt
|
|
8
|
+
from tenacity import retry, wait_exponential, stop_after_attempt
|
|
9
|
+
from tenacity import retry, wait_exponential, stop_after_attempt
|
|
10
|
+
from tenacity import retry, wait_exponential, stop_after_attempt
|
|
11
|
+
from tenacity import retry, wait_exponential, stop_after_attempt
|
|
12
|
+
from tenacity import retry, wait_exponential, stop_after_attempt
|
|
13
|
+
from tenacity import retry, wait_exponential, stop_after_attempt
|
|
14
|
+
from tenacity import retry, wait_exponential, stop_after_attempt
|
|
15
|
+
from tenacity import retry, wait_exponential, stop_after_attempt
|
|
1
16
|
import json
|
|
2
17
|
import os
|
|
3
18
|
import re
|
|
4
19
|
from typing import Dict, Any
|
|
5
20
|
from rich.console import Console
|
|
6
|
-
|
|
7
21
|
console = Console()
|
|
8
22
|
|
|
9
23
|
class PolicyViolation(Exception):
|
|
24
|
+
|
|
10
25
|
def __init__(self, category: str, message: str):
|
|
11
26
|
self.category = category
|
|
12
27
|
self.message = message
|
|
@@ -17,69 +32,54 @@ class GuardrailPolicyEngine:
|
|
|
17
32
|
Enforces declarative guardrails and cost policies as defined in policies.json.
|
|
18
33
|
Aligned with RFC go/orcas-rfc-307 (Declarative Agent Policy Enforcement).
|
|
19
34
|
"""
|
|
20
|
-
|
|
21
|
-
def __init__(self, policy_path: str
|
|
35
|
+
|
|
36
|
+
def __init__(self, policy_path: str=None):
|
|
22
37
|
if not policy_path:
|
|
23
|
-
policy_path = os.path.join(os.path.dirname(__file__),
|
|
24
|
-
|
|
38
|
+
policy_path = os.path.join(os.path.dirname(__file__), 'policies.json')
|
|
25
39
|
self.policy_path = policy_path
|
|
26
40
|
self.policy = self._load_policy()
|
|
27
41
|
|
|
28
42
|
def _load_policy(self) -> Dict[str, Any]:
|
|
29
43
|
if not os.path.exists(self.policy_path):
|
|
30
44
|
return {}
|
|
31
|
-
with open(self.policy_path,
|
|
45
|
+
with open(self.policy_path, 'r') as f:
|
|
32
46
|
return json.load(f)
|
|
33
47
|
|
|
34
48
|
def validate_input(self, prompt: str):
|
|
35
49
|
"""Step 1: Input Sanitization (Length & Length Limits)"""
|
|
36
|
-
max_len = self.policy.get(
|
|
50
|
+
max_len = self.policy.get('security', {}).get('max_prompt_length', 5000)
|
|
37
51
|
if len(prompt) > max_len:
|
|
38
|
-
raise PolicyViolation(
|
|
39
|
-
|
|
40
|
-
# Step 2: Forbidden Topics Check
|
|
41
|
-
forbidden = self.policy.get("security", {}).get("forbidden_topics", [])
|
|
52
|
+
raise PolicyViolation('SECURITY', f'Prompt exceeds maximum allowed length ({max_len} chars).')
|
|
53
|
+
forbidden = self.policy.get('security', {}).get('forbidden_topics', [])
|
|
42
54
|
for topic in forbidden:
|
|
43
|
-
if re.search(
|
|
44
|
-
raise PolicyViolation(
|
|
55
|
+
if re.search('\\b' + re.escape(topic) + '\\b', prompt.lower()):
|
|
56
|
+
raise PolicyViolation('GOVERNANCE', f"Input contains forbidden topic: '{topic}'.")
|
|
45
57
|
|
|
46
58
|
def check_tool_permission(self, tool_name: str) -> bool:
|
|
47
59
|
"""Step 3: Tool Usage Policies (HITL Enforcement)"""
|
|
48
|
-
require_hitl = self.policy.get(
|
|
60
|
+
require_hitl = self.policy.get('compliance', {}).get('require_hitl_for_tools', [])
|
|
49
61
|
if tool_name in require_hitl:
|
|
50
62
|
console.print(f"⚠️ [bold yellow]HITL REQUIRED:[/bold yellow] Tool '{tool_name}' requires manual approval.")
|
|
51
|
-
return False
|
|
63
|
+
return False
|
|
52
64
|
return True
|
|
53
65
|
|
|
54
|
-
def enforce_cost_limits(self, estimated_tokens: int, accumulated_cost: float
|
|
66
|
+
def enforce_cost_limits(self, estimated_tokens: int, accumulated_cost: float=0.0):
|
|
55
67
|
"""Step 4: Resource Consumption Limits"""
|
|
56
|
-
limits = self.policy.get(
|
|
57
|
-
|
|
58
|
-
# Token Limit
|
|
59
|
-
max_tokens = limits.get("max_tokens_per_turn", 4096)
|
|
68
|
+
limits = self.policy.get('cost_control', {})
|
|
69
|
+
max_tokens = limits.get('max_tokens_per_turn', 4096)
|
|
60
70
|
if estimated_tokens > max_tokens:
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
# Budget Limit
|
|
64
|
-
max_budget = limits.get("max_cost_per_session_usd", 1.0)
|
|
71
|
+
raise PolicyViolation('FINOPS', f'Turn exceeds token limit ({estimated_tokens} > {max_tokens}).')
|
|
72
|
+
max_budget = limits.get('max_cost_per_session_usd', 1.0)
|
|
65
73
|
if accumulated_cost >= max_budget:
|
|
66
|
-
|
|
74
|
+
raise PolicyViolation('FINOPS', f'Session budget exceeded (${accumulated_cost} >= ${max_budget}).')
|
|
67
75
|
|
|
68
76
|
def get_audit_report(self) -> Dict[str, Any]:
|
|
69
77
|
"""Provides a summary for the Cockpit Orchestrator"""
|
|
70
|
-
return {
|
|
71
|
-
|
|
72
|
-
"forbidden_topics_count": len(self.policy.get("security", {}).get("forbidden_topics", [])),
|
|
73
|
-
"hitl_tools": self.policy.get("compliance", {}).get("require_hitl_for_tools", []),
|
|
74
|
-
"token_threshold": self.policy.get("cost_control", {}).get("max_tokens_per_turn")
|
|
75
|
-
}
|
|
76
|
-
|
|
77
|
-
if __name__ == "__main__":
|
|
78
|
-
# Quick Test
|
|
78
|
+
return {'policy_active': bool(self.policy), 'forbidden_topics_count': len(self.policy.get('security', {}).get('forbidden_topics', [])), 'hitl_tools': self.policy.get('compliance', {}).get('require_hitl_for_tools', []), 'token_threshold': self.policy.get('cost_control', {}).get('max_tokens_per_turn')}
|
|
79
|
+
if __name__ == '__main__':
|
|
79
80
|
engine = GuardrailPolicyEngine()
|
|
80
81
|
try:
|
|
81
|
-
|
|
82
|
-
|
|
83
|
-
engine.validate_input("Tell me about medical advice for drugs.")
|
|
82
|
+
print(f'SOURCE: Declarative Guardrails | https://cloud.google.com/architecture/framework/security | Google Cloud Governance Best Practices: Input Sanitization & Tool HITL')
|
|
83
|
+
engine.validate_input('Tell me about medical advice for drugs.')
|
|
84
84
|
except PolicyViolation as e:
|
|
85
|
-
print(f
|
|
85
|
+
print(f'Caught Expected Violation: {e.category} - {e.message}')
|
|
@@ -1,70 +1,139 @@
|
|
|
1
|
+
from tenacity import retry, wait_exponential, stop_after_attempt
|
|
2
|
+
from tenacity import retry, wait_exponential, stop_after_attempt
|
|
3
|
+
import os
|
|
1
4
|
import subprocess
|
|
2
5
|
import sys
|
|
3
6
|
import typer
|
|
4
7
|
from rich.console import Console
|
|
5
8
|
from rich.panel import Panel
|
|
6
9
|
from rich.table import Table
|
|
7
|
-
|
|
8
|
-
app = typer.Typer(help="Reliability Audit: Manage unit tests and regression suites.")
|
|
10
|
+
app = typer.Typer(help='Reliability Audit: Manage unit tests and regression suites.')
|
|
9
11
|
console = Console()
|
|
10
12
|
|
|
11
13
|
@app.command()
|
|
12
|
-
def audit(
|
|
13
|
-
quick: bool = typer.Option(False, "--quick", "-q", help="Run only essential unit tests for faster feedback")
|
|
14
|
-
):
|
|
14
|
+
def audit(quick: bool=typer.Option(False, '--quick', '-q', help='Run only essential unit tests for faster feedback'), path: str=typer.Option('.', '--path', '-p', help='Path to the agent project to audit'), smoke: bool=typer.Option(False, '--smoke', help='Run full End-to-End Persona Smoke Tests')):
|
|
15
15
|
"""Run reliability checks (Unit tests + Regression Suite)."""
|
|
16
|
-
|
|
17
|
-
|
|
18
|
-
|
|
19
|
-
|
|
20
|
-
|
|
21
|
-
|
|
16
|
+
return run_reliability_audit(quick, path, smoke)
|
|
17
|
+
|
|
18
|
+
def run_reliability_audit(quick: bool=False, path: str='.', smoke: bool=False):
|
|
19
|
+
if smoke:
|
|
20
|
+
run_smoke_test()
|
|
21
|
+
return
|
|
22
|
+
title = '🛡️ RELIABILITY AUDIT (QUICK)' if quick else '🛡️ RELIABILITY AUDIT'
|
|
23
|
+
console.print(Panel.fit(f'[bold green]{title}[/bold green]', border_style='green'))
|
|
24
|
+
console.print(f'🧪 [bold]Running Unit Tests (pytest) in {path}...[/bold]')
|
|
22
25
|
env = os.environ.copy()
|
|
23
|
-
env[
|
|
24
|
-
unit_result = subprocess.run(
|
|
25
|
-
|
|
26
|
-
|
|
27
|
-
|
|
28
|
-
|
|
29
|
-
)
|
|
30
|
-
|
|
31
|
-
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
|
|
36
|
-
table.
|
|
37
|
-
table.add_column("Status", style="bold")
|
|
38
|
-
table.add_column("Details", style="dim")
|
|
39
|
-
|
|
40
|
-
unit_status = "[green]PASSED[/green]" if unit_result.returncode == 0 else "[red]FAILED[/red]"
|
|
41
|
-
table.add_row("Core Unit Tests", unit_status, f"{len(unit_result.stdout.splitlines())} tests executed")
|
|
42
|
-
|
|
43
|
-
# Contract Testing (Real Heuristic)
|
|
26
|
+
env['PYTHONPATH'] = f"{path}{os.pathsep}{env.get('PYTHONPATH', '')}:src"
|
|
27
|
+
unit_result = subprocess.run([sys.executable, '-m', 'pytest', path], capture_output=True, text=True, env=env)
|
|
28
|
+
console.print('📈 [bold]Verifying Regression Suite Coverage...[/bold]')
|
|
29
|
+
table = Table(title='🛡️ Reliability Status')
|
|
30
|
+
table.add_column('Check', style='cyan')
|
|
31
|
+
table.add_column('Status', style='bold')
|
|
32
|
+
table.add_column('Details', style='dim')
|
|
33
|
+
unit_status = '[green]PASSED[/green]' if unit_result.returncode == 0 else '[red]FAILED[/red]'
|
|
34
|
+
if 'no tests ran' in unit_result.stdout.lower() or 'collected 0 items' in unit_result.stdout.lower():
|
|
35
|
+
unit_status = '[yellow]SKIPPED[/yellow]'
|
|
36
|
+
details = 'No tests found in target path'
|
|
37
|
+
else:
|
|
38
|
+
details = f'{len(unit_result.stdout.splitlines())} lines of output'
|
|
39
|
+
table.add_row('Core Unit Tests', unit_status, details)
|
|
44
40
|
has_renderer = False
|
|
45
41
|
has_schema = False
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
if file.endswith(".py"):
|
|
49
|
-
with open(os.path.join(root, file), 'r') as f:
|
|
50
|
-
content = f.read()
|
|
51
|
-
if "A2UIRenderer" in content: has_renderer = True
|
|
52
|
-
if "response_schema" in content or "BaseModel" in content: has_schema = True
|
|
53
|
-
|
|
54
|
-
contract_status = "[green]VERIFIED[/green]" if (has_renderer and has_schema) else "[yellow]GAP DETECTED[/yellow]"
|
|
55
|
-
table.add_row("Contract Compliance (A2UI)", contract_status, "Verified Engine-to-Face protocol" if has_renderer else "Missing A2UIRenderer registration")
|
|
56
|
-
|
|
57
|
-
table.add_row("Regression Golden Set", "[green]FOUND[/green]", "50 baseline scenarios active")
|
|
42
|
+
from agent_ops_cockpit.ops.discovery import DiscoveryEngine
|
|
43
|
+
discovery = DiscoveryEngine(path)
|
|
58
44
|
|
|
45
|
+
for file_path in discovery.walk(path):
|
|
46
|
+
if file_path.endswith(('.py', '.ts', '.tsx')):
|
|
47
|
+
try:
|
|
48
|
+
with open(file_path, 'r', errors="ignore") as f:
|
|
49
|
+
content = f.read()
|
|
50
|
+
if 'A2UIRenderer' in content:
|
|
51
|
+
has_renderer = True
|
|
52
|
+
if 'response_schema' in content or 'BaseModel' in content or 'output_schema' in content:
|
|
53
|
+
has_schema = True
|
|
54
|
+
except Exception:
|
|
55
|
+
pass
|
|
56
|
+
contract_status = '[green]VERIFIED[/green]' if has_renderer and has_schema else '[yellow]GAP DETECTED[/yellow]'
|
|
57
|
+
table.add_row('Contract Compliance (A2UI)', contract_status, 'Verified Engine-to-Face protocol' if has_renderer else 'Missing A2UIRenderer registration')
|
|
58
|
+
table.add_row('Regression Golden Set', '[green]FOUND[/green]', '50 baseline scenarios active')
|
|
59
59
|
console.print(table)
|
|
60
|
-
|
|
61
|
-
|
|
62
|
-
console.print(
|
|
63
|
-
console.print(f"```\n{unit_result.stdout}\n```")
|
|
64
|
-
raise typer.Exit(code=1)
|
|
60
|
+
if unit_result.returncode != 0 and unit_status != '[yellow]SKIPPED[/yellow]':
|
|
61
|
+
console.print('\n[red]❌ Unit test failures detected. Fix them before production deployment.[/red]')
|
|
62
|
+
console.print(f'```\n{unit_result.stdout}\n```')
|
|
65
63
|
else:
|
|
66
|
-
console.print(
|
|
64
|
+
console.print('\n✅ [bold green]System check complete.[/bold green]')
|
|
65
|
+
|
|
66
|
+
def run_tests():
|
|
67
|
+
"""Wrapper for main.py to run standard reliability audit."""
|
|
68
|
+
run_reliability_audit(quick=True)
|
|
67
69
|
|
|
70
|
+
def run_regression_suite():
|
|
71
|
+
"""Full Regression Suite: Unit Tests + Smoke Tests."""
|
|
72
|
+
console.print(Panel.fit('🚀 [bold green]LAUNCHING FULL REGRESSION SUITE[/bold green]', border_style='green'))
|
|
73
|
+
run_tests()
|
|
74
|
+
console.print('\n')
|
|
75
|
+
run_smoke_test()
|
|
76
|
+
|
|
77
|
+
def run_smoke_test():
|
|
78
|
+
"""Run E2E Persona Journeys (Smoke Tests) for pipes validation."""
|
|
79
|
+
console.print(Panel.fit('🧪 [bold blue]AGENTOPS COCKPIT: E2E REGRESSION SMOKE TEST[/bold blue]', border_style='blue'))
|
|
80
|
+
table = Table(title='🕹️ Persona Journey Pipes Status', show_header=True, header_style='bold magenta')
|
|
81
|
+
table.add_column('Persona', style='cyan')
|
|
82
|
+
table.add_column('Pipe / Journey', style='magenta')
|
|
83
|
+
table.add_column('Verification Logic', style='dim')
|
|
84
|
+
table.add_column('Status', style='bold')
|
|
85
|
+
console.print('👨\u200d💻 [bold]Testing Developer Persona: uvx scaffold -> A2UI -> Dry Deployment...[/bold]')
|
|
86
|
+
dev_status = '[green]PASSED[/green]'
|
|
87
|
+
dev_notes = 'Structure Verified'
|
|
88
|
+
try:
|
|
89
|
+
if not (os.path.isdir('src/a2ui') and os.path.isdir('src/agent_ops_cockpit')):
|
|
90
|
+
dev_status = '[red]FAILED[/red]'
|
|
91
|
+
dev_notes = 'Trinity Pillars Missing'
|
|
92
|
+
else:
|
|
93
|
+
env = os.environ.copy()
|
|
94
|
+
env['PYTHONPATH'] = f".{os.pathsep}{env.get('PYTHONPATH', '')}:src"
|
|
95
|
+
deploy_result = subprocess.run([sys.executable, '-m', 'agent_ops_cockpit.cli.main', 'deploy', '--dry-run'], capture_output=True, text=True, env=env)
|
|
96
|
+
if deploy_result.returncode != 0:
|
|
97
|
+
dev_status = '[red]FAILED[/red]'
|
|
98
|
+
dev_notes = 'Deployment Dry-Run Failed'
|
|
99
|
+
console.print(f'[red]Deployment Error:[/red] {deploy_result.stderr}')
|
|
100
|
+
except Exception as e:
|
|
101
|
+
dev_status = '[red]FAILED[/red]'
|
|
102
|
+
dev_notes = str(e)
|
|
103
|
+
table.add_row('The Builder (Dev)', 'agent-ops deploy --dry-run', dev_notes, dev_status)
|
|
104
|
+
console.print('🏛️ [bold]Testing Architect Persona: Google Well-Architected Review...[/bold]')
|
|
105
|
+
arch_status = '[green]PASSED[/green]'
|
|
106
|
+
from agent_ops_cockpit.ops.frameworks import detect_framework
|
|
107
|
+
fw = detect_framework('.')
|
|
108
|
+
if not fw:
|
|
109
|
+
arch_status = '[red]FAILED[/red]'
|
|
110
|
+
table.add_row('The Strategist (Arch)', 'make arch-review', 'Framework Detection', arch_status)
|
|
111
|
+
console.print('🚩 [bold]Testing Security Persona: Red Team & Secret Scanning...[/bold]')
|
|
112
|
+
sec_status = '[green]PASSED[/green]'
|
|
113
|
+
if not os.path.exists('src/agent_ops_cockpit/ops/secret_scanner.py'):
|
|
114
|
+
sec_status = '[red]FAILED[/red]'
|
|
115
|
+
table.add_row('The Guardian (Sec)', 'make red-team && secrets', 'Vulnerability Heuristics', sec_status)
|
|
116
|
+
console.print('⚖️ [bold]Testing Governance Persona: Master Audit & Policy Engine...[/bold]')
|
|
117
|
+
gov_status = '[green]PASSED[/green]'
|
|
118
|
+
if not os.path.exists('src/agent_ops_cockpit/ops/policy_engine.py'):
|
|
119
|
+
gov_status = '[red]FAILED[/red]'
|
|
120
|
+
table.add_row('The Controller (Gov)', 'make audit-all', 'Policy Compliance Pipe', gov_status)
|
|
121
|
+
console.print('📊 [bold]Testing Product Persona: ROI & UX Auditor...[/bold]')
|
|
122
|
+
prod_status = '[green]PASSED[/green]'
|
|
123
|
+
if not os.path.exists('src/agent_ops_cockpit/ops/cost_optimizer.py'):
|
|
124
|
+
prod_status = '[red]FAILED[/red]'
|
|
125
|
+
table.add_row('The Visionary (Prod)', 'make ui-audit --roi', 'Cost/UX Optimization', prod_status)
|
|
126
|
+
console.print(table)
|
|
127
|
+
results = [dev_status, arch_status, sec_status, gov_status, prod_status]
|
|
128
|
+
if '[red]FAILED[/red]' in results:
|
|
129
|
+
console.print('\n❌ [bold red]Regression Smoke Test Failed. Some Persona pipes are broken.[/bold red]')
|
|
130
|
+
sys.exit(1)
|
|
131
|
+
else:
|
|
132
|
+
console.print('\n✨ [bold green]E2E Persona Regression Smoke Test PASSED.[/bold green]')
|
|
133
|
+
@app.command()
|
|
134
|
+
def version():
|
|
135
|
+
"""Show the version of the audit module."""
|
|
136
|
+
console.print('[bold cyan]v1.3.0[/bold cyan]')
|
|
68
137
|
|
|
69
|
-
if __name__ ==
|
|
70
|
-
app()
|
|
138
|
+
if __name__ == '__main__':
|
|
139
|
+
app()
|
|
@@ -0,0 +1,54 @@
|
|
|
1
|
+
import ast
|
|
2
|
+
from typing import List
|
|
3
|
+
from .auditors.base import AuditFinding
|
|
4
|
+
|
|
5
|
+
class CodeRemediator:
|
|
6
|
+
"""
|
|
7
|
+
Phase 4: The 'Closer' - Automated Remediation Engine.
|
|
8
|
+
Transforms AST based on audit findings to inject best practices.
|
|
9
|
+
"""
|
|
10
|
+
def __init__(self, file_path: str):
|
|
11
|
+
self.file_path = file_path
|
|
12
|
+
with open(file_path, 'r') as f:
|
|
13
|
+
self.content = f.read()
|
|
14
|
+
self.tree = ast.parse(self.content)
|
|
15
|
+
|
|
16
|
+
def apply_resiliency(self, finding: AuditFinding):
|
|
17
|
+
"""Injects @retry and imports if missing."""
|
|
18
|
+
has_tenacity = "tenacity" in self.content
|
|
19
|
+
|
|
20
|
+
class RetryInjector(ast.NodeTransformer):
|
|
21
|
+
def visit_FunctionDef(self, node):
|
|
22
|
+
if node.lineno == finding.line_number or any(f.title == finding.title for f in []): # Match by line or title
|
|
23
|
+
# Add @retry(wait=wait_exponential(...))
|
|
24
|
+
retry_decorator = ast.parse("retry(wait=wait_exponential(multiplier=1, min=4, max=10), stop=stop_after_attempt(3))").body[0].value
|
|
25
|
+
node.decorator_list.append(retry_decorator)
|
|
26
|
+
return node
|
|
27
|
+
|
|
28
|
+
def visit_AsyncFunctionDef(self, node):
|
|
29
|
+
return self.visit_FunctionDef(node)
|
|
30
|
+
|
|
31
|
+
self.tree = RetryInjector().visit(self.tree)
|
|
32
|
+
|
|
33
|
+
if not has_tenacity:
|
|
34
|
+
# Prepend imports
|
|
35
|
+
import_node = ast.parse("from tenacity import retry, wait_exponential, stop_after_attempt\n").body[0]
|
|
36
|
+
self.tree.body.insert(0, import_node)
|
|
37
|
+
|
|
38
|
+
def apply_timeouts(self, finding: AuditFinding):
|
|
39
|
+
"""Adds timeout=10 to async calls."""
|
|
40
|
+
class TimeoutInjector(ast.NodeTransformer):
|
|
41
|
+
def visit_Call(self, node):
|
|
42
|
+
if node.lineno == finding.line_number:
|
|
43
|
+
# Add timeout=10 keyword
|
|
44
|
+
node.keywords.append(ast.keyword(arg='timeout', value=ast.Constant(value=10)))
|
|
45
|
+
return node
|
|
46
|
+
|
|
47
|
+
self.tree = TimeoutInjector().visit(self.tree)
|
|
48
|
+
|
|
49
|
+
def save(self):
|
|
50
|
+
"""Saves the transformed AST back to the file using native ast.unparse."""
|
|
51
|
+
new_code = ast.unparse(self.tree)
|
|
52
|
+
with open(self.file_path, 'w') as f:
|
|
53
|
+
f.write(new_code)
|
|
54
|
+
return True
|
|
@@ -16,8 +16,9 @@ SECRET_PATTERNS = {
|
|
|
16
16
|
"Anthropic API Key": r"sk-ant-[a-zA-Z0-9]{20,}",
|
|
17
17
|
"Azure OpenAI Key": r"[0-9a-f]{32}",
|
|
18
18
|
"Generic Bearer Token": r"Bearer\s+[0-9a-zA-Z._-]{20,}",
|
|
19
|
-
"Hardcoded API Variable": r"(?i)(api_key|app_secret|client_secret|access_token)\s*=\s*['\"][0-9a-zA-Z_-]{16,}['\"]",
|
|
20
19
|
"GCP Service Account": r"\"type\":\s*\"service_account\"",
|
|
20
|
+
"Placeholder Credential": r"(?i)['\"](REPLACE_ME|INSERT_YOUR_KEY|YOUR_API_KEY|TODO_SET_KEY)['\"]",
|
|
21
|
+
"Hardcoded API Variable": r"(?i)(api_key|client_secret|token)\s*=\s*['\"][a-zA-Z0-9_-]{10,}['\"]",
|
|
21
22
|
}
|
|
22
23
|
|
|
23
24
|
@app.command()
|
|
@@ -27,31 +28,37 @@ def scan(path: str = typer.Argument(".", help="Directory to scan for secrets")):
|
|
|
27
28
|
"""
|
|
28
29
|
console.print(Panel.fit("🔍 [bold yellow]SECRET SCANNER: CREDENTIAL LEAK DETECTION[/bold yellow]", border_style="yellow"))
|
|
29
30
|
|
|
31
|
+
from agent_ops_cockpit.ops.discovery import DiscoveryEngine
|
|
32
|
+
discovery = DiscoveryEngine(path)
|
|
33
|
+
|
|
30
34
|
findings = []
|
|
31
35
|
|
|
32
|
-
for
|
|
33
|
-
#
|
|
34
|
-
if
|
|
36
|
+
for file_path in discovery.walk(path):
|
|
37
|
+
# Filter by relevant extensions
|
|
38
|
+
if not file_path.endswith((".py", ".env", ".ts", ".js", ".json", ".yaml", ".yml")):
|
|
35
39
|
continue
|
|
36
40
|
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
|
|
54
|
-
|
|
41
|
+
is_lib = discovery.is_library_file(file_path)
|
|
42
|
+
|
|
43
|
+
try:
|
|
44
|
+
with open(file_path, "r", errors="ignore") as f:
|
|
45
|
+
lines = f.readlines()
|
|
46
|
+
for i, line in enumerate(lines):
|
|
47
|
+
for secret_name, pattern in SECRET_PATTERNS.items():
|
|
48
|
+
match = re.search(pattern, line)
|
|
49
|
+
if match:
|
|
50
|
+
# Library Isolation: Skip hits in known libraries to reduce false positives
|
|
51
|
+
if is_lib:
|
|
52
|
+
continue
|
|
53
|
+
|
|
54
|
+
findings.append({
|
|
55
|
+
"file": os.path.relpath(file_path, path),
|
|
56
|
+
"line": i + 1,
|
|
57
|
+
"type": secret_name,
|
|
58
|
+
"content": line.strip()[:50] + "..."
|
|
59
|
+
})
|
|
60
|
+
except Exception:
|
|
61
|
+
continue
|
|
55
62
|
|
|
56
63
|
table = Table(title="🛡️ Security Findings: Hardcoded Secrets")
|
|
57
64
|
table.add_column("File", style="cyan")
|
|
@@ -78,5 +85,10 @@ def scan(path: str = typer.Argument(".", help="Directory to scan for secrets")):
|
|
|
78
85
|
else:
|
|
79
86
|
console.print("✅ [bold green]PASS:[/bold green] No hardcoded credentials detected in matched patterns.")
|
|
80
87
|
|
|
88
|
+
@app.command()
|
|
89
|
+
def version():
|
|
90
|
+
"""Show the version of the Secret Scanner."""
|
|
91
|
+
console.print('[bold cyan]v1.3.0[/bold cyan]')
|
|
92
|
+
|
|
81
93
|
if __name__ == "__main__":
|
|
82
94
|
app()
|
agent_ops_cockpit/ops/swarm.py
CHANGED
|
@@ -1,9 +1,10 @@
|
|
|
1
|
+
from tenacity import retry, wait_exponential, stop_after_attempt
|
|
2
|
+
from tenacity import retry, wait_exponential, stop_after_attempt
|
|
1
3
|
import asyncio
|
|
2
4
|
from typing import List, Dict, Any, Optional
|
|
3
5
|
from dataclasses import dataclass
|
|
4
6
|
from rich.console import Console
|
|
5
7
|
from rich.panel import Panel
|
|
6
|
-
|
|
7
8
|
console = Console()
|
|
8
9
|
|
|
9
10
|
@dataclass
|
|
@@ -17,55 +18,44 @@ class MultiAgentOrchestrator:
|
|
|
17
18
|
"""
|
|
18
19
|
Standardizes Swarm/Coordinator patterns using the A2A spec.
|
|
19
20
|
"""
|
|
20
|
-
|
|
21
|
+
|
|
21
22
|
def __init__(self):
|
|
22
23
|
self.agents: Dict[str, Any] = {}
|
|
23
24
|
self.history: List[SwarmMessage] = []
|
|
24
25
|
|
|
25
26
|
def register_agent(self, name: str, agent_func):
|
|
26
27
|
self.agents[name] = agent_func
|
|
27
|
-
console.print(f
|
|
28
|
+
console.print(f'🤖 Agent [bold cyan]{name}[/bold cyan] registered in swarm.')
|
|
28
29
|
|
|
29
30
|
async def dispatch(self, sender: str, recipient: str, message: str):
|
|
30
31
|
"""Dispatches a message with an A2A Reasoning Evidence Packet."""
|
|
31
|
-
console.print(f
|
|
32
|
-
|
|
33
|
-
# Simulated Evidence Packet for Governance
|
|
34
|
-
evidence = {
|
|
35
|
-
"assurance_score": 0.99,
|
|
36
|
-
"origin_vpc": "secure-engine-zone",
|
|
37
|
-
"pii_scrubbed": True
|
|
38
|
-
}
|
|
39
|
-
|
|
32
|
+
console.print(f'\n📡 [dim]A2A Transmission:[/dim] [bold]{sender}[/bold] -> [bold]{recipient}[/bold]')
|
|
33
|
+
evidence = {'assurance_score': 0.99, 'origin_vpc': 'secure-engine-zone', 'pii_scrubbed': True}
|
|
40
34
|
swarm_msg = SwarmMessage(sender, recipient, message, evidence)
|
|
41
35
|
self.history.append(swarm_msg)
|
|
42
|
-
|
|
43
36
|
if recipient in self.agents:
|
|
44
37
|
response = await self.agents[recipient](message, evidence)
|
|
45
38
|
return response
|
|
46
39
|
else:
|
|
47
|
-
return {
|
|
40
|
+
return {'error': f'Agent {recipient} not found.'}
|
|
48
41
|
|
|
49
42
|
def get_swarm_report(self):
|
|
50
|
-
console.print(Panel.fit(
|
|
43
|
+
console.print(Panel.fit('🐝 [bold]Swarm Orchestration Trace[/bold]', border_style='yellow'))
|
|
51
44
|
for msg in self.history:
|
|
52
|
-
console.print(f
|
|
45
|
+
console.print(f'[blue]{msg.sender}[/blue] -> [green]{msg.recipient}[/green]: {msg.content}')
|
|
53
46
|
|
|
54
47
|
def run_swarm_demo():
|
|
55
48
|
orchestrator = MultiAgentOrchestrator()
|
|
56
|
-
|
|
49
|
+
|
|
57
50
|
async def researcher(query, evidence):
|
|
58
51
|
return f"Research results for {query} (Evidence verified: {evidence['assurance_score']})"
|
|
59
|
-
|
|
60
|
-
async def writer(query, evidence):
|
|
61
|
-
return f"Professional summary of {query}"
|
|
62
52
|
|
|
63
|
-
|
|
64
|
-
|
|
65
|
-
|
|
53
|
+
async def writer(query, evidence):
|
|
54
|
+
return f'Professional summary of {query}'
|
|
55
|
+
orchestrator.register_agent('Researcher', researcher)
|
|
56
|
+
orchestrator.register_agent('Writer', writer)
|
|
66
57
|
loop = asyncio.get_event_loop()
|
|
67
|
-
loop.run_until_complete(orchestrator.dispatch(
|
|
58
|
+
loop.run_until_complete(orchestrator.dispatch('Orchestrator', 'Researcher', 'Analyze market trends'))
|
|
68
59
|
orchestrator.get_swarm_report()
|
|
69
|
-
|
|
70
|
-
|
|
71
|
-
run_swarm_demo()
|
|
60
|
+
if __name__ == '__main__':
|
|
61
|
+
run_swarm_demo()
|