agentops-cockpit 0.9.7__py3-none-any.whl → 0.9.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agent_ops_cockpit/agent.py +43 -81
- agent_ops_cockpit/cache/semantic_cache.py +10 -21
- agent_ops_cockpit/cli/main.py +105 -153
- agent_ops_cockpit/eval/load_test.py +33 -50
- agent_ops_cockpit/eval/quality_climber.py +88 -93
- agent_ops_cockpit/eval/red_team.py +54 -21
- agent_ops_cockpit/mcp_server.py +26 -93
- agent_ops_cockpit/ops/arch_review.py +221 -148
- agent_ops_cockpit/ops/auditors/base.py +50 -0
- agent_ops_cockpit/ops/auditors/behavioral.py +31 -0
- agent_ops_cockpit/ops/auditors/compliance.py +35 -0
- agent_ops_cockpit/ops/auditors/dependency.py +48 -0
- agent_ops_cockpit/ops/auditors/finops.py +48 -0
- agent_ops_cockpit/ops/auditors/graph.py +49 -0
- agent_ops_cockpit/ops/auditors/pivot.py +51 -0
- agent_ops_cockpit/ops/auditors/reasoning.py +67 -0
- agent_ops_cockpit/ops/auditors/reliability.py +53 -0
- agent_ops_cockpit/ops/auditors/security.py +87 -0
- agent_ops_cockpit/ops/auditors/sme_v12.py +76 -0
- agent_ops_cockpit/ops/auditors/sovereignty.py +74 -0
- agent_ops_cockpit/ops/auditors/sre_a2a.py +179 -0
- agent_ops_cockpit/ops/benchmarker.py +97 -0
- agent_ops_cockpit/ops/cost_optimizer.py +15 -24
- agent_ops_cockpit/ops/discovery.py +214 -0
- agent_ops_cockpit/ops/evidence_bridge.py +30 -63
- agent_ops_cockpit/ops/frameworks.py +124 -1
- agent_ops_cockpit/ops/git_portal.py +74 -0
- agent_ops_cockpit/ops/mcp_hub.py +19 -42
- agent_ops_cockpit/ops/orchestrator.py +477 -277
- agent_ops_cockpit/ops/policy_engine.py +38 -38
- agent_ops_cockpit/ops/reliability.py +120 -65
- agent_ops_cockpit/ops/remediator.py +54 -0
- agent_ops_cockpit/ops/secret_scanner.py +34 -22
- agent_ops_cockpit/ops/swarm.py +17 -27
- agent_ops_cockpit/ops/ui_auditor.py +67 -6
- agent_ops_cockpit/ops/watcher.py +41 -70
- agent_ops_cockpit/ops/watchlist.json +30 -0
- agent_ops_cockpit/optimizer.py +157 -407
- agent_ops_cockpit/tests/test_arch_review.py +6 -6
- agent_ops_cockpit/tests/test_discovery.py +96 -0
- agent_ops_cockpit/tests/test_ops_core.py +56 -0
- agent_ops_cockpit/tests/test_orchestrator_fleet.py +73 -0
- agent_ops_cockpit/tests/test_persona_architect.py +75 -0
- agent_ops_cockpit/tests/test_persona_finops.py +31 -0
- agent_ops_cockpit/tests/test_persona_security.py +55 -0
- agent_ops_cockpit/tests/test_persona_sre.py +43 -0
- agent_ops_cockpit/tests/test_persona_ux.py +42 -0
- agent_ops_cockpit/tests/test_quality_climber.py +2 -2
- agent_ops_cockpit/tests/test_remediator.py +75 -0
- agent_ops_cockpit/tests/test_ui_auditor.py +52 -0
- agentops_cockpit-0.9.8.dist-info/METADATA +172 -0
- agentops_cockpit-0.9.8.dist-info/RECORD +71 -0
- agent_ops_cockpit/tests/test_optimizer.py +0 -68
- agent_ops_cockpit/tests/test_red_team.py +0 -35
- agent_ops_cockpit/tests/test_secret_scanner.py +0 -24
- agentops_cockpit-0.9.7.dist-info/METADATA +0 -246
- agentops_cockpit-0.9.7.dist-info/RECORD +0 -47
- {agentops_cockpit-0.9.7.dist-info → agentops_cockpit-0.9.8.dist-info}/WHEEL +0 -0
- {agentops_cockpit-0.9.7.dist-info → agentops_cockpit-0.9.8.dist-info}/entry_points.txt +0 -0
- {agentops_cockpit-0.9.7.dist-info → agentops_cockpit-0.9.8.dist-info}/licenses/LICENSE +0 -0
agent_ops_cockpit/optimizer.py
CHANGED
|
@@ -8,24 +8,24 @@ from rich.table import Table
|
|
|
8
8
|
from rich.panel import Panel
|
|
9
9
|
from rich.syntax import Syntax
|
|
10
10
|
from packaging import version
|
|
11
|
-
|
|
12
|
-
# Import the evidence bridge
|
|
13
11
|
try:
|
|
14
12
|
from agent_ops_cockpit.ops.evidence_bridge import get_package_evidence, get_compatibility_report
|
|
15
13
|
except ImportError:
|
|
16
|
-
# Fallback for local execution
|
|
17
14
|
try:
|
|
18
15
|
from backend.ops.evidence_bridge import get_package_evidence, get_compatibility_report
|
|
19
16
|
except ImportError:
|
|
20
|
-
# Final fallback
|
|
21
|
-
def get_package_evidence(pkg): return {}
|
|
22
|
-
def get_compatibility_report(imports): return []
|
|
23
17
|
|
|
24
|
-
|
|
18
|
+
def get_package_evidence(pkg):
|
|
19
|
+
return {}
|
|
20
|
+
|
|
21
|
+
def get_compatibility_report(imports):
|
|
22
|
+
return []
|
|
23
|
+
app = typer.Typer(help='AgentOps Cockpit: The Agent Optimizer CLI')
|
|
25
24
|
console = Console()
|
|
26
25
|
|
|
27
26
|
class OptimizationIssue:
|
|
28
|
-
|
|
27
|
+
|
|
28
|
+
def __init__(self, id: str, title: str, impact: str, savings: str, description: str, diff: str, package: str=None, fix_pattern: str=None):
|
|
29
29
|
self.id = id
|
|
30
30
|
self.title = title
|
|
31
31
|
self.impact = impact
|
|
@@ -36,477 +36,227 @@ class OptimizationIssue:
|
|
|
36
36
|
self.fix_pattern = fix_pattern
|
|
37
37
|
self.evidence = None
|
|
38
38
|
|
|
39
|
-
def analyze_code(content: str, file_path: str
|
|
39
|
+
def analyze_code(content: str, file_path: str='agent.py', versions: Dict[str, str]=None) -> List[OptimizationIssue]:
|
|
40
40
|
issues = []
|
|
41
41
|
content_lower = content.lower()
|
|
42
|
-
content_no_comments = re.sub(
|
|
42
|
+
content_no_comments = re.sub('#.*', '', content_lower)
|
|
43
43
|
versions = versions or {}
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
|
|
49
|
-
if v_ai == "Not Installed":
|
|
50
|
-
issues.append(OptimizationIssue(
|
|
51
|
-
"vertex_install", "Install Modern Vertex SDK", "HIGH", "90% cost savings",
|
|
52
|
-
"You appear to be using Vertex AI logic but the SDK is not in your environment. Install v1.70.0+ to unlock context caching.",
|
|
53
|
-
"+ # pip install google-cloud-aiplatform>=1.70.0",
|
|
54
|
-
package="google-cloud-aiplatform"
|
|
55
|
-
))
|
|
56
|
-
elif v_ai != "Unknown":
|
|
44
|
+
v_ai = versions.get('google-cloud-aiplatform', 'Not Installed')
|
|
45
|
+
if 'google.cloud.aiplatform' in content_lower or 'vertexai' in content_lower:
|
|
46
|
+
if v_ai == 'Not Installed':
|
|
47
|
+
issues.append(OptimizationIssue('vertex_install', 'Install Modern Vertex SDK', 'HIGH', '90% cost savings', 'You appear to be using Vertex AI logic but the SDK is not in your environment. Install v1.70.0+ to unlock context caching.', '+ # pip install google-cloud-aiplatform>=1.70.0', package='google-cloud-aiplatform'))
|
|
48
|
+
elif v_ai != 'Unknown':
|
|
57
49
|
try:
|
|
58
|
-
if version.parse(v_ai) < version.parse(
|
|
59
|
-
|
|
60
|
-
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
package="google-cloud-aiplatform"
|
|
64
|
-
))
|
|
65
|
-
issues.append(OptimizationIssue(
|
|
66
|
-
"vertex_upgrade_path", "Modernization Path", "HIGH", "90% cost savings",
|
|
67
|
-
"Upgrading to 1.70.0+ enables near-instant token reuse via CachingConfig.",
|
|
68
|
-
"+ # Upgrade to >1.70.0",
|
|
69
|
-
package="google-cloud-aiplatform"
|
|
70
|
-
))
|
|
71
|
-
elif "cache" not in content_lower:
|
|
72
|
-
issues.append(OptimizationIssue(
|
|
73
|
-
"context_caching", "Enable Context Caching", "HIGH", "90% cost reduction",
|
|
74
|
-
"Large model context detected. Use native CachingConfig.",
|
|
75
|
-
"+ cache = vertexai.preview.CachingConfig(ttl=3600)",
|
|
76
|
-
package="google-cloud-aiplatform"
|
|
77
|
-
))
|
|
50
|
+
if version.parse(v_ai) < version.parse('1.70.0'):
|
|
51
|
+
issues.append(OptimizationIssue('vertex_legacy_opt', 'Situational Performance (Legacy SDK)', 'MEDIUM', '20% cost savings', f'Your SDK ({v_ai}) lacks native Context Caching. Optimize by using selective prompt pruning before execution.', '+ from agent_ops_cockpit.ops.cost_optimizer import situational_pruning\n+ pruned = situational_pruning(context)', package='google-cloud-aiplatform'))
|
|
52
|
+
issues.append(OptimizationIssue('vertex_upgrade_path', 'Modernization Path', 'HIGH', '90% cost savings', 'Upgrading to 1.70.0+ enables near-instant token reuse via CachingConfig.', '+ # Upgrade to >1.70.0', package='google-cloud-aiplatform'))
|
|
53
|
+
elif 'cache' not in content_lower:
|
|
54
|
+
issues.append(OptimizationIssue('context_caching', 'Enable Context Caching', 'HIGH', '90% cost reduction', 'Large model context detected. Use native CachingConfig.', '+ cache = vertexai.preview.CachingConfig(ttl=3600)', package='google-cloud-aiplatform'))
|
|
78
55
|
except Exception:
|
|
79
56
|
pass
|
|
80
|
-
|
|
81
|
-
|
|
82
|
-
|
|
83
|
-
|
|
84
|
-
|
|
85
|
-
|
|
86
|
-
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
|
|
90
|
-
|
|
91
|
-
|
|
92
|
-
|
|
93
|
-
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
|
|
97
|
-
|
|
98
|
-
|
|
99
|
-
|
|
100
|
-
if (
|
|
101
|
-
issues.append(OptimizationIssue(
|
|
102
|
-
|
|
103
|
-
|
|
104
|
-
|
|
105
|
-
|
|
106
|
-
|
|
107
|
-
|
|
108
|
-
# Microsoft
|
|
109
|
-
if ("autogen" in content_lower or "microsoft" in content_lower) and "workflow" not in content_lower:
|
|
110
|
-
issues.append(OptimizationIssue(
|
|
111
|
-
"ms_workflows", "Microsoft Agent Workflows", "MEDIUM", "40% consistency boost",
|
|
112
|
-
"Using graph-based repeatable workflows ensures enterprise reliability.",
|
|
113
|
-
"+ # Define a repeatable graph-based flow",
|
|
114
|
-
package="pyautogen"
|
|
115
|
-
))
|
|
116
|
-
|
|
117
|
-
# AWS
|
|
118
|
-
if ("bedrock" in content_lower or "boto3" in content_lower) and "actiongroup" not in content_lower:
|
|
119
|
-
issues.append(OptimizationIssue(
|
|
120
|
-
"aws_action_groups", "AWS Bedrock Action Groups", "HIGH", "50% tool reliability",
|
|
121
|
-
"Standardize tool execution via Bedrock Action Group schemas.",
|
|
122
|
-
"+ # Define Bedrock Action Group",
|
|
123
|
-
package="aws-sdk"
|
|
124
|
-
))
|
|
125
|
-
|
|
126
|
-
# CopilotKit
|
|
127
|
-
if "copilotkit" in content_lower and "usecopilotstate" not in content_lower:
|
|
128
|
-
issues.append(OptimizationIssue(
|
|
129
|
-
"copilot_state", "CopilotKit Shared State", "MEDIUM", "60% UI responsiveness",
|
|
130
|
-
"Ensure the Face remains aligned with the Engine via shared state sync.",
|
|
131
|
-
"+ # Use shared state for UI alignment",
|
|
132
|
-
package="@copilotkit/react-core"
|
|
133
|
-
))
|
|
134
|
-
|
|
135
|
-
# Routing
|
|
136
|
-
if "pro" in content_lower and "flash" not in content_lower:
|
|
137
|
-
issues.append(OptimizationIssue(
|
|
138
|
-
"model_routing", "Smart Model Routing", "HIGH", "70% cost savings",
|
|
139
|
-
"Route simple queries to Flash models to minimize consumption.",
|
|
140
|
-
"+ if is_simple(q): model = 'gemini-1.5-flash'",
|
|
141
|
-
package="google-cloud-aiplatform"
|
|
142
|
-
))
|
|
143
|
-
|
|
144
|
-
# Infrastructure (Cloud Run + GKE)
|
|
145
|
-
if "cloud run" in content_lower and "cpu_boost" not in content_lower:
|
|
146
|
-
issues.append(OptimizationIssue(
|
|
147
|
-
"cr_startup_boost", "Cloud Run Startup Boost", "HIGH", "50% latency reduction",
|
|
148
|
-
"Enable Startup CPU Boost to reduce cold-start latency for Python agents.",
|
|
149
|
-
"+ startup_cpu_boost: true",
|
|
150
|
-
package="google-cloud-run"
|
|
151
|
-
))
|
|
152
|
-
if ("gke" in content_lower or "kubernetes" in content_lower) and "identity" not in content_lower:
|
|
153
|
-
issues.append(OptimizationIssue(
|
|
154
|
-
"gke_identity", "GKE Workload Identity", "HIGH", "100% security baseline",
|
|
155
|
-
"Use Workload Identity for secure service-to-service communication.",
|
|
156
|
-
"+ # Use Workload Identity",
|
|
157
|
-
package="google-cloud-gke"
|
|
158
|
-
))
|
|
159
|
-
|
|
160
|
-
# Language Specific (Go + Node)
|
|
161
|
-
if file_path.endswith(".go") and "goroutine" not in content_lower:
|
|
162
|
-
issues.append(OptimizationIssue(
|
|
163
|
-
"go_concurrency", "Go Native Concurrency", "HIGH", "80% throughput boost",
|
|
164
|
-
"Leveraging Goroutines for parallel tool execution is a Go best practice.",
|
|
165
|
-
"+ go func() { tool.execute() }()",
|
|
166
|
-
package="golang"
|
|
167
|
-
))
|
|
168
|
-
if (file_path.endswith(".ts") or file_path.endswith(".js") or "axios" in content_lower) and "fetch" not in content_lower:
|
|
169
|
-
issues.append(OptimizationIssue(
|
|
170
|
-
"node_native_fetch", "Native Fetch API", "MEDIUM", "20% bundle reduction",
|
|
171
|
-
"Node 20+ supports native fetch, reducing dependency on heavy libraries like axios.",
|
|
172
|
-
"+ const res = await fetch(url);",
|
|
173
|
-
package="nodejs"
|
|
174
|
-
))
|
|
175
|
-
|
|
176
|
-
lg_v = versions.get("langgraph", "Not Installed")
|
|
177
|
-
if "langgraph" in content_lower:
|
|
178
|
-
if lg_v != "Not Installed" and lg_v != "Unknown":
|
|
57
|
+
openai_v = versions.get('openai', 'Not Installed')
|
|
58
|
+
if 'openai' in content_lower:
|
|
59
|
+
if openai_v != 'Not Installed' and version.parse(openai_v) < version.parse('1.0.0'):
|
|
60
|
+
issues.append(OptimizationIssue('openai_legacy', 'Found Legacy OpenAI SDK', 'HIGH', '40% latency reduction', f'You are on {openai_v}. Transitioning to the v1.0.0+ Client pattern enables modern streaming and improved error handling.', '+ from openai import OpenAI\n+ client = OpenAI()', package='openai'))
|
|
61
|
+
elif 'prompt_cache' not in content_lower:
|
|
62
|
+
issues.append(OptimizationIssue('openai_caching', 'OpenAI Prompt Caching', 'MEDIUM', '50% latency reduction', 'OpenAI automatically caches repeated input prefixes. Ensure your system prompt is first.', "+ # Ensure system prompt is first\n+ messages = [{'role': 'system', ...}]", package='openai'))
|
|
63
|
+
if ('anthropic' in content_lower or 'claude' in content_lower) and 'orchestra' not in content_lower:
|
|
64
|
+
issues.append(OptimizationIssue('anthropic_orchestration', 'Anthropic Orchestration Pattern', 'HIGH', '30% reliability boost', 'Claude performs best with an Orchestrator-Subagent pattern for complex tasks.', '+ # Use orchestrator to delegate sub-tasks', package='anthropic'))
|
|
65
|
+
if ('autogen' in content_lower or 'microsoft' in content_lower) and 'workflow' not in content_lower:
|
|
66
|
+
issues.append(OptimizationIssue('ms_workflows', 'Microsoft Agent Workflows', 'MEDIUM', '40% consistency boost', 'Using graph-based repeatable workflows ensures enterprise reliability.', '+ # Define a repeatable graph-based flow', package='pyautogen'))
|
|
67
|
+
if ('bedrock' in content_lower or 'boto3' in content_lower) and 'actiongroup' not in content_lower:
|
|
68
|
+
issues.append(OptimizationIssue('aws_action_groups', 'AWS Bedrock Action Groups', 'HIGH', '50% tool reliability', 'Standardize tool execution via Bedrock Action Group schemas.', '+ # Define Bedrock Action Group', package='aws-sdk'))
|
|
69
|
+
if 'copilotkit' in content_lower and 'usecopilotstate' not in content_lower:
|
|
70
|
+
issues.append(OptimizationIssue('copilot_state', 'CopilotKit Shared State', 'MEDIUM', '60% UI responsiveness', 'Ensure the Face remains aligned with the Engine via shared state sync.', '+ # Use shared state for UI alignment', package='@copilotkit/react-core'))
|
|
71
|
+
if 'pro' in content_lower and 'flash' not in content_lower:
|
|
72
|
+
issues.append(OptimizationIssue('model_routing', 'Smart Model Routing', 'HIGH', '70% cost savings', 'Route simple queries to Flash models to minimize consumption.', "+ if is_simple(q): model = 'gemini-1.5-flash'", package='google-cloud-aiplatform'))
|
|
73
|
+
if 'cloud run' in content_lower and 'cpu_boost' not in content_lower:
|
|
74
|
+
issues.append(OptimizationIssue('cr_startup_boost', 'Cloud Run Startup Boost', 'HIGH', '50% latency reduction', 'Enable Startup CPU Boost to reduce cold-start latency for Python agents.', '+ startup_cpu_boost: true', package='google-cloud-run'))
|
|
75
|
+
if ('gke' in content_lower or 'kubernetes' in content_lower) and 'identity' not in content_lower:
|
|
76
|
+
issues.append(OptimizationIssue('gke_identity', 'GKE Workload Identity', 'HIGH', '100% security baseline', 'Use Workload Identity for secure service-to-service communication.', '+ # Use Workload Identity', package='google-cloud-gke'))
|
|
77
|
+
if file_path.endswith('.go') and 'goroutine' not in content_lower:
|
|
78
|
+
issues.append(OptimizationIssue('go_concurrency', 'Go Native Concurrency', 'HIGH', '80% throughput boost', 'Leveraging Goroutines for parallel tool execution is a Go best practice.', '+ go func() { tool.execute() }()', package='golang'))
|
|
79
|
+
if (file_path.endswith('.ts') or file_path.endswith('.js') or 'axios' in content_lower) and 'fetch' not in content_lower:
|
|
80
|
+
issues.append(OptimizationIssue('node_native_fetch', 'Native Fetch API', 'MEDIUM', '20% bundle reduction', 'Node 20+ supports native fetch, reducing dependency on heavy libraries like axios.', '+ const res = await fetch(url);', package='nodejs'))
|
|
81
|
+
lg_v = versions.get('langgraph', 'Not Installed')
|
|
82
|
+
if 'langgraph' in content_lower:
|
|
83
|
+
if lg_v != 'Not Installed' and lg_v != 'Unknown':
|
|
179
84
|
try:
|
|
180
|
-
if version.parse(lg_v) < version.parse(
|
|
181
|
-
|
|
182
|
-
"langgraph_legacy", "Situational Stability (Legacy LangGraph)", "HIGH", "Stability Boost",
|
|
183
|
-
f"You are on {lg_v}. Older versions lack the hardened StateGraph compilation. Upgrade is recommended.",
|
|
184
|
-
"+ # Consider upgrading for better persistence",
|
|
185
|
-
package="langgraph"
|
|
186
|
-
))
|
|
85
|
+
if version.parse(lg_v) < version.parse('0.1.0'):
|
|
86
|
+
issues.append(OptimizationIssue('langgraph_legacy', 'Situational Stability (Legacy LangGraph)', 'HIGH', 'Stability Boost', f'You are on {lg_v}. Older versions lack the hardened StateGraph compilation. Upgrade is recommended.', '+ # Consider upgrading for better persistence', package='langgraph'))
|
|
187
87
|
except Exception:
|
|
188
88
|
pass
|
|
189
|
-
|
|
190
|
-
|
|
191
|
-
|
|
192
|
-
|
|
193
|
-
|
|
194
|
-
|
|
195
|
-
|
|
196
|
-
|
|
197
|
-
|
|
198
|
-
|
|
199
|
-
"langgraph_recursion", "Recursion Limits", "MEDIUM", "Safety Guardrail",
|
|
200
|
-
"Set recursion limits to prevent expensive infinite loops in cyclic graphs.",
|
|
201
|
-
"+ graph.invoke(..., config={'recursion_limit': 50})",
|
|
202
|
-
package="langgraph"
|
|
203
|
-
))
|
|
204
|
-
|
|
205
|
-
# --- ARCHITECTURAL OPTIMIZATIONS ---
|
|
206
|
-
|
|
207
|
-
# Large system instructions (individual docstrings > 200 chars)
|
|
208
|
-
docstrings = re.findall(r'"""([\s\S]*?)"""|\'\'\'([\s\S]*?)\'\'\'', content)
|
|
209
|
-
has_large_prompt = any(len(d[0] or d[1]) > 200 for d in docstrings)
|
|
210
|
-
|
|
211
|
-
if has_large_prompt and "cache" not in content_lower:
|
|
212
|
-
issues.append(OptimizationIssue(
|
|
213
|
-
"context_caching", "Enable Context Caching", "HIGH", "90% cost reduction",
|
|
214
|
-
"Large static system instructions detected. Use context caching.",
|
|
215
|
-
"+ cache = vertexai.preview.CachingConfig(ttl=3600)",
|
|
216
|
-
package="google-cloud-aiplatform"
|
|
217
|
-
))
|
|
218
|
-
|
|
219
|
-
# Missing semantic cache
|
|
220
|
-
if "hive_mind" not in content_lower and "cache" not in content_lower:
|
|
221
|
-
issues.append(OptimizationIssue(
|
|
222
|
-
"semantic_caching", "Implement Semantic Caching", "HIGH", "40-60% savings",
|
|
223
|
-
"No caching layer detected. Adding a semantic cache reduces LLM costs.",
|
|
224
|
-
"+ @hive_mind(cache=global_cache)",
|
|
225
|
-
package="google-adk"
|
|
226
|
-
))
|
|
227
|
-
|
|
228
|
-
# --- BEST PRACTICE OPTIMIZATIONS ---
|
|
229
|
-
|
|
230
|
-
# Prompt Externalization
|
|
89
|
+
if 'persistence' not in content_lower and 'checkpointer' not in content_lower:
|
|
90
|
+
issues.append(OptimizationIssue('langgraph_persistence', 'LangGraph Persistence', 'HIGH', '100% state recovery', 'A checkpointer is mandatory for reliable long-running agents.', '+ graph.compile(checkpointer=checkpointer)', package='langgraph'))
|
|
91
|
+
if 'recursion' not in content_lower:
|
|
92
|
+
issues.append(OptimizationIssue('langgraph_recursion', 'Recursion Limits', 'MEDIUM', 'Safety Guardrail', 'Set recursion limits to prevent expensive infinite loops in cyclic graphs.', "+ graph.invoke(..., config={'recursion_limit': 50})", package='langgraph'))
|
|
93
|
+
docstrings = re.findall('"""([\\s\\S]*?)"""|\\\'\\\'\\\'([\\s\\S]*?)\\\'\\\'\\\'', content)
|
|
94
|
+
has_large_prompt = any((len(d[0] or d[1]) > 200 for d in docstrings))
|
|
95
|
+
if has_large_prompt and 'cache' not in content_lower:
|
|
96
|
+
issues.append(OptimizationIssue('context_caching', 'Enable Context Caching', 'HIGH', '90% cost reduction', 'Large static system instructions detected. Use context caching.', '+ cache = vertexai.preview.CachingConfig(ttl=3600)', package='google-cloud-aiplatform'))
|
|
97
|
+
if 'hive_mind' not in content_lower and 'cache' not in content_lower:
|
|
98
|
+
issues.append(OptimizationIssue('semantic_caching', 'Implement Semantic Caching', 'HIGH', '40-60% savings', 'No caching layer detected. Adding a semantic cache reduces LLM costs.', '+ @hive_mind(cache=global_cache)', package='google-adk'))
|
|
231
99
|
if has_large_prompt:
|
|
232
|
-
issues.append(OptimizationIssue(
|
|
233
|
-
|
|
234
|
-
|
|
235
|
-
|
|
236
|
-
))
|
|
237
|
-
|
|
238
|
-
|
|
239
|
-
|
|
240
|
-
|
|
241
|
-
|
|
242
|
-
|
|
243
|
-
|
|
244
|
-
|
|
245
|
-
|
|
246
|
-
|
|
247
|
-
|
|
248
|
-
if
|
|
249
|
-
|
|
250
|
-
|
|
251
|
-
|
|
252
|
-
|
|
253
|
-
|
|
254
|
-
|
|
255
|
-
|
|
256
|
-
|
|
257
|
-
|
|
258
|
-
|
|
259
|
-
|
|
260
|
-
"You are using the standard Pinecone client. Switching to pinecone[grpc] enables low-latency streaming for large vector retrievals.",
|
|
261
|
-
"+ from pinecone.grpc import PineconeGRPC as Pinecone\n+ pc = Pinecone(api_key='...')"
|
|
262
|
-
))
|
|
263
|
-
if "namespace" not in content_lower:
|
|
264
|
-
issues.append(OptimizationIssue(
|
|
265
|
-
"pinecone_isolation", "Pinecone Namespace Isolation", "MEDIUM", "RAG Accuracy Boost",
|
|
266
|
-
"No namespaces detected. Use namespaces to isolate user data or document segments for more accurate retrieval.",
|
|
267
|
-
"+ index.query(..., namespace='customer-a')"
|
|
268
|
-
))
|
|
269
|
-
|
|
270
|
-
# Google Cloud Database Optimizations
|
|
100
|
+
issues.append(OptimizationIssue('external_prompts', 'Externalize System Prompts', 'MEDIUM', 'Architectural Debt Reduction', "Keeping large system prompts in code makes them hard to version and test. Move them to 'system_prompt.md' and load dynamically.", "+ with open('system_prompt.md', 'r') as f:\n+ SYSTEM_PROMPT = f.read()"))
|
|
101
|
+
if 'retry' not in content_lower and 'tenacity' not in content_lower:
|
|
102
|
+
issues.append(OptimizationIssue('resiliency_retries', 'Implement Exponential Backoff', 'HIGH', '99.9% Reliability', "Your agent calls external APIs/DBs but has no retry logic. Use 'tenacity' to handle transient failures.", '+ @retry(wait=wait_exponential(multiplier=1, min=4, max=10), stop=stop_after_attempt(3))', package='tenacity'))
|
|
103
|
+
if 'session' not in content_lower and 'conversation_id' not in content_lower:
|
|
104
|
+
issues.append(OptimizationIssue('session_management', 'Add Session Tracking', 'MEDIUM', 'User Continuity', "No session tracking detected. Agents in production need a 'conversation_id' to maintain multi-turn context.", '+ def chat(q: str, conversation_id: str = None):'))
|
|
105
|
+
if 'pinecone' in content_lower:
|
|
106
|
+
if 'grpc' not in content_lower:
|
|
107
|
+
issues.append(OptimizationIssue('pinecone_grpc', 'Pinecone High-Perf (gRPC)', 'MEDIUM', '40% latency reduction', 'You are using the standard Pinecone client. Switching to pinecone[grpc] enables low-latency streaming for large vector retrievals.', "+ from pinecone.grpc import PineconeGRPC as Pinecone\n+ pc = Pinecone(api_key='...')"))
|
|
108
|
+
if 'namespace' not in content_lower:
|
|
109
|
+
issues.append(OptimizationIssue('pinecone_isolation', 'Pinecone Namespace Isolation', 'MEDIUM', 'RAG Accuracy Boost', 'No namespaces detected. Use namespaces to isolate user data or document segments for more accurate retrieval.', "+ index.query(..., namespace='customer-a')"))
|
|
110
|
+
if 'alloydb' in content_no_comments:
|
|
111
|
+
if 'columnar' not in content_no_comments:
|
|
112
|
+
issues.append(OptimizationIssue('alloydb_columnar', 'AlloyDB Columnar Engine', 'HIGH', '100x Query Speedup', 'AlloyDB detected. Enable the Columnar Engine for analytical and AI-driven vector queries.', '+ # Enable AlloyDB Columnar Engine for vector scaling'))
|
|
113
|
+
if 'bigquery' in content_no_comments or 'bq' in content_no_comments:
|
|
114
|
+
if 'vector_search' not in content_no_comments:
|
|
115
|
+
issues.append(OptimizationIssue('bq_vector_search', 'BigQuery Vector Search', 'HIGH', 'FinOps: Serverless RAG', 'BigQuery detected. Use BQ Vector Search for cost-effective RAG over massive datasets without moving data to a separate DB.', '+ SELECT * FROM VECTOR_SEARCH(TABLE my_dataset.embeddings, ...)'))
|
|
116
|
+
if 'cloudsql' in content_lower or 'psycopg2' in content_lower or 'sqlalchemy' in content_lower:
|
|
117
|
+
if 'cloud-sql-connector' not in content_lower:
|
|
118
|
+
issues.append(OptimizationIssue('cloudsql_connector', 'Cloud SQL Python Connector', 'MEDIUM', '100% Secure Auth', 'Using raw drivers detected. Use the official Cloud SQL Python Connector for IAM-based authentication and automatic encryption.', '+ from google.cloud.sql.connector import Connector\n+ connector = Connector()'))
|
|
119
|
+
if 'firestore' in content_lower:
|
|
120
|
+
if 'vector' not in content_lower:
|
|
121
|
+
issues.append(OptimizationIssue('firestore_vector', 'Firestore Vector Search (Native)', 'HIGH', 'Real-time RAG', 'Firestore detected. Use native Vector Search and KNN queries for high-concurrency mobile/web agent retrieval.', "+ collection.find_nearest(vector_field='embedding', ...)"))
|
|
122
|
+
if 'oci' in content_lower or 'oracle' in content_lower:
|
|
123
|
+
if 'resource_principal' not in content_lower:
|
|
124
|
+
issues.append(OptimizationIssue('oci_auth', 'OCI Resource Principals', 'HIGH', '100% Secure Auth', 'Using static config/keys detected on OCI. Use Resource Principals for secure, credential-less access from OCI compute.', '+ auth = oci.auth.signers.get_resource_principals_signer()'))
|
|
125
|
+
if 'crewai' in content_lower or 'crew(' in content_lower:
|
|
126
|
+
if 'manager_agent' not in content_lower and 'hierarchical' not in content_lower:
|
|
127
|
+
issues.append(OptimizationIssue('crewai_manager', 'Use Hierarchical Manager', 'MEDIUM', '30% Coordination Boost', 'Your crew uses sequential execution. For complex tasks, a Manager Agent improves task handoffs and reasoning.', '+ crew = Crew(..., process=Process.hierarchical, manager_agent=manager)'))
|
|
271
128
|
|
|
272
|
-
#
|
|
273
|
-
if
|
|
274
|
-
if
|
|
275
|
-
issues.append(OptimizationIssue(
|
|
276
|
-
"alloydb_columnar", "AlloyDB Columnar Engine", "HIGH", "100x Query Speedup",
|
|
277
|
-
"AlloyDB detected. Enable the Columnar Engine for analytical and AI-driven vector queries.",
|
|
278
|
-
"+ # Enable AlloyDB Columnar Engine for vector scaling"
|
|
279
|
-
))
|
|
129
|
+
# v1.2 Principal SME Extras
|
|
130
|
+
if 'rag' in content_lower or 'retriev' in content_lower:
|
|
131
|
+
if 'chunk' not in content_lower and 'atomic' not in content_lower:
|
|
132
|
+
issues.append(OptimizationIssue('atomic_rag', 'Implement Atomic RAG', 'HIGH', '30% Token Savings', "You appear to be using RAG but no 'chunking' or 'atomic retrieval' logic was detected. Sending full documents kills margins.", "+ docs = vector_db.search(query, chunk_limit=5)"))
|
|
280
133
|
|
|
281
|
-
|
|
282
|
-
|
|
283
|
-
if "vector_search" not in content_no_comments:
|
|
284
|
-
issues.append(OptimizationIssue(
|
|
285
|
-
"bq_vector_search", "BigQuery Vector Search", "HIGH", "FinOps: Serverless RAG",
|
|
286
|
-
"BigQuery detected. Use BQ Vector Search for cost-effective RAG over massive datasets without moving data to a separate DB.",
|
|
287
|
-
"+ SELECT * FROM VECTOR_SEARCH(TABLE my_dataset.embeddings, ...)"
|
|
288
|
-
))
|
|
134
|
+
if 'model' in content_lower and 'router' not in content_lower and 'is_simple' not in content_lower:
|
|
135
|
+
issues.append(OptimizationIssue('tiered_orchestration', 'Implement Tiered Orchestration', 'HIGH', '70% Cost Savings', "No model routing detected. Use a 'Router Agent' to decide if a query needs a Pro model or a Flash model.", "+ if is_simple(query): model = 'gemini-1.5-flash'"))
|
|
289
136
|
|
|
290
|
-
|
|
291
|
-
|
|
292
|
-
if "cloud-sql-connector" not in content_lower:
|
|
293
|
-
issues.append(OptimizationIssue(
|
|
294
|
-
"cloudsql_connector", "Cloud SQL Python Connector", "MEDIUM", "100% Secure Auth",
|
|
295
|
-
"Using raw drivers detected. Use the official Cloud SQL Python Connector for IAM-based authentication and automatic encryption.",
|
|
296
|
-
"+ from google.cloud.sql.connector import Connector\n+ connector = Connector()"
|
|
297
|
-
))
|
|
137
|
+
if any(phrase in content_lower for phrase in ["you are a helpful assistant", "very good at", "please help me"]):
|
|
138
|
+
issues.append(OptimizationIssue('prompt_compression', 'Token Density: Redundant English', 'MEDIUM', '15% Token Savings', "Identified 'filler' tokens in system instructions. Compressing 'You are a helpful assistant who is very good at coding' to 'Expert coder' reduces baseline cost.", "- You are a helpful assistant...\n+ Expert coder"))
|
|
298
139
|
|
|
299
|
-
|
|
300
|
-
|
|
301
|
-
if "vector" not in content_lower:
|
|
302
|
-
issues.append(OptimizationIssue(
|
|
303
|
-
"firestore_vector", "Firestore Vector Search (Native)", "HIGH", "Real-time RAG",
|
|
304
|
-
"Firestore detected. Use native Vector Search and KNN queries for high-concurrency mobile/web agent retrieval.",
|
|
305
|
-
"+ collection.find_nearest(vector_field='embedding', ...)"
|
|
306
|
-
))
|
|
307
|
-
|
|
308
|
-
# Oracle OCI Optimizations
|
|
309
|
-
if "oci" in content_lower or "oracle" in content_lower:
|
|
310
|
-
if "resource_principal" not in content_lower:
|
|
311
|
-
issues.append(OptimizationIssue(
|
|
312
|
-
"oci_auth", "OCI Resource Principals", "HIGH", "100% Secure Auth",
|
|
313
|
-
"Using static config/keys detected on OCI. Use Resource Principals for secure, credential-less access from OCI compute.",
|
|
314
|
-
"+ auth = oci.auth.signers.get_resource_principals_signer()"
|
|
315
|
-
))
|
|
316
|
-
|
|
317
|
-
# CrewAI Optimizations
|
|
318
|
-
if "crewai" in content_lower or "crew(" in content_lower:
|
|
319
|
-
if "manager_agent" not in content_lower and "hierarchical" not in content_lower:
|
|
320
|
-
issues.append(OptimizationIssue(
|
|
321
|
-
"crewai_manager", "Use Hierarchical Manager", "MEDIUM", "30% Coordination Boost",
|
|
322
|
-
"Your crew uses sequential execution. For complex tasks, a Manager Agent improves task handoffs and reasoning.",
|
|
323
|
-
"+ crew = Crew(..., process=Process.hierarchical, manager_agent=manager)"
|
|
324
|
-
))
|
|
140
|
+
if 'model' in content_lower and 'retry' not in content_lower and 'tenacity' not in content_lower:
|
|
141
|
+
issues.append(OptimizationIssue('quota_management', 'Quota Management: Missing Backoff', 'HIGH', 'Resiliency & ROI', "High-volume model calls detected without Exponential Backoff. Failed requests due to rate-limiting represent wasted compute and broken ROI.", "+ @retry(wait=wait_exponential(multiplier=1, max=10))"))
|
|
325
142
|
|
|
326
143
|
return issues
|
|
327
144
|
|
|
328
145
|
def estimate_savings(token_count: int, issues: List[OptimizationIssue]) -> Dict[str, Any]:
|
|
329
146
|
baseline_cost_per_m = 10.0
|
|
330
|
-
monthly_requests = 10000
|
|
331
|
-
current_cost =
|
|
332
|
-
|
|
147
|
+
monthly_requests = 10000
|
|
148
|
+
current_cost = token_count / 1000000 * baseline_cost_per_m * monthly_requests
|
|
333
149
|
total_savings_pct = 0.0
|
|
334
150
|
for issue in issues:
|
|
335
|
-
if
|
|
336
|
-
total_savings_pct += 0.45
|
|
337
|
-
elif
|
|
338
|
-
total_savings_pct += 0.35
|
|
339
|
-
elif
|
|
340
|
-
total_savings_pct += 0.
|
|
341
|
-
elif
|
|
342
|
-
total_savings_pct += 0.25
|
|
151
|
+
if '90%' in issue.savings:
|
|
152
|
+
total_savings_pct += 0.45
|
|
153
|
+
elif '70%' in issue.savings:
|
|
154
|
+
total_savings_pct += 0.35
|
|
155
|
+
elif '50%' in issue.savings:
|
|
156
|
+
total_savings_pct += 0.2
|
|
157
|
+
elif '40-60%' in issue.savings:
|
|
158
|
+
total_savings_pct += 0.25
|
|
343
159
|
else:
|
|
344
|
-
total_savings_pct += 0.05
|
|
345
|
-
|
|
160
|
+
total_savings_pct += 0.05
|
|
346
161
|
projected_savings = current_cost * min(total_savings_pct, 0.85)
|
|
347
|
-
|
|
348
|
-
return {
|
|
349
|
-
"current_monthly": current_cost,
|
|
350
|
-
"projected_savings": projected_savings,
|
|
351
|
-
"new_monthly": current_cost - projected_savings
|
|
352
|
-
}
|
|
162
|
+
return {'current_monthly': current_cost, 'projected_savings': projected_savings, 'new_monthly': current_cost - projected_savings}
|
|
353
163
|
|
|
354
164
|
@app.command()
def audit(file_path: str=typer.Argument('agent.py', help='Path to the agent code to audit'), interactive: bool=typer.Option(True, '--interactive/--no-interactive', '-i', help='Run in interactive mode'), apply_fix: bool=typer.Option(False, '--apply', '--fix', help='Automatically apply recommended fixes'), quick: bool=typer.Option(False, '--quick', '-q', help='Skip live evidence fetching for faster execution')):
    """Audit agent code for cost/resiliency optimizations and optionally apply fixes.

    Resolves ``file_path`` (a directory is resolved to a single entry point
    via the DiscoveryEngine), analyzes the code, prints a FinOps projection
    plus per-issue evidence, and — when approved interactively or forced via
    --apply — prepends each issue's fix pattern to the file.

    Raises:
        typer.Exit: code 1 when the path is missing, no entry point is found,
            or (non-interactive mode) HIGH impact issues remain; code 0 when
            only compatibility reports were produced with no code issues.
    """
    console.print(Panel.fit('🔍 [bold blue]GCP AGENT OPS: OPTIMIZER AUDIT[/bold blue]', border_style='blue'))
    if not os.path.exists(file_path):
        console.print(f'❌ [red]Error: Path {file_path} not found.[/red]')
        raise typer.Exit(1)
    if os.path.isdir(file_path):
        # Directories are resolved to a concrete agent entry-point file.
        # Lazy import keeps CLI startup light.
        from agent_ops_cockpit.ops.discovery import DiscoveryEngine
        discovery = DiscoveryEngine(file_path)
        file_path = discovery.find_agent_brain()
        if not os.path.exists(file_path):
            console.print(f'❌ [red]Error: No python entry point found in {discovery.root_path}[/red]')
            raise typer.Exit(1)
    console.print(f'Target: [yellow]{file_path}[/yellow]')
    with open(file_path, 'r') as f:
        content = f.read()
    # Heuristic: collect every imported package name for compatibility checks.
    imports = re.findall('(?:from|import)\\s+([\\w\\.-]+)', content)
    from agent_ops_cockpit.ops.evidence_bridge import get_installed_version
    # Snapshot installed versions of the SDKs the analyzer inspects.
    package_versions = {pkg: get_installed_version(pkg) for pkg in ['google-cloud-aiplatform', 'openai', 'anthropic', 'langgraph', 'crewai']}
    # Rough estimate: ~1.5 tokens per whitespace-separated word.
    token_estimate = len(content.split()) * 1.5
    console.print(f'📊 Token Metrics: ~[bold]{token_estimate:.0f}[/bold] prompt tokens detected.')
    issues = analyze_code(content, file_path, versions=package_versions)
    # Live evidence fetching is skipped in --quick mode for faster runs.
    if not quick:
        for issue in issues:
            if issue.package:
                issue.evidence = get_package_evidence(issue.package)
    # --- Cross-package validation ---
    comp_reports = get_compatibility_report(imports)
    if comp_reports:
        console.print('\n[bold yellow]🧩 Cross-Package Validation:[/bold yellow]')
        for report in comp_reports:
            if report['type'] == 'INCOMPATIBLE':
                console.print(f"❌ [bold red]Conflict Detected:[/bold red] {report['component']} + {report['conflict_with']}")
                console.print(f"   [dim]{report['reason']}[/dim]")
            elif report['type'] == 'SYNERGY':
                console.print(f"✅ [bold green]Synergy Verified:[/bold green] {report['component']} is optimally paired.")
    if not issues:
        console.print('\n[bold green]✅ No immediate code-level optimizations found. Your agent is lean![/bold green]')
        if not comp_reports:
            return
        else:
            # Compatibility findings alone still end the run cleanly.
            raise typer.Exit(0)
    savings = estimate_savings(token_estimate, issues)
    finops_panel = Panel(f"💰 [bold]FinOps Projection (Est. 10k req/mo)[/bold]\nCurrent Monthly Spend: [red]${savings['current_monthly']:.2f}[/red]\nProjected Savings: [green]${savings['projected_savings']:.2f}[/green]\nNew Monthly Spend: [blue]${savings['new_monthly']:.2f}[/blue]", title='[bold yellow]Financial Optimization[/bold yellow]', border_style='yellow')
    console.print(finops_panel)
    applied = 0
    rejected = 0
    fixed_content = content
    for opt in issues:
        console.print(f'\n[bold white on blue] --- [{opt.impact} IMPACT] {opt.title} --- [/bold white on blue]')
        console.print(f'Benefit: [green]{opt.savings}[/green]')
        console.print(f'Reason: {opt.description}')
        if opt.evidence and 'error' not in opt.evidence:
            ev = opt.evidence
            ev_title = '[dim]SDK Citation & Evidence[/dim]'
            if ev.get('upgrade_required'):
                console.print('🚨 [bold yellow]URGENT UPGRADE RECOMMENDED[/bold yellow]')
                console.print(f"   Current: {ev['installed_version']} | Required for optimization: >={ev['min_optimized_version']}")
                ev_title = '[bold red]UPGRADE REQUIRED Evidence[/bold red]'
            ev_panel = Panel(f"🔗 [bold]Source[/bold]: {ev['source_url']}\n📅 [bold]Latest Release[/bold]: {ev['release_date'][:10]}\n📝 [bold]Note[/bold]: {ev['best_practice_context']}", title=ev_title, border_style='red' if ev.get('upgrade_required') else 'dim')
            console.print(ev_panel)
            # Machine-readable citation line parsed by the orchestrator.
            console.print(f"SOURCE: {opt.title} | {ev['source_url']} | {ev['best_practice_context'].replace('\\n', ' ')}")
        syntax = Syntax(opt.diff, 'python', theme='monokai', line_numbers=False)
        console.print(syntax)
        # Machine-readable ACTION line used for report generation.
        console.print(f'ACTION: {file_path}:1 | Optimization: {opt.title} | {opt.description} (Est. {opt.savings})')
        do_apply = False
        if apply_fix:
            do_apply = True
        elif interactive:
            do_apply = typer.confirm('\nDo you want to apply this code-level optimization?', default=True)
        if do_apply:
            console.print('✅ [APPROVED] applying fix...')
            if opt.fix_pattern:
                # Fix patterns are prepended to the file content as a whole.
                fixed_content = opt.fix_pattern + fixed_content
            applied += 1
        else:
            console.print('❌ [REJECTED] skipping optimization.')
            rejected += 1
    if applied > 0:
        with open(file_path, 'w') as f:
            f.write(fixed_content)
        console.print(f'\n✨ [bold green]Applied {applied} optimizations to {file_path}![/bold green]')
    summary_table = Table(title='🎯 AUDIT SUMMARY')
    summary_table.add_column('Category', style='cyan')
    summary_table.add_column('Count', style='magenta')
    summary_table.add_row('Optimizations Applied', str(applied))
    summary_table.add_row('Optimizations Rejected', str(rejected))
    console.print(summary_table)
    # CI / non-interactive gate: fail the run when HIGH impact issues remain.
    if not interactive and any((opt.impact == 'HIGH' for opt in issues)):
        console.print('\n[bold red]❌ HIGH IMPACT issues detected. Optimization required for production.[/bold red]')
        raise typer.Exit(code=1)
|
|
510
255
|
|
|
511
|
-
|
|
256
|
+
@app.command()
def version() -> None:
    """Show the version of the Optimizer module."""
    release_tag = '[bold cyan]v1.3.0[/bold cyan]'
    console.print(release_tag)
|
|
260
|
+
|
|
261
|
+
# Allow running this module directly as the CLI entry point.
if __name__ == '__main__':
    app()
|