@musashishao/agent-kit 1.6.0 → 1.6.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.agent/.shared/ui-ux-pro-max/data/charts.csv +26 -0
- package/.agent/.shared/ui-ux-pro-max/data/colors.csv +97 -0
- package/.agent/.shared/ui-ux-pro-max/data/icons.csv +101 -0
- package/.agent/.shared/ui-ux-pro-max/data/landing.csv +31 -0
- package/.agent/.shared/ui-ux-pro-max/data/products.csv +97 -0
- package/.agent/.shared/ui-ux-pro-max/data/prompts.csv +24 -0
- package/.agent/.shared/ui-ux-pro-max/data/react-performance.csv +45 -0
- package/.agent/.shared/ui-ux-pro-max/data/stacks/flutter.csv +53 -0
- package/.agent/.shared/ui-ux-pro-max/data/stacks/html-tailwind.csv +56 -0
- package/.agent/.shared/ui-ux-pro-max/data/stacks/jetpack-compose.csv +53 -0
- package/.agent/.shared/ui-ux-pro-max/data/stacks/nextjs.csv +53 -0
- package/.agent/.shared/ui-ux-pro-max/data/stacks/nuxt-ui.csv +51 -0
- package/.agent/.shared/ui-ux-pro-max/data/stacks/nuxtjs.csv +59 -0
- package/.agent/.shared/ui-ux-pro-max/data/stacks/react-native.csv +52 -0
- package/.agent/.shared/ui-ux-pro-max/data/stacks/react.csv +54 -0
- package/.agent/.shared/ui-ux-pro-max/data/stacks/shadcn.csv +61 -0
- package/.agent/.shared/ui-ux-pro-max/data/stacks/svelte.csv +54 -0
- package/.agent/.shared/ui-ux-pro-max/data/stacks/swiftui.csv +51 -0
- package/.agent/.shared/ui-ux-pro-max/data/stacks/vue.csv +50 -0
- package/.agent/.shared/ui-ux-pro-max/data/styles.csv +59 -0
- package/.agent/.shared/ui-ux-pro-max/data/typography.csv +58 -0
- package/.agent/.shared/ui-ux-pro-max/data/ui-reasoning.csv +101 -0
- package/.agent/.shared/ui-ux-pro-max/data/ux-guidelines.csv +100 -0
- package/.agent/.shared/ui-ux-pro-max/data/web-interface.csv +31 -0
- package/.agent/.shared/ui-ux-pro-max/scripts/core.py +258 -0
- package/.agent/.shared/ui-ux-pro-max/scripts/design_system.py +487 -0
- package/.agent/.shared/ui-ux-pro-max/scripts/search.py +76 -0
- package/.agent/adr/ADR-TEMPLATE.md +57 -0
- package/.agent/adr/README.md +30 -0
- package/.agent/agents/backend-specialist.md +1 -1
- package/.agent/agents/devops-engineer.md +1 -1
- package/.agent/agents/performance-optimizer.md +1 -1
- package/.agent/agents/project-planner.md +22 -2
- package/.agent/agents/security-auditor.md +1 -1
- package/.agent/dashboard/index.html +169 -0
- package/.agent/rules/CODE_RULES.md +88 -0
- package/.agent/rules/GEMINI.md +35 -177
- package/.agent/rules/MEMORY_STATE.md +62 -0
- package/.agent/rules/REFERENCE.md +85 -0
- package/.agent/skills/ai-incident-management/SKILL.md +517 -0
- package/.agent/skills/ai-security-guardrails/SKILL.md +405 -0
- package/.agent/skills/ai-security-guardrails/owasp-llm-top10.md +160 -0
- package/.agent/skills/ai-security-guardrails/scripts/prompt_injection_scanner.py +230 -0
- package/.agent/skills/app-builder/SKILL.md +10 -7
- package/.agent/skills/compliance-for-ai/SKILL.md +411 -0
- package/.agent/skills/observability-patterns/SKILL.md +484 -0
- package/.agent/skills/observability-patterns/scripts/otel_validator.py +330 -0
- package/.agent/skills/opentelemetry-expert/SKILL.md +738 -0
- package/.agent/skills/opentelemetry-expert/scripts/trace_analyzer.py +351 -0
- package/.agent/skills/privacy-preserving-dev/SKILL.md +442 -0
- package/.agent/skills/privacy-preserving-dev/scripts/pii_scanner.py +285 -0
- package/.agent/skills/spec-writing/SKILL.md +189 -0
- package/.agent/skills/tdd-workflow/SKILL.md +30 -0
- package/.agent/workflows/create.md +16 -6
- package/.agent/workflows/plan.md +8 -8
- package/.agent/workflows/spec.md +189 -0
- package/.agent/workflows/test.md +41 -0
- package/package.json +5 -2
|
@@ -0,0 +1,351 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Skill: opentelemetry-expert
|
|
4
|
+
Script: trace_analyzer.py
|
|
5
|
+
Purpose: Analyze trace spans for performance issues in AI applications
|
|
6
|
+
Usage: python trace_analyzer.py <trace_file.json> [--output json|summary]
|
|
7
|
+
python trace_analyzer.py --stdin < trace.json
|
|
8
|
+
Output: Performance analysis and recommendations
|
|
9
|
+
|
|
10
|
+
This script analyzes:
|
|
11
|
+
1. Span duration anomalies
|
|
12
|
+
2. Token usage patterns
|
|
13
|
+
3. Error rates
|
|
14
|
+
4. Agent loop efficiency
|
|
15
|
+
"""
|
|
16
|
+
import os
|
|
17
|
+
import sys
|
|
18
|
+
import json
|
|
19
|
+
import argparse
|
|
20
|
+
from typing import Dict, List, Any, Optional
|
|
21
|
+
from datetime import datetime
|
|
22
|
+
from statistics import mean, median, stdev
|
|
23
|
+
|
|
24
|
+
# Fix console encoding: force UTF-8 with replacement on stdout/stderr so the
# report prints cleanly on consoles whose default codec is not UTF-8
# (e.g. Windows cp1252).
try:
    sys.stdout.reconfigure(encoding='utf-8', errors='replace')
    sys.stderr.reconfigure(encoding='utf-8', errors='replace')
except AttributeError:
    # reconfigure() exists on text streams from Python 3.7+; when stdout is
    # replaced by a non-reconfigurable object, just keep the defaults.
    pass
|
|
30
|
+
|
|
31
|
+
# ============================================================================
|
|
32
|
+
# ANALYSIS FUNCTIONS
|
|
33
|
+
# ============================================================================
|
|
34
|
+
|
|
35
|
+
def parse_trace_data(data: Any) -> List[Dict]:
    """Normalize raw trace input into a flat list of span dicts.

    Accepts either a list (of span dicts carrying a trace id, or of trace
    dicts carrying a "spans" list) or a single dict in OTLP-like
    ("spans") or Jaeger-like ("data" -> traces) shape. Anything else
    yields an empty list.
    """
    collected: List[Dict] = []

    if isinstance(data, list):
        for entry in data:
            if not isinstance(entry, dict):
                continue
            if "spans" in entry:
                collected.extend(entry["spans"])
            elif "traceId" in entry or "trace_id" in entry:
                # Bare span object (camelCase or snake_case id field).
                collected.append(entry)
    elif isinstance(data, dict):
        if "spans" in data:
            collected = data["spans"]
        elif isinstance(data.get("data"), list):
            # Jaeger export: {"data": [{"spans": [...]}, ...]}
            for trace in data["data"]:
                if "spans" in trace:
                    collected.extend(trace["spans"])

    return collected
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
def extract_span_info(span: Dict) -> Dict:
    """Extract a normalized summary dict from one raw span.

    Handles both OTLP camelCase and snake_case field names, and both
    dict-shaped and OTLP list-shaped attribute collections.

    Returns a dict with: name, trace/span/parent ids, duration_ms,
    status code, flattened attributes, is_llm/is_agent flags, model
    name, token count, and an error flag.

    Fixes over the previous version:
    - list-form attribute entries without a "key" no longer produce a
      None key that crashes the startswith() scans below;
    - an explicit ``"status": null`` in the span no longer raises
      AttributeError;
    - a non-numeric ``llm.usage.total_tokens`` value falls back to 0
      instead of raising ValueError.
    """
    # Normalize attributes: OTLP exports them as a list of
    # {"key": ..., "value": {"stringValue"/"intValue": ...}} entries.
    attributes = span.get("attributes", {})
    if isinstance(attributes, list):
        attributes = {
            a.get("key"): a.get("value", {}).get("stringValue") or a.get("value", {}).get("intValue")
            for a in attributes
            if isinstance(a, dict) and a.get("key") is not None  # skip malformed entries
        }

    # Duration: Unix-nano timestamps -> milliseconds; 0 when either end missing.
    start_time = span.get("startTimeUnixNano", 0) or span.get("start_time", 0)
    end_time = span.get("endTimeUnixNano", 0) or span.get("end_time", 0)
    duration_ms = (end_time - start_time) / 1_000_000 if start_time and end_time else 0

    # Guard against an explicit "status": null in the source JSON.
    status = span.get("status") or {}

    # Token count may arrive as int, numeric string, or garbage.
    try:
        tokens = int(attributes.get("llm.usage.total_tokens", 0) or 0)
    except (TypeError, ValueError):
        tokens = 0

    return {
        "name": span.get("name", "unknown"),
        "trace_id": span.get("traceId") or span.get("trace_id", ""),
        "span_id": span.get("spanId") or span.get("span_id", ""),
        "parent_id": span.get("parentSpanId") or span.get("parent_id", ""),
        "duration_ms": duration_ms,
        "status": status.get("code", "OK"),
        "attributes": attributes,
        # Classify by attribute-key namespace; non-str keys can appear in
        # hand-written traces, so check the type before startswith().
        "is_llm": any(isinstance(k, str) and k.startswith("llm.") for k in attributes),
        "is_agent": any(isinstance(k, str) and k.startswith("agent.") for k in attributes),
        "model": attributes.get("llm.model") or attributes.get("llm.request.model", ""),
        "tokens": tokens,
        "error": status.get("code") == "ERROR",
    }
|
|
83
|
+
|
|
84
|
+
|
|
85
|
+
def analyze_llm_performance(spans: List[Dict]) -> Dict:
    """Summarize LLM spans: call volume, tokens, per-model latency, outliers."""
    llm_spans = [span for span in spans if span["is_llm"]]
    if not llm_spans:
        return {"status": "No LLM spans found"}

    # Bucket spans by model name (empty model -> "unknown").
    by_model: Dict[str, List[Dict]] = {}
    for span in llm_spans:
        by_model.setdefault(span["model"] or "unknown", []).append(span)

    report: Dict[str, Any] = {
        "total_calls": len(llm_spans),
        "total_tokens": sum(span["tokens"] for span in llm_spans),
        "total_duration_ms": sum(span["duration_ms"] for span in llm_spans),
        "error_count": sum(1 for span in llm_spans if span["error"]),
        "by_model": {},
    }

    # Per-model latency/token stats; zero-valued samples are excluded
    # from the averages so missing instrumentation doesn't skew them.
    for model, group in by_model.items():
        durations = [s["duration_ms"] for s in group if s["duration_ms"] > 0]
        token_counts = [s["tokens"] for s in group if s["tokens"] > 0]
        report["by_model"][model] = {
            "count": len(group),
            "avg_duration_ms": mean(durations) if durations else 0,
            "p50_duration_ms": median(durations) if durations else 0,
            "max_duration_ms": max(durations) if durations else 0,
            "avg_tokens": mean(token_counts) if token_counts else 0,
            "total_tokens": sum(token_counts),
            "errors": sum(1 for s in group if s["error"]),
        }

    # Flag spans slower than mean + 2 sigma (stdev needs >= 2 samples;
    # require at least 3 to make the threshold meaningful).
    all_durations = [s["duration_ms"] for s in llm_spans if s["duration_ms"] > 0]
    if len(all_durations) > 2:
        cutoff = mean(all_durations) + 2 * stdev(all_durations)
        report["outliers"] = [
            {"name": s["name"], "duration_ms": s["duration_ms"], "tokens": s["tokens"]}
            for s in llm_spans
            if s["duration_ms"] > cutoff
        ][:5]

    return report
|
|
135
|
+
|
|
136
|
+
|
|
137
|
+
def analyze_agent_loops(spans: List[Dict]) -> Dict:
    """Summarize agent spans: runs, steps, tool calls, per-tool reliability."""
    agent_spans = [span for span in spans if span["is_agent"]]
    if not agent_spans:
        return {"status": "No agent spans found"}

    # Classify spans by naming convention.
    root_runs = [s for s in agent_spans
                 if "agent.run" in s["name"] or s["name"].endswith(".run")]
    step_spans = [s for s in agent_spans if "step" in s["name"].lower()]
    tool_spans = [s for s in agent_spans if "tool" in s["name"].lower()]

    report: Dict[str, Any] = {
        "total_runs": len(root_runs),
        "total_steps": len(step_spans),
        "total_tool_calls": len(tool_spans),
        "avg_steps_per_run": (len(step_spans) / len(root_runs)) if root_runs else 0,
        "tool_usage": {},
    }

    # Per-tool call counts, cumulative latency, and error totals.
    usage = report["tool_usage"]
    for span in tool_spans:
        attrs = span["attributes"]
        tool_name = attrs.get("agent.tool.name") or attrs.get("agent.tool", "unknown")
        stats = usage.setdefault(tool_name, {"count": 0, "errors": 0, "total_duration_ms": 0})
        stats["count"] += 1
        stats["total_duration_ms"] += span["duration_ms"]
        if span["error"]:
            stats["errors"] += 1

    # Run-level latency (zero durations excluded).
    run_durations = [s["duration_ms"] for s in root_runs if s["duration_ms"] > 0]
    if run_durations:
        report["avg_run_duration_ms"] = mean(run_durations)
        report["max_run_duration_ms"] = max(run_durations)

    return report
|
|
178
|
+
|
|
179
|
+
|
|
180
|
+
def find_bottlenecks(spans: List[Dict]) -> List[Dict]:
    """Flag slow spans, token-heavy spans, and clustered errors."""
    issues: List[Dict] = []

    # Top-5 slowest spans; only report those above one second.
    for span in sorted(spans, key=lambda s: s["duration_ms"], reverse=True)[:5]:
        if span["duration_ms"] > 1000:
            issues.append({
                "type": "slow_span",
                "name": span["name"],
                "duration_ms": span["duration_ms"],
                "recommendation": f"Optimize {span['name']} - taking {span['duration_ms']:.0f}ms",
            })

    # Up to three spans burning more than 5k tokens each.
    heavy = [s for s in spans if s["tokens"] > 5000]
    for span in heavy[:3]:
        issues.append({
            "type": "high_tokens",
            "name": span["name"],
            "tokens": span["tokens"],
            "recommendation": f"Reduce token usage in {span['name']} ({span['tokens']} tokens)",
        })

    # More than three failed spans suggests a systematic problem.
    failed = [s for s in spans if s["error"]]
    if len(failed) > 3:
        issues.append({
            "type": "error_cluster",
            "count": len(failed),
            "recommendation": f"Investigate {len(failed)} errors - may indicate systematic issue",
        })

    return issues
|
|
215
|
+
|
|
216
|
+
|
|
217
|
+
def generate_recommendations(analysis: Dict) -> List[str]:
    """Turn the analysis sections into a flat list of actionable advice strings."""
    advice: List[str] = []

    llm = analysis.get("llm_analysis", {})
    agent = analysis.get("agent_analysis", {})
    bottlenecks = analysis.get("bottlenecks", [])

    # LLM health: overall error rate above 5% warrants retries.
    total_calls = llm.get("total_calls", 0)
    if total_calls > 0:
        error_rate = llm.get("error_count", 0) / total_calls
        if error_rate > 0.05:
            advice.append(f"High LLM error rate ({error_rate:.1%}) - implement retry logic")

    # Per-model latency: anything averaging over 5s is a caching candidate.
    for model, stats in llm.get("by_model", {}).items():
        if stats.get("avg_duration_ms", 0) > 5000:
            advice.append(f"Consider caching for {model} - avg latency {stats['avg_duration_ms']:.0f}ms")

    # Agent efficiency: long loops and flaky tools (>20% error rate).
    if agent.get("avg_steps_per_run", 0) > 10:
        advice.append(f"Agent averaging {agent['avg_steps_per_run']:.1f} steps - consider optimizing reasoning")

    for tool, stats in agent.get("tool_usage", {}).items():
        if stats["errors"] > stats["count"] * 0.2:
            advice.append(f"Tool '{tool}' has {stats['errors']}/{stats['count']} errors - fix reliability")

    # Surface the top bottleneck recommendations verbatim.
    for bottleneck in bottlenecks[:3]:
        if bottleneck.get("recommendation"):
            advice.append(bottleneck["recommendation"])

    return advice
|
|
249
|
+
|
|
250
|
+
|
|
251
|
+
# ============================================================================
|
|
252
|
+
# MAIN
|
|
253
|
+
# ============================================================================
|
|
254
|
+
|
|
255
|
+
def analyze_traces(trace_data: Any) -> Dict[str, Any]:
    """Run the full analysis pipeline over raw trace data.

    Parses spans, normalizes them, runs the LLM/agent/bottleneck
    analyzers, derives recommendations, and attaches an overall status
    line. Returns {"error": ...} when no spans can be extracted.
    """
    raw_spans = parse_trace_data(trace_data)
    if not raw_spans:
        return {"error": "No spans found in trace data"}

    normalized = [extract_span_info(raw) for raw in raw_spans]

    report: Dict[str, Any] = {
        "timestamp": datetime.now().isoformat(),
        "total_spans": len(normalized),
        "llm_analysis": analyze_llm_performance(normalized),
        "agent_analysis": analyze_agent_loops(normalized),
        "bottlenecks": find_bottlenecks(normalized),
    }
    report["recommendations"] = generate_recommendations(report)

    # Overall verdict: >10% failed spans outranks bottlenecks, which
    # outrank the all-clear.
    failed = sum(1 for s in normalized if s["error"])
    if failed > len(normalized) * 0.1:
        report["status"] = f"[!!] HIGH ERROR RATE: {failed}/{len(normalized)} spans failed"
    elif report["bottlenecks"]:
        report["status"] = f"[!] PERFORMANCE ISSUES: {len(report['bottlenecks'])} bottlenecks found"
    else:
        report["status"] = "[OK] Trace analysis complete"

    return report
|
|
284
|
+
|
|
285
|
+
|
|
286
|
+
def main():
    """CLI entry point: load trace JSON from a file or stdin, analyze it,
    and print the result as JSON (default) or a human-readable summary.

    Exits 1 when no input source is given, the file is missing, or the
    analysis itself fails (e.g. no spans in the input).

    Fix over the previous version: when analyze_traces() returns an
    {"error": ...} dict (empty trace), summary mode used to crash with
    KeyError on results['status']; we now report the error and exit 1.
    """
    parser = argparse.ArgumentParser(
        description="Analyze trace spans for AI application performance"
    )
    parser.add_argument("trace_file", nargs="?", help="Path to trace JSON file")
    parser.add_argument("--stdin", action="store_true", help="Read trace data from stdin")
    parser.add_argument("--output", choices=["json", "summary"], default="json",
                        help="Output format")

    args = parser.parse_args()

    # Read trace data from stdin or file; emit a JSON error otherwise.
    if args.stdin:
        trace_data = json.load(sys.stdin)
    elif args.trace_file:
        if not os.path.isfile(args.trace_file):
            print(json.dumps({"error": f"File not found: {args.trace_file}"}))
            sys.exit(1)
        with open(args.trace_file) as f:
            trace_data = json.load(f)
    else:
        print(json.dumps({"error": "Provide trace file or use --stdin"}))
        sys.exit(1)

    results = analyze_traces(trace_data)

    # Analysis-level failure (e.g. no spans): the result dict has none of
    # the keys the summary printer expects, so report it and bail out.
    if "error" in results:
        print(json.dumps(results))
        sys.exit(1)

    if args.output == "summary":
        print(f"\n{'='*60}")
        print(f"Trace Analysis")
        print(f"{'='*60}")
        print(f"Status: {results['status']}")
        print(f"Total Spans: {results['total_spans']}")

        llm = results.get("llm_analysis", {})
        if llm.get("total_calls"):
            print(f"\nLLM Performance:")
            print(f"  Total Calls: {llm['total_calls']}")
            print(f"  Total Tokens: {llm['total_tokens']}")
            print(f"  Errors: {llm['error_count']}")
            for model, stats in llm.get("by_model", {}).items():
                print(f"  {model}: {stats['count']} calls, avg {stats['avg_duration_ms']:.0f}ms")

        agent = results.get("agent_analysis", {})
        if agent.get("total_runs"):
            print(f"\nAgent Performance:")
            print(f"  Total Runs: {agent['total_runs']}")
            print(f"  Avg Steps/Run: {agent['avg_steps_per_run']:.1f}")
            print(f"  Tool Calls: {agent['total_tool_calls']}")

        if results.get("bottlenecks"):
            print(f"\nBottlenecks:")
            for bn in results["bottlenecks"][:5]:
                print(f"  • {bn['type']}: {bn.get('name', bn.get('count', 'n/a'))}")

        if results.get("recommendations"):
            print(f"\nRecommendations:")
            for rec in results["recommendations"]:
                print(f"  → {rec}")

        print(f"{'='*60}\n")
    else:
        print(json.dumps(results, indent=2))
|
|
348
|
+
|
|
349
|
+
|
|
350
|
+
# Run the CLI only when executed directly; importing the module for its
# analysis functions has no side effects beyond the encoding fix above.
if __name__ == "__main__":
    main()
|