arkaos 2.2.2 → 2.3.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66)
  1. package/VERSION +1 -1
  2. package/arka/skills/conclave/SKILL.md +194 -0
  3. package/arka/skills/human-writing/SKILL.md +143 -0
  4. package/config/agent-memory-template.md +28 -0
  5. package/config/disc-profiles.json +108 -0
  6. package/config/disc-team-validator.sh +94 -0
  7. package/config/gotchas-fixes.json +148 -0
  8. package/config/profile-template.json +12 -0
  9. package/config/providers-registry.json +56 -0
  10. package/config/settings-template.json +42 -0
  11. package/config/standards/communication.md +64 -0
  12. package/config/standards/orchestration.md +91 -0
  13. package/config/statusline-v2.sh +101 -0
  14. package/config/statusline.sh +139 -0
  15. package/config/system-prompt.sh +190 -0
  16. package/dashboard/LICENSE +21 -0
  17. package/dashboard/README.md +64 -0
  18. package/dashboard/app/app.config.ts +8 -0
  19. package/dashboard/app/app.vue +42 -0
  20. package/dashboard/app/assets/css/main.css +18 -0
  21. package/dashboard/app/composables/useApi.ts +8 -0
  22. package/dashboard/app/composables/useDashboard.ts +19 -0
  23. package/dashboard/app/error.vue +24 -0
  24. package/dashboard/app/layouts/default.vue +114 -0
  25. package/dashboard/app/pages/agents/[id].vue +506 -0
  26. package/dashboard/app/pages/agents/index.vue +225 -0
  27. package/dashboard/app/pages/budget.vue +132 -0
  28. package/dashboard/app/pages/commands.vue +180 -0
  29. package/dashboard/app/pages/health.vue +98 -0
  30. package/dashboard/app/pages/index.vue +126 -0
  31. package/dashboard/app/pages/knowledge.vue +729 -0
  32. package/dashboard/app/pages/personas.vue +597 -0
  33. package/dashboard/app/pages/settings.vue +146 -0
  34. package/dashboard/app/pages/tasks.vue +203 -0
  35. package/dashboard/app/types/index.d.ts +181 -0
  36. package/dashboard/app/utils/index.ts +7 -0
  37. package/dashboard/nuxt.config.ts +39 -0
  38. package/dashboard/package.json +37 -0
  39. package/dashboard/pnpm-workspace.yaml +7 -0
  40. package/dashboard/tsconfig.json +10 -0
  41. package/installer/cli.js +0 -0
  42. package/installer/index.js +262 -62
  43. package/knowledge/INDEX.md +34 -0
  44. package/knowledge/agents-registry.json +254 -0
  45. package/knowledge/channels-config.json +6 -0
  46. package/knowledge/commands-keywords.json +466 -0
  47. package/knowledge/commands-registry.json +2791 -0
  48. package/knowledge/commands-registry.json.bak +2791 -0
  49. package/knowledge/ecosystems.json +7 -0
  50. package/knowledge/obsidian-config.json +112 -0
  51. package/package.json +10 -6
  52. package/pyproject.toml +1 -1
  53. package/scripts/check-version.js +13 -0
  54. package/scripts/dashboard-api.py +636 -0
  55. package/scripts/knowledge-index.py +113 -0
  56. package/scripts/skill_validator.py +217 -0
  57. package/scripts/start-dashboard.sh +54 -0
  58. package/scripts/synapse-bridge.py +199 -0
  59. package/scripts/tools/brand_voice_analyzer.py +192 -0
  60. package/scripts/tools/dcf_calculator.py +168 -0
  61. package/scripts/tools/headline_scorer.py +215 -0
  62. package/scripts/tools/okr_cascade.py +207 -0
  63. package/scripts/tools/rice_prioritizer.py +230 -0
  64. package/scripts/tools/saas_metrics.py +234 -0
  65. package/scripts/tools/seo_checker.py +197 -0
  66. package/scripts/tools/tech_debt_analyzer.py +206 -0
@@ -0,0 +1,197 @@
1
+ #!/usr/bin/env python3
2
+ """SEO Checker -- scores an HTML page 0-100 across 8 on-page factors.
3
+ Part of ArkaOS v2 -- stdlib-only, no pip dependencies.
4
+ """
5
+ from __future__ import annotations
6
+ import argparse, json, re, sys
7
+ from dataclasses import asdict, dataclass, field
8
+ from html.parser import HTMLParser
9
+ from typing import Dict, List, Optional, Tuple
10
+
11
+ class _SEOParser(HTMLParser):
12
+ """Extract SEO-relevant elements from HTML."""
13
+ def __init__(self) -> None:
14
+ super().__init__()
15
+ self.title = ""
16
+ self._in_title = False
17
+ self.meta_description = ""
18
+ self.viewport_meta = False
19
+ self.h_tags: List[Tuple[int, str]] = []
20
+ self._cur_h: Optional[int] = None
21
+ self._cur_h_text: List[str] = []
22
+ self.images: List[Dict[str, Optional[str]]] = []
23
+ self.links: List[Dict[str, str]] = []
24
+ self._in_link = False
25
+ self._link_href = ""
26
+ self._link_text: List[str] = []
27
+ self.body_parts: List[str] = []
28
+ self._in_body = self._in_script = self._in_style = False
29
+
30
+ def handle_starttag(self, tag: str, attrs: List[Tuple[str, Optional[str]]]) -> None:
31
+ a = {k: (v or "") for k, v in attrs}
32
+ t = tag.lower()
33
+ if t == "title":
34
+ self._in_title = True
35
+ elif t == "meta":
36
+ name, prop = a.get("name", "").lower(), a.get("property", "").lower()
37
+ if name == "description": self.meta_description = a.get("content", "")
38
+ if name == "viewport": self.viewport_meta = True
39
+ if prop == "og:description" and not self.meta_description:
40
+ self.meta_description = a.get("content", "")
41
+ elif t in ("h1", "h2", "h3", "h4", "h5", "h6"):
42
+ self._cur_h, self._cur_h_text = int(t[1]), []
43
+ elif t == "img":
44
+ self.images.append({"src": a.get("src", ""), "alt": a.get("alt")})
45
+ elif t == "a":
46
+ self._in_link, self._link_href, self._link_text = True, a.get("href", ""), []
47
+ elif t == "body": self._in_body = True
48
+ elif t == "script": self._in_script = True
49
+ elif t == "style": self._in_style = True
50
+
51
+ def handle_endtag(self, tag: str) -> None:
52
+ t = tag.lower()
53
+ if t == "title": self._in_title = False
54
+ elif t in ("h1", "h2", "h3", "h4", "h5", "h6") and self._cur_h is not None:
55
+ self.h_tags.append((self._cur_h, " ".join(self._cur_h_text).strip()))
56
+ self._cur_h = None
57
+ elif t == "a" and self._in_link:
58
+ self.links.append({"href": self._link_href, "text": " ".join(self._link_text).strip()})
59
+ self._in_link = False
60
+ elif t == "script": self._in_script = False
61
+ elif t == "style": self._in_style = False
62
+
63
+ def handle_data(self, data: str) -> None:
64
+ if self._in_title: self.title += data
65
+ if self._cur_h is not None: self._cur_h_text.append(data)
66
+ if self._in_link: self._link_text.append(data)
67
+ if self._in_body and not self._in_script and not self._in_style:
68
+ self.body_parts.append(data)
69
+
70
@dataclass
class Check:
    """Result of a single on-page SEO check."""
    name: str    # check identifier; must match a WEIGHTS key to count toward the total
    score: int   # 0-100 sub-score for this factor
    passed: bool
    note: str    # human-readable summary of the outcome
    details: Dict = field(default_factory=dict)  # extra per-check data (lengths, counts, values)
77
+
78
@dataclass
class SEOResult:
    """Aggregate outcome returned by analyze_html()."""
    overall_score: int = 0  # weighted 0-100 total across all checks
    grade: str = ""         # letter grade A-F derived from overall_score
    checks: List[Check] = field(default_factory=list)  # the individual Check results
83
+
84
# Relative weight of each check in the overall score. Values sum to 100 and
# keys must match the Check.name values produced by analyze_html().
WEIGHTS: Dict[str, int] = {"title": 20, "meta_description": 15, "h1": 15,
    "heading_hierarchy": 10, "image_alt_text": 10, "link_ratio": 10,
    "word_count": 15, "viewport_meta": 5}
87
+
88
def analyze_html(html: str) -> SEOResult:
    """Parse *html* and return an SEOResult with per-check scores."""
    parser = _SEOParser()
    parser.feed(html)
    results: List[Check] = []

    # --- Title tag -------------------------------------------------------
    title = parser.title.strip()
    title_len = len(title)
    title_ok = 50 <= title_len <= 60
    if title_ok:
        title_score, title_note = 100, "Good length"
    elif title:
        title_score = 50
        title_note = f"Too {'short' if title_len < 50 else 'long'} ({title_len} chars)"
    else:
        title_score, title_note = 0, "Missing title tag"
    results.append(Check("title", title_score, title_ok, title_note,
                         {"value": title, "length": title_len, "optimal": "50-60 chars"}))

    # --- Meta description ------------------------------------------------
    desc = parser.meta_description.strip()
    desc_len = len(desc)
    desc_ok = 150 <= desc_len <= 160
    if desc_ok:
        desc_score = 100
    elif 100 <= desc_len < 150 or 160 < desc_len <= 200:
        desc_score = 50  # near the optimal range: partial credit
    elif desc:
        desc_score = 30
    else:
        desc_score = 0
    if desc_ok:
        desc_note = "Good length"
    elif desc:
        desc_note = f"Too {'short' if desc_len < 150 else 'long'} ({desc_len} chars)"
    else:
        desc_note = "Missing meta description"
    results.append(Check("meta_description", desc_score, desc_ok, desc_note,
                         {"length": desc_len, "optimal": "150-160 chars"}))

    # --- H1 --------------------------------------------------------------
    h1_texts = [text for level, text in parser.h_tags if level == 1]
    h1_count = len(h1_texts)
    h1_ok = h1_count == 1
    if h1_ok:
        h1_score, h1_note = 100, "Exactly one H1"
    elif h1_count > 1:
        h1_score, h1_note = 50, f"Multiple H1s ({h1_count})"
    else:
        h1_score, h1_note = 0, "No H1 found"
    results.append(Check("h1", h1_score, h1_ok, h1_note,
                         {"count": h1_count, "values": h1_texts}))

    # --- Heading hierarchy (no skipped levels) ---------------------------
    skips: List[str] = []
    previous = 0
    for level, _text in parser.h_tags:
        if previous and level > previous + 1:
            skips.append(f"H{previous} -> H{level} skips a level")
        previous = level
    results.append(Check("heading_hierarchy", max(0, 100 - len(skips) * 25), not skips,
                         "Hierarchy OK" if not skips else f"{len(skips)} level-skip issue(s)",
                         {"headings": [f"H{lv}: {tx[:60]}" for lv, tx in parser.h_tags],
                          "issues": skips}))

    # --- Image alt text --------------------------------------------------
    img_total = len(parser.images)
    img_with_alt = sum(1 for im in parser.images
                       if im.get("alt") is not None and im["alt"].strip())
    pct = (img_with_alt / img_total * 100) if img_total else 100.0
    results.append(Check("image_alt_text", round(pct), pct == 100,
                         "All images have alt text" if pct == 100
                         else f"{img_total - img_with_alt} image(s) missing alt",
                         {"total": img_total, "with_alt": img_with_alt}))

    # --- Internal/external link ratio ------------------------------------
    link_total = len(parser.links)
    external = sum(1 for link in parser.links
                   if link["href"].startswith(("http://", "https://")))
    internal = link_total - external
    int_ratio = (internal / link_total) if link_total else 1.0
    balanced = int_ratio >= 0.5 or link_total == 0
    results.append(Check("link_ratio", 100 if balanced else round(int_ratio * 100), balanced,
                         "Good internal/external balance" if balanced
                         else "More external than internal links",
                         {"total": link_total, "internal": internal, "external": external}))

    # --- Body word count --------------------------------------------------
    words = len(re.findall(r"\b\w+\b", " ".join(parser.body_parts)))
    enough = words >= 300
    results.append(Check("word_count",
                         100 if enough else min(100, round(words / 300 * 100)), enough,
                         f"{words} words (good)" if enough else f"Only {words} words -- need 300+",
                         {"count": words}))

    # --- Viewport meta tag ------------------------------------------------
    has_viewport = parser.viewport_meta
    results.append(Check("viewport_meta", 100 if has_viewport else 0, has_viewport,
                         "Viewport tag present" if has_viewport
                         else "Missing viewport meta tag"))

    # --- Weighted overall score and letter grade -------------------------
    by_name = {c.name: c.score for c in results}
    weight_sum = sum(WEIGHTS.values())
    overall = round(sum(by_name.get(key, 0) * weight
                        for key, weight in WEIGHTS.items()) / weight_sum)
    if overall >= 90:
        grade = "A"
    elif overall >= 75:
        grade = "B"
    elif overall >= 60:
        grade = "C"
    elif overall >= 40:
        grade = "D"
    else:
        grade = "F"
    return SEOResult(overall_score=overall, grade=grade, checks=results)
151
+
152
+ def _format_text(result: SEOResult) -> str:
153
+ icons = {True: "[PASS]", False: "[FAIL]"}
154
+ lines = ["=" * 60,
155
+ f" SEO AUDIT RESULTS Overall Score: {result.overall_score}/100 Grade: {result.grade}",
156
+ "=" * 60]
157
+ for c in result.checks:
158
+ lines.append(f" {icons[c.passed]} {c.name:<22} [{c.score:>3}/100] {c.note}")
159
+ lines.append("=" * 60)
160
+ return "\n".join(lines)
161
+
162
def main() -> int:
    """CLI entry point. Returns 0=success, 1=warnings, 2=errors."""
    parser = argparse.ArgumentParser(
        description="SEO Checker -- scores an HTML page 0-100 across 8 on-page factors.")
    parser.add_argument("file", nargs="?", default=None,
                        help="HTML file to analyze (reads stdin if omitted)")
    parser.add_argument("--json", action="store_true", help="Output as JSON")
    args = parser.parse_args()

    # Resolve the HTML source: explicit file, piped stdin, else show help.
    try:
        if args.file:
            with open(args.file, "r", encoding="utf-8", errors="replace") as fh:
                html = fh.read()
        elif not sys.stdin.isatty():
            html = sys.stdin.read()
        else:
            parser.print_help()
            return 2
    except FileNotFoundError:
        print(f"Error: file not found -- {args.file}", file=sys.stderr)
        return 2
    except OSError as exc:
        print(f"Error: {exc}", file=sys.stderr)
        return 2

    if not html.strip():
        print("Error: input is empty.", file=sys.stderr)
        return 2

    report = analyze_html(html)
    if args.json:
        payload = {"overall_score": report.overall_score,
                   "grade": report.grade,
                   "checks": [asdict(c) for c in report.checks]}
        print(json.dumps(payload, indent=2))
    else:
        print(_format_text(report))
    # Warning exit (1) for weak pages, success (0) otherwise.
    return 0 if report.overall_score >= 50 else 1


if __name__ == "__main__":
    sys.exit(main())
@@ -0,0 +1,206 @@
1
+ #!/usr/bin/env python3
2
+ """Tech Debt Analyzer -- ArkaOS v2.
3
+
4
+ Scans a codebase directory for tech debt signals: TODO/FIXME counts,
5
+ large files, deep nesting, missing tests, old lock files.
6
+ Scores 0-100 with category breakdown and prioritised action list.
7
+
8
+ Usage:
9
+ python tech_debt_analyzer.py /path/to/project
10
+ python tech_debt_analyzer.py . --json
11
+ python tech_debt_analyzer.py ./src --extensions py,js,ts
12
+ """
13
+ from __future__ import annotations
14
+ import argparse, json, os, re, sys
15
+ from dataclasses import dataclass, field, asdict
16
+ from datetime import datetime
17
+ from typing import Dict, List, Optional
18
+
19
# File extensions scanned when --extensions is not given.
DEFAULT_EXT = {"py", "js", "ts", "jsx", "tsx", "php", "rb", "go", "rs", "java"}
# Line count above which a file is flagged as "large".
LARGE_FILE = 500
# Bracket-nesting depth above which a file is flagged as deeply nested.
NEST_THRESHOLD = 5
# Debt markers counted per line (whole words, case-insensitive).
TODO_RE = re.compile(r"\b(TODO|FIXME|HACK|XXX|WORKAROUND)\b", re.IGNORECASE)
# Directory names pruned from the walk (VCS metadata, dependencies, build output).
SKIP = {".git", "node_modules", "vendor", "__pycache__", ".venv", "venv",
        "dist", "build", ".next", ".nuxt", "storage", "target"}
25
+
26
@dataclass
class FileIssue:
    """One concrete tech-debt finding in a single file."""
    path: str
    issue: str
    severity: str
30
+
31
@dataclass
class CategoryScore:
    """Score and findings for one debt category."""
    name: str
    score: float
    weight: float
    issues: List[FileIssue] = field(default_factory=list)
36
+
37
@dataclass
class DebtReport:
    """Complete tech-debt analysis for one directory tree."""
    directory: str
    timestamp: str
    files_scanned: int
    total_lines: int
    overall_score: float
    level: str
    categories: List[CategoryScore] = field(default_factory=list)
    actions: List[str] = field(default_factory=list)
44
+
45
def _iter_files(root: str, extensions: set[str]):
    """Yield (relative_path, absolute_path) pairs for files under *root*
    whose extension is in *extensions*, skipping SKIP directories."""
    for dirpath, dirnames, filenames in os.walk(root):
        # Mutate dirnames in place so os.walk never descends into SKIP dirs.
        dirnames[:] = [d for d in dirnames if d not in SKIP]
        for name in filenames:
            suffix = name.rsplit(".", 1)[-1] if "." in name else ""
            if suffix not in extensions:
                continue
            absolute = os.path.join(dirpath, name)
            yield os.path.relpath(absolute, root), absolute
53
+
54
+ def _read(path: str) -> List[str]:
55
+ try:
56
+ with open(path, encoding="utf-8", errors="replace") as fh:
57
+ return fh.readlines()
58
+ except (OSError, PermissionError):
59
+ return []
60
+
61
+ def _max_nesting(lines: List[str]) -> int:
62
+ depth = mx = 0
63
+ for ln in lines:
64
+ for ch in ln:
65
+ if ch in "({[":
66
+ depth += 1; mx = max(mx, depth)
67
+ elif ch in ")}]":
68
+ depth = max(depth - 1, 0)
69
+ return mx
70
+
71
+ def _has_tests(root: str) -> bool:
72
+ return any(os.path.isdir(os.path.join(root, d))
73
+ for d in ("tests", "test", "spec", "__tests__"))
74
+
75
+ def _lock_age(root: str) -> Optional[int]:
76
+ locks = ["package-lock.json", "yarn.lock", "pnpm-lock.yaml",
77
+ "composer.lock", "Pipfile.lock", "poetry.lock", "Cargo.lock"]
78
+ mt = max((os.path.getmtime(os.path.join(root, f))
79
+ for f in locks if os.path.isfile(os.path.join(root, f))), default=0.0)
80
+ return int((datetime.now().timestamp() - mt) / 86400) if mt else None
81
+
82
def analyze(root: str, extensions: set[str]) -> DebtReport:
    """Scan directory and produce a debt report.

    Walks *root* (pruning SKIP directories), scores five categories 0-100
    (higher = more debt), weights each at 0.20, then derives an overall
    score, a debt level and a prioritised action list.
    """
    root = os.path.abspath(root)
    # One CategoryScore per category; all five weigh 0.20 (5 x 0.20 = 1.0).
    cats = {n: CategoryScore(name=n, score=0, weight=0.20) for n in
            ("TODO/FIXME markers", "Large files", "Deep nesting",
             "Test coverage signals", "Dependency freshness")}
    total_files = total_lines = todo_count = large_count = deep_count = 0

    for rel, full in _iter_files(root, extensions):
        lines = _read(full)
        lc = len(lines); total_files += 1; total_lines += lc
        # Lines containing TODO/FIXME/HACK/XXX/WORKAROUND markers.
        ft = sum(1 for ln in lines if TODO_RE.search(ln))
        if ft:
            todo_count += ft
            cats["TODO/FIXME markers"].issues.append(
                FileIssue(rel, f"{ft} markers", "high" if ft > 10 else "medium" if ft > 3 else "low"))
        if lc > LARGE_FILE:
            large_count += 1
            cats["Large files"].issues.append(
                FileIssue(rel, f"{lc} lines", "high" if lc > 1000 else "medium"))
        nest = _max_nesting(lines)
        if nest > NEST_THRESHOLD:
            deep_count += 1
            cats["Deep nesting"].issues.append(
                FileIssue(rel, f"depth {nest}", "high" if nest > 8 else "medium"))

    # Marker score: markers per 10k lines, capped at 100.
    if total_lines > 0:
        cats["TODO/FIXME markers"].score = min(100, round((todo_count / total_lines) * 10000, 1))
    if total_files > 0:
        # x200 scaling: hits the 100 cap when half of all files are affected.
        cats["Large files"].score = min(100, round((large_count / total_files) * 200, 1))
        cats["Deep nesting"].score = min(100, round((deep_count / total_files) * 200, 1))

    ct = cats["Test coverage signals"]
    if not _has_tests(root):
        ct.score = 80.0; ct.issues.append(FileIssue(".", "No test directory found", "high"))
    else:
        # Second walk: count files whose relative path mentions test/spec.
        test_n = sum(1 for r, _ in _iter_files(root, extensions)
                     if "test" in r.lower() or "spec" in r.lower())
        # Zero debt once test-ish files reach 25% of all files (the x4 factor);
        # below that, debt rises linearly to 100.
        ct.score = max(0, round((1 - min((test_n / max(total_files, 1)) * 4, 1)) * 100, 1))

    cd = cats["Dependency freshness"]
    age = _lock_age(root)
    if age is None:
        cd.score = 40.0; cd.issues.append(FileIssue(".", "No lock file found", "medium"))
    else:
        # age / 3.6 reaches 100 debt at ~360 days; capped at 100.
        cd.score = min(100, round(age / 3.6, 1))
        if age > 90:
            cd.issues.append(FileIssue(".", f"Lock file {age} days old",
                                       "high" if age > 180 else "medium"))

    cat_list = list(cats.values())
    # Weighted sum; with equal 0.20 weights this is the category mean.
    overall = round(sum(c.score * c.weight for c in cat_list), 1)
    level = ("Low" if overall < 20 else "Moderate" if overall < 40 else
             "Elevated" if overall < 60 else "High" if overall < 80 else "Critical")

    actions: List[str] = []
    if overall >= 60:
        actions.append("[URGENT] Dedicate 30%+ capacity to debt reduction")
    elif overall >= 40:
        actions.append("[PLAN] Allocate 20% of sprints to debt reduction")
    # Worst categories first; per-category thresholds at 50 (high) and 25 (med).
    for c in sorted(cat_list, key=lambda x: x.score, reverse=True):
        if c.score > 50:
            actions.append(f"[HIGH] Address {c.name} (score: {c.score})")
        elif c.score > 25:
            actions.append(f"[MED] Improve {c.name} (score: {c.score})")

    return DebtReport(directory=root, timestamp=datetime.now().isoformat(timespec="seconds"),
                      files_scanned=total_files, total_lines=total_lines, overall_score=overall,
                      level=level, categories=cat_list, actions=actions)
151
+
152
def format_report(r: DebtReport) -> str:
    """Render the report as plain text for terminal output."""
    bar = "=" * 60
    out = [bar, "TECH DEBT ANALYSIS REPORT", bar,
           f"Directory: {r.directory}",
           f"Scanned: {r.files_scanned} files, {r.total_lines:,} lines",
           f"Timestamp: {r.timestamp}", "",
           f"OVERALL SCORE: {r.overall_score}/100 ({r.level})", "",
           "CATEGORY BREAKDOWN",
           f" {'Category':<28} {'Score':>6} {'Weight':>6} Issues",
           " " + "-" * 55]
    for cat in r.categories:
        out.append(f" {cat.name:<28} {cat.score:>5.1f} {cat.weight * 100:>5.0f}% {len(cat.issues)}")
    out += ["", "TOP ISSUES"]
    rank = {"high": 0, "medium": 1, "low": 2}
    for cat in sorted(r.categories, key=lambda c: c.score, reverse=True):
        if not cat.issues:
            continue
        out.append(f" {cat.name}:")
        # Up to five issues per category, most severe first.
        worst = sorted(cat.issues, key=lambda i: rank[i.severity])[:5]
        for iss in worst:
            out.append(f" [{iss.severity.upper()}] {iss.path}: {iss.issue}")
    out += ["", "PRIORITISED ACTIONS"]
    for idx, action in enumerate(r.actions, 1):
        out.append(f" {idx}. {action}")
    out += ["", bar]
    return "\n".join(out)
176
+
177
def to_json(r: DebtReport) -> str:
    """Serialise *r* (including nested dataclasses) to pretty-printed JSON."""
    return json.dumps(asdict(r), indent=2)
179
+
180
def main() -> int:
    """CLI entry point: parse args, run the scan, print, map score to exit code."""
    parser = argparse.ArgumentParser(
        description="Tech Debt Analyzer -- scan a codebase for debt signals")
    parser.add_argument("directory", nargs="?", default=".",
                        help="Directory to scan (default: current directory)")
    parser.add_argument("--extensions", "-e", default=None,
                        help="Comma-separated file extensions (default: py,js,ts,...)")
    parser.add_argument("--json", action="store_true", help="Output as JSON")
    args = parser.parse_args()

    if not os.path.isdir(args.directory):
        print(f"Error: '{args.directory}' is not a directory", file=sys.stderr)
        return 2

    if args.extensions:
        wanted = {e.strip().lstrip(".") for e in args.extensions.split(",") if e.strip()}
    else:
        wanted = DEFAULT_EXT

    report = analyze(args.directory, wanted)
    print(to_json(report) if args.json else format_report(report))

    # Exit code mirrors debt severity: 2 at >= 60, 1 at >= 40, else 0.
    if report.overall_score >= 60:
        return 2
    if report.overall_score >= 40:
        return 1
    return 0


if __name__ == "__main__":
    sys.exit(main())