arkaos 2.2.2 → 2.3.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/VERSION +1 -1
- package/arka/skills/conclave/SKILL.md +194 -0
- package/arka/skills/human-writing/SKILL.md +143 -0
- package/config/agent-memory-template.md +28 -0
- package/config/disc-profiles.json +108 -0
- package/config/disc-team-validator.sh +94 -0
- package/config/gotchas-fixes.json +148 -0
- package/config/profile-template.json +12 -0
- package/config/providers-registry.json +56 -0
- package/config/settings-template.json +42 -0
- package/config/standards/communication.md +64 -0
- package/config/standards/orchestration.md +91 -0
- package/config/statusline-v2.sh +101 -0
- package/config/statusline.sh +139 -0
- package/config/system-prompt.sh +190 -0
- package/dashboard/LICENSE +21 -0
- package/dashboard/README.md +64 -0
- package/dashboard/app/app.config.ts +8 -0
- package/dashboard/app/app.vue +42 -0
- package/dashboard/app/assets/css/main.css +18 -0
- package/dashboard/app/composables/useApi.ts +8 -0
- package/dashboard/app/composables/useDashboard.ts +19 -0
- package/dashboard/app/error.vue +24 -0
- package/dashboard/app/layouts/default.vue +114 -0
- package/dashboard/app/pages/agents/[id].vue +506 -0
- package/dashboard/app/pages/agents/index.vue +225 -0
- package/dashboard/app/pages/budget.vue +132 -0
- package/dashboard/app/pages/commands.vue +180 -0
- package/dashboard/app/pages/health.vue +98 -0
- package/dashboard/app/pages/index.vue +126 -0
- package/dashboard/app/pages/knowledge.vue +729 -0
- package/dashboard/app/pages/personas.vue +597 -0
- package/dashboard/app/pages/settings.vue +146 -0
- package/dashboard/app/pages/tasks.vue +203 -0
- package/dashboard/app/types/index.d.ts +181 -0
- package/dashboard/app/utils/index.ts +7 -0
- package/dashboard/nuxt.config.ts +39 -0
- package/dashboard/package.json +37 -0
- package/dashboard/pnpm-workspace.yaml +7 -0
- package/dashboard/tsconfig.json +10 -0
- package/installer/cli.js +0 -0
- package/installer/index.js +262 -62
- package/knowledge/INDEX.md +34 -0
- package/knowledge/agents-registry.json +254 -0
- package/knowledge/channels-config.json +6 -0
- package/knowledge/commands-keywords.json +466 -0
- package/knowledge/commands-registry.json +2791 -0
- package/knowledge/commands-registry.json.bak +2791 -0
- package/knowledge/ecosystems.json +7 -0
- package/knowledge/obsidian-config.json +112 -0
- package/package.json +10 -6
- package/pyproject.toml +1 -1
- package/scripts/check-version.js +13 -0
- package/scripts/dashboard-api.py +636 -0
- package/scripts/knowledge-index.py +113 -0
- package/scripts/skill_validator.py +217 -0
- package/scripts/start-dashboard.sh +54 -0
- package/scripts/synapse-bridge.py +199 -0
- package/scripts/tools/brand_voice_analyzer.py +192 -0
- package/scripts/tools/dcf_calculator.py +168 -0
- package/scripts/tools/headline_scorer.py +215 -0
- package/scripts/tools/okr_cascade.py +207 -0
- package/scripts/tools/rice_prioritizer.py +230 -0
- package/scripts/tools/saas_metrics.py +234 -0
- package/scripts/tools/seo_checker.py +197 -0
- package/scripts/tools/tech_debt_analyzer.py +206 -0
|
@@ -0,0 +1,192 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""Brand Voice Analyzer -- score content 0-100 for voice consistency and readability.
|
|
3
|
+
Part of ArkaOS v2 -- stdlib-only, no pip dependencies.
|
|
4
|
+
"""
|
|
5
|
+
from __future__ import annotations
|
|
6
|
+
import argparse, json, re, sys
|
|
7
|
+
from dataclasses import asdict, dataclass, field
|
|
8
|
+
from typing import Dict, List
|
|
9
|
+
|
|
10
|
+
# Keyword lexicons used for voice classification.  Each top-level key is a
# voice dimension; each inner key is a competing category whose keywords are
# counted against the lowercased input text (see _score_voice), so all
# keywords here must be lowercase.
VOICE_DIMENSIONS: Dict[str, Dict[str, List[str]]] = {
    # How formal the register is.
    "formality": {
        "formal": ["hereby", "therefore", "furthermore", "pursuant", "regarding",
                   "accordingly", "henceforth", "notwithstanding", "whereas"],
        "casual": ["hey", "cool", "awesome", "stuff", "yeah", "gonna", "wanna",
                   "kinda", "gotta", "yep"],
    },
    # Overall emotional register of the writing.
    "tone": {
        "professional": ["expertise", "solution", "optimize", "leverage", "strategic",
                         "implement", "framework", "methodology", "deliverable"],
        "friendly": ["happy", "excited", "love", "enjoy", "together", "share",
                     "amazing", "wonderful", "great", "fun"],
    },
    # Narrative stance toward the reader.
    "perspective": {
        "authoritative": ["proven", "research shows", "experts agree", "data indicates",
                          "studies confirm", "evidence suggests"],
        "conversational": ["you might", "let's explore", "we think", "imagine if",
                           "have you ever", "picture this"],
    },
}
|
|
30
|
+
|
|
31
|
+
@dataclass
class SentenceAnalysis:
    """Summary statistics about sentence structure in the analyzed text."""
    count: int = 0                # number of sentences found
    average_length: float = 0.0   # mean words per sentence, rounded to 1 decimal
    variety: str = "low"          # "low" / "medium" / "high" by distinct sentence lengths
    shortest: int = 0             # word count of the shortest sentence
    longest: int = 0              # word count of the longest sentence
|
|
38
|
+
|
|
39
|
+
@dataclass
class VoiceDimension:
    """Scores for one voice dimension (e.g. formality) across its categories."""
    dominant: str = ""  # category with the highest score, or "neutral" when no keyword hit
    scores: Dict[str, int] = field(default_factory=dict)  # category -> keyword hit count
|
|
43
|
+
|
|
44
|
+
@dataclass
class AnalysisResult:
    """Complete output of one brand-voice analysis (built by analyze())."""
    word_count: int = 0              # whitespace-delimited word count
    readability_score: float = 0.0   # Flesch reading ease, clamped to 0-100
    readability_grade: str = ""      # human label derived from readability_score
    overall_score: int = 0           # weighted 0-100 composite (see _compute_overall)
    voice_profile: Dict[str, VoiceDimension] = field(default_factory=dict)
    sentence_analysis: SentenceAnalysis = field(default_factory=SentenceAnalysis)
    recommendations: List[str] = field(default_factory=list)
|
|
53
|
+
|
|
54
|
+
def _count_syllables(word: str) -> int:
|
|
55
|
+
word = word.lower().strip(".,!?;:'\"")
|
|
56
|
+
count, prev = 0, False
|
|
57
|
+
for ch in word:
|
|
58
|
+
v = ch in "aeiou"
|
|
59
|
+
if v and not prev:
|
|
60
|
+
count += 1
|
|
61
|
+
prev = v
|
|
62
|
+
if word.endswith("e") and count > 1:
|
|
63
|
+
count -= 1
|
|
64
|
+
return max(1, count)
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
def _flesch_reading_ease(text: str) -> float:
    """Return the Flesch reading-ease score of *text*, clamped to [0, 100].

    Classic formula: 206.835 - 1.015*(words/sentence) - 84.6*(syllables/word).
    Empty input scores 0.0.
    """
    sentence_list = [chunk.strip() for chunk in re.split(r"[.!?]+", text) if chunk.strip()]
    word_list = text.split()
    if not (sentence_list and word_list):
        return 0.0
    total_syllables = sum(_count_syllables(token) for token in word_list)
    words_per_sentence = len(word_list) / len(sentence_list)
    syllables_per_word = total_syllables / len(word_list)
    raw = 206.835 - 1.015 * words_per_sentence - 84.6 * syllables_per_word
    return round(min(100.0, max(0.0, raw)), 1)
|
|
75
|
+
|
|
76
|
+
def _readability_grade(score: float) -> str:
|
|
77
|
+
if score >= 80: return "Easy"
|
|
78
|
+
if score >= 60: return "Standard"
|
|
79
|
+
if score >= 40: return "Fairly Difficult"
|
|
80
|
+
if score >= 20: return "Difficult"
|
|
81
|
+
return "Very Difficult"
|
|
82
|
+
|
|
83
|
+
def _analyze_sentences(text: str) -> SentenceAnalysis:
    """Compute sentence count, length statistics, and length variety for *text*."""
    pieces = [piece.strip() for piece in re.split(r"[.!?]+", text) if piece.strip()]
    if not pieces:
        return SentenceAnalysis()
    word_counts = [len(piece.split()) for piece in pieces]
    mean_length = sum(word_counts) / len(word_counts)
    # Variety is judged by how many *distinct* sentence lengths occur.
    distinct = len(set(word_counts))
    if distinct >= 5:
        spread = "high"
    elif distinct >= 3:
        spread = "medium"
    else:
        spread = "low"
    return SentenceAnalysis(count=len(pieces),
                            average_length=round(mean_length, 1),
                            variety=spread,
                            shortest=min(word_counts),
                            longest=max(word_counts))
|
|
93
|
+
|
|
94
|
+
def _score_voice(text: str) -> Dict[str, VoiceDimension]:
    """Score *text* against every voice dimension in VOICE_DIMENSIONS.

    For each dimension, counts how many of each category's keywords occur in
    the lowercased text and picks the dominant category ("neutral" when no
    keyword matches at all).

    Fix: keywords are matched on word boundaries instead of raw substring
    containment, so e.g. "hey" no longer matches inside "they" and "fun" no
    longer matches inside "fundamental".  Multi-word phrases such as
    "research shows" still match, since spaces are preserved in the pattern.
    """
    text_lower = text.lower()
    profile: Dict[str, VoiceDimension] = {}
    for dimension, categories in VOICE_DIMENSIONS.items():
        scores = {
            cat: sum(1 for kw in kws
                     if re.search(r"\b" + re.escape(kw) + r"\b", text_lower))
            for cat, kws in categories.items()
        }
        dominant = max(scores, key=scores.get) if sum(scores.values()) > 0 else "neutral"
        profile[dimension] = VoiceDimension(dominant=dominant, scores=scores)
    return profile
|
|
102
|
+
|
|
103
|
+
def _compute_overall(readability: float, sentence: SentenceAnalysis) -> int:
|
|
104
|
+
variety_map = {"high": 100, "medium": 60, "low": 30}
|
|
105
|
+
return round(readability * 0.50 + variety_map.get(sentence.variety, 30) * 0.25
|
|
106
|
+
+ min(100, sentence.count * 20) * 0.25)
|
|
107
|
+
|
|
108
|
+
def _generate_recommendations(result: AnalysisResult) -> List[str]:
|
|
109
|
+
recs: List[str] = []
|
|
110
|
+
if result.readability_score < 30:
|
|
111
|
+
recs.append("Simplify language -- shorter words and sentences improve readability.")
|
|
112
|
+
elif result.readability_score > 80:
|
|
113
|
+
recs.append("Content is very easy to read -- verify this matches your audience.")
|
|
114
|
+
if result.sentence_analysis.variety == "low":
|
|
115
|
+
recs.append("Vary sentence length for better rhythm and engagement.")
|
|
116
|
+
if result.sentence_analysis.longest > 35:
|
|
117
|
+
recs.append(f"Longest sentence is {result.sentence_analysis.longest} words -- consider splitting.")
|
|
118
|
+
if result.word_count < 100:
|
|
119
|
+
recs.append("Very short content -- consider expanding for depth.")
|
|
120
|
+
for dim, vd in result.voice_profile.items():
|
|
121
|
+
if sum(vd.scores.values()) == 0:
|
|
122
|
+
recs.append(f"No clear {dim} signals detected -- add stronger voice cues.")
|
|
123
|
+
return recs
|
|
124
|
+
|
|
125
|
+
def analyze(text: str) -> AnalysisResult:
    """Analyze *text* and return a fully populated AnalysisResult."""
    readability = _flesch_reading_ease(text)
    sentences = _analyze_sentences(text)
    report = AnalysisResult(
        word_count=len(text.split()),
        readability_score=readability,
        readability_grade=_readability_grade(readability),
        voice_profile=_score_voice(text),
        sentence_analysis=sentences,
    )
    # Overall score and recommendations both depend on the fields above.
    report.overall_score = _compute_overall(readability, sentences)
    report.recommendations = _generate_recommendations(report)
    return report
|
|
135
|
+
|
|
136
|
+
def _format_text(result: AnalysisResult) -> str:
    """Render *result* as a fixed-width plain-text report."""
    # Header plus top-level metrics.
    lines = ["=" * 60,
             f" BRAND VOICE ANALYSIS Overall Score: {result.overall_score}/100",
             "=" * 60,
             f" Word Count: {result.word_count}",
             f" Readability: {result.readability_score}/100 ({result.readability_grade})",
             "", " Voice Profile:"]
    # One line per voice dimension: dominant category plus per-category hit counts.
    for dim, vd in result.voice_profile.items():
        detail = ", ".join(f"{k}={v}" for k, v in vd.scores.items())
        lines.append(f" {dim:<16} {vd.dominant:<16} ({detail})")
    lines += ["", " Sentence Analysis:",
              f" Count: {result.sentence_analysis.count}",
              f" Avg length: {result.sentence_analysis.average_length} words",
              f" Variety: {result.sentence_analysis.variety}",
              f" Shortest: {result.sentence_analysis.shortest} words",
              f" Longest: {result.sentence_analysis.longest} words",
              "", " Recommendations:"]
    for rec in result.recommendations:
        lines.append(f" - {rec}")
    lines.append("=" * 60)
    return "\n".join(lines)
|
|
157
|
+
|
|
158
|
+
def main() -> int:
    """Entry point. Returns exit code: 0=success, 1=warnings, 2=errors."""
    parser = argparse.ArgumentParser(
        description="Brand Voice Analyzer -- score content 0-100 for voice consistency and readability.")
    parser.add_argument("file", nargs="?", default=None,
                        help="Text file to analyze (reads stdin if omitted)")
    parser.add_argument("--json", action="store_true", help="Output results as JSON")
    args = parser.parse_args()
    try:
        # Input priority: explicit file argument, then piped stdin, else show help.
        if args.file:
            with open(args.file, "r", encoding="utf-8") as fh:
                text = fh.read()
        elif not sys.stdin.isatty():
            text = sys.stdin.read()
        else:
            parser.print_help()
            return 2
    except FileNotFoundError:
        print(f"Error: file not found -- {args.file}", file=sys.stderr)
        return 2
    except OSError as exc:
        print(f"Error: {exc}", file=sys.stderr)
        return 2
    if not text.strip():
        print("Error: input is empty.", file=sys.stderr)
        return 2
    result = analyze(text)
    if args.json:
        # asdict() recursively converts the nested dataclasses to plain dicts.
        print(json.dumps(asdict(result), indent=2))
    else:
        print(_format_text(result))
    # Exit code 1 flags a low-quality (sub-40) score to calling scripts.
    return 1 if result.overall_score < 40 else 0

if __name__ == "__main__":
    sys.exit(main())
|
|
@@ -0,0 +1,168 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""DCF Calculator -- ArkaOS v2.
|
|
3
|
+
|
|
4
|
+
Discounted Cash Flow valuation with projected FCF, terminal value,
|
|
5
|
+
enterprise value, equity value, and WACC-vs-terminal-growth sensitivity table.
|
|
6
|
+
|
|
7
|
+
Usage:
|
|
8
|
+
python dcf_calculator.py --revenue 10000000 --growth 0.15 --margin 0.12 --wacc 0.10
|
|
9
|
+
python dcf_calculator.py --revenue 5e6 --growth 0.20 --margin 0.15 --wacc 0.09 --json
|
|
10
|
+
python dcf_calculator.py --revenue 5e6 --growth 0.20 --margin 0.15 --wacc 0.09 --net-debt 1e6
|
|
11
|
+
"""
|
|
12
|
+
from __future__ import annotations
|
|
13
|
+
import argparse, json, sys
|
|
14
|
+
from dataclasses import dataclass, field, asdict
|
|
15
|
+
from typing import Dict, List, Optional
|
|
16
|
+
|
|
17
|
+
@dataclass
class Projection:
    """Cash-flow projection for one forecast year."""
    year: int
    revenue: float
    fcf: float
    discount_factor: float
    pv_fcf: float
|
|
21
|
+
|
|
22
|
+
@dataclass
class DCFResult:
    """Complete DCF valuation output."""
    wacc: float
    terminal_growth: float
    years: int
    projections: List[Projection]
    terminal_value: float
    pv_terminal: float
    pv_fcf_total: float
    enterprise_value: float
    net_debt: float
    equity_value: float
    shares: Optional[float] = None
    value_per_share: Optional[float] = None
|
|
30
|
+
|
|
31
|
+
def run_dcf(revenue: float, growth: float, margin: float, wacc: float,
            terminal_growth: float = 0.025, years: int = 5,
            net_debt: float = 0.0, shares: Optional[float] = None) -> DCFResult:
    """Run a full DCF valuation and return structured results.

    Args:
        revenue: Base (year-0) annual revenue.
        growth: Annual revenue growth rate as a decimal (0.15 == 15%).
        margin: Free-cash-flow margin applied to each projected revenue.
        wacc: Discount rate; must exceed terminal_growth.
        terminal_growth: Perpetual growth rate beyond the projection window.
        years: Number of explicit projection years (>= 1).
        net_debt: Subtracted from enterprise value to get equity value.
        shares: Optional share count; enables value_per_share.

    Raises:
        ValueError: if wacc <= terminal_growth or years < 1.  (Previously,
            calling this directly with wacc == terminal_growth raised
            ZeroDivisionError, a smaller wacc silently produced a negative
            terminal value, and years < 1 raised IndexError; only the CLI
            validated inputs.)
    """
    if wacc <= terminal_growth:
        raise ValueError("WACC must be greater than the terminal growth rate")
    if years < 1:
        raise ValueError("years must be at least 1")
    projections: List[Projection] = []
    pv_fcf_total, current_revenue = 0.0, revenue
    for yr in range(1, years + 1):
        current_revenue *= 1 + growth
        fcf = current_revenue * margin
        df = (1 + wacc) ** yr  # discount factor for this year
        pv = fcf / df
        pv_fcf_total += pv
        projections.append(Projection(year=yr, revenue=current_revenue, fcf=fcf,
                                      discount_factor=df, pv_fcf=pv))
    # Gordon growth terminal value, then discount it back to today.
    terminal_fcf = projections[-1].fcf
    tv = (terminal_fcf * (1 + terminal_growth)) / (wacc - terminal_growth)
    pv_tv = tv / ((1 + wacc) ** years)
    ev = pv_fcf_total + pv_tv
    eq = ev - net_debt
    vps = (eq / shares) if shares and shares > 0 else None
    return DCFResult(wacc=wacc, terminal_growth=terminal_growth, years=years,
                     projections=projections, terminal_value=tv, pv_terminal=pv_tv,
                     pv_fcf_total=pv_fcf_total, enterprise_value=ev, net_debt=net_debt,
                     equity_value=eq, shares=shares, value_per_share=vps)
|
|
55
|
+
|
|
56
|
+
def sensitivity_table(result: DCFResult, steps: int = 5) -> Dict:
    """Build a WACC vs terminal-growth sensitivity grid for enterprise value.

    The grid is centered on the result's WACC (1pp increments) and terminal
    growth (0.5pp increments).  Cells where WACC <= growth or WACC <= 0 are
    None because the Gordon growth formula is undefined there.
    """
    half = steps // 2
    wacc_values = [round(result.wacc + (idx - half) * 0.01, 4) for idx in range(steps)]
    growth_values = [round(result.terminal_growth + (idx - half) * 0.005, 4)
                     for idx in range(steps)]
    final_fcf = result.projections[-1].fcf
    horizon = result.years
    grid: List[List[Optional[float]]] = []
    for w in wacc_values:
        current_row: List[Optional[float]] = []
        for g in growth_values:
            if w <= g or w <= 0:
                current_row.append(None)
            else:
                discounted = sum(p.fcf / ((1 + w) ** p.year) for p in result.projections)
                terminal = (final_fcf * (1 + g)) / (w - g)
                current_row.append(round(discounted + terminal / ((1 + w) ** horizon), 2))
        grid.append(current_row)
    return {"wacc_values": wacc_values, "growth_values": growth_values, "ev_table": grid}
|
|
73
|
+
|
|
74
|
+
def _fmt(val: float) -> str:
|
|
75
|
+
"""Format a monetary value for display."""
|
|
76
|
+
if abs(val) >= 1e9: return f"${val / 1e9:,.2f}B"
|
|
77
|
+
if abs(val) >= 1e6: return f"${val / 1e6:,.2f}M"
|
|
78
|
+
if abs(val) >= 1e3: return f"${val / 1e3:,.1f}K"
|
|
79
|
+
return f"${val:,.2f}"
|
|
80
|
+
|
|
81
|
+
def format_text(result: DCFResult, sens: Dict) -> str:
    """Render human-readable report."""
    # Header with the key valuation assumptions.
    lines = ["=" * 65, "DCF VALUATION ANALYSIS", "=" * 65, "",
             f"WACC: {result.wacc * 100:.2f}% | Terminal growth: "
             f"{result.terminal_growth * 100:.2f}% | Projection: {result.years} years",
             "", "PROJECTED CASH FLOWS",
             f" {'Year':>5} {'Revenue':>14} {'FCF':>14} {'PV(FCF)':>14}",
             " " + "-" * 52]
    # One table row per projection year.
    for p in result.projections:
        lines.append(f" {p.year:>5} {_fmt(p.revenue):>14} "
                     f"{_fmt(p.fcf):>14} {_fmt(p.pv_fcf):>14}")
    lines += ["", "VALUATION SUMMARY",
              f" PV of projected FCFs: {_fmt(result.pv_fcf_total)}",
              f" Terminal value: {_fmt(result.terminal_value)}",
              f" PV of terminal value: {_fmt(result.pv_terminal)}",
              f" Enterprise value: {_fmt(result.enterprise_value)}",
              f" Net debt: {_fmt(result.net_debt)}",
              f" Equity value: {_fmt(result.equity_value)}"]
    if result.value_per_share is not None:
        lines.append(f" Value per share: ${result.value_per_share:,.2f}")
    # Sensitivity grid: rows are WACC values, columns are terminal-growth values.
    lines += ["", "SENSITIVITY: Enterprise Value (WACC vs Terminal Growth)", ""]
    header = f" {'WACC \\\\ g':>10}"
    for g in sens["growth_values"]:
        header += f" {g * 100:>7.2f}%"
    lines.append(header)
    lines.append(" " + "-" * (10 + 10 * len(sens["growth_values"])))
    for i, w in enumerate(sens["wacc_values"]):
        row = f" {w * 100:>9.2f}%"
        # None cells mark WACC/growth combinations where the formula is undefined.
        for val in sens["ev_table"][i]:
            row += f" {_fmt(val):>8}" if val is not None else f" {'N/A':>8}"
        lines.append(row)
    lines += ["", "=" * 65]
    return "\n".join(lines)
|
|
114
|
+
|
|
115
|
+
def to_json(result: DCFResult, sens: Dict) -> str:
    """Serialise the valuation plus its sensitivity grid to pretty JSON.

    Monetary figures are rounded to two decimals for readability.
    """
    payload = asdict(result)
    payload["sensitivity"] = sens
    projection_keys = ("revenue", "fcf", "pv_fcf", "discount_factor")
    for row in payload["projections"]:
        row.update({key: round(row[key], 2) for key in projection_keys})
    summary_keys = ("terminal_value", "pv_terminal", "pv_fcf_total",
                    "enterprise_value", "equity_value")
    for key in summary_keys:
        payload[key] = round(payload[key], 2)
    per_share = payload["value_per_share"]
    if per_share is not None:
        payload["value_per_share"] = round(per_share, 2)
    return json.dumps(payload, indent=2)
|
|
128
|
+
|
|
129
|
+
def main() -> int:
    """Entry point.  Returns 0 on success, 2 on invalid arguments."""
    parser = argparse.ArgumentParser(
        description="DCF Valuation Calculator -- enterprise and equity valuation")
    parser.add_argument("--revenue", type=float, required=True,
                        help="Base annual revenue (e.g. 10000000 or 1e7)")
    parser.add_argument("--growth", type=float, required=True,
                        help="Annual revenue growth rate as decimal (e.g. 0.15)")
    parser.add_argument("--margin", type=float, required=True,
                        help="Free cash flow margin as decimal (e.g. 0.12)")
    parser.add_argument("--wacc", type=float, required=True,
                        help="Weighted average cost of capital (e.g. 0.10)")
    parser.add_argument("--terminal-growth", type=float, default=0.025,
                        help="Terminal growth rate (default: 0.025)")
    parser.add_argument("--years", type=int, default=5,
                        help="Projection years (default: 5)")
    parser.add_argument("--net-debt", type=float, default=0.0,
                        help="Net debt to subtract for equity value (default: 0)")
    parser.add_argument("--shares", type=float, default=None,
                        help="Shares outstanding for per-share value")
    parser.add_argument("--json", action="store_true", help="Output as JSON")
    args = parser.parse_args()
    # Gordon growth terminal value is undefined unless WACC > terminal growth.
    if args.wacc <= args.terminal_growth:
        print("Error: WACC must be greater than terminal growth rate", file=sys.stderr)
        return 2
    if args.revenue <= 0:
        print("Error: revenue must be positive", file=sys.stderr)
        return 2
    result = run_dcf(revenue=args.revenue, growth=args.growth, margin=args.margin,
                     wacc=args.wacc, terminal_growth=args.terminal_growth,
                     years=args.years, net_debt=args.net_debt, shares=args.shares)
    sens = sensitivity_table(result)
    if args.json:
        print(to_json(result, sens))
    else:
        print(format_text(result, sens))
    return 0

if __name__ == "__main__":
    sys.exit(main())
|
|
@@ -0,0 +1,215 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""Headline Scorer -- rates headlines 0-100 across 6 dimensions.
|
|
3
|
+
Part of ArkaOS v2 -- stdlib-only, no pip dependencies.
|
|
4
|
+
"""
|
|
5
|
+
from __future__ import annotations
|
|
6
|
+
import argparse, json, re, sys
|
|
7
|
+
from dataclasses import asdict, dataclass, field
|
|
8
|
+
from typing import Dict, List, Tuple
|
|
9
|
+
|
|
10
|
+
# Words that tend to boost engagement; grouped by the persuasion lever they pull.
# Scored by _score_power_words on lowercased tokens, so entries must be lowercase.
POWER_WORDS = frozenset({
    # urgency / scarcity
    "now", "today", "instantly", "immediately", "urgent", "limited",
    "exclusive", "last", "hurry", "deadline", "expires", "fast",
    # value / benefit
    "free", "save", "proven", "guaranteed", "results", "boost",
    "increase", "grow", "maximize", "unlock", "secret", "revealed",
    "transform", "master", "ultimate", "best", "top", "powerful",
    # curiosity / intrigue
    "discover", "uncover", "surprising", "shocking", "hidden",
    "unknown", "insider", "hack", "trick", "truth",
    # authority
    "experts", "researchers", "scientists", "officially", "certified",
    "award-winning", "world-class",
    # ease
    "easy", "simple", "effortless", "quick", "step-by-step",
    "foolproof", "beginner", "without",
    # negative triggers
    "avoid", "stop", "never", "mistake", "fail", "warning", "danger",
    "worst", "deadly", "risky",
})

# Emotion-laden words counted by _score_emotional.
EMOTIONAL_TRIGGERS = frozenset({
    "love", "hate", "fear", "hope", "joy", "pain", "anger", "envy",
    "trust", "doubt", "regret", "pride", "shame", "relief", "success",
    "failure", "happiness", "frustration", "excitement", "anxiety",
    "lonely", "powerful", "confident", "inspired",
})

# Corporate buzzwords that reduce the clarity score (_score_clarity).
JARGON_WORDS = frozenset({
    "synergy", "leverage", "disruptive", "paradigm", "scalable",
    "bandwidth", "holistic", "ecosystem", "utilize", "facilitate",
    "ideate", "incentivize", "stakeholders", "deliverables",
    "actionable", "bespoke", "granular",
})

# Per-dimension weights for the overall score; they sum to 1.0 and must stay
# in sync with the display-only percentage strings built in score_headline.
WEIGHTS: Dict[str, float] = {"power_words": 0.25, "emotional_triggers": 0.15,
    "numbers": 0.15, "length": 0.20, "specificity": 0.15, "clarity": 0.10}
|
|
48
|
+
|
|
49
|
+
@dataclass
class DimensionScore:
    """Score for one scoring dimension of a headline."""
    score: int    # 0-100 for this dimension
    weight: str   # display-only weight label, e.g. "25%" (actual weights live in WEIGHTS)
    detail: str   # human-readable explanation of what drove the score
|
|
54
|
+
|
|
55
|
+
@dataclass
class HeadlineResult:
    """Aggregate scoring result for a single headline."""
    headline: str                   # the original headline text
    overall_score: int = 0          # weighted 0-100 composite
    grade: str = ""                 # letter grade A-F derived from overall_score
    breakdown: Dict[str, DimensionScore] = field(default_factory=dict)
|
|
61
|
+
|
|
62
|
+
def _tokenize(headline: str) -> List[str]:
|
|
63
|
+
return re.findall(r"\b\w+(?:[-']\w+)*\b", headline.lower())
|
|
64
|
+
|
|
65
|
+
|
|
66
|
+
def _score_power_words(tokens: List[str]) -> Tuple[int, List[str]]:
    """Score power-word usage: 35 points per hit plus a 10-point base, capped at 100."""
    hits = [token for token in tokens if token in POWER_WORDS]
    base = 10 if hits else 0
    return min(100, base + 35 * len(hits)), hits
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
def _score_emotional(tokens: List[str]) -> Tuple[int, List[str]]:
    """Score emotional-trigger usage: 50 points per hit, capped at 100."""
    hits = [token for token in tokens if token in EMOTIONAL_TRIGGERS]
    return min(100, 50 * len(hits)), hits
|
|
76
|
+
|
|
77
|
+
|
|
78
|
+
def _score_numbers(headline: str) -> Tuple[int, List[str]]:
|
|
79
|
+
nums = re.findall(r"\b\d+(?:[,.]\d+)?%?\b", headline)
|
|
80
|
+
return (100 if nums else 0), nums
|
|
81
|
+
|
|
82
|
+
|
|
83
|
+
def _score_length(tokens: List[str]) -> Tuple[int, str]:
|
|
84
|
+
n = len(tokens)
|
|
85
|
+
if 6 <= n <= 12:
|
|
86
|
+
return 100, f"{n} words -- optimal (6-12)"
|
|
87
|
+
if n < 6:
|
|
88
|
+
return max(0, 40 + (n - 1) * 12), f"{n} words -- too short (6-12 optimal)"
|
|
89
|
+
return max(0, 100 - (n - 12) * 10), f"{n} words -- too long (6-12 optimal)"
|
|
90
|
+
|
|
91
|
+
|
|
92
|
+
def _score_specificity(headline: str) -> Tuple[int, List[str]]:
|
|
93
|
+
signals: List[str] = []
|
|
94
|
+
if re.search(r"\b\d+\b", headline):
|
|
95
|
+
signals.append("number")
|
|
96
|
+
if re.search(r"\b(?:in \d+|within \d+|\d+ (?:days?|weeks?|months?|hours?|minutes?))\b", headline, re.I):
|
|
97
|
+
signals.append("timeframe")
|
|
98
|
+
if re.search(r"\b(?:how to|step|guide|checklist|strategy|system|framework|formula)\b", headline, re.I):
|
|
99
|
+
signals.append("concrete format")
|
|
100
|
+
if re.search(r"\b\d+%\b", headline):
|
|
101
|
+
signals.append("percentage")
|
|
102
|
+
return min(100, len(signals) * 34), signals
|
|
103
|
+
|
|
104
|
+
|
|
105
|
+
def _score_clarity(tokens: List[str]) -> Tuple[int, List[str]]:
    """Penalize corporate jargon: -30 points per jargon word, floored at 0."""
    jargon_hits = [token for token in tokens if token in JARGON_WORDS]
    return max(0, 100 - 30 * len(jargon_hits)), jargon_hits
|
|
108
|
+
|
|
109
|
+
|
|
110
|
+
# ---------------------------------------------------------------------------
|
|
111
|
+
# Core scoring
|
|
112
|
+
# ---------------------------------------------------------------------------
|
|
113
|
+
|
|
114
|
+
def score_headline(headline: str) -> HeadlineResult:
    """Score *headline* on all six dimensions and grade the weighted total."""
    tokens = _tokenize(headline)
    power_score, power_found = _score_power_words(tokens)
    emo_score, emo_found = _score_emotional(tokens)
    num_score, num_found = _score_numbers(headline)
    len_score, len_note = _score_length(tokens)
    spec_score, spec_found = _score_specificity(headline)
    clar_score, clar_found = _score_clarity(tokens)

    # The "weight" strings here are display-only; WEIGHTS holds the real values.
    breakdown = {
        "power_words": DimensionScore(power_score, "25%", f"found: {', '.join(power_found) or 'none'}"),
        "emotional_triggers": DimensionScore(emo_score, "15%", f"found: {', '.join(emo_found) or 'none'}"),
        "numbers": DimensionScore(num_score, "15%", f"found: {', '.join(num_found) or 'none'}"),
        "length": DimensionScore(len_score, "20%", len_note),
        "specificity": DimensionScore(spec_score, "15%", f"signals: {', '.join(spec_found) or 'none'}"),
        "clarity": DimensionScore(clar_score, "10%", f"jargon: {', '.join(clar_found) or 'none'}"),
    }

    overall = round(sum(breakdown[name].score * WEIGHTS[name] for name in WEIGHTS))
    for floor, letter in ((85, "A"), (70, "B"), (55, "C"), (40, "D")):
        if overall >= floor:
            grade = letter
            break
    else:
        grade = "F"

    return HeadlineResult(headline=headline, overall_score=overall, grade=grade, breakdown=breakdown)
|
|
137
|
+
|
|
138
|
+
|
|
139
|
+
# ---------------------------------------------------------------------------
|
|
140
|
+
# Output
|
|
141
|
+
# ---------------------------------------------------------------------------
|
|
142
|
+
|
|
143
|
+
def _format_text(results: List[HeadlineResult]) -> str:
    """Human-readable multi-headline report."""
    lines: List[str] = []
    for r in results:
        lines.append("-" * 60)
        lines.append(f" Headline: {r.headline}")
        lines.append(f" Score: {r.overall_score}/100 Grade: {r.grade}")
        lines.append("-" * 60)
        # ASCII bar chart: one "#" per ~10 points.
        for name, ds in r.breakdown.items():
            bar_len = round(ds.score / 10)
            bar = "#" * bar_len + "." * (10 - bar_len)
            lines.append(f" {name:<20} [{bar}] {ds.score:>3}/100 {ds.detail}")
        lines.append("")

    # Aggregate summary only makes sense with more than one headline.
    if len(results) > 1:
        avg = round(sum(r.overall_score for r in results) / len(results))
        best = max(results, key=lambda r: r.overall_score)
        lines.append("=" * 60)
        lines.append(f" {len(results)} headlines analyzed | Avg score: {avg}/100")
        lines.append(f" Best: \"{best.headline[:50]}\" ({best.overall_score}/100)")
        lines.append("=" * 60)
    return "\n".join(lines)
|
|
165
|
+
|
|
166
|
+
|
|
167
|
+
# ---------------------------------------------------------------------------
|
|
168
|
+
# CLI
|
|
169
|
+
# ---------------------------------------------------------------------------
|
|
170
|
+
|
|
171
|
+
def main() -> int:
    """Entry point. Returns 0=success, 1=warnings, 2=errors."""
    parser = argparse.ArgumentParser(
        description="Headline Scorer -- rates headlines 0-100 across 6 dimensions.",
    )
    parser.add_argument("headline", nargs="?", help="Single headline to score")
    parser.add_argument("--file", help="Text file with one headline per line")
    parser.add_argument("--json", action="store_true", help="Output as JSON")
    args = parser.parse_args()

    # Input priority: positional headline, then --file, then piped stdin.
    if args.headline:
        headlines = [args.headline]
    elif args.file:
        try:
            with open(args.file, "r", encoding="utf-8") as fh:
                headlines = [ln.strip() for ln in fh if ln.strip()]
        except FileNotFoundError:
            print(f"Error: file not found -- {args.file}", file=sys.stderr)
            return 2
        except OSError as exc:
            print(f"Error: {exc}", file=sys.stderr)
            return 2
    elif not sys.stdin.isatty():
        headlines = [ln.strip() for ln in sys.stdin if ln.strip()]
    else:
        parser.print_help()
        return 2

    if not headlines:
        print("Error: no headlines provided.", file=sys.stderr)
        return 2

    results = [score_headline(h) for h in headlines]

    if args.json:
        # asdict() recursively converts the nested dataclasses to plain dicts.
        print(json.dumps([asdict(r) for r in results], indent=2))
    else:
        print(_format_text(results))

    # Exit code 1 flags weak headlines (average below 50) to calling scripts.
    avg = sum(r.overall_score for r in results) / len(results)
    return 1 if avg < 50 else 0


if __name__ == "__main__":
    sys.exit(main())
|