@jeganwrites/claudash 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CONTRIBUTING.md +35 -0
- package/LICENSE +21 -0
- package/README.md +261 -0
- package/analyzer.py +890 -0
- package/bin/claudash.js +121 -0
- package/claude_ai_tracker.py +358 -0
- package/cli.py +1034 -0
- package/config.py +100 -0
- package/db.py +1156 -0
- package/fix_tracker.py +539 -0
- package/insights.py +359 -0
- package/mcp_server.py +414 -0
- package/package.json +39 -0
- package/scanner.py +385 -0
- package/server.py +762 -0
- package/templates/accounts.html +936 -0
- package/templates/dashboard.html +1742 -0
- package/tools/get-derived-keys.py +112 -0
- package/tools/mac-sync.py +386 -0
- package/tools/oauth_sync.py +308 -0
- package/tools/setup-pm2.sh +53 -0
- package/waste_patterns.py +334 -0
package/mcp_server.py
ADDED
|
@@ -0,0 +1,414 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""Claudash MCP server — exposes dashboard data to Claude Code via the
|
|
3
|
+
Model Context Protocol (JSON-RPC 2.0 over stdio).
|
|
4
|
+
|
|
5
|
+
Claude Code loads MCP servers from ~/.claude/settings.json:
|
|
6
|
+
|
|
7
|
+
{
|
|
8
|
+
"mcpServers": {
|
|
9
|
+
"claudash": {
|
|
10
|
+
"command": "python3",
|
|
11
|
+
"args": ["/absolute/path/to/claudash/mcp_server.py"]
|
|
12
|
+
}
|
|
13
|
+
}
|
|
14
|
+
}
|
|
15
|
+
|
|
16
|
+
Supported methods:
|
|
17
|
+
initialize — handshake
|
|
18
|
+
notifications/initialized — post-handshake ack (no response)
|
|
19
|
+
tools/list — return the 5 tool schemas
|
|
20
|
+
tools/call — invoke one of the tools
|
|
21
|
+
|
|
22
|
+
Tools:
|
|
23
|
+
claudash_summary — per-account usage rollup
|
|
24
|
+
claudash_project — detailed project metrics
|
|
25
|
+
claudash_window — current 5-hour window status
|
|
26
|
+
claudash_insights — active actionable insights
|
|
27
|
+
claudash_action_center — top 3 recommended actions
|
|
28
|
+
|
|
29
|
+
The server reads SQLite directly (no HTTP) so it does NOT need the web
|
|
30
|
+
server to be running. Works offline and in cron jobs.
|
|
31
|
+
|
|
32
|
+
Run `python3 mcp_server.py test` to sanity-check without launching the
|
|
33
|
+
JSON-RPC loop.
|
|
34
|
+
"""
|
|
35
|
+
|
|
36
|
+
import json
|
|
37
|
+
import os
|
|
38
|
+
import sys
|
|
39
|
+
import time
|
|
40
|
+
|
|
41
|
+
# Ensure we can import the rest of the Claudash package regardless of cwd
|
|
42
|
+
# Absolute directory containing this file; prepended to sys.path so sibling
# modules (db, analyzer) import correctly no matter where the process was
# launched from (Claude Code spawns MCP servers with an arbitrary cwd).
_HERE = os.path.dirname(os.path.abspath(__file__))
if _HERE not in sys.path:
    sys.path.insert(0, _HERE)
|
|
45
|
+
|
|
46
|
+
from db import ( # noqa: E402
|
|
47
|
+
init_db, get_conn, get_session_count, get_accounts_config, get_insights,
|
|
48
|
+
)
|
|
49
|
+
from analyzer import ( # noqa: E402
|
|
50
|
+
account_metrics, project_metrics, window_intelligence,
|
|
51
|
+
compaction_metrics, subagent_metrics, daily_budget_metrics,
|
|
52
|
+
)
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
# MCP protocol revision echoed back in the `initialize` handshake.
PROTOCOL_VERSION = "2024-11-05"
# Server identity reported in `serverInfo` during the handshake.
SERVER_NAME = "claudash"
SERVER_VERSION = "1.0.0"
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
# ─── Tool implementations ────────────────────────────────────────
|
|
61
|
+
|
|
62
|
+
def _tool_claudash_summary(args):
    """Build a per-account usage rollup for every configured account.

    Returns {"accounts": [...], "generated_at": epoch_seconds}; each account
    entry carries window burn, ROI, cache-hit rate, sessions today, 30-day
    cost, and the busiest project name (or None when there are no projects).
    """
    conn = get_conn()
    try:
        summaries = []
        for account_id, cfg in get_accounts_config(conn).items():
            metrics = account_metrics(conn, account_id)
            window = window_intelligence(conn, account_id)
            proj_list = project_metrics(conn, account_id)
            summaries.append({
                "account_id": account_id,
                "label": cfg["label"],
                "plan": cfg.get("plan", "max"),
                "window_pct": window.get("window_pct", 0),
                "subscription_roi": metrics.get("subscription_roi", 0),
                "cache_hit_rate": metrics.get("cache_hit_rate", 0),
                "sessions_today": metrics.get("sessions_today", 0),
                "total_cost_30d_usd": metrics.get("total_cost_30d", 0),
                # project_metrics is presumably sorted busiest-first; take head.
                "top_project": proj_list[0]["name"] if proj_list else None,
            })
        return {"accounts": summaries, "generated_at": int(time.time())}
    finally:
        conn.close()
|
|
86
|
+
|
|
87
|
+
|
|
88
|
+
def _tool_claudash_project(args):
    """Return detailed metrics for one project, matched case-insensitively.

    Error cases return a dict with an "error" key rather than raising, so
    the JSON-RPC layer always gets a serializable tool result.
    """
    project_name = (args or {}).get("project_name") or ""
    if not project_name:
        return {"error": "project_name is required"}
    conn = get_conn()
    try:
        # "all" aggregates across every account so a project is found
        # regardless of which account it ran under.
        projects = project_metrics(conn, "all")
        match = next((p for p in projects if p["name"].lower() == project_name.lower()), None)
        if not match:
            # Include the known names so the caller can self-correct.
            return {"error": f"project '{project_name}' not found",
                    "available": [p["name"] for p in projects]}
        # Compaction metric for just this project
        # NOTE(review): no time filter here although the result key says
        # "30d" — presumably the sessions table only retains ~30 days;
        # confirm against db.py.
        comp_rows = conn.execute(
            "SELECT COUNT(*) AS n FROM sessions WHERE project=? AND compaction_detected=1",
            (match["name"],),
        ).fetchone()
        comp_count = comp_rows["n"] if comp_rows else 0
        # Average turns per session for this project
        # NOTE(review): assumes `sessions` holds one row per turn (grouped
        # by session_id); if it is one row per session this is always 1.0 —
        # verify schema.
        turn_row = conn.execute(
            "SELECT AVG(turns) AS avg_turns FROM "
            "(SELECT session_id, COUNT(*) AS turns FROM sessions "
            " WHERE project=? GROUP BY session_id)",
            (match["name"],),
        ).fetchone()
        # AVG over zero rows yields NULL → None; coerce to 0 before rounding.
        avg_turns = round(turn_row["avg_turns"] or 0, 1) if turn_row else 0
        return {
            "project": match["name"],
            "account": match.get("account_label") or match.get("account"),
            "cost_30d_usd": match.get("cost_usd_30d", 0),
            "session_count": match.get("session_count", 0),
            "total_tokens": match.get("total_tokens", 0),
            "cache_hit_rate": match.get("cache_hit_rate", 0),
            "dominant_model": match.get("dominant_model", ""),
            "avg_turns_per_session": avg_turns,
            "compaction_events_30d": comp_count,
            "wow_change_pct": match.get("wow_change_pct", 0),
            "rightsizing_savings_usd": match.get("rightsizing_savings", 0),
        }
    finally:
        conn.close()
|
|
128
|
+
|
|
129
|
+
|
|
130
|
+
def _tool_claudash_window(args):
    """Report the 5-hour rate-limit window status for every account.

    Each entry includes percent used, burn rate, predicted exhaust time as
    an ISO-8601 UTC string (None when no prediction is available), and a
    heuristic flag for whether starting a heavy session is safe.
    """
    conn = get_conn()
    try:
        report = []
        for account_id, cfg in get_accounts_config(conn).items():
            wi = window_intelligence(conn, account_id)
            used_pct = wi.get("window_pct", 0)
            predicted = wi.get("predicted_limit_time")
            predicted_iso = (
                time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime(predicted))
                if predicted
                else None
            )
            report.append({
                "account_id": account_id,
                "label": cfg["label"],
                "pct_used": used_pct,
                "tokens_used": wi.get("total_tokens", 0),
                "tokens_limit": wi.get("tokens_limit", 0),
                "burn_rate_per_min": wi.get("burn_per_minute", 0),
                "minutes_to_limit": wi.get("minutes_to_limit"),
                "predicted_exhaust_utc": predicted_iso,
                # Fall back to "< 50% used" when the analyzer gives no verdict.
                "safe_to_start_heavy": wi.get("safe_for_heavy_session", used_pct < 50),
                "best_start_hour_utc": wi.get("best_start_hour"),
            })
        return {"accounts": report, "generated_at": int(time.time())}
    finally:
        conn.close()
|
|
158
|
+
|
|
159
|
+
|
|
160
|
+
def _tool_claudash_insights(args):
    """List active (non-dismissed) insights, each tagged with a priority.

    Priority is derived from the insight_type via a fixed mapping; unknown
    types default to "info".
    """
    conn = get_conn()
    try:
        # Build insight_type → priority lookup from grouped tiers.
        severity = {}
        for level, types in (
            ("critical", ("red", "window_risk", "cache_spike", "budget_exceeded",
                          "floundering_detected", "window_combined_risk")),
            ("high", ("session_expiry",)),
            ("warning", ("amber", "model_waste", "compaction_gap", "budget_warning",
                         "subagent_cost_spike", "pro_messages_low")),
            ("info", ("green", "roi_milestone", "cost_target", "blue",
                      "heavy_day", "best_window")),
        ):
            for t in types:
                severity[t] = level
        rows = get_insights(conn, account=None, dismissed=0, limit=50)
        insights = [
            {
                "id": row["id"],
                "type": row["insight_type"],
                "priority": severity.get(row["insight_type"], "info"),
                "project": row["project"],
                "message": row["message"],
                "created_at": row["created_at"],
            }
            for row in rows
        ]
        return {"insights": insights, "total": len(insights)}
    finally:
        conn.close()
|
|
188
|
+
|
|
189
|
+
|
|
190
|
+
def _tool_claudash_action_center(args):
    """Return up to 3 ranked, actionable recommendations.

    Candidates are gathered from four sources in fixed priority order
    (1=budget overrun, 2=floundering sessions, 3=Opus overuse, 4=compaction
    gap), then sorted by priority and truncated to the top three.
    """
    conn = get_conn()
    try:
        actions = []

        # Rank 1: budget exceeded
        dbm = daily_budget_metrics(conn, "all")
        for acct_id, b in dbm.items():
            # Only flag accounts that actually configured a budget.
            if b.get("has_budget") and b["today_cost"] > b["budget_usd"]:
                actions.append({
                    "priority": 1,
                    "title": f"{acct_id} over daily budget",
                    "why": f"${b['today_cost']:.2f} spent vs ${b['budget_usd']:.2f} limit",
                    "action": "Pause heavy runs or switch to Sonnet until midnight UTC",
                    "impact": f"${b['today_cost'] - b['budget_usd']:.2f} over budget",
                })

        # Rank 2: floundering sessions
        # Top-3 costliest projects with floundering waste in the last 7 days.
        # NOTE(review): assumes detected_at is stored as epoch seconds —
        # confirm against db.py.
        flounder_rows = conn.execute(
            "SELECT project, COUNT(*) AS n, SUM(token_cost) AS cost "
            "FROM waste_events WHERE pattern_type='floundering' "
            " AND detected_at >= strftime('%s','now') - 7*86400 "
            "GROUP BY project ORDER BY cost DESC LIMIT 3"
        ).fetchall()
        for r in flounder_rows:
            actions.append({
                "priority": 2,
                "title": f"{r['project']} floundering sessions",
                "why": f"Claude stuck in retry loops on {r['n']} session(s)",
                "action": "Check session logs for permission/path errors; add explicit bash error handling",
                # SUM() is NULL when token_cost is all-NULL; coerce to 0.
                "impact": f"~${(r['cost'] or 0):.2f} at risk in retry loops",
            })

        # Rank 3: Opus overuse
        # Local import: only this tool needs model_rightsizing.
        from analyzer import model_rightsizing
        rs = model_rightsizing(conn, "all")
        # Consider at most the top two suggestions; ignore savings <= $5/mo.
        for s in rs[:2]:
            if s["monthly_savings"] > 5:
                actions.append({
                    "priority": 3,
                    "title": f"Opus overuse in {s['project']}",
                    "why": f"Avg output is {s['avg_output_tokens']} tokens — Sonnet is sufficient",
                    "action": f"Switch {s['project']} default model to claude-sonnet",
                    "impact": f"~${s['monthly_savings']:.2f}/mo savings",
                })

        # Rank 4 (fallback): compaction gap
        # Only used as filler when fewer than 3 higher-priority actions exist.
        comp = compaction_metrics(conn, "all")
        if comp.get("sessions_needing_compact", 0) > 0 and len(actions) < 3:
            actions.append({
                "priority": 4,
                "title": "Context rot risk",
                "why": f"{comp['sessions_needing_compact']} sessions hit 80% context without /compact",
                "action": "Run /compact earlier in long sessions to preserve quality",
                "impact": "Prevents quality degradation + saves tokens on later turns",
            })

        actions.sort(key=lambda a: a["priority"])
        return {"actions": actions[:3], "generated_at": int(time.time())}
    finally:
        conn.close()
|
|
252
|
+
|
|
253
|
+
|
|
254
|
+
# ─── Tool registry ───────────────────────────────────────────────
|
|
255
|
+
|
|
256
|
+
# Registry of all MCP tools. Each entry holds the wire-visible schema
# ("name", "description", "inputSchema") plus a local "handler" callable;
# tools/list strips the handler, tools/call invokes it with the arguments.
TOOLS = [
    {
        "name": "claudash_summary",
        "description": "Get current Claude usage summary from Claudash (all accounts: window burn, ROI, cache hit rate, sessions today, top project).",
        # No parameters accepted.
        "inputSchema": {"type": "object", "properties": {}, "additionalProperties": False},
        "handler": _tool_claudash_summary,
    },
    {
        "name": "claudash_project",
        "description": "Get detailed usage metrics for a specific Claude project (cost, sessions, cache hit rate, avg turns, compaction, dominant model, week-over-week change).",
        # The only tool that requires an argument.
        "inputSchema": {
            "type": "object",
            "properties": {
                "project_name": {
                    "type": "string",
                    "description": "The project name as shown in Claudash (e.g. 'WikiLoop', 'Tidify').",
                },
            },
            "required": ["project_name"],
            "additionalProperties": False,
        },
        "handler": _tool_claudash_project,
    },
    {
        "name": "claudash_window",
        "description": "Check the current Claude 5-hour window burn status for every account — percentage used, burn rate, predicted exhaust time, and whether it's safe to start a heavy session.",
        "inputSchema": {"type": "object", "properties": {}, "additionalProperties": False},
        "handler": _tool_claudash_window,
    },
    {
        "name": "claudash_insights",
        "description": "Get active actionable insights about Claude usage patterns (cache spikes, model waste, window risk, ROI milestones, waste patterns).",
        "inputSchema": {"type": "object", "properties": {}, "additionalProperties": False},
        "handler": _tool_claudash_insights,
    },
    {
        "name": "claudash_action_center",
        "description": "Get the top 3 recommended actions to optimize Claude usage right now, ranked by priority. Each action has why/action/impact fields.",
        "inputSchema": {"type": "object", "properties": {}, "additionalProperties": False},
        "handler": _tool_claudash_action_center,
    },
]
|
|
298
|
+
|
|
299
|
+
|
|
300
|
+
# ─── JSON-RPC dispatch ───────────────────────────────────────────
|
|
301
|
+
|
|
302
|
+
def _result(req_id, result):
|
|
303
|
+
return {"jsonrpc": "2.0", "id": req_id, "result": result}
|
|
304
|
+
|
|
305
|
+
|
|
306
|
+
def _error(req_id, code, message, data=None):
|
|
307
|
+
err = {"code": code, "message": message}
|
|
308
|
+
if data is not None:
|
|
309
|
+
err["data"] = data
|
|
310
|
+
return {"jsonrpc": "2.0", "id": req_id, "error": err}
|
|
311
|
+
|
|
312
|
+
|
|
313
|
+
def handle_request(req):
    """Dispatch one parsed JSON-RPC request dict.

    Returns a response dict, or None for notifications (which must get no
    reply). Tool failures are converted to JSON-RPC error responses rather
    than propagating.
    """
    req_id = req.get("id")
    method = req.get("method", "")
    params = req.get("params") or {}

    if method == "initialize":
        info = {
            "protocolVersion": PROTOCOL_VERSION,
            "capabilities": {"tools": {}},
            "serverInfo": {"name": SERVER_NAME, "version": SERVER_VERSION},
        }
        return _result(req_id, info)

    if method == "notifications/initialized":
        # Notifications never receive a response.
        return None

    if method == "tools/list":
        # Strip the local "handler" key — only the schema goes on the wire.
        schemas = [
            {"name": t["name"], "description": t["description"], "inputSchema": t["inputSchema"]}
            for t in TOOLS
        ]
        return _result(req_id, {"tools": schemas})

    if method == "tools/call":
        name = params.get("name")
        arguments = params.get("arguments") or {}
        for tool in TOOLS:
            if tool["name"] != name:
                continue
            try:
                payload = tool["handler"](arguments)
            except Exception as e:
                return _error(req_id, -32000, f"Tool execution failed: {e}")
            text = json.dumps(payload, default=str, indent=2)
            return _result(req_id, {"content": [{"type": "text", "text": text}]})
        return _error(req_id, -32601, f"Unknown tool: {name}")

    # Unknown method: stay silent for notifications, error for requests.
    if req_id is None:
        return None
    return _error(req_id, -32601, f"Method not found: {method}")
|
|
353
|
+
|
|
354
|
+
|
|
355
|
+
def run_stdio():
    """Serve JSON-RPC over stdio, one JSON object per line.

    Responses are flushed immediately after each write so the client never
    stalls on output buffering. Unparseable lines produce a -32700 error.
    """
    init_db()
    out = sys.stdout
    for line in sys.stdin:
        line = line.strip()
        if not line:
            continue
        try:
            request = json.loads(line)
        except json.JSONDecodeError:
            out.write(json.dumps(_error(None, -32700, "Parse error")) + "\n")
            out.flush()
            continue
        response = handle_request(request)
        if response is not None:
            # default=str keeps non-JSON-native values serializable.
            out.write(json.dumps(response, default=str) + "\n")
            out.flush()
|
|
374
|
+
|
|
375
|
+
|
|
376
|
+
def run_test():
    """Offline smoke test. Exercises each tool and prints a single OK line.

    Calls every registered tool handler (supplying a real project name for
    claudash_project when one exists), collects failures, and exits with
    status 1 listing them; otherwise prints the registered tool count.
    """
    init_db()
    errors = []
    for tool in TOOLS:
        try:
            if tool["name"] == "claudash_project":
                # Needs an arg — pick the first real project.
                # Fix: guarantee conn.close() even if the query raises,
                # otherwise the connection leaked on error.
                conn = get_conn()
                try:
                    row = conn.execute(
                        "SELECT project FROM sessions WHERE project IS NOT NULL "
                        "GROUP BY project ORDER BY COUNT(*) DESC LIMIT 1"
                    ).fetchone()
                finally:
                    conn.close()
                arg = {"project_name": row[0]} if row else {"project_name": "test"}
                result = tool["handler"](arg)
            else:
                result = tool["handler"]({})
            if not isinstance(result, dict):
                errors.append(f"{tool['name']}: did not return dict")
        except Exception as e:
            errors.append(f"{tool['name']}: {e}")
    if errors:
        print("MCP server FAILED:")
        for e in errors:
            print(" -", e)
        sys.exit(1)
    print(f"MCP server OK — {len(TOOLS)} tools registered")
|
|
404
|
+
|
|
405
|
+
|
|
406
|
+
def main():
    """CLI entry point: `test` argument runs the smoke test, anything else
    (or no argument) starts the JSON-RPC stdio loop."""
    mode = sys.argv[1] if len(sys.argv) > 1 else ""
    if mode == "test":
        run_test()
    else:
        run_stdio()
|
|
411
|
+
|
|
412
|
+
|
|
413
|
+
# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()
|
package/package.json
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@jeganwrites/claudash",
|
|
3
|
+
"version": "1.0.0",
|
|
4
|
+
"description": "Claude Code usage intelligence dashboard",
|
|
5
|
+
"bin": {
|
|
6
|
+
"claudash": "./bin/claudash.js"
|
|
7
|
+
},
|
|
8
|
+
"scripts": {
|
|
9
|
+
"start": "node bin/claudash.js"
|
|
10
|
+
},
|
|
11
|
+
"keywords": [
|
|
12
|
+
"claude",
|
|
13
|
+
"claude-code",
|
|
14
|
+
"usage",
|
|
15
|
+
"dashboard",
|
|
16
|
+
"ai"
|
|
17
|
+
],
|
|
18
|
+
"author": "Jegan Nagarajan",
|
|
19
|
+
"license": "MIT",
|
|
20
|
+
"repository": {
|
|
21
|
+
"type": "git",
|
|
22
|
+
"url": "https://github.com/pnjegan/claudash"
|
|
23
|
+
},
|
|
24
|
+
"engines": {
|
|
25
|
+
"node": ">=16.0.0"
|
|
26
|
+
},
|
|
27
|
+
"files": [
|
|
28
|
+
"bin/",
|
|
29
|
+
"*.py",
|
|
30
|
+
"templates/",
|
|
31
|
+
"tools/",
|
|
32
|
+
"README.md",
|
|
33
|
+
"LICENSE",
|
|
34
|
+
"CONTRIBUTING.md"
|
|
35
|
+
],
|
|
36
|
+
"publishConfig": {
|
|
37
|
+
"access": "public"
|
|
38
|
+
}
|
|
39
|
+
}
|