daemora 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (115) hide show
  1. package/README.md +666 -0
  2. package/SOUL.md +104 -0
  3. package/config/hooks.json +14 -0
  4. package/config/mcp.json +145 -0
  5. package/package.json +86 -0
  6. package/skills/.gitkeep +0 -0
  7. package/skills/apple-notes.md +193 -0
  8. package/skills/apple-reminders.md +189 -0
  9. package/skills/camsnap.md +162 -0
  10. package/skills/coding.md +14 -0
  11. package/skills/documents.md +13 -0
  12. package/skills/email.md +13 -0
  13. package/skills/gif-search.md +196 -0
  14. package/skills/healthcheck.md +225 -0
  15. package/skills/image-gen.md +147 -0
  16. package/skills/model-usage.md +182 -0
  17. package/skills/obsidian.md +207 -0
  18. package/skills/pdf.md +211 -0
  19. package/skills/research.md +13 -0
  20. package/skills/skill-creator.md +142 -0
  21. package/skills/spotify.md +149 -0
  22. package/skills/summarize.md +230 -0
  23. package/skills/things.md +199 -0
  24. package/skills/tmux.md +204 -0
  25. package/skills/trello.md +183 -0
  26. package/skills/video-frames.md +202 -0
  27. package/skills/weather.md +127 -0
  28. package/src/a2a/A2AClient.js +136 -0
  29. package/src/a2a/A2AServer.js +316 -0
  30. package/src/a2a/AgentCard.js +79 -0
  31. package/src/agents/SubAgentManager.js +369 -0
  32. package/src/agents/Supervisor.js +192 -0
  33. package/src/channels/BaseChannel.js +104 -0
  34. package/src/channels/DiscordChannel.js +288 -0
  35. package/src/channels/EmailChannel.js +172 -0
  36. package/src/channels/GoogleChatChannel.js +316 -0
  37. package/src/channels/HttpChannel.js +26 -0
  38. package/src/channels/LineChannel.js +168 -0
  39. package/src/channels/SignalChannel.js +186 -0
  40. package/src/channels/SlackChannel.js +329 -0
  41. package/src/channels/TeamsChannel.js +272 -0
  42. package/src/channels/TelegramChannel.js +347 -0
  43. package/src/channels/WhatsAppChannel.js +219 -0
  44. package/src/channels/index.js +198 -0
  45. package/src/cli.js +1267 -0
  46. package/src/config/agentProfiles.js +120 -0
  47. package/src/config/channels.js +32 -0
  48. package/src/config/default.js +206 -0
  49. package/src/config/models.js +123 -0
  50. package/src/config/permissions.js +167 -0
  51. package/src/core/AgentLoop.js +446 -0
  52. package/src/core/Compaction.js +143 -0
  53. package/src/core/CostTracker.js +116 -0
  54. package/src/core/EventBus.js +46 -0
  55. package/src/core/Task.js +67 -0
  56. package/src/core/TaskQueue.js +206 -0
  57. package/src/core/TaskRunner.js +226 -0
  58. package/src/daemon/DaemonManager.js +301 -0
  59. package/src/hooks/HookRunner.js +230 -0
  60. package/src/index.js +482 -0
  61. package/src/mcp/MCPAgentRunner.js +112 -0
  62. package/src/mcp/MCPClient.js +186 -0
  63. package/src/mcp/MCPManager.js +412 -0
  64. package/src/models/ModelRouter.js +180 -0
  65. package/src/safety/AuditLog.js +135 -0
  66. package/src/safety/CircuitBreaker.js +126 -0
  67. package/src/safety/FilesystemGuard.js +169 -0
  68. package/src/safety/GitRollback.js +139 -0
  69. package/src/safety/HumanApproval.js +156 -0
  70. package/src/safety/InputSanitizer.js +72 -0
  71. package/src/safety/PermissionGuard.js +83 -0
  72. package/src/safety/Sandbox.js +70 -0
  73. package/src/safety/SecretScanner.js +100 -0
  74. package/src/safety/SecretVault.js +250 -0
  75. package/src/scheduler/Heartbeat.js +115 -0
  76. package/src/scheduler/Scheduler.js +228 -0
  77. package/src/services/models/outputSchema.js +15 -0
  78. package/src/services/openai.js +25 -0
  79. package/src/services/sessions.js +65 -0
  80. package/src/setup/theme.js +110 -0
  81. package/src/setup/wizard.js +788 -0
  82. package/src/skills/SkillLoader.js +168 -0
  83. package/src/storage/TaskStore.js +69 -0
  84. package/src/systemPrompt.js +526 -0
  85. package/src/tenants/TenantContext.js +19 -0
  86. package/src/tenants/TenantManager.js +379 -0
  87. package/src/tools/ToolRegistry.js +141 -0
  88. package/src/tools/applyPatch.js +144 -0
  89. package/src/tools/browserAutomation.js +223 -0
  90. package/src/tools/createDocument.js +265 -0
  91. package/src/tools/cronTool.js +105 -0
  92. package/src/tools/editFile.js +139 -0
  93. package/src/tools/executeCommand.js +123 -0
  94. package/src/tools/glob.js +67 -0
  95. package/src/tools/grep.js +121 -0
  96. package/src/tools/imageAnalysis.js +120 -0
  97. package/src/tools/index.js +173 -0
  98. package/src/tools/listDirectory.js +47 -0
  99. package/src/tools/manageAgents.js +47 -0
  100. package/src/tools/manageMCP.js +159 -0
  101. package/src/tools/memory.js +478 -0
  102. package/src/tools/messageChannel.js +45 -0
  103. package/src/tools/projectTracker.js +259 -0
  104. package/src/tools/readFile.js +52 -0
  105. package/src/tools/screenCapture.js +112 -0
  106. package/src/tools/searchContent.js +76 -0
  107. package/src/tools/searchFiles.js +75 -0
  108. package/src/tools/sendEmail.js +118 -0
  109. package/src/tools/sendFile.js +63 -0
  110. package/src/tools/textToSpeech.js +161 -0
  111. package/src/tools/transcribeAudio.js +82 -0
  112. package/src/tools/useMCP.js +29 -0
  113. package/src/tools/webFetch.js +150 -0
  114. package/src/tools/webSearch.js +134 -0
  115. package/src/tools/writeFile.js +26 -0
@@ -0,0 +1,225 @@
1
+ ---
2
+ name: healthcheck
3
+ description: Monitor system health, check disk usage, CPU load, memory, running processes, network connectivity, and service status. Use when the user asks about system health, disk space, memory usage, CPU usage, running processes, server status, or whether a service is running. Works on macOS and Linux.
4
+ triggers: health check, disk space, memory usage, CPU usage, system status, process running, server health, is running, disk full, system monitor, uptime, load average
5
+ ---
6
+
7
+ ## When to Use
8
+
9
+ āœ… Check disk space, memory, CPU, running processes, network, service uptime, port availability, web endpoint health, log tails
10
+
11
+ āŒ Deep profiling or performance tracing — use dedicated profilers (Instruments, perf, etc.)
12
+
13
+ ## Full System Health Report
14
+
15
+ ```bash
16
+ python3 << 'EOF'
17
+ import subprocess, platform, os
18
+ from datetime import datetime
19
+
20
+ def run(cmd):
21
+ r = subprocess.run(cmd, shell=True, capture_output=True, text=True)
22
+ return r.stdout.strip()
23
+
24
+ print(f"šŸ–„ System Health Report — {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
25
+ print(f"{'─'*50}")
26
+
27
+ # OS + Uptime
28
+ print(f"OS: {platform.system()} {platform.mac_ver()[0] or platform.release()}")
29
+ print(f"Uptime: {run('uptime | sed s/.*up/up/')}")
30
+
31
+ # CPU
32
+ if platform.system() == "Darwin":
33
+ load = os.getloadavg()
34
+ cpu_count = int(run("sysctl -n hw.ncpu"))
35
+ print(f"CPU: load {load[0]:.2f} / {load[1]:.2f} / {load[2]:.2f} ({cpu_count} cores)")
36
+ else:
37
+ print(f"CPU: {run('top -bn1 | grep Cpu | head -1')}")
38
+
39
+ # Memory
40
+ if platform.system() == "Darwin":
41
+ mem = run("vm_stat")
42
+ page_size = int(run("sysctl -n hw.pagesize") or 4096)  # 16384 on Apple Silicon, 4096 on Intel — hardcoding 4096 over-reports memory 4x on M-series Macs
43
+ lines = {l.split(':')[0].strip(): int(l.split(':')[1].strip().rstrip('.'))
44
+ for l in mem.split('\n') if ':' in l}
45
+ free = (lines.get('Pages free', 0) + lines.get('Pages speculative', 0)) * page_size / 1024**3
46
+ active = lines.get('Pages active', 0) * page_size / 1024**3
47
+ wired = lines.get('Pages wired down', 0) * page_size / 1024**3
48
+ total = float(run("sysctl -n hw.memsize")) / 1024**3
49
+ used = total - free
50
+ print(f"Memory: {used:.1f}GB used / {total:.1f}GB total ({free:.1f}GB free)")
51
+ else:
52
+ print(f"Memory: {run('free -h | grep Mem')}")
53
+
54
+ # Disk
55
+ import shutil
56
+ disks = ['/'] + (['/Volumes/' + d for d in os.listdir('/Volumes')] if os.path.exists('/Volumes') else [])
57
+ print("\nDisk Usage:")
58
+ for disk in disks:
59
+ try:
60
+ total, used, free = shutil.disk_usage(disk)
61
+ pct = used / total * 100
62
+ bar = 'ā–ˆ' * int(pct/5) + 'ā–‘' * (20 - int(pct/5))
63
+ status = "āš ļø" if pct > 85 else "āœ…"
64
+ print(f" {status} {disk:<20} {bar} {pct:.0f}% ({free/1024**3:.0f}GB free)")
65
+ except:
66
+ pass
67
+ EOF
68
+ ```
69
+
70
+ ## Disk Space Check
71
+
72
+ ```bash
73
+ # Quick disk check
74
+ df -h | grep -E "^/dev|Filesystem"
75
+
76
+ # Find largest directories (top 10)
77
+ du -sh /* 2>/dev/null | sort -rh | head -10
78
+
79
+ # Find large files (>100MB) in a directory
80
+ find ~/Downloads -size +100M -exec ls -lh {} \; 2>/dev/null | sort -k5 -rh | head -20
81
+
82
+ # Check inode usage (different from disk space — can be "full" with space remaining)
83
+ df -i /
84
+ ```
85
+
86
+ ## Memory Usage
87
+
88
+ ```bash
89
+ # macOS
90
+ vm_stat && top -l 1 -n 0 | grep -E "PhysMem|CPU"
91
+
92
+ # Top memory-consuming processes
93
+ ps aux --sort=-%mem >/dev/null 2>&1 && ps aux --sort=-%mem | head -10 || \
94
+ ps aux | sort -k4 -rn | head -10  # fallback for macOS/BSD ps (no --sort); test ps itself, since a pipeline's exit status is head's and always succeeds
95
+ ```
96
+
97
+ ## CPU Usage
98
+
99
+ ```bash
100
+ # Top CPU processes
101
+ ps aux --sort=-%cpu >/dev/null 2>&1 && ps aux --sort=-%cpu | head -10 || \
102
+ ps aux | sort -k3 -rn | head -10  # fallback for macOS/BSD ps (no --sort); test ps itself, since a pipeline's exit status is head's and always succeeds
103
+
104
+ # macOS: CPU usage over 5 seconds
105
+ top -l 2 -n 0 -stats cpu | tail -5
106
+ ```
107
+
108
+ ## Check if a Process is Running
109
+
110
+ ```python
111
+ #!/usr/bin/env python3
112
+ import subprocess, sys
113
+
114
+ def is_running(process_name: str) -> tuple[bool, list[str]]:
115
+ """Check if a process is running. Returns (running, [pid lines])."""
116
+ result = subprocess.run(
117
+ ["pgrep", "-fl", process_name],
118
+ capture_output=True, text=True
119
+ )
120
+ lines = [l for l in result.stdout.strip().split('\n') if l and process_name.lower() in l.lower()]
121
+ return len(lines) > 0, lines
122
+
123
+ # Check common services
124
+ services = ["nginx", "postgres", "redis-server", "node", "python3", "docker"]
125
+ for svc in services:
126
+ running, pids = is_running(svc)
127
+ icon = "āœ…" if running else "āŒ"
128
+ info = f" PIDs: {', '.join(p.split()[0] for p in pids[:3])}" if running else ""
129
+ print(f"{icon} {svc:<20}{info}")
130
+ ```
131
+
132
+ ## Check Port Availability
133
+
134
+ ```bash
135
+ # Check if a port is in use
136
+ PORT=3000
137
+ lsof -i :$PORT 2>/dev/null | grep LISTEN || echo "Port $PORT is free"
138
+
139
+ # List all listening ports
140
+ lsof -nP -iTCP -sTCP:LISTEN 2>/dev/null | awk 'NR>1 {print $1, $9}' | sort -u
141
+
142
+ # Check if a service is actually responding
143
+ curl -s -o /dev/null -w "%{http_code}" http://localhost:3000/health
144
+ ```
145
+
146
+ ## HTTP Endpoint Health Check
147
+
148
+ ```python
149
+ #!/usr/bin/env python3
150
+ import urllib.request, urllib.error, time, json
151
+
152
+ def check_endpoint(url: str, timeout: int = 5) -> dict:
153
+ """Check if an HTTP endpoint is healthy."""
154
+ start = time.time()
155
+ try:
156
+ req = urllib.request.Request(url)
157
+ with urllib.request.urlopen(req, timeout=timeout) as resp:
158
+ latency = (time.time() - start) * 1000
159
+ return {
160
+ "url": url,
161
+ "status": resp.status,
162
+ "latency_ms": round(latency, 1),
163
+ "healthy": 200 <= resp.status < 300
164
+ }
165
+ except urllib.error.HTTPError as e:
166
+ return {"url": url, "status": e.code, "healthy": False, "error": str(e)}
167
+ except Exception as e:
168
+ return {"url": url, "status": 0, "healthy": False, "error": str(e)}
169
+
170
+ # Check multiple endpoints
171
+ endpoints = [
172
+ "http://localhost:3000/health",
173
+ "http://localhost:8081/health",
174
+ "https://api.example.com/ping",
175
+ ]
176
+
177
+ print("šŸ” Endpoint Health Check")
178
+ print(f"{'─'*60}")
179
+ for ep in endpoints:
180
+ result = check_endpoint(ep)
181
+ icon = "āœ…" if result["healthy"] else "āŒ"
182
+ latency = f"{result['latency_ms']}ms" if result["healthy"] else result.get("error", "unreachable")
183
+ print(f"{icon} {result['status']} {latency:<15} {ep}")
184
+ ```
185
+
186
+ ## Check Recent Errors in Logs
187
+
188
+ ```bash
189
+ # System log (macOS)
190
+ log show --last 1h --predicate 'messageType == 16 or messageType == 17' \
191
+ --style syslog 2>/dev/null | tail -20
192
+
193
+ # Application log tail
194
+ tail -100 ~/Library/Logs/your-app/app.log 2>/dev/null | grep -iE "error|warn|fatal" | tail -20
195
+
196
+ # Daemora agent logs
197
+ tail -50 data/audit/*.jsonl 2>/dev/null | python3 -c "
198
+ import sys, json
199
+ for line in sys.stdin:
200
+ try:
201
+ e = json.loads(line)
202
+ if e.get('type') in ['error', 'warn']:
203
+ print(f\"{e['timestamp']} [{e['type']}] {e.get('message', '')}\")
204
+ except: pass
205
+ "
206
+ ```
207
+
208
+ ## Response Format
209
+
210
+ Present health check results as a clean status dashboard:
211
+
212
+ ```
213
+ šŸ–„ System Health — MacBook Pro
214
+ ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
215
+ āœ… Uptime: up 3 days, 4:22
216
+ āœ… CPU: load 0.8 / 1.2 / 1.5 (8 cores)
217
+ āœ… Memory: 6.2GB used / 16GB total (9.8GB free)
218
+ āš ļø Disk /: ā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–‘ā–‘ā–‘ā–‘ 82% (38GB free) ← getting full
219
+ āœ… Disk /Volumes/Data: ā–ˆā–ˆā–ˆā–ˆā–‘ā–‘ā–‘ā–‘ā–‘ā–‘ā–‘ā–‘ā–‘ā–‘ā–‘ā–‘ā–‘ā–‘ā–‘ā–‘ 22% (780GB free)
220
+
221
+ Services:
222
+ āœ… node (PID 4821)
223
+ āœ… postgres (PID 1203)
224
+ āŒ redis not running
225
+ ```
@@ -0,0 +1,147 @@
1
+ ---
2
+ name: image-gen
3
+ description: Generate images using OpenAI's image models (gpt-image-1, DALL-E 3, DALL-E 2). Use when the user asks to create, generate, draw, or illustrate an image, photo, logo, icon, illustration, or visual. Requires OPENAI_API_KEY.
4
+ triggers: generate image, create image, draw, illustrate, make a picture, logo, icon, artwork, visual, dall-e, image generation, render
5
+ ---
6
+
7
+ ## When to Use
8
+
9
+ āœ… Creating images from text descriptions, generating logos/icons, batch image sets, product mockups, illustration requests, visual content
10
+
11
+ āŒ Editing existing images (use gpt-image-1 with input image), video generation, real-time/live images
12
+
13
+ ## Models
14
+
15
+ | Model | Best For | Max Count | Quality Options |
16
+ |-------|---------|-----------|----------------|
17
+ | `gpt-image-1` | Highest quality, follows prompts best | 10/call | `high`, `medium`, `low` |
18
+ | `dall-e-3` | Artistic, stylized | 1/call | `hd`, `standard` |
19
+ | `dall-e-2` | Fast, cheap drafts | 10/call | `standard` |
20
+
21
+ Default: `gpt-image-1` at `high` quality.
22
+
23
+ ## Single Image (Python)
24
+
25
+ ```python
26
+ #!/usr/bin/env python3
27
+ import os, base64, json, urllib.request, urllib.parse
28
+ from datetime import datetime
29
+ from pathlib import Path
30
+
31
+ api_key = os.environ["OPENAI_API_KEY"]
32
+ prompt = "A sleek futuristic dashboard UI in dark mode, neon blue accents, clean typography"
33
+ model = "gpt-image-1" # or "dall-e-3", "dall-e-2"
34
+ size = "1024x1024" # gpt-image-1: 1024x1024, 1536x1024 (landscape), 1024x1536 (portrait)
35
+ quality = "high" # gpt-image-1: high/medium/low | dall-e-3: hd/standard
36
+
37
+ payload = json.dumps({
38
+ "model": model, "prompt": prompt,
39
+ "n": 1, "size": size, "quality": quality,
40
+ "response_format": "b64_json"
41
+ }).encode()
42
+
43
+ req = urllib.request.Request(
44
+ "https://api.openai.com/v1/images/generations",
45
+ data=payload,
46
+ headers={"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
47
+ )
48
+ resp = json.loads(urllib.request.urlopen(req).read())
49
+
50
+ out_dir = Path("/tmp/daemora-images")
51
+ out_dir.mkdir(exist_ok=True)
52
+ ts = datetime.now().strftime("%Y%m%d_%H%M%S")
53
+ out_file = out_dir / f"image_{ts}.png"
54
+ out_file.write_bytes(base64.b64decode(resp["data"][0]["b64_json"]))
55
+ print(f"Saved: {out_file}")
56
+ ```
57
+
58
+ ## Batch Generation
59
+
60
+ ```python
61
+ #!/usr/bin/env python3
62
+ """Generate multiple images with varied prompts and save to a gallery."""
63
+ import os, base64, json, urllib.request, urllib.parse
64
+ from pathlib import Path
65
+ from datetime import datetime
66
+
67
+ API_KEY = os.environ["OPENAI_API_KEY"]
68
+ OUT_DIR = Path("/tmp/daemora-images") / datetime.now().strftime("%Y%m%d_%H%M%S")
69
+ OUT_DIR.mkdir(parents=True, exist_ok=True)
70
+
71
+ PROMPTS = [
72
+ "prompt 1 here",
73
+ "prompt 2 here",
74
+ ]
75
+
76
+ def generate(prompt, model="gpt-image-1", size="1024x1024", quality="high"):
77
+ payload = json.dumps({
78
+ "model": model, "prompt": prompt,
79
+ "n": 1, "size": size, "quality": quality,
80
+ "response_format": "b64_json"
81
+ }).encode()
82
+ req = urllib.request.Request(
83
+ "https://api.openai.com/v1/images/generations",
84
+ data=payload,
85
+ headers={"Authorization": f"Bearer {API_KEY}", "Content-Type": "application/json"}
86
+ )
87
+ return json.loads(urllib.request.urlopen(req).read())["data"][0]["b64_json"]
88
+
89
+ saved = []
90
+ for i, prompt in enumerate(PROMPTS):
91
+ print(f"Generating {i+1}/{len(PROMPTS)}: {prompt[:50]}...")
92
+ try:
93
+ img_b64 = generate(prompt)
94
+ path = OUT_DIR / f"image_{i+1:02d}.png"
95
+ path.write_bytes(base64.b64decode(img_b64))
96
+ saved.append((path, prompt))
97
+ print(f" āœ… Saved: {path}")
98
+ except Exception as e:
99
+ print(f" āŒ Failed: {e}")
100
+
101
+ # Write HTML gallery
102
+ html = f"""<!DOCTYPE html><html><head><title>Generated Images</title>
103
+ <style>body{{background:#111;color:#eee;font-family:sans-serif;padding:20px}}
104
+ .grid{{display:grid;grid-template-columns:repeat(auto-fill,minmax(300px,1fr));gap:16px}}
105
+ img{{width:100%;border-radius:8px}} p{{font-size:12px;opacity:.7;margin:4px 0 12px}}</style></head>
106
+ <body><h2>Daemora Image Generation — {len(saved)} images</h2><div class="grid">
107
+ """
108
+ for path, prompt in saved:
109
+ html += f'<div><img src="{path.name}" alt=""><p>{prompt}</p></div>\n'
110
+ html += "</div></body></html>"
111
+ (OUT_DIR / "index.html").write_text(html)
112
+ print(f"\nGallery: {OUT_DIR / 'index.html'}")
113
+ print(f"Open: open '{OUT_DIR / 'index.html'}'")
114
+ ```
115
+
116
+ ## Size Options by Model
117
+
118
+ ```
119
+ gpt-image-1: 1024x1024 (square) | 1536x1024 (landscape) | 1024x1536 (portrait) | auto
120
+ dall-e-3: 1024x1024 | 1792x1024 (wide) | 1024x1792 (tall)
121
+ dall-e-2: 256x256 | 512x512 | 1024x1024
122
+ ```
123
+
124
+ ## Prompt Writing Tips
125
+
126
+ - **Be specific**: "A minimalist logo for a fintech startup, dark blue on white, geometric sans-serif font" beats "a logo"
127
+ - **Specify style**: photorealistic / watercolor / flat illustration / 3D render / oil painting
128
+ - **Specify lighting**: soft studio lighting / dramatic shadows / golden hour / neon
129
+ - **Specify composition**: bird's eye view / close-up / wide angle / portrait
130
+ - **Negative guidance**: add "no text, no watermarks" if needed
131
+
132
+ ## After Generation
133
+
134
+ Always:
135
+ 1. Save images to `/tmp/daemora-images/` or a user-specified path
136
+ 2. Report the file path(s) in the response
137
+ 3. For multiple images, open the HTML gallery: `executeCommand("open /tmp/daemora-images/*/index.html")`
138
+ 4. If the user wants to send the image, use `sendFile(path, channel, sessionId)`
139
+
140
+ ## Error Handling
141
+
142
+ | Error | Fix |
143
+ |-------|-----|
144
+ | 400 content policy | Rephrase prompt, remove potentially flagged terms |
145
+ | 401 unauthorized | Check `OPENAI_API_KEY` is set and valid |
146
+ | 429 rate limit | Wait 10s, retry with `n=1` |
147
+ | 503 unavailable | Retry after 30s; fall back to dall-e-3 |
@@ -0,0 +1,182 @@
1
+ ---
2
+ name: model-usage
3
+ description: Track and report AI model API usage, costs, token counts, and spending across OpenAI, Anthropic, and Google. Use when the user asks how much they've spent, their API usage, token counts, cost breakdown, daily/monthly costs, or wants to optimize AI spending. Also check Daemora's built-in cost tracking.
4
+ triggers: model usage, api cost, token usage, spending, how much spent, cost breakdown, openai usage, anthropic usage, api usage, cost report, ai spending, daily cost, monthly cost
5
+ ---
6
+
7
+ ## When to Use
8
+
9
+ āœ… Check today's/this month's API costs, per-model breakdown, token counts, cost trends, optimization suggestions, Daemora internal cost report
10
+
11
+ āŒ Real-time streaming token counts — use the Daemora `/costs/today` endpoint for live data
12
+
13
+ ## Daemora Built-in Cost Tracking
14
+
15
+ ```bash
16
+ # Today's costs (all models, all tasks)
17
+ curl -s http://localhost:8081/costs/today | python3 -c "
18
+ import sys, json
19
+ d = json.load(sys.stdin)
20
+ print(f\"Today's spend: \${d.get('totalCost', 0):.4f}\")
21
+ print(f\"Tasks: {d.get('taskCount', 0)}\")
22
+ print(f\"Tokens: {d.get('totalTokens', 0):,} ({d.get('inputTokens',0):,} in / {d.get('outputTokens',0):,} out)\")
23
+ if 'byModel' in d:
24
+ print('\\nBy model:')
25
+ for model, cost in sorted(d['byModel'].items(), key=lambda x: -x[1]):
26
+ print(f\" {model:<40} \${cost:.4f}\")
27
+ "
28
+ ```
29
+
30
+ ## Parse Daemora Cost Log Directly
31
+
32
+ ```python
33
+ #!/usr/bin/env python3
34
+ """Parse Daemora's JSONL cost log for detailed analysis."""
35
+ import json, os
36
+ from pathlib import Path
37
+ from datetime import datetime, date, timedelta
38
+ from collections import defaultdict
39
+
40
+ COST_DIR = Path("data/costs") # adjust if different
41
+
42
+ def load_cost_log(days: int = 30) -> list[dict]:
43
+ """Load cost entries from the last N days."""
44
+ entries = []
45
+ cutoff = date.today() - timedelta(days=days)
46
+ for log_file in sorted(COST_DIR.glob("*.jsonl")):
47
+ try:
48
+ file_date = date.fromisoformat(log_file.stem)
49
+ if file_date < cutoff:
50
+ continue
51
+ except:
52
+ pass
53
+ for line in log_file.read_text().splitlines():
54
+ if line.strip():
55
+ try:
56
+ entries.append(json.loads(line))
57
+ except:
58
+ pass
59
+ return entries
60
+
61
+ def cost_report(days: int = 7):
62
+ entries = load_cost_log(days)
63
+ if not entries:
64
+ print("No cost data found")
65
+ return
66
+
67
+ by_day = defaultdict(float)
68
+ by_model = defaultdict(float)
69
+ by_tenant = defaultdict(float)
70
+ total_cost = 0
71
+ total_in = 0
72
+ total_out = 0
73
+
74
+ for e in entries:
75
+ cost = e.get("estimatedCost", 0) or 0
76
+ ts = e.get("timestamp", "")[:10]
77
+ model = e.get("modelId", "unknown")
78
+ tenant = e.get("tenantId", "default")
79
+
80
+ total_cost += cost
81
+ total_in += e.get("inputTokens", 0) or 0
82
+ total_out += e.get("outputTokens", 0) or 0
83
+ by_day[ts] += cost
84
+ by_model[model] += cost
85
+ by_tenant[tenant] += cost
86
+
87
+ print(f"šŸ“Š Cost Report — Last {days} days")
88
+ print(f"{'═'*50}")
89
+ print(f"Total spend: ${total_cost:.4f}")
90
+ print(f"Total tokens: {total_in+total_out:,} ({total_in:,} in / {total_out:,} out)")
91
+ print(f"Tasks: {len(entries)}")
92
+
93
+ print(f"\nšŸ“… Daily breakdown:")
94
+ for day in sorted(by_day.keys(), reverse=True)[:days]:
95
+ bar = 'ā–ˆ' * int(by_day[day] / max(by_day.values()) * 20) if by_day else ''
96
+ print(f" {day} {bar:<20} ${by_day[day]:.4f}")
97
+
98
+ print(f"\nšŸ¤– By model:")
99
+ for model, cost in sorted(by_model.items(), key=lambda x: -x[1]):
100
+ pct = cost / total_cost * 100 if total_cost else 0
101
+ print(f" {model:<40} ${cost:.4f} ({pct:.0f}%)")
102
+
103
+ if len(by_tenant) > 1:
104
+ print(f"\nšŸ‘¤ By tenant:")
105
+ for tenant, cost in sorted(by_tenant.items(), key=lambda x: -x[1]):
106
+ print(f" {tenant:<30} ${cost:.4f}")
107
+
108
+ cost_report(days=7)
109
+ ```
110
+
111
+ ## OpenAI Usage via API
112
+
113
+ ```python
114
+ #!/usr/bin/env python3
115
+ """Check OpenAI API usage and costs for current month."""
116
+ import os, json, urllib.request
117
+ from datetime import date
118
+
119
+ API_KEY = os.environ.get("OPENAI_API_KEY", "")
120
+ if not API_KEY:
121
+ print("OPENAI_API_KEY not set")
122
+ exit(1)
123
+
124
+ # OpenAI usage endpoint (requires org-level key for detailed breakdown)
125
+ today = date.today()
126
+ start = f"{today.year}-{today.month:02d}-01"
127
+ end = today.isoformat()
128
+
129
+ req = urllib.request.Request(
130
+ f"https://api.openai.com/v1/usage?date={today.isoformat()}",
131
+ headers={"Authorization": f"Bearer {API_KEY}"}
132
+ )
133
+ try:
134
+ resp = json.loads(urllib.request.urlopen(req).read())
135
+ # Parse usage data
136
+ data = resp.get("data", [])
137
+ total_requests = sum(d.get("n_requests", 0) for d in data)
138
+ total_tokens = sum(d.get("n_context_tokens_total", 0) + d.get("n_generated_tokens_total", 0) for d in data)
139
+ print(f"OpenAI Usage — {today}")
140
+ print(f"Requests: {total_requests:,}")
141
+ print(f"Tokens: {total_tokens:,}")
142
+ print("\nNote: Cost data available in OpenAI dashboard → Usage")
143
+ except Exception as e:
144
+ print(f"Could not fetch OpenAI usage: {e}")
145
+ print("Check usage at: https://platform.openai.com/usage")
146
+ ```
147
+
148
+ ## Cost Optimization Suggestions
149
+
150
+ When a user asks about costs, always include optimization tips:
151
+
152
+ ```
153
+ šŸ’” Cost Optimization Tips:
154
+
155
+ 1. **Model routing** — Use CODE_MODEL=anthropic:claude-sonnet-4-6 for code,
156
+ RESEARCH_MODEL=google:gemini-2.0-flash for research (fast + cheap).
157
+ Simple tasks don't need the most expensive model.
158
+
159
+ 2. **Sub-agent profiles** — spawnAgent with profile="researcher" uses the
160
+ cheaper RESEARCH_MODEL, not the default expensive model.
161
+
162
+ 3. **Per-tenant limits** — Set maxDailyCost per user to prevent runaway spending:
163
+ `daemora tenant set <id> maxDailyCost 1.00`
164
+
165
+ 4. **Global daily cap** — Set MAX_DAILY_COST=5.00 in .env
166
+
167
+ 5. **Task cost cap** — Set MAX_COST_PER_TASK=0.25 to kill expensive tasks early
168
+
169
+ 6. **Check cost/task** — Use /costs/today to see which tasks are expensive,
170
+ then route those to cheaper models
171
+ ```
172
+
173
+ ## Cost Estimates (approximate, 2026 pricing)
174
+
175
+ | Model | Input (per 1M) | Output (per 1M) | Good for |
176
+ |-------|---------------|-----------------|---------|
177
+ | gpt-4.1-mini | $0.15 | $0.60 | Most tasks |
178
+ | gpt-4.1 | $2.00 | $8.00 | Complex reasoning |
179
+ | claude-sonnet-4-6 | $3.00 | $15.00 | Code, analysis |
180
+ | claude-opus-4-6 | $15.00 | $75.00 | Hard problems only |
181
+ | gemini-2.0-flash | $0.075 | $0.30 | Research, summaries |
182
+ | gemini-2.5-pro | $1.25 | $10.00 | Long context |