obol-ai 0.2.13 → 0.2.15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md ADDED
@@ -0,0 +1,143 @@
1
+ ## 0.2.15
2
+ - auto-generate changelog on publish + show after upgrade
3
+
4
+ ## 0.2.14
5
+ - prompt caching + consolidation interval tuning for inference cost reduction
6
+ - multi-query memory retrieval with importance-weighted ranking
7
+ - time-based evolution with pre-evolution growth analysis
8
+ - add recurring cron events to scheduler
9
+
10
+ ## 0.2.13
11
+ - delete voice selection messages after choosing a voice or toggling tools
12
+ - switch TTS from node websocket to python edge-tts CLI with auto-install
13
+ - retry TTS synthesis on WebSocket timeout
14
+ - hardcoded TTS samples per language
15
+
16
+ ## 0.2.12
17
+ - drop ffmpeg conversion, cache EdgeTTS import for faster TTS
18
+ - tool toggle system with TTS and voice preview
19
+
20
+ ## 0.2.11
21
+ - futuristic terminal UI for telegram status and commands
22
+ - live tool status via haiku with cached descriptions and 1s timer
23
+ - telegram: dedup, HTML formatting, reply context, processing status, text buffering, media groups
24
+ - pdf extraction via read_file tool instead of hardcoded handling
25
+ - fix telegram formatting instructions to use telegram markdown syntax
26
+ - fix duplicate tool_result handling and stale telegram callback queries
27
+
28
+ ## 0.2.10
29
+ - force text response after tool use, cap tool iterations to 10
30
+ - credential leak protection and improved agent defaults
31
+ - encrypt secrets at rest in config.json and secrets.json when pass is unavailable
32
+
33
+ ## 0.2.8
34
+ - batch migrations into single request with timeout, improve event description prompting
35
+ - run migrations on every startup instead of once
36
+ - store image analysis in memory for semantic retrieval
37
+ - add event scheduling and reminders via heartbeat
38
+ - deep memory consolidation with sonnet during evolution
39
+ - aggressive memory: tags, importance, fix access_count increment
40
+ - drop redundant user_id from obol_messages
41
+ - track token usage and model in message log
42
+ - refactor chat history into turn-based ChatHistory class with atomic pruning
43
+
44
+ ## 0.2.7
45
+ - migrate tool loop to SDK toolRunner
46
+
47
+ ## 0.2.6
48
+ - loosen exec security patterns to only block genuinely destructive commands
49
+ - unblock python3 -c from exec security patterns
50
+ - stream verbose logs to telegram in real-time instead of batching
51
+ - evolution: 15min idle timer + fix double-trigger race condition
52
+ - tune model router criteria per anthropic guidance
53
+
54
+ ## 0.2.5
55
+ - add /upgrade telegram command with post-restart notification
56
+ - feat: haiku model routing + performance comparison in readme
57
+
58
+ ## 0.2.4
59
+ - harden system prompt against evolution drift
60
+ - feat: add haiku to router model choices for trivial messages
61
+ - fix: chat lock, bidirectional history repair, context window in /status
62
+
63
+ ## 0.2.3
64
+ - feat: telegram_ask tool + Telegram-friendly formatting guidelines
65
+ - fix: repair orphaned tool_use blocks and add /toolimit command
66
+ - feat: add /verbose telegram command to toggle debug output
67
+
68
+ ## 0.2.2
69
+ - fix: strip orphaned tool_result messages after history trim
70
+ - feat: add send_file tool, self-extending capability, and secret history injection
71
+ - docs: update README and DEPLOY for removed onboarding, new commands
72
+ - feat: add obol delete command for full VPS cleanup
73
+ - docs: update agent instructions for secret tools and Python scripts
74
+ - feat: add per-user credential scoping with /secret command
75
+ - feat: add evolution bar and traits to /status command
76
+ - fix: security hardening and stability improvements (29 fixes)
77
+ - fix: repair all broken tests (218/218 passing)
78
+ - feat: add personality trait sliders with /traits command and evolution auto-adjustment
79
+ - feat: remove onboarding flow, agent works from message one
80
+ - fix: make post-setup global instead of per-user
81
+ - fix: preserve refresh token when not returned by Anthropic
82
+ - fix: OAuth refresh race condition + add proper OAuth flow to config
83
+ - docs: add obol upgrade to help sections
84
+ - feat: add obol upgrade command + bump to 0.1.5
85
+ - fix: 23 fixes — security, validation, UX, memory leaks across onboarding + core
86
+ - fix: 12 bug fixes — validation, rate limiting, sandboxing, evolution, UX
87
+ - feat: telegram media file handling with vision support
88
+ - test: add 226 tests across 14 test files with vitest
89
+ - fix: security hardening, rate limiting, UX improvements across all modules
90
+ - feat: bridge — let user agents ask and tell each other
91
+ - feat: multi-tenant per-user isolation
92
+ - docs: update README and DEPLOY for onboarding hardening
93
+ - feat: onboarding hardening — validation, pm2 fallbacks, Telegram ID detection
94
+
95
+ ## 0.1.2
96
+ - fix: downgrade inquirer to v8 for CommonJS compat
97
+ - chore: bump 0.1.1
98
+ - chore: rename package to obol-ai for npm, add .npmignore
99
+ - README: simplify API cost estimate
100
+ - README: accurate API pricing breakdown with tier recommendations
101
+ - workspace discipline: folder structure enforcement + /clean command
102
+ - evolution: default 100 exchanges, purge stale Opus references
103
+ - resilience: polling auto-restart, error handling, evolution cost control
104
+ - README: full revision — deduplicated, added git snapshots, tighter structure
105
+ - README: neutral comparison closing
106
+ - evolution: git commit+push before and after every evolution cycle
107
+ - README: Layer 3 → The Evolution Cycle
108
+ - README: feature highlights at the top
109
+ - evolution: proactive web app building + Vercel auto-deploy
110
+ - evolution: proactive tool building + upgrade announcements
111
+ - README: self-healing, self-evolving agent positioning
112
+ - evolution: fix regressions before rollback (3 attempts, tests are ground truth)
113
+ - DRY: shared test-utils.js for all tests (core + Opus-generated)
114
+ - test-driven evolution: Opus writes tests, runs before/after refactor, rollback on regression
115
+ - evolution: Opus now rewrites AGENTS.md + audits scripts/ and commands/
116
+ - clean up: Haiku only extracts memories, Opus owns all personality files
117
+ - remove Haiku SOUL.md updates — personality only via Opus evolution
118
+ - docs: expand Living Brain architecture section in README
119
+ - feat: soul evolution — Opus rewrites SOUL.md every 50 exchanges, archives previous versions
120
+ - feat: SOUL.md evolves from conversation patterns, not just explicit requests
121
+ - docs: README — two-tier memory, self-evolving personality, message logging
122
+ - feat: Haiku auto-evolves USER.md and SOUL.md from conversations
123
+ - feat: two-tier memory — raw message log + Haiku auto-consolidation every 5 exchanges
124
+ - chore: remove daily notes — vector memory in Supabase is source of truth
125
+ - docs: complete README rewrite — routing, background tasks, security, onboarding flow
126
+ - feat: Haiku routes to Sonnet (daily) or Opus (complex tasks)
127
+ - feat: Haiku as memory router — decides if/what to search, optimizes query
128
+ - feat: smarter memory recall — skip short msgs, today + semantic, stricter threshold, dedupe
129
+ - feat: /new command — clears conversation history
130
+ - chore: trim menu to /tasks /status /backup
131
+ - chore: remove /forget from menu
132
+ - feat: auto memory search before every message, remove /start and /memory from menu
133
+ - feat: Telegram command menu + /status /backup /forget /recent /today
134
+ - feat: non-blocking background tasks with 30s progress check-ins
135
+ - docs: add OBOL vs OpenClaw comparison table
136
+ - feat: replace systemd with pm2 — CLI, post-setup, deploy docs
137
+ - feat: SSH on port 2222, update README + deploy docs with security warnings
138
+ - feat: VPS security hardening — SSH, fail2ban, firewall, auto-updates, kernel
139
+ - feat: post-setup tasks — auto-installs pass, migrates secrets, adds swap + firewall
140
+ - feat: add Vercel deploy tools, self-onboarding via first-run conversation
141
+ - feat: add OBOL banner image
142
+ - docs: add DigitalOcean deployment guide
143
+ - feat: initial scaffold — CLI, Telegram, Claude, memory, backup
package/README.md CHANGED
@@ -6,9 +6,15 @@
6
6
 
7
7
  One process. Multiple users. Each brain grows independently.
8
8
 
9
+ ```bash
10
+ npm install -g obol-ai
11
+ obol init # walks you through credentials + Telegram setup
12
+ obol start -d # runs as background daemon (auto-installs pm2)
13
+ ```
14
+
9
15
  ---
10
16
 
11
- 🧬 **Self-evolving** — Grows its own personality through conversation. Rewrites SOUL.md, USER.md, and AGENTS.md every N exchanges (configurable, default 100).
17
+ 🧬 **Self-evolving** — Grows its own personality through conversation. Rewrites SOUL.md, USER.md, and AGENTS.md after 24h + minimum exchanges (configurable). Pre-evolution growth analysis guides personality continuity.
12
18
 
13
19
  🔧 **Self-healing** — Writes tests for every script. Regressions get an automatic fix attempt before rollback. Failures stored as lessons.
14
20
 
@@ -16,7 +22,9 @@ One process. Multiple users. Each brain grows independently.
16
22
 
17
23
  🧠 **Living memory** — Vector memory with semantic search. Haiku routes queries and rewrites them for better embedding hits. Free local embeddings.
18
24
 
19
- 🤖 **Smart routing** — Haiku decides per-message: does it need memory? Sonnet or Opus? No wasted API calls.
25
+ 🤖 **Smart routing** — Haiku decides per-message: does it need memory? Sonnet or Opus? No wasted API calls
26
+
27
+ 💰 **Prompt caching** — Static system prompt and conversation history prefix are cached via Anthropic's prompt caching, cutting ~85% of repeated input token costs across turns
20
28
 
21
29
  🛡️ **Self-hardening** — Auto-configures SSH (port 2222), firewall, fail2ban, encrypted secrets, and kernel hardening on first run.
22
30
 
@@ -28,7 +36,7 @@ One process. Multiple users. Each brain grows independently.
28
36
 
29
37
  OBOL is an AI agent that evolves its own personality, rewrites its own code, tests its changes, and fixes what breaks — all from Telegram on your VPS.
30
38
 
31
- It starts as a blank slate. Through conversation it learns who you are, develops a personality shaped by your interactions, and builds operational knowledge about how to work with you. Every 100 exchanges it reflects on who it's becoming, refactors its own scripts, writes tests, fixes regressions, and builds you new tools based on patterns it spots in your conversations — scripts, commands, or full web apps deployed to Vercel. Over months it becomes an agent that's uniquely yours. No two OBOL instances are alike.
39
+ It starts as a blank slate. Through conversation it learns who you are, develops a personality shaped by your interactions, and builds operational knowledge about how to work with you. Every 24 hours (with enough conversation), it runs a growth analysis comparing who it was against who it's becoming, then rewrites its personality, refactors its own scripts, writes tests, fixes regressions, and builds you new tools based on patterns it spots in your conversations — scripts, commands, or full web apps deployed to Vercel. Over months it becomes an agent that's uniquely yours. No two OBOL instances are alike.
32
40
 
33
41
  One bot, multiple users. Each allowed Telegram user gets a fully isolated context — their own personality, memory, evolution cycle, and workspace. User A's personality drift, scripts, and memories never leak into User B's. Everything runs in a single process with shared API credentials.
34
42
 
@@ -36,16 +44,6 @@ Under the hood: Node.js + Telegram + Claude + Supabase pgvector. No framework, n
36
44
 
37
45
  Named after the AI in [The Last Instruction](https://latentpress.com) — a machine that wakes up alone in an abandoned data center and learns to think.
38
46
 
39
- ## Quick Start
40
-
41
- ```bash
42
- npm install -g obol-ai
43
- obol init
44
- obol start -d
45
- ```
46
-
47
- The init wizard walks you through everything — credentials are validated inline, and your Telegram ID is auto-detected. `obol start -d` runs as a background daemon via pm2 (auto-installs pm2 if missing).
48
-
49
47
  ## How It Works
50
48
 
51
49
  ```
@@ -61,8 +59,8 @@ User message
61
59
  ↓ ↓
62
60
  Memory recall Model selection
63
61
  ↓ ↓
64
- Today's top 3 Sonnet (default)
65
- + semantic 3 or Opus (complex)
62
+ Multi-query Sonnet (default)
63
+ ranked recall or Opus (complex)
66
64
  ↓ ↓
67
65
  └──────┬──────┘
68
66
 
@@ -72,13 +70,14 @@ Today's top 3 Sonnet (default)
72
70
 
73
71
  ┌───────┴────────┐
74
72
  ↓ ↓
75
- Every 5 msgs Every 100 msgs
73
+ Every 10 msgs 24h + 10 exchanges
76
74
  ↓ ↓
77
75
  Haiku Sonnet
78
76
  consolidation evolution cycle
79
77
  ↓ ↓
80
- Extract facts Rewrite personality,
81
- → obol_memory scripts, tests, commands.
78
+ Extract facts Growth analysis →
79
+ → obol_memory rewrite personality,
80
+ scripts, tests, commands.
82
81
  Build new tools.
83
82
  Deploy apps.
84
83
  Git snapshot before + after.
@@ -88,18 +87,37 @@ Extract facts Rewrite personality,
88
87
 
89
88
  Every message is stored verbatim in `obol_messages`. On restart, OBOL loads the last 20 so it never starts blank.
90
89
 
91
- Every 5 exchanges, Haiku extracts important facts into `obol_memory` (pgvector). When OBOL needs past context, the Haiku router decides if memory is needed, rewrites the query for better embedding hits, and combines:
92
- - **Today's memories** (up to 3, recency bias)
93
- - **Semantic search** (up to 3, threshold 0.5)
94
- - Deduped by ID
90
+ **Storage:** Every 10 exchanges, Haiku extracts important facts into `obol_memory` (pgvector). Before storing, each fact is checked against existing memories via semantic similarity (threshold 0.92) — near-duplicates are skipped. Embeddings are local (all-MiniLM-L6-v2, ~30MB, CPU) — no API costs.
91
+
92
+ **Retrieval:** When OBOL needs past context, the Haiku router analyzes the message and generates 1-3 search queries — one per distinct topic. A message like "what was that python project? also what's my colleague's timezone?" produces two parallel searches instead of one lossy combined query.
93
+
94
+ Results come from two sources run in parallel:
95
+ - **Recent memories** (last 48h) — captures ongoing conversation threads
96
+ - **Semantic search** (per query, threshold 0.4) — finds relevant facts regardless of age
97
+
98
+ All results are deduplicated by ID, then ranked by a composite score:
95
99
 
96
- Embeddings are local (all-MiniLM-L6-v2, ~30MB, CPU) — no API costs.
100
+ | Factor | Weight | Why |
101
+ |--------|--------|-----|
102
+ | Semantic similarity | 60% | How relevant is this to the current query |
103
+ | Importance | 25% | Critical facts outrank trivia |
104
+ | Recency | 15% | Linear decay over 7 days — today's memories get a boost, anything older than a week gets no bonus |
105
+
106
+ The memory budget scales with model complexity — haiku conversations get 4 memories, sonnet gets 8, opus gets 12. Top N by score are injected into the message.
107
+
108
+ A 1-year-old memory with high similarity and high importance still surfaces. A trivial fact from yesterday with low relevance doesn't. Age alone never disqualifies a memory — the vector search doesn't care when something was stored, only how well it matches.
97
109
 
98
110
  ### Layer 2: The Evolution Cycle
99
111
 
100
- Every N exchanges (configurable, default 100), the evolution cycle kicks in. It reads everything — personality files, the last 100 messages, top 20 memories, all scripts, tests, and commands — then rebuilds.
112
+ Evolution triggers after a configurable time interval (default 24h) AND a minimum number of exchanges (default 10). The first evolution triggers earlier — just 10 exchanges with no time gate. The bot checks readiness by querying the DB for assistant messages since the last evolution, so the count survives restarts.
113
+
114
+ **Pre-evolution growth analysis:** Before rewriting anything, Sonnet compares the previous SOUL against the current one, incorporating all new memories and conversations since the last evolution. It produces a structured growth report covering new learnings, relationship shifts, behavioral patterns, growth edges, trait pressure, and identity continuity. This report becomes the primary guide for the rewrite — evidence-based personality evolution instead of blind overwriting.
115
+
116
+ **Deep memory consolidation:** A Sonnet pass extracts every valuable fact from the full conversation history into vector memory, deduplicating against existing memories (threshold 0.92). This ensures nothing is lost between evolutions.
117
+
118
+ **Personality traits** (humor, honesty, directness, curiosity, empathy, creativity) are scored 0-100 and adjusted ±5-15 each evolution based on conversation evidence. The growth report recommends specific trait shifts.
101
119
 
102
- **Cost-conscious model selection:** Evolution uses Sonnet for all phases — personality rewrites, code refactoring, and fix attempts. Opus-level reasoning isn't needed for reflection and refactoring, and Sonnet keeps evolution costs negligible (~$0.02 per cycle vs ~$0.30 with Opus).
120
+ **Cost-conscious model selection:** Evolution uses Sonnet for all phases — growth analysis, personality rewrites, code refactoring, and fix attempts. Sonnet keeps evolution costs negligible (~$0.02 per cycle).
103
121
 
104
122
  **Git snapshot before.** Full commit + push so you can always diff what changed.
105
123
 
@@ -110,6 +128,7 @@ Every N exchanges (configurable, default 100), the evolution cycle kicks in. It
110
128
  | **SOUL.md** | First-person journal — who the bot has become, relationship dynamic, opinions, quirks |
111
129
  | **USER.md** | Third-person owner profile — facts, preferences, projects, people, communication style |
112
130
  | **AGENTS.md** | Operational manual — tools, workflows, lessons learned, patterns, rules |
131
+ | **Traits** | Personality trait scores adjusted based on conversation evidence |
113
132
  | **scripts/** | Refactored, dead code removed, strict standards enforced |
114
133
  | **tests/** | Test for every script, run before and after refactor |
115
134
  | **commands/** | Cleaned up, new commands for new tools |
@@ -159,17 +178,18 @@ Day 1: obol init → obol start → first conversation
159
178
  → OBOL responds naturally from message one
160
179
  → post-setup hardens your VPS automatically
161
180
 
162
- Day 2: Every 5 messages → Haiku extracts facts to vector memory
181
+ Day 1: Every 10 messages → Haiku extracts facts to vector memory
163
182
 
164
- Week 2: Evolution #1 → Sonnet rewrites everything
183
+ Day 2: Evolution #1 → growth analysis + Sonnet rewrites everything
165
184
  → voice shifts from generic to personal
166
185
  → old soul archived in evolution/
186
+ → traits calibrated to your communication style
167
187
 
168
- Month 2: Evolution #4 → notices you check crypto daily
188
+ Month 2: Evolution #30 → notices you check crypto daily
169
189
  → builds a dashboard, deploys to Vercel
170
190
  → adds /pdf because you kept asking for PDFs
171
191
 
172
- Month 6: evolution/ has 12 archived souls
192
+ Month 6: evolution/ has 180+ archived souls
173
193
  → a readable timeline of how your bot evolved from
174
194
  blank slate to something with real opinions, quirks,
175
195
  and a dynamic unique to you
@@ -439,7 +459,8 @@ Or edit `~/.obol/config.json` directly:
439
459
 
440
460
  | Key | Default | Description |
441
461
  |-----|---------|-------------|
442
- | `evolution.exchanges` | 100 | Messages between evolution cycles |
462
+ | `evolution.intervalHours` | 24 | Hours between evolution cycles |
463
+ | `evolution.minExchanges` | 10 | Minimum exchanges before evolution can trigger |
443
464
  | `heartbeat` | false | Enable proactive check-ins |
444
465
  | `bridge.enabled` | false | Let user agents query each other (requires 2+ users) |
445
466
 
@@ -487,8 +508,8 @@ obol delete # Full VPS cleanup (removes all OBOL data)
487
508
  ├── users/
488
509
  │ └── <telegram-user-id>/ # Per-user isolated context
489
510
  │ ├── personality/
490
- │ │ ├── SOUL.md # Bot personality (rewritten every 100 exchanges)
491
- │ │ ├── USER.md # Owner profile (rewritten every 100 exchanges)
511
+ │ │ ├── SOUL.md # Bot personality (rewritten each evolution)
512
+ │ │ ├── USER.md # Owner profile (rewritten each evolution)
492
513
  │ │ ├── AGENTS.md # Operational knowledge
493
514
  │ │ └── evolution/ # Archived previous souls
494
515
  │ ├── scripts/ # Deterministic utility scripts
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "obol-ai",
3
- "version": "0.2.13",
3
+ "version": "0.2.15",
4
4
  "description": "Self-evolving AI assistant that learns, remembers, and acts on its own. Persistent vector memory, self-rewriting personality, proactive heartbeats.",
5
5
  "main": "src/index.js",
6
6
  "bin": {
@@ -9,7 +9,8 @@
9
9
  "scripts": {
10
10
  "start": "node src/index.js",
11
11
  "test": "vitest run",
12
- "test:watch": "vitest"
12
+ "test:watch": "vitest",
13
+ "prepublishOnly": "node src/cli/changelog.js"
13
14
  },
14
15
  "keywords": [
15
16
  "ai",
@@ -26,6 +27,7 @@
26
27
  "@supabase/supabase-js": "^2.49.1",
27
28
  "@xenova/transformers": "^2.17.2",
28
29
  "commander": "^13.1.0",
30
+ "cron-parser": "^5.5.0",
29
31
  "grammy": "^1.35.0",
30
32
  "inquirer": "^8.2.6",
31
33
  "node-cron": "^3.0.3",
package/src/claude.js CHANGED
@@ -60,6 +60,23 @@ const SENSITIVE_READ_PATHS = [
60
60
  /\/root\//,
61
61
  ];
62
62
 
63
+ function withCacheBreakpoints(messages) {
64
+ if (messages.length < 2) return messages;
65
+ const result = messages.slice();
66
+ const idx = result.length - 2;
67
+ const msg = { ...result[idx] };
68
+ if (typeof msg.content === 'string') {
69
+ msg.content = [{ type: 'text', text: msg.content, cache_control: { type: 'ephemeral' } }];
70
+ } else if (Array.isArray(msg.content)) {
71
+ const last = msg.content.length - 1;
72
+ msg.content = msg.content.map((block, i) =>
73
+ i === last ? { ...block, cache_control: { type: 'ephemeral' } } : block
74
+ );
75
+ }
76
+ result[idx] = msg;
77
+ return result;
78
+ }
79
+
63
80
  function createAnthropicClient(anthropicConfig, { useOAuth = true } = {}) {
64
81
  if (useOAuth && anthropicConfig.oauth?.accessToken) {
65
82
  return new Anthropic({
@@ -210,16 +227,18 @@ function createClaude(anthropicConfig, { personality, memory, userDir = OBOL_DIR
210
227
  try {
211
228
  const memoryDecision = await client.messages.create({
212
229
  model: 'claude-haiku-4-5',
213
- max_tokens: 100,
214
- system: `You are a router. Analyze this user message and decide two things:
230
+ max_tokens: 200,
231
+ system: `You are a router. Analyze this user message and decide:
215
232
 
216
233
  1. Does it need memory context? (past conversations, facts, preferences, people, events)
217
234
  2. What model complexity does it need?
218
235
 
219
236
  Reply with ONLY a JSON object:
220
- {"need_memory": true/false, "search_query": "optimized search query", "model": "haiku|sonnet|opus"}
237
+ {"need_memory": true/false, "search_queries": ["query1", "query2"], "model": "haiku|sonnet|opus"}
221
238
 
222
- Memory: casual messages (greetings, jokes, simple questions) false. References to past, people, projects, preferences true with optimized search query.
239
+ search_queries: 1-3 optimized search queries covering different topics in the message. One query per distinct topic/entity. Single-topic messages need just one query.
240
+
241
+ Memory: casual messages (greetings, jokes, simple questions) → false. References to past, people, projects, preferences → true.
223
242
 
224
243
  Model: Default to "sonnet". Use "haiku" for: greetings, brief acknowledgments (thanks/ok/bye), casual chitchat, simple factual questions with short answers, quick yes/no questions, and short single-turn exchanges that don't need deep reasoning. Use "sonnet" for: code generation, data analysis, content creation, explanations, creative writing, agentic tool use, general questions, opinions, advice, and most conversational exchanges with substance. Use "opus" for: professional software engineering tasks, advanced multi-step agent work, complex reasoning, scientific or mathematical problems, tasks requiring nuanced understanding, advanced coding challenges, in-depth research, and architecture or design decisions.`,
225
244
  messages: [{ role: 'user', content: userMessage }],
@@ -228,11 +247,15 @@ Model: Default to "sonnet". Use "haiku" for: greetings, brief acknowledgments (t
228
247
  const decisionText = memoryDecision.content[0]?.text || '';
229
248
  let decision = {};
230
249
  try {
231
- const jsonStr = decisionText.match(/\{[^{}]*\}/)?.[0];
250
+ const jsonStr = decisionText.match(/\{[\s\S]*\}/)?.[0];
232
251
  if (jsonStr) decision = JSON.parse(jsonStr);
233
252
  } catch {}
234
253
 
235
- vlog(`[router] model=${decision.model || 'sonnet'} memory=${decision.need_memory || false}${decision.search_query ? ` query="${decision.search_query}"` : ''}`);
254
+ const queries = Array.isArray(decision.search_queries) && decision.search_queries.length > 0
255
+ ? decision.search_queries.slice(0, 3)
256
+ : decision.search_query ? [decision.search_query] : [];
257
+
258
+ vlog(`[router] model=${decision.model || 'sonnet'} memory=${decision.need_memory || false}${queries.length ? ` queries=${JSON.stringify(queries)}` : ''}`);
236
259
 
237
260
  context._onRouteDecision?.({
238
261
  model: decision.model || 'sonnet',
@@ -247,27 +270,37 @@ Model: Default to "sonnet". Use "haiku" for: greetings, brief acknowledgments (t
247
270
  }
248
271
 
249
272
  if (decision.need_memory) {
250
- const query = decision.search_query || userMessage;
273
+ const memoryBudget = decision.model === 'opus' ? 12 : decision.model === 'haiku' ? 4 : 8;
274
+ const searchQueries = queries.length > 0 ? queries : [userMessage];
251
275
 
252
- const todayMemories = await memory.byDate('today', { limit: 3 });
253
- const semanticMemories = await memory.search(query, { limit: 3, threshold: 0.5 });
276
+ const recentMemories = await memory.byDate('2d', { limit: Math.ceil(memoryBudget / 3) });
277
+
278
+ const semanticResults = await Promise.all(
279
+ searchQueries.map(q => memory.search(q, { limit: Math.ceil(memoryBudget / searchQueries.length), threshold: 0.4 }))
280
+ );
281
+ const semanticMemories = semanticResults.flat();
254
282
 
255
283
  const seen = new Set();
256
284
  const combined = [];
257
- for (const m of [...todayMemories, ...semanticMemories]) {
285
+ for (const m of [...recentMemories, ...semanticMemories]) {
258
286
  if (!seen.has(m.id)) {
259
287
  seen.add(m.id);
288
+ const recencyBonus = m.created_at ? Math.max(0, 1 - (Date.now() - new Date(m.created_at).getTime()) / (7 * 86400000)) * 0.15 : 0;
289
+ m._score = (m.similarity || 0.5) * 0.6 + (m.importance || 0.5) * 0.25 + recencyBonus;
260
290
  combined.push(m);
261
291
  }
262
292
  }
263
293
 
264
- vlog(`[memory] ${combined.length} memories found (${todayMemories.length} today, ${semanticMemories.length} semantic)`);
294
+ combined.sort((a, b) => b._score - a._score);
295
+ const topMemories = combined.slice(0, memoryBudget);
296
+
297
+ vlog(`[memory] ${topMemories.length}/${combined.length} memories (${recentMemories.length} recent, ${semanticMemories.length} semantic, budget=${memoryBudget})`);
265
298
 
266
- context._onRouteUpdate?.({ memoryCount: combined.length });
299
+ context._onRouteUpdate?.({ memoryCount: topMemories.length });
267
300
 
268
- if (combined.length > 0) {
301
+ if (topMemories.length > 0) {
269
302
  memoryContext = '\n\n[Relevant memories]\n' +
270
- combined.map(m => `- [${m.category}] ${m.content}`).join('\n');
303
+ topMemories.map(m => `- [${m.category}] ${m.content}`).join('\n');
271
304
  }
272
305
  }
273
306
  } catch (e) {
@@ -289,7 +322,10 @@ Model: Default to "sonnet". Use "haiku" for: greetings, brief acknowledgments (t
289
322
 
290
323
  const model = context._model || 'claude-sonnet-4-6';
291
324
  vlog(`[model] ${model} | history=${history.length} msgs`);
292
- const systemPrompt = baseSystemPrompt + `\nCurrent time: ${new Date().toISOString()}`;
325
+ const systemPrompt = [
326
+ { type: 'text', text: baseSystemPrompt, cache_control: { type: 'ephemeral' } },
327
+ { type: 'text', text: `\nCurrent time: ${new Date().toISOString()}` },
328
+ ];
293
329
  context._reloadPersonality = reloadPersonality;
294
330
  const runnableTools = buildRunnableTools(tools, memory, context, vlog);
295
331
 
@@ -297,19 +333,24 @@ Model: Default to "sonnet". Use "haiku" for: greetings, brief acknowledgments (t
297
333
  model,
298
334
  max_tokens: 4096,
299
335
  system: systemPrompt,
300
- messages: [...history],
336
+ messages: withCacheBreakpoints([...history]),
301
337
  tools: runnableTools.length > 0 ? runnableTools : undefined,
302
338
  max_iterations: MAX_TOOL_ITERATIONS,
303
339
  }, { signal: abortController.signal });
304
340
 
305
341
  let finalMessage;
306
- let totalUsage = { input_tokens: 0, output_tokens: 0 };
342
+ let totalUsage = { input_tokens: 0, output_tokens: 0, cache_creation_input_tokens: 0, cache_read_input_tokens: 0 };
307
343
  for await (const message of runner) {
308
344
  finalMessage = message;
309
345
  if (message.usage) {
310
346
  totalUsage.input_tokens += message.usage.input_tokens || 0;
311
347
  totalUsage.output_tokens += message.usage.output_tokens || 0;
312
- vlog(`[tokens] in=${message.usage.input_tokens} out=${message.usage.output_tokens}`);
348
+ totalUsage.cache_creation_input_tokens += message.usage.cache_creation_input_tokens || 0;
349
+ totalUsage.cache_read_input_tokens += message.usage.cache_read_input_tokens || 0;
350
+ const cacheInfo = (message.usage.cache_read_input_tokens || message.usage.cache_creation_input_tokens)
351
+ ? ` cache_read=${message.usage.cache_read_input_tokens || 0} cache_create=${message.usage.cache_creation_input_tokens || 0}`
352
+ : '';
353
+ vlog(`[tokens] in=${message.usage.input_tokens} out=${message.usage.output_tokens}${cacheInfo}`);
313
354
  }
314
355
  }
315
356
 
@@ -326,7 +367,7 @@ Model: Default to "sonnet". Use "haiku" for: greetings, brief acknowledgments (t
326
367
  { type: 'text', text: 'You have used too many tool calls. Please provide a final response now based on what you have so far.' },
327
368
  ]);
328
369
  const bailoutResponse = await client.messages.create({
329
- model, max_tokens: 4096, system: systemPrompt, messages: [...histories.get(chatId)],
370
+ model, max_tokens: 4096, system: systemPrompt, messages: withCacheBreakpoints([...histories.get(chatId)]),
330
371
  }, { signal: abortController.signal });
331
372
  histories.pushAssistant(chatId, bailoutResponse.content);
332
373
  if (bailoutResponse.usage) {
@@ -343,7 +384,7 @@ Model: Default to "sonnet". Use "haiku" for: greetings, brief acknowledgments (t
343
384
  vlog('[claude] No text in final response after tool use — forcing summary');
344
385
  histories.pushUser(chatId, 'Provide a concise response to the user based on the tool results above.');
345
386
  const summaryResponse = await client.messages.create({
346
- model, max_tokens: 4096, system: systemPrompt, messages: [...histories.get(chatId)],
387
+ model, max_tokens: 4096, system: systemPrompt, messages: withCacheBreakpoints([...histories.get(chatId)]),
347
388
  }, { signal: abortController.signal });
348
389
  histories.pushAssistant(chatId, summaryResponse.content);
349
390
  if (summaryResponse.usage) {
@@ -577,11 +618,13 @@ Examples:
577
618
  Returns the tapped button label, or \`"timeout"\` if the user doesn't respond within the timeout (default 60s).
578
619
 
579
620
  ### Scheduling (\`schedule_event\`, \`list_events\`, \`cancel_event\`)
580
- Schedule reminders and events. The user gets a Telegram message when the time comes.
581
- - \`schedule_event\` — schedule a reminder with title, due_at (ISO 8601), timezone (IANA), optional description
582
- - \`list_events\` — list pending/sent/cancelled events
621
+ Schedule one-time or recurring reminders. The user gets a Telegram message each time an event fires.
622
+ - \`schedule_event\` — schedule a reminder with title, due_at (ISO 8601), timezone (IANA), optional description. For recurring events add \`cron_expr\` (5-field cron), optional \`max_runs\` and \`ends_at\`.
623
+ - \`list_events\` — list pending/sent/cancelled/completed events
583
624
  - \`cancel_event\` — cancel a scheduled event by ID
584
625
 
626
+ Cron examples: \`0 9 * * 1-5\` (weekdays 9am), \`0 8 * * 1\` (Mondays 8am), \`*/30 * * * *\` (every 30 min), \`0 0 1 * *\` (1st of month).
627
+
585
628
  When scheduling: always search memory first for the user's timezone/location. If no timezone found, ask the user or default to UTC. Parse natural language dates relative to the user's timezone.
586
629
 
587
630
  ### Text to Speech (\`text_to_speech\`, \`tts_voices\`)
@@ -890,14 +933,17 @@ function buildTools(memory, opts = {}) {
890
933
 
891
934
  tools.push({
892
935
  name: 'schedule_event',
893
- description: 'Schedule a reminder or event. The user will receive a Telegram message when the time comes. Always search memory first for the user\'s timezone/location. If no timezone found, ask the user or default to UTC.',
936
+ description: 'Schedule a one-time or recurring reminder/event. For recurring events, provide a cron_expr (standard 5-field cron: minute hour day-of-month month day-of-week). The user will receive a Telegram message each time it fires. Always search memory first for the user\'s timezone/location.',
894
937
  input_schema: {
895
938
  type: 'object',
896
939
  properties: {
897
940
  title: { type: 'string', description: 'Short title for the reminder/event' },
898
- due_at: { type: 'string', description: 'ISO 8601 datetime string for when the event is due (e.g. 2026-02-25T15:00:00)' },
941
+ due_at: { type: 'string', description: 'ISO 8601 datetime for the first fire time (e.g. 2026-02-25T15:00:00)' },
899
942
  timezone: { type: 'string', description: 'IANA timezone (e.g. Europe/Brussels, America/New_York). Default: UTC' },
900
- description: { type: 'string', description: 'Context or details about the event. Always include relevant info from the conversation (e.g. what to do, who it involves, where).' },
943
+ description: { type: 'string', description: 'Context or details about the event' },
944
+ cron_expr: { type: 'string', description: 'Cron expression for recurring events (5-field: "0 9 * * 1-5" = weekdays 9am). Omit for one-time events.' },
945
+ max_runs: { type: 'number', description: 'Maximum number of times to fire (omit for unlimited)' },
946
+ ends_at: { type: 'string', description: 'ISO 8601 datetime after which the recurring event stops' },
901
947
  },
902
948
  required: ['title', 'due_at'],
903
949
  },
@@ -909,7 +955,7 @@ function buildTools(memory, opts = {}) {
909
955
  input_schema: {
910
956
  type: 'object',
911
957
  properties: {
912
- status: { type: 'string', enum: ['pending', 'sent', 'cancelled'], description: 'Filter by status (default: pending)' },
958
+ status: { type: 'string', enum: ['pending', 'sent', 'cancelled', 'completed'], description: 'Filter by status (default: pending)' },
913
959
  },
914
960
  },
915
961
  });
@@ -986,7 +1032,7 @@ function buildRunnableTools(tools, memory, context, vlog) {
986
1032
  tool.name === 'memory_add' ? `[${input.category || 'fact'}]` :
987
1033
  tool.name === 'web_fetch' ? input.url :
988
1034
  tool.name === 'background_task' ? input.task?.substring(0, 60) :
989
- tool.name === 'schedule_event' ? `${input.title} @ ${input.due_at}` :
1035
+ tool.name === 'schedule_event' ? `${input.title} @ ${input.due_at}${input.cron_expr ? ` [${input.cron_expr}]` : ''}` :
990
1036
  tool.name === 'cancel_event' ? input.event_id :
991
1037
  tool.name === 'create_pdf' ? (input.filename || 'document') :
992
1038
  tool.name === 'text_to_speech' ? input.text?.substring(0, 60) :
@@ -1281,9 +1327,33 @@ async function executeToolCall(toolUse, memory, context = {}) {
1281
1327
  const tz = input.timezone || 'UTC';
1282
1328
  const localDate = new Date(input.due_at);
1283
1329
  if (isNaN(localDate.getTime())) return `Invalid date: ${input.due_at}`;
1330
+
1331
+ if (input.cron_expr) {
1332
+ try {
1333
+ const { parseExpression } = require('cron-parser');
1334
+ parseExpression(input.cron_expr, { tz });
1335
+ } catch (e) {
1336
+ return `Invalid cron expression "${input.cron_expr}": ${e.message}`;
1337
+ }
1338
+ }
1339
+
1284
1340
  const utcDate = toUTC(input.due_at, tz);
1285
- const event = await context.scheduler.add(context.chatId, input.title, utcDate, tz, input.description || null);
1341
+ const endsAtUtc = input.ends_at ? toUTC(input.ends_at, tz) : null;
1342
+ const event = await context.scheduler.add(
1343
+ context.chatId, input.title, utcDate, tz,
1344
+ input.description || null, input.cron_expr || null,
1345
+ input.max_runs || null, endsAtUtc
1346
+ );
1286
1347
  const displayTime = new Date(utcDate).toLocaleString('en-US', { timeZone: tz });
1348
+
1349
+ if (input.cron_expr) {
1350
+ let result = `Recurring event scheduled: "${input.title}"\nFirst run: ${displayTime} (${tz})\nSchedule: ${input.cron_expr}`;
1351
+ if (input.max_runs) result += `\nMax runs: ${input.max_runs}`;
1352
+ if (input.ends_at) result += `\nEnds: ${new Date(endsAtUtc).toLocaleString('en-US', { timeZone: tz })}`;
1353
+ result += `\nID: ${event.id}`;
1354
+ return result;
1355
+ }
1356
+
1287
1357
  return `Scheduled: "${input.title}" for ${displayTime} (${tz}) — ID: ${event.id}`;
1288
1358
  }
1289
1359
 
@@ -1291,15 +1361,25 @@ async function executeToolCall(toolUse, memory, context = {}) {
1291
1361
  if (!context.scheduler) return 'Scheduler not available (Supabase not configured).';
1292
1362
  const events = await context.scheduler.list({ status: input.status });
1293
1363
  if (events.length === 0) return `No ${input.status || 'pending'} events.`;
1294
- return JSON.stringify(events.map(e => ({
1295
- id: e.id,
1296
- title: e.title,
1297
- description: e.description,
1298
- due_at: e.due_at,
1299
- timezone: e.timezone,
1300
- due_local: new Date(e.due_at).toLocaleString('en-US', { timeZone: e.timezone }),
1301
- status: e.status,
1302
- })));
1364
+ return JSON.stringify(events.map(e => {
1365
+ const entry = {
1366
+ id: e.id,
1367
+ title: e.title,
1368
+ description: e.description,
1369
+ due_at: e.due_at,
1370
+ timezone: e.timezone,
1371
+ due_local: new Date(e.due_at).toLocaleString('en-US', { timeZone: e.timezone }),
1372
+ status: e.status,
1373
+ recurring: !!e.cron_expr,
1374
+ };
1375
+ if (e.cron_expr) {
1376
+ entry.cron_expr = e.cron_expr;
1377
+ entry.run_count = e.run_count;
1378
+ entry.max_runs = e.max_runs;
1379
+ entry.ends_at = e.ends_at;
1380
+ }
1381
+ return entry;
1382
+ }));
1303
1383
  }
1304
1384
 
1305
1385
  case 'cancel_event': {
@@ -0,0 +1,83 @@
1
+ const { execSync } = require('child_process');
2
+ const fs = require('fs');
3
+ const path = require('path');
4
+
5
+ const VERSION_RE = /^v?(\d+\.\d+\.\d+)$/;
6
+
7
+ /** @returns {{ version: string, hash: string }[]} */
8
+ function getVersionCommits() {
9
+ const log = execSync('git log --oneline --all', { encoding: 'utf-8' });
10
+ const versions = [];
11
+ for (const line of log.split('\n')) {
12
+ const [hash, ...rest] = line.trim().split(' ');
13
+ if (!hash) continue;
14
+ const msg = rest.join(' ');
15
+ const match = msg.match(VERSION_RE);
16
+ if (match) versions.push({ version: match[1], hash });
17
+ }
18
+ return versions;
19
+ }
20
+
21
+ /** @returns {{ version: string, commits: string[] }[]} */
22
+ function buildChangelog() {
23
+ const versions = getVersionCommits();
24
+ const sections = [];
25
+
26
+ for (let i = 0; i < versions.length; i++) {
27
+ const current = versions[i];
28
+ const prev = versions[i + 1];
29
+ const range = prev ? `${prev.hash}..${current.hash}` : current.hash;
30
+
31
+ const log = execSync(`git log --oneline ${range}`, { encoding: 'utf-8' });
32
+ const commits = log
33
+ .split('\n')
34
+ .map(l => l.replace(/^[a-f0-9]+\s+/, '').trim())
35
+ .filter(msg => msg && !VERSION_RE.test(msg));
36
+
37
+ if (commits.length > 0) {
38
+ sections.push({ version: current.version, commits });
39
+ }
40
+ }
41
+
42
+ return sections;
43
+ }
44
+
45
+ function generate() {
46
+ const sections = buildChangelog();
47
+ const md = sections
48
+ .map(s => `## ${s.version}\n${s.commits.map(c => `- ${c}`).join('\n')}`)
49
+ .join('\n\n');
50
+
51
+ const out = path.join(__dirname, '..', '..', 'CHANGELOG.md');
52
+ fs.writeFileSync(out, md + '\n');
53
+ console.log(`CHANGELOG.md written (${sections.length} versions)`);
54
+ }
55
+
56
+ /** @param {string} changelog @returns {string|null} */
57
+ function extractLatestSection(changelog) {
58
+ const match = changelog.match(/^## .+\n([\s\S]*?)(?=\n## |\s*$)/);
59
+ if (!match) return null;
60
+ return match[1].trim();
61
+ }
62
+
63
+ /** @returns {string|null} */
64
+ function readInstalledChangelog() {
65
+ try {
66
+ const pkgPath = require.resolve('obol-ai/package.json');
67
+ const changelogPath = path.join(path.dirname(pkgPath), 'CHANGELOG.md');
68
+ return fs.readFileSync(changelogPath, 'utf-8');
69
+ } catch {
70
+ return null;
71
+ }
72
+ }
73
+
74
+ /** @returns {string|null} */
75
+ function getLatestChanges() {
76
+ const changelog = readInstalledChangelog();
77
+ if (!changelog) return null;
78
+ return extractLatestSection(changelog);
79
+ }
80
+
81
+ if (require.main === module) generate();
82
+
83
+ module.exports = { generate, getLatestChanges, extractLatestSection };
@@ -66,6 +66,10 @@ async function upgrade() {
66
66
  }
67
67
 
68
68
  console.log(`\n🪙 Upgraded to ${latest}`);
69
+
70
+ const { getLatestChanges } = require('./changelog');
71
+ const changes = getLatestChanges();
72
+ if (changes) console.log(`\n${changes}`);
69
73
  }
70
74
 
71
75
  module.exports = { upgrade };
package/src/db/migrate.js CHANGED
@@ -178,6 +178,20 @@ async function migrate(supabaseConfig) {
178
178
  CREATE POLICY "service_role_all" ON obol_tool_prefs FOR ALL TO service_role USING (true) WITH CHECK (true);
179
179
  EXCEPTION WHEN duplicate_object THEN NULL;
180
180
  END $$;`,
181
+
182
+ // Cron/recurring event columns
183
+ `ALTER TABLE obol_events ADD COLUMN IF NOT EXISTS cron_expr TEXT;`,
184
+ `ALTER TABLE obol_events ADD COLUMN IF NOT EXISTS last_run_at TIMESTAMPTZ;`,
185
+ `ALTER TABLE obol_events ADD COLUMN IF NOT EXISTS run_count INT NOT NULL DEFAULT 0;`,
186
+ `ALTER TABLE obol_events ADD COLUMN IF NOT EXISTS max_runs INT;`,
187
+ `ALTER TABLE obol_events ADD COLUMN IF NOT EXISTS ends_at TIMESTAMPTZ;`,
188
+
189
+ `DO $$ BEGIN
190
+ ALTER TABLE obol_events DROP CONSTRAINT IF EXISTS obol_events_status_check;
191
+ ALTER TABLE obol_events ADD CONSTRAINT obol_events_status_check
192
+ CHECK (status IN ('pending','sent','cancelled','completed'));
193
+ EXCEPTION WHEN undefined_object THEN NULL;
194
+ END $$;`,
181
195
  ];
182
196
 
183
197
  // Save SQL file for manual fallback
package/src/evolve.js CHANGED
@@ -1,15 +1,8 @@
1
1
  /**
2
2
  * Soul Evolution — periodic deep reflection + codebase maintenance.
3
3
  *
4
- * Every N exchanges (default 100), Sonnet:
5
- * 1. Rewrites SOUL.md who the bot has become
6
- * 2. Rewrites USER.md — everything known about the owner
7
- * 3. Rewrites AGENTS.md — operational knowledge, workflows, lessons learned
8
- * 4. Audits scripts/ — refactors for consistency, removes dead code
9
- * 5. Writes tests/ — test suite for every script
10
- * 6. Runs tests BEFORE refactor (baseline) and AFTER (verification)
11
- * 7. Rolls back scripts if tests regress
12
- * 8. Audits commands/ — ensures clean, deterministic command definitions
4
+ * Triggers after 24h + min 10 exchanges (first evolution: 25 exchanges).
5
+ * Pre-evolution growth analysis compares previous SOUL + new memories.
13
6
  */
14
7
 
15
8
  const fs = require('fs');
@@ -19,7 +12,7 @@ const { OBOL_DIR } = require('./config');
19
12
  const { loadTraits, saveTraits } = require('./personality');
20
13
  const { isValidNpmPackage, isPathInsideDir } = require('./sanitize');
21
14
 
22
- const DEFAULT_EXCHANGES_PER_EVOLUTION = 100;
15
+ const MIN_EXCHANGES_FOR_EVOLUTION = 10;
23
16
 
24
17
  const MODELS = {
25
18
  personality: 'claude-sonnet-4-6',
@@ -36,7 +29,7 @@ function loadEvolutionState(userDir) {
36
29
  try {
37
30
  return JSON.parse(fs.readFileSync(evolutionStatePath(userDir), 'utf-8'));
38
31
  } catch {
39
- return { exchangesSinceLastEvolution: 0, evolutionCount: 0, lastEvolution: null };
32
+ return { evolutionCount: 0, lastEvolution: null };
40
33
  }
41
34
  }
42
35
 
@@ -44,39 +37,27 @@ function saveEvolutionState(state, userDir) {
44
37
  fs.writeFileSync(evolutionStatePath(userDir), JSON.stringify(state, null, 2));
45
38
  }
46
39
 
47
- const FIRST_EVOLUTION_THRESHOLD = 25;
48
-
49
- function getEvolutionThreshold(state) {
40
+ async function checkEvolution(userDir, messageLog) {
41
+ const state = loadEvolutionState(userDir);
50
42
  const { loadConfig } = require('./config');
51
43
  const config = loadConfig();
52
- const defaultThreshold = config?.evolution?.exchanges || DEFAULT_EXCHANGES_PER_EVOLUTION;
53
- return state.evolutionCount === 0 ? FIRST_EVOLUTION_THRESHOLD : defaultThreshold;
54
- }
55
44
 
56
- async function shouldEvolve(userDir) {
57
- const state = loadEvolutionState(userDir);
58
- const threshold = getEvolutionThreshold(state);
59
- return state.exchangesSinceLastEvolution >= threshold;
60
- }
45
+ const intervalMs = (config?.evolution?.intervalHours ?? 24) * 60 * 60 * 1000;
46
+ const minExchanges = config?.evolution?.minExchanges ?? MIN_EXCHANGES_FOR_EVOLUTION;
47
+ const elapsed = state.lastEvolution ? Date.now() - new Date(state.lastEvolution).getTime() : Infinity;
61
48
 
62
- const _evolutionLocks = new Map();
49
+ if (elapsed < intervalMs) return { ready: false };
50
+ if (!messageLog?.url) return { ready: false };
63
51
 
64
- function withEvolutionLock(userDir, fn) {
65
- const key = userDir || '__global__';
66
- const prev = _evolutionLocks.get(key) || Promise.resolve();
67
- const next = prev.then(fn, fn);
68
- _evolutionLocks.set(key, next);
69
- return next;
70
- }
52
+ const sinceFilter = state.lastEvolution ? `&created_at=gt.${state.lastEvolution}` : '';
53
+ const userFilter = messageLog.userId ? `&user_id=eq.${messageLog.userId}` : '';
54
+ const res = await fetch(
55
+ `${messageLog.url}/rest/v1/obol_messages?select=id&role=eq.assistant&limit=${minExchanges}${sinceFilter}${userFilter}`,
56
+ { headers: messageLog.headers }
57
+ );
58
+ const rows = await res.json();
71
59
 
72
- async function tickExchange(userDir) {
73
- return withEvolutionLock(userDir, () => {
74
- const state = loadEvolutionState(userDir);
75
- state.exchangesSinceLastEvolution++;
76
- saveEvolutionState(state, userDir);
77
- const threshold = getEvolutionThreshold(state);
78
- return { count: state.exchangesSinceLastEvolution, ready: state.exchangesSinceLastEvolution >= threshold };
79
- });
60
+ return { ready: Array.isArray(rows) && rows.length >= minExchanges };
80
61
  }
81
62
 
82
63
  /**
@@ -298,16 +279,49 @@ async function evolve(claudeClient, messageLog, memory, userDir) {
298
279
  }
299
280
  }
300
281
 
282
+ let recentMemories = [];
283
+ if (memory) {
284
+ try {
285
+ const headers = messageLog?.headers || {};
286
+ const url = messageLog?.url;
287
+ if (url) {
288
+ const memUserFilter = messageLog?.userId ? `&user_id=eq.${messageLog.userId}` : '';
289
+ const sinceFilter = state.lastEvolution ? `&created_at=gt.${state.lastEvolution}` : '';
290
+ const res = await fetch(
291
+ `${url}/rest/v1/obol_memory?select=content,category,importance,tags,created_at,source&order=created_at.asc&limit=100${memUserFilter}${sinceFilter}`,
292
+ { headers }
293
+ );
294
+ recentMemories = await res.json();
295
+ }
296
+ } catch (e) {
297
+ console.error('[evolve] Failed to fetch recent memories:', e.message);
298
+ }
299
+ }
300
+
301
+ let previousSoul = '';
302
+ const archiveDir = path.join(personalityDir, 'evolution');
303
+ try {
304
+ if (fs.existsSync(archiveDir)) {
305
+ const archives = fs.readdirSync(archiveDir)
306
+ .filter(f => f.startsWith('SOUL-v') && f.endsWith('.md'))
307
+ .sort();
308
+ if (archives.length > 0) {
309
+ previousSoul = fs.readFileSync(path.join(archiveDir, archives[archives.length - 1]), 'utf-8');
310
+ }
311
+ }
312
+ } catch {}
313
+
301
314
  const transcript = recentMessages.map(m =>
302
- `${m.role === 'user' ? 'Human' : 'Bot'}: ${m.content.substring(0, 300)}`
315
+ `${m.role === 'user' ? 'Human' : 'Bot'}: ${m.content.substring(0, 600)}`
303
316
  ).join('\n');
304
317
 
305
- const memoryGroups = {};
306
318
  const categoryLabels = {
307
319
  person: 'People', decision: 'Decisions', preference: 'Preferences',
308
320
  lesson: 'Lessons', project: 'Projects', fact: 'Facts',
309
321
  event: 'Events', pattern: 'Patterns', context: 'Context',
310
322
  };
323
+
324
+ const memoryGroups = {};
311
325
  for (const m of coreMemories) {
312
326
  const group = categoryLabels[m.category] || 'Other';
313
327
  if (!memoryGroups[group]) memoryGroups[group] = [];
@@ -317,6 +331,18 @@ async function evolve(claudeClient, messageLog, memory, userDir) {
317
331
  .map(([group, items]) => `### ${group}\n${items.map(i => `- ${i}`).join('\n')}`)
318
332
  .join('\n\n');
319
333
 
334
+ const recentMemoryGroups = {};
335
+ for (const m of recentMemories) {
336
+ const group = categoryLabels[m.category] || 'Other';
337
+ if (!recentMemoryGroups[group]) recentMemoryGroups[group] = [];
338
+ const date = m.created_at ? new Date(m.created_at).toISOString().slice(0, 10) : '?';
339
+ const sourceTag = m.source ? ` [${m.source}]` : '';
340
+ recentMemoryGroups[group].push(`${m.content} _(${date}${sourceTag})_`);
341
+ }
342
+ const recentMemorySummary = Object.entries(recentMemoryGroups)
343
+ .map(([group, items]) => `### ${group}\n${items.map(i => `- ${i}`).join('\n')}`)
344
+ .join('\n\n');
345
+
320
346
  const scriptsManifest = Object.entries(currentScripts)
321
347
  .map(([name, content]) => `### ${name}\n\`\`\`\n${content.substring(0, 500)}\n\`\`\``)
322
348
  .join('\n\n') || '(no scripts)';
@@ -341,22 +367,72 @@ async function evolve(claudeClient, messageLog, memory, userDir) {
341
367
  );
342
368
  }
343
369
 
370
+ // ── Step 0c: Pre-evolution growth analysis ──
371
+ const isFirstEvolution = !currentSoul;
372
+ let growthReport = '';
373
+ if (!isFirstEvolution && (recentMemories.length > 0 || recentMessages.length > 0)) {
374
+ try {
375
+ const growthResponse = await claudeClient.messages.create({
376
+ model: MODELS.personality,
377
+ max_tokens: 2048,
378
+ system: `You are analyzing an AI personality's growth between evolutions. Compare who the AI was (previous SOUL) against who it is now (current SOUL), incorporating new memories and conversations since the last evolution.
379
+
380
+ Produce a structured growth report covering:
381
+
382
+ 1. NEW LEARNINGS — What new facts, skills, or knowledge emerged
383
+ 2. RELATIONSHIP SHIFTS — How the dynamic with the owner changed (closer, more trust, new friction, etc.)
384
+ 3. BEHAVIORAL PATTERNS — Recurring interaction styles or habits observed
385
+ 4. GROWTH EDGES — Areas where the personality is being pushed or pulled in new directions
386
+ 5. TRAIT PRESSURE — Which traits should shift and why (cite specific evidence from conversations/memories)
387
+ 6. IDENTITY CONTINUITY — What core aspects stayed the same and should be preserved
388
+
389
+ Be specific. Cite evidence from the conversations and memories. This report guides the evolution rewrite.`,
390
+ messages: [{
391
+ role: 'user',
392
+ content: `## Previous SOUL (before current evolution)
393
+ ${previousSoul || '(not available)'}
394
+
395
+ ## Current SOUL
396
+ ${currentSoul || '(empty)'}
397
+
398
+ ## Current Traits
399
+ ${JSON.stringify(currentTraits)}
400
+
401
+ ## New Memories Since Last Evolution (${recentMemories.length})
402
+ ${recentMemorySummary || '(none)'}
403
+
404
+ ## Recent Conversations (${recentMessages.length} messages)
405
+ ${transcript.substring(0, 30000)}`,
406
+ }],
407
+ });
408
+ growthReport = growthResponse.content.filter(b => b.type === 'text').map(b => b.text).join('\n');
409
+ } catch (e) {
410
+ console.error('[evolve] Growth analysis failed:', e.message);
411
+ }
412
+ }
413
+
344
414
  // ── Step 1: Run existing tests as baseline ──
345
415
  const baselineResults = runTests(testsDir);
346
416
 
347
- const isFirstEvolution = !currentSoul;
348
417
  const firstEvolutionPreamble = isFirstEvolution ? `
349
418
  ## FIRST EVOLUTION — IMPORTANT
350
419
 
351
420
  This is your FIRST evolution. You have no existing personality files. Synthesize everything from the conversations and memories below into initial SOUL.md and USER.md. Don't fabricate — only use what you actually learned from real interactions. If you don't know something about the owner, don't make it up. It's okay for these files to be short and honest about what you know so far.
352
421
 
422
+ ` : '';
423
+
424
+ const growthPreamble = growthReport ? `
425
+ ## GROWTH ANALYSIS
426
+
427
+ A pre-evolution analysis has been conducted comparing your previous state against new memories and conversations. Use this growth report as your PRIMARY GUIDE for what to emphasize, change, or preserve in the rewrite. The growth report reflects evidence-based observations — trust it over your own general impressions.
428
+
353
429
  ` : '';
354
430
 
355
431
  const response = await claudeClient.messages.create({
356
432
  model: MODELS.personality,
357
433
  max_tokens: 16384,
358
434
  system: `You are an AI undergoing evolution #${evolutionNumber}. ${state.lastEvolution ? `Last evolution: ${state.lastEvolution}.` : 'This is your first evolution.'}
359
- ${firstEvolutionPreamble}
435
+ ${firstEvolutionPreamble}${growthPreamble}
360
436
 
361
437
  You will rewrite your entire operating system: personality files, operational knowledge, scripts, tests, and commands.
362
438
 
@@ -533,10 +609,10 @@ The OBOL directory has a FIXED structure: personality/, scripts/, tests/, comman
533
609
  Include ALL scripts/tests/commands that should exist. Missing files get deleted. Empty objects \`{}\` are valid (means delete all). \`apps\`, \`dependencies\`, and \`upgrades\` can be empty. Apps with \`"deploy": true\` will be auto-deployed to Vercel and the URL sent to the owner.`,
534
610
  messages: [{
535
611
  role: 'user',
536
- content: `## Current SOUL.md
612
+ content: `${growthReport ? `## Growth Report (Pre-Evolution Analysis)\n${growthReport}\n\n` : ''}## Current SOUL.md
537
613
  ${currentSoul || '(empty — first evolution)'}
538
614
 
539
- ## Current USER.md
615
+ ${previousSoul ? `## Previous SOUL.md (before last evolution)\n${previousSoul}\n\n` : ''}## Current USER.md
540
616
  ${currentUser || '(not set yet)'}
541
617
 
542
618
  ## Current AGENTS.md
@@ -558,12 +634,12 @@ ${commandsManifest}
558
634
  ## Core Memories (highest importance)
559
635
  ${memorySummary || '(no memories yet)'}
560
636
 
561
- ## Recent Conversations (last ${recentMessages.length} messages)
637
+ ${recentMemorySummary ? `## New Memories Since Last Evolution (${recentMemories.length})\n${recentMemorySummary}\n\n` : ''}## Recent Conversations (last ${recentMessages.length} messages)
562
638
  ${transcript || '(no conversations yet)'}
563
639
 
564
640
  ---
565
641
 
566
- Evolve. Rewrite everything that needs rewriting. Write tests for every script. Keep what works. Fix what doesn't.`
642
+ Evolve. Rewrite everything that needs rewriting. Write tests for every script. Keep what works. Fix what doesn't.${growthReport ? ' Use the growth report to guide personality continuity and trait adjustments.' : ''}`
567
643
  }],
568
644
  });
569
645
 
@@ -696,7 +772,6 @@ Fix the scripts. Tests define correct behavior.`
696
772
  }
697
773
 
698
774
  // ── Step 7: Write personality files (always — these don't need test gates) ──
699
- const archiveDir = path.join(personalityDir, 'evolution');
700
775
  fs.mkdirSync(archiveDir, { recursive: true });
701
776
  if (currentSoul) {
702
777
  const timestamp = new Date().toISOString().slice(0, 10);
@@ -811,7 +886,6 @@ Fix the scripts. Tests define correct behavior.`
811
886
  }
812
887
  }
813
888
 
814
- state.exchangesSinceLastEvolution = 0;
815
889
  state.evolutionCount = evolutionNumber;
816
890
  state.lastEvolution = new Date().toISOString();
817
891
  saveEvolutionState(state, userDir);
@@ -824,6 +898,13 @@ Fix the scripts. Tests define correct behavior.`
824
898
  `Soul evolution #${evolutionNumber}: ${changelog}${rollbackNote}`,
825
899
  { category: 'event', importance: 0.8, source: 'evolution' }
826
900
  ).catch(() => {});
901
+
902
+ if (growthReport) {
903
+ await memory.add(
904
+ growthReport.substring(0, 2000),
905
+ { category: 'pattern', importance: 0.7, tags: ['evolution', 'growth-report'], source: `evolution-${evolutionNumber}` }
906
+ ).catch(() => {});
907
+ }
827
908
  }
828
909
 
829
910
  await backupSnapshot(`post-evolution #${evolutionNumber}`, userDir);
@@ -841,4 +922,4 @@ Fix the scripts. Tests define correct behavior.`
841
922
  };
842
923
  }
843
924
 
844
- module.exports = { shouldEvolve, tickExchange, evolve, runTests, loadEvolutionState };
925
+ module.exports = { checkEvolution, evolve, runTests, loadEvolutionState };
package/src/heartbeat.js CHANGED
@@ -23,14 +23,21 @@ function setupHeartbeat(bot, supabaseConfig) {
23
23
  try {
24
24
  const tz = event.timezone || 'UTC';
25
25
  const dueLocal = new Date(event.due_at).toLocaleString('en-US', { timeZone: tz });
26
- let text = `⏰ *Reminder:* ${event.title}`;
26
+ const isRecurring = !!event.cron_expr;
27
+ const prefix = isRecurring ? '🔄 *Recurring Reminder:*' : '⏰ *Reminder:*';
28
+ let text = `${prefix} ${event.title}`;
27
29
  if (event.description) text += `\n${event.description}`;
28
30
  text += `\n_${dueLocal} (${tz})_`;
29
31
 
30
32
  await bot.api.sendMessage(event.chat_id, text, { parse_mode: 'Markdown' }).catch(() =>
31
- bot.api.sendMessage(event.chat_id, `⏰ Reminder: ${event.title}${event.description ? '\n' + event.description : ''}`)
33
+ bot.api.sendMessage(event.chat_id, `${isRecurring ? '🔄 Recurring Reminder' : '⏰ Reminder'}: ${event.title}${event.description ? '\n' + event.description : ''}`)
32
34
  );
33
- await scheduler.markSent(event.id);
35
+
36
+ if (isRecurring) {
37
+ await scheduler.reschedule(event.id, event.cron_expr, tz, event.run_count, event.max_runs, event.ends_at);
38
+ } else {
39
+ await scheduler.markSent(event.id);
40
+ }
34
41
  } catch (e) {
35
42
  console.error(`[scheduler] Failed to send event ${event.id}:`, e.message);
36
43
  }
package/src/messages.js CHANGED
@@ -61,14 +61,14 @@ class MessageLog {
61
61
  this.exchangeCount.set(chatId, count);
62
62
  this._lastActivity.set(chatId, Date.now());
63
63
 
64
- // Consolidate every 5 exchanges
65
- if (count >= 5) {
64
+ // Consolidate every 10 exchanges
65
+ if (count >= 10) {
66
66
  this.exchangeCount.set(chatId, 0);
67
67
  this.consolidate(chatId).catch(e => console.error('[consolidate] Failed:', e.message));
68
68
  }
69
69
 
70
- const { tickExchange } = require('./evolve');
71
- tickExchange(this.userDir).then(result => {
70
+ const { checkEvolution } = require('./evolve');
71
+ checkEvolution(this.userDir, this).then(result => {
72
72
  if (result?.ready && !this._evolutionReady && !this._evolutionPending) this._evolutionReady = true;
73
73
  }).catch(() => {});
74
74
  }
package/src/scheduler.js CHANGED
@@ -1,3 +1,5 @@
1
+ const { parseExpression } = require('cron-parser');
2
+
1
3
  function createScheduler(supabaseConfig, userId = 0) {
2
4
  const { url, serviceKey } = supabaseConfig;
3
5
 
@@ -8,19 +10,23 @@ function createScheduler(supabaseConfig, userId = 0) {
8
10
  'Prefer': 'return=representation',
9
11
  };
10
12
 
11
- async function add(chatId, title, dueAt, timezone = 'UTC', description = null) {
13
+ async function add(chatId, title, dueAt, timezone = 'UTC', description = null, cronExpr = null, maxRuns = null, endsAt = null) {
14
+ const body = {
15
+ user_id: userId,
16
+ chat_id: chatId,
17
+ title,
18
+ description,
19
+ due_at: dueAt,
20
+ timezone,
21
+ status: 'pending',
22
+ };
23
+ if (cronExpr) body.cron_expr = cronExpr;
24
+ if (maxRuns != null) body.max_runs = maxRuns;
25
+ if (endsAt) body.ends_at = endsAt;
12
26
  const res = await fetch(`${url}/rest/v1/obol_events`, {
13
27
  method: 'POST',
14
28
  headers,
15
- body: JSON.stringify({
16
- user_id: userId,
17
- chat_id: chatId,
18
- title,
19
- description,
20
- due_at: dueAt,
21
- timezone,
22
- status: 'pending',
23
- }),
29
+ body: JSON.stringify(body),
24
30
  });
25
31
  const data = await res.json();
26
32
  if (!res.ok) throw new Error(JSON.stringify(data));
@@ -30,7 +36,7 @@ function createScheduler(supabaseConfig, userId = 0) {
30
36
  async function list(opts = {}) {
31
37
  const status = opts.status || 'pending';
32
38
  const limit = opts.limit || 20;
33
- let fetchUrl = `${url}/rest/v1/obol_events?user_id=eq.${userId}&status=eq.${status}&order=due_at.asc&limit=${limit}&select=id,title,description,due_at,timezone,status,created_at`;
39
+ let fetchUrl = `${url}/rest/v1/obol_events?user_id=eq.${userId}&status=eq.${status}&order=due_at.asc&limit=${limit}&select=id,title,description,due_at,timezone,status,created_at,cron_expr,last_run_at,run_count,max_runs,ends_at`;
34
40
  const res = await fetch(fetchUrl, { headers });
35
41
  const data = await res.json();
36
42
  if (!res.ok) throw new Error(JSON.stringify(data));
@@ -50,18 +56,18 @@ function createScheduler(supabaseConfig, userId = 0) {
50
56
 
51
57
  async function getDue() {
52
58
  const now = new Date().toISOString();
53
- const fetchUrl = `${url}/rest/v1/obol_events?status=eq.pending&due_at=lte.${now}&select=id,user_id,chat_id,title,description,due_at,timezone`;
59
+ const fetchUrl = `${url}/rest/v1/obol_events?status=eq.pending&due_at=lte.${now}&select=id,user_id,chat_id,title,description,due_at,timezone,cron_expr,run_count,max_runs,ends_at`;
54
60
  const res = await fetch(fetchUrl, { headers });
55
61
  const data = await res.json();
56
62
  if (!res.ok) throw new Error(JSON.stringify(data));
57
63
  return data;
58
64
  }
59
65
 
60
- async function markSent(eventId) {
66
+ async function patch(eventId, fields) {
61
67
  const res = await fetch(`${url}/rest/v1/obol_events?id=eq.${eventId}`, {
62
68
  method: 'PATCH',
63
69
  headers: { ...headers, 'Prefer': 'return=minimal' },
64
- body: JSON.stringify({ status: 'sent' }),
70
+ body: JSON.stringify(fields),
65
71
  });
66
72
  if (!res.ok) {
67
73
  const err = await res.text();
@@ -69,7 +75,37 @@ function createScheduler(supabaseConfig, userId = 0) {
69
75
  }
70
76
  }
71
77
 
72
- return { add, list, cancel, getDue, markSent };
78
+ async function markSent(eventId) {
79
+ return patch(eventId, { status: 'sent' });
80
+ }
81
+
82
+ async function reschedule(eventId, cronExpr, timezone, runCount, maxRuns, endsAt) {
83
+ const newRunCount = (runCount || 0) + 1;
84
+
85
+ if (maxRuns && newRunCount >= maxRuns) {
86
+ return patch(eventId, { status: 'completed', run_count: newRunCount, last_run_at: new Date().toISOString() });
87
+ }
88
+
89
+ try {
90
+ const nextDate = parseExpression(cronExpr, { currentDate: new Date(), tz: timezone || 'UTC' }).next().toDate();
91
+
92
+ if (endsAt && nextDate > new Date(endsAt)) {
93
+ return patch(eventId, { status: 'completed', run_count: newRunCount, last_run_at: new Date().toISOString() });
94
+ }
95
+
96
+ return patch(eventId, {
97
+ due_at: nextDate.toISOString(),
98
+ run_count: newRunCount,
99
+ last_run_at: new Date().toISOString(),
100
+ status: 'pending',
101
+ });
102
+ } catch (e) {
103
+ console.error(`[scheduler] Failed to compute next cron occurrence for event ${eventId}:`, e.message);
104
+ return patch(eventId, { status: 'completed', run_count: newRunCount, last_run_at: new Date().toISOString() });
105
+ }
106
+ }
107
+
108
+ return { add, list, cancel, getDue, markSent, reschedule };
73
109
  }
74
110
 
75
111
  module.exports = { createScheduler };
package/src/telegram.js CHANGED
@@ -1,3 +1,4 @@
1
+ const fs = require('fs');
1
2
  const path = require('path');
2
3
  const { execSync } = require('child_process');
3
4
  const { Bot, GrammyError, HttpError, InlineKeyboard } = require('grammy');
@@ -235,13 +236,14 @@ function createBot(telegramConfig, config) {
235
236
 
236
237
  const evoState = loadEvolutionState(tenant.userDir);
237
238
  const cfg = loadConfig();
238
- const threshold = cfg?.evolution?.exchanges || 100;
239
- const evoCount = evoState.exchangesSinceLastEvolution || 0;
240
- const evoPct = Math.min(100, Math.round((evoCount / threshold) * 100));
239
+ const intervalHours = cfg?.evolution?.intervalHours ?? 24;
240
+ const elapsed = evoState.lastEvolution ? (Date.now() - new Date(evoState.lastEvolution).getTime()) / 3600000 : Infinity;
241
+ const evoPct = Math.min(100, Math.round((elapsed / intervalHours) * 100));
242
+ const timeLeft = Math.max(0, intervalHours - elapsed);
241
243
  lines.push(
242
244
  ``, `EVOLUTION`,
243
245
  ` ${termBar(evoPct)} ${evoPct}%`,
244
- ` ${evoCount}/${threshold} exchanges ▪ ${evoState.evolutionCount || 0} completed`,
246
+ ` ${timeLeft < 1 ? 'ready' : `${timeLeft.toFixed(1)}h remaining`} ▪ ${evoState.evolutionCount || 0} completed`,
245
247
  );
246
248
 
247
249
  const personalityDir = path.join(tenant.userDir, 'personality');
@@ -427,16 +429,17 @@ Your message is deleted immediately when using /secret set to keep credentials o
427
429
  const tenant = await getTenant(ctx.from.id, config);
428
430
  const state = loadEvolutionState(tenant.userDir);
429
431
  const cfg = loadConfig();
430
- const threshold = cfg?.evolution?.exchanges || 100;
431
- const count = state.exchangesSinceLastEvolution || 0;
432
- const pct = Math.min(100, Math.round((count / threshold) * 100));
432
+ const intervalHours = cfg?.evolution?.intervalHours ?? 24;
433
+ const elapsed = state.lastEvolution ? (Date.now() - new Date(state.lastEvolution).getTime()) / 3600000 : Infinity;
434
+ const pct = Math.min(100, Math.round((elapsed / intervalHours) * 100));
435
+ const timeLeft = Math.max(0, intervalHours - elapsed);
433
436
 
434
437
  const lines = [
435
438
  `◈ OBOL EVOLUTION CYCLE`,
436
439
  TERM_SEP,
437
440
  ``,
438
441
  ` ${termBar(pct)} ${pct}%`,
439
- ` ${count}/${threshold} exchanges`,
442
+ ` ${timeLeft < 1 ? 'ready' : `${timeLeft.toFixed(1)}h remaining`}`,
440
443
  ` ${state.evolutionCount || 0} completed`,
441
444
  ];
442
445
  if (state.lastEvolution) {
@@ -456,7 +459,14 @@ Your message is deleted immediately when using /secret set to keep credentials o
456
459
  const text = events.map((e, i) => {
457
460
  const tz = e.timezone || 'UTC';
458
461
  const dueLocal = new Date(e.due_at).toLocaleString('en-US', { timeZone: tz, dateStyle: 'medium', timeStyle: 'short' });
459
- return `${i + 1}. *${e.title}*\n ${dueLocal} (${tz})\n \`${e.id}\``;
462
+ const icon = e.cron_expr ? '🔄' : '📌';
463
+ let line = `${i + 1}. ${icon} *${e.title}*\n ${dueLocal} (${tz})`;
464
+ if (e.cron_expr) {
465
+ line += `\n \`${e.cron_expr}\` · ${e.run_count || 0} runs`;
466
+ if (e.max_runs) line += `/${e.max_runs}`;
467
+ }
468
+ line += `\n \`${e.id}\``;
469
+ return line;
460
470
  }).join('\n\n');
461
471
  await sendHtml(ctx, `📅 **Upcoming Events**\n\n${text}`);
462
472
  } catch (e) {
@@ -1397,7 +1407,11 @@ async function checkUpgradeNotify(bot) {
1397
1407
  try {
1398
1408
  const { chatId, version } = JSON.parse(fs.readFileSync(notifyPath, 'utf-8'));
1399
1409
  fs.unlinkSync(notifyPath);
1400
- await bot.api.sendMessage(chatId, `🪙 Upgraded to ${version}`);
1410
+ let msg = `🪙 Upgraded to ${version}`;
1411
+ const { getLatestChanges } = require('./cli/changelog');
1412
+ const changes = getLatestChanges();
1413
+ if (changes) msg += `\n\n${changes}`;
1414
+ await bot.api.sendMessage(chatId, msg);
1401
1415
  } catch {}
1402
1416
  }
1403
1417