@onmars/lunar-core 0.4.4 → 0.4.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@onmars/lunar-core",
3
- "version": "0.4.4",
3
+ "version": "0.4.7",
4
4
  "type": "module",
5
5
  "main": "src/index.ts",
6
6
  "types": "src/index.ts",
@@ -144,8 +144,22 @@ export function evaluateGuard(
144
144
  // Constants
145
145
  // ════════════════════════════════════════════════════════════
146
146
 
147
- /** Maximum transcript characters sent to LLM for summarization (~1000 tokens) */
148
- const MAX_TRANSCRIPT_CHARS = 4000
147
+ /** Maximum transcript characters sent to LLM for summarization (~4000 tokens) */
148
+ const MAX_TRANSCRIPT_CHARS = 16000
149
+
150
+ /**
151
+ * Smart truncation: if the full text fits, return it.
152
+ * Otherwise keep the first 20% and the last 80% of the remaining budget
153
+ * (after reserving space for a cut marker). This ensures recent context (decisions, conclusions) is always captured.
154
+ */
155
+ function smartTruncate(text: string, maxChars: number): string {
156
+ if (text.length <= maxChars) return text
157
+ const marker = '\n\n[... middle of conversation truncated ...]\n\n'
158
+ const available = maxChars - marker.length
159
+ const headSize = Math.floor(available * 0.2)
160
+ const tailSize = available - headSize
161
+ return text.slice(0, headSize) + marker + text.slice(-tailSize)
162
+ }
149
163
 
150
164
  /**
151
165
  * System prompt for LLM-powered session summarization.
@@ -474,10 +488,10 @@ const executeSummarize: ActionExecutor = async (hook, context, providers) => {
474
488
 
475
489
  if (!summary && context.llm && context.messages.length >= 2) {
476
490
  try {
477
- const transcript = context.messages
478
- .map((m) => `${m.role}: ${m.content}`)
479
- .join('\n')
480
- .slice(0, MAX_TRANSCRIPT_CHARS)
491
+ const transcript = smartTruncate(
492
+ context.messages.map((m) => `${m.role}: ${m.content}`).join('\n'),
493
+ MAX_TRANSCRIPT_CHARS,
494
+ )
481
495
 
482
496
  log.debug(
483
497
  {
@@ -647,10 +661,10 @@ const executeJournal: ActionExecutor = async (hook, context, providers) => {
647
661
  }
648
662
 
649
663
  try {
650
- const transcript = context.messages
651
- .map((m) => `${m.role}: ${m.content}`)
652
- .join('\n')
653
- .slice(0, MAX_TRANSCRIPT_CHARS)
664
+ const transcript = smartTruncate(
665
+ context.messages.map((m) => `${m.role}: ${m.content}`).join('\n'),
666
+ MAX_TRANSCRIPT_CHARS,
667
+ )
654
668
 
655
669
  const journal = await context.llm(JOURNAL_PROMPT + transcript)
656
670
 
@@ -723,10 +737,10 @@ const executeExperience: ActionExecutor = async (hook, context, providers) => {
723
737
  }
724
738
 
725
739
  try {
726
- const transcript = context.messages
727
- .map((m) => `${m.role}: ${m.content}`)
728
- .join('\n')
729
- .slice(0, MAX_TRANSCRIPT_CHARS)
740
+ const transcript = smartTruncate(
741
+ context.messages.map((m) => `${m.role}: ${m.content}`).join('\n'),
742
+ MAX_TRANSCRIPT_CHARS,
743
+ )
730
744
 
731
745
  const experience = await context.llm(EXPERIENCE_PROMPT + transcript)
732
746
 
@@ -146,6 +146,11 @@ export class MemoryOrchestrator implements MemoryProvider {
146
146
  return this.currentSessionDate
147
147
  }
148
148
 
149
+ /** Number of diary context entries loaded at sessionStart */
150
+ get diaryCount(): number {
151
+ return this.diaryResults.length
152
+ }
153
+
149
154
  /**
150
155
  * Destroy all providers.
151
156
  */
package/src/lib/router.ts CHANGED
@@ -71,6 +71,9 @@ export class Router {
71
71
  /** Agent-level thinking defaults: agentId → thinking level */
72
72
  private agentThinkingMap = new Map<string, string>()
73
73
 
74
+ /** Per-session lock to serialize concurrent messages and prevent race conditions */
75
+ private routeLocks = new Map<string, Promise<void>>()
76
+
74
77
  /** Slash command handler */
75
78
  private commandHandler: CommandHandler
76
79
 
@@ -219,10 +222,26 @@ export class Router {
219
222
  // When Deimos and Hermes both handle different channels, their sessions
220
223
  // must not interfere even if they share a channel adapter.
221
224
  const sessionKey = `${agent.id}:${channelId}:${message.channelId}`
225
+
226
+ // Serialize concurrent messages for the same session to prevent race conditions
227
+ // (e.g., double "Nueva sesión" blocks when messages arrive while agent is responding)
228
+ const pending = this.routeLocks.get(sessionKey)
229
+ if (pending) {
230
+ await pending
231
+ }
232
+ let unlockRoute!: () => void
233
+ const routeLock = new Promise<void>((resolve) => {
234
+ unlockRoute = resolve
235
+ })
236
+ this.routeLocks.set(sessionKey, routeLock)
237
+
222
238
  const session = this.sessions.get(sessionKey)
223
239
 
224
240
  // ─── Command interception (before system prompt — zero token cost) ───
225
241
  if (this.commandHandler.isCommand(message.text)) {
242
+ // Show typing indicator while command executes (especially /clear with hooks)
243
+ const typingInterval = this.startTyping(channel, message.channelId)
244
+
226
245
  const ctx: CommandContext = {
227
246
  sessionKey,
228
247
  message,
@@ -244,11 +263,22 @@ export class Router {
244
263
  scheduler: this.scheduler,
245
264
  }
246
265
 
266
+ // For /clear: send immediate ack before hooks run (they can take 15-30s)
267
+ const commandName = message.text.trim().slice(1).split(/\s+/)[0]?.toLowerCase()
268
+ if (commandName === 'clear' || commandName === 'reset') {
269
+ await channel.send(message.channelId, {
270
+ text: '🧠 Guardando memoria de la sesión...',
271
+ })
272
+ }
273
+
247
274
  const response = await this.commandHandler.handle(message.text, ctx)
275
+ clearInterval(typingInterval)
248
276
  if (response) {
249
277
  const outgoing: OutgoingMessage = { text: this.redact(response) }
250
278
  await channel.send(message.channelId, outgoing)
251
279
  log.info({ command: message.text.split(/\s+/)[0] }, 'Command handled')
280
+ unlockRoute()
281
+ this.routeLocks.delete(sessionKey)
252
282
  return
253
283
  }
254
284
  }
@@ -397,6 +427,22 @@ export class Router {
397
427
  'Routing message',
398
428
  )
399
429
 
430
+ // New session greeting: send memory context summary on first message
431
+ if (!session) {
432
+ const orchestrator = this.memory as import('./memory-orchestrator').MemoryOrchestrator | undefined
433
+ const episodeId = orchestrator?.episodeId
434
+ const diaryCount = orchestrator?.diaryCount ?? 0
435
+ const moonName = moon?.name ?? 'Lunar'
436
+ const channelName = channelPersona?.name ?? message.channelId
437
+
438
+ const parts: string[] = [`🌑 **${moonName}** — Nueva sesión · #${channelName}`]
439
+ if (episodeId) parts.push(`📎 Episodio: \`${episodeId.slice(0, 8)}\``)
440
+ if (diaryCount > 0) parts.push(`🧠 ${diaryCount} entries de memoria cargadas`)
441
+ parts.push('━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━')
442
+
443
+ await channel.send(message.channelId, { text: parts.join('\n') })
444
+ }
445
+
400
446
  // Start typing
401
447
  const typingInterval = this.startTyping(channel, message.channelId)
402
448
 
@@ -511,6 +557,10 @@ export class Router {
511
557
  text: '⚠️ Something went wrong. Please try again.',
512
558
  replyTo: message.id,
513
559
  })
560
+ } finally {
561
+ // Release the per-session lock so queued messages can proceed
562
+ unlockRoute()
563
+ this.routeLocks.delete(sessionKey)
514
564
  }
515
565
  }
516
566