@onmars/lunar-core 0.4.5 → 0.4.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@onmars/lunar-core",
-  "version": "0.4.5",
+  "version": "0.4.7",
   "type": "module",
   "main": "src/index.ts",
   "types": "src/index.ts",
@@ -144,8 +144,22 @@ export function evaluateGuard(
 // Constants
 // ════════════════════════════════════════════════════════════
 
-/** Maximum transcript characters sent to LLM for summarization (~1000 tokens) */
-const MAX_TRANSCRIPT_CHARS = 4000
+/** Maximum transcript characters sent to LLM for summarization (~4000 tokens) */
+const MAX_TRANSCRIPT_CHARS = 16000
+
+/**
+ * Smart truncation: if the full text fits, return it.
+ * Otherwise keep the first 20% and last 80% with a cut marker.
+ * This ensures recent context (decisions, conclusions) is always captured.
+ */
+function smartTruncate(text: string, maxChars: number): string {
+  if (text.length <= maxChars) return text
+  const marker = '\n\n[... middle of conversation truncated ...]\n\n'
+  const available = maxChars - marker.length
+  const headSize = Math.floor(available * 0.2)
+  const tailSize = available - headSize
+  return text.slice(0, headSize) + marker + text.slice(-tailSize)
+}
 
 /**
  * System prompt for LLM-powered session summarization.
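To illustrate the shape of the truncated output, here is a standalone sketch (not part of this diff; the helper is copied verbatim from the hunk above so the snippet runs on its own, and the sample transcript is hypothetical). An over-length transcript keeps its opening lines and, more importantly, its most recent lines:

// Standalone sketch — smartTruncate copied from the hunk above.
function smartTruncate(text: string, maxChars: number): string {
  if (text.length <= maxChars) return text
  const marker = '\n\n[... middle of conversation truncated ...]\n\n'
  const available = maxChars - marker.length
  const headSize = Math.floor(available * 0.2)
  const tailSize = available - headSize
  return text.slice(0, headSize) + marker + text.slice(-tailSize)
}

// Hypothetical over-length transcript (~37k characters).
const transcript = Array.from({ length: 2000 }, (_, i) => `user: message ${i}`).join('\n')
const clipped = smartTruncate(transcript, 16_000) // same limit as MAX_TRANSCRIPT_CHARS

console.log(clipped.length)                                                   // 16000 — exactly the budget
console.log(clipped.startsWith('user: message 0'))                            // true — the opening is kept
console.log(clipped.endsWith('user: message 1999'))                           // true — the latest lines survive
console.log(clipped.includes('[... middle of conversation truncated ...]'))   // true — only the middle is cut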
@@ -474,10 +488,10 @@ const executeSummarize: ActionExecutor = async (hook, context, providers) => {
 
   if (!summary && context.llm && context.messages.length >= 2) {
     try {
-      const transcript = context.messages
-        .map((m) => `${m.role}: ${m.content}`)
-        .join('\n')
-        .slice(0, MAX_TRANSCRIPT_CHARS)
+      const transcript = smartTruncate(
+        context.messages.map((m) => `${m.role}: ${m.content}`).join('\n'),
+        MAX_TRANSCRIPT_CHARS,
+      )
 
       log.debug(
         {
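For reference, the transcript string each executor builds before truncation is just role-prefixed lines, one per message. A hypothetical example of the format (the message objects below are assumptions, not taken from the package):

// Hypothetical messages, assuming the { role, content } shape used in the hunks above.
const messages = [
  { role: 'user', content: 'Ship the release notes for 0.4.7' },
  { role: 'assistant', content: 'Done. Summary saved to the journal.' },
]
const transcript = messages.map((m) => `${m.role}: ${m.content}`).join('\n')
// user: Ship the release notes for 0.4.7
// assistant: Done. Summary saved to the journal.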
@@ -647,10 +661,10 @@ const executeJournal: ActionExecutor = async (hook, context, providers) => {
   }
 
   try {
-    const transcript = context.messages
-      .map((m) => `${m.role}: ${m.content}`)
-      .join('\n')
-      .slice(0, MAX_TRANSCRIPT_CHARS)
+    const transcript = smartTruncate(
+      context.messages.map((m) => `${m.role}: ${m.content}`).join('\n'),
+      MAX_TRANSCRIPT_CHARS,
+    )
 
     const journal = await context.llm(JOURNAL_PROMPT + transcript)
 
@@ -723,10 +737,10 @@ const executeExperience: ActionExecutor = async (hook, context, providers) => {
   }
 
   try {
-    const transcript = context.messages
-      .map((m) => `${m.role}: ${m.content}`)
-      .join('\n')
-      .slice(0, MAX_TRANSCRIPT_CHARS)
+    const transcript = smartTruncate(
+      context.messages.map((m) => `${m.role}: ${m.content}`).join('\n'),
+      MAX_TRANSCRIPT_CHARS,
+    )
 
     const experience = await context.llm(EXPERIENCE_PROMPT + transcript)
 
package/src/lib/router.ts CHANGED
@@ -71,6 +71,9 @@ export class Router {
   /** Agent-level thinking defaults: agentId → thinking level */
   private agentThinkingMap = new Map<string, string>()
 
+  /** Per-session lock to serialize concurrent messages and prevent race conditions */
+  private routeLocks = new Map<string, Promise<void>>()
+
   /** Slash command handler */
   private commandHandler: CommandHandler
 
@@ -219,6 +222,19 @@ export class Router {
     // When Deimos and Hermes both handle different channels, their sessions
     // must not interfere even if they share a channel adapter.
     const sessionKey = `${agent.id}:${channelId}:${message.channelId}`
+
+    // Serialize concurrent messages for the same session to prevent race conditions
+    // (e.g., double "Nueva sesión" blocks when messages arrive while agent is responding)
+    const pending = this.routeLocks.get(sessionKey)
+    if (pending) {
+      await pending
+    }
+    let unlockRoute!: () => void
+    const routeLock = new Promise<void>((resolve) => {
+      unlockRoute = resolve
+    })
+    this.routeLocks.set(sessionKey, routeLock)
+
     const session = this.sessions.get(sessionKey)
 
     // ─── Command interception (before system prompt — zero token cost) ───
@@ -261,6 +277,8 @@ export class Router {
         const outgoing: OutgoingMessage = { text: this.redact(response) }
         await channel.send(message.channelId, outgoing)
         log.info({ command: message.text.split(/\s+/)[0] }, 'Command handled')
+        unlockRoute()
+        this.routeLocks.delete(sessionKey)
         return
       }
     }
@@ -539,6 +557,10 @@ export class Router {
         text: '⚠️ Something went wrong. Please try again.',
         replyTo: message.id,
       })
+    } finally {
+      // Release the per-session lock so queued messages can proceed
+      unlockRoute()
+      this.routeLocks.delete(sessionKey)
     }
   }
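For readers unfamiliar with this locking style, the mechanism spread across the router.ts hunks can be sketched in isolation. The helper below is hypothetical (not the package's API) and uses promise chaining rather than the explicit resolve-based lock shown above, but it targets the same stated goal: work that shares a session key runs one task at a time, in arrival order.

// Hypothetical standalone helper (not part of the package): per-key serialization
// of async work, in the spirit of the routeLocks map added above.
const queues = new Map<string, Promise<void>>()

function serialize(key: string, work: () => Promise<void>): Promise<void> {
  // The stored tail never rejects, so later callers can always chain onto it.
  const prev = queues.get(key) ?? Promise.resolve()
  const run = prev.then(work)
  const tail = run.then(() => {}, () => {})
  queues.set(key, tail)
  // Remove the entry once this task settles, unless a newer task already replaced it.
  tail.then(() => {
    if (queues.get(key) === tail) queues.delete(key)
  })
  return run
}

// Usage sketch with a hypothetical session key and handler, mirroring the
// `${agent.id}:${channelId}:${message.channelId}` key format used above:
// serialize('deimos:telegram:chat-42', () => handleMessage(first))
// serialize('deimos:telegram:chat-42', () => handleMessage(second))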