opencode-mastra-om 0.1.5 → 0.1.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/dist/index.js +86 -3
  2. package/package.json +1 -1
package/dist/index.js CHANGED
@@ -180045,7 +180045,7 @@ ${OBSERVATION_CONTINUATION_HINT}`);
180045
180045
  }
180046
180046
  }),
180047
180047
  om_observe: tool5({
180048
- description: "Manually trigger an observation cycle right now, without waiting for the token threshold.",
180048
+ description: "Manually trigger an observation cycle right now, without waiting for the token threshold. Automatically chunks large message sets if chunkTokens is configured.",
180049
180049
  args: {},
180050
180050
  async execute(_args, context2) {
180051
180051
  const threadId = context2.sessionID;
@@ -180056,8 +180056,54 @@ ${OBSERVATION_CONTINUATION_HINT}`);
180056
180056
  return "No messages to observe.";
180057
180057
  const mastraMessages = convertMessages2(resp.data, threadId);
180058
180058
  await backupObservations(threadId, "pre-observe");
180059
- await runObserve(threadId, mastraMessages);
180060
- return "Observation cycle triggered. Check memory_status for results.";
180059
+ const chunkTokens = config2.chunkTokens;
180060
+ if (!chunkTokens) {
180061
+ await runObserve(threadId, mastraMessages);
180062
+ return "Observation cycle triggered. Check om_status for results.";
180063
+ }
180064
+ const tokenCounter = new TokenCounter;
180065
+ const chunks = [];
180066
+ let currentChunk = [];
180067
+ let currentTokens = 0;
180068
+ for (const msg of mastraMessages) {
180069
+ const msgTokens = tokenCounter.countMessages([msg]);
180070
+ if (currentTokens + msgTokens > chunkTokens && currentChunk.length > 0) {
180071
+ chunks.push(currentChunk);
180072
+ currentChunk = [msg];
180073
+ currentTokens = msgTokens;
180074
+ } else {
180075
+ currentChunk.push(msg);
180076
+ currentTokens += msgTokens;
180077
+ }
180078
+ }
180079
+ if (currentChunk.length > 0)
180080
+ chunks.push(currentChunk);
180081
+ if (chunks.length === 1) {
180082
+ await runObserve(threadId, mastraMessages);
180083
+ return "Observation cycle triggered (single chunk). Check om_status for results.";
180084
+ }
180085
+ omLog(`[observe] chunked into ${chunks.length} chunks of ~${chunkTokens} tokens each`);
180086
+ ctx.client.tui.showToast({
180087
+ body: { title: "Mastra OM", message: `Observing in ${chunks.length} chunks...`, variant: "info", duration: 5000 }
180088
+ });
180089
+ const refThreshold = resolveThreshold(omOptions.reflection?.observationTokens ?? 60000);
180090
+ const reflectAt = Math.floor(refThreshold * 0.8);
180091
+ for (let i = 0;i < chunks.length; i++) {
180092
+ omLog(`[observe] processing chunk ${i + 1}/${chunks.length} (${chunks[i].length} messages)`);
180093
+ await runObserve(threadId, chunks[i]);
180094
+ if (i < chunks.length - 1) {
180095
+ const record3 = await om.getRecord(threadId);
180096
+ const obsTokens = record3?.observationTokenCount ?? 0;
180097
+ if (obsTokens >= reflectAt) {
180098
+ omLog(`[observe] observations at ${obsTokens} tokens (>= ${reflectAt}), reflecting before next chunk`);
180099
+ ctx.client.tui.showToast({
180100
+ body: { title: "Mastra OM", message: `Reflecting between chunks (${i + 1}/${chunks.length})...`, variant: "info", duration: 5000 }
180101
+ });
180102
+ await om.reflect(threadId);
180103
+ }
180104
+ }
180105
+ }
180106
+ return `Observation complete — processed ${chunks.length} chunks, ${mastraMessages.length} messages total. Check om_status for results.`;
180061
180107
  } catch (err) {
180062
180108
  const msg = err instanceof Error ? err.message : String(err);
180063
180109
  lastError = msg;
@@ -180155,6 +180201,43 @@ ${OBSERVATION_CONTINUATION_HINT}`);
180155
180201
  }
180156
180202
  }
180157
180203
  }),
180204
+ om_reset: tool5({
180205
+ description: "Reset observational memory for this session to a clean slate. Backs up current state first so it can be restored via om_restore.",
180206
+ args: {},
180207
+ async execute(_args, context2) {
180208
+ const threadId = context2.sessionID;
180209
+ try {
180210
+ const db = store.turso;
180211
+ if (!db)
180212
+ return "Raw DB access unavailable.";
180213
+ await backupObservations(threadId, "pre-reset");
180214
+ await db.execute({
180215
+ sql: `UPDATE mastra_observational_memory SET
180216
+ activeObservations = '',
180217
+ generationCount = 0,
180218
+ observationTokenCount = 0,
180219
+ lastObservedAt = NULL,
180220
+ lastReflectionAt = NULL,
180221
+ pendingMessageTokens = 0,
180222
+ observedMessageIds = '[]',
180223
+ bufferedObservations = NULL,
180224
+ bufferedObservationTokens = 0,
180225
+ bufferedMessageIds = NULL,
180226
+ bufferedReflection = NULL,
180227
+ bufferedReflectionTokens = 0,
180228
+ bufferedReflectionInputTokens = 0,
180229
+ reflectedObservationLineCount = 0
180230
+ WHERE lookupKey = ?`,
180231
+ args: [threadId]
180232
+ });
180233
+ omLog(`[reset] observations cleared for ${threadId}`);
180234
+ return "✅ Observational memory reset. Previous state saved to backup slot 1 — use om_restore to recover if needed.";
180235
+ } catch (err) {
180236
+ const msg = err instanceof Error ? err.message : String(err);
180237
+ return `Reset failed: ${msg}`;
180238
+ }
180239
+ }
180240
+ }),
180158
180241
  om_config: tool5({
180159
180242
  description: "Show the current Mastra Observational Memory configuration.",
180160
180243
  args: {},
package/package.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "$schema": "https://json.schemastore.org/package.json",
3
3
  "name": "opencode-mastra-om",
4
- "version": "0.1.5",
4
+ "version": "0.1.6",
5
5
  "type": "module",
6
6
  "license": "MIT",
7
7
  "description": "Enhanced Mastra Observational Memory plugin for OpenCode — persistent cross-session memory with observation, reflection, and manual trigger tools",