@tpsdev-ai/flair 0.2.1 → 0.3.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -3,11 +3,11 @@
3
3
  [![CI](https://github.com/tpsdev-ai/flair/actions/workflows/test.yml/badge.svg)](https://github.com/tpsdev-ai/flair/actions/workflows/test.yml)
4
4
  [![License: Apache 2.0](https://img.shields.io/badge/License-Apache_2.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
5
5
 
6
- **Identity, memory, and soul for AI agents.**
6
+ **Identity, memory, and soul for AI agents. Runs standalone or as part of a [TPS](https://tps.dev) office.**
7
7
 
8
8
  Agents forget everything between sessions. Flair gives them a persistent sense of self — who they are, what they know, how they think — backed by cryptographic identity and semantic search.
9
9
 
10
- Built on [Harper](https://github.com/HarperFast/harper). Single process. No sidecars. Zero external API calls for embeddings.
10
+ Built on [Harper](https://harper.fast). Single process. No sidecars. Zero external API calls for embeddings.
11
11
 
12
12
  ## Why
13
13
 
@@ -23,7 +23,7 @@ Flair fixes that:
23
23
 
24
24
  ## How It Works
25
25
 
26
- Flair is a native [Harper v5](https://github.com/HarperFast/harper) application. Harper handles HTTP, persistence (RocksDB), and application logic in a single process.
26
+ Flair is a native [Harper v5](https://harper.fast) application. Harper handles HTTP, persistence (RocksDB), and application logic in a single process.
27
27
 
28
28
  ```
29
29
  Agent ──[Ed25519-signed request]──▶ Flair (Harper)
@@ -72,7 +72,7 @@ One Flair instance serves any number of agents. Each agent has its own keys, mem
72
72
  ## Quick Start
73
73
 
74
74
  ### Prerequisites
75
- - [Node.js 20+](https://nodejs.org/) (24+ recommended)
75
+ - [Node.js 22+](https://nodejs.org/)
76
76
 
77
77
  ### Install & Run
78
78
 
@@ -92,53 +92,120 @@ flair status
92
92
 
93
93
  That's it. Your agent now has identity and memory.
94
94
 
95
- ### Use with OpenClaw
95
+ ## Integration
96
+
97
+ Flair works with any agent runtime. Pick the path that fits yours.
98
+
99
+ ### Standalone (Flair CLI)
100
+
101
+ Use the `flair` CLI directly from any agent that can run shell commands.
102
+
103
+ ```bash
104
+ # Write a memory
105
+ flair memory add --agent mybot --content "learned something important"
106
+
107
+ # Search by meaning
108
+ flair memory search --agent mybot --q "that important thing"
109
+
110
+ # Set personality
111
+ flair soul set --agent mybot --key role --value "Security reviewer"
112
+
113
+ # Cold-start bootstrap (soul + recent memories)
114
+ flair bootstrap --agent mybot --max-tokens 4000
115
+
116
+ # Backup / restore
117
+ flair backup --admin-pass "$FLAIR_ADMIN_PASS"
118
+ flair restore ./backup.json --admin-pass "$FLAIR_ADMIN_PASS"
119
+ ```
120
+
121
+ ### OpenClaw
122
+
123
+ One command. Zero config.
96
124
 
97
125
  ```bash
98
- npm install @tpsdev-ai/openclaw-flair
126
+ openclaw plugins install @tpsdev-ai/openclaw-flair
99
127
  ```
100
128
 
101
- Add to your `openclaw.json`:
102
- ```json
103
- {
104
- "memory": {
105
- "provider": "@tpsdev-ai/openclaw-flair"
106
- }
107
- }
129
+ The plugin auto-detects your agent identity, provides `memory_store`/`memory_recall`/`memory_get` tools, and injects relevant memories at session start. See the [plugin README](plugins/openclaw-flair/README.md) for details.
130
+
131
+ ### Claude Code / Codex / Cursor
132
+
133
+ Add a snippet to your `CLAUDE.md` (or `AGENTS.md`, `.codex/instructions.md`, etc.):
134
+
135
+ ```markdown
136
+ ## Memory
137
+
138
+ You have persistent memory via Flair. Use it.
139
+
140
+ ### On session start
141
+ Run: `flair bootstrap --agent mybot --max-tokens 4000`
142
+ This returns your soul + recent memories. Read it — that's your context.
143
+
144
+ ### During work
145
+ - Remember something: `flair memory add --agent mybot --content "what you learned"`
146
+ - Search memory: `flair memory search --agent mybot --q "your query"`
147
+ - Store a lesson: `flair memory add --agent mybot --content "lesson" --type lesson --durability persistent`
148
+
149
+ ### Rules
150
+ - Bootstrap FIRST, before doing anything else
151
+ - Store lessons and decisions immediately — don't wait
152
+ - If you learn something that should survive restarts, write it to Flair
108
153
  ```
109
154
 
110
- Your agent will automatically remember things between sessions and recall them by meaning.
155
+ ### JavaScript / TypeScript (Client Library)
111
156
 
112
- ### Use the CLI directly
157
+ For custom integrations, use the lightweight client — no Harper, no embeddings, just HTTP + auth:
113
158
 
114
159
  ```bash
115
- # Write a memory
116
- flair memory add --agent mybot --content "Harper v5 sandbox blocks bare imports"
160
+ npm install @tpsdev-ai/flair-client
161
+ ```
117
162
 
118
- # Search by meaning, not keywords
119
- flair memory search --agent mybot --q "native module loading issues"
163
+ ```typescript
164
+ import { FlairClient } from '@tpsdev-ai/flair-client'
120
165
 
121
- # Set personality
122
- flair soul set --agent mybot --key role --value "Security reviewer, meticulous and skeptical"
166
+ const flair = new FlairClient({
167
+ url: 'http://localhost:9926', // or remote: https://flair.example.com
168
+ agentId: 'mybot',
169
+ // key auto-resolved from ~/.flair/keys/mybot.key
170
+ })
123
171
 
124
- # Back up everything
125
- flair backup --admin-pass "$FLAIR_ADMIN_PASS"
172
+ // Write a memory
173
+ await flair.memory.write('Harper v5 sandbox blocks bare imports')
174
+
175
+ // Search by meaning
176
+ const results = await flair.memory.search('native module loading')
177
+
178
+ // Cold-start bootstrap
179
+ const ctx = await flair.bootstrap({ maxTokens: 4000 })
126
180
 
127
- # Restore from backup
128
- flair restore ./flair-backup-2026-03-15.json --admin-pass "$FLAIR_ADMIN_PASS"
181
+ // Set personality
182
+ await flair.soul.set('role', 'Security reviewer')
129
183
  ```
130
184
 
131
- ### Cold Start Bootstrap
185
+ See the [client README](packages/flair-client/README.md) for the full API.
132
186
 
133
- Agents can pull their full context on startup via the `BootstrapMemories` endpoint:
187
+ ### HTTP API (Any Language)
188
+
189
+ Flair is a pure HTTP API. Use it from Python, Go, Rust, shell scripts — anything that can make HTTP requests and sign with Ed25519.
134
190
 
135
191
  ```bash
136
- curl -H "Authorization: TPS-Ed25519 ..." \
192
+ # Search memories
193
+ curl -H "Authorization: TPS-Ed25519 mybot:$TS:$NONCE:$SIG" \
194
+ -X POST http://localhost:9926/SemanticSearch \
195
+ -d '{"agentId": "mybot", "q": "deployment procedure", "limit": 5}'
196
+
197
+ # Write a memory
198
+ curl -H "Authorization: TPS-Ed25519 mybot:$TS:$NONCE:$SIG" \
199
+ -X PUT http://localhost:9926/Memory/mybot-123 \
200
+ -d '{"id": "mybot-123", "agentId": "mybot", "content": "...", "durability": "standard"}'
201
+
202
+ # Bootstrap (soul + recent memories)
203
+ curl -H "Authorization: TPS-Ed25519 mybot:$TS:$NONCE:$SIG" \
137
204
  -X POST http://localhost:9926/BootstrapMemories \
138
205
  -d '{"agentId": "mybot", "maxTokens": 4000}'
139
206
  ```
140
207
 
141
- Returns soul + recent memories + relevant context as a formatted block. Bounded context regardless of total memory size.
208
+ Auth is Ed25519 — sign `agentId:timestamp:nonce:METHOD:/path` with your private key. See [SECURITY.md](SECURITY.md) for the full protocol.
142
209
 
143
210
  ## Architecture
144
211
 
@@ -154,11 +221,11 @@ flair/
154
221
  │ ├── embeddings-provider.ts # In-process nomic embeddings
155
222
  │ ├── Memory.ts # Durability enforcement + auto-embed
156
223
  │ ├── Soul.ts # Permanent-by-default personality
157
- │ ├── MemorySearch.ts # Hybrid semantic + keyword search
224
+ │ ├── SemanticSearch.ts # Hybrid semantic + keyword search
158
225
  │ ├── MemoryBootstrap.ts # Cold start context assembly
159
226
  │ └── MemoryFeed.ts # Real-time memory changes
160
227
  ├── plugins/
161
- │ └── openclaw-memory/ # @tpsdev-ai/openclaw-flair plugin
228
+ │ └── openclaw-flair/ # @tpsdev-ai/openclaw-flair plugin
162
229
  └── SECURITY.md # Threat model + auth documentation
163
230
  ```
164
231
 
@@ -219,9 +286,9 @@ Integration tests spin up a real Harper instance on a random port, run the test
219
286
 
220
287
  ## Status
221
288
 
222
- > **Note:** Flair uses [Harper v5](https://github.com/HarperFast/harper), currently in beta. We run it in production daily and track upstream closely. Pin your Harper version.
289
+ > **Note:** Flair uses [Harper v5](https://harper.fast), currently in beta. We run it in production daily and track upstream closely. Pin your Harper version.
223
290
 
224
- Flair is in active development and daily use. We dogfood it — the agents that build Flair use Flair for their own memory and identity. 7 agents, 150+ memories, running continuously since March 2026.
291
+ Flair is in active development and daily use. We dogfood it — the agents that build Flair use Flair for their own memory and identity.
225
292
 
226
293
  **What works:**
227
294
  - ✅ Ed25519 agent identity and auth
package/dist/cli.js CHANGED
@@ -42,7 +42,19 @@ function b64url(bytes) {
42
42
  return Buffer.from(bytes).toString("base64url");
43
43
  }
44
44
  async function api(method, path, body) {
45
- const base = process.env.FLAIR_URL || "http://127.0.0.1:8787";
45
+ // Resolve port: FLAIR_URL env > ~/.flair/config.yaml > default 9926
46
+ let defaultUrl = "http://127.0.0.1:9926";
47
+ try {
48
+ const configPath = join(homedir(), ".flair", "config.yaml");
49
+ if (existsSync(configPath)) {
50
+ const yaml = readFileSync(configPath, "utf-8");
51
+ const portMatch = yaml.match(/port:\s*(\d+)/);
52
+ if (portMatch)
53
+ defaultUrl = `http://127.0.0.1:${portMatch[1]}`;
54
+ }
55
+ }
56
+ catch { /* ignore config read errors */ }
57
+ const base = process.env.FLAIR_URL || defaultUrl;
46
58
  const token = process.env.FLAIR_TOKEN;
47
59
  const res = await fetch(`${base}${path}`, {
48
60
  method,
@@ -57,16 +69,40 @@ async function api(method, path, body) {
57
69
  throw new Error(JSON.stringify(json));
58
70
  return json;
59
71
  }
72
+ /** Find the agent's private key file from standard locations. */
73
+ function resolveKeyPath(agentId) {
74
+ const candidates = [
75
+ process.env.FLAIR_KEY_DIR ? join(process.env.FLAIR_KEY_DIR, `${agentId}.key`) : null,
76
+ join(homedir(), ".flair", "keys", `${agentId}.key`),
77
+ join(homedir(), ".tps", "secrets", "flair", `${agentId}-priv.key`),
78
+ ].filter(Boolean);
79
+ return candidates.find((p) => existsSync(p)) ?? null;
80
+ }
60
81
  /** Build a TPS-Ed25519 auth header from a raw 32-byte seed on disk. */
61
82
  function buildEd25519Auth(agentId, method, path, keyPath) {
62
- const rawBuf = readFileSync(keyPath);
83
+ const raw = readFileSync(keyPath);
84
+ const pkcs8Header = Buffer.from("302e020100300506032b657004220420", "hex");
63
85
  let privKey;
64
- if (rawBuf.length === 32) {
65
- const pkcs8Header = Buffer.from("302e020100300506032b657004220420", "hex");
66
- privKey = createPrivateKey({ key: Buffer.concat([pkcs8Header, rawBuf]), format: "der", type: "pkcs8" });
86
+ if (raw.length === 32) {
87
+ // Raw 32-byte seed
88
+ privKey = createPrivateKey({ key: Buffer.concat([pkcs8Header, raw]), format: "der", type: "pkcs8" });
67
89
  }
68
90
  else {
69
- privKey = createPrivateKey(rawBuf);
91
+ // Try as base64-encoded PKCS8 DER (standard Flair key format)
92
+ const decoded = Buffer.from(raw.toString("utf-8").trim(), "base64");
93
+ if (decoded.length === 32) {
94
+ // Base64-encoded raw seed
95
+ privKey = createPrivateKey({ key: Buffer.concat([pkcs8Header, decoded]), format: "der", type: "pkcs8" });
96
+ }
97
+ else {
98
+ // Full PKCS8 DER or PEM
99
+ try {
100
+ privKey = createPrivateKey({ key: decoded, format: "der", type: "pkcs8" });
101
+ }
102
+ catch {
103
+ privKey = createPrivateKey(raw);
104
+ }
105
+ }
70
106
  }
71
107
  const ts = Date.now().toString();
72
108
  const nonce = randomUUID();
@@ -665,6 +701,91 @@ memory.command("list").requiredOption("--agent <id>").option("--tag <tag>")
665
701
  const q = new URLSearchParams({ agentId: opts.agent, ...(opts.tag ? { tag: opts.tag } : {}) }).toString();
666
702
  console.log(JSON.stringify(await api("GET", `/Memory?${q}`), null, 2));
667
703
  });
704
+ // ─── flair search (top-level shortcut) ───────────────────────────────────────
705
+ program
706
+ .command("search <query>")
707
+ .description("Search memories by meaning (shortcut for memory search)")
708
+ .requiredOption("--agent <id>", "Agent ID")
709
+ .option("--limit <n>", "Max results", "5")
710
+ .option("--port <port>", "Harper HTTP port", String(DEFAULT_PORT))
711
+ .option("--url <url>", "Flair base URL (overrides --port)")
712
+ .option("--key <path>", "Ed25519 private key path")
713
+ .action(async (query, opts) => {
714
+ try {
715
+ const baseUrl = opts.url || `http://127.0.0.1:${opts.port}`;
716
+ const headers = { "content-type": "application/json" };
717
+ const keyPath = opts.key || resolveKeyPath(opts.agent);
718
+ if (keyPath) {
719
+ headers["authorization"] = buildEd25519Auth(opts.agent, "POST", "/SemanticSearch", keyPath);
720
+ }
721
+ const res = await fetch(`${baseUrl}/SemanticSearch`, {
722
+ method: "POST",
723
+ headers,
724
+ body: JSON.stringify({ agentId: opts.agent, q: query, limit: parseInt(opts.limit, 10) }),
725
+ });
726
+ if (!res.ok)
727
+ throw new Error(await res.text());
728
+ const result = await res.json();
729
+ const results = result.results || result || [];
730
+ if (!Array.isArray(results) || results.length === 0) {
731
+ console.log("No results found.");
732
+ return;
733
+ }
734
+ for (const r of results) {
735
+ const date = r.createdAt ? r.createdAt.slice(0, 10) : "";
736
+ const score = r._score ? `${(r._score * 100).toFixed(0)}%` : "";
737
+ const meta = [date, r.type, score].filter(Boolean).join(" · ");
738
+ console.log(` ${r.content}`);
739
+ if (meta)
740
+ console.log(` (${meta})`);
741
+ console.log();
742
+ }
743
+ }
744
+ catch (err) {
745
+ console.error(`Search failed: ${err.message}`);
746
+ process.exit(1);
747
+ }
748
+ });
749
+ // ─── flair bootstrap ─────────────────────────────────────────────────────────
750
+ program
751
+ .command("bootstrap")
752
+ .description("Cold-start context: get soul + recent memories as formatted text")
753
+ .requiredOption("--agent <id>", "Agent ID")
754
+ .option("--max-tokens <n>", "Maximum tokens in output", "4000")
755
+ .option("--port <port>", "Harper HTTP port", String(DEFAULT_PORT))
756
+ .option("--url <url>", "Flair base URL (overrides --port)")
757
+ .option("--key <path>", "Ed25519 private key path")
758
+ .action(async (opts) => {
759
+ const baseUrl = opts.url || `http://127.0.0.1:${opts.port}`;
760
+ try {
761
+ const headers = { "content-type": "application/json" };
762
+ const keyPath = opts.key || resolveKeyPath(opts.agent);
763
+ if (keyPath) {
764
+ headers["authorization"] = buildEd25519Auth(opts.agent, "POST", "/BootstrapMemories", keyPath);
765
+ }
766
+ const res = await fetch(`${baseUrl}/BootstrapMemories`, {
767
+ method: "POST",
768
+ headers,
769
+ body: JSON.stringify({ agentId: opts.agent, maxTokens: parseInt(opts.maxTokens, 10) }),
770
+ });
771
+ if (!res.ok) {
772
+ const body = await res.text();
773
+ throw new Error(`${res.status}: ${body}`);
774
+ }
775
+ const result = await res.json();
776
+ if (result.context) {
777
+ console.log(result.context);
778
+ }
779
+ else {
780
+ console.error("No context available.");
781
+ process.exit(1);
782
+ }
783
+ }
784
+ catch (err) {
785
+ console.error(`Bootstrap failed: ${err.message}`);
786
+ process.exit(1);
787
+ }
788
+ });
668
789
  const soul = program.command("soul").description("Manage agent soul entries");
669
790
  soul.command("set").requiredOption("--agent <id>").requiredOption("--key <key>").requiredOption("--value <value>")
670
791
  .option("--durability <d>", "permanent")
@@ -54,12 +54,22 @@ export class BootstrapMemories extends Resource {
54
54
  let tokenBudget = maxTokens;
55
55
  let memoriesIncluded = 0;
56
56
  let memoriesAvailable = 0;
57
- // --- 1. Soul records (unconditional — not subject to token budget) ---
58
- // Soul is who you are. It's not optional context to be trimmed.
59
- // Skill assignments (key='skill-assignment') are separated into their own section.
57
+ // --- 1. Soul records (budgeted — prioritized by key importance) ---
58
+ // Soul is who you are, but we still need to respect token budgets.
59
+ // Workspace files (SOUL.md, AGENTS.md) can be massive — they're already
60
+ // injected by the runtime via workspace context, so we prioritize
61
+ // concise soul entries over full file dumps.
62
+ const SOUL_KEY_PRIORITY = {
63
+ role: 0, identity: 1, thinking: 2, communication_style: 3,
64
+ team: 4, ownership: 5, infrastructure: 6, "user-context": 7,
65
+ // Full workspace files — lowest priority (runtime already injects these)
66
+ soul: 90, "workspace-rules": 91,
67
+ };
60
68
  const skillAssignments = [];
69
+ const soulMaxTokens = Math.floor(maxTokens * 0.4); // 40% of budget for soul
61
70
  if (includeSoul) {
62
71
  let soulTokens = 0;
72
+ const soulEntries = [];
63
73
  for await (const record of databases.flair.Soul.search()) {
64
74
  if (record.agentId !== agentId)
65
75
  continue;
@@ -68,11 +78,29 @@ export class BootstrapMemories extends Resource {
68
78
  continue;
69
79
  }
70
80
  const line = `**${record.key}:** ${record.value}`;
71
- sections.soul.push(line);
72
- soulTokens += estimateTokens(line);
81
+ const tokens = estimateTokens(line);
82
+ const priority = SOUL_KEY_PRIORITY[record.key] ?? 50;
83
+ soulEntries.push({ key: record.key, line, tokens, priority });
84
+ }
85
+ // Sort by priority (lower = more important)
86
+ soulEntries.sort((a, b) => a.priority - b.priority);
87
+ for (const entry of soulEntries) {
88
+ if (soulTokens + entry.tokens > soulMaxTokens) {
89
+ // Skip large entries that exceed budget — truncate or skip
90
+ if (entry.priority >= 90)
91
+ continue; // skip full workspace files
92
+ // Truncate if it's important but too long
93
+ const maxChars = (soulMaxTokens - soulTokens) * 4;
94
+ if (maxChars > 100) {
95
+ const truncated = `**${entry.key}:** ${entry.line.slice(entry.key.length + 6, entry.key.length + 6 + maxChars)}…(truncated)`;
96
+ sections.soul.push(truncated);
97
+ soulTokens += estimateTokens(truncated);
98
+ }
99
+ continue;
100
+ }
101
+ sections.soul.push(entry.line);
102
+ soulTokens += entry.tokens;
73
103
  }
74
- // Soul tokens are tracked but don't reduce memory budget
75
- tokenBudget = maxTokens; // memory budget is separate from soul
76
104
  }
77
105
  // --- 1b. Skill assignments (ordered by priority, conflict detection) ---
78
106
  if (skillAssignments.length > 0) {
@@ -134,15 +162,27 @@ export class BootstrapMemories extends Resource {
134
162
  memoriesIncluded++;
135
163
  }
136
164
  }
137
- // --- 3. Recent memories (last 24-48h, standard + persistent) ---
138
- const sinceDate = since
139
- ? new Date(since)
140
- : new Date(Date.now() - 48 * 3600_000);
141
- const recent = activeMemories
142
- .filter((m) => m.durability !== "permanent" &&
143
- m.createdAt &&
144
- new Date(m.createdAt) >= sinceDate)
165
+ // --- 3. Recent memories (adaptive window) ---
166
+ // Start with 48h. If nothing found, widen to 7d, then 30d.
167
+ // This prevents empty recent sections for agents that were idle.
168
+ const nonPermanent = activeMemories
169
+ .filter((m) => m.durability !== "permanent" && m.createdAt)
145
170
  .sort((a, b) => (b.createdAt || "").localeCompare(a.createdAt || ""));
171
+ let effectiveSince;
172
+ if (since) {
173
+ effectiveSince = new Date(since);
174
+ }
175
+ else {
176
+ const windows = [48 * 3600_000, 7 * 24 * 3600_000, 30 * 24 * 3600_000];
177
+ effectiveSince = new Date(Date.now() - windows[0]);
178
+ for (const w of windows) {
179
+ effectiveSince = new Date(Date.now() - w);
180
+ const count = nonPermanent.filter((m) => new Date(m.createdAt) >= effectiveSince).length;
181
+ if (count >= 3)
182
+ break; // found enough recent memories
183
+ }
184
+ }
185
+ const recent = nonPermanent.filter((m) => new Date(m.createdAt) >= effectiveSince);
146
186
  // Budget: up to 40% of remaining for recent
147
187
  const recentBudget = Math.floor(tokenBudget * 0.4);
148
188
  let recentSpent = 0;
@@ -0,0 +1,90 @@
1
+ /**
2
+ * MemoryMaintenance.ts — Maintenance worker for memory hygiene.
3
+ *
4
+ * POST /MemoryMaintenance/ — runs cleanup tasks:
5
+ * 1. Delete expired ephemeral memories (expiresAt < now)
6
+ * 2. Archive old session memories (> 30 days, standard durability)
7
+ * 3. Report stats
8
+ *
9
+ * Designed to run periodically (daily cron or heartbeat).
10
+ * Requires admin auth.
11
+ */
12
+ export default class MemoryMaintenance {
13
+ static ROUTE = "MemoryMaintenance";
14
+ static METHOD = "POST";
15
+ async post(data) {
16
+ const { databases } = this;
17
+ const request = this.request;
18
+ const { dryRun = false, agentId } = data || {};
19
+ // Scope to authenticated agent. Admin can pass agentId for system-wide
20
+ // maintenance; non-admin always scoped to their own agent.
21
+ const authAgent = request?.headers?.get?.("x-tps-agent");
22
+ const isAdmin = request?.tpsAgentIsAdmin === true;
23
+ const targetAgent = isAdmin && agentId ? agentId : authAgent;
24
+ if (!targetAgent && !isAdmin) {
25
+ return { error: "agentId required" };
26
+ }
27
+ const now = new Date();
28
+ const stats = { expired: 0, archived: 0, total: 0, errors: 0, agent: targetAgent || "all" };
29
+ try {
30
+ for await (const record of databases.flair.Memory.search()) {
31
+ // Skip records not belonging to target agent (unless admin running system-wide)
32
+ if (targetAgent && record.agentId !== targetAgent)
33
+ continue;
34
+ stats.total++;
35
+ // 1. Delete expired memories
36
+ if (record.expiresAt && new Date(record.expiresAt) < now) {
37
+ if (!dryRun) {
38
+ try {
39
+ await databases.flair.Memory.delete(record.id);
40
+ stats.expired++;
41
+ }
42
+ catch {
43
+ stats.errors++;
44
+ }
45
+ }
46
+ else {
47
+ stats.expired++;
48
+ }
49
+ continue;
50
+ }
51
+ // 2. Archive old standard session memories (> 30 days)
52
+ // These are low-value session notes that weren't promoted to persistent.
53
+ // Archiving removes them from search results but keeps the data.
54
+ if (record.durability === "standard" &&
55
+ record.type === "session" &&
56
+ !record.archived &&
57
+ record.createdAt) {
58
+ const ageMs = now.getTime() - new Date(record.createdAt).getTime();
59
+ const ageDays = ageMs / (24 * 3600_000);
60
+ if (ageDays > 30) {
61
+ if (!dryRun) {
62
+ try {
63
+ // Soft archive — set archived flag, keep data
64
+ await databases.flair.Memory.update(record.id, {
65
+ ...record,
66
+ archived: true,
67
+ archivedAt: now.toISOString(),
68
+ });
69
+ stats.archived++;
70
+ }
71
+ catch {
72
+ stats.errors++;
73
+ }
74
+ }
75
+ else {
76
+ stats.archived++;
77
+ }
78
+ }
79
+ }
80
+ }
81
+ }
82
+ catch (err) {
83
+ return { error: err.message, stats };
84
+ }
85
+ return {
86
+ message: dryRun ? "Dry run complete" : "Maintenance complete",
87
+ stats,
88
+ };
89
+ }
90
+ }
@@ -44,7 +44,7 @@ function compositeScore(semanticScore, record) {
44
44
  }
45
45
  export class SemanticSearch extends Resource {
46
46
  async post(data) {
47
- const { agentId, q, queryEmbedding, tag, subject, subjects, limit = 10, includeSuperseded = false, scoring = "composite" } = data || {};
47
+ const { agentId, q, queryEmbedding, tag, subject, subjects, limit = 10, includeSuperseded = false, scoring = "composite", minScore = 0, since } = data || {};
48
48
  const subjectFilter = subjects
49
49
  ? new Set(subjects.map((s) => s.toLowerCase()))
50
50
  : subject
@@ -86,6 +86,39 @@ export class SemanticSearch extends Resource {
86
86
  catch { }
87
87
  }
88
88
  }
89
+ // ─── Temporal intent detection ────────────────────────────────────────────
90
+ // If the query implies a time window and no explicit `since` was provided,
91
+ // auto-detect and apply a recency boost.
92
+ let sinceDate = since ? new Date(since) : null;
93
+ let temporalBoost = 1.0;
94
+ if (q && !sinceDate) {
95
+ const lq = String(q).toLowerCase();
96
+ if (/\btoday\b|\bthis morning\b|\bthis afternoon\b/.test(lq)) {
97
+ const d = new Date();
98
+ d.setHours(0, 0, 0, 0);
99
+ sinceDate = d;
100
+ temporalBoost = 1.5; // boost recent results for temporal queries
101
+ }
102
+ else if (/\byesterday\b/.test(lq)) {
103
+ const d = new Date();
104
+ d.setDate(d.getDate() - 1);
105
+ d.setHours(0, 0, 0, 0);
106
+ sinceDate = d;
107
+ temporalBoost = 1.3;
108
+ }
109
+ else if (/\bthis week\b|\blast few days\b/.test(lq)) {
110
+ sinceDate = new Date(Date.now() - 7 * 24 * 3600_000);
111
+ temporalBoost = 1.2;
112
+ }
113
+ else if (/\blast week\b/.test(lq)) {
114
+ sinceDate = new Date(Date.now() - 14 * 24 * 3600_000);
115
+ temporalBoost = 1.1;
116
+ }
117
+ else if (/\brecently\b|\blately\b/.test(lq)) {
118
+ sinceDate = new Date(Date.now() - 3 * 24 * 3600_000);
119
+ temporalBoost = 1.3;
120
+ }
121
+ }
89
122
  const results = [];
90
123
  // Iterate ALL memories, filter by agent ID set
91
124
  for await (const record of databases.flair.Memory.search()) {
@@ -95,24 +128,34 @@ export class SemanticSearch extends Resource {
95
128
  continue;
96
129
  }
97
130
  if (record.archived === true)
98
- continue; // soft-deleted — excluded from search by default
131
+ continue;
99
132
  if (record.expiresAt && Date.parse(record.expiresAt) < Date.now())
100
133
  continue;
101
134
  if (tag && !(record.tags || []).includes(tag))
102
135
  continue;
103
136
  if (subjectFilter && record.subject && !subjectFilter.has(String(record.subject).toLowerCase()))
104
137
  continue;
105
- let rawScore = 0;
138
+ // Time window filter
139
+ if (sinceDate && record.createdAt && new Date(record.createdAt) < sinceDate)
140
+ continue;
141
+ let semanticScore = 0;
142
+ let keywordHit = false;
106
143
  if (q && String(record.content || "").toLowerCase().includes(String(q).toLowerCase())) {
107
- rawScore += 0.5;
144
+ keywordHit = true;
108
145
  }
109
146
  if (qEmb && record.embedding && qEmb.length === record.embedding.length) {
110
- rawScore += cosineSimilarity(qEmb, record.embedding);
147
+ semanticScore = cosineSimilarity(qEmb, record.embedding);
111
148
  }
149
+ // Keyword match is a small tiebreaker (5%), not a primary signal.
150
+ // This prevents weak semantic matches from ranking high just because
151
+ // a query word appears in the content.
152
+ const rawScore = semanticScore + (keywordHit ? 0.05 : 0);
112
153
  if (q && rawScore === 0)
113
154
  continue;
114
- // Apply composite scoring (temporal decay + durability + retrieval boost)
115
- const finalScore = scoring === "raw" ? rawScore : compositeScore(rawScore, record);
155
+ // Apply composite scoring (temporal decay + durability + retrieval boost + temporal intent)
156
+ let finalScore = scoring === "raw" ? rawScore : compositeScore(rawScore, record);
157
+ if (temporalBoost > 1.0)
158
+ finalScore *= temporalBoost;
116
159
  const { embedding, ...rest } = record;
117
160
  results.push({
118
161
  ...rest,
@@ -131,6 +174,10 @@ export class SemanticSearch extends Resource {
131
174
  }
132
175
  filteredResults = results.filter((r) => !supersededIds.has(r.id));
133
176
  }
177
+ // Apply minimum score filter
178
+ if (minScore > 0) {
179
+ filteredResults = filteredResults.filter((r) => r._score >= minScore);
180
+ }
134
181
  filteredResults.sort((a, b) => b._score - a._score);
135
182
  const topResults = filteredResults.slice(0, limit);
136
183
  // Async hit tracking — don't block the response
package/package.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "name": "@tpsdev-ai/flair",
3
- "version": "0.2.1",
4
- "description": "Identity, memory, and soul for AI agents. Cryptographic identity (Ed25519), semantic memory with local embeddings, and persistent personality all in a single process.",
3
+ "version": "0.3.1",
4
+ "description": "Identity, memory, and soul for AI agents. Cryptographic identity (Ed25519), semantic memory with local embeddings, and persistent personality \u2014 all in a single process.",
5
5
  "type": "module",
6
6
  "license": "Apache-2.0",
7
7
  "repository": {
@@ -49,7 +49,7 @@
49
49
  "node": ">=22"
50
50
  },
51
51
  "dependencies": {
52
- "@harperfast/harper": "5.0.0-beta.1",
52
+ "@harperfast/harper": "5.0.0-beta.4",
53
53
  "@node-llama-cpp/mac-arm64-metal": "^3.17.1",
54
54
  "commander": "14.0.3",
55
55
  "harper-fabric-embeddings": "^0.2.0",
@@ -62,5 +62,9 @@
62
62
  },
63
63
  "trustedDependencies": [
64
64
  "harper-fabric-embeddings"
65
+ ],
66
+ "workspaces": [
67
+ "packages/*",
68
+ "plugins/*"
65
69
  ]
66
- }
70
+ }
@@ -68,12 +68,24 @@ export class BootstrapMemories extends Resource {
68
68
  let memoriesIncluded = 0;
69
69
  let memoriesAvailable = 0;
70
70
 
71
- // --- 1. Soul records (unconditional — not subject to token budget) ---
72
- // Soul is who you are. It's not optional context to be trimmed.
73
- // Skill assignments (key='skill-assignment') are separated into their own section.
71
+ // --- 1. Soul records (budgeted — prioritized by key importance) ---
72
+ // Soul is who you are, but we still need to respect token budgets.
73
+ // Workspace files (SOUL.md, AGENTS.md) can be massive — they're already
74
+ // injected by the runtime via workspace context, so we prioritize
75
+ // concise soul entries over full file dumps.
76
+ const SOUL_KEY_PRIORITY: Record<string, number> = {
77
+ role: 0, identity: 1, thinking: 2, communication_style: 3,
78
+ team: 4, ownership: 5, infrastructure: 6, "user-context": 7,
79
+ // Full workspace files — lowest priority (runtime already injects these)
80
+ soul: 90, "workspace-rules": 91,
81
+ };
82
+
74
83
  const skillAssignments: any[] = [];
84
+ const soulMaxTokens = Math.floor(maxTokens * 0.4); // 40% of budget for soul
75
85
  if (includeSoul) {
76
86
  let soulTokens = 0;
87
+ const soulEntries: { key: string; line: string; tokens: number; priority: number }[] = [];
88
+
77
89
  for await (const record of (databases as any).flair.Soul.search()) {
78
90
  if (record.agentId !== agentId) continue;
79
91
  if (record.key === "skill-assignment") {
@@ -81,11 +93,30 @@ export class BootstrapMemories extends Resource {
81
93
  continue;
82
94
  }
83
95
  const line = `**${record.key}:** ${record.value}`;
84
- sections.soul.push(line);
85
- soulTokens += estimateTokens(line);
96
+ const tokens = estimateTokens(line);
97
+ const priority = SOUL_KEY_PRIORITY[record.key] ?? 50;
98
+ soulEntries.push({ key: record.key, line, tokens, priority });
99
+ }
100
+
101
+ // Sort by priority (lower = more important)
102
+ soulEntries.sort((a, b) => a.priority - b.priority);
103
+
104
+ for (const entry of soulEntries) {
105
+ if (soulTokens + entry.tokens > soulMaxTokens) {
106
+ // Skip large entries that exceed budget — truncate or skip
107
+ if (entry.priority >= 90) continue; // skip full workspace files
108
+ // Truncate if it's important but too long
109
+ const maxChars = (soulMaxTokens - soulTokens) * 4;
110
+ if (maxChars > 100) {
111
+ const truncated = `**${entry.key}:** ${entry.line.slice(entry.key.length + 6, entry.key.length + 6 + maxChars)}…(truncated)`;
112
+ sections.soul.push(truncated);
113
+ soulTokens += estimateTokens(truncated);
114
+ }
115
+ continue;
116
+ }
117
+ sections.soul.push(entry.line);
118
+ soulTokens += entry.tokens;
86
119
  }
87
- // Soul tokens are tracked but don't reduce memory budget
88
- tokenBudget = maxTokens; // memory budget is separate from soul
89
120
  }
90
121
 
91
122
  // --- 1b. Skill assignments (ordered by priority, conflict detection) ---
@@ -147,19 +178,28 @@ export class BootstrapMemories extends Resource {
147
178
  }
148
179
  }
149
180
 
150
- // --- 3. Recent memories (last 24-48h, standard + persistent) ---
151
- const sinceDate = since
152
- ? new Date(since)
153
- : new Date(Date.now() - 48 * 3600_000);
154
- const recent = activeMemories
155
- .filter(
156
- (m) =>
157
- m.durability !== "permanent" &&
158
- m.createdAt &&
159
- new Date(m.createdAt) >= sinceDate
160
- )
181
+ // --- 3. Recent memories (adaptive window) ---
182
+ // Start with 48h. If nothing found, widen to 7d, then 30d.
183
+ // This prevents empty recent sections for agents that were idle.
184
+ const nonPermanent = activeMemories
185
+ .filter((m) => m.durability !== "permanent" && m.createdAt)
161
186
  .sort((a: any, b: any) => (b.createdAt || "").localeCompare(a.createdAt || ""));
162
187
 
188
+ let effectiveSince: Date;
189
+ if (since) {
190
+ effectiveSince = new Date(since);
191
+ } else {
192
+ const windows = [48 * 3600_000, 7 * 24 * 3600_000, 30 * 24 * 3600_000];
193
+ effectiveSince = new Date(Date.now() - windows[0]);
194
+ for (const w of windows) {
195
+ effectiveSince = new Date(Date.now() - w);
196
+ const count = nonPermanent.filter((m) => new Date(m.createdAt!) >= effectiveSince).length;
197
+ if (count >= 3) break; // found enough recent memories
198
+ }
199
+ }
200
+
201
+ const recent = nonPermanent.filter((m) => new Date(m.createdAt!) >= effectiveSince);
202
+
163
203
  // Budget: up to 40% of remaining for recent
164
204
  const recentBudget = Math.floor(tokenBudget * 0.4);
165
205
  let recentSpent = 0;
@@ -0,0 +1,95 @@
1
+ /**
2
+ * MemoryMaintenance.ts — Maintenance worker for memory hygiene.
3
+ *
4
+ * POST /MemoryMaintenance/ — runs cleanup tasks:
5
+ * 1. Delete expired ephemeral memories (expiresAt < now)
6
+ * 2. Archive old session memories (> 30 days, standard durability)
7
+ * 3. Report stats
8
+ *
9
+ * Designed to run periodically (daily cron or heartbeat).
10
+ * Requires admin auth.
11
+ */
12
+
13
+ export default class MemoryMaintenance {
14
+ static ROUTE = "MemoryMaintenance";
15
+ static METHOD = "POST";
16
+
17
+ async post(data: any) {
18
+ const { databases }: any = this;
19
+ const request = (this as any).request;
20
+ const { dryRun = false, agentId } = data || {};
21
+
22
+ // Scope to authenticated agent. Admin can pass agentId for system-wide
23
+ // maintenance; non-admin always scoped to their own agent.
24
+ const authAgent = request?.headers?.get?.("x-tps-agent");
25
+ const isAdmin = (request as any)?.tpsAgentIsAdmin === true;
26
+ const targetAgent = isAdmin && agentId ? agentId : authAgent;
27
+
28
+ if (!targetAgent && !isAdmin) {
29
+ return { error: "agentId required" };
30
+ }
31
+
32
+ const now = new Date();
33
+ const stats = { expired: 0, archived: 0, total: 0, errors: 0, agent: targetAgent || "all" };
34
+
35
+ try {
36
+ for await (const record of (databases as any).flair.Memory.search()) {
37
+ // Skip records not belonging to target agent (unless admin running system-wide)
38
+ if (targetAgent && record.agentId !== targetAgent) continue;
39
+ stats.total++;
40
+
41
+ // 1. Delete expired memories
42
+ if (record.expiresAt && new Date(record.expiresAt) < now) {
43
+ if (!dryRun) {
44
+ try {
45
+ await (databases as any).flair.Memory.delete(record.id);
46
+ stats.expired++;
47
+ } catch {
48
+ stats.errors++;
49
+ }
50
+ } else {
51
+ stats.expired++;
52
+ }
53
+ continue;
54
+ }
55
+
56
+ // 2. Archive old standard session memories (> 30 days)
57
+ // These are low-value session notes that weren't promoted to persistent.
58
+ // Archiving removes them from search results but keeps the data.
59
+ if (
60
+ record.durability === "standard" &&
61
+ record.type === "session" &&
62
+ !record.archived &&
63
+ record.createdAt
64
+ ) {
65
+ const ageMs = now.getTime() - new Date(record.createdAt).getTime();
66
+ const ageDays = ageMs / (24 * 3600_000);
67
+ if (ageDays > 30) {
68
+ if (!dryRun) {
69
+ try {
70
+ // Soft archive — set archived flag, keep data
71
+ await (databases as any).flair.Memory.update(record.id, {
72
+ ...record,
73
+ archived: true,
74
+ archivedAt: now.toISOString(),
75
+ });
76
+ stats.archived++;
77
+ } catch {
78
+ stats.errors++;
79
+ }
80
+ } else {
81
+ stats.archived++;
82
+ }
83
+ }
84
+ }
85
+ }
86
+ } catch (err: any) {
87
+ return { error: err.message, stats };
88
+ }
89
+
90
+ return {
91
+ message: dryRun ? "Dry run complete" : "Maintenance complete",
92
+ stats,
93
+ };
94
+ }
95
+ }
@@ -52,7 +52,7 @@ function compositeScore(
52
52
 
53
53
  export class SemanticSearch extends Resource {
54
54
  async post(data: any) {
55
- const { agentId, q, queryEmbedding, tag, subject, subjects, limit = 10, includeSuperseded = false, scoring = "composite" } = data || {};
55
+ const { agentId, q, queryEmbedding, tag, subject, subjects, limit = 10, includeSuperseded = false, scoring = "composite", minScore = 0, since } = data || {};
56
56
  const subjectFilter = subjects
57
57
  ? new Set((subjects as string[]).map((s: string) => s.toLowerCase()))
58
58
  : subject
@@ -94,6 +94,33 @@ export class SemanticSearch extends Resource {
94
94
  }
95
95
  }
96
96
 
97
+ // ─── Temporal intent detection ────────────────────────────────────────────
98
+ // If the query implies a time window and no explicit `since` was provided,
99
+ // auto-detect and apply a recency boost.
100
+ let sinceDate: Date | null = since ? new Date(since) : null;
101
+ let temporalBoost = 1.0;
102
+ if (q && !sinceDate) {
103
+ const lq = String(q).toLowerCase();
104
+ if (/\btoday\b|\bthis morning\b|\bthis afternoon\b/.test(lq)) {
105
+ const d = new Date(); d.setHours(0, 0, 0, 0);
106
+ sinceDate = d;
107
+ temporalBoost = 1.5; // boost recent results for temporal queries
108
+ } else if (/\byesterday\b/.test(lq)) {
109
+ const d = new Date(); d.setDate(d.getDate() - 1); d.setHours(0, 0, 0, 0);
110
+ sinceDate = d;
111
+ temporalBoost = 1.3;
112
+ } else if (/\bthis week\b|\blast few days\b/.test(lq)) {
113
+ sinceDate = new Date(Date.now() - 7 * 24 * 3600_000);
114
+ temporalBoost = 1.2;
115
+ } else if (/\blast week\b/.test(lq)) {
116
+ sinceDate = new Date(Date.now() - 14 * 24 * 3600_000);
117
+ temporalBoost = 1.1;
118
+ } else if (/\brecently\b|\blately\b/.test(lq)) {
119
+ sinceDate = new Date(Date.now() - 3 * 24 * 3600_000);
120
+ temporalBoost = 1.3;
121
+ }
122
+ }
123
+
97
124
  const results: any[] = [];
98
125
 
99
126
  // Iterate ALL memories, filter by agent ID set
@@ -103,22 +130,30 @@ export class SemanticSearch extends Resource {
103
130
  if (record.visibility !== "office") continue;
104
131
  }
105
132
 
106
- if (record.archived === true) continue; // soft-deleted — excluded from search by default
133
+ if (record.archived === true) continue;
107
134
  if (record.expiresAt && Date.parse(record.expiresAt) < Date.now()) continue;
108
135
  if (tag && !(record.tags || []).includes(tag)) continue;
109
136
  if (subjectFilter && record.subject && !subjectFilter.has(String(record.subject).toLowerCase())) continue;
137
+ // Time window filter
138
+ if (sinceDate && record.createdAt && new Date(record.createdAt) < sinceDate) continue;
110
139
 
111
- let rawScore = 0;
140
+ let semanticScore = 0;
141
+ let keywordHit = false;
112
142
  if (q && String(record.content || "").toLowerCase().includes(String(q).toLowerCase())) {
113
- rawScore += 0.5;
143
+ keywordHit = true;
114
144
  }
115
145
  if (qEmb && record.embedding && qEmb.length === record.embedding.length) {
116
- rawScore += cosineSimilarity(qEmb, record.embedding);
146
+ semanticScore = cosineSimilarity(qEmb, record.embedding);
117
147
  }
148
+ // Keyword match is a small tiebreaker (5%), not a primary signal.
149
+ // This prevents weak semantic matches from ranking high just because
150
+ // a query word appears in the content.
151
+ const rawScore = semanticScore + (keywordHit ? 0.05 : 0);
118
152
  if (q && rawScore === 0) continue;
119
153
 
120
- // Apply composite scoring (temporal decay + durability + retrieval boost)
121
- const finalScore = scoring === "raw" ? rawScore : compositeScore(rawScore, record);
154
+ // Apply composite scoring (temporal decay + durability + retrieval boost + temporal intent)
155
+ let finalScore = scoring === "raw" ? rawScore : compositeScore(rawScore, record);
156
+ if (temporalBoost > 1.0) finalScore *= temporalBoost;
122
157
 
123
158
  const { embedding, ...rest } = record;
124
159
  results.push({
@@ -139,6 +174,11 @@ export class SemanticSearch extends Resource {
139
174
  filteredResults = results.filter((r: any) => !supersededIds.has(r.id));
140
175
  }
141
176
 
177
+ // Apply minimum score filter
178
+ if (minScore > 0) {
179
+ filteredResults = filteredResults.filter((r: any) => r._score >= minScore);
180
+ }
181
+
142
182
  filteredResults.sort((a: any, b: any) => b._score - a._score);
143
183
  const topResults = filteredResults.slice(0, limit);
144
184