agenr 0.8.2 → 0.8.3

This diff shows the changes between publicly released versions of this package, as published to one of the supported registries. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,23 @@
1
1
  # Changelog
2
2
 
3
+ ## [0.8.3]
4
+
5
+ ### Fixed
6
+ - setup: custom model aliases (gpt-4.1-nano, gpt-4.1-mini) now appear in
7
+ the model picker when using openai-api-key auth (issue #136)
8
+ - setup: revert hint null-normalization regression (details?.name ?? undefined)
9
+ - setup: warn user when empty credential is entered during key rotation
10
+ - setup: note that updated credential is saved but not re-validated
11
+ - setup: openai-api-key now prioritizes gpt-4.1-nano, gpt-4.1-mini, and
12
+ gpt-5-nano in preferred model selection, and adds gpt-5-nano alias
13
+ resolution for OpenAI model lookup
14
+ - setup: reconfigure now offers to update stored API key even when existing
15
+ credential is valid (issue #13)
16
+ - embeddings: EmbeddingCache is now bounded with LRU eviction (default
17
+ max 5000 entries) to prevent unbounded heap growth during large ingests
18
+ (issue #57)
19
+ - embeddings: EmbeddingCache constructor throws RangeError for maxSize < 1
20
+
3
21
  ## [0.8.2] - 2026-02-22
4
22
 
5
23
  ### Added
package/README.md CHANGED
@@ -12,7 +12,7 @@ One local database. Your memory stays on your machine.
12
12
 
13
13
  AGENR uses embeddings to make your memory searchable. The best setup we've found: an **OpenAI API key** with `text-embedding-3-small`. Embeddings cost fractions of a penny per operation - a full ingestion of 100+ session transcripts runs about $0.10 total.
14
14
 
15
- AGENR also supports **OpenAI Pro subscriptions** and **Anthropic Claude subscriptions** (no API key needed) for the LLM extraction step. But for the best balance of speed, accuracy, and cost, we recommend `gpt-4o-mini` with an API key. `agenr setup` walks you through all of this.
15
+ AGENR also supports **OpenAI Pro subscriptions** and **Anthropic Claude subscriptions** (no API key needed) for the LLM extraction step. But for the best balance of speed, accuracy, and cost, we recommend `gpt-4.1-nano` with an API key. `agenr setup` walks you through all of this.
16
16
 
17
17
  ```bash
18
18
  export OPENAI_API_KEY=sk-... # for embeddings + extraction
package/dist/cli-main.js CHANGED
@@ -43,7 +43,8 @@ var MODEL_ALIASES = {
43
43
  "gpt-codex": "gpt-5.2-codex",
44
44
  "gpt-4.1-nano": "openai/gpt-4.1-nano",
45
45
  "gpt-4.1-mini": "openai/gpt-4.1-mini",
46
- "gpt-4.1": "openai/gpt-4.1"
46
+ "gpt-4.1": "openai/gpt-4.1",
47
+ "gpt-5-nano": "openai/gpt-5-nano"
47
48
  },
48
49
  "openai-codex": {
49
50
  codex: "gpt-5.3-codex",
@@ -138,7 +139,7 @@ var AUTH_METHOD_DEFINITIONS = [
138
139
  provider: "openai",
139
140
  title: "OpenAI -- API key",
140
141
  setupDescription: "Standard API key from platform.openai.com. Pay per token.",
141
- preferredModels: ["gpt-4o", "openai/gpt-4.1-nano", "gpt-4o-mini"]
142
+ preferredModels: ["gpt-4.1-nano", "gpt-4.1-mini", "gpt-5-nano"]
142
143
  }
143
144
  ];
144
145
  var AUTH_METHOD_SET = new Set(AUTH_METHOD_DEFINITIONS.map((item) => item.id));
@@ -4533,12 +4534,75 @@ async function retireEntries(opts) {
4533
4534
 
4534
4535
  // src/embeddings/cache.ts
4535
4536
  var EmbeddingCache = class {
4536
- embeddings = /* @__PURE__ */ new Map();
4537
+ maxSize;
4538
+ map = /* @__PURE__ */ new Map();
4539
+ head = null;
4540
+ // most recently used key
4541
+ tail = null;
4542
+ // least recently used key
4543
+ constructor(maxSize = 5e3) {
4544
+ if (!(maxSize >= 1)) {
4545
+ throw new RangeError(`EmbeddingCache maxSize must be at least 1. Received: ${String(maxSize)}.`);
4546
+ }
4547
+ this.maxSize = maxSize;
4548
+ }
4537
4549
  get(text2) {
4538
- return this.embeddings.get(text2);
4550
+ const node = this.map.get(text2);
4551
+ if (!node) return void 0;
4552
+ this.moveToHead(text2, node);
4553
+ return node.value;
4539
4554
  }
4540
4555
  set(text2, embedding) {
4541
- this.embeddings.set(text2, embedding);
4556
+ if (this.map.has(text2)) {
4557
+ const node2 = this.map.get(text2);
4558
+ node2.value = embedding;
4559
+ this.moveToHead(text2, node2);
4560
+ return;
4561
+ }
4562
+ if (this.map.size >= this.maxSize) {
4563
+ this.evictTail();
4564
+ }
4565
+ const node = { value: embedding, prev: null, next: this.head };
4566
+ this.map.set(text2, node);
4567
+ if (this.head !== null) {
4568
+ this.map.get(this.head).prev = text2;
4569
+ }
4570
+ this.head = text2;
4571
+ if (this.tail === null) {
4572
+ this.tail = text2;
4573
+ }
4574
+ }
4575
+ get size() {
4576
+ return this.map.size;
4577
+ }
4578
+ moveToHead(key, node) {
4579
+ if (this.head === key) return;
4580
+ if (node.prev !== null) {
4581
+ this.map.get(node.prev).next = node.next;
4582
+ }
4583
+ if (node.next !== null) {
4584
+ this.map.get(node.next).prev = node.prev;
4585
+ } else {
4586
+ this.tail = node.prev;
4587
+ }
4588
+ node.prev = null;
4589
+ node.next = this.head;
4590
+ if (this.head !== null) {
4591
+ this.map.get(this.head).prev = key;
4592
+ }
4593
+ this.head = key;
4594
+ }
4595
+ evictTail() {
4596
+ if (this.tail === null) return;
4597
+ const tailKey = this.tail;
4598
+ const tailNode = this.map.get(tailKey);
4599
+ this.tail = tailNode.prev;
4600
+ if (this.tail !== null) {
4601
+ this.map.get(this.tail).next = null;
4602
+ } else {
4603
+ this.head = null;
4604
+ }
4605
+ this.map.delete(tailKey);
4542
4606
  }
4543
4607
  };
4544
4608
 
@@ -18131,7 +18195,14 @@ function promptToEnterCredential(auth) {
18131
18195
  function modelChoicesForAuth(auth, provider) {
18132
18196
  const definition = getAuthMethodDefinition(auth);
18133
18197
  const allModels = getModels(provider).map((model) => model.id);
18134
- const preferred = definition.preferredModels.filter((modelId) => allModels.includes(modelId));
18198
+ const preferred = definition.preferredModels.filter((modelId) => {
18199
+ try {
18200
+ resolveModel(provider, modelId);
18201
+ return true;
18202
+ } catch {
18203
+ return false;
18204
+ }
18205
+ });
18135
18206
  const fallback = allModels.filter((modelId) => !preferred.includes(modelId));
18136
18207
  if (provider === "openai") {
18137
18208
  const prioritizedFallback = fallback.filter(
@@ -18194,9 +18265,9 @@ async function runSetup(env = process.env) {
18194
18265
  storedCredentials: working.credentials,
18195
18266
  env
18196
18267
  });
18268
+ const credentialKey = credentialKeyForAuth(auth);
18197
18269
  if (!probe.available) {
18198
18270
  clack11.log.warn(probe.guidance);
18199
- const credentialKey = credentialKeyForAuth(auth);
18200
18271
  if (credentialKey) {
18201
18272
  const shouldEnterNow = await clack11.confirm({
18202
18273
  message: "Enter the credential now?",
@@ -18225,6 +18296,36 @@ async function runSetup(env = process.env) {
18225
18296
  }
18226
18297
  }
18227
18298
  }
18299
+ } else if (existing && credentialKey) {
18300
+ const updateKey = await clack11.confirm({
18301
+ message: "Update stored credential?",
18302
+ initialValue: false
18303
+ });
18304
+ if (clack11.isCancel(updateKey)) {
18305
+ clack11.cancel("Setup cancelled.");
18306
+ return;
18307
+ }
18308
+ if (updateKey) {
18309
+ const entered = await clack11.password({
18310
+ message: promptToEnterCredential(auth)
18311
+ });
18312
+ if (clack11.isCancel(entered)) {
18313
+ clack11.cancel("Setup cancelled.");
18314
+ return;
18315
+ }
18316
+ const normalized = entered.trim();
18317
+ if (normalized) {
18318
+ working = setStoredCredential(working, credentialKey, normalized);
18319
+ probe = probeCredentials({
18320
+ auth,
18321
+ storedCredentials: working.credentials,
18322
+ env
18323
+ });
18324
+ clack11.log.info("Credential updated.");
18325
+ } else {
18326
+ clack11.log.warn("Credential not updated - empty input.");
18327
+ }
18328
+ }
18228
18329
  }
18229
18330
  const modelChoices = modelChoicesForAuth(auth, provider);
18230
18331
  if (modelChoices.length === 0) {
@@ -2,7 +2,7 @@
2
2
  "id": "agenr",
3
3
  "name": "agenr Memory",
4
4
  "description": "Local memory layer - injects agenr context at session start",
5
- "version": "0.8.2",
5
+ "version": "0.8.3",
6
6
  "skills": [
7
7
  "skills"
8
8
  ],
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "agenr",
3
- "version": "0.8.2",
3
+ "version": "0.8.3",
4
4
  "openclaw": {
5
5
  "extensions": [
6
6
  "dist/openclaw-plugin/index.js"