@pentatonic-ai/ai-agent-sdk 0.5.1 → 0.5.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@pentatonic-ai/ai-agent-sdk",
-  "version": "0.5.1",
+  "version": "0.5.3",
   "description": "TES SDK — LLM observability and lifecycle tracking via Pentatonic Thing Event System. Track token usage, tool calls, and conversations. Manage things through event-sourced lifecycle stages with AI enrichment and vector search.",
   "type": "module",
   "main": "./dist/index.cjs",
@@ -168,6 +168,11 @@ describe("named exports", () => {
 // --- AI client ---
 
 describe("createAIClient", () => {
+  const realFetch = globalThis.fetch;
+  afterEach(() => {
+    globalThis.fetch = realFetch;
+  });
+
   it("returns an object with embed() and chat()", () => {
     const client = createAIClient({
       url: "http://localhost:11434/v1",
@@ -185,6 +190,79 @@ describe("createAIClient", () => {
     });
     expect(client).toBeDefined();
   });
+
+  it("hits /embeddings by default (OpenAI spec)", async () => {
+    let hitUrl;
+    globalThis.fetch = async (url) => {
+      hitUrl = url;
+      return { ok: true, json: async () => ({ data: [{ embedding: [0.1, 0.2] }] }) };
+    };
+    const client = createAIClient({
+      url: "http://localhost:11434/v1",
+      model: "test",
+    });
+    await client.embed("hello");
+    expect(hitUrl).toBe("http://localhost:11434/v1/embeddings");
+  });
+
+  it("uses embeddingPath override (e.g. Pentatonic AI Gateway)", async () => {
+    let hitUrl;
+    globalThis.fetch = async (url) => {
+      hitUrl = url;
+      return { ok: true, json: async () => ({ data: [{ embedding: [0.1] }] }) };
+    };
+    const client = createAIClient({
+      url: "https://lambda-gateway.pentatonic.com/v1",
+      model: "NV-Embed-v2",
+      embeddingPath: "embed",
+    });
+    await client.embed("hello");
+    expect(hitUrl).toBe("https://lambda-gateway.pentatonic.com/v1/embed");
+  });
+
+  it("normalises leading slashes and trailing base-url slashes", async () => {
+    let hitUrl;
+    globalThis.fetch = async (url) => {
+      hitUrl = url;
+      return { ok: true, json: async () => ({ data: [{ embedding: [0.1] }] }) };
+    };
+    const client = createAIClient({
+      url: "https://gateway.test/v1/",
+      model: "m",
+      embeddingPath: "/embed",
+    });
+    await client.embed("hi");
+    expect(hitUrl).toBe("https://gateway.test/v1/embed");
+  });
+
+  it("chatPath override applies to chat() too", async () => {
+    let hitUrl;
+    globalThis.fetch = async (url) => {
+      hitUrl = url;
+      return { ok: true, json: async () => ({ choices: [{ message: { content: "hi" } }] }) };
+    };
+    const client = createAIClient({
+      url: "https://gateway.test/v1",
+      model: "m",
+      chatPath: "chat",
+    });
+    await client.chat([{ role: "user", content: "q" }]);
+    expect(hitUrl).toBe("https://gateway.test/v1/chat");
+  });
+
+  it("chat defaults to /chat/completions", async () => {
+    let hitUrl;
+    globalThis.fetch = async (url) => {
+      hitUrl = url;
+      return { ok: true, json: async () => ({ choices: [{ message: { content: "hi" } }] }) };
+    };
+    const client = createAIClient({
+      url: "http://localhost:11434/v1",
+      model: "m",
+    });
+    await client.chat([{ role: "user", content: "q" }]);
+    expect(hitUrl).toBe("http://localhost:11434/v1/chat/completions");
+  });
 });
 
 // --- Search options contract ---
@@ -205,6 +283,89 @@ describe("search options contract", () => {
 
     expect(Array.isArray(results)).toBe(true);
   });
+
+  it("SQL includes atomBoost and verbosityPenalty terms", async () => {
+    const seenSqls = [];
+    const mockDb = async (sql) => {
+      seenSqls.push(sql);
+      if (sql.includes("information_schema.columns")) return { rows: [{ "?column?": 1 }] };
+      return { rows: [] };
+    };
+    const mockAi = { embed: async () => ({ embedding: [0.1], dimensions: 1, model: "t" }) };
+
+    await search(mockDb, mockAi, "q", { clientId: "c" });
+
+    const scoringSql = seenSqls.find((s) => s.includes("final_score"));
+    expect(scoringSql).toBeDefined();
+    expect(scoringSql).toMatch(/source_id IS NOT NULL/);
+    expect(scoringSql).toMatch(/length\(mn\.content\)/);
+  });
+
+  it("dedupeBySource drops raw rows whose id is a source of a matched atom", async () => {
+    const rows = [
+      { id: "raw-1", client_id: "c", layer_id: "l", content: "long raw turn",
+        confidence: 1, decay_rate: 0.05, access_count: 0, final_score: 0.9, source_id: null },
+      { id: "atom-1", client_id: "c", layer_id: "l", content: "Phil owns a Subaru",
+        confidence: 1, decay_rate: 0.05, access_count: 0, final_score: 0.8, source_id: "raw-1" },
+    ];
+    let searchCallCount = 0;
+    const mockDb = async (sql) => {
+      if (sql.includes("information_schema.columns")) return { rows: [{ "?column?": 1 }] };
+      if (sql.includes("final_score")) {
+        searchCallCount++;
+        return { rows };
+      }
+      return { rows: [] };
+    };
+    const mockAi = { embed: async () => ({ embedding: [0.1], dimensions: 1, model: "t" }) };
+
+    const out = await search(mockDb, mockAi, "q", { clientId: "c", minScore: 0 });
+
+    expect(searchCallCount).toBe(1);
+    expect(out.length).toBe(1);
+    expect(out[0].id).toBe("atom-1");
+    expect(out[0].source_id).toBe("raw-1");
+  });
+
+  it("dedupeBySource: false keeps both atom and its raw source", async () => {
+    const rows = [
+      { id: "raw-1", client_id: "c", layer_id: "l", content: "long",
+        confidence: 1, decay_rate: 0.05, access_count: 0, final_score: 0.9, source_id: null },
+      { id: "atom-1", client_id: "c", layer_id: "l", content: "short",
+        confidence: 1, decay_rate: 0.05, access_count: 0, final_score: 0.8, source_id: "raw-1" },
+    ];
+    const mockDb = async (sql) => {
+      if (sql.includes("information_schema.columns")) return { rows: [{ "?column?": 1 }] };
+      if (sql.includes("final_score")) return { rows };
+      return { rows: [] };
+    };
+    const mockAi = { embed: async () => ({ embedding: [0.1], dimensions: 1, model: "t" }) };
+
+    const out = await search(mockDb, mockAi, "q", {
+      clientId: "c",
+      minScore: 0,
+      dedupeBySource: false,
+    });
+
+    expect(out.length).toBe(2);
+    expect(out.map((r) => r.id).sort()).toEqual(["atom-1", "raw-1"]);
+  });
+
+  it("search results include source_id (null for raw, set for atoms)", async () => {
+    const rows = [
+      { id: "atom-1", client_id: "c", layer_id: "l", content: "atom",
+        confidence: 1, decay_rate: 0.05, access_count: 0, final_score: 0.9, source_id: "raw-1" },
+    ];
+    const mockDb = async (sql) => {
+      if (sql.includes("information_schema.columns")) return { rows: [{ "?column?": 1 }] };
+      if (sql.includes("final_score")) return { rows };
+      return { rows: [] };
+    };
+    const mockAi = { embed: async () => ({ embedding: [0.1], dimensions: 1, model: "t" }) };
+
+    const out = await search(mockDb, mockAi, "q", { clientId: "c", minScore: 0 });
+    expect(out[0].source_id).toBe("raw-1");
+  });
 });
 
 // --- Ingest options contract ---
@@ -8,10 +8,16 @@
 /**
  * Create an AI client from config.
  *
+ * Defaults to OpenAI-standard paths (`/embeddings`, `/chat/completions`).
+ * Override with `embeddingPath` / `chatPath` for gateways that use
+ * different routes — e.g. Pentatonic AI Gateway exposes `/embed`.
+ *
  * @param {object} config
  * @param {string} config.url - Base URL (e.g. "http://ollama:11434/v1")
  * @param {string} config.model - Model name
  * @param {string} [config.apiKey] - Optional API key
+ * @param {string} [config.embeddingPath="embeddings"] - Path appended to url
+ * @param {string} [config.chatPath="chat/completions"] - Path appended to url
  * @param {number} [config.dimensions] - Expected embedding dimensions
  * @returns {object} Client with embed() and chat() methods
  */
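
The new options only change which route is appended to the base URL; nothing else about the client changes. A minimal usage sketch (not part of the diff), assuming createAIClient is imported from the package root as the named-exports tests do, and that embed() resolves with the { embedding, dimensions, model } shape the search tests mock:

import { createAIClient } from "@pentatonic-ai/ai-agent-sdk";

// Default OpenAI-style routes: POST <url>/embeddings and <url>/chat/completions.
const local = createAIClient({
  url: "http://localhost:11434/v1",
  model: "test",
});

// Pentatonic AI Gateway exposes /embed instead of /embeddings.
const gateway = createAIClient({
  url: "https://lambda-gateway.pentatonic.com/v1",
  model: "NV-Embed-v2",
  embeddingPath: "embed", // "embed" and "/embed" are equivalent
  apiKey: process.env.API_KEY,
});

const { embedding } = await gateway.embed("hello"); // hits .../v1/embed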
@@ -22,6 +28,23 @@ export function createAIClient(config) {
     headers["X-API-Key"] = config.apiKey;
   }
 
+  // Strip leading slashes so callers can use "embed" or "/embed"
+  // interchangeably. Base url may or may not have a trailing slash.
+  // Plain loops (not regex) to avoid polynomial-regex scanner flags.
+  const stripLeading = (s) => {
+    let i = 0;
+    while (i < s.length && s[i] === "/") i++;
+    return i === 0 ? s : s.slice(i);
+  };
+  const stripTrailing = (s) => {
+    let i = s.length;
+    while (i > 0 && s[i - 1] === "/") i--;
+    return i === s.length ? s : s.slice(0, i);
+  };
+  const embeddingPath = stripLeading(config.embeddingPath || "embeddings");
+  const chatPath = stripLeading(config.chatPath || "chat/completions");
+  const baseUrl = stripTrailing(config.url);
+
   return {
     /**
      * Generate an embedding vector for text.
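
The helpers are local to createAIClient, so the join behaviour is easiest to see as a standalone sketch of the same logic (illustration only; URL values are taken from the tests above):

// Same normalisation as above, inlined for illustration.
const stripLeading = (s) => { let i = 0; while (i < s.length && s[i] === "/") i++; return s.slice(i); };
const stripTrailing = (s) => { let i = s.length; while (i > 0 && s[i - 1] === "/") i--; return s.slice(0, i); };
const join = (base, path) => `${stripTrailing(base)}/${stripLeading(path)}`;

join("https://gateway.test/v1/", "/embed");       // "https://gateway.test/v1/embed"
join("http://localhost:11434/v1", "embeddings");  // "http://localhost:11434/v1/embeddings"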
@@ -32,7 +55,7 @@ export function createAIClient(config) {
      */
     async embed(text, inputType = "passage") {
       try {
-        const res = await fetch(`${config.url}/embeddings`, {
+        const res = await fetch(`${baseUrl}/${embeddingPath}`, {
           method: "POST",
           headers,
           body: JSON.stringify({
@@ -70,7 +93,7 @@ export function createAIClient(config) {
      */
     async chat(messages, opts = {}) {
       try {
-        const res = await fetch(`${config.url}/chat/completions`, {
+        const res = await fetch(`${baseUrl}/${chatPath}`, {
           method: "POST",
           headers,
           body: JSON.stringify({
@@ -10,6 +10,11 @@ const DEFAULT_WEIGHTS = {
   relevance: 0.6,
   recency: 0.25,
   frequency: 0.15,
+  // Boost distilled atoms — they're high signal per token by design.
+  atomBoost: 0.15,
+  // Penalty on verbose raw turns. Short focused memories rank higher.
+  // Atoms are exempt (penalty skipped when source_id IS NOT NULL).
+  verbosityPenalty: 0.1,
 };
 
 /**
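
The two new weights feed the same blended final_score as relevance, recency and frequency, so they can be tuned or switched off per call through opts.weights. A sketch of that (db and ai stand for the caller's existing database function and AI client; the full weights object is passed because this diff does not show whether partial overrides merge with the defaults; clientId is a placeholder):

const results = await search(db, ai, "what car does Phil drive", {
  clientId: "client-123", // placeholder
  weights: {
    relevance: 0.6,
    recency: 0.25,
    frequency: 0.15,
    atomBoost: 0,        // treat atoms and raw turns alike
    verbosityPenalty: 0, // no length penalty
  },
});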
@@ -25,6 +30,10 @@ const DEFAULT_WEIGHTS = {
  * @param {number} [opts.minScore=0.5] - Minimum score threshold
  * @param {string} [opts.userId] - Optional user scope
  * @param {object} [opts.weights] - Override scoring weights
+ *   (relevance, recency, frequency, atomBoost, verbosityPenalty)
+ * @param {boolean} [opts.dedupeBySource=true] - When an atom matches,
+ *   drop its raw source memory from the results (atoms are already
+ *   distillations of the source, so returning both is redundant).
  * @param {Function} [opts.logger] - Optional logger
  * @returns {Promise<Array>} Scored memory results
  */
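
And the de-dupe behaviour documented above, seen from the caller's side (a sketch under the same assumptions as the weights example):

// Default: when an atom matches, the raw source row it was distilled from is dropped.
const deduped = await search(db, ai, "what car does Phil drive", { clientId: "client-123" });

// Opt out to keep both the atom and its raw source turn.
const everything = await search(db, ai, "what car does Phil drive", {
  clientId: "client-123",
  dedupeBySource: false,
});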
@@ -107,7 +116,19 @@ export async function search(db, ai, query, opts = {}) {
         ${w.recency} * exp(
           -0.01 * EXTRACT(EPOCH FROM NOW() - COALESCE(mn.last_accessed, mn.created_at)) / 3600
         ) +
-        ${w.frequency} * (ln(mn.access_count + 1) / ln(ma.val + 1))
+        ${w.frequency} * (ln(mn.access_count + 1) / ln(ma.val + 1)) +
+        ${w.atomBoost} * (CASE WHEN mn.source_id IS NOT NULL THEN 1 ELSE 0 END) -
+        ${w.verbosityPenalty} * (
+          CASE WHEN mn.source_id IS NULL THEN
+            LEAST(
+              GREATEST(
+                (ln(length(mn.content) + 1) - ln(200)) / (ln(10000) - ln(200)),
+                0
+              ),
+              1
+            )
+          ELSE 0 END
+        )
       ) AS final_score
     FROM memory_nodes mn
     CROSS JOIN max_ac ma
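
In prose: rows with a source_id (atoms) receive a flat atomBoost, and only raw rows pay the verbosity penalty, which ramps logarithmically from roughly 200 characters of content and saturates at roughly 10,000. A JS mirror of that clamp (a sketch of the arithmetic, not code from the package):

const verbosity = (len) =>
  Math.min(
    Math.max((Math.log(len + 1) - Math.log(200)) / (Math.log(10000) - Math.log(200)), 0),
    1
  );

verbosity(150);    // 0, short content pays nothing
verbosity(2000);   // ~0.59
verbosity(20000);  // 1, clamped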
@@ -123,10 +144,21 @@ export async function search(db, ai, query, opts = {}) {
 
   const result = await db(sql, params);
 
-  const filtered = (result.rows || []).filter(
+  let filtered = (result.rows || []).filter(
     (r) => parseFloat(r.final_score) >= threshold
   );
 
+  // De-dupe: when an atom matches, drop its raw source from the set.
+  // Default on; set opts.dedupeBySource: false to keep both.
+  if (opts.dedupeBySource !== false) {
+    const atomSources = new Set(
+      filtered.filter((r) => r.source_id).map((r) => r.source_id)
+    );
+    if (atomSources.size > 0) {
+      filtered = filtered.filter((r) => !atomSources.has(r.id));
+    }
+  }
+
   // Increment access counts
   const ids = filtered.map((r) => r.id);
   if (ids.length) {
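
Traced on the two-row fixture the tests use, the filter keeps the atom and drops the raw turn it cites (a sketch, rows reduced to the relevant fields):

const filtered = [
  { id: "raw-1",  final_score: 0.9, source_id: null },
  { id: "atom-1", final_score: 0.8, source_id: "raw-1" },
];
const atomSources = new Set(filtered.filter((r) => r.source_id).map((r) => r.source_id)); // {"raw-1"}
filtered.filter((r) => !atomSources.has(r.id)); // [{ id: "atom-1", ... }], "raw-1" is dropped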
@@ -182,6 +214,7 @@ function mapRow(row) {
     client_id: row.client_id,
     user_id: row.user_id || null,
     layer_id: row.layer_id,
+    source_id: row.source_id || null,
     content: row.content,
     metadata:
       typeof row.metadata === "string"
@@ -13,6 +13,9 @@
  * LLM_URL — OpenAI-compatible chat endpoint (required)
  * LLM_MODEL — Chat model name for HyDE (required)
  * API_KEY — API key for embedding/LLM endpoints (optional)
+ * EMBEDDING_PATH — Path appended to EMBEDDING_URL (default: "embeddings").
+ *   Set to "embed" for the Pentatonic AI Gateway.
+ * CHAT_PATH — Path appended to LLM_URL (default: "chat/completions")
  * CLIENT_ID — Client ID for memory scoping (default: "default")
  * PORT — HTTP port for SSE transport (default: 3333)
  */
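
For the gateway case called out above, the environment might be set roughly like this before the server starts (a sketch, written in JS for consistency with the rest of the package; the host comes from the tests and the chat model name is illustrative):

process.env.EMBEDDING_URL = "https://lambda-gateway.pentatonic.com/v1";
process.env.EMBEDDING_MODEL = "NV-Embed-v2";
process.env.EMBEDDING_PATH = "embed"; // gateway route, instead of the default "embeddings"
process.env.LLM_URL = "https://lambda-gateway.pentatonic.com/v1";
process.env.LLM_MODEL = "example-chat-model"; // illustrative
// CHAT_PATH left unset, so chat defaults to "chat/completions"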
@@ -46,11 +49,13 @@ function createMemory() {
       url: process.env.EMBEDDING_URL,
       model: process.env.EMBEDDING_MODEL,
       apiKey: process.env.API_KEY,
+      embeddingPath: process.env.EMBEDDING_PATH,
     },
     llm: {
       url: process.env.LLM_URL,
       model: process.env.LLM_MODEL,
       apiKey: process.env.API_KEY,
+      chatPath: process.env.CHAT_PATH,
     },
     logger: (msg) => process.stderr.write(`[memory] ${msg}\n`),
   });
@@ -342,7 +347,7 @@ async function main() {
   const health = {
     status: "ok",
     client: CLIENT_ID,
-    version: "0.5.1",
+    version: "0.5.3",
     search: "text",
     db: false,
     ollama: false,