@pentatonic-ai/ai-agent-sdk 0.5.5 → 0.5.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -45,6 +45,40 @@ export function createAIClient(config) {
   const chatPath = stripLeading(config.chatPath || "chat/completions");
   const baseUrl = stripTrailing(config.url);
 
+  /**
+   * Send an embedding request with N inputs. Shared by embed() and
+   * embedBatch(). Returns an array of { embedding, dimensions, model } or
+   * nulls (one per input, preserving order).
+   */
+  async function rawEmbed(texts, inputType) {
+    if (!texts.length) return [];
+    try {
+      const res = await fetch(`${baseUrl}/${embeddingPath}`, {
+        method: "POST",
+        headers,
+        body: JSON.stringify({
+          input: texts.map((t) => (t ?? "").substring(0, 8192)),
+          model: config.model,
+          input_type: inputType,
+        }),
+        signal: AbortSignal.timeout(30000),
+      });
+      if (!res.ok) return texts.map(() => null);
+      const data = await res.json();
+      // OpenAI-compat: data.data = [{embedding, index}, ...]
+      // Pentatonic gateway / Ollama: data.embeddings = [[...], [...], ...]
+      const vectors =
+        data.data?.map((d) => d.embedding) || data.embeddings || [];
+      return texts.map((_, i) => {
+        const embedding = vectors[i];
+        if (!embedding) return null;
+        return { embedding, dimensions: embedding.length, model: config.model };
+      });
+    } catch {
+      return texts.map(() => null);
+    }
+  }
+
   return {
     /**
      * Generate an embedding vector for text.
@@ -54,32 +88,25 @@ export function createAIClient(config) {
      * @returns {Promise<{embedding: number[], dimensions: number, model: string} | null>}
      */
     async embed(text, inputType = "passage") {
-      try {
-        const res = await fetch(`${baseUrl}/${embeddingPath}`, {
-          method: "POST",
-          headers,
-          body: JSON.stringify({
-            input: [text.substring(0, 8192)],
-            model: config.model,
-            input_type: inputType,
-          }),
-          signal: AbortSignal.timeout(30000),
-        });
-
-        if (!res.ok) return null;
-
-        const data = await res.json();
-        const embedding = data.data?.[0]?.embedding || data.embeddings?.[0];
-        if (!embedding) return null;
+      const results = await rawEmbed([text], inputType);
+      return results[0];
+    },
 
-        return {
-          embedding,
-          dimensions: embedding.length,
-          model: config.model,
-        };
-      } catch {
-        return null;
-      }
+    /**
+     * Generate embeddings for N texts in a single HTTP round-trip. Returns
+     * an array the same length as the input; each entry is either the
+     * embedding object or null on failure.
+     *
+     * Batching matters under load — one call instead of N cuts GPU overhead
+     * and downstream queueing. Used by distill() to embed all atoms from a
+     * raw memory in one shot rather than N serial calls.
+     *
+     * @param {string[]} texts
+     * @param {string} [inputType="passage"]
+     * @returns {Promise<Array<{embedding: number[], dimensions: number, model: string} | null>>}
+     */
+    async embedBatch(texts, inputType = "passage") {
+      return rawEmbed(texts, inputType);
     },
 
     /**
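
Taken together, the two hunks above leave embed() with its exact signature and return shape while routing it through the shared rawEmbed() path, and expose embedBatch() for multi-input calls. A minimal sketch of the resulting surface, assuming a local OpenAI-compatible endpoint (URL and model name are placeholders):

    const ai = createAIClient({
      url: "http://localhost:11434/v1", // placeholder endpoint
      model: "nomic-embed-text",        // placeholder embedding model
    });
    const one = await ai.embed("hello world");         // { embedding, dimensions, model } or null
    const many = await ai.embedBatch(["a", "b", "c"]); // same shape per entry, null on failure
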
@@ -104,8 +104,27 @@ export async function distill(db, ai, llm, sourceId, content, opts = {}) {
   }
   const layerId = layerResult.rows[0].id;
 
+  // Batch-embed all atoms in one HTTP call. Under load this is a big
+  // win over N serial embed calls — one GPU forward pass instead of N,
+  // less downstream queueing.
+  let embeddings;
+  if (ai.embedBatch) {
+    try {
+      embeddings = await ai.embedBatch(facts, "passage");
+    } catch (err) {
+      log(`distill: batch embed failed: ${err.message}`);
+      embeddings = facts.map(() => null);
+    }
+  } else {
+    // Older AI clients without embedBatch — fall through to per-atom embed
+    // inside the loop below. Kept for backwards compat with any custom
+    // client passed into createMemorySystem.
+    embeddings = null;
+  }
+
   const stored = [];
-  for (const fact of facts) {
+  for (let i = 0; i < facts.length; i++) {
+    const fact = facts[i];
     try {
       const atomId = `mem_${crypto.randomUUID()}`;
 
@@ -124,9 +143,13 @@ export async function distill(db, ai, llm, sourceId, content, opts = {}) {
         ]
       );
 
-      // Embed the atom (non-fatal)
+      // Attach embedding — from the batch when available, else fall back
+      // to a per-atom call.
       try {
-        const embResult = await ai.embed(fact, "passage");
+        let embResult = embeddings ? embeddings[i] : null;
+        if (!embResult && !embeddings) {
+          embResult = await ai.embed(fact, "passage");
+        }
         if (embResult?.embedding) {
           await db(
             `UPDATE memory_nodes SET embedding = $1, updated_at = NOW() WHERE id = $2`,
@@ -137,7 +160,9 @@ export async function distill(db, ai, llm, sourceId, content, opts = {}) {
         log(`distill: embedding failed for ${atomId}: ${err.message}`);
       }
 
-      // HyDE (2 queries for atoms — they're already focused)
+      // HyDE (2 queries for atoms — they're already focused).
+      // Still per-atom — chat completions don't share a batch surface
+      // across providers the way embeddings do.
       try {
         const queries = await generateHypotheticalQueries(llm, fact);
         const trimmed = queries.slice(0, 2);
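
The feature-detect on ai.embedBatch keeps older clients working: any custom client passed into createMemorySystem with only an embed() method takes the per-atom path inside the loop. A sketch of that minimal legacy shape (stub body for illustration):

    const legacyAi = {
      // No embedBatch here, so distill() sets embeddings = null and calls
      // ai.embed(fact, "passage") once per atom instead.
      async embed(text, inputType = "passage") {
        return null; // stub; a real client returns { embedding, dimensions, model }
      },
    };
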
@@ -0,0 +1,372 @@
+/**
+ * Hosted-mode helpers for the Pentatonic memory system.
+ *
+ * These talk to a remote TES tenant over HTTPS using GraphQL, with a
+ * `tes_<clientId>_<rand>` bearer token in the Authorization header.
+ * They are deliberately thin wrappers around the GraphQL surface so
+ * any caller (the OpenClaw plugin, the LLM proxy worker, a custom
+ * integration) gets the same wire shape, the same error handling, and
+ * the same operational patterns.
+ *
+ * No `pg`, no Node-only APIs — Workers-compatible. Pure `fetch`.
+ *
+ * @example
+ * import { hostedSearch, hostedEmitChatTurn } from
+ *   "@pentatonic-ai/ai-agent-sdk/memory/hosted";
+ *
+ * const config = {
+ *   endpoint: "https://acme.api.pentatonic.com",
+ *   clientId: "acme",
+ *   apiKey: "tes_acme_xxxxx",
+ * };
+ *
+ * const { memories } = await hostedSearch(config, "What's my name?", {
+ *   limit: 6, minScore: 0.55, timeoutMs: 800,
+ * });
+ *
+ * await hostedEmitChatTurn(config, {
+ *   userMessage: "Hi",
+ *   assistantResponse: "Hello!",
+ *   model: "gpt-4o-mini",
+ * }, { source: "my-product" });
+ */
+
+const SEMANTIC_SEARCH_QUERY = `
+  query SemanticSearchMemories($clientId: String!, $query: String!, $limit: Int, $minScore: Float) {
+    semanticSearchMemories(clientId: $clientId, query: $query, limit: $limit, minScore: $minScore) {
+      id
+      content
+      similarity
+    }
+  }
+`;
+
+const CREATE_MODULE_EVENT_MUTATION = `
+  mutation CreateModuleEvent($moduleId: String!, $input: ModuleEventInput!) {
+    createModuleEvent(moduleId: $moduleId, input: $input) { success eventId }
+  }
+`;
+
+const DEFAULT_SEARCH_TIMEOUT_MS = 5000;
+const DEFAULT_EMIT_TIMEOUT_MS = 10000;
+const DEFAULT_SEARCH_LIMIT = 6;
+const DEFAULT_SEARCH_MIN_SCORE = 0.55;
+
+/**
+ * Normalise a config object — accepts both modern (`endpoint/clientId/apiKey`)
+ * and legacy openclaw-style (`tes_endpoint/tes_client_id/tes_api_key`) keys.
+ *
+ * @param {object} config
+ * @returns {{endpoint: string, clientId: string, apiKey: string}}
+ */
+function normalizeConfig(config) {
+  if (!config) throw new Error("hosted: config is required");
+  const endpoint = config.endpoint || config.tes_endpoint;
+  const clientId = config.clientId || config.tes_client_id;
+  const apiKey = config.apiKey || config.tes_api_key;
+  if (!endpoint || !clientId || !apiKey) {
+    throw new Error(
+      "hosted: config requires { endpoint, clientId, apiKey } (or legacy tes_* equivalents)"
+    );
+  }
+  return { endpoint, clientId, apiKey };
+}
+
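
Both config spellings below resolve identically through normalizeConfig(); the tes_* keys exist only for older openclaw configs (all values are placeholders):

    const modern = {
      endpoint: "https://acme.api.pentatonic.com",
      clientId: "acme",
      apiKey: "tes_acme_xxxxx",
    };
    const legacy = {
      tes_endpoint: "https://acme.api.pentatonic.com",
      tes_client_id: "acme",
      tes_api_key: "tes_acme_xxxxx",
    };
    // buildHostedHeaders(modern) and buildHostedHeaders(legacy) yield the same headers
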
+/**
+ * Build the request headers TES expects for hosted-mode calls.
+ * Bearer auth if the apiKey starts with `tes_`; otherwise treated as a
+ * service key (for internal callers).
+ */
+export function buildHostedHeaders(config) {
+  const { clientId, apiKey } = normalizeConfig(config);
+  const headers = {
+    "Content-Type": "application/json",
+    "x-client-id": clientId,
+  };
+  if (apiKey.startsWith("tes_")) {
+    headers["Authorization"] = `Bearer ${apiKey}`;
+  } else {
+    headers["x-service-key"] = apiKey;
+  }
+  return headers;
+}
+
+/**
+ * Run a semantic memory search against a remote TES tenant.
+ *
+ * @param {object} config — { endpoint, clientId, apiKey }
+ * @param {string} query — natural-language query
+ * @param {object} [opts]
+ * @param {number} [opts.limit=6]
+ * @param {number} [opts.minScore=0.55]
+ * @param {number} [opts.timeoutMs=5000]
+ * @returns {Promise<{
+ *   memories: Array<{id: string, content: string, similarity: number}>,
+ *   skipped?: string,
+ * }>}
+ *
+ * Failure mode: any error returns `{ memories: [], skipped: <reason> }`.
+ * Callers (e.g. the LLM proxy) inspect `skipped` to set `X-TES-Skipped`
+ * on their response, then forward unmodified. We never throw — the
+ * fail-soft contract means a hosted-search call never breaks the
+ * caller's primary user-facing flow.
+ */
+export async function hostedSearch(config, query, opts = {}) {
+  if (!query) return { memories: [], skipped: "no_query" };
+
+  let cfg;
+  try {
+    cfg = normalizeConfig(config);
+  } catch (err) {
+    return { memories: [], skipped: `config_error:${err.message}` };
+  }
+
+  const limit = opts.limit ?? DEFAULT_SEARCH_LIMIT;
+  const minScore = opts.minScore ?? DEFAULT_SEARCH_MIN_SCORE;
+  const timeoutMs = opts.timeoutMs ?? DEFAULT_SEARCH_TIMEOUT_MS;
+
+  const controller = new AbortController();
+  const timer = setTimeout(() => controller.abort(), timeoutMs);
+
+  let response;
+  try {
+    response = await fetch(`${cfg.endpoint}/api/graphql`, {
+      method: "POST",
+      headers: buildHostedHeaders(cfg),
+      body: JSON.stringify({
+        query: SEMANTIC_SEARCH_QUERY,
+        variables: { clientId: cfg.clientId, query, limit, minScore },
+      }),
+      signal: controller.signal,
+    });
+  } catch (err) {
+    clearTimeout(timer);
+    return {
+      memories: [],
+      skipped: err.name === "AbortError" ? "tes_timeout" : "tes_unreachable",
+    };
+  }
+  clearTimeout(timer);
+
+  if (!response.ok) {
+    return { memories: [], skipped: `tes_http_${response.status}` };
+  }
+
+  let payload;
+  try {
+    payload = await response.json();
+  } catch {
+    return { memories: [], skipped: "tes_invalid_json" };
+  }
+
+  if (payload.errors?.length) {
+    const reason = payload.errors[0].message || "tes_graphql_error";
+    return { memories: [], skipped: `tes_graphql:${shortenReason(reason)}` };
+  }
+
+  return { memories: payload.data?.semanticSearchMemories || [] };
+}
+
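
The fail-soft contract is the part worth internalizing: hostedSearch() reports failure through skipped instead of throwing. A sketch of a caller honoring it, in the spirit of the LLM proxy described in the doc comment (responseHeaders and injectMemories are hypothetical):

    const { memories, skipped } = await hostedSearch(config, userQuery, { timeoutMs: 800 });
    if (skipped) {
      responseHeaders.set("X-TES-Skipped", skipped); // annotate, then forward unmodified
    } else {
      prompt = injectMemories(prompt, memories); // hypothetical prompt-assembly helper
    }
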
+/**
+ * Emit a CHAT_TURN event to the conversation-analytics module of a
+ * remote TES tenant. The deep-memory consumer also subscribes to
+ * CHAT_TURN, so a single emit lands in both pipelines via consumer
+ * fan-out at the queue layer.
+ *
+ * @param {object} config — { endpoint, clientId, apiKey }
+ * @param {object} payload
+ * @param {string} [payload.userMessage]
+ * @param {string} [payload.assistantResponse]
+ * @param {string} [payload.model]
+ * @param {object} [payload.usage]
+ * @param {Array} [payload.toolCalls]
+ * @param {number} [payload.turnNumber]
+ * @param {string} [payload.systemPrompt]
+ * @param {string} [payload.sessionId]
+ * @param {string} [payload.userId]
+ * @param {object} [payload.extra] — additional attributes merged onto the event
+ * @param {object} [opts]
+ * @param {string} [opts.source="tes-sdk"] — attribution string written into attributes.source
+ * @param {number} [opts.timeoutMs=10000]
+ * @returns {Promise<{ ok: boolean, eventId?: string, skipped?: string }>}
+ */
+export async function hostedEmitChatTurn(config, payload, opts = {}) {
+  if (!payload) return { ok: false, skipped: "no_payload" };
+  if (!payload.userMessage && !payload.assistantResponse) {
+    return { ok: false, skipped: "empty_turn" };
+  }
+
+  let cfg;
+  try {
+    cfg = normalizeConfig(config);
+  } catch (err) {
+    return { ok: false, skipped: `config_error:${err.message}` };
+  }
+
+  const source = opts.source || "tes-sdk";
+  const timeoutMs = opts.timeoutMs ?? DEFAULT_EMIT_TIMEOUT_MS;
+
+  const attributes = { source };
+  if (payload.userMessage !== undefined)
+    attributes.user_message = payload.userMessage;
+  if (payload.assistantResponse !== undefined)
+    attributes.assistant_response = payload.assistantResponse;
+  if (payload.model) attributes.model = payload.model;
+  if (payload.usage) attributes.usage = payload.usage;
+  if (payload.toolCalls?.length) attributes.tool_calls = payload.toolCalls;
+  if (payload.turnNumber !== undefined)
+    attributes.turn_number = payload.turnNumber;
+  if (payload.systemPrompt) attributes.system_prompt = payload.systemPrompt;
+  if (payload.userId) attributes.user_id = payload.userId;
+  if (payload.extra && typeof payload.extra === "object") {
+    Object.assign(attributes, payload.extra);
+  }
+
+  const data = { attributes };
+  if (payload.sessionId) data.entity_id = payload.sessionId;
+
+  const input = { eventType: "CHAT_TURN", data };
+
+  const controller = new AbortController();
+  const timer = setTimeout(() => controller.abort(), timeoutMs);
+
+  let response;
+  try {
+    response = await fetch(`${cfg.endpoint}/api/graphql`, {
+      method: "POST",
+      headers: buildHostedHeaders(cfg),
+      body: JSON.stringify({
+        query: CREATE_MODULE_EVENT_MUTATION,
+        variables: { moduleId: "conversation-analytics", input },
+      }),
+      signal: controller.signal,
+    });
+  } catch (err) {
+    clearTimeout(timer);
+    return {
+      ok: false,
+      skipped: err.name === "AbortError" ? "tes_timeout" : "tes_unreachable",
+    };
+  }
+  clearTimeout(timer);
+
+  if (!response.ok) {
+    return { ok: false, skipped: `tes_http_${response.status}` };
+  }
+
+  let body;
+  try {
+    body = await response.json();
+  } catch {
+    return { ok: false, skipped: "tes_invalid_json" };
+  }
+
+  if (body.errors?.length) {
+    return {
+      ok: false,
+      skipped: `tes_graphql:${shortenReason(body.errors[0].message)}`,
+    };
+  }
+
+  return {
+    ok: !!body.data?.createModuleEvent?.success,
+    eventId: body.data?.createModuleEvent?.eventId,
+  };
+}
+
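
Because one CHAT_TURN emit fans out to both the analytics and deep-memory consumers, the field worth passing consistently is sessionId, which the code above maps to the event's entity_id. A sketch with session correlation (all values are placeholders):

    const result = await hostedEmitChatTurn(config, {
      userMessage: "What's on my calendar?",
      assistantResponse: "You have two meetings today.",
      model: "gpt-4o-mini",
      sessionId: "sess_123", // becomes entity_id on the event
      usage: { prompt_tokens: 412, completion_tokens: 38 },
    }, { source: "my-product" });
    if (!result.ok) console.warn(`chat turn not recorded: ${result.skipped}`);
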
+/**
+ * Emit a STORE_MEMORY event against the deep-memory module. Used by the
+ * OpenClaw plugin for explicit memory-write tools.
+ *
+ * @param {object} config
+ * @param {string} content
+ * @param {object} [metadata]
+ * @param {object} [opts]
+ * @param {string} [opts.source="tes-sdk"]
+ * @param {number} [opts.timeoutMs=10000]
+ * @returns {Promise<{ ok: boolean, eventId?: string, skipped?: string }>}
+ */
+export async function hostedStoreMemory(
+  config,
+  content,
+  metadata = {},
+  opts = {}
+) {
+  if (!content) return { ok: false, skipped: "no_content" };
+
+  let cfg;
+  try {
+    cfg = normalizeConfig(config);
+  } catch (err) {
+    return { ok: false, skipped: `config_error:${err.message}` };
+  }
+
+  const source = opts.source || "tes-sdk";
+  const timeoutMs = opts.timeoutMs ?? DEFAULT_EMIT_TIMEOUT_MS;
+
+  const data = {
+    entity_id: metadata.session_id || metadata.sessionId || source,
+    attributes: {
+      ...metadata,
+      content,
+      source,
+    },
+  };
+
+  const controller = new AbortController();
+  const timer = setTimeout(() => controller.abort(), timeoutMs);
+
+  let response;
+  try {
+    response = await fetch(`${cfg.endpoint}/api/graphql`, {
+      method: "POST",
+      headers: buildHostedHeaders(cfg),
+      body: JSON.stringify({
+        query: CREATE_MODULE_EVENT_MUTATION,
+        variables: {
+          moduleId: "deep-memory",
+          input: { eventType: "STORE_MEMORY", data },
+        },
+      }),
+      signal: controller.signal,
+    });
+  } catch (err) {
+    clearTimeout(timer);
+    return {
+      ok: false,
+      skipped: err.name === "AbortError" ? "tes_timeout" : "tes_unreachable",
+    };
+  }
+  clearTimeout(timer);
+
+  if (!response.ok) {
+    return { ok: false, skipped: `tes_http_${response.status}` };
+  }
+
+  let body;
+  try {
+    body = await response.json();
+  } catch {
+    return { ok: false, skipped: "tes_invalid_json" };
+  }
+
+  if (body.errors?.length) {
+    return {
+      ok: false,
+      skipped: `tes_graphql:${shortenReason(body.errors[0].message)}`,
+    };
+  }
+
+  return {
+    ok: !!body.data?.createModuleEvent?.success,
+    eventId: body.data?.createModuleEvent?.eventId,
+  };
+}
+
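
hostedStoreMemory() is the one helper the file-level @example does not cover. A minimal sketch (the category metadata key is hypothetical; session_id doubles as the event's entity_id per the code above):

    const { ok, eventId, skipped } = await hostedStoreMemory(
      config,
      "User prefers metric units.",
      { session_id: "sess_123", category: "preference" },
      { source: "my-product" }
    );
    if (!ok) console.warn(`memory not stored: ${skipped}`);
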
+function shortenReason(msg) {
+  if (typeof msg !== "string") return "unknown";
+  return msg
+    .toLowerCase()
+    .replace(/[^a-z0-9]+/g, "_")
+    .slice(0, 60);
+}