agentid-sdk 0.1.17 → 0.1.19

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -1,65 +1,197 @@
1
- # 🛡️ AgentID: Enterprise AI Governance & PII Shield
1
+ # agentid-sdk (Node.js / TypeScript)
2
2
 
3
- AgentID je inteligentní brána (gateway) pro vaše LLM aplikace. Monitoruje náklady, filtruje PII (osobní údaje) v reálném čase a zajišťuje soulad s GDPR.
3
+ [![npm version](https://img.shields.io/npm/v/agentid-sdk.svg)](https://www.npmjs.com/package/agentid-sdk)
4
+ [![Node](https://img.shields.io/node/v/agentid-sdk.svg)](https://www.npmjs.com/package/agentid-sdk)
5
+ [![Node >=18](https://img.shields.io/badge/node-%3E%3D18-339933.svg)](https://nodejs.org/)
6
+ [![License: MIT](https://img.shields.io/badge/license-MIT-blue.svg)](LICENSE)
4
7
 
5
- ## 🚀 Rychlý start (60 sekund)
8
+ ## 1. Introduction
6
9
 
7
- ### 1. Instalace
10
+ `agentid-sdk` is the official Node.js/TypeScript SDK for AgentID, an AI security and compliance System of Record. It allows you to gate LLM traffic through guard checks, enforce policy before execution, and capture durable telemetry for audit and governance workflows.
8
11
 
9
- ```bash
10
- # Pro Python projekty
11
- pip install agentid-sdk
12
+ ## 2. Installation
12
13
 
13
- # Pro JavaScript/TypeScript projekty
14
+ ```bash
14
15
  npm install agentid-sdk
15
16
  ```
16
17
 
17
- ### 2. Integrace (Python)
18
+ ## 3. Prerequisites
19
+
20
+ 1. Create an account at `https://app.getagentid.com`.
21
+ 2. Create an AI system and copy:
22
+ - `AGENTID_API_KEY` (for example `sk_live_...`)
23
+ - `AGENTID_SYSTEM_ID` (UUID)
24
+ 3. If using OpenAI/LangChain, set:
25
+ - `OPENAI_API_KEY`
26
+
27
+ ```bash
28
+ export AGENTID_API_KEY="sk_live_..."
29
+ export AGENTID_SYSTEM_ID="00000000-0000-0000-0000-000000000000"
30
+ export OPENAI_API_KEY="sk-proj-..."
31
+ ```
18
32
 
19
- Stačí zabalit vašeho stávajícího OpenAI klienta.
33
+ ## 4. Quickstart
20
34
 
21
- ```python
22
- from openai import OpenAI
23
- from agentid import AgentID
35
+ ```ts
36
+ import { AgentID } from "agentid-sdk";
24
37
 
25
- client = OpenAI(api_key="your_openai_key")
26
- agent = AgentID(api_key="ag_prod_...")
38
+ const agent = new AgentID(); // auto-loads AGENTID_API_KEY
39
+ const systemId = process.env.AGENTID_SYSTEM_ID!;
27
40
 
28
- # Automaticky filtruje PII a loguje metriky
29
- response = agent.wrap_openai(client, system_id="sys_...").chat.completions.create(
30
- model="gpt-4",
31
- messages=[{"role": "user", "content": "Můj email je jan.novak@firma.cz"}]
32
- )
41
+ const verdict = await agent.guard({
42
+ system_id: systemId,
43
+ input: "Summarize this ticket in one sentence.",
44
+ model: "gpt-4o-mini",
45
+ user_id: "quickstart-user",
46
+ });
47
+ if (!verdict.allowed) throw new Error(`Blocked: ${verdict.reason}`);
48
+
49
+ await agent.log({
50
+ system_id: systemId,
51
+ event_id: verdict.client_event_id,
52
+ model: "gpt-4o-mini",
53
+ input: "Summarize this ticket in one sentence.",
54
+ output: "Summary generated.",
55
+ metadata: { agent_role: "support-assistant" },
56
+ });
33
57
  ```
34
58
 
35
- ### 3. Integrace (Node.js)
59
+ ## 5. Core Integrations
60
+
61
+ ### OpenAI Wrapper
62
+
63
+ ```bash
64
+ npm install agentid-sdk openai
65
+ ```
36
66
 
37
67
  ```ts
38
- import { AgentID } from "agentid-sdk";
39
68
  import OpenAI from "openai";
69
+ import { AgentID } from "agentid-sdk";
40
70
 
41
- const openai = new OpenAI();
42
71
  const agent = new AgentID({
43
- apiKey: "ag_prod_...",
44
- strictMode: false, // default (fail-open on timeout/unreachable AgentID API)
45
- guardTimeoutMs: 6000, // optional: timeout for AgentID /guard call in ms
46
- // strictMode: true, // fail-closed for high-risk workloads
72
+ piiMasking: true,
73
+ });
74
+
75
+ const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY! });
76
+ const secured = agent.wrapOpenAI(openai, {
77
+ system_id: process.env.AGENTID_SYSTEM_ID!,
78
+ user_id: "customer-123",
79
+ });
80
+
81
+ const response = await secured.chat.completions.create({
82
+ model: "gpt-4o-mini",
83
+ messages: [{ role: "user", content: "What is the capital of the Czech Republic?" }],
84
+ });
85
+
86
+ console.log(response.choices[0]?.message?.content ?? "");
87
+ ```
88
+
89
+ ### LangChain Integration
90
+
91
+ ```bash
92
+ npm install agentid-sdk openai @langchain/core @langchain/openai
93
+ ```
94
+
95
+ ```ts
96
+ import { AgentID } from "agentid-sdk";
97
+ import { AgentIDCallbackHandler } from "agentid-sdk/langchain";
98
+ import { ChatOpenAI } from "@langchain/openai";
99
+ import { ChatPromptTemplate } from "@langchain/core/prompts";
100
+ import { StringOutputParser } from "@langchain/core/output_parsers";
101
+
102
+ const agent = new AgentID();
103
+ const handler = new AgentIDCallbackHandler(agent, {
104
+ system_id: process.env.AGENTID_SYSTEM_ID!,
47
105
  });
48
106
 
49
- const proxiedOpenAI = agent.wrapOpenAI(openai, {
50
- system_id: "sys_...",
51
- user_id: "system-auto-summary", // optional service/user identity
107
+ const prompt = ChatPromptTemplate.fromTemplate("Answer in one sentence: {question}");
108
+ const model = new ChatOpenAI({
109
+ apiKey: process.env.OPENAI_API_KEY!,
110
+ model: "gpt-4o-mini",
52
111
  });
112
+ const chain = prompt.pipe(model).pipe(new StringOutputParser());
113
+
114
+ const result = await chain.invoke(
115
+ { question: "What is the capital of the Czech Republic?" },
116
+ { callbacks: [handler] }
117
+ );
118
+ console.log(result);
53
119
  ```
54
120
 
55
- ## 🔒 Klíčové vlastnosti
121
+ ### Raw Ingest API (Telemetry Only)
122
+
123
+ ```ts
124
+ import { AgentID } from "agentid-sdk";
125
+
126
+ const agent = new AgentID();
127
+
128
+ await agent.log({
129
+ system_id: process.env.AGENTID_SYSTEM_ID!,
130
+ event_type: "complete",
131
+ severity: "info",
132
+ model: "gpt-4o-mini",
133
+ input: "Raw telemetry prompt",
134
+ output: '{"ok": true}',
135
+ metadata: { agent_role: "batch-worker", channel: "manual_ingest" },
136
+ });
137
+ ```
138
+
139
+ ## 6. Advanced Configuration
140
+
141
+ ### Custom identity / role metadata
142
+
143
+ ```ts
144
+ await agent.guard({
145
+ system_id: process.env.AGENTID_SYSTEM_ID!,
146
+ input: "Process user request",
147
+ user_id: "service:billing-agent",
148
+ model: "gpt-4o-mini",
149
+ });
150
+
151
+ await agent.log({
152
+ system_id: process.env.AGENTID_SYSTEM_ID!,
153
+ model: "gpt-4o-mini",
154
+ input: "Process user request",
155
+ output: "Done",
156
+ metadata: { agent_role: "billing-agent", environment: "prod" },
157
+ });
158
+ ```
159
+
160
+ ### Strict mode and timeout tuning
161
+
162
+ ```ts
163
+ const agent = new AgentID({
164
+ strictMode: true, // fail-closed on guard connectivity/timeouts
165
+ guardTimeoutMs: 10000, // default guard timeout is 10000ms
166
+ });
167
+ ```
168
+
169
+ ### Error handling behavior
170
+
171
+ - `guard()` returns a verdict (`allowed`, `reason`); handle deny paths explicitly.
172
+ - `wrapOpenAI()` throws `Error("AgentID: Security Blocked (...)")` when blocked.
173
+ - Default mode is fail-open for connectivity/timeouts (`timeout_fallback`, `guard_unreachable`, `system_failure_fail_open`).
174
+ - If `strictMode` is not explicitly set in SDK code, runtime behavior follows the system configuration from AgentID (`strict_security_mode` / `failure_mode`).
175
+ - Set `strictMode: true` to fail-closed (`network_error_strict_mode` / `server_error`) in high-sensitivity environments.
176
+ - Ingest retries transient failures (5xx/429) and logs warnings if persistence fails.
177
+
178
+ ## 7. Security & Compliance
179
+
180
+ - Optional local PII masking and local policy enforcement before model dispatch.
181
+ - Prompt-injection scanning in the SDK request path.
182
+ - Guard checks run pre-execution; ingest telemetry captures prompt/output lifecycle.
183
+ - Safe for server and serverless runtimes (including async completion flows).
184
+ - Supports compliance and forensics workflows with durable event records.
185
+
186
+ ## 8. Support
187
+
188
+ - Dashboard: `https://app.getagentid.com`
189
+ - Repository: `https://github.com/ondrejsukac-rgb/agentid/tree/main/js-sdk`
190
+ - Issues: `https://github.com/ondrejsukac-rgb/agentid/issues`
56
191
 
57
- - PII Scrubbing: Automatická redakce e-mailů, rodných čísel a hesel před odesláním do cloudu.
58
- - Crypto-Shredding: Možnost nenávratně smazat citlivá data z logů na žádost uživatele (GDPR).
59
- - Fail-Safe architektura: Inteligentní přepínání mezi bezpečností a dostupností (Fail-Open/Closed).
60
- - Strict mode: při timeoutu Guard API můžeš vynutit fail-closed (`strictMode: true`).
192
+ ## 9. Publishing Notes (NPM)
61
193
 
62
- ## Guard transformed input
194
+ npm includes `README.md` from the package root when you run `npm publish`, and the registry renders it on the package page.
63
195
 
64
- If `/guard` returns `transformed_input`, the OpenAI and LangChain wrappers automatically
65
- forward that transformed value to the downstream LLM call.
196
+ - File location: next to `package.json` in `js-sdk/`.
197
+ - No additional NPM config is required for README rendering.
@@ -0,0 +1,424 @@
1
+ // src/langchain.ts
2
+ import { BaseCallbackHandler } from "@langchain/core/callbacks/base";
3
// Coerce an arbitrary value to a string; anything non-string becomes "".
function safeString(val) {
  if (typeof val === "string") {
    return val;
  }
  return "";
}
6
// True only when the AGENTID_DEBUG_CALLBACK env var is exactly "1".
// Wrapped in try/catch so restricted runtimes (no process) never throw.
function callbackDebugEnabled() {
  try {
    if (typeof process === "undefined") {
      return false;
    }
    const flag = process?.env?.AGENTID_DEBUG_CALLBACK;
    return flag === "1";
  } catch {
    return false;
  }
}
13
// Emit a namespaced debug line to the console, but only when callback
// debugging is enabled; `details` is passed through as a second console arg.
function logCallbackDebug(message, details) {
  if (!callbackDebugEnabled()) return;
  if (details) {
    console.log(`[AgentID][LC] ${message}`, details);
  } else {
    console.log(`[AgentID][LC] ${message}`);
  }
}
21
// Normalize LangChain message content into plain text. Accepts a bare string,
// an array of content parts (strings or objects with `text`/`content`), or a
// single object with `text`/`content`; anything else yields "".
function extractTextFromContent(content) {
  if (typeof content === "string") return content;
  if (Array.isArray(content)) {
    const pieces = [];
    for (const item of content) {
      let piece = "";
      if (typeof item === "string") {
        piece = item;
      } else if (item && typeof item === "object") {
        const record = item;
        if (typeof record.text === "string") {
          piece = record.text;
        } else if (typeof record.content === "string") {
          piece = record.content;
        }
      }
      if (piece.length > 0) pieces.push(piece);
    }
    return pieces.join("\n");
  }
  if (content && typeof content === "object") {
    const record = content;
    if (typeof record.text === "string") return record.text;
    if (typeof record.content === "string") return record.content;
  }
  return "";
}
43
// Best-effort role extraction from a message-like value. Tries the plain
// `role` then `type` properties, then the LangChain `_getType`/`getType`
// methods (swallowing any errors they throw); returns null when nothing fits.
function getMessageRole(msg) {
  if (!msg || typeof msg !== "object") return null;
  const typed = msg;
  if (typeof typed.role === "string") return typed.role;
  if (typeof typed.type === "string") return typed.type;
  for (const method of ["_getType", "getType"]) {
    if (typeof typed[method] !== "function") continue;
    try {
      const role = typed[method]();
      if (typeof role === "string") return role;
    } catch {
      // Ignore and fall through to the next strategy.
    }
  }
  return null;
}
64
// Return the final prompt string from a prompts array; non-arrays, empty
// arrays, and non-string trailing entries all yield "".
function extractPromptFromPrompts(prompts) {
  if (!Array.isArray(prompts) || prompts.length === 0) {
    return "";
  }
  const last = prompts[prompts.length - 1];
  return typeof last === "string" ? last : "";
}
70
// Flatten one level of message batches and return the text of the LAST
// user/human message, or "" when none is found.
function extractPromptFromMessages(messages) {
  if (!Array.isArray(messages)) {
    return "";
  }
  let lastUser = null;
  for (const msg of messages.flat()) {
    const role = getMessageRole(msg);
    if (role === "user" || role === "human") {
      lastUser = msg;
    }
  }
  if (!lastUser || typeof lastUser !== "object") {
    return "";
  }
  return extractTextFromContent(lastUser.content ?? lastUser.text);
}
95
// Overwrite the final prompt in-place; returns whether a writable slot existed.
function setPromptInPrompts(prompts, sanitizedInput) {
  const writable = Array.isArray(prompts) && prompts.length > 0;
  if (writable) {
    prompts[prompts.length - 1] = sanitizedInput;
  }
  return writable;
}
102
// Replace the content of the LAST user/human message in-place (mutates the
// caller's message object). Prefers an existing `content` field, then `text`,
// and creates `content` when neither exists. Returns false when `messages` is
// not an array or no user/human message object is present.
function setPromptInMessages(messages, sanitizedInput) {
  if (!Array.isArray(messages)) {
    return false;
  }
  const flat = messages.flat();
  for (let i = flat.length - 1; i >= 0; i -= 1) {
    const candidate = flat[i];
    if (!candidate || typeof candidate !== "object") continue;
    const role = candidate.role ?? candidate.type;
    if (role !== "user" && role !== "human") continue;
    if (!("content" in candidate) && "text" in candidate) {
      candidate.text = sanitizedInput;
    } else {
      candidate.content = sanitizedInput;
    }
    return true;
  }
  return false;
}
137
// Resolve the model name from callback kwargs / serialized metadata, trying
// the most specific location first. Each tier keeps the original ?? chain so
// the first non-nullish candidate wins, and is accepted only when it is a
// non-empty string; otherwise the search moves to the next tier.
function extractModel(serialized, kwargs) {
  const asRecord = (v) => (v && typeof v === "object" ? v : null);
  const pick = (candidate) => (typeof candidate === "string" && candidate ? candidate : void 0);
  const kw = asRecord(kwargs);
  let found = pick(kw?.model ?? kw?.model_name ?? kw?.modelName);
  if (found) return found;
  found = pick(kw?.invocation_params?.model ?? kw?.invocation_params?.model_name ?? kw?.invocation_params?.modelName);
  if (found) return found;
  found = pick(
    kw?.options?.model ?? kw?.options?.model_name ?? kw?.options?.modelName ??
    kw?.kwargs?.model ?? kw?.kwargs?.model_name ?? kw?.kwargs?.modelName
  );
  if (found) return found;
  const ser = asRecord(serialized);
  const serKw = asRecord(ser?.kwargs);
  found = pick(serKw?.model ?? serKw?.model_name ?? serKw?.modelName);
  if (found) return found;
  // Last resort: the serialized component's own name/id.
  return pick(ser?.name ?? ser?.id);
}
153
// Pull the model name out of an LLM result payload: llmOutput first, then the
// first generation's response metadata, then its generation info. Only
// non-empty strings are accepted; otherwise undefined.
function extractModelFromOutput(output) {
  const pick = (v) => (typeof v === "string" && v ? v : void 0);
  const llmOutput = output?.llmOutput ?? output?.llm_output;
  const fromLlm = pick(llmOutput?.model ?? llmOutput?.model_name ?? llmOutput?.modelName);
  if (fromLlm) return fromLlm;
  const first = output?.generations?.[0]?.[0];
  const responseMeta = first?.message?.response_metadata ?? first?.message?.responseMetadata;
  const fromResponse = pick(responseMeta?.model_name ?? responseMeta?.model ?? responseMeta?.modelName);
  if (fromResponse) return fromResponse;
  const generationInfo = first?.generation_info ?? first?.generationInfo;
  return pick(generationInfo?.model_name ?? generationInfo?.model ?? generationInfo?.modelName);
}
166
// First generation's `text` (falling back to its message content) from an LLM
// result; non-string candidates and missing generations yield "".
function extractOutputText(output) {
  const first = output?.generations?.[0]?.[0];
  const candidate = first?.text ?? first?.message?.content;
  if (typeof candidate === "string") {
    return candidate;
  }
  return "";
}
172
// Locate a token-usage object in an LLM result, scanning known locations in
// priority order: llmOutput, then the first generation's message
// usage_metadata, response_metadata, and generation_info. A candidate is only
// accepted when it is an object; returns undefined when none is present.
function extractTokenUsage(output) {
  const asUsage = (v) => (v && typeof v === "object" ? v : void 0);
  const llmOutput = output?.llmOutput ?? output?.llm_output;
  const topLevel = asUsage(llmOutput?.tokenUsage ?? llmOutput?.token_usage ?? llmOutput?.usage);
  if (topLevel) return topLevel;
  const first = output?.generations?.[0]?.[0];
  const messageUsage = asUsage(first?.message?.usage_metadata);
  if (messageUsage) return messageUsage;
  const responseUsage = asUsage(first?.message?.response_metadata?.token_usage ?? first?.message?.response_metadata?.tokenUsage);
  if (responseUsage) return responseUsage;
  return asUsage(first?.generation_info?.token_usage ?? first?.generation_info?.tokenUsage);
}
193
// Loose RFC-4122 UUID check (versions 1-5, standard variant nibble 8/9/a/b),
// case-insensitive and tolerant of surrounding whitespace.
function isUuidLike(value) {
  if (typeof value !== "string") {
    return false;
  }
  const UUID_RE = /^[0-9a-f]{8}-[0-9a-f]{4}-[1-5][0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$/i;
  return UUID_RE.test(value.trim());
}
196
// Generate a v4-style UUID. Prefers the platform's crypto.randomUUID; falls
// back to a Math.random-based imitation (not cryptographically secure) with
// the version nibble forced to 4 and the variant nibble forced to "a".
function createClientEventId() {
  const hasNativeUuid = typeof crypto !== "undefined" && typeof crypto.randomUUID === "function";
  if (hasNativeUuid) {
    return crypto.randomUUID();
  }
  // 0x10000 trick: yields exactly 4 hex chars per call.
  const segment = () => {
    const scaled = Math.floor((1 + Math.random()) * 65536);
    return scaled.toString(16).slice(1);
  };
  return `${segment()}${segment()}-${segment()}-4${segment().slice(1)}-a${segment().slice(1)}-${segment()}${segment()}${segment()}`;
}
203
// Pass booleans through unchanged; every other type maps to null so callers
// can distinguish "explicitly false" from "not set".
function readBooleanField(value) {
  if (value === true || value === false) {
    return value;
  }
  return null;
}
206
// Determine whether the run is streaming. Checks extraParams' stream/streaming
// flags, then extraParams.invocation_params, then serialized.kwargs; an
// explicit boolean at any tier wins, otherwise the default is false.
function extractStreamFlag(serialized, extraParams) {
  const bool = (v) => (typeof v === "boolean" ? v : null);
  const extras = extraParams && typeof extraParams === "object" ? extraParams : null;
  const direct = bool(extras?.stream) ?? bool(extras?.streaming);
  if (direct !== null) return direct;
  const invocation = extras?.invocation_params && typeof extras.invocation_params === "object" ? extras.invocation_params : null;
  const viaInvocation = bool(invocation?.stream) ?? bool(invocation?.streaming);
  if (viaInvocation !== null) return viaInvocation;
  const serializedRecord = serialized && typeof serialized === "object" ? serialized : null;
  const kwargs = serializedRecord?.kwargs && typeof serializedRecord.kwargs === "object" ? serializedRecord.kwargs : null;
  return bool(kwargs?.stream) ?? bool(kwargs?.streaming) ?? false;
}
221
// LangChain callback handler that runs an AgentID guard check before each
// LLM / chat-model invocation (blocking denied runs by throwing) and ships
// telemetry via agent.log() when the run completes or errors.
var AgentIDCallbackHandler = class extends BaseCallbackHandler {
  // agent: AgentID SDK client used for guard/log calls.
  // options.system_id: the AgentID system this handler reports against.
  // options.apiKey / options.api_key: optional per-handler API-key override
  // (empty/whitespace-only strings are treated as absent).
  constructor(agent, options) {
    super();
    this.name = "agentid_callback_handler";
    // Per-run state keyed by stringified runId; entries are removed on end/error.
    this.runs = /* @__PURE__ */ new Map();
    this.agent = agent;
    this.systemId = options.system_id;
    this.apiKeyOverride = options.apiKey?.trim() || options.api_key?.trim() || void 0;
  }
  // Request options carrying the API-key override, or undefined when none was given.
  get requestOptions() {
    return this.apiKeyOverride ? { apiKey: this.apiKeyOverride } : void 0;
  }
  // Capability descriptor attached to guard/log payloads so the backend knows
  // this traffic came through the LangChain integration.
  getLangchainCapabilities() {
    const piiMaskingEnabled = Boolean(
      this.agent.piiMasking
    );
    return {
      capabilities: {
        has_feedback_handler: true,
        pii_masking_enabled: piiMaskingEnabled,
        framework: "langchain"
      }
    };
  }
  // Pre-dispatch pipeline: scan for prompt injection, then let the SDK prepare
  // (e.g. sanitize) the input. The prepare step skips its own injection scan
  // because we already ran one here.
  async preflight(input, stream) {
    await this.agent.scanPromptInjection(input, this.requestOptions);
    const prepared = await this.agent.prepareInputForDispatch({
      input,
      systemId: this.systemId,
      stream,
      skipInjectionScan: true
    }, this.requestOptions);
    return prepared.sanitizedInput;
  }
  // Text-completion entry point: guard the last prompt, write any sanitized /
  // transformed input back into `prompts` in place, and record run state for
  // the matching handleLLMEnd/handleLLMError.
  async handleLLMStart(serialized, prompts, runId, _parentRunId, extraParams) {
    const input = extractPromptFromPrompts(prompts);
    const id = String(runId ?? "");
    logCallbackDebug("handleLLMStart", { runId: id, hasInput: input.length > 0 });
    // No extractable prompt: nothing to guard or log for this run.
    if (!input) {
      return;
    }
    const stream = extractStreamFlag(serialized, extraParams);
    const sanitizedInput = await this.preflight(input, stream);
    if (sanitizedInput !== input) {
      // Sanitization changed the prompt; the payload MUST accept the rewrite,
      // otherwise unmasked data would reach the model.
      const mutated = setPromptInPrompts(prompts, sanitizedInput);
      if (!mutated) {
        throw new Error(
          "AgentID: Strict PII mode requires mutable LangChain prompt payload."
        );
      }
    }
    // Reuse the LangChain runId as the client event id when it is UUID-shaped.
    const requestedClientEventId = isUuidLike(id) ? id.trim() : createClientEventId();
    const modelName = extractModel(serialized, extraParams);
    const verdict = await this.agent.guard({
      input: sanitizedInput,
      system_id: this.systemId,
      model: modelName,
      client_event_id: requestedClientEventId,
      client_capabilities: this.getLangchainCapabilities()
    }, this.requestOptions);
    // Denied runs abort the chain by throwing.
    if (!verdict.allowed) {
      throw new Error(`AgentID: Security Blocked (${verdict.reason ?? "guard_denied"})`);
    }
    const canonicalClientEventId = isUuidLike(verdict.client_event_id) ? verdict.client_event_id.trim() : requestedClientEventId;
    const guardEventId = typeof verdict.guard_event_id === "string" && verdict.guard_event_id.length > 0 ? verdict.guard_event_id : void 0;
    // Server-side transformed input (when provided) replaces the prompt; if the
    // payload is not mutable, fall back to the sanitized input (best effort).
    let transformedInput = typeof verdict.transformed_input === "string" && verdict.transformed_input.length > 0 ? verdict.transformed_input : sanitizedInput;
    if (transformedInput !== sanitizedInput) {
      const mutated = setPromptInPrompts(prompts, transformedInput);
      if (!mutated) {
        transformedInput = sanitizedInput;
      }
    }
    this.runs.set(id, {
      input: transformedInput,
      startedAtMs: Date.now(),
      model: modelName,
      clientEventId: canonicalClientEventId,
      guardEventId
    });
    logCallbackDebug("handleLLMStart state_set", {
      runId: id,
      clientEventId: canonicalClientEventId,
      guardEventId: guardEventId ?? null
    });
  }
  // Chat-model entry point: same guard flow as handleLLMStart, but the prompt
  // is extracted from / written back into the chat message payload.
  async handleChatModelStart(serialized, messages, runId, _parentRunId, extraParams) {
    const input = extractPromptFromMessages(messages);
    const id = String(runId ?? "");
    logCallbackDebug("handleChatModelStart", { runId: id, hasInput: input.length > 0 });
    if (!input) {
      return;
    }
    const stream = extractStreamFlag(serialized, extraParams);
    const sanitizedInput = await this.preflight(input, stream);
    if (sanitizedInput !== input) {
      const mutated = setPromptInMessages(messages, sanitizedInput);
      if (!mutated) {
        throw new Error(
          "AgentID: Strict PII mode requires mutable LangChain message payload."
        );
      }
    }
    const requestedClientEventId = isUuidLike(id) ? id.trim() : createClientEventId();
    const modelName = extractModel(serialized, extraParams);
    const verdict = await this.agent.guard({
      input: sanitizedInput,
      system_id: this.systemId,
      model: modelName,
      client_event_id: requestedClientEventId,
      client_capabilities: this.getLangchainCapabilities()
    }, this.requestOptions);
    if (!verdict.allowed) {
      throw new Error(`AgentID: Security Blocked (${verdict.reason ?? "guard_denied"})`);
    }
    const canonicalClientEventId = isUuidLike(verdict.client_event_id) ? verdict.client_event_id.trim() : requestedClientEventId;
    const guardEventId = typeof verdict.guard_event_id === "string" && verdict.guard_event_id.length > 0 ? verdict.guard_event_id : void 0;
    let transformedInput = typeof verdict.transformed_input === "string" && verdict.transformed_input.length > 0 ? verdict.transformed_input : sanitizedInput;
    if (transformedInput !== sanitizedInput) {
      const mutated = setPromptInMessages(messages, transformedInput);
      if (!mutated) {
        transformedInput = sanitizedInput;
      }
    }
    this.runs.set(id, {
      input: transformedInput,
      startedAtMs: Date.now(),
      model: modelName,
      clientEventId: canonicalClientEventId,
      guardEventId
    });
    logCallbackDebug("handleChatModelStart state_set", {
      runId: id,
      clientEventId: canonicalClientEventId,
      guardEventId: guardEventId ?? null
    });
  }
  // Completion hook: consume the recorded run state and log output, usage, and
  // latency. Runs with no recorded state (e.g. no extractable prompt) are skipped.
  async handleLLMEnd(output, runId) {
    const id = String(runId ?? "");
    logCallbackDebug("handleLLMEnd", { runId: id });
    const state = this.runs.get(id);
    if (!state) {
      logCallbackDebug("handleLLMEnd missing_state", { runId: id });
      return;
    }
    this.runs.delete(id);
    const latency = Date.now() - state.startedAtMs;
    const outText = extractOutputText(output);
    const usage = extractTokenUsage(output);
    const metadata = {};
    if (state.clientEventId) {
      metadata.client_event_id = state.clientEventId;
    }
    if (state.guardEventId) {
      metadata.guard_event_id = state.guardEventId;
    }
    // Prefer the model resolved at start; fall back to the output payload.
    const resolvedModel = state.model ?? extractModelFromOutput(output) ?? "unknown";
    await this.agent.log({
      system_id: this.systemId,
      input: state.input,
      output: outText,
      event_id: state.clientEventId,
      model: resolvedModel,
      usage,
      latency,
      metadata: Object.keys(metadata).length > 0 ? metadata : void 0,
      client_capabilities: this.getLangchainCapabilities()
    }, this.requestOptions);
    logCallbackDebug("handleLLMEnd logged", {
      runId: id,
      clientEventId: state.clientEventId ?? null
    });
  }
  // Error hook: clear any run state and log an error-severity event. Logs even
  // without recorded state so failures before state_set are still captured.
  async handleLLMError(err, runId) {
    const id = String(runId ?? "");
    logCallbackDebug("handleLLMError", { runId: id });
    const state = this.runs.get(id);
    if (state) this.runs.delete(id);
    const message = err && typeof err === "object" && "message" in err ? String(err.message) : String(err ?? "");
    const metadata = {
      error_message: message
    };
    if (state?.clientEventId) {
      metadata.client_event_id = state.clientEventId;
    }
    if (state?.guardEventId) {
      metadata.guard_event_id = state.guardEventId;
    }
    await this.agent.log({
      system_id: this.systemId,
      input: state?.input ?? "",
      output: "",
      event_id: state?.clientEventId,
      model: state?.model ?? "unknown",
      event_type: "error",
      severity: "error",
      metadata,
      client_capabilities: this.getLangchainCapabilities()
    }, this.requestOptions);
  }
};
421
+
422
+ export {
423
+ AgentIDCallbackHandler
424
+ };
package/dist/index.d.mts CHANGED
@@ -1,4 +1,5 @@
1
- export { A as AgentID, a as AgentIDCallbackHandler, G as GuardParams, b as GuardResponse, L as LogParams, P as PreparedInput, R as RequestOptions } from './langchain-CWHTxef0.mjs';
1
+ export { A as AgentID, a as AgentIDCallbackHandler, G as GuardParams, b as GuardResponse, L as LogParams, P as PreparedInput, R as RequestOptions } from './langchain-DJDqqpbT.mjs';
2
+ import '@langchain/core/callbacks/base';
2
3
 
3
4
  type PIIMapping = Record<string, string>;
4
5
  declare class PIIManager {
package/dist/index.d.ts CHANGED
@@ -1,4 +1,5 @@
1
- export { A as AgentID, a as AgentIDCallbackHandler, G as GuardParams, b as GuardResponse, L as LogParams, P as PreparedInput, R as RequestOptions } from './langchain-CWHTxef0.js';
1
+ export { A as AgentID, a as AgentIDCallbackHandler, G as GuardParams, b as GuardResponse, L as LogParams, P as PreparedInput, R as RequestOptions } from './langchain-DJDqqpbT.js';
2
+ import '@langchain/core/callbacks/base';
2
3
 
3
4
  type PIIMapping = Record<string, string>;
4
5
  declare class PIIManager {