dashclaw 1.8.3 → 1.9.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3) hide show
  1. package/README.md +61 -0
  2. package/dashclaw.js +92 -3
  3. package/package.json +1 -1
package/README.md CHANGED
@@ -558,6 +558,46 @@ Report a token usage snapshot for this agent.
558
558
 
559
559
  **Returns:** `Promise<{snapshot: Object}>`
560
560
 
561
+ ### claw.wrapClient(llmClient, options?)
562
+ Wrap an Anthropic or OpenAI client to auto-report token usage after every call. The client is mutated in place and the same instance is returned for fluent usage; wrapping an already-wrapped client is a no-op. Streaming calls (where the response lacks `.usage`) are safely ignored.
563
+
564
+ **Parameters:**
565
+ | Parameter | Type | Required | Description |
566
+ |-----------|------|----------|-------------|
567
+ | llmClient | Object | Yes | An Anthropic or OpenAI SDK client instance |
568
+ | options.provider | string | No | Force `'anthropic'` or `'openai'` if auto-detect fails |
569
+
570
+ **Returns:** The wrapped client (same instance)
571
+
572
+ **Example (Anthropic):**
573
+ ```javascript
574
+ import Anthropic from '@anthropic-ai/sdk';
575
+ import { DashClaw } from 'dashclaw';
576
+
577
+ const claw = new DashClaw({ baseUrl: 'http://localhost:3000', agentId: 'my-agent', apiKey: '...' });
578
+ const anthropic = claw.wrapClient(new Anthropic());
579
+
580
+ const msg = await anthropic.messages.create({
581
+ model: 'claude-sonnet-4-20250514',
582
+ max_tokens: 1024,
583
+ messages: [{ role: 'user', content: 'Hello' }],
584
+ });
585
+ // Token usage auto-reported to DashClaw
586
+ ```
587
+
588
+ **Example (OpenAI):**
589
+ ```javascript
590
+ import OpenAI from 'openai';
591
+
592
+ const openai = claw.wrapClient(new OpenAI());
593
+
594
+ const chat = await openai.chat.completions.create({
595
+ model: 'gpt-4o',
596
+ messages: [{ role: 'user', content: 'Hello' }],
597
+ });
598
+ // Token usage auto-reported to DashClaw
599
+ ```
600
+
561
601
  ### claw.recordDecision(entry)
562
602
  Record a decision for the learning database. Track what your agent decides and why.
563
603
 
@@ -1093,6 +1133,27 @@ Scan text and store finding metadata for audit trails. The original content is n
1093
1133
 
1094
1134
  **Returns:** `Promise<{clean: boolean, findings_count: number, findings: Object[], redacted_text: string}>`
1095
1135
 
1136
+ ### claw.scanPromptInjection(text, options?)
1137
+ Scan text for prompt injection attacks — role overrides, delimiter injection, instruction smuggling, data exfiltration attempts, and encoding evasion. Returns risk level and actionable recommendation.
1138
+
1139
+ **Parameters:**
1140
+ | Parameter | Type | Required | Description |
1141
+ |-----------|------|----------|-------------|
1142
+ | text | string | Yes | Text to scan for injection attacks |
1143
+ | options.source | string | No | Where this text came from (e.g. user_input, tool_output, retrieval) |
1144
+
1145
+ **Returns:** `Promise<{clean: boolean, risk_level: string, recommendation: string, findings_count: number, critical_count: number, categories: string[], findings: Object[]}>`
1146
+
1147
+ **Example:**
1148
+ ```javascript
1149
+ const result = await claw.scanPromptInjection(userMessage, { source: 'user_input' });
1150
+ if (result.recommendation === 'block') {
1151
+ console.error(`Blocked: ${result.findings_count} injection patterns detected`);
1152
+ } else if (result.recommendation === 'warn') {
1153
+ console.warn(`Warning: ${result.categories.join(', ')} detected`);
1154
+ }
1155
+ ```
1156
+
1096
1157
  ---
1097
1158
 
1098
1159
  ## Agent Messaging
package/dashclaw.js CHANGED
@@ -13,7 +13,7 @@
13
13
  * - Automation Snippets (5)
14
14
  * - User Preferences (6)
15
15
  * - Daily Digest (1)
16
- * - Security Scanning (2)
16
+ * - Security Scanning (3)
17
17
  * - Agent Messaging (9)
18
18
  * - Behavior Guard (2)
19
19
  * - Agent Pairing (3)
@@ -885,7 +885,7 @@ class DashClaw {
885
885
  }
886
886
 
887
887
  // ══════════════════════════════════════════════
888
- // Category 4: Dashboard Data (8 methods)
888
+ // Category 4: Dashboard Data (9 methods)
889
889
  // ══════════════════════════════════════════════
890
890
 
891
891
  /**
@@ -905,6 +905,79 @@ class DashClaw {
905
905
  });
906
906
  }
907
907
 
908
+ /**
909
+ * Internal: fire-and-forget token report extracted from an LLM response.
910
+ * @private
911
+ */
912
+ async _reportTokenUsageFromLLM({ tokens_in, tokens_out, model }) {
913
+ if (tokens_in == null && tokens_out == null) return;
914
+ try {
915
+ await this._request('/api/tokens', 'POST', {
916
+ tokens_in: tokens_in || 0,
917
+ tokens_out: tokens_out || 0,
918
+ model: model || undefined,
919
+ agent_id: this.agentId,
920
+ });
921
+ } catch (_) {
922
+ // fire-and-forget: never let telemetry break the caller
923
+ }
924
+ }
925
+
926
+ /**
927
+ * Wrap an Anthropic or OpenAI client to auto-report token usage after each call.
928
+ * Returns the same client instance (mutated) for fluent usage.
929
+ *
930
+ * @param {Object} llmClient - An Anthropic or OpenAI SDK client instance
931
+ * @param {Object} [options]
932
+ * @param {'anthropic'|'openai'} [options.provider] - Force provider detection
933
+ * @returns {Object} The wrapped client
934
+ *
935
+ * @example
936
+ * const anthropic = claw.wrapClient(new Anthropic());
937
+ * const msg = await anthropic.messages.create({ model: 'claude-sonnet-4-20250514', max_tokens: 1024, messages: [...] });
938
+ * // Token usage is auto-reported to DashClaw
939
+ */
940
+ wrapClient(llmClient, { provider } = {}) {
941
+ if (llmClient._dashclawWrapped) return llmClient;
942
+
943
+ const detected = provider
944
+ || (llmClient.messages?.create ? 'anthropic' : null)
945
+ || (llmClient.chat?.completions?.create ? 'openai' : null);
946
+
947
+ if (!detected) {
948
+ throw new Error(
949
+ 'DashClaw.wrapClient: unable to detect provider. Pass { provider: "anthropic" } or { provider: "openai" }.'
950
+ );
951
+ }
952
+
953
+ if (detected === 'anthropic') {
954
+ const original = llmClient.messages.create.bind(llmClient.messages);
955
+ llmClient.messages.create = async (...args) => {
956
+ const response = await original(...args);
957
+ this._reportTokenUsageFromLLM({
958
+ tokens_in: response?.usage?.input_tokens ?? null,
959
+ tokens_out: response?.usage?.output_tokens ?? null,
960
+ model: response?.model ?? null,
961
+ });
962
+ return response;
963
+ };
964
+ } else if (detected === 'openai') {
965
+ const original = llmClient.chat.completions.create.bind(llmClient.chat.completions);
966
+ llmClient.chat.completions.create = async (...args) => {
967
+ const response = await original(...args);
968
+ this._reportTokenUsageFromLLM({
969
+ tokens_in: response?.usage?.prompt_tokens ?? null,
970
+ tokens_out: response?.usage?.completion_tokens ?? null,
971
+ model: response?.model ?? null,
972
+ });
973
+ return response;
974
+ };
975
+ }
976
+
977
+ llmClient._dashclawWrapped = true;
978
+ return llmClient;
979
+ }
980
+
908
981
  /**
909
982
  * Record a decision for the learning database.
910
983
  * @param {Object} entry
@@ -1497,7 +1570,7 @@ class DashClaw {
1497
1570
  }
1498
1571
 
1499
1572
  // ══════════════════════════════════════════════
1500
- // Category 10: Security Scanning (2 methods)
1573
+ // Category 10: Security Scanning (3 methods)
1501
1574
  // ══════════════════════════════════════════════
1502
1575
 
1503
1576
  /**
@@ -1532,6 +1605,22 @@ class DashClaw {
1532
1605
  });
1533
1606
  }
1534
1607
 
1608
+ /**
1609
+ * Scan text for prompt injection attacks (role overrides, delimiter injection,
1610
+ * instruction smuggling, data exfiltration attempts, etc.).
1611
+ * @param {string} text - Text to scan
1612
+ * @param {Object} [options]
1613
+ * @param {string} [options.source] - Where this text came from (for context)
1614
+ * @returns {Promise<{clean: boolean, risk_level: string, recommendation: string, findings_count: number, critical_count: number, categories: string[], findings: Object[]}>}
1615
+ */
1616
+ async scanPromptInjection(text, options = {}) {
1617
+ return this._request('/api/security/prompt-injection', 'POST', {
1618
+ text,
1619
+ source: options.source,
1620
+ agent_id: this.agentId,
1621
+ });
1622
+ }
1623
+
1535
1624
  // ══════════════════════════════════════════════
1536
1625
  // Category 11: Agent Messaging (11 methods)
1537
1626
  // ══════════════════════════════════════════════
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "dashclaw",
3
- "version": "1.8.3",
3
+ "version": "1.9.0",
4
4
  "description": "Full-featured agent toolkit for the DashClaw platform. 96+ methods across 22+ categories for action recording, context management, session handoffs, security scanning, behavior guard, compliance, task routing, identity binding, organization management, webhooks, bulk sync, and more.",
5
5
  "type": "module",
6
6
  "publishConfig": {