@okf/ootils 1.28.5 → 1.29.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/node.d.mts CHANGED
@@ -1761,7 +1761,7 @@ interface IAIChat extends Document {
1761
1761
  lastActivity: Date;
1762
1762
  messages: IMessage[];
1763
1763
  ACLScope: "aiChat" | "reports";
1764
- widgetAskAI_Id?: string;
1764
+ widgetAskAICacheKey?: string;
1765
1765
  immutableActiveFilters?: any;
1766
1766
  immutableContentTypes?: string[];
1767
1767
  }
package/dist/node.d.ts CHANGED
@@ -1761,7 +1761,7 @@ interface IAIChat extends Document {
1761
1761
  lastActivity: Date;
1762
1762
  messages: IMessage[];
1763
1763
  ACLScope: "aiChat" | "reports";
1764
- widgetAskAI_Id?: string;
1764
+ widgetAskAICacheKey?: string;
1765
1765
  immutableActiveFilters?: any;
1766
1766
  immutableContentTypes?: string[];
1767
1767
  }
package/dist/node.js CHANGED
@@ -1047,13 +1047,13 @@ var init_AIChat = __esm({
1047
1047
  default: "aiChat",
1048
1048
  index: true
1049
1049
  },
1050
- // Deterministic ID linking this chat to a specific WidgetAskAI widget in a report.
1051
- // The Report Agent generates widget configs on the backend, so at report creation
1052
- // time no chat exists yet; the LLM query hasn't been fired. Without this ID,
1053
- // every page load would trigger a fresh LLM call. With it, the first render runs
1054
- // the LLM and saves the chat under this ID; subsequent renders find the existing
1055
- // chat and return the cached response immediately.
1056
- widgetAskAI_Id: {
1050
+ // Used exclusively by WidgetAskAI (the Ask AI widget inside Reports).
1051
+ // This field is NOT used by the standalone AskAI chat feature.
1052
+ // It is a SHA256 hash of (query + contentTypes + activeFilters), computed
1053
+ // on the backend. Enables content-addressable caching so that identical
1054
+ // WidgetAskAI configurations return cached responses instantly without
1055
+ // re-invoking the LLM.
1056
+ widgetAskAICacheKey: {
1057
1057
  type: String,
1058
1058
  sparse: true,
1059
1059
  unique: true
package/dist/node.mjs CHANGED
@@ -1052,13 +1052,13 @@ var init_AIChat = __esm({
1052
1052
  default: "aiChat",
1053
1053
  index: true
1054
1054
  },
1055
- // Deterministic ID linking this chat to a specific WidgetAskAI widget in a report.
1056
- // The Report Agent generates widget configs on the backend, so at report creation
1057
- // time no chat exists yet; the LLM query hasn't been fired. Without this ID,
1058
- // every page load would trigger a fresh LLM call. With it, the first render runs
1059
- // the LLM and saves the chat under this ID; subsequent renders find the existing
1060
- // chat and return the cached response immediately.
1061
- widgetAskAI_Id: {
1055
+ // Used exclusively by WidgetAskAI (the Ask AI widget inside Reports).
1056
+ // This field is NOT used by the standalone AskAI chat feature.
1057
+ // It is a SHA256 hash of (query + contentTypes + activeFilters), computed
1058
+ // on the backend. Enables content-addressable caching so that identical
1059
+ // WidgetAskAI configurations return cached responses instantly without
1060
+ // re-invoking the LLM.
1061
+ widgetAskAICacheKey: {
1062
1062
  type: String,
1063
1063
  sparse: true,
1064
1064
  unique: true
package/package.json CHANGED
@@ -3,7 +3,7 @@
3
3
  "publishConfig": {
4
4
  "access": "public"
5
5
  },
6
- "version": "1.28.5",
6
+ "version": "1.29.0",
7
7
  "description": "Utility functions for both browser and Node.js",
8
8
  "main": "dist/index.js",
9
9
  "module": "dist/index.mjs",