@newfold/wp-module-ai-chat 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. package/README.md +98 -0
  2. package/package.json +51 -0
  3. package/src/components/chat/ChatHeader.jsx +63 -0
  4. package/src/components/chat/ChatHistoryDropdown.jsx +182 -0
  5. package/src/components/chat/ChatHistoryList.jsx +257 -0
  6. package/src/components/chat/ChatInput.jsx +157 -0
  7. package/src/components/chat/ChatMessage.jsx +157 -0
  8. package/src/components/chat/ChatMessages.jsx +137 -0
  9. package/src/components/chat/WelcomeScreen.jsx +115 -0
  10. package/src/components/icons/CloseIcon.jsx +27 -0
  11. package/src/components/icons/SparklesOutlineIcon.jsx +30 -0
  12. package/src/components/icons/index.js +5 -0
  13. package/src/components/ui/AILogo.jsx +47 -0
  14. package/src/components/ui/BluBetaHeading.jsx +18 -0
  15. package/src/components/ui/ErrorAlert.jsx +30 -0
  16. package/src/components/ui/HeaderBar.jsx +34 -0
  17. package/src/components/ui/SuggestionButton.jsx +28 -0
  18. package/src/components/ui/ToolExecutionList.jsx +264 -0
  19. package/src/components/ui/TypingIndicator.jsx +268 -0
  20. package/src/constants/nfdAgents/input.js +13 -0
  21. package/src/constants/nfdAgents/storageKeys.js +102 -0
  22. package/src/constants/nfdAgents/typingStatus.js +40 -0
  23. package/src/constants/nfdAgents/websocket.js +44 -0
  24. package/src/hooks/useAIChat.js +432 -0
  25. package/src/hooks/useNfdAgentsWebSocket.js +964 -0
  26. package/src/index.js +66 -0
  27. package/src/services/mcpClient.js +433 -0
  28. package/src/services/openaiClient.js +416 -0
  29. package/src/styles/_branding.scss +151 -0
  30. package/src/styles/_history.scss +180 -0
  31. package/src/styles/_input.scss +170 -0
  32. package/src/styles/_messages.scss +272 -0
  33. package/src/styles/_mixins.scss +21 -0
  34. package/src/styles/_typing-indicator.scss +162 -0
  35. package/src/styles/_ui.scss +173 -0
  36. package/src/styles/_vars.scss +103 -0
  37. package/src/styles/_welcome.scss +81 -0
  38. package/src/styles/app.scss +10 -0
  39. package/src/utils/helpers.js +75 -0
  40. package/src/utils/markdownParser.js +319 -0
  41. package/src/utils/nfdAgents/archiveConversation.js +82 -0
  42. package/src/utils/nfdAgents/chatHistoryList.js +130 -0
  43. package/src/utils/nfdAgents/configFetcher.js +137 -0
  44. package/src/utils/nfdAgents/greeting.js +55 -0
  45. package/src/utils/nfdAgents/jwtUtils.js +59 -0
  46. package/src/utils/nfdAgents/messageHandler.js +328 -0
  47. package/src/utils/nfdAgents/storage.js +112 -0
  48. package/src/utils/nfdAgents/typingIndicatorToolDisplay.js +180 -0
  49. package/src/utils/nfdAgents/url.js +101 -0
  50. package/src/utils/restApi.js +87 -0
  51. package/src/utils/sanitizeHtml.js +94 -0
@@ -0,0 +1,416 @@
1
+ /* eslint-disable no-console */
2
+ /**
3
+ * OpenAI Client that proxies requests through WordPress REST API
4
+ *
5
+ * This client uses the OpenAI SDK configured to route requests through
6
+ * the WordPress proxy endpoint, which then forwards to Cloudflare AI Gateway
7
+ * or direct OpenAI API.
8
+ *
9
+ * Configurable for use across different modules.
10
+ */
11
+ import OpenAI from "openai";
12
+
13
// Model used whenever the caller does not name one explicitly.
const DEFAULT_MODEL = "gpt-4o-mini";

/**
 * Error type raised for failures reported by the OpenAI proxy.
 *
 * Carries the HTTP status and OpenAI error code alongside the message so
 * callers can branch on them.
 *
 * @augments Error
 */
export class OpenAIError extends Error {
  /**
   * @param {string} message Human-readable error description
   * @param {?number} status HTTP status code, when available
   * @param {?string} code OpenAI error code, when available
   */
  constructor(message, status = null, code = null) {
    super(message);
    Object.assign(this, { name: "OpenAIError", status, code });
  }
}
26
+
27
/**
 * OpenAI client that proxies requests through the WordPress REST API.
 *
 * All authentication is handled server-side: requests go to the WordPress
 * proxy endpoint (authenticated with a REST nonce header), which forwards
 * them to Cloudflare AI Gateway or the OpenAI API directly.
 *
 * @param {Object} options Configuration options
 * @param {string} options.configKey - Window config object name (default: 'nfdAIChat')
 * @param {string} options.apiPath - REST API path suffix (default: 'ai')
 * @param {string} options.mode - Mode for system prompt selection (default: 'help')
 */
export class CloudflareOpenAIClient {
  constructor(options = {}) {
    this.configKey = options.configKey || "nfdAIChat";
    this.apiPath = options.apiPath || "ai";
    this.mode = options.mode || "help";
    this.openai = null; // lazily-created OpenAI SDK instance (see getOpenAIClient)
    this.config = null; // cached WordPress config (see getConfig)
  }

  /**
   * Parse a tool-call arguments JSON string defensively.
   *
   * Streamed tool-call arguments are assembled from fragments and may be
   * truncated or malformed; falling back to an empty object (with a warning)
   * keeps one bad tool call from aborting the whole completion.
   *
   * @param {string} raw Raw arguments string from the API (may be empty)
   * @return {Object} Parsed arguments, or {} when raw is empty/invalid
   */
  static safeParseArguments(raw) {
    if (!raw) {
      return {};
    }
    try {
      return JSON.parse(raw);
    } catch (parseError) {
      console.warn("Failed to parse tool call arguments as JSON:", parseError);
      return {};
    }
  }

  /**
   * Get configuration from WordPress.
   *
   * Reads the localized script object from `window[this.configKey]` on first
   * call and caches the result; outside a browser (or when the object is
   * missing) all fields fall back to empty strings.
   *
   * @return {Object} Configuration object with nonce, restUrl, homeUrl
   */
  getConfig() {
    if (this.config) {
      return this.config;
    }

    // Get config from WordPress localized script
    if (typeof window !== "undefined" && window[this.configKey]) {
      this.config = {
        nonce: window[this.configKey].nonce,
        restUrl: window[this.configKey].restUrl,
        homeUrl: window[this.configKey].homeUrl,
      };
    } else {
      this.config = {
        nonce: "",
        restUrl: "",
        homeUrl: "",
      };
    }

    return this.config;
  }

  /**
   * Initialize (lazily) and return the OpenAI SDK client.
   *
   * The SDK is pointed at the WordPress proxy endpoint; the real API key
   * lives on the server, so a dummy key is used here.
   *
   * @return {OpenAI} OpenAI client instance
   */
  getOpenAIClient() {
    if (this.openai) {
      return this.openai;
    }

    const config = this.getConfig();

    // Use WordPress proxy endpoint - all authentication handled server-side
    this.openai = new OpenAI({
      apiKey: "proxy", // Dummy key - real key is on the server
      baseURL: `${config.restUrl}${this.apiPath}`,
      dangerouslyAllowBrowser: true,
      defaultHeaders: {
        "X-WP-Nonce": config.nonce,
      },
    });

    return this.openai;
  }

  /**
   * Create a chat completion request (non-streaming).
   *
   * @param {Object} request Chat completion request params (model, messages,
   *                         tools, tool_choice, max_tokens, temperature, mode)
   * @return {Promise<Object>} Chat completion response
   * @throws {OpenAIError} When the underlying request fails
   */
  async createChatCompletion(request) {
    try {
      const openai = this.getOpenAIClient();
      const response = await openai.chat.completions.create({
        model: request.model || DEFAULT_MODEL,
        messages: request.messages,
        tools: request.tools,
        tool_choice: request.tool_choice,
        stream: false,
        max_tokens: request.max_tokens,
        temperature: request.temperature,
        // Non-standard param consumed by the WordPress proxy for system-prompt selection.
        mode: request.mode || this.mode,
      });

      return response;
    } catch (error) {
      throw new OpenAIError(error.message || "OpenAI API request failed", error.status, error.code);
    }
  }

  /**
   * Create a streaming chat completion.
   *
   * Emits `reasoning`, `content`, and `tool_calls` chunks via onChunk as they
   * arrive, then calls onComplete with the full message, the finalized tool
   * calls (or null), and usage stats. Errors are routed to onError rather
   * than thrown.
   *
   * @param {Object} request Chat completion request params
   * @param {Function} onChunk Callback for each chunk
   * @param {Function} onComplete Callback when complete
   * @param {Function} onError Callback for errors
   * @return {Promise<void>}
   */
  async createStreamingCompletion(request, onChunk, onComplete, onError) {
    try {
      const openai = this.getOpenAIClient();
      const stream = await openai.chat.completions.create({
        ...request,
        messages: request.messages,
        stream: true,
        stream_options: { include_usage: true },
        mode: request.mode || this.mode,
      });

      let fullMessage = "";
      let usage = null;
      // Tool calls accumulate keyed by stream index; arguments arrive in fragments.
      const toolCallsInProgress = {};

      let finishReason = null;

      for await (const chunk of stream) {
        const delta = chunk.choices[0]?.delta;

        if (delta?.reasoning) {
          onChunk({
            type: "reasoning",
            content: delta.reasoning,
          });
        }

        if (delta?.content) {
          fullMessage += delta.content;
          onChunk({
            type: "content",
            content: delta.content,
          });
        }

        // Handle streaming tool calls
        if (delta?.tool_calls) {
          for (const toolCall of delta.tool_calls) {
            const index = toolCall.index;

            if (!toolCallsInProgress[index]) {
              toolCallsInProgress[index] = {
                id: toolCall.id || "",
                type: "function",
                function: {
                  name: toolCall.function?.name || "",
                  arguments: "",
                },
              };
            }

            if (toolCall.id) {
              toolCallsInProgress[index].id = toolCall.id;
            }

            if (toolCall.function?.name) {
              toolCallsInProgress[index].function.name = toolCall.function.name;
            }

            if (toolCall.function?.arguments) {
              toolCallsInProgress[index].function.arguments += toolCall.function.arguments;
            }
          }

          onChunk({
            type: "tool_calls",
            tool_calls: Object.values(toolCallsInProgress),
          });
        }

        if (chunk.choices[0]?.finish_reason) {
          finishReason = chunk.choices[0].finish_reason;
        }

        // Usage arrives in a separate final chunk after finish_reason
        if (chunk.usage) {
          usage = chunk.usage;
        }
      }

      // Fallback: SDK stores usage on the stream object after iteration
      if (!usage && stream.usage) {
        usage = stream.usage;
      }

      // Log usage exactly once (previously logged both inside the loop and here,
      // producing duplicate lines whenever usage arrived in a chunk).
      if (usage) {
        console.log(
          `[Token Usage] prompt: ${usage.prompt_tokens} | completion: ${usage.completion_tokens} | total: ${usage.total_tokens}`
        );
      }

      // Stream ended — call onComplete with collected data
      if (finishReason) {
        const finalToolCalls = Object.values(toolCallsInProgress).map((tc) => ({
          id: tc.id,
          name: tc.function.name,
          // Defensive parse: malformed streamed arguments degrade to {} instead
          // of throwing and converting the whole completion into an error.
          arguments: CloudflareOpenAIClient.safeParseArguments(tc.function.arguments),
        }));

        await onComplete(fullMessage, finalToolCalls.length > 0 ? finalToolCalls : null, usage);
      }
    } catch (error) {
      onError(
        new OpenAIError(error.message || "Streaming request failed", error.status, error.code)
      );
    }
  }

  /**
   * Convert chat messages to OpenAI format.
   * Optimizes token usage by truncating assistant content and summarizing tool results.
   *
   * @param {Array} messages Array of chat messages
   * @return {Array} OpenAI formatted messages
   */
  convertMessagesToOpenAI(messages) {
    const openaiMessages = [];

    for (const message of messages) {
      if (message.role === "system" || message.role === "user") {
        openaiMessages.push({
          role: message.role,
          content: message.content ?? "",
        });
      } else if (message.role === "assistant") {
        const hasToolCalls = message.toolCalls && message.toolCalls.length > 0;
        const hasContent =
          message.content !== null && message.content !== undefined && message.content !== "";

        // Skip invalid assistant messages
        if (!hasContent && !hasToolCalls) {
          console.warn("Skipping invalid assistant message with no content and no tool calls");
          continue;
        }

        // Truncate assistant content when tool calls present to save tokens
        let content = message.content ?? "";
        if (hasToolCalls && content.length > 200) {
          content = content.substring(0, 200) + "...";
        }

        const assistantMessage = {
          role: "assistant",
          // OpenAI requires null (not "") for content when only tool calls exist.
          content: hasToolCalls ? content || null : content,
        };

        if (hasToolCalls) {
          assistantMessage.tool_calls = message.toolCalls.map((call) => ({
            id: call.id,
            type: "function",
            function: {
              name: call.name,
              arguments:
                typeof call.arguments === "string"
                  ? call.arguments
                  : JSON.stringify(call.arguments),
            },
          }));
        }

        openaiMessages.push(assistantMessage);

        // Add summarized tool results if present (save tokens by only sending status)
        if (hasToolCalls && message.toolResults && message.toolResults.length > 0) {
          for (const result of message.toolResults) {
            // Only emit results that match a tool call, or the API rejects the thread.
            const hasMatchingCall = message.toolCalls.some((call) => call.id === result.id);
            if (hasMatchingCall) {
              // Summarize result to save tokens - just status, not full content
              const summary = result.error ? `Error: ${result.error.substring(0, 100)}` : "Success";
              openaiMessages.push({
                role: "tool",
                content: summary,
                tool_call_id: result.id,
              });
            }
          }
        }
      }
    }

    return openaiMessages;
  }

  /**
   * Convert MCP tools to OpenAI tools format.
   *
   * @param {Array} mcpTools Array of MCP tools
   * @return {Array} OpenAI tools array
   */
  convertMCPToolsToOpenAI(mcpTools) {
    return mcpTools.map((tool) => ({
      type: "function",
      function: {
        name: tool.name,
        description: tool.description,
        parameters: tool.inputSchema,
      },
    }));
  }

  /**
   * Process tool calls from OpenAI response.
   *
   * @param {Array} toolCalls Raw tool calls from OpenAI
   * @return {Array} Processed tool calls with parsed arguments
   */
  processToolCalls(toolCalls) {
    return toolCalls.map((call) => ({
      id: call.id,
      name: call.function.name,
      // Defensive parse: a malformed arguments string yields {} instead of throwing.
      arguments: CloudflareOpenAIClient.safeParseArguments(call.function.arguments),
    }));
  }

  /**
   * Send a simple chat message.
   *
   * @param {string} message User message
   * @param {Array} context Previous messages for context
   * @param {Array} tools Available MCP tools
   * @return {Promise<Object>} Response with message and optional tool calls
   * @throws {OpenAIError} When the request fails or returns no choices
   */
  async sendMessage(message, context = [], tools = []) {
    const messages = this.convertMessagesToOpenAI([
      ...context,
      {
        id: `user-${Date.now()}`,
        role: "user",
        content: message,
        timestamp: new Date(),
      },
    ]);

    const request = {
      model: DEFAULT_MODEL,
      messages,
      tools: tools.length > 0 ? this.convertMCPToolsToOpenAI(tools) : undefined,
      tool_choice: tools.length > 0 ? "auto" : undefined,
      temperature: 0.1,
      max_tokens: 4000,
    };

    try {
      const response = await this.createChatCompletion(request);
      const choice = response.choices[0];

      if (!choice) {
        throw new OpenAIError("No response from OpenAI");
      }

      const result = {
        message: choice.message.content || "",
      };

      if (choice.message.tool_calls) {
        result.toolCalls = this.processToolCalls(choice.message.tool_calls);
      }

      return result;
    } catch (error) {
      if (error instanceof OpenAIError) {
        throw error;
      }
      // Preserve the underlying message instead of stringifying the Error object
      // (previously produced "Failed to send message: Error: ...").
      throw new OpenAIError(`Failed to send message: ${error.message ?? error}`);
    }
  }
}
402
+
403
/**
 * Build a new OpenAI client instance with the given configuration.
 *
 * @param {Object} options Configuration options (see CloudflareOpenAIClient)
 * @return {CloudflareOpenAIClient} Freshly constructed client instance
 */
export const createOpenAIClient = (options = {}) => new CloudflareOpenAIClient(options);
412
+
413
// Default singleton instance for backwards compatibility with existing imports.
// Uses the default options (configKey 'nfdAIChat', apiPath 'ai', mode 'help').
export const openaiClient = new CloudflareOpenAIClient();

export default openaiClient;
@@ -0,0 +1,151 @@
1
/* -------------------------------------------------------------------------- */

/* Brand-Specific Styling */

/* -------------------------------------------------------------------------- */

/* Base chat container with brand support */
.nfd-ai-chat-container {

  /* Brand class will be applied to this container */
  height: 100%;
  display: flex;
  flex-direction: column;
}

/* Bluehost Brand Styling — applied via either a brand class or a data attribute */
.nfd-brand-bluehost,
[data-brand="bluehost"] {

  /* Enhanced message bubbles for professional look */
  .nfd-ai-chat-message {

    &--user {

      .nfd-ai-chat-message__content {
        background: var(--nfd-ai-chat-color-primary);
        color: var(--nfd-ai-chat-color-white);
        font-weight: 500;
        box-shadow: var(--nfd-ai-chat-shadow-sm);
      }
    }

    &--assistant {

      .nfd-ai-chat-message__content {
        color: var(--nfd-ai-chat-color-text);
      }
    }
  }

  /* Enhanced input styling */
  .nfd-ai-chat-input {
    background: var(--nfd-ai-chat-color-white);
    border-top: 1px solid var(--nfd-ai-chat-color-border);
    box-shadow: 0 -2px 8px rgba(0, 0, 0, 0.04);

    .nfd-ai-chat-input__textarea:focus {
      outline: none;
    }

    /* .components-button prefix raises specificity over WP component defaults */
    .components-button.nfd-ai-chat-input__submit:hover:not(:disabled) {
      background: var(--nfd-ai-chat-color-primary-hover);
      transform: translateY(-1px);
      box-shadow: var(--nfd-ai-chat-shadow-md);
    }

    .components-button.nfd-ai-chat-input__submit:active:not(:disabled) {
      background: var(--nfd-ai-chat-color-primary-active);
      transform: translateY(0);
    }
  }

  /* Professional welcome screen */
  .nfd-ai-chat-welcome {

    .nfd-ai-chat-welcome__title {
      color: var(--nfd-ai-chat-color-text);
      font-weight: 600;
    }

    .nfd-ai-chat-welcome__subtitle {
      color: var(--nfd-ai-chat-color-text-secondary);
    }
  }

  /* Enhanced avatar with brand colors */
  .nfd-ai-chat-avatar {
    background: linear-gradient(135deg, var(--nfd-ai-chat-color-primary) 0%, var(--nfd-ai-chat-color-primary-hover) 100%);
    box-shadow: var(--nfd-ai-chat-shadow-md);
  }

  /* Professional suggestion buttons */
  .nfd-ai-chat-suggestion {

    &:hover {
      background: var(--nfd-ai-chat-color-primary-light-8);
      border-color: var(--nfd-ai-chat-color-primary);
      color: var(--nfd-ai-chat-color-primary);
      transform: translateY(-1px);
      box-shadow: var(--nfd-ai-chat-shadow-sm);
    }
  }

  /* Enhanced typing indicator dots (spans inside __dots) */
  .nfd-ai-chat-typing-indicator .nfd-ai-chat-typing-indicator__dots span {
    background: var(--nfd-ai-chat-color-primary);
  }

  /* Professional scrollbar and message area (WebKit-only scrollbar styling) */
  .nfd-ai-chat-messages {
    background: var(--nfd-ai-chat-color-white);

    &::-webkit-scrollbar {
      width: 8px;
    }

    &::-webkit-scrollbar-track {
      background: var(--nfd-ai-chat-color-background);
    }

    &::-webkit-scrollbar-thumb {
      background: var(--nfd-ai-chat-color-grey-light);
      border-radius: var(--nfd-ai-chat-radius-full);

      &:hover {
        background: var(--nfd-ai-chat-color-grey-medium);
      }
    }
  }

  /* Enhanced blockquote, link, and highlight styling inside rendered messages */
  .nfd-ai-chat-message__content {

    blockquote {
      border-left-color: var(--nfd-ai-chat-color-primary);
      background: var(--nfd-ai-chat-color-primary-light-8);
    }

    a {
      color: var(--nfd-ai-chat-color-primary);
      font-weight: 500;

      &:hover {
        color: var(--nfd-ai-chat-color-primary-hover);
        text-decoration: underline;
      }
    }

    mark {
      background-color: var(--nfd-ai-chat-color-primary-light-20);
      color: var(--nfd-ai-chat-color-primary-active);
    }
  }

  /* Professional error styling */
  .nfd-ai-chat-error-alert {
    /* NOTE(review): hard-coded rgba presumably matches --nfd-ai-chat-color-error at 8% — confirm against _vars.scss */
    background: rgba(214, 54, 56, 0.08);
    border-color: var(--nfd-ai-chat-color-error);
    border-radius: var(--nfd-ai-chat-radius-md);
  }
}