@alliance-droid/chat-widget 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. package/README.md +102 -0
  2. package/dist/client/connection.d.ts +39 -0
  3. package/dist/client/connection.js +198 -0
  4. package/dist/client/index.d.ts +6 -0
  5. package/dist/client/index.js +6 -0
  6. package/dist/components/ChatInput.svelte +73 -0
  7. package/dist/components/ChatInput.svelte.d.ts +9 -0
  8. package/dist/components/ChatPanel.svelte +117 -0
  9. package/dist/components/ChatPanel.svelte.d.ts +20 -0
  10. package/dist/components/ChatWidget.svelte +160 -0
  11. package/dist/components/ChatWidget.svelte.d.ts +4 -0
  12. package/dist/components/MessageBubble.svelte +60 -0
  13. package/dist/components/MessageBubble.svelte.d.ts +8 -0
  14. package/dist/components/MessageList.svelte +58 -0
  15. package/dist/components/MessageList.svelte.d.ts +10 -0
  16. package/dist/components/SupportChat.svelte +133 -0
  17. package/dist/components/SupportChat.svelte.d.ts +4 -0
  18. package/dist/index.d.ts +15 -0
  19. package/dist/index.js +17 -0
  20. package/dist/server/ai/anthropic.d.ts +16 -0
  21. package/dist/server/ai/anthropic.js +109 -0
  22. package/dist/server/ai/openai.d.ts +16 -0
  23. package/dist/server/ai/openai.js +112 -0
  24. package/dist/server/ai/provider.d.ts +21 -0
  25. package/dist/server/ai/provider.js +6 -0
  26. package/dist/server/ai/xai.d.ts +16 -0
  27. package/dist/server/ai/xai.js +113 -0
  28. package/dist/server/escalation.d.ts +18 -0
  29. package/dist/server/escalation.js +61 -0
  30. package/dist/server/handler.d.ts +8 -0
  31. package/dist/server/handler.js +235 -0
  32. package/dist/server/index.d.ts +11 -0
  33. package/dist/server/index.js +10 -0
  34. package/dist/server/session.d.ts +52 -0
  35. package/dist/server/session.js +115 -0
  36. package/dist/stores/chat.d.ts +18 -0
  37. package/dist/stores/chat.js +40 -0
  38. package/dist/stores/connection.d.ts +20 -0
  39. package/dist/stores/connection.js +52 -0
  40. package/dist/types.d.ts +79 -0
  41. package/dist/types.js +4 -0
  42. package/package.json +73 -0
@@ -0,0 +1,112 @@
1
/**
 * OpenAI Provider
 *
 * Fallback AI provider using OpenAI's Chat Completions API.
 */
const OPENAI_API_URL = 'https://api.openai.com/v1/chat/completions';
export class OpenAIProvider {
    apiKey;
    model;
    maxTokens;
    /**
     * @param {{ apiKey: string, model?: string, maxTokens?: number }} options
     *   `model` defaults to 'gpt-4o-mini', `maxTokens` to 1024.
     */
    constructor(options) {
        this.apiKey = options.apiKey;
        this.model = options.model || 'gpt-4o-mini';
        this.maxTokens = options.maxTokens || 1024;
    }
    /**
     * Stream an assistant reply for the conversation, yielding incremental
     * text deltas parsed from the server-sent-event stream.
     *
     * @param {Array} messages - chat history ({ sender, content } objects).
     * @param {string} [systemPrompt] - optional system prompt prepended to the
     *   API message list.
     * @throws {Error} when the API responds with a non-2xx status or the
     *   response has no body.
     */
    async *generateResponse(messages, systemPrompt) {
        const apiMessages = this.formatMessages(messages, systemPrompt);
        const response = await fetch(OPENAI_API_URL, {
            method: 'POST',
            headers: {
                'Content-Type': 'application/json',
                Authorization: `Bearer ${this.apiKey}`
            },
            body: JSON.stringify({
                model: this.model,
                messages: apiMessages,
                max_tokens: this.maxTokens,
                stream: true
            })
        });
        if (!response.ok) {
            const error = await response.text();
            throw new Error(`OpenAI API error: ${response.status} - ${error}`);
        }
        const reader = response.body?.getReader();
        if (!reader)
            throw new Error('No response body');
        const decoder = new TextDecoder();
        let buffer = '';
        // FIX: the reader was never released — returning early on '[DONE]'
        // (or the consumer abandoning the generator) left the HTTP stream
        // locked and open. cancel() in `finally` tears it down on every exit
        // path; it also releases the reader's lock.
        try {
            while (true) {
                const { done, value } = await reader.read();
                if (done)
                    break;
                buffer += decoder.decode(value, { stream: true });
                // SSE events are newline-delimited; keep any trailing partial
                // line in the buffer for the next chunk.
                const lines = buffer.split('\n');
                buffer = lines.pop() || '';
                for (const line of lines) {
                    if (!line.startsWith('data: '))
                        continue;
                    const data = line.slice(6);
                    if (data === '[DONE]')
                        return;
                    try {
                        const parsed = JSON.parse(data);
                        const content = parsed.choices?.[0]?.delta?.content;
                        if (content) {
                            yield content;
                        }
                    }
                    catch {
                        // Skip invalid JSON (keep-alives, partial frames)
                    }
                }
            }
        }
        finally {
            // Swallow errors from an already-closed stream.
            await reader.cancel().catch(() => { });
        }
    }
    /**
     * Produce a 1-2 sentence summary of the conversation (used when
     * escalating to a human). Falls back to a generic sentence on any
     * API failure rather than throwing.
     *
     * @param {Array} messages - chat history.
     * @returns {Promise<string>} the summary text.
     */
    async summarize(messages) {
        const conversationText = messages
            .map(m => `${m.sender}: ${m.content}`)
            .join('\n');
        const response = await fetch(OPENAI_API_URL, {
            method: 'POST',
            headers: {
                'Content-Type': 'application/json',
                Authorization: `Bearer ${this.apiKey}`
            },
            body: JSON.stringify({
                model: this.model,
                messages: [
                    {
                        role: 'system',
                        content: 'Summarize this customer support conversation in 1-2 sentences. Focus on what the customer needs help with.'
                    },
                    {
                        role: 'user',
                        content: conversationText
                    }
                ],
                max_tokens: 150
            })
        });
        if (!response.ok) {
            return 'Customer requested support assistance.';
        }
        const data = await response.json();
        return data.choices?.[0]?.message?.content || 'Customer requested support assistance.';
    }
    /**
     * Map internal messages to OpenAI's role/content format.
     * 'user' -> user, 'ai' -> assistant; other senders (system notices,
     * human agents) are dropped from the AI context.
     */
    formatMessages(messages, systemPrompt) {
        const apiMessages = [];
        if (systemPrompt) {
            apiMessages.push({ role: 'system', content: systemPrompt });
        }
        for (const msg of messages) {
            if (msg.sender === 'user') {
                apiMessages.push({ role: 'user', content: msg.content });
            }
            else if (msg.sender === 'ai') {
                apiMessages.push({ role: 'assistant', content: msg.content });
            }
        }
        return apiMessages;
    }
}
@@ -0,0 +1,21 @@
1
/**
 * AI Provider Interface
 *
 * Abstract interface for AI chat providers
 */
import type { Message } from '../../types.js';
export interface AIProvider {
    /**
     * Generate a response to the conversation, streamed as text chunks.
     * Implementations yield incremental deltas; callers accumulate them.
     */
    generateResponse(messages: Message[], systemPrompt?: string): AsyncGenerator<string, void, unknown>;
    /**
     * Generate a summary of the conversation (for escalation)
     */
    summarize(messages: Message[]): Promise<string>;
}
/** Common constructor options shared by all provider implementations. */
export interface AIProviderOptions {
    apiKey: string;
    // Provider-specific model name; each implementation supplies a default.
    model?: string;
    // Response token cap; implementations default this (1024 in shipped providers).
    maxTokens?: number;
}
@@ -0,0 +1,6 @@
1
/**
 * AI Provider Interface
 *
 * Abstract interface for AI chat providers.
 * Compiled output of a type-only module: the interfaces are erased at build
 * time, so the empty export only marks this file as an ES module.
 */
export {};
@@ -0,0 +1,16 @@
1
/**
 * xAI Grok Provider
 *
 * Primary AI provider using xAI's Grok model
 */
import type { AIProvider, AIProviderOptions } from './provider.js';
import type { Message } from '../../types.js';
export declare class XAIProvider implements AIProvider {
    private apiKey;
    private model;
    private maxTokens;
    constructor(options: AIProviderOptions);
    /** Stream an assistant reply as incremental text chunks. */
    generateResponse(messages: Message[], systemPrompt?: string): AsyncGenerator<string, void, unknown>;
    /** Summarize the conversation in 1-2 sentences (used for escalation). */
    summarize(messages: Message[]): Promise<string>;
    // Maps internal messages to the API's role/content format.
    private formatMessages;
}
@@ -0,0 +1,113 @@
1
/**
 * xAI Grok Provider
 *
 * Primary AI provider using xAI's Grok model (OpenAI-compatible API).
 */
const XAI_API_URL = 'https://api.x.ai/v1/chat/completions';
export class XAIProvider {
    apiKey;
    model;
    maxTokens;
    /**
     * @param {{ apiKey: string, model?: string, maxTokens?: number }} options
     *   `model` defaults to 'grok-2-latest', `maxTokens` to 1024.
     */
    constructor(options) {
        this.apiKey = options.apiKey;
        this.model = options.model || 'grok-2-latest';
        this.maxTokens = options.maxTokens || 1024;
    }
    /**
     * Stream an assistant reply for the conversation, yielding incremental
     * text deltas parsed from the server-sent-event stream.
     *
     * @param {Array} messages - chat history ({ sender, content } objects).
     * @param {string} [systemPrompt] - optional system prompt prepended to the
     *   API message list.
     * @throws {Error} when the API responds with a non-2xx status or the
     *   response has no body.
     */
    async *generateResponse(messages, systemPrompt) {
        const apiMessages = this.formatMessages(messages, systemPrompt);
        const response = await fetch(XAI_API_URL, {
            method: 'POST',
            headers: {
                'Content-Type': 'application/json',
                Authorization: `Bearer ${this.apiKey}`
            },
            body: JSON.stringify({
                model: this.model,
                messages: apiMessages,
                max_tokens: this.maxTokens,
                stream: true
            })
        });
        if (!response.ok) {
            const error = await response.text();
            throw new Error(`xAI API error: ${response.status} - ${error}`);
        }
        const reader = response.body?.getReader();
        if (!reader)
            throw new Error('No response body');
        const decoder = new TextDecoder();
        let buffer = '';
        // FIX: the reader was never released — returning early on '[DONE]'
        // (or the consumer abandoning the generator) left the HTTP stream
        // locked and open. cancel() in `finally` tears it down on every exit
        // path; it also releases the reader's lock.
        try {
            while (true) {
                const { done, value } = await reader.read();
                if (done)
                    break;
                buffer += decoder.decode(value, { stream: true });
                // SSE events are newline-delimited; keep any trailing partial
                // line in the buffer for the next chunk.
                const lines = buffer.split('\n');
                buffer = lines.pop() || '';
                for (const line of lines) {
                    if (!line.startsWith('data: '))
                        continue;
                    const data = line.slice(6);
                    if (data === '[DONE]')
                        return;
                    try {
                        const parsed = JSON.parse(data);
                        const content = parsed.choices?.[0]?.delta?.content;
                        if (content) {
                            yield content;
                        }
                    }
                    catch {
                        // Skip invalid JSON (keep-alives, partial frames)
                    }
                }
            }
        }
        finally {
            // Swallow errors from an already-closed stream.
            await reader.cancel().catch(() => { });
        }
    }
    /**
     * Produce a 1-2 sentence summary of the conversation (used when
     * escalating to a human). Falls back to a generic sentence on any
     * API failure rather than throwing.
     *
     * @param {Array} messages - chat history.
     * @returns {Promise<string>} the summary text.
     */
    async summarize(messages) {
        const conversationText = messages
            .map(m => `${m.sender}: ${m.content}`)
            .join('\n');
        const response = await fetch(XAI_API_URL, {
            method: 'POST',
            headers: {
                'Content-Type': 'application/json',
                Authorization: `Bearer ${this.apiKey}`
            },
            body: JSON.stringify({
                model: this.model,
                messages: [
                    {
                        role: 'system',
                        content: 'Summarize this customer support conversation in 1-2 sentences. Focus on what the customer needs help with.'
                    },
                    {
                        role: 'user',
                        content: conversationText
                    }
                ],
                max_tokens: 150
            })
        });
        if (!response.ok) {
            return 'Customer requested support assistance.';
        }
        const data = await response.json();
        return data.choices?.[0]?.message?.content || 'Customer requested support assistance.';
    }
    /**
     * Map internal messages to the API's role/content format.
     * 'user' -> user, 'ai' -> assistant.
     */
    formatMessages(messages, systemPrompt) {
        const apiMessages = [];
        if (systemPrompt) {
            apiMessages.push({ role: 'system', content: systemPrompt });
        }
        for (const msg of messages) {
            if (msg.sender === 'user') {
                apiMessages.push({ role: 'user', content: msg.content });
            }
            else if (msg.sender === 'ai') {
                apiMessages.push({ role: 'assistant', content: msg.content });
            }
            // Skip system and human messages for AI context
        }
        return apiMessages;
    }
}
@@ -0,0 +1,18 @@
1
/**
 * Escalation Manager
 *
 * Handles escalation to human support via Telegram
 */
import type { ChatSession, EscalationConfig } from '../types.js';
export declare class EscalationManager {
    private config;
    constructor(config: EscalationConfig);
    /**
     * Send escalation notification to Telegram.
     * Resolves true on success, false on any API/network failure
     * (never throws).
     */
    notifySupport(session: ChatSession, summary: string): Promise<boolean>;
    /**
     * Escape Markdown special characters for Telegram
     */
    private escapeMarkdown;
}
@@ -0,0 +1,61 @@
1
+ /**
2
+ * Escalation Manager
3
+ *
4
+ * Handles escalation to human support via Telegram
5
+ */
6
+ const TELEGRAM_API_URL = 'https://api.telegram.org/bot';
7
+ export class EscalationManager {
8
+ config;
9
+ constructor(config) {
10
+ this.config = config;
11
+ }
12
+ /**
13
+ * Send escalation notification to Telegram
14
+ */
15
+ async notifySupport(session, summary) {
16
+ const joinUrl = this.config.joinUrl(session.id);
17
+ const recentMessages = session.messages
18
+ .slice(-5)
19
+ .map(m => {
20
+ const icon = m.sender === 'user' ? '👤' : m.sender === 'ai' ? '🤖' : '📢';
21
+ return `${icon} ${m.content.slice(0, 100)}${m.content.length > 100 ? '...' : ''}`;
22
+ })
23
+ .join('\n');
24
+ const message = `🆘 *Support Request*
25
+
26
+ *Summary:* ${this.escapeMarkdown(summary)}
27
+
28
+ *Recent messages:*
29
+ ${this.escapeMarkdown(recentMessages)}
30
+
31
+ ➡️ [Join Chat](${joinUrl})`;
32
+ try {
33
+ const response = await fetch(`${TELEGRAM_API_URL}${this.config.telegram.botToken}/sendMessage`, {
34
+ method: 'POST',
35
+ headers: { 'Content-Type': 'application/json' },
36
+ body: JSON.stringify({
37
+ chat_id: this.config.telegram.chatId,
38
+ text: message,
39
+ parse_mode: 'Markdown',
40
+ disable_web_page_preview: true
41
+ })
42
+ });
43
+ if (!response.ok) {
44
+ const error = await response.text();
45
+ console.error('[Escalation] Telegram API error:', error);
46
+ return false;
47
+ }
48
+ return true;
49
+ }
50
+ catch (err) {
51
+ console.error('[Escalation] Failed to send Telegram notification:', err);
52
+ return false;
53
+ }
54
+ }
55
+ /**
56
+ * Escape Markdown special characters for Telegram
57
+ */
58
+ escapeMarkdown(text) {
59
+ return text.replace(/[_*[\]()~`>#+\-=|{}.!]/g, '\\$&');
60
+ }
61
+ }
@@ -0,0 +1,8 @@
1
/**
 * WebSocket Chat Handler
 *
 * Main server-side handler for chat connections
 */
import type { WebSocket } from 'ws';
import type { ChatServerConfig } from '../types.js';
/**
 * Build a connection handler bound to one AI provider, one session store and
 * (optionally) one escalation channel. Call the returned function once per
 * accepted WebSocket.
 */
export declare function createChatHandler(config: ChatServerConfig): (ws: WebSocket) => void;
@@ -0,0 +1,235 @@
1
/**
 * WebSocket Chat Handler
 *
 * Main server-side handler for chat connections
 */
import { XAIProvider } from './ai/xai.js';
import { OpenAIProvider } from './ai/openai.js';
import { AnthropicProvider } from './ai/anthropic.js';
import { SessionManager } from './session.js';
import { EscalationManager } from './escalation.js';
/**
 * Build a per-server chat handler.
 *
 * The AI provider, session manager, escalation manager and the
 * session-id -> connections map are shared (via closure) across every
 * connection handled by the returned function.
 *
 * @param config - server configuration (`ai`, optional `sessionTTL`,
 *   optional `escalation`).
 * @returns a function to invoke once per accepted WebSocket.
 */
export function createChatHandler(config) {
    const aiProvider = createAIProvider(config.ai);
    const sessionManager = new SessionManager(config.sessionTTL);
    const escalationManager = config.escalation
        ? new EscalationManager(config.escalation)
        : null;
    // Track active connections by session (sessionId -> connection[]);
    // a session can have both the end user and a support agent attached.
    const connections = new Map();
    return function handleConnection(ws) {
        // Set lazily: on the first chat message (user) or on 'join' (support).
        let clientConnection = null;
        ws.on('message', async (data) => {
            try {
                const message = JSON.parse(data.toString());
                await handleMessage(message);
            }
            catch (err) {
                console.error('[ChatHandler] Error handling message:', err);
                sendError('Failed to process message');
            }
        });
        ws.on('close', () => {
            if (clientConnection) {
                removeConnection(clientConnection);
            }
        });
        // Dispatch on the client protocol's message type. Unknown types are
        // silently ignored (no default case).
        async function handleMessage(message) {
            switch (message.type) {
                case 'message':
                    await handleChatMessage(message);
                    break;
                case 'escalate':
                    await handleEscalation();
                    break;
                case 'join':
                    await handleSupportJoin(message);
                    break;
                case 'typing':
                    // Relay typing indicator; no-op until a session exists
                    // (broadcastToSession ignores an undefined session id).
                    broadcastToSession(clientConnection?.sessionId, {
                        type: 'typing',
                        sender: clientConnection?.isSupport ? 'human' : 'user'
                    });
                    break;
            }
        }
        // Store the incoming chat message, echo it to all session
        // participants, and (while the session is still AI-handled) reply.
        async function handleChatMessage(message) {
            if (!clientConnection) {
                // New connection - create session
                const session = sessionManager.create();
                clientConnection = { ws, sessionId: session.id, isSupport: false };
                addConnection(clientConnection);
                // Send connected message with session ID
                send({ type: 'connected', sessionId: session.id });
            }
            const session = sessionManager.get(clientConnection.sessionId);
            if (!session) {
                sendError('Session not found');
                return;
            }
            // Store user message
            const userMessage = {
                id: generateId(),
                content: message.content || '',
                sender: clientConnection.isSupport ? 'human' : 'user',
                timestamp: Date.now()
            };
            sessionManager.addMessage(session.id, userMessage);
            // Broadcast user message to all participants
            broadcastToSession(session.id, {
                type: 'message',
                ...userMessage
            });
            // If not escalated, generate AI response
            if (session.status === 'ai' && !clientConnection.isSupport) {
                await generateAIResponse(session);
            }
        }
        // Stream an AI reply: repeated 'message' frames share one id with
        // streaming=true and growing content, then a final streaming=false
        // frame once the full reply is stored.
        async function generateAIResponse(session) {
            const messageId = generateId();
            // Send typing indicator
            broadcastToSession(session.id, { type: 'typing', sender: 'ai' });
            try {
                let fullContent = '';
                for await (const chunk of aiProvider.generateResponse(session.messages, config.ai.systemPrompt)) {
                    fullContent += chunk;
                    // Stream partial response
                    broadcastToSession(session.id, {
                        type: 'message',
                        id: messageId,
                        content: fullContent,
                        sender: 'ai',
                        timestamp: Date.now(),
                        streaming: true
                    });
                }
                // Store final message
                const aiMessage = {
                    id: messageId,
                    content: fullContent,
                    sender: 'ai',
                    timestamp: Date.now()
                };
                sessionManager.addMessage(session.id, aiMessage);
                // Send final message (streaming = false)
                broadcastToSession(session.id, {
                    type: 'message',
                    ...aiMessage,
                    streaming: false
                });
            }
            catch (err) {
                console.error('[ChatHandler] AI generation error:', err);
                sendError('Failed to generate response');
            }
        }
        // Move the session to 'escalating' and ping human support; on any
        // failure the status is rolled back to 'ai' so the bot keeps replying.
        async function handleEscalation() {
            if (!clientConnection)
                return;
            const session = sessionManager.get(clientConnection.sessionId);
            if (!session) {
                sendError('Session not found');
                return;
            }
            sessionManager.setStatus(session.id, 'escalating');
            // Notify via Telegram
            if (escalationManager) {
                const summary = await aiProvider.summarize(session.messages);
                const sent = await escalationManager.notifySupport(session, summary);
                if (sent) {
                    broadcastToSession(session.id, {
                        type: 'status',
                        content: 'Support has been notified. Please wait...'
                    });
                }
                else {
                    sendError('Failed to notify support');
                    sessionManager.setStatus(session.id, 'ai');
                }
            }
            else {
                sendError('Escalation not configured');
                sessionManager.setStatus(session.id, 'ai');
            }
        }
        // Attach this socket to an existing session as the support agent and
        // replay the history to it.
        // NOTE(review): the only credential here is knowledge of the session
        // id — confirm config.escalation.joinUrl produces unguessable ids.
        async function handleSupportJoin(message) {
            const sessionId = message.sessionId;
            if (!sessionId) {
                sendError('Session ID required');
                return;
            }
            const session = sessionManager.get(sessionId);
            if (!session) {
                sendError('Session not found');
                return;
            }
            // Try to claim the session
            const agentId = generateId();
            const claimed = sessionManager.assignSupport(sessionId, agentId);
            // NOTE(review): when the claim fails but supportAgentId is unset
            // the join still proceeds — verify this is the intended fallback.
            if (!claimed && session.supportAgentId) {
                sendError('Session already claimed by another agent');
                return;
            }
            // Register support connection
            clientConnection = { ws, sessionId, isSupport: true };
            addConnection(clientConnection);
            // Send session history to support
            send({ type: 'connected', sessionId });
            for (const msg of session.messages) {
                send({ type: 'message', ...msg });
            }
            // Notify user that support joined
            broadcastToSession(sessionId, { type: 'escalated' });
        }
        // Serialize and send to THIS socket only, if it is still open.
        function send(message) {
            if (ws.readyState === ws.OPEN) {
                ws.send(JSON.stringify(message));
            }
        }
        function sendError(error) {
            send({ type: 'error', error });
        }
        // Register a connection under its session id.
        function addConnection(conn) {
            const existing = connections.get(conn.sessionId) || [];
            existing.push(conn);
            connections.set(conn.sessionId, existing);
        }
        // Drop a connection; delete the map entry when the session has no
        // sockets left so the map does not grow unboundedly.
        function removeConnection(conn) {
            const existing = connections.get(conn.sessionId) || [];
            const filtered = existing.filter(c => c.ws !== conn.ws);
            if (filtered.length > 0) {
                connections.set(conn.sessionId, filtered);
            }
            else {
                connections.delete(conn.sessionId);
            }
        }
        // Send to every open socket attached to the session (user + support).
        // A missing/undefined session id is a silent no-op.
        function broadcastToSession(sessionId, message) {
            if (!sessionId)
                return;
            const sessionConnections = connections.get(sessionId) || [];
            for (const conn of sessionConnections) {
                if (conn.ws.readyState === conn.ws.OPEN) {
                    conn.ws.send(JSON.stringify(message));
                }
            }
        }
    };
}
218
/**
 * Instantiate the configured AI provider.
 *
 * Falls back to XAIProvider when `config.provider` is missing or
 * unrecognized ('xai' is the default case).
 *
 * @param config - the `ai` section of the server config
 *   (provider, apiKey, model, maxTokens).
 */
function createAIProvider(config) {
    const options = {
        apiKey: config.apiKey,
        model: config.model,
        // FIX: forward maxTokens — it was silently dropped, so a configured
        // value could never override the providers' 1024 default.
        // (undefined still falls through to each provider's default.)
        maxTokens: config.maxTokens
    };
    switch (config.provider) {
        case 'openai':
            return new OpenAIProvider(options);
        case 'anthropic':
            return new AnthropicProvider(options);
        case 'xai':
        default:
            return new XAIProvider(options);
    }
}
233
/**
 * Generate a reasonably unique id: epoch milliseconds plus a short base-36
 * random suffix. Used for message ids, session claims and agent ids.
 */
function generateId() {
    // slice() replaces the deprecated String.prototype.substr().
    return `${Date.now()}-${Math.random().toString(36).slice(2, 11)}`;
}
@@ -0,0 +1,11 @@
1
/**
 * Server-side exports
 */
// Core runtime pieces: the WebSocket handler factory, session storage
// and the Telegram escalation channel.
export { createChatHandler } from './handler.js';
export { SessionManager } from './session.js';
export { EscalationManager } from './escalation.js';
// AI provider implementations.
export { XAIProvider } from './ai/xai.js';
export { OpenAIProvider } from './ai/openai.js';
export { AnthropicProvider } from './ai/anthropic.js';
// Type-only re-exports (erased at runtime).
export type { AIProvider, AIProviderOptions } from './ai/provider.js';
export type { ChatServerConfig, AIProviderConfig, EscalationConfig, ChatSession } from '../types.js';
@@ -0,0 +1,10 @@
1
/**
 * Server-side exports
 */
// Core runtime pieces: the WebSocket handler factory, session storage
// and the Telegram escalation channel.
export { createChatHandler } from './handler.js';
export { SessionManager } from './session.js';
export { EscalationManager } from './escalation.js';
// AI Providers
export { XAIProvider } from './ai/xai.js';
export { OpenAIProvider } from './ai/openai.js';
export { AnthropicProvider } from './ai/anthropic.js';
@@ -0,0 +1,52 @@
1
/**
 * Session Manager
 *
 * In-memory session storage (Redis adapter TODO)
 *
 * Sessions are evicted by a periodic cleanup sweep once they exceed the
 * configured TTL; call destroy() on shutdown to stop the timer.
 */
import type { ChatSession, Message, ChatStatus } from '../types.js';
export declare class SessionManager {
    private sessions;
    private ttl;
    private cleanupInterval;
    // ttlMs: session time-to-live in milliseconds; implementation-defined
    // default when omitted.
    constructor(ttlMs?: number);
    /**
     * Create a new session
     */
    create(userId?: string, userMeta?: Record<string, unknown>): ChatSession;
    /**
     * Get a session by ID (undefined when missing or already evicted).
     */
    get(id: string): ChatSession | undefined;
    /**
     * Add a message to a session
     */
    addMessage(sessionId: string, message: Message): void;
    /**
     * Update session status
     */
    setStatus(sessionId: string, status: ChatStatus): void;
    /**
     * Assign a support agent to a session.
     * Returns whether the claim succeeded.
     */
    assignSupport(sessionId: string, agentId: string): boolean;
    /**
     * Delete a session
     */
    delete(id: string): void;
    /**
     * Cleanup expired sessions
     */
    private cleanup;
    /**
     * Check if session is expired
     */
    private isExpired;
    /**
     * Generate unique session ID
     */
    private generateId;
    /**
     * Cleanup on shutdown
     */
    destroy(): void;
}