nextjs-chatbot-ui 1.6.0 → 1.7.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -76,6 +76,40 @@ OPENAI_API_KEY=your_openai_api_key_here
76
76
  CHROMADB_URL=http://localhost:8000
77
77
  ```
78
78
 
79
+ ### Step 2.5: Start ChromaDB (Automatic)
80
+
81
+ The package includes a script to automatically check and start ChromaDB:
82
+
83
+ ```bash
84
+ # Check and start ChromaDB automatically
85
+ npm run chromadb:check
86
+
87
+ # Or use Docker Compose
88
+ npm run chromadb:start
89
+
90
+ # Or manually with Docker
91
+ docker run -d --name chromadb -p 8000:8000 chromadb/chroma
92
+ ```
93
+
94
+ **What the script does:**
95
+ - ✅ Checks if ChromaDB npm package is installed (installs if missing)
96
+ - ✅ Checks if Docker is installed and running
97
+ - ✅ Checks if ChromaDB container is running
98
+ - ✅ Starts ChromaDB container if not running
99
+ - ✅ Verifies ChromaDB is accessible
100
+
101
+ **Alternative: Using Docker Compose**
102
+ ```bash
103
+ # Start ChromaDB
104
+ docker-compose up -d
105
+
106
+ # Stop ChromaDB
107
+ docker-compose stop
108
+
109
+ # Restart ChromaDB
110
+ docker-compose restart
111
+ ```
112
+
79
113
  ### Step 3: Use Components
80
114
 
81
115
  ```javascript
package/api/chat.ts CHANGED
@@ -2,6 +2,13 @@ import { NextRequest, NextResponse } from 'next/server';
2
2
  import { ChromaClient } from 'chromadb';
3
3
  import OpenAI from 'openai';
4
4
 
5
+ // Configuration constants
6
+ const MAX_RETRIES = 3;
7
+ const RETRY_DELAY = 1000;
8
+ const DEFAULT_N_RESULTS = 5;
9
+ const MAX_CONTEXT_LENGTH = 3000; // Max characters for context
10
+
11
+ // Initialize clients
5
12
  const openai = new OpenAI({
6
13
  apiKey: process.env.OPENAI_API_KEY,
7
14
  });
@@ -10,86 +17,258 @@ const chromaClient = new ChromaClient({
10
17
  path: process.env.CHROMADB_URL || 'http://localhost:8000',
11
18
  });
12
19
 
20
+ // Utility: Sleep function
21
+ const sleep = (ms: number) => new Promise(resolve => setTimeout(resolve, ms));
22
+
23
+ // Utility: Retry wrapper with exponential backoff
24
+ async function retryWithBackoff<T>(
25
+ fn: () => Promise<T>,
26
+ maxRetries: number = MAX_RETRIES,
27
+ delay: number = RETRY_DELAY
28
+ ): Promise<T> {
29
+ let lastError: Error;
30
+
31
+ for (let attempt = 0; attempt < maxRetries; attempt++) {
32
+ try {
33
+ return await fn();
34
+ } catch (error: any) {
35
+ lastError = error;
36
+
37
+ // Don't retry on certain errors
38
+ if (error.status === 400 || error.status === 401 || error.status === 403) {
39
+ throw error;
40
+ }
41
+
42
+ // Exponential backoff
43
+ const waitTime = delay * Math.pow(2, attempt);
44
+ console.warn(`[Chat] Attempt ${attempt + 1} failed, retrying in ${waitTime}ms...`, error.message);
45
+ await sleep(waitTime);
46
+ }
47
+ }
48
+
49
+ throw lastError!;
50
+ }
51
+
52
+ // Utility: Truncate context if too long
53
+ function truncateContext(contexts: string[]): string {
54
+ let contextText = contexts.join('\n\n');
55
+
56
+ if (contextText.length > MAX_CONTEXT_LENGTH) {
57
+ // Try to keep complete sentences
58
+ const truncated = contextText.substring(0, MAX_CONTEXT_LENGTH);
59
+ const lastPeriod = truncated.lastIndexOf('.');
60
+ if (lastPeriod > MAX_CONTEXT_LENGTH * 0.8) {
61
+ contextText = truncated.substring(0, lastPeriod + 1);
62
+ } else {
63
+ contextText = truncated + '...';
64
+ }
65
+ }
66
+
67
+ return contextText;
68
+ }
69
+
70
+ // Main chat handler
13
71
  export async function POST(request: NextRequest) {
72
+ const startTime = Date.now();
73
+
14
74
  try {
15
- const { message, userInfo } = await request.json();
75
+ const { message, userInfo, conversationHistory, nResults } = await request.json();
16
76
 
17
- if (!message) {
77
+ // Validation
78
+ if (!message || typeof message !== 'string' || !message.trim()) {
18
79
  return NextResponse.json(
19
- { error: 'Message is required' },
80
+ {
81
+ error: 'Message is required',
82
+ message: 'Please provide a valid message.',
83
+ response: 'Please provide a valid message.',
84
+ },
20
85
  { status: 400 }
21
86
  );
22
87
  }
23
88
 
24
89
  if (!process.env.OPENAI_API_KEY) {
25
90
  return NextResponse.json(
26
- { error: 'OPENAI_API_KEY is not set in environment variables' },
91
+ {
92
+ error: 'OPENAI_API_KEY is not set in environment variables',
93
+ message: 'Server configuration error. Please contact support.',
94
+ response: 'Server configuration error. Please contact support.',
95
+ },
27
96
  { status: 500 }
28
97
  );
29
98
  }
30
99
 
100
+ console.log(`[Chat] Processing message: "${message.substring(0, 50)}..."`);
101
+
31
102
  // Get query embedding
32
- const queryEmbedding = await openai.embeddings.create({
33
- model: 'text-embedding-3-small',
34
- input: message,
35
- });
103
+ let queryEmbedding: number[];
104
+ try {
105
+ const embeddingResponse = await retryWithBackoff(() =>
106
+ openai.embeddings.create({
107
+ model: 'text-embedding-3-small',
108
+ input: message.trim(),
109
+ })
110
+ );
111
+
112
+ queryEmbedding = embeddingResponse.data[0].embedding;
113
+ console.log('[Chat] Generated query embedding');
114
+ } catch (error: any) {
115
+ console.error('[Chat] Failed to generate embedding:', error);
116
+ return NextResponse.json(
117
+ {
118
+ error: 'Failed to process query',
119
+ message: 'Sorry, I encountered an error processing your question. Please try again.',
120
+ response: 'Sorry, I encountered an error processing your question. Please try again.',
121
+ },
122
+ { status: 500 }
123
+ );
124
+ }
36
125
 
37
126
  // Search in ChromaDB
38
127
  const collectionName = process.env.CHROMA_COLLECTION_NAME || 'db_default';
128
+ const numResults = nResults || DEFAULT_N_RESULTS;
129
+
39
130
  let chromaCollection;
131
+ let searchResults: any;
40
132
 
41
133
  try {
134
+ // Try to get collection
42
135
  chromaCollection = await chromaClient.getCollection({
43
136
  name: collectionName,
44
137
  });
45
- } catch (error) {
138
+ console.log(`[Chat] Connected to ChromaDB collection: ${collectionName}`);
139
+ } catch (error: any) {
46
140
  // If collection doesn't exist, return a helpful message
141
+ console.warn(`[Chat] ChromaDB collection not found: ${collectionName}`);
47
142
  return NextResponse.json({
48
143
  message: 'Vector database not initialized. Please use AdminSetup to process embeddings first.',
49
144
  response: 'Vector database not initialized. Please use AdminSetup to process embeddings first.',
145
+ error: 'CHROMADB_NOT_INITIALIZED',
50
146
  });
51
147
  }
52
148
 
53
- const results = await chromaCollection.query({
54
- queryEmbeddings: [queryEmbedding.data[0].embedding],
55
- nResults: 5,
56
- });
149
+ // Perform vector search
150
+ try {
151
+ searchResults = await retryWithBackoff(() =>
152
+ chromaCollection.query({
153
+ queryEmbeddings: [queryEmbedding],
154
+ nResults: numResults,
155
+ })
156
+ );
157
+
158
+ console.log(`[Chat] Found ${searchResults.documents[0]?.length || 0} relevant documents`);
159
+ } catch (error: any) {
160
+ console.error('[Chat] Failed to search ChromaDB:', error);
161
+ return NextResponse.json(
162
+ {
163
+ error: 'Failed to search database',
164
+ message: 'Sorry, I encountered an error searching the database. Please try again.',
165
+ response: 'Sorry, I encountered an error searching the database. Please try again.',
166
+ },
167
+ { status: 500 }
168
+ );
169
+ }
170
+
171
+ // Extract context from search results
172
+ const contexts = searchResults.documents[0] || [];
173
+ const metadatas = searchResults.metadatas[0] || [];
174
+ const distances = searchResults.distances[0] || [];
57
175
 
58
- // Extract context
59
- const contexts = results.documents[0] || [];
60
- const contextText = contexts.length > 0
61
- ? contexts.join('\n\n')
62
- : 'No relevant context found.';
176
+ let contextText: string;
177
+
178
+ if (contexts.length === 0) {
179
+ contextText = 'No relevant context found in the database.';
180
+ console.log('[Chat] No relevant context found');
181
+ } else {
182
+ // Filter out very low relevance results (high distance)
183
+ const relevantContexts = contexts.filter((_: string, index: number) => {
184
+ // ChromaDB returns distances (lower is better)
185
+ // Filter out results with distance >= 1.5 (less relevant)
186
+ return distances[index] !== undefined && distances[index] < 1.5;
187
+ });
188
+
189
+ if (relevantContexts.length === 0) {
190
+ contextText = 'No highly relevant context found. Using available context.';
191
+ contextText = truncateContext(contexts);
192
+ } else {
193
+ contextText = truncateContext(relevantContexts);
194
+ }
195
+
196
+ console.log(`[Chat] Using ${relevantContexts.length || contexts.length} context document(s)`);
197
+ }
198
+
199
+ // Build conversation history if provided
200
+ const messages: any[] = [
201
+ {
202
+ role: 'system',
203
+ content: 'You are a helpful assistant that answers questions based on the provided context from a database. Use the context to provide accurate and relevant answers. If the context does not contain relevant information to answer the question, say so politely and suggest that the user rephrase their question or ask about something else.',
204
+ },
205
+ ];
206
+
207
+ // Add conversation history if provided
208
+ if (conversationHistory && Array.isArray(conversationHistory)) {
209
+ // Add recent conversation history (last 5 exchanges)
210
+ const recentHistory = conversationHistory.slice(-10); // Last 10 messages (5 exchanges)
211
+ recentHistory.forEach((msg: any) => {
212
+ if (msg.sender === 'user') {
213
+ messages.push({ role: 'user', content: msg.text });
214
+ } else if (msg.sender === 'bot') {
215
+ messages.push({ role: 'assistant', content: msg.text });
216
+ }
217
+ });
218
+ }
219
+
220
+ // Add current query with context
221
+ messages.push({
222
+ role: 'user',
223
+ content: `Context from database:\n${contextText}\n\nUser question: ${message.trim()}\n\nPlease provide a helpful answer based on the context above.`,
224
+ });
63
225
 
64
226
  // Generate response with OpenAI
65
- const completion = await openai.chat.completions.create({
66
- model: process.env.OPENAI_MODEL || 'gpt-3.5-turbo',
67
- messages: [
68
- {
69
- role: 'system',
70
- content: 'You are a helpful assistant that answers questions based on the provided context. If the context does not contain relevant information, say so politely.',
71
- },
72
- {
73
- role: 'user',
74
- content: `Context:\n${contextText}\n\nQuestion: ${message}\n\nAnswer:`,
227
+ let answer: string;
228
+ try {
229
+ const completion = await retryWithBackoff(() =>
230
+ openai.chat.completions.create({
231
+ model: process.env.OPENAI_MODEL || 'gpt-3.5-turbo',
232
+ messages,
233
+ temperature: 0.7,
234
+ max_tokens: parseInt(process.env.OPENAI_MAX_TOKENS || '500'),
235
+ })
236
+ );
237
+
238
+ answer = completion.choices[0].message.content || 'Sorry, I could not generate a response.';
239
+ console.log(`[Chat] Generated response (${answer.length} characters)`);
240
+ } catch (error: any) {
241
+ console.error('[Chat] Failed to generate response:', error);
242
+ return NextResponse.json(
243
+ {
244
+ error: 'Failed to generate response',
245
+ message: 'Sorry, I encountered an error generating a response. Please try again.',
246
+ response: 'Sorry, I encountered an error generating a response. Please try again.',
75
247
  },
76
- ],
77
- temperature: 0.7,
78
- max_tokens: 500,
79
- });
248
+ { status: 500 }
249
+ );
250
+ }
80
251
 
81
- const answer = completion.choices[0].message.content;
252
+ const duration = ((Date.now() - startTime) / 1000).toFixed(2);
253
+ console.log(`[Chat] Request completed in ${duration}s`);
82
254
 
83
255
  return NextResponse.json({
84
256
  message: answer,
85
257
  response: answer,
258
+ contexts: contexts.length > 0 ? contexts : undefined,
259
+ metadata: {
260
+ numResults: contexts.length,
261
+ processingTime: `${duration}s`,
262
+ collection: collectionName,
263
+ },
86
264
  });
87
265
  } catch (error: any) {
88
- console.error('Chat error:', error);
266
+ console.error('[Chat] Fatal error:', error);
89
267
  return NextResponse.json(
90
268
  {
91
- message: error.message || 'Failed to process message',
92
- response: error.message || 'Failed to process message',
269
+ error: error.message || 'Failed to process message',
270
+ message: 'Sorry, I encountered an unexpected error. Please try again later.',
271
+ response: 'Sorry, I encountered an unexpected error. Please try again later.',
93
272
  },
94
273
  { status: 500 }
95
274
  );