@studious-lms/server 1.1.12 → 1.1.14

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/utils/inference.d.ts ADDED
@@ -0,0 +1,56 @@
+ import OpenAI from 'openai';
+ export declare const inferenceClient: OpenAI;
+ export interface LabChatContext {
+     subject: string;
+     topic: string;
+     difficulty: 'beginner' | 'intermediate' | 'advanced';
+     objectives: string[];
+     resources?: string[];
+     persona: string;
+     constraints: string[];
+     examples?: any[];
+     metadata?: Record<string, any>;
+ }
+ export interface InferenceResponse {
+     content: string;
+     model: string;
+     tokensUsed: number;
+     finishReason: string;
+ }
+ /**
+  * Centralized function to send AI messages to conversations
+  * Handles database storage and Pusher broadcasting
+  */
+ export declare function sendAIMessage(content: string, conversationId: string, options?: {
+     subject?: string;
+     customSender?: {
+         displayName: string;
+         profilePicture?: string | null;
+     };
+ }): Promise<{
+     id: string;
+     content: string;
+     senderId: string;
+     conversationId: string;
+     createdAt: Date;
+ }>;
+ /**
+  * Simple inference function for general use
+  */
+ export declare function generateInferenceResponse(subject: string, question: string, options?: {
+     model?: string;
+     maxTokens?: number;
+ }): Promise<InferenceResponse>;
+ /**
+  * Validate inference configuration
+  */
+ export declare function validateInferenceConfig(): boolean;
+ /**
+  * Get available inference models (for admin/config purposes)
+  */
+ export declare function getAvailableModels(): Promise<string[]>;
+ /**
+  * Estimate token count for a message (rough approximation)
+  */
+ export declare function estimateTokenCount(text: string): number;
+ //# sourceMappingURL=inference.d.ts.map
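For orientation, here is a minimal consumer sketch of the surface declared above. It is not part of the package: the deep-import path is an assumption (the package's declared entry point is dist/exportType.js), and the subject, question, and token budget are placeholders. Configuration comes from the INFERENCE_API_KEY and INFERENCE_API_BASE_URL environment variables read by the implementation further down.

import {
  validateInferenceConfig,
  generateInferenceResponse,
  estimateTokenCount,
} from '@studious-lms/server/dist/utils/inference.js'; // hypothetical deep import

async function main(): Promise<void> {
  // validateInferenceConfig() only checks that INFERENCE_API_KEY is set.
  if (!validateInferenceConfig()) throw new Error('Inference not configured');

  const question = 'Explain osmosis in one short paragraph.';
  // Heuristic from estimateTokenCount: 1 token ≈ 4 characters, so ~10 tokens here.
  console.log('estimated prompt tokens:', estimateTokenCount(question));

  const reply = await generateInferenceResponse('Biology', question, { maxTokens: 300 });
  console.log(reply.model, reply.tokensUsed, reply.finishReason);
  console.log(reply.content);
}

main().catch(console.error);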
package/dist/utils/inference.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"inference.d.ts","sourceRoot":"","sources":["../../src/utils/inference.ts"],"names":[],"mappings":"AAAA,OAAO,MAAM,MAAM,QAAQ,CAAC;AAW5B,eAAO,MAAM,eAAe,QAG1B,CAAC;AAGH,MAAM,WAAW,cAAc;IAC7B,OAAO,EAAE,MAAM,CAAC;IAChB,KAAK,EAAE,MAAM,CAAC;IACd,UAAU,EAAE,UAAU,GAAG,cAAc,GAAG,UAAU,CAAC;IACrD,UAAU,EAAE,MAAM,EAAE,CAAC;IACrB,SAAS,CAAC,EAAE,MAAM,EAAE,CAAC;IACrB,OAAO,EAAE,MAAM,CAAC;IAChB,WAAW,EAAE,MAAM,EAAE,CAAC;IACtB,QAAQ,CAAC,EAAE,GAAG,EAAE,CAAC;IACjB,QAAQ,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,CAAC;CAChC;AAED,MAAM,WAAW,iBAAiB;IAChC,OAAO,EAAE,MAAM,CAAC;IAChB,KAAK,EAAE,MAAM,CAAC;IACd,UAAU,EAAE,MAAM,CAAC;IACnB,YAAY,EAAE,MAAM,CAAC;CACtB;AAED;;;GAGG;AACH,wBAAsB,aAAa,CACjC,OAAO,EAAE,MAAM,EACf,cAAc,EAAE,MAAM,EACtB,OAAO,GAAE;IACP,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB,YAAY,CAAC,EAAE;QACb,WAAW,EAAE,MAAM,CAAC;QACpB,cAAc,CAAC,EAAE,MAAM,GAAG,IAAI,CAAC;KAChC,CAAC;CACE,GACL,OAAO,CAAC;IACT,EAAE,EAAE,MAAM,CAAC;IACX,OAAO,EAAE,MAAM,CAAC;IAChB,QAAQ,EAAE,MAAM,CAAC;IACjB,cAAc,EAAE,MAAM,CAAC;IACvB,SAAS,EAAE,IAAI,CAAC;CACjB,CAAC,CAmDD;AAED;;GAEG;AACH,wBAAsB,yBAAyB,CAC7C,OAAO,EAAE,MAAM,EACf,QAAQ,EAAE,MAAM,EAChB,OAAO,GAAE;IACP,KAAK,CAAC,EAAE,MAAM,CAAC;IACf,SAAS,CAAC,EAAE,MAAM,CAAC;CACf,GACL,OAAO,CAAC,iBAAiB,CAAC,CAsC5B;AAED;;GAEG;AACH,wBAAgB,uBAAuB,IAAI,OAAO,CAMjD;AAED;;GAEG;AACH,wBAAsB,kBAAkB,IAAI,OAAO,CAAC,MAAM,EAAE,CAAC,CAW5D;AAED;;GAEG;AACH,wBAAgB,kBAAkB,CAAC,IAAI,EAAE,MAAM,GAAG,MAAM,CAGvD"}
package/dist/utils/inference.js ADDED
@@ -0,0 +1,135 @@
+ import OpenAI from 'openai';
+ import { logger } from './logger.js';
+ import { prisma } from '../lib/prisma.js';
+ import { pusher } from '../lib/pusher.js';
+ import { ensureAIUserExists, getAIUserId } from './aiUser.js';
+ // Initialize inference client (Cohere via OpenAI SDK)
+ logger.info('Inference API Key configured', { hasApiKey: !!process.env.INFERENCE_API_KEY }); // log presence only, never the raw secret
+ logger.info('Inference API Base URL', { baseURL: process.env.INFERENCE_API_BASE_URL });
+ export const inferenceClient = new OpenAI({
+     apiKey: process.env.INFERENCE_API_KEY,
+     baseURL: process.env.INFERENCE_API_BASE_URL,
+ });
+ /**
+  * Centralized function to send AI messages to conversations
+  * Handles database storage and Pusher broadcasting
+  */
+ export async function sendAIMessage(content, conversationId, options = {}) {
+     // Ensure AI user exists
+     await ensureAIUserExists();
+     // Create message in database
+     const aiMessage = await prisma.message.create({
+         data: {
+             content,
+             senderId: getAIUserId(),
+             conversationId,
+         },
+     });
+     logger.info('AI Message sent', {
+         messageId: aiMessage.id,
+         conversationId,
+         contentLength: content.length,
+     });
+     // Prepare sender info
+     const senderInfo = {
+         id: getAIUserId(),
+         username: 'AI Assistant',
+         profile: {
+             displayName: options.customSender?.displayName || `${options.subject || 'AI'} Assistant`,
+             profilePicture: options.customSender?.profilePicture || null,
+         },
+     };
+     // Broadcast via Pusher
+     try {
+         await pusher.trigger(`conversation-${conversationId}`, 'new-message', {
+             id: aiMessage.id,
+             content: aiMessage.content,
+             senderId: getAIUserId(),
+             conversationId: aiMessage.conversationId,
+             createdAt: aiMessage.createdAt,
+             sender: senderInfo,
+             mentionedUserIds: [],
+         });
+     }
+     catch (error) {
+         logger.error('Failed to broadcast AI message:', { error, messageId: aiMessage.id });
+     }
+     return {
+         id: aiMessage.id,
+         content: aiMessage.content,
+         senderId: getAIUserId(),
+         conversationId: aiMessage.conversationId,
+         createdAt: aiMessage.createdAt,
+     };
+ }
+ /**
+  * Simple inference function for general use
+  */
+ export async function generateInferenceResponse(subject, question, options = {}) {
+     const { model = 'command-r-plus', maxTokens = 500 } = options;
+     try {
+         const completion = await inferenceClient.chat.completions.create({
+             model,
+             messages: [
+                 {
+                     role: 'system',
+                     content: `You are a helpful educational assistant for ${subject}. Provide clear, concise, and accurate answers. Keep responses educational and appropriate for students.`,
+                 },
+                 {
+                     role: 'user',
+                     content: question,
+                 },
+             ],
+             max_tokens: maxTokens,
+             temperature: 0.5,
+             // Remove OpenAI-specific parameters for Cohere compatibility
+         });
+         const response = completion.choices[0]?.message?.content;
+         if (!response) {
+             throw new Error('No response generated from inference API');
+         }
+         return {
+             content: response,
+             model,
+             tokensUsed: completion.usage?.total_tokens || 0,
+             finishReason: completion.choices[0]?.finish_reason || 'unknown',
+         };
+     }
+     catch (error) {
+         logger.error('Failed to generate inference response', { error, subject, question: question.substring(0, 50) + '...' });
+         throw error;
+     }
+ }
+ /**
+  * Validate inference configuration
+  */
+ export function validateInferenceConfig() {
+     if (!process.env.INFERENCE_API_KEY) {
+         logger.error('Inference API key not configured for Cohere');
+         return false;
+     }
+     return true;
+ }
+ /**
+  * Get available inference models (for admin/config purposes)
+  */
+ export async function getAvailableModels() {
+     try {
+         const models = await inferenceClient.models.list();
+         return models.data
+             .filter(model => model.id.includes('command'))
+             .map(model => model.id)
+             .sort();
+     }
+     catch (error) {
+         logger.error('Failed to fetch inference models', { error });
+         return ['command-r-plus', 'command-r', 'command-light']; // Fallback Cohere models
+     }
+ }
+ /**
+  * Estimate token count for a message (rough approximation)
+  */
+ export function estimateTokenCount(text) {
+     // Rough approximation: 1 token ≈ 4 characters for English text
+     return Math.ceil(text.length / 4);
+ }
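sendAIMessage above broadcasts on the channel conversation-<conversationId> with the event name new-message. A minimal sketch of the subscribing side with pusher-js; the app key and cluster are placeholders, and the payload type simply mirrors the trigger call above (createdAt arrives as a string once the Date is JSON-serialized):

import Pusher from 'pusher-js';

// Placeholders: the real key and cluster come from the deployment's Pusher app.
const pusher = new Pusher('APP_KEY', { cluster: 'CLUSTER' });

function watchConversation(conversationId: string): void {
  // Must match the server's `conversation-${conversationId}` channel name.
  const channel = pusher.subscribe(`conversation-${conversationId}`);
  channel.bind('new-message', (message: {
    id: string;
    content: string;
    senderId: string;
    conversationId: string;
    createdAt: string; // Date serialized to string over the wire
    sender: { id: string; username: string; profile: { displayName: string; profilePicture: string | null } };
    mentionedUserIds: string[];
  }) => {
    console.log(`${message.sender.profile.displayName}: ${message.content}`);
  });
}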
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@studious-lms/server",
-   "version": "1.1.12",
+   "version": "1.1.14",
    "description": "Backend server for Studious application",
    "main": "dist/exportType.js",
    "types": "dist/exportType.d.ts",
@@ -30,6 +30,7 @@
    "dotenv": "^16.5.0",
    "express": "^4.18.3",
    "nodemailer": "^7.0.4",
+   "openai": "^5.23.0",
    "prisma": "^6.7.0",
    "pusher": "^5.2.0",
    "sharp": "^0.34.2",
@@ -169,9 +169,9 @@ export const classRouter = createTRPCRouter({
        },
      },
      submissions: {
-       where: {
-         studentId: ctx.user?.id,
-       },
+       // where: {
+       //   studentId: ctx.user?.id,
+       // },
        select: {
          studentId: true,
          id: true,
@@ -16,6 +16,12 @@ export const conversationRouter = createTRPCRouter({
        },
      },
      include: {
+       labChat: {
+         select: {
+           id: true,
+           title: true,
+         },
+       },
        members: {
          include: {
            user: {
@@ -101,6 +107,7 @@ export const conversationRouter = createTRPCRouter({
        name: conversation.name,
        createdAt: conversation.createdAt,
        updatedAt: conversation.updatedAt,
+       labChat: conversation.labChat,
        members: conversation.members,
        lastMessage: conversation.messages[0] || null,
        unreadCount,
@@ -3,6 +3,7 @@ import { createTRPCRouter, protectedProcedure, protectedClassMemberProcedure, pr
  import { TRPCError } from "@trpc/server";
  import { prisma } from "../lib/prisma.js";
  import { uploadFiles, type UploadedFile } from "../lib/fileUpload.js";
+ import { type Folder } from "@prisma/client";

  const fileSchema = z.object({
    name: z.string(),
@@ -766,4 +767,26 @@ export const folderRouter = createTRPCRouter({

      return updatedFolder;
    }),
+   getParents: protectedProcedure
+     .input(z.object({
+       folderId: z.string(),
+     }))
+     .query(async ({ ctx, input }) => {
+       const { folderId } = input;
+
+       let currentParent: string | null = folderId;
+       const parents: Folder[] = [];
+       while (currentParent) {
+         const parent = await prisma.folder.findFirst({
+           where: {
+             id: currentParent,
+           },
+         });
+
+         currentParent = parent?.parentFolderId ?? null; // coalesce undefined to null so the string | null cursor stays well-typed
+         if (parent) parents.push(parent); // findFirst can return null; only push real folders
+       }
+
+       return parents;
+     }),
  });
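getParents issues one query per ancestor hop and trusts parentFolderId chains to terminate at a root. A hedged sketch (not part of the release) of a standalone, cycle-safe variant, assuming the same Prisma Folder model:

import { PrismaClient, type Folder } from '@prisma/client';

const prisma = new PrismaClient();

// Walks from a folder to the root, refusing to revisit an id, so a corrupted
// cycle (A -> B -> A) ends the loop instead of hanging the request.
async function getFolderAncestors(folderId: string): Promise<Folder[]> {
  const seen = new Set<string>();
  const chain: Folder[] = [];
  let cursor: string | null = folderId;
  while (cursor && !seen.has(cursor)) {
    seen.add(cursor);
    const folder = await prisma.folder.findFirst({ where: { id: cursor } });
    if (!folder) break;
    chain.push(folder);
    cursor = folder.parentFolderId ?? null;
  }
  return chain;
}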
@@ -3,6 +3,14 @@ import { createTRPCRouter, protectedProcedure } from '../trpc.js';
  import { prisma } from '../lib/prisma.js';
  import { pusher } from '../lib/pusher.js';
  import { TRPCError } from '@trpc/server';
+ import {
+   inferenceClient,
+   sendAIMessage,
+   type LabChatContext
+ } from '../utils/inference.js';
+ import { logger } from '../utils/logger.js';
+ import { isAIUser } from '../utils/aiUser.js';
+ import OpenAI from 'openai';

  export const labChatRouter = createTRPCRouter({
    create: protectedProcedure
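The router imports type LabChatContext (defined in inference.d.ts above), which suggests the stored context string is assembled from that shape. The diff does not show the actual serializer; the following is purely an illustrative sketch of one way such a context could be rendered into the system-prompt string the helpers below interpolate. renderLabChatContext is hypothetical, not a package export:

import { type LabChatContext } from '../utils/inference.js';

// Hypothetical serializer: field names come from the published LabChatContext
// interface; the output format is an assumption, not the package's behavior.
function renderLabChatContext(ctx: LabChatContext): string {
  return [
    `Subject: ${ctx.subject}`,
    `Topic: ${ctx.topic}`,
    `Difficulty: ${ctx.difficulty}`,
    `Persona: ${ctx.persona}`,
    `Objectives: ${ctx.objectives.join('; ')}`,
    `Constraints: ${ctx.constraints.join('; ')}`,
    ...(ctx.resources?.length ? [`Resources: ${ctx.resources.join('; ')}`] : []),
  ].join('\n');
}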
@@ -83,14 +91,14 @@ export const labChatRouter = createTRPCRouter({
        },
      });

-     // Add all class members to the conversation
-     const allMembers = [
-       ...classWithTeachers.teachers.map(t => ({ userId: t.id, role: 'ADMIN' as const })),
-       ...classWithTeachers.students.map(s => ({ userId: s.id, role: 'MEMBER' as const })),
-     ];
+     // Add only teachers to the conversation (this is for course material creation)
+     const teacherMembers = classWithTeachers.teachers.map(t => ({
+       userId: t.id,
+       role: 'ADMIN' as const
+     }));

      await tx.conversationMember.createMany({
-       data: allMembers.map(member => ({
+       data: teacherMembers.map(member => ({
          userId: member.userId,
          conversationId: conversation.id,
          role: member.role,
@@ -152,6 +160,11 @@ export const labChatRouter = createTRPCRouter({
      return labChat;
    });

+   // Generate AI introduction message in parallel (don't await - fire and forget)
+   generateAndSendLabIntroduction(result.id, result.conversationId, context, classWithTeachers.subject || 'Lab').catch(error => {
+     logger.error('Failed to generate AI introduction:', { error, labChatId: result.id });
+   });
+
    // Broadcast lab chat creation to class members
    try {
      await pusher.trigger(`class-${classId}`, 'lab-chat-created', {
@@ -176,7 +189,8 @@ export const labChatRouter = createTRPCRouter({
      const userId = ctx.user!.id;
      const { labChatId } = input;

-     const labChat = await prisma.labChat.findFirst({
+     // First, try to find the lab chat if user is already a member
+     let labChat = await prisma.labChat.findFirst({
        where: {
          id: labChatId,
          conversation: {
@@ -230,6 +244,88 @@ export const labChatRouter = createTRPCRouter({
        },
      });

+     // If not found, check if user is a teacher in the class
+     if (!labChat) {
+       const labChatForTeacher = await prisma.labChat.findFirst({
+         where: {
+           id: labChatId,
+           class: {
+             teachers: {
+               some: {
+                 id: userId,
+               },
+             },
+           },
+         },
+         include: {
+           conversation: {
+             select: {
+               id: true,
+             },
+           },
+         },
+       });
+
+       if (labChatForTeacher) {
+         // Add teacher to conversation
+         await prisma.conversationMember.create({
+           data: {
+             userId,
+             conversationId: labChatForTeacher.conversation.id,
+             role: 'ADMIN',
+           },
+         });
+
+         // Now fetch the full lab chat with the user as a member
+         labChat = await prisma.labChat.findFirst({
+           where: {
+             id: labChatId,
+           },
+           include: {
+             conversation: {
+               include: {
+                 members: {
+                   include: {
+                     user: {
+                       select: {
+                         id: true,
+                         username: true,
+                         profile: {
+                           select: {
+                             displayName: true,
+                             profilePicture: true,
+                           },
+                         },
+                       },
+                     },
+                   },
+                 },
+               },
+             },
+             createdBy: {
+               select: {
+                 id: true,
+                 username: true,
+                 profile: {
+                   select: {
+                     displayName: true,
+                   },
+                 },
+               },
+             },
+             class: {
+               select: {
+                 id: true,
+                 name: true,
+                 subject: true,
+                 section: true,
+               },
+             },
+           },
+         });
+       }
+     }
+
      if (!labChat) {
        throw new TRPCError({
          code: 'NOT_FOUND',
@@ -439,7 +535,7 @@ export const labChatRouter = createTRPCRouter({
        return message;
      });

-     // Broadcast to Pusher channel (using conversation ID)
+     // Broadcast to Pusher channel (same format as regular chat)
      try {
        await pusher.trigger(`conversation-${labChat.conversationId}`, 'new-message', {
          id: result.id,
@@ -449,13 +545,20 @@ export const labChatRouter = createTRPCRouter({
          createdAt: result.createdAt,
          sender: result.sender,
          mentionedUserIds,
-         labChatId, // Include lab chat ID for frontend context
        });
      } catch (error) {
        console.error('Failed to broadcast lab chat message:', error);
        // Don't fail the request if Pusher fails
      }

+     // Generate AI response in parallel (don't await - fire and forget)
+     if (!isAIUser(userId)) {
+       // Run AI response generation in background
+       generateAndSendLabResponse(labChatId, content, labChat.conversationId).catch(error => {
+         logger.error('Failed to generate AI response:', { error });
+       });
+     }
+
      return {
        id: result.id,
        content: result.content,
@@ -464,7 +567,6 @@ export const labChatRouter = createTRPCRouter({
        createdAt: result.createdAt,
        sender: result.sender,
        mentionedUserIds,
-       labChatId,
      };
    }),

@@ -539,3 +641,184 @@ export const labChatRouter = createTRPCRouter({
      return { success: true };
    }),
  });
+
+ /**
+  * Generate and send AI introduction for lab chat
+  * Uses the stored context directly from database
+  */
+ async function generateAndSendLabIntroduction(
+   labChatId: string,
+   conversationId: string,
+   contextString: string,
+   subject: string
+ ): Promise<void> {
+   try {
+     // Enhance the stored context with clarifying question instructions
+     const enhancedSystemPrompt = `${contextString}
+
+ IMPORTANT INSTRUCTIONS:
+ - You are helping teachers create course materials
+ - Use the context information provided above (subject, topic, difficulty, objectives, etc.) as your foundation
+ - Only ask clarifying questions about details NOT already specified in the context
+ - Focus your questions on format preferences, specific requirements, or missing details needed to create the content
+ - Only output final course materials when you have sufficient details beyond what's in the context
+ - Do not use markdown formatting in your responses - use plain text only
+ - When creating content, make it clear and well-structured without markdown`;
+
+     const completion = await inferenceClient.chat.completions.create({
+       model: 'command-a-03-2025',
+       messages: [
+         { role: 'system', content: enhancedSystemPrompt },
+         {
+           role: 'user',
+           content: 'Please introduce yourself to the teaching team. Explain that you will help create course materials by first asking clarifying questions based on the context provided, and only output final content when you have enough information.'
+         },
+       ],
+       max_tokens: 300,
+       temperature: 0.8,
+     });
+
+     const response = completion.choices[0]?.message?.content;
+
+     if (!response) {
+       throw new Error('No response generated from inference API');
+     }
+
+     // Send AI introduction using centralized sender
+     await sendAIMessage(response, conversationId, {
+       subject,
+     });
+
+     logger.info('AI Introduction sent', { labChatId, conversationId });
+
+   } catch (error) {
+     logger.error('Failed to generate AI introduction:', { error, labChatId });
+
+     // Send fallback introduction
+     try {
+       const fallbackIntro = `Hello teaching team! I'm your AI assistant for course material development. I will help you create educational content by first asking clarifying questions based on the provided context, then outputting final materials when I have sufficient information. I won't use markdown formatting in my responses. What would you like to work on?`;
+
+       await sendAIMessage(fallbackIntro, conversationId, {
+         subject,
+       });
+
+       logger.info('Fallback AI introduction sent', { labChatId });
+
+     } catch (fallbackError) {
+       logger.error('Failed to send fallback AI introduction:', { error: fallbackError, labChatId });
+     }
+   }
+ }
+
+ /**
+  * Generate and send AI response to teacher message
+  * Uses the stored context directly from database
+  */
+ async function generateAndSendLabResponse(
+   labChatId: string,
+   teacherMessage: string,
+   conversationId: string
+ ): Promise<void> {
+   try {
+     // Get lab context from database
+     const fullLabChat = await prisma.labChat.findUnique({
+       where: { id: labChatId },
+       include: {
+         class: {
+           select: {
+             name: true,
+             subject: true,
+           },
+         },
+       },
+     });
+
+     if (!fullLabChat) {
+       throw new Error('Lab chat not found');
+     }
+
+     // Get recent conversation history
+     const recentMessages = await prisma.message.findMany({
+       where: {
+         conversationId,
+       },
+       include: {
+         sender: {
+           select: {
+             id: true,
+             username: true,
+             profile: {
+               select: {
+                 displayName: true,
+               },
+             },
+           },
+         },
+       },
+       orderBy: {
+         createdAt: 'desc',
+       },
+       take: 10, // Last 10 messages for context
+     });
+
+     // Build conversation history as proper message objects
+     // Enhance the stored context with clarifying question instructions
+     const enhancedSystemPrompt = `${fullLabChat.context}
+
+ IMPORTANT INSTRUCTIONS:
+ - Use the context information provided above (subject, topic, difficulty, objectives, etc.) as your foundation
+ - Based on the teacher's input and existing context, only ask clarifying questions about details NOT already specified
+ - Focus questions on format preferences, specific requirements, quantity, or missing implementation details
+ - Only output final course materials when you have sufficient details beyond what's in the context
+ - Do not use markdown formatting in your responses - use plain text only
+ - When you do create content, make it clear and well-structured without markdown
+ - If the request is vague, ask 1-2 specific clarifying questions about missing details only`;
+
+     const messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[] = [
+       { role: 'system', content: enhancedSystemPrompt },
+     ];
+
+     // Add recent conversation history
+     recentMessages.reverse().forEach(msg => {
+       const role = isAIUser(msg.senderId) ? 'assistant' : 'user';
+       const senderName = msg.sender?.profile?.displayName || msg.sender?.username || 'Teacher';
+       const content = isAIUser(msg.senderId) ? msg.content : `${senderName}: ${msg.content}`;
+
+       messages.push({
+         role: role as 'user' | 'assistant',
+         content,
+       });
+     });
+
+     // Add the new teacher message
+     const senderName = 'Teacher'; // We could get this from the actual sender if needed
+     messages.push({
+       role: 'user',
+       content: `${senderName}: ${teacherMessage}`,
+     });
+
+     const completion = await inferenceClient.chat.completions.create({
+       model: 'command-a-03-2025',
+       messages,
+       max_tokens: 500,
+       temperature: 0.7,
+     });
+
+     const response = completion.choices[0]?.message?.content;
+
+     if (!response) {
+       throw new Error('No response generated from inference API');
+     }
+
+     // Send AI response
+     await sendAIMessage(response, conversationId, {
+       subject: fullLabChat.class?.subject || 'Lab',
+     });
+
+     logger.info('AI response sent', { labChatId, conversationId });
+
+   } catch (error) {
+     logger.error('Failed to generate AI response:', { error, labChatId });
+   }
+ }
+
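Both helpers above are invoked fire-and-forget: the router returns to the caller immediately, and failures surface only in logs. A generic sketch of that pattern (an illustration, not code from the package; fireAndForget is a hypothetical helper):

// Schedules background work without blocking the response path; the catch
// keeps an unhandled rejection from crashing the Node process.
function fireAndForget(task: () => Promise<unknown>, label: string): void {
  task().catch(error => {
    console.error(`background task failed: ${label}`, error);
  });
}

// Usage mirroring the router code above (names refer to the diff's helpers):
// fireAndForget(() => generateAndSendLabResponse(labChatId, content, conversationId), 'lab AI response');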