@studious-lms/server 1.2.46 → 1.2.47

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59)
  1. package/dist/middleware/auth.d.ts.map +1 -1
  2. package/dist/middleware/auth.js +3 -2
  3. package/dist/middleware/auth.js.map +1 -1
  4. package/dist/routers/_app.d.ts +90 -26
  5. package/dist/routers/_app.d.ts.map +1 -1
  6. package/dist/routers/assignment.d.ts +10 -0
  7. package/dist/routers/assignment.d.ts.map +1 -1
  8. package/dist/routers/assignment.js +18 -3
  9. package/dist/routers/assignment.js.map +1 -1
  10. package/dist/routers/conversation.d.ts +1 -0
  11. package/dist/routers/conversation.d.ts.map +1 -1
  12. package/dist/routers/labChat.d.ts +1 -0
  13. package/dist/routers/labChat.d.ts.map +1 -1
  14. package/dist/routers/labChat.js +5 -321
  15. package/dist/routers/labChat.js.map +1 -1
  16. package/dist/routers/message.d.ts +1 -0
  17. package/dist/routers/message.d.ts.map +1 -1
  18. package/dist/routers/message.js +3 -2
  19. package/dist/routers/message.js.map +1 -1
  20. package/dist/routers/newtonChat.d.ts.map +1 -1
  21. package/dist/routers/newtonChat.js +3 -179
  22. package/dist/routers/newtonChat.js.map +1 -1
  23. package/dist/routers/section.d.ts +10 -0
  24. package/dist/routers/section.d.ts.map +1 -1
  25. package/dist/routers/section.js +21 -3
  26. package/dist/routers/section.js.map +1 -1
  27. package/dist/routers/worksheet.d.ts +22 -13
  28. package/dist/routers/worksheet.d.ts.map +1 -1
  29. package/dist/routers/worksheet.js +16 -3
  30. package/dist/routers/worksheet.js.map +1 -1
  31. package/dist/server/pipelines/aiLabChat.d.ts +12 -1
  32. package/dist/server/pipelines/aiLabChat.d.ts.map +1 -1
  33. package/dist/server/pipelines/aiLabChat.js +388 -15
  34. package/dist/server/pipelines/aiLabChat.js.map +1 -1
  35. package/dist/server/pipelines/aiNewtonChat.d.ts +30 -0
  36. package/dist/server/pipelines/aiNewtonChat.d.ts.map +1 -0
  37. package/dist/server/pipelines/aiNewtonChat.js +280 -0
  38. package/dist/server/pipelines/aiNewtonChat.js.map +1 -0
  39. package/dist/server/pipelines/gradeWorksheet.d.ts +14 -1
  40. package/dist/server/pipelines/gradeWorksheet.d.ts.map +1 -1
  41. package/dist/server/pipelines/gradeWorksheet.js +6 -5
  42. package/dist/server/pipelines/gradeWorksheet.js.map +1 -1
  43. package/dist/utils/inference.d.ts +3 -1
  44. package/dist/utils/inference.d.ts.map +1 -1
  45. package/dist/utils/inference.js +34 -4
  46. package/dist/utils/inference.js.map +1 -1
  47. package/package.json +1 -1
  48. package/prisma/schema.prisma +2 -0
  49. package/src/middleware/auth.ts +1 -0
  50. package/src/routers/assignment.ts +17 -2
  51. package/src/routers/labChat.ts +3 -366
  52. package/src/routers/message.ts +1 -1
  53. package/src/routers/newtonChat.ts +1 -222
  54. package/src/routers/section.ts +21 -1
  55. package/src/routers/worksheet.ts +17 -1
  56. package/src/server/pipelines/aiLabChat.ts +434 -19
  57. package/src/server/pipelines/aiNewtonChat.ts +338 -0
  58. package/src/server/pipelines/gradeWorksheet.ts +3 -4
  59. package/src/utils/inference.ts +40 -5
package/src/routers/assignment.ts

@@ -21,6 +21,7 @@ const directFileSchema = z.object({
 
 const createAssignmentSchema = z.object({
   classId: z.string(),
+  id: z.string().optional(),
   title: z.string(),
   instructions: z.string(),
   dueDate: z.string(),
@@ -319,6 +320,20 @@ export const assignmentRouter = createTRPCRouter({
       return updated;
     }),

+  exists: protectedClassMemberProcedure
+    .input(z.object({
+      id: z.string(),
+    }))
+    .query(async ({ ctx, input }) => {
+      if (!ctx.user) {
+        throw new TRPCError({ code: 'UNAUTHORIZED', message: 'User must be authenticated' });
+      }
+      const assignment = await prisma.assignment.findUnique({
+        where: { id: input.id },
+      });
+
+      return assignment ? true : false;
+    }),
   move: protectedTeacherProcedure
     .input(z.object({
       id: z.string(),
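The new exists query lets a client check whether a client-generated assignment id is already taken before passing it to create. A minimal usage sketch, assuming a tRPC v10-style client and that the package exports its AppRouter type (both assumptions, not shown in this diff):

    import { createTRPCProxyClient, httpBatchLink } from '@trpc/client';
    // Assumption: the server package exposes its router type like this.
    import type { AppRouter } from '@studious-lms/server';

    const trpc = createTRPCProxyClient<AppRouter>({
      links: [httpBatchLink({ url: 'https://api.example.com/trpc' })], // hypothetical endpoint
    });

    // Returns true when no assignment with this id exists yet, so the caller
    // can safely pass the id to assignment.create.
    async function isAssignmentIdFree(id: string): Promise<boolean> {
      const exists = await trpc.assignment.exists.query({ id });
      return !exists;
    }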
@@ -358,7 +373,7 @@ export const assignmentRouter = createTRPCRouter({
   create: protectedTeacherProcedure
     .input(createAssignmentSchema)
     .mutation(async ({ ctx, input }) => {
-      const { classId, title, instructions, dueDate, files, existingFileIds, aiPolicyLevel, acceptFiles, acceptExtendedResponse, acceptWorksheet, worksheetIds, gradeWithAI, studentIds, maxGrade, graded, weight, sectionId, type, markSchemeId, gradingBoundaryId, inProgress } = input;
+      const { classId, id, title, instructions, dueDate, files, existingFileIds, aiPolicyLevel, acceptFiles, acceptExtendedResponse, acceptWorksheet, worksheetIds, gradeWithAI, studentIds, maxGrade, graded, weight, sectionId, type, markSchemeId, gradingBoundaryId, inProgress } = input;

       if (!ctx.user) {
         throw new TRPCError({
@@ -418,6 +433,7 @@ export const assignmentRouter = createTRPCRouter({
       // Create assignment with order 0 (will be at top)
       const created = await tx.assignment.create({
         data: {
+          ...(id && { id }),
           title,
           instructions,
           dueDate: new Date(dueDate),
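The ...(id && { id }) spread only injects the optional client-supplied id into the Prisma data object when it is present; otherwise Prisma keeps generating ids as before. A standalone sketch of the idiom (the helper name is hypothetical):

    // When id is undefined, `id && { id }` is falsy and spreading it adds no
    // keys; when id is a non-empty string, `{ id }` is spread in.
    function buildCreateData(title: string, id?: string) {
      return {
        ...(id && { id }),
        title,
      };
    }

    buildCreateData('Lab report');            // => { title: 'Lab report' }
    buildCreateData('Lab report', 'a1b2c3');  // => { id: 'a1b2c3', title: 'Lab report' }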
@@ -1290,7 +1306,6 @@ export const assignmentRouter = createTRPCRouter({

       if (submit !== undefined) {
         // Toggle submission status
-
         if (submission.assignment.acceptWorksheet && submission.assignment.gradeWithAI) {

           // Grade the submission with AI
package/src/routers/labChat.ts

@@ -3,17 +3,8 @@ import { createTRPCRouter, protectedProcedure } from '../trpc.js';
 import { prisma } from '../lib/prisma.js';
 import { pusher } from '../lib/pusher.js';
 import { TRPCError } from '@trpc/server';
-import {
-  inferenceClient,
-  sendAIMessage,
-  type LabChatContext
-} from '../utils/inference.js';
-import { logger } from '../utils/logger.js';
 import { isAIUser } from '../utils/aiUser.js';
-import { bucket } from '../lib/googleCloudStorage.js';
-import { createPdf } from "../lib/jsonConversion.js"
-import OpenAI from 'openai';
-import { v4 as uuidv4 } from "uuid";
+import { generateAndSendLabIntroduction, generateAndSendLabResponse } from '../server/pipelines/aiLabChat.js';

 export const labChatRouter = createTRPCRouter({
   create: protectedProcedure
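The router now delegates to two pipeline entry points instead of defining them locally. Their shapes can be read off the call sites and the functions removed further down; whether src/server/pipelines/aiLabChat.ts declares them exactly like this is an assumption (its +434 -19 diff is listed above but its contents are not shown in this excerpt):

    // Sketch of the expected exports of src/server/pipelines/aiLabChat.ts,
    // mirroring the signatures of the functions removed from this router below.
    export declare function generateAndSendLabIntroduction(
      labChatId: string,
      conversationId: string,
      contextString: string,
      subject: string
    ): Promise<void>;

    export declare function generateAndSendLabResponse(
      labChatId: string,
      teacherMessage: string,
      conversationId: string
    ): Promise<void>;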
@@ -164,10 +155,7 @@ export const labChatRouter = createTRPCRouter({
       });

       // Generate AI introduction message in parallel (don't await - fire and forget)
-      generateAndSendLabIntroduction(result.id, result.conversationId, context, classWithTeachers.subject || 'Lab').catch(error => {
-        logger.error('Failed to generate AI introduction:', { error, labChatId: result.id });
-      });
-
+      generateAndSendLabIntroduction(result.id, result.conversationId, context, classWithTeachers.subject || 'Lab');
       // Broadcast lab chat creation to class members
       try {
         await pusher.trigger(`class-${classId}`, 'lab-chat-created', {
@@ -557,9 +545,7 @@ export const labChatRouter = createTRPCRouter({
       // Generate AI response in parallel (don't await - fire and forget)
       if (!isAIUser(userId)) {
         // Run AI response generation in background
-        generateAndSendLabResponse(labChatId, content, labChat.conversationId).catch(error => {
-          logger.error('Failed to generate AI response:', { error });
-        });
+        generateAndSendLabResponse(labChatId, content, labChat.conversationId)
       }

       return {
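Both call sites above now invoke the pipelines without a local .catch, presumably because error handling lives inside the pipeline module. For reference, a minimal fire-and-forget wrapper in this codebase's style that keeps such a call from surfacing an unhandled rejection (a sketch, not something this release adds; logger is the package's existing util):

    import { logger } from '../utils/logger.js';

    // Runs a background task and logs (rather than throws) any rejection, so
    // callers can trigger async pipelines without awaiting them.
    export function fireAndForget(task: Promise<unknown>, context: Record<string, unknown> = {}): void {
      void task.catch(error => {
        logger.error('Background task failed:', { error, ...context });
      });
    }

    // e.g. fireAndForget(generateAndSendLabResponse(labChatId, content, labChat.conversationId), { labChatId });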
@@ -645,352 +631,3 @@ export const labChatRouter = createTRPCRouter({
     }),
 });

-/**
- * Generate and send AI introduction for lab chat
- * Uses the stored context directly from database
- */
-async function generateAndSendLabIntroduction(
-  labChatId: string,
-  conversationId: string,
-  contextString: string,
-  subject: string
-): Promise<void> {
-  try {
-    // Enhance the stored context with clarifying question instructions
-    const enhancedSystemPrompt = `${contextString}
-
-IMPORTANT INSTRUCTIONS:
-- You are helping teachers create course materials
-- Use the context information provided above (subject, topic, difficulty, objectives, etc.) as your foundation
-- Only ask clarifying questions about details NOT already specified in the context
-- Focus your questions on format preferences, specific requirements, or missing details needed to create the content
-- Only output final course materials when you have sufficient details beyond what's in the context
-- Do not use markdown formatting in your responses - use plain text only
-- When creating content, make it clear and well-structured without markdown`;
-
-    const completion = await inferenceClient.chat.completions.create({
-      model: 'command-a-03-2025',
-      messages: [
-        { role: 'system', content: enhancedSystemPrompt },
-        {
-          role: 'user',
-          content: 'Please introduce yourself to the teaching team. Explain that you will help create course materials by first asking clarifying questions based on the context provided, and only output final content when you have enough information.'
-        },
-      ],
-      max_tokens: 300,
-      temperature: 0.8,
-    });
-
-    const response = completion.choices[0]?.message?.content;
-
-    if (!response) {
-      throw new Error('No response generated from inference API');
-    }
-
-    // Send AI introduction using centralized sender
-    await sendAIMessage(response, conversationId, {
-      subject,
-    });
-
-    logger.info('AI Introduction sent', { labChatId, conversationId });
-
-  } catch (error) {
-    logger.error('Failed to generate AI introduction:', { error, labChatId });
-
-    // Send fallback introduction
-    try {
-      const fallbackIntro = `Hello teaching team! I'm your AI assistant for course material development. I will help you create educational content by first asking clarifying questions based on the provided context, then outputting final materials when I have sufficient information. I won't use markdown formatting in my responses. What would you like to work on?`;
-
-      await sendAIMessage(fallbackIntro, conversationId, {
-        subject,
-      });
-
-      logger.info('Fallback AI introduction sent', { labChatId });
-
-    } catch (fallbackError) {
-      logger.error('Failed to send fallback AI introduction:', { error: fallbackError, labChatId });
-    }
-  }
-}
-
-/**
- * Generate and send AI response to teacher message
- * Uses the stored context directly from database
- */
-async function generateAndSendLabResponse(
-  labChatId: string,
-  teacherMessage: string,
-  conversationId: string
-): Promise<void> {
-  try {
-    // Get lab context from database
-    const fullLabChat = await prisma.labChat.findUnique({
-      where: { id: labChatId },
-      include: {
-        class: {
-          select: {
-            name: true,
-            subject: true,
-          },
-        },
-      },
-    });
-
-    if (!fullLabChat) {
-      throw new Error('Lab chat not found');
-    }
-
-    // Get recent conversation history
-    const recentMessages = await prisma.message.findMany({
-      where: {
-        conversationId,
-      },
-      include: {
-        sender: {
-          select: {
-            id: true,
-            username: true,
-            profile: {
-              select: {
-                displayName: true,
-              },
-            },
-          },
-        },
-      },
-      orderBy: {
-        createdAt: 'desc',
-      },
-      take: 10, // Last 10 messages for context
-    });
-
-    // Build conversation history as proper message objects
-    // Enhance the stored context with clarifying question instructions
-    const enhancedSystemPrompt = `${fullLabChat.context}
-
-IMPORTANT INSTRUCTIONS:
-- Use the context information provided above (subject, topic, difficulty, objectives, etc.) as your foundation
-- Based on the teacher's input and existing context, only ask clarifying questions about details NOT already specified
-- Focus questions on format preferences, specific requirements, quantity, or missing implementation details
-- Only output final course materials when you have sufficient details beyond what's in the context
-- Do not use markdown formatting in your responses - use plain text only
-- When you do create content, make it clear and well-structured without markdown
-- If the request is vague, ask 1-2 specific clarifying questions about missing details only
-- You are primarily a chatbot - only provide files when it is necessary
-
-RESPONSE FORMAT:
-- Always respond with JSON in this format: { "text": string, "docs": null | array }
-- "text": Your conversational response (questions, explanations, etc.) - use plain text, no markdown
-- "docs": null for regular conversation, or array of PDF document objects when creating course materials
-
-WHEN CREATING COURSE MATERIALS (docs field):
-- docs: [ { "title": string, "blocks": [ { "format": <int 0-12>, "content": string | string[], "metadata"?: { fontSize?: number, lineHeight?: number, paragraphSpacing?: number, indentWidth?: number, paddingX?: number, paddingY?: number, font?: 0|1|2|3|4|5, color?: "#RGB"|"#RRGGBB", background?: "#RGB"|"#RRGGBB", align?: "left"|"center"|"right" } } ] } ]
-- Each document in the array should have a "title" (used for filename) and "blocks" (content)
-- You can create multiple documents when it makes sense (e.g., separate worksheets, answer keys, different topics)
-- Use descriptive titles like "Biology_Cell_Structure_Worksheet" or "Chemistry_Lab_Instructions"
-- Format enum (integers): 0=HEADER_1, 1=HEADER_2, 2=HEADER_3, 3=HEADER_4, 4=HEADER_5, 5=HEADER_6, 6=PARAGRAPH, 7=BULLET, 8=NUMBERED, 9=TABLE, 10=IMAGE, 11=CODE_BLOCK, 12=QUOTE
-- Fonts enum: 0=TIMES_ROMAN, 1=COURIER, 2=HELVETICA, 3=HELVETICA_BOLD, 4=HELVETICA_ITALIC, 5=HELVETICA_BOLD_ITALIC
-- Colors must be hex strings: "#RGB" or "#RRGGBB".
-- Headings (0-5): content is a single string; you may set metadata.align.
-- Paragraphs (6) and Quotes (12): content is a single string.
-- Bullets (7) and Numbered (8): content is an array of strings (one item per list entry). DO NOT include bullet symbols (*) or numbers (1. 2. 3.) in the content - the format will automatically add these.
-- Code blocks (11): prefer content as an array of lines; preserve indentation via leading tabs/spaces. If using a single string, include \n between lines.
-- Table (9) and Image (10) are not supported by the renderer now; do not emit them.
-- Use metadata sparingly; omit fields you don't need. For code blocks you may set metadata.paddingX, paddingY, background, and font (1 for Courier).
-- Wrap text naturally; do not insert manual line breaks except where semantically required (lists, code).
-- The JSON must be valid and ready for PDF rendering by the server.`;
-
-    const messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[] = [
-      { role: 'system', content: enhancedSystemPrompt },
-    ];
-
-    // Add recent conversation history
-    recentMessages.reverse().forEach(msg => {
-      const role = isAIUser(msg.senderId) ? 'assistant' : 'user';
-      const senderName = msg.sender?.profile?.displayName || msg.sender?.username || 'Teacher';
-      const content = isAIUser(msg.senderId) ? msg.content : `${senderName}: ${msg.content}`;
-
-      messages.push({
-        role: role as 'user' | 'assistant',
-        content,
-      });
-    });
-
-    // Add the new teacher message
-    const senderName = 'Teacher'; // We could get this from the actual sender if needed
-    messages.push({
-      role: 'user',
-      content: `${senderName}: ${teacherMessage}`,
-    });
-
-
-    const completion = await inferenceClient.chat.completions.create({
-      model: 'command-a-03-2025',
-      messages,
-      temperature: 0.7,
-      response_format: {
-        type: "json_object",
-        // @ts-expect-error
-        schema: {
-          type: "object",
-          properties: {
-            text: { type: "string" },
-            docs: {
-              type: "array",
-              items: {
-                type: "object",
-                properties: {
-                  title: { type: "string" },
-                  blocks: {
-                    type: "array",
-                    items: {
-                      type: "object",
-                      properties: {
-                        format: { type: "integer", minimum: 0, maximum: 12 },
-                        content: {
-                          oneOf: [
-                            { type: "string" },
-                            { type: "array", items: { type: "string" } }
-                          ]
-                        },
-                        metadata: {
-                          type: "object",
-                          properties: {
-                            fontSize: { type: "number", minimum: 6 },
-                            lineHeight: { type: "number", minimum: 0.6 },
-                            paragraphSpacing: { type: "number", minimum: 0 },
-                            indentWidth: { type: "number", minimum: 0 },
-                            paddingX: { type: "number", minimum: 0 },
-                            paddingY: { type: "number", minimum: 0 },
-                            font: { type: "integer", minimum: 0, maximum: 5 },
-                            color: { type: "string", pattern: "^#([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$" },
-                            background: { type: "string", pattern: "^#([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$" },
-                            align: { type: "string", enum: ["left", "center", "right"] }
-                          },
-                          additionalProperties: false
-                        }
-                      },
-                      required: ["format", "content"],
-                      additionalProperties: false
-                    }
-                  }
-                },
-                required: ["title", "blocks"],
-                additionalProperties: false
-              }
-            }
-          },
-          required: ["text"],
-          additionalProperties: false
-        }
-      },
-    });
-
-    const response = completion.choices[0]?.message?.content;
-
-    if (!response) {
-      throw new Error('No response generated from inference API');
-    }
-
-    // Parse the JSON response and generate PDF if docs are provided
-    try {
-      const jsonData = JSON.parse(response);
-
-
-      const attachmentIds: string[] = [];
-      // Generate PDFs if docs are provided
-      if (jsonData.docs && Array.isArray(jsonData.docs)) {
-
-
-        for (let i = 0; i < jsonData.docs.length; i++) {
-          const doc = jsonData.docs[i];
-          if (!doc.title || !doc.blocks || !Array.isArray(doc.blocks)) {
-            logger.error(`Document ${i + 1} is missing title or blocks`);
-            continue;
-          }
-
-
-          try {
-            let pdfBytes = await createPdf(doc.blocks);
-            if (pdfBytes) {
-              // Sanitize filename - remove special characters and limit length
-              const sanitizedTitle = doc.title
-                .replace(/[^a-zA-Z0-9\s\-_]/g, '')
-                .replace(/\s+/g, '_')
-                .substring(0, 50);
-
-              const filename = `${sanitizedTitle}_${uuidv4().substring(0, 8)}.pdf`;
-              const filePath = `class/generated/${fullLabChat.classId}/${filename}`;
-
-              logger.info(`PDF ${i + 1} generated successfully`, { labChatId, title: doc.title });
-
-              // Upload directly to Google Cloud Storage
-              const gcsFile = bucket.file(filePath);
-              await gcsFile.save(Buffer.from(pdfBytes), {
-                metadata: {
-                  contentType: 'application/pdf',
-                }
-              });
-
-              logger.info(`PDF ${i + 1} uploaded successfully`, { labChatId, filename });
-
-              const file = await prisma.file.create({
-                data: {
-                  name: filename,
-                  path: filePath,
-                  type: 'application/pdf',
-                  size: pdfBytes.length,
-                  userId: fullLabChat.createdById,
-                  uploadStatus: 'COMPLETED',
-                  uploadedAt: new Date(),
-                },
-              });
-              attachmentIds.push(file.id);
-            } else {
-              logger.error(`PDF ${i + 1} creation returned undefined/null`, { labChatId, title: doc.title });
-            }
-          } catch (pdfError) {
-            logger.error(`PDF creation threw an error for document ${i + 1}:`, {
-              error: pdfError instanceof Error ? {
-                message: pdfError.message,
-                stack: pdfError.stack,
-                name: pdfError.name
-              } : pdfError,
-              labChatId,
-              title: doc.title
-            });
-          }
-        }
-      }
-
-      // Send the text response to the conversation
-      await sendAIMessage(jsonData.text || response, conversationId, {
-        attachments: {
-          connect: attachmentIds.map(id => ({ id })),
-        },
-        subject: fullLabChat.class?.subject || 'Lab',
-      });
-    } catch (parseError) {
-      logger.error('Failed to parse AI response or generate PDF:', { error: parseError, labChatId });
-      // Fallback: send the raw response if parsing fails
-      await sendAIMessage(response, conversationId, {
-        subject: fullLabChat.class?.subject || 'Lab',
-      });
-    }
-
-    logger.info('AI response sent', { labChatId, conversationId });
-
-  } catch (error) {
-    console.error('Full error object:', error);
-    logger.error('Failed to generate AI response:', {
-      error: error instanceof Error ? {
-        message: error.message,
-        stack: error.stack,
-        name: error.name
-      } : error,
-      labChatId
-    });
-    throw error; // Re-throw to see the full error in the calling function
-  }
-}
-
package/src/routers/message.ts

@@ -103,6 +103,7 @@ export const messageRouter = createTRPCRouter({
           name: attachment.name,
           type: attachment.type,
         })),
+        meta: message.meta as Record<string, any>,
         mentions: message.mentions.map((mention) => ({
           user: mention.user,
         })),
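The mapped message objects now expose the raw meta column, cast to Record<string, any>. A small, hypothetical consumer-side guard (the key name is illustrative, not part of this package):

    // message.meta is a loosely typed JSON bag, so read it defensively.
    function getMetaValue(meta: Record<string, any> | null | undefined, key: string): unknown {
      return meta && typeof meta === 'object' ? meta[key] : undefined;
    }

    // const subject = getMetaValue(message.meta, 'subject');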
@@ -111,7 +112,6 @@ export const messageRouter = createTRPCRouter({
         nextCursor,
       };
     }),
-
   send: protectedProcedure
     .input(
       z.object({
package/src/routers/newtonChat.ts

@@ -3,13 +3,9 @@ import { createTRPCRouter, protectedProcedure } from '../trpc.js';
 import { prisma } from '../lib/prisma.js';
 import { pusher } from '../lib/pusher.js';
 import { TRPCError } from '@trpc/server';
-import {
-  inferenceClient,
-  openAIClient,
-  sendAIMessage,
-} from '../utils/inference.js';
 import { logger } from '../utils/logger.js';
 import { isAIUser } from '../utils/aiUser.js';
+import { generateAndSendNewtonIntroduction, generateAndSendNewtonResponse } from '../server/pipelines/aiNewtonChat.js';

 export const newtonChatRouter = createTRPCRouter({
   getTutorConversation: protectedProcedure
@@ -300,221 +296,4 @@ export const newtonChatRouter = createTRPCRouter({
     }),
 });

-/**
- * Generate and send AI introduction for Newton chat
- */
-async function generateAndSendNewtonIntroduction(
-  newtonChatId: string,
-  conversationId: string,
-  submissionId: string
-): Promise<void> {
-  try {
-    // Get submission details for context
-    const submission = await prisma.submission.findUnique({
-      where: { id: submissionId },
-      include: {
-        assignment: {
-          select: {
-            title: true,
-            instructions: true,
-            class: {
-              select: {
-                subject: true,
-                name: true,
-              },
-            },
-          },
-        },
-        attachments: {
-          select: {
-            id: true,
-            name: true,
-            type: true,
-          },
-        },
-      },
-    });
-
-    if (!submission) {
-      throw new Error('Submission not found');
-    }
-
-    const systemPrompt = `You are Newton, an AI tutor helping a student with their assignment submission.
-
-Assignment: ${submission.assignment.title}
-Subject: ${submission.assignment.class.subject}
-Instructions: ${submission.assignment.instructions || 'No specific instructions provided'}
-
-Your role:
-- Help the student understand concepts related to their assignment
-- Provide guidance and explanations without giving away direct answers
-- Encourage learning and critical thinking
-- Be supportive and encouraging
-- Use clear, educational language appropriate for the subject
-
-Do not use markdown formatting in your responses - use plain text only.`;
-
-    const completion = await inferenceClient.chat.completions.create({
-      model: 'command-a-03-2025',
-      messages: [
-        { role: 'system', content: systemPrompt },
-        {
-          role: 'user',
-          content: 'Please introduce yourself to the student. Explain that you are Newton, their AI tutor, and you are here to help them with their assignment. Ask them what they would like help with.'
-        },
-      ],
-      max_tokens: 300,
-      temperature: 0.8,
-    });
-
-    const response = completion.choices[0]?.message?.content;
-
-    if (!response) {
-      throw new Error('No response generated from inference API');
-    }
-
-    // Send AI introduction using centralized sender
-    await sendAIMessage(response, conversationId, {
-      subject: submission.assignment.class.subject || 'Assignment',
-    });
-
-    logger.info('AI Introduction sent', { newtonChatId, conversationId });
-
-  } catch (error) {
-    logger.error('Failed to generate AI introduction:', { error, newtonChatId });
-
-    // Send fallback introduction
-    try {
-      const fallbackIntro = `Hello! I'm Newton, your AI tutor. I'm here to help you with your assignment. I can answer questions, explain concepts, and guide you through your work. What would you like help with today?`;
-
-      await sendAIMessage(fallbackIntro, conversationId, {
-        subject: 'Assignment',
-      });
-
-      logger.info('Fallback AI introduction sent', { newtonChatId });
-
-    } catch (fallbackError) {
-      logger.error('Failed to send fallback AI introduction:', { error: fallbackError, newtonChatId });
-    }
-  }
-}
-
-/**
- * Generate and send AI response to student message
- */
-async function generateAndSendNewtonResponse(
-  newtonChatId: string,
-  studentMessage: string,
-  conversationId: string,
-  submission: {
-    id: string;
-    assignment: {
-      id: string;
-      title: string;
-      instructions: string | null;
-      class: {
-        subject: string | null;
-      };
-    };
-  }
-): Promise<void> {
-  try {
-    // Get recent conversation history
-    const recentMessages = await prisma.message.findMany({
-      where: {
-        conversationId,
-      },
-      include: {
-        sender: {
-          select: {
-            id: true,
-            username: true,
-            profile: {
-              select: {
-                displayName: true,
-              },
-            },
-          },
-        },
-      },
-      orderBy: {
-        createdAt: 'desc',
-      },
-      take: 10, // Last 10 messages for context
-    });
-
-    const systemPrompt = `You are Newton, an AI tutor helping a student with their assignment submission.
-
-Assignment: ${submission.assignment.title}
-Subject: ${submission.assignment.class.subject || 'General'}
-Instructions: ${submission.assignment.instructions || 'No specific instructions provided'}
-
-Your role:
-- Help the student understand concepts related to their assignment
-- Provide guidance and explanations without giving away direct answers
-- Encourage learning and critical thinking
-- Be supportive and encouraging
-- Use clear, educational language appropriate for the subject
-- If the student asks for direct answers, guide them to think through the problem instead
-- Break down complex concepts into simpler parts
-- Use examples and analogies when helpful
-
-IMPORTANT:
-- Do not use markdown formatting in your responses - use plain text only
-- Keep responses conversational and educational
-- Focus on helping the student learn, not just completing the assignment`;
-
-    const messages: Array<{ role: 'user' | 'assistant' | 'system'; content: string }> = [
-      { role: 'system', content: systemPrompt },
-    ];
-
-    // Add recent conversation history
-    recentMessages.reverse().forEach(msg => {
-      const role = isAIUser(msg.senderId) ? 'assistant' : 'user';
-      const senderName = msg.sender?.profile?.displayName || msg.sender?.username || 'Student';
-      const content = isAIUser(msg.senderId) ? msg.content : `${senderName}: ${msg.content}`;
-
-      messages.push({
-        role: role as 'user' | 'assistant',
-        content,
-      });
-    });
-
-    // Add the new student message
-    messages.push({
-      role: 'user',
-      content: `Student: ${studentMessage}`,
-    });
-
-    const completion = await openAIClient.chat.completions.create({
-      model: 'gpt-5-nano',
-      messages,
-      temperature: 0.7,
-    });
-
-    const response = completion.choices[0]?.message?.content;
-
-    if (!response) {
-      throw new Error('No response generated from inference API');
-    }
-
-    // Send the text response to the conversation
-    await sendAIMessage(response, conversationId, {
-      subject: submission.assignment.class.subject || 'Assignment',
-    });
-
-    logger.info('AI response sent', { newtonChatId, conversationId });
-
-  } catch (error) {
-    logger.error('Failed to generate AI response:', {
-      error: error instanceof Error ? {
-        message: error.message,
-        stack: error.stack,
-        name: error.name
-      } : error,
-      newtonChatId
-    });
-  }
-}
-