@studious-lms/server 1.1.13 → 1.1.15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -10,7 +10,10 @@ import {
  } from '../utils/inference.js';
  import { logger } from '../utils/logger.js';
  import { isAIUser } from '../utils/aiUser.js';
+ import { uploadFile } from 'src/lib/googleCloudStorage.js';
+ import { createPdf } from "src/lib/jsonConversion.js"
  import OpenAI from 'openai';
+ import { v4 as uuidv4 } from "uuid";

  export const labChatRouter = createTRPCRouter({
  create: protectedProcedure
@@ -772,7 +775,30 @@ IMPORTANT INSTRUCTIONS:
  - Only output final course materials when you have sufficient details beyond what's in the context
  - Do not use markdown formatting in your responses - use plain text only
  - When you do create content, make it clear and well-structured without markdown
- - If the request is vague, ask 1-2 specific clarifying questions about missing details only`;
+ - If the request is vague, ask 1-2 specific clarifying questions about missing details only
+ - You are primarily a chatbot - only provide files when it is necessary
+
+ RESPONSE FORMAT:
+ - Always respond with JSON in this format: { "text": string, "docs": null | array }
+ - "text": Your conversational response (questions, explanations, etc.) - use plain text, no markdown
+ - "docs": null for regular conversation, or array of PDF document objects when creating course materials
+
+ WHEN CREATING COURSE MATERIALS (docs field):
+ - docs: [ { "title": string, "blocks": [ { "format": <int 0-12>, "content": string | string[], "metadata"?: { fontSize?: number, lineHeight?: number, paragraphSpacing?: number, indentWidth?: number, paddingX?: number, paddingY?: number, font?: 0|1|2|3|4|5, color?: "#RGB"|"#RRGGBB", background?: "#RGB"|"#RRGGBB", align?: "left"|"center"|"right" } } ] } ]
+ - Each document in the array should have a "title" (used for filename) and "blocks" (content)
+ - You can create multiple documents when it makes sense (e.g., separate worksheets, answer keys, different topics)
+ - Use descriptive titles like "Biology_Cell_Structure_Worksheet" or "Chemistry_Lab_Instructions"
+ - Format enum (integers): 0=HEADER_1, 1=HEADER_2, 2=HEADER_3, 3=HEADER_4, 4=HEADER_5, 5=HEADER_6, 6=PARAGRAPH, 7=BULLET, 8=NUMBERED, 9=TABLE, 10=IMAGE, 11=CODE_BLOCK, 12=QUOTE
+ - Fonts enum: 0=TIMES_ROMAN, 1=COURIER, 2=HELVETICA, 3=HELVETICA_BOLD, 4=HELVETICA_ITALIC, 5=HELVETICA_BOLD_ITALIC
+ - Colors must be hex strings: "#RGB" or "#RRGGBB".
+ - Headings (0-5): content is a single string; you may set metadata.align.
+ - Paragraphs (6) and Quotes (12): content is a single string.
+ - Bullets (7) and Numbered (8): content is an array of strings (one item per list entry).
+ - Code blocks (11): prefer content as an array of lines; preserve indentation via leading tabs/spaces. If using a single string, include \n between lines.
+ - Table (9) and Image (10) are not supported by the renderer now; do not emit them.
+ - Use metadata sparingly; omit fields you don't need. For code blocks you may set metadata.paddingX, paddingY, background, and font (1 for Courier).
+ - Wrap text naturally; do not insert manual line breaks except where semantically required (lists, code).
+ - The JSON must be valid and ready for PDF rendering by the server.`;

  const messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[] = [
  { role: 'system', content: enhancedSystemPrompt },
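For reference, a model response that satisfies the prompt above might look like the following. This is illustrative only; the wording and block choices are up to the model, with format 0 = HEADER_1, 6 = PARAGRAPH, and 8 = NUMBERED per the enum in the prompt:

    {
      "text": "Here is the worksheet you asked for. Let me know if you also want an answer key.",
      "docs": [
        {
          "title": "Biology_Cell_Structure_Worksheet",
          "blocks": [
            { "format": 0, "content": "Cell Structure Worksheet", "metadata": { "align": "center" } },
            { "format": 6, "content": "Answer each question in complete sentences." },
            { "format": 8, "content": ["Name the organelle responsible for ATP production.", "Describe the role of the cell membrane."] }
          ]
        }
      ]
    }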
@@ -797,11 +823,67 @@ IMPORTANT INSTRUCTIONS:
  content: `${senderName}: ${teacherMessage}`,
  });

+
  const completion = await inferenceClient.chat.completions.create({
  model: 'command-a-03-2025',
  messages,
- max_tokens: 500,
  temperature: 0.7,
+ response_format: {
+ type: "json_object",
+ // @ts-expect-error
+ schema: {
+ type: "object",
+ properties: {
+ text: { type: "string" },
+ docs: {
+ type: "array",
+ items: {
+ type: "object",
+ properties: {
+ title: { type: "string" },
+ blocks: {
+ type: "array",
+ items: {
+ type: "object",
+ properties: {
+ format: { type: "integer", minimum: 0, maximum: 12 },
+ content: {
+ oneOf: [
+ { type: "string" },
+ { type: "array", items: { type: "string" } }
+ ]
+ },
+ metadata: {
+ type: "object",
+ properties: {
+ fontSize: { type: "number", minimum: 6 },
+ lineHeight: { type: "number", minimum: 0.6 },
+ paragraphSpacing: { type: "number", minimum: 0 },
+ indentWidth: { type: "number", minimum: 0 },
+ paddingX: { type: "number", minimum: 0 },
+ paddingY: { type: "number", minimum: 0 },
+ font: { type: "integer", minimum: 0, maximum: 5 },
+ color: { type: "string", pattern: "^#([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$" },
+ background: { type: "string", pattern: "^#([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$" },
+ align: { type: "string", enum: ["left", "center", "right"] }
+ },
+ additionalProperties: false
+ }
+ },
+ required: ["format", "content"],
+ additionalProperties: false
+ }
+ }
+ },
+ required: ["title", "blocks"],
+ additionalProperties: false
+ }
+ }
+ },
+ required: ["text"],
+ additionalProperties: false
+ }
+ },
  });

  const response = completion.choices[0]?.message?.content;
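The handler parses this response as untyped JSON further down. As a reading aid, the shape the schema allows can be written out in TypeScript; these type names are not part of the package and are only one way the data could be modelled:

    // Illustrative types only; derived from the JSON schema above.
    type BlockFormat = 0|1|2|3|4|5|6|7|8|9|10|11|12; // HEADER_1 .. QUOTE

    interface DocBlockMetadata {
      fontSize?: number; lineHeight?: number; paragraphSpacing?: number;
      indentWidth?: number; paddingX?: number; paddingY?: number;
      font?: 0|1|2|3|4|5;                  // TIMES_ROMAN .. HELVETICA_BOLD_ITALIC
      color?: string; background?: string; // "#RGB" or "#RRGGBB"
      align?: 'left' | 'center' | 'right';
    }

    interface DocBlock {
      format: BlockFormat;
      content: string | string[]; // lists and code blocks use string[]
      metadata?: DocBlockMetadata;
    }

    interface GeneratedDoc { title: string; blocks: DocBlock[]; }

    interface LabChatAIResponse { text: string; docs?: GeneratedDoc[] | null; }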
@@ -810,15 +892,92 @@ IMPORTANT INSTRUCTIONS:
  throw new Error('No response generated from inference API');
  }

- // Send AI response
- await sendAIMessage(response, conversationId, {
- subject: fullLabChat.class?.subject || 'Lab',
- });
+ // Parse the JSON response and generate PDF if docs are provided
+ try {
+ const jsonData = JSON.parse(response);
+
+
+ const attachmentIds: string[] = [];
+ // Generate PDFs if docs are provided
+ if (jsonData.docs && Array.isArray(jsonData.docs)) {
+
+ for (let i = 0; i < jsonData.docs.length; i++) {
+ const doc = jsonData.docs[i];
+ if (!doc.title || !doc.blocks || !Array.isArray(doc.blocks)) {
+ logger.error(`Document ${i + 1} is missing title or blocks`);
+ continue;
+ }
+
+ try {
+ let pdfBytes = await createPdf(doc.blocks);
+ if (pdfBytes) {
+ // Sanitize filename - remove special characters and limit length
+ const sanitizedTitle = doc.title
+ .replace(/[^a-zA-Z0-9\s\-_]/g, '')
+ .replace(/\s+/g, '_')
+ .substring(0, 50);
+
+ const filename = `${sanitizedTitle}_${uuidv4().substring(0, 8)}.pdf`;
+
+
+ logger.info(`PDF ${i + 1} generated successfully`, { labChatId, title: doc.title });
+ const gcpResult = await uploadFile(Buffer.from(pdfBytes).toString('base64'), `class/generated/${fullLabChat.classId}/${filename}`, 'application/pdf');
+ logger.info(`PDF ${i + 1} uploaded successfully`, { labChatId, filename });
+
+ const file = await prisma.file.create({
+ data: {
+ name: filename,
+ path: `class/generated/${fullLabChat.classId}/${filename}`,
+ type: 'application/pdf',
+ userId: fullLabChat.createdById,
+ },
+ });
+ attachmentIds.push(file.id);
+ } else {
+ logger.error(`PDF ${i + 1} creation returned undefined/null`, { labChatId, title: doc.title });
+ }
+ } catch (pdfError) {
+ logger.error(`PDF creation threw an error for document ${i + 1}:`, {
+ error: pdfError instanceof Error ? {
+ message: pdfError.message,
+ stack: pdfError.stack,
+ name: pdfError.name
+ } : pdfError,
+ labChatId,
+ title: doc.title
+ });
+ }
+ }
+ }
+
+ // Send the text response to the conversation
+ await sendAIMessage(jsonData.text || response, conversationId, {
+ attachments: {
+ connect: attachmentIds.map(id => ({ id })),
+ },
+ subject: fullLabChat.class?.subject || 'Lab',
+ });
+ } catch (parseError) {
+ logger.error('Failed to parse AI response or generate PDF:', { error: parseError, labChatId });
+ // Fallback: send the raw response if parsing fails
+ await sendAIMessage(response, conversationId, {
+ subject: fullLabChat.class?.subject || 'Lab',
+ });
+ }

  logger.info('AI response sent', { labChatId, conversationId });

  } catch (error) {
- logger.error('Failed to generate AI response:', { error, labChatId });
+ console.error('Full error object:', error);
+ logger.error('Failed to generate AI response:', {
+ error: error instanceof Error ? {
+ message: error.message,
+ stack: error.stack,
+ name: error.name
+ } : error,
+ labChatId
+ });
+ throw error; // Re-throw to see the full error in the calling function
  }
  }

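To make the filename sanitization step above concrete, here is a small worked example (the title is made up, and the eight-character UUID suffix differs on every run):

    // Mirrors the .replace()/.substring() chain in the diff above; illustrative input only.
    const title = 'Chemistry: Lab #3 - Titration';
    const sanitizedTitle = title
      .replace(/[^a-zA-Z0-9\s\-_]/g, '') // drops ':' and '#'  -> 'Chemistry Lab 3 - Titration'
      .replace(/\s+/g, '_')              // whitespace to '_'  -> 'Chemistry_Lab_3_-_Titration'
      .substring(0, 50);                 // cap at 50 characters
    // Final filename looks like 'Chemistry_Lab_3_-_Titration_1a2b3c4d.pdf'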
@@ -3,6 +3,7 @@ import { createTRPCRouter, protectedProcedure } from '../trpc.js';
  import { prisma } from '../lib/prisma.js';
  import { pusher } from '../lib/pusher.js';
  import { TRPCError } from '@trpc/server';
+ import { logger } from '../utils/logger.js';

  export const messageRouter = createTRPCRouter({
  list: protectedProcedure
@@ -42,6 +43,13 @@ export const messageRouter = createTRPCRouter({
  }),
  },
  include: {
+ attachments: {
+ select: {
+ id: true,
+ name: true,
+ type: true,
+ },
+ },
  sender: {
  select: {
  id: true,
@@ -90,6 +98,11 @@ export const messageRouter = createTRPCRouter({
  conversationId: message.conversationId,
  createdAt: message.createdAt,
  sender: message.sender,
+ attachments: message.attachments.map((attachment) => ({
+ id: attachment.id,
+ name: attachment.name,
+ type: attachment.type,
+ })),
  mentions: message.mentions.map((mention) => ({
  user: mention.user,
  })),
@@ -198,7 +211,7 @@ export const messageRouter = createTRPCRouter({
  mentionedUserIds,
  });
  } catch (error) {
- console.error('Failed to broadcast message:', error);
+ logger.error('Failed to broadcast message:', {error});
  // Don't fail the request if Pusher fails
  }

@@ -341,7 +354,7 @@ export const messageRouter = createTRPCRouter({
  mentionedUserIds,
  });
  } catch (error) {
- console.error('Failed to broadcast message update:', error);
+ logger.error('Failed to broadcast message update:', {error});
  // Don't fail the request if Pusher fails
  }

@@ -429,7 +442,7 @@ export const messageRouter = createTRPCRouter({
  senderId: existingMessage.senderId,
  });
  } catch (error) {
- console.error('Failed to broadcast message deletion:', error);
+ logger.error('Failed to broadcast message deletion:', {error});
  // Don't fail the request if Pusher fails
  }

@@ -480,7 +493,7 @@ export const messageRouter = createTRPCRouter({
  viewedAt: new Date(),
  });
  } catch (error) {
- console.error('Failed to broadcast conversation view:', error);
+ logger.error('Failed to broadcast conversation view:', {error});
  // Don't fail the request if Pusher fails
  }

@@ -529,7 +542,7 @@ export const messageRouter = createTRPCRouter({
  viewedAt: new Date(),
  });
  } catch (error) {
- console.error('Failed to broadcast mentions view:', error);
+ logger.error('Failed to broadcast mentions view:', {error});
  // Don't fail the request if Pusher fails
  }

@@ -43,6 +43,9 @@ export async function sendAIMessage(
  conversationId: string,
  options: {
  subject?: string;
+ attachments?: {
+ connect: { id: string }[];
+ };
  customSender?: {
  displayName: string;
  profilePicture?: string | null;
@@ -64,6 +67,14 @@ export async function sendAIMessage(
  content,
  senderId: getAIUserId(),
  conversationId,
+ ...(options.attachments && {
+ attachments: {
+ connect: options.attachments.connect,
+ },
+ }),
+ },
+ include: {
+ attachments: true,
  },
  });

@@ -93,6 +104,14 @@ export async function sendAIMessage(
  createdAt: aiMessage.createdAt,
  sender: senderInfo,
  mentionedUserIds: [],
+ attachments: aiMessage.attachments.map(attachment => ({
+ id: attachment.id,
+ attachmentId: attachment.id,
+ name: attachment.name,
+ type: attachment.type,
+ size: attachment.size,
+ path: attachment.path,
+ })),
  });
  } catch (error) {
  logger.error('Failed to broadcast AI message:', { error, messageId: aiMessage.id });
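With the expanded options type above, the lab-chat router can pass the generated file IDs straight through to sendAIMessage. A minimal call would look something like the following sketch; the file ID is a placeholder, and only the option names come from the diff itself:

    // Sketch based on the updated sendAIMessage signature; 'file-id-1' is a placeholder.
    await sendAIMessage('Here is your worksheet.', conversationId, {
      subject: 'Biology',
      attachments: { connect: [{ id: 'file-id-1' }] },
    });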