@goscribe/server 1.0.7 → 1.0.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (74)
  1. package/AUTH_FRONTEND_SPEC.md +21 -0
  2. package/CHAT_FRONTEND_SPEC.md +474 -0
  3. package/MEETINGSUMMARY_FRONTEND_SPEC.md +28 -0
  4. package/PODCAST_FRONTEND_SPEC.md +595 -0
  5. package/STUDYGUIDE_FRONTEND_SPEC.md +18 -0
  6. package/WORKSHEETS_FRONTEND_SPEC.md +26 -0
  7. package/WORKSPACE_FRONTEND_SPEC.md +47 -0
  8. package/dist/context.d.ts +1 -1
  9. package/dist/lib/ai-session.d.ts +26 -0
  10. package/dist/lib/ai-session.js +343 -0
  11. package/dist/lib/auth.js +10 -6
  12. package/dist/lib/inference.d.ts +2 -0
  13. package/dist/lib/inference.js +21 -0
  14. package/dist/lib/pusher.d.ts +14 -0
  15. package/dist/lib/pusher.js +94 -0
  16. package/dist/lib/storage.d.ts +10 -2
  17. package/dist/lib/storage.js +63 -6
  18. package/dist/routers/_app.d.ts +878 -100
  19. package/dist/routers/_app.js +8 -2
  20. package/dist/routers/ai-session.d.ts +0 -0
  21. package/dist/routers/ai-session.js +1 -0
  22. package/dist/routers/auth.d.ts +13 -11
  23. package/dist/routers/auth.js +50 -21
  24. package/dist/routers/chat.d.ts +171 -0
  25. package/dist/routers/chat.js +270 -0
  26. package/dist/routers/flashcards.d.ts +51 -39
  27. package/dist/routers/flashcards.js +143 -31
  28. package/dist/routers/meetingsummary.d.ts +0 -0
  29. package/dist/routers/meetingsummary.js +377 -0
  30. package/dist/routers/podcast.d.ts +277 -0
  31. package/dist/routers/podcast.js +847 -0
  32. package/dist/routers/studyguide.d.ts +54 -0
  33. package/dist/routers/studyguide.js +125 -0
  34. package/dist/routers/worksheets.d.ts +147 -40
  35. package/dist/routers/worksheets.js +348 -33
  36. package/dist/routers/workspace.d.ts +163 -8
  37. package/dist/routers/workspace.js +453 -8
  38. package/dist/server.d.ts +1 -1
  39. package/dist/server.js +7 -2
  40. package/dist/trpc.d.ts +5 -5
  41. package/package.json +11 -3
  42. package/prisma/migrations/20250826124819_add_worksheet_difficulty_and_estimated_time/migration.sql +213 -0
  43. package/prisma/migrations/20250826133236_add_worksheet_question_progress/migration.sql +31 -0
  44. package/prisma/migrations/migration_lock.toml +3 -0
  45. package/prisma/schema.prisma +87 -6
  46. package/prisma/seed.mjs +135 -0
  47. package/src/lib/ai-session.ts +411 -0
  48. package/src/lib/auth.ts +1 -1
  49. package/src/lib/inference.ts +21 -0
  50. package/src/lib/pusher.ts +104 -0
  51. package/src/lib/storage.ts +89 -6
  52. package/src/routers/_app.ts +6 -0
  53. package/src/routers/auth.ts +8 -4
  54. package/src/routers/chat.ts +275 -0
  55. package/src/routers/flashcards.ts +151 -33
  56. package/src/routers/meetingsummary.ts +416 -0
  57. package/src/routers/podcast.ts +934 -0
  58. package/src/routers/studyguide.ts +144 -0
  59. package/src/routers/worksheets.ts +346 -18
  60. package/src/routers/workspace.ts +500 -8
  61. package/src/server.ts +7 -2
  62. package/test-ai-integration.js +134 -0
  63. package/dist/context.d.ts.map +0 -1
  64. package/dist/index.d.ts.map +0 -1
  65. package/dist/lib/auth.d.ts.map +0 -1
  66. package/dist/lib/file.d.ts.map +0 -1
  67. package/dist/lib/prisma.d.ts.map +0 -1
  68. package/dist/lib/storage.d.ts.map +0 -1
  69. package/dist/routers/_app.d.ts.map +0 -1
  70. package/dist/routers/auth.d.ts.map +0 -1
  71. package/dist/routers/sample.js +0 -21
  72. package/dist/routers/workspace.d.ts.map +0 -1
  73. package/dist/server.d.ts.map +0 -1
  74. package/dist/trpc.d.ts.map +0 -1
@@ -0,0 +1,847 @@
1
+ import { z } from 'zod';
2
+ import { TRPCError } from '@trpc/server';
3
+ import { router, authedProcedure } from '../trpc.js';
4
+ import { v4 as uuidv4 } from 'uuid';
5
+ import inference from '../lib/inference.js';
6
+ import { uploadToGCS, generateSignedUrl, deleteFromGCS } from '../lib/storage.js';
7
+ import PusherService from '../lib/pusher.js';
8
// Prisma enum values mapped manually to avoid type import issues in ESM.
// Frozen so no code path can accidentally mutate these shared constants.
const ArtifactType = Object.freeze({
    PODCAST_EPISODE: 'PODCAST_EPISODE',
    STUDY_GUIDE: 'STUDY_GUIDE',
    FLASHCARD_SET: 'FLASHCARD_SET',
});
14
// Shape of one podcast segment as persisted inside a version's `data` blob.
// `audioUrl` is transient (signed URLs expire); `objectKey` is the durable
// handle into Google Cloud Storage used to re-sign URLs on demand.
const podcastSegmentSchema = z.object({
    id: z.string(),
    title: z.string(),
    content: z.string(),
    startTime: z.number(), // offset from episode start, in seconds
    duration: z.number(), // spoken length, in seconds
    keyPoints: z.array(z.string()),
    order: z.number().int(),
    audioUrl: z.string().optional(),
    objectKey: z.string().optional(), // Google Cloud Storage object key
});
26
// Client input for creating a new podcast episode.
// NOTE(review): `voice` and `speed` are validated and stored in metadata, but
// the TTS calls below hardcode a Murf voice — confirm they should be wired up.
const podcastInputSchema = z.object({
    title: z.string(),
    description: z.string().optional(),
    userPrompt: z.string(),
    voice: z.enum(['alloy', 'echo', 'fable', 'onyx', 'nova', 'shimmer']).default('nova'),
    speed: z.number().min(0.25).max(4.0).default(1.0),
    generateIntro: z.boolean().default(true),
    generateOutro: z.boolean().default(true),
    segmentByTopics: z.boolean().default(true),
});
37
// Full episode metadata stored as an artifact version's `data` payload,
// including the structured AI-generated summary block.
const podcastMetadataSchema = z.object({
    title: z.string(),
    description: z.string().optional(),
    totalDuration: z.number(), // sum of segment durations, in seconds
    voice: z.string(),
    speed: z.number(),
    segments: z.array(podcastSegmentSchema),
    summary: z.object({
        executiveSummary: z.string(),
        learningObjectives: z.array(z.string()),
        keyConcepts: z.array(z.string()),
        followUpActions: z.array(z.string()),
        targetAudience: z.string(),
        prerequisites: z.array(z.string()),
        tags: z.array(z.string()),
    }),
    generatedAt: z.string(), // ISO-8601 timestamp
});
56
+ export const podcast = router({
57
+ // List all podcast episodes for a workspace
58
+ listEpisodes: authedProcedure
59
+ .input(z.object({ workspaceId: z.string() }))
60
+ .query(async ({ ctx, input }) => {
61
+ const workspace = await ctx.db.workspace.findFirst({
62
+ where: { id: input.workspaceId, ownerId: ctx.session.user.id },
63
+ });
64
+ if (!workspace)
65
+ throw new TRPCError({ code: 'NOT_FOUND' });
66
+ const artifacts = await ctx.db.artifact.findMany({
67
+ where: {
68
+ workspaceId: input.workspaceId,
69
+ type: ArtifactType.PODCAST_EPISODE
70
+ },
71
+ include: {
72
+ versions: {
73
+ orderBy: { version: 'desc' },
74
+ take: 1, // Get only the latest version
75
+ },
76
+ },
77
+ orderBy: { updatedAt: 'desc' },
78
+ });
79
+ // Transform to include metadata from the latest version with fresh signed URLs
80
+ const episodesWithUrls = await Promise.all(artifacts.map(async (artifact) => {
81
+ const latestVersion = artifact.versions[0];
82
+ if (!latestVersion) {
83
+ // Return a consistent structure even when no version exists
84
+ return {
85
+ id: artifact.id,
86
+ title: artifact.title || 'Untitled Episode',
87
+ description: artifact.description || null,
88
+ metadata: null,
89
+ segments: [],
90
+ createdAt: artifact.createdAt,
91
+ updatedAt: artifact.updatedAt,
92
+ workspaceId: artifact.workspaceId,
93
+ type: artifact.type,
94
+ createdById: artifact.createdById,
95
+ isArchived: artifact.isArchived,
96
+ };
97
+ }
98
+ try {
99
+ const metadata = podcastMetadataSchema.parse(latestVersion.data);
100
+ // Generate fresh signed URLs for all segments
101
+ const segmentsWithUrls = await Promise.all(metadata.segments.map(async (segment) => {
102
+ if (segment.objectKey) {
103
+ try {
104
+ const signedUrl = await generateSignedUrl(segment.objectKey, 24); // 24 hours
105
+ return {
106
+ id: segment.id,
107
+ title: segment.title,
108
+ audioUrl: signedUrl,
109
+ objectKey: segment.objectKey,
110
+ startTime: segment.startTime,
111
+ duration: segment.duration,
112
+ order: segment.order,
113
+ };
114
+ }
115
+ catch (error) {
116
+ console.error(`Failed to generate signed URL for segment ${segment.id}:`, error);
117
+ return {
118
+ id: segment.id,
119
+ title: segment.title,
120
+ audioUrl: null,
121
+ objectKey: segment.objectKey,
122
+ startTime: segment.startTime,
123
+ duration: segment.duration,
124
+ order: segment.order,
125
+ };
126
+ }
127
+ }
128
+ return {
129
+ id: segment.id,
130
+ title: segment.title,
131
+ audioUrl: null,
132
+ objectKey: segment.objectKey,
133
+ startTime: segment.startTime,
134
+ duration: segment.duration,
135
+ order: segment.order,
136
+ };
137
+ }));
138
+ return {
139
+ id: artifact.id,
140
+ title: metadata.title, // Use title from version metadata
141
+ description: metadata.description, // Use description from version metadata
142
+ metadata: metadata,
143
+ segments: segmentsWithUrls,
144
+ createdAt: artifact.createdAt,
145
+ updatedAt: artifact.updatedAt,
146
+ workspaceId: artifact.workspaceId,
147
+ type: artifact.type,
148
+ createdById: artifact.createdById,
149
+ isArchived: artifact.isArchived,
150
+ };
151
+ }
152
+ catch (error) {
153
+ console.error('Failed to parse podcast metadata:', error);
154
+ // Return a consistent structure even when metadata parsing fails
155
+ return {
156
+ id: artifact.id,
157
+ title: artifact.title || 'Untitled Episode',
158
+ description: artifact.description || null,
159
+ metadata: null,
160
+ segments: [],
161
+ createdAt: artifact.createdAt,
162
+ updatedAt: artifact.updatedAt,
163
+ workspaceId: artifact.workspaceId,
164
+ type: artifact.type,
165
+ createdById: artifact.createdById,
166
+ isArchived: artifact.isArchived,
167
+ };
168
+ }
169
+ }));
170
+ return episodesWithUrls;
171
+ }),
172
+ // Get a specific podcast episode with segments and signed URLs
173
+ getEpisode: authedProcedure
174
+ .input(z.object({ episodeId: z.string() }))
175
+ .query(async ({ ctx, input }) => {
176
+ const episode = await ctx.db.artifact.findFirst({
177
+ where: {
178
+ id: input.episodeId,
179
+ type: ArtifactType.PODCAST_EPISODE,
180
+ workspace: { ownerId: ctx.session.user.id }
181
+ },
182
+ include: {
183
+ versions: {
184
+ orderBy: { version: 'desc' },
185
+ take: 1,
186
+ },
187
+ },
188
+ });
189
+ console.log(episode);
190
+ if (!episode)
191
+ throw new TRPCError({ code: 'NOT_FOUND' });
192
+ const latestVersion = episode.versions[0];
193
+ if (!latestVersion)
194
+ throw new TRPCError({ code: 'NOT_FOUND', message: 'No version found' });
195
+ console.log(latestVersion);
196
+ const metadata = podcastMetadataSchema.parse(latestVersion.data);
197
+ // Generate fresh signed URLs for all segments
198
+ const segmentsWithUrls = await Promise.all(metadata.segments.map(async (segment) => {
199
+ if (segment.objectKey) {
200
+ try {
201
+ const signedUrl = await generateSignedUrl(segment.objectKey, 24); // 24 hours
202
+ return {
203
+ id: segment.id,
204
+ title: segment.title,
205
+ content: segment.content,
206
+ audioUrl: signedUrl,
207
+ objectKey: segment.objectKey,
208
+ startTime: segment.startTime,
209
+ duration: segment.duration,
210
+ keyPoints: segment.keyPoints,
211
+ order: segment.order,
212
+ };
213
+ }
214
+ catch (error) {
215
+ console.error(`Failed to generate signed URL for segment ${segment.id}:`, error);
216
+ return {
217
+ id: segment.id,
218
+ title: segment.title,
219
+ content: segment.content,
220
+ audioUrl: null,
221
+ objectKey: segment.objectKey,
222
+ startTime: segment.startTime,
223
+ duration: segment.duration,
224
+ keyPoints: segment.keyPoints,
225
+ order: segment.order,
226
+ };
227
+ }
228
+ }
229
+ return {
230
+ id: segment.id,
231
+ title: segment.title,
232
+ content: segment.content,
233
+ audioUrl: null,
234
+ objectKey: segment.objectKey,
235
+ startTime: segment.startTime,
236
+ duration: segment.duration,
237
+ keyPoints: segment.keyPoints,
238
+ order: segment.order,
239
+ };
240
+ }));
241
+ return {
242
+ id: episode.id,
243
+ title: metadata.title, // Use title from version metadata
244
+ description: metadata.description, // Use description from version metadata
245
+ metadata,
246
+ segments: segmentsWithUrls,
247
+ content: latestVersion.content, // transcript
248
+ createdAt: episode.createdAt,
249
+ updatedAt: episode.updatedAt,
250
+ };
251
+ }),
252
+ // Generate podcast episode from text input
253
+ generateEpisode: authedProcedure
254
+ .input(z.object({
255
+ workspaceId: z.string(),
256
+ podcastData: podcastInputSchema,
257
+ }))
258
+ .mutation(async ({ ctx, input }) => {
259
+ const workspace = await ctx.db.workspace.findFirst({
260
+ where: { id: input.workspaceId, ownerId: ctx.session.user.id },
261
+ });
262
+ if (!workspace)
263
+ throw new TRPCError({ code: 'NOT_FOUND' });
264
+ try {
265
+ // Emit podcast generation start notification
266
+ await PusherService.emitTaskComplete(input.workspaceId, 'podcast_generation_start', {
267
+ title: input.podcastData.title
268
+ });
269
+ const studyGuide = await ctx.db.artifact.findFirst({
270
+ where: {
271
+ workspaceId: input.workspaceId,
272
+ type: ArtifactType.STUDY_GUIDE,
273
+ },
274
+ include: {
275
+ versions: {
276
+ orderBy: { version: 'desc' },
277
+ take: 1,
278
+ },
279
+ },
280
+ });
281
+ const latestVersion = studyGuide?.versions[0];
282
+ const studyGuideContent = latestVersion?.content || '';
283
+ // Step 1: Structure the content into segments using inference API
284
+ const structurePrompt = `You are a podcast content structuring assistant. Given a user prompt, create a complete podcast episode with engaging content and logical segments.
285
+
286
+ Based on the user's prompt (and any existing study guide context for this workspace), generate a podcast episode that:
287
+ - Addresses the user's request or topic
288
+ - Is educational, informative, and engaging
289
+ - Has natural, conversational language
290
+ - Flows logically from one segment to the next
291
+
292
+ Create segments that are:
293
+ - 2-5 minutes each when spoken
294
+ - Focused on specific topics or concepts
295
+ - Include key takeaways for each segment
296
+ - Use natural, conversational language suitable for audio
297
+
298
+ ${input.podcastData.generateIntro ? 'Include an engaging introduction segment that hooks the listener.' : ''}
299
+ ${input.podcastData.generateOutro ? 'Include a conclusion/outro segment that summarizes key points.' : ''}
300
+
301
+ Format your response as JSON:
302
+ {
303
+ "episodeTitle": "Enhanced title for the podcast",
304
+ "totalEstimatedDuration": "estimated duration in minutes",
305
+ "segments": [
306
+ {
307
+ "title": "Segment title",
308
+ "content": "Natural, conversational script for this segment",
309
+ "keyPoints": ["key point 1", "key point 2"],
310
+ "estimatedDuration": "duration in minutes",
311
+ "order": 1
312
+ }
313
+ ]
314
+ }
315
+
316
+ Title: ${input.podcastData.title}
317
+ Description: ${input.podcastData.description || 'No description provided'}
318
+ Users notes:
319
+ User Prompt: ${input.podcastData.userPrompt}
320
+
321
+ If there is a study guide artifact in this workspace, incorporate its key points and structure to improve coherence. Use it only as supportive context, do not copy verbatim.`;
322
+ const structureResponse = await inference(structurePrompt, 'podcast_structure');
323
+ const structureData = await structureResponse.json();
324
+ const structureContent = structureData.response || '';
325
+ let structuredContent;
326
+ try {
327
+ // Extract JSON from the response
328
+ const jsonMatch = structureContent.match(/\{[\s\S]*\}/);
329
+ if (!jsonMatch) {
330
+ throw new Error('No JSON found in response');
331
+ }
332
+ structuredContent = JSON.parse(jsonMatch[0]);
333
+ }
334
+ catch (parseError) {
335
+ console.error('Failed to parse structure response:', structureContent);
336
+ await PusherService.emitError(input.workspaceId, 'Failed to structure podcast content', 'podcast');
337
+ throw new TRPCError({
338
+ code: 'INTERNAL_SERVER_ERROR',
339
+ message: 'Failed to structure podcast content'
340
+ });
341
+ }
342
+ // Emit structure completion notification
343
+ await PusherService.emitTaskComplete(input.workspaceId, 'podcast_structure_complete', {
344
+ segmentsCount: structuredContent.segments?.length || 0
345
+ });
346
+ // Step 2: Generate audio for each segment
347
+ const segments = [];
348
+ let totalDuration = 0;
349
+ let fullTranscript = '';
350
+ // Emit audio generation start notification
351
+ await PusherService.emitTaskComplete(input.workspaceId, 'podcast_audio_generation_start', {
352
+ totalSegments: structuredContent.segments?.length || 0
353
+ });
354
+ for (const [index, segment] of structuredContent.segments.entries()) {
355
+ try {
356
+ // Emit segment generation progress
357
+ await PusherService.emitTaskComplete(input.workspaceId, 'podcast_segment_progress', {
358
+ currentSegment: index + 1,
359
+ totalSegments: structuredContent.segments.length,
360
+ segmentTitle: segment.title || `Segment ${index + 1}`
361
+ });
362
+ // Generate speech for this segment using Murf TTS
363
+ const mp3Response = await fetch('https://api.murf.ai/v1/speech/generate', {
364
+ method: 'POST',
365
+ headers: {
366
+ 'api-key': process.env.MURF_TTS_KEY || '',
367
+ 'Content-Type': 'application/json',
368
+ 'Accept': 'application/json',
369
+ },
370
+ body: JSON.stringify({
371
+ text: segment.content,
372
+ voiceId: 'en-US-natalie',
373
+ }),
374
+ });
375
+ if (!mp3Response.ok) {
376
+ throw new Error(`Murf TTS error: ${mp3Response.status} ${mp3Response.statusText}`);
377
+ }
378
+ // Parse the response to get the audio URL
379
+ const mp3Data = await mp3Response.json();
380
+ // Check for different possible response structures
381
+ const audioUrl = mp3Data.audioFile || mp3Data.audioUrl || mp3Data.url || mp3Data.downloadUrl;
382
+ if (!audioUrl) {
383
+ console.error('No audio URL found in Murf response. Available fields:', Object.keys(mp3Data));
384
+ throw new Error('No audio URL in Murf response');
385
+ }
386
+ // Download the actual audio file from the URL
387
+ const audioResponse = await fetch(audioUrl);
388
+ if (!audioResponse.ok) {
389
+ throw new Error(`Failed to download audio: ${audioResponse.status} ${audioResponse.statusText}`);
390
+ }
391
+ // Upload to Google Cloud Storage
392
+ const audioBuffer = Buffer.from(await audioResponse.arrayBuffer());
393
+ const fileName = `segment_${index + 1}.mp3`;
394
+ const uploadResult = await uploadToGCS(audioBuffer, fileName, 'audio/mpeg', false); // Keep private
395
+ // Estimate duration (roughly 150 words per minute for TTS)
396
+ const wordCount = segment.content.split(' ').length;
397
+ const estimatedDuration = Math.ceil((wordCount / 150) * 60); // in seconds
398
+ segments.push({
399
+ id: uuidv4(),
400
+ title: segment.title,
401
+ content: segment.content,
402
+ objectKey: uploadResult.objectKey, // Store object key for future operations
403
+ startTime: totalDuration,
404
+ duration: estimatedDuration,
405
+ keyPoints: segment.keyPoints || [],
406
+ order: segment.order || index + 1,
407
+ });
408
+ totalDuration += estimatedDuration;
409
+ fullTranscript += `\n\n## ${segment.title}\n\n${segment.content}`;
410
+ }
411
+ catch (audioError) {
412
+ console.error(`Error generating audio for segment ${index + 1}:`, audioError);
413
+ await PusherService.emitTaskComplete(input.workspaceId, 'podcast_segment_error', {
414
+ segmentIndex: index + 1,
415
+ error: audioError instanceof Error ? audioError.message : 'Unknown error'
416
+ });
417
+ // Continue with other segments even if one fails
418
+ }
419
+ }
420
+ // Emit audio generation completion notification
421
+ await PusherService.emitTaskComplete(input.workspaceId, 'podcast_audio_generation_complete', {
422
+ totalSegments: segments.length,
423
+ totalDuration: totalDuration
424
+ });
425
+ // Step 2.5: Prepare segment audio array for frontend joining
426
+ await PusherService.emitTaskComplete(input.workspaceId, 'podcast_audio_preparation_complete', {
427
+ totalSegments: segments.length
428
+ });
429
+ // Step 3: Generate episode summary using inference API
430
+ const summaryPrompt = `Create a comprehensive podcast episode summary including:
431
+ - Executive summary
432
+ - Learning objectives
433
+ - Key concepts covered
434
+ - Recommended follow-up actions
435
+ - Target audience
436
+ - Prerequisites (if any)
437
+
438
+ Format as JSON:
439
+ {
440
+ "executiveSummary": "Brief overview of the episode",
441
+ "learningObjectives": ["objective1", "objective2"],
442
+ "keyConcepts": ["concept1", "concept2"],
443
+ "followUpActions": ["action1", "action2"],
444
+ "targetAudience": "Description of target audience",
445
+ "prerequisites": ["prerequisite1", "prerequisite2"],
446
+ "tags": ["tag1", "tag2", "tag3"]
447
+ }
448
+
449
+ Podcast Title: ${structuredContent.episodeTitle}
450
+ Segments: ${JSON.stringify(segments.map(s => ({ title: s.title, keyPoints: s.keyPoints })))}`;
451
+ const summaryResponse = await inference(summaryPrompt, 'podcast_summary');
452
+ const summaryData = await summaryResponse.json();
453
+ const summaryContent = summaryData.response || '';
454
+ let episodeSummary;
455
+ try {
456
+ // Extract JSON from the response
457
+ const jsonMatch = summaryContent.match(/\{[\s\S]*\}/);
458
+ if (!jsonMatch) {
459
+ throw new Error('No JSON found in summary response');
460
+ }
461
+ episodeSummary = JSON.parse(jsonMatch[0]);
462
+ }
463
+ catch (parseError) {
464
+ console.error('Failed to parse summary response:', summaryContent);
465
+ await PusherService.emitTaskComplete(input.workspaceId, 'podcast_summary_error', {
466
+ error: 'Failed to parse summary response'
467
+ });
468
+ episodeSummary = {
469
+ executiveSummary: 'AI-generated podcast episode',
470
+ learningObjectives: [],
471
+ keyConcepts: [],
472
+ followUpActions: [],
473
+ targetAudience: 'General audience',
474
+ prerequisites: [],
475
+ tags: [],
476
+ };
477
+ }
478
+ // Emit summary generation completion notification
479
+ await PusherService.emitTaskComplete(input.workspaceId, 'podcast_summary_complete', {
480
+ summaryGenerated: true
481
+ });
482
+ // Step 4: Create artifact and initial version
483
+ const episodeTitle = structuredContent.episodeTitle || input.podcastData.title;
484
+ const artifact = await ctx.db.artifact.create({
485
+ data: {
486
+ workspaceId: input.workspaceId,
487
+ type: ArtifactType.PODCAST_EPISODE,
488
+ title: episodeTitle, // Store basic title for listing/searching
489
+ description: input.podcastData.description, // Store basic description for listing/searching
490
+ createdById: ctx.session.user.id,
491
+ },
492
+ });
493
+ // Create initial version with complete metadata
494
+ const metadata = {
495
+ title: episodeTitle,
496
+ description: input.podcastData.description,
497
+ totalDuration: totalDuration,
498
+ voice: input.podcastData.voice,
499
+ speed: input.podcastData.speed,
500
+ segments: segments, // Array of segments with audio URLs for frontend joining
501
+ summary: episodeSummary,
502
+ generatedAt: new Date().toISOString(),
503
+ };
504
+ await ctx.db.artifactVersion.create({
505
+ data: {
506
+ artifactId: artifact.id,
507
+ version: 1,
508
+ content: fullTranscript.trim(), // Full transcript as markdown
509
+ data: metadata,
510
+ createdById: ctx.session.user.id,
511
+ },
512
+ });
513
+ // Emit podcast generation completion notification
514
+ await PusherService.emitPodcastComplete(input.workspaceId, artifact);
515
+ return {
516
+ id: artifact.id,
517
+ title: metadata.title,
518
+ description: metadata.description,
519
+ metadata,
520
+ content: fullTranscript.trim(),
521
+ };
522
+ }
523
+ catch (error) {
524
+ console.error('Error generating podcast episode:', error);
525
+ await PusherService.emitError(input.workspaceId, `Failed to generate podcast episode: ${error instanceof Error ? error.message : 'Unknown error'}`, 'podcast');
526
+ throw new TRPCError({
527
+ code: 'INTERNAL_SERVER_ERROR',
528
+ message: `Failed to generate podcast episode: ${error instanceof Error ? error.message : 'Unknown error'}`
529
+ });
530
+ }
531
+ }),
532
+ // Regenerate a specific segment
533
+ regenerateSegment: authedProcedure
534
+ .input(z.object({
535
+ episodeId: z.string(),
536
+ segmentId: z.string(),
537
+ newContent: z.string().optional(),
538
+ voice: z.enum(['alloy', 'echo', 'fable', 'onyx', 'nova', 'shimmer']).optional(),
539
+ speed: z.number().min(0.25).max(4.0).optional(),
540
+ }))
541
+ .mutation(async ({ ctx, input }) => {
542
+ const episode = await ctx.db.artifact.findFirst({
543
+ where: {
544
+ id: input.episodeId,
545
+ type: ArtifactType.PODCAST_EPISODE,
546
+ workspace: { ownerId: ctx.session.user.id }
547
+ },
548
+ include: {
549
+ workspace: true,
550
+ versions: {
551
+ orderBy: { version: 'desc' },
552
+ take: 1,
553
+ },
554
+ },
555
+ });
556
+ if (!episode)
557
+ throw new TRPCError({ code: 'NOT_FOUND' });
558
+ const latestVersion = episode.versions[0];
559
+ if (!latestVersion)
560
+ throw new TRPCError({ code: 'NOT_FOUND', message: 'No version found' });
561
+ const metadata = podcastMetadataSchema.parse(latestVersion.data);
562
+ const segment = metadata.segments.find(s => s.id === input.segmentId);
563
+ if (!segment)
564
+ throw new TRPCError({ code: 'NOT_FOUND', message: 'Segment not found' });
565
+ try {
566
+ // Emit segment regeneration start notification
567
+ await PusherService.emitTaskComplete(episode.workspaceId, 'podcast_segment_regeneration_start', {
568
+ segmentId: input.segmentId,
569
+ segmentTitle: segment.title || 'Untitled Segment'
570
+ });
571
+ // Use new content or existing content
572
+ const contentToSpeak = input.newContent || segment.content;
573
+ const voice = input.voice || metadata.voice || 'nova';
574
+ const speed = input.speed || metadata.speed || 1.0;
575
+ // Generate new audio using Murf TTS
576
+ const mp3Response = await fetch('https://api.murf.ai/v1/speech/generate', {
577
+ method: 'POST',
578
+ headers: {
579
+ 'api-key': process.env.MURF_TTS_KEY || '',
580
+ 'Content-Type': 'application/json',
581
+ 'Accept': 'application/json',
582
+ },
583
+ body: JSON.stringify({
584
+ text: contentToSpeak,
585
+ voiceId: 'en-US-natalie',
586
+ }),
587
+ });
588
+ if (!mp3Response.ok) {
589
+ throw new Error(`Murf TTS error: ${mp3Response.status} ${mp3Response.statusText}`);
590
+ }
591
+ // Parse the response to get the audio URL
592
+ const mp3Data = await mp3Response.json();
593
+ // Check for different possible response structures
594
+ const audioUrl = mp3Data.audioFile || mp3Data.audioUrl || mp3Data.url || mp3Data.downloadUrl;
595
+ if (!audioUrl) {
596
+ console.error('No audio URL found in Murf response. Available fields:', Object.keys(mp3Data));
597
+ throw new Error('No audio URL in Murf response');
598
+ }
599
+ // Download the actual audio file from the URL
600
+ const audioResponse = await fetch(audioUrl);
601
+ if (!audioResponse.ok) {
602
+ throw new Error(`Failed to download audio: ${audioResponse.status} ${audioResponse.statusText}`);
603
+ }
604
+ // Upload to Google Cloud Storage
605
+ const audioBuffer = Buffer.from(await audioResponse.arrayBuffer());
606
+ const fileName = `segment_${segment.order}_${Date.now()}.mp3`;
607
+ const uploadResult = await uploadToGCS(audioBuffer, fileName, 'audio/mpeg', false); // Keep private
608
+ // Update segment data
609
+ segment.content = contentToSpeak;
610
+ segment.objectKey = uploadResult.objectKey; // Store object key
611
+ // Recalculate duration
612
+ const wordCount = contentToSpeak.split(' ').length;
613
+ segment.duration = Math.ceil((wordCount / 150) * 60);
614
+ // Recalculate start times for subsequent segments
615
+ let currentTime = 0;
616
+ for (const seg of metadata.segments) {
617
+ if (seg.order < segment.order) {
618
+ currentTime += seg.duration;
619
+ }
620
+ else if (seg.order === segment.order) {
621
+ seg.startTime = currentTime;
622
+ currentTime += seg.duration;
623
+ }
624
+ else {
625
+ seg.startTime = currentTime;
626
+ currentTime += seg.duration;
627
+ }
628
+ }
629
+ // Update total duration
630
+ metadata.totalDuration = currentTime;
631
+ // Rebuild transcript
632
+ const fullTranscript = metadata.segments
633
+ .sort((a, b) => a.order - b.order)
634
+ .map(s => `\n\n## ${s.title}\n\n${s.content}`)
635
+ .join('');
636
+ // Step: Update segment audio (no need to regenerate full episode)
637
+ await PusherService.emitTaskComplete(episode.workspaceId, 'podcast_segment_audio_updated', {
638
+ segmentId: input.segmentId,
639
+ totalSegments: metadata.segments.length
640
+ });
641
+ // Create new version
642
+ const nextVersion = (latestVersion.version || 0) + 1;
643
+ await ctx.db.artifactVersion.create({
644
+ data: {
645
+ artifactId: input.episodeId,
646
+ version: nextVersion,
647
+ content: fullTranscript.trim(),
648
+ data: metadata,
649
+ createdById: ctx.session.user.id,
650
+ },
651
+ });
652
+ // Emit segment regeneration completion notification
653
+ await PusherService.emitTaskComplete(episode.workspaceId, 'podcast_segment_regeneration_complete', {
654
+ segmentId: input.segmentId,
655
+ segmentTitle: segment.title || 'Untitled Segment',
656
+ duration: segment.duration
657
+ });
658
+ return {
659
+ segmentId: input.segmentId,
660
+ audioUrl: segment.audioUrl,
661
+ duration: segment.duration,
662
+ content: segment.content,
663
+ totalDuration: metadata.totalDuration,
664
+ };
665
+ }
666
+ catch (error) {
667
+ console.error('Error regenerating segment:', error);
668
+ await PusherService.emitError(episode.workspaceId, `Failed to regenerate segment: ${error instanceof Error ? error.message : 'Unknown error'}`, 'podcast');
669
+ throw new TRPCError({
670
+ code: 'INTERNAL_SERVER_ERROR',
671
+ message: `Failed to regenerate segment: ${error instanceof Error ? error.message : 'Unknown error'}`
672
+ });
673
+ }
674
+ }),
675
+ // Get episode schema/structure for navigation
676
+ getEpisodeSchema: authedProcedure
677
+ .input(z.object({ episodeId: z.string() }))
678
+ .query(async ({ ctx, input }) => {
679
+ const episode = await ctx.db.artifact.findFirst({
680
+ where: {
681
+ id: input.episodeId,
682
+ type: ArtifactType.PODCAST_EPISODE,
683
+ workspace: { ownerId: ctx.session.user.id }
684
+ },
685
+ include: {
686
+ versions: {
687
+ orderBy: { version: 'desc' },
688
+ take: 1,
689
+ },
690
+ },
691
+ });
692
+ if (!episode)
693
+ throw new TRPCError({ code: 'NOT_FOUND' });
694
+ const latestVersion = episode.versions[0];
695
+ if (!latestVersion)
696
+ throw new TRPCError({ code: 'NOT_FOUND', message: 'No version found' });
697
+ const metadata = podcastMetadataSchema.parse(latestVersion.data);
698
+ return {
699
+ segments: metadata.segments.map(s => ({
700
+ id: s.id,
701
+ title: s.title,
702
+ startTime: s.startTime,
703
+ duration: s.duration,
704
+ keyPoints: s.keyPoints,
705
+ order: s.order,
706
+ })),
707
+ summary: metadata.summary,
708
+ metadata: {
709
+ title: metadata.title,
710
+ description: metadata.description,
711
+ totalDuration: metadata.totalDuration,
712
+ voice: metadata.voice,
713
+ speed: metadata.speed,
714
+ },
715
+ };
716
+ }),
717
+ // Update episode metadata
718
+ updateEpisode: authedProcedure
719
+ .input(z.object({
720
+ episodeId: z.string(),
721
+ title: z.string().optional(),
722
+ description: z.string().optional(),
723
+ }))
724
+ .mutation(async ({ ctx, input }) => {
725
+ const episode = await ctx.db.artifact.findFirst({
726
+ where: {
727
+ id: input.episodeId,
728
+ type: ArtifactType.PODCAST_EPISODE,
729
+ workspace: { ownerId: ctx.session.user.id }
730
+ },
731
+ include: {
732
+ versions: {
733
+ orderBy: { version: 'desc' },
734
+ take: 1,
735
+ },
736
+ },
737
+ });
738
+ if (!episode)
739
+ throw new TRPCError({ code: 'NOT_FOUND' });
740
+ const latestVersion = episode.versions[0];
741
+ if (!latestVersion)
742
+ throw new TRPCError({ code: 'NOT_FOUND', message: 'No version found' });
743
+ const metadata = podcastMetadataSchema.parse(latestVersion.data);
744
+ // Update metadata
745
+ if (input.title)
746
+ metadata.title = input.title;
747
+ if (input.description)
748
+ metadata.description = input.description;
749
+ // Create new version with updated metadata
750
+ const nextVersion = (latestVersion.version || 0) + 1;
751
+ await ctx.db.artifactVersion.create({
752
+ data: {
753
+ artifactId: input.episodeId,
754
+ version: nextVersion,
755
+ content: latestVersion.content,
756
+ data: metadata,
757
+ createdById: ctx.session.user.id,
758
+ },
759
+ });
760
+ // Update the artifact with basic info for listing/searching
761
+ return ctx.db.artifact.update({
762
+ where: { id: input.episodeId },
763
+ data: {
764
+ title: input.title ?? episode.title,
765
+ description: input.description ?? episode.description,
766
+ updatedAt: new Date(),
767
+ },
768
+ });
769
+ }),
770
+ // Delete episode and associated audio files
771
+ deleteEpisode: authedProcedure
772
+ .input(z.object({ episodeId: z.string() }))
773
+ .mutation(async ({ ctx, input }) => {
774
+ const episode = await ctx.db.artifact.findFirst({
775
+ where: {
776
+ id: input.episodeId,
777
+ type: ArtifactType.PODCAST_EPISODE,
778
+ workspace: { ownerId: ctx.session.user.id }
779
+ },
780
+ include: {
781
+ versions: {
782
+ orderBy: { version: 'desc' },
783
+ take: 1,
784
+ },
785
+ },
786
+ });
787
+ if (!episode)
788
+ throw new TRPCError({ code: 'NOT_FOUND' });
789
+ try {
790
+ // Emit episode deletion start notification
791
+ await PusherService.emitTaskComplete(episode.workspaceId, 'podcast_deletion_start', {
792
+ episodeId: input.episodeId,
793
+ episodeTitle: episode.title || 'Untitled Episode'
794
+ });
795
+ // Parse episode data to get audio file paths
796
+ const latestVersion = episode.versions[0];
797
+ if (latestVersion) {
798
+ const metadata = podcastMetadataSchema.parse(latestVersion.data);
799
+ // Delete audio files from Google Cloud Storage
800
+ for (const segment of metadata.segments || []) {
801
+ if (segment.objectKey) {
802
+ try {
803
+ await deleteFromGCS(segment.objectKey);
804
+ }
805
+ catch (error) {
806
+ console.error(`Failed to delete audio file ${segment.objectKey}:`, error);
807
+ }
808
+ }
809
+ }
810
+ }
811
+ // Delete associated versions
812
+ await ctx.db.artifactVersion.deleteMany({
813
+ where: { artifactId: input.episodeId },
814
+ });
815
+ // Delete the artifact
816
+ await ctx.db.artifact.delete({
817
+ where: { id: input.episodeId },
818
+ });
819
+ // Emit episode deletion completion notification
820
+ await PusherService.emitTaskComplete(episode.workspaceId, 'podcast_deletion_complete', {
821
+ episodeId: input.episodeId,
822
+ episodeTitle: episode.title || 'Untitled Episode'
823
+ });
824
+ return true;
825
+ }
826
+ catch (error) {
827
+ console.error('Error deleting episode:', error);
828
+ await PusherService.emitError(episode.workspaceId, `Failed to delete episode: ${error instanceof Error ? error.message : 'Unknown error'}`, 'podcast');
829
+ throw new TRPCError({
830
+ code: 'INTERNAL_SERVER_ERROR',
831
+ message: 'Failed to delete episode'
832
+ });
833
+ }
834
+ }),
835
+ // Get available voices for TTS
836
+ getAvailableVoices: authedProcedure
837
+ .query(async () => {
838
+ return [
839
+ { id: 'alloy', name: 'Alloy', description: 'Neutral, balanced voice' },
840
+ { id: 'echo', name: 'Echo', description: 'Clear, professional voice' },
841
+ { id: 'fable', name: 'Fable', description: 'Warm, storytelling voice' },
842
+ { id: 'onyx', name: 'Onyx', description: 'Deep, authoritative voice' },
843
+ { id: 'nova', name: 'Nova', description: 'Friendly, conversational voice' },
844
+ { id: 'shimmer', name: 'Shimmer', description: 'Bright, energetic voice' },
845
+ ];
846
+ }),
847
+ });