yt-transcript-strapi-plugin 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +4 -0
- package/dist/_chunks/App-BZXINnvY.mjs +23 -0
- package/dist/_chunks/App-BZXINnvY.mjs.map +1 -0
- package/dist/_chunks/App-Dk5vxL_c.js +23 -0
- package/dist/_chunks/App-Dk5vxL_c.js.map +1 -0
- package/dist/_chunks/en-B4KWt_jN.js +5 -0
- package/dist/_chunks/en-B4KWt_jN.js.map +1 -0
- package/dist/_chunks/en-Byx4XI2L.mjs +5 -0
- package/dist/_chunks/en-Byx4XI2L.mjs.map +1 -0
- package/dist/_chunks/index-CenWR5nf.js +74 -0
- package/dist/_chunks/index-CenWR5nf.js.map +1 -0
- package/dist/_chunks/index-xAToV0M5.mjs +75 -0
- package/dist/_chunks/index-xAToV0M5.mjs.map +1 -0
- package/dist/admin/index.js +4 -0
- package/dist/admin/index.js.map +1 -0
- package/dist/admin/index.mjs +5 -0
- package/dist/admin/index.mjs.map +1 -0
- package/dist/admin/src/components/Initializer.d.ts +5 -0
- package/dist/admin/src/components/PluginIcon.d.ts +2 -0
- package/dist/admin/src/index.d.ts +11 -0
- package/dist/admin/src/pages/App.d.ts +2 -0
- package/dist/admin/src/pages/HomePage.d.ts +2 -0
- package/dist/admin/src/pluginId.d.ts +1 -0
- package/dist/admin/src/utils/getTranslation.d.ts +2 -0
- package/dist/server/index.js +499 -0
- package/dist/server/index.js.map +1 -0
- package/dist/server/index.mjs +478 -0
- package/dist/server/index.mjs.map +1 -0
- package/dist/server/src/bootstrap.d.ts +5 -0
- package/dist/server/src/config/index.d.ts +5 -0
- package/dist/server/src/content-types/index.d.ts +45 -0
- package/dist/server/src/content-types/transcript/index.d.ts +43 -0
- package/dist/server/src/controllers/controller.d.ts +13 -0
- package/dist/server/src/controllers/index.d.ts +14 -0
- package/dist/server/src/destroy.d.ts +5 -0
- package/dist/server/src/index.d.ts +112 -0
- package/dist/server/src/middlewares/index.d.ts +2 -0
- package/dist/server/src/policies/index.d.ts +2 -0
- package/dist/server/src/register.d.ts +5 -0
- package/dist/server/src/routes/admin.d.ts +9 -0
- package/dist/server/src/routes/content-api.d.ts +9 -0
- package/dist/server/src/routes/index.d.ts +25 -0
- package/dist/server/src/services/index.d.ts +14 -0
- package/dist/server/src/services/service.d.ts +14 -0
- package/dist/server/src/utils/extract-youtube-id.d.ts +1 -0
- package/dist/server/src/utils/fetch-transcript.d.ts +15 -0
- package/dist/server/src/utils/openai.d.ts +9 -0
- package/package.json +81 -0
- package/strapi-server.js +3 -0

package/dist/server/index.mjs
@@ -0,0 +1,478 @@
+import { BaseDocumentTransformer, Document } from "@langchain/core/documents";
+import { getEncoding } from "@langchain/core/utils/tiktoken";
+import { PromptTemplate } from "@langchain/core/prompts";
+import { ChatOpenAI } from "@langchain/openai";
+const bootstrap = ({ strapi }) => {
+};
+const destroy = ({ strapi }) => {
+};
+const register = ({ strapi }) => {
+};
+const config = {
+  default: {},
+  validator() {
+  }
+};
+const kind = "collectionType";
+const collectionName = "transcript";
+const info = {
+  singularName: "transcript",
+  pluralName: "transcripts",
+  displayName: "Transcript"
+};
+const options = {
+  draftAndPublish: false
+};
+const pluginOptions = {
+  "content-manager": {
+    visible: true
+  },
+  "content-type-builder": {
+    visible: false
+  }
+};
+const attributes = {
+  title: {
+    type: "string"
+  },
+  videoId: {
+    type: "string"
+  },
+  thumbnailUrl: {
+    type: "string"
+  },
+  fullTranscript: {
+    type: "richtext"
+  },
+  transcriptWithTimeCodes: {
+    type: "json"
+  },
+  readableTranscript: {
+    type: "richtext"
+  }
+};
+const schema = {
+  kind,
+  collectionName,
+  info,
+  options,
+  pluginOptions,
+  attributes
+};
+const transcript = {
+  schema
+};
+const contentTypes = {
+  transcript
+};
+function extractYouTubeID(urlOrID) {
+  const regExpID = /^[a-zA-Z0-9_-]{11}$/;
+  if (regExpID.test(urlOrID)) {
+    return urlOrID;
+  }
+  const regExpStandard = /youtube\.com\/watch\?v=([a-zA-Z0-9_-]+)/;
+  const regExpShorts = /youtube\.com\/shorts\/([a-zA-Z0-9_-]+)/;
+  const matchStandard = urlOrID.match(regExpStandard);
+  if (matchStandard) {
+    return matchStandard[1];
+  }
+  const matchShorts = urlOrID.match(regExpShorts);
+  if (matchShorts) {
+    return matchShorts[1];
+  }
+  return null;
+}
+const controller = ({ strapi }) => ({
+  async getTranscript(ctx) {
+    const videoId = extractYouTubeID(ctx.params.videoId);
+    if (!videoId) return ctx.body = { error: "Invalid YouTube URL or ID", data: null };
+    const found = await strapi.plugin("yt-transcript").service("service").findTranscript(videoId);
+    if (found) return ctx.body = { data: found };
+    const transcriptData = await strapi.plugin("yt-transcript").service("service").getTranscript(videoId);
+    const readableTranscript = await strapi.plugin("yt-transcript").service("service").generateHumanReadableTranscript(transcriptData.fullTranscript);
+    const payload = {
+      title: transcriptData.title,
+      transcript: transcriptData.transcript,
+      videoId: transcriptData.videoId,
+      thumbnailUrl: transcriptData.thumbnailUrl,
+      fullTranscript: transcriptData.fullTranscript,
+      transcriptWithTimeCodes: transcriptData.transcriptWithTimeCodes,
+      readableTranscript
+    };
+    console.log("Payload:", payload);
+    const transcript2 = await strapi.plugin("yt-transcript").service("service").saveTranscript(payload);
+    ctx.body = { data: transcript2 };
+  }
+});
+const controllers = {
+  controller
+};
+const middlewares = {};
+const policies = {};
+const contentApi = [
+  {
+    method: "GET",
+    path: "/yt-transcript/:videoId",
+    handler: "controller.getTranscript",
+    config: {
+      policies: []
+    }
+  }
+];
+const admin = [
+  {
+    method: "GET",
+    path: "/yt-transcript/:videoId",
+    handler: "controller.getTranscript",
+    config: {
+      policies: []
+    }
+  }
+];
+const routes = {
+  "content-api": {
+    type: "content-api",
+    routes: [...contentApi]
+  },
+  admin: {
+    type: "admin",
+    routes: [...admin]
+  }
+};
+class TextSplitter extends BaseDocumentTransformer {
+  constructor(fields) {
+    super(fields);
+    Object.defineProperty(this, "lc_namespace", {
+      enumerable: true,
+      configurable: true,
+      writable: true,
+      value: ["langchain", "document_transformers", "text_splitters"]
+    });
+    Object.defineProperty(this, "chunkSize", {
+      enumerable: true,
+      configurable: true,
+      writable: true,
+      value: 1e3
+    });
+    Object.defineProperty(this, "chunkOverlap", {
+      enumerable: true,
+      configurable: true,
+      writable: true,
+      value: 200
+    });
+    Object.defineProperty(this, "keepSeparator", {
+      enumerable: true,
+      configurable: true,
+      writable: true,
+      value: false
+    });
+    Object.defineProperty(this, "lengthFunction", {
+      enumerable: true,
+      configurable: true,
+      writable: true,
+      value: void 0
+    });
+    this.chunkSize = fields?.chunkSize ?? this.chunkSize;
+    this.chunkOverlap = fields?.chunkOverlap ?? this.chunkOverlap;
+    this.keepSeparator = fields?.keepSeparator ?? this.keepSeparator;
+    this.lengthFunction = fields?.lengthFunction ?? ((text) => text.length);
+    if (this.chunkOverlap >= this.chunkSize) {
+      throw new Error("Cannot have chunkOverlap >= chunkSize");
+    }
+  }
+  async transformDocuments(documents, chunkHeaderOptions = {}) {
+    return this.splitDocuments(documents, chunkHeaderOptions);
+  }
+  splitOnSeparator(text, separator) {
+    let splits;
+    if (separator) {
+      if (this.keepSeparator) {
+        const regexEscapedSeparator = separator.replace(/[/\-\\^$*+?.()|[\]{}]/g, "\\$&");
+        splits = text.split(new RegExp(`(?=${regexEscapedSeparator})`));
+      } else {
+        splits = text.split(separator);
+      }
+    } else {
+      splits = text.split("");
+    }
+    return splits.filter((s) => s !== "");
+  }
+  async createDocuments(texts, metadatas = [], chunkHeaderOptions = {}) {
+    const _metadatas = metadatas.length > 0 ? metadatas : [...Array(texts.length)].map(() => ({}));
+    const { chunkHeader = "", chunkOverlapHeader = "(cont'd) ", appendChunkOverlapHeader = false } = chunkHeaderOptions;
+    const documents = new Array();
+    for (let i = 0; i < texts.length; i += 1) {
+      const text = texts[i];
+      let lineCounterIndex = 1;
+      let prevChunk = null;
+      let indexPrevChunk = -1;
+      for (const chunk of await this.splitText(text)) {
+        let pageContent = chunkHeader;
+        const indexChunk = text.indexOf(chunk, indexPrevChunk + 1);
+        if (prevChunk === null) {
+          const newLinesBeforeFirstChunk = this.numberOfNewLines(text, 0, indexChunk);
+          lineCounterIndex += newLinesBeforeFirstChunk;
+        } else {
+          const indexEndPrevChunk = indexPrevChunk + await this.lengthFunction(prevChunk);
+          if (indexEndPrevChunk < indexChunk) {
+            const numberOfIntermediateNewLines = this.numberOfNewLines(text, indexEndPrevChunk, indexChunk);
+            lineCounterIndex += numberOfIntermediateNewLines;
+          } else if (indexEndPrevChunk > indexChunk) {
+            const numberOfIntermediateNewLines = this.numberOfNewLines(text, indexChunk, indexEndPrevChunk);
+            lineCounterIndex -= numberOfIntermediateNewLines;
+          }
+          if (appendChunkOverlapHeader) {
+            pageContent += chunkOverlapHeader;
+          }
+        }
+        const newLinesCount = this.numberOfNewLines(chunk);
+        const loc = _metadatas[i].loc && typeof _metadatas[i].loc === "object" ? { ..._metadatas[i].loc } : {};
+        loc.lines = {
+          from: lineCounterIndex,
+          to: lineCounterIndex + newLinesCount
+        };
+        const metadataWithLinesNumber = {
+          ..._metadatas[i],
+          loc
+        };
+        pageContent += chunk;
+        documents.push(new Document({
+          pageContent,
+          metadata: metadataWithLinesNumber
+        }));
+        lineCounterIndex += newLinesCount;
+        prevChunk = chunk;
+        indexPrevChunk = indexChunk;
+      }
+    }
+    return documents;
+  }
+  numberOfNewLines(text, start, end) {
+    const textSection = text.slice(start, end);
+    return (textSection.match(/\n/g) || []).length;
+  }
+  async splitDocuments(documents, chunkHeaderOptions = {}) {
+    const selectedDocuments = documents.filter((doc) => doc.pageContent !== void 0);
+    const texts = selectedDocuments.map((doc) => doc.pageContent);
+    const metadatas = selectedDocuments.map((doc) => doc.metadata);
+    return this.createDocuments(texts, metadatas, chunkHeaderOptions);
+  }
+  joinDocs(docs, separator) {
+    const text = docs.join(separator).trim();
+    return text === "" ? null : text;
+  }
+  async mergeSplits(splits, separator) {
+    const docs = [];
+    const currentDoc = [];
+    let total = 0;
+    for (const d of splits) {
+      const _len = await this.lengthFunction(d);
+      if (total + _len + currentDoc.length * separator.length > this.chunkSize) {
+        if (total > this.chunkSize) {
+          console.warn(`Created a chunk of size ${total}, +
+which is longer than the specified ${this.chunkSize}`);
+        }
+        if (currentDoc.length > 0) {
+          const doc2 = this.joinDocs(currentDoc, separator);
+          if (doc2 !== null) {
+            docs.push(doc2);
+          }
+          while (total > this.chunkOverlap || total + _len + currentDoc.length * separator.length > this.chunkSize && total > 0) {
+            total -= await this.lengthFunction(currentDoc[0]);
+            currentDoc.shift();
+          }
+        }
+      }
+      currentDoc.push(d);
+      total += _len;
+    }
+    const doc = this.joinDocs(currentDoc, separator);
+    if (doc !== null) {
+      docs.push(doc);
+    }
+    return docs;
+  }
+}
+class TokenTextSplitter extends TextSplitter {
+  static lc_name() {
+    return "TokenTextSplitter";
+  }
+  constructor(fields) {
+    super(fields);
+    Object.defineProperty(this, "encodingName", {
+      enumerable: true,
+      configurable: true,
+      writable: true,
+      value: void 0
+    });
+    Object.defineProperty(this, "allowedSpecial", {
+      enumerable: true,
+      configurable: true,
+      writable: true,
+      value: void 0
+    });
+    Object.defineProperty(this, "disallowedSpecial", {
+      enumerable: true,
+      configurable: true,
+      writable: true,
+      value: void 0
+    });
+    Object.defineProperty(this, "tokenizer", {
+      enumerable: true,
+      configurable: true,
+      writable: true,
+      value: void 0
+    });
+    this.encodingName = fields?.encodingName ?? "gpt2";
+    this.allowedSpecial = fields?.allowedSpecial ?? [];
+    this.disallowedSpecial = fields?.disallowedSpecial ?? "all";
+  }
+  async splitText(text) {
+    if (!this.tokenizer) {
+      this.tokenizer = await getEncoding(this.encodingName);
+    }
+    const splits = [];
+    const input_ids = this.tokenizer.encode(text, this.allowedSpecial, this.disallowedSpecial);
+    let start_idx = 0;
+    while (start_idx < input_ids.length) {
+      if (start_idx > 0) {
+        start_idx -= this.chunkOverlap;
+      }
+      const end_idx = Math.min(start_idx + this.chunkSize, input_ids.length);
+      const chunk_ids = input_ids.slice(start_idx, end_idx);
+      splits.push(this.tokenizer.decode(chunk_ids));
+      start_idx = end_idx;
+    }
+    return splits;
+  }
+}
+async function initializeModel({
+  openAIApiKey,
+  model,
+  temp
+}) {
+  return new ChatOpenAI({
+    temperature: temp,
+    openAIApiKey,
+    modelName: model,
+    maxTokens: 1e3
+  });
+}
+const fetchTranscript = async (identifier) => {
+  const { Innertube } = await import("youtubei.js");
+  const youtube = await Innertube.create({
+    lang: "en",
+    location: "US",
+    retrieve_player: false
+  });
+  try {
+    let cleanImageUrl = function(url) {
+      return url.split("?")[0];
+    };
+    const info2 = await youtube.getInfo(identifier);
+    const transcriptData = await info2.getTranscript();
+    const transcriptWithTimeCodes = transcriptData?.transcript?.content?.body?.initial_segments.map(
+      (segment) => {
+        const segmentDuration = Number(segment.end_ms) - Number(segment.start_ms);
+        return {
+          text: segment.snippet.text,
+          start: Number(segment.start_ms),
+          end: Number(segment.end_ms),
+          duration: segmentDuration
+        };
+      }
+    );
+    const fullTranscript = transcriptData?.transcript?.content?.body?.initial_segments.map(
+      (segment) => segment.snippet.text
+    ).join(" ");
+    const title = info2.basic_info.title;
+    const videoId = info2.basic_info.id;
+    const thumbnailUrl = cleanImageUrl(info2.basic_info.thumbnail[0].url);
+    return {
+      title,
+      videoId,
+      thumbnailUrl,
+      fullTranscript,
+      transcriptWithTimeCodes
+    };
+  } catch (error) {
+    console.error("Error fetching transcript:", error);
+    throw error;
+  }
+};
+async function processTextChunks(chunks, model) {
+  const punctuationPrompt = PromptTemplate.fromTemplate(
+    "Add proper punctuation and capitalization to the following text chunk:\n\n{chunk}"
+  );
+  const punctuationChain = punctuationPrompt.pipe(model);
+  const processedChunks = await Promise.all(
+    chunks.map(async (chunk) => {
+      const result = await punctuationChain.invoke({ chunk });
+      return result.content;
+    })
+  );
+  return processedChunks.join(" ");
+}
+async function generateModifiedTranscript(rawTranscript) {
+  const chatModel = await initializeModel({
+    openAIApiKey: process.env.OPEN_AI_KEY ?? "",
+    model: process.env.OPEN_AI_MODEL ?? "gpt-4o-mini",
+    temp: parseFloat(process.env.OPEN_AI_TEMPERATURE ?? "0.7")
+  });
+  const splitter = new TokenTextSplitter({
+    chunkSize: 1e3,
+    chunkOverlap: 200
+  });
+  const transcriptChunks = await splitter.createDocuments([rawTranscript]);
+  const chunkTexts = transcriptChunks.map((chunk) => chunk.pageContent);
+  const modifiedTranscript = await processTextChunks(chunkTexts, chatModel);
+  return modifiedTranscript;
+}
+const service = ({ strapi }) => ({
+  async getTranscript(identifier) {
+    const youtubeIdRegex = /^[a-zA-Z0-9_-]{11}$/;
+    const isValid = youtubeIdRegex.test(identifier);
+    if (!isValid) return { error: "Invalid video ID", data: null };
+    const transcriptData = await fetchTranscript(identifier);
+    return transcriptData;
+  },
+  async saveTranscript(payload) {
+    console.log("Saving transcript:", payload);
+    return await strapi.documents("plugin::yt-transcript.transcript").create({
+      data: payload
+    });
+  },
+  async findTranscript(videoId) {
+    console.log("Finding transcript for videoId:", videoId);
+    const transcriptData = await strapi.documents("plugin::yt-transcript.transcript").findFirst({
+      filters: { videoId }
+    });
+    console.log("Transcript found:", transcriptData?.title, "found");
+    if (!transcriptData) return null;
+    return transcriptData;
+  },
+  async generateHumanReadableTranscript(transcript2) {
+    console.log("Generating human readable transcript:", transcript2);
+    const modifiedTranscript = await generateModifiedTranscript(transcript2);
+    return modifiedTranscript;
+  }
+});
+const services = {
+  service
+};
+const index = {
+  register,
+  bootstrap,
+  destroy,
+  config,
+  controllers,
+  routes,
+  services,
+  contentTypes,
+  policies,
+  middlewares
+};
+export {
+  index as default
+};
+//# sourceMappingURL=index.mjs.map
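
For context, controller.getTranscript above resolves everything through strapi.plugin("yt-transcript").service("service"): it first checks findTranscript for a cached document and only fetches, rewrites, and saves a new transcript when nothing is stored. A minimal sketch of reusing that same service from custom code in a host Strapi app follows; the helper name is illustrative and not part of the package, and the sketch deliberately skips the readable-transcript generation and save step that the controller performs.

import type { Core } from "@strapi/strapi";

// Hypothetical host-app helper (not part of this package): reuse the plugin's
// service with the same cache-then-fetch flow as controller.getTranscript.
async function getOrFetchTranscript(strapi: Core.Strapi, videoId: string) {
  // Expects a bare 11-character video ID; service.getTranscript validates that format.
  const svc = strapi.plugin("yt-transcript").service("service");
  // findTranscript returns the stored document or null (see the service above).
  const cached = await svc.findTranscript(videoId);
  // Unlike the controller, nothing is rewritten with OpenAI or saved here.
  return cached ?? (await svc.getTranscript(videoId));
}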

package/dist/server/index.mjs.map
@@ -0,0 +1 @@
{"version":3,"file":"index.mjs","sources":["../../server/src/bootstrap.ts","../../server/src/destroy.ts","../../server/src/register.ts","../../server/src/config/index.ts","../../server/src/content-types/transcript/index.ts","../../server/src/content-types/index.ts","../../server/src/utils/extract-youtube-id.ts","../../server/src/controllers/controller.ts","../../server/src/controllers/index.ts","../../server/src/middlewares/index.ts","../../server/src/policies/index.ts","../../server/src/routes/content-api.ts","../../server/src/routes/admin.ts","../../server/src/routes/index.ts","../../node_modules/@langchain/textsplitters/dist/text_splitter.js","../../server/src/utils/openai.ts","../../server/src/utils/fetch-transcript.ts","../../server/src/services/service.ts","../../server/src/services/index.ts","../../server/src/index.ts"],"sourcesContent":["import type { Core } from '@strapi/strapi';\n\nconst bootstrap = ({ strapi }: { strapi: Core.Strapi }) => {\n // bootstrap phase\n};\n\nexport default bootstrap;\n","import type { Core } from '@strapi/strapi';\n\nconst destroy = ({ strapi }: { strapi: Core.Strapi }) => {\n // destroy phase\n};\n\nexport default destroy;\n","import type { Core } from '@strapi/strapi';\n\nconst register = ({ strapi }: { strapi: Core.Strapi }) => {\n // register phase\n};\n\nexport default register;\n","export default {\n default: {},\n validator() {},\n};\n","import schema from './schema.json';\n\nexport default {\n schema,\n};","import transcript from './transcript';\n\nexport default {\n transcript,\n};\n\n\n","export function extractYouTubeID(urlOrID: string): string | null {\n // Regular expression for YouTube ID format\n const regExpID = /^[a-zA-Z0-9_-]{11}$/;\n\n // Check if the input is a YouTube ID\n if (regExpID.test(urlOrID)) {\n return urlOrID;\n }\n\n // Regular expression for standard YouTube links\n const regExpStandard = /youtube\\.com\\/watch\\?v=([a-zA-Z0-9_-]+)/;\n\n // Regular expression for YouTube Shorts links\n const regExpShorts = /youtube\\.com\\/shorts\\/([a-zA-Z0-9_-]+)/;\n\n // Check for standard YouTube link\n const matchStandard = urlOrID.match(regExpStandard);\n if (matchStandard) {\n return matchStandard[1];\n }\n\n // Check for YouTube Shorts link\n const matchShorts = urlOrID.match(regExpShorts);\n if (matchShorts) {\n return matchShorts[1];\n }\n\n // Return null if no match is found\n return null;\n}","import type { Core } from '@strapi/strapi';\nimport { extractYouTubeID } from '../utils/extract-youtube-id';\nconst controller = ({ strapi }: { strapi: Core.Strapi }) => ({\n async getTranscript(ctx) {\n const videoId = extractYouTubeID(ctx.params.videoId);\n\n if (!videoId) return (ctx.body = { error: 'Invalid YouTube URL or ID', data: null });\n\n const found = await strapi\n .plugin('yt-transcript')\n .service('service')\n .findTranscript(videoId);\n\n if (found) return (ctx.body = { data: found });\n\n const transcriptData = await strapi\n .plugin('yt-transcript')\n .service('service')\n .getTranscript(videoId);\n\n const readableTranscript = await strapi\n .plugin('yt-transcript')\n .service('service')\n .generateHumanReadableTranscript(transcriptData.fullTranscript);\n\n const payload = {\n title: transcriptData.title,\n transcript: transcriptData.transcript,\n videoId: transcriptData.videoId,\n thumbnailUrl: transcriptData.thumbnailUrl,\n fullTranscript: transcriptData.fullTranscript,\n transcriptWithTimeCodes: transcriptData.transcriptWithTimeCodes,\n readableTranscript: readableTranscript,\n };\n\n console.log('Payload:', 
payload);\n\n const transcript = await strapi\n .plugin('yt-transcript')\n .service('service')\n .saveTranscript(payload);\n\n ctx.body = { data: transcript };\n },\n});\n\nexport default controller;\n","import controller from './controller';\n\nexport default {\n controller,\n};\n","export default {};\n","export default {};\n","export default [\n {\n method: 'GET',\n path: '/yt-transcript/:videoId',\n handler: 'controller.getTranscript',\n config: { \n policies: [], \n }, \n },\n];","export default [\n {\n method: 'GET',\n path: '/yt-transcript/:videoId',\n handler: 'controller.getTranscript',\n config: { \n policies: [], \n }, \n },\n];","\"use strict\";\n\nimport contentApi from \"./content-api\";\nimport admin from \"./admin\";\n\nexport default {\n \"content-api\": {\n type: \"content-api\",\n routes: [...contentApi],\n },\n admin: {\n type: \"admin\",\n routes: [...admin],\n },\n};","import { Document, BaseDocumentTransformer } from \"@langchain/core/documents\";\nimport { getEncoding } from \"@langchain/core/utils/tiktoken\";\nexport class TextSplitter extends BaseDocumentTransformer {\n constructor(fields) {\n super(fields);\n Object.defineProperty(this, \"lc_namespace\", {\n enumerable: true,\n configurable: true,\n writable: true,\n value: [\"langchain\", \"document_transformers\", \"text_splitters\"]\n });\n Object.defineProperty(this, \"chunkSize\", {\n enumerable: true,\n configurable: true,\n writable: true,\n value: 1000\n });\n Object.defineProperty(this, \"chunkOverlap\", {\n enumerable: true,\n configurable: true,\n writable: true,\n value: 200\n });\n Object.defineProperty(this, \"keepSeparator\", {\n enumerable: true,\n configurable: true,\n writable: true,\n value: false\n });\n Object.defineProperty(this, \"lengthFunction\", {\n enumerable: true,\n configurable: true,\n writable: true,\n value: void 0\n });\n this.chunkSize = fields?.chunkSize ?? this.chunkSize;\n this.chunkOverlap = fields?.chunkOverlap ?? this.chunkOverlap;\n this.keepSeparator = fields?.keepSeparator ?? this.keepSeparator;\n this.lengthFunction =\n fields?.lengthFunction ?? ((text) => text.length);\n if (this.chunkOverlap >= this.chunkSize) {\n throw new Error(\"Cannot have chunkOverlap >= chunkSize\");\n }\n }\n async transformDocuments(documents, chunkHeaderOptions = {}) {\n return this.splitDocuments(documents, chunkHeaderOptions);\n }\n splitOnSeparator(text, separator) {\n let splits;\n if (separator) {\n if (this.keepSeparator) {\n const regexEscapedSeparator = separator.replace(/[/\\-\\\\^$*+?.()|[\\]{}]/g, \"\\\\$&\");\n splits = text.split(new RegExp(`(?=${regexEscapedSeparator})`));\n }\n else {\n splits = text.split(separator);\n }\n }\n else {\n splits = text.split(\"\");\n }\n return splits.filter((s) => s !== \"\");\n }\n async createDocuments(texts, \n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n metadatas = [], chunkHeaderOptions = {}) {\n // if no metadata is provided, we create an empty one for each text\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n const _metadatas = metadatas.length > 0\n ? 
metadatas\n : [...Array(texts.length)].map(() => ({}));\n const { chunkHeader = \"\", chunkOverlapHeader = \"(cont'd) \", appendChunkOverlapHeader = false, } = chunkHeaderOptions;\n const documents = new Array();\n for (let i = 0; i < texts.length; i += 1) {\n const text = texts[i];\n let lineCounterIndex = 1;\n let prevChunk = null;\n let indexPrevChunk = -1;\n for (const chunk of await this.splitText(text)) {\n let pageContent = chunkHeader;\n // we need to count the \\n that are in the text before getting removed by the splitting\n const indexChunk = text.indexOf(chunk, indexPrevChunk + 1);\n if (prevChunk === null) {\n const newLinesBeforeFirstChunk = this.numberOfNewLines(text, 0, indexChunk);\n lineCounterIndex += newLinesBeforeFirstChunk;\n }\n else {\n const indexEndPrevChunk = indexPrevChunk + (await this.lengthFunction(prevChunk));\n if (indexEndPrevChunk < indexChunk) {\n const numberOfIntermediateNewLines = this.numberOfNewLines(text, indexEndPrevChunk, indexChunk);\n lineCounterIndex += numberOfIntermediateNewLines;\n }\n else if (indexEndPrevChunk > indexChunk) {\n const numberOfIntermediateNewLines = this.numberOfNewLines(text, indexChunk, indexEndPrevChunk);\n lineCounterIndex -= numberOfIntermediateNewLines;\n }\n if (appendChunkOverlapHeader) {\n pageContent += chunkOverlapHeader;\n }\n }\n const newLinesCount = this.numberOfNewLines(chunk);\n const loc = _metadatas[i].loc && typeof _metadatas[i].loc === \"object\"\n ? { ..._metadatas[i].loc }\n : {};\n loc.lines = {\n from: lineCounterIndex,\n to: lineCounterIndex + newLinesCount,\n };\n const metadataWithLinesNumber = {\n ..._metadatas[i],\n loc,\n };\n pageContent += chunk;\n documents.push(new Document({\n pageContent,\n metadata: metadataWithLinesNumber,\n }));\n lineCounterIndex += newLinesCount;\n prevChunk = chunk;\n indexPrevChunk = indexChunk;\n }\n }\n return documents;\n }\n numberOfNewLines(text, start, end) {\n const textSection = text.slice(start, end);\n return (textSection.match(/\\n/g) || []).length;\n }\n async splitDocuments(documents, chunkHeaderOptions = {}) {\n const selectedDocuments = documents.filter((doc) => doc.pageContent !== undefined);\n const texts = selectedDocuments.map((doc) => doc.pageContent);\n const metadatas = selectedDocuments.map((doc) => doc.metadata);\n return this.createDocuments(texts, metadatas, chunkHeaderOptions);\n }\n joinDocs(docs, separator) {\n const text = docs.join(separator).trim();\n return text === \"\" ? 
null : text;\n }\n async mergeSplits(splits, separator) {\n const docs = [];\n const currentDoc = [];\n let total = 0;\n for (const d of splits) {\n const _len = await this.lengthFunction(d);\n if (total + _len + currentDoc.length * separator.length >\n this.chunkSize) {\n if (total > this.chunkSize) {\n console.warn(`Created a chunk of size ${total}, +\nwhich is longer than the specified ${this.chunkSize}`);\n }\n if (currentDoc.length > 0) {\n const doc = this.joinDocs(currentDoc, separator);\n if (doc !== null) {\n docs.push(doc);\n }\n // Keep on popping if:\n // - we have a larger chunk than in the chunk overlap\n // - or if we still have any chunks and the length is long\n while (total > this.chunkOverlap ||\n (total + _len + currentDoc.length * separator.length >\n this.chunkSize &&\n total > 0)) {\n total -= await this.lengthFunction(currentDoc[0]);\n currentDoc.shift();\n }\n }\n }\n currentDoc.push(d);\n total += _len;\n }\n const doc = this.joinDocs(currentDoc, separator);\n if (doc !== null) {\n docs.push(doc);\n }\n return docs;\n }\n}\nexport class CharacterTextSplitter extends TextSplitter {\n static lc_name() {\n return \"CharacterTextSplitter\";\n }\n constructor(fields) {\n super(fields);\n Object.defineProperty(this, \"separator\", {\n enumerable: true,\n configurable: true,\n writable: true,\n value: \"\\n\\n\"\n });\n this.separator = fields?.separator ?? this.separator;\n }\n async splitText(text) {\n // First we naively split the large input into a bunch of smaller ones.\n const splits = this.splitOnSeparator(text, this.separator);\n return this.mergeSplits(splits, this.keepSeparator ? \"\" : this.separator);\n }\n}\nexport const SupportedTextSplitterLanguages = [\n \"cpp\",\n \"go\",\n \"java\",\n \"js\",\n \"php\",\n \"proto\",\n \"python\",\n \"rst\",\n \"ruby\",\n \"rust\",\n \"scala\",\n \"swift\",\n \"markdown\",\n \"latex\",\n \"html\",\n \"sol\",\n];\nexport class RecursiveCharacterTextSplitter extends TextSplitter {\n static lc_name() {\n return \"RecursiveCharacterTextSplitter\";\n }\n constructor(fields) {\n super(fields);\n Object.defineProperty(this, \"separators\", {\n enumerable: true,\n configurable: true,\n writable: true,\n value: [\"\\n\\n\", \"\\n\", \" \", \"\"]\n });\n this.separators = fields?.separators ?? this.separators;\n this.keepSeparator = fields?.keepSeparator ?? true;\n }\n async _splitText(text, separators) {\n const finalChunks = [];\n // Get appropriate separator to use\n let separator = separators[separators.length - 1];\n let newSeparators;\n for (let i = 0; i < separators.length; i += 1) {\n const s = separators[i];\n if (s === \"\") {\n separator = s;\n break;\n }\n if (text.includes(s)) {\n separator = s;\n newSeparators = separators.slice(i + 1);\n break;\n }\n }\n // Now that we have the separator, split the text\n const splits = this.splitOnSeparator(text, separator);\n // Now go merging things, recursively splitting longer texts.\n let goodSplits = [];\n const _separator = this.keepSeparator ? 
\"\" : separator;\n for (const s of splits) {\n if ((await this.lengthFunction(s)) < this.chunkSize) {\n goodSplits.push(s);\n }\n else {\n if (goodSplits.length) {\n const mergedText = await this.mergeSplits(goodSplits, _separator);\n finalChunks.push(...mergedText);\n goodSplits = [];\n }\n if (!newSeparators) {\n finalChunks.push(s);\n }\n else {\n const otherInfo = await this._splitText(s, newSeparators);\n finalChunks.push(...otherInfo);\n }\n }\n }\n if (goodSplits.length) {\n const mergedText = await this.mergeSplits(goodSplits, _separator);\n finalChunks.push(...mergedText);\n }\n return finalChunks;\n }\n async splitText(text) {\n return this._splitText(text, this.separators);\n }\n static fromLanguage(language, options) {\n return new RecursiveCharacterTextSplitter({\n ...options,\n separators: RecursiveCharacterTextSplitter.getSeparatorsForLanguage(language),\n });\n }\n static getSeparatorsForLanguage(language) {\n if (language === \"cpp\") {\n return [\n // Split along class definitions\n \"\\nclass \",\n // Split along function definitions\n \"\\nvoid \",\n \"\\nint \",\n \"\\nfloat \",\n \"\\ndouble \",\n // Split along control flow statements\n \"\\nif \",\n \"\\nfor \",\n \"\\nwhile \",\n \"\\nswitch \",\n \"\\ncase \",\n // Split by the normal type of lines\n \"\\n\\n\",\n \"\\n\",\n \" \",\n \"\",\n ];\n }\n else if (language === \"go\") {\n return [\n // Split along function definitions\n \"\\nfunc \",\n \"\\nvar \",\n \"\\nconst \",\n \"\\ntype \",\n // Split along control flow statements\n \"\\nif \",\n \"\\nfor \",\n \"\\nswitch \",\n \"\\ncase \",\n // Split by the normal type of lines\n \"\\n\\n\",\n \"\\n\",\n \" \",\n \"\",\n ];\n }\n else if (language === \"java\") {\n return [\n // Split along class definitions\n \"\\nclass \",\n // Split along method definitions\n \"\\npublic \",\n \"\\nprotected \",\n \"\\nprivate \",\n \"\\nstatic \",\n // Split along control flow statements\n \"\\nif \",\n \"\\nfor \",\n \"\\nwhile \",\n \"\\nswitch \",\n \"\\ncase \",\n // Split by the normal type of lines\n \"\\n\\n\",\n \"\\n\",\n \" \",\n \"\",\n ];\n }\n else if (language === \"js\") {\n return [\n // Split along function definitions\n \"\\nfunction \",\n \"\\nconst \",\n \"\\nlet \",\n \"\\nvar \",\n \"\\nclass \",\n // Split along control flow statements\n \"\\nif \",\n \"\\nfor \",\n \"\\nwhile \",\n \"\\nswitch \",\n \"\\ncase \",\n \"\\ndefault \",\n // Split by the normal type of lines\n \"\\n\\n\",\n \"\\n\",\n \" \",\n \"\",\n ];\n }\n else if (language === \"php\") {\n return [\n // Split along function definitions\n \"\\nfunction \",\n // Split along class definitions\n \"\\nclass \",\n // Split along control flow statements\n \"\\nif \",\n \"\\nforeach \",\n \"\\nwhile \",\n \"\\ndo \",\n \"\\nswitch \",\n \"\\ncase \",\n // Split by the normal type of lines\n \"\\n\\n\",\n \"\\n\",\n \" \",\n \"\",\n ];\n }\n else if (language === \"proto\") {\n return [\n // Split along message definitions\n \"\\nmessage \",\n // Split along service definitions\n \"\\nservice \",\n // Split along enum definitions\n \"\\nenum \",\n // Split along option definitions\n \"\\noption \",\n // Split along import statements\n \"\\nimport \",\n // Split along syntax declarations\n \"\\nsyntax \",\n // Split by the normal type of lines\n \"\\n\\n\",\n \"\\n\",\n \" \",\n \"\",\n ];\n }\n else if (language === \"python\") {\n return [\n // First, try to split along class definitions\n \"\\nclass \",\n \"\\ndef \",\n \"\\n\\tdef \",\n // Now split by the normal type of lines\n 
\"\\n\\n\",\n \"\\n\",\n \" \",\n \"\",\n ];\n }\n else if (language === \"rst\") {\n return [\n // Split along section titles\n \"\\n===\\n\",\n \"\\n---\\n\",\n \"\\n***\\n\",\n // Split along directive markers\n \"\\n.. \",\n // Split by the normal type of lines\n \"\\n\\n\",\n \"\\n\",\n \" \",\n \"\",\n ];\n }\n else if (language === \"ruby\") {\n return [\n // Split along method definitions\n \"\\ndef \",\n \"\\nclass \",\n // Split along control flow statements\n \"\\nif \",\n \"\\nunless \",\n \"\\nwhile \",\n \"\\nfor \",\n \"\\ndo \",\n \"\\nbegin \",\n \"\\nrescue \",\n // Split by the normal type of lines\n \"\\n\\n\",\n \"\\n\",\n \" \",\n \"\",\n ];\n }\n else if (language === \"rust\") {\n return [\n // Split along function definitions\n \"\\nfn \",\n \"\\nconst \",\n \"\\nlet \",\n // Split along control flow statements\n \"\\nif \",\n \"\\nwhile \",\n \"\\nfor \",\n \"\\nloop \",\n \"\\nmatch \",\n \"\\nconst \",\n // Split by the normal type of lines\n \"\\n\\n\",\n \"\\n\",\n \" \",\n \"\",\n ];\n }\n else if (language === \"scala\") {\n return [\n // Split along class definitions\n \"\\nclass \",\n \"\\nobject \",\n // Split along method definitions\n \"\\ndef \",\n \"\\nval \",\n \"\\nvar \",\n // Split along control flow statements\n \"\\nif \",\n \"\\nfor \",\n \"\\nwhile \",\n \"\\nmatch \",\n \"\\ncase \",\n // Split by the normal type of lines\n \"\\n\\n\",\n \"\\n\",\n \" \",\n \"\",\n ];\n }\n else if (language === \"swift\") {\n return [\n // Split along function definitions\n \"\\nfunc \",\n // Split along class definitions\n \"\\nclass \",\n \"\\nstruct \",\n \"\\nenum \",\n // Split along control flow statements\n \"\\nif \",\n \"\\nfor \",\n \"\\nwhile \",\n \"\\ndo \",\n \"\\nswitch \",\n \"\\ncase \",\n // Split by the normal type of lines\n \"\\n\\n\",\n \"\\n\",\n \" \",\n \"\",\n ];\n }\n else if (language === \"markdown\") {\n return [\n // First, try to split along Markdown headings (starting with level 2)\n \"\\n## \",\n \"\\n### \",\n \"\\n#### \",\n \"\\n##### \",\n \"\\n###### \",\n // Note the alternative syntax for headings (below) is not handled here\n // Heading level 2\n // ---------------\n // End of code block\n \"```\\n\\n\",\n // Horizontal lines\n \"\\n\\n***\\n\\n\",\n \"\\n\\n---\\n\\n\",\n \"\\n\\n___\\n\\n\",\n // Note that this splitter doesn't handle horizontal lines defined\n // by *three or more* of ***, ---, or ___, but this is not handled\n \"\\n\\n\",\n \"\\n\",\n \" \",\n \"\",\n ];\n }\n else if (language === \"latex\") {\n return [\n // First, try to split along Latex sections\n \"\\n\\\\chapter{\",\n \"\\n\\\\section{\",\n \"\\n\\\\subsection{\",\n \"\\n\\\\subsubsection{\",\n // Now split by environments\n \"\\n\\\\begin{enumerate}\",\n \"\\n\\\\begin{itemize}\",\n \"\\n\\\\begin{description}\",\n \"\\n\\\\begin{list}\",\n \"\\n\\\\begin{quote}\",\n \"\\n\\\\begin{quotation}\",\n \"\\n\\\\begin{verse}\",\n \"\\n\\\\begin{verbatim}\",\n // Now split by math environments\n \"\\n\\\\begin{align}\",\n \"$$\",\n \"$\",\n // Now split by the normal type of lines\n \"\\n\\n\",\n \"\\n\",\n \" \",\n \"\",\n ];\n }\n else if (language === \"html\") {\n return [\n // First, try to split along HTML tags\n \"<body>\",\n \"<div>\",\n \"<p>\",\n \"<br>\",\n \"<li>\",\n \"<h1>\",\n \"<h2>\",\n \"<h3>\",\n \"<h4>\",\n \"<h5>\",\n \"<h6>\",\n \"<span>\",\n \"<table>\",\n \"<tr>\",\n \"<td>\",\n \"<th>\",\n \"<ul>\",\n \"<ol>\",\n \"<header>\",\n \"<footer>\",\n \"<nav>\",\n // Head\n \"<head>\",\n \"<style>\",\n \"<script>\",\n 
\"<meta>\",\n \"<title>\",\n // Normal type of lines\n \" \",\n \"\",\n ];\n }\n else if (language === \"sol\") {\n return [\n // Split along compiler informations definitions\n \"\\npragma \",\n \"\\nusing \",\n // Split along contract definitions\n \"\\ncontract \",\n \"\\ninterface \",\n \"\\nlibrary \",\n // Split along method definitions\n \"\\nconstructor \",\n \"\\ntype \",\n \"\\nfunction \",\n \"\\nevent \",\n \"\\nmodifier \",\n \"\\nerror \",\n \"\\nstruct \",\n \"\\nenum \",\n // Split along control flow statements\n \"\\nif \",\n \"\\nfor \",\n \"\\nwhile \",\n \"\\ndo while \",\n \"\\nassembly \",\n // Split by the normal type of lines\n \"\\n\\n\",\n \"\\n\",\n \" \",\n \"\",\n ];\n }\n else {\n throw new Error(`Language ${language} is not supported.`);\n }\n }\n}\n/**\n * Implementation of splitter which looks at tokens.\n */\nexport class TokenTextSplitter extends TextSplitter {\n static lc_name() {\n return \"TokenTextSplitter\";\n }\n constructor(fields) {\n super(fields);\n Object.defineProperty(this, \"encodingName\", {\n enumerable: true,\n configurable: true,\n writable: true,\n value: void 0\n });\n Object.defineProperty(this, \"allowedSpecial\", {\n enumerable: true,\n configurable: true,\n writable: true,\n value: void 0\n });\n Object.defineProperty(this, \"disallowedSpecial\", {\n enumerable: true,\n configurable: true,\n writable: true,\n value: void 0\n });\n Object.defineProperty(this, \"tokenizer\", {\n enumerable: true,\n configurable: true,\n writable: true,\n value: void 0\n });\n this.encodingName = fields?.encodingName ?? \"gpt2\";\n this.allowedSpecial = fields?.allowedSpecial ?? [];\n this.disallowedSpecial = fields?.disallowedSpecial ?? \"all\";\n }\n async splitText(text) {\n if (!this.tokenizer) {\n this.tokenizer = await getEncoding(this.encodingName);\n }\n const splits = [];\n const input_ids = this.tokenizer.encode(text, this.allowedSpecial, this.disallowedSpecial);\n let start_idx = 0;\n while (start_idx < input_ids.length) {\n if (start_idx > 0) {\n start_idx -= this.chunkOverlap;\n }\n const end_idx = Math.min(start_idx + this.chunkSize, input_ids.length);\n const chunk_ids = input_ids.slice(start_idx, end_idx);\n splits.push(this.tokenizer.decode(chunk_ids));\n start_idx = end_idx;\n }\n return splits;\n }\n}\nexport class MarkdownTextSplitter extends RecursiveCharacterTextSplitter {\n constructor(fields) {\n super({\n ...fields,\n separators: RecursiveCharacterTextSplitter.getSeparatorsForLanguage(\"markdown\"),\n });\n }\n}\nexport class LatexTextSplitter extends RecursiveCharacterTextSplitter {\n constructor(fields) {\n super({\n ...fields,\n separators: RecursiveCharacterTextSplitter.getSeparatorsForLanguage(\"latex\"),\n });\n }\n}\n","import { ChatOpenAI } from \"@langchain/openai\";\n\ninterface InitializeModelProps {\n openAIApiKey: string;\n model: string;\n temp: number;\n maxTokens?: number;\n}\n\nexport async function initializeModel({\n openAIApiKey,\n model,\n temp,\n}: InitializeModelProps) {\n return new ChatOpenAI({\n temperature: temp,\n openAIApiKey: openAIApiKey,\n modelName: model,\n maxTokens: 1000,\n });\n}","export interface TranscriptSegment {\n text: string;\n start: number;\n end: number;\n duration: number;\n}\n\nexport interface TranscriptData {\n title: string;\n videoId: string;\n thumbnailUrl: string;\n fullTranscript: string;\n transcriptWithTimeCodes: TranscriptSegment[];\n}\n\nconst fetchTranscript = async (\n identifier: string\n): Promise<TranscriptData> => {\n const { Innertube } = await 
import('youtubei.js');\n\n const youtube = await Innertube.create({\n lang: 'en',\n location: 'US',\n retrieve_player: false,\n });\n\n try {\n const info = await youtube.getInfo(identifier);\n const transcriptData = await info.getTranscript();\n\n const transcriptWithTimeCodes: TranscriptSegment[] = transcriptData?.transcript?.content?.body?.initial_segments.map(\n (segment) => {\n const segmentDuration = Number(segment.end_ms) - Number(segment.start_ms);\n return {\n text: segment.snippet.text,\n start: Number(segment.start_ms),\n end: Number(segment.end_ms),\n duration: segmentDuration,\n };\n }\n );\n\n function cleanImageUrl(url) {\n return url.split('?')[0];\n }\n\n const fullTranscript = transcriptData?.transcript?.content?.body?.initial_segments.map(\n (segment) => segment.snippet.text\n ).join(' ');\n\n const title = info.basic_info.title;\n const videoId = info.basic_info.id;\n const thumbnailUrl = cleanImageUrl(info.basic_info.thumbnail[0].url);\n\n return {\n title,\n videoId,\n thumbnailUrl,\n fullTranscript,\n transcriptWithTimeCodes,\n };\n } catch (error) {\n console.error('Error fetching transcript:', error);\n throw error;\n }\n};\n\nexport default fetchTranscript;","import type { Core } from '@strapi/strapi';\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { TokenTextSplitter } from \"@langchain/textsplitters\";\nimport { PromptTemplate } from \"@langchain/core/prompts\";\n\nimport { initializeModel } from \"../utils/openai\";\nimport fetchTranscript from '../utils/fetch-transcript';\n\n\nasync function processTextChunks(chunks: string[], model: ChatOpenAI) {\n const punctuationPrompt = PromptTemplate.fromTemplate(\n \"Add proper punctuation and capitalization to the following text chunk:\\n\\n{chunk}\"\n );\n const punctuationChain = punctuationPrompt.pipe(model);\n\n const processedChunks = await Promise.all(\n chunks.map(async (chunk) => {\n const result = await punctuationChain.invoke({ chunk });\n return result.content as string;\n })\n );\n\n return processedChunks.join(\" \");\n}\n\nexport async function generateModifiedTranscript (rawTranscript: string) {\n const chatModel = await initializeModel({\n openAIApiKey: process.env.OPEN_AI_KEY ?? \"\",\n model: process.env.OPEN_AI_MODEL ?? \"gpt-4o-mini\",\n temp: parseFloat(process.env.OPEN_AI_TEMPERATURE ?? 
\"0.7\"),\n });\n\n const splitter = new TokenTextSplitter({\n chunkSize: 1000,\n chunkOverlap: 200,\n });\n\n const transcriptChunks = await splitter.createDocuments([rawTranscript]);\n const chunkTexts = transcriptChunks.map(chunk => chunk.pageContent);\n const modifiedTranscript = await processTextChunks(chunkTexts, chatModel);\n\n return modifiedTranscript;\n}\n\nconst service = ({ strapi }: { strapi: Core.Strapi }) => ({\n async getTranscript(identifier: string) {\n const youtubeIdRegex = /^[a-zA-Z0-9_-]{11}$/;\n const isValid = youtubeIdRegex.test(identifier);\n if (!isValid) return { error: 'Invalid video ID', data: null };\n const transcriptData = await fetchTranscript(identifier);\n return transcriptData;\n },\n\n async saveTranscript(payload) {\n console.log('Saving transcript:', payload);\n return await strapi.documents('plugin::yt-transcript.transcript').create({\n data: payload,\n });\n },\n\n async findTranscript(videoId) {\n console.log('Finding transcript for videoId:', videoId);\n const transcriptData = await strapi.documents('plugin::yt-transcript.transcript').findFirst({\n filters: { videoId },\n });\n\n console.log('Transcript found:', transcriptData?.title, 'found');\n\n if (!transcriptData) return null;\n return transcriptData;\n },\n\n async generateHumanReadableTranscript(transcript) {\n console.log('Generating human readable transcript:', transcript);\n const modifiedTranscript = await generateModifiedTranscript(transcript);\n return modifiedTranscript;\n },\n});\n\nexport default service;\n","import service from './service';\n\nexport default {\n service,\n};\n","/**\n * Application methods\n */\nimport bootstrap from './bootstrap';\nimport destroy from './destroy';\nimport register from './register';\n\n/**\n * Plugin server methods\n */\nimport config from './config';\nimport contentTypes from './content-types';\nimport controllers from './controllers';\nimport middlewares from './middlewares';\nimport policies from './policies';\nimport routes from './routes';\nimport services from './services';\n\nexport default {\n register,\n bootstrap,\n destroy,\n config,\n controllers,\n routes,\n services,\n contentTypes,\n policies,\n 
middlewares,\n};\n"],"names":["transcript","doc","info"],"mappings":";;;;AAEA,MAAM,YAAY,CAAC,EAAE,aAAsC;AAE3D;ACFA,MAAM,UAAU,CAAC,EAAE,aAAsC;AAEzD;ACFA,MAAM,WAAW,CAAC,EAAE,aAAsC;AAE1D;ACJA,MAAe,SAAA;AAAA,EACb,SAAS,CAAC;AAAA,EACV,YAAY;AAAA,EAAA;AACd;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;ACDA,MAAe,aAAA;AAAA,EACb;AACF;ACFA,MAAe,eAAA;AAAA,EACb;AACF;ACJO,SAAS,iBAAiB,SAAgC;AAE/D,QAAM,WAAW;AAGb,MAAA,SAAS,KAAK,OAAO,GAAG;AACnB,WAAA;AAAA,EAAA;AAIT,QAAM,iBAAiB;AAGvB,QAAM,eAAe;AAGf,QAAA,gBAAgB,QAAQ,MAAM,cAAc;AAClD,MAAI,eAAe;AACjB,WAAO,cAAc,CAAC;AAAA,EAAA;AAIlB,QAAA,cAAc,QAAQ,MAAM,YAAY;AAC9C,MAAI,aAAa;AACf,WAAO,YAAY,CAAC;AAAA,EAAA;AAIf,SAAA;AACT;AC3BA,MAAM,aAAa,CAAC,EAAE,cAAuC;AAAA,EAC3D,MAAM,cAAc,KAAK;AACvB,UAAM,UAAU,iBAAiB,IAAI,OAAO,OAAO;AAE/C,QAAA,CAAC,QAAiB,QAAA,IAAI,OAAO,EAAE,OAAO,6BAA6B,MAAM,KAAK;AAE5E,UAAA,QAAQ,MAAM,OACjB,OAAO,eAAe,EACtB,QAAQ,SAAS,EACjB,eAAe,OAAO;AAEzB,QAAI,MAAe,QAAA,IAAI,OAAO,EAAE,MAAM,MAAM;AAEtC,UAAA,iBAAiB,MAAM,OAC1B,OAAO,eAAe,EACtB,QAAQ,SAAS,EACjB,cAAc,OAAO;AAElB,UAAA,qBAAqB,MAAM,OAC9B,OAAO,eAAe,EACtB,QAAQ,SAAS,EACjB,gCAAgC,eAAe,cAAc;AAEhE,UAAM,UAAU;AAAA,MACd,OAAO,eAAe;AAAA,MACtB,YAAY,eAAe;AAAA,MAC3B,SAAS,eAAe;AAAA,MACxB,cAAc,eAAe;AAAA,MAC7B,gBAAgB,eAAe;AAAA,MAC/B,yBAAyB,eAAe;AAAA,MACxC;AAAA,IACF;AAEQ,YAAA,IAAI,YAAY,OAAO;AAEzB,UAAAA,cAAa,MAAM,OACtB,OAAO,eAAe,EACtB,QAAQ,SAAS,EACjB,eAAe,OAAO;AAErB,QAAA,OAAO,EAAE,MAAMA,YAAW;AAAA,EAAA;AAElC;AC1CA,MAAe,cAAA;AAAA,EACb;AACF;ACJA,MAAA,cAAe,CAAC;ACAhB,MAAA,WAAe,CAAC;ACAhB,MAAe,aAAA;AAAA,EACb;AAAA,IACE,QAAQ;AAAA,IACR,MAAM;AAAA,IACN,SAAS;AAAA,IACT,QAAQ;AAAA,MACN,UAAU,CAAA;AAAA,IAAC;AAAA,EACb;AAEJ;ACTA,MAAe,QAAA;AAAA,EACb;AAAA,IACE,QAAQ;AAAA,IACR,MAAM;AAAA,IACN,SAAS;AAAA,IACT,QAAQ;AAAA,MACN,UAAU,CAAA;AAAA,IAAC;AAAA,EACb;AAEJ;ACJA,MAAe,SAAA;AAAA,EACb,eAAe;AAAA,IACb,MAAM;AAAA,IACN,QAAQ,CAAC,GAAG,UAAU;AAAA,EACxB;AAAA,EACA,OAAO;AAAA,IACL,MAAM;AAAA,IACN,QAAQ,CAAC,GAAG,KAAK;AAAA,EAAA;AAErB;ACZO,MAAM,qBAAqB,wBAAwB;AAAA,EACtD,YAAY,QAAQ;AAChB,UAAM,MAAM;AACZ,WAAO,eAAe,MAAM,gBAAgB;AAAA,MACxC,YAAY;AAAA,MACZ,cAAc;AAAA,MACd,UAAU;AAAA,MACV,OAAO,CAAC,aAAa,yBAAyB,gBAAgB;AAAA,IAC1E,CAAS;AACD,WAAO,eAAe,MAAM,aAAa;AAAA,MACrC,YAAY;AAAA,MACZ,cAAc;AAAA,MACd,UAAU;AAAA,MACV,OAAO;AAAA,IACnB,CAAS;AACD,WAAO,eAAe,MAAM,gBAAgB;AAAA,MACxC,YAAY;AAAA,MACZ,cAAc;AAAA,MACd,UAAU;AAAA,MACV,OAAO;AAAA,IACnB,CAAS;AACD,WAAO,eAAe,MAAM,iBAAiB;AAAA,MACzC,YAAY;AAAA,MACZ,cAAc;AAAA,MACd,UAAU;AAAA,MACV,OAAO;AAAA,IACnB,CAAS;AACD,WAAO,eAAe,MAAM,kBAAkB;AAAA,MAC1C,YAAY;AAAA,MACZ,cAAc;AAAA,MACd,UAAU;AAAA,MACV,OAAO;AAAA,IACnB,CAAS;AACD,SAAK,YAAY,QAAQ,aAAa,KAAK;AAC3C,SAAK,eAAe,QAAQ,gBAAgB,KAAK;AACjD,SAAK,gBAAgB,QAAQ,iBAAiB,KAAK;AACnD,SAAK,iBACD,QAAQ,mBAAmB,CAAC,SAAS,KAAK;AAC9C,QAAI,KAAK,gBAAgB,KAAK,WAAW;AACrC,YAAM,IAAI,MAAM,uCAAuC;AAAA,IACnE;AAAA,EACA;AAAA,EACI,MAAM,mBAAmB,WAAW,qBAAqB,IAAI;AACzD,WAAO,KAAK,eAAe,WAAW,kBAAkB;AAAA,EAChE;AAAA,EACI,iBAAiB,MAAM,WAAW;AAC9B,QAAI;AACJ,QAAI,WAAW;AACX,UAAI,KAAK,eAAe;AACpB,cAAM,wBAAwB,UAAU,QAAQ,0BAA0B,MAAM;AAChF,iBAAS,KAAK,MAAM,IAAI,OAAO,MAAM,qBAAqB,GAAG,CAAC;AAAA,MAC9E,OACiB;AACD,iBAAS,KAAK,MAAM,SAAS;AAAA,MAC7C;AAAA,IACA,OACa;AACD,eAAS,KAAK,MAAM,EAAE;AAAA,IAClC;AACQ,WAAO,OAAO,OAAO,CAAC,MAAM,MAAM,EAAE;AAAA,EAC5C;AAAA,EACI,MAAM,gBAAgB,OAEtB,YAAY,CAAA,GAAI,qBAAqB,IAAI;AAGrC,UAAM,aAAa,UAAU,SAAS,IAChC,YACA,CAAC,GAAG,MAAM,MAAM,MAAM,CAAC,EAAE,IAAI,OAAO,CAAA,EAAG;AAC7C,UAAM,EAAE,cAAc,IAAI,qBAAqB,aAAa,2BAA2B,MAAK,IAAM;AAClG,UAAM,YAAY,IAAI,MAAO;AAC7B,aAAS,IAAI,GAAG,IAAI,MAAM,QAAQ,KAAK,GAAG;AACtC,YAAM,OAAO,MAAM,CAAC;AACpB,UAAI,mBAAmB;AACvB,UAAI,YAAY;AAChB,UAAI,iBAAiB;AACrB,iBAAW,SAAS,MAAM,KAAK,UAAU,IAAI,GAAG;AAC5C,YAAI,cAAc;AAElB,cAAM,aAAa,KAAK,QAAQ,OAAO,iBAA
iB,CAAC;AACzD,YAAI,cAAc,MAAM;AACpB,gBAAM,2BAA2B,KAAK,iBAAiB,MAAM,GAAG,UAAU;AAC1E,8BAAoB;AAAA,QACxC,OACqB;AACD,gBAAM,oBAAoB,iBAAkB,MAAM,KAAK,eAAe,SAAS;AAC/E,cAAI,oBAAoB,YAAY;AAChC,kBAAM,+BAA+B,KAAK,iBAAiB,MAAM,mBAAmB,UAAU;AAC9F,gCAAoB;AAAA,UAC5C,WAC6B,oBAAoB,YAAY;AACrC,kBAAM,+BAA+B,KAAK,iBAAiB,MAAM,YAAY,iBAAiB;AAC9F,gCAAoB;AAAA,UAC5C;AACoB,cAAI,0BAA0B;AAC1B,2BAAe;AAAA,UACvC;AAAA,QACA;AACgB,cAAM,gBAAgB,KAAK,iBAAiB,KAAK;AACjD,cAAM,MAAM,WAAW,CAAC,EAAE,OAAO,OAAO,WAAW,CAAC,EAAE,QAAQ,WACxD,EAAE,GAAG,WAAW,CAAC,EAAE,IAAG,IACtB,CAAE;AACR,YAAI,QAAQ;AAAA,UACR,MAAM;AAAA,UACN,IAAI,mBAAmB;AAAA,QAC1B;AACD,cAAM,0BAA0B;AAAA,UAC5B,GAAG,WAAW,CAAC;AAAA,UACf;AAAA,QACH;AACD,uBAAe;AACf,kBAAU,KAAK,IAAI,SAAS;AAAA,UACxB;AAAA,UACA,UAAU;AAAA,QAC9B,CAAiB,CAAC;AACF,4BAAoB;AACpB,oBAAY;AACZ,yBAAiB;AAAA,MACjC;AAAA,IACA;AACQ,WAAO;AAAA,EACf;AAAA,EACI,iBAAiB,MAAM,OAAO,KAAK;AAC/B,UAAM,cAAc,KAAK,MAAM,OAAO,GAAG;AACzC,YAAQ,YAAY,MAAM,KAAK,KAAK,CAAE,GAAE;AAAA,EAChD;AAAA,EACI,MAAM,eAAe,WAAW,qBAAqB,IAAI;AACrD,UAAM,oBAAoB,UAAU,OAAO,CAAC,QAAQ,IAAI,gBAAgB,MAAS;AACjF,UAAM,QAAQ,kBAAkB,IAAI,CAAC,QAAQ,IAAI,WAAW;AAC5D,UAAM,YAAY,kBAAkB,IAAI,CAAC,QAAQ,IAAI,QAAQ;AAC7D,WAAO,KAAK,gBAAgB,OAAO,WAAW,kBAAkB;AAAA,EACxE;AAAA,EACI,SAAS,MAAM,WAAW;AACtB,UAAM,OAAO,KAAK,KAAK,SAAS,EAAE,KAAM;AACxC,WAAO,SAAS,KAAK,OAAO;AAAA,EACpC;AAAA,EACI,MAAM,YAAY,QAAQ,WAAW;AACjC,UAAM,OAAO,CAAE;AACf,UAAM,aAAa,CAAE;AACrB,QAAI,QAAQ;AACZ,eAAW,KAAK,QAAQ;AACpB,YAAM,OAAO,MAAM,KAAK,eAAe,CAAC;AACxC,UAAI,QAAQ,OAAO,WAAW,SAAS,UAAU,SAC7C,KAAK,WAAW;AAChB,YAAI,QAAQ,KAAK,WAAW;AACxB,kBAAQ,KAAK,2BAA2B,KAAK;AAAA,qCAC5B,KAAK,SAAS,EAAE;AAAA,QACrD;AACgB,YAAI,WAAW,SAAS,GAAG;AACvB,gBAAMC,OAAM,KAAK,SAAS,YAAY,SAAS;AAC/C,cAAIA,SAAQ,MAAM;AACd,iBAAK,KAAKA,IAAG;AAAA,UACrC;AAIoB,iBAAO,QAAQ,KAAK,gBACf,QAAQ,OAAO,WAAW,SAAS,UAAU,SAC1C,KAAK,aACL,QAAQ,GAAI;AAChB,qBAAS,MAAM,KAAK,eAAe,WAAW,CAAC,CAAC;AAChD,uBAAW,MAAO;AAAA,UAC1C;AAAA,QACA;AAAA,MACA;AACY,iBAAW,KAAK,CAAC;AACjB,eAAS;AAAA,IACrB;AACQ,UAAM,MAAM,KAAK,SAAS,YAAY,SAAS;AAC/C,QAAI,QAAQ,MAAM;AACd,WAAK,KAAK,GAAG;AAAA,IACzB;AACQ,WAAO;AAAA,EACf;AACA;AA2dO,MAAM,0BAA0B,aAAa;AAAA,EAChD,OAAO,UAAU;AACb,WAAO;AAAA,EACf;AAAA,EACI,YAAY,QAAQ;AAChB,UAAM,MAAM;AACZ,WAAO,eAAe,MAAM,gBAAgB;AAAA,MACxC,YAAY;AAAA,MACZ,cAAc;AAAA,MACd,UAAU;AAAA,MACV,OAAO;AAAA,IACnB,CAAS;AACD,WAAO,eAAe,MAAM,kBAAkB;AAAA,MAC1C,YAAY;AAAA,MACZ,cAAc;AAAA,MACd,UAAU;AAAA,MACV,OAAO;AAAA,IACnB,CAAS;AACD,WAAO,eAAe,MAAM,qBAAqB;AAAA,MAC7C,YAAY;AAAA,MACZ,cAAc;AAAA,MACd,UAAU;AAAA,MACV,OAAO;AAAA,IACnB,CAAS;AACD,WAAO,eAAe,MAAM,aAAa;AAAA,MACrC,YAAY;AAAA,MACZ,cAAc;AAAA,MACd,UAAU;AAAA,MACV,OAAO;AAAA,IACnB,CAAS;AACD,SAAK,eAAe,QAAQ,gBAAgB;AAC5C,SAAK,iBAAiB,QAAQ,kBAAkB,CAAE;AAClD,SAAK,oBAAoB,QAAQ,qBAAqB;AAAA,EAC9D;AAAA,EACI,MAAM,UAAU,MAAM;AAClB,QAAI,CAAC,KAAK,WAAW;AACjB,WAAK,YAAY,MAAM,YAAY,KAAK,YAAY;AAAA,IAChE;AACQ,UAAM,SAAS,CAAE;AACjB,UAAM,YAAY,KAAK,UAAU,OAAO,MAAM,KAAK,gBAAgB,KAAK,iBAAiB;AACzF,QAAI,YAAY;AAChB,WAAO,YAAY,UAAU,QAAQ;AACjC,UAAI,YAAY,GAAG;AACf,qBAAa,KAAK;AAAA,MAClC;AACY,YAAM,UAAU,KAAK,IAAI,YAAY,KAAK,WAAW,UAAU,MAAM;AACrE,YAAM,YAAY,UAAU,MAAM,WAAW,OAAO;AACpD,aAAO,KAAK,KAAK,UAAU,OAAO,SAAS,CAAC;AAC5C,kBAAY;AAAA,IACxB;AACQ,WAAO;AAAA,EACf;AACA;ACtrBA,eAAsB,gBAAgB;AAAA,EACpC;AAAA,EACA;AAAA,EACA;AACF,GAAyB;AACvB,SAAO,IAAI,WAAW;AAAA,IACpB,aAAa;AAAA,IACb;AAAA,IACA,WAAW;AAAA,IACX,WAAW;AAAA,EAAA,CACZ;AACH;ACLA,MAAM,kBAAkB,OACtB,eAC4B;AAC5B,QAAM,EAAE,UAAA,IAAc,MAAM,OAAO,aAAa;AAE1C,QAAA,UAAU,MAAM,UAAU,OAAO;AAAA,IACrC,MAAM;AAAA,IACN,UAAU;AAAA,IACV,iBAAiB;AAAA,EAAA,CAClB;AAEG,MAAA;AAgBO,QAAA,gBAAT,SAAuB,KAAK;AAC1B,aAAO,IAAI,MAAM,GAAG,EAAE,CAAC;AAAA,IACzB;AAjBA,UAAMC,QAAO,MAAM,QAAQ,QAAQ,UAAU;AACvC,UAAA,iBAAiB,MAAMA,MAAK,cAAc;AAEhD
,UAAM,0BAA+C,gBAAgB,YAAY,SAAS,MAAM,iBAAiB;AAAA,MAC/G,CAAC,YAAY;AACX,cAAM,kBAAkB,OAAO,QAAQ,MAAM,IAAI,OAAO,QAAQ,QAAQ;AACjE,eAAA;AAAA,UACL,MAAM,QAAQ,QAAQ;AAAA,UACtB,OAAO,OAAO,QAAQ,QAAQ;AAAA,UAC9B,KAAK,OAAO,QAAQ,MAAM;AAAA,UAC1B,UAAU;AAAA,QACZ;AAAA,MAAA;AAAA,IAEJ;AAMA,UAAM,iBAAiB,gBAAgB,YAAY,SAAS,MAAM,iBAAiB;AAAA,MACjF,CAAC,YAAY,QAAQ,QAAQ;AAAA,IAAA,EAC7B,KAAK,GAAG;AAEJ,UAAA,QAAQA,MAAK,WAAW;AACxB,UAAA,UAAUA,MAAK,WAAW;AAChC,UAAM,eAAe,cAAcA,MAAK,WAAW,UAAU,CAAC,EAAE,GAAG;AAE5D,WAAA;AAAA,MACL;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAAA,WACO,OAAO;AACN,YAAA,MAAM,8BAA8B,KAAK;AAC3C,UAAA;AAAA,EAAA;AAEV;ACxDA,eAAe,kBAAkB,QAAkB,OAAmB;AACpE,QAAM,oBAAoB,eAAe;AAAA,IACvC;AAAA,EACF;AACM,QAAA,mBAAmB,kBAAkB,KAAK,KAAK;AAE/C,QAAA,kBAAkB,MAAM,QAAQ;AAAA,IACpC,OAAO,IAAI,OAAO,UAAU;AAC1B,YAAM,SAAS,MAAM,iBAAiB,OAAO,EAAE,OAAO;AACtD,aAAO,OAAO;AAAA,IACf,CAAA;AAAA,EACH;AAEO,SAAA,gBAAgB,KAAK,GAAG;AACjC;AAEA,eAAsB,2BAA4B,eAAuB;AACjE,QAAA,YAAY,MAAM,gBAAgB;AAAA,IACtC,cAAc,QAAQ,IAAI,eAAe;AAAA,IACzC,OAAO,QAAQ,IAAI,iBAAiB;AAAA,IACpC,MAAM,WAAW,QAAQ,IAAI,uBAAuB,KAAK;AAAA,EAAA,CAC1D;AAEK,QAAA,WAAW,IAAI,kBAAkB;AAAA,IACrC,WAAW;AAAA,IACX,cAAc;AAAA,EAAA,CACf;AAED,QAAM,mBAAmB,MAAM,SAAS,gBAAgB,CAAC,aAAa,CAAC;AACvE,QAAM,aAAa,iBAAiB,IAAI,CAAA,UAAS,MAAM,WAAW;AAClE,QAAM,qBAAqB,MAAM,kBAAkB,YAAY,SAAS;AAEjE,SAAA;AACT;AAEA,MAAM,UAAU,CAAC,EAAE,cAAuC;AAAA,EACxD,MAAM,cAAc,YAAoB;AACtC,UAAM,iBAAiB;AACjB,UAAA,UAAU,eAAe,KAAK,UAAU;AAC9C,QAAI,CAAC,QAAS,QAAO,EAAE,OAAO,oBAAoB,MAAM,KAAK;AACvD,UAAA,iBAAiB,MAAM,gBAAgB,UAAU;AAChD,WAAA;AAAA,EACT;AAAA,EAEA,MAAM,eAAe,SAAS;AACpB,YAAA,IAAI,sBAAsB,OAAO;AACzC,WAAO,MAAM,OAAO,UAAU,kCAAkC,EAAE,OAAO;AAAA,MACvE,MAAM;AAAA,IAAA,CACP;AAAA,EACH;AAAA,EAEA,MAAM,eAAe,SAAS;AACpB,YAAA,IAAI,mCAAmC,OAAO;AACtD,UAAM,iBAAmB,MAAM,OAAO,UAAU,kCAAkC,EAAE,UAAU;AAAA,MAC5F,SAAS,EAAE,QAAQ;AAAA,IAAA,CACpB;AAED,YAAQ,IAAI,qBAAqB,gBAAgB,OAAO,OAAO;AAE3D,QAAA,CAAC,eAAuB,QAAA;AACrB,WAAA;AAAA,EACT;AAAA,EAEA,MAAM,gCAAgCF,aAAY;AACxC,YAAA,IAAI,yCAAyCA,WAAU;AACzD,UAAA,qBAAqB,MAAM,2BAA2BA,WAAU;AAC/D,WAAA;AAAA,EAAA;AAEX;AC3EA,MAAe,WAAA;AAAA,EACb;AACF;ACcA,MAAe,QAAA;AAAA,EACb;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACF;","x_google_ignoreList":[14]}

package/dist/server/src/content-types/index.d.ts
@@ -0,0 +1,45 @@
+declare const _default: {
+    transcript: {
+        schema: {
+            kind: string;
+            collectionName: string;
+            info: {
+                singularName: string;
+                pluralName: string;
+                displayName: string;
+            };
+            options: {
+                draftAndPublish: boolean;
+            };
+            pluginOptions: {
+                "content-manager": {
+                    visible: boolean;
+                };
+                "content-type-builder": {
+                    visible: boolean;
+                };
+            };
+            attributes: {
+                title: {
+                    type: string;
+                };
+                videoId: {
+                    type: string;
+                };
+                thumbnailUrl: {
+                    type: string;
+                };
+                fullTranscript: {
+                    type: string;
+                };
+                transcriptWithTimeCodes: {
+                    type: string;
+                };
+                readableTranscript: {
+                    type: string;
+                };
+            };
+        };
+    };
+};
+export default _default;
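
For context, the declaration above mirrors the plugin::yt-transcript.transcript collection type that the service writes with strapi.documents(...).create. A minimal, hypothetical sketch of reading those stored documents back through the Strapi Document Service follows; the helper and the selected fields are illustrative, though the attribute names come from the schema shown in this diff.

import type { Core } from "@strapi/strapi";

// Hypothetical host-app helper (not part of this package): list saved
// transcripts using the same content-type UID the plugin's service uses.
async function listSavedTranscripts(strapi: Core.Strapi) {
  return strapi.documents("plugin::yt-transcript.transcript").findMany({
    // Attribute names match the transcript schema declared above.
    fields: ["title", "videoId", "thumbnailUrl"],
  });
}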