@minded-ai/mindedjs 1.0.103 → 1.0.105

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66)
  1. package/dist/agent.d.ts +1 -0
  2. package/dist/agent.d.ts.map +1 -1
  3. package/dist/agent.js +21 -5
  4. package/dist/agent.js.map +1 -1
  5. package/dist/cli/lambdaHandlerTemplate.d.ts.map +1 -1
  6. package/dist/cli/lambdaHandlerTemplate.js +0 -1
  7. package/dist/cli/lambdaHandlerTemplate.js.map +1 -1
  8. package/dist/cli/lambdaHandlerTemplate.ts +0 -1
  9. package/dist/index.d.ts +3 -0
  10. package/dist/index.d.ts.map +1 -1
  11. package/dist/index.js +40 -1
  12. package/dist/index.js.map +1 -1
  13. package/dist/internalTools/appActionRunnerTool.d.ts +0 -1
  14. package/dist/internalTools/appActionRunnerTool.d.ts.map +1 -1
  15. package/dist/internalTools/appActionRunnerTool.js +0 -7
  16. package/dist/internalTools/appActionRunnerTool.js.map +1 -1
  17. package/dist/internalTools/documentExtraction/documentExtraction.d.ts +129 -0
  18. package/dist/internalTools/documentExtraction/documentExtraction.d.ts.map +1 -0
  19. package/dist/internalTools/documentExtraction/documentExtraction.js +788 -0
  20. package/dist/internalTools/documentExtraction/documentExtraction.js.map +1 -0
  21. package/dist/internalTools/documentExtraction/types.d.ts +29 -0
  22. package/dist/internalTools/documentExtraction/types.d.ts.map +1 -0
  23. package/dist/internalTools/documentExtraction/types.js +35 -0
  24. package/dist/internalTools/documentExtraction/types.js.map +1 -0
  25. package/dist/internalTools/libraryActionRunnerTool.d.ts +4 -0
  26. package/dist/internalTools/libraryActionRunnerTool.d.ts.map +1 -0
  27. package/dist/internalTools/libraryActionRunnerTool.js +54 -0
  28. package/dist/internalTools/libraryActionRunnerTool.js.map +1 -0
  29. package/dist/nodes/addAppToolNode.d.ts +3 -1
  30. package/dist/nodes/addAppToolNode.d.ts.map +1 -1
  31. package/dist/nodes/addAppToolNode.js +8 -4
  32. package/dist/nodes/addAppToolNode.js.map +1 -1
  33. package/dist/nodes/addToolNode.d.ts.map +1 -1
  34. package/dist/nodes/addToolNode.js +0 -1
  35. package/dist/nodes/addToolNode.js.map +1 -1
  36. package/dist/nodes/nodeFactory.js +1 -1
  37. package/dist/nodes/nodeFactory.js.map +1 -1
  38. package/dist/platform/utils/parseAttachments.d.ts +14 -0
  39. package/dist/platform/utils/parseAttachments.d.ts.map +1 -0
  40. package/dist/platform/utils/parseAttachments.js +54 -0
  41. package/dist/platform/utils/parseAttachments.js.map +1 -0
  42. package/dist/toolsLibrary/index.d.ts +5 -0
  43. package/dist/toolsLibrary/index.d.ts.map +1 -0
  44. package/dist/toolsLibrary/index.js +42 -0
  45. package/dist/toolsLibrary/index.js.map +1 -0
  46. package/dist/toolsLibrary/parseDocument.d.ts +30 -0
  47. package/dist/toolsLibrary/parseDocument.d.ts.map +1 -0
  48. package/dist/toolsLibrary/parseDocument.js +127 -0
  49. package/dist/toolsLibrary/parseDocument.js.map +1 -0
  50. package/docs/SUMMARY.md +1 -0
  51. package/docs/getting-started/installation.md +0 -42
  52. package/docs/tooling/document-processing.md +389 -0
  53. package/package.json +5 -1
  54. package/src/agent.ts +44 -14
  55. package/src/cli/lambdaHandlerTemplate.ts +0 -1
  56. package/src/index.ts +12 -0
  57. package/src/internalTools/appActionRunnerTool.ts +0 -7
  58. package/src/internalTools/documentExtraction/documentExtraction.ts +861 -0
  59. package/src/internalTools/documentExtraction/types.ts +59 -0
  60. package/src/internalTools/libraryActionRunnerTool.ts +63 -0
  61. package/src/nodes/addAppToolNode.ts +13 -3
  62. package/src/nodes/addToolNode.ts +0 -1
  63. package/src/nodes/nodeFactory.ts +2 -2
  64. package/src/platform/utils/parseAttachments.ts +56 -0
  65. package/src/toolsLibrary/index.ts +6 -0
  66. package/src/toolsLibrary/parseDocument.ts +136 -0
@@ -0,0 +1,861 @@
1
+ import { ZodSchema, ZodTypeAny } from 'zod';
2
+ import { BaseLanguageModel } from '@langchain/core/language_models/base';
3
+ import * as fs from 'fs';
4
+ import * as path from 'path';
5
+ import { pdfToPng } from 'pdf-to-png-converter';
6
+ import { logger } from '../../utils/logger';
7
+ import * as os from 'os';
8
+ import { DocumentExtractionOptions, DocumentProcessingResult, DocumentProcessorConfig, SUPPORTED_DOCUMENT_TYPES } from './types';
9
+
10
+ /**
11
+ * Extract data from documents using AI or return raw text.
12
+ *
13
+ * This function allows you to process various document types (PDFs, images, Word docs, etc.)
14
+ * in multiple ways:
15
+ * - With LLM + Schema: Extract structured data according to a Zod schema
16
+ * - With LLM + System Prompt: Extract unstructured data based on prompt instructions
17
+ * - Without LLM: Extract raw text content using LlamaParse
18
+ *
19
+ * @param options - Document extraction options
20
+ * @param options.llm - Optional language model for AI-powered extraction
21
+ * @param options.documentPath - Path to the document file
22
+ * @param options.documentContent - Document content as Buffer or string
23
+ * @param options.documentUrl - URL to fetch the document from
24
+ * @param options.schema - Optional Zod schema for structured data extraction
25
+ * @param options.systemPrompt - Optional prompt for guiding extraction
26
+ * @param options.config - Optional document processor configuration
27
+ *
28
+ * @returns Promise resolving to extracted data/text and metadata
29
+ *
30
+ * @example
31
+ * ```typescript
32
+ * import { extractFromDocument } from '@minded-ai/mindedjs';
33
+ * import { z } from 'zod';
34
+ *
35
+ * // Extract structured data with schema
36
+ * const result1 = await extractFromDocument({
37
+ * llm: agent.llm,
38
+ * documentPath: './invoice.pdf',
39
+ * schema: z.object({
40
+ * invoiceNumber: z.string(),
41
+ * amount: z.number(),
42
+ * })
43
+ * });
44
+ *
45
+ * // Extract unstructured data with prompt
46
+ * const result2 = await extractFromDocument({
47
+ * llm: agent.llm,
48
+ * documentPath: './contract.pdf',
49
+ * systemPrompt: 'Extract all payment terms and conditions'
50
+ * });
51
+ *
52
+ * // Extract raw text without LLM
53
+ * const result3 = await extractFromDocument({
54
+ * documentPath: './document.pdf'
55
+ * });
56
+ * ```
57
+ */
58
+ export async function extractFromDocument<T = string>(options: {
59
+ llm?: BaseLanguageModel;
60
+ documentPath?: string;
61
+ documentContent?: Buffer | string;
62
+ documentUrl?: string;
63
+ schema?: ZodSchema<T>;
64
+ systemPrompt?: string;
65
+ config?: DocumentProcessorConfig;
66
+ }): Promise<{
67
+ data: T;
68
+ metadata: {
69
+ fileSize?: number;
70
+ fileType: string;
71
+ processingTime: number;
72
+ contentLength: number;
73
+ };
74
+ }> {
75
+ // Create a document processor
76
+ const processor = new DocumentProcessor(options.config, options.llm);
77
+
78
+ // Extract from document using the processor
79
+ return processor.extractFromDocument({
80
+ documentPath: options.documentPath,
81
+ documentContent: options.documentContent,
82
+ documentUrl: options.documentUrl,
83
+ schema: options.schema,
84
+ systemPrompt: options.systemPrompt,
85
+ });
86
+ }
87
+
88
+ /**
89
+ * Generic document processor that can extract structured data from various document types
90
+ * including images, PDFs, Word documents, spreadsheets, and more.
91
+ */
92
+ export class DocumentProcessor {
93
+ private config: DocumentProcessorConfig;
94
+ private llm: BaseLanguageModel | null = null;
95
+ private llamaCloudApiKey: string | null = null;
96
+ private sharpModule: any = null;
97
+ private sharpLoadAttempted = false;
98
+
99
+ constructor(config: DocumentProcessorConfig = {}, llm?: BaseLanguageModel) {
100
+ this.config = {
101
+ maxImageWidth: 1200,
102
+ imageQuality: 85,
103
+ useBase64: false,
104
+ ...config,
105
+ };
106
+
107
+ this.llm = llm || null;
108
+ this.llamaCloudApiKey = this.config.llamaCloudApiKey || process.env.LLAMA_CLOUD_API_KEY || null;
109
+ }
110
+
111
+ /**
112
+ * Parse document using LlamaCloud REST API
113
+ */
114
+ private async parseWithLlamaCloud(filePath: string): Promise<string | null> {
115
+ if (!this.llamaCloudApiKey) {
116
+ return null;
117
+ }
118
+
119
+ try {
120
+ // Step 1: Upload file and start parsing
121
+ const fileContent = fs.readFileSync(filePath);
122
+ const fileName = path.basename(filePath);
123
+ const mimeType = this.getMimeType(path.extname(filePath));
124
+
125
+ const formData = new FormData();
126
+ const blob = new Blob([fileContent], { type: mimeType });
127
+ formData.append('file', blob, fileName);
128
+
129
+ const uploadResponse = await fetch('https://api.cloud.llamaindex.ai/api/v1/parsing/upload', {
130
+ method: 'POST',
131
+ headers: {
132
+ Accept: 'application/json',
133
+ Authorization: `Bearer ${this.llamaCloudApiKey}`,
134
+ },
135
+ body: formData,
136
+ });
137
+
138
+ if (!uploadResponse.ok) {
139
+ const errorText = await uploadResponse.text();
140
+ throw new Error(`Failed to upload file: ${uploadResponse.status} - ${errorText}`);
141
+ }
142
+
143
+ const uploadResult = await uploadResponse.json();
144
+ const jobId = uploadResult.id || uploadResult.job_id;
145
+
146
+ if (!jobId) {
147
+ throw new Error('No job ID returned from upload');
148
+ }
149
+
150
+ logger.info({
151
+ msg: '[DocumentProcessor] File uploaded to LlamaCloud',
152
+ jobId,
153
+ fileName,
154
+ });
155
+
156
+ // Step 2: Poll for job completion
157
+ let attempts = 0;
158
+ const maxAttempts = 60; // 60 attempts with 2 second delay = 2 minutes max
159
+ const pollDelay = 2000; // 2 seconds
160
+
161
+ while (attempts < maxAttempts) {
162
+ const statusResponse = await fetch(`https://api.cloud.llamaindex.ai/api/v1/parsing/job/${jobId}`, {
163
+ method: 'GET',
164
+ headers: {
165
+ Accept: 'application/json',
166
+ Authorization: `Bearer ${this.llamaCloudApiKey}`,
167
+ },
168
+ });
169
+
170
+ if (!statusResponse.ok) {
171
+ throw new Error(`Failed to check job status: ${statusResponse.status}`);
172
+ }
173
+
174
+ const statusResult = await statusResponse.json();
175
+ const status = statusResult.status || statusResult.job_status;
176
+
177
+ if (status === 'SUCCESS' || status === 'COMPLETED' || status === 'completed') {
178
+ // Step 3: Retrieve results in Markdown
179
+
180
+ // Create an AbortController for timeout
181
+ const controller = new AbortController();
182
+ const timeout = setTimeout(() => controller.abort(), 20000); // 20 second timeout
183
+
184
+ let resultResponse;
185
+ try {
186
+ resultResponse = await fetch(`https://api.cloud.llamaindex.ai/api/v1/parsing/job/${jobId}/result/markdown`, {
187
+ method: 'GET',
188
+ headers: {
189
+ Accept: 'application/json',
190
+ Authorization: `Bearer ${this.llamaCloudApiKey}`,
191
+ },
192
+ signal: controller.signal,
193
+ });
194
+ } catch (fetchError) {
195
+ clearTimeout(timeout);
196
+ if (fetchError instanceof Error && fetchError.name === 'AbortError') {
197
+ throw new Error('Timeout fetching results from LlamaCloud after 20 seconds');
198
+ }
199
+ throw fetchError;
200
+ }
201
+
202
+ clearTimeout(timeout);
203
+
204
+ if (!resultResponse.ok) {
205
+ const errorText = await resultResponse.text();
206
+ throw new Error(`Failed to retrieve results: ${resultResponse.status} - ${errorText}`);
207
+ }
208
+
209
+ let resultData: any;
210
+ try {
211
+ // Read response using manual stream reading (more reliable than text())
212
+ let responseText;
213
+ if (resultResponse.body) {
214
+ const reader = resultResponse.body.getReader();
215
+ const chunks: Uint8Array[] = [];
216
+ let totalLength = 0;
217
+
218
+ try {
219
+ while (true) {
220
+ const { done, value } = await reader.read();
221
+ if (done) break;
222
+ if (value) {
223
+ chunks.push(value);
224
+ totalLength += value.length;
225
+ }
226
+ }
227
+
228
+ // Combine chunks
229
+ const combined = new Uint8Array(totalLength);
230
+ let offset = 0;
231
+ for (const chunk of chunks) {
232
+ combined.set(chunk, offset);
233
+ offset += chunk.length;
234
+ }
235
+
236
+ responseText = new TextDecoder().decode(combined);
237
+ } finally {
238
+ reader.releaseLock();
239
+ }
240
+ } else {
241
+ responseText = await resultResponse.text();
242
+ }
243
+
244
+ // Try to parse as JSON, but if it fails, use the text directly
245
+ try {
246
+ resultData = JSON.parse(responseText);
247
+ } catch {
248
+ // If it's not JSON, assume it's the markdown content directly
249
+ resultData = responseText;
250
+ }
251
+ } catch (textError) {
252
+ logger.error({
253
+ msg: '[DocumentProcessor] Failed to read response text',
254
+ jobId,
255
+ error: textError instanceof Error ? textError.message : String(textError),
256
+ stack: textError instanceof Error ? textError.stack : undefined,
257
+ });
258
+ throw new Error('Failed to read response from LlamaCloud');
259
+ }
260
+
261
+ logger.debug({
262
+ msg: '[DocumentProcessor] Result data structure',
263
+ jobId,
264
+ dataType: typeof resultData,
265
+ keys: typeof resultData === 'object' && resultData !== null ? Object.keys(resultData) : [],
266
+ hasMarkdown: typeof resultData === 'object' && 'markdown' in resultData,
267
+ hasContent: typeof resultData === 'object' && 'content' in resultData,
268
+ hasText: typeof resultData === 'object' && 'text' in resultData,
269
+ });
270
+
271
+ // The API might return the markdown directly as a string or nested in an object
272
+ let markdownContent: string;
273
+ if (typeof resultData === 'string') {
274
+ markdownContent = resultData;
275
+ } else {
276
+ markdownContent = resultData.markdown || resultData.content || resultData.text || '';
277
+ }
278
+
279
+ if (!markdownContent) {
280
+ logger.error({
281
+ msg: '[DocumentProcessor] No content in result',
282
+ jobId,
283
+ resultData: JSON.stringify(resultData).substring(0, 500),
284
+ });
285
+ throw new Error('No content returned from parsing');
286
+ }
287
+
288
+ logger.info({
289
+ msg: '[DocumentProcessor] Successfully parsed document with LlamaCloud',
290
+ jobId,
291
+ contentLength: markdownContent.length,
292
+ preview: markdownContent.substring(0, 100),
293
+ });
294
+
295
+ logger.debug({
296
+ msg: '[DocumentProcessor] About to return markdown content',
297
+ jobId,
298
+ });
299
+
300
+ return markdownContent;
301
+ } else if (status === 'FAILED' || status === 'ERROR' || status === 'failed') {
302
+ throw new Error(`Parsing job failed: ${statusResult.error || 'Unknown error'}`);
303
+ }
304
+
305
+ // Wait before next attempt
306
+ await new Promise((resolve) => setTimeout(resolve, pollDelay));
307
+ attempts++;
308
+ }
309
+
310
+ throw new Error('Parsing job timed out after 2 minutes');
311
+ } catch (error) {
312
+ logger.warn({
313
+ msg: '[DocumentProcessor] LlamaCloud parsing failed',
314
+ error: error instanceof Error ? error.message : String(error),
315
+ });
316
+ return null;
317
+ } finally {
318
+ logger.debug({
319
+ msg: '[DocumentProcessor] parseWithLlamaCloud finished',
320
+ filePath,
321
+ });
322
+ }
323
+ }
324
+
325
+ /**
326
+ * Get MIME type for file extension
327
+ */
328
+ private getMimeType(fileExtension: string): string {
329
+ const mimeTypes: { [key: string]: string } = {
330
+ '.pdf': 'application/pdf',
331
+ '.doc': 'application/msword',
332
+ '.docx': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
333
+ '.txt': 'text/plain',
334
+ '.rtf': 'application/rtf',
335
+ '.jpg': 'image/jpeg',
336
+ '.jpeg': 'image/jpeg',
337
+ '.png': 'image/png',
338
+ '.gif': 'image/gif',
339
+ '.bmp': 'image/bmp',
340
+ '.webp': 'image/webp',
341
+ '.tiff': 'image/tiff',
342
+ '.xls': 'application/vnd.ms-excel',
343
+ '.xlsx': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
344
+ '.csv': 'text/csv',
345
+ '.html': 'text/html',
346
+ '.htm': 'text/html',
347
+ '.xml': 'application/xml',
348
+ '.md': 'text/markdown',
349
+ };
350
+
351
+ return mimeTypes[fileExtension.toLowerCase()] || 'application/octet-stream';
352
+ }
353
+
354
+ /**
355
+ * Extract data from a document - either structured data using AI or raw text
356
+ */
357
+ async extractFromDocument<T = string>(options: DocumentExtractionOptions): Promise<DocumentProcessingResult<T>> {
358
+ logger.debug({
359
+ msg: '[DocumentProcessor] extractFromDocument called',
360
+ hasDocumentPath: !!options.documentPath,
361
+ hasDocumentUrl: !!options.documentUrl,
362
+ hasDocumentContent: !!options.documentContent,
363
+ hasLLM: !!this.llm,
364
+ hasSchema: !!options.schema,
365
+ });
366
+
367
+ const startTime = Date.now();
368
+
369
+ try {
370
+ logger.debug({
371
+ msg: '[DocumentProcessor] Getting document content',
372
+ });
373
+
374
+ // Determine document source and content
375
+ const { content, fileType, fileSize } = await this.getDocumentContent(options);
376
+
377
+ // Process document content based on type
378
+ let processedContent: string;
379
+
380
+ if (this.isImageFile(fileType)) {
381
+ processedContent = await this.processImageDocument(content as Buffer, fileType, options.documentPath);
382
+ } else {
383
+ processedContent = await this.processTextDocument(content, options.documentPath, fileType);
384
+ }
385
+
386
+ logger.info({
387
+ msg: '[DocumentProcessor] Document content processed',
388
+ fileType,
389
+ contentLength: processedContent.length,
390
+ });
391
+
392
+ const processingTime = Date.now() - startTime;
393
+
394
+ // If no LLM is provided, return the raw text content
395
+ if (!this.llm) {
396
+ return {
397
+ data: processedContent as T,
398
+ metadata: {
399
+ fileSize,
400
+ fileType,
401
+ processingTime,
402
+ contentLength: processedContent.length,
403
+ },
404
+ };
405
+ }
406
+
407
+ // If LLM is provided, extract data (structured or unstructured)
408
+ const extractedData = await this.extractStructuredData<T>(processedContent, options.schema, options.llmConfig, options.systemPrompt);
409
+
410
+ return {
411
+ data: extractedData,
412
+ metadata: {
413
+ fileSize,
414
+ fileType,
415
+ processingTime,
416
+ contentLength: processedContent.length,
417
+ },
418
+ };
419
+ } catch (error) {
420
+ logger.error({
421
+ msg: '[DocumentProcessor] Document processing failed',
422
+ error: error instanceof Error ? error.message : String(error),
423
+ });
424
+ throw new Error(`Document processing failed: ${error instanceof Error ? error.message : String(error)}`);
425
+ }
426
+ }
427
+
428
+ /**
429
+ * Get document content from various sources
430
+ */
431
+ private async getDocumentContent(options: DocumentExtractionOptions): Promise<{
432
+ content: Buffer | string;
433
+ fileType: string;
434
+ fileSize?: number;
435
+ }> {
436
+ logger.debug({
437
+ msg: '[DocumentProcessor] getDocumentContent called',
438
+ hasPath: !!options.documentPath,
439
+ hasContent: !!options.documentContent,
440
+ hasUrl: !!options.documentUrl,
441
+ });
442
+
443
+ // From file path
444
+ if (options.documentPath) {
445
+ if (!fs.existsSync(options.documentPath)) {
446
+ throw new Error(`Document not found: ${options.documentPath}`);
447
+ }
448
+
449
+ const content = fs.readFileSync(options.documentPath);
450
+ const fileType = path.extname(options.documentPath).toLowerCase();
451
+
452
+ return {
453
+ content,
454
+ fileType,
455
+ fileSize: content.length,
456
+ };
457
+ }
458
+
459
+ // From provided content
460
+ if (options.documentContent) {
461
+ // Try to infer file type from content if it's a buffer
462
+ let fileType = '.unknown';
463
+ if (Buffer.isBuffer(options.documentContent)) {
464
+ fileType = this.inferFileTypeFromBuffer(options.documentContent);
465
+ } else if (typeof options.documentContent === 'string') {
466
+ fileType = '.txt'; // Assume text content
467
+ }
468
+
469
+ return {
470
+ content: options.documentContent,
471
+ fileType,
472
+ fileSize: Buffer.isBuffer(options.documentContent) ? options.documentContent.length : Buffer.byteLength(options.documentContent),
473
+ };
474
+ }
475
+
476
+ // From URL
477
+ if (options.documentUrl) {
478
+ logger.debug({
479
+ msg: '[DocumentProcessor] Fetching document from URL',
480
+ url: options.documentUrl,
481
+ });
482
+
483
+ const response = await fetch(options.documentUrl);
484
+
485
+ logger.debug({
486
+ msg: '[DocumentProcessor] URL fetch response',
487
+ status: response.status,
488
+ ok: response.ok,
489
+ });
490
+
491
+ if (!response.ok) {
492
+ throw new Error(`Failed to fetch document from URL: ${response.statusText}`);
493
+ }
494
+
495
+ const arrayBuffer = await response.arrayBuffer();
496
+ const content = Buffer.from(arrayBuffer);
497
+ const fileType = this.inferFileTypeFromUrl(options.documentUrl) || this.inferFileTypeFromBuffer(content);
498
+
499
+ logger.debug({
500
+ msg: '[DocumentProcessor] Document fetched from URL',
501
+ contentSize: content.length,
502
+ fileType,
503
+ });
504
+
505
+ return {
506
+ content,
507
+ fileType,
508
+ fileSize: content.length,
509
+ };
510
+ }
511
+
512
+ throw new Error('No document source provided. Specify documentPath, documentContent, or documentUrl.');
513
+ }
514
+
515
+ /**
516
+ * Process image documents by converting them to a standardized format
517
+ */
518
+ private async processImageDocument(content: Buffer, fileType: string, filePath?: string): Promise<string> {
519
+ try {
520
+ // First, try to use LlamaParser if available for text extraction
521
+ if (filePath && this.llamaCloudApiKey) {
522
+ logger.debug({
523
+ msg: '[DocumentProcessor] Calling parseWithLlamaCloud for image',
524
+ filePath,
525
+ });
526
+ const parsedContent = await this.parseWithLlamaCloud(filePath);
527
+ logger.debug({
528
+ msg: '[DocumentProcessor] parseWithLlamaCloud returned for image',
529
+ hasContent: !!parsedContent,
530
+ contentLength: parsedContent?.length,
531
+ });
532
+ if (parsedContent) {
533
+ return parsedContent;
534
+ }
535
+ }
536
+
537
+ // If no file path, create a temporary file for LlamaCloud parsing
538
+ if (!filePath && this.llamaCloudApiKey) {
539
+ const tempDir = os.tmpdir();
540
+ const tempFileName = `temp_${Date.now()}${fileType}`;
541
+ const tempFilePath = path.join(tempDir, tempFileName);
542
+
543
+ logger.debug({
544
+ msg: '[DocumentProcessor] Creating temp file for image',
545
+ tempFilePath,
546
+ contentSize: content.length,
547
+ });
548
+
549
+ try {
550
+ fs.writeFileSync(tempFilePath, content);
551
+ logger.debug({
552
+ msg: '[DocumentProcessor] Calling parseWithLlamaCloud for temp image',
553
+ tempFilePath,
554
+ });
555
+ const parsedContent = await this.parseWithLlamaCloud(tempFilePath);
556
+ logger.debug({
557
+ msg: '[DocumentProcessor] parseWithLlamaCloud returned for temp image',
558
+ hasContent: !!parsedContent,
559
+ contentLength: parsedContent?.length,
560
+ });
561
+ fs.unlinkSync(tempFilePath);
562
+
563
+ if (parsedContent) {
564
+ return parsedContent;
565
+ }
566
+ } catch (error) {
567
+ // Clean up temp file on error
568
+ if (fs.existsSync(tempFilePath)) {
569
+ fs.unlinkSync(tempFilePath);
570
+ }
571
+ logger.warn({
572
+ msg: '[DocumentProcessor] Failed to parse image with LlamaCloud',
573
+ error: error instanceof Error ? error.message : String(error),
574
+ });
575
+ }
576
+ }
577
+
578
+ // Fallback: Convert to image format for LLM processing
579
+ // For PDFs, convert first page to image
580
+ if (fileType === '.pdf') {
581
+ const pngPages = await pdfToPng(content, {
582
+ disableFontFace: true,
583
+ useSystemFonts: true,
584
+ viewportScale: 2.0,
585
+ pagesToProcess: [1], // Only first page
586
+ });
587
+
588
+ if (pngPages.length === 0) {
589
+ throw new Error('Failed to convert PDF to image');
590
+ }
591
+
592
+ content = pngPages[0].content;
593
+ }
594
+
595
+ // Check if sharp is available before using it
596
+ if (!this.sharpLoadAttempted) {
597
+ this.sharpLoadAttempted = true;
598
+ try {
599
+ this.sharpModule = await import('sharp');
600
+ this.sharpModule = this.sharpModule.default || this.sharpModule;
601
+ } catch (error) {
602
+ logger.warn({
603
+ msg: '[DocumentProcessor] Sharp module not available. Image processing will be limited.',
604
+ error: error instanceof Error ? error.message : String(error),
605
+ });
606
+ this.sharpModule = null; // Ensure it's null if loading fails
607
+ }
608
+ }
609
+
610
+ if (!this.sharpModule) {
611
+ logger.warn({
612
+ msg: '[DocumentProcessor] Sharp module not available. Using original image without optimization.',
613
+ fileType,
614
+ contentSize: content.length,
615
+ });
616
+
617
+ // If sharp is not available, use the original image
618
+ if (this.config.useBase64) {
619
+ // Return original image as base64
620
+ const base64Image = content.toString('base64');
621
+ const mimeType = this.getMimeType(fileType);
622
+ return `data:${mimeType};base64,${base64Image}`;
623
+ } else {
624
+ // Without sharp and without base64, we cannot process the image
625
+ return `[IMAGE CONTENT - ${fileType.toUpperCase()} file. Size: ${
626
+ content.length
627
+ } bytes. Consider using LLAMA_CLOUD_API_KEY for text extraction or set useBase64: true]`;
628
+ }
629
+ }
630
+
631
+ // Resize and optimize image using sharp
632
+ const processedImage = await this.sharpModule(content)
633
+ .resize(this.config.maxImageWidth!, null, {
634
+ withoutEnlargement: true,
635
+ fit: 'inside',
636
+ })
637
+ .jpeg({
638
+ quality: this.config.imageQuality!,
639
+ mozjpeg: true,
640
+ })
641
+ .toBuffer();
642
+
643
+ if (this.config.useBase64) {
644
+ const base64Image = processedImage.toString('base64');
645
+ return `data:image/jpeg;base64,${base64Image}`;
646
+ } else {
647
+ // Without LlamaParser, we cannot extract text from images
648
+ // Return a placeholder that indicates OCR is needed
649
+ return `[IMAGE CONTENT - ${fileType.toUpperCase()} file. OCR text extraction requires LlamaParser with LLAMA_CLOUD_API_KEY. Size: ${
650
+ processedImage.length
651
+ } bytes]`;
652
+ }
653
+ } catch (error) {
654
+ throw new Error(`Failed to process image document: ${error instanceof Error ? error.message : String(error)}`);
655
+ }
656
+ }
657
+
658
+ /**
659
+ * Process text-based documents using LlamaParser or fallback methods
660
+ */
661
+ private async processTextDocument(content: Buffer | string, filePath?: string, fileType?: string): Promise<string> {
662
+ // Try LlamaCloud parsing if we have a file path
663
+ if (filePath && this.llamaCloudApiKey) {
664
+ const parsedContent = await this.parseWithLlamaCloud(filePath);
665
+ if (parsedContent) {
666
+ return parsedContent;
667
+ }
668
+ }
669
+
670
+ // If no file path but we have content and LlamaCloud API key, create a temp file
671
+ if (!filePath && this.llamaCloudApiKey && Buffer.isBuffer(content)) {
672
+ const tempDir = os.tmpdir();
673
+ const tempFileName = `temp_${Date.now()}${fileType || '.txt'}`;
674
+ const tempFilePath = path.join(tempDir, tempFileName);
675
+
676
+ try {
677
+ fs.writeFileSync(tempFilePath, content);
678
+ const parsedContent = await this.parseWithLlamaCloud(tempFilePath);
679
+ fs.unlinkSync(tempFilePath);
680
+
681
+ if (parsedContent) {
682
+ return parsedContent;
683
+ }
684
+ } catch (error) {
685
+ // Clean up temp file on error
686
+ if (fs.existsSync(tempFilePath)) {
687
+ fs.unlinkSync(tempFilePath);
688
+ }
689
+ logger.warn({
690
+ msg: '[DocumentProcessor] Failed to parse text document with LlamaCloud',
691
+ error: error instanceof Error ? error.message : String(error),
692
+ });
693
+ }
694
+ }
695
+
696
+ // Fallback: handle based on file type
697
+ if (typeof content === 'string') {
698
+ return content;
699
+ }
700
+
701
+ // For binary content, convert to text
702
+ if (fileType === '.pdf' && !this.llamaCloudApiKey) {
703
+ throw new Error('PDF processing requires LLAMA_CLOUD_API_KEY environment variable for LlamaParser');
704
+ }
705
+
706
+ // Basic text extraction for simple formats
707
+ if (['.txt', '.md', '.html', '.htm', '.xml', '.csv'].includes(fileType || '')) {
708
+ return content.toString('utf-8');
709
+ }
710
+
711
+ // For unsupported binary formats without LlamaParser
712
+ throw new Error(`Unsupported document type ${fileType}. Please provide LLAMA_CLOUD_API_KEY for advanced document processing.`);
713
+ }
714
+
715
+ /**
716
+ * Extract data using LLM - either structured with schema or unstructured with prompt
717
+ */
718
+ private async extractStructuredData<T>(
719
+ content: string,
720
+ schema?: ZodSchema<T> | ZodTypeAny,
721
+ llmConfig?: { model?: string; temperature?: number },
722
+ systemPrompt?: string,
723
+ ): Promise<T> {
724
+ if (!this.llm) {
725
+ throw new Error('LLM instance is required for data extraction. Please provide an LLM when creating the DocumentProcessor.');
726
+ }
727
+
728
+ // Note: llmConfig is ignored when using the provided LLM instance
729
+ // The LLM should already be configured with the desired model and temperature
730
+
731
+ const defaultSystemPrompt =
732
+ 'You are an expert data-extraction assistant. ' +
733
+ 'Extract the requested information from the provided document content. ' +
734
+ 'If you cannot find a value for a required field, use "N/A" or a descriptive placeholder. ' +
735
+ 'Be accurate and thorough in your extraction.';
736
+
737
+ const finalSystemPrompt = systemPrompt || defaultSystemPrompt;
738
+
739
+ try {
740
+ // If schema is provided, use structured output
741
+ if (schema) {
742
+ // Check if the LLM supports withStructuredOutput
743
+ if (!('withStructuredOutput' in this.llm)) {
744
+ throw new Error('The provided LLM does not support structured output. Please use a compatible LLM instance.');
745
+ }
746
+
747
+ const structuredLlm = (this.llm as any).withStructuredOutput(schema as any);
748
+
749
+ const result = await structuredLlm.invoke([
750
+ {
751
+ role: 'system',
752
+ content: finalSystemPrompt,
753
+ },
754
+ {
755
+ role: 'user',
756
+ content: `Please extract the following information from this document:\n\n${content}`,
757
+ },
758
+ ]);
759
+
760
+ logger.debug({
761
+ msg: '[DocumentProcessor] Structured data extraction completed',
762
+ extractedData: JSON.stringify(result, null, 2),
763
+ });
764
+
765
+ return result as T;
766
+ } else {
767
+ // Without schema, return the LLM's text response
768
+ const response = await this.llm.invoke([
769
+ {
770
+ role: 'system',
771
+ content: finalSystemPrompt,
772
+ },
773
+ {
774
+ role: 'user',
775
+ content: `Please analyze and extract information from this document:\n\n${content}`,
776
+ },
777
+ ]);
778
+
779
+ // Extract the text content from the response
780
+ let textContent: string;
781
+ if (typeof response.content === 'string') {
782
+ textContent = response.content;
783
+ } else if (Array.isArray(response.content) && response.content.length > 0) {
784
+ // Handle array of content blocks
785
+ textContent = response.content.map((block: any) => (typeof block === 'string' ? block : block.text || '')).join('\n');
786
+ } else {
787
+ textContent = String(response.content);
788
+ }
789
+
790
+ logger.debug({
791
+ msg: '[DocumentProcessor] Unstructured data extraction completed',
792
+ contentLength: textContent.length,
793
+ });
794
+
795
+ return textContent as T;
796
+ }
797
+ } catch (error) {
798
+ throw new Error(`LLM extraction failed: ${error instanceof Error ? error.message : String(error)}`);
799
+ }
800
+ }
801
+
802
+ /**
803
+ * Check if file is an image type
804
+ */
805
+ private isImageFile(fileType: string): boolean {
806
+ const imageTypes = ['.jpg', '.jpeg', '.png', '.gif', '.bmp', '.webp', '.tiff'];
807
+ return imageTypes.includes(fileType.toLowerCase());
808
+ }
809
+
810
+ /**
811
+ * Infer file type from buffer content
812
+ */
813
+ private inferFileTypeFromBuffer(buffer: Buffer): string {
814
+ // Check common file signatures
815
+ const signatures: { [key: string]: string } = {
816
+ '89504E47': '.png',
817
+ FFD8FF: '.jpg',
818
+ '47494638': '.gif',
819
+ '25504446': '.pdf',
820
+ '504B0304': '.zip', // Also used by docx, xlsx, pptx
821
+ D0CF11E0: '.doc', // Also xls, ppt
822
+ };
823
+
824
+ const hex = buffer.toString('hex', 0, 4).toUpperCase();
825
+
826
+ for (const [signature, type] of Object.entries(signatures)) {
827
+ if (hex.startsWith(signature)) {
828
+ return type;
829
+ }
830
+ }
831
+
832
+ return '.unknown';
833
+ }
834
+
835
+ /**
836
+ * Infer file type from URL
837
+ */
838
+ private inferFileTypeFromUrl(url: string): string | null {
839
+ try {
840
+ const pathname = new URL(url).pathname;
841
+ const extension = path.extname(pathname).toLowerCase();
842
+ return extension || null;
843
+ } catch {
844
+ return null;
845
+ }
846
+ }
847
+
848
+ /**
849
+ * Get list of supported document types
850
+ */
851
+ static getSupportedDocumentTypes(): string[] {
852
+ return [...SUPPORTED_DOCUMENT_TYPES];
853
+ }
854
+
855
+ /**
856
+ * Check if a file type is supported
857
+ */
858
+ static isDocumentTypeSupported(fileType: string): boolean {
859
+ return SUPPORTED_DOCUMENT_TYPES.includes(fileType.toLowerCase());
860
+ }
861
+ }