aiecs 1.0.8__py3-none-any.whl → 1.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of aiecs might be problematic.

Files changed (45)
  1. aiecs/__init__.py +1 -1
  2. aiecs/aiecs_client.py +159 -1
  3. aiecs/config/config.py +4 -0
  4. aiecs/domain/context/__init__.py +24 -0
  5. aiecs/main.py +20 -2
  6. aiecs/scripts/dependance_check/__init__.py +18 -0
  7. aiecs/scripts/{download_nlp_data.py → dependance_check/download_nlp_data.py} +50 -8
  8. aiecs/scripts/dependance_patch/__init__.py +8 -0
  9. aiecs/scripts/dependance_patch/fix_weasel/__init__.py +12 -0
  10. aiecs/scripts/tools_develop/README.md +340 -0
  11. aiecs/scripts/tools_develop/__init__.py +16 -0
  12. aiecs/scripts/tools_develop/check_type_annotations.py +263 -0
  13. aiecs/scripts/tools_develop/validate_tool_schemas.py +346 -0
  14. aiecs/tools/__init__.py +33 -14
  15. aiecs/tools/docs/__init__.py +103 -0
  16. aiecs/tools/docs/ai_document_orchestrator.py +543 -0
  17. aiecs/tools/docs/ai_document_writer_orchestrator.py +2199 -0
  18. aiecs/tools/docs/content_insertion_tool.py +1214 -0
  19. aiecs/tools/docs/document_creator_tool.py +1161 -0
  20. aiecs/tools/docs/document_layout_tool.py +1090 -0
  21. aiecs/tools/docs/document_parser_tool.py +904 -0
  22. aiecs/tools/docs/document_writer_tool.py +1583 -0
  23. aiecs/tools/langchain_adapter.py +102 -51
  24. aiecs/tools/schema_generator.py +265 -0
  25. aiecs/tools/task_tools/image_tool.py +1 -1
  26. aiecs/tools/task_tools/office_tool.py +9 -0
  27. aiecs/tools/task_tools/scraper_tool.py +1 -1
  28. {aiecs-1.0.8.dist-info → aiecs-1.1.0.dist-info}/METADATA +1 -1
  29. {aiecs-1.0.8.dist-info → aiecs-1.1.0.dist-info}/RECORD +44 -28
  30. aiecs-1.1.0.dist-info/entry_points.txt +9 -0
  31. aiecs-1.0.8.dist-info/entry_points.txt +0 -7
  32. /aiecs/scripts/{DEPENDENCY_SYSTEM_SUMMARY.md → dependance_check/DEPENDENCY_SYSTEM_SUMMARY.md} +0 -0
  33. /aiecs/scripts/{README_DEPENDENCY_CHECKER.md → dependance_check/README_DEPENDENCY_CHECKER.md} +0 -0
  34. /aiecs/scripts/{dependency_checker.py → dependance_check/dependency_checker.py} +0 -0
  35. /aiecs/scripts/{dependency_fixer.py → dependance_check/dependency_fixer.py} +0 -0
  36. /aiecs/scripts/{quick_dependency_check.py → dependance_check/quick_dependency_check.py} +0 -0
  37. /aiecs/scripts/{setup_nlp_data.sh → dependance_check/setup_nlp_data.sh} +0 -0
  38. /aiecs/scripts/{README_WEASEL_PATCH.md → dependance_patch/fix_weasel/README_WEASEL_PATCH.md} +0 -0
  39. /aiecs/scripts/{fix_weasel_validator.py → dependance_patch/fix_weasel/fix_weasel_validator.py} +0 -0
  40. /aiecs/scripts/{fix_weasel_validator.sh → dependance_patch/fix_weasel/fix_weasel_validator.sh} +0 -0
  41. /aiecs/scripts/{patch_weasel_library.sh → dependance_patch/fix_weasel/patch_weasel_library.sh} +0 -0
  42. /aiecs/scripts/{run_weasel_patch.sh → dependance_patch/fix_weasel/run_weasel_patch.sh} +0 -0
  43. {aiecs-1.0.8.dist-info → aiecs-1.1.0.dist-info}/WHEEL +0 -0
  44. {aiecs-1.0.8.dist-info → aiecs-1.1.0.dist-info}/licenses/LICENSE +0 -0
  45. {aiecs-1.0.8.dist-info → aiecs-1.1.0.dist-info}/top_level.txt +0 -0
aiecs/tools/docs/document_parser_tool.py (new file)
@@ -0,0 +1,904 @@
+import os
+import re
+import mimetypes
+import logging
+import asyncio
+from typing import Dict, Any, List, Optional, Union, Tuple
+from enum import Enum
+from urllib.parse import urlparse
+from pathlib import Path
+import tempfile
+
+import httpx
+from pydantic import BaseModel, Field, ValidationError, ConfigDict
+from pydantic_settings import BaseSettings
+
+from aiecs.tools.base_tool import BaseTool
+from aiecs.tools import register_tool
+
+
+class DocumentType(str, Enum):
+    """Supported document types for parsing"""
+    PDF = "pdf"
+    DOCX = "docx"
+    XLSX = "xlsx"
+    PPTX = "pptx"
+    TXT = "txt"
+    HTML = "html"
+    RTF = "rtf"
+    CSV = "csv"
+    JSON = "json"
+    XML = "xml"
+    MARKDOWN = "md"
+    IMAGE = "image"
+    UNKNOWN = "unknown"
+
+
+class ParsingStrategy(str, Enum):
+    """Document parsing strategies"""
+    TEXT_ONLY = "text_only"
+    STRUCTURED = "structured"
+    FULL_CONTENT = "full_content"
+    METADATA_ONLY = "metadata_only"
+
+
+class OutputFormat(str, Enum):
+    """Output formats for parsed content"""
+    TEXT = "text"
+    JSON = "json"
+    MARKDOWN = "markdown"
+    HTML = "html"
+
+
+class DocumentParserSettings(BaseSettings):
+    """Configuration for DocumentParserTool"""
+    user_agent: str = "DocumentParser/1.0"
+    max_file_size: int = 50 * 1024 * 1024  # 50MB
+    temp_dir: str = os.path.join(tempfile.gettempdir(), 'document_parser')
+    default_encoding: str = "utf-8"
+    timeout: int = 30
+    max_pages: int = 1000  # For large PDF files
+
+    # Cloud storage settings
+    enable_cloud_storage: bool = True
+    gcs_bucket_name: str = "aiecs-documents"
+    gcs_project_id: Optional[str] = None
+
+    model_config = ConfigDict(env_prefix="DOC_PARSER_")
+
+
+class DocumentParserError(Exception):
+    """Base exception for document parser errors"""
+    pass
+
+
+class UnsupportedDocumentError(DocumentParserError):
+    """Raised when document type is not supported"""
+    pass
+
+
+class DownloadError(DocumentParserError):
+    """Raised when document download fails"""
+    pass
+
+
+class ParseError(DocumentParserError):
+    """Raised when document parsing fails"""
+    pass
+
+
+@register_tool("document_parser")
+class DocumentParserTool(BaseTool):
+    """
+    Modern high-performance document parsing component that can:
+    1. Auto-detect document types from URLs or files
+    2. Download documents from URLs
+    3. Parse various document formats using existing atomic tools
+    4. Output structured content for AI consumption
+
+    Leverages existing tools:
+    - ScraperTool for URL downloading
+    - OfficeTool for Office document parsing
+    - ImageTool for image OCR
+    """
+
+    def __init__(self, config: Optional[Dict] = None):
+        """Initialize DocumentParserTool with settings"""
+        super().__init__(config)
+        # Initialize settings with config if provided
+        if config:
+            try:
+                # For BaseSettings, use dictionary unpacking
+                self.settings = DocumentParserSettings(**config)
+            except ValidationError as e:
+                raise ValueError(f"Invalid settings: {e}")
+        else:
+            self.settings = DocumentParserSettings()
+
+        self.logger = logging.getLogger(__name__)
+        os.makedirs(self.settings.temp_dir, exist_ok=True)
+
+        # Initialize dependent tools
+        self._init_dependent_tools()
+
+        # Initialize cloud storage
+        self._init_cloud_storage()
+
+    def _init_dependent_tools(self):
+        """Initialize dependent tools for document processing"""
+        try:
+            from aiecs.tools.task_tools.scraper_tool import ScraperTool
+            self.scraper_tool = ScraperTool()
+        except ImportError:
+            self.logger.warning("ScraperTool not available")
+            self.scraper_tool = None
+
+        try:
+            from aiecs.tools.task_tools.office_tool import OfficeTool
+            self.office_tool = OfficeTool()
+        except ImportError:
+            self.logger.warning("OfficeTool not available")
+            self.office_tool = None
+
+        try:
+            from aiecs.tools.task_tools.image_tool import ImageTool
+            self.image_tool = ImageTool()
+        except ImportError:
+            self.logger.warning("ImageTool not available")
+            self.image_tool = None
+
+    def _init_cloud_storage(self):
+        """Initialize cloud storage for document retrieval"""
+        self.file_storage = None
+
+        if self.settings.enable_cloud_storage:
+            try:
+                from aiecs.infrastructure.persistence.file_storage import FileStorage
+
+                storage_config = {
+                    'gcs_bucket_name': self.settings.gcs_bucket_name,
+                    'gcs_project_id': self.settings.gcs_project_id,
+                    'enable_local_fallback': True,
+                    'local_storage_path': self.settings.temp_dir
+                }
+
+                self.file_storage = FileStorage(storage_config)
+                asyncio.create_task(self._init_storage_async())
+
+            except ImportError:
+                self.logger.warning("FileStorage not available, cloud storage disabled")
+            except Exception as e:
+                self.logger.warning(f"Failed to initialize cloud storage: {e}")
+
+    async def _init_storage_async(self):
+        """Async initialization of file storage"""
+        try:
+            if self.file_storage:
+                await self.file_storage.initialize()
+                self.logger.info("Cloud storage initialized successfully")
+        except Exception as e:
+            self.logger.warning(f"Cloud storage initialization failed: {e}")
+            self.file_storage = None
+
+    # Schema definitions
+    class ParseDocumentSchema(BaseModel):
+        """Schema for parse_document operation"""
+        source: str = Field(description="URL or file path to the document")
+        strategy: ParsingStrategy = Field(default=ParsingStrategy.FULL_CONTENT, description="Parsing strategy")
+        output_format: OutputFormat = Field(default=OutputFormat.JSON, description="Output format")
+        force_type: Optional[DocumentType] = Field(default=None, description="Force document type detection")
+        extract_metadata: bool = Field(default=True, description="Whether to extract metadata")
+        chunk_size: Optional[int] = Field(default=None, description="Chunk size for large documents")
+
+    class DetectTypeSchema(BaseModel):
+        """Schema for detect_document_type operation"""
+        source: str = Field(description="URL or file path to analyze")
+        download_sample: bool = Field(default=True, description="Download sample for content-based detection")
+
+    def detect_document_type(self, source: str, download_sample: bool = True) -> Dict[str, Any]:
+        """
+        Detect document type from URL or file path
+
+        Args:
+            source: URL or file path
+            download_sample: Whether to download sample for content analysis
+
+        Returns:
+            Dict containing detected type and confidence
+        """
+        try:
+            result = {
+                "source": source,
+                "is_url": self._is_url(source),
+                "detected_type": DocumentType.UNKNOWN,
+                "confidence": 0.0,
+                "mime_type": None,
+                "file_extension": None,
+                "file_size": None,
+                "detection_methods": []
+            }
+
+            # Method 1: File extension analysis
+            extension_type, ext_confidence = self._detect_by_extension(source)
+            if extension_type != DocumentType.UNKNOWN:
+                result["detected_type"] = extension_type
+                result["confidence"] = ext_confidence
+                result["file_extension"] = Path(source).suffix.lower()
+                result["detection_methods"].append("file_extension")
+
+            # Method 2: MIME type detection (for URLs)
+            if self._is_url(source) and download_sample:
+                mime_type, mime_confidence = self._detect_by_mime_type(source)
+                if mime_type != DocumentType.UNKNOWN and mime_confidence > result["confidence"]:
+                    result["detected_type"] = mime_type
+                    result["confidence"] = mime_confidence
+                    result["detection_methods"].append("mime_type")
+
+            # Method 3: Content-based detection
+            if download_sample:
+                content_type, content_confidence = self._detect_by_content(source)
+                if content_type != DocumentType.UNKNOWN and content_confidence > result["confidence"]:
+                    result["detected_type"] = content_type
+                    result["confidence"] = content_confidence
+                    result["detection_methods"].append("content_analysis")
+
+            return result
+
+        except Exception as e:
+            raise DocumentParserError(f"Document type detection failed: {str(e)}")
+
+    def parse_document(self,
+                       source: str,
+                       strategy: ParsingStrategy = ParsingStrategy.FULL_CONTENT,
+                       output_format: OutputFormat = OutputFormat.JSON,
+                       force_type: Optional[DocumentType] = None,
+                       extract_metadata: bool = True,
+                       chunk_size: Optional[int] = None) -> Dict[str, Any]:
+        """
+        Parse document from URL or file path
+
+        Args:
+            source: URL or file path to document
+            strategy: Parsing strategy to use
+            output_format: Format for output content
+            force_type: Force specific document type
+            extract_metadata: Whether to extract metadata
+            chunk_size: Chunk size for large documents
+
+        Returns:
+            Dict containing parsed content and metadata
+        """
+        try:
+            # Step 1: Detect document type
+            if force_type:
+                doc_type = force_type
+                confidence = 1.0
+            else:
+                detection_result = self.detect_document_type(source)
+                doc_type = detection_result["detected_type"]
+                confidence = detection_result["confidence"]
+
+            if confidence < 0.5:
+                raise UnsupportedDocumentError(f"Unable to reliably detect document type for: {source}")
+
+            # Step 2: Download document if it's a URL
+            local_path = self._ensure_local_file(source)
+
+            # Step 3: Parse document based on type and strategy
+            content = self._parse_by_type(local_path, doc_type, strategy)
+
+            # Step 4: Extract metadata if requested
+            metadata = {}
+            if extract_metadata:
+                metadata = self._extract_metadata(local_path, doc_type)
+
+            # Step 5: Format output
+            result = {
+                "source": source,
+                "document_type": doc_type,
+                "detection_confidence": confidence,
+                "parsing_strategy": strategy,
+                "metadata": metadata,
+                "content": content,
+                "content_stats": self._calculate_content_stats(content),
+                "chunks": []
+            }
+
+            # Step 6: Create chunks if requested
+            if chunk_size and isinstance(content, str):
+                result["chunks"] = self._create_chunks(content, chunk_size)
+
+            # Step 7: Format output according to requested format
+            if output_format == OutputFormat.TEXT:
+                return {"text": self._format_as_text(result)}
+            elif output_format == OutputFormat.MARKDOWN:
+                return {"markdown": self._format_as_markdown(result)}
+            elif output_format == OutputFormat.HTML:
+                return {"html": self._format_as_html(result)}
+            else:
+                return result
+
+        except Exception as e:
+            if isinstance(e, DocumentParserError):
+                raise
+            raise ParseError(f"Document parsing failed: {str(e)}")
+        finally:
+            # Cleanup temporary files
+            self._cleanup_temp_files(source)
+
+    async def parse_document_async(self,
+                                   source: str,
+                                   strategy: ParsingStrategy = ParsingStrategy.FULL_CONTENT,
+                                   output_format: OutputFormat = OutputFormat.JSON,
+                                   force_type: Optional[DocumentType] = None,
+                                   extract_metadata: bool = True,
+                                   chunk_size: Optional[int] = None) -> Dict[str, Any]:
+        """Async version of parse_document"""
+        return await asyncio.to_thread(
+            self.parse_document,
+            source=source,
+            strategy=strategy,
+            output_format=output_format,
+            force_type=force_type,
+            extract_metadata=extract_metadata,
+            chunk_size=chunk_size
+        )
+
+    def _is_url(self, source: str) -> bool:
+        """Check if source is a URL"""
+        try:
+            result = urlparse(source)
+            return bool(result.scheme and result.netloc)
+        except:
+            return False
+
+    def _is_cloud_storage_path(self, source: str) -> bool:
+        """Check if source is a cloud storage path"""
+        # Support various cloud storage path formats:
+        # - gs://bucket/path/file.pdf (Google Cloud Storage)
+        # - s3://bucket/path/file.pdf (AWS S3)
+        # - azure://container/path/file.pdf (Azure Blob Storage)
+        # - cloud://path/file.pdf (Generic cloud storage)
+        cloud_schemes = ['gs', 's3', 'azure', 'cloud']
+        try:
+            parsed = urlparse(source)
+            return parsed.scheme in cloud_schemes
+        except:
+            return False
+
+    def _is_storage_id(self, source: str) -> bool:
+        """Check if source is a storage ID (UUID-like identifier)"""
+        # Check for UUID patterns or other storage ID formats
+        import re
+        uuid_pattern = r'^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$'
+        storage_id_pattern = r'^[a-zA-Z0-9_-]{10,}$'  # Generic storage ID
+
+        return bool(re.match(uuid_pattern, source, re.IGNORECASE) or
+                    re.match(storage_id_pattern, source))
+
+    def _detect_by_extension(self, source: str) -> Tuple[DocumentType, float]:
+        """Detect document type by file extension"""
+        try:
+            path = Path(source)
+            ext = path.suffix.lower()
+
+            extension_map = {
+                '.pdf': DocumentType.PDF,
+                '.docx': DocumentType.DOCX,
+                '.doc': DocumentType.DOCX,
+                '.xlsx': DocumentType.XLSX,
+                '.xls': DocumentType.XLSX,
+                '.pptx': DocumentType.PPTX,
+                '.ppt': DocumentType.PPTX,
+                '.txt': DocumentType.TXT,
+                '.html': DocumentType.HTML,
+                '.htm': DocumentType.HTML,
+                '.rtf': DocumentType.RTF,
+                '.csv': DocumentType.CSV,
+                '.json': DocumentType.JSON,
+                '.xml': DocumentType.XML,
+                '.md': DocumentType.MARKDOWN,
+                '.markdown': DocumentType.MARKDOWN,
+                '.jpg': DocumentType.IMAGE,
+                '.jpeg': DocumentType.IMAGE,
+                '.png': DocumentType.IMAGE,
+                '.gif': DocumentType.IMAGE,
+                '.bmp': DocumentType.IMAGE,
+                '.tiff': DocumentType.IMAGE,
+            }
+
+            doc_type = extension_map.get(ext, DocumentType.UNKNOWN)
+            confidence = 0.8 if doc_type != DocumentType.UNKNOWN else 0.0
+
+            return doc_type, confidence
+
+        except Exception:
+            return DocumentType.UNKNOWN, 0.0
+
+    def _detect_by_mime_type(self, url: str) -> Tuple[DocumentType, float]:
+        """Detect document type by MIME type from URL"""
+        try:
+            if not self.scraper_tool:
+                return DocumentType.UNKNOWN, 0.0
+
+            # Get headers only
+            response = asyncio.run(self.scraper_tool.get_httpx(
+                url, method="HEAD", verify_ssl=False
+            ))
+
+            content_type = response.get('headers', {}).get('content-type', '').lower()
+
+            mime_map = {
+                'application/pdf': DocumentType.PDF,
+                'application/vnd.openxmlformats-officedocument.wordprocessingml.document': DocumentType.DOCX,
+                'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet': DocumentType.XLSX,
+                'application/vnd.openxmlformats-officedocument.presentationml.presentation': DocumentType.PPTX,
+                'text/plain': DocumentType.TXT,
+                'text/html': DocumentType.HTML,
+                'application/rtf': DocumentType.RTF,
+                'text/csv': DocumentType.CSV,
+                'application/json': DocumentType.JSON,
+                'application/xml': DocumentType.XML,
+                'text/xml': DocumentType.XML,
+                'text/markdown': DocumentType.MARKDOWN,
+                'image/jpeg': DocumentType.IMAGE,
+                'image/png': DocumentType.IMAGE,
+                'image/gif': DocumentType.IMAGE,
+                'image/bmp': DocumentType.IMAGE,
+                'image/tiff': DocumentType.IMAGE,
+            }
+
+            for mime_pattern, doc_type in mime_map.items():
+                if mime_pattern in content_type:
+                    return doc_type, 0.9
+
+            return DocumentType.UNKNOWN, 0.0
+
+        except Exception:
+            return DocumentType.UNKNOWN, 0.0
+
+    def _detect_by_content(self, source: str) -> Tuple[DocumentType, float]:
+        """Detect document type by content analysis"""
+        try:
+            # Download a small sample for analysis
+            if self._is_url(source):
+                sample_path = self._download_sample(source, max_size=1024)  # 1KB sample
+            else:
+                sample_path = source
+
+            with open(sample_path, 'rb') as f:
+                header = f.read(512)  # Read first 512 bytes
+
+            # Magic number detection
+            if header.startswith(b'%PDF'):
+                return DocumentType.PDF, 0.95
+            elif header.startswith(b'PK\x03\x04'):  # ZIP-based formats
+                if b'word/' in header or b'document.xml' in header:
+                    return DocumentType.DOCX, 0.9
+                elif b'xl/' in header or b'workbook.xml' in header:
+                    return DocumentType.XLSX, 0.9
+                elif b'ppt/' in header or b'presentation.xml' in header:
+                    return DocumentType.PPTX, 0.9
+            elif header.startswith(b'{\\rtf'):
+                return DocumentType.RTF, 0.95
+            elif header.startswith((b'\xff\xd8\xff', b'\x89PNG', b'GIF8')):
+                return DocumentType.IMAGE, 0.95
+            elif header.startswith(b'<?xml'):
+                return DocumentType.XML, 0.9
+            elif header.startswith((b'{', b'[')):
+                # Try to parse as JSON
+                try:
+                    import json
+                    json.loads(header.decode('utf-8', errors='ignore'))
+                    return DocumentType.JSON, 0.85
+                except:
+                    pass
+
+            # Text-based detection
+            try:
+                text_content = header.decode('utf-8', errors='ignore')
+                if re.match(r'^#\s+.*$', text_content, re.MULTILINE):
+                    return DocumentType.MARKDOWN, 0.7
+                elif '<html' in text_content.lower() or '<!doctype html' in text_content.lower():
+                    return DocumentType.HTML, 0.85
+                elif ',' in text_content and '\n' in text_content:
+                    # Simple CSV detection
+                    lines = text_content.split('\n')[:5]
+                    if all(',' in line for line in lines if line.strip()):
+                        return DocumentType.CSV, 0.6
+            except:
+                pass
+
+            return DocumentType.UNKNOWN, 0.0
+
+        except Exception:
+            return DocumentType.UNKNOWN, 0.0
+
+    def _ensure_local_file(self, source: str) -> str:
+        """Ensure we have a local file, download/retrieve if necessary"""
+        # Check source type and handle accordingly
+        if self._is_cloud_storage_path(source) or self._is_storage_id(source):
+            # Download from cloud storage
+            return asyncio.run(self._download_from_cloud_storage(source))
+        elif self._is_url(source):
+            # Download from URL
+            return self._download_document(source)
+        else:
+            # Local file path
+            if not os.path.exists(source):
+                raise FileNotFoundError(f"File not found: {source}")
+            return source
+
+    def _download_document(self, url: str) -> str:
+        """Download document from URL"""
+        try:
+            if not self.scraper_tool:
+                raise DownloadError("ScraperTool not available for URL download")
+
+            # Generate temp file path
+            parsed_url = urlparse(url)
+            filename = os.path.basename(parsed_url.path) or "document"
+            temp_path = os.path.join(self.settings.temp_dir, f"download_{hash(url)}_{filename}")
+
+            # Download using scraper tool
+            result = asyncio.run(self.scraper_tool.get_httpx(
+                url,
+                content_type="binary",
+                output_path=temp_path,
+                verify_ssl=False
+            ))
+
+            if isinstance(result, dict) and 'saved_to' in result:
+                return result['saved_to']
+            else:
+                # Fallback: save content manually
+                with open(temp_path, 'wb') as f:
+                    if isinstance(result, dict) and 'content' in result:
+                        f.write(result['content'])
+                    else:
+                        f.write(result)
+                return temp_path
+
+        except Exception as e:
+            raise DownloadError(f"Failed to download document from {url}: {str(e)}")
+
+    async def _download_from_cloud_storage(self, source: str) -> str:
+        """Download document from cloud storage"""
+        if not self.file_storage:
+            raise DownloadError("Cloud storage not available")
+
+        try:
+            # Parse the cloud storage path
+            storage_path = self._parse_cloud_storage_path(source)
+
+            # Generate local temp file path
+            temp_filename = f"cloud_download_{hash(source)}_{Path(storage_path).name}"
+            temp_path = os.path.join(self.settings.temp_dir, temp_filename)
+
+            self.logger.info(f"Downloading from cloud storage: {source} -> {temp_path}")
+
+            # Retrieve file from cloud storage
+            file_data = await self.file_storage.retrieve(storage_path)
+
+            # Save to local temp file
+            if isinstance(file_data, bytes):
+                with open(temp_path, 'wb') as f:
+                    f.write(file_data)
+            elif isinstance(file_data, str):
+                with open(temp_path, 'w', encoding='utf-8') as f:
+                    f.write(file_data)
+            else:
+                # Handle other data types (e.g., dict, list)
+                import json
+                with open(temp_path, 'w', encoding='utf-8') as f:
+                    json.dump(file_data, f)
+
+            self.logger.info(f"Successfully downloaded file to: {temp_path}")
+            return temp_path
+
+        except Exception as e:
+            raise DownloadError(f"Failed to download from cloud storage {source}: {str(e)}")
+
+    def _parse_cloud_storage_path(self, source: str) -> str:
+        """Parse cloud storage path to get the storage key"""
+        try:
+            if self._is_storage_id(source):
+                # Direct storage ID
+                return source
+            elif self._is_cloud_storage_path(source):
+                parsed = urlparse(source)
+                if parsed.scheme == 'gs':
+                    # Google Cloud Storage: gs://bucket/path/file.pdf -> path/file.pdf
+                    return parsed.path.lstrip('/')
+                elif parsed.scheme == 's3':
+                    # AWS S3: s3://bucket/path/file.pdf -> path/file.pdf
+                    return parsed.path.lstrip('/')
+                elif parsed.scheme == 'azure':
+                    # Azure Blob: azure://container/path/file.pdf -> path/file.pdf
+                    return parsed.path.lstrip('/')
+                elif parsed.scheme == 'cloud':
+                    # Generic cloud: cloud://path/file.pdf -> path/file.pdf
+                    return parsed.path.lstrip('/')
+                else:
+                    return parsed.path.lstrip('/')
+            else:
+                # Assume it's already a storage path
+                return source
+        except Exception as e:
+            self.logger.warning(f"Failed to parse cloud storage path {source}: {e}")
+            return source
+
+    def _download_sample(self, url: str, max_size: int = 1024) -> str:
+        """Download a small sample of the document for analysis"""
+        # This is a simplified version - in practice, you'd implement range requests
+        return self._download_document(url)
+
+    def _parse_by_type(self, file_path: str, doc_type: DocumentType, strategy: ParsingStrategy) -> Union[str, Dict[str, Any]]:
+        """Parse document based on its type and strategy"""
+        try:
+            if doc_type == DocumentType.PDF:
+                return self._parse_pdf(file_path, strategy)
+            elif doc_type in [DocumentType.DOCX, DocumentType.XLSX, DocumentType.PPTX]:
+                return self._parse_office_document(file_path, doc_type, strategy)
+            elif doc_type == DocumentType.IMAGE:
+                return self._parse_image(file_path, strategy)
+            elif doc_type in [DocumentType.TXT, DocumentType.HTML, DocumentType.CSV,
+                              DocumentType.JSON, DocumentType.XML, DocumentType.MARKDOWN]:
+                return self._parse_text_document(file_path, doc_type, strategy)
+            else:
+                raise UnsupportedDocumentError(f"Unsupported document type: {doc_type}")
+
+        except Exception as e:
+            raise ParseError(f"Failed to parse {doc_type} document: {str(e)}")
+
+    def _parse_pdf(self, file_path: str, strategy: ParsingStrategy) -> Union[str, Dict[str, Any]]:
+        """Parse PDF document"""
+        if self.office_tool:
+            try:
+                text_content = self.office_tool.extract_text(file_path)
+
+                if strategy == ParsingStrategy.TEXT_ONLY:
+                    return text_content
+                elif strategy == ParsingStrategy.STRUCTURED:
+                    # Try to extract structure from PDF
+                    return {
+                        "text": text_content,
+                        "structure": self._extract_pdf_structure(text_content)
+                    }
+                else:
+                    return {
+                        "text": text_content,
+                        "pages": self._split_into_pages(text_content)
+                    }
+            except Exception as e:
+                self.logger.warning(f"OfficeTool PDF parsing failed: {e}")
+
+        # Fallback to simple text extraction
+        return self._extract_text_fallback(file_path)
+
+    def _parse_office_document(self, file_path: str, doc_type: DocumentType, strategy: ParsingStrategy) -> Union[str, Dict[str, Any]]:
+        """Parse Office documents (DOCX, XLSX, PPTX)"""
+        if not self.office_tool:
+            raise UnsupportedDocumentError("OfficeTool not available for Office document parsing")
+
+        try:
+            text_content = self.office_tool.extract_text(file_path)
+
+            if strategy == ParsingStrategy.TEXT_ONLY:
+                return text_content
+            elif strategy == ParsingStrategy.STRUCTURED:
+                return {
+                    "text": text_content,
+                    "structure": self._extract_office_structure(file_path, doc_type)
+                }
+            else:
+                return {
+                    "text": text_content,
+                    "raw_content": text_content
+                }
+
+        except Exception as e:
+            raise ParseError(f"Failed to parse Office document: {str(e)}")
+
+    def _parse_image(self, file_path: str, strategy: ParsingStrategy) -> Union[str, Dict[str, Any]]:
+        """Parse image document using OCR"""
+        if not self.image_tool:
+            raise UnsupportedDocumentError("ImageTool not available for image OCR")
+
+        try:
+            # Use image tool for OCR
+            ocr_result = self.image_tool.ocr_image(file_path)
+
+            if strategy == ParsingStrategy.TEXT_ONLY:
+                return ocr_result.get('text', '')
+            else:
+                return ocr_result
+
+        except Exception as e:
+            raise ParseError(f"Failed to parse image document: {str(e)}")
+
+    def _parse_text_document(self, file_path: str, doc_type: DocumentType, strategy: ParsingStrategy) -> Union[str, Dict[str, Any]]:
+        """Parse text-based documents"""
+        try:
+            with open(file_path, 'r', encoding=self.settings.default_encoding, errors='ignore') as f:
+                content = f.read()
+
+            if strategy == ParsingStrategy.TEXT_ONLY:
+                return content
+            elif strategy == ParsingStrategy.STRUCTURED:
+                return self._extract_text_structure(content, doc_type)
+            else:
+                return {
+                    "text": content,
+                    "lines": content.split('\n'),
+                    "word_count": len(content.split())
+                }
+
+        except Exception as e:
+            raise ParseError(f"Failed to parse text document: {str(e)}")
+
+    def _extract_metadata(self, file_path: str, doc_type: DocumentType) -> Dict[str, Any]:
+        """Extract metadata from document"""
+        metadata = {
+            "file_path": file_path,
+            "file_size": os.path.getsize(file_path),
+            "file_type": doc_type.value,
+            "created_at": os.path.getctime(file_path),
+            "modified_at": os.path.getmtime(file_path)
+        }
+
+        # Add type-specific metadata extraction here
+        # This could leverage existing tools' metadata extraction capabilities
+
+        return metadata
+
+    def _calculate_content_stats(self, content: Union[str, Dict[str, Any]]) -> Dict[str, Any]:
+        """Calculate statistics about the parsed content"""
+        if isinstance(content, str):
+            return {
+                "character_count": len(content),
+                "word_count": len(content.split()),
+                "line_count": len(content.split('\n')),
+                "paragraph_count": len([p for p in content.split('\n\n') if p.strip()])
+            }
+        else:
+            # For structured content, calculate stats on text portion
+            text_content = content.get('text', '')
+            return self._calculate_content_stats(text_content)
+
+    def _create_chunks(self, content: str, chunk_size: int) -> List[Dict[str, Any]]:
+        """Create chunks from content for better AI processing"""
+        chunks = []
+        words = content.split()
+
+        for i in range(0, len(words), chunk_size):
+            chunk_words = words[i:i + chunk_size]
+            chunk_text = ' '.join(chunk_words)
+
+            chunks.append({
+                "index": len(chunks),
+                "text": chunk_text,
+                "word_count": len(chunk_words),
+                "start_word": i,
+                "end_word": min(i + chunk_size, len(words))
+            })
+
+        return chunks
+
+    def _format_as_text(self, result: Dict[str, Any]) -> str:
+        """Format result as plain text"""
+        content = result.get('content', '')
+        if isinstance(content, dict):
+            return content.get('text', str(content))
+        return str(content)
+
+    def _format_as_markdown(self, result: Dict[str, Any]) -> str:
+        """Format result as Markdown"""
+        content = result.get('content', '')
+        metadata = result.get('metadata', {})
+
+        md_content = f"# Document: {result.get('source', 'Unknown')}\n\n"
+        md_content += f"**Type:** {result.get('document_type', 'Unknown')}\n"
+        md_content += f"**Detection Confidence:** {result.get('detection_confidence', 0):.2f}\n\n"
+
+        if isinstance(content, dict):
+            md_content += content.get('text', str(content))
+        else:
+            md_content += str(content)
+
+        return md_content
+
+    def _format_as_html(self, result: Dict[str, Any]) -> str:
+        """Format result as HTML"""
+        content = result.get('content', '')
+
+        html_content = f"""
+        <html>
+        <head><title>Parsed Document</title></head>
+        <body>
+        <h1>Document: {result.get('source', 'Unknown')}</h1>
+        <p><strong>Type:</strong> {result.get('document_type', 'Unknown')}</p>
+        <p><strong>Detection Confidence:</strong> {result.get('detection_confidence', 0):.2f}</p>
+        <div class="content">
+        """
+
+        if isinstance(content, dict):
+            html_content += f"<pre>{content.get('text', str(content))}</pre>"
+        else:
+            html_content += f"<pre>{str(content)}</pre>"
+
+        html_content += "</div></body></html>"
+        return html_content
+
+    def _cleanup_temp_files(self, source: str):
+        """Clean up temporary files"""
+        import glob
+
+        if self._is_url(source):
+            # Clean up URL downloaded files
+            temp_pattern = os.path.join(self.settings.temp_dir, f"download_{hash(source)}_*")
+            for temp_file in glob.glob(temp_pattern):
+                try:
+                    os.remove(temp_file)
+                    self.logger.debug(f"Cleaned up temp file: {temp_file}")
+                except Exception as e:
+                    self.logger.warning(f"Failed to clean up temp file {temp_file}: {e}")
+
+        elif self._is_cloud_storage_path(source) or self._is_storage_id(source):
+            # Clean up cloud storage downloaded files
+            temp_pattern = os.path.join(self.settings.temp_dir, f"cloud_download_{hash(source)}_*")
+            for temp_file in glob.glob(temp_pattern):
+                try:
+                    os.remove(temp_file)
+                    self.logger.debug(f"Cleaned up cloud temp file: {temp_file}")
+                except Exception as e:
+                    self.logger.warning(f"Failed to clean up cloud temp file {temp_file}: {e}")
+
+    # Helper methods for structure extraction
+    def _extract_pdf_structure(self, text: str) -> Dict[str, Any]:
+        """Extract structure from PDF text"""
+        # Implement PDF structure extraction logic
+        return {"sections": [], "headings": []}
+
+    def _extract_office_structure(self, file_path: str, doc_type: DocumentType) -> Dict[str, Any]:
+        """Extract structure from Office documents"""
+        # Implement Office document structure extraction
+        return {"sections": [], "tables": [], "images": []}
+
+    def _extract_text_structure(self, content: str, doc_type: DocumentType) -> Dict[str, Any]:
+        """Extract structure from text documents"""
+        result = {"text": content}
+
+        if doc_type == DocumentType.MARKDOWN:
+            # Extract markdown structure
+            headings = re.findall(r'^(#{1,6})\s+(.+)$', content, re.MULTILINE)
+            result["headings"] = [{"level": len(h[0]), "text": h[1]} for h in headings]
+        elif doc_type == DocumentType.HTML:
+            # Extract HTML structure (simplified)
+            from bs4 import BeautifulSoup
+            soup = BeautifulSoup(content, 'html.parser')
+            result["title"] = soup.title.string if soup.title else ""
+            result["headings"] = [{"tag": h.name, "text": h.get_text()} for h in soup.find_all(['h1', 'h2', 'h3', 'h4', 'h5', 'h6'])]
+        elif doc_type == DocumentType.JSON:
+            import json
+            try:
+                result["json_data"] = json.loads(content)
+            except:
+                pass
+
+        return result
+
+    def _split_into_pages(self, text: str) -> List[str]:
+        """Split text into pages (simplified)"""
+        # This is a simple implementation - could be enhanced
+        pages = text.split('\f')  # Form feed character often indicates page break
+        return [page.strip() for page in pages if page.strip()]
+
+    def _extract_text_fallback(self, file_path: str) -> str:
+        """Fallback text extraction method"""
+        try:
+            with open(file_path, 'r', encoding=self.settings.default_encoding, errors='ignore') as f:
+                return f.read()
+        except:
+            with open(file_path, 'rb') as f:
+                return f.read().decode('utf-8', errors='ignore')
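
For orientation, a minimal usage sketch of the new tool. The import path follows the file location shown in the list above (aiecs/tools/docs/document_parser_tool.py); the URL and parameter values are illustrative, not part of the release.

    from aiecs.tools.docs.document_parser_tool import (
        DocumentParserTool, ParsingStrategy, OutputFormat
    )

    parser = DocumentParserTool()

    # Inspect type detection before committing to a full parse
    detection = parser.detect_document_type("https://example.com/report.pdf")
    print(detection["detected_type"], detection["confidence"], detection["detection_methods"])

    # Parse to the default JSON structure, with 500-word chunks for downstream AI use
    result = parser.parse_document(
        "https://example.com/report.pdf",
        strategy=ParsingStrategy.STRUCTURED,
        output_format=OutputFormat.JSON,
        chunk_size=500,
    )
    print(result["document_type"], result["content_stats"], len(result["chunks"]))

The same source argument also accepts local paths, gs://, s3://, azure://, and cloud:// URIs, or a bare storage ID, per _ensure_local_file above.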
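Because DocumentParserSettings declares env_prefix="DOC_PARSER_", deployment-level overrides can be supplied without code changes. A sketch, assuming the standard pydantic-settings mapping of prefix plus upper-cased field name (the variable values here are examples only):

    import os

    # Hypothetical overrides; names derive from DocumentParserSettings field names.
    os.environ["DOC_PARSER_MAX_FILE_SIZE"] = str(100 * 1024 * 1024)   # raise the 50MB cap
    os.environ["DOC_PARSER_ENABLE_CLOUD_STORAGE"] = "false"           # skip FileStorage/GCS init
    os.environ["DOC_PARSER_GCS_BUCKET_NAME"] = "my-docs-bucket"

    from aiecs.tools.docs.document_parser_tool import DocumentParserSettings
    settings = DocumentParserSettings()
    assert settings.max_file_size == 100 * 1024 * 1024

Values passed directly to DocumentParserTool(config={...}) take precedence, since __init__ unpacks the config dict into DocumentParserSettings.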