aiecs 1.0.8__py3-none-any.whl → 1.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of aiecs has been flagged as potentially problematic.

Files changed (81)
  1. aiecs/__init__.py +1 -1
  2. aiecs/aiecs_client.py +159 -1
  3. aiecs/config/config.py +6 -0
  4. aiecs/domain/__init__.py +95 -0
  5. aiecs/domain/community/__init__.py +159 -0
  6. aiecs/domain/community/agent_adapter.py +516 -0
  7. aiecs/domain/community/analytics.py +465 -0
  8. aiecs/domain/community/collaborative_workflow.py +99 -7
  9. aiecs/domain/community/communication_hub.py +649 -0
  10. aiecs/domain/community/community_builder.py +322 -0
  11. aiecs/domain/community/community_integration.py +365 -12
  12. aiecs/domain/community/community_manager.py +481 -5
  13. aiecs/domain/community/decision_engine.py +459 -13
  14. aiecs/domain/community/exceptions.py +238 -0
  15. aiecs/domain/community/models/__init__.py +36 -0
  16. aiecs/domain/community/resource_manager.py +1 -1
  17. aiecs/domain/community/shared_context_manager.py +621 -0
  18. aiecs/domain/context/__init__.py +24 -0
  19. aiecs/domain/context/context_engine.py +37 -33
  20. aiecs/main.py +20 -2
  21. aiecs/scripts/aid/VERSION_MANAGEMENT.md +97 -0
  22. aiecs/scripts/aid/__init__.py +15 -0
  23. aiecs/scripts/aid/version_manager.py +224 -0
  24. aiecs/scripts/dependance_check/__init__.py +18 -0
  25. aiecs/scripts/{download_nlp_data.py → dependance_check/download_nlp_data.py} +51 -8
  26. aiecs/scripts/dependance_patch/__init__.py +8 -0
  27. aiecs/scripts/dependance_patch/fix_weasel/__init__.py +12 -0
  28. aiecs/scripts/tools_develop/README.md +340 -0
  29. aiecs/scripts/tools_develop/__init__.py +16 -0
  30. aiecs/scripts/tools_develop/check_type_annotations.py +263 -0
  31. aiecs/scripts/tools_develop/validate_tool_schemas.py +346 -0
  32. aiecs/tools/__init__.py +53 -34
  33. aiecs/tools/docs/__init__.py +106 -0
  34. aiecs/tools/docs/ai_document_orchestrator.py +556 -0
  35. aiecs/tools/docs/ai_document_writer_orchestrator.py +2222 -0
  36. aiecs/tools/docs/content_insertion_tool.py +1234 -0
  37. aiecs/tools/docs/document_creator_tool.py +1179 -0
  38. aiecs/tools/docs/document_layout_tool.py +1105 -0
  39. aiecs/tools/docs/document_parser_tool.py +924 -0
  40. aiecs/tools/docs/document_writer_tool.py +1636 -0
  41. aiecs/tools/langchain_adapter.py +102 -51
  42. aiecs/tools/schema_generator.py +265 -0
  43. aiecs/tools/statistics/__init__.py +82 -0
  44. aiecs/tools/statistics/ai_data_analysis_orchestrator.py +581 -0
  45. aiecs/tools/statistics/ai_insight_generator_tool.py +473 -0
  46. aiecs/tools/statistics/ai_report_orchestrator_tool.py +629 -0
  47. aiecs/tools/statistics/data_loader_tool.py +518 -0
  48. aiecs/tools/statistics/data_profiler_tool.py +599 -0
  49. aiecs/tools/statistics/data_transformer_tool.py +531 -0
  50. aiecs/tools/statistics/data_visualizer_tool.py +460 -0
  51. aiecs/tools/statistics/model_trainer_tool.py +470 -0
  52. aiecs/tools/statistics/statistical_analyzer_tool.py +426 -0
  53. aiecs/tools/task_tools/chart_tool.py +2 -1
  54. aiecs/tools/task_tools/image_tool.py +43 -43
  55. aiecs/tools/task_tools/office_tool.py +48 -36
  56. aiecs/tools/task_tools/pandas_tool.py +37 -33
  57. aiecs/tools/task_tools/report_tool.py +67 -56
  58. aiecs/tools/task_tools/research_tool.py +32 -31
  59. aiecs/tools/task_tools/scraper_tool.py +53 -46
  60. aiecs/tools/task_tools/search_tool.py +1123 -0
  61. aiecs/tools/task_tools/stats_tool.py +20 -15
  62. {aiecs-1.0.8.dist-info → aiecs-1.2.0.dist-info}/METADATA +5 -1
  63. aiecs-1.2.0.dist-info/RECORD +135 -0
  64. aiecs-1.2.0.dist-info/entry_points.txt +10 -0
  65. aiecs/tools/task_tools/search_api.py +0 -7
  66. aiecs-1.0.8.dist-info/RECORD +0 -98
  67. aiecs-1.0.8.dist-info/entry_points.txt +0 -7
  68. /aiecs/scripts/{DEPENDENCY_SYSTEM_SUMMARY.md → dependance_check/DEPENDENCY_SYSTEM_SUMMARY.md} +0 -0
  69. /aiecs/scripts/{README_DEPENDENCY_CHECKER.md → dependance_check/README_DEPENDENCY_CHECKER.md} +0 -0
  70. /aiecs/scripts/{dependency_checker.py → dependance_check/dependency_checker.py} +0 -0
  71. /aiecs/scripts/{dependency_fixer.py → dependance_check/dependency_fixer.py} +0 -0
  72. /aiecs/scripts/{quick_dependency_check.py → dependance_check/quick_dependency_check.py} +0 -0
  73. /aiecs/scripts/{setup_nlp_data.sh → dependance_check/setup_nlp_data.sh} +0 -0
  74. /aiecs/scripts/{README_WEASEL_PATCH.md → dependance_patch/fix_weasel/README_WEASEL_PATCH.md} +0 -0
  75. /aiecs/scripts/{fix_weasel_validator.py → dependance_patch/fix_weasel/fix_weasel_validator.py} +0 -0
  76. /aiecs/scripts/{fix_weasel_validator.sh → dependance_patch/fix_weasel/fix_weasel_validator.sh} +0 -0
  77. /aiecs/scripts/{patch_weasel_library.sh → dependance_patch/fix_weasel/patch_weasel_library.sh} +0 -0
  78. /aiecs/scripts/{run_weasel_patch.sh → dependance_patch/fix_weasel/run_weasel_patch.sh} +0 -0
  79. {aiecs-1.0.8.dist-info → aiecs-1.2.0.dist-info}/WHEEL +0 -0
  80. {aiecs-1.0.8.dist-info → aiecs-1.2.0.dist-info}/licenses/LICENSE +0 -0
  81. {aiecs-1.0.8.dist-info → aiecs-1.2.0.dist-info}/top_level.txt +0 -0
aiecs/tools/docs/document_parser_tool.py
@@ -0,0 +1,924 @@
+import os
+import re
+import mimetypes
+import logging
+import asyncio
+from typing import Dict, Any, List, Optional, Union, Tuple
+from enum import Enum
+from urllib.parse import urlparse
+from pathlib import Path
+import tempfile
+
+import httpx
+from pydantic import BaseModel, Field, ValidationError, ConfigDict
+
+from aiecs.tools.base_tool import BaseTool
+from aiecs.tools import register_tool
+
+
+class DocumentType(str, Enum):
+    """Supported document types for parsing"""
+    PDF = "pdf"
+    DOCX = "docx"
+    XLSX = "xlsx"
+    PPTX = "pptx"
+    TXT = "txt"
+    HTML = "html"
+    RTF = "rtf"
+    CSV = "csv"
+    JSON = "json"
+    XML = "xml"
+    MARKDOWN = "md"
+    IMAGE = "image"
+    UNKNOWN = "unknown"
+
+
+class ParsingStrategy(str, Enum):
+    """Document parsing strategies"""
+    TEXT_ONLY = "text_only"
+    STRUCTURED = "structured"
+    FULL_CONTENT = "full_content"
+    METADATA_ONLY = "metadata_only"
+
+
+class OutputFormat(str, Enum):
+    """Output formats for parsed content"""
+    TEXT = "text"
+    JSON = "json"
+    MARKDOWN = "markdown"
+    HTML = "html"
+
+
+
+
+
+class DocumentParserError(Exception):
+    """Base exception for document parser errors"""
+    pass
+
+
+class UnsupportedDocumentError(DocumentParserError):
+    """Raised when document type is not supported"""
+    pass
+
+
+class DownloadError(DocumentParserError):
+    """Raised when document download fails"""
+    pass
+
+
+class ParseError(DocumentParserError):
+    """Raised when document parsing fails"""
+    pass
+
+
+@register_tool("document_parser")
+class DocumentParserTool(BaseTool):
+    """
+    Modern high-performance document parsing component that can:
+    1. Auto-detect document types from URLs or files
+    2. Download documents from URLs
+    3. Parse various document formats using existing atomic tools
+    4. Output structured content for AI consumption
+
+    Leverages existing tools:
+    - ScraperTool for URL downloading
+    - OfficeTool for Office document parsing
+    - ImageTool for image OCR
+    """
+
+    # Configuration schema
+    class Config(BaseModel):
+        """Configuration for the document parser tool"""
+        model_config = ConfigDict(env_prefix="DOC_PARSER_")
+
+        user_agent: str = Field(
+            default="DocumentParser/1.0",
+            description="User agent for HTTP requests"
+        )
+        max_file_size: int = Field(
+            default=50 * 1024 * 1024,
+            description="Maximum file size in bytes"
+        )
+        temp_dir: str = Field(
+            default=os.path.join(tempfile.gettempdir(), 'document_parser'),
+            description="Temporary directory for document processing"
+        )
+        default_encoding: str = Field(
+            default="utf-8",
+            description="Default encoding for text files"
+        )
+        timeout: int = Field(
+            default=30,
+            description="Timeout for HTTP requests in seconds"
+        )
+        max_pages: int = Field(
+            default=1000,
+            description="Maximum number of pages to process for large documents"
+        )
+        enable_cloud_storage: bool = Field(
+            default=True,
+            description="Whether to enable cloud storage integration"
+        )
+        gcs_bucket_name: str = Field(
+            default="aiecs-documents",
+            description="Google Cloud Storage bucket name"
+        )
+        gcs_project_id: Optional[str] = Field(
+            default=None,
+            description="Google Cloud Storage project ID"
+        )
+
+    def __init__(self, config: Optional[Dict] = None):
+        """Initialize DocumentParserTool with settings"""
+        super().__init__(config)
+
+        # Parse configuration
+        self.config = self.Config(**(config or {}))
+
+        self.logger = logging.getLogger(__name__)
+        os.makedirs(self.config.temp_dir, exist_ok=True)
+
+        # Initialize dependent tools
+        self._init_dependent_tools()
+
+        # Initialize cloud storage
+        self._init_cloud_storage()
+
+    def _init_dependent_tools(self):
+        """Initialize dependent tools for document processing"""
+        try:
+            from aiecs.tools.task_tools.scraper_tool import ScraperTool
+            self.scraper_tool = ScraperTool()
+        except ImportError:
+            self.logger.warning("ScraperTool not available")
+            self.scraper_tool = None
+
+        try:
+            from aiecs.tools.task_tools.office_tool import OfficeTool
+            self.office_tool = OfficeTool()
+        except ImportError:
+            self.logger.warning("OfficeTool not available")
+            self.office_tool = None
+
+        try:
+            from aiecs.tools.task_tools.image_tool import ImageTool
+            self.image_tool = ImageTool()
+        except ImportError:
+            self.logger.warning("ImageTool not available")
+            self.image_tool = None
+
+    def _init_cloud_storage(self):
+        """Initialize cloud storage for document retrieval"""
+        self.file_storage = None
+
+        if self.config.enable_cloud_storage:
+            try:
+                from aiecs.infrastructure.persistence.file_storage import FileStorage
+
+                storage_config = {
+                    'gcs_bucket_name': self.config.gcs_bucket_name,
+                    'gcs_project_id': self.config.gcs_project_id,
+                    'enable_local_fallback': True,
+                    'local_storage_path': self.config.temp_dir
+                }
+
+                self.file_storage = FileStorage(storage_config)
+                asyncio.create_task(self._init_storage_async())
+
+            except ImportError:
+                self.logger.warning("FileStorage not available, cloud storage disabled")
+            except Exception as e:
+                self.logger.warning(f"Failed to initialize cloud storage: {e}")
+
+    async def _init_storage_async(self):
+        """Async initialization of file storage"""
+        try:
+            if self.file_storage:
+                await self.file_storage.initialize()
+                self.logger.info("Cloud storage initialized successfully")
+        except Exception as e:
+            self.logger.warning(f"Cloud storage initialization failed: {e}")
+            self.file_storage = None
+
+    # Schema definitions
+    class ParseDocumentSchema(BaseModel):
+        """Schema for parse_document operation"""
+        source: str = Field(description="URL or file path to the document")
+        strategy: ParsingStrategy = Field(default=ParsingStrategy.FULL_CONTENT, description="Parsing strategy")
+        output_format: OutputFormat = Field(default=OutputFormat.JSON, description="Output format")
+        force_type: Optional[DocumentType] = Field(default=None, description="Force document type detection")
+        extract_metadata: bool = Field(default=True, description="Whether to extract metadata")
+        chunk_size: Optional[int] = Field(default=None, description="Chunk size for large documents")
+
+    class DetectTypeSchema(BaseModel):
+        """Schema for detect_document_type operation"""
+        source: str = Field(description="URL or file path to analyze")
+        download_sample: bool = Field(default=True, description="Download sample for content-based detection")
+
+    def detect_document_type(self, source: str, download_sample: bool = True) -> Dict[str, Any]:
+        """
+        Detect document type from URL or file path
+
+        Args:
+            source: URL or file path
+            download_sample: Whether to download sample for content analysis
+
+        Returns:
+            Dict containing detected type and confidence
+        """
+        try:
+            result = {
+                "source": source,
+                "is_url": self._is_url(source),
+                "detected_type": DocumentType.UNKNOWN,
+                "confidence": 0.0,
+                "mime_type": None,
+                "file_extension": None,
+                "file_size": None,
+                "detection_methods": []
+            }
+
+            # Method 1: File extension analysis
+            extension_type, ext_confidence = self._detect_by_extension(source)
+            if extension_type != DocumentType.UNKNOWN:
+                result["detected_type"] = extension_type
+                result["confidence"] = ext_confidence
+                result["file_extension"] = Path(source).suffix.lower()
+                result["detection_methods"].append("file_extension")
+
+            # Method 2: MIME type detection (for URLs)
+            if self._is_url(source) and download_sample:
+                mime_type, mime_confidence = self._detect_by_mime_type(source)
+                if mime_type != DocumentType.UNKNOWN and mime_confidence > result["confidence"]:
+                    result["detected_type"] = mime_type
+                    result["confidence"] = mime_confidence
+                    result["detection_methods"].append("mime_type")
+
+            # Method 3: Content-based detection
+            if download_sample:
+                content_type, content_confidence = self._detect_by_content(source)
+                if content_type != DocumentType.UNKNOWN and content_confidence > result["confidence"]:
+                    result["detected_type"] = content_type
+                    result["confidence"] = content_confidence
+                    result["detection_methods"].append("content_analysis")
+
+            return result
+
+        except Exception as e:
+            raise DocumentParserError(f"Document type detection failed: {str(e)}")
+
+    def parse_document(self,
+                       source: str,
+                       strategy: ParsingStrategy = ParsingStrategy.FULL_CONTENT,
+                       output_format: OutputFormat = OutputFormat.JSON,
+                       force_type: Optional[DocumentType] = None,
+                       extract_metadata: bool = True,
+                       chunk_size: Optional[int] = None) -> Dict[str, Any]:
+        """
+        Parse document from URL or file path
+
+        Args:
+            source: URL or file path to document
+            strategy: Parsing strategy to use
+            output_format: Format for output content
+            force_type: Force specific document type
+            extract_metadata: Whether to extract metadata
+            chunk_size: Chunk size for large documents
+
+        Returns:
+            Dict containing parsed content and metadata
+        """
+        try:
+            # Step 1: Detect document type
+            if force_type:
+                doc_type = force_type
+                confidence = 1.0
+            else:
+                detection_result = self.detect_document_type(source)
+                doc_type = detection_result["detected_type"]
+                confidence = detection_result["confidence"]
+
+            if confidence < 0.5:
+                raise UnsupportedDocumentError(f"Unable to reliably detect document type for: {source}")
+
+            # Step 2: Download document if it's a URL
+            local_path = self._ensure_local_file(source)
+
+            # Step 3: Parse document based on type and strategy
+            content = self._parse_by_type(local_path, doc_type, strategy)
+
+            # Step 4: Extract metadata if requested
+            metadata = {}
+            if extract_metadata:
+                metadata = self._extract_metadata(local_path, doc_type)
+
+            # Step 5: Format output
+            result = {
+                "source": source,
+                "document_type": doc_type,
+                "detection_confidence": confidence,
+                "parsing_strategy": strategy,
+                "metadata": metadata,
+                "content": content,
+                "content_stats": self._calculate_content_stats(content),
+                "chunks": []
+            }
+
+            # Step 6: Create chunks if requested
+            if chunk_size and isinstance(content, str):
+                result["chunks"] = self._create_chunks(content, chunk_size)
+
+            # Step 7: Format output according to requested format
+            if output_format == OutputFormat.TEXT:
+                return {"text": self._format_as_text(result)}
+            elif output_format == OutputFormat.MARKDOWN:
+                return {"markdown": self._format_as_markdown(result)}
+            elif output_format == OutputFormat.HTML:
+                return {"html": self._format_as_html(result)}
+            else:
+                return result
+
+        except Exception as e:
+            if isinstance(e, DocumentParserError):
+                raise
+            raise ParseError(f"Document parsing failed: {str(e)}")
+        finally:
+            # Cleanup temporary files
+            self._cleanup_temp_files(source)
+
+    async def parse_document_async(self,
+                                   source: str,
+                                   strategy: ParsingStrategy = ParsingStrategy.FULL_CONTENT,
+                                   output_format: OutputFormat = OutputFormat.JSON,
+                                   force_type: Optional[DocumentType] = None,
+                                   extract_metadata: bool = True,
+                                   chunk_size: Optional[int] = None) -> Dict[str, Any]:
+        """Async version of parse_document"""
+        return await asyncio.to_thread(
+            self.parse_document,
+            source=source,
+            strategy=strategy,
+            output_format=output_format,
+            force_type=force_type,
+            extract_metadata=extract_metadata,
+            chunk_size=chunk_size
+        )
+
+    def _is_url(self, source: str) -> bool:
+        """Check if source is a URL"""
+        try:
+            result = urlparse(source)
+            return bool(result.scheme and result.netloc)
+        except:
+            return False
+
+    def _is_cloud_storage_path(self, source: str) -> bool:
+        """Check if source is a cloud storage path"""
+        # Support various cloud storage path formats:
+        # - gs://bucket/path/file.pdf (Google Cloud Storage)
+        # - s3://bucket/path/file.pdf (AWS S3)
+        # - azure://container/path/file.pdf (Azure Blob Storage)
+        # - cloud://path/file.pdf (Generic cloud storage)
+        cloud_schemes = ['gs', 's3', 'azure', 'cloud']
+        try:
+            parsed = urlparse(source)
+            return parsed.scheme in cloud_schemes
+        except:
+            return False
+
+    def _is_storage_id(self, source: str) -> bool:
+        """Check if source is a storage ID (UUID-like identifier)"""
+        # Check for UUID patterns or other storage ID formats
+        import re
+        uuid_pattern = r'^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$'
+        storage_id_pattern = r'^[a-zA-Z0-9_-]{10,}$'  # Generic storage ID
+
+        return bool(re.match(uuid_pattern, source, re.IGNORECASE) or
+                    re.match(storage_id_pattern, source))
+
+    def _detect_by_extension(self, source: str) -> Tuple[DocumentType, float]:
+        """Detect document type by file extension"""
+        try:
+            path = Path(source)
+            ext = path.suffix.lower()
+
+            extension_map = {
+                '.pdf': DocumentType.PDF,
+                '.docx': DocumentType.DOCX,
+                '.doc': DocumentType.DOCX,
+                '.xlsx': DocumentType.XLSX,
+                '.xls': DocumentType.XLSX,
+                '.pptx': DocumentType.PPTX,
+                '.ppt': DocumentType.PPTX,
+                '.txt': DocumentType.TXT,
+                '.html': DocumentType.HTML,
+                '.htm': DocumentType.HTML,
+                '.rtf': DocumentType.RTF,
+                '.csv': DocumentType.CSV,
+                '.json': DocumentType.JSON,
+                '.xml': DocumentType.XML,
+                '.md': DocumentType.MARKDOWN,
+                '.markdown': DocumentType.MARKDOWN,
+                '.jpg': DocumentType.IMAGE,
+                '.jpeg': DocumentType.IMAGE,
+                '.png': DocumentType.IMAGE,
+                '.gif': DocumentType.IMAGE,
+                '.bmp': DocumentType.IMAGE,
+                '.tiff': DocumentType.IMAGE,
+            }
+
+            doc_type = extension_map.get(ext, DocumentType.UNKNOWN)
+            confidence = 0.8 if doc_type != DocumentType.UNKNOWN else 0.0
+
+            return doc_type, confidence
+
+        except Exception:
+            return DocumentType.UNKNOWN, 0.0
+
+    def _detect_by_mime_type(self, url: str) -> Tuple[DocumentType, float]:
+        """Detect document type by MIME type from URL"""
+        try:
+            if not self.scraper_tool:
+                return DocumentType.UNKNOWN, 0.0
+
+            # Get headers only
+            response = asyncio.run(self.scraper_tool.get_httpx(
+                url, method="HEAD", verify_ssl=False
+            ))
+
+            content_type = response.get('headers', {}).get('content-type', '').lower()
+
+            mime_map = {
+                'application/pdf': DocumentType.PDF,
+                'application/vnd.openxmlformats-officedocument.wordprocessingml.document': DocumentType.DOCX,
+                'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet': DocumentType.XLSX,
+                'application/vnd.openxmlformats-officedocument.presentationml.presentation': DocumentType.PPTX,
+                'text/plain': DocumentType.TXT,
+                'text/html': DocumentType.HTML,
+                'application/rtf': DocumentType.RTF,
+                'text/csv': DocumentType.CSV,
+                'application/json': DocumentType.JSON,
+                'application/xml': DocumentType.XML,
+                'text/xml': DocumentType.XML,
+                'text/markdown': DocumentType.MARKDOWN,
+                'image/jpeg': DocumentType.IMAGE,
+                'image/png': DocumentType.IMAGE,
+                'image/gif': DocumentType.IMAGE,
+                'image/bmp': DocumentType.IMAGE,
+                'image/tiff': DocumentType.IMAGE,
+            }
+
+            for mime_pattern, doc_type in mime_map.items():
+                if mime_pattern in content_type:
+                    return doc_type, 0.9
+
+            return DocumentType.UNKNOWN, 0.0
+
+        except Exception:
+            return DocumentType.UNKNOWN, 0.0
+
+    def _detect_by_content(self, source: str) -> Tuple[DocumentType, float]:
+        """Detect document type by content analysis"""
+        try:
+            # Download a small sample for analysis
+            if self._is_url(source):
+                sample_path = self._download_sample(source, max_size=1024)  # 1KB sample
+            else:
+                sample_path = source
+
+            with open(sample_path, 'rb') as f:
+                header = f.read(512)  # Read first 512 bytes
+
+            # Magic number detection
+            if header.startswith(b'%PDF'):
+                return DocumentType.PDF, 0.95
+            elif header.startswith(b'PK\x03\x04'):  # ZIP-based formats
+                if b'word/' in header or b'document.xml' in header:
+                    return DocumentType.DOCX, 0.9
+                elif b'xl/' in header or b'workbook.xml' in header:
+                    return DocumentType.XLSX, 0.9
+                elif b'ppt/' in header or b'presentation.xml' in header:
+                    return DocumentType.PPTX, 0.9
+            elif header.startswith(b'{\\rtf'):
+                return DocumentType.RTF, 0.95
+            elif header.startswith((b'\xff\xd8\xff', b'\x89PNG', b'GIF8')):
+                return DocumentType.IMAGE, 0.95
+            elif header.startswith(b'<?xml'):
+                return DocumentType.XML, 0.9
+            elif header.startswith((b'{', b'[')):
+                # Try to parse as JSON
+                try:
+                    import json
+                    json.loads(header.decode('utf-8', errors='ignore'))
+                    return DocumentType.JSON, 0.85
+                except:
+                    pass
+
+            # Text-based detection
+            try:
+                text_content = header.decode('utf-8', errors='ignore')
+                if re.match(r'^#\s+.*$', text_content, re.MULTILINE):
+                    return DocumentType.MARKDOWN, 0.7
+                elif '<html' in text_content.lower() or '<!doctype html' in text_content.lower():
+                    return DocumentType.HTML, 0.85
+                elif ',' in text_content and '\n' in text_content:
+                    # Simple CSV detection
+                    lines = text_content.split('\n')[:5]
+                    if all(',' in line for line in lines if line.strip()):
+                        return DocumentType.CSV, 0.6
+            except:
+                pass
+
+            return DocumentType.UNKNOWN, 0.0
+
+        except Exception:
+            return DocumentType.UNKNOWN, 0.0
+
+    def _ensure_local_file(self, source: str) -> str:
+        """Ensure we have a local file, download/retrieve if necessary"""
+        # Check source type and handle accordingly
+        if self._is_cloud_storage_path(source) or self._is_storage_id(source):
+            # Download from cloud storage
+            return asyncio.run(self._download_from_cloud_storage(source))
+        elif self._is_url(source):
+            # Download from URL
+            return self._download_document(source)
+        else:
+            # Local file path
+            if not os.path.exists(source):
+                raise FileNotFoundError(f"File not found: {source}")
+            return source
+
+    def _download_document(self, url: str) -> str:
+        """Download document from URL"""
+        try:
+            if not self.scraper_tool:
+                raise DownloadError("ScraperTool not available for URL download")
+
+            # Generate temp file path
+            parsed_url = urlparse(url)
+            filename = os.path.basename(parsed_url.path) or "document"
+            temp_path = os.path.join(self.config.temp_dir, f"download_{hash(url)}_{filename}")
+
+            # Download using scraper tool
+            result = asyncio.run(self.scraper_tool.get_httpx(
+                url,
+                content_type="binary",
+                output_path=temp_path,
+                verify_ssl=False
+            ))
+
+            if isinstance(result, dict) and 'saved_to' in result:
+                return result['saved_to']
+            else:
+                # Fallback: save content manually
+                with open(temp_path, 'wb') as f:
+                    if isinstance(result, dict) and 'content' in result:
+                        f.write(result['content'])
+                    else:
+                        f.write(result)
+                return temp_path
+
+        except Exception as e:
+            raise DownloadError(f"Failed to download document from {url}: {str(e)}")
+
+    async def _download_from_cloud_storage(self, source: str) -> str:
+        """Download document from cloud storage"""
+        if not self.file_storage:
+            raise DownloadError("Cloud storage not available")
+
+        try:
+            # Parse the cloud storage path
+            storage_path = self._parse_cloud_storage_path(source)
+
+            # Generate local temp file path
+            temp_filename = f"cloud_download_{hash(source)}_{Path(storage_path).name}"
+            temp_path = os.path.join(self.config.temp_dir, temp_filename)
+
+            self.logger.info(f"Downloading from cloud storage: {source} -> {temp_path}")
+
+            # Retrieve file from cloud storage
+            file_data = await self.file_storage.retrieve(storage_path)
+
+            # Save to local temp file
+            if isinstance(file_data, bytes):
+                with open(temp_path, 'wb') as f:
+                    f.write(file_data)
+            elif isinstance(file_data, str):
+                with open(temp_path, 'w', encoding='utf-8') as f:
+                    f.write(file_data)
+            else:
+                # Handle other data types (e.g., dict, list)
+                import json
+                with open(temp_path, 'w', encoding='utf-8') as f:
+                    json.dump(file_data, f)
+
+            self.logger.info(f"Successfully downloaded file to: {temp_path}")
+            return temp_path
+
+        except Exception as e:
+            raise DownloadError(f"Failed to download from cloud storage {source}: {str(e)}")
+
+    def _parse_cloud_storage_path(self, source: str) -> str:
+        """Parse cloud storage path to get the storage key"""
+        try:
+            if self._is_storage_id(source):
+                # Direct storage ID
+                return source
+            elif self._is_cloud_storage_path(source):
+                parsed = urlparse(source)
+                if parsed.scheme == 'gs':
+                    # Google Cloud Storage: gs://bucket/path/file.pdf -> path/file.pdf
+                    return parsed.path.lstrip('/')
+                elif parsed.scheme == 's3':
+                    # AWS S3: s3://bucket/path/file.pdf -> path/file.pdf
+                    return parsed.path.lstrip('/')
+                elif parsed.scheme == 'azure':
+                    # Azure Blob: azure://container/path/file.pdf -> path/file.pdf
+                    return parsed.path.lstrip('/')
+                elif parsed.scheme == 'cloud':
+                    # Generic cloud: cloud://path/file.pdf -> path/file.pdf
+                    return parsed.path.lstrip('/')
+                else:
+                    return parsed.path.lstrip('/')
+            else:
+                # Assume it's already a storage path
+                return source
+        except Exception as e:
+            self.logger.warning(f"Failed to parse cloud storage path {source}: {e}")
+            return source
+
+    def _download_sample(self, url: str, max_size: int = 1024) -> str:
+        """Download a small sample of the document for analysis"""
+        # This is a simplified version - in practice, you'd implement range requests
+        return self._download_document(url)
+
+    def _parse_by_type(self, file_path: str, doc_type: DocumentType, strategy: ParsingStrategy) -> Union[str, Dict[str, Any]]:
+        """Parse document based on its type and strategy"""
+        try:
+            if doc_type == DocumentType.PDF:
+                return self._parse_pdf(file_path, strategy)
+            elif doc_type in [DocumentType.DOCX, DocumentType.XLSX, DocumentType.PPTX]:
+                return self._parse_office_document(file_path, doc_type, strategy)
+            elif doc_type == DocumentType.IMAGE:
+                return self._parse_image(file_path, strategy)
+            elif doc_type in [DocumentType.TXT, DocumentType.HTML, DocumentType.CSV,
+                              DocumentType.JSON, DocumentType.XML, DocumentType.MARKDOWN]:
+                return self._parse_text_document(file_path, doc_type, strategy)
+            else:
+                raise UnsupportedDocumentError(f"Unsupported document type: {doc_type}")
+
+        except Exception as e:
+            raise ParseError(f"Failed to parse {doc_type} document: {str(e)}")
+
+    def _parse_pdf(self, file_path: str, strategy: ParsingStrategy) -> Union[str, Dict[str, Any]]:
+        """Parse PDF document"""
+        if self.office_tool:
+            try:
+                text_content = self.office_tool.extract_text(file_path)
+
+                if strategy == ParsingStrategy.TEXT_ONLY:
+                    return text_content
+                elif strategy == ParsingStrategy.STRUCTURED:
+                    # Try to extract structure from PDF
+                    return {
+                        "text": text_content,
+                        "structure": self._extract_pdf_structure(text_content)
+                    }
+                else:
+                    return {
+                        "text": text_content,
+                        "pages": self._split_into_pages(text_content)
+                    }
+            except Exception as e:
+                self.logger.warning(f"OfficeTool PDF parsing failed: {e}")
+
+        # Fallback to simple text extraction
+        return self._extract_text_fallback(file_path)
+
+    def _parse_office_document(self, file_path: str, doc_type: DocumentType, strategy: ParsingStrategy) -> Union[str, Dict[str, Any]]:
+        """Parse Office documents (DOCX, XLSX, PPTX)"""
+        if not self.office_tool:
+            raise UnsupportedDocumentError("OfficeTool not available for Office document parsing")
+
+        try:
+            text_content = self.office_tool.extract_text(file_path)
+
+            if strategy == ParsingStrategy.TEXT_ONLY:
+                return text_content
+            elif strategy == ParsingStrategy.STRUCTURED:
+                return {
+                    "text": text_content,
+                    "structure": self._extract_office_structure(file_path, doc_type)
+                }
+            else:
+                return {
+                    "text": text_content,
+                    "raw_content": text_content
+                }
+
+        except Exception as e:
+            raise ParseError(f"Failed to parse Office document: {str(e)}")
+
+    def _parse_image(self, file_path: str, strategy: ParsingStrategy) -> Union[str, Dict[str, Any]]:
+        """Parse image document using OCR"""
+        if not self.image_tool:
+            raise UnsupportedDocumentError("ImageTool not available for image OCR")
+
+        try:
+            # Use image tool for OCR
+            ocr_result = self.image_tool.ocr_image(file_path)
+
+            if strategy == ParsingStrategy.TEXT_ONLY:
+                return ocr_result.get('text', '')
+            else:
+                return ocr_result
+
+        except Exception as e:
+            raise ParseError(f"Failed to parse image document: {str(e)}")
+
+    def _parse_text_document(self, file_path: str, doc_type: DocumentType, strategy: ParsingStrategy) -> Union[str, Dict[str, Any]]:
+        """Parse text-based documents"""
+        try:
+            with open(file_path, 'r', encoding=self.config.default_encoding, errors='ignore') as f:
+                content = f.read()
+
+            if strategy == ParsingStrategy.TEXT_ONLY:
+                return content
+            elif strategy == ParsingStrategy.STRUCTURED:
+                return self._extract_text_structure(content, doc_type)
+            else:
+                return {
+                    "text": content,
+                    "lines": content.split('\n'),
+                    "word_count": len(content.split())
+                }
+
+        except Exception as e:
+            raise ParseError(f"Failed to parse text document: {str(e)}")
+
+    def _extract_metadata(self, file_path: str, doc_type: DocumentType) -> Dict[str, Any]:
+        """Extract metadata from document"""
+        metadata = {
+            "file_path": file_path,
+            "file_size": os.path.getsize(file_path),
+            "file_type": doc_type.value,
+            "created_at": os.path.getctime(file_path),
+            "modified_at": os.path.getmtime(file_path)
+        }
+
+        # Add type-specific metadata extraction here
+        # This could leverage existing tools' metadata extraction capabilities
+
+        return metadata
+
+    def _calculate_content_stats(self, content: Union[str, Dict[str, Any]]) -> Dict[str, Any]:
+        """Calculate statistics about the parsed content"""
+        if isinstance(content, str):
+            return {
+                "character_count": len(content),
+                "word_count": len(content.split()),
+                "line_count": len(content.split('\n')),
+                "paragraph_count": len([p for p in content.split('\n\n') if p.strip()])
+            }
+        else:
+            # For structured content, calculate stats on text portion
+            text_content = content.get('text', '')
+            return self._calculate_content_stats(text_content)
+
+    def _create_chunks(self, content: str, chunk_size: int) -> List[Dict[str, Any]]:
+        """Create chunks from content for better AI processing"""
+        chunks = []
+        words = content.split()
+
+        for i in range(0, len(words), chunk_size):
+            chunk_words = words[i:i + chunk_size]
+            chunk_text = ' '.join(chunk_words)
+
+            chunks.append({
+                "index": len(chunks),
+                "text": chunk_text,
+                "word_count": len(chunk_words),
+                "start_word": i,
+                "end_word": min(i + chunk_size, len(words))
+            })
+
+        return chunks
+
+    def _format_as_text(self, result: Dict[str, Any]) -> str:
+        """Format result as plain text"""
+        content = result.get('content', '')
+        if isinstance(content, dict):
+            return content.get('text', str(content))
+        return str(content)
+
+    def _format_as_markdown(self, result: Dict[str, Any]) -> str:
+        """Format result as Markdown"""
+        content = result.get('content', '')
+        metadata = result.get('metadata', {})
+
+        md_content = f"# Document: {result.get('source', 'Unknown')}\n\n"
+        md_content += f"**Type:** {result.get('document_type', 'Unknown')}\n"
+        md_content += f"**Detection Confidence:** {result.get('detection_confidence', 0):.2f}\n\n"
+
+        if isinstance(content, dict):
+            md_content += content.get('text', str(content))
+        else:
+            md_content += str(content)
+
+        return md_content
+
+    def _format_as_html(self, result: Dict[str, Any]) -> str:
+        """Format result as HTML"""
+        content = result.get('content', '')
+
+        html_content = f"""
+        <html>
+        <head><title>Parsed Document</title></head>
+        <body>
+        <h1>Document: {result.get('source', 'Unknown')}</h1>
+        <p><strong>Type:</strong> {result.get('document_type', 'Unknown')}</p>
+        <p><strong>Detection Confidence:</strong> {result.get('detection_confidence', 0):.2f}</p>
+        <div class="content">
+        """
+
+        if isinstance(content, dict):
+            html_content += f"<pre>{content.get('text', str(content))}</pre>"
+        else:
+            html_content += f"<pre>{str(content)}</pre>"
+
+        html_content += "</div></body></html>"
+        return html_content
+
+    def _cleanup_temp_files(self, source: str):
+        """Clean up temporary files"""
+        import glob
+
+        if self._is_url(source):
+            # Clean up URL downloaded files
+            temp_pattern = os.path.join(self.config.temp_dir, f"download_{hash(source)}_*")
+            for temp_file in glob.glob(temp_pattern):
+                try:
+                    os.remove(temp_file)
+                    self.logger.debug(f"Cleaned up temp file: {temp_file}")
+                except Exception as e:
+                    self.logger.warning(f"Failed to clean up temp file {temp_file}: {e}")
+
+        elif self._is_cloud_storage_path(source) or self._is_storage_id(source):
+            # Clean up cloud storage downloaded files
+            temp_pattern = os.path.join(self.config.temp_dir, f"cloud_download_{hash(source)}_*")
+            for temp_file in glob.glob(temp_pattern):
+                try:
+                    os.remove(temp_file)
+                    self.logger.debug(f"Cleaned up cloud temp file: {temp_file}")
+                except Exception as e:
+                    self.logger.warning(f"Failed to clean up cloud temp file {temp_file}: {e}")
+
+    # Helper methods for structure extraction
+    def _extract_pdf_structure(self, text: str) -> Dict[str, Any]:
+        """Extract structure from PDF text"""
+        # Implement PDF structure extraction logic
+        return {"sections": [], "headings": []}
+
+    def _extract_office_structure(self, file_path: str, doc_type: DocumentType) -> Dict[str, Any]:
+        """Extract structure from Office documents"""
+        # Implement Office document structure extraction
+        return {"sections": [], "tables": [], "images": []}
+
+    def _extract_text_structure(self, content: str, doc_type: DocumentType) -> Dict[str, Any]:
+        """Extract structure from text documents"""
+        result = {"text": content}
+
+        if doc_type == DocumentType.MARKDOWN:
+            # Extract markdown structure
+            headings = re.findall(r'^(#{1,6})\s+(.+)$', content, re.MULTILINE)
+            result["headings"] = [{"level": len(h[0]), "text": h[1]} for h in headings]
+        elif doc_type == DocumentType.HTML:
+            # Extract HTML structure (simplified)
+            from bs4 import BeautifulSoup
+            soup = BeautifulSoup(content, 'html.parser')
+            result["title"] = soup.title.string if soup.title else ""
+            result["headings"] = [{"tag": h.name, "text": h.get_text()} for h in soup.find_all(['h1', 'h2', 'h3', 'h4', 'h5', 'h6'])]
+        elif doc_type == DocumentType.JSON:
+            import json
+            try:
+                result["json_data"] = json.loads(content)
+            except:
+                pass
+
+        return result
+
+    def _split_into_pages(self, text: str) -> List[str]:
+        """Split text into pages (simplified)"""
+        # This is a simple implementation - could be enhanced
+        pages = text.split('\f')  # Form feed character often indicates page break
+        return [page.strip() for page in pages if page.strip()]
+
+    def _extract_text_fallback(self, file_path: str) -> str:
+        """Fallback text extraction method"""
+        try:
+            with open(file_path, 'r', encoding=self.config.default_encoding, errors='ignore') as f:
+                return f.read()
+        except:
+            with open(file_path, 'rb') as f:
+                return f.read().decode('utf-8', errors='ignore')