ara-cli 0.1.9.94-py3-none-any.whl → 0.1.9.96-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ara_cli/__init__.py +18 -1
- ara_cli/__main__.py +57 -11
- ara_cli/ara_command_action.py +31 -19
- ara_cli/ara_config.py +17 -2
- ara_cli/artefact_autofix.py +171 -23
- ara_cli/artefact_creator.py +5 -8
- ara_cli/artefact_deleter.py +2 -4
- ara_cli/artefact_fuzzy_search.py +13 -6
- ara_cli/artefact_models/artefact_templates.py +3 -3
- ara_cli/artefact_models/feature_artefact_model.py +25 -0
- ara_cli/artefact_reader.py +4 -5
- ara_cli/chat.py +79 -37
- ara_cli/commands/extract_command.py +4 -11
- ara_cli/error_handler.py +134 -0
- ara_cli/file_classifier.py +3 -2
- ara_cli/file_loaders/document_readers.py +233 -0
- ara_cli/file_loaders/file_loaders.py +123 -0
- ara_cli/file_loaders/image_processor.py +89 -0
- ara_cli/file_loaders/markdown_reader.py +75 -0
- ara_cli/file_loaders/text_file_loader.py +9 -11
- ara_cli/global_file_lister.py +61 -0
- ara_cli/prompt_extractor.py +1 -1
- ara_cli/prompt_handler.py +24 -4
- ara_cli/template_manager.py +14 -4
- ara_cli/update_config_prompt.py +7 -1
- ara_cli/version.py +1 -1
- {ara_cli-0.1.9.94.dist-info → ara_cli-0.1.9.96.dist-info}/METADATA +2 -1
- {ara_cli-0.1.9.94.dist-info → ara_cli-0.1.9.96.dist-info}/RECORD +40 -33
- tests/test_ara_command_action.py +66 -52
- tests/test_ara_config.py +28 -0
- tests/test_artefact_autofix.py +361 -5
- tests/test_chat.py +105 -36
- tests/test_file_classifier.py +23 -0
- tests/test_file_creator.py +3 -5
- tests/test_global_file_lister.py +131 -0
- tests/test_prompt_handler.py +26 -1
- tests/test_template_manager.py +5 -4
- {ara_cli-0.1.9.94.dist-info → ara_cli-0.1.9.96.dist-info}/WHEEL +0 -0
- {ara_cli-0.1.9.94.dist-info → ara_cli-0.1.9.96.dist-info}/entry_points.txt +0 -0
- {ara_cli-0.1.9.94.dist-info → ara_cli-0.1.9.96.dist-info}/top_level.txt +0 -0
ara_cli/file_loaders/document_readers.py
ADDED
@@ -0,0 +1,233 @@
```python
import os
from abc import ABC, abstractmethod
from typing import Tuple, Optional


class DocumentReader(ABC):
    """Abstract base class for document readers"""

    def __init__(self, file_path: str):
        self.file_path = file_path
        self.base_dir = os.path.dirname(file_path)

    @abstractmethod
    def read(self, extract_images: bool = False) -> str:
        """Read document and optionally extract images"""
        pass

    def create_image_data_dir(self, extension_suffix: str) -> str:
        """
        Create data directory for images with file extension suffix to avoid conflicts.

        Returns:
            str: Path to images directory
        """
        file_name_with_ext = os.path.splitext(os.path.basename(self.file_path))[0] + f"_{extension_suffix}"
        data_dir = os.path.join(self.base_dir, f"{file_name_with_ext}.data")
        images_dir = os.path.join(data_dir, "images")
        if not os.path.exists(images_dir):
            os.makedirs(images_dir)
        return images_dir

    def save_and_describe_image(self, image_data: bytes, image_format: str,
                                save_dir: str, image_counter: int) -> Tuple[str, str]:
        """
        Save image data and get its description from LLM.

        Returns:
            tuple: (relative_image_path, description)
        """
        from ara_cli.prompt_handler import describe_image

        # Save image
        image_filename = f"{image_counter}.{image_format}"
        image_path = os.path.join(save_dir, image_filename)

        with open(image_path, "wb") as image_file:
            image_file.write(image_data)

        # Get image description from LLM
        description = describe_image(image_path)

        # Get relative path
        relative_image_path = os.path.relpath(image_path, self.base_dir)

        return relative_image_path, description


class DocxReader(DocumentReader):
    """Reader for DOCX files"""

    def read(self, extract_images: bool = False) -> str:
        import docx

        doc = docx.Document(self.file_path)
        text_content = '\n'.join(para.text for para in doc.paragraphs)

        if not extract_images:
            return text_content

        from PIL import Image
        import io

        # Create data directory for images
        images_dir = self.create_image_data_dir("docx")

        # Extract and process images
        image_descriptions = []
        image_counter = 1

        for rel in doc.part.rels.values():
            if "image" in rel.reltype:
                image_data = rel.target_part.blob

                # Determine image format
                image = Image.open(io.BytesIO(image_data))
                image_format = image.format.lower()

                # Save and describe image
                relative_path, description = self.save_and_describe_image(
                    image_data, image_format, images_dir, image_counter
                )

                # Add formatted description to list
                image_description = f"\nImage: {relative_path}\n[{description}]\n"
                image_descriptions.append(image_description)

                image_counter += 1

        # Combine text content with image descriptions
        if image_descriptions:
            text_content += "\n\n### Extracted Images\n" + "\n".join(image_descriptions)

        return text_content


class PdfReader(DocumentReader):
    """Reader for PDF files"""

    def read(self, extract_images: bool = False) -> str:
        import pymupdf4llm

        if not extract_images:
            return pymupdf4llm.to_markdown(self.file_path, write_images=False)

        import fitz  # PyMuPDF

        # Create images directory
        images_dir = self.create_image_data_dir("pdf")

        # Extract text without images first
        text_content = pymupdf4llm.to_markdown(self.file_path, write_images=False)

        # Extract and process images
        doc = fitz.open(self.file_path)
        image_descriptions = []
        image_counter = 1

        for page_num, page in enumerate(doc):
            image_list = page.get_images()

            for img_index, img in enumerate(image_list):
                # Extract image
                xref = img[0]
                base_image = doc.extract_image(xref)
                image_bytes = base_image["image"]
                image_ext = base_image["ext"]

                # Save and describe image
                relative_path, description = self.save_and_describe_image(
                    image_bytes, image_ext, images_dir, image_counter
                )

                # Add formatted description to list
                image_description = f"\nImage: {relative_path}\n[{description}]\n"
                image_descriptions.append(image_description)

                image_counter += 1

        doc.close()

        # Combine text content with image descriptions
        if image_descriptions:
            text_content += "\n\n### Extracted Images\n" + "\n".join(image_descriptions)

        return text_content


class OdtReader(DocumentReader):
    """Reader for ODT files"""

    def read(self, extract_images: bool = False) -> str:
        import pymupdf4llm

        if not extract_images:
            return pymupdf4llm.to_markdown(self.file_path, write_images=False)

        import zipfile
        from PIL import Image
        import io

        # Create data directory for images
        images_dir = self.create_image_data_dir("odt")

        # Get text content
        text_content = pymupdf4llm.to_markdown(self.file_path, write_images=False)

        # Extract and process images from ODT
        image_descriptions = []
        image_counter = 1

        try:
            with zipfile.ZipFile(self.file_path, 'r') as odt_zip:
                # List all files in the Pictures directory
                picture_files = [f for f in odt_zip.namelist() if f.startswith('Pictures/')]

                for picture_file in picture_files:
                    # Extract image data
                    image_data = odt_zip.read(picture_file)

                    # Determine image format
                    image = Image.open(io.BytesIO(image_data))
                    image_format = image.format.lower()

                    # Save and describe image
                    relative_path, description = self.save_and_describe_image(
                        image_data, image_format, images_dir, image_counter
                    )

                    # Add formatted description to list
                    image_description = f"\nImage: {relative_path}\n[{description}]\n"
                    image_descriptions.append(image_description)

                    image_counter += 1
        except Exception as e:
            print(f"Warning: Could not extract images from ODT: {e}")

        # Combine text content with image descriptions
        if image_descriptions:
            text_content += "\n\n### Extracted Images\n" + "\n".join(image_descriptions)

        return text_content


class DocumentReaderFactory:
    """Factory for creating appropriate document readers"""

    @staticmethod
    def create_reader(file_path: str) -> Optional[DocumentReader]:
        """Create appropriate reader based on file extension"""
        _, ext = os.path.splitext(file_path)
        ext = ext.lower()

        readers = {
            '.docx': DocxReader,
            '.pdf': PdfReader,
            '.odt': OdtReader
        }

        reader_class = readers.get(ext)
        if reader_class:
            return reader_class(file_path)

        return None
```
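A minimal usage sketch of the reader factory above (illustration only, not part of the package diff); the file path is a placeholder, and `describe_image` is only invoked when `extract_images=True`:

```python
from ara_cli.file_loaders.document_readers import DocumentReaderFactory

# "report.docx" is a placeholder path; .docx, .pdf and .odt are the supported extensions.
reader = DocumentReaderFactory.create_reader("report.docx")
if reader is None:
    print("Unsupported document type.")
else:
    # With extract_images=False no LLM call is made; only the document text is returned.
    text = reader.read(extract_images=False)
    print(text[:200])
```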
ara_cli/file_loaders/file_loaders.py
ADDED
@@ -0,0 +1,123 @@
````python
import os
import base64
from abc import ABC, abstractmethod
from typing import Optional
from ara_cli.file_loaders.markdown_reader import MarkdownReader
from ara_cli.file_loaders.document_readers import DocumentReaderFactory


class FileLoader(ABC):
    """Abstract base class for file loaders"""

    def __init__(self, chat_instance):
        self.chat = chat_instance

    @abstractmethod
    def load(self, file_path: str, **kwargs) -> bool:
        """Load file with specific implementation"""
        pass

    def add_prompt_tag_if_needed(self):
        """Add prompt tag to chat if needed"""
        self.chat.add_prompt_tag_if_needed(self.chat.chat_name)


class TextFileLoader(FileLoader):
    """Loads text files"""

    def load(self, file_path: str, prefix: str = "", suffix: str = "",
             block_delimiter: str = "", extract_images: bool = False) -> bool:
        """Load text file with optional markdown image extraction"""

        is_md_file = file_path.lower().endswith('.md')

        if is_md_file and extract_images:
            reader = MarkdownReader(file_path)
            file_content = reader.read(extract_images=True)
        else:
            with open(file_path, 'r', encoding='utf-8', errors="replace") as file:
                file_content = file.read()

        if block_delimiter:
            file_content = f"{block_delimiter}\n{file_content}\n{block_delimiter}"

        write_content = f"{prefix}{file_content}{suffix}\n"

        with open(self.chat.chat_name, 'a', encoding='utf-8') as chat_file:
            chat_file.write(write_content)

        return True


class BinaryFileLoader(FileLoader):
    """Loads binary files (images)"""

    def load(self, file_path: str, mime_type: str, prefix: str = "", suffix: str = "") -> bool:
        """Load binary file as base64"""

        with open(file_path, 'rb') as file:
            file_content = file.read()

        base64_image = base64.b64encode(file_content).decode("utf-8")
        write_content = f"{prefix}{suffix}\n"

        with open(self.chat.chat_name, 'a', encoding='utf-8') as chat_file:
            chat_file.write(write_content)

        return True


class DocumentFileLoader(FileLoader):
    """Loads document files (PDF, DOCX, ODT)"""

    def load(self, file_path: str, prefix: str = "", suffix: str = "",
             block_delimiter: str = "```", extract_images: bool = False) -> bool:
        """Load document file with optional image extraction"""

        reader = DocumentReaderFactory.create_reader(file_path)

        if not reader:
            print("Unsupported document type.")
            return False

        text_content = reader.read(extract_images=extract_images)

        if block_delimiter:
            text_content = f"{block_delimiter}\n{text_content}\n{block_delimiter}"

        write_content = f"{prefix}{text_content}{suffix}\n"

        with open(self.chat.chat_name, 'a', encoding='utf-8') as chat_file:
            chat_file.write(write_content)

        return True


class FileLoaderFactory:
    """Factory for creating appropriate file loaders"""

    BINARY_TYPE_MAPPING = {
        ".png": "image/png",
        ".jpg": "image/jpeg",
        ".jpeg": "image/jpeg",
    }

    DOCUMENT_TYPE_EXTENSIONS = [".docx", ".doc", ".odt", ".pdf"]

    @staticmethod
    def create_loader(file_name: str, chat_instance) -> Optional[FileLoader]:
        """Create appropriate loader based on file type"""

        file_name_lower = file_name.lower()

        # Check if it's a binary file
        for extension, mime_type in FileLoaderFactory.BINARY_TYPE_MAPPING.items():
            if file_name_lower.endswith(extension):
                return BinaryFileLoader(chat_instance)

        # Check if it's a document
        if any(file_name_lower.endswith(ext) for ext in FileLoaderFactory.DOCUMENT_TYPE_EXTENSIONS):
            return DocumentFileLoader(chat_instance)

        # Default to text file loader
        return TextFileLoader(chat_instance)
````
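A sketch of how the loader factory might be driven (not part of the diff; the stand-in assumes the chat object only needs a `chat_name` attribute, which is all the loaders shown above use):

```python
from types import SimpleNamespace

from ara_cli.file_loaders.file_loaders import FileLoaderFactory

# Hypothetical stand-in for ara-cli's chat object; the loaders above only read
# chat.chat_name, the path of the chat file they append to.
chat = SimpleNamespace(chat_name="chat.md")

# "notes.md" is a placeholder: .png/.jpg -> BinaryFileLoader,
# .pdf/.docx/.doc/.odt -> DocumentFileLoader, everything else -> TextFileLoader.
loader = FileLoaderFactory.create_loader("notes.md", chat)
loader.load("notes.md", prefix="### notes.md\n")
```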
ara_cli/file_loaders/image_processor.py
ADDED
@@ -0,0 +1,89 @@
```python
import os
import base64
import tempfile
import requests
from typing import Optional, Tuple
import re
from ara_cli.prompt_handler import describe_image


class ImageProcessor:
    """Handles image processing operations"""

    @staticmethod
    def process_base64_image(image_ref: str, base64_pattern: re.Pattern) -> Optional[Tuple[str, str]]:
        """Process base64 encoded image and return description"""
        base64_match = base64_pattern.match(image_ref)
        if not base64_match:
            return None

        image_format = base64_match.group(1)
        base64_data = base64_match.group(2)
        image_data = base64.b64decode(base64_data)

        # Create a temporary file to send to LLM
        with tempfile.NamedTemporaryFile(suffix=f'.{image_format}', delete=False) as tmp_file:
            tmp_file.write(image_data)
            tmp_file_path = tmp_file.name

        try:
            description = describe_image(tmp_file_path)
            return f"Image: (base64 embedded {image_format} image)\n[{description}]", None
        finally:
            os.unlink(tmp_file_path)

    @staticmethod
    def process_url_image(image_ref: str) -> Tuple[str, Optional[str]]:
        """Process image from URL and return description"""
        if not image_ref.startswith(('http://', 'https://')):
            return "", None

        try:
            response = requests.get(image_ref, timeout=10)
            response.raise_for_status()

            # Determine file extension from content-type
            content_type = response.headers.get('content-type', '')
            ext = ImageProcessor._get_extension_from_content_type(content_type, image_ref)

            # Create temporary file
            with tempfile.NamedTemporaryFile(suffix=ext, delete=False) as tmp_file:
                tmp_file.write(response.content)
                tmp_file_path = tmp_file.name

            try:
                description = describe_image(tmp_file_path)
                return f"Image: {image_ref}\n[{description}]", None
            finally:
                os.unlink(tmp_file_path)

        except Exception as e:
            error_msg = f"Could not download image: {str(e)}"
            return f"Image: {image_ref}\n[{error_msg}]", error_msg

    @staticmethod
    def process_local_image(image_ref: str, base_dir: str) -> Tuple[str, Optional[str]]:
        """Process local image file and return description"""
        if os.path.isabs(image_ref):
            local_image_path = image_ref
        else:
            local_image_path = os.path.join(base_dir, image_ref)

        if os.path.exists(local_image_path):
            description = describe_image(local_image_path)
            return f"Image: {image_ref}\n[{description}]", None
        else:
            error_msg = f"Image file not found"
            return f"Image: {image_ref}\n[{error_msg}]", f"Local image not found: {local_image_path}"

    @staticmethod
    def _get_extension_from_content_type(content_type: str, url: str) -> str:
        """Determine file extension from content type or URL"""
        if 'image/jpeg' in content_type:
            return '.jpg'
        elif 'image/png' in content_type:
            return '.png'
        elif 'image/gif' in content_type:
            return '.gif'
        else:
            return os.path.splitext(url)[1] or '.png'
```
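A small sketch of the local-image path (file and directory names are placeholders; `describe_image` requires ara-cli's LLM prompt handler to be configured):

```python
from ara_cli.file_loaders.image_processor import ImageProcessor

# "diagram.png" and "docs" are placeholders. On success the first element is
# "Image: <ref>\n[<description>]" and the second is None; on a missing file the
# second element carries the error message.
text, error = ImageProcessor.process_local_image("diagram.png", "docs")
if error:
    print(f"Warning: {error}")
print(text)
```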
ara_cli/file_loaders/markdown_reader.py
ADDED
@@ -0,0 +1,75 @@
```python
import os
import re
from typing import Optional
from charset_normalizer import from_path
from ara_cli.file_loaders.image_processor import ImageProcessor


class MarkdownReader:
    """Handles markdown file reading with optional image extraction"""

    def __init__(self, file_path: str):
        self.file_path = file_path
        self.base_dir = os.path.dirname(file_path)
        self.image_processor = ImageProcessor()

    def read(self, extract_images: bool = False) -> str:
        """Read markdown file and optionally extract/describe images"""
        # Detect and use the most appropriate encoding
        result = from_path(self.file_path).best()
        if not result:
            print(f"Failed to detect encoding for {self.file_path}")
            return ""
        content = str(result)

        if not extract_images:
            return content

        return self._process_images(content)

    def _process_images(self, content: str) -> str:
        """Process all images in markdown content"""
        # Pattern to match markdown images: ![alt](path)
        image_pattern = re.compile(r"!\[([^\]]*)\]\(([^\)]+)\)")
        base64_pattern = re.compile(r"data:image/([^;]+);base64,([^)]+)")

        # Process each image reference
        for match in image_pattern.finditer(content):
            image_ref = match.group(2)
            replacement = self._process_single_image(image_ref, base64_pattern)

            if replacement:
                content = content.replace(match.group(0), replacement, 1)

        return content

    def _process_single_image(
        self, image_ref: str, base64_pattern: re.Pattern
    ) -> Optional[str]:
        """Process a single image reference"""
        try:
            # Try base64 first
            result = self.image_processor.process_base64_image(
                image_ref, base64_pattern
            )
            if result:
                return result[0]

            # Try URL
            result, error = self.image_processor.process_url_image(image_ref)
            if result:
                if error:
                    print(f"Warning: {error}")
                return result

            # Try local file
            result, error = self.image_processor.process_local_image(
                image_ref, self.base_dir
            )
            if error:
                print(f"Warning: {error}")
            return result

        except Exception as e:
            print(f"Warning: Could not process image {image_ref}: {e}")
            return None
```
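A usage sketch for the markdown reader (placeholder path; with `extract_images=False` it is only encoding detection plus decoding, no LLM calls):

```python
from ara_cli.file_loaders.markdown_reader import MarkdownReader

reader = MarkdownReader("README.md")  # placeholder path
content = reader.read(extract_images=False)
print(content[:200])
```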
ara_cli/file_loaders/text_file_loader.py
CHANGED
```diff
@@ -4,21 +4,15 @@ import base64
 import tempfile
 from typing import Optional, Tuple
 import requests
+from charset_normalizer import from_path
 from ara_cli.prompt_handler import describe_image
 from ara_cli.file_loaders.file_loader import FileLoader
 
 
 class TextFileLoader(FileLoader):
     """Loads text files"""
-
-    def load(
-        self,
-        file_path: str,
-        prefix: str = "",
-        suffix: str = "",
-        block_delimiter: str = "",
-        extract_images: bool = False
-    ) -> bool:
+    def load(self, file_path: str, prefix: str = "", suffix: str = "",
+             block_delimiter: str = "", extract_images: bool = False, **kwargs) -> bool:
         """Load text file with optional markdown image extraction"""
 
         is_md_file = file_path.lower().endswith('.md')
@@ -27,8 +21,12 @@ class TextFileLoader(FileLoader):
             reader = MarkdownReader(file_path)
             file_content = reader.read(extract_images=True)
         else:
-            with open(file_path, 'r', encoding='utf-8', errors="replace") as file:
-                file_content = file.read()
+            # Use charset-normalizer to detect encoding
+            encoded_content = from_path(file_path).best()
+            if not encoded_content:
+                print(f"Failed to detect encoding for {file_path}")
+                return False
+            file_content = str(encoded_content)
 
         if block_delimiter:
             file_content = f"{block_delimiter}\n{file_content}\n{block_delimiter}"
```
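For reference, a sketch of the charset_normalizer call the new loader relies on (file name is a placeholder):

```python
from charset_normalizer import from_path

match = from_path("legacy.txt").best()  # None if detection fails
if match is None:
    print("Failed to detect encoding for legacy.txt")
else:
    print(match.encoding)  # e.g. "cp1252"
    text = str(match)      # decoded content, as used in TextFileLoader.load
```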
ara_cli/global_file_lister.py
ADDED
@@ -0,0 +1,61 @@
```python
import os
import fnmatch
from typing import List, Dict, Any

# Type alias for our tree structure
DirTree = Dict[str, Any]

def _build_tree(root_path: str, patterns: List[str]) -> DirTree:
    """Builds a nested dictionary representing the directory structure at the given path."""
    tree: DirTree = {'files': [], 'dirs': {}}
    try:
        for item in os.listdir(root_path):
            item_path = os.path.join(root_path, item)
            if os.path.isdir(item_path):
                subtree = _build_tree(item_path, patterns)
                # Only include subdirectories that contain matching files (directly or nested)
                if subtree['files'] or subtree['dirs']:
                    tree['dirs'][item] = subtree
            elif os.path.isfile(item_path):
                # Check whether the file matches any of the given patterns
                if any(fnmatch.fnmatch(item, pattern) for pattern in patterns):
                    tree['files'].append(item)
    except OSError as e:
        print(f"Warning: Could not access path {root_path}: {e}")
    return tree

def _write_tree_to_markdown(md_file, tree: DirTree, level: int):
    """Writes the tree data structure to the file in markdown format."""
    # Write files with indentation
    indent = ' ' * level
    for filename in sorted(tree['files']):
        md_file.write(f"{indent}- [] {filename}\n")

    # Create a heading for each subdirectory and recurse
    for dirname, subtree in sorted(tree['dirs'].items()):
        # Subheadings are not indented; only the heading level increases
        md_file.write(f"{' ' * (level -1)}{'#' * (level + 1)} {dirname}\n")
        _write_tree_to_markdown(md_file, subtree, level + 1)

def generate_global_markdown_listing(directories: List[str], file_patterns: List[str], output_file: str):
    """
    Creates a hierarchical markdown file listing for global directories.
    Uses the absolute path as the top-level heading and relative names for child entries.
    """
    with open(output_file, 'w', encoding='utf-8') as md_file:
        for directory in directories:
            abs_dir = os.path.abspath(directory)

            if not os.path.isdir(abs_dir):
                print(f"Warning: Global directory not found: {abs_dir}")
                md_file.write(f"# {directory}\n")
                md_file.write(f" - !! WARNING: Directory not found: {abs_dir}\n\n")
                continue

            tree = _build_tree(abs_dir, file_patterns)

            # Only write if the tree is not empty
            if tree['files'] or tree['dirs']:
                md_file.write(f"# {abs_dir}\n")
                _write_tree_to_markdown(md_file, tree, 1)
                md_file.write("\n")
```
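And a sketch of calling the new lister (directories, patterns, and output path are placeholders):

```python
from ara_cli.global_file_lister import generate_global_markdown_listing

generate_global_markdown_listing(
    directories=["./docs", "./specs"],        # placeholder directories
    file_patterns=["*.md", "*.feature"],      # fnmatch-style patterns
    output_file="global_files.md",            # markdown tree written here
)
```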
ara_cli/prompt_extractor.py
CHANGED
```diff
@@ -17,7 +17,7 @@ def extract_code_blocks_md(markdown_text):
 
 
 def extract_responses(document_path, relative_to_ara_root=False, force=False, write=False):
-    print(f"
+    print(f"Starting extraction from '{document_path}'")
     block_extraction_counter = 0
 
     with open(document_path, 'r', encoding='utf-8', errors='replace') as file:
```