quantalogic 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (68)
  1. quantalogic/__init__.py +20 -0
  2. quantalogic/agent.py +638 -0
  3. quantalogic/agent_config.py +138 -0
  4. quantalogic/coding_agent.py +83 -0
  5. quantalogic/event_emitter.py +223 -0
  6. quantalogic/generative_model.py +226 -0
  7. quantalogic/interactive_text_editor.py +190 -0
  8. quantalogic/main.py +185 -0
  9. quantalogic/memory.py +217 -0
  10. quantalogic/model_names.py +19 -0
  11. quantalogic/print_event.py +66 -0
  12. quantalogic/prompts.py +99 -0
  13. quantalogic/server/__init__.py +3 -0
  14. quantalogic/server/agent_server.py +633 -0
  15. quantalogic/server/models.py +60 -0
  16. quantalogic/server/routes.py +117 -0
  17. quantalogic/server/state.py +199 -0
  18. quantalogic/server/static/js/event_visualizer.js +430 -0
  19. quantalogic/server/static/js/quantalogic.js +571 -0
  20. quantalogic/server/templates/index.html +134 -0
  21. quantalogic/tool_manager.py +68 -0
  22. quantalogic/tools/__init__.py +46 -0
  23. quantalogic/tools/agent_tool.py +88 -0
  24. quantalogic/tools/download_http_file_tool.py +64 -0
  25. quantalogic/tools/edit_whole_content_tool.py +70 -0
  26. quantalogic/tools/elixir_tool.py +240 -0
  27. quantalogic/tools/execute_bash_command_tool.py +116 -0
  28. quantalogic/tools/input_question_tool.py +57 -0
  29. quantalogic/tools/language_handlers/__init__.py +21 -0
  30. quantalogic/tools/language_handlers/c_handler.py +33 -0
  31. quantalogic/tools/language_handlers/cpp_handler.py +33 -0
  32. quantalogic/tools/language_handlers/go_handler.py +33 -0
  33. quantalogic/tools/language_handlers/java_handler.py +37 -0
  34. quantalogic/tools/language_handlers/javascript_handler.py +42 -0
  35. quantalogic/tools/language_handlers/python_handler.py +29 -0
  36. quantalogic/tools/language_handlers/rust_handler.py +33 -0
  37. quantalogic/tools/language_handlers/scala_handler.py +33 -0
  38. quantalogic/tools/language_handlers/typescript_handler.py +42 -0
  39. quantalogic/tools/list_directory_tool.py +123 -0
  40. quantalogic/tools/llm_tool.py +119 -0
  41. quantalogic/tools/markitdown_tool.py +105 -0
  42. quantalogic/tools/nodejs_tool.py +515 -0
  43. quantalogic/tools/python_tool.py +469 -0
  44. quantalogic/tools/read_file_block_tool.py +140 -0
  45. quantalogic/tools/read_file_tool.py +79 -0
  46. quantalogic/tools/replace_in_file_tool.py +300 -0
  47. quantalogic/tools/ripgrep_tool.py +353 -0
  48. quantalogic/tools/search_definition_names.py +419 -0
  49. quantalogic/tools/task_complete_tool.py +35 -0
  50. quantalogic/tools/tool.py +146 -0
  51. quantalogic/tools/unified_diff_tool.py +387 -0
  52. quantalogic/tools/write_file_tool.py +97 -0
  53. quantalogic/utils/__init__.py +17 -0
  54. quantalogic/utils/ask_user_validation.py +12 -0
  55. quantalogic/utils/download_http_file.py +77 -0
  56. quantalogic/utils/get_coding_environment.py +15 -0
  57. quantalogic/utils/get_environment.py +26 -0
  58. quantalogic/utils/get_quantalogic_rules_content.py +19 -0
  59. quantalogic/utils/git_ls.py +121 -0
  60. quantalogic/utils/read_file.py +54 -0
  61. quantalogic/utils/read_http_text_content.py +101 -0
  62. quantalogic/xml_parser.py +242 -0
  63. quantalogic/xml_tool_parser.py +99 -0
  64. quantalogic-0.2.0.dist-info/LICENSE +201 -0
  65. quantalogic-0.2.0.dist-info/METADATA +1034 -0
  66. quantalogic-0.2.0.dist-info/RECORD +68 -0
  67. quantalogic-0.2.0.dist-info/WHEEL +4 -0
  68. quantalogic-0.2.0.dist-info/entry_points.txt +3 -0
@@ -0,0 +1,387 @@
1
+ import argparse
2
+ import os
3
+ import sys
4
+ from dataclasses import dataclass
5
+ from enum import Enum
6
+ from typing import Dict, List, Optional, Tuple
7
+
8
+ from quantalogic.tools.tool import Tool, ToolArgument
9
+
10
+
11
class LineType(Enum):
    """Classification of a single line inside a unified-diff hunk body."""

    CONTEXT = " "
    ADDITION = "+"
    DELETION = "-"

    @classmethod
    def from_line(cls, line: str) -> Optional["LineType"]:
        """Classify *line* by its leading marker character.

        Returns None for anything that is not a context/addition/deletion
        line (including empty lines and hunk headers).
        """
        # The enum values are exactly the one-character diff markers, so a
        # value lookup on the first character replaces the if/elif chain.
        try:
            return cls(line[:1])
        except ValueError:
            return None
28
+
29
+
30
@dataclass
class PatchLine:
    """Represents a line in a patch with type and content."""

    # Classification of the line (context / addition / deletion).
    type: LineType
    # Line text with the leading diff marker character already stripped.
    content: str
    # 1-based line number in the original file; None for additions.
    original_line_number: Optional[int] = None
    # 1-based line number in the patched file; None for deletions.
    new_line_number: Optional[int] = None
38
+
39
+
40
@dataclass
class HunkHeader:
    """Represents a hunk header in a patch (the "@@ -a,b +c,d @@" line)."""

    # 1-based start line in the original file.
    orig_start: int
    # Number of lines the hunk covers in the original file.
    orig_count: int
    # 1-based start line in the patched file.
    new_start: int
    # Number of lines the hunk covers in the patched file.
    new_count: int
    # Optional trailing text after the second "@@" (e.g. enclosing function name).
    section_header: Optional[str] = None
49
+
50
+
51
@dataclass
class Hunk:
    """Represents a complete hunk in a patch with validation and application logic."""

    # Parsed "@@ -a,b +c,d @@" header for this hunk.
    header: HunkHeader
    # The hunk body: context, addition and deletion lines in order.
    lines: List[PatchLine]

    def validate(self, file_lines: List[str], start_line: int, lenient: bool = False, tolerance: int = 5) -> int:
        """Validate hunk context against file contents with detailed error reporting.

        Args:
            file_lines (List[str]): The lines of the file to patch.
            start_line (int): The expected starting line number for the hunk.
            lenient (bool): Whether to allow lenient matching of context lines.
            tolerance (int): The number of lines to search around the expected line for context matching.

        Returns:
            int: The offset to apply to subsequent lines in the hunk.
        """
        # NOTE(review): the `lenient` parameter is never read — fuzzy matching
        # within `tolerance` lines always happens regardless. Confirm intent.
        if not file_lines:
            if self.header.orig_start > 0:
                raise PatchError("Cannot delete from empty file", {"Hunk header": self._format_header()})
            return 0

        if start_line > len(file_lines):
            raise PatchError(
                "Patch refers to lines beyond file length",
                {"File length": len(file_lines), "Start line": start_line, "Hunk header": self._format_header()},
            )

        # Only context and deletion lines must already exist in the file;
        # additions have nothing to match against.
        context_lines = [line for line in self.lines if line.type in (LineType.CONTEXT, LineType.DELETION)]
        if not context_lines:  # Only additions, no context to validate
            return 0

        file_pos = start_line - 1
        offset = 0

        for patch_line in context_lines:
            expected_line = file_pos + offset
            # Search a tolerance-sized window around the expected position so a
            # slightly drifted patch can still be anchored.
            search_start = max(0, expected_line - tolerance)
            search_end = min(len(file_lines), expected_line + tolerance + 1)

            found = False
            for i in range(search_start, search_end):
                # Trailing-whitespace-insensitive comparison.
                if file_lines[i].rstrip() == patch_line.content.rstrip():
                    found = True
                    if i != expected_line:
                        # Remember the drift so subsequent lines are checked
                        # relative to where the context actually matched.
                        offset = i - expected_line
                    break

            if not found:
                raise PatchError(
                    "Context mismatch",
                    {
                        "Expected": patch_line.content.rstrip(),
                        "Found": file_lines[expected_line].rstrip()
                        if expected_line < len(file_lines)
                        else "End of file",
                        "At line": expected_line + 1,
                        "Hunk header": self._format_header(),
                        "Offset": offset,
                    },
                )

            file_pos += 1

        return offset

    def _format_header(self) -> str:
        """Format hunk header for error messages."""
        header = (
            f"@@ -{self.header.orig_start},{self.header.orig_count} +{self.header.new_start},{self.header.new_count} @@"
        )
        if self.header.section_header:
            header += f" {self.header.section_header}"
        return header

    def apply(self, lines: List[str], start_line: int, offset: int = 0) -> List[str]:
        """Apply this hunk to the given lines.

        Args:
            lines (List[str]): Original file lines (with line endings).
            start_line (int): 1-based line where the hunk starts.
            offset (int): Drift previously computed by validate().

        Returns:
            List[str]: The patched lines.
        """
        # Copy everything before the (offset-adjusted) hunk start untouched.
        result = lines[: start_line - 1 + offset]
        file_pos = start_line - 1 + offset

        for patch_line in self.lines:
            if patch_line.type == LineType.CONTEXT:
                # Context lines are carried over from the original file.
                if file_pos < len(lines):
                    result.append(lines[file_pos])
                file_pos += 1
            elif patch_line.type == LineType.ADDITION:
                # Additions are normalized to end with exactly one newline.
                result.append(patch_line.content.rstrip() + "\n")
            elif patch_line.type == LineType.DELETION:
                # Deletions simply skip the corresponding original line.
                if file_pos < len(lines):
                    file_pos += 1

        # Copy the remainder of the file after the hunk.
        result.extend(lines[file_pos:])
        return result
146
+
147
+
148
class PatchError(Exception):
    """Exception for patch-related failures, carrying structured context.

    The context mapping (e.g. file name, hunk header, mismatched line) is
    kept for programmatic inspection and rendered into str(exc).
    """

    def __init__(self, message: str, context: Optional[Dict] = None):
        self.context = context or {}
        super().__init__(message)

    def __str__(self):
        pieces = [super().__str__()]
        # Render each context entry as a "key:" heading plus an indented value.
        for name in self.context:
            pieces.append(f"\n{name}:")
            pieces.append(f" {self.context[name]}")
        return "\n".join(pieces)
162
+
163
+
164
class Patch:
    """Represents a complete patch with enhanced parsing and validation."""

    def __init__(self, content: str):
        # Raw patch text (possibly CDATA-wrapped); unwrapped during _parse().
        self.content = content
        self.hunks: List[Hunk] = []
        self.original_filename: Optional[str] = None
        self.new_filename: Optional[str] = None
        # Free-form "key: value" lines found in the patch.
        self.metadata: Dict[str, str] = {}
        self._parse()

    def _parse(self) -> None:
        """Parse the patch content with metadata and headers.

        Raises:
            PatchError: If the content is empty or contains no hunks.
        """
        if not self.content:
            raise PatchError("Empty patch content")

        lines = self.content.splitlines()
        if not lines:
            raise PatchError("No lines in patch")

        # Strip an optional XML CDATA wrapper around the whole patch.
        if self.content.startswith("<![CDATA[") and self.content.endswith("]]>"):
            self.content = self.content[9:-3]
            lines = self.content.splitlines()

        self._parse_headers(lines)
        self._parse_hunks(lines)

        if not self.hunks:
            raise PatchError("No valid hunks found in patch")

    def _parse_headers(self, lines: List[str]) -> None:
        """Parse patch headers and metadata.

        NOTE(review): this scans *every* line, so hunk body lines containing
        ":" also end up in `metadata` — confirm that is acceptable.
        """
        for line in lines:
            if line.startswith("--- "):
                # "--- a/path\t<timestamp>" -> keep only the path part.
                self.original_filename = line[4:].split("\t")[0].strip()
            elif line.startswith("+++ "):
                self.new_filename = line[4:].split("\t")[0].strip()
            elif ":" in line:  # Possible metadata
                key, value = line.split(":", 1)
                self.metadata[key.strip()] = value.strip()

    def _parse_hunks(self, lines: List[str]) -> None:
        """Parse patch hunks with line number tracking."""
        current_hunk_lines: List[str] = []
        in_hunk = False

        for line in lines:
            if line.startswith("@@ "):
                # A new hunk header closes the previous hunk, if any.
                if current_hunk_lines:
                    self._parse_hunk(current_hunk_lines)
                    current_hunk_lines = []
                in_hunk = True

            if in_hunk:
                current_hunk_lines.append(line)
            elif not (line.startswith("--- ") or line.startswith("+++ ") or line.strip() == ""):
                # Pre-hunk lines that are not file headers are treated as
                # metadata when they look like "key: value".
                if ":" in line:
                    key, value = line.split(":", 1)
                    self.metadata[key.strip()] = value.strip()

        # Flush the final hunk.
        if current_hunk_lines:
            self._parse_hunk(current_hunk_lines)

    def _parse_hunk(self, lines: List[str]) -> None:
        """Parse a single hunk from its lines (header first, then body).

        Raises:
            PatchError: If the first line is not a valid "@@" header.
        """
        if not lines or not lines[0].startswith("@@ "):
            raise PatchError("Invalid hunk format", {"First line": lines[0] if lines else "No lines"})

        header = self._parse_hunk_header(lines[0])
        patch_lines: List[PatchLine] = []
        # Running line counters for both sides of the diff.
        orig_line = header.orig_start
        new_line = header.new_start

        for line in lines[1:]:
            line_type = LineType.from_line(line)
            if line_type:
                # Strip the one-character diff marker.
                content = line[1:]
                patch_line = PatchLine(line_type, content)
                if line_type in (LineType.CONTEXT, LineType.DELETION):
                    patch_line.original_line_number = orig_line
                    orig_line += 1
                if line_type in (LineType.CONTEXT, LineType.ADDITION):
                    patch_line.new_line_number = new_line
                    new_line += 1
                patch_lines.append(patch_line)

        self.hunks.append(Hunk(header, patch_lines))

    def _parse_hunk_header(self, header_line: str) -> HunkHeader:
        """Parse a hunk header line of the form "@@ -a,b +c,d @@ [section]".

        Raises:
            PatchError: If the header or its ranges are malformed.
        """
        if not header_line.startswith("@@ "):
            raise PatchError("Malformed hunk header", {"Header line": header_line})

        parts = header_line.split("@@")
        if len(parts) < 3:
            raise PatchError("Malformed hunk header", {"Header line": header_line})

        # parts[1] is " -a,b +c,d "; split into the two range tokens.
        ranges = parts[1].strip().split(" ")
        if len(ranges) != 2:
            raise PatchError("Malformed hunk ranges", {"Header line": header_line, "Ranges": ranges})

        # Drop the leading "-" / "+" sign from each range token.
        orig_range = ranges[0][1:]
        new_range = ranges[1][1:]

        try:
            orig_start, orig_count = self._parse_range(orig_range)
            new_start, new_count = self._parse_range(new_range)
        except ValueError as e:
            raise PatchError(
                "Invalid range format",
                {"Header line": header_line, "Original range": orig_range, "New range": new_range, "Error": str(e)},
            )

        # Anything after the second "@@" is the optional section header.
        section_header = " ".join(parts[2:]).strip() if len(parts) > 2 else None
        return HunkHeader(orig_start, orig_count, new_start, new_count, section_header)

    def _parse_range(self, range_str: str) -> Tuple[int, int]:
        """Parse a range string (e.g., 'start,length') into start and count.

        A bare "start" (no comma) means a count of 1, per the unified format.
        """
        try:
            if "," in range_str:
                start, count = range_str.split(",")
                return int(start), int(count)
            return int(range_str), 1
        except ValueError:
            raise ValueError(f"Invalid range format: {range_str}")

    def apply_to_text(self, text: str, lenient: bool = False, tolerance: int = 5) -> str:
        """Apply the patch to the given text with enhanced error handling.

        Args:
            text (str): Original file content ("" for a new file).
            lenient (bool): Forwarded to Hunk.validate().
            tolerance (int): Context search window, forwarded to Hunk.validate().

        Returns:
            str: The patched text.
        """
        # keepends=True so the per-line newline characters survive the round trip.
        lines = text.splitlines(keepends=True) if text else []

        for hunk in self.hunks:
            # Validate first to compute any drift, then apply with that offset.
            offset = hunk.validate(lines, hunk.header.orig_start, lenient, tolerance)
            lines = hunk.apply(lines, hunk.header.orig_start, offset)

        return "".join(lines)
299
+
300
+
301
class UnifiedDiffTool(Tool):
    """Tool for applying unified diff patches with comprehensive error handling."""

    name: str = "unified_diff"
    description: str = "Applies a unified diff patch to update a file."
    # Patching is destructive, so require validation before running.
    need_validation: bool = True
    # Defaults forwarded to Patch.apply_to_text().
    lenient: bool = True
    tolerance: int = 5
    arguments: list[ToolArgument] = [
        ToolArgument(
            name="file_path",
            arg_type="string",
            description="The path to the file to patch. Using an absolute path is recommended.",
            required=True,
            example="/path/to/file.txt",
        ),
        ToolArgument(
            name="patch",
            arg_type="string",
            description="The unified diff patch content in CDATA format.",
            required=True,
            example="<![CDATA[--- a/file.txt\n+++ b/file.txt\n@@ -1,3 +1,4 @@\n Hello, world!\n+New line!]]>",
        ),
    ]

    def execute(self, file_path: str, patch: str):
        """Apply *patch* (a unified diff, optionally CDATA-wrapped) to *file_path*.

        A missing file is treated as empty, so a pure-addition patch can
        create it.

        Args:
            file_path (str): Path of the file to patch.
            patch (str): Unified diff content.

        Returns:
            str: A success message.

        Raises:
            PatchError: If parsing, validation or application fails.
        """
        error_context = {
            "File": file_path,
            "File exists": os.path.exists(file_path),
        }

        try:
            if os.path.exists(file_path):
                with open(file_path, encoding="utf-8") as f:
                    original_content = f.read()
                # Keep a short preview for diagnostics.
                error_context["File preview"] = (
                    original_content[:200] + "..." if len(original_content) > 200 else original_content
                )
            else:
                original_content = ""

            patch_obj = Patch(patch)
            new_content = patch_obj.apply_to_text(original_content, lenient=self.lenient, tolerance=self.tolerance)

            # Bug fix: os.makedirs("") raises FileNotFoundError when file_path
            # is a bare relative filename — only create a directory if there is one.
            parent_dir = os.path.dirname(file_path)
            if parent_dir:
                os.makedirs(parent_dir, exist_ok=True)
            with open(file_path, "w", encoding="utf-8") as f:
                f.write(new_content)

            return "Patch applied successfully"

        except PatchError:
            # Propagate patch errors as-is instead of double-wrapping them
            # in a second "Unexpected error" PatchError.
            raise
        except Exception as e:
            # Chain the cause so the original traceback stays visible.
            raise PatchError(f"Unexpected error: {str(e)}", error_context) from e
354
+
355
+
356
if __name__ == "__main__":
    # Command-line entry point: apply a unified diff to a single file.
    cli = argparse.ArgumentParser(description="Apply a unified diff patch to a file")
    cli.add_argument("file_path", help="Path to the file to patch")
    cli.add_argument("--patch-file", help="Path to the patch file")
    cli.add_argument("--patch", help="Patch content as string")
    cli.add_argument("--lenient", action="store_true", help="Enable lenient mode for patch application")
    cli.add_argument(
        "--tolerance", type=int, default=5, help="Number of lines to search around for context matching"
    )

    opts = cli.parse_args()

    # Exactly one of --patch-file / --patch must be supplied.
    if opts.patch_file and opts.patch:
        cli.error("Cannot specify both --patch-file and --patch")

    if opts.patch_file:
        with open(opts.patch_file, encoding="utf-8") as patch_fh:
            patch_content = patch_fh.read()
    elif opts.patch:
        patch_content = opts.patch
    else:
        cli.error("Must specify either --patch-file or --patch")

    # Configure the tool from the CLI flags, then run it.
    tool = UnifiedDiffTool()
    tool.lenient = opts.lenient
    tool.tolerance = opts.tolerance
    try:
        print(tool.execute(opts.file_path, patch_content))
    except Exception as exc:
        print(f"Error: {str(exc)}", file=sys.stderr)
        sys.exit(1)
@@ -0,0 +1,97 @@
1
+ """Tool for writing a file and returning its content."""
2
+
3
+ import os
4
+
5
+ from quantalogic.tools.tool import Tool, ToolArgument
6
+
7
+
8
class WriteFileTool(Tool):
    """Tool for writing a text file."""

    name: str = "write_file_tool"
    description: str = "Writes a file with the given content. The tool will fail if the file already exists when not used in append mode."
    # Writing to disk is destructive, so require validation before running.
    need_validation: bool = True
    arguments: list = [
        ToolArgument(
            name="file_path",
            arg_type="string",
            description="The path to the file to write. Using an absolute path is recommended.",
            required=True,
            example="/path/to/file.txt",
        ),
        ToolArgument(
            name="content",
            arg_type="string",
            description="""
            The content to write to the file. Use CDATA to escape special characters.
            Don't add newlines at the beginning or end of the content.
            """,
            required=True,
            example="Hello, world!",
        ),
        ToolArgument(
            name="append_mode",
            arg_type="string",
            description="""Append mode. If true, the content will be appended to the end of the file.
            """,
            required=False,
            example="False",
        ),
        ToolArgument(
            name="overwrite",
            arg_type="string",
            description="Overwrite mode. If true, existing files can be overwritten. Defaults to False.",
            required=False,
            example="False",
        ),
    ]

    def execute(self, file_path: str, content: str, append_mode: str = "False", overwrite: str = "False") -> str:
        """Writes a file with the given content.

        Args:
            file_path (str): The path to the file to write.
            content (str): The content to write to the file.
            append_mode (str, optional): If true, append content to existing file. Defaults to "False".
            overwrite (str, optional): If true, overwrite existing file. Defaults to "False".

        Returns:
            str: A status message with the resolved file path, whether it was
                written or appended to, and the resulting size in bytes.

        Raises:
            FileExistsError: If the file already exists and append_mode is False and overwrite is False.
        """
        # Convert mode strings to booleans (tool arguments arrive as strings).
        append_mode_bool = append_mode.lower() in ["true", "1", "yes"]
        overwrite_bool = overwrite.lower() in ["true", "1", "yes"]

        ## Handle tilde expansion
        if file_path.startswith("~"):
            # Expand the tilde to the user's home directory
            file_path = os.path.expanduser(file_path)

        # Convert relative paths to absolute paths using current working directory
        if not os.path.isabs(file_path):
            file_path = os.path.abspath(os.path.join(os.getcwd(), file_path))

        # Ensure parent directory exists
        os.makedirs(os.path.dirname(file_path), exist_ok=True)

        # Determine file write mode based on append_mode
        mode = "a" if append_mode_bool else "w"

        # Check if file already exists and not in append mode and not in overwrite mode
        if os.path.exists(file_path) and not append_mode_bool and not overwrite_bool:
            raise FileExistsError(
                f"File {file_path} already exists. Set append_mode=True to append or overwrite=True to overwrite."
            )

        with open(file_path, mode, encoding="utf-8") as f:
            f.write(content)
        file_size = os.path.getsize(file_path)
        return f"File {file_path} {'appended to' if append_mode_bool else 'written'} successfully. Size: {file_size} bytes."
93
+
94
+
95
if __name__ == "__main__":
    # Print the tool's markdown self-description when run as a script.
    print(WriteFileTool().to_markdown())
@@ -0,0 +1,17 @@
1
"""Public helpers re-exported at the `quantalogic.utils` package level."""

# Import order is deliberate (noqa: I001) — keep it as-is.
from .download_http_file import download_http_file  # noqa: I001
from .read_file import read_file
from .read_http_text_content import read_http_text_content
from .git_ls import git_ls
from .get_environment import get_environment
from .get_coding_environment import get_coding_environment
from .get_quantalogic_rules_content import get_quantalogic_rules_file_content

# Public API of the package — keep in sync with the imports above.
__all__ = [
    "download_http_file",
    "read_http_text_content",
    "read_file",
    "git_ls",
    "get_environment",
    "get_coding_environment",
    "get_quantalogic_rules_file_content",
]
@@ -0,0 +1,12 @@
1
def console_ask_for_user_validation(question: str = "Do you want to continue?") -> bool:
    """Ask the user to confirm an action on the console using Rich.

    Args:
        question (str): The validation question.

    Returns:
        bool: User's confirmation.
    """
    # Imported lazily so Rich is only required when a prompt is actually shown.
    from rich.prompt import Confirm

    confirmed: bool = Confirm.ask(question, default=True)
    return confirmed
@@ -0,0 +1,77 @@
1
+ """Utility function to download a file from a given URL and save it to a local path."""
2
+
3
+ import logging
4
+ from time import sleep
5
+ from typing import Optional
6
+
7
+ import requests
8
+ from requests.exceptions import ConnectionError, HTTPError, RequestException, Timeout, TooManyRedirects
9
+
10
# Configure logging
# NOTE(review): basicConfig at import time configures the process-wide root
# logger — confirm this is intended for a library module.
logging.basicConfig(level=logging.ERROR, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
logger = logging.getLogger(__name__)


def download_http_file(
    url: str, local_path: str, chunk_size: int = 8192, max_retries: int = 3, timeout: int = 10, delay: int = 2
) -> Optional[str]:
    """Downloads a file from a given URL and saves it to the specified local path.

    Retries transient failures with exponential backoff; gives up immediately
    on 401/403/404 responses.

    Args:
        url (str): The URL of the file to download.
        local_path (str): The local file path where the downloaded file will be saved.
        chunk_size (int): The size of each chunk to download. Default is 8192 bytes.
        max_retries (int): The maximum number of retries for transient errors. Default is 3.
        timeout (int): Timeout in seconds for the HTTP request. Default is 10.
        delay (int): Delay in seconds between retries. Default is 2.

    Returns:
        Optional[str]: The local path where the file was saved if successful, None otherwise.
    """
    # Browser-like headers: some servers reject requests without a User-Agent.
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
        "Accept": "*/*",
    }

    for attempt in range(max_retries):
        try:
            logger.info(f"Attempt {attempt + 1} of {max_retries} to download {url}")
            response = requests.get(url, headers=headers, stream=True, timeout=timeout)
            response.raise_for_status()

            content_type = response.headers.get("Content-Type", "unknown")
            logger.info(f"Downloading content with Content-Type: {content_type}")

            # Stream chunk by chunk so large files are never fully in memory.
            with open(local_path, "wb") as file:
                for chunk in response.iter_content(chunk_size=chunk_size):
                    file.write(chunk)

            logger.info(f"File successfully downloaded and saved to {local_path}")
            return local_path

        except HTTPError as http_err:
            status_code = http_err.response.status_code if http_err.response else "unknown"
            error_msg = f"HTTP error occurred (status code: {status_code}, URL: {url}): {http_err}"
            logger.error(error_msg)
            if status_code in [404, 403, 401]:  # Don't retry for these status codes
                # NOTE(review): this break still falls through to the
                # "Max retries reached" log below even though no retry happened.
                break
        except ConnectionError as conn_err:
            error_msg = f"Connection error occurred (URL: {url}): {conn_err}"
            logger.error(error_msg)
        except Timeout as timeout_err:
            error_msg = f"Request timed out after {timeout} seconds (URL: {url}): {timeout_err}"
            logger.error(error_msg)
        except TooManyRedirects as redirect_err:
            error_msg = f"Too many redirects (URL: {url}): {redirect_err}"
            logger.error(error_msg)
        except RequestException as req_err:
            error_msg = f"An unexpected error occurred (URL: {url}): {req_err}"
            logger.error(error_msg)

        # Back off before the next attempt (no sleep after the final one).
        if attempt < max_retries - 1:
            sleep_duration = delay * (2**attempt)  # Exponential backoff
            logger.info(f"Retrying in {sleep_duration} seconds...")
            sleep(sleep_duration)

    logger.error("Max retries reached. Download failed.")
    return None
@@ -0,0 +1,15 @@
1
+ import os
2
+
3
+ from quantalogic.utils.get_environment import get_environment
4
+ from quantalogic.utils.git_ls import git_ls
5
+
6
+
7
def get_coding_environment() -> str:
    """Retrieve coding environment details.

    Combines the generic environment report with a first-level listing of
    the current working directory's git tree, wrapped in XML-style tags.
    """
    sections = [
        f"{get_environment()}",
        "\n\n",
        "<codebase_first_level>\n",
        f"{git_ls(directory_path=os.getcwd())}",
        "\n</codebase_first_level>\n",
    ]
    return "".join(sections)
@@ -0,0 +1,26 @@
1
+ import os
2
+ from datetime import datetime
3
+
4
+ from loguru import logger
5
+
6
+
7
def get_environment() -> str:
    """Retrieve the current environment details.

    Returns a multi-line report (shell, cwd, OS, timestamp), or a fallback
    string if anything goes wrong while collecting it.
    """
    try:
        logger.debug("Retrieving environment details.")
        # Collect each datum inline; os.name is the coarse OS family name.
        report = (
            f"Current shell: {os.getenv('SHELL', 'bash')}\n"
            f"Current directory: {os.getcwd()}\n"
            f"Operating system: {os.name}\n"
            f"Date and time: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
        )
        logger.debug(f"Environment details:\n{report}")
        return report
    except Exception as e:
        # Never let environment probing crash the caller.
        logger.error(f"Error retrieving environment details: {str(e)}")
        return "Environment details unavailable."
@@ -0,0 +1,19 @@
1
+ from typing import Union
2
+
3
+
4
def get_quantalogic_rules_file_content() -> Union[str, None]:
    """
    Reads the content of the .quantalogicrules file in the current directory.

    Returns:
        Union[str, None]: The content of the .quantalogicrules file if it exists.
        Returns None if the file does not exist.

    Raises:
        RuntimeError: If an error other than a missing file occurs while reading.
    """
    try:
        # Explicit UTF-8: the rest of the package opens files with
        # encoding="utf-8", so don't depend on the locale default here.
        with open(".quantalogicrules", encoding="utf-8") as file:
            return file.read()
    except FileNotFoundError:
        return None
    except Exception as e:
        # Chain the original exception so the root cause stays visible.
        raise RuntimeError(f"Error reading .quantalogicrules file: {e}") from e