chatterer 0.1.17__py3-none-any.whl → 0.1.19__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. chatterer/__init__.py +93 -93
  2. chatterer/common_types/__init__.py +21 -21
  3. chatterer/common_types/io.py +19 -19
  4. chatterer/examples/__init__.py +0 -0
  5. chatterer/examples/anything_to_markdown.py +95 -0
  6. chatterer/examples/get_code_snippets.py +64 -0
  7. chatterer/examples/login_with_playwright.py +171 -0
  8. chatterer/examples/make_ppt.py +499 -0
  9. chatterer/examples/pdf_to_markdown.py +107 -0
  10. chatterer/examples/pdf_to_text.py +60 -0
  11. chatterer/examples/transcription_api.py +127 -0
  12. chatterer/examples/upstage_parser.py +95 -0
  13. chatterer/examples/webpage_to_markdown.py +79 -0
  14. chatterer/interactive.py +354 -354
  15. chatterer/language_model.py +533 -533
  16. chatterer/messages.py +21 -21
  17. chatterer/strategies/__init__.py +13 -13
  18. chatterer/strategies/atom_of_thoughts.py +975 -975
  19. chatterer/strategies/base.py +14 -14
  20. chatterer/tools/__init__.py +46 -46
  21. chatterer/tools/caption_markdown_images.py +384 -384
  22. chatterer/tools/citation_chunking/__init__.py +3 -3
  23. chatterer/tools/citation_chunking/chunks.py +53 -53
  24. chatterer/tools/citation_chunking/citation_chunker.py +118 -118
  25. chatterer/tools/citation_chunking/citations.py +285 -285
  26. chatterer/tools/citation_chunking/prompt.py +157 -157
  27. chatterer/tools/citation_chunking/reference.py +26 -26
  28. chatterer/tools/citation_chunking/utils.py +138 -138
  29. chatterer/tools/convert_pdf_to_markdown.py +302 -302
  30. chatterer/tools/convert_to_text.py +447 -447
  31. chatterer/tools/upstage_document_parser.py +705 -705
  32. chatterer/tools/webpage_to_markdown.py +739 -739
  33. chatterer/tools/youtube.py +146 -146
  34. chatterer/utils/__init__.py +15 -15
  35. chatterer/utils/base64_image.py +285 -285
  36. chatterer/utils/bytesio.py +59 -59
  37. chatterer/utils/code_agent.py +237 -237
  38. chatterer/utils/imghdr.py +148 -148
  39. {chatterer-0.1.17.dist-info → chatterer-0.1.19.dist-info}/METADATA +392 -392
  40. chatterer-0.1.19.dist-info/RECORD +44 -0
  41. {chatterer-0.1.17.dist-info → chatterer-0.1.19.dist-info}/WHEEL +1 -1
  42. chatterer-0.1.19.dist-info/entry_points.txt +10 -0
  43. chatterer-0.1.17.dist-info/RECORD +0 -33
  44. {chatterer-0.1.17.dist-info → chatterer-0.1.19.dist-info}/top_level.txt +0 -0
chatterer/tools/convert_to_text.py
@@ -1,447 +1,447 @@
import ast
import importlib
import os
import re
import site
from fnmatch import fnmatch
from pathlib import Path
from typing import (
    TYPE_CHECKING,
    Callable,
    Iterable,
    NamedTuple,
    NotRequired,
    Optional,
    Self,
    Sequence,
    TypeAlias,
    TypedDict,
)

from ..common_types.io import PathOrReadable
from ..utils.bytesio import read_bytes_stream
from .convert_pdf_to_markdown import extract_text_from_pdf

if TYPE_CHECKING:
    from bs4 import Tag
    from openai import OpenAI
    from requests import Response, Session

try:
    from tiktoken import get_encoding, list_encoding_names

    enc = get_encoding(list_encoding_names()[-1])
except ImportError:
    enc = None


# Type definition for representing a file tree structure
type FileTree = dict[str, Optional[FileTree]]

# Type aliases for callback functions and file descriptors
CodeLanguageCallback: TypeAlias = Callable[["Tag"], Optional[str]]


class HtmlToMarkdownOptions(TypedDict):
    """
    TypedDict for options used in HTML to Markdown conversion.

    Contains various configuration options for controlling how HTML is converted to Markdown,
    including formatting preferences, escape behaviors, and styling options.
    """

    autolinks: NotRequired[bool]
    bullets: NotRequired[str]
    code_language: NotRequired[str]
    code_language_callback: NotRequired[CodeLanguageCallback]
    convert: NotRequired[Sequence[str]]
    default_title: NotRequired[bool]
    escape_asterisks: NotRequired[bool]
    escape_underscores: NotRequired[bool]
    escape_misc: NotRequired[bool]
    heading_style: NotRequired[str]
    keep_inline_images_in: NotRequired[Sequence[str]]
    newline_style: NotRequired[str]
    strip: NotRequired[Sequence[str]]
    strip_document: NotRequired[str]
    strong_em_symbol: NotRequired[str]
    sub_symbol: NotRequired[str]
    sup_symbol: NotRequired[str]
    table_infer_header: NotRequired[bool]
    wrap: NotRequired[bool]
    wrap_width: NotRequired[int]


def get_default_html_to_markdown_options() -> HtmlToMarkdownOptions:
    """
    Returns the default options for HTML to Markdown conversion.

    This function provides a set of sensible defaults for the markdownify library,
    including settings for bullets, escaping, heading styles, and other formatting options.

    Returns:
        HtmlToMarkdownOptions: A dictionary of default conversion options.
    """
    from markdownify import (  # pyright: ignore[reportUnknownVariableType, reportMissingTypeStubs]
        ASTERISK,
        SPACES,
        STRIP,
        UNDERLINED,
    )

    return {
        "autolinks": True,
        "bullets": "*+-",  # An iterable of bullet types.
        "code_language": "",
        "default_title": False,
        "escape_asterisks": True,
        "escape_underscores": True,
        "escape_misc": False,
        "heading_style": UNDERLINED,
        "keep_inline_images_in": [],
        "newline_style": SPACES,
        "strip_document": STRIP,
        "strong_em_symbol": ASTERISK,
        "sub_symbol": "",
        "sup_symbol": "",
        "table_infer_header": False,
        "wrap": False,
        "wrap_width": 80,
    }


class CodeSnippets(NamedTuple):
    """
    A named tuple that represents code snippets extracted from Python files.

    Contains the paths to the files, the concatenated text of all snippets,
    and the base directory of the files.
    """

    paths: list[Path]
    snippets_text: str
    base_dir: Path

    @classmethod
    def from_path_or_pkgname(
        cls,
        path_or_pkgname: str,
        glob_patterns: str | list[str] = "*.py",
        case_sensitive: bool = False,
        ban_file_patterns: Optional[list[str]] = None,
    ) -> Self:
        """
        Creates a CodeSnippets instance from a file path or package name.

        Args:
            path_or_pkgname: Path to a file/directory or a Python package name.
            glob_patterns: Glob pattern(s) used to select files.
            case_sensitive: Whether glob matching is case-sensitive.
            ban_file_patterns: Optional list of patterns to exclude files.

        Returns:
            A new CodeSnippets instance with extracted code snippets.
        """
        paths: list[Path] = _get_filepaths(
            path_or_pkgname=path_or_pkgname,
            glob_patterns=glob_patterns,
            case_sensitive=case_sensitive,
            ban_fn_patterns=ban_file_patterns,
        )
        snippets_text: str = "".join(_get_a_snippet(p) for p in paths)
        return cls(
            paths=paths,
            snippets_text=snippets_text,
            base_dir=_get_base_dir(paths),
        )

    @property
    def metadata(self) -> str:
        """
        Generates metadata about the code snippets.

        Returns a string containing information about the file tree structure,
        total number of files, tokens (if tiktoken is available), and lines.

        Returns:
            str: Formatted metadata string.
        """
        file_paths: list[Path] = self.paths
        text: str = self.snippets_text

        base_dir: Path = _get_base_dir(file_paths)
        results: list[str] = [base_dir.as_posix()]

        file_tree: FileTree = {}
        for file_path in sorted(file_paths):
            rel_path = file_path.relative_to(base_dir)
            subtree: Optional[FileTree] = file_tree
            for part in rel_path.parts[:-1]:
                if subtree is not None:
                    subtree = subtree.setdefault(part, {})
            if subtree is not None:
                subtree[rel_path.parts[-1]] = None

        def _display_tree(tree: FileTree, prefix: str = "") -> None:
            """
            Helper function to recursively display a file tree structure.

            Args:
                tree: The file tree dictionary to display.
                prefix: Current line prefix for proper indentation.
            """
            items: list[tuple[str, Optional[FileTree]]] = sorted(tree.items())
            count: int = len(items)
            for idx, (name, subtree) in enumerate(items):
                branch: str = "└── " if idx == count - 1 else "├── "
                results.append(f"{prefix}{branch}{name}")
                if subtree is not None:
                    extension: str = "    " if idx == count - 1 else "│   "
                    _display_tree(tree=subtree, prefix=prefix + extension)

        _display_tree(file_tree)
        results.append(f"- Total files: {len(file_paths)}")
        if enc is not None:
            num_tokens: int = len(enc.encode(text, disallowed_special=()))
            results.append(f"- Total tokens: {num_tokens}")
        results.append(f"- Total lines: {text.count('\n') + 1}")
        return "\n".join(results)


def html_to_markdown(html: str, options: Optional[HtmlToMarkdownOptions]) -> str:
    """
    Convert HTML content to Markdown using the provided options.

    Args:
        html (str): HTML content to convert.
        options (HtmlToMarkdownOptions): Options for the conversion.

    Returns:
        str: The Markdown content.
    """
    from markdownify import markdownify  # pyright: ignore[reportUnknownVariableType, reportMissingTypeStubs]

    return str(markdownify(html, **(options or {})))  # pyright: ignore[reportUnknownArgumentType]


def pdf_to_text(path_or_file: PathOrReadable, page_indices: Iterable[int] | int | None = None) -> str:
    """
    Convert a PDF file to plain text.

    Extracts text from each page of a PDF file and formats it with page markers.

    Args:
        path_or_file: Path to a PDF file or a readable object containing PDF data.
        page_indices: Optional page indices to extract. If None, all pages are extracted.
            If an integer is provided, only that page is extracted.
            If an iterable is provided, the specified pages are extracted.

    Returns:
        str: Extracted text with page markers.

    Raises:
        FileNotFoundError: If the file cannot be found or opened.
    """
    from pymupdf import Document  # pyright: ignore[reportMissingTypeStubs]

    with read_bytes_stream(path_or_file) as stream:
        if stream is None:
            raise FileNotFoundError(path_or_file)
        with Document(stream=stream.read()) as doc:
            return "\n".join(
                f"<!-- Page {page_no} -->\n{text}\n"
                for page_no, text in extract_text_from_pdf(doc, page_indices).items()
            )


def anything_to_markdown(
    source: "str | Response | Path",
    requests_session: Optional["Session"] = None,
    llm_client: Optional["OpenAI"] = None,
    llm_model: Optional[str] = None,
    style_map: Optional[str] = None,
    exiftool_path: Optional[str] = None,
    docintel_endpoint: Optional[str] = None,
) -> str:
    """
    Convert various types of content to Markdown format.

    Uses the MarkItDown library to convert different types of content (URLs, files, API responses)
    to Markdown format.

    Args:
        source: The source content to convert (URL string, Response object, or Path).
        requests_session: Optional requests Session for HTTP requests.
        llm_client: Optional OpenAI client for LLM-based conversions.
        llm_model: Optional model name for the LLM.
        style_map: Optional style mapping configuration.
        exiftool_path: Optional path to exiftool for metadata extraction.
        docintel_endpoint: Optional Document Intelligence API endpoint.

    Returns:
        str: The converted Markdown content.
    """
    from markitdown import MarkItDown

    result = MarkItDown(
        requests_session=requests_session,
        llm_client=llm_client,
        llm_model=llm_model,
        style_map=style_map,
        exiftool_path=exiftool_path,
        docintel_endpoint=docintel_endpoint,
    ).convert(source)
    return result.text_content


# Alias for CodeSnippets.from_path_or_pkgname for backward compatibility
pyscripts_to_snippets = CodeSnippets.from_path_or_pkgname


def _pattern_to_regex(pattern: str) -> re.Pattern[str]:
    """
    Converts an fnmatch pattern to a regular expression.

    In this function, '**' is converted to match any character including directory separators.
    The remaining '*' matches any character except directory separators, and '?' matches a single character.

    Args:
        pattern: The fnmatch pattern to convert.

    Returns:
        A compiled regular expression pattern.
    """
    # First escape the pattern
    pattern = re.escape(pattern)
    # Convert '**' to match any character including directory separators ('.*')
    pattern = pattern.replace(r"\*\*", ".*")
    # Then convert single '*' to match any character except directory separators
    pattern = pattern.replace(r"\*", "[^/]*")
    # Convert '?' to match a single character
    pattern = pattern.replace(r"\?", ".")
    # Anchor the pattern to start and end
    pattern = "^" + pattern + "$"
    return re.compile(pattern)


def _is_banned(p: Path, ban_patterns: list[str]) -> bool:
    """
    Checks if a given path matches any of the ban patterns.

    Determines if the path p matches any pattern in ban_patterns using either
    fnmatch-based or recursive patterns (i.e., containing '**').

    Note: Patterns should use POSIX-style paths (i.e., '/' separators).

    Args:
        p: The path to check.
        ban_patterns: List of patterns to match against.

    Returns:
        bool: True if the path matches any ban pattern, False otherwise.
    """
    p_str = p.as_posix()
    for pattern in ban_patterns:
        if "**" in pattern:
            regex = _pattern_to_regex(pattern)
            if regex.match(p_str):
                return True
        else:
            # Simple fnmatch: '*' by default doesn't match '/'
            if fnmatch(p_str, pattern):
                return True
    return False


def _get_a_snippet(fpath: Path) -> str:
    """
    Extracts a code snippet from a Python file.

    Reads the file, parses it as Python code, and returns a formatted code snippet
    with the relative path as a header in markdown code block format.

    Args:
        fpath: Path to the Python file.

    Returns:
        str: Formatted code snippet or empty string if the file doesn't exist.
    """
    if not fpath.is_file():
        return ""

    cleaned_code: str = "\n".join(
        line for line in ast.unparse(ast.parse(fpath.read_text(encoding="utf-8"))).splitlines()
    )
    if site_dir := next(
        (d for d in reversed(site.getsitepackages()) if fpath.is_relative_to(d)),
        None,
    ):
        display_path = fpath.relative_to(site_dir)
    elif fpath.is_relative_to(cwd := Path.cwd()):
        display_path = fpath.relative_to(cwd)
    else:
        display_path = fpath.absolute()
    return f"```{display_path}\n{cleaned_code}\n```\n\n"


def _get_base_dir(target_files: Sequence[Path]) -> Path:
    """
    Determines the common base directory for a sequence of file paths.

    Finds the longest path that is a common parent of all the given files.

    Args:
        target_files: Sequence of file paths.

    Returns:
        Path: The common base directory.
    """
    return Path(os.path.commonpath(target_files))


def _get_filepaths(
    path_or_pkgname: str,
    glob_patterns: str | list[str] = "*.py",
    case_sensitive: bool = False,
    ban_fn_patterns: Optional[list[str]] = None,
) -> list[Path]:
    """
    Gets paths to files from a directory, file, or Python package name.

    If path_or_pkgname is a directory, finds all files matching `glob_patterns` recursively.
    If it's a file, returns just that file.
    If it's a package name, imports the package and finds all .py files in its directory.

    Args:
        path_or_pkgname: Path to directory/file or package name.
        glob_patterns: Glob pattern(s) to match files.
        case_sensitive: Whether to match files case-sensitively.
        ban_fn_patterns: Optional list of patterns to exclude files.

    Returns:
        list[Path]: List of paths to Python files.
    """
    path = Path(path_or_pkgname)
    pypaths: list[Path]
    if path.is_dir():
        glob_patterns = glob_patterns if isinstance(glob_patterns, (tuple, list)) else [glob_patterns]
        pypaths = []
        for pattern in glob_patterns:
            if "**" in pattern:
                regex = _pattern_to_regex(pattern)
                pypaths.extend(
                    p for p in path.rglob("**/*", case_sensitive=case_sensitive) if regex.match(p.as_posix())
                )
            else:
                pypaths += list(path.rglob(pattern, case_sensitive=case_sensitive))

        # pypaths = list(path.rglob(glob_pattern, case_sensitive=case_sensitive))
    elif path.is_file():
        pypaths = [path]
    else:
        pypaths = [
            p
            for p in Path(next(iter(importlib.import_module(path_or_pkgname).__path__))).rglob(
                "*.py", case_sensitive=False
            )
            if p.is_file()
        ]
    return [p for p in pypaths if not ban_fn_patterns or not _is_banned(p, ban_fn_patterns)]
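For orientation, here is a minimal usage sketch of the public helpers defined in the module above. It is a sketch under assumptions, not part of the package's documentation: it assumes the helpers are importable from chatterer.tools.convert_to_text (they may also be re-exported from chatterer.tools), that the optional dependencies markdownify and pymupdf are installed (anything_to_markdown would additionally require markitdown), and that the file name passed to pdf_to_text is a placeholder.

```python
# Hedged usage sketch based only on the signatures shown in the diff above.
from chatterer.tools.convert_to_text import (
    CodeSnippets,
    get_default_html_to_markdown_options,
    html_to_markdown,
    pdf_to_text,
)

# HTML -> Markdown using the module's default markdownify options.
md = html_to_markdown(
    "<h1>Title</h1><p>Hello <b>world</b></p>",
    get_default_html_to_markdown_options(),
)
print(md)

# PDF -> plain text with per-page markers; "example.pdf" is a placeholder path.
text = pdf_to_text("example.pdf", page_indices=[0, 1])

# Collect code snippets for an installed package and print the file-tree metadata,
# excluding test files via a recursive ban pattern.
snippets = CodeSnippets.from_path_or_pkgname("chatterer", ban_file_patterns=["**/tests/**"])
print(snippets.metadata)
```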