chatterer-0.1.16-py3-none-any.whl → chatterer-0.1.18-py3-none-any.whl

This diff shows the contents of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
Files changed (42)
  1. chatterer/__init__.py +93 -93
  2. chatterer/common_types/__init__.py +21 -21
  3. chatterer/common_types/io.py +19 -19
  4. chatterer/examples/anything_to_markdown.py +91 -0
  5. chatterer/examples/get_code_snippets.py +62 -0
  6. chatterer/examples/login_with_playwright.py +167 -0
  7. chatterer/examples/make_ppt.py +497 -0
  8. chatterer/examples/pdf_to_markdown.py +107 -0
  9. chatterer/examples/pdf_to_text.py +56 -0
  10. chatterer/examples/transcription_api.py +123 -0
  11. chatterer/examples/upstage_parser.py +100 -0
  12. chatterer/examples/webpage_to_markdown.py +79 -0
  13. chatterer/interactive.py +354 -692
  14. chatterer/language_model.py +533 -533
  15. chatterer/messages.py +21 -21
  16. chatterer/strategies/__init__.py +13 -13
  17. chatterer/strategies/atom_of_thoughts.py +975 -975
  18. chatterer/strategies/base.py +14 -14
  19. chatterer/tools/__init__.py +46 -46
  20. chatterer/tools/caption_markdown_images.py +384 -384
  21. chatterer/tools/citation_chunking/__init__.py +3 -3
  22. chatterer/tools/citation_chunking/chunks.py +53 -53
  23. chatterer/tools/citation_chunking/citation_chunker.py +118 -118
  24. chatterer/tools/citation_chunking/citations.py +285 -285
  25. chatterer/tools/citation_chunking/prompt.py +157 -157
  26. chatterer/tools/citation_chunking/reference.py +26 -26
  27. chatterer/tools/citation_chunking/utils.py +138 -138
  28. chatterer/tools/convert_pdf_to_markdown.py +302 -302
  29. chatterer/tools/convert_to_text.py +447 -447
  30. chatterer/tools/upstage_document_parser.py +705 -705
  31. chatterer/tools/webpage_to_markdown.py +739 -739
  32. chatterer/tools/youtube.py +146 -146
  33. chatterer/utils/__init__.py +15 -15
  34. chatterer/utils/base64_image.py +285 -285
  35. chatterer/utils/bytesio.py +59 -59
  36. chatterer/utils/code_agent.py +237 -237
  37. chatterer/utils/imghdr.py +148 -148
  38. {chatterer-0.1.16.dist-info → chatterer-0.1.18.dist-info}/METADATA +392 -392
  39. chatterer-0.1.18.dist-info/RECORD +42 -0
  40. {chatterer-0.1.16.dist-info → chatterer-0.1.18.dist-info}/WHEEL +1 -1
  41. chatterer-0.1.16.dist-info/RECORD +0 -33
  42. {chatterer-0.1.16.dist-info → chatterer-0.1.18.dist-info}/top_level.txt +0 -0

chatterer/examples/transcription_api.py (new file)
@@ -0,0 +1,123 @@
+ # pyright: reportUnknownVariableType=false, reportUnknownMemberType=false, reportArgumentType=false, reportMissingTypeStubs=false
+
+ from io import BytesIO
+ from pathlib import Path
+ from typing import cast
+
+ from openai import OpenAI
+ from pydub import AudioSegment
+ from spargear import ArgumentSpec, BaseArguments
+
+ # Maximum chunk length in seconds
+ MAX_CHUNK_DURATION = 600
+
+
+ class TranscriptionApiArguments(BaseArguments):
+     in_path = ArgumentSpec(
+         ["in-path"],
+         type=Path,
+         help="The audio file to transcribe.",
+     )
+     out_path = ArgumentSpec(
+         ["--out-path"],
+         type=Path,
+         default=None,
+         help="Path to save the transcription output.",
+     )
+     model: ArgumentSpec[str] = ArgumentSpec(
+         ["--model"],
+         default="gpt-4o-transcribe",
+         help="The model to use for transcription.",
+     )
+     api_key: ArgumentSpec[str] = ArgumentSpec(
+         ["--api-key"],
+         default=None,
+         help="The API key for authentication.",
+     )
+     base_url: ArgumentSpec[str] = ArgumentSpec(
+         ["--base-url"],
+         default="https://api.openai.com/v1",
+         help="The base URL for the API.",
+     )
+
+     def run(self) -> None:
+         audio_path = self.in_path.unwrap()
+         model = self.model.unwrap()
+
+         client = OpenAI(api_key=self.api_key.value, base_url=self.base_url.value)
+
+         audio = load_audio_segment(audio_path)
+
+         segments = split_audio(audio, MAX_CHUNK_DURATION)
+         print(f"[i] Audio duration: {len(audio) / 1000:.1f}s; splitting into {len(segments)} segment(s)")
+
+         transcripts: list[str] = []
+         for idx, seg in enumerate(segments, start=1):
+             print(f"[i] Transcribing segment {idx}/{len(segments)}...")
+             transcripts.append(transcribe_segment(seg, client, model))
+
+         full_transcript = "\n\n".join(transcripts)
+         output_path: Path = self.out_path.value or audio_path.with_suffix(".txt")
+         output_path.write_text(full_transcript, encoding="utf-8")
+         print(f"[✓] Transcription saved to: {output_path}")
+
+
+ def load_audio_segment(file_path: Path) -> AudioSegment:
+     """
+     Load an audio file as an AudioSegment. Convert to mp3 format in-memory if needed.
+     """
+     ext = file_path.suffix.lower()[1:]
+     audio = AudioSegment.from_file(file_path.as_posix(), format=ext if ext != "mp3" else None)
+     if ext != "mp3":
+         buffer = BytesIO()
+         audio.export(buffer, format="mp3")
+         buffer.seek(0)
+         audio = AudioSegment.from_file(buffer, format="mp3")
+     return audio
+
+
+ def split_audio(audio: AudioSegment, max_duration_s: int) -> list[AudioSegment]:
+     """
+     Split the AudioSegment into chunks no longer than max_duration_s seconds.
+     """
+     chunk_length_ms = (max_duration_s - 1) * 1000
+     duration_ms = len(audio)
+     segments: list[AudioSegment] = []
+     segment_idx: int = 0
+     for start_ms in range(0, duration_ms, chunk_length_ms):
+         end_ms = min(start_ms + chunk_length_ms, duration_ms)
+         segment = cast(AudioSegment, audio[start_ms:end_ms])
+         segments.append(segment)
+         # with open(f"segment_{segment_idx}.mp3", "wb") as f:
+         #     segment.export(f, format="mp3")
+         segment_idx += 1
+     return segments
+
+
+ def transcribe_segment(segment: AudioSegment, client: OpenAI, model: str) -> str:
+     """
+     Transcribe a single AudioSegment chunk and return its text.
+     """
+     buffer = BytesIO()
+     segment.export(buffer, format="mp3")
+     buffer.seek(0)
+     mp3_bytes = buffer.read()
+     response = client.audio.transcriptions.create(
+         model=model,
+         prompt="Transcribe whole text from audio.",
+         file=("audio.mp3", mp3_bytes),
+         response_format="text",
+         stream=True,
+     )
+     for res in response:
+         if res.type == "transcript.text.delta":
+             print(res.delta, end="", flush=True)
+         if res.type == "transcript.text.done":
+             print()
+             return res.text
+     else:
+         raise RuntimeError("No transcription result found.")
+
+
+ if __name__ == "__main__":
+     TranscriptionApiArguments().run()
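
The helper functions above (load_audio_segment, split_audio, transcribe_segment) can also be driven without the spargear CLI layer. A minimal sketch, assuming the module is importable as chatterer.examples.transcription_api and that OPENAI_API_KEY is set in the environment; the input file and model name are illustrative, not taken from the diff:

from pathlib import Path

from openai import OpenAI

from chatterer.examples.transcription_api import (
    MAX_CHUNK_DURATION,
    load_audio_segment,
    split_audio,
    transcribe_segment,
)

client = OpenAI()  # reads OPENAI_API_KEY from the environment
audio = load_audio_segment(Path("meeting.m4a"))  # any format pydub/ffmpeg can decode
chunks = split_audio(audio, MAX_CHUNK_DURATION)
# Transcribe each chunk and join the pieces the same way run() does.
text = "\n\n".join(transcribe_segment(chunk, client, "gpt-4o-transcribe") for chunk in chunks)
Path("meeting.txt").write_text(text, encoding="utf-8")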

chatterer/examples/upstage_parser.py (new file)
@@ -0,0 +1,100 @@
+ def resolve_import_path_and_get_logger():
+     # ruff: noqa: E402
+     import logging
+     import sys
+
+     if __name__ == "__main__" and "." not in sys.path:
+         sys.path.append(".")
+
+     logger = logging.getLogger(__name__)
+     return logger
+
+
+ logger = resolve_import_path_and_get_logger()
+ from pathlib import Path
+
+ from langchain_core.documents.base import Blob
+ from spargear import ArgumentSpec, BaseArguments
+
+ from chatterer import Chatterer, UpstageDocumentParseParser
+ from chatterer.tools.upstage_document_parser import (
+     DEFAULT_IMAGE_DIR,
+     DOCUMENT_PARSE_BASE_URL,
+     DOCUMENT_PARSE_DEFAULT_MODEL,
+     OCR,
+     Category,
+     OutputFormat,
+     SplitType,
+ )
+
+
+ class UpstageParserArguments(BaseArguments):
+     in_path: ArgumentSpec[Path] = ArgumentSpec(["in-path"], help="Path to the input file.")
+     out_path: ArgumentSpec[Path] = ArgumentSpec(["--out-path"], default=None, help="Output file path.")
+     api_key: ArgumentSpec[str] = ArgumentSpec(["--api-key"], default=None, help="API key for the Upstage API.")
+     base_url: ArgumentSpec[str] = ArgumentSpec(
+         ["--base-url"], default=DOCUMENT_PARSE_BASE_URL, help="Base URL for the Upstage API."
+     )
+     model: ArgumentSpec[str] = ArgumentSpec(
+         ["--model"], default=DOCUMENT_PARSE_DEFAULT_MODEL, help="Model to use for parsing."
+     )
+     split: ArgumentSpec[SplitType] = ArgumentSpec(["--split"], default="none", help="Split type for parsing.")
+     ocr: ArgumentSpec[OCR] = ArgumentSpec(["--ocr"], default="auto", help="OCR type for parsing.")
+     output_format: ArgumentSpec[OutputFormat] = ArgumentSpec(
+         ["--output-format"], default="markdown", help="Output format."
+     )
+     coordinates: ArgumentSpec[bool] = ArgumentSpec(["--coordinates"], action="store_true", help="Include coordinates.")
+     base64_encoding: ArgumentSpec[list[Category]] = ArgumentSpec(
+         ["--base64-encoding"], default=["figure"], help="Base64 encoding for specific categories."
+     )
+     image_description_instruction: ArgumentSpec[str] = ArgumentSpec(
+         ["--image-description-instruction"],
+         default="Describe the image in detail.",
+         help="Instruction for image description.",
+     )
+     image_dir: ArgumentSpec[str] = ArgumentSpec(
+         ["--image-dir"],
+         default=DEFAULT_IMAGE_DIR,
+         help="Directory for image paths.",
+     )
+     chatterer: ArgumentSpec[Chatterer] = ArgumentSpec(
+         ["--chatterer"],
+         default=None,
+         help="Chatterer instance for communication.",
+         type=Chatterer.from_provider,
+     )
+
+     def run(self) -> None:
+         UpstageParserArguments.load()
+         input = UpstageParserArguments.in_path.unwrap().resolve()
+         out = UpstageParserArguments.out_path.value or input.with_suffix(".md")
+
+         parser = UpstageDocumentParseParser(
+             api_key=UpstageParserArguments.api_key.value,
+             base_url=UpstageParserArguments.base_url.unwrap(),
+             model=UpstageParserArguments.model.unwrap(),
+             split=UpstageParserArguments.split.unwrap(),
+             ocr=UpstageParserArguments.ocr.unwrap(),
+             output_format=UpstageParserArguments.output_format.unwrap(),
+             coordinates=UpstageParserArguments.coordinates.unwrap(),
+             base64_encoding=UpstageParserArguments.base64_encoding.unwrap(),
+             image_description_instruction=UpstageParserArguments.image_description_instruction.unwrap(),
+             image_dir=UpstageParserArguments.image_dir.value,
+             chatterer=UpstageParserArguments.chatterer.value,
+         )
+
+         docs = parser.parse(Blob.from_path(input))  # pyright: ignore[reportUnknownMemberType]
+
+         if UpstageParserArguments.image_dir.value:
+             for path, image in parser.image_data.items():
+                 (path := Path(path)).parent.mkdir(parents=True, exist_ok=True)
+                 path.write_bytes(image)
+                 logger.info(f"Saved image to `{path}`")
+
+         markdown: str = "\n\n".join(f"<!--- page {i} -->\n{doc.page_content}" for i, doc in enumerate(docs, 1))
+         out.write_text(markdown, encoding="utf-8")
+         logger.info(f"Parsed `{input}` to `{out}`")
+
+
+ if __name__ == "__main__":
+     UpstageParserArguments().run()
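
The UpstageDocumentParseParser that this script wraps can also be used directly. A minimal sketch, assuming the constructor parameters omitted here default to the same values the CLI passes above; the file names are illustrative and the API key is a placeholder:

from pathlib import Path

from langchain_core.documents.base import Blob

from chatterer import UpstageDocumentParseParser

parser = UpstageDocumentParseParser(
    api_key="YOUR_UPSTAGE_API_KEY",  # placeholder
    output_format="markdown",
    coordinates=False,
    base64_encoding=["figure"],
)
docs = parser.parse(Blob.from_path(Path("report.pdf")))

# Mirror the script's per-page markdown assembly.
markdown = "\n\n".join(f"<!--- page {i} -->\n{doc.page_content}" for i, doc in enumerate(docs, 1))
Path("report.md").write_text(markdown, encoding="utf-8")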

chatterer/examples/webpage_to_markdown.py (new file)
@@ -0,0 +1,79 @@
+ def resolve_import_path_and_get_logger():
+     # ruff: noqa: E402
+     import logging
+     import sys
+
+     if __name__ == "__main__" and "." not in sys.path:
+         sys.path.append(".")
+
+     logger = logging.getLogger(__name__)
+     return logger
+
+
+ logger = resolve_import_path_and_get_logger()
+ from pathlib import Path
+ from typing import Literal
+
+ from spargear import ArgumentSpec, BaseArguments
+
+ from chatterer import Chatterer, MarkdownLink, PlayWrightBot
+
+
+ class WebpageToMarkdownArgs(BaseArguments):
+     url: ArgumentSpec[str] = ArgumentSpec(["url"], help="The URL to crawl.")
+     out_path: str = Path(__file__).with_suffix(".md").as_posix()
+     """The output file path for the markdown file."""
+     chatterer: ArgumentSpec[Chatterer] = ArgumentSpec(
+         ["--llm"],
+         default=None,
+         type=Chatterer.from_provider,
+         help="The Chatterer backend and model to use for filtering the markdown.",
+     )
+     engine: Literal["firefox", "chromium", "webkit"] = "firefox"
+     """The browser engine to use."""
+
+     def run(self) -> None:
+         chatterer = self.chatterer.value
+         url: str = self.url.unwrap().strip()
+         out_path: Path = Path(self.out_path).resolve()
+         with PlayWrightBot(chatterer=chatterer, engine=self.engine) as bot:
+             md = bot.url_to_md(url)
+             out_path.write_text(md, encoding="utf-8")
+             if chatterer is not None:
+                 md_llm = bot.url_to_md_with_llm(url.strip())
+                 out_path.write_text(md_llm, encoding="utf-8")
+             links = MarkdownLink.from_markdown(md, referer_url=url)
+             for link in links:
+                 if link.type == "link":
+                     print(
+                         f"- [{truncate_string(link.url)}] {truncate_string(link.inline_text)} ({truncate_string(link.inline_title)})"
+                     )
+                 elif link.type == "image":
+                     print(f"- ![{truncate_string(link.url)}] ({truncate_string(link.inline_text)})")
+
+     async def arun(self) -> None:
+         chatterer = self.chatterer.value
+         url: str = self.url.unwrap().strip()
+         out_path: Path = Path(self.out_path).resolve()
+         async with PlayWrightBot(chatterer=chatterer, engine=self.engine) as bot:
+             md = await bot.aurl_to_md(url)
+             out_path.write_text(md, encoding="utf-8")
+             if chatterer is not None:
+                 md_llm = await bot.aurl_to_md_with_llm(url.strip())
+                 out_path.write_text(md_llm, encoding="utf-8")
+             links = MarkdownLink.from_markdown(md, referer_url=url)
+             for link in links:
+                 if link.type == "link":
+                     print(
+                         f"- [{truncate_string(link.url)}] {truncate_string(link.inline_text)} ({truncate_string(link.inline_title)})"
+                     )
+                 elif link.type == "image":
+                     print(f"- ![{truncate_string(link.url)}] ({truncate_string(link.inline_text)})")
+
+
+ def truncate_string(s: str) -> str:
+     return s[:50] + "..." if len(s) > 50 else s
+
+
+ if __name__ == "__main__":
+     WebpageToMarkdownArgs().run()
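
The PlayWrightBot workflow in run() can likewise be used on its own. A minimal sketch that mirrors the calls made above, assuming PlayWrightBot and MarkdownLink behave as they are used in this script; the URL and output path are illustrative:

from pathlib import Path

from chatterer import MarkdownLink, PlayWrightBot

url = "https://example.com"
# chatterer=None skips the LLM filtering step, as run() does when no --llm is given.
with PlayWrightBot(chatterer=None, engine="firefox") as bot:
    md = bot.url_to_md(url)

Path("example.md").write_text(md, encoding="utf-8")

# List the links found in the rendered markdown, as the script does.
for link in MarkdownLink.from_markdown(md, referer_url=url):
    print(f"- [{link.type}] {link.url}")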