chatterer 0.1.12__tar.gz → 0.1.13__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. {chatterer-0.1.12 → chatterer-0.1.13}/PKG-INFO +171 -170
  2. {chatterer-0.1.12 → chatterer-0.1.13}/README.md +136 -136
  3. {chatterer-0.1.12 → chatterer-0.1.13}/chatterer/__init__.py +62 -60
  4. chatterer-0.1.13/chatterer/common_types/__init__.py +21 -0
  5. chatterer-0.1.13/chatterer/common_types/io.py +19 -0
  6. {chatterer-0.1.12 → chatterer-0.1.13}/chatterer/language_model.py +577 -577
  7. {chatterer-0.1.12 → chatterer-0.1.13}/chatterer/messages.py +9 -9
  8. {chatterer-0.1.12 → chatterer-0.1.13}/chatterer/strategies/__init__.py +13 -13
  9. {chatterer-0.1.12 → chatterer-0.1.13}/chatterer/strategies/atom_of_thoughts.py +975 -975
  10. {chatterer-0.1.12 → chatterer-0.1.13}/chatterer/strategies/base.py +14 -14
  11. {chatterer-0.1.12 → chatterer-0.1.13}/chatterer/tools/__init__.py +35 -28
  12. {chatterer-0.1.12 → chatterer-0.1.13}/chatterer/tools/citation_chunking/__init__.py +3 -3
  13. {chatterer-0.1.12 → chatterer-0.1.13}/chatterer/tools/citation_chunking/chunks.py +53 -53
  14. {chatterer-0.1.12 → chatterer-0.1.13}/chatterer/tools/citation_chunking/citation_chunker.py +118 -118
  15. {chatterer-0.1.12 → chatterer-0.1.13}/chatterer/tools/citation_chunking/citations.py +285 -285
  16. {chatterer-0.1.12 → chatterer-0.1.13}/chatterer/tools/citation_chunking/prompt.py +157 -157
  17. {chatterer-0.1.12 → chatterer-0.1.13}/chatterer/tools/citation_chunking/reference.py +26 -26
  18. {chatterer-0.1.12 → chatterer-0.1.13}/chatterer/tools/citation_chunking/utils.py +138 -138
  19. {chatterer-0.1.12 → chatterer-0.1.13}/chatterer/tools/convert_to_text.py +418 -463
  20. chatterer-0.1.13/chatterer/tools/upstage_document_parser.py +438 -0
  21. {chatterer-0.1.12 → chatterer-0.1.13}/chatterer/tools/webpage_to_markdown/__init__.py +4 -4
  22. {chatterer-0.1.12 → chatterer-0.1.13}/chatterer/tools/webpage_to_markdown/playwright_bot.py +649 -649
  23. {chatterer-0.1.12 → chatterer-0.1.13}/chatterer/tools/webpage_to_markdown/utils.py +334 -334
  24. {chatterer-0.1.12 → chatterer-0.1.13}/chatterer/tools/youtube.py +146 -146
  25. {chatterer-0.1.12 → chatterer-0.1.13}/chatterer/utils/__init__.py +15 -15
  26. chatterer-0.1.13/chatterer/utils/bytesio.py +59 -0
  27. {chatterer-0.1.12 → chatterer-0.1.13}/chatterer/utils/code_agent.py +138 -138
  28. {chatterer-0.1.12 → chatterer-0.1.13}/chatterer/utils/image.py +291 -291
  29. {chatterer-0.1.12 → chatterer-0.1.13}/chatterer.egg-info/PKG-INFO +171 -170
  30. {chatterer-0.1.12 → chatterer-0.1.13}/chatterer.egg-info/SOURCES.txt +4 -0
  31. {chatterer-0.1.12 → chatterer-0.1.13}/chatterer.egg-info/requires.txt +8 -7
  32. {chatterer-0.1.12 → chatterer-0.1.13}/pyproject.toml +9 -8
  33. {chatterer-0.1.12 → chatterer-0.1.13}/setup.cfg +4 -4
  34. {chatterer-0.1.12 → chatterer-0.1.13}/chatterer/py.typed +0 -0
  35. {chatterer-0.1.12 → chatterer-0.1.13}/chatterer.egg-info/dependency_links.txt +0 -0
  36. {chatterer-0.1.12 → chatterer-0.1.13}/chatterer.egg-info/top_level.txt +0 -0
@@ -1,170 +1,171 @@
1
- Metadata-Version: 2.4
2
- Name: chatterer
3
- Version: 0.1.12
4
- Summary: The highest-level interface for various LLM APIs.
5
- Requires-Python: >=3.12
6
- Description-Content-Type: text/markdown
7
- Requires-Dist: instructor>=1.7.2
8
- Requires-Dist: langchain>=0.3.19
9
- Provides-Extra: dev
10
- Requires-Dist: neo4j-extension>=0.1.14; extra == "dev"
11
- Requires-Dist: colorama>=0.4.6; extra == "dev"
12
- Requires-Dist: ipykernel>=6.29.5; extra == "dev"
13
- Provides-Extra: conversion
14
- Requires-Dist: markdownify>=1.1.0; extra == "conversion"
15
- Requires-Dist: commonmark>=0.9.1; extra == "conversion"
16
- Requires-Dist: playwright>=1.50.0; extra == "conversion"
17
- Requires-Dist: pillow>=11.1.0; extra == "conversion"
18
- Requires-Dist: mistune>=3.1.2; extra == "conversion"
19
- Requires-Dist: markitdown>=0.0.2; extra == "conversion"
20
- Requires-Dist: pymupdf>=1.25.4; extra == "conversion"
21
- Requires-Dist: youtube-transcript-api>=1.0.2; extra == "conversion"
22
- Provides-Extra: langchain
23
- Requires-Dist: chatterer[langchain-providers]; extra == "langchain"
24
- Requires-Dist: langchain-experimental>=0.3.4; extra == "langchain"
25
- Provides-Extra: langchain-providers
26
- Requires-Dist: langchain-openai>=0.3.7; extra == "langchain-providers"
27
- Requires-Dist: langchain-anthropic>=0.3.8; extra == "langchain-providers"
28
- Requires-Dist: langchain-google-genai>=2.0.10; extra == "langchain-providers"
29
- Requires-Dist: langchain-ollama>=0.2.3; extra == "langchain-providers"
30
- Provides-Extra: all
31
- Requires-Dist: chatterer[langchain]; extra == "all"
32
- Requires-Dist: chatterer[conversion]; extra == "all"
33
- Requires-Dist: chatterer[dev]; extra == "all"
34
-
35
- # Chatterer
36
-
37
- **Simplified, Structured AI Assistant Framework**
38
-
39
- `chatterer` is a Python library designed as a type-safe LangChain wrapper for interacting with various language models (OpenAI, Anthropic, Gemini, Ollama, etc.). It supports structured outputs via Pydantic models, plain text responses, and asynchronous calls.
40
-
41
- The structured reasoning in `chatterer` is inspired by the [Atom-of-Thought](https://github.com/qixucen/atom) pipeline.
42
-
43
- ---
44
-
45
- ## Quick Install
46
-
47
- ```bash
48
- pip install chatterer
49
- ```
50
-
51
- ---
52
-
53
- ## Quickstart Example
54
-
55
- Generate text quickly using OpenAI:
56
-
57
- ```python
58
- from chatterer import Chatterer
59
-
60
- chat = Chatterer.openai("gpt-4o-mini")
61
- response = chat.generate("What is the meaning of life?")
62
- print(response)
63
- ```
64
-
65
- Messages can be input as plain strings or structured lists:
66
-
67
- ```python
68
- response = chat.generate([{ "role": "user", "content": "What's 2+2?" }])
69
- print(response)
70
- ```
71
-
72
- ### Structured Output with Pydantic
73
-
74
- ```python
75
- from pydantic import BaseModel
76
-
77
- class AnswerModel(BaseModel):
78
- question: str
79
- answer: str
80
-
81
- response = chat.generate_pydantic(AnswerModel, "What's the capital of France?")
82
- print(response.question, response.answer)
83
- ```
84
-
85
- ### Async Example
86
-
87
- ```python
88
- import asyncio
89
-
90
- async def main():
91
- response = await chat.agenerate("Explain async in Python briefly.")
92
- print(response)
93
-
94
- asyncio.run(main())
95
- ```
96
-
97
- ---
98
-
99
- ## Atom-of-Thought Pipeline (AoT)
100
-
101
- `AoTPipeline` provides structured reasoning by:
102
-
103
- - Detecting question domains (general, math, coding, philosophy, multihop).
104
- - Decomposing questions recursively.
105
- - Generating direct, decomposition-based, and simplified answers.
106
- - Combining answers via ensemble.
107
-
108
- ### AoT Usage Example
109
-
110
- ```python
111
- from chatterer import Chatterer
112
- from chatterer.strategies import AoTStrategy, AoTPipeline
113
-
114
- pipeline = AoTPipeline(chatterer=Chatterer.openai(), max_depth=2)
115
- strategy = AoTStrategy(pipeline=pipeline)
116
-
117
- question = "What would Newton discover if hit by an apple falling from 100 meters?"
118
- answer = strategy.invoke(question)
119
- print(answer)
120
- ```
121
-
122
- ---
123
-
124
- ## Supported Models
125
-
126
- - **OpenAI**
127
- - **Anthropic**
128
- - **Google Gemini**
129
- - **Ollama** (local models)
130
-
131
- Initialize models easily:
132
-
133
- ```python
134
- openai_chat = Chatterer.openai("gpt-4o-mini")
135
- anthropic_chat = Chatterer.anthropic("claude-3-7-sonnet-20250219")
136
- gemini_chat = Chatterer.google("gemini-2.0-flash")
137
- ollama_chat = Chatterer.ollama("deepseek-r1:1.5b")
138
- ```
139
-
140
- ---
141
-
142
- ## Advanced Features
143
-
144
- - **Streaming responses**
145
- - **Async/Await support**
146
- - **Structured outputs with Pydantic models**
147
-
148
- ---
149
-
150
- ## Logging
151
-
152
- Built-in logging for easy debugging:
153
-
154
- ```python
155
- import logging
156
- logging.basicConfig(level=logging.DEBUG)
157
- ```
158
-
159
- ---
160
-
161
- ## Contributing
162
-
163
- Feel free to open an issue or pull request.
164
-
165
- ---
166
-
167
- ## License
168
-
169
- MIT License
170
-
1
+ Metadata-Version: 2.4
2
+ Name: chatterer
3
+ Version: 0.1.13
4
+ Summary: The highest-level interface for various LLM APIs.
5
+ Requires-Python: >=3.12
6
+ Description-Content-Type: text/markdown
7
+ Requires-Dist: instructor>=1.7.2
8
+ Requires-Dist: langchain>=0.3.19
9
+ Provides-Extra: dev
10
+ Requires-Dist: neo4j-extension>=0.1.14; extra == "dev"
11
+ Requires-Dist: colorama>=0.4.6; extra == "dev"
12
+ Requires-Dist: ipykernel>=6.29.5; extra == "dev"
13
+ Provides-Extra: conversion
14
+ Requires-Dist: markdownify>=1.1.0; extra == "conversion"
15
+ Requires-Dist: commonmark>=0.9.1; extra == "conversion"
16
+ Requires-Dist: playwright>=1.50.0; extra == "conversion"
17
+ Requires-Dist: pillow>=11.1.0; extra == "conversion"
18
+ Requires-Dist: mistune>=3.1.3; extra == "conversion"
19
+ Requires-Dist: markitdown>=0.1.1; extra == "conversion"
20
+ Requires-Dist: pymupdf>=1.25.4; extra == "conversion"
21
+ Requires-Dist: youtube-transcript-api>=1.0.3; extra == "conversion"
22
+ Requires-Dist: pypdf>=5.4.0; extra == "conversion"
23
+ Provides-Extra: langchain
24
+ Requires-Dist: chatterer[langchain-providers]; extra == "langchain"
25
+ Requires-Dist: langchain-experimental>=0.3.4; extra == "langchain"
26
+ Provides-Extra: langchain-providers
27
+ Requires-Dist: langchain-openai>=0.3.11; extra == "langchain-providers"
28
+ Requires-Dist: langchain-anthropic>=0.3.10; extra == "langchain-providers"
29
+ Requires-Dist: langchain-google-genai>=2.1.1; extra == "langchain-providers"
30
+ Requires-Dist: langchain-ollama>=0.3.0; extra == "langchain-providers"
31
+ Provides-Extra: all
32
+ Requires-Dist: chatterer[langchain]; extra == "all"
33
+ Requires-Dist: chatterer[conversion]; extra == "all"
34
+ Requires-Dist: chatterer[dev]; extra == "all"
35
+
36
+ # Chatterer
37
+
38
+ **Simplified, Structured AI Assistant Framework**
39
+
40
+ `chatterer` is a Python library designed as a type-safe LangChain wrapper for interacting with various language models (OpenAI, Anthropic, Gemini, Ollama, etc.). It supports structured outputs via Pydantic models, plain text responses, and asynchronous calls.
41
+
42
+ The structured reasoning in `chatterer` is inspired by the [Atom-of-Thought](https://github.com/qixucen/atom) pipeline.
43
+
44
+ ---
45
+
46
+ ## Quick Install
47
+
48
+ ```bash
49
+ pip install chatterer
50
+ ```
51
+
52
+ ---
53
+
54
+ ## Quickstart Example
55
+
56
+ Generate text quickly using OpenAI:
57
+
58
+ ```python
59
+ from chatterer import Chatterer
60
+
61
+ chat = Chatterer.openai("gpt-4o-mini")
62
+ response = chat.generate("What is the meaning of life?")
63
+ print(response)
64
+ ```
65
+
66
+ Messages can be input as plain strings or structured lists:
67
+
68
+ ```python
69
+ response = chat.generate([{ "role": "user", "content": "What's 2+2?" }])
70
+ print(response)
71
+ ```
72
+
73
+ ### Structured Output with Pydantic
74
+
75
+ ```python
76
+ from pydantic import BaseModel
77
+
78
+ class AnswerModel(BaseModel):
79
+ question: str
80
+ answer: str
81
+
82
+ response = chat.generate_pydantic(AnswerModel, "What's the capital of France?")
83
+ print(response.question, response.answer)
84
+ ```
85
+
86
+ ### Async Example
87
+
88
+ ```python
89
+ import asyncio
90
+
91
+ async def main():
92
+ response = await chat.agenerate("Explain async in Python briefly.")
93
+ print(response)
94
+
95
+ asyncio.run(main())
96
+ ```
97
+
98
+ ---
99
+
100
+ ## Atom-of-Thought Pipeline (AoT)
101
+
102
+ `AoTPipeline` provides structured reasoning by:
103
+
104
+ - Detecting question domains (general, math, coding, philosophy, multihop).
105
+ - Decomposing questions recursively.
106
+ - Generating direct, decomposition-based, and simplified answers.
107
+ - Combining answers via ensemble.
108
+
109
+ ### AoT Usage Example
110
+
111
+ ```python
112
+ from chatterer import Chatterer
113
+ from chatterer.strategies import AoTStrategy, AoTPipeline
114
+
115
+ pipeline = AoTPipeline(chatterer=Chatterer.openai(), max_depth=2)
116
+ strategy = AoTStrategy(pipeline=pipeline)
117
+
118
+ question = "What would Newton discover if hit by an apple falling from 100 meters?"
119
+ answer = strategy.invoke(question)
120
+ print(answer)
121
+ ```
122
+
123
+ ---
124
+
125
+ ## Supported Models
126
+
127
+ - **OpenAI**
128
+ - **Anthropic**
129
+ - **Google Gemini**
130
+ - **Ollama** (local models)
131
+
132
+ Initialize models easily:
133
+
134
+ ```python
135
+ openai_chat = Chatterer.openai("gpt-4o-mini")
136
+ anthropic_chat = Chatterer.anthropic("claude-3-7-sonnet-20250219")
137
+ gemini_chat = Chatterer.google("gemini-2.0-flash")
138
+ ollama_chat = Chatterer.ollama("deepseek-r1:1.5b")
139
+ ```
140
+
141
+ ---
142
+
143
+ ## Advanced Features
144
+
145
+ - **Streaming responses**
146
+ - **Async/Await support**
147
+ - **Structured outputs with Pydantic models**
148
+
149
+ ---
150
+
151
+ ## Logging
152
+
153
+ Built-in logging for easy debugging:
154
+
155
+ ```python
156
+ import logging
157
+ logging.basicConfig(level=logging.DEBUG)
158
+ ```
159
+
160
+ ---
161
+
162
+ ## Contributing
163
+
164
+ Feel free to open an issue or pull request.
165
+
166
+ ---
167
+
168
+ ## License
169
+
170
+ MIT License
171
+
@@ -1,136 +1,136 @@
1
- # Chatterer
2
-
3
- **Simplified, Structured AI Assistant Framework**
4
-
5
- `chatterer` is a Python library designed as a type-safe LangChain wrapper for interacting with various language models (OpenAI, Anthropic, Gemini, Ollama, etc.). It supports structured outputs via Pydantic models, plain text responses, and asynchronous calls.
6
-
7
- The structured reasoning in `chatterer` is inspired by the [Atom-of-Thought](https://github.com/qixucen/atom) pipeline.
8
-
9
- ---
10
-
11
- ## Quick Install
12
-
13
- ```bash
14
- pip install chatterer
15
- ```
16
-
17
- ---
18
-
19
- ## Quickstart Example
20
-
21
- Generate text quickly using OpenAI:
22
-
23
- ```python
24
- from chatterer import Chatterer
25
-
26
- chat = Chatterer.openai("gpt-4o-mini")
27
- response = chat.generate("What is the meaning of life?")
28
- print(response)
29
- ```
30
-
31
- Messages can be input as plain strings or structured lists:
32
-
33
- ```python
34
- response = chat.generate([{ "role": "user", "content": "What's 2+2?" }])
35
- print(response)
36
- ```
37
-
38
- ### Structured Output with Pydantic
39
-
40
- ```python
41
- from pydantic import BaseModel
42
-
43
- class AnswerModel(BaseModel):
44
- question: str
45
- answer: str
46
-
47
- response = chat.generate_pydantic(AnswerModel, "What's the capital of France?")
48
- print(response.question, response.answer)
49
- ```
50
-
51
- ### Async Example
52
-
53
- ```python
54
- import asyncio
55
-
56
- async def main():
57
- response = await chat.agenerate("Explain async in Python briefly.")
58
- print(response)
59
-
60
- asyncio.run(main())
61
- ```
62
-
63
- ---
64
-
65
- ## Atom-of-Thought Pipeline (AoT)
66
-
67
- `AoTPipeline` provides structured reasoning by:
68
-
69
- - Detecting question domains (general, math, coding, philosophy, multihop).
70
- - Decomposing questions recursively.
71
- - Generating direct, decomposition-based, and simplified answers.
72
- - Combining answers via ensemble.
73
-
74
- ### AoT Usage Example
75
-
76
- ```python
77
- from chatterer import Chatterer
78
- from chatterer.strategies import AoTStrategy, AoTPipeline
79
-
80
- pipeline = AoTPipeline(chatterer=Chatterer.openai(), max_depth=2)
81
- strategy = AoTStrategy(pipeline=pipeline)
82
-
83
- question = "What would Newton discover if hit by an apple falling from 100 meters?"
84
- answer = strategy.invoke(question)
85
- print(answer)
86
- ```
87
-
88
- ---
89
-
90
- ## Supported Models
91
-
92
- - **OpenAI**
93
- - **Anthropic**
94
- - **Google Gemini**
95
- - **Ollama** (local models)
96
-
97
- Initialize models easily:
98
-
99
- ```python
100
- openai_chat = Chatterer.openai("gpt-4o-mini")
101
- anthropic_chat = Chatterer.anthropic("claude-3-7-sonnet-20250219")
102
- gemini_chat = Chatterer.google("gemini-2.0-flash")
103
- ollama_chat = Chatterer.ollama("deepseek-r1:1.5b")
104
- ```
105
-
106
- ---
107
-
108
- ## Advanced Features
109
-
110
- - **Streaming responses**
111
- - **Async/Await support**
112
- - **Structured outputs with Pydantic models**
113
-
114
- ---
115
-
116
- ## Logging
117
-
118
- Built-in logging for easy debugging:
119
-
120
- ```python
121
- import logging
122
- logging.basicConfig(level=logging.DEBUG)
123
- ```
124
-
125
- ---
126
-
127
- ## Contributing
128
-
129
- Feel free to open an issue or pull request.
130
-
131
- ---
132
-
133
- ## License
134
-
135
- MIT License
136
-
1
+ # Chatterer
2
+
3
+ **Simplified, Structured AI Assistant Framework**
4
+
5
+ `chatterer` is a Python library designed as a type-safe LangChain wrapper for interacting with various language models (OpenAI, Anthropic, Gemini, Ollama, etc.). It supports structured outputs via Pydantic models, plain text responses, and asynchronous calls.
6
+
7
+ The structured reasoning in `chatterer` is inspired by the [Atom-of-Thought](https://github.com/qixucen/atom) pipeline.
8
+
9
+ ---
10
+
11
+ ## Quick Install
12
+
13
+ ```bash
14
+ pip install chatterer
15
+ ```
16
+
17
+ ---
18
+
19
+ ## Quickstart Example
20
+
21
+ Generate text quickly using OpenAI:
22
+
23
+ ```python
24
+ from chatterer import Chatterer
25
+
26
+ chat = Chatterer.openai("gpt-4o-mini")
27
+ response = chat.generate("What is the meaning of life?")
28
+ print(response)
29
+ ```
30
+
31
+ Messages can be input as plain strings or structured lists:
32
+
33
+ ```python
34
+ response = chat.generate([{ "role": "user", "content": "What's 2+2?" }])
35
+ print(response)
36
+ ```
37
+
38
+ ### Structured Output with Pydantic
39
+
40
+ ```python
41
+ from pydantic import BaseModel
42
+
43
+ class AnswerModel(BaseModel):
44
+ question: str
45
+ answer: str
46
+
47
+ response = chat.generate_pydantic(AnswerModel, "What's the capital of France?")
48
+ print(response.question, response.answer)
49
+ ```
50
+
51
+ ### Async Example
52
+
53
+ ```python
54
+ import asyncio
55
+
56
+ async def main():
57
+ response = await chat.agenerate("Explain async in Python briefly.")
58
+ print(response)
59
+
60
+ asyncio.run(main())
61
+ ```
62
+
63
+ ---
64
+
65
+ ## Atom-of-Thought Pipeline (AoT)
66
+
67
+ `AoTPipeline` provides structured reasoning by:
68
+
69
+ - Detecting question domains (general, math, coding, philosophy, multihop).
70
+ - Decomposing questions recursively.
71
+ - Generating direct, decomposition-based, and simplified answers.
72
+ - Combining answers via ensemble.
73
+
74
+ ### AoT Usage Example
75
+
76
+ ```python
77
+ from chatterer import Chatterer
78
+ from chatterer.strategies import AoTStrategy, AoTPipeline
79
+
80
+ pipeline = AoTPipeline(chatterer=Chatterer.openai(), max_depth=2)
81
+ strategy = AoTStrategy(pipeline=pipeline)
82
+
83
+ question = "What would Newton discover if hit by an apple falling from 100 meters?"
84
+ answer = strategy.invoke(question)
85
+ print(answer)
86
+ ```
87
+
88
+ ---
89
+
90
+ ## Supported Models
91
+
92
+ - **OpenAI**
93
+ - **Anthropic**
94
+ - **Google Gemini**
95
+ - **Ollama** (local models)
96
+
97
+ Initialize models easily:
98
+
99
+ ```python
100
+ openai_chat = Chatterer.openai("gpt-4o-mini")
101
+ anthropic_chat = Chatterer.anthropic("claude-3-7-sonnet-20250219")
102
+ gemini_chat = Chatterer.google("gemini-2.0-flash")
103
+ ollama_chat = Chatterer.ollama("deepseek-r1:1.5b")
104
+ ```
105
+
106
+ ---
107
+
108
+ ## Advanced Features
109
+
110
+ - **Streaming responses**
111
+ - **Async/Await support**
112
+ - **Structured outputs with Pydantic models**
113
+
114
+ ---
115
+
116
+ ## Logging
117
+
118
+ Built-in logging for easy debugging:
119
+
120
+ ```python
121
+ import logging
122
+ logging.basicConfig(level=logging.DEBUG)
123
+ ```
124
+
125
+ ---
126
+
127
+ ## Contributing
128
+
129
+ Feel free to open an issue or pull request.
130
+
131
+ ---
132
+
133
+ ## License
134
+
135
+ MIT License
136
+