chatterer 0.1.2__py3-none-any.whl → 0.1.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- chatterer/__init__.py +21 -23
- chatterer/language_model.py +590 -0
- chatterer/strategies/__init__.py +19 -0
- chatterer/strategies/atom_of_thoughts.py +594 -0
- chatterer/strategies/base.py +14 -0
- chatterer-0.1.3.dist-info/METADATA +150 -0
- chatterer-0.1.3.dist-info/RECORD +9 -0
- chatterer/llms/__init__.py +0 -20
- chatterer/llms/base.py +0 -42
- chatterer/llms/instructor.py +0 -127
- chatterer/llms/langchain.py +0 -49
- chatterer/llms/ollama.py +0 -69
- chatterer-0.1.2.dist-info/METADATA +0 -213
- chatterer-0.1.2.dist-info/RECORD +0 -10
- {chatterer-0.1.2.dist-info → chatterer-0.1.3.dist-info}/WHEEL +0 -0
- {chatterer-0.1.2.dist-info → chatterer-0.1.3.dist-info}/top_level.txt +0 -0
chatterer-0.1.3.dist-info/METADATA
ADDED
@@ -0,0 +1,150 @@
+Metadata-Version: 2.2
+Name: chatterer
+Version: 0.1.3
+Summary: The highest-level interface for various LLM APIs.
+Requires-Python: >=3.12
+Description-Content-Type: text/markdown
+Requires-Dist: instructor>=1.7.2
+Requires-Dist: langchain>=0.3.19
+Provides-Extra: all
+Requires-Dist: langchain-openai>=0.3.7; extra == "all"
+Requires-Dist: langchain-anthropic>=0.3.8; extra == "all"
+Requires-Dist: langchain-google-genai>=2.0.10; extra == "all"
+Requires-Dist: langchain-ollama>=0.2.3; extra == "all"
+
+# Chatterer
+
+**Simplified, Structured AI Assistant Framework**
+
+`chatterer` is a Python library designed as a type-safe LangChain wrapper for interacting with various language models (OpenAI, Anthropic, Gemini, Ollama, etc.). It supports structured outputs via Pydantic models, plain text responses, and asynchronous calls.
+
+The structured reasoning in `chatterer` is inspired by the [Atom-of-Thought](https://github.com/qixucen/atom) pipeline.
+
+---
+
+## Quick Install
+
+```bash
+pip install chatterer
+```
+
+---
+
+## Quickstart Example
+
+Generate text quickly using OpenAI:
+
+```python
+from chatterer import Chatterer
+
+chat = Chatterer.openai("gpt-4o-mini")
+response = chat.generate("What is the meaning of life?")
+print(response)
+```
+
+Messages can be input as plain strings or structured lists:
+
+```python
+response = chat.generate([{ "role": "user", "content": "What's 2+2?" }])
+print(response)
+```
+
+### Structured Output with Pydantic
+
+```python
+from pydantic import BaseModel
+
+class AnswerModel(BaseModel):
+    question: str
+    answer: str
+
+response = chat.generate_pydantic(AnswerModel, "What's the capital of France?")
+print(response.question, response.answer)
+```
+
+### Async Example
+
+```python
+import asyncio
+
+async def main():
+    response = await chat.agenerate("Explain async in Python briefly.")
+    print(response)
+
+asyncio.run(main())
+```
+
+---
+
+## Atom-of-Thought Pipeline (AoT)
+
+`AoTPipeline` provides structured reasoning by:
+
+- Detecting question domains (general, math, coding, philosophy, multihop).
+- Decomposing questions recursively.
+- Generating direct, decomposition-based, and simplified answers.
+- Combining answers via ensemble.
+
+### AoT Usage Example
+
+```python
+from chatterer import Chatterer
+from chatterer.strategies import AoTStrategy, AoTPipeline
+
+pipeline = AoTPipeline(chatterer=Chatterer.openai(), max_depth=2)
+strategy = AoTStrategy(pipeline=pipeline)
+
+question = "What would Newton discover if hit by an apple falling from 100 meters?"
+answer = strategy.invoke(question)
+print(answer)
+```
+
+---
+
+## Supported Models
+
+- **OpenAI**
+- **Anthropic**
+- **Google Gemini**
+- **Ollama** (local models)
+
+Initialize models easily:
+
+```python
+openai_chat = Chatterer.openai("gpt-4o-mini")
+anthropic_chat = Chatterer.anthropic("claude-3-7-sonnet-20250219")
+gemini_chat = Chatterer.google("gemini-2.0-flash")
+ollama_chat = Chatterer.ollama("deepseek-r1:1.5b")
+```
+
+---
+
+## Advanced Features
+
+- **Streaming responses**
+- **Async/Await support**
+- **Structured outputs with Pydantic models**
+
+---
+
+## Logging
+
+Built-in logging for easy debugging:
+
+```python
+import logging
+logging.basicConfig(level=logging.DEBUG)
+```
+
+---
+
+## Contributing
+
+Feel free to open an issue or pull request.
+
+---
+
+## License
+
+MIT License
+
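The new README lists streaming under Advanced Features without demonstrating it, and this diff does not include `chatterer/language_model.py`. A minimal sketch of what streaming usage might look like, assuming the 0.1.3 `Chatterer` keeps a `generate_stream` method analogous to the one on the removed `LLM` base class further down in this diff (the method name is an assumption, not confirmed by the source):

```python
# Hypothetical streaming usage: `generate_stream` is assumed to carry
# over from the removed LLM ABC; the 0.1.3 source is not shown here.
from chatterer import Chatterer

chat = Chatterer.openai("gpt-4o-mini")
for chunk in chat.generate_stream("Stream a short limerick."):
    print(chunk, end="", flush=True)
```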
chatterer-0.1.3.dist-info/RECORD
ADDED
@@ -0,0 +1,9 @@
+chatterer/__init__.py,sha256=8j0NJU6mH-aaaCKyQwvJif25n_p7plCCR7pkcij4daY,462
+chatterer/language_model.py,sha256=LgnUtETjvandKi1GrcXwTLDInhuFVGoFZNqfM0P0T4o,38470
+chatterer/strategies/__init__.py,sha256=AKDBC06AdNjPskERssCOsz3qGfFN31mxklmnfhHatJo,389
+chatterer/strategies/atom_of_thoughts.py,sha256=2cHq6kfB_RAb4NbM396NpD40E2aiavZKblMCtUi-u1Q,24948
+chatterer/strategies/base.py,sha256=b2gMPqodp97OP1dkHfj0UqixjdjVhmTw_V5qJ7i2S6g,427
+chatterer-0.1.3.dist-info/METADATA,sha256=1cmKFrkea0VqUyQiUaHCqpMYHwHTr6RzcWYk4syMZfw,3373
+chatterer-0.1.3.dist-info/WHEEL,sha256=jB7zZ3N9hIM9adW7qlTAyycLYW9npaWKLRzaoVcLKcM,91
+chatterer-0.1.3.dist-info/top_level.txt,sha256=7nSQKP0bHxPRc7HyzdbKsJdkvPgYD0214o6slRizv9s,10
+chatterer-0.1.3.dist-info/RECORD,,
chatterer/llms/__init__.py
DELETED
@@ -1,20 +0,0 @@
-from importlib.util import find_spec
-
-from .base import LLM
-
-__all__ = ["LLM"]
-
-if find_spec("langchain_core") is not None:
-    from .langchain import LangchainLLM
-
-    __all__ += ["LangchainLLM"]
-
-if find_spec("ollama") is not None:
-    from .ollama import OllamaLLM
-
-    __all__ += ["OllamaLLM"]
-
-if find_spec("instructor") is not None:
-    from .instructor import InstructorLLM
-
-    __all__ += ["InstructorLLM"]
chatterer/llms/base.py
DELETED
@@ -1,42 +0,0 @@
-from abc import ABC, abstractmethod
-from typing import (
-    Any,
-    ClassVar,
-    Iterator,
-    Sequence,
-    Type,
-    TypeVar,
-)
-
-from openai.types.chat import ChatCompletionMessageParam
-from pydantic import BaseModel, ConfigDict, Field
-
-P = TypeVar("P", bound=BaseModel)
-
-
-class LLM(BaseModel, ABC):
-    call_kwargs: dict[str, Any] = Field(default_factory=dict)
-    model_config: ClassVar[ConfigDict] = ConfigDict(arbitrary_types_allowed=True)
-
-    def __call__(self, messages: Sequence[ChatCompletionMessageParam]) -> str:
-        return self.generate(messages)
-
-    @abstractmethod
-    def generate(self, messages: Sequence[ChatCompletionMessageParam]) -> str: ...
-
-    @abstractmethod
-    def generate_stream(self, messages: Sequence[ChatCompletionMessageParam]) -> Iterator[str]: ...
-
-    @abstractmethod
-    def generate_pydantic(
-        self,
-        response_model: Type[P],
-        messages: Sequence[ChatCompletionMessageParam],
-    ) -> P: ...
-
-    def generate_pydantic_stream(
-        self,
-        response_model: Type[P],
-        messages: Sequence[ChatCompletionMessageParam],
-    ) -> Iterator[P]:
-        raise NotImplementedError
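For context on what this deleted base class enabled: a custom backend plugged in by subclassing `LLM` and implementing the three abstract methods. A minimal sketch against the 0.1.2 API (`EchoLLM` is hypothetical, for illustration only):

```python
# Hypothetical custom backend against the removed 0.1.2 LLM ABC.
from typing import Iterator, Sequence, Type, TypeVar

from openai.types.chat import ChatCompletionMessageParam
from pydantic import BaseModel

from chatterer.llms import LLM  # 0.1.2 import path

P = TypeVar("P", bound=BaseModel)


class EchoLLM(LLM):
    """Trivial backend that echoes the user messages back."""

    def generate(self, messages: Sequence[ChatCompletionMessageParam]) -> str:
        return "".join(self.generate_stream(messages))

    def generate_stream(self, messages: Sequence[ChatCompletionMessageParam]) -> Iterator[str]:
        for msg in messages:
            yield str(msg.get("content", ""))

    def generate_pydantic(
        self,
        response_model: Type[P],
        messages: Sequence[ChatCompletionMessageParam],
    ) -> P:
        # An echo backend has no real structured output; this only
        # succeeds if the echoed text happens to be valid JSON.
        return response_model.model_validate_json(self.generate(messages))
```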
chatterer/llms/instructor.py
DELETED
@@ -1,127 +0,0 @@
-from os import environ
-from typing import (
-    Any,
-    Iterator,
-    Self,
-    Sequence,
-    Type,
-)
-
-from instructor import Instructor, Mode, from_openai
-from openai import OpenAI
-from openai.types.chat import ChatCompletionMessageParam
-from pydantic import BaseModel
-
-from .base import LLM, P
-
-
-class Response(BaseModel):
-    response: str
-
-
-class InstructorLLM(LLM):
-    inst: Instructor
-
-    @classmethod
-    def openai(cls, call_kwargs: dict[str, Any] = {"model": "o3-mini"}) -> Self:
-        return cls(
-            inst=from_openai(OpenAI(), Mode.TOOLS_STRICT),
-            call_kwargs=call_kwargs,
-        )
-
-    @classmethod
-    def anthropic(
-        cls,
-        call_kwargs: dict[str, Any] = {
-            "model": "claude-3-7-sonnet-20250219",
-            "temperature": 0.7,
-            "max_tokens": 8192,
-        },
-    ) -> Self:
-        from anthropic import Anthropic
-        from instructor import from_anthropic
-
-        return cls(
-            inst=from_anthropic(client=Anthropic(), mode=Mode.ANTHROPIC_TOOLS),
-            call_kwargs=call_kwargs,
-        )
-
-    @classmethod
-    def gemini(
-        cls,
-        model_name: str = "gemini-2.0-flash",
-        call_kwargs: dict[str, Any] = {},
-    ) -> Self:
-        from google.generativeai.generative_models import GenerativeModel
-        from instructor import from_gemini
-
-        return cls(
-            inst=from_gemini(client=GenerativeModel(model_name=model_name), mode=Mode.GEMINI_TOOLS),
-            call_kwargs=call_kwargs,
-        )
-
-    @classmethod
-    def deepseek(cls, call_kwargs: dict[str, Any] = {"model": "deepseek-chat"}) -> Self:
-        return cls(
-            inst=from_openai(
-                OpenAI(
-                    base_url="https://api.deepseek.com/v1",
-                    api_key=environ["DEEPSEEK_API_KEY"],
-                ),
-                Mode.TOOLS_STRICT,
-            ),
-            call_kwargs=call_kwargs,
-        )
-
-    def generate(self, messages: Sequence[ChatCompletionMessageParam]) -> str:
-        if self.inst is None:
-            raise ValueError("Instructor instance is not initialized")
-        res = self.inst.chat.completions.create(
-            response_model=Response,
-            messages=list(messages),
-            **self.call_kwargs,
-        )
-        return res.response
-
-    def generate_stream(self, messages: Sequence[ChatCompletionMessageParam]) -> Iterator[str]:
-        if self.inst is None:
-            raise ValueError("Instructor instance is not initialized")
-        last_content: str = ""
-        for res in self.inst.chat.completions.create_partial(
-            response_model=Response,
-            messages=list(messages),
-            **self.call_kwargs,
-        ):
-            content: str = res.response
-            delta: str = content.removeprefix(last_content)
-            if not delta:
-                continue
-            last_content = content
-            yield delta
-
-    def generate_pydantic(
-        self,
-        response_model: Type[P],
-        messages: Sequence[ChatCompletionMessageParam],
-    ) -> P:
-        if self.inst is None:
-            raise ValueError("Instructor instance is not initialized")
-        return self.inst.chat.completions.create(
-            response_model=response_model,
-            messages=list(messages),
-            **self.call_kwargs,
-        )
-
-    def generate_pydantic_stream(
-        self,
-        response_model: Type[P],
-        messages: Sequence[ChatCompletionMessageParam],
-    ) -> Iterator[P]:
-        if self.inst is None:
-            raise ValueError("Instructor instance is not initialized")
-        for res in self.inst.chat.completions.create_partial(
-            response_model=response_model,
-            messages=list(messages),
-            **self.call_kwargs,
-        ):
-            yield res
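The deleted `generate_stream` uses a technique worth keeping in mind: instructor's `create_partial` yields cumulative snapshots of the response, so each streamed chunk is recovered as the suffix not yet emitted. The same delta logic in isolation:

```python
# Delta extraction as used by the deleted generate_stream: each partial
# snapshot contains the full text so far, so emit only the new suffix.
snapshots = ["Hel", "Hello", "Hello, wor", "Hello, world!"]

last = ""
for snapshot in snapshots:
    delta = snapshot.removeprefix(last)
    if not delta:  # snapshot unchanged, nothing new to emit
        continue
    last = snapshot
    print(delta, end="")  # prints "Hello, world!" in pieces
print()
```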
chatterer/llms/langchain.py
DELETED
@@ -1,49 +0,0 @@
-from typing import (
-    Iterator,
-    Sequence,
-    Type,
-)
-
-from langchain_community.adapters.openai import (
-    convert_openai_messages,
-)
-from langchain_core.language_models.chat_models import BaseChatModel
-from openai.types.chat import ChatCompletionMessageParam
-
-from .base import LLM, P
-
-
-class LangchainLLM(LLM):
-    client: BaseChatModel
-
-    def generate(self, messages: Sequence[ChatCompletionMessageParam]) -> str:
-        content = self.client.invoke(convert_openai_messages([dict(msg) for msg in messages])).content
-        if isinstance(content, str):
-            return content
-        else:
-            return "".join(part for part in content if isinstance(part, str))
-
-    def generate_stream(self, messages: Sequence[ChatCompletionMessageParam]) -> Iterator[str]:
-        for chunk in self.client.stream(convert_openai_messages([dict(msg) for msg in messages])):
-            content = chunk.content
-            if isinstance(content, str):
-                yield content
-            elif isinstance(content, list):
-                for part in content:
-                    if isinstance(part, str):
-                        yield part
-                    else:
-                        continue
-            else:
-                continue
-
-    def generate_pydantic(
-        self,
-        response_model: Type[P],
-        messages: Sequence[ChatCompletionMessageParam],
-    ) -> P:
-        result = self.client.with_structured_output(response_model).invoke(convert_openai_messages([dict(msg) for msg in messages]))
-        if isinstance(result, response_model):
-            return result
-        else:
-            return response_model.model_validate(result)
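The `isinstance` ladder in the deleted class handles LangChain message content, which can be either a plain string or a list of mixed parts; only the string parts are kept. The same normalization as a standalone helper:

```python
# Content normalization as performed by the deleted LangchainLLM:
# keep string content as-is; from list content join only the string
# parts, dropping non-string blocks (e.g. image parts).
def normalize_content(content: str | list) -> str:
    if isinstance(content, str):
        return content
    return "".join(part for part in content if isinstance(part, str))


print(normalize_content("plain text"))                       # plain text
print(normalize_content(["Hello", {"type": "image"}, "!"]))  # Hello!
```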
chatterer/llms/ollama.py
DELETED
@@ -1,69 +0,0 @@
-from typing import (
-    Any,
-    Callable,
-    Iterator,
-    Literal,
-    Mapping,
-    Optional,
-    Sequence,
-    Type,
-)
-
-from ollama import Options, Tool, chat
-from openai.types.chat import ChatCompletionMessageParam
-from pydantic.json_schema import JsonSchemaValue
-
-from .base import LLM, P
-
-
-class OllamaLLM(LLM):
-    model: str
-    tools: Optional[Sequence[Mapping[str, Any] | Tool | Callable]] = None
-    stream: bool = False
-    format: Optional[Literal["", "json"] | JsonSchemaValue] = None
-    options: Optional[Mapping[str, Any] | Options] = None
-    keep_alive: Optional[float | str] = None
-
-    def generate(self, messages: Sequence[ChatCompletionMessageParam]) -> str:
-        return "".join(self.generate_stream(messages))
-
-    def generate_stream(self, messages: Sequence[ChatCompletionMessageParam]) -> Iterator[str]:
-        model = str(self.call_kwargs.get("model", self.model))
-        format = self.call_kwargs.get("format", self.format)
-        options = self.call_kwargs.get("options", self.options)
-        keep_alive = self.call_kwargs.get("keep_alive", self.keep_alive)
-        tools = self.call_kwargs.get("tools", self.tools)
-        return (
-            res.message.content or ""
-            for res in chat(
-                model=model,
-                messages=messages,
-                tools=tools,
-                stream=True,
-                format=format,
-                options=options,
-                keep_alive=keep_alive,
-            )
-        )
-
-    def generate_pydantic(
-        self,
-        response_model: Type[P],
-        messages: Sequence[ChatCompletionMessageParam],
-    ) -> P:
-        model = str(self.call_kwargs.get("model", self.model))
-        format = response_model.model_json_schema()
-        options = self.call_kwargs.get("options", self.options)
-        keep_alive = self.call_kwargs.get("keep_alive", self.keep_alive)
-        return response_model.model_validate_json(
-            chat(
-                model=model,
-                messages=messages,
-                tools=None,
-                stream=False,
-                format=format,
-                options=options,
-                keep_alive=keep_alive,
-            ).message.content
-            or ""
-        )
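The structured-output approach in the deleted `generate_pydantic` passes the Pydantic model's JSON schema as ollama's `format` argument, then validates the raw reply. The same pattern with the `ollama` client directly (the model name is borrowed from the new README; any locally pulled model should work):

```python
# Structured output via ollama's `format`, as the deleted OllamaLLM did:
# constrain generation with the model's JSON schema, then validate.
from ollama import chat
from pydantic import BaseModel


class Answer(BaseModel):
    answer: str


res = chat(
    model="deepseek-r1:1.5b",
    messages=[{"role": "user", "content": "What's 2+2? Answer in JSON."}],
    format=Answer.model_json_schema(),
)
print(Answer.model_validate_json(res.message.content or ""))
```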
chatterer-0.1.2.dist-info/METADATA
DELETED
@@ -1,213 +0,0 @@
-Metadata-Version: 2.2
-Name: chatterer
-Version: 0.1.2
-Summary: The highest-level interface for various LLM APIs.
-Requires-Python: >=3.12
-Description-Content-Type: text/markdown
-Requires-Dist: openai>=1.63.2
-Provides-Extra: all
-Requires-Dist: anthropic>=0.49.0; extra == "all"
-Requires-Dist: google-generativeai>=0.8.4; extra == "all"
-Requires-Dist: instructor>=1.7.2; extra == "all"
-Requires-Dist: jsonref>=1.1.0; extra == "all"
-Requires-Dist: langchain-community>=0.3.18; extra == "all"
-Requires-Dist: ollama>=0.4.7; extra == "all"
-
-# chatterer
-
-`chatterer` is a Python library that provides a unified interface for interacting with various Language Model (LLM) backends. It abstracts over different providers such as OpenAI, Anthropic, DeepSeek, Ollama, and Langchain, allowing you to generate completions, stream responses, and even validate outputs using Pydantic models.
-
----
-
-## Features
-
-- **Unified LLM Interface**
-  Define a common interface (`LLM`) for generating completions and streaming responses regardless of the underlying provider.
-
-- **Multiple Backend Support**
-  Built-in support for:
-  - **InstructorLLM**: Integrates with OpenAI, Anthropic, and DeepSeek.
-  - **OllamaLLM**: Supports the Ollama model with optional streaming and formatting.
-  - **LangchainLLM**: Leverages Langchain's chat models with conversion utilities.
-
-- **Pydantic Integration**
-  Easily validate and structure LLM responses by leveraging Pydantic models with methods like `generate_pydantic` and `generate_pydantic_stream`.
-
----
-
-## Installation
-
-Assuming `chatterer` is published on PyPI, install it via pip:
-
-```bash
-pip install chatterer
-```
-
-Alternatively, clone the repository and install manually:
-
-```bash
-git clone https://github.com/yourusername/chatterer.git
-cd chatterer
-pip install -r requirements.txt
-```
-
----
-
-## Usage
-
-### Importing the Library
-
-You can import the core components directly from `chatterer`:
-
-```python
-from chatterer import LLM, InstructorLLM, OllamaLLM, LangchainLLM
-```
-
----
-
-### Example 1: Using InstructorLLM with OpenAI
-
-```python
-from chatterer import InstructorLLM
-from openai.types.chat import ChatCompletionMessageParam
-
-# Create an instance for OpenAI using the InstructorLLM wrapper
-llm = InstructorLLM.openai(call_kwargs={"model": "o3-mini"})
-
-# Define a conversation message list
-messages: list[ChatCompletionMessageParam] = [
-    {"role": "user", "content": "Hello, how can I help you?"}
-]
-
-# Generate a completion
-response = llm.generate(messages)
-print("Response:", response)
-
-# Stream the response incrementally
-print("Streaming response:")
-for chunk in llm.generate_stream(messages):
-    print(chunk, end="")
-```
-
----
-
-### Example 2: Using OllamaLLM
-
-```python
-from chatterer import OllamaLLM
-from openai.types.chat import ChatCompletionMessageParam
-
-# Initialize an OllamaLLM instance with streaming enabled
-llm = OllamaLLM(model="ollama-model", stream=True)
-
-messages: list[ChatCompletionMessageParam] = [
-    {"role": "user", "content": "Tell me a joke."}
-]
-
-# Generate and print the full response
-print("Response:", llm.generate(messages))
-
-# Stream the response chunk by chunk
-print("Streaming response:")
-for chunk in llm.generate_stream(messages):
-    print(chunk, end="")
-```
-
----
-
-### Example 3: Using LangchainLLM
-
-```python
-from chatterer import LangchainLLM
-from openai.types.chat import ChatCompletionMessageParam
-# Ensure you have a Langchain chat model instance; for example:
-from langchain_core.language_models.chat_models import BaseChatModel
-
-client: BaseChatModel = ...  # Initialize your Langchain chat model here
-llm = LangchainLLM(client=client)
-
-messages: list[ChatCompletionMessageParam] = [
-    {"role": "user", "content": "What is the weather like today?"}
-]
-
-# Generate a complete response
-response = llm.generate(messages)
-print("Response:", response)
-
-# Stream the response
-print("Streaming response:")
-for chunk in llm.generate_stream(messages):
-    print(chunk, end="")
-```
-
----
-
-### Example 4: Using Pydantic for Structured Outputs
-
-```python
-from pydantic import BaseModel
-from chatterer import InstructorLLM
-from openai.types.chat import ChatCompletionMessageParam
-
-# Define a response model
-class MyResponse(BaseModel):
-    response: str
-
-# Initialize the InstructorLLM instance
-llm = InstructorLLM.openai()
-
-messages: list[ChatCompletionMessageParam] = [
-    {"role": "user", "content": "Summarize this text."}
-]
-
-# Generate a structured response using a Pydantic model
-structured_response = llm.generate_pydantic(MyResponse, messages)
-print("Structured Response:", structured_response.response)
-```
-
----
-
-## API Overview
-
-### `LLM` (Abstract Base Class)
-
-- **Methods:**
-  - `generate(messages: Sequence[ChatCompletionMessageParam]) -> str`
-    Generate a complete text response from a list of messages.
-
-  - `generate_stream(messages: Sequence[ChatCompletionMessageParam]) -> Iterator[str]`
-    Stream the response incrementally.
-
-  - `generate_pydantic(response_model: Type[P], messages: Sequence[ChatCompletionMessageParam]) -> P`
-    Generate and validate the response using a Pydantic model.
-
-  - `generate_pydantic_stream(response_model: Type[P], messages: Sequence[ChatCompletionMessageParam]) -> Iterator[P]`
-    (Optional) Stream validated responses as Pydantic models.
-
-### `InstructorLLM`
-
-- Factory methods to create instances with various backends:
-  - `openai()`
-  - `anthropic()`
-  - `deepseek()`
-
-### `OllamaLLM`
-
-- Supports additional options such as:
-  - `model`, `stream`, `format`, `tools`, `options`, `keep_alive`
-
-### `LangchainLLM`
-
-- Integrates with Langchain's BaseChatModel and converts messages to a compatible format.
-
----
-
-## Contributing
-
-Contributions are highly encouraged! If you find a bug or have a feature request, please open an issue or submit a pull request on the repository. When contributing, please ensure your code adheres to the existing style and passes all tests.
-
----
-
-## License
-
-This project is licensed under the MIT License.
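Taken together, the two READMEs imply the migration path from the removed `LLM` classes to the new `Chatterer` facade. A before/after sketch based only on the examples shown in this diff:

```python
# 0.1.2 (removed):
#   from chatterer import InstructorLLM
#   llm = InstructorLLM.openai(call_kwargs={"model": "o3-mini"})
#   print(llm.generate([{"role": "user", "content": "Hello!"}]))

# 0.1.3:
from chatterer import Chatterer

chat = Chatterer.openai("gpt-4o-mini")
print(chat.generate("Hello!"))  # plain strings are accepted directly
```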
chatterer-0.1.2.dist-info/RECORD
DELETED
@@ -1,10 +0,0 @@
-chatterer/__init__.py,sha256=zi6zoXdpdWL35p-BYAKrsNWcqi_T3RqpflugLy_93mg,370
-chatterer/llms/__init__.py,sha256=3vUVxkeyBz9xi6nAYg2D7ZtSeuZumTMS5pdJHj3JDpI,435
-chatterer/llms/base.py,sha256=rdYJgpNeDoR-1Cmuea5rBYTJ8Jmtof3U6rFzcUcnG_Y,1196
-chatterer/llms/instructor.py,sha256=QP7rbyENN-4m7kNXGnP1r0kb6ioF0ozkEG0qe5BjQnc,3936
-chatterer/llms/langchain.py,sha256=cuKYWtunCNLNs15P_mlOIbxjvmClrYphSKqAqg3b05k,1723
-chatterer/llms/ollama.py,sha256=amhcGB-xK5ooDAlFzl0aln76x5e13NRKaUkzZ7rcEm0,2304
-chatterer-0.1.2.dist-info/METADATA,sha256=FRpuLBBieJbmIHcSM17V4DU3lw92JonmLCYU8gPYxFk,6192
-chatterer-0.1.2.dist-info/WHEEL,sha256=jB7zZ3N9hIM9adW7qlTAyycLYW9npaWKLRzaoVcLKcM,91
-chatterer-0.1.2.dist-info/top_level.txt,sha256=7nSQKP0bHxPRc7HyzdbKsJdkvPgYD0214o6slRizv9s,10
-chatterer-0.1.2.dist-info/RECORD,,
{chatterer-0.1.2.dist-info → chatterer-0.1.3.dist-info}/WHEEL
File without changes
{chatterer-0.1.2.dist-info → chatterer-0.1.3.dist-info}/top_level.txt
File without changes