chatterer 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- chatterer-0.1.0/PKG-INFO +211 -0
- chatterer-0.1.0/README.md +198 -0
- chatterer-0.1.0/chatterer/__init__.py +13 -0
- chatterer-0.1.0/chatterer/llms.py +291 -0
- chatterer-0.1.0/chatterer.egg-info/PKG-INFO +211 -0
- chatterer-0.1.0/chatterer.egg-info/SOURCES.txt +9 -0
- chatterer-0.1.0/chatterer.egg-info/dependency_links.txt +1 -0
- chatterer-0.1.0/chatterer.egg-info/requires.txt +7 -0
- chatterer-0.1.0/chatterer.egg-info/top_level.txt +1 -0
- chatterer-0.1.0/pyproject.toml +15 -0
- chatterer-0.1.0/setup.cfg +4 -0
chatterer-0.1.0/PKG-INFO
ADDED
@@ -0,0 +1,211 @@
Metadata-Version: 2.2
Name: chatterer
Version: 0.1.0
Summary: The highest-level interface for various LLM APIs.
Requires-Python: >=3.12
Description-Content-Type: text/markdown
Requires-Dist: openai>=1.63.2
Provides-Extra: all
Requires-Dist: anthropic>=0.46.0; extra == "all"
Requires-Dist: instructor>=1.7.2; extra == "all"
Requires-Dist: langchain-community>=0.3.18; extra == "all"
Requires-Dist: ollama>=0.4.7; extra == "all"
# chatterer

`chatterer` is a Python library that provides a unified interface for interacting with various Language Model (LLM) backends. It abstracts over providers such as OpenAI, Anthropic, DeepSeek, Ollama, and Langchain, letting you generate completions, stream responses, and validate outputs with Pydantic models.

---

## Features

- **Unified LLM Interface**
  A common abstract base class (`LLM`) for generating completions and streaming responses, regardless of the underlying provider (see the sketch after this list).

- **Multiple Backend Support**
  Built-in support for:
  - **InstructorLLM**: Integrates with OpenAI, Anthropic, and DeepSeek.
  - **OllamaLLM**: Supports local Ollama models with optional streaming and output formatting.
  - **LangchainLLM**: Wraps Langchain's chat models with message-conversion utilities.

- **Pydantic Integration**
  Validate and structure LLM responses with Pydantic models via `generate_pydantic` and `generate_pydantic_stream`.
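Because every backend implements the same `LLM` interface, calling code can stay provider-agnostic. A minimal sketch (the `summarize` helper is illustrative, not part of the library):

```python
from chatterer import LLM

def summarize(llm: LLM, text: str) -> str:
    # Works unchanged with InstructorLLM, OllamaLLM, or LangchainLLM.
    return llm.generate([{"role": "user", "content": f"Summarize: {text}"}])
```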
---

## Installation

`chatterer` is published on PyPI; install it with pip:

```bash
pip install chatterer
```

Only the OpenAI backend is pulled in by default. The Anthropic, Instructor, Langchain, and Ollama backends are grouped under the `all` extra: `pip install "chatterer[all]"`.

Alternatively, clone the repository and install from source:

```bash
git clone https://github.com/yourusername/chatterer.git
cd chatterer
pip install .
```
---

## Usage

### Importing the Library

Import the core components directly from `chatterer`:

```python
from chatterer import LLM, InstructorLLM, OllamaLLM, LangchainLLM
```

---

### Example 1: Using InstructorLLM with OpenAI

```python
from chatterer import InstructorLLM
from openai.types.chat import ChatCompletionMessageParam

# Create an OpenAI-backed instance via the InstructorLLM factory
llm = InstructorLLM.openai(call_kwargs={"model": "o3-mini"})

# Define a conversation message list
messages: list[ChatCompletionMessageParam] = [
    {"role": "user", "content": "Hello! What can you do?"}
]

# Generate a complete response
response = llm.generate(messages)
print("Response:", response)

# Stream the response incrementally
print("Streaming response:")
for chunk in llm.generate_stream(messages):
    print(chunk, end="")
```
---

### Example 2: Using OllamaLLM

```python
from chatterer import OllamaLLM
from openai.types.chat import ChatCompletionMessageParam

# Initialize an OllamaLLM instance; call_kwargs is a required field
# inherited from the LLM base class.
llm = OllamaLLM(call_kwargs={}, model="ollama-model", stream=True)

messages: list[ChatCompletionMessageParam] = [
    {"role": "user", "content": "Tell me a joke."}
]

# Generate and print the full response
print("Response:", llm.generate(messages))

# Stream the response chunk by chunk
print("Streaming response:")
for chunk in llm.generate_stream(messages):
    print(chunk, end="")
```
---

### Example 3: Using LangchainLLM

```python
from chatterer import LangchainLLM
from openai.types.chat import ChatCompletionMessageParam

# Ensure you have a Langchain chat model instance; for example:
from langchain_core.language_models.chat_models import BaseChatModel

client: BaseChatModel = ...  # Initialize your Langchain chat model here
llm = LangchainLLM(call_kwargs={}, client=client)

messages: list[ChatCompletionMessageParam] = [
    {"role": "user", "content": "What is the weather like today?"}
]

# Generate a complete response
response = llm.generate(messages)
print("Response:", response)

# Stream the response
print("Streaming response:")
for chunk in llm.generate_stream(messages):
    print(chunk, end="")
```
---

### Example 4: Using Pydantic for Structured Outputs

```python
from pydantic import BaseModel
from chatterer import InstructorLLM
from openai.types.chat import ChatCompletionMessageParam

# Define a response model
class MyResponse(BaseModel):
    response: str

# Initialize the InstructorLLM instance (call_kwargs defaults to {"model": "o3-mini"})
llm = InstructorLLM.openai()

messages: list[ChatCompletionMessageParam] = [
    {"role": "user", "content": "Summarize this text."}
]

# Generate a structured, validated response
structured_response = llm.generate_pydantic(MyResponse, messages)
print("Structured Response:", structured_response.response)
```
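The streaming counterpart of Example 4 is `generate_pydantic_stream`. A minimal sketch, assuming an `InstructorLLM` backend (it implements the method via `instructor`'s partial streaming, so intermediate objects may still have incomplete fields):

```python
from pydantic import BaseModel
from chatterer import InstructorLLM

class Summary(BaseModel):
    title: str
    bullet_points: list[str]

llm = InstructorLLM.openai()
messages = [{"role": "user", "content": "Summarize the history of Unicode."}]

# Each iteration yields a progressively more complete Summary snapshot.
for partial in llm.generate_pydantic_stream(Summary, messages):
    print(partial)
```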
---

## API Overview

### `LLM` (Abstract Base Class)

- **Methods:**
  - `generate(messages: Sequence[ChatCompletionMessageParam]) -> str`
    Generate a complete text response from a list of messages.

  - `generate_stream(messages: Sequence[ChatCompletionMessageParam]) -> Iterator[str]`
    Stream the response incrementally.

  - `generate_pydantic(response_model: Type[P], messages: Sequence[ChatCompletionMessageParam]) -> P`
    Generate and validate the response against a Pydantic model.

  - `generate_pydantic_stream(response_model: Type[P], messages: Sequence[ChatCompletionMessageParam]) -> Iterator[P]`
    (Optional) Stream partially validated Pydantic model instances; the base class raises `NotImplementedError`. A sketch of a custom backend follows this list.
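The base class also makes it straightforward to plug in your own backend: subclass `LLM`, implement the three abstract methods, and pass `call_kwargs` (a required dataclass field). A toy sketch (the `EchoLLM` class is illustrative only):

```python
from dataclasses import dataclass
from typing import Iterator, Sequence, Type

from pydantic import BaseModel
from chatterer import LLM

@dataclass
class EchoLLM(LLM):
    """Toy backend that echoes the last user message."""

    def generate(self, messages: Sequence[dict]) -> str:
        return "".join(self.generate_stream(messages))

    def generate_stream(self, messages: Sequence[dict]) -> Iterator[str]:
        if messages:
            yield str(messages[-1].get("content", ""))

    def generate_pydantic(self, response_model: Type[BaseModel], messages: Sequence[dict]) -> BaseModel:
        # Assumes the model has a single `response: str` field, as in Example 4.
        return response_model.model_validate({"response": self.generate(messages)})

llm = EchoLLM(call_kwargs={})
print(llm.generate([{"role": "user", "content": "ping"}]))  # -> ping
```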
### `InstructorLLM`

- Factory classmethods construct instances for specific backends:
  - `openai()`: OpenAI via `instructor` tool calling (default model `o3-mini`)
  - `anthropic()`: Anthropic (default model `claude-3-5-sonnet-20241022`)
  - `deepseek()`: DeepSeek's OpenAI-compatible API (default model `deepseek-chat`); requires the `DEEPSEEK_API_KEY` environment variable
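For example, the DeepSeek factory constructs an OpenAI-compatible client pointed at `https://api.deepseek.com/v1`, so the environment variable must be set before calling it (a sketch; the key value is a placeholder):

```python
import os
from chatterer import InstructorLLM

os.environ["DEEPSEEK_API_KEY"] = "sk-..."  # placeholder; use your real key
llm = InstructorLLM.deepseek()  # call_kwargs defaults to {"model": "deepseek-chat"}
print(llm.generate([{"role": "user", "content": "Hello!"}]))
```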
### `OllamaLLM`

- Dataclass fields configure each call: `model`, `stream`, `format`, `tools`, `options`, `keep_alive`. Entries in `call_kwargs` override the corresponding fields at call time, and `generate_pydantic` replaces `format` with the response model's JSON schema.
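A sketch of configuring those fields, assuming a local Ollama server (the model name is a placeholder for whatever you have pulled):

```python
from chatterer import OllamaLLM

llm = OllamaLLM(
    call_kwargs={},                 # entries here would override the fields below
    model="llama3.2",               # placeholder: any locally available Ollama model
    format="json",                  # ask the server to return JSON
    options={"temperature": 0.2},
    keep_alive="5m",
)
print(llm.generate([{"role": "user", "content": "Reply with a small JSON object."}]))
```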
### `LangchainLLM`

- Wraps any Langchain `BaseChatModel`, converting OpenAI-style message dicts with `langchain_community.adapters.openai.convert_openai_messages`. Structured output goes through `client.with_structured_output`.
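Any concrete chat model works as the `client`; a sketch using `ChatOpenAI` (note that `langchain-openai` is a separate install, not pulled in by chatterer's `all` extra):

```python
from chatterer import LangchainLLM
from langchain_openai import ChatOpenAI  # pip install langchain-openai

llm = LangchainLLM(call_kwargs={}, client=ChatOpenAI(model="gpt-4o-mini"))
print(llm.generate([{"role": "user", "content": "Name three prime numbers."}]))
```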
---

## Contributing

Contributions are welcome. If you find a bug or have a feature request, open an issue or submit a pull request. Please make sure your code matches the existing style and passes all tests.

---

## License

This project is licensed under the MIT License.
chatterer-0.1.0/README.md
ADDED
@@ -0,0 +1,198 @@
(Content identical to the README body embedded in chatterer-0.1.0/PKG-INFO above.)
chatterer-0.1.0/chatterer/llms.py
ADDED
@@ -0,0 +1,291 @@
from abc import ABC, abstractmethod
from dataclasses import dataclass
from os import environ
from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
    Iterator,
    Literal,
    Mapping,
    Optional,
    Self,
    Sequence,
    Type,
    TypeVar,
)

from openai import OpenAI
from openai.types.chat import ChatCompletionMessageParam
from pydantic import BaseModel, create_model
from pydantic.json_schema import JsonSchemaValue

if TYPE_CHECKING:
    from instructor import Instructor
    from langchain_core.language_models.chat_models import BaseChatModel
    from ollama import Options, Tool

P = TypeVar("P", bound=BaseModel)


@dataclass
class LLM(ABC):
    call_kwargs: dict[str, Any]

    @abstractmethod
    def generate(self, messages: Sequence[ChatCompletionMessageParam]) -> str: ...

    @abstractmethod
    def generate_stream(self, messages: Sequence[ChatCompletionMessageParam]) -> Iterator[str]: ...

    @abstractmethod
    def generate_pydantic(
        self,
        response_model: Type[P],
        messages: Sequence[ChatCompletionMessageParam],
    ) -> P: ...

    def generate_pydantic_stream(
        self,
        response_model: Type[P],
        messages: Sequence[ChatCompletionMessageParam],
    ) -> Iterator[P]:
        raise NotImplementedError


@dataclass
class InstructorLLM(LLM):
    inst: "Instructor"

    @property
    def dependency(self) -> list[str]:
        return ["instructor"]

    @classmethod
    def openai(cls, call_kwargs: dict[str, Any] = {"model": "o3-mini"}) -> Self:
        from instructor import Mode, from_openai

        return cls(
            inst=from_openai(OpenAI(), Mode.TOOLS_STRICT),
            call_kwargs=call_kwargs,
        )

    @classmethod
    def anthropic(
        cls,
        call_kwargs: dict[str, Any] = {
            "temperature": 0.7,
            "max_tokens": 8192,
            "model": "claude-3-5-sonnet-20241022",
        },
    ) -> Self:
        from anthropic import Anthropic
        from instructor import Mode, from_anthropic

        return cls(
            inst=from_anthropic(client=Anthropic(), mode=Mode.ANTHROPIC_TOOLS),
            call_kwargs=call_kwargs,
        )

    @classmethod
    def deepseek(cls, call_kwargs: dict[str, Any] = {"model": "deepseek-chat"}) -> Self:
        from instructor import Mode, from_openai

        return cls(
            inst=from_openai(
                OpenAI(
                    base_url="https://api.deepseek.com/v1",
                    api_key=environ["DEEPSEEK_API_KEY"],
                ),
                Mode.TOOLS_STRICT,
            ),
            call_kwargs=call_kwargs,
        )

    def generate(self, messages: Sequence[ChatCompletionMessageParam]) -> str:
        res = self.inst.chat.completions.create(
            response_model=create_model("Response", response=(str, ...)),
            messages=list(messages),
            **self.call_kwargs,
        )
        return str(getattr(res, "response", "") or "")

    def generate_stream(self, messages: Sequence[ChatCompletionMessageParam]) -> Iterator[str]:
        last_content: str = ""
        for res in self.inst.chat.completions.create_partial(
            response_model=create_model("Response", response=(str, ...)),
            messages=list(messages),
            **self.call_kwargs,
        ):
            content = str(getattr(res, "response", "") or "")
            delta: str = content.removeprefix(last_content)
            if not delta:
                continue
            last_content = content
            yield delta

    def generate_pydantic(
        self,
        response_model: Type[P],
        messages: Sequence[ChatCompletionMessageParam],
    ) -> P:
        return self.inst.chat.completions.create(
            response_model=response_model,
            messages=list(messages),
            **self.call_kwargs,
        )

    def generate_pydantic_stream(
        self,
        response_model: Type[P],
        messages: Sequence[ChatCompletionMessageParam],
    ) -> Iterator[P]:
        for res in self.inst.chat.completions.create_partial(
            response_model=response_model,
            messages=list(messages),
            **self.call_kwargs,
        ):
            yield res


@dataclass
class OllamaLLM(LLM):
    model: str
    tools: Optional[Sequence[Mapping[str, Any] | "Tool" | Callable]] = None
    stream: bool = False
    format: Optional[Literal["", "json"] | JsonSchemaValue] = None
    options: Optional[Mapping[str, Any] | "Options"] = None
    keep_alive: Optional[float | str] = None

    def generate(self, messages: Sequence[ChatCompletionMessageParam]) -> str:
        return "".join(self.generate_stream(messages))

    def generate_stream(self, messages: Sequence[ChatCompletionMessageParam]) -> Iterator[str]:
        from ollama import chat

        model = str(self.call_kwargs.get("model", self.model))
        format = self.call_kwargs.get("format", self.format)
        options = self.call_kwargs.get("options", self.options)
        keep_alive = self.call_kwargs.get("keep_alive", self.keep_alive)
        tools = self.call_kwargs.get("tools", self.tools)
        return (
            res.message.content or ""
            for res in chat(
                model=model,
                messages=messages,
                tools=tools,
                stream=True,
                format=format,
                options=options,
                keep_alive=keep_alive,
            )
        )

    def generate_pydantic(
        self,
        response_model: Type[P],
        messages: Sequence[ChatCompletionMessageParam],
    ) -> P:
        from ollama import chat

        model = str(self.call_kwargs.get("model", self.model))
        format = response_model.model_json_schema()
        options = self.call_kwargs.get("options", self.options)
        keep_alive = self.call_kwargs.get("keep_alive", self.keep_alive)
        return response_model.model_validate_json(
            chat(
                model=model,
                messages=messages,
                tools=None,
                stream=False,
                format=format,
                options=options,
                keep_alive=keep_alive,
            ).message.content
            or ""
        )


@dataclass
class LangchainLLM(LLM):
    client: "BaseChatModel"

    def generate(self, messages: Sequence[ChatCompletionMessageParam]) -> str:
        from langchain_community.adapters.openai import convert_openai_messages

        content = self.client.invoke(
            convert_openai_messages([dict(msg) for msg in messages])
        ).content
        if isinstance(content, str):
            return content
        else:
            return "".join(part for part in content if isinstance(part, str))

    def generate_stream(self, messages: Sequence[ChatCompletionMessageParam]) -> Iterator[str]:
        from langchain_community.adapters.openai import convert_openai_messages

        for chunk in self.client.stream(
            convert_openai_messages([dict(msg) for msg in messages])
        ):
            content = chunk.content
            if isinstance(content, str):
                yield content
            elif isinstance(content, list):
                for part in content:
                    if isinstance(part, str):
                        yield part
                    else:
                        continue
            else:
                continue

    def generate_pydantic(
        self,
        response_model: Type[P],
        messages: Sequence[ChatCompletionMessageParam],
    ) -> P:
        from langchain_community.adapters.openai import convert_openai_messages

        result = self.client.with_structured_output(response_model).invoke(
            convert_openai_messages([dict(msg) for msg in messages])
        )
        if isinstance(result, response_model):
            return result
        else:
            return response_model.model_validate(result)
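A note on `InstructorLLM.generate_stream` above: `create_partial` yields cumulative snapshots of the response, and the method converts them to incremental deltas with `str.removeprefix`. A self-contained sketch of just that conversion (the snapshot list stands in for `create_partial` output):

```python
def deltas(snapshots: list[str]):
    """Yield only the newly appended suffix of each cumulative snapshot."""
    last = ""
    for content in snapshots:
        delta = content.removeprefix(last)
        if not delta:
            continue
        last = content
        yield delta

assert list(deltas(["He", "Hell", "Hello!"])) == ["He", "ll", "o!"]
```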
chatterer-0.1.0/chatterer.egg-info/PKG-INFO
ADDED
@@ -0,0 +1,211 @@
(Byte-identical to chatterer-0.1.0/PKG-INFO above.)
chatterer-0.1.0/chatterer.egg-info/dependency_links.txt
ADDED
@@ -0,0 +1 @@

chatterer-0.1.0/chatterer.egg-info/top_level.txt
ADDED
@@ -0,0 +1 @@
chatterer
chatterer-0.1.0/pyproject.toml
ADDED
@@ -0,0 +1,15 @@
[project]
name = "chatterer"
version = "0.1.0"
description = "The highest-level interface for various LLM APIs."
readme = "README.md"
requires-python = ">=3.12"
dependencies = ["openai>=1.63.2"]

[project.optional-dependencies]
all = [
    "anthropic>=0.46.0",
    "instructor>=1.7.2",
    "langchain-community>=0.3.18",
    "ollama>=0.4.7",
]