chatterer 0.1.3__tar.gz → 0.1.5__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {chatterer-0.1.3 → chatterer-0.1.5}/PKG-INFO +1 -1
- {chatterer-0.1.3 → chatterer-0.1.5}/chatterer/__init__.py +1 -2
- {chatterer-0.1.3 → chatterer-0.1.5}/chatterer/language_model.py +45 -27
- {chatterer-0.1.3 → chatterer-0.1.5}/chatterer/strategies/atom_of_thoughts.py +3 -3
- {chatterer-0.1.3 → chatterer-0.1.5}/chatterer.egg-info/PKG-INFO +1 -1
- {chatterer-0.1.3 → chatterer-0.1.5}/pyproject.toml +1 -1
- {chatterer-0.1.3 → chatterer-0.1.5}/README.md +0 -0
- {chatterer-0.1.3 → chatterer-0.1.5}/chatterer/strategies/__init__.py +0 -0
- {chatterer-0.1.3 → chatterer-0.1.5}/chatterer/strategies/base.py +0 -0
- {chatterer-0.1.3 → chatterer-0.1.5}/chatterer.egg-info/SOURCES.txt +0 -0
- {chatterer-0.1.3 → chatterer-0.1.5}/chatterer.egg-info/dependency_links.txt +0 -0
- {chatterer-0.1.3 → chatterer-0.1.5}/chatterer.egg-info/requires.txt +0 -0
- {chatterer-0.1.3 → chatterer-0.1.5}/chatterer.egg-info/top_level.txt +0 -0
- {chatterer-0.1.3 → chatterer-0.1.5}/setup.cfg +0 -0
{chatterer-0.1.3 → chatterer-0.1.5}/chatterer/__init__.py

@@ -1,4 +1,4 @@
-from .language_model import Chatterer, InvokeKwargs
+from .language_model import Chatterer
 from .strategies import (
     AoTPipeline,
     AoTStrategy,
@@ -12,7 +12,6 @@ from .strategies import (
 __all__ = [
     "BaseStrategy",
     "Chatterer",
-    "InvokeKwargs",
     "AoTStrategy",
     "AoTPipeline",
     "BaseAoTPrompter",
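The practical effect of this change: `InvokeKwargs` is no longer re-exported from the package root, so `from chatterer import InvokeKwargs` raises `ImportError` in 0.1.5. A minimal sketch of code that stays compatible, using only names visible in this diff:

```python
from chatterer import Chatterer  # an InvokeKwargs import would now fail

# Options formerly bundled in an InvokeKwargs dict (config, stop, extra
# kwargs) are passed directly to the generation methods instead.
chatterer = Chatterer.openai()
print(chatterer("Say hello."))  # __call__ delegates to generate()
```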
{chatterer-0.1.3 → chatterer-0.1.5}/chatterer/language_model.py

@@ -2,62 +2,80 @@ from typing import (
     Any,
     AsyncIterator,
     Iterator,
-    NotRequired,
     Optional,
     Self,
     Type,
     TypeAlias,
-    TypedDict,
     TypeVar,
 )

 from langchain_core.language_models.base import LanguageModelInput
 from langchain_core.language_models.chat_models import BaseChatModel
 from langchain_core.runnables.config import RunnableConfig
-from pydantic import BaseModel
+from pydantic import BaseModel, Field

 PydanticModelT = TypeVar("PydanticModelT", bound=BaseModel)
 ContentType: TypeAlias = str | list[str | dict[str, Any]]
 StructuredOutputType: TypeAlias = dict[str, Any] | BaseModel


-class InvokeKwargs(TypedDict):
-    config: NotRequired[RunnableConfig]
-    stop: NotRequired[list[str]]
-    kwargs: NotRequired[dict[str, Any]]
-
-
 class Chatterer(BaseModel):
     """Language model for generating text from a given input."""

     client: BaseChatModel
+    structured_output_kwargs: dict[str, Any] = Field(default_factory=dict)

     def __call__(self, messages: LanguageModelInput) -> str:
         return self.generate(messages)

     @classmethod
-    def openai(
+    def openai(
+        cls,
+        model: str = "gpt-4o-mini",
+        structured_output_kwargs: Optional[dict[str, Any]] = {"strict": True},
+    ) -> Self:
         from langchain_openai import ChatOpenAI

-        return cls(client=ChatOpenAI(
+        return cls(client=ChatOpenAI(model=model), structured_output_kwargs=structured_output_kwargs or {})

     @classmethod
-    def anthropic(
+    def anthropic(
+        cls,
+        model_name: str = "claude-3-7-sonnet-20250219",
+        structured_output_kwargs: Optional[dict[str, Any]] = None,
+    ) -> Self:
         from langchain_anthropic import ChatAnthropic

-        return cls(
+        return cls(
+            client=ChatAnthropic(model_name=model_name, timeout=None, stop=None),
+            structured_output_kwargs=structured_output_kwargs or {},
+        )

     @classmethod
-    def google(
+    def google(
+        cls,
+        model: str = "gemini-2.0-flash",
+        structured_output_kwargs: Optional[dict[str, Any]] = None,
+    ) -> Self:
         from langchain_google_genai import ChatGoogleGenerativeAI

-        return cls(
+        return cls(
+            client=ChatGoogleGenerativeAI(model=model),
+            structured_output_kwargs=structured_output_kwargs or {},
+        )

     @classmethod
-    def ollama(
+    def ollama(
+        cls,
+        model: str = "deepseek-r1:1.5b",
+        structured_output_kwargs: Optional[dict[str, Any]] = None,
+    ) -> Self:
         from langchain_ollama import ChatOllama

-        return cls(
+        return cls(
+            client=ChatOllama(model=model),
+            structured_output_kwargs=structured_output_kwargs or {},
+        )

     def generate(
         self,
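All four factories now accept a `structured_output_kwargs` dict that is stored on the instance and later splatted into `client.with_structured_output()` (see the hunks below). A short usage sketch based on the defaults above; the `method="json_mode"` override is an assumption about what `ChatOpenAI.with_structured_output()` accepts in your langchain-openai version, not something this diff sets:

```python
from chatterer import Chatterer

chat = Chatterer.openai()                           # gpt-4o-mini with {"strict": True}
claude = Chatterer.anthropic()                      # claude-3-7-sonnet-20250219, no extra kwargs
local = Chatterer.ollama(model="deepseek-r1:1.5b")

# Any keyword with_structured_output() understands can be injected:
json_mode = Chatterer.openai(structured_output_kwargs={"method": "json_mode"})

# None is normalized to {} via the `structured_output_kwargs or {}` pattern:
plain = Chatterer.openai(structured_output_kwargs=None)
```

One design note: `{"strict": True}` as a default argument is a shared mutable default, normally a Python pitfall; it is harmless here only because the factory just reads it.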
@@ -133,9 +151,9 @@ class Chatterer(BaseModel):
         stop: Optional[list[str]] = None,
         **kwargs: Any,
     ) -> PydanticModelT:
-        result: StructuredOutputType = self.client.with_structured_output(
-
-        )
+        result: StructuredOutputType = self.client.with_structured_output(
+            response_model, **self.structured_output_kwargs
+        ).invoke(input=messages, config=config, stop=stop, **kwargs)
         if isinstance(result, response_model):
             return result
         else:
@@ -149,9 +167,9 @@ class Chatterer(BaseModel):
         stop: Optional[list[str]] = None,
         **kwargs: Any,
     ) -> PydanticModelT:
-        result: StructuredOutputType = await self.client.with_structured_output(
-
-        )
+        result: StructuredOutputType = await self.client.with_structured_output(
+            response_model, **self.structured_output_kwargs
+        ).ainvoke(input=messages, config=config, stop=stop, **kwargs)
         if isinstance(result, response_model):
             return result
         else:
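Both the synchronous and asynchronous hunks rebuild the same chain: splat the stored kwargs into `with_structured_output()`, then `invoke`/`ainvoke` with the original messages, config, and stop sequences. The Chatterer-level method names are not visible in these hunks, so this sketch drives the underlying LangChain pattern directly, with a hypothetical `Answer` model:

```python
from langchain_openai import ChatOpenAI
from pydantic import BaseModel


class Answer(BaseModel):
    """Hypothetical response model for illustration."""

    reasoning: str
    value: int


client = ChatOpenAI(model="gpt-4o-mini")
structured_output_kwargs = {"strict": True}  # Chatterer.openai()'s default

# Equivalent of the new code path: the kwargs configure the structured-output
# runnable, then the messages flow into .invoke().
result = client.with_structured_output(Answer, **structured_output_kwargs).invoke(
    "What is 6 * 7? Show your reasoning."
)
assert isinstance(result, Answer)
```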
@@ -171,7 +189,7 @@ class Chatterer(BaseModel):
             raise ImportError("Please install `instructor` with `pip install instructor` to use this feature.")

         partial_response_model = instructor.Partial[response_model]
-        for chunk in self.client.with_structured_output(partial_response_model).stream(
+        for chunk in self.client.with_structured_output(partial_response_model, **self.structured_output_kwargs).stream(
             input=messages, config=config, stop=stop, **kwargs
         ):
             yield response_model.model_validate(chunk)
@@ -190,9 +208,9 @@ class Chatterer(BaseModel):
             raise ImportError("Please install `instructor` with `pip install instructor` to use this feature.")

         partial_response_model = instructor.Partial[response_model]
-        async for chunk in self.client.with_structured_output(
-
-        ):
+        async for chunk in self.client.with_structured_output(
+            partial_response_model, **self.structured_output_kwargs
+        ).astream(input=messages, config=config, stop=stop, **kwargs):
             yield response_model.model_validate(chunk)


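The two streaming hunks make the same change for `instructor.Partial` models, which relax every field to optional so that partially generated JSON still validates on each chunk. A sketch of the pattern in isolation, assuming `instructor` and `langchain-openai` are installed; `Report` is a hypothetical model:

```python
import instructor
from langchain_openai import ChatOpenAI
from pydantic import BaseModel


class Report(BaseModel):
    """Hypothetical model for illustration."""

    title: str
    summary: str


client = ChatOpenAI(model="gpt-4o-mini")
partial_report = instructor.Partial[Report]  # every field becomes optional

# Each chunk is a progressively more complete Report; the updated Chatterer
# code forwards structured_output_kwargs into with_structured_output() here too.
for chunk in client.with_structured_output(partial_report).stream(
    "Write a one-line status report titled 'Build'."
):
    print(chunk)
```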
{chatterer-0.1.3 → chatterer-0.1.5}/chatterer/strategies/atom_of_thoughts.py

@@ -34,7 +34,7 @@ class SubQuestionNode(BaseModel):

     question: str = Field(description="A sub-question string that arises from decomposition.")
     answer: Optional[str] = Field(description="Answer for this sub-question, if resolved.")
-    depend: list[int] = Field(
+    depend: list[int] = Field(description="Indices of sub-questions that this node depends on.")


 class RecursiveDecomposeResponse(BaseModel):
@@ -42,7 +42,7 @@ class RecursiveDecomposeResponse(BaseModel):

     thought: str = Field(description="Reasoning about decomposition.")
     final_answer: str = Field(description="Best answer to the main question.")
-    sub_questions: list[SubQuestionNode] = Field(
+    sub_questions: list[SubQuestionNode] = Field(description="Root-level sub-questions.")


 class DirectResponse(BaseModel):
@@ -76,7 +76,7 @@ class LabelResponse(BaseModel):

     thought: str = Field(description="Explanation or reasoning about labeling.")
     sub_questions: list[SubQuestionNode] = Field(
-
+        description="Refined list of sub-questions with corrected dependencies."
     )
     # Some tasks also keep the final answer, but we focus on sub-questions.

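These restored `description=` strings are not cosmetic: Pydantic copies them into the JSON schema that structured-output calls send to the model, so they act as per-field instructions. A quick check using the `SubQuestionNode` fields exactly as this diff shows them (the rest of the class is not reproduced here):

```python
from typing import Optional

from pydantic import BaseModel, Field


class SubQuestionNode(BaseModel):
    question: str = Field(description="A sub-question string that arises from decomposition.")
    answer: Optional[str] = Field(description="Answer for this sub-question, if resolved.")
    depend: list[int] = Field(description="Indices of sub-questions that this node depends on.")


schema = SubQuestionNode.model_json_schema()
print(schema["properties"]["depend"]["description"])
# -> Indices of sub-questions that this node depends on.
```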
|