chatterer-0.1.3-py3-none-any.whl → chatterer-0.1.4-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as they appear in their public registry, and is provided for informational purposes only.
chatterer/__init__.py CHANGED
@@ -1,4 +1,4 @@
- from .language_model import Chatterer, InvokeKwargs
+ from .language_model import Chatterer
  from .strategies import (
      AoTPipeline,
      AoTStrategy,
@@ -12,7 +12,6 @@ from .strategies import (
  __all__ = [
      "BaseStrategy",
      "Chatterer",
-     "InvokeKwargs",
      "AoTStrategy",
      "AoTPipeline",
      "BaseAoTPrompter",
chatterer/language_model.py CHANGED
@@ -2,62 +2,80 @@ from typing import (
      Any,
      AsyncIterator,
      Iterator,
-     NotRequired,
      Optional,
      Self,
      Type,
      TypeAlias,
-     TypedDict,
      TypeVar,
  )

  from langchain_core.language_models.base import LanguageModelInput
  from langchain_core.language_models.chat_models import BaseChatModel
  from langchain_core.runnables.config import RunnableConfig
- from pydantic import BaseModel
+ from pydantic import BaseModel, Field

  PydanticModelT = TypeVar("PydanticModelT", bound=BaseModel)
  ContentType: TypeAlias = str | list[str | dict[str, Any]]
  StructuredOutputType: TypeAlias = dict[str, Any] | BaseModel


- class InvokeKwargs(TypedDict):
-     config: NotRequired[RunnableConfig]
-     stop: NotRequired[list[str]]
-     kwargs: NotRequired[dict[str, Any]]
-
-
  class Chatterer(BaseModel):
      """Language model for generating text from a given input."""

      client: BaseChatModel
+     structured_output_kwargs: dict[str, Any] = Field(default_factory=dict)

      def __call__(self, messages: LanguageModelInput) -> str:
          return self.generate(messages)

      @classmethod
-     def openai(cls, name: str = "gpt-4o-mini") -> Self:
+     def openai(
+         cls,
+         name: str = "gpt-4o-mini",
+         structured_output_kwargs: Optional[dict[str, Any]] = {"strict": True},
+     ) -> Self:
          from langchain_openai import ChatOpenAI

-         return cls(client=ChatOpenAI(name=name))
+         return cls(client=ChatOpenAI(name=name), structured_output_kwargs=structured_output_kwargs or {})

      @classmethod
-     def anthropic(cls, model_name: str = "claude-3-7-sonnet-20250219") -> Self:
+     def anthropic(
+         cls,
+         model_name: str = "claude-3-7-sonnet-20250219",
+         structured_output_kwargs: Optional[dict[str, Any]] = None,
+     ) -> Self:
          from langchain_anthropic import ChatAnthropic

-         return cls(client=ChatAnthropic(model_name=model_name, timeout=None, stop=None))
+         return cls(
+             client=ChatAnthropic(model_name=model_name, timeout=None, stop=None),
+             structured_output_kwargs=structured_output_kwargs or {},
+         )

      @classmethod
-     def google(cls, model: str = "gemini-2.0-flash") -> Self:
+     def google(
+         cls,
+         model: str = "gemini-2.0-flash",
+         structured_output_kwargs: Optional[dict[str, Any]] = None,
+     ) -> Self:
          from langchain_google_genai import ChatGoogleGenerativeAI

-         return cls(client=ChatGoogleGenerativeAI(model=model))
+         return cls(
+             client=ChatGoogleGenerativeAI(model=model),
+             structured_output_kwargs=structured_output_kwargs or {},
+         )

      @classmethod
-     def ollama(cls, model: str = "deepseek-r1:1.5b") -> Self:
+     def ollama(
+         cls,
+         model: str = "deepseek-r1:1.5b",
+         structured_output_kwargs: Optional[dict[str, Any]] = None,
+     ) -> Self:
          from langchain_ollama import ChatOllama

-         return cls(client=ChatOllama(model=model))
+         return cls(
+             client=ChatOllama(model=model),
+             structured_output_kwargs=structured_output_kwargs or {},
+         )

      def generate(
          self,
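Every factory now threads structured_output_kwargs into the new Field on Chatterer, with `or {}` normalizing None; only the OpenAI constructor opts into strict schema mode by default. A short sketch of the construction-time behavior implied by the diff (running it requires the corresponding langchain provider packages and API keys):

    from chatterer import Chatterer

    chat = Chatterer.openai()          # structured_output_kwargs == {"strict": True}
    relaxed = Chatterer.openai(structured_output_kwargs=None)  # normalized to {}
    claude = Chatterer.anthropic()     # structured_output_kwargs == {}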
@@ -133,9 +151,9 @@ class Chatterer(BaseModel):
          stop: Optional[list[str]] = None,
          **kwargs: Any,
      ) -> PydanticModelT:
-         result: StructuredOutputType = self.client.with_structured_output(response_model).invoke(
-             input=messages, config=config, stop=stop, **kwargs
-         )
+         result: StructuredOutputType = self.client.with_structured_output(
+             response_model, **self.structured_output_kwargs
+         ).invoke(input=messages, config=config, stop=stop, **kwargs)
          if isinstance(result, response_model):
              return result
          else:
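The 0.1.3 code never consulted instance configuration when building the structured-output runnable; 0.1.4 forwards structured_output_kwargs on every call, so `Chatterer.openai()`'s `{"strict": True}` now reaches `with_structured_output`. A minimal sketch, assuming this method is named generate_pydantic and takes (response_model, messages) in that order (neither detail is visible in the hunk):

    from pydantic import BaseModel
    from chatterer import Chatterer

    class City(BaseModel):
        name: str
        country: str

    chat = Chatterer.openai()
    # {"strict": True} is now passed through to client.with_structured_output().
    city = chat.generate_pydantic(City, "Name one city in France, with its country.")
    print(city.name, city.country)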
@@ -149,9 +167,9 @@
          stop: Optional[list[str]] = None,
          **kwargs: Any,
      ) -> PydanticModelT:
-         result: StructuredOutputType = await self.client.with_structured_output(response_model).ainvoke(
-             input=messages, config=config, stop=stop, **kwargs
-         )
+         result: StructuredOutputType = await self.client.with_structured_output(
+             response_model, **self.structured_output_kwargs
+         ).ainvoke(input=messages, config=config, stop=stop, **kwargs)
          if isinstance(result, response_model):
              return result
          else:
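The async path gets the identical change. Assuming the coroutine is named agenerate_pydantic (the name sits outside the hunk), usage mirrors the sync sketch above:

    import asyncio

    async def main() -> None:
        # `chat` and `City` as defined in the previous sketch.
        city = await chat.agenerate_pydantic(City, "Name one city in Japan, with its country.")
        print(city)

    asyncio.run(main())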
@@ -171,7 +189,7 @@
              raise ImportError("Please install `instructor` with `pip install instructor` to use this feature.")

          partial_response_model = instructor.Partial[response_model]
-         for chunk in self.client.with_structured_output(partial_response_model).stream(
+         for chunk in self.client.with_structured_output(partial_response_model, **self.structured_output_kwargs).stream(
              input=messages, config=config, stop=stop, **kwargs
          ):
              yield response_model.model_validate(chunk)
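The streaming variant wraps instructor.Partial[response_model] so the model can emit incrementally filled objects, and it now applies the same structured_output_kwargs to that partial schema. A hypothetical call (method name assumed; requires `pip install instructor`):

    # Yields progressively more complete objects as tokens stream in.
    for partial in chat.generate_pydantic_stream(City, "Name one city in Italy, with its country."):
        print(partial)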
@@ -190,9 +208,9 @@
              raise ImportError("Please install `instructor` with `pip install instructor` to use this feature.")

          partial_response_model = instructor.Partial[response_model]
-         async for chunk in self.client.with_structured_output(partial_response_model).astream(
-             input=messages, config=config, stop=stop, **kwargs
-         ):
+         async for chunk in self.client.with_structured_output(
+             partial_response_model, **self.structured_output_kwargs
+         ).astream(input=messages, config=config, stop=stop, **kwargs):
              yield response_model.model_validate(chunk)


chatterer/strategies/atom_of_thoughts.py CHANGED
@@ -34,7 +34,7 @@ class SubQuestionNode(BaseModel):

      question: str = Field(description="A sub-question string that arises from decomposition.")
      answer: Optional[str] = Field(description="Answer for this sub-question, if resolved.")
-     depend: list[int] = Field(default_factory=list, description="Indices of sub-questions that this node depends on.")
+     depend: list[int] = Field(description="Indices of sub-questions that this node depends on.")


  class RecursiveDecomposeResponse(BaseModel):
@@ -42,7 +42,7 @@ class RecursiveDecomposeResponse(BaseModel):

      thought: str = Field(description="Reasoning about decomposition.")
      final_answer: str = Field(description="Best answer to the main question.")
-     sub_questions: list[SubQuestionNode] = Field(default_factory=list, description="Root-level sub-questions.")
+     sub_questions: list[SubQuestionNode] = Field(description="Root-level sub-questions.")


  class DirectResponse(BaseModel):
@@ -76,7 +76,7 @@ class LabelResponse(BaseModel):

      thought: str = Field(description="Explanation or reasoning about labeling.")
      sub_questions: list[SubQuestionNode] = Field(
-         default_factory=list, description="Refined list of sub-questions with corrected dependencies."
+         description="Refined list of sub-questions with corrected dependencies."
      )
      # Some tasks also keep the final answer, but we focus on sub-questions.

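Dropping default_factory=list makes depend and sub_questions required fields in the Pydantic-generated JSON schema. That matches the new `{"strict": True}` default for OpenAI structured output, which expects every property to be listed under "required"; a likely motivation, though the diff itself does not say so. A self-contained sketch of the schema difference:

    from pydantic import BaseModel, Field

    class WithDefault(BaseModel):
        depend: list[int] = Field(default_factory=list)

    class AlwaysRequired(BaseModel):
        depend: list[int] = Field(description="Indices of sub-questions that this node depends on.")

    print(WithDefault.model_json_schema().get("required"))     # None: 'depend' may be omitted
    print(AlwaysRequired.model_json_schema().get("required"))  # ['depend']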
chatterer-0.1.4.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: chatterer
- Version: 0.1.3
+ Version: 0.1.4
  Summary: The highest-level interface for various LLM APIs.
  Requires-Python: >=3.12
  Description-Content-Type: text/markdown
chatterer-0.1.4.dist-info/RECORD ADDED
@@ -0,0 +1,9 @@
+ chatterer/__init__.py,sha256=jLwLwmF65vqnuC22lYs6CZT105DqJuNBiladUg902HM,427
+ chatterer/language_model.py,sha256=ngcKXqevlnUWTqdrw7_i0Y58_vCuuCrVvzkhTeFWOn4,39229
+ chatterer/strategies/__init__.py,sha256=AKDBC06AdNjPskERssCOsz3qGfFN31mxklmnfhHatJo,389
+ chatterer/strategies/atom_of_thoughts.py,sha256=VywNKwp_6QB2gTmjbb_YDmrlUmMMc-lyVFENKULWtuU,24882
+ chatterer/strategies/base.py,sha256=b2gMPqodp97OP1dkHfj0UqixjdjVhmTw_V5qJ7i2S6g,427
+ chatterer-0.1.4.dist-info/METADATA,sha256=U-d7QPaVIktFVppsqNkGsG7XAbO6nqm1zbisY_rZQzY,3373
+ chatterer-0.1.4.dist-info/WHEEL,sha256=jB7zZ3N9hIM9adW7qlTAyycLYW9npaWKLRzaoVcLKcM,91
+ chatterer-0.1.4.dist-info/top_level.txt,sha256=7nSQKP0bHxPRc7HyzdbKsJdkvPgYD0214o6slRizv9s,10
+ chatterer-0.1.4.dist-info/RECORD,,
chatterer-0.1.3.dist-info/RECORD DELETED
@@ -1,9 +0,0 @@
- chatterer/__init__.py,sha256=8j0NJU6mH-aaaCKyQwvJif25n_p7plCCR7pkcij4daY,462
- chatterer/language_model.py,sha256=LgnUtETjvandKi1GrcXwTLDInhuFVGoFZNqfM0P0T4o,38470
- chatterer/strategies/__init__.py,sha256=AKDBC06AdNjPskERssCOsz3qGfFN31mxklmnfhHatJo,389
- chatterer/strategies/atom_of_thoughts.py,sha256=2cHq6kfB_RAb4NbM396NpD40E2aiavZKblMCtUi-u1Q,24948
- chatterer/strategies/base.py,sha256=b2gMPqodp97OP1dkHfj0UqixjdjVhmTw_V5qJ7i2S6g,427
- chatterer-0.1.3.dist-info/METADATA,sha256=1cmKFrkea0VqUyQiUaHCqpMYHwHTr6RzcWYk4syMZfw,3373
- chatterer-0.1.3.dist-info/WHEEL,sha256=jB7zZ3N9hIM9adW7qlTAyycLYW9npaWKLRzaoVcLKcM,91
- chatterer-0.1.3.dist-info/top_level.txt,sha256=7nSQKP0bHxPRc7HyzdbKsJdkvPgYD0214o6slRizv9s,10
- chatterer-0.1.3.dist-info/RECORD,,