chatterer-0.1.0-py3-none-any.whl → chatterer-0.1.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- chatterer/__init__.py +24 -13
- chatterer/llms/__init__.py +20 -0
- chatterer/llms/base.py +42 -0
- chatterer/llms/instructor.py +127 -0
- chatterer/llms/langchain.py +49 -0
- chatterer/llms/ollama.py +69 -0
- {chatterer-0.1.0.dist-info → chatterer-0.1.2.dist-info}/METADATA +4 -2
- chatterer-0.1.2.dist-info/RECORD +10 -0
- {chatterer-0.1.0.dist-info → chatterer-0.1.2.dist-info}/WHEEL +1 -1
- chatterer/llms.py +0 -291
- chatterer-0.1.0.dist-info/RECORD +0 -6
- {chatterer-0.1.0.dist-info → chatterer-0.1.2.dist-info}/top_level.txt +0 -0
chatterer/__init__.py CHANGED

```diff
@@ -1,13 +1,24 @@
-from .llms import
-__all__
+from .llms import LLM
+
+__all__ = ["LLM"]
+
+try:
+    from .llms import LangchainLLM
+
+    __all__ += ["LangchainLLM"]
+except ImportError:
+    pass
+
+try:
+    from .llms import OllamaLLM
+
+    __all__ += ["OllamaLLM"]
+except ImportError:
+    pass
+
+try:
+    from .llms import InstructorLLM
+
+    __all__ += ["InstructorLLM"]
+except ImportError:
+    pass
```

(The removed lines are truncated in the source diff view; only the fragments `from .llms import` and `__all__` are shown.)
chatterer/llms/__init__.py ADDED

```python
from importlib.util import find_spec

from .base import LLM

__all__ = ["LLM"]

if find_spec("langchain_core") is not None:
    from .langchain import LangchainLLM

    __all__ += ["LangchainLLM"]

if find_spec("ollama") is not None:
    from .ollama import OllamaLLM

    __all__ += ["OllamaLLM"]

if find_spec("instructor") is not None:
    from .instructor import InstructorLLM

    __all__ += ["InstructorLLM"]
```
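The two `__init__` modules above gate the optional backends in complementary ways: the package root guards its re-exports with `try/except ImportError`, while `chatterer.llms` probes `importlib.util.find_spec` before importing each backend module. A minimal sketch of how a consumer can rely on this, assuming chatterer 0.1.2 is installed (the `llama3.2` model name is illustrative):

```python
import chatterer

# LLM is always exported; the backend classes appear in __all__ only when
# their optional dependency (langchain_core / ollama / instructor) is installed.
print(chatterer.__all__)

if "OllamaLLM" in chatterer.__all__:
    llm = chatterer.OllamaLLM(model="llama3.2")  # hypothetical locally pulled model
```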
chatterer/llms/base.py ADDED

```python
from abc import ABC, abstractmethod
from typing import (
    Any,
    ClassVar,
    Iterator,
    Sequence,
    Type,
    TypeVar,
)

from openai.types.chat import ChatCompletionMessageParam
from pydantic import BaseModel, ConfigDict, Field

P = TypeVar("P", bound=BaseModel)


class LLM(BaseModel, ABC):
    call_kwargs: dict[str, Any] = Field(default_factory=dict)
    model_config: ClassVar[ConfigDict] = ConfigDict(arbitrary_types_allowed=True)

    def __call__(self, messages: Sequence[ChatCompletionMessageParam]) -> str:
        return self.generate(messages)

    @abstractmethod
    def generate(self, messages: Sequence[ChatCompletionMessageParam]) -> str: ...

    @abstractmethod
    def generate_stream(self, messages: Sequence[ChatCompletionMessageParam]) -> Iterator[str]: ...

    @abstractmethod
    def generate_pydantic(
        self,
        response_model: Type[P],
        messages: Sequence[ChatCompletionMessageParam],
    ) -> P: ...

    def generate_pydantic_stream(
        self,
        response_model: Type[P],
        messages: Sequence[ChatCompletionMessageParam],
    ) -> Iterator[P]:
        raise NotImplementedError
```
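A backend only has to supply the three abstract methods; `__call__` and the default `generate_pydantic_stream` come from the base class. A minimal sketch of a custom subclass (the `EchoLLM` class is illustrative, not part of the package):

```python
from typing import Iterator, Sequence, Type

from openai.types.chat import ChatCompletionMessageParam
from pydantic import BaseModel

from chatterer.llms.base import LLM, P


class EchoLLM(LLM):
    """Hypothetical backend that parrots the last user message back."""

    def generate(self, messages: Sequence[ChatCompletionMessageParam]) -> str:
        return str(messages[-1].get("content", "")) if messages else ""

    def generate_stream(self, messages: Sequence[ChatCompletionMessageParam]) -> Iterator[str]:
        yield self.generate(messages)

    def generate_pydantic(
        self,
        response_model: Type[P],
        messages: Sequence[ChatCompletionMessageParam],
    ) -> P:
        # Naive: assumes the echoed text is already valid JSON for the schema.
        return response_model.model_validate_json(self.generate(messages))


llm = EchoLLM()  # call_kwargs defaults to {}
print(llm([{"role": "user", "content": "hello"}]))  # __call__ delegates to generate()
```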
chatterer/llms/instructor.py ADDED

```python
from os import environ
from typing import (
    Any,
    Iterator,
    Self,
    Sequence,
    Type,
)

from instructor import Instructor, Mode, from_openai
from openai import OpenAI
from openai.types.chat import ChatCompletionMessageParam
from pydantic import BaseModel

from .base import LLM, P


class Response(BaseModel):
    response: str


class InstructorLLM(LLM):
    inst: Instructor

    @classmethod
    def openai(cls, call_kwargs: dict[str, Any] = {"model": "o3-mini"}) -> Self:
        return cls(
            inst=from_openai(OpenAI(), Mode.TOOLS_STRICT),
            call_kwargs=call_kwargs,
        )

    @classmethod
    def anthropic(
        cls,
        call_kwargs: dict[str, Any] = {
            "model": "claude-3-7-sonnet-20250219",
            "temperature": 0.7,
            "max_tokens": 8192,
        },
    ) -> Self:
        from anthropic import Anthropic
        from instructor import from_anthropic

        return cls(
            inst=from_anthropic(client=Anthropic(), mode=Mode.ANTHROPIC_TOOLS),
            call_kwargs=call_kwargs,
        )

    @classmethod
    def gemini(
        cls,
        model_name: str = "gemini-2.0-flash",
        call_kwargs: dict[str, Any] = {},
    ) -> Self:
        from google.generativeai.generative_models import GenerativeModel
        from instructor import from_gemini

        return cls(
            inst=from_gemini(client=GenerativeModel(model_name=model_name), mode=Mode.GEMINI_TOOLS),
            call_kwargs=call_kwargs,
        )

    @classmethod
    def deepseek(cls, call_kwargs: dict[str, Any] = {"model": "deepseek-chat"}) -> Self:
        return cls(
            inst=from_openai(
                OpenAI(
                    base_url="https://api.deepseek.com/v1",
                    api_key=environ["DEEPSEEK_API_KEY"],
                ),
                Mode.TOOLS_STRICT,
            ),
            call_kwargs=call_kwargs,
        )

    def generate(self, messages: Sequence[ChatCompletionMessageParam]) -> str:
        if self.inst is None:
            raise ValueError("Instructor instance is not initialized")
        res = self.inst.chat.completions.create(
            response_model=Response,
            messages=list(messages),
            **self.call_kwargs,
        )
        return res.response

    def generate_stream(self, messages: Sequence[ChatCompletionMessageParam]) -> Iterator[str]:
        if self.inst is None:
            raise ValueError("Instructor instance is not initialized")
        last_content: str = ""
        for res in self.inst.chat.completions.create_partial(
            response_model=Response,
            messages=list(messages),
            **self.call_kwargs,
        ):
            content: str = res.response
            delta: str = content.removeprefix(last_content)
            if not delta:
                continue
            last_content = content
            yield delta

    def generate_pydantic(
        self,
        response_model: Type[P],
        messages: Sequence[ChatCompletionMessageParam],
    ) -> P:
        if self.inst is None:
            raise ValueError("Instructor instance is not initialized")
        return self.inst.chat.completions.create(
            response_model=response_model,
            messages=list(messages),
            **self.call_kwargs,
        )

    def generate_pydantic_stream(
        self,
        response_model: Type[P],
        messages: Sequence[ChatCompletionMessageParam],
    ) -> Iterator[P]:
        if self.inst is None:
            raise ValueError("Instructor instance is not initialized")
        for res in self.inst.chat.completions.create_partial(
            response_model=response_model,
            messages=list(messages),
            **self.call_kwargs,
        ):
            yield res
```
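The factory classmethods each bundle a preconfigured `Instructor` client with default `call_kwargs`, and plain-text generation is routed through the single-field `Response` wrapper model. A usage sketch, assuming the `instructor` extra is installed and `OPENAI_API_KEY` is set (the `Summary` schema is illustrative):

```python
from pydantic import BaseModel

from chatterer.llms.instructor import InstructorLLM


class Summary(BaseModel):  # illustrative schema
    title: str
    bullet_points: list[str]


llm = InstructorLLM.openai()  # defaults to call_kwargs={"model": "o3-mini"}

# Plain text: validated through the internal Response wrapper model.
print(llm.generate([{"role": "user", "content": "Say hi in one sentence."}]))

# Structured output: validated against an arbitrary Pydantic model.
summary = llm.generate_pydantic(
    response_model=Summary,
    messages=[{"role": "user", "content": "Summarize type-safe LLM output in 3 bullets."}],
)
print(summary.title, summary.bullet_points)
```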
chatterer/llms/langchain.py ADDED

```python
from typing import (
    Iterator,
    Sequence,
    Type,
)

from langchain_community.adapters.openai import (
    convert_openai_messages,
)
from langchain_core.language_models.chat_models import BaseChatModel
from openai.types.chat import ChatCompletionMessageParam

from .base import LLM, P


class LangchainLLM(LLM):
    client: BaseChatModel

    def generate(self, messages: Sequence[ChatCompletionMessageParam]) -> str:
        content = self.client.invoke(convert_openai_messages([dict(msg) for msg in messages])).content
        if isinstance(content, str):
            return content
        else:
            return "".join(part for part in content if isinstance(part, str))

    def generate_stream(self, messages: Sequence[ChatCompletionMessageParam]) -> Iterator[str]:
        for chunk in self.client.stream(convert_openai_messages([dict(msg) for msg in messages])):
            content = chunk.content
            if isinstance(content, str):
                yield content
            elif isinstance(content, list):
                for part in content:
                    if isinstance(part, str):
                        yield part
                    else:
                        continue
            else:
                continue

    def generate_pydantic(
        self,
        response_model: Type[P],
        messages: Sequence[ChatCompletionMessageParam],
    ) -> P:
        result = self.client.with_structured_output(response_model).invoke(convert_openai_messages([dict(msg) for msg in messages]))
        if isinstance(result, response_model):
            return result
        else:
            return response_model.model_validate(result)
```
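`LangchainLLM` wraps any LangChain `BaseChatModel`, translating OpenAI-style message dicts via `convert_openai_messages` and flattening multi-part message content to plain strings. A sketch, assuming the separately installed `langchain-openai` package (the `ChatOpenAI` backend and model name are assumptions, not pinned by this wheel):

```python
from langchain_openai import ChatOpenAI  # any BaseChatModel should work here

from chatterer.llms.langchain import LangchainLLM

llm = LangchainLLM(client=ChatOpenAI(model="gpt-4o-mini"))

# Messages stay in OpenAI dict format; the adapter bridges to LangChain types.
for token in llm.generate_stream([{"role": "user", "content": "Count to three."}]):
    print(token, end="", flush=True)
```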
chatterer/llms/ollama.py ADDED

```python
from typing import (
    Any,
    Callable,
    Iterator,
    Literal,
    Mapping,
    Optional,
    Sequence,
    Type,
)

from ollama import Options, Tool, chat
from openai.types.chat import ChatCompletionMessageParam
from pydantic.json_schema import JsonSchemaValue

from .base import LLM, P


class OllamaLLM(LLM):
    model: str
    tools: Optional[Sequence[Mapping[str, Any] | Tool | Callable]] = None
    stream: bool = False
    format: Optional[Literal["", "json"] | JsonSchemaValue] = None
    options: Optional[Mapping[str, Any] | Options] = None
    keep_alive: Optional[float | str] = None

    def generate(self, messages: Sequence[ChatCompletionMessageParam]) -> str:
        return "".join(self.generate_stream(messages))

    def generate_stream(self, messages: Sequence[ChatCompletionMessageParam]) -> Iterator[str]:
        model = str(self.call_kwargs.get("model", self.model))
        format = self.call_kwargs.get("format", self.format)
        options = self.call_kwargs.get("options", self.options)
        keep_alive = self.call_kwargs.get("keep_alive", self.keep_alive)
        tools = self.call_kwargs.get("tools", self.tools)
        return (
            res.message.content or ""
            for res in chat(
                model=model,
                messages=messages,
                tools=tools,
                stream=True,
                format=format,
                options=options,
                keep_alive=keep_alive,
            )
        )

    def generate_pydantic(
        self,
        response_model: Type[P],
        messages: Sequence[ChatCompletionMessageParam],
    ) -> P:
        model = str(self.call_kwargs.get("model", self.model))
        format = response_model.model_json_schema()
        options = self.call_kwargs.get("options", self.options)
        keep_alive = self.call_kwargs.get("keep_alive", self.keep_alive)
        return response_model.model_validate_json(
            chat(
                model=model,
                messages=messages,
                tools=None,
                stream=False,
                format=format,
                options=options,
                keep_alive=keep_alive,
            ).message.content
            or ""
        )
```
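`OllamaLLM.generate()` is just the joined stream, and `generate_pydantic()` passes the Pydantic model's JSON schema as Ollama's `format` argument so the server constrains its output. A sketch, assuming a local Ollama server with a pulled model (the model name and `CityInfo` schema are illustrative):

```python
from pydantic import BaseModel

from chatterer.llms.ollama import OllamaLLM


class CityInfo(BaseModel):  # illustrative schema
    name: str
    country: str


llm = OllamaLLM(model="llama3.2")  # hypothetical locally pulled model

# generate() simply joins the streamed chunks.
print(llm.generate([{"role": "user", "content": "Name one city."}]))

# generate_pydantic() sends CityInfo.model_json_schema() as `format`.
info = llm.generate_pydantic(
    response_model=CityInfo,
    messages=[{"role": "user", "content": "Give facts about Paris as JSON."}],
)
print(info.name, info.country)
```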
{chatterer-0.1.0.dist-info → chatterer-0.1.2.dist-info}/METADATA CHANGED

```diff
@@ -1,13 +1,15 @@
 Metadata-Version: 2.2
 Name: chatterer
-Version: 0.1.0
+Version: 0.1.2
 Summary: The highest-level interface for various LLM APIs.
 Requires-Python: >=3.12
 Description-Content-Type: text/markdown
 Requires-Dist: openai>=1.63.2
 Provides-Extra: all
-Requires-Dist: anthropic>=0.
+Requires-Dist: anthropic>=0.49.0; extra == "all"
+Requires-Dist: google-generativeai>=0.8.4; extra == "all"
 Requires-Dist: instructor>=1.7.2; extra == "all"
+Requires-Dist: jsonref>=1.1.0; extra == "all"
 Requires-Dist: langchain-community>=0.3.18; extra == "all"
 Requires-Dist: ollama>=0.4.7; extra == "all"
```

(The removed `anthropic` requirement is truncated after `>=0.` in the source diff view.)
chatterer-0.1.2.dist-info/RECORD ADDED

```
chatterer/__init__.py,sha256=zi6zoXdpdWL35p-BYAKrsNWcqi_T3RqpflugLy_93mg,370
chatterer/llms/__init__.py,sha256=3vUVxkeyBz9xi6nAYg2D7ZtSeuZumTMS5pdJHj3JDpI,435
chatterer/llms/base.py,sha256=rdYJgpNeDoR-1Cmuea5rBYTJ8Jmtof3U6rFzcUcnG_Y,1196
chatterer/llms/instructor.py,sha256=QP7rbyENN-4m7kNXGnP1r0kb6ioF0ozkEG0qe5BjQnc,3936
chatterer/llms/langchain.py,sha256=cuKYWtunCNLNs15P_mlOIbxjvmClrYphSKqAqg3b05k,1723
chatterer/llms/ollama.py,sha256=amhcGB-xK5ooDAlFzl0aln76x5e13NRKaUkzZ7rcEm0,2304
chatterer-0.1.2.dist-info/METADATA,sha256=FRpuLBBieJbmIHcSM17V4DU3lw92JonmLCYU8gPYxFk,6192
chatterer-0.1.2.dist-info/WHEEL,sha256=jB7zZ3N9hIM9adW7qlTAyycLYW9npaWKLRzaoVcLKcM,91
chatterer-0.1.2.dist-info/top_level.txt,sha256=7nSQKP0bHxPRc7HyzdbKsJdkvPgYD0214o6slRizv9s,10
chatterer-0.1.2.dist-info/RECORD,,
```
chatterer/llms.py DELETED

```python
from abc import ABC, abstractmethod
from dataclasses import dataclass
from os import environ
from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
    Iterator,
    Literal,
    Mapping,
    Optional,
    Self,
    Sequence,
    Type,
    TypeVar,
)

from openai import OpenAI
from openai.types.chat import ChatCompletionMessageParam
from pydantic import BaseModel, create_model
from pydantic.json_schema import JsonSchemaValue

if TYPE_CHECKING:
    from instructor import Instructor
    from langchain_core.language_models.chat_models import BaseChatModel
    from ollama import Options, Tool

P = TypeVar("P", bound=BaseModel)


@dataclass
class LLM(ABC):
    call_kwargs: dict[str, Any]

    @abstractmethod
    def generate(
        self, messages: Sequence[ChatCompletionMessageParam]
    ) -> str: ...

    @abstractmethod
    def generate_stream(
        self, messages: Sequence[ChatCompletionMessageParam]
    ) -> Iterator[str]: ...

    @abstractmethod
    def generate_pydantic(
        self,
        response_model: Type[P],
        messages: Sequence[ChatCompletionMessageParam],
    ) -> P: ...

    def generate_pydantic_stream(
        self,
        response_model: Type[P],
        messages: Sequence[ChatCompletionMessageParam],
    ) -> Iterator[P]:
        raise NotImplementedError


@dataclass
class InstructorLLM(LLM):
    inst: "Instructor"

    @property
    def dependency(self) -> list[str]:
        return ["instructor"]

    @classmethod
    def openai(
        cls, call_kwargs: dict[str, Any] = {"model": "o3-mini"}
    ) -> Self:
        from instructor import Mode, from_openai

        return cls(
            inst=from_openai(OpenAI(), Mode.TOOLS_STRICT),
            call_kwargs=call_kwargs,
        )

    @classmethod
    def anthropic(
        cls,
        call_kwargs: dict[str, Any] = {
            "temperature": 0.7,
            "max_tokens": 8192,
            "model": "claude-3-5-sonnet-20241022",
        },
    ) -> Self:
        from anthropic import Anthropic
        from instructor import Mode, from_anthropic

        return cls(
            inst=from_anthropic(
                client=Anthropic(), mode=Mode.ANTHROPIC_TOOLS
            ),
            call_kwargs=call_kwargs,
        )

    @classmethod
    def deepseek(
        cls, call_kwargs: dict[str, Any] = {"model": "deepseek-chat"}
    ) -> Self:
        from instructor import Mode, from_openai

        return cls(
            inst=from_openai(
                OpenAI(
                    base_url="https://api.deepseek.com/v1",
                    api_key=environ["DEEPSEEK_API_KEY"],
                ),
                Mode.TOOLS_STRICT,
            ),
            call_kwargs=call_kwargs,
        )

    def generate(
        self, messages: Sequence[ChatCompletionMessageParam]
    ) -> str:
        res = self.inst.chat.completions.create(
            response_model=create_model(
                "Response",
                response=(str, ...),
            ),
            messages=list(messages),
            **self.call_kwargs,
        )
        return str(getattr(res, "response", "") or "")

    def generate_stream(
        self, messages: Sequence[ChatCompletionMessageParam]
    ) -> Iterator[str]:
        last_content: str = ""
        for res in self.inst.chat.completions.create_partial(
            response_model=create_model(
                "Response",
                response=(str, ...),
            ),
            messages=list(messages),
            **self.call_kwargs,
        ):
            content = str(getattr(res, "response", "") or "")
            delta: str = content.removeprefix(last_content)
            if not delta:
                continue
            last_content = content
            yield delta

    def generate_pydantic(
        self,
        response_model: Type[P],
        messages: Sequence[ChatCompletionMessageParam],
    ) -> P:
        return self.inst.chat.completions.create(
            response_model=response_model,
            messages=list(messages),
            **self.call_kwargs,
        )

    def generate_pydantic_stream(
        self,
        response_model: Type[P],
        messages: Sequence[ChatCompletionMessageParam],
    ) -> Iterator[P]:
        for res in self.inst.chat.completions.create_partial(
            response_model=response_model,
            messages=list(messages),
            **self.call_kwargs,
        ):
            yield res


@dataclass
class OllamaLLM(LLM):
    model: str
    tools: Optional[Sequence[Mapping[str, Any] | "Tool" | Callable]] = None
    stream: bool = False
    format: Optional[Literal["", "json"] | JsonSchemaValue] = None
    options: Optional[Mapping[str, Any] | "Options"] = None
    keep_alive: Optional[float | str] = None

    def generate(
        self, messages: Sequence[ChatCompletionMessageParam]
    ) -> str:
        return "".join(self.generate_stream(messages))

    def generate_stream(
        self, messages: Sequence[ChatCompletionMessageParam]
    ) -> Iterator[str]:
        from ollama import chat

        model = str(self.call_kwargs.get("model", self.model))
        format = self.call_kwargs.get("format", self.format)
        options = self.call_kwargs.get("options", self.options)
        keep_alive = self.call_kwargs.get("keep_alive", self.keep_alive)
        tools = self.call_kwargs.get("tools", self.tools)
        return (
            res.message.content or ""
            for res in chat(
                model=model,
                messages=messages,
                tools=tools,
                stream=True,
                format=format,
                options=options,
                keep_alive=keep_alive,
            )
        )

    def generate_pydantic(
        self,
        response_model: Type[P],
        messages: Sequence[ChatCompletionMessageParam],
    ) -> P:
        from ollama import chat

        model = str(self.call_kwargs.get("model", self.model))
        format = response_model.model_json_schema()
        options = self.call_kwargs.get("options", self.options)
        keep_alive = self.call_kwargs.get("keep_alive", self.keep_alive)
        return response_model.model_validate_json(
            chat(
                model=model,
                messages=messages,
                tools=None,
                stream=False,
                format=format,
                options=options,
                keep_alive=keep_alive,
            ).message.content
            or ""
        )


@dataclass
class LangchainLLM(LLM):
    client: "BaseChatModel"

    def generate(
        self, messages: Sequence[ChatCompletionMessageParam]
    ) -> str:
        from langchain_community.adapters.openai import (
            convert_openai_messages,
        )

        content = self.client.invoke(
            convert_openai_messages([dict(msg) for msg in messages])
        ).content
        if isinstance(content, str):
            return content
        else:
            return "".join(part for part in content if isinstance(part, str))

    def generate_stream(
        self, messages: Sequence[ChatCompletionMessageParam]
    ) -> Iterator[str]:
        from langchain_community.adapters.openai import (
            convert_openai_messages,
        )

        for chunk in self.client.stream(
            convert_openai_messages([dict(msg) for msg in messages])
        ):
            content = chunk.content
            if isinstance(content, str):
                yield content
            elif isinstance(content, list):
                for part in content:
                    if isinstance(part, str):
                        yield part
                    else:
                        continue
            else:
                continue

    def generate_pydantic(
        self,
        response_model: Type[P],
        messages: Sequence[ChatCompletionMessageParam],
    ) -> P:
        from langchain_community.adapters.openai import (
            convert_openai_messages,
        )

        result = self.client.with_structured_output(response_model).invoke(
            convert_openai_messages([dict(msg) for msg in messages])
        )
        if isinstance(result, response_model):
            return result
        else:
            return response_model.model_validate(result)
```
chatterer-0.1.0.dist-info/RECORD DELETED

```
chatterer/__init__.py,sha256=uxj5SEbjxpGGt1ukNLgkafrE90DGdpIc6NcxumBrcDI,180
chatterer/llms.py,sha256=wDi4EX1Ebe7vKI8Cd2qTEo2BzzY0UyERryrd7eQafyg,8299
chatterer-0.1.0.dist-info/METADATA,sha256=bsXI0wNj3WueyfOXicNgUG6Ru7KqFgFi61fs9mLZpj8,6086
chatterer-0.1.0.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
chatterer-0.1.0.dist-info/top_level.txt,sha256=7nSQKP0bHxPRc7HyzdbKsJdkvPgYD0214o6slRizv9s,10
chatterer-0.1.0.dist-info/RECORD,,
```
{chatterer-0.1.0.dist-info → chatterer-0.1.2.dist-info}/top_level.txt: File without changes