chatlas 0.2.0__py3-none-any.whl → 0.4.0__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in its public registry, and is provided for informational purposes only.
- chatlas/__init__.py +2 -1
- chatlas/_anthropic.py +104 -6
- chatlas/_chat.py +246 -24
- chatlas/_content.py +20 -7
- chatlas/_google.py +312 -161
- chatlas/_merge.py +1 -1
- chatlas/_ollama.py +8 -0
- chatlas/_openai.py +64 -7
- chatlas/_provider.py +16 -8
- chatlas/py.typed +0 -0
- chatlas/types/__init__.py +5 -1
- chatlas/types/anthropic/_client.py +0 -8
- chatlas/types/anthropic/_submit.py +2 -3
- chatlas/types/google/_client.py +12 -91
- chatlas/types/google/_submit.py +40 -87
- chatlas/types/openai/_client.py +1 -0
- chatlas/types/openai/_client_azure.py +1 -0
- chatlas/types/openai/_submit.py +10 -2
- {chatlas-0.2.0.dist-info → chatlas-0.4.0.dist-info}/METADATA +25 -11
- chatlas-0.4.0.dist-info/RECORD +38 -0
- {chatlas-0.2.0.dist-info → chatlas-0.4.0.dist-info}/WHEEL +1 -1
- chatlas-0.2.0.dist-info/RECORD +0 -37
chatlas/_openai.py
CHANGED

@@ -8,6 +8,7 @@ from pydantic import BaseModel
 from ._chat import Chat
 from ._content import (
     Content,
+    ContentImage,
     ContentImageInline,
     ContentImageRemote,
     ContentJson,
@@ -20,7 +21,7 @@ from ._merge import merge_dicts
 from ._provider import Provider
 from ._tokens import tokens_log
 from ._tools import Tool, basemodel_to_param_schema
-from ._turn import Turn, normalize_turns
+from ._turn import Turn, normalize_turns, user_turn
 from ._utils import MISSING, MISSING_TYPE, is_testing

 if TYPE_CHECKING:
@@ -294,10 +295,12 @@ class OpenAIProvider(Provider[ChatCompletion, ChatCompletionChunk, ChatCompletio
             "stream": stream,
             "messages": self._as_message_param(turns),
             "model": self._model,
-            "seed": self._seed,
             **(kwargs or {}),
         }

+        if self._seed is not None:
+            kwargs_full["seed"] = self._seed
+
         if tool_schemas:
             kwargs_full["tools"] = tool_schemas

@@ -335,7 +338,7 @@ class OpenAIProvider(Provider[ChatCompletion, ChatCompletionChunk, ChatCompletio
             return chunkd
         return merge_dicts(completion, chunkd)

-    def stream_turn(self, completion, has_data_model
+    def stream_turn(self, completion, has_data_model) -> Turn:
         from openai.types.chat import ChatCompletion

         delta = completion["choices"][0].pop("delta")  # type: ignore
@@ -343,12 +346,60 @@
         completion = ChatCompletion.construct(**completion)
         return self._as_turn(completion, has_data_model)

-    async def stream_turn_async(self, completion, has_data_model, stream):
-        return self.stream_turn(completion, has_data_model, stream)
-
     def value_turn(self, completion, has_data_model) -> Turn:
         return self._as_turn(completion, has_data_model)

+    def token_count(
+        self,
+        *args: Content | str,
+        tools: dict[str, Tool],
+        data_model: Optional[type[BaseModel]],
+    ) -> int:
+        try:
+            import tiktoken
+        except ImportError:
+            raise ImportError(
+                "The tiktoken package is required for token counting. "
+                "Please install it with `pip install tiktoken`."
+            )
+
+        encoding = tiktoken.encoding_for_model(self._model)
+
+        turn = user_turn(*args)
+
+        # Count the tokens in image contents
+        image_tokens = sum(
+            self._image_token_count(x)
+            for x in turn.contents
+            if isinstance(x, ContentImage)
+        )
+
+        # For other contents, get the token count from the actual message param
+        other_contents = [x for x in turn.contents if not isinstance(x, ContentImage)]
+        other_full = self._as_message_param([Turn("user", other_contents)])
+        other_tokens = len(encoding.encode(str(other_full)))
+
+        return other_tokens + image_tokens
+
+    async def token_count_async(
+        self,
+        *args: Content | str,
+        tools: dict[str, Tool],
+        data_model: Optional[type[BaseModel]],
+    ) -> int:
+        return self.token_count(*args, tools=tools, data_model=data_model)
+
+    @staticmethod
+    def _image_token_count(image: ContentImage) -> int:
+        if isinstance(image, ContentImageRemote) and image.detail == "low":
+            return 85
+        else:
+            # This is just the max token count for an image. The highest possible
+            # resolution is 768 x 2048, and 8 tiles of size 512px can fit inside.
+            # TODO: this is obviously a very conservative estimate and could be improved
+            # https://platform.openai.com/docs/guides/vision/calculating-costs
+            return 170 * 8 + 85
+
     @staticmethod
     def _as_message_param(turns: list[Turn]) -> list["ChatCompletionMessageParam"]:
         from openai.types.chat import (
@@ -412,7 +463,13 @@ class OpenAIProvider(Provider[ChatCompletion, ChatCompletionChunk, ChatCompletio
            contents.append({"type": "text", "text": ""})
        elif isinstance(x, ContentImageRemote):
            contents.append(
-               {
+               {
+                   "type": "image_url",
+                   "image_url": {
+                       "url": x.url,
+                       "detail": x.detail,
+                   },
+               }
            )
        elif isinstance(x, ContentImageInline):
            contents.append(
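The new `token_count()` method above estimates prompt size locally: text content is encoded with `tiktoken`, while images fall back to a flat, deliberately conservative per-image figure (85 tokens for a low-detail remote image, `170 * 8 + 85` otherwise). Below is a standalone sketch of that estimation logic, outside chatlas; the helper name, model name, and example inputs are illustrative assumptions, not chatlas API.

```python
# Minimal sketch of the token-estimation approach shown in the diff above; not chatlas API.
import tiktoken


def estimate_input_tokens(
    text: str,
    n_low_detail_images: int = 0,
    n_other_images: int = 0,
    model: str = "gpt-4o",
) -> int:
    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        # Unknown model names fall back to a generic encoding
        encoding = tiktoken.get_encoding("o200k_base")
    text_tokens = len(encoding.encode(text))
    # Mirror the diff's flat image estimates: 85 for low-detail, 170 * 8 + 85 otherwise
    image_tokens = 85 * n_low_detail_images + (170 * 8 + 85) * n_other_images
    return text_tokens + image_tokens


print(estimate_input_tokens("What's in this image?", n_other_images=1))
```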
chatlas/_provider.py
CHANGED

@@ -14,6 +14,7 @@ from typing import (

 from pydantic import BaseModel

+from ._content import Content
 from ._tools import Tool
 from ._turn import Turn

@@ -124,20 +125,27 @@ class Provider(
         self,
         completion: ChatCompletionDictT,
         has_data_model: bool,
-        stream: Any,
     ) -> Turn: ...

     @abstractmethod
-
+    def value_turn(
         self,
-        completion:
+        completion: ChatCompletionT,
         has_data_model: bool,
-        stream: Any,
     ) -> Turn: ...

     @abstractmethod
-    def
+    def token_count(
         self,
-
-
-
+        *args: Content | str,
+        tools: dict[str, Tool],
+        data_model: Optional[type[BaseModel]],
+    ) -> int: ...
+
+    @abstractmethod
+    async def token_count_async(
+        self,
+        *args: Content | str,
+        tools: dict[str, Tool],
+        data_model: Optional[type[BaseModel]],
+    ) -> int: ...
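Provider implementations now have to supply both synchronous and asynchronous token counting. The simplified stand-in below shows the shape of that contract; it is not chatlas's actual `Provider` base class (which is generic over the completion types), and the class names are made up for illustration.

```python
# Simplified stand-in for the new abstract token-counting contract; illustrative only.
from abc import ABC, abstractmethod
from typing import Optional

from pydantic import BaseModel


class MiniProvider(ABC):
    @abstractmethod
    def token_count(
        self,
        *args: str,
        tools: dict,
        data_model: Optional[type[BaseModel]],
    ) -> int: ...

    @abstractmethod
    async def token_count_async(
        self,
        *args: str,
        tools: dict,
        data_model: Optional[type[BaseModel]],
    ) -> int: ...


class WordCountProvider(MiniProvider):
    """Toy implementation: treat each whitespace-separated word as one token."""

    def token_count(self, *args, tools, data_model) -> int:
        return sum(len(str(a).split()) for a in args)

    async def token_count_async(self, *args, tools, data_model) -> int:
        return self.token_count(*args, tools=tools, data_model=data_model)


print(WordCountProvider().token_count("Hello there, world", tools={}, data_model=None))
```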
chatlas/py.typed
ADDED

File without changes
chatlas/types/__init__.py
CHANGED

chatlas/types/anthropic/_client.py
CHANGED

@@ -18,12 +18,4 @@ class ChatClientArgs(TypedDict, total=False):
     default_headers: Optional[Mapping[str, str]]
     default_query: Optional[Mapping[str, object]]
     http_client: httpx.AsyncClient
-    transport: httpx.AsyncBaseTransport
-    proxies: Union[
-        str,
-        httpx.Proxy,
-        dict[str | httpx.URL, Union[None, str, httpx.URL, httpx.Proxy]],
-        None,
-    ]
-    connection_pool_limits: httpx.Limits
     _strict_response_validation: bool

chatlas/types/anthropic/_submit.py
CHANGED

@@ -6,7 +6,6 @@
 from typing import Iterable, Literal, Mapping, Optional, TypedDict, Union

 import anthropic
-import anthropic._types
 import anthropic.types.message_param
 import anthropic.types.text_block_param
 import anthropic.types.tool_choice_any_param
@@ -19,7 +18,6 @@ class SubmitInputArgs(TypedDict, total=False):
     max_tokens: int
     messages: Iterable[anthropic.types.message_param.MessageParam]
     model: Union[
-        str,
         Literal[
             "claude-3-5-haiku-latest",
             "claude-3-5-haiku-20241022",
@@ -33,6 +31,7 @@ class SubmitInputArgs(TypedDict, total=False):
             "claude-2.1",
             "claude-2.0",
         ],
+        str,
     ]
     stop_sequences: Union[list[str], anthropic.NotGiven]
     stream: Union[Literal[False], Literal[True], anthropic.NotGiven]
@@ -51,7 +50,7 @@ class SubmitInputArgs(TypedDict, total=False):
     tools: Union[Iterable[anthropic.types.tool_param.ToolParam], anthropic.NotGiven]
     top_k: int | anthropic.NotGiven
     top_p: float | anthropic.NotGiven
-    extra_headers: Optional[Mapping[str, Union[str, anthropic.
+    extra_headers: Optional[Mapping[str, Union[str, anthropic.Omit]]]
     extra_query: Optional[Mapping[str, object]]
     extra_body: object | None
     timeout: float | anthropic.Timeout | None | anthropic.NotGiven
chatlas/types/google/_client.py
CHANGED

@@ -3,99 +3,20 @@
 # ---------------------------------------------------------


-from typing import
+from typing import Optional, TypedDict, Union

-import google.
-import google.
-import google.
-import google.generativeai.types.content_types
-import google.generativeai.types.file_types
-import google.generativeai.types.generation_types
+import google.auth.credentials
+import google.genai.client
+import google.genai.types


 class ChatClientArgs(TypedDict, total=False):
-
-
-
-
-
-
-
-
-        google.generativeai.types.content_types.FunctionLibrary,
-        Iterable[
-            Union[
-                str,
-                google.generativeai.types.content_types.Tool,
-                google.ai.generativelanguage_v1beta.types.content.Tool,
-                google.generativeai.types.content_types.ToolDict,
-                Iterable[
-                    Union[
-                        google.generativeai.types.content_types.FunctionDeclaration,
-                        google.ai.generativelanguage_v1beta.types.content.FunctionDeclaration,
-                        dict[str, Any],
-                        Callable[..., Any],
-                    ]
-                ],
-                google.generativeai.types.content_types.FunctionDeclaration,
-                google.ai.generativelanguage_v1beta.types.content.FunctionDeclaration,
-                dict[str, Any],
-                Callable[..., Any],
-            ]
-        ],
-        str,
-        google.generativeai.types.content_types.Tool,
-        google.ai.generativelanguage_v1beta.types.content.Tool,
-        google.generativeai.types.content_types.ToolDict,
-        Iterable[
-            Union[
-                google.generativeai.types.content_types.FunctionDeclaration,
-                google.ai.generativelanguage_v1beta.types.content.FunctionDeclaration,
-                dict[str, Any],
-                Callable[..., Any],
-            ]
-        ],
-        google.generativeai.types.content_types.FunctionDeclaration,
-        google.ai.generativelanguage_v1beta.types.content.FunctionDeclaration,
-        dict[str, Any],
-        Callable[..., Any],
-        None,
-    ]
-    tool_config: Union[
-        google.generativeai.types.content_types.ToolConfigDict,
-        google.ai.generativelanguage_v1beta.types.content.ToolConfig,
-        None,
-    ]
-    system_instruction: Union[
-        google.ai.generativelanguage_v1beta.types.content.Content,
-        google.generativeai.types.content_types.ContentDict,
-        Iterable[
-            Union[
-                google.ai.generativelanguage_v1beta.types.content.Part,
-                google.generativeai.types.content_types.PartDict,
-                google.ai.generativelanguage_v1beta.types.content.Blob,
-                google.generativeai.types.content_types.BlobDict,
-                Any,
-                str,
-                google.ai.generativelanguage_v1beta.types.content.FunctionCall,
-                google.ai.generativelanguage_v1beta.types.content.FunctionResponse,
-                google.generativeai.types.file_types.FileDataDict,
-                google.ai.generativelanguage_v1beta.types.content.FileData,
-                google.ai.generativelanguage_v1beta.types.file.File,
-                google.generativeai.types.file_types.File,
-            ]
-        ],
-        google.ai.generativelanguage_v1beta.types.content.Part,
-        google.generativeai.types.content_types.PartDict,
-        google.ai.generativelanguage_v1beta.types.content.Blob,
-        google.generativeai.types.content_types.BlobDict,
-        Any,
-        str,
-        google.ai.generativelanguage_v1beta.types.content.FunctionCall,
-        google.ai.generativelanguage_v1beta.types.content.FunctionResponse,
-        google.generativeai.types.file_types.FileDataDict,
-        google.ai.generativelanguage_v1beta.types.content.FileData,
-        google.ai.generativelanguage_v1beta.types.file.File,
-        google.generativeai.types.file_types.File,
-        None,
+    vertexai: Optional[bool]
+    api_key: Optional[str]
+    credentials: Optional[google.auth.credentials.Credentials]
+    project: Optional[str]
+    location: Optional[str]
+    debug_config: Optional[google.genai.client.DebugConfig]
+    http_options: Union[
+        google.genai.types.HttpOptions, google.genai.types.HttpOptionsDict, None
     ]
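These `ChatClientArgs` now describe the client of the `google-genai` SDK, which replaces the older `google-generativeai` package. A hedged sketch of how such arguments map onto the new client follows; the API key, project, and location values are placeholders.

```python
# Sketch of constructing the google-genai client that these arguments describe.
from google import genai

# Gemini Developer API
client = genai.Client(api_key="YOUR_API_KEY")

# The same client class also covers Vertex AI
vertex_client = genai.Client(
    vertexai=True,
    project="my-gcp-project",  # placeholder
    location="us-central1",
)
```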
chatlas/types/google/_submit.py
CHANGED

@@ -3,111 +3,64 @@
 # ---------------------------------------------------------


-from typing import
+from typing import TypedDict, Union

-import google.
-import
-import google.ai.generativelanguage_v1beta.types.generative_service
-import google.generativeai.types.content_types
-import google.generativeai.types.file_types
-import google.generativeai.types.generation_types
-import google.generativeai.types.helper_types
+import google.genai.types
+import PIL.Image


 class SubmitInputArgs(TypedDict, total=False):
+    model: str
     contents: Union[
-
-        google.generativeai.types.content_types.ContentDict,
-        Iterable[
+        list[
             Union[
-                google.
-
-
-
-
+                google.genai.types.Content,
+                list[
+                    Union[
+                        google.genai.types.File,
+                        google.genai.types.Part,
+                        PIL.Image.Image,
+                        str,
+                    ]
+                ],
+                google.genai.types.File,
+                google.genai.types.Part,
+                PIL.Image.Image,
                 str,
-                google.ai.generativelanguage_v1beta.types.content.FunctionCall,
-                google.ai.generativelanguage_v1beta.types.content.FunctionResponse,
-                google.generativeai.types.file_types.FileDataDict,
-                google.ai.generativelanguage_v1beta.types.content.FileData,
-                google.ai.generativelanguage_v1beta.types.file.File,
-                google.generativeai.types.file_types.File,
             ]
         ],
-        google.
-
-        google.ai.generativelanguage_v1beta.types.content.Blob,
-        google.generativeai.types.content_types.BlobDict,
-        Any,
-        str,
-        google.ai.generativelanguage_v1beta.types.content.FunctionCall,
-        google.ai.generativelanguage_v1beta.types.content.FunctionResponse,
-        google.generativeai.types.file_types.FileDataDict,
-        google.ai.generativelanguage_v1beta.types.content.FileData,
-        google.ai.generativelanguage_v1beta.types.file.File,
-        google.generativeai.types.file_types.File,
-        Iterable[
+        google.genai.types.Content,
+        list[
             Union[
-                google.
-                google.generativeai.types.content_types.ContentDict,
+                google.genai.types.File, google.genai.types.Part, PIL.Image.Image, str
             ]
         ],
-
-
-
-
-
-        google.generativeai.types.generation_types.GenerationConfig,
-        None,
-    ]
-    stream: bool
-    tools: Union[
-        google.generativeai.types.content_types.FunctionLibrary,
-        Iterable[
+        google.genai.types.File,
+        google.genai.types.Part,
+        PIL.Image.Image,
+        str,
+        list[
             Union[
-
-
-                google.ai.generativelanguage_v1beta.types.content.Tool,
-                google.generativeai.types.content_types.ToolDict,
-                Iterable[
+                google.genai.types.Content,
+                list[
                     Union[
-                        google.
-                        google.
-
-
+                        google.genai.types.File,
+                        google.genai.types.Part,
+                        PIL.Image.Image,
+                        str,
                     ]
                 ],
-                google.
-                google.
-
-
-
-            ],
-        str,
-        google.generativeai.types.content_types.Tool,
-        google.ai.generativelanguage_v1beta.types.content.Tool,
-        google.generativeai.types.content_types.ToolDict,
-        Iterable[
-            Union[
-                google.generativeai.types.content_types.FunctionDeclaration,
-                google.ai.generativelanguage_v1beta.types.content.FunctionDeclaration,
-                dict[str, Any],
-                Callable[..., Any],
+                google.genai.types.File,
+                google.genai.types.Part,
+                PIL.Image.Image,
+                str,
+                google.genai.types.ContentDict,
             ]
         ],
-        google.
-        google.ai.generativelanguage_v1beta.types.content.FunctionDeclaration,
-        dict[str, Any],
-        Callable[..., Any],
-        None,
-    ]
-    tool_config: Union[
-        google.generativeai.types.content_types.ToolConfigDict,
-        google.ai.generativelanguage_v1beta.types.content.ToolConfig,
-        None,
+        google.genai.types.ContentDict,
     ]
-
-    google.
-    google.
+    config: Union[
+        google.genai.types.GenerateContentConfig,
+        google.genai.types.GenerateContentConfigDict,
         None,
     ]
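`SubmitInputArgs` now mirrors the keyword arguments of `google-genai`'s `generate_content()` call (`model`, `contents`, `config`) instead of the old `generative_service` request types. Below is a sketch of a call shaped like these arguments; the model name and prompt are placeholders.

```python
# Sketch of the google-genai call these submit arguments correspond to.
from google import genai
from google.genai import types

client = genai.Client(api_key="YOUR_API_KEY")
response = client.models.generate_content(
    model="gemini-2.0-flash",  # placeholder model name
    contents="Say hello in one sentence.",
    config=types.GenerateContentConfig(temperature=0.2),
)
print(response.text)
```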
chatlas/types/openai/_client.py
CHANGED

@@ -14,6 +14,7 @@ class ChatClientArgs(TypedDict, total=False):
     organization: str | None
     project: str | None
     base_url: str | httpx.URL | None
+    websocket_base_url: str | httpx.URL | None
     timeout: Union[float, openai.Timeout, None, openai.NotGiven]
     max_retries: int
     default_headers: Optional[Mapping[str, str]]

chatlas/types/openai/_client_azure.py
CHANGED

@@ -17,6 +17,7 @@ class ChatAzureClientArgs(TypedDict, total=False):
     organization: str | None
     project: str | None
     base_url: str | None
+    websocket_base_url: str | httpx.URL | None
     timeout: float | openai.Timeout | None | openai.NotGiven
     max_retries: int
     default_headers: Optional[Mapping[str, str]]
chatlas/types/openai/_submit.py
CHANGED

@@ -8,6 +8,7 @@ from typing import Iterable, Literal, Mapping, Optional, TypedDict, Union
 import openai
 import openai.types.chat.chat_completion_assistant_message_param
 import openai.types.chat.chat_completion_audio_param
+import openai.types.chat.chat_completion_developer_message_param
 import openai.types.chat.chat_completion_function_call_option_param
 import openai.types.chat.chat_completion_function_message_param
 import openai.types.chat.chat_completion_named_tool_choice_param
@@ -26,6 +27,7 @@ import openai.types.shared_params.response_format_text
 class SubmitInputArgs(TypedDict, total=False):
     messages: Iterable[
         Union[
+            openai.types.chat.chat_completion_developer_message_param.ChatCompletionDeveloperMessageParam,
             openai.types.chat.chat_completion_system_message_param.ChatCompletionSystemMessageParam,
             openai.types.chat.chat_completion_user_message_param.ChatCompletionUserMessageParam,
             openai.types.chat.chat_completion_assistant_message_param.ChatCompletionAssistantMessageParam,
@@ -36,6 +38,10 @@ class SubmitInputArgs(TypedDict, total=False):
     model: Union[
         str,
         Literal[
+            "o3-mini",
+            "o3-mini-2025-01-31",
+            "o1",
+            "o1-2024-12-17",
             "o1-preview",
             "o1-preview-2024-09-12",
             "o1-mini",
@@ -44,10 +50,11 @@ class SubmitInputArgs(TypedDict, total=False):
             "gpt-4o-2024-11-20",
             "gpt-4o-2024-08-06",
             "gpt-4o-2024-05-13",
-            "gpt-4o-realtime-preview",
-            "gpt-4o-realtime-preview-2024-10-01",
             "gpt-4o-audio-preview",
             "gpt-4o-audio-preview-2024-10-01",
+            "gpt-4o-audio-preview-2024-12-17",
+            "gpt-4o-mini-audio-preview",
+            "gpt-4o-mini-audio-preview-2024-12-17",
             "chatgpt-4o-latest",
             "gpt-4o-mini",
             "gpt-4o-mini-2024-07-18",
@@ -100,6 +107,7 @@ class SubmitInputArgs(TypedDict, total=False):
         openai.NotGiven,
     ]
     presence_penalty: Union[float, None, openai.NotGiven]
+    reasoning_effort: Union[Literal["low", "medium", "high"], None, openai.NotGiven]
     response_format: Union[
         openai.types.shared_params.response_format_text.ResponseFormatText,
         openai.types.shared_params.response_format_json_object.ResponseFormatJSONObject,
{chatlas-0.2.0.dist-info → chatlas-0.4.0.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
-Metadata-Version: 2.
+Metadata-Version: 2.4
 Name: chatlas
-Version: 0.
+Version: 0.4.0
 Summary: A simple and consistent interface for chatting with LLMs
 Project-URL: Homepage, https://posit-dev.github.io/chatlas
 Project-URL: Documentation, https://posit-dev.github.io/chatlas
@@ -22,7 +22,7 @@ Requires-Dist: pydantic>=2.0
 Requires-Dist: rich
 Provides-Extra: dev
 Requires-Dist: anthropic[bedrock]; extra == 'dev'
-Requires-Dist: google-
+Requires-Dist: google-genai>=1.2.0; extra == 'dev'
 Requires-Dist: matplotlib; extra == 'dev'
 Requires-Dist: numpy>1.24.4; extra == 'dev'
 Requires-Dist: openai; extra == 'dev'
@@ -30,15 +30,18 @@ Requires-Dist: pillow; extra == 'dev'
 Requires-Dist: python-dotenv; extra == 'dev'
 Requires-Dist: ruff>=0.6.5; extra == 'dev'
 Requires-Dist: shiny; extra == 'dev'
+Requires-Dist: tiktoken; extra == 'dev'
 Provides-Extra: docs
 Requires-Dist: griffe>=1; extra == 'docs'
 Requires-Dist: ipykernel; extra == 'docs'
 Requires-Dist: ipywidgets; extra == 'docs'
 Requires-Dist: nbclient; extra == 'docs'
 Requires-Dist: nbformat; extra == 'docs'
+Requires-Dist: numpy; extra == 'docs'
 Requires-Dist: pandas; extra == 'docs'
 Requires-Dist: pyyaml; extra == 'docs'
 Requires-Dist: quartodoc>=0.7; extra == 'docs'
+Requires-Dist: sentence-transformers; extra == 'docs'
 Provides-Extra: test
 Requires-Dist: pyright>=1.1.379; extra == 'test'
 Requires-Dist: pytest-asyncio; extra == 'test'
@@ -46,14 +49,24 @@ Requires-Dist: pytest>=8.3.2; extra == 'test'
 Requires-Dist: syrupy>=4; extra == 'test'
 Description-Content-Type: text/markdown

-
+<h1 class="unnumbered unlisted"> chatlas <a href="https://posit-dev.github.io/chatlas"><img src="docs/images/logo.png" align="right" height="138" alt="chatlas website" /></a> </h1>
+
+
+
+<p>
+<!-- badges start -->
+<a href="https://pypi.org/project/chatlas/"><img alt="PyPI" src="https://img.shields.io/pypi/v/chatlas?logo=python&logoColor=white&color=orange"></a>
+<a href="https://choosealicense.com/licenses/mit/"><img src="https://img.shields.io/badge/License-MIT-blue.svg" alt="MIT License"></a>
+<a href="https://github.com/posit-dev/chatlas"><img src="https://github.com/posit-dev/chatlas/actions/workflows/test.yml/badge.svg?branch=main" alt="Python Tests"></a>
+<!-- badges end -->
+</p>

 chatlas provides a simple and unified interface across large language model (llm) providers in Python.
-It
-
-Developer experience is also a key focus of chatlas: typing support, rich console output, and
+It helps you prototype faster by abstracting away complexity from common tasks like streaming chat interfaces, tool calling, structured output, and much more.
+Switching providers is also as easy as changing one line of code, but you can also reach for provider-specific features when you need them.
+Developer experience is also a key focus of chatlas: typing support, rich console output, and extension points are all included.

-(Looking for something similar to chatlas, but in R? Check out [
+(Looking for something similar to chatlas, but in R? Check out [ellmer](https://ellmer.tidyverse.org/)!)

 ## Install

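The updated README copy above stresses that switching providers amounts to changing the constructor. A brief sketch of what that looks like in practice; the model names are illustrative.

```python
# Illustrative chatlas usage; model names are placeholders.
from chatlas import ChatOpenAI

chat = ChatOpenAI(model="gpt-4o-mini")
chat.chat("What preceded Python 3?")

# Swapping providers is a one-line change to the constructor, e.g.:
# from chatlas import ChatAnthropic
# chat = ChatAnthropic(model="claude-3-5-sonnet-latest")
```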
@@ -85,6 +98,7 @@ It also supports the following enterprise cloud providers:

 * AWS Bedrock: [`ChatBedrockAnthropic()`](https://posit-dev.github.io/chatlas/reference/ChatBedrockAnthropic.html).
 * Azure OpenAI: [`ChatAzureOpenAI()`](https://posit-dev.github.io/chatlas/reference/ChatAzureOpenAI.html).
+* Vertex AI: [`ChatVertex()`](https://posit-dev.github.io/chatlas/reference/ChatVertex.html).

 To use a model provider that isn't listed here, you have two options:

@@ -123,7 +137,7 @@ From a `chat` instance, it's simple to start a web-based or terminal-based chat
 chat.app()
 ```

-<div
+<div align="center">
   <img width="500" alt="A web app for chatting with an LLM via chatlas" src="https://github.com/user-attachments/assets/e43f60cb-3686-435a-bd11-8215cb024d2e" class="border rounded">
 </div>

@@ -279,7 +293,7 @@ asyncio.run(main())

 `chatlas` has full typing support, meaning that, among other things, autocompletion just works in your favorite editor:

-<div
+<div align="center">
   <img width="500" alt="Autocompleting model options in ChatOpenAI" src="https://github.com/user-attachments/assets/163d6d8a-7d58-422d-b3af-cc9f2adee759" class="rounded">
 </div>

@@ -299,7 +313,7 @@ This shows important information like tool call results, finish reasons, and mor
 If the problem isn't self-evident, you can also reach into the `.get_last_turn()`, which contains the full response object, with full details about the completion.


-<div
+<div align="center">
   <img width="500" alt="Turn completion details with typing support" src="https://github.com/user-attachments/assets/eaea338d-e44a-4e23-84a7-2e998d8af3ba" class="rounded">
 </div>
