chatlas 0.12.0__py3-none-any.whl → 0.13.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of chatlas might be problematic.
- chatlas/__init__.py +10 -0
- chatlas/_batch_chat.py +211 -0
- chatlas/_batch_job.py +234 -0
- chatlas/_chat.py +171 -42
- chatlas/_content.py +2 -2
- chatlas/_provider.py +88 -0
- chatlas/_provider_anthropic.py +106 -2
- chatlas/_provider_github.py +2 -2
- chatlas/_provider_openai.py +143 -12
- chatlas/_version.py +2 -2
- {chatlas-0.12.0.dist-info → chatlas-0.13.1.dist-info}/METADATA +2 -1
- {chatlas-0.12.0.dist-info → chatlas-0.13.1.dist-info}/RECORD +14 -12
- {chatlas-0.12.0.dist-info → chatlas-0.13.1.dist-info}/WHEEL +0 -0
- {chatlas-0.12.0.dist-info → chatlas-0.13.1.dist-info}/licenses/LICENSE +0 -0
chatlas/_provider_openai.py
CHANGED
@@ -1,11 +1,18 @@
 from __future__ import annotations
 
 import base64
+import json
+import os
+import re
+import tempfile
+import warnings
 from datetime import datetime
 from typing import TYPE_CHECKING, Any, Literal, Optional, cast, overload
 
 import orjson
 from openai import AsyncAzureOpenAI, AsyncOpenAI, AzureOpenAI, OpenAI
+from openai.types.batch import Batch
+from openai.types.chat import ChatCompletion, ChatCompletionChunk
 from pydantic import BaseModel
 
 from ._chat import Chat
@@ -24,18 +31,20 @@ from ._content import (
 )
 from ._logging import log_model_default
 from ._merge import merge_dicts
-from ._provider import
+from ._provider import (
+    BatchStatus,
+    ModelInfo,
+    Provider,
+    StandardModelParamNames,
+    StandardModelParams,
+)
 from ._tokens import get_token_pricing, tokens_log
 from ._tools import Tool, basemodel_to_param_schema
 from ._turn import Turn, user_turn
 from ._utils import MISSING, MISSING_TYPE, is_testing, split_http_client_kwargs
 
 if TYPE_CHECKING:
-    from openai.types.chat import (
-        ChatCompletion,
-        ChatCompletionChunk,
-        ChatCompletionMessageParam,
-    )
+    from openai.types.chat import ChatCompletionMessageParam
     from openai.types.chat.chat_completion_assistant_message_param import (
         ContentArrayOfContentPart,
     )
@@ -45,10 +54,6 @@ if TYPE_CHECKING:
     from openai.types.chat_model import ChatModel
 
     from .types.openai import ChatAzureClientArgs, ChatClientArgs, SubmitInputArgs
-else:
-    ChatCompletion = object
-    ChatCompletionChunk = object
-
 
 # The dictionary form of ChatCompletion (TODO: stronger typing)?
 ChatCompletionDict = dict[str, Any]
@@ -171,6 +176,21 @@ def ChatOpenAI(
     )
 
 
+# Seems there is no native typing support for `files.content()` results
+# so mock them based on the docs here
+# https://platform.openai.com/docs/guides/batch#5-retrieve-the-results
+class BatchResult(BaseModel):
+    id: str
+    custom_id: str
+    response: BatchResultResponse
+
+
+class BatchResultResponse(BaseModel):
+    status_code: int
+    request_id: str
+    body: ChatCompletionDict
+
+
 class OpenAIProvider(
     Provider[ChatCompletion, ChatCompletionChunk, ChatCompletionDict, "SubmitInputArgs"]
 ):
@@ -353,8 +373,6 @@ class OpenAIProvider(
         return merge_dicts(completion, chunkd)
 
     def stream_turn(self, completion, has_data_model) -> Turn:
-        from openai.types.chat import ChatCompletion
-
         delta = completion["choices"][0].pop("delta")  # type: ignore
         completion["choices"][0]["message"] = delta  # type: ignore
         completion = ChatCompletion.construct(**completion)
@@ -662,6 +680,119 @@ class OpenAIProvider(
             "stop_sequences",
         }
 
+    def has_batch_support(self) -> bool:
+        return True
+
+    def batch_submit(
+        self,
+        conversations: list[list[Turn]],
+        data_model: Optional[type[BaseModel]] = None,
+    ):
+        # First put the requests in a file
+        # https://platform.openai.com/docs/api-reference/batch/request-input
+        # https://platform.openai.com/docs/api-reference/batch
+        with tempfile.NamedTemporaryFile(mode="w", suffix=".jsonl", delete=False) as f:
+            temp_path = f.name
+
+            for i, turns in enumerate(conversations):
+                kwargs = self._chat_perform_args(
+                    stream=False,
+                    turns=turns,
+                    tools={},
+                    data_model=data_model,
+                )
+
+                body = {
+                    "messages": kwargs.get("messages", []),
+                    "model": self.model,
+                }
+
+                if "response_format" in kwargs:
+                    body["response_format"] = kwargs["response_format"]
+
+                request = {
+                    "custom_id": f"request-{i}",
+                    "method": "POST",
+                    "url": "/v1/chat/completions",
+                    "body": body,
+                }
+
+                f.write(orjson.dumps(request).decode() + "\n")
+
+        try:
+            with open(temp_path, "rb") as f:
+                file_response = self._client.files.create(file=f, purpose="batch")
+
+            batch = self._client.batches.create(
+                input_file_id=file_response.id,
+                endpoint="/v1/chat/completions",
+                completion_window="24h",
+            )
+
+            return batch.model_dump()
+        finally:
+            os.unlink(temp_path)
+
+    def batch_poll(self, batch):
+        batch = Batch.model_validate(batch)
+        b = self._client.batches.retrieve(batch.id)
+        return b.model_dump()
+
+    def batch_status(self, batch):
+        batch = Batch.model_validate(batch)
+        counts = batch.request_counts
+        total, completed, failed = 0, 0, 0
+        if counts is not None:
+            total = counts.total
+            completed = counts.completed
+            failed = counts.failed
+
+        return BatchStatus(
+            working=batch.status not in ["completed", "failed", "cancelled"],
+            n_processing=total - completed - failed,
+            n_succeeded=completed,
+            n_failed=failed,
+        )
+
+    def batch_retrieve(self, batch):
+        batch = Batch.model_validate(batch)
+        if batch.output_file_id is None:
+            raise ValueError("Batch has no output file")
+
+        # Download and parse JSONL results
+        response = self._client.files.content(batch.output_file_id)
+        results: list[dict[str, Any]] = []
+        for line in response.text.splitlines():
+            results.append(json.loads(line))
+
+        # Sort by custom_id to maintain order
+        def extract_id(x: str):
+            match = re.search(r"-(\d+)$", x)
+            return int(match.group(1)) if match else 0
+
+        results.sort(key=lambda x: int(extract_id(x.get("custom_id", ""))))
+
+        return results
+
+    def batch_result_turn(
+        self,
+        result,
+        has_data_model: bool = False,
+    ) -> Turn | None:
+        response = BatchResult.model_validate(result).response
+        if response.status_code != 200:
+            # TODO: offer advice on what to do?
+            warnings.warn(f"Batch request failed: {response.body}")
+            return None
+
+        completion = ChatCompletion.construct(**response.body)
+        return self._as_turn(completion, has_data_model)
+
+
+# -------------------------------------------------------------------------------------
+# Azure OpenAI Chat
+# -------------------------------------------------------------------------------------
+
 
 def ChatAzureOpenAI(
     *,
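Read together, the new provider methods describe a submit → poll → retrieve lifecycle for OpenAI's Batch API: batch_submit uploads one JSONL request per conversation, batch_poll/batch_status track progress, and batch_retrieve/batch_result_turn turn the output file back into Turn objects. The sketch below is illustrative only and is not code from chatlas; it assumes an OpenAIProvider instance named `provider` and a `conversations` value of type list[list[Turn]], and simply drives the methods added in this diff (the new _batch_chat.py and _batch_job.py modules presumably wrap a loop like this).

# Illustrative driver for the batch lifecycle added above (hypothetical helper,
# not part of chatlas). Assumes `provider` is an OpenAIProvider and
# `conversations` is a list[list[Turn]].
import time

def run_openai_batch(provider, conversations):
    # batch_submit() writes one JSONL request per conversation, uploads the
    # file with purpose="batch", and returns the created Batch as a dict.
    batch = provider.batch_submit(conversations)

    # Poll until the batch leaves its working states
    # (i.e. reaches completed/failed/cancelled).
    while True:
        batch = provider.batch_poll(batch)
        if not provider.batch_status(batch).working:
            break
        time.sleep(30)

    # batch_retrieve() downloads the output JSONL, sorted by custom_id;
    # batch_result_turn() converts each result into a Turn (or None on failure).
    results = provider.batch_retrieve(batch)
    return [provider.batch_result_turn(r) for r in results]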
chatlas/_version.py
CHANGED
@@ -28,7 +28,7 @@ version_tuple: VERSION_TUPLE
 commit_id: COMMIT_ID
 __commit_id__: COMMIT_ID
 
-__version__ = version = '0.
-__version_tuple__ = version_tuple = (0,
+__version__ = version = '0.13.1'
+__version_tuple__ = version_tuple = (0, 13, 1)
 
 __commit_id__ = commit_id = None
{chatlas-0.12.0.dist-info → chatlas-0.13.1.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: chatlas
-Version: 0.
+Version: 0.13.1
 Summary: A simple and consistent interface for chatting with LLMs
 Project-URL: Homepage, https://posit-dev.github.io/chatlas
 Project-URL: Documentation, https://posit-dev.github.io/chatlas
@@ -44,6 +44,7 @@ Requires-Dist: pillow; extra == 'dev'
 Requires-Dist: python-dotenv; extra == 'dev'
 Requires-Dist: ruff>=0.6.5; extra == 'dev'
 Requires-Dist: shiny; extra == 'dev'
+Requires-Dist: shinychat; extra == 'dev'
 Requires-Dist: snowflake-ml-python>=1.8.4; extra == 'dev'
 Requires-Dist: tenacity; extra == 'dev'
 Requires-Dist: tiktoken; extra == 'dev'
{chatlas-0.12.0.dist-info → chatlas-0.13.1.dist-info}/RECORD
CHANGED
@@ -1,8 +1,10 @@
-chatlas/__init__.py,sha256=
+chatlas/__init__.py,sha256=M3zK10LguXW6bybDqatRV9SXd0y6axYu5QzENjbOVd0,2633
 chatlas/_auto.py,sha256=aeMN2_EM-xK-Yx5JaCuwYRZZ29eqn_0oM7QR5zayrec,8912
+chatlas/_batch_chat.py,sha256=1KkHENB-l7VmhCizhdvbJO5WQmRntQS6EvcSJ6VLgvM,5546
+chatlas/_batch_job.py,sha256=2__JIOo_JpcQyAAzO07r6eS4urpAxEc9m7_zsjFieQw,7359
 chatlas/_callbacks.py,sha256=3RpPaOQonTqScjXbaShgKJ1Rc-YxzWerxKRBjVssFnc,1838
-chatlas/_chat.py,sha256=
-chatlas/_content.py,sha256=
+chatlas/_chat.py,sha256=oESXNVzDCJ1DpV_fBRgwK6N_fs_EoJGrlez5dJjqx5c,90664
+chatlas/_content.py,sha256=BdJQ5G5onT9Cf1tNFeXsCWWTD2zSIjWz50FYIk6_DDI,22767
 chatlas/_content_image.py,sha256=EUK6wAint-JatLsiwvaPDu4D3W-NcIsDCkzABkXgfDg,8304
 chatlas/_content_pdf.py,sha256=cffeuJxzhUDukQ-Srkmpy62M8X12skYpU_FVq-Wvya4,2420
 chatlas/_display.py,sha256=wyQzSc6z1VqrJfkTLkw1wQcti9s1Pr4qT8UxFJESn4U,4664
@@ -11,18 +13,18 @@ chatlas/_live_render.py,sha256=UMZltE35LxziDKPMEeDwQ9meZ95SeqwhJi7j-y9pcro,4004
 chatlas/_logging.py,sha256=weKvXZDIZ88X7X61ruXM_S0AAhQ5mgiW9dR-km8x7Mg,3324
 chatlas/_mcp_manager.py,sha256=smMXeKZzP90MrlCdnTHMyo7AWHwl7J2jkU8dKSlnEsQ,10237
 chatlas/_merge.py,sha256=SGj_BetgA7gaOqSBKOhYmW3CYeQKTEehFrXvx3y4OYE,3924
-chatlas/_provider.py,sha256
-chatlas/_provider_anthropic.py,sha256=
+chatlas/_provider.py,sha256=-5Oyq8tehHJtbBWQUyFUvdqTqZNUcOq2pO5qfAw5oQo,9057
+chatlas/_provider_anthropic.py,sha256=sPPEaDObGuY7JDqU533wlUDA-HaX3sumYWaD3kdG4nE,30964
 chatlas/_provider_cloudflare.py,sha256=vFbqgQPmosopJa9qsVxTkjPn4vYC_wOlgqa6_QmwTho,5227
 chatlas/_provider_databricks.py,sha256=JIOTm0HMe0qVAt8eS0WgGKugBwBdmL80JHLFH59ongU,4850
 chatlas/_provider_deepseek.py,sha256=6nPtPSo-Po6sD4i8PZJHuI5T2oATpLi5djXFGdlserk,4906
-chatlas/_provider_github.py,sha256=
+chatlas/_provider_github.py,sha256=5h7xwgUB0UdFpqr3UmUuFcC9hfn6kJvZDab0uAURIek,5642
 chatlas/_provider_google.py,sha256=0vAFSSk8SVBEtaDmy7Tl0XlHDqR4qwxWJR8HeSgh79E,21060
 chatlas/_provider_groq.py,sha256=XB2JDyuF95CcSbNkgk7JHcuy9KCW7hxTVaONDSjK8U8,3671
 chatlas/_provider_huggingface.py,sha256=feJ416X0UdtyoeHZbkgolFf62D7zxNwM7i_X3NYsQQw,4669
 chatlas/_provider_mistral.py,sha256=-p4rut0KCn-PrwnOlvr6lK8-K-OXvc5H9vTX-rCzUkk,5309
 chatlas/_provider_ollama.py,sha256=jFAu4v0NLUwdG_W_nKagBHOah0VKl_auTsgcYinP9rI,4119
-chatlas/_provider_openai.py,sha256=
+chatlas/_provider_openai.py,sha256=f8ijdXMMHy17VcuA2ImMpaYvaiKjHzcnTdR8LH1xE40,30654
 chatlas/_provider_openrouter.py,sha256=9sCXvROVIiUdwfEbkVA-15_kc6ouFUP2uV2MmUe2rFk,4385
 chatlas/_provider_perplexity.py,sha256=5q_LsUCJQ5w-jRveLDMPvZTX-GU2TVURp65mUMyDh10,4248
 chatlas/_provider_portkey.py,sha256=6wKrLZmKVxOqyO6P3HBgWqPe7y1N8une_1wp0aJq7pU,4087
@@ -33,7 +35,7 @@ chatlas/_tools.py,sha256=8rhGOsEviBJXk5Qb-a1RRb_C-DE2T3DOeN6IhblkxqI,12408
 chatlas/_turn.py,sha256=yK7alUxeP8d2iBc7amyz20BtEqcpvX6BCwWZsnlQ5R4,4515
 chatlas/_typing_extensions.py,sha256=BXmbhywjm5ssmyVLGwyP_5TWZMAobzrrgZLYkB6_etE,1076
 chatlas/_utils.py,sha256=Kku2fa1mvTYCr5D28VxE6-fwfy2e2doCi-eKQkLEg4Y,4686
-chatlas/_version.py,sha256=
+chatlas/_version.py,sha256=RBWKvLuPH5uJN_RJXTOaV5SvvBYz1PUZvU4D8m3gAvo,706
 chatlas/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 chatlas/data/prices.json,sha256=X6qALp-dWc4nfus9lIqHoKzk3PZDPHTLoxxcN2m6fXc,62645
 chatlas/types/__init__.py,sha256=1n0xrJ7TRIKsZ2z06FLFgGqfKMFtXSIxxPvJ2j0hvPw,850
@@ -48,7 +50,7 @@ chatlas/types/openai/__init__.py,sha256=Q2RAr1bSH1nHsxICK05nAmKmxdhKmhbBkWD_XHiV
 chatlas/types/openai/_client.py,sha256=mAoQftcJIp0ssIhS8q3TIW9u6zTRNtYDmpZJO8L0mC0,849
 chatlas/types/openai/_client_azure.py,sha256=Tf_PFRl0QAj4Nk5CD0ZNIO-SRsT39bVkEJlUTry1fb8,960
 chatlas/types/openai/_submit.py,sha256=EDtIUFcNIJ5QAt0wVyBXvUshK8FA9e86wcZDQ_HUOYs,7829
-chatlas-0.
-chatlas-0.
-chatlas-0.
-chatlas-0.
+chatlas-0.13.1.dist-info/METADATA,sha256=gG-XEXaUc9LFlqoyqHa5nNzH_Dz-QHeL6HqxECWHAgM,5635
+chatlas-0.13.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+chatlas-0.13.1.dist-info/licenses/LICENSE,sha256=zyuGzPOC7CcbOaBHsQ3UEyKYRO56KDUkor0OA4LqqDg,1081
+chatlas-0.13.1.dist-info/RECORD,,

{chatlas-0.12.0.dist-info → chatlas-0.13.1.dist-info}/WHEEL
File without changes

{chatlas-0.12.0.dist-info → chatlas-0.13.1.dist-info}/licenses/LICENSE
File without changes