chatlas 0.12.0__py3-none-any.whl → 0.13.0__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.

Note: this version of chatlas has been flagged as potentially problematic.

chatlas/__init__.py CHANGED
@@ -1,5 +1,11 @@
 from . import types
 from ._auto import ChatAuto
+from ._batch_chat import (
+    batch_chat,
+    batch_chat_completed,
+    batch_chat_structured,
+    batch_chat_text,
+)
 from ._chat import Chat
 from ._content import (
     ContentToolRequest,
@@ -36,6 +42,10 @@ except ImportError: # pragma: no cover
     __version__ = "0.0.0" # stub value for docs
 
 __all__ = (
+    "batch_chat",
+    "batch_chat_completed",
+    "batch_chat_structured",
+    "batch_chat_text",
     "ChatAnthropic",
     "ChatAuto",
     "ChatBedrockAnthropic",
chatlas/_batch_chat.py ADDED
@@ -0,0 +1,211 @@
+"""
+Batch chat processing for submitting multiple requests simultaneously.
+
+This module provides functionality for submitting multiple chat requests
+in batches to providers that support it (currently OpenAI and Anthropic).
+Batch processing can take up to 24 hours but offers significant cost savings
+(up to 50% less than regular requests).
+"""
+
+from __future__ import annotations
+
+import copy
+from pathlib import Path
+from typing import TypeVar, Union
+
+from pydantic import BaseModel
+
+from ._batch_job import BatchJob, ContentT
+from ._chat import Chat
+
+ChatT = TypeVar("ChatT", bound=Chat)
+BaseModelT = TypeVar("BaseModelT", bound=BaseModel)
+
+
+def batch_chat(
+    chat: ChatT,
+    prompts: list[ContentT] | list[list[ContentT]],
+    path: Union[str, Path],
+    wait: bool = True,
+) -> list[ChatT | None]:
+    """
+    Submit multiple chat requests in a batch.
+
+    This function allows you to submit multiple chat requests simultaneously
+    using provider batch APIs (currently OpenAI and Anthropic). Batch processing
+    can take up to 24 hours but offers significant cost savings.
+
+    Parameters
+    ----------
+    chat
+        Chat instance to use for the batch
+    prompts
+        List of prompts to process. Each can be a string or list of strings.
+    path
+        Path to file (with .json extension) to store batch state
+    wait
+        If True, wait for batch to complete. If False, return None if incomplete.
+
+    Returns
+    -------
+    List of Chat objects (one per prompt) if complete, None if wait=False and incomplete.
+    Individual Chat objects may be None if their request failed.
+
+    Example
+    -------
+
+    ```python
+    from chatlas import ChatOpenAI
+
+    chat = ChatOpenAI()
+    prompts = [
+        "What's the capital of France?",
+        "What's the capital of Germany?",
+        "What's the capital of Italy?",
+    ]
+
+    chats = batch_chat(chat, prompts, "capitals.json")
+    for i, result_chat in enumerate(chats):
+        if result_chat:
+            print(f"Prompt {i + 1}: {result_chat.get_last_turn().text}")
+    ```
+    """
+    job = BatchJob(chat, prompts, path, wait=wait)
+    job.step_until_done()
+
+    chats = []
+    assistant_turns = job.result_turns()
+    for user, assistant in zip(job.user_turns, assistant_turns):
+        if assistant is not None:
+            new_chat = copy.deepcopy(chat)
+            new_chat.add_turn(user)
+            new_chat.add_turn(assistant)
+            chats.append(new_chat)
+        else:
+            chats.append(None)
+
+    return chats
+
+
+def batch_chat_text(
+    chat: Chat,
+    prompts: list[ContentT] | list[list[ContentT]],
+    path: Union[str, Path],
+    wait: bool = True,
+) -> list[str | None]:
+    """
+    Submit multiple chat requests in a batch and return text responses.
+
+    This is a convenience function that returns just the text of the responses
+    rather than full Chat objects.
+
+    Parameters
+    ----------
+    chat
+        Chat instance to use for the batch
+    prompts
+        List of prompts to process
+    path
+        Path to file (with .json extension) to store batch state
+    wait
+        If True, wait for batch to complete
+
+    Return
+    ------
+    List of text responses (or None for failed requests)
+    """
+    chats = batch_chat(chat, prompts, path, wait=wait)
+
+    texts = []
+    for x in chats:
+        if x is None:
+            texts.append(None)
+            continue
+        last_turn = x.get_last_turn()
+        if last_turn is None:
+            texts.append(None)
+            continue
+        texts.append(last_turn.text)
+
+    return texts
+
+
+def batch_chat_structured(
+    chat: Chat,
+    prompts: list[ContentT] | list[list[ContentT]],
+    path: Union[str, Path],
+    data_model: type[BaseModelT],
+    wait: bool = True,
+) -> list[BaseModelT | None]:
+    """
+    Submit multiple structured data requests in a batch.
+
+    Parameters
+    ----------
+    chat
+        Chat instance to use for the batch
+    prompts
+        List of prompts to process
+    path
+        Path to file (with .json extension) to store batch state
+    data_model
+        Pydantic model class for structured responses
+    wait
+        If True, wait for batch to complete
+
+    Return
+    ------
+    List of structured data objects (or None for failed requests)
+    """
+    job = BatchJob(chat, prompts, path, data_model=data_model, wait=wait)
+    result = job.step_until_done()
+
+    if result is None:
+        return []
+
+    res: list[BaseModelT | None] = []
+    assistant_turns = job.result_turns()
+    for turn in assistant_turns:
+        if turn is None:
+            res.append(None)
+        else:
+            json = chat._extract_turn_json(turn)
+            model = data_model.model_validate(json)
+            res.append(model)
+
+    return res
+
+
+def batch_chat_completed(
+    chat: Chat,
+    prompts: list[ContentT] | list[list[ContentT]],
+    path: Union[str, Path],
+) -> bool:
+    """
+    Check if a batch job is completed without waiting.
+
+    Parameters
+    ----------
+    chat
+        Chat instance used for the batch
+    prompts
+        List of prompts used for the batch
+    path
+        Path to batch state file
+
+    Returns
+    -------
+    True if batch is complete, False otherwise
+    """
+    job = BatchJob(chat, prompts, path, wait=False)
+    stage = job.stage
+
+    if stage == "submitting":
+        return False
+    elif stage == "waiting":
+        status = job._poll()
+        return not status.working
+    elif stage == "retrieving" or stage == "done":
+        return True
+    else:
+        raise ValueError(f"Unknown batch stage: {stage}")
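The `batch_chat_structured()` docstring above carries no usage example, so here is a minimal sketch of how it composes with a Pydantic model. The `Person` model, the prompts, and the state-file name are illustrative only, not part of chatlas; an OpenAI key is assumed to be configured in the environment.

```python
from pydantic import BaseModel

from chatlas import ChatOpenAI, batch_chat_structured


class Person(BaseModel):
    name: str
    age: int


chat = ChatOpenAI()
prompts = [
    "John, age 15, won first prize.",
    "Jane, aged 34, lost her wallet.",
]

# One batch request per prompt; state round-trips through people.json, so
# re-running the script resumes the same job instead of resubmitting it.
people = batch_chat_structured(
    chat,
    prompts,
    path="people.json",
    data_model=Person,
)
for person in people:
    if person is not None:
        print(person.name, person.age)
```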
chatlas/_batch_job.py ADDED
@@ -0,0 +1,234 @@
+from __future__ import annotations
+
+import hashlib
+import json
+import time
+from datetime import timedelta
+from pathlib import Path
+from typing import Any, Literal, Optional, TypeVar, Union
+
+from pydantic import BaseModel
+from rich.console import Console
+from rich.progress import Progress, SpinnerColumn, TextColumn
+
+from ._chat import Chat
+from ._content import Content
+from ._provider import BatchStatus
+from ._turn import Turn, user_turn
+from ._typing_extensions import TypedDict
+
+BatchStage = Literal["submitting", "waiting", "retrieving", "done"]
+
+
+class BatchStateHash(TypedDict):
+    provider: str
+    model: str
+    prompts: str
+    user_turns: str
+
+
+class BatchState(BaseModel):
+    version: int
+    stage: BatchStage
+    batch: dict[str, Any]
+    results: list[dict[str, Any]]
+    started_at: int
+    hash: BatchStateHash
+
+
+ContentT = TypeVar("ContentT", bound=Union[str, Content])
+
+
+class BatchJob:
+    """
+    Manages the lifecycle of a batch processing job.
+
+    A batch job goes through several stages:
+    1. "submitting" - Initial submission to the provider
+    2. "waiting" - Waiting for processing to complete
+    3. "retrieving" - Downloading results
+    4. "done" - Processing complete
+    """
+
+    def __init__(
+        self,
+        chat: Chat,
+        prompts: list[ContentT] | list[list[ContentT]],
+        path: Union[str, Path],
+        data_model: Optional[type[BaseModel]] = None,
+        wait: bool = True,
+    ):
+        if not chat.provider.has_batch_support():
+            raise ValueError("Batch requests are not supported by this provider")
+
+        self.chat = chat
+        self.prompts = prompts
+        self.path = Path(path)
+        self.data_model = data_model
+        self.should_wait = wait
+
+        # Convert prompts to user turns
+        self.user_turns: list[Turn] = []
+        for prompt in prompts:
+            if not isinstance(prompt, (str, Content)):
+                turn = user_turn(*prompt)
+            else:
+                turn = user_turn(prompt)
+            self.user_turns.append(turn)
+
+        # Job state management
+        self.provider = chat.provider
+        self.stage: BatchStage = "submitting"
+        self.batch: dict[str, Any] = {}
+        self.results: list[dict[str, Any]] = []
+
+        # Load existing state if file exists and is not empty
+        if self.path.exists() and self.path.stat().st_size > 0:
+            self._load_state()
+        else:
+            self.started_at = time.time()
+
+    def _load_state(self) -> None:
+        with open(self.path, "r") as f:
+            state = BatchState.model_validate_json(f.read())
+
+        self.stage = state.stage
+        self.batch = state.batch
+        self.results = state.results
+        self.started_at = state.started_at
+
+        # Verify hash to ensure consistency
+        stored_hash = state.hash
+        current_hash = self._compute_hash()
+
+        for key, value in current_hash.items():
+            if stored_hash.get(key) != value:
+                raise ValueError(
+                    f"Batch state mismatch: {key} doesn't match stored value. "
+                    f"Do you need to pick a different path?"
+                )
+
+    def _save_state(self) -> None:
+        state = BatchState(
+            version=1,
+            stage=self.stage,
+            batch=self.batch,
+            results=self.results,
+            started_at=int(self.started_at) if self.started_at else 0,
+            hash=self._compute_hash(),
+        )
+
+        with open(self.path, "w") as f:
+            f.write(state.model_dump_json(indent=2))
+
+    def _compute_hash(self) -> BatchStateHash:
+        turns = self.chat.get_turns(include_system_prompt=True)
+        return {
+            "provider": self.provider.name,
+            "model": self.provider.model,
+            "prompts": self._hash([str(p) for p in self.prompts]),
+            "user_turns": self._hash([str(turn) for turn in turns]),
+        }
+
+    @staticmethod
+    def _hash(x: Any) -> str:
+        return hashlib.md5(json.dumps(x, sort_keys=True).encode()).hexdigest()
+
+    def step(self) -> bool:
+        if self.stage == "submitting":
+            return self._submit()
+        elif self.stage == "waiting":
+            return self._wait()
+        elif self.stage == "retrieving":
+            return self._retrieve()
+        else:
+            raise ValueError(f"Unknown stage: {self.stage}")
+
+    def step_until_done(self) -> Optional["BatchJob"]:
+        while self.stage != "done":
+            if not self.step():
+                return None
+        return self
+
+    def _submit(self) -> bool:
+        existing_turns = self.chat.get_turns(include_system_prompt=True)
+
+        conversations = []
+        for turn in self.user_turns:
+            conversation = existing_turns + [turn]
+            conversations.append(conversation)
+
+        self.batch = self.provider.batch_submit(conversations, self.data_model)
+        self.stage = "waiting"
+        self._save_state()
+        return True
+
+    def _wait(self) -> bool:
+        # Always poll once, even when wait=False
+        status = self._poll()
+
+        if self.should_wait:
+            console = Console()
+
+            with Progress(
+                SpinnerColumn(),
+                TextColumn("Processing..."),
+                TextColumn("[{task.fields[elapsed]}]"),
+                TextColumn("{task.fields[n_processing]} pending |"),
+                TextColumn("[green]{task.fields[n_succeeded]}[/green] done |"),
+                TextColumn("[red]{task.fields[n_failed]}[/red] failed"),
+                console=console,
+            ) as progress:
+                task = progress.add_task(
+                    "processing",
+                    elapsed=self._elapsed(),
+                    n_processing=status.n_processing,
+                    n_succeeded=status.n_succeeded,
+                    n_failed=status.n_failed,
+                )
+
+                while status.working:
+                    time.sleep(0.5)
+                    status = self._poll()
+                    progress.update(
+                        task,
+                        elapsed=self._elapsed(),
+                        n_processing=status.n_processing,
+                        n_succeeded=status.n_succeeded,
+                        n_failed=status.n_failed,
+                    )
+
+        if not status.working:
+            self.stage = "retrieving"
+            self._save_state()
+            return True
+        else:
+            return False
+
+    def _poll(self) -> "BatchStatus":
+        if not self.batch:
+            raise ValueError("No batch to poll")
+        self.batch = self.provider.batch_poll(self.batch)
+        self._save_state()
+        return self.provider.batch_status(self.batch)
+
+    def _elapsed(self) -> str:
+        return str(timedelta(seconds=int(time.time()) - int(self.started_at)))
+
+    def _retrieve(self) -> bool:
+        if not self.batch:
+            raise ValueError("No batch to retrieve")
+        self.results = self.provider.batch_retrieve(self.batch)
+        self.stage = "done"
+        self._save_state()
+        return True
+
+    def result_turns(self) -> list[Turn | None]:
+        turns = []
+        for result in self.results:
+            turn = self.provider.batch_result_turn(
+                result, has_data_model=self.data_model is not None
+            )
+            turns.append(turn)
+
+        return turns
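Because every transition of the `BatchJob` stage machine ("submitting" → "waiting" → "retrieving" → "done") is persisted to the state file, a script can submit a batch, exit, and collect results in a later run. A sketch of that fire-and-forget pattern, with illustrative prompts and path (not from the chatlas docs):

```python
from chatlas import ChatOpenAI, batch_chat, batch_chat_completed

chat = ChatOpenAI()
prompts = ["Ping?", "Pong?"]

# First run: submits the batch (stage becomes "waiting") and returns
# immediately, without results, because wait=False.
batch_chat(chat, prompts, "pingpong.json", wait=False)

# A later run: poll once without blocking; the same state file is reused,
# so nothing is resubmitted.
if batch_chat_completed(chat, prompts, "pingpong.json"):
    chats = batch_chat(chat, prompts, "pingpong.json")
```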
chatlas/_chat.py CHANGED
@@ -78,6 +78,7 @@ CompletionT = TypeVar("CompletionT")
 EchoOptions = Literal["output", "all", "none", "text"]
 
 T = TypeVar("T")
+BaseModelT = TypeVar("BaseModelT", bound=BaseModel)
 
 
 def is_present(value: T | None | MISSING_TYPE) -> TypeGuard[T]:
@@ -209,6 +210,7 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
         self,
         *,
         include_system_prompt: bool = False,
+        tool_result_role: Literal["assistant", "user"] = "user",
     ) -> list[Turn[CompletionT]]:
         """
         Get all the turns (i.e., message contents) in the chat.
@@ -217,14 +219,50 @@
         ----------
         include_system_prompt
             Whether to include the system prompt in the turns.
+        tool_result_role
+            The role to assign to turns containing tool results. By default,
+            tool results are assigned a role of "user" since they represent
+            information provided to the assistant. If set to "assistant" tool
+            result content (plus the surrounding assistant turn contents) is
+            collected into a single assistant turn. This is convenient for
+            display purposes and more generally if you want the tool calling
+            loop to be contained in a single turn.
         """
 
         if not self._turns:
             return self._turns
 
         if not include_system_prompt and self._turns[0].role == "system":
-            return self._turns[1:]
-        return self._turns
+            turns = self._turns[1:]
+        else:
+            turns = self._turns
+
+        if tool_result_role == "user":
+            return turns
+
+        if tool_result_role != "assistant":
+            raise ValueError(
+                f"Expected `tool_result_role` to be one of 'user' or 'assistant', not '{tool_result_role}'"
+            )
+
+        # If a turn is purely a tool result, change its role
+        turns2 = copy.deepcopy(turns)
+        for turn in turns2:
+            if all(isinstance(c, ContentToolResult) for c in turn.contents):
+                turn.role = tool_result_role
+
+        # If two consecutive turns have the same role (i.e., assistant), collapse them into one
+        final_turns: list[Turn[CompletionT]] = []
+        for x in turns2:
+            if not final_turns:
+                final_turns.append(x)
+                continue
+            if x.role != final_turns[-1].role:
+                final_turns.append(x)
+            else:
+                final_turns[-1].contents.extend(x.contents)
+
+        return final_turns
 
     def get_last_turn(
         self,
@@ -531,7 +569,7 @@
         args
             The input to get a token count for.
         data_model
-            If the input is meant for data extraction (i.e., `.extract_data()`), then
+            If the input is meant for data extraction (i.e., `.chat_structured()`), then
             this should be the Pydantic model that describes the structure of the data to
             extract.
 
@@ -585,7 +623,7 @@
         args
             The input to get a token count for.
         data_model
-            If this input is meant for data extraction (i.e., `.extract_data_async()`),
+            If this input is meant for data extraction (i.e., `.chat_structured_async()`),
             then this should be the Pydantic model that describes the structure of the data
             to extract.
 
@@ -608,6 +646,7 @@
         port: int = 0,
         host: str = "127.0.0.1",
         launch_browser: bool = True,
+        bookmark_store: Literal["url", "server", "disable"] = "url",
         bg_thread: Optional[bool] = None,
         echo: Optional[EchoOptions] = None,
         content: Literal["text", "all"] = "all",
@@ -626,6 +665,12 @@
             The host to run the app on (the default is "127.0.0.1").
         launch_browser
             Whether to launch a browser window.
+        bookmark_store
+            One of the following (default is "url"):
+            - `"url"`: Store bookmarks in the URL (default).
+            - `"server"`: Store bookmarks on the server (requires a server-side
+              storage backend).
+            - `"disable"`: Disable bookmarking.
         bg_thread
             Whether to run the app in a background thread. If `None`, the app will
             run in a background thread if the current environment is a notebook.
@@ -647,24 +692,37 @@
             from shiny import App, run_app, ui
         except ImportError:
             raise ImportError(
-                "The `shiny` package is required for the `browser` method. "
+                "The `shiny` package is required for the `app()` method. "
                 "Install it with `pip install shiny`."
             )
 
-        app_ui = ui.page_fillable(
-            ui.chat_ui("chat"),
-            fillable_mobile=True,
-        )
+        try:
+            from shinychat import (
+                Chat,
+                chat_ui,
+                message_content,  # pyright: ignore[reportAttributeAccessIssue]
+            )
+        except ImportError:
+            raise ImportError(
+                "The `shinychat` package is required for the `app()` method. "
+                "Install it with `pip install shinychat`."
+            )
 
-        def server(input):  # noqa: A002
-            chat = ui.Chat(
-                "chat",
-                messages=[
-                    {"role": turn.role, "content": turn.text}
-                    for turn in self.get_turns()
-                ],
+        messages = [
+            message_content(x) for x in self.get_turns(tool_result_role="assistant")
+        ]
+
+        def app_ui(x):
+            return ui.page_fillable(
+                chat_ui("chat", messages=messages),
+                fillable_mobile=True,
             )
 
+        def server(input):  # noqa: A002
+            chat = Chat("chat")
+
+            chat.enable_bookmarking(self)
+
             @chat.on_user_submit
             async def _(user_input: str):
                 if stream:
@@ -688,7 +746,7 @@
                 )
             )
 
-        app = App(app_ui, server)
+        app = App(app_ui, server, bookmark_store=bookmark_store)
 
         def _run_app():
             run_app(app, launch_browser=launch_browser, port=port, host=host)
@@ -997,20 +1055,22 @@
 
         return wrapper()
 
-    def extract_data(
+    def chat_structured(
         self,
         *args: Content | str,
-        data_model: type[BaseModel],
+        data_model: type[BaseModelT],
         echo: EchoOptions = "none",
         stream: bool = False,
-    ) -> dict[str, Any]:
+    ) -> BaseModelT:
         """
-        Extract structured data from the given input.
+        Extract structured data.
 
         Parameters
         ----------
         args
-            The input to extract data from.
+            The input to send to the chatbot. This is typically the text you
+            want to extract data from, but it can be omitted if the data is
+            obvious from the existing conversation.
         data_model
             A Pydantic model describing the structure of the data to extract.
         echo
@@ -1024,10 +1084,47 @@
 
         Returns
         -------
-        dict[str, Any]
-            The extracted data.
+        BaseModelT
+            An instance of the provided `data_model` containing the extracted data.
         """
+        dat = self._submit_and_extract_data(
+            *args,
+            data_model=data_model,
+            echo=echo,
+            stream=stream,
+        )
+        return data_model.model_validate(dat)
 
+    def extract_data(
+        self,
+        *args: Content | str,
+        data_model: type[BaseModel],
+        echo: EchoOptions = "none",
+        stream: bool = False,
+    ) -> dict[str, Any]:
+        """
+        Deprecated: use `.chat_structured()` instead.
+        """
+        warnings.warn(
+            "The `extract_data()` method is deprecated and will be removed in a future release. "
+            "Use the `chat_structured()` method instead.",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        return self._submit_and_extract_data(
+            *args,
+            data_model=data_model,
+            echo=echo,
+            stream=stream,
+        )
+
+    def _submit_and_extract_data(
+        self,
+        *args: Content | str,
+        data_model: type[BaseModel],
+        echo: EchoOptions = "none",
+        stream: bool = False,
+    ) -> dict[str, Any]:
         display = self._markdown_display(echo=echo)
 
         response = ChatResponse(
@@ -1046,33 +1143,24 @@
         turn = self.get_last_turn()
         assert turn is not None
 
-        res: list[ContentJson] = []
-        for x in turn.contents:
-            if isinstance(x, ContentJson):
-                res.append(x)
-
-        if len(res) != 1:
-            raise ValueError(
-                f"Data extraction failed: {len(res)} data results received."
-            )
-
-        json = res[0]
-        return json.value
+        return Chat._extract_turn_json(turn)
 
-    async def extract_data_async(
+    async def chat_structured_async(
         self,
         *args: Content | str,
-        data_model: type[BaseModel],
+        data_model: type[BaseModelT],
         echo: EchoOptions = "none",
         stream: bool = False,
-    ) -> dict[str, Any]:
+    ) -> BaseModelT:
         """
         Extract structured data from the given input asynchronously.
 
         Parameters
         ----------
         args
-            The input to extract data from.
+            The input to send to the chatbot. This is typically the text you
+            want to extract data from, but it can be omitted if the data is
+            obvious from the existing conversation.
         data_model
             A Pydantic model describing the structure of the data to extract.
         echo
@@ -1087,10 +1175,47 @@
 
         Returns
         -------
-        dict[str, Any]
-            The extracted data.
+        BaseModelT
+            An instance of the provided `data_model` containing the extracted data.
         """
+        dat = await self._submit_and_extract_data_async(
+            *args,
+            data_model=data_model,
+            echo=echo,
+            stream=stream,
+        )
+        return data_model.model_validate(dat)
 
+    async def extract_data_async(
+        self,
+        *args: Content | str,
+        data_model: type[BaseModel],
+        echo: EchoOptions = "none",
+        stream: bool = False,
+    ) -> dict[str, Any]:
+        """
+        Deprecated: use `.chat_structured_async()` instead.
+        """
+        warnings.warn(
+            "The `extract_data_async()` method is deprecated and will be removed in a future release. "
+            "Use the `chat_structured_async()` method instead.",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        return await self._submit_and_extract_data_async(
+            *args,
+            data_model=data_model,
+            echo=echo,
+            stream=stream,
+        )
+
+    async def _submit_and_extract_data_async(
+        self,
+        *args: Content | str,
+        data_model: type[BaseModel],
+        echo: EchoOptions = "none",
+        stream: bool = False,
+    ) -> dict[str, Any]:
         display = self._markdown_display(echo=echo)
 
         response = ChatResponseAsync(
@@ -1109,6 +1234,10 @@
         turn = self.get_last_turn()
         assert turn is not None
 
+        return Chat._extract_turn_json(turn)
+
+    @staticmethod
+    def _extract_turn_json(turn: Turn) -> dict[str, Any]:
         res: list[ContentJson] = []
         for x in turn.contents:
             if isinstance(x, ContentJson):
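The practical upshot of the `_chat.py` changes is a rename with a return-type change: `extract_data()` returned a plain `dict`, while `chat_structured()` returns a validated model instance. A minimal migration sketch (the `Person` model and input text are illustrative):

```python
from pydantic import BaseModel

from chatlas import ChatOpenAI


class Person(BaseModel):
    name: str
    age: int


chat = ChatOpenAI()

# 0.12.x style: returns a dict, and in 0.13.0 emits a DeprecationWarning
# data = chat.extract_data("John, age 15", data_model=Person)

# 0.13.0 style: returns a validated Person instance
person = chat.chat_structured("John, age 15", data_model=Person)
print(person.name, person.age)
```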
chatlas/_content.py CHANGED
@@ -603,7 +603,7 @@ class ContentJson(Content):
     JSON content
 
     This content type primarily exists to signal structured data extraction
-    (i.e., data extracted via [](`~chatlas.Chat`)'s `.extract_data()` method)
+    (i.e., data extracted via [](`~chatlas.Chat`)'s `.chat_structured()` method)
 
     Parameters
     ----------
@@ -630,7 +630,7 @@ class ContentPDF(Content):
     PDF content
 
     This content type primarily exists to signal PDF data extraction
-    (i.e., data extracted via [](`~chatlas.Chat`)'s `.extract_data()` method)
+    (i.e., data extracted via [](`~chatlas.Chat`)'s `.chat_structured()` method)
 
     Parameters
     ----------
chatlas/_provider.py CHANGED
@@ -3,6 +3,7 @@ from __future__ import annotations
 from abc import ABC, abstractmethod
 from datetime import date
 from typing import (
+    Any,
     AsyncIterable,
     Generic,
     Iterable,
@@ -100,6 +101,16 @@ StandardModelParamNames = Literal[
 ]
 
 
+# Provider-agnostic batch status info
+class BatchStatus(BaseModel):
+    """Status information for a batch job."""
+
+    working: bool
+    n_processing: int
+    n_succeeded: int
+    n_failed: int
+
+
 class Provider(
     ABC,
     Generic[
@@ -261,3 +272,80 @@ class Provider(
 
     @abstractmethod
     def supported_model_params(self) -> set[StandardModelParamNames]: ...
+
+    def has_batch_support(self) -> bool:
+        """
+        Returns whether this provider supports batch processing.
+        Override this method to return True for providers that implement batch methods.
+        """
+        return False
+
+    def batch_submit(
+        self,
+        conversations: list[list[Turn]],
+        data_model: Optional[type[BaseModel]] = None,
+    ) -> dict[str, Any]:
+        """
+        Submit a batch of conversations for processing.
+
+        Args:
+            conversations: List of conversation histories (each is a list of Turns)
+            data_model: Optional structured data model for responses
+
+        Returns:
+            BatchInfo containing batch job information
+        """
+        raise NotImplementedError("This provider does not support batch processing")
+
+    def batch_poll(self, batch: dict[str, Any]) -> dict[str, Any]:
+        """
+        Poll the status of a submitted batch.
+
+        Args:
+            batch: Batch information returned from batch_submit
+
+        Returns:
+            Updated batch information
+        """
+        raise NotImplementedError("This provider does not support batch processing")
+
+    def batch_status(self, batch: dict[str, Any]) -> BatchStatus:
+        """
+        Get the status of a batch.
+
+        Args:
+            batch: Batch information
+
+        Returns:
+            BatchStatus with processing status information
+        """
+        raise NotImplementedError("This provider does not support batch processing")
+
+    def batch_retrieve(self, batch: dict[str, Any]) -> list[dict[str, Any]]:
+        """
+        Retrieve results from a completed batch.
+
+        Args:
+            batch: Batch information
+
+        Returns:
+            List of BatchResult objects, one for each request in the batch
+        """
+        raise NotImplementedError("This provider does not support batch processing")
+
+    def batch_result_turn(
+        self,
+        result: dict[str, Any],
+        has_data_model: bool = False,
+    ) -> Turn | None:
+        """
+        Convert a batch result to a Turn.
+
+        Args:
+            result: Individual BatchResult from batch_retrieve
+            has_data_model: Whether the request used a structured data model
+
+        Returns:
+            Turn object or None if the result was an error
+        """
+        raise NotImplementedError("This provider does not support batch processing")
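A provider opts in to batching by overriding `has_batch_support()` plus the five `batch_*` hooks above. The following skeleton shows just that surface area; it is a schematic (the class name is hypothetical, the other abstract `Provider` methods are omitted, and the bodies are placeholders), and it imports from the private `chatlas._provider` module shown in this diff:

```python
from typing import Any, Optional

from pydantic import BaseModel

from chatlas._provider import BatchStatus, Provider
from chatlas._turn import Turn


class MyBatchProvider(Provider):
    """Hypothetical provider sketching the batch hooks added in 0.13.0."""

    def has_batch_support(self) -> bool:
        return True

    def batch_submit(self, conversations, data_model=None) -> dict[str, Any]:
        # Send all conversations to the backend; return its job descriptor.
        ...

    def batch_poll(self, batch: dict[str, Any]) -> dict[str, Any]:
        # Refresh the job descriptor from the backend.
        ...

    def batch_status(self, batch: dict[str, Any]) -> BatchStatus:
        # Translate the descriptor into the provider-agnostic BatchStatus.
        ...

    def batch_retrieve(self, batch: dict[str, Any]) -> list[dict[str, Any]]:
        # Download one result dict per request, in submission order.
        ...

    def batch_result_turn(self, result, has_data_model: bool = False) -> Turn | None:
        # Convert a single result into a Turn (None on failure).
        ...
```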
chatlas/_provider_anthropic.py CHANGED
@@ -1,10 +1,12 @@
 from __future__ import annotations
 
 import base64
+import re
 import warnings
 from typing import TYPE_CHECKING, Any, Literal, Optional, Union, cast, overload
 
 import orjson
+from openai.types.chat import ChatCompletionToolParam
 from pydantic import BaseModel
 
 from ._chat import Chat
@@ -21,7 +23,13 @@ from ._content import (
     ContentToolResultResource,
 )
 from ._logging import log_model_default
-from ._provider import ModelInfo, Provider, StandardModelParamNames, StandardModelParams
+from ._provider import (
+    BatchStatus,
+    ModelInfo,
+    Provider,
+    StandardModelParamNames,
+    StandardModelParams,
+)
 from ._tokens import get_token_pricing, tokens_log
 from ._tools import Tool, basemodel_to_param_schema
 from ._turn import Turn, user_turn
@@ -38,11 +46,12 @@ if TYPE_CHECKING:
     )
     from anthropic.types.document_block_param import DocumentBlockParam
     from anthropic.types.image_block_param import ImageBlockParam
+    from anthropic.types.message_create_params import MessageCreateParamsNonStreaming
+    from anthropic.types.messages.batch_create_params import Request as BatchRequest
     from anthropic.types.model_param import ModelParam
     from anthropic.types.text_block_param import TextBlockParam
     from anthropic.types.tool_result_block_param import ToolResultBlockParam
     from anthropic.types.tool_use_block_param import ToolUseBlockParam
-    from openai.types.chat import ChatCompletionToolParam
 
     from .types.anthropic import ChatBedrockClientArgs, ChatClientArgs, SubmitInputArgs
 
@@ -631,6 +640,101 @@ class AnthropicProvider(
             completion=completion,
         )
 
+    def has_batch_support(self) -> bool:
+        return True
+
+    def batch_submit(
+        self,
+        conversations: list[list[Turn]],
+        data_model: Optional[type[BaseModel]] = None,
+    ):
+        from anthropic import NotGiven
+
+        requests: list["BatchRequest"] = []
+
+        for i, turns in enumerate(conversations):
+            kwargs = self._chat_perform_args(
+                stream=False,
+                turns=turns,
+                tools={},
+                data_model=data_model,
+            )
+
+            params: "MessageCreateParamsNonStreaming" = {
+                "messages": kwargs.get("messages", {}),
+                "model": self.model,
+                "max_tokens": kwargs.get("max_tokens", 4096),
+            }
+
+            # If data_model, tools/tool_choice should be present
+            tools = kwargs.get("tools")
+            tool_choice = kwargs.get("tool_choice")
+            if tools and not isinstance(tools, NotGiven):
+                params["tools"] = tools
+            if tool_choice and not isinstance(tool_choice, NotGiven):
+                params["tool_choice"] = tool_choice
+
+            requests.append({"custom_id": f"request-{i}", "params": params})
+
+        batch = self._client.messages.batches.create(requests=requests)
+        return batch.model_dump()
+
+    def batch_poll(self, batch):
+        from anthropic.types.messages import MessageBatch
+
+        batch = MessageBatch.model_validate(batch)
+        b = self._client.messages.batches.retrieve(batch.id)
+        return b.model_dump()
+
+    def batch_status(self, batch) -> "BatchStatus":
+        from anthropic.types.messages import MessageBatch
+
+        batch = MessageBatch.model_validate(batch)
+        status = batch.processing_status
+        counts = batch.request_counts
+
+        return BatchStatus(
+            working=status != "ended",
+            n_processing=counts.processing,
+            n_succeeded=counts.succeeded,
+            n_failed=counts.errored + counts.canceled + counts.expired,
+        )
+
+    # https://docs.anthropic.com/en/api/retrieving-message-batch-results
+    def batch_retrieve(self, batch):
+        from anthropic.types.messages import MessageBatch
+
+        batch = MessageBatch.model_validate(batch)
+        if batch.results_url is None:
+            raise ValueError("Batch has no results URL")
+
+        results: list[dict[str, Any]] = []
+        for res in self._client.messages.batches.results(batch.id):
+            results.append(res.model_dump())
+
+        # Sort by custom_id to maintain order
+        def extract_id(x: str):
+            match = re.search(r"-(\d+)$", x)
+            return int(match.group(1)) if match else 0
+
+        results.sort(key=lambda x: extract_id(x.get("custom_id", "")))
+
+        return results
+
+    def batch_result_turn(self, result, has_data_model: bool = False) -> Turn | None:
+        from anthropic.types.messages.message_batch_individual_response import (
+            MessageBatchIndividualResponse,
+        )
+
+        result = MessageBatchIndividualResponse.model_validate(result)
+        if result.result.type != "succeeded":
+            # TODO: offer advice on what to do?
+            warnings.warn(f"Batch request didn't succeed: {result.result}")
+            return None
+
+        message = result.result.message
+        return self._as_turn(message, has_data_model)
+
 
 def ChatBedrockAnthropic(
     *,
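Because both providers implement the same hooks behind the provider-agnostic `batch_*` functions, the user-facing calls are identical across backends. A short sketch with Anthropic (prompts and path are illustrative; an Anthropic key is assumed):

```python
from chatlas import ChatAnthropic, batch_chat_text

chat = ChatAnthropic()
answers = batch_chat_text(
    chat,
    ["What's the capital of Spain?", "What's the capital of Japan?"],
    path="capitals-anthropic.json",
)
print(answers)
```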
chatlas/_provider_openai.py CHANGED
@@ -1,11 +1,18 @@
 from __future__ import annotations
 
 import base64
+import json
+import os
+import re
+import tempfile
+import warnings
 from datetime import datetime
 from typing import TYPE_CHECKING, Any, Literal, Optional, cast, overload
 
 import orjson
 from openai import AsyncAzureOpenAI, AsyncOpenAI, AzureOpenAI, OpenAI
+from openai.types.batch import Batch
+from openai.types.chat import ChatCompletion, ChatCompletionChunk
 from pydantic import BaseModel
 
 from ._chat import Chat
@@ -24,18 +31,20 @@ from ._content import (
 )
 from ._logging import log_model_default
 from ._merge import merge_dicts
-from ._provider import ModelInfo, Provider, StandardModelParamNames, StandardModelParams
+from ._provider import (
+    BatchStatus,
+    ModelInfo,
+    Provider,
+    StandardModelParamNames,
+    StandardModelParams,
+)
 from ._tokens import get_token_pricing, tokens_log
 from ._tools import Tool, basemodel_to_param_schema
 from ._turn import Turn, user_turn
 from ._utils import MISSING, MISSING_TYPE, is_testing, split_http_client_kwargs
 
 if TYPE_CHECKING:
-    from openai.types.chat import (
-        ChatCompletion,
-        ChatCompletionChunk,
-        ChatCompletionMessageParam,
-    )
+    from openai.types.chat import ChatCompletionMessageParam
     from openai.types.chat.chat_completion_assistant_message_param import (
         ContentArrayOfContentPart,
     )
@@ -45,10 +54,6 @@ if TYPE_CHECKING:
     from openai.types.chat_model import ChatModel
 
     from .types.openai import ChatAzureClientArgs, ChatClientArgs, SubmitInputArgs
-else:
-    ChatCompletion = object
-    ChatCompletionChunk = object
-
 
 # The dictionary form of ChatCompletion (TODO: stronger typing)?
 ChatCompletionDict = dict[str, Any]
@@ -171,6 +176,21 @@ def ChatOpenAI(
     )
 
 
+# Seems there is no native typing support for `files.content()` results
+# so mock them based on the docs here
+# https://platform.openai.com/docs/guides/batch#5-retrieve-the-results
+class BatchResult(BaseModel):
+    id: str
+    custom_id: str
+    response: BatchResultResponse
+
+
+class BatchResultResponse(BaseModel):
+    status_code: int
+    request_id: str
+    body: ChatCompletionDict
+
+
 class OpenAIProvider(
     Provider[ChatCompletion, ChatCompletionChunk, ChatCompletionDict, "SubmitInputArgs"]
 ):
@@ -353,8 +373,6 @@ class OpenAIProvider(
         return merge_dicts(completion, chunkd)
 
     def stream_turn(self, completion, has_data_model) -> Turn:
-        from openai.types.chat import ChatCompletion
-
         delta = completion["choices"][0].pop("delta")  # type: ignore
         completion["choices"][0]["message"] = delta  # type: ignore
         completion = ChatCompletion.construct(**completion)
@@ -662,6 +680,119 @@ class OpenAIProvider(
             "stop_sequences",
         }
 
+    def has_batch_support(self) -> bool:
+        return True
+
+    def batch_submit(
+        self,
+        conversations: list[list[Turn]],
+        data_model: Optional[type[BaseModel]] = None,
+    ):
+        # First put the requests in a file
+        # https://platform.openai.com/docs/api-reference/batch/request-input
+        # https://platform.openai.com/docs/api-reference/batch
+        with tempfile.NamedTemporaryFile(mode="w", suffix=".jsonl", delete=False) as f:
+            temp_path = f.name
+
+            for i, turns in enumerate(conversations):
+                kwargs = self._chat_perform_args(
+                    stream=False,
+                    turns=turns,
+                    tools={},
+                    data_model=data_model,
+                )
+
+                body = {
+                    "messages": kwargs.get("messages", []),
+                    "model": self.model,
+                }
+
+                if "response_format" in kwargs:
+                    body["response_format"] = kwargs["response_format"]
+
+                request = {
+                    "custom_id": f"request-{i}",
+                    "method": "POST",
+                    "url": "/v1/chat/completions",
+                    "body": body,
+                }
+
+                f.write(orjson.dumps(request).decode() + "\n")
+
+        try:
+            with open(temp_path, "rb") as f:
+                file_response = self._client.files.create(file=f, purpose="batch")
+
+            batch = self._client.batches.create(
+                input_file_id=file_response.id,
+                endpoint="/v1/chat/completions",
+                completion_window="24h",
+            )
+
+            return batch.model_dump()
+        finally:
+            os.unlink(temp_path)
+
+    def batch_poll(self, batch):
+        batch = Batch.model_validate(batch)
+        b = self._client.batches.retrieve(batch.id)
+        return b.model_dump()
+
+    def batch_status(self, batch):
+        batch = Batch.model_validate(batch)
+        counts = batch.request_counts
+        total, completed, failed = 0, 0, 0
+        if counts is not None:
+            total = counts.total
+            completed = counts.completed
+            failed = counts.failed
+
+        return BatchStatus(
+            working=batch.status not in ["completed", "failed", "cancelled"],
+            n_processing=total - completed - failed,
+            n_succeeded=completed,
+            n_failed=failed,
+        )
+
+    def batch_retrieve(self, batch):
+        batch = Batch.model_validate(batch)
+        if batch.output_file_id is None:
+            raise ValueError("Batch has no output file")
+
+        # Download and parse JSONL results
+        response = self._client.files.content(batch.output_file_id)
+        results: list[dict[str, Any]] = []
+        for line in response.text.splitlines():
+            results.append(json.loads(line))
+
+        # Sort by custom_id to maintain order
+        def extract_id(x: str):
+            match = re.search(r"-(\d+)$", x)
+            return int(match.group(1)) if match else 0
+
+        results.sort(key=lambda x: int(extract_id(x.get("custom_id", ""))))
+
+        return results
+
+    def batch_result_turn(
+        self,
+        result,
+        has_data_model: bool = False,
+    ) -> Turn | None:
+        response = BatchResult.model_validate(result).response
+        if response.status_code != 200:
+            # TODO: offer advice on what to do?
+            warnings.warn(f"Batch request failed: {response.body}")
+            return None
+
+        completion = ChatCompletion.construct(**response.body)
+        return self._as_turn(completion, has_data_model)
+
+
+# -------------------------------------------------------------------------------------
+# Azure OpenAI Chat
+# -------------------------------------------------------------------------------------
+
 
 def ChatAzureOpenAI(
     *,
chatlas/_version.py CHANGED
@@ -28,7 +28,7 @@ version_tuple: VERSION_TUPLE
 commit_id: COMMIT_ID
 __commit_id__: COMMIT_ID
 
-__version__ = version = '0.12.0'
-__version_tuple__ = version_tuple = (0, 12, 0)
+__version__ = version = '0.13.0'
+__version_tuple__ = version_tuple = (0, 13, 0)
 
 __commit_id__ = commit_id = None
chatlas-0.12.0.dist-info/METADATA → chatlas-0.13.0.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: chatlas
-Version: 0.12.0
+Version: 0.13.0
 Summary: A simple and consistent interface for chatting with LLMs
 Project-URL: Homepage, https://posit-dev.github.io/chatlas
 Project-URL: Documentation, https://posit-dev.github.io/chatlas
@@ -44,6 +44,7 @@ Requires-Dist: pillow; extra == 'dev'
 Requires-Dist: python-dotenv; extra == 'dev'
 Requires-Dist: ruff>=0.6.5; extra == 'dev'
 Requires-Dist: shiny; extra == 'dev'
+Requires-Dist: shinychat; extra == 'dev'
 Requires-Dist: snowflake-ml-python>=1.8.4; extra == 'dev'
 Requires-Dist: tenacity; extra == 'dev'
 Requires-Dist: tiktoken; extra == 'dev'
chatlas-0.12.0.dist-info/RECORD → chatlas-0.13.0.dist-info/RECORD RENAMED
@@ -1,8 +1,10 @@
-chatlas/__init__.py,sha256=CyViGMiz50clcVu3vpZgOq_qP4hmoYGOlcHKlRPcLJo,2416
+chatlas/__init__.py,sha256=M3zK10LguXW6bybDqatRV9SXd0y6axYu5QzENjbOVd0,2633
 chatlas/_auto.py,sha256=aeMN2_EM-xK-Yx5JaCuwYRZZ29eqn_0oM7QR5zayrec,8912
+chatlas/_batch_chat.py,sha256=1KkHENB-l7VmhCizhdvbJO5WQmRntQS6EvcSJ6VLgvM,5546
+chatlas/_batch_job.py,sha256=2__JIOo_JpcQyAAzO07r6eS4urpAxEc9m7_zsjFieQw,7359
 chatlas/_callbacks.py,sha256=3RpPaOQonTqScjXbaShgKJ1Rc-YxzWerxKRBjVssFnc,1838
-chatlas/_chat.py,sha256=pZOmlg0rqrzn7bi5QECAraG3ZA1MJPS0Ovk96wvgAGg,85712
-chatlas/_content.py,sha256=xi00PQgYi0hC2mTROOGVZiKfTkvlG9dud213iRQcwnY,22761
+chatlas/_chat.py,sha256=oESXNVzDCJ1DpV_fBRgwK6N_fs_EoJGrlez5dJjqx5c,90664
+chatlas/_content.py,sha256=BdJQ5G5onT9Cf1tNFeXsCWWTD2zSIjWz50FYIk6_DDI,22767
 chatlas/_content_image.py,sha256=EUK6wAint-JatLsiwvaPDu4D3W-NcIsDCkzABkXgfDg,8304
 chatlas/_content_pdf.py,sha256=cffeuJxzhUDukQ-Srkmpy62M8X12skYpU_FVq-Wvya4,2420
 chatlas/_display.py,sha256=wyQzSc6z1VqrJfkTLkw1wQcti9s1Pr4qT8UxFJESn4U,4664
@@ -11,8 +13,8 @@ chatlas/_live_render.py,sha256=UMZltE35LxziDKPMEeDwQ9meZ95SeqwhJi7j-y9pcro,4004
 chatlas/_logging.py,sha256=weKvXZDIZ88X7X61ruXM_S0AAhQ5mgiW9dR-km8x7Mg,3324
 chatlas/_mcp_manager.py,sha256=smMXeKZzP90MrlCdnTHMyo7AWHwl7J2jkU8dKSlnEsQ,10237
 chatlas/_merge.py,sha256=SGj_BetgA7gaOqSBKOhYmW3CYeQKTEehFrXvx3y4OYE,3924
-chatlas/_provider.py,sha256=k0rJ2uzGDacXVJZZVoLlySNCSFOjYbOC7k_VUT7j_Ms,6453
-chatlas/_provider_anthropic.py,sha256=cOBkAEj6gyl0NGdLk3QvI6pruZZ2fUsXJZAN2i3_j3k,27394
+chatlas/_provider.py,sha256=-5Oyq8tehHJtbBWQUyFUvdqTqZNUcOq2pO5qfAw5oQo,9057
+chatlas/_provider_anthropic.py,sha256=sPPEaDObGuY7JDqU533wlUDA-HaX3sumYWaD3kdG4nE,30964
 chatlas/_provider_cloudflare.py,sha256=vFbqgQPmosopJa9qsVxTkjPn4vYC_wOlgqa6_QmwTho,5227
 chatlas/_provider_databricks.py,sha256=JIOTm0HMe0qVAt8eS0WgGKugBwBdmL80JHLFH59ongU,4850
 chatlas/_provider_deepseek.py,sha256=6nPtPSo-Po6sD4i8PZJHuI5T2oATpLi5djXFGdlserk,4906
@@ -22,7 +24,7 @@ chatlas/_provider_groq.py,sha256=XB2JDyuF95CcSbNkgk7JHcuy9KCW7hxTVaONDSjK8U8,367
 chatlas/_provider_huggingface.py,sha256=feJ416X0UdtyoeHZbkgolFf62D7zxNwM7i_X3NYsQQw,4669
 chatlas/_provider_mistral.py,sha256=-p4rut0KCn-PrwnOlvr6lK8-K-OXvc5H9vTX-rCzUkk,5309
 chatlas/_provider_ollama.py,sha256=jFAu4v0NLUwdG_W_nKagBHOah0VKl_auTsgcYinP9rI,4119
-chatlas/_provider_openai.py,sha256=SwzEBwA491HOL6YvEI5soDQIVXnSLpwMmU-0DC8k7QA,26422
+chatlas/_provider_openai.py,sha256=f8ijdXMMHy17VcuA2ImMpaYvaiKjHzcnTdR8LH1xE40,30654
 chatlas/_provider_openrouter.py,sha256=9sCXvROVIiUdwfEbkVA-15_kc6ouFUP2uV2MmUe2rFk,4385
 chatlas/_provider_perplexity.py,sha256=5q_LsUCJQ5w-jRveLDMPvZTX-GU2TVURp65mUMyDh10,4248
 chatlas/_provider_portkey.py,sha256=6wKrLZmKVxOqyO6P3HBgWqPe7y1N8une_1wp0aJq7pU,4087
@@ -33,7 +35,7 @@ chatlas/_tools.py,sha256=8rhGOsEviBJXk5Qb-a1RRb_C-DE2T3DOeN6IhblkxqI,12408
 chatlas/_turn.py,sha256=yK7alUxeP8d2iBc7amyz20BtEqcpvX6BCwWZsnlQ5R4,4515
 chatlas/_typing_extensions.py,sha256=BXmbhywjm5ssmyVLGwyP_5TWZMAobzrrgZLYkB6_etE,1076
 chatlas/_utils.py,sha256=Kku2fa1mvTYCr5D28VxE6-fwfy2e2doCi-eKQkLEg4Y,4686
-chatlas/_version.py,sha256=VDAgmYWykomGcTucvPJWNS0ePk26QojGHvaE9chtgGc,706
+chatlas/_version.py,sha256=2OwVfU6dYXS16K64p8xtyqfPM0bDW3r_7GeF3HzefhA,706
 chatlas/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 chatlas/data/prices.json,sha256=X6qALp-dWc4nfus9lIqHoKzk3PZDPHTLoxxcN2m6fXc,62645
 chatlas/types/__init__.py,sha256=1n0xrJ7TRIKsZ2z06FLFgGqfKMFtXSIxxPvJ2j0hvPw,850
@@ -48,7 +50,7 @@ chatlas/types/openai/__init__.py,sha256=Q2RAr1bSH1nHsxICK05nAmKmxdhKmhbBkWD_XHiV
 chatlas/types/openai/_client.py,sha256=mAoQftcJIp0ssIhS8q3TIW9u6zTRNtYDmpZJO8L0mC0,849
 chatlas/types/openai/_client_azure.py,sha256=Tf_PFRl0QAj4Nk5CD0ZNIO-SRsT39bVkEJlUTry1fb8,960
 chatlas/types/openai/_submit.py,sha256=EDtIUFcNIJ5QAt0wVyBXvUshK8FA9e86wcZDQ_HUOYs,7829
-chatlas-0.12.0.dist-info/METADATA,sha256=Xd9L5THh_2C6ckT6Khvn2B6js3JhT8WyCjlAp1JzOOc,5594
-chatlas-0.12.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-chatlas-0.12.0.dist-info/licenses/LICENSE,sha256=zyuGzPOC7CcbOaBHsQ3UEyKYRO56KDUkor0OA4LqqDg,1081
-chatlas-0.12.0.dist-info/RECORD,,
+chatlas-0.13.0.dist-info/METADATA,sha256=bpXFGxJZFx1hIkGuIWfC-MoHLhT3cDzsotmuhtFyktY,5635
+chatlas-0.13.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+chatlas-0.13.0.dist-info/licenses/LICENSE,sha256=zyuGzPOC7CcbOaBHsQ3UEyKYRO56KDUkor0OA4LqqDg,1081
+chatlas-0.13.0.dist-info/RECORD,,