chatlas 0.3.0__py3-none-any.whl → 0.4.0__py3-none-any.whl

This diff shows the changes between publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the package contents as they appear in their respective public registries.

Potentially problematic release.


This version of chatlas might be problematic. Click here for more details.

chatlas/__init__.py CHANGED
@@ -3,7 +3,7 @@ from ._anthropic import ChatAnthropic, ChatBedrockAnthropic
3
3
  from ._chat import Chat
4
4
  from ._content_image import content_image_file, content_image_plot, content_image_url
5
5
  from ._github import ChatGithub
6
- from ._google import ChatGoogle
6
+ from ._google import ChatGoogle, ChatVertex
7
7
  from ._groq import ChatGroq
8
8
  from ._interpolate import interpolate, interpolate_file
9
9
  from ._ollama import ChatOllama
@@ -24,6 +24,7 @@ __all__ = (
24
24
  "ChatOpenAI",
25
25
  "ChatAzureOpenAI",
26
26
  "ChatPerplexity",
27
+ "ChatVertex",
27
28
  "Chat",
28
29
  "content_image_file",
29
30
  "content_image_plot",
chatlas/_anthropic.py CHANGED
@@ -311,7 +311,8 @@ class AnthropicProvider(Provider[Message, RawMessageStreamEvent, Message]):
311
311
  if stream:
312
312
  stream = False
313
313
  warnings.warn(
314
- "Anthropic does not support structured data extraction in streaming mode."
314
+ "Anthropic does not support structured data extraction in streaming mode.",
315
+ stacklevel=2,
315
316
  )
316
317
 
317
318
  kwargs_full: "SubmitInputArgs" = {
@@ -371,10 +372,7 @@ class AnthropicProvider(Provider[Message, RawMessageStreamEvent, Message]):
371
372
 
372
373
  return completion
373
374
 
374
- def stream_turn(self, completion, has_data_model, stream) -> Turn:
375
- return self._as_turn(completion, has_data_model)
376
-
377
- async def stream_turn_async(self, completion, has_data_model, stream) -> Turn:
375
+ def stream_turn(self, completion, has_data_model) -> Turn:
378
376
  return self._as_turn(completion, has_data_model)
379
377
 
380
378
  def value_turn(self, completion, has_data_model) -> Turn:
chatlas/_chat.py CHANGED
@@ -1,6 +1,8 @@
1
1
  from __future__ import annotations
2
2
 
3
+ import inspect
3
4
  import os
5
+ import sys
4
6
  from pathlib import Path
5
7
  from threading import Thread
6
8
  from typing import (
@@ -40,7 +42,7 @@ from ._provider import Provider
40
42
  from ._tools import Tool
41
43
  from ._turn import Turn, user_turn
42
44
  from ._typing_extensions import TypedDict
43
- from ._utils import html_escape
45
+ from ._utils import html_escape, wrap_async
44
46
 
45
47
 
46
48
  class AnyTypeDict(TypedDict, total=False):
@@ -388,6 +390,7 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
388
390
  port: int = 0,
389
391
  launch_browser: bool = True,
390
392
  bg_thread: Optional[bool] = None,
393
+ echo: Optional[Literal["text", "all", "none"]] = None,
391
394
  kwargs: Optional[SubmitInputArgsT] = None,
392
395
  ):
393
396
  """
@@ -404,6 +407,8 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
404
407
  bg_thread
405
408
  Whether to run the app in a background thread. If `None`, the app will
406
409
  run in a background thread if the current environment is a notebook.
410
+ echo
411
+ Whether to echo text content, all content (i.e., tool calls), or no content. Defaults to `"none"` when `stream=True` and `"text"` when `stream=False`.
407
412
  kwargs
408
413
  Additional keyword arguments to pass to the method used for requesting
409
414
  the response.
@@ -438,10 +443,22 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
438
443
  return
439
444
  if stream:
440
445
  await chat.append_message_stream(
441
- self.stream(user_input, kwargs=kwargs)
446
+ await self.stream_async(
447
+ user_input,
448
+ kwargs=kwargs,
449
+ echo=echo or "none",
450
+ )
442
451
  )
443
452
  else:
444
- await chat.append_message(str(self.chat(user_input, kwargs=kwargs)))
453
+ await chat.append_message(
454
+ str(
455
+ self.chat(
456
+ user_input,
457
+ kwargs=kwargs,
458
+ echo=echo or "text",
459
+ )
460
+ )
461
+ )
445
462
 
446
463
  app = App(app_ui, server)
447
464
 
@@ -948,11 +965,11 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
948
965
  is_html = filename.suffix == ".html"
949
966
 
950
967
  # Get contents from each turn
951
- contents = ""
968
+ content_arr: list[str] = []
952
969
  for turn in turns:
953
970
  turn_content = "\n\n".join(
954
971
  [
955
- str(content)
972
+ str(content).strip()
956
973
  for content in turn.contents
957
974
  if include == "all" or isinstance(content, ContentText)
958
975
  ]
@@ -963,7 +980,8 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
963
980
  turn_content = f"<shiny-{msg_type}-message content='{content_attr}'></shiny-{msg_type}-message>"
964
981
  else:
965
982
  turn_content = f"## {turn.role.capitalize()}\n\n{turn_content}"
966
- contents += f"{turn_content}\n\n"
983
+ content_arr.append(turn_content)
984
+ contents = "\n\n".join(content_arr)
967
985
 
968
986
  # Shiny chat message components requires container elements
969
987
  if is_html:
@@ -1093,7 +1111,6 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
1093
1111
  turn = self.provider.stream_turn(
1094
1112
  result,
1095
1113
  has_data_model=data_model is not None,
1096
- stream=response,
1097
1114
  )
1098
1115
 
1099
1116
  if echo == "all":
@@ -1154,10 +1171,9 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
1154
1171
  yield text
1155
1172
  result = self.provider.stream_merge_chunks(result, chunk)
1156
1173
 
1157
- turn = await self.provider.stream_turn_async(
1174
+ turn = self.provider.stream_turn(
1158
1175
  result,
1159
1176
  has_data_model=data_model is not None,
1160
- stream=response,
1161
1177
  )
1162
1178
 
1163
1179
  if echo == "all":
@@ -1210,7 +1226,12 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
1210
1226
  for x in turn.contents:
1211
1227
  if isinstance(x, ContentToolRequest):
1212
1228
  tool_def = self._tools.get(x.name, None)
1213
- func = tool_def.func if tool_def is not None else None
1229
+ func = None
1230
+ if tool_def:
1231
+ if tool_def._is_async:
1232
+ func = tool_def.func
1233
+ else:
1234
+ func = wrap_async(tool_def.func)
1214
1235
  results.append(await self._invoke_tool_async(func, x.arguments, x.id))
1215
1236
 
1216
1237
  if not results:
@@ -1225,7 +1246,9 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
1225
1246
  id_: str,
1226
1247
  ) -> ContentToolResult:
1227
1248
  if func is None:
1228
- return ContentToolResult(id_, None, "Unknown tool")
1249
+ return ContentToolResult(id_, value=None, error="Unknown tool")
1250
+
1251
+ name = func.__name__
1229
1252
 
1230
1253
  try:
1231
1254
  if isinstance(arguments, dict):
@@ -1233,10 +1256,10 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
1233
1256
  else:
1234
1257
  result = func(arguments)
1235
1258
 
1236
- return ContentToolResult(id_, result, None)
1259
+ return ContentToolResult(id_, value=result, error=None, name=name)
1237
1260
  except Exception as e:
1238
- log_tool_error(func.__name__, str(arguments), e)
1239
- return ContentToolResult(id_, None, str(e))
1261
+ log_tool_error(name, str(arguments), e)
1262
+ return ContentToolResult(id_, value=None, error=str(e), name=name)
1240
1263
 
1241
1264
  @staticmethod
1242
1265
  async def _invoke_tool_async(
@@ -1245,7 +1268,9 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
1245
1268
  id_: str,
1246
1269
  ) -> ContentToolResult:
1247
1270
  if func is None:
1248
- return ContentToolResult(id_, None, "Unknown tool")
1271
+ return ContentToolResult(id_, value=None, error="Unknown tool")
1272
+
1273
+ name = func.__name__
1249
1274
 
1250
1275
  try:
1251
1276
  if isinstance(arguments, dict):
@@ -1253,10 +1278,10 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
1253
1278
  else:
1254
1279
  result = await func(arguments)
1255
1280
 
1256
- return ContentToolResult(id_, result, None)
1281
+ return ContentToolResult(id_, value=result, error=None, name=name)
1257
1282
  except Exception as e:
1258
1283
  log_tool_error(func.__name__, str(arguments), e)
1259
- return ContentToolResult(id_, None, str(e))
1284
+ return ContentToolResult(id_, value=None, error=str(e), name=name)
1260
1285
 
1261
1286
  def _markdown_display(
1262
1287
  self, echo: Literal["text", "all", "none"]
@@ -1373,7 +1398,7 @@ class ChatResponse:
1373
1398
 
1374
1399
  @property
1375
1400
  def consumed(self) -> bool:
1376
- return self._generator.gi_frame is None
1401
+ return inspect.getgeneratorstate(self._generator) == inspect.GEN_CLOSED
1377
1402
 
1378
1403
  def __str__(self) -> str:
1379
1404
  return self.get_content()
@@ -1423,7 +1448,11 @@ class ChatResponseAsync:
1423
1448
 
1424
1449
  @property
1425
1450
  def consumed(self) -> bool:
1426
- return self._generator.ag_frame is None
1451
+ if sys.version_info < (3, 12):
1452
+ raise NotImplementedError(
1453
+ "Checking for consumed state is only supported in Python 3.12+"
1454
+ )
1455
+ return inspect.getasyncgenstate(self._generator) == inspect.AGEN_CLOSED
1427
1456
 
1428
1457
 
1429
1458
  # ----------------------------------------------------------------------------
chatlas/_content.py CHANGED
@@ -2,6 +2,7 @@ from __future__ import annotations
2
2
 
3
3
  import json
4
4
  from dataclasses import dataclass
5
+ from pprint import pformat
5
6
  from typing import Any, Literal, Optional
6
7
 
7
8
  ImageContentTypes = Literal[
@@ -154,7 +155,7 @@ class ContentToolRequest(Content):
154
155
  args_str = self._arguments_str()
155
156
  func_call = f"{self.name}({args_str})"
156
157
  comment = f"# tool request ({self.id})"
157
- return f"\n```python\n{comment}\n{func_call}\n```\n"
158
+ return f"```python\n{comment}\n{func_call}\n```\n"
158
159
 
159
160
  def _repr_markdown_(self):
160
161
  return self.__str__()
@@ -187,18 +188,31 @@ class ContentToolResult(Content):
187
188
  The unique identifier of the tool request.
188
189
  value
189
190
  The value returned by the tool/function.
191
+ name
192
+ The name of the tool/function that was called.
190
193
  error
191
194
  An error message if the tool/function call failed.
192
195
  """
193
196
 
194
197
  id: str
195
198
  value: Any = None
199
+ name: Optional[str] = None
196
200
  error: Optional[str] = None
197
201
 
202
+ def _get_value_and_language(self) -> tuple[str, str]:
203
+ if self.error:
204
+ return f"Tool calling failed with error: '{self.error}'", ""
205
+ try:
206
+ json_val = json.loads(self.value)
207
+ return pformat(json_val, indent=2, sort_dicts=False), "python"
208
+ except: # noqa: E722
209
+ return str(self.value), ""
210
+
198
211
  def __str__(self):
199
212
  comment = f"# tool result ({self.id})"
200
- val = self.get_final_value()
201
- return f"""\n```python\n{comment}\n"{val}"\n```\n"""
213
+ value, language = self._get_value_and_language()
214
+
215
+ return f"""```{language}\n{comment}\n{value}\n```"""
202
216
 
203
217
  def _repr_markdown_(self):
204
218
  return self.__str__()
@@ -211,9 +225,8 @@ class ContentToolResult(Content):
211
225
  return res + ">"
212
226
 
213
227
  def get_final_value(self) -> str:
214
- if self.error:
215
- return f"Tool calling failed with error: '{self.error}'"
216
- return str(self.value)
228
+ value, _language = self._get_value_and_language()
229
+ return value
217
230
 
218
231
 
219
232
  @dataclass
@@ -236,7 +249,7 @@ class ContentJson(Content):
236
249
  return json.dumps(self.value, indent=2)
237
250
 
238
251
  def _repr_markdown_(self):
239
- return f"""\n```json\n{self.__str__()}\n```\n"""
252
+ return f"""```json\n{self.__str__()}\n```"""
240
253
 
241
254
  def __repr__(self, indent: int = 0):
242
255
  return " " * indent + f"<ContentJson value={self.value}>"