chatlas 0.7.1__py3-none-any.whl → 0.8.1__py3-none-any.whl

This diff compares the contents of publicly available package versions as released to their respective public registries. It is provided for informational purposes only.


chatlas/__init__.py CHANGED
@@ -16,7 +16,7 @@ from ._perplexity import ChatPerplexity
 from ._provider import Provider
 from ._snowflake import ChatSnowflake
 from ._tokens import token_usage
-from ._tools import Tool
+from ._tools import Tool, ToolRejectError
 from ._turn import Turn
 
 try:
@@ -51,6 +51,7 @@ __all__ = (
     "Provider",
     "token_usage",
     "Tool",
+    "ToolRejectError",
     "Turn",
     "types",
 )
chatlas/_anthropic.py CHANGED
@@ -451,10 +451,7 @@ class AnthropicProvider(Provider[Message, RawMessageStreamEvent, Message]):
     @staticmethod
     def _as_content_block(content: Content) -> "ContentBlockParam":
         if isinstance(content, ContentText):
-            text = content.text
-            if text == "" or text.isspace():
-                text = "[empty string]"
-            return {"type": "text", "text": text}
+            return {"text": content.text, "type": "text"}
         elif isinstance(content, ContentJson):
             return {"text": "<structured data/>", "type": "text"}
         elif isinstance(content, ContentPDF):
chatlas/_callbacks.py ADDED
@@ -0,0 +1,56 @@
+from collections import OrderedDict
+from typing import Any, Callable
+
+from ._utils import is_async_callable
+
+
+class CallbackManager:
+    def __init__(self) -> None:
+        self._callbacks: dict[str, Callable[..., Any]] = OrderedDict()
+        self._id: int = 1
+
+    def add(self, callback: Callable[..., Any]) -> Callable[[], None]:
+        callback_id = self._next_id()
+        self._callbacks[callback_id] = callback
+
+        def _rm_callback() -> None:
+            self._callbacks.pop(callback_id, None)
+
+        return _rm_callback
+
+    def invoke(self, *args: Any, **kwargs: Any) -> None:
+        if not self._callbacks:
+            return
+
+        # Invoke in reverse insertion order
+        for callback_id in reversed(list(self._callbacks.keys())):
+            callback = self._callbacks[callback_id]
+            if is_async_callable(callback):
+                raise RuntimeError(
+                    "Can't use async callbacks with `.chat()`/`.stream()`. "
+                    "Async callbacks can only be used with `.chat_async()`/`.stream_async()`."
+                )
+            callback(*args, **kwargs)
+
+    async def invoke_async(self, *args: Any, **kwargs: Any) -> None:
+        if not self._callbacks:
+            return
+
+        # Invoke in reverse insertion order
+        for callback_id in reversed(list(self._callbacks.keys())):
+            callback = self._callbacks[callback_id]
+            if is_async_callable(callback):
+                await callback(*args, **kwargs)
+            else:
+                callback(*args, **kwargs)
+
+    def count(self) -> int:
+        return len(self._callbacks)
+
+    def get_callbacks(self) -> list[Callable[..., Any]]:
+        return list(self._callbacks.values())
+
+    def _next_id(self) -> str:
+        current_id = self._id
+        self._id += 1
+        return str(current_id)
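For context, a minimal sketch of how this new (internal) manager behaves, based only on the code above; the print-based callbacks are hypothetical and the import path is a private module:

```python
# Sketch of CallbackManager behavior (internal API; subject to change).
from chatlas._callbacks import CallbackManager

manager = CallbackManager()

# add() returns a zero-argument function that unregisters the callback.
remove_first = manager.add(lambda event: print("first saw:", event))
manager.add(lambda event: print("second saw:", event))

# Callbacks run in reverse insertion order, so "second" prints before "first".
manager.invoke("some-event")

remove_first()  # detach the first callback
assert manager.count() == 1
```

Note that `invoke()` raises a `RuntimeError` if any registered callback is async, while `invoke_async()` accepts both sync and async callbacks.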
chatlas/_chat.py CHANGED
@@ -1,5 +1,6 @@
 from __future__ import annotations
 
+import copy
 import inspect
 import os
 import sys
@@ -25,6 +26,7 @@ from typing import (
 
 from pydantic import BaseModel
 
+from ._callbacks import CallbackManager
 from ._content import (
     Content,
     ContentJson,
@@ -41,7 +43,7 @@ from ._display import (
 )
 from ._logging import log_tool_error
 from ._provider import Provider
-from ._tools import Tool
+from ._tools import Tool, ToolRejectError
 from ._turn import Turn, user_turn
 from ._typing_extensions import TypedDict
 from ._utils import html_escape, wrap_async
@@ -95,6 +97,8 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
         self.provider = provider
         self._turns: list[Turn] = list(turns or [])
         self._tools: dict[str, Tool] = {}
+        self._on_tool_request_callbacks = CallbackManager()
+        self._on_tool_result_callbacks = CallbackManager()
         self._current_display: Optional[MarkdownDisplay] = None
         self._echo_options: EchoDisplayOptions = {
             "rich_markdown": {},
@@ -414,9 +418,12 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
            Whether to run the app in a background thread. If `None`, the app will
            run in a background thread if the current environment is a notebook.
         echo
-            Whether to echo text content, all content (i.e., tool calls), or no
-            content. Defaults to `"none"` when `stream=True` and `"text"` when
-            `stream=False`.
+            One of the following (defaults to `"none"` when `stream=True` and
+            `"text"` when `stream=False`):
+            - `"text"`: Echo just the text content of the response.
+            - `"output"`: Echo text and tool call content.
+            - `"all"`: Echo both the assistant and user turn.
+            - `"none"`: Do not echo any content.
         content
             Whether to display text content or all content (i.e., tool calls).
         kwargs
@@ -504,8 +511,11 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
        Parameters
        ----------
        echo
-            Whether to echo text content, all content (i.e., tool calls), or no
-            content.
+            One of the following (default is "output"):
+            - `"text"`: Echo just the text content of the response.
+            - `"output"`: Echo text and tool call content.
+            - `"all"`: Echo both the assistant and user turn.
+            - `"none"`: Do not echo any content.
        stream
            Whether to stream the response (i.e., have the response appear in chunks).
        kwargs
@@ -542,8 +552,11 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
        args
            The user input(s) to generate a response from.
        echo
-            Whether to echo text content, all content (i.e., tool calls), or no
-            content.
+            One of the following (default is "output"):
+            - `"text"`: Echo just the text content of the response.
+            - `"output"`: Echo text and tool call content.
+            - `"all"`: Echo both the assistant and user turn.
+            - `"none"`: Do not echo any content.
        stream
            Whether to stream the response (i.e., have the response appear in
            chunks).
@@ -592,8 +605,11 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
        args
            The user input(s) to generate a response from.
        echo
-            Whether to echo text content, all content (i.e., tool calls, images,
-            etc), or no content.
+            One of the following (default is "output"):
+            - `"text"`: Echo just the text content of the response.
+            - `"output"`: Echo text and tool call content.
+            - `"all"`: Echo both the assistant and user turn.
+            - `"none"`: Do not echo any content.
        stream
            Whether to stream the response (i.e., have the response appear in
            chunks).
@@ -631,38 +647,25 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
     def stream(
         self,
         *args: Content | str,
+        content: Literal["text"] = "text",
+        echo: EchoOptions = "none",
+        kwargs: Optional[SubmitInputArgsT] = None,
     ) -> Generator[str, None, None]: ...
 
     @overload
     def stream(
         self,
         *args: Content | str,
-        echo: EchoOptions,
-    ) -> Generator[str, None, None]: ...
-
-    @overload
-    def stream(
-        self,
-        *args: Content | str,
-        echo: EchoOptions,
-        content: Literal["text"],
-        kwargs: Optional[SubmitInputArgsT],
-    ) -> Generator[str, None, None]: ...
-
-    @overload
-    def stream(
-        self,
-        *args: Content | str,
-        echo: EchoOptions,
         content: Literal["all"],
-        kwargs: Optional[SubmitInputArgsT],
+        echo: EchoOptions = "none",
+        kwargs: Optional[SubmitInputArgsT] = None,
     ) -> Generator[str | ContentToolRequest | ContentToolResult, None, None]: ...
 
     def stream(
         self,
         *args: Content | str,
-        echo: EchoOptions = "none",
         content: Literal["text", "all"] = "text",
+        echo: EchoOptions = "none",
         kwargs: Optional[SubmitInputArgsT] = None,
     ) -> Generator[str | ContentToolRequest | ContentToolResult, None, None]:
         """
@@ -672,11 +675,15 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
        ----------
        args
            The user input(s) to generate a response from.
-        echo
-            Whether to echo text content, all content (i.e., tool calls), or no
-            content.
        content
-            Whether to yield just text content, or all content (i.e., tool calls).
+            Whether to yield just text content or include rich content objects
+            (e.g., tool calls) when relevant.
+        echo
+            One of the following (default is "none"):
+            - `"text"`: Echo just the text content of the response.
+            - `"output"`: Echo text and tool call content.
+            - `"all"`: Echo both the assistant and user turn.
+            - `"none"`: Do not echo any content.
        kwargs
            Additional keyword arguments to pass to the method used for requesting
            the response.
@@ -712,38 +719,25 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
     async def stream_async(
         self,
         *args: Content | str,
+        content: Literal["text"] = "text",
+        echo: EchoOptions = "none",
+        kwargs: Optional[SubmitInputArgsT] = None,
     ) -> AsyncGenerator[str, None]: ...
 
     @overload
     async def stream_async(
         self,
         *args: Content | str,
-        echo: EchoOptions,
-    ) -> AsyncGenerator[str, None]: ...
-
-    @overload
-    async def stream_async(
-        self,
-        *args: Content | str,
-        echo: EchoOptions,
-        content: Literal["text"],
-        kwargs: Optional[SubmitInputArgsT],
-    ) -> AsyncGenerator[str, None]: ...
-
-    @overload
-    async def stream_async(
-        self,
-        *args: Content | str,
-        echo: EchoOptions,
         content: Literal["all"],
-        kwargs: Optional[SubmitInputArgsT],
+        echo: EchoOptions = "none",
+        kwargs: Optional[SubmitInputArgsT] = None,
     ) -> AsyncGenerator[str | ContentToolRequest | ContentToolResult, None]: ...
 
     async def stream_async(
         self,
         *args: Content | str,
-        echo: EchoOptions = "none",
         content: Literal["text", "all"] = "text",
+        echo: EchoOptions = "none",
         kwargs: Optional[SubmitInputArgsT] = None,
     ) -> AsyncGenerator[str | ContentToolRequest | ContentToolResult, None]:
         """
@@ -753,11 +747,15 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
        ----------
        args
            The user input(s) to generate a response from.
-        echo
-            Whether to echo text content, all content (i.e., tool calls), or no
-            content.
        content
-            Whether to yield just text content, or all content (i.e., tool calls).
+            Whether to yield just text content or include rich content objects
+            (e.g., tool calls) when relevant.
+        echo
+            One of the following (default is "none"):
+            - `"text"`: Echo just the text content of the response.
+            - `"output"`: Echo text and tool call content.
+            - `"all"`: Echo both the assistant and user turn.
+            - `"none"`: Do not echo any content.
        kwargs
            Additional keyword arguments to pass to the method used for requesting
            the response.
@@ -804,7 +802,11 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
        data_model
            A Pydantic model describing the structure of the data to extract.
        echo
-            Whether to echo text content, all content (i.e., tool calls), or no content.
+            One of the following (default is "none"):
+            - `"text"`: Echo just the text content of the response.
+            - `"output"`: Echo text and tool call content.
+            - `"all"`: Echo both the assistant and user turn.
+            - `"none"`: Do not echo any content.
        stream
            Whether to stream the response (i.e., have the response appear in chunks).
 
@@ -862,7 +864,11 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
        data_model
            A Pydantic model describing the structure of the data to extract.
        echo
-            Whether to echo text content, all content (i.e., tool calls), or no content
+            One of the following (default is "none"):
+            - `"text"`: Echo just the text content of the response.
+            - `"output"`: Echo text and tool call content.
+            - `"all"`: Echo both the assistant and user turn.
+            - `"none"`: Do not echo any content.
        stream
            Whether to stream the response (i.e., have the response appear in chunks).
            Defaults to `True` if `echo` is not "none".
@@ -987,6 +993,53 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
         tool = Tool(func, model=model)
         self._tools[tool.name] = tool
 
+    def on_tool_request(self, callback: Callable[[ContentToolRequest], None]):
+        """
+        Register a callback for a tool request event.
+
+        A tool request event occurs when the assistant requests a tool to be
+        called on its behalf. Before invoking the tool, `on_tool_request`
+        handlers are called with the relevant `ContentToolRequest` object. This
+        is useful if you want to handle tool requests in a custom way, such as
+        logging them or requiring user approval before invoking the tool.
+
+        Parameters
+        ----------
+        callback
+            A function to be called when a tool request event occurs.
+            This function must have a single argument, which will be the
+            tool request (i.e., a `ContentToolRequest` object).
+
+        Returns
+        -------
+        A callable that can be used to remove the callback later.
+        """
+        return self._on_tool_request_callbacks.add(callback)
+
+    def on_tool_result(self, callback: Callable[[ContentToolResult], None]):
+        """
+        Register a callback for a tool result event.
+
+        A tool result event occurs when a tool has been invoked and the
+        result is ready to be provided to the assistant. After the tool
+        has been invoked, `on_tool_result` handlers are called with the
+        relevant `ContentToolResult` object. This is useful if you want to
+        handle tool results in a custom way, such as logging them.
+
+        Parameters
+        ----------
+        callback
+            A function to be called when a tool result event occurs.
+            This function must have a single argument, which will be the
+            tool result (i.e., a `ContentToolResult` object).
+
+        Returns
+        -------
+        A callable that can be used to remove the callback later.
+        """
+        return self._on_tool_result_callbacks.add(callback)
+
     @property
     def current_display(self) -> Optional[MarkdownDisplay]:
         """
@@ -1417,28 +1470,43 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
             e = RuntimeError(f"Unknown tool: {x.name}")
             return ContentToolResult(value=None, error=e, request=x)
 
-        args = x.arguments
-
+        # First, invoke the request callbacks. If a ToolRejectError is raised,
+        # treat it like a tool failure (i.e., gracefully handle it).
+        result: ContentToolResult | None = None
         try:
-            if isinstance(args, dict):
-                result = func(**args)
-            else:
-                result = func(args)
+            self._on_tool_request_callbacks.invoke(x)
+        except ToolRejectError as e:
+            result = ContentToolResult(value=None, error=e, request=x)
+
+        # Invoke the tool (if it hasn't been rejected).
+        if result is None:
+            try:
+                if isinstance(x.arguments, dict):
+                    res = func(**x.arguments)
+                else:
+                    res = func(x.arguments)
+
+                if isinstance(res, ContentToolResult):
+                    result = res
+                else:
+                    result = ContentToolResult(value=res)
 
-            if not isinstance(result, ContentToolResult):
-                result = ContentToolResult(value=result)
+                result.request = x
+            except Exception as e:
+                result = ContentToolResult(value=None, error=e, request=x)
 
-            result.request = x
-            return result
-        except Exception as e:
+        # If we've captured an error, notify and log it.
+        if result.error:
             warnings.warn(
                 f"Calling tool '{x.name}' led to an error.",
                 ToolFailureWarning,
                 stacklevel=2,
             )
             traceback.print_exc()
-            log_tool_error(x.name, str(args), e)
-            return ContentToolResult(value=None, error=e, request=x)
+            log_tool_error(x.name, str(x.arguments), result.error)
+
+        self._on_tool_result_callbacks.invoke(result)
+        return result
 
     async def _invoke_tool_async(self, x: ContentToolRequest) -> ContentToolResult:
         tool_def = self._tools.get(x.name, None)
@@ -1453,28 +1521,43 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
             e = RuntimeError(f"Unknown tool: {x.name}")
             return ContentToolResult(value=None, error=e, request=x)
 
-        args = x.arguments
-
+        # First, invoke the request callbacks. If a ToolRejectError is raised,
+        # treat it like a tool failure (i.e., gracefully handle it).
+        result: ContentToolResult | None = None
         try:
-            if isinstance(args, dict):
-                result = await func(**args)
-            else:
-                result = await func(args)
+            await self._on_tool_request_callbacks.invoke_async(x)
+        except ToolRejectError as e:
+            result = ContentToolResult(value=None, error=e, request=x)
+
+        # Invoke the tool (if it hasn't been rejected).
+        if result is None:
+            try:
+                if isinstance(x.arguments, dict):
+                    res = await func(**x.arguments)
+                else:
+                    res = await func(x.arguments)
 
-            if not isinstance(result, ContentToolResult):
-                result = ContentToolResult(value=result)
+                if isinstance(res, ContentToolResult):
+                    result = res
+                else:
+                    result = ContentToolResult(value=res)
+
+                result.request = x
+            except Exception as e:
+                result = ContentToolResult(value=None, error=e, request=x)
 
-            result.request = x
-            return result
-        except Exception as e:
+        # If we've captured an error, notify and log it.
+        if result.error:
             warnings.warn(
                 f"Calling tool '{x.name}' led to an error.",
                 ToolFailureWarning,
                 stacklevel=2,
             )
             traceback.print_exc()
-            log_tool_error(x.name, str(args), e)
-            return ContentToolResult(value=None, error=e, request=x)
+            log_tool_error(x.name, str(x.arguments), result.error)
+
+        await self._on_tool_result_callbacks.invoke_async(result)
+        return result
 
     def _markdown_display(self, echo: EchoOptions) -> ChatMarkdownDisplay:
         """
@@ -1545,6 +1628,21 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
             res += "\n" + turn.__repr__(indent=2)
         return res + "\n"
 
+    def __deepcopy__(self, memo):
+        result = self.__class__.__new__(self.__class__)
+
+        # Avoid recursive references
+        memo[id(self)] = result
+
+        # Copy all attributes except the problematic provider attribute
+        for key, value in self.__dict__.items():
+            if key != "provider":
+                setattr(result, key, copy.deepcopy(value, memo))
+            else:
+                setattr(result, key, value)
+
+        return result
+
 
 class ChatResponse:
     """
chatlas/_content.py CHANGED
@@ -60,6 +60,12 @@ class ContentText(Content):
     text: str
     content_type: ContentTypeEnum = "text"
 
+    def __init__(self, **data: Any):
+        super().__init__(**data)
+
+        if self.text == "" or self.text.isspace():
+            self.text = "[empty string]"
+
     def __str__(self):
         return self.text
 
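This moves the empty-string normalization that 0.7.1 performed in the Anthropic provider (see the `_as_content_block` change above) into `ContentText` itself, so it now applies to every provider. A quick sketch of the new behavior, importing from the private `_content` module purely for illustration:

```python
from chatlas._content import ContentText  # private module; shown for illustration

assert ContentText(text="hello").text == "hello"

# Empty or whitespace-only text is now normalized at construction time:
assert ContentText(text="").text == "[empty string]"
assert ContentText(text="   ").text == "[empty string]"
```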
chatlas/_databricks.py CHANGED
@@ -85,7 +85,7 @@ def ChatDatabricks(
     A chat object that retains the state of the conversation.
     """
     if model is None:
-        model = log_model_default("databricks-dbrx-instruct")
+        model = log_model_default("databricks-claude-3-7-sonnet")
 
     return Chat(
         provider=DatabricksProvider(
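The change is only visible when no model is given. A sketch, assuming Databricks credentials are already configured in the environment:

```python
from chatlas import ChatDatabricks

# With model=None, 0.8.1 logs and uses "databricks-claude-3-7-sonnet"
# (previously "databricks-dbrx-instruct") as the default model.
chat = ChatDatabricks()

# Passing an explicit model is unaffected:
chat = ChatDatabricks(model="databricks-dbrx-instruct")
```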
chatlas/_logging.py CHANGED
@@ -1,6 +1,7 @@
 import logging
 import os
 import warnings
+from typing import Literal
 
 from rich.logging import RichHandler
 
@@ -12,15 +13,38 @@ def _rich_handler() -> RichHandler:
     return handler
 
 
-logger = logging.getLogger("chatlas")
-
-if os.environ.get("CHATLAS_LOG") == "info":
+def setup_logger(x: str, level: Literal["debug", "info"]) -> logging.Logger:
+    logger = logging.getLogger(x)
+    if level == "debug":
+        logger.setLevel(logging.DEBUG)
+    elif level == "info":
+        logger.setLevel(logging.INFO)
     # By adding a RichHandler to chatlas' logger, we can guarantee that they
     # never get dropped, even if the root logger's handlers are not
     # RichHandlers.
-    logger.setLevel(logging.INFO)
-    logger.addHandler(_rich_handler())
+    if not any(isinstance(h, RichHandler) for h in logger.handlers):
+        logger.addHandler(_rich_handler())
     logger.propagate = False
+    return logger
+
+
+logger = logging.getLogger("chatlas")
+log_level = os.environ.get("CHATLAS_LOG")
+if log_level:
+    if log_level != "debug" and log_level != "info":
+        warnings.warn(
+            f"CHATLAS_LOG is set to '{log_level}', but the log level must "
+            "be one of 'debug' or 'info'. Defaulting to 'info'.",
+        )
+        log_level = "info"
+
+    # Manually set up the logger for each dependency we care about. This way, we
+    # can ensure that the logs won't get dropped when a rich display is active.
+    logger = setup_logger("chatlas", log_level)
+    openai_logger = setup_logger("openai", log_level)
+    anthropic_logger = setup_logger("anthropic", log_level)
+    google_logger = setup_logger("google_genai.models", log_level)
+    httpx_logger = setup_logger("httpx", log_level)
 
 # Add a RichHandler to the root logger if there are no handlers. Note that
 # if chatlas is imported before other libraries that set up logging, (like
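With this change, `CHATLAS_LOG` accepts `debug` in addition to `info`, and it configures the `openai`, `anthropic`, `google_genai.models`, and `httpx` loggers alongside `chatlas`. A sketch of enabling it; the variable must be set before chatlas is imported, since this configuration runs at import time:

```python
import os

# Set before importing chatlas: the loggers are configured at import time.
os.environ["CHATLAS_LOG"] = "debug"  # or "info"

import chatlas  # noqa: E402 -- chatlas/openai/anthropic/httpx logs now emit via rich
```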