chatlas 0.3.0__py3-none-any.whl → 0.5.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- chatlas/__init__.py +11 -1
- chatlas/_anthropic.py +8 -10
- chatlas/_auto.py +183 -0
- chatlas/_chat.py +50 -19
- chatlas/_content.py +23 -7
- chatlas/_display.py +12 -2
- chatlas/_github.py +1 -1
- chatlas/_google.py +263 -166
- chatlas/_groq.py +1 -1
- chatlas/_live_render.py +116 -0
- chatlas/_merge.py +1 -1
- chatlas/_ollama.py +1 -1
- chatlas/_openai.py +4 -6
- chatlas/_perplexity.py +1 -1
- chatlas/_provider.py +0 -9
- chatlas/_snowflake.py +321 -0
- chatlas/_utils.py +7 -0
- chatlas/_version.py +21 -0
- chatlas/py.typed +0 -0
- chatlas/types/__init__.py +5 -1
- chatlas/types/anthropic/_submit.py +24 -2
- chatlas/types/google/_client.py +12 -91
- chatlas/types/google/_submit.py +40 -87
- chatlas/types/openai/_submit.py +9 -2
- chatlas/types/snowflake/__init__.py +8 -0
- chatlas/types/snowflake/_submit.py +24 -0
- {chatlas-0.3.0.dist-info → chatlas-0.5.0.dist-info}/METADATA +35 -7
- chatlas-0.5.0.dist-info/RECORD +44 -0
- chatlas-0.3.0.dist-info/RECORD +0 -37
- {chatlas-0.3.0.dist-info → chatlas-0.5.0.dist-info}/WHEEL +0 -0
chatlas/__init__.py
CHANGED
@@ -1,21 +1,29 @@
 from . import types
 from ._anthropic import ChatAnthropic, ChatBedrockAnthropic
+from ._auto import ChatAuto
 from ._chat import Chat
 from ._content_image import content_image_file, content_image_plot, content_image_url
 from ._github import ChatGithub
-from ._google import ChatGoogle
+from ._google import ChatGoogle, ChatVertex
 from ._groq import ChatGroq
 from ._interpolate import interpolate, interpolate_file
 from ._ollama import ChatOllama
 from ._openai import ChatAzureOpenAI, ChatOpenAI
 from ._perplexity import ChatPerplexity
 from ._provider import Provider
+from ._snowflake import ChatSnowflake
 from ._tokens import token_usage
 from ._tools import Tool
 from ._turn import Turn
 
+try:
+    from ._version import version as __version__
+except ImportError:  # pragma: no cover
+    __version__ = "0.0.0"  # stub value for docs
+
 __all__ = (
     "ChatAnthropic",
+    "ChatAuto",
     "ChatBedrockAnthropic",
     "ChatGithub",
     "ChatGoogle",
@@ -24,6 +32,8 @@ __all__ = (
     "ChatOpenAI",
     "ChatAzureOpenAI",
     "ChatPerplexity",
+    "ChatSnowflake",
+    "ChatVertex",
     "Chat",
     "content_image_file",
     "content_image_plot",
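The new `try`/`except` block is the usual pattern for a version file generated at build time (`chatlas/_version.py` is new in this release): the stub value keeps the package importable from a plain source checkout where no build has run. A quick way to check which path you got, as a sketch:

```python
import chatlas

# "0.5.0" from an installed wheel; "0.0.0" from a raw checkout without _version.py
print(chatlas.__version__)
```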
chatlas/_anthropic.py
CHANGED
@@ -72,7 +72,7 @@ def ChatAnthropic(
     ::: {.callout-note}
     ## API key
 
-    Note that a Claude
+    Note that a Claude Pro membership does not give you the ability to call
     models via the API. You will need to go to the [developer
     console](https://console.anthropic.com/account/keys) to sign up (and pay
     for) a developer account that will give you an API key that you can use with
@@ -82,7 +82,7 @@ def ChatAnthropic(
     ::: {.callout-note}
     ## Python requirements
 
-    `ChatAnthropic` requires the `anthropic` package
+    `ChatAnthropic` requires the `anthropic` package: `pip install "chatlas[anthropic]"`.
     :::
 
     Examples
@@ -164,7 +164,7 @@ def ChatAnthropic(
     """
 
     if model is None:
-        model = log_model_default("claude-3-
+        model = log_model_default("claude-3-7-sonnet-latest")
 
     return Chat(
         provider=AnthropicProvider(
@@ -311,7 +311,8 @@ class AnthropicProvider(Provider[Message, RawMessageStreamEvent, Message]):
         if stream:
             stream = False
             warnings.warn(
-                "Anthropic does not support structured data extraction in streaming mode."
+                "Anthropic does not support structured data extraction in streaming mode.",
+                stacklevel=2,
             )
 
         kwargs_full: "SubmitInputArgs" = {
@@ -371,10 +372,7 @@ class AnthropicProvider(Provider[Message, RawMessageStreamEvent, Message]):
 
         return completion
 
-    def stream_turn(self, completion, has_data_model
-        return self._as_turn(completion, has_data_model)
-
-    async def stream_turn_async(self, completion, has_data_model, stream) -> Turn:
+    def stream_turn(self, completion, has_data_model) -> Turn:
         return self._as_turn(completion, has_data_model)
 
     def value_turn(self, completion, has_data_model) -> Turn:
@@ -574,8 +572,8 @@ def ChatBedrockAnthropic(
     ::: {.callout-note}
     ## Python requirements
 
-    `ChatBedrockAnthropic`, requires the `anthropic` package with the `bedrock` extras
-
+    `ChatBedrockAnthropic`, requires the `anthropic` package with the `bedrock` extras:
+    `pip install "chatlas[bedrock-anthropic]"`
     :::
 
     Examples
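The `stacklevel=2` added to `warnings.warn()` makes Python attribute the warning to the code that called into the provider, rather than to the `warn()` line inside chatlas. A minimal standalone sketch of the effect (the function name here is illustrative, not a chatlas API):

```python
import warnings


def extract_data(stream: bool = True):
    if stream:
        # With stacklevel=2, the reported filename/line is the caller's,
        # which is far more actionable than a line inside the library.
        warnings.warn("streaming not supported; falling back", stacklevel=2)


extract_data()  # UserWarning reported at this line, not inside extract_data()
```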
chatlas/_auto.py
ADDED
@@ -0,0 +1,183 @@
+from __future__ import annotations
+
+import json
+import os
+from typing import Callable, Literal, Optional
+
+from ._anthropic import ChatAnthropic, ChatBedrockAnthropic
+from ._chat import Chat
+from ._github import ChatGithub
+from ._google import ChatGoogle, ChatVertex
+from ._groq import ChatGroq
+from ._ollama import ChatOllama
+from ._openai import ChatAzureOpenAI, ChatOpenAI
+from ._perplexity import ChatPerplexity
+from ._snowflake import ChatSnowflake
+from ._turn import Turn
+
+AutoProviders = Literal[
+    "anthropic",
+    "bedrock-anthropic",
+    "github",
+    "google",
+    "groq",
+    "ollama",
+    "openai",
+    "azure-openai",
+    "perplexity",
+    "snowflake",
+    "vertex",
+]
+
+_provider_chat_model_map: dict[AutoProviders, Callable[..., Chat]] = {
+    "anthropic": ChatAnthropic,
+    "bedrock-anthropic": ChatBedrockAnthropic,
+    "github": ChatGithub,
+    "google": ChatGoogle,
+    "groq": ChatGroq,
+    "ollama": ChatOllama,
+    "openai": ChatOpenAI,
+    "azure-openai": ChatAzureOpenAI,
+    "perplexity": ChatPerplexity,
+    "snowflake": ChatSnowflake,
+    "vertex": ChatVertex,
+}
+
+
+def ChatAuto(
+    system_prompt: Optional[str] = None,
+    turns: Optional[list[Turn]] = None,
+    *,
+    provider: Optional[AutoProviders] = None,
+    model: Optional[str] = None,
+    **kwargs,
+) -> Chat:
+    """
+    Use environment variables (env vars) to configure the Chat provider and model.
+
+    Creates a `:class:~chatlas.Chat` instance based on the specified provider.
+    The provider may be specified through the `provider` parameter and/or the
+    `CHATLAS_CHAT_PROVIDER` env var. If both are set, the env var takes
+    precedence. Similarly, the provider's model may be specified through the
+    `model` parameter and/or the `CHATLAS_CHAT_MODEL` env var. Also, additional
+    configuration may be provided through the `kwargs` parameter and/or the
+    `CHATLAS_CHAT_ARGS` env var (as a JSON string). In this case, when both are
+    set, they are merged, with the env var arguments taking precedence.
+
+    As a result, `ChatAuto()` provides a convenient way to set a default
+    provider and model in your Python code, while allowing you to override
+    these settings through env vars (i.e., without modifying your code).
+
+    Prerequisites
+    -------------
+
+    ::: {.callout-note}
+    ## API key
+
+    Follow the instructions for the specific provider to obtain an API key.
+    :::
+
+    ::: {.callout-note}
+    ## Python requirements
+
+    Follow the instructions for the specific provider to install the required
+    Python packages.
+    :::
+
+
+    Examples
+    --------
+    First, set the environment variables for the provider, arguments, and API key:
+
+    ```bash
+    export CHATLAS_CHAT_PROVIDER=anthropic
+    export CHATLAS_CHAT_MODEL=claude-3-haiku-20240229
+    export CHATLAS_CHAT_ARGS='{"kwargs": {"max_retries": 3}}'
+    export ANTHROPIC_API_KEY=your_api_key
+    ```
+
+    Then, you can use the `ChatAuto` function to create a Chat instance:
+
+    ```python
+    from chatlas import ChatAuto
+
+    chat = ChatAuto()
+    chat.chat("What is the capital of France?")
+    ```
+
+    Parameters
+    ----------
+    provider
+        The name of the default chat provider to use. Providers are strings
+        formatted in kebab-case, e.g. to use `ChatBedrockAnthropic` set
+        `provider="bedrock-anthropic"`.
+
+        This value can also be provided via the `CHATLAS_CHAT_PROVIDER`
+        environment variable, which takes precedence over `provider`
+        when set.
+    model
+        The name of the default model to use. This value can also be provided
+        via the `CHATLAS_CHAT_MODEL` environment variable, which takes
+        precedence over `model` when set.
+    system_prompt
+        A system prompt to set the behavior of the assistant.
+    turns
+        A list of turns to start the chat with (i.e., continuing a previous
+        conversation). If not provided, the conversation begins from scratch. Do
+        not provide non-`None` values for both `turns` and `system_prompt`. Each
+        message in the list should be a dictionary with at least `role` (usually
+        `system`, `user`, or `assistant`, but `tool` is also possible). Normally
+        there is also a `content` field, which is a string.
+    **kwargs
+        Additional keyword arguments to pass to the Chat constructor. See the
+        documentation for each provider for more details on the available
+        options.
+
+        These arguments can also be provided via the `CHATLAS_CHAT_ARGS`
+        environment variable as a JSON string. When provided, the options
+        in the `CHATLAS_CHAT_ARGS` envvar take precedence over the options
+        passed to `kwargs`.
+
+        Note that `system_prompt` and `turns` in `kwargs` or in
+        `CHATLAS_CHAT_ARGS` are ignored.
+
+    Returns
+    -------
+    Chat
+        A chat instance using the specified provider.
+
+    Raises
+    ------
+    ValueError
+        If no valid provider is specified either through parameters or
+        environment variables.
+    """
+    the_provider = os.environ.get("CHATLAS_CHAT_PROVIDER", provider)
+
+    if the_provider is None:
+        raise ValueError(
+            "Provider name is required as parameter or `CHATLAS_CHAT_PROVIDER` must be set."
+        )
+    if the_provider not in _provider_chat_model_map:
+        raise ValueError(
+            f"Provider name '{the_provider}' is not a known chatlas provider: "
+            f"{', '.join(_provider_chat_model_map.keys())}"
+        )
+
+    # `system_prompt` and `turns` always come from `ChatAuto()`
+    base_args = {"system_prompt": system_prompt, "turns": turns}
+
+    if env_model := os.environ.get("CHATLAS_CHAT_MODEL"):
+        model = env_model
+
+    if model:
+        base_args["model"] = model
+
+    env_kwargs = {}
+    if env_kwargs_str := os.environ.get("CHATLAS_CHAT_ARGS"):
+        env_kwargs = json.loads(env_kwargs_str)
+
+    kwargs = {**kwargs, **env_kwargs, **base_args}
+    kwargs = {k: v for k, v in kwargs.items() if v is not None}
+
+    return _provider_chat_model_map[the_provider](**kwargs)
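Taken together, the merge at the bottom of `ChatAuto()` gives env vars the last word: `kwargs` are overlaid by `CHATLAS_CHAT_ARGS`, and `CHATLAS_CHAT_PROVIDER`/`CHATLAS_CHAT_MODEL` override the `provider`/`model` parameters. A hedged sketch of that override flow (the model names and the `seed` option are illustrative; whether a given kwarg is accepted depends on the provider constructor):

```python
import os

from chatlas import ChatAuto

# Code sets defaults; deployment overrides them without touching the code.
os.environ["CHATLAS_CHAT_PROVIDER"] = "openai"      # beats provider="anthropic"
os.environ["CHATLAS_CHAT_MODEL"] = "gpt-4o-mini"    # beats model=...
os.environ["CHATLAS_CHAT_ARGS"] = '{"seed": 42}'    # merged over **kwargs

chat = ChatAuto(provider="anthropic", model="claude-3-7-sonnet-latest")
# Resolves to roughly ChatOpenAI(model="gpt-4o-mini", seed=42)
```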
chatlas/_chat.py
CHANGED
@@ -1,6 +1,8 @@
 from __future__ import annotations
 
+import inspect
 import os
+import sys
 from pathlib import Path
 from threading import Thread
 from typing import (
@@ -40,7 +42,7 @@ from ._provider import Provider
 from ._tools import Tool
 from ._turn import Turn, user_turn
 from ._typing_extensions import TypedDict
-from ._utils import html_escape
+from ._utils import html_escape, wrap_async
 
 
 class AnyTypeDict(TypedDict, total=False):
@@ -388,6 +390,7 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
         port: int = 0,
         launch_browser: bool = True,
         bg_thread: Optional[bool] = None,
+        echo: Optional[Literal["text", "all", "none"]] = None,
         kwargs: Optional[SubmitInputArgsT] = None,
     ):
         """
@@ -404,6 +407,10 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
         bg_thread
             Whether to run the app in a background thread. If `None`, the app will
             run in a background thread if the current environment is a notebook.
+        echo
+            Whether to echo text content, all content (i.e., tool calls), or no
+            content. Defaults to `"none"` when `stream=True` and `"text"` when
+            `stream=False`.
         kwargs
             Additional keyword arguments to pass to the method used for requesting
             the response.
@@ -438,10 +445,22 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
                 return
             if stream:
                 await chat.append_message_stream(
-                    self.
+                    await self.stream_async(
+                        user_input,
+                        kwargs=kwargs,
+                        echo=echo or "none",
+                    )
                 )
             else:
-                await chat.append_message(
+                await chat.append_message(
+                    str(
+                        self.chat(
+                            user_input,
+                            kwargs=kwargs,
+                            echo=echo or "text",
+                        )
+                    )
+                )
 
         app = App(app_ui, server)
 
@@ -948,11 +967,11 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
         is_html = filename.suffix == ".html"
 
         # Get contents from each turn
-
+        content_arr: list[str] = []
         for turn in turns:
             turn_content = "\n\n".join(
                 [
-                    str(content)
+                    str(content).strip()
                     for content in turn.contents
                     if include == "all" or isinstance(content, ContentText)
                 ]
@@ -963,7 +982,8 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
                 turn_content = f"<shiny-{msg_type}-message content='{content_attr}'></shiny-{msg_type}-message>"
             else:
                 turn_content = f"## {turn.role.capitalize()}\n\n{turn_content}"
-
+            content_arr.append(turn_content)
+        contents = "\n\n".join(content_arr)
 
         # Shiny chat message components requires container elements
         if is_html:
@@ -1093,7 +1113,6 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
         turn = self.provider.stream_turn(
             result,
             has_data_model=data_model is not None,
-            stream=response,
         )
 
         if echo == "all":
@@ -1154,10 +1173,9 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
             yield text
             result = self.provider.stream_merge_chunks(result, chunk)
 
-        turn =
+        turn = self.provider.stream_turn(
             result,
             has_data_model=data_model is not None,
-            stream=response,
         )
 
         if echo == "all":
@@ -1210,7 +1228,12 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
         for x in turn.contents:
             if isinstance(x, ContentToolRequest):
                 tool_def = self._tools.get(x.name, None)
-                func =
+                func = None
+                if tool_def:
+                    if tool_def._is_async:
+                        func = tool_def.func
+                    else:
+                        func = wrap_async(tool_def.func)
                 results.append(await self._invoke_tool_async(func, x.arguments, x.id))
 
         if not results:
@@ -1225,7 +1248,9 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
         id_: str,
     ) -> ContentToolResult:
         if func is None:
-            return ContentToolResult(id_, None, "Unknown tool")
+            return ContentToolResult(id_, value=None, error="Unknown tool")
+
+        name = func.__name__
 
         try:
             if isinstance(arguments, dict):
@@ -1233,10 +1258,10 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
             else:
                 result = func(arguments)
 
-            return ContentToolResult(id_, result, None)
+            return ContentToolResult(id_, value=result, error=None, name=name)
         except Exception as e:
-            log_tool_error(
-            return ContentToolResult(id_, None, str(e))
+            log_tool_error(name, str(arguments), e)
+            return ContentToolResult(id_, value=None, error=str(e), name=name)
 
     @staticmethod
     async def _invoke_tool_async(
@@ -1245,7 +1270,9 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
         id_: str,
     ) -> ContentToolResult:
         if func is None:
-            return ContentToolResult(id_, None, "Unknown tool")
+            return ContentToolResult(id_, value=None, error="Unknown tool")
+
+        name = func.__name__
 
         try:
             if isinstance(arguments, dict):
@@ -1253,10 +1280,10 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
             else:
                 result = await func(arguments)
 
-            return ContentToolResult(id_, result, None)
+            return ContentToolResult(id_, value=result, error=None, name=name)
         except Exception as e:
             log_tool_error(func.__name__, str(arguments), e)
-            return ContentToolResult(id_, None, str(e))
+            return ContentToolResult(id_, value=None, error=str(e), name=name)
 
     def _markdown_display(
         self, echo: Literal["text", "all", "none"]
@@ -1373,7 +1400,7 @@ class ChatResponse:
 
     @property
     def consumed(self) -> bool:
-        return self._generator
+        return inspect.getgeneratorstate(self._generator) == inspect.GEN_CLOSED
 
     def __str__(self) -> str:
         return self.get_content()
@@ -1423,7 +1450,11 @@ class ChatResponseAsync:
 
     @property
    def consumed(self) -> bool:
-
+        if sys.version_info < (3, 12):
+            raise NotImplementedError(
+                "Checking for consumed state is only supported in Python 3.12+"
+            )
+        return inspect.getasyncgenstate(self._generator) == inspect.AGEN_CLOSED
 
 
 # ----------------------------------------------------------------------------
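The rewritten `consumed` properties lean on the standard library's generator introspection: a fully exhausted generator reports `GEN_CLOSED`. The async counterpart, `inspect.getasyncgenstate()`, only exists on Python 3.12+, which is why the async property raises on older interpreters. The sync-side idea in isolation:

```python
import inspect


def chunks():
    yield "Hello, "
    yield "world"


g = chunks()
print(inspect.getgeneratorstate(g) == inspect.GEN_CLOSED)  # False: unconsumed
for _ in g:
    pass
print(inspect.getgeneratorstate(g) == inspect.GEN_CLOSED)  # True: consumed
```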
chatlas/_content.py
CHANGED
@@ -2,6 +2,7 @@ from __future__ import annotations
 
 import json
 from dataclasses import dataclass
+from pprint import pformat
 from typing import Any, Literal, Optional
 
 ImageContentTypes = Literal[
@@ -154,7 +155,7 @@ class ContentToolRequest(Content):
         args_str = self._arguments_str()
         func_call = f"{self.name}({args_str})"
         comment = f"# tool request ({self.id})"
-        return f"
+        return f"```python\n{comment}\n{func_call}\n```\n"
 
     def _repr_markdown_(self):
         return self.__str__()
@@ -187,19 +188,35 @@ class ContentToolResult(Content):
         The unique identifier of the tool request.
     value
         The value returned by the tool/function.
+    name
+        The name of the tool/function that was called.
     error
         An error message if the tool/function call failed.
     """
 
     id: str
     value: Any = None
+    name: Optional[str] = None
     error: Optional[str] = None
 
+    def _get_value(self, pretty: bool = False) -> str:
+        if self.error:
+            return f"Tool calling failed with error: '{self.error}'"
+        if not pretty:
+            return str(self.value)
+        try:
+            json_val = json.loads(self.value)  # type: ignore
+            return pformat(json_val, indent=2, sort_dicts=False)
+        except:  # noqa: E722
+            return str(self.value)
+
+    # Primarily used for `echo="all"`...
     def __str__(self):
         comment = f"# tool result ({self.id})"
-
-        return f"""
+        value = self._get_value(pretty=True)
+        return f"""```python\n{comment}\n{value}\n```"""
 
+    # ... and for displaying in the notebook
     def _repr_markdown_(self):
         return self.__str__()
 
@@ -210,10 +227,9 @@ class ContentToolResult(Content):
             res += f" error='{self.error}'"
         return res + ">"
 
+    # The actual value to send to the model
     def get_final_value(self) -> str:
-
-        return f"Tool calling failed with error: '{self.error}'"
-        return str(self.value)
+        return self._get_value()
 
 
 @dataclass
@@ -236,7 +252,7 @@ class ContentJson(Content):
         return json.dumps(self.value, indent=2)
 
     def _repr_markdown_(self):
-        return f"""
+        return f"""```json\n{self.__str__()}\n```"""
 
     def __repr__(self, indent: int = 0):
         return " " * indent + f"<ContentJson value={self.value}>"
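The new `_get_value(pretty=True)` helper centralizes error formatting and tries to render JSON-ish tool output as an indented structure, falling back to `str()` for everything else. The same idea in isolation (the function name here is illustrative):

```python
import json
from pprint import pformat


def pretty_value(value) -> str:
    # JSON strings become an indented structure; anything else falls back to str()
    try:
        return pformat(json.loads(value), indent=2, sort_dicts=False)
    except (TypeError, ValueError):
        return str(value)


print(pretty_value('{"city": "Paris", "temp_c": 18}'))  # pretty-printed dict
print(pretty_value(42))                                 # plain "42"
```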
chatlas/_display.py
CHANGED
@@ -6,6 +6,7 @@ from uuid import uuid4
 from rich.live import Live
 from rich.logging import RichHandler
 
+from ._live_render import LiveRender
 from ._logging import logger
 from ._typing_extensions import TypedDict
 
@@ -44,13 +45,22 @@ class LiveMarkdownDisplay(MarkdownDisplay):
         from rich.console import Console
 
         self.content: str = ""
-
+        live = Live(
             auto_refresh=False,
-            vertical_overflow="visible",
             console=Console(
                 **echo_options["rich_console"],
             ),
         )
+
+        # Monkeypatch LiveRender() with our own version that add "crop_above"
+        # https://github.com/Textualize/rich/blob/43d3b047/rich/live.py#L87-L89
+        live.vertical_overflow = "crop_above"
+        live._live_render = LiveRender(  # pyright: ignore[reportAttributeAccessIssue]
+            live.get_renderable(), vertical_overflow="crop_above"
+        )
+
+        self.live = live
+
         self._markdown_options = echo_options["rich_markdown"]
 
     def update(self, content: str):
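rich's `Live` only ships `"crop"`, `"ellipsis"`, and `"visible"` overflow modes, none of which keeps the newest lines of a long streaming response on screen; hence the vendored `_live_render.py` variant adding `"crop_above"` and the swap onto the private `_live_render` attribute (pinned to a specific rich commit in the comment above, since private internals can change). A sketch of the same swap from user code, under that assumption:

```python
from rich.console import Console
from rich.live import Live

from chatlas._live_render import LiveRender  # vendored variant with "crop_above"

live = Live(auto_refresh=False, console=Console())
# Crop from the top so the most recently streamed lines stay visible.
live.vertical_overflow = "crop_above"
live._live_render = LiveRender(live.get_renderable(), vertical_overflow="crop_above")
```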
chatlas/_github.py
CHANGED