chatlas 0.9.2__py3-none-any.whl → 0.10.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- chatlas/__init__.py +12 -0
- chatlas/_chat.py +2 -2
- chatlas/_provider_anthropic.py +2 -3
- chatlas/_provider_cloudflare.py +165 -0
- chatlas/_provider_databricks.py +11 -0
- chatlas/_provider_deepseek.py +171 -0
- chatlas/_provider_huggingface.py +155 -0
- chatlas/_provider_mistral.py +181 -0
- chatlas/_provider_openai.py +9 -5
- chatlas/_provider_openrouter.py +149 -0
- chatlas/_provider_portkey.py +123 -0
- chatlas/_tokens.py +5 -5
- chatlas/_typing_extensions.py +3 -3
- chatlas/_version.py +16 -3
- chatlas/types/anthropic/_client.py +1 -1
- chatlas/types/anthropic/_client_bedrock.py +1 -1
- chatlas/types/openai/_client.py +1 -1
- chatlas/types/openai/_client_azure.py +1 -1
- chatlas/types/openai/_submit.py +3 -0
- {chatlas-0.9.2.dist-info → chatlas-0.10.0.dist-info}/METADATA +3 -3
- {chatlas-0.9.2.dist-info → chatlas-0.10.0.dist-info}/RECORD +23 -17
- {chatlas-0.9.2.dist-info → chatlas-0.10.0.dist-info}/WHEEL +0 -0
- {chatlas-0.9.2.dist-info → chatlas-0.10.0.dist-info}/licenses/LICENSE +0 -0
chatlas/__init__.py
CHANGED
@@ -7,13 +7,19 @@ from ._content_pdf import content_pdf_file, content_pdf_url
 from ._interpolate import interpolate, interpolate_file
 from ._provider import Provider
 from ._provider_anthropic import ChatAnthropic, ChatBedrockAnthropic
+from ._provider_cloudflare import ChatCloudflare
 from ._provider_databricks import ChatDatabricks
+from ._provider_deepseek import ChatDeepSeek
 from ._provider_github import ChatGithub
 from ._provider_google import ChatGoogle, ChatVertex
 from ._provider_groq import ChatGroq
+from ._provider_huggingface import ChatHuggingFace
+from ._provider_mistral import ChatMistral
 from ._provider_ollama import ChatOllama
 from ._provider_openai import ChatAzureOpenAI, ChatOpenAI
+from ._provider_openrouter import ChatOpenRouter
 from ._provider_perplexity import ChatPerplexity
+from ._provider_portkey import ChatPortkey
 from ._provider_snowflake import ChatSnowflake
 from ._tokens import token_usage
 from ._tools import Tool, ToolRejectError

@@ -28,14 +34,20 @@ __all__ = (
     "ChatAnthropic",
     "ChatAuto",
     "ChatBedrockAnthropic",
+    "ChatCloudflare",
     "ChatDatabricks",
+    "ChatDeepSeek",
     "ChatGithub",
     "ChatGoogle",
     "ChatGroq",
+    "ChatHuggingFace",
+    "ChatMistral",
     "ChatOllama",
     "ChatOpenAI",
+    "ChatOpenRouter",
     "ChatAzureOpenAI",
     "ChatPerplexity",
+    "ChatPortkey",
     "ChatSnowflake",
     "ChatVertex",
     "Chat",
chatlas/_chat.py
CHANGED
@@ -395,8 +395,8 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
         )

         input_token_price = price_token["input"] / 1e6
-        output_token_price = price_token["output"] / 1e6
-        cached_token_price = price_token["cached_input"] / 1e6
+        output_token_price = price_token.get("output", 0) / 1e6
+        cached_token_price = price_token.get("cached_input", 0) / 1e6

         if len(turns_tokens) == 0:
             return 0.0
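The price table stores costs in USD per million tokens, so each entry is divided by 1e6 before multiplying by a token count, and the switch to `.get(..., 0)` means a missing `output` or `cached_input` entry now contributes zero instead of raising `KeyError`. A minimal sketch of the arithmetic, with made-up prices:

```python
# Hypothetical per-million-token prices; "cached_input" is deliberately absent.
price_token = {"input": 3.0, "output": 15.0}

input_token_price = price_token["input"] / 1e6                 # $0.000003/token
output_token_price = price_token.get("output", 0) / 1e6        # $0.000015/token
cached_token_price = price_token.get("cached_input", 0) / 1e6  # 0.0 (key absent)

# Cost of a turn with 1,200 input tokens and 300 output tokens:
cost = 1200 * input_token_price + 300 * output_token_price
print(f"${cost:.6f}")  # $0.008100
```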
chatlas/_provider_anthropic.py
CHANGED
@@ -163,7 +163,7 @@ def ChatAnthropic(
     """

     if model is None:
-        model = log_model_default("claude-
+        model = log_model_default("claude-sonnet-4-0")

     return Chat(
         provider=AnthropicProvider(

@@ -742,8 +742,7 @@ def ChatBedrockAnthropic(
     """

     if model is None:
-
-        model = log_model_default("anthropic.claude-3-5-sonnet-20241022-v2:0")
+        model = log_model_default("us.anthropic.claude-sonnet-4-20250514-v1:0")

     return Chat(
         provider=AnthropicBedrockProvider(
chatlas/_provider_cloudflare.py
ADDED
@@ -0,0 +1,165 @@
+from __future__ import annotations
+
+import os
+from typing import TYPE_CHECKING, Optional
+
+from ._chat import Chat
+from ._logging import log_model_default
+from ._provider_openai import OpenAIProvider
+from ._utils import MISSING, MISSING_TYPE, is_testing
+
+if TYPE_CHECKING:
+    from ._provider_openai import ChatCompletion
+    from .types.openai import ChatClientArgs, SubmitInputArgs
+
+
+def ChatCloudflare(
+    *,
+    account: Optional[str] = None,
+    system_prompt: Optional[str] = None,
+    model: Optional[str] = None,
+    api_key: Optional[str] = None,
+    seed: Optional[int] | MISSING_TYPE = MISSING,
+    kwargs: Optional["ChatClientArgs"] = None,
+) -> Chat["SubmitInputArgs", ChatCompletion]:
+    """
+    Chat with a model hosted on Cloudflare Workers AI.
+
+    Cloudflare Workers AI hosts a variety of open-source AI models.
+
+    Prerequisites
+    -------------
+
+    ::: {.callout-note}
+    ## API credentials
+
+    To use the Cloudflare API, you must have an Account ID and an Access Token,
+    which you can obtain by following the instructions at
+    <https://developers.cloudflare.com/workers-ai/get-started/rest-api/>.
+    :::
+
+    Examples
+    --------
+
+    ```python
+    import os
+    from chatlas import ChatCloudflare
+
+    chat = ChatCloudflare(
+        api_key=os.getenv("CLOUDFLARE_API_KEY"),
+        account=os.getenv("CLOUDFLARE_ACCOUNT_ID"),
+    )
+    chat.chat("What is the capital of France?")
+    ```
+
+    Known limitations
+    -----------------
+
+    - Tool calling does not appear to work.
+    - Images don't appear to work.
+
+    Parameters
+    ----------
+    account
+        The Cloudflare account ID. You generally should not supply this directly,
+        but instead set the `CLOUDFLARE_ACCOUNT_ID` environment variable.
+    system_prompt
+        A system prompt to set the behavior of the assistant.
+    model
+        The model to use for the chat. The default, None, will pick a reasonable
+        default, and warn you about it. We strongly recommend explicitly choosing
+        a model for all but the most casual use.
+    api_key
+        The API key to use for authentication. You generally should not supply
+        this directly, but instead set the `CLOUDFLARE_API_KEY` environment
+        variable.
+    seed
+        Optional integer seed that ChatGPT uses to try and make output more
+        reproducible.
+    kwargs
+        Additional arguments to pass to the `openai.OpenAI()` client constructor.
+
+    Returns
+    -------
+    Chat
+        A chat object that retains the state of the conversation.
+
+    Note
+    ----
+    This function is a lightweight wrapper around [](`~chatlas.ChatOpenAI`) with
+    the defaults tweaked for Cloudflare.
+
+    Note
+    ----
+    Pasting credentials into a chat constructor (e.g.,
+    `ChatCloudflare(api_key="...", account="...")`) is the simplest way to get
+    started, and is fine for interactive use, but is problematic for code that
+    may be shared with others.
+
+    Instead, consider using environment variables or a configuration file to manage
+    your credentials. One popular way to manage credentials is to use a `.env` file
+    to store your credentials, and then use the `python-dotenv` package to load them
+    into your environment.
+
+    ```shell
+    pip install python-dotenv
+    ```
+
+    ```shell
+    # .env
+    CLOUDFLARE_API_KEY=...
+    CLOUDFLARE_ACCOUNT_ID=...
+    ```
+
+    ```python
+    from chatlas import ChatCloudflare
+    from dotenv import load_dotenv
+
+    load_dotenv()
+    chat = ChatCloudflare()
+    chat.console()
+    ```
+
+    Another, more general, solution is to load your environment variables into the shell
+    before starting Python (maybe in a `.bashrc`, `.zshrc`, etc. file):
+
+    ```shell
+    export CLOUDFLARE_API_KEY=...
+    export CLOUDFLARE_ACCOUNT_ID=...
+    ```
+    """
+    # List at https://developers.cloudflare.com/workers-ai/models/
+    # `@cf` appears to be part of the model name
+    if model is None:
+        model = log_model_default("@cf/meta/llama-3.3-70b-instruct-fp8-fast")
+
+    if api_key is None:
+        api_key = os.getenv("CLOUDFLARE_API_KEY")
+
+    if account is None:
+        account = os.getenv("CLOUDFLARE_ACCOUNT_ID")
+
+    if account is None:
+        raise ValueError(
+            "Cloudflare account ID is required. Set the CLOUDFLARE_ACCOUNT_ID "
+            "environment variable or pass the `account` parameter."
+        )
+
+    if isinstance(seed, MISSING_TYPE):
+        seed = 1014 if is_testing() else None
+
+    # https://developers.cloudflare.com/workers-ai/configuration/open-ai-compatibility/
+    cloudflare_api = "https://api.cloudflare.com/client/v4/accounts"
+    base_url = f"{cloudflare_api}/{account}/ai/v1/"
+
+    return Chat(
+        provider=OpenAIProvider(
+            api_key=api_key,
+            model=model,
+            base_url=base_url,
+            seed=seed,
+            name="Cloudflare",
+            kwargs=kwargs,
+        ),
+        system_prompt=system_prompt,
+    )
chatlas/_provider_databricks.py
CHANGED
@@ -127,3 +127,14 @@ class DatabricksProvider(OpenAIProvider):
             api_key="no-token",  # A placeholder to pass validations, this will not be used
             http_client=httpx.AsyncClient(auth=client._client.auth),
         )
+
+    # Databricks doesn't support stream_options
+    def _chat_perform_args(
+        self, stream, turns, tools, data_model=None, kwargs=None
+    ) -> "SubmitInputArgs":
+        kwargs2 = super()._chat_perform_args(stream, turns, tools, data_model, kwargs)
+
+        if "stream_options" in kwargs2:
+            del kwargs2["stream_options"]
+
+        return kwargs2
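Databricks' endpoint rejects the `stream_options` key that the base OpenAI provider adds by default, so the subclass strips it after building the base arguments. A self-contained sketch of the pattern, with hypothetical class and method names standing in for chatlas's internals:

```python
# Override a kwargs-building hook to drop an option the backend rejects.
# `Base` and `_perform_args` are illustrative names, not chatlas's API.
class Base:
    def _perform_args(self, **kwargs) -> dict:
        args = dict(kwargs)
        args.setdefault("stream_options", {"include_usage": True})
        return args


class NoStreamOptions(Base):
    def _perform_args(self, **kwargs) -> dict:
        args = super()._perform_args(**kwargs)
        args.pop("stream_options", None)  # backend doesn't support this key
        return args


print(NoStreamOptions()._perform_args(model="m"))  # {'model': 'm'}
```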
chatlas/_provider_deepseek.py
ADDED
@@ -0,0 +1,171 @@
+from __future__ import annotations
+
+import os
+from typing import TYPE_CHECKING, Optional, cast
+
+from ._chat import Chat
+from ._logging import log_model_default
+from ._provider_openai import OpenAIProvider
+from ._turn import Turn
+from ._utils import MISSING, MISSING_TYPE, is_testing
+
+if TYPE_CHECKING:
+    from openai.types.chat import ChatCompletion, ChatCompletionMessageParam
+
+    from .types.openai import ChatClientArgs, SubmitInputArgs
+
+
+def ChatDeepSeek(
+    *,
+    system_prompt: Optional[str] = None,
+    model: Optional[str] = None,
+    api_key: Optional[str] = None,
+    base_url: str = "https://api.deepseek.com",
+    seed: Optional[int] | MISSING_TYPE = MISSING,
+    kwargs: Optional["ChatClientArgs"] = None,
+) -> Chat["SubmitInputArgs", ChatCompletion]:
+    """
+    Chat with a model hosted on DeepSeek.
+
+    DeepSeek is a platform for AI inference with competitive pricing
+    and performance.
+
+    Prerequisites
+    -------------
+
+    ::: {.callout-note}
+    ## API key
+
+    Sign up at <https://platform.deepseek.com> to get an API key.
+    :::
+
+    Examples
+    --------
+
+    ```python
+    import os
+    from chatlas import ChatDeepSeek
+
+    chat = ChatDeepSeek(api_key=os.getenv("DEEPSEEK_API_KEY"))
+    chat.chat("What is the capital of France?")
+    ```
+
+    Known limitations
+    --------------
+
+    * Structured data extraction is not supported.
+    * Images are not supported.
+
+    Parameters
+    ----------
+    system_prompt
+        A system prompt to set the behavior of the assistant.
+    model
+        The model to use for the chat. The default, None, will pick a reasonable
+        default, and warn you about it. We strongly recommend explicitly choosing
+        a model for all but the most casual use.
+    api_key
+        The API key to use for authentication. You generally should not supply
+        this directly, but instead set the `DEEPSEEK_API_KEY` environment variable.
+    base_url
+        The base URL to the endpoint; the default uses DeepSeek's API.
+    seed
+        Optional integer seed that DeepSeek uses to try and make output more
+        reproducible.
+    kwargs
+        Additional arguments to pass to the `openai.OpenAI()` client constructor.
+
+    Returns
+    -------
+    Chat
+        A chat object that retains the state of the conversation.
+
+    Note
+    ----
+    This function is a lightweight wrapper around [](`~chatlas.ChatOpenAI`) with
+    the defaults tweaked for DeepSeek.
+
+    Note
+    ----
+    Pasting an API key into a chat constructor (e.g., `ChatDeepSeek(api_key="...")`)
+    is the simplest way to get started, and is fine for interactive use, but is
+    problematic for code that may be shared with others.
+
+    Instead, consider using environment variables or a configuration file to manage
+    your credentials. One popular way to manage credentials is to use a `.env` file
+    to store your credentials, and then use the `python-dotenv` package to load them
+    into your environment.
+
+    ```shell
+    pip install python-dotenv
+    ```
+
+    ```shell
+    # .env
+    DEEPSEEK_API_KEY=...
+    ```
+
+    ```python
+    from chatlas import ChatDeepSeek
+    from dotenv import load_dotenv
+
+    load_dotenv()
+    chat = ChatDeepSeek()
+    chat.console()
+    ```
+
+    Another, more general, solution is to load your environment variables into the shell
+    before starting Python (maybe in a `.bashrc`, `.zshrc`, etc. file):
+
+    ```shell
+    export DEEPSEEK_API_KEY=...
+    ```
+    """
+    if model is None:
+        model = log_model_default("deepseek-chat")
+
+    if api_key is None:
+        api_key = os.getenv("DEEPSEEK_API_KEY")
+
+    if isinstance(seed, MISSING_TYPE):
+        seed = 1014 if is_testing() else None
+
+    return Chat(
+        provider=DeepSeekProvider(
+            api_key=api_key,
+            model=model,
+            base_url=base_url,
+            seed=seed,
+            name="DeepSeek",
+            kwargs=kwargs,
+        ),
+        system_prompt=system_prompt,
+    )
+
+
+class DeepSeekProvider(OpenAIProvider):
+    @staticmethod
+    def _as_message_param(turns: list[Turn]) -> list["ChatCompletionMessageParam"]:
+        from openai.types.chat import (
+            ChatCompletionAssistantMessageParam,
+            ChatCompletionUserMessageParam,
+        )
+
+        params = OpenAIProvider._as_message_param(turns)
+
+        # Content must be a string
+        for i, param in enumerate(params):
+            if param["role"] in ["assistant", "user"]:
+                param = cast(
+                    ChatCompletionAssistantMessageParam
+                    | ChatCompletionUserMessageParam,
+                    param,
+                )
+                contents = param.get("content", None)
+                if not isinstance(contents, list):
+                    continue
+                params[i]["content"] = "".join(
+                    content.get("text", "") for content in contents
+                )
+
+        return params
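DeepSeek's chat endpoint expects message content to be a plain string, so `DeepSeekProvider._as_message_param` flattens OpenAI-style lists of content parts into one string. A standalone sketch of that flattening, using plain dicts in place of the typed message params:

```python
# Join the "text" fields of content-part lists into a single string, as
# the new DeepSeek provider does; untyped dicts stand in for the params.
messages = [
    {"role": "user", "content": [
        {"type": "text", "text": "Hello, "},
        {"type": "text", "text": "world"},
    ]},
    {"role": "assistant", "content": "Hi!"},  # already a string: untouched
]

for msg in messages:
    content = msg.get("content")
    if msg["role"] in ("user", "assistant") and isinstance(content, list):
        msg["content"] = "".join(part.get("text", "") for part in content)

print(messages[0]["content"])  # Hello, world
```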
chatlas/_provider_huggingface.py
ADDED
@@ -0,0 +1,155 @@
+from __future__ import annotations
+
+import os
+from typing import TYPE_CHECKING, Optional
+
+from ._chat import Chat
+from ._logging import log_model_default
+from ._provider_openai import OpenAIProvider
+
+if TYPE_CHECKING:
+    from openai.types.chat import ChatCompletion
+
+    from .types.openai import ChatClientArgs, SubmitInputArgs
+
+
+def ChatHuggingFace(
+    *,
+    system_prompt: Optional[str] = None,
+    model: Optional[str] = None,
+    api_key: Optional[str] = None,
+    kwargs: Optional["ChatClientArgs"] = None,
+) -> Chat["SubmitInputArgs", ChatCompletion]:
+    """
+    Chat with a model hosted on Hugging Face Inference API.
+
+    [Hugging Face](https://huggingface.co/) hosts a variety of open-source
+    and proprietary AI models available via their Inference API.
+    To use the Hugging Face API, you must have an Access Token, which you can obtain
+    from your [Hugging Face account](https://huggingface.co/settings/tokens).
+    Ensure that at least "Make calls to Inference Providers" and
+    "Make calls to your Inference Endpoints" is checked.
+
+    Prerequisites
+    --------------
+
+    ::: {.callout-note}
+    ## API key
+
+    You will need to create a Hugging Face account and generate an API token
+    from your [account settings](https://huggingface.co/settings/tokens).
+    Make sure to enable "Make calls to Inference Providers" permission.
+    :::
+
+    Examples
+    --------
+    ```python
+    import os
+    from chatlas import ChatHuggingFace
+
+    chat = ChatHuggingFace(api_key=os.getenv("HUGGINGFACE_API_KEY"))
+    chat.chat("What is the capital of France?")
+    ```
+
+    Parameters
+    ----------
+    system_prompt
+        A system prompt to set the behavior of the assistant.
+    model
+        The model to use for the chat. The default, None, will pick a reasonable
+        default, and warn you about it. We strongly recommend explicitly
+        choosing a model for all but the most casual use.
+    api_key
+        The API key to use for authentication. You generally should not supply
+        this directly, but instead set the `HUGGINGFACE_API_KEY` environment
+        variable.
+    kwargs
+        Additional arguments to pass to the underlying OpenAI client
+        constructor.
+
+    Returns
+    -------
+    Chat
+        A chat object that retains the state of the conversation.
+
+    Known limitations
+    -----------------
+
+    * Some models do not support the chat interface or parts of it, for example
+      `google/gemma-2-2b-it` does not support a system prompt. You will need to
+      carefully choose the model.
+    * Tool calling support varies by model - many models do not support it.
+
+    Note
+    ----
+    This function is a lightweight wrapper around [](`~chatlas.ChatOpenAI`), with
+    the defaults tweaked for Hugging Face.
+
+    Note
+    ----
+    Pasting an API key into a chat constructor (e.g., `ChatHuggingFace(api_key="...")`)
+    is the simplest way to get started, and is fine for interactive use, but is
+    problematic for code that may be shared with others.
+
+    Instead, consider using environment variables or a configuration file to manage
+    your credentials. One popular way to manage credentials is to use a `.env` file
+    to store your credentials, and then use the `python-dotenv` package to load them
+    into your environment.
+
+    ```shell
+    pip install python-dotenv
+    ```
+
+    ```shell
+    # .env
+    HUGGINGFACE_API_KEY=...
+    ```
+
+    ```python
+    from chatlas import ChatHuggingFace
+    from dotenv import load_dotenv
+
+    load_dotenv()
+    chat = ChatHuggingFace()
+    chat.console()
+    ```
+
+    Another, more general, solution is to load your environment variables into the shell
+    before starting Python (maybe in a `.bashrc`, `.zshrc`, etc. file):
+
+    ```shell
+    export HUGGINGFACE_API_KEY=...
+    ```
+    """
+    if api_key is None:
+        api_key = os.getenv("HUGGINGFACE_API_KEY")
+
+    if model is None:
+        model = log_model_default("meta-llama/Llama-3.1-8B-Instruct")
+
+    return Chat(
+        provider=HuggingFaceProvider(
+            api_key=api_key,
+            model=model,
+            kwargs=kwargs,
+        ),
+        system_prompt=system_prompt,
+    )
+
+
+class HuggingFaceProvider(OpenAIProvider):
+    def __init__(
+        self,
+        *,
+        api_key: Optional[str] = None,
+        model: str,
+        kwargs: Optional["ChatClientArgs"] = None,
+    ):
+        # https://huggingface.co/docs/inference-providers/en/index?python-clients=requests#http--curl
+        super().__init__(
+            name="HuggingFace",
+            model=model,
+            api_key=api_key,
+            base_url="https://router.huggingface.co/v1",
+            kwargs=kwargs,
+        )
chatlas/_provider_mistral.py
ADDED
@@ -0,0 +1,181 @@
+from __future__ import annotations
+
+import os
+from typing import TYPE_CHECKING, Optional
+
+from ._chat import Chat
+from ._logging import log_model_default
+from ._provider_openai import OpenAIProvider
+from ._utils import MISSING, MISSING_TYPE, is_testing
+
+if TYPE_CHECKING:
+    from openai.types.chat import ChatCompletion
+
+    from .types.openai import ChatClientArgs, SubmitInputArgs
+
+
+def ChatMistral(
+    *,
+    system_prompt: Optional[str] = None,
+    model: Optional[str] = None,
+    api_key: Optional[str] = None,
+    base_url: str = "https://api.mistral.ai/v1/",
+    seed: int | None | MISSING_TYPE = MISSING,
+    kwargs: Optional["ChatClientArgs"] = None,
+) -> Chat["SubmitInputArgs", ChatCompletion]:
+    """
+    Chat with a model hosted on Mistral's La Plateforme.
+
+    Mistral AI provides high-performance language models through their API platform.
+
+    Prerequisites
+    -------------
+
+    ::: {.callout-note}
+    ## API credentials
+
+    Get your API key from https://console.mistral.ai/api-keys.
+    :::
+
+    Examples
+    --------
+    ```python
+    import os
+    from chatlas import ChatMistral
+
+    chat = ChatMistral(api_key=os.getenv("MISTRAL_API_KEY"))
+    chat.chat("Tell me three jokes about statisticians")
+    ```
+
+    Known limitations
+    -----------------
+
+    * Tool calling may be unstable.
+    * Images require a model that supports vision.
+
+    Parameters
+    ----------
+    system_prompt
+        A system prompt to set the behavior of the assistant.
+    model
+        The model to use for the chat. The default, None, will pick a reasonable
+        default, and warn you about it. We strongly recommend explicitly
+        choosing a model for all but the most casual use.
+    api_key
+        The API key to use for authentication. You generally should not supply
+        this directly, but instead set the `MISTRAL_API_KEY` environment
+        variable.
+    base_url
+        The base URL to the endpoint; the default uses Mistral AI.
+    seed
+        Optional integer seed that Mistral uses to try and make output more
+        reproducible.
+    kwargs
+        Additional arguments to pass to the `openai.OpenAI()` client
+        constructor (Mistral uses OpenAI-compatible API).
+
+    Returns
+    -------
+    Chat
+        A chat object that retains the state of the conversation.
+
+    Note
+    ----
+    Pasting an API key into a chat constructor (e.g., `ChatMistral(api_key="...")`)
+    is the simplest way to get started, and is fine for interactive use, but is
+    problematic for code that may be shared with others.
+
+    Instead, consider using environment variables or a configuration file to manage
+    your credentials. One popular way to manage credentials is to use a `.env` file
+    to store your credentials, and then use the `python-dotenv` package to load them
+    into your environment.
+
+    ```shell
+    pip install python-dotenv
+    ```
+
+    ```shell
+    # .env
+    MISTRAL_API_KEY=...
+    ```
+
+    ```python
+    from chatlas import ChatMistral
+    from dotenv import load_dotenv
+
+    load_dotenv()
+    chat = ChatMistral()
+    chat.console()
+    ```
+
+    Another, more general, solution is to load your environment variables into the shell
+    before starting Python (maybe in a `.bashrc`, `.zshrc`, etc. file):
+
+    ```shell
+    export MISTRAL_API_KEY=...
+    ```
+    """
+    if isinstance(seed, MISSING_TYPE):
+        seed = 1014 if is_testing() else None
+
+    if model is None:
+        model = log_model_default("mistral-large-latest")
+
+    if api_key is None:
+        api_key = os.getenv("MISTRAL_API_KEY")
+
+    return Chat(
+        provider=MistralProvider(
+            api_key=api_key,
+            model=model,
+            base_url=base_url,
+            seed=seed,
+            kwargs=kwargs,
+        ),
+        system_prompt=system_prompt,
+    )
+
+
+class MistralProvider(OpenAIProvider):
+    def __init__(
+        self,
+        *,
+        api_key: Optional[str] = None,
+        model: str,
+        base_url: str = "https://api.mistral.ai/v1/",
+        seed: Optional[int] = None,
+        name: str = "Mistral",
+        kwargs: Optional["ChatClientArgs"] = None,
+    ):
+        super().__init__(
+            api_key=api_key,
+            model=model,
+            base_url=base_url,
+            seed=seed,
+            name=name,
+            kwargs=kwargs,
+        )
+
+    # Mistral is essentially OpenAI-compatible, with a couple small differences.
+    # We _could_ bring in the Mistral SDK and use it directly for more precise typing,
+    # etc., but for now that doesn't seem worth it.
+    def _chat_perform_args(
+        self, stream, turns, tools, data_model=None, kwargs=None
+    ) -> "SubmitInputArgs":
+        # Get the base arguments from OpenAI provider
+        kwargs2 = super()._chat_perform_args(stream, turns, tools, data_model, kwargs)
+
+        # Mistral doesn't support stream_options
+        if "stream_options" in kwargs2:
+            del kwargs2["stream_options"]
+
+        # Mistral wants random_seed, not seed
+        if seed := kwargs2.pop("seed", None):
+            if isinstance(seed, int):
+                kwargs2["extra_body"] = {"random_seed": seed}
+            elif seed is not None:
+                raise ValueError(
+                    "MistralProvider only accepts an integer seed, or None."
+                )
+
+        return kwargs2
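Mistral's OpenAI-compatible API takes the reproducibility seed as `random_seed` rather than `seed`, so the provider pops the OpenAI-style key and tunnels the value through `extra_body`. A sketch of that translation on plain dicts (the function name is illustrative, not chatlas's API):

```python
# Move an OpenAI-style `seed` into Mistral's `random_seed`, carried via
# `extra_body`; `adapt_for_mistral` is a made-up name for illustration.
def adapt_for_mistral(kwargs: dict) -> dict:
    args = dict(kwargs)
    args.pop("stream_options", None)  # also unsupported by Mistral
    seed = args.pop("seed", None)
    if isinstance(seed, int):
        args["extra_body"] = {"random_seed": seed}
    return args


print(adapt_for_mistral({"model": "mistral-large-latest", "seed": 1014}))
# {'model': 'mistral-large-latest', 'extra_body': {'random_seed': 1014}}
```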
chatlas/_provider_openai.py
CHANGED
@@ -310,8 +310,7 @@ class OpenAIProvider(
            del kwargs_full["tools"]

        if stream and "stream_options" not in kwargs_full:
-
-            kwargs_full["stream_options"] = {"include_usage": True}
+            kwargs_full["stream_options"] = {"include_usage": True}

        return kwargs_full

@@ -411,7 +410,9 @@ class OpenAIProvider(
            if isinstance(x, ContentText):
                content_parts.append({"type": "text", "text": x.text})
            elif isinstance(x, ContentJson):
-                content_parts.append(
+                content_parts.append(
+                    {"type": "text", "text": "<structured data/>"}
+                )
            elif isinstance(x, ContentToolRequest):
                tool_calls.append(
                    {

@@ -450,7 +451,7 @@ class OpenAIProvider(
            if isinstance(x, ContentText):
                contents.append({"type": "text", "text": x.text})
            elif isinstance(x, ContentJson):
-                contents.append({"type": "text", "text": ""})
+                contents.append({"type": "text", "text": "<structured data/>"})
            elif isinstance(x, ContentPDF):
                contents.append(
                    {

@@ -522,7 +523,10 @@ class OpenAIProvider(
        contents: list[Content] = []
        if message.content is not None:
            if has_data_model:
-                data = orjson.loads(message.content)
+                data = message.content
+                # Some providers (e.g., Cloudflare) may already provide a dict
+                if not isinstance(data, dict):
+                    data = orjson.loads(data)
                contents = [ContentJson(value=data)]
            else:
                contents = [ContentText(text=message.content)]
chatlas/_provider_openrouter.py
ADDED
@@ -0,0 +1,149 @@
+from __future__ import annotations
+
+import os
+from typing import TYPE_CHECKING, Optional
+
+from ._chat import Chat
+from ._logging import log_model_default
+from ._provider_openai import OpenAIProvider
+from ._utils import MISSING, MISSING_TYPE, is_testing
+
+if TYPE_CHECKING:
+    from ._provider_openai import ChatCompletion
+    from .types.openai import ChatClientArgs, SubmitInputArgs
+
+
+def ChatOpenRouter(
+    *,
+    system_prompt: Optional[str] = None,
+    model: Optional[str] = None,
+    api_key: Optional[str] = None,
+    base_url: str = "https://openrouter.ai/api/v1",
+    seed: Optional[int] | MISSING_TYPE = MISSING,
+    kwargs: Optional["ChatClientArgs"] = None,
+) -> Chat["SubmitInputArgs", ChatCompletion]:
+    """
+    Chat with one of the many models hosted on OpenRouter.
+
+    OpenRouter provides access to a wide variety of language models from different providers
+    through a unified API. Support for features depends on the underlying model that you use.
+
+    Prerequisites
+    -------------
+
+    ::: {.callout-note}
+    ## API key
+
+    Sign up at <https://openrouter.ai> to get an API key.
+    :::
+
+    Examples
+    --------
+
+    ```python
+    import os
+    from chatlas import ChatOpenRouter
+
+    chat = ChatOpenRouter(api_key=os.getenv("OPENROUTER_API_KEY"))
+    chat.chat("What is the capital of France?")
+    ```
+
+    Parameters
+    ----------
+    system_prompt
+        A system prompt to set the behavior of the assistant.
+    model
+        The model to use for the chat. The default, None, will pick a reasonable
+        default, and warn you about it. We strongly recommend explicitly choosing
+        a model for all but the most casual use. See <https://openrouter.ai/models>
+        for available models.
+    api_key
+        The API key to use for authentication. You generally should not supply
+        this directly, but instead set the `OPENROUTER_API_KEY` environment variable.
+    base_url
+        The base URL to the endpoint; the default uses OpenRouter's API.
+    seed
+        Optional integer seed that the model uses to try and make output more
+        reproducible.
+    kwargs
+        Additional arguments to pass to the `openai.OpenAI()` client constructor.
+
+    Returns
+    -------
+    Chat
+        A chat object that retains the state of the conversation.
+
+    Note
+    ----
+    This function is a lightweight wrapper around [](`~chatlas.ChatOpenAI`) with
+    the defaults tweaked for OpenRouter.
+
+    Note
+    ----
+    Pasting an API key into a chat constructor (e.g., `ChatOpenRouter(api_key="...")`)
+    is the simplest way to get started, and is fine for interactive use, but is
+    problematic for code that may be shared with others.
+
+    Instead, consider using environment variables or a configuration file to manage
+    your credentials. One popular way to manage credentials is to use a `.env` file
+    to store your credentials, and then use the `python-dotenv` package to load them
+    into your environment.
+
+    ```shell
+    pip install python-dotenv
+    ```
+
+    ```shell
+    # .env
+    OPENROUTER_API_KEY=...
+    ```
+
+    ```python
+    from chatlas import ChatOpenRouter
+    from dotenv import load_dotenv
+
+    load_dotenv()
+    chat = ChatOpenRouter()
+    chat.console()
+    ```
+
+    Another, more general, solution is to load your environment variables into the shell
+    before starting Python (maybe in a `.bashrc`, `.zshrc`, etc. file):
+
+    ```shell
+    export OPENROUTER_API_KEY=...
+    ```
+    """
+    if model is None:
+        model = log_model_default("gpt-4.1")
+
+    if api_key is None:
+        api_key = os.getenv("OPENROUTER_API_KEY")
+
+    if isinstance(seed, MISSING_TYPE):
+        seed = 1014 if is_testing() else None
+
+    kwargs2 = add_default_headers(kwargs or {})
+
+    return Chat(
+        provider=OpenAIProvider(
+            api_key=api_key,
+            model=model,
+            base_url=base_url,
+            seed=seed,
+            name="OpenRouter",
+            kwargs=kwargs2,
+        ),
+        system_prompt=system_prompt,
+    )
+
+
+def add_default_headers(kwargs: "ChatClientArgs") -> "ChatClientArgs":
+    headers = kwargs.get("default_headers", None)
+    # https://openrouter.ai/docs/api-keys
+    default_headers = {
+        "HTTP-Referer": "https://posit-dev.github.io/chatlas",
+        "X-Title": "chatlas",
+        **(headers or {}),
+    }
+    return {"default_headers": default_headers, **kwargs}
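`add_default_headers` leans on dict-merge ordering: keys unpacked later win, so headers the caller already set override the OpenRouter attribution defaults. The precedence rule in isolation:

```python
# In a dict literal, later unpacked keys overwrite earlier ones, so
# user-supplied headers take precedence over the merged-in defaults.
defaults = {"HTTP-Referer": "https://posit-dev.github.io/chatlas", "X-Title": "chatlas"}
user = {"X-Title": "my-app"}

merged = {**defaults, **user}
print(merged["X-Title"])       # my-app
print(merged["HTTP-Referer"])  # https://posit-dev.github.io/chatlas
```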
chatlas/_provider_portkey.py
ADDED
@@ -0,0 +1,123 @@
+from __future__ import annotations
+
+import os
+from typing import TYPE_CHECKING, Optional
+
+from ._chat import Chat
+from ._logging import log_model_default
+from ._provider_openai import OpenAIProvider
+from ._utils import drop_none
+
+if TYPE_CHECKING:
+    from ._provider_openai import ChatCompletion
+    from .types.openai import ChatClientArgs, SubmitInputArgs
+
+
+def ChatPortkey(
+    *,
+    system_prompt: Optional[str] = None,
+    model: Optional[str] = None,
+    api_key: Optional[str] = None,
+    virtual_key: Optional[str] = None,
+    base_url: str = "https://api.portkey.ai/v1",
+    kwargs: Optional["ChatClientArgs"] = None,
+) -> Chat["SubmitInputArgs", ChatCompletion]:
+    """
+    Chat with a model hosted on PortkeyAI
+
+    [PortkeyAI](https://portkey.ai/docs/product/ai-gateway/universal-api)
+    provides an interface (AI Gateway) to connect through its Universal API to a
+    variety of LLMs providers with a single endpoint.
+
+    Prerequisites
+    -------------
+
+    ::: {.callout-note}
+    ## Portkey credentials
+
+    Follow the instructions at <https://portkey.ai/docs/introduction/make-your-first-request>
+    to get started making requests to PortkeyAI. You will need to set the
+    `PORTKEY_API_KEY` environment variable to your Portkey API key, and optionally
+    the `PORTKEY_VIRTUAL_KEY` environment variable to your virtual key.
+    :::
+
+    Examples
+    --------
+    ```python
+    import os
+    from chatlas import ChatPortkey
+
+    chat = ChatPortkey(api_key=os.getenv("PORTKEY_API_KEY"))
+    chat.chat("What is the capital of France?")
+    ```
+
+    Parameters
+    ----------
+    system_prompt
+        A system prompt to set the behavior of the assistant.
+    model
+        The model to use for the chat. The default, None, will pick a reasonable
+        default, and warn you about it. We strongly recommend explicitly
+        choosing a model for all but the most casual use.
+    api_key
+        The API key to use for authentication. You generally should not supply
+        this directly, but instead set the `PORTKEY_API_KEY` environment variable.
+    virtual_key
+        An (optional) virtual identifier, storing the LLM provider's API key. See
+        [documentation](https://portkey.ai/docs/product/ai-gateway/virtual-keys).
+        You generally should not supply this directly, but instead set the
+        `PORTKEY_VIRTUAL_KEY` environment variable.
+    base_url
+        The base URL for the Portkey API. The default is suitable for most users.
+    kwargs
+        Additional arguments to pass to the OpenAIProvider, such as headers or
+        other client configuration options.
+
+    Returns
+    -------
+    Chat
+        A chat object that retains the state of the conversation.
+
+    Notes
+    -----
+    This function is a lightweight wrapper around [](`~chatlas.ChatOpenAI`) with
+    the defaults tweaked for PortkeyAI.
+
+    """
+    if model is None:
+        model = log_model_default("gpt-4.1")
+    if api_key is None:
+        api_key = os.getenv("PORTKEY_API_KEY")
+
+    kwargs2 = add_default_headers(
+        kwargs or {},
+        api_key=api_key,
+        virtual_key=virtual_key,
+    )
+
+    return Chat(
+        provider=OpenAIProvider(
+            api_key=api_key,
+            model=model,
+            base_url=base_url,
+            name="Portkey",
+            kwargs=kwargs2,
+        ),
+        system_prompt=system_prompt,
+    )
+
+
+def add_default_headers(
+    kwargs: "ChatClientArgs",
+    api_key: Optional[str] = None,
+    virtual_key: Optional[str] = None,
+) -> "ChatClientArgs":
+    headers = kwargs.get("default_headers", None)
+    default_headers = drop_none(
+        {
+            "x-portkey-api-key": api_key,
+            "x-portkey-virtual-key": virtual_key,
+            **(headers or {}),
+        }
+    )
+    return {"default_headers": default_headers, **kwargs}
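Portkey authenticates via `x-portkey-*` request headers, and `drop_none` (a chatlas internal utility) keeps unset credentials out of the header dict entirely. A sketch with a local stand-in for that helper:

```python
# Build Portkey-style auth headers while omitting None values; this local
# drop_none mirrors what the chatlas helper is used for here.
def drop_none(d: dict) -> dict:
    return {k: v for k, v in d.items() if v is not None}


headers = drop_none({
    "x-portkey-api-key": "pk-example",
    "x-portkey-virtual-key": None,  # not configured, so omitted
})
print(headers)  # {'x-portkey-api-key': 'pk-example'}
```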
chatlas/_tokens.py
CHANGED
@@ -8,7 +8,7 @@ from typing import TYPE_CHECKING
 import orjson

 from ._logging import logger
-from ._typing_extensions import TypedDict
+from ._typing_extensions import NotRequired, TypedDict

 if TYPE_CHECKING:
     from ._provider import Provider

@@ -109,11 +109,11 @@ class TokenPrice(TypedDict):
     """The provider name (e.g., "OpenAI", "Anthropic", etc.)"""
     model: str
     """The model name (e.g., "gpt-3.5-turbo", "claude-2", etc.)"""
-    cached_input: float
+    cached_input: NotRequired[float]
     """The cost per user token in USD per million tokens for cached input"""
     input: float
     """The cost per user token in USD per million tokens"""
-    output: float
+    output: NotRequired[float]
     """The cost per assistant token in USD per million tokens"""


@@ -160,8 +160,8 @@ def compute_cost(
     if price is None:
         return None
     input_price = input_tokens * (price["input"] / 1e6)
-    output_price = output_tokens * (price["output"] / 1e6)
-    cached_price = cached_tokens * (price["cached_input"] / 1e6)
+    output_price = output_tokens * (price.get("output", 0) / 1e6)
+    cached_price = cached_tokens * (price.get("cached_input", 0) / 1e6)
     return input_price + output_price + cached_price

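Together, `NotRequired` and `.get(..., 0)` let a price entry omit `output` or `cached_input` without breaking cost computation: the TypedDict accepts the partial dict, and the lookup falls back to zero. A minimal sketch with illustrative numbers:

```python
# NotRequired keys may be absent from a TypedDict instance; pairing them
# with .get(..., 0) keeps the arithmetic total. Prices are illustrative.
from typing import NotRequired, TypedDict  # typing_extensions on Python < 3.11


class TokenPrice(TypedDict):
    input: float
    output: NotRequired[float]
    cached_input: NotRequired[float]


price: TokenPrice = {"input": 3.0}  # valid even with optional keys omitted
cost = 1000 * (price["input"] / 1e6) + 500 * (price.get("output", 0) / 1e6)
print(round(cost, 6))  # 0.003 -- the missing "output" contributes nothing
```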
chatlas/_typing_extensions.py
CHANGED
@@ -14,13 +14,13 @@ else:
     # they should both come from the same typing module.
     # https://peps.python.org/pep-0655/#usage-in-python-3-11
     if sys.version_info >= (3, 11):
-        from typing import Required, TypedDict
+        from typing import NotRequired, Required, TypedDict
     else:
-        from typing_extensions import Required, TypedDict
+        from typing_extensions import NotRequired, Required, TypedDict


 # The only purpose of the following line is so that pyright will put all of the
 # conditional imports into the .pyi file when generating type stubs. Without this line,
 # pyright will not include the above imports in the generated .pyi file, and it will
 # result in a lot of red squiggles in user code.
-_: "ParamSpec | TypeGuard | is_typeddict | Required | TypedDict"  # type: ignore
+_: "ParamSpec | TypeGuard | is_typeddict | NotRequired | Required | TypedDict"  # type: ignore
chatlas/_version.py
CHANGED
@@ -1,7 +1,14 @@
 # file generated by setuptools-scm
 # don't change, don't track in version control

-__all__ = [
+__all__ = [
+    "__version__",
+    "__version_tuple__",
+    "version",
+    "version_tuple",
+    "__commit_id__",
+    "commit_id",
+]

 TYPE_CHECKING = False
 if TYPE_CHECKING:

@@ -9,13 +16,19 @@ if TYPE_CHECKING:
     from typing import Union

     VERSION_TUPLE = Tuple[Union[int, str], ...]
+    COMMIT_ID = Union[str, None]
 else:
     VERSION_TUPLE = object
+    COMMIT_ID = object

 version: str
 __version__: str
 __version_tuple__: VERSION_TUPLE
 version_tuple: VERSION_TUPLE
+commit_id: COMMIT_ID
+__commit_id__: COMMIT_ID

-__version__ = version = '0.9.2'
-__version_tuple__ = version_tuple = (0, 9, 2)
+__version__ = version = '0.10.0'
+__version_tuple__ = version_tuple = (0, 10, 0)
+
+__commit_id__ = commit_id = None
chatlas/types/anthropic/_client.py
CHANGED
@@ -17,5 +17,5 @@ class ChatClientArgs(TypedDict, total=False):
     max_retries: int
     default_headers: Optional[Mapping[str, str]]
     default_query: Optional[Mapping[str, object]]
-    http_client: httpx.AsyncClient
+    http_client: httpx.AsyncClient | None
     _strict_response_validation: bool

chatlas/types/anthropic/_client_bedrock.py
CHANGED
@@ -19,5 +19,5 @@ class ChatBedrockClientArgs(TypedDict, total=False):
     max_retries: int
     default_headers: Optional[Mapping[str, str]]
     default_query: Optional[Mapping[str, object]]
-    http_client: httpx.AsyncClient
+    http_client: httpx.AsyncClient | None
     _strict_response_validation: bool
chatlas/types/openai/_client.py
CHANGED
@@ -20,5 +20,5 @@ class ChatClientArgs(TypedDict, total=False):
     max_retries: int
     default_headers: Optional[Mapping[str, str]]
     default_query: Optional[Mapping[str, object]]
-    http_client: httpx.AsyncClient
+    http_client: httpx.AsyncClient | None
     _strict_response_validation: bool

chatlas/types/openai/_client_azure.py
CHANGED
@@ -23,5 +23,5 @@ class ChatAzureClientArgs(TypedDict, total=False):
     max_retries: int
     default_headers: Optional[Mapping[str, str]]
     default_query: Optional[Mapping[str, object]]
-    http_client: httpx.AsyncClient
+    http_client: httpx.AsyncClient | None
     _strict_response_validation: bool
|
chatlas/types/openai/_submit.py
CHANGED
|
@@ -177,6 +177,9 @@ class SubmitInputArgs(TypedDict, total=False):
|
|
|
177
177
|
top_p: Union[float, None, openai.NotGiven]
|
|
178
178
|
user: str | openai.NotGiven
|
|
179
179
|
verbosity: Union[Literal["low", "medium", "high"], None, openai.NotGiven]
|
|
180
|
+
web_search_options: (
|
|
181
|
+
openai.types.chat.completion_create_params.WebSearchOptions | openai.NotGiven
|
|
182
|
+
)
|
|
180
183
|
extra_headers: Optional[Mapping[str, Union[str, openai.Omit]]]
|
|
181
184
|
extra_query: Optional[Mapping[str, object]]
|
|
182
185
|
extra_body: object | None
|
|
{chatlas-0.9.2.dist-info → chatlas-0.10.0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: chatlas
-Version: 0.9.2
+Version: 0.10.0
 Summary: A simple and consistent interface for chatting with LLMs
 Project-URL: Homepage, https://posit-dev.github.io/chatlas
 Project-URL: Documentation, https://posit-dev.github.io/chatlas

@@ -79,7 +79,7 @@ Provides-Extra: vertex
 Requires-Dist: google-genai>=1.14.0; extra == 'vertex'
 Description-Content-Type: text/markdown

-# chatlas <a href="https://posit-dev.github.io/chatlas"><img src="
+# chatlas <a href="https://posit-dev.github.io/chatlas"><img src="https://posit-dev.github.io/chatlas/logos/hex/logo.png" align="right" height="138" alt="chatlas website" /></a>

 <p>
 <!-- badges start -->

@@ -135,7 +135,7 @@ chat.chat("How's the weather in San Francisco?")
 ```


-<img src="
+<img src="https://posit-dev.github.io/chatlas/images/chatlas-hello.png" alt="Model response output to the user query: 'How's the weather in San Francisco?'" width="67%" style="display: block; margin-left: auto; margin-right: auto">


 Learn more at <https://posit-dev.github.io/chatlas>
{chatlas-0.9.2.dist-info → chatlas-0.10.0.dist-info}/RECORD
CHANGED
@@ -1,7 +1,7 @@
-chatlas/__init__.py,sha256=
+chatlas/__init__.py,sha256=acbfWs7cquCWnP3ZyxYNNQLSQyIvEg2jbDn2O0iArnE,2111
 chatlas/_auto.py,sha256=-s7XGzsKLX4RipWtk4WOE8iKbOBhXPUPtI0-63PpXCY,5660
 chatlas/_callbacks.py,sha256=3RpPaOQonTqScjXbaShgKJ1Rc-YxzWerxKRBjVssFnc,1838
-chatlas/_chat.py,sha256=
+chatlas/_chat.py,sha256=cZ7HG4jELhRumNvbSvsG0Lv-GZTl9wQrx8oydnAmKxE,82062
 chatlas/_content.py,sha256=Jk0frLSdZTEyGu4KDHsgQbQQDHEX9nYVMNUXx4OKGSo,19775
 chatlas/_content_image.py,sha256=EUK6wAint-JatLsiwvaPDu4D3W-NcIsDCkzABkXgfDg,8304
 chatlas/_content_pdf.py,sha256=cffeuJxzhUDukQ-Srkmpy62M8X12skYpU_FVq-Wvya4,2420

@@ -12,37 +12,43 @@ chatlas/_logging.py,sha256=weKvXZDIZ88X7X61ruXM_S0AAhQ5mgiW9dR-km8x7Mg,3324
 chatlas/_mcp_manager.py,sha256=smMXeKZzP90MrlCdnTHMyo7AWHwl7J2jkU8dKSlnEsQ,10237
 chatlas/_merge.py,sha256=SGj_BetgA7gaOqSBKOhYmW3CYeQKTEehFrXvx3y4OYE,3924
 chatlas/_provider.py,sha256=0cl6JtMe6xRbc-ghp4JqdwTv6OQeewQUgdToRSktJ3I,5374
-chatlas/_provider_anthropic.py,sha256=
-chatlas/_provider_databricks.py,sha256=
+chatlas/_provider_anthropic.py,sha256=YHXZLtrNnZEVPGYGgsfsiqpM9Zm6HDv8zuj3cW7i8FE,25888
+chatlas/_provider_cloudflare.py,sha256=Q3YB0wMl_DWWBCp7RmDMXxDqIScM1mSKeNTEJ5xpIOY,4932
+chatlas/_provider_databricks.py,sha256=XytRyn_oAJzSUfz4CkTqq2LsCyYror4VN5jBO55tx0E,4526
+chatlas/_provider_deepseek.py,sha256=6nPtPSo-Po6sD4i8PZJHuI5T2oATpLi5djXFGdlserk,4906
 chatlas/_provider_github.py,sha256=68dXdNxEQo5Yf7MtxA-SdV3HGXzMEQie1sP3c5u7Akk,3921
 chatlas/_provider_google.py,sha256=Q-VgKij7T3y3zPMhx73BxoHM8RG499Ediw4lMNWWVYA,20297
 chatlas/_provider_groq.py,sha256=XB2JDyuF95CcSbNkgk7JHcuy9KCW7hxTVaONDSjK8U8,3671
+chatlas/_provider_huggingface.py,sha256=feJ416X0UdtyoeHZbkgolFf62D7zxNwM7i_X3NYsQQw,4669
+chatlas/_provider_mistral.py,sha256=-p4rut0KCn-PrwnOlvr6lK8-K-OXvc5H9vTX-rCzUkk,5309
 chatlas/_provider_ollama.py,sha256=2TNg5UTEYQvjeChqyJ8hh8fV-A0Xh-B5Hv8b5rtA9FY,3309
-chatlas/_provider_openai.py,sha256=
+chatlas/_provider_openai.py,sha256=wupHVATX3Ra244jRLhQkAitBZzp0MIIuCYIzdYiPdEU,25554
+chatlas/_provider_openrouter.py,sha256=9sCXvROVIiUdwfEbkVA-15_kc6ouFUP2uV2MmUe2rFk,4385
 chatlas/_provider_perplexity.py,sha256=hEfKYmNrv3yU-IP_3xCS02u7pQunhKC6iZpoiKWX9fc,3958
+chatlas/_provider_portkey.py,sha256=G-U66By6t5iW4h5m4ut_2DBUq8Z7qTtcjuty-WF3nSw,3809
 chatlas/_provider_snowflake.py,sha256=83P7NiNT3D_JNeX_zs4JXwDWeoaYo9IKxN91W0GWWho,24310
-chatlas/_tokens.py,sha256=
+chatlas/_tokens.py,sha256=QUsBLNJPgXk8vovcG5JdQU8NarCv7FRpOVBdgFkBgHs,5388
 chatlas/_tokens_old.py,sha256=L9d9oafrXvEx2u4nIn_Jjn7adnQyLBnYBuPwJUE8Pl8,5005
 chatlas/_tools.py,sha256=bOXJ0ry6vQqU8Qm-PVdESN8HTuUv1teqPH_vtqILv9k,11088
 chatlas/_turn.py,sha256=yK7alUxeP8d2iBc7amyz20BtEqcpvX6BCwWZsnlQ5R4,4515
-chatlas/_typing_extensions.py,sha256=
+chatlas/_typing_extensions.py,sha256=MB9vWMWlm-IF8uOQfrTcfb66MV6gYXn3zgnbdwAC7BQ,1076
 chatlas/_utils.py,sha256=Kku2fa1mvTYCr5D28VxE6-fwfy2e2doCi-eKQkLEg4Y,4686
-chatlas/_version.py,sha256=
+chatlas/_version.py,sha256=XS8OMho0YiZyQ_qDeRsy__m_nWUzYVEJw-NLk1VtDQU,706
 chatlas/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 chatlas/data/prices.json,sha256=vESC5G1SDCnPrfRQTrvJ9wVdxyLEJZrAygV0oJ99ccc,56230
 chatlas/types/__init__.py,sha256=oRgbo1FIC2qkiZ6Bi4n3RNgCA6VDTeueoCHO4-6h1NA,725
 chatlas/types/anthropic/__init__.py,sha256=OwubA-DPHYpYo0XyRyAFwftOI0mOxtHzAyhUSLcDx54,417
-chatlas/types/anthropic/_client.py,sha256=
-chatlas/types/anthropic/_client_bedrock.py,sha256=
+chatlas/types/anthropic/_client.py,sha256=t_tnOzzsW1xWNADkNoAuZJYoE9QJ8ie7DQNnFO1pvoM,697
+chatlas/types/anthropic/_client_bedrock.py,sha256=2J6U1QcSx1KwiiHfXs3i4YEXDXw11sp-x3iLOuESrgQ,792
 chatlas/types/anthropic/_submit.py,sha256=o5bpKEne6lqBz4YBLoYwRLKCVmBIdzRetiMCoOdUfb0,3661
 chatlas/types/google/__init__.py,sha256=ZJhi8Kwvio2zp8T1TQqmvdHqkS-Khb6BGESPjREADgo,337
 chatlas/types/google/_client.py,sha256=t7aKbxYq_xOA1Z3RnWcjewifdQFSHi7vKEj6MyKMCJk,729
 chatlas/types/google/_submit.py,sha256=19Ji4fAo1lTCbNSpR6Yi0i64RJwMGBdiZKQcnoDNRwY,1796
 chatlas/types/openai/__init__.py,sha256=Q2RAr1bSH1nHsxICK05nAmKmxdhKmhbBkWD_XHiVSrI,411
-chatlas/types/openai/_client.py,sha256=
-chatlas/types/openai/_client_azure.py,sha256=
-chatlas/types/openai/_submit.py,sha256=
-chatlas-0.9.2.dist-info/METADATA,sha256=
-chatlas-0.9.2.dist-info/WHEEL,sha256=
-chatlas-0.9.2.dist-info/licenses/LICENSE,sha256=
-chatlas-0.9.2.dist-info/RECORD,,
+chatlas/types/openai/_client.py,sha256=SttisELwAd52_Je_5q3RfWGdX5wbg2CoGbxhS8ThS0A,792
+chatlas/types/openai/_client_azure.py,sha256=b8Hr7iKYA5-sq9r7uEqbBFv9yo3itppmHIgkEGvChMs,896
+chatlas/types/openai/_submit.py,sha256=rhft1h7zy6eSlSBLkt7ZAySFh-8WnR5UEG-BXaFTxag,7815
+chatlas-0.10.0.dist-info/METADATA,sha256=dhX_Mf6xlpwwaJajsGTGuJ7GCwEZLmBZqqU7NvbOObY,5594
+chatlas-0.10.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+chatlas-0.10.0.dist-info/licenses/LICENSE,sha256=zyuGzPOC7CcbOaBHsQ3UEyKYRO56KDUkor0OA4LqqDg,1081
+chatlas-0.10.0.dist-info/RECORD,,
{chatlas-0.9.2.dist-info → chatlas-0.10.0.dist-info}/WHEEL
File without changes

{chatlas-0.9.2.dist-info → chatlas-0.10.0.dist-info}/licenses/LICENSE
File without changes