chatlas 0.9.1__py3-none-any.whl → 0.10.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- chatlas/__init__.py +21 -9
- chatlas/_auto.py +9 -9
- chatlas/_chat.py +38 -9
- chatlas/{_anthropic.py → _provider_anthropic.py} +13 -5
- chatlas/_provider_cloudflare.py +165 -0
- chatlas/{_databricks.py → _provider_databricks.py} +13 -2
- chatlas/_provider_deepseek.py +171 -0
- chatlas/{_github.py → _provider_github.py} +2 -2
- chatlas/{_google.py → _provider_google.py} +5 -5
- chatlas/{_groq.py → _provider_groq.py} +2 -2
- chatlas/_provider_huggingface.py +155 -0
- chatlas/_provider_mistral.py +181 -0
- chatlas/{_ollama.py → _provider_ollama.py} +2 -2
- chatlas/{_openai.py → _provider_openai.py} +28 -9
- chatlas/_provider_openrouter.py +149 -0
- chatlas/{_perplexity.py → _provider_perplexity.py} +2 -2
- chatlas/_provider_portkey.py +123 -0
- chatlas/{_snowflake.py → _provider_snowflake.py} +3 -3
- chatlas/_tokens.py +27 -12
- chatlas/_turn.py +3 -4
- chatlas/_typing_extensions.py +3 -3
- chatlas/_version.py +16 -3
- chatlas/data/prices.json +2769 -163
- chatlas/types/__init__.py +3 -3
- chatlas/types/anthropic/_client.py +1 -1
- chatlas/types/anthropic/_client_bedrock.py +1 -1
- chatlas/types/anthropic/_submit.py +5 -5
- chatlas/types/google/_submit.py +23 -29
- chatlas/types/openai/_client.py +1 -1
- chatlas/types/openai/_client_azure.py +1 -1
- chatlas/types/openai/_submit.py +28 -3
- {chatlas-0.9.1.dist-info → chatlas-0.10.0.dist-info}/METADATA +4 -4
- chatlas-0.10.0.dist-info/RECORD +54 -0
- chatlas-0.9.1.dist-info/RECORD +0 -48
- {chatlas-0.9.1.dist-info → chatlas-0.10.0.dist-info}/WHEEL +0 -0
- {chatlas-0.9.1.dist-info → chatlas-0.10.0.dist-info}/licenses/LICENSE +0 -0
chatlas/__init__.py
CHANGED
````diff
@@ -1,20 +1,26 @@
 from . import types
-from ._anthropic import ChatAnthropic, ChatBedrockAnthropic
 from ._auto import ChatAuto
 from ._chat import Chat
 from ._content import ContentToolRequest, ContentToolResult, ContentToolResultImage
 from ._content_image import content_image_file, content_image_plot, content_image_url
 from ._content_pdf import content_pdf_file, content_pdf_url
-from ._databricks import ChatDatabricks
-from ._github import ChatGithub
-from ._google import ChatGoogle, ChatVertex
-from ._groq import ChatGroq
 from ._interpolate import interpolate, interpolate_file
-from ._ollama import ChatOllama
-from ._openai import ChatAzureOpenAI, ChatOpenAI
-from ._perplexity import ChatPerplexity
 from ._provider import Provider
-from ._snowflake import ChatSnowflake
+from ._provider_anthropic import ChatAnthropic, ChatBedrockAnthropic
+from ._provider_cloudflare import ChatCloudflare
+from ._provider_databricks import ChatDatabricks
+from ._provider_deepseek import ChatDeepSeek
+from ._provider_github import ChatGithub
+from ._provider_google import ChatGoogle, ChatVertex
+from ._provider_groq import ChatGroq
+from ._provider_huggingface import ChatHuggingFace
+from ._provider_mistral import ChatMistral
+from ._provider_ollama import ChatOllama
+from ._provider_openai import ChatAzureOpenAI, ChatOpenAI
+from ._provider_openrouter import ChatOpenRouter
+from ._provider_perplexity import ChatPerplexity
+from ._provider_portkey import ChatPortkey
+from ._provider_snowflake import ChatSnowflake
 from ._tokens import token_usage
 from ._tools import Tool, ToolRejectError
 from ._turn import Turn
@@ -28,14 +34,20 @@ __all__ = (
     "ChatAnthropic",
     "ChatAuto",
     "ChatBedrockAnthropic",
+    "ChatCloudflare",
     "ChatDatabricks",
+    "ChatDeepSeek",
     "ChatGithub",
     "ChatGoogle",
     "ChatGroq",
+    "ChatHuggingFace",
+    "ChatMistral",
     "ChatOllama",
     "ChatOpenAI",
+    "ChatOpenRouter",
     "ChatAzureOpenAI",
     "ChatPerplexity",
+    "ChatPortkey",
     "ChatSnowflake",
     "ChatVertex",
     "Chat",
````
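All of the new provider constructors are exported at the package root, so switching providers is a one-line change. A minimal sketch, assuming the relevant credentials are set in the environment (e.g. `DEEPSEEK_API_KEY`):

```python
# Hedged sketch: `deepseek-chat` is the default model this release logs for
# ChatDeepSeek; any of the new constructors can be swapped in here.
from chatlas import ChatCloudflare, ChatDeepSeek, ChatOpenRouter

chat = ChatDeepSeek(model="deepseek-chat")
chat.chat("What is the capital of France?")
```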
chatlas/_auto.py
CHANGED
````diff
@@ -5,16 +5,16 @@ from typing import Callable, Literal, Optional

 import orjson

-from ._anthropic import ChatAnthropic, ChatBedrockAnthropic
 from ._chat import Chat
-from ._databricks import ChatDatabricks
-from ._github import ChatGithub
-from ._google import ChatGoogle, ChatVertex
-from ._groq import ChatGroq
-from ._ollama import ChatOllama
-from ._openai import ChatAzureOpenAI, ChatOpenAI
-from ._perplexity import ChatPerplexity
-from ._snowflake import ChatSnowflake
+from ._provider_anthropic import ChatAnthropic, ChatBedrockAnthropic
+from ._provider_databricks import ChatDatabricks
+from ._provider_github import ChatGithub
+from ._provider_google import ChatGoogle, ChatVertex
+from ._provider_groq import ChatGroq
+from ._provider_ollama import ChatOllama
+from ._provider_openai import ChatAzureOpenAI, ChatOpenAI
+from ._provider_perplexity import ChatPerplexity
+from ._provider_snowflake import ChatSnowflake

 AutoProviders = Literal[
     "anthropic",
````
chatlas/_chat.py
CHANGED
````diff
@@ -65,6 +65,7 @@ class TokensDict(TypedDict):
     role: Literal["user", "assistant"]
     tokens: int
     tokens_total: int
+    tokens_cached: int


 CompletionT = TypeVar("CompletionT")
@@ -293,12 +294,15 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
             {
                 "role": "user",
                 "tokens": turns[1].tokens[0],
+                # Number of tokens currently cached (reduces input token usage)
+                "tokens_cached": turns[1].tokens[2],
                 "tokens_total": turns[1].tokens[0],
             },
             # The token count for the 1st assistant response
             {
                 "role": "assistant",
                 "tokens": turns[1].tokens[1],
+                "tokens_cached": 0,
                 "tokens_total": turns[1].tokens[1],
             },
         ]
@@ -319,8 +323,11 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
         [
             {
                 "role": "user",
-                # Implied token count for the user input
+                # Implied new token count for the user input (input tokens - context - cached reads)
+                # Cached reads are only subtracted for particular providers
                 "tokens": tj.tokens[0] - sum(ti.tokens),
+                # Number of tokens currently cached (reduces input token usage depending on provider's API)
+                "tokens_cached": tj.tokens[2],
                 # Total tokens = Total User Tokens for the Turn = Distinct new tokens + context sent
                 "tokens_total": tj.tokens[0],
             },
@@ -329,6 +336,7 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
                 # The token count for the assistant response
                 "tokens": tj.tokens[1],
                 # Total tokens = Total Assistant tokens used in the turn
+                "tokens_cached": 0,
                 "tokens_total": tj.tokens[1],
             },
         ]
@@ -339,7 +347,7 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
     def get_cost(
         self,
         options: Literal["all", "last"] = "all",
-        token_price: Optional[tuple[float, float]] = None,
+        token_price: Optional[tuple[float, float, float]] = None,
     ) -> float:
         """
         Estimate the cost of the chat.
@@ -357,11 +365,13 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
             - `"last"`: Return the cost of the last turn in the chat.
         token_price
             An optional tuple in the format of (input_token_cost,
-            output_token_cost) for bringing your own cost information.
+            output_token_cost, cached_token_cost) for bringing your own cost information.
             - `"input_token_cost"`: The cost per user token in USD per
               million tokens.
             - `"output_token_cost"`: The cost per assistant token in USD
               per million tokens.
+            - `"cached_token_cost"`: The cost per cached token read in USD
+              per million tokens.

         Returns
         -------
@@ -374,15 +384,19 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
         if token_price:
             input_token_price = token_price[0] / 1e6
             output_token_price = token_price[1] / 1e6
+            cached_token_price = token_price[2] / 1e6
         else:
             price_token = get_token_pricing(self.provider.name, self.provider.model)
             if not price_token:
                 raise KeyError(
-                    f"We could not locate pricing information for model '{self.provider.model}'. "
+                    f"We could not locate pricing information for model '{self.provider.model}'"
+                    f" from provider '{self.provider.name}'. "
                     "If you know the pricing for this model, specify it in `token_price`."
                 )
+
             input_token_price = price_token["input"] / 1e6
-            output_token_price = price_token["output"] / 1e6
+            output_token_price = price_token.get("output", 0) / 1e6
+            cached_token_price = price_token.get("cached_input", 0) / 1e6

         if len(turns_tokens) == 0:
             return 0.0
@@ -399,8 +413,16 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
         user_tokens = sum(
             u["tokens_total"] for u in turns_tokens if u["role"] == "user"
         )
-        cost = (asst_tokens * output_token_price) + (
-            user_tokens * input_token_price
+        # We add the cached tokens here because for relevant providers they have already been subtracted
+        # from the user tokens. This assumes the provider uses (reads) the cache each time.
+        cached_token_reads = sum(
+            u["tokens_cached"] for u in turns_tokens if u["role"] == "user"
+        )
+
+        cost = (
+            (asst_tokens * output_token_price)
+            + (user_tokens * input_token_price)
+            + (cached_token_reads * cached_token_price)
         )
         return cost

@@ -408,7 +430,9 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
         if last_turn["role"] == "assistant":
             return last_turn["tokens"] * output_token_price
         if last_turn["role"] == "user":
-            return last_turn["tokens_total"] * input_token_price
+            return (last_turn["tokens_total"] * input_token_price) + (
+                last_turn["tokens_cached"] * cached_token_price
+            )
         raise ValueError(
             f"Expected last turn to have a role of 'user' or `'assistant'`, not '{last_turn['role']}'"
         )
@@ -2224,8 +2248,12 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
         tokens = self.get_tokens()
         tokens_asst = sum(u["tokens_total"] for u in tokens if u["role"] == "assistant")
         tokens_user = sum(u["tokens_total"] for u in tokens if u["role"] == "user")
+        tokens_cached = sum(u["tokens_cached"] for u in tokens if u["role"] == "user")

-        res = f"<Chat {self.provider.name}/{self.provider.model} turns={len(turns)} tokens={tokens_user}/{tokens_asst}"
+        res = (
+            f"<Chat {self.provider.name}/{self.provider.model} turns={len(turns)}"
+            f" tokens={tokens_user + tokens_cached}/{tokens_asst}"
+        )

         # Add cost info only if we can compute it
         cost = compute_cost(
@@ -2233,6 +2261,7 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
             self.provider.model,
             tokens_user,
             tokens_asst,
+            tokens_cached,
         )
         if cost is not None:
             res += f" ${round(cost, ndigits=2)}"
````
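`get_cost()` now takes a three-element `token_price` tuple, with cached reads billed at the third rate. A usage sketch with placeholder prices (USD per million tokens, not real rates):

```python
from chatlas import ChatOpenAI

chat = ChatOpenAI()
chat.chat("In one sentence, what is prompt caching?")

# (input_token_cost, output_token_cost, cached_token_cost) — placeholder numbers.
# Internally: cost = output*p_out + input*p_in + cached_reads*p_cached.
print(chat.get_cost(options="all", token_price=(2.0, 8.0, 0.5)))
```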
chatlas/{_anthropic.py → _provider_anthropic.py}
RENAMED

````diff
@@ -163,7 +163,7 @@ def ChatAnthropic(
     """

     if model is None:
-        model = log_model_default("claude-
+        model = log_model_default("claude-sonnet-4-0")

     return Chat(
         provider=AnthropicProvider(
@@ -586,7 +586,16 @@ class AnthropicProvider(
             )
         )

-        tokens = (completion.usage.input_tokens, completion.usage.output_tokens)
+        usage = completion.usage
+        # N.B. Currently, Anthropic doesn't cache by default and we currently do not support
+        # manual caching in chatlas. Note also that this only tracks reads, NOT writes, which
+        # have their own cost. To track that properly, we would need another caching category and per-token cost.
+
+        tokens = (
+            completion.usage.input_tokens,
+            completion.usage.output_tokens,
+            usage.cache_read_input_tokens if usage.cache_read_input_tokens else 0,
+        )

         tokens_log(self, tokens)

@@ -733,8 +742,7 @@ def ChatBedrockAnthropic(
     """

     if model is None:
-
-        model = log_model_default("anthropic.claude-3-5-sonnet-20241022-v2:0")
+        model = log_model_default("us.anthropic.claude-sonnet-4-20250514-v1:0")

     return Chat(
         provider=AnthropicBedrockProvider(
@@ -764,7 +772,7 @@ class AnthropicBedrockProvider(AnthropicProvider):
         aws_session_token: str | None,
         max_tokens: int = 4096,
         base_url: str | None,
-        name: str = "
+        name: str = "AWS/Bedrock",
         kwargs: Optional["ChatBedrockClientArgs"] = None,
     ):
         super().__init__(name=name, model=model, max_tokens=max_tokens)
````
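Downstream of these provider changes, every entry returned by `Chat.get_tokens()` carries the new `tokens_cached` field (zero for assistant turns). A small inspection sketch; the model shown is the default this release logs:

```python
from chatlas import ChatAnthropic

chat = ChatAnthropic(model="claude-sonnet-4-0")
chat.chat("What is 2 + 2?")

for entry in chat.get_tokens():
    # TokensDict: role, tokens, tokens_total, and (new in 0.10.0) tokens_cached
    print(entry["role"], entry["tokens"], entry["tokens_cached"], entry["tokens_total"])
```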
chatlas/_provider_cloudflare.py
ADDED

````diff
@@ -0,0 +1,165 @@
+from __future__ import annotations
+
+import os
+from typing import TYPE_CHECKING, Optional
+
+from ._chat import Chat
+from ._logging import log_model_default
+from ._provider_openai import OpenAIProvider
+from ._utils import MISSING, MISSING_TYPE, is_testing
+
+if TYPE_CHECKING:
+    from ._provider_openai import ChatCompletion
+    from .types.openai import ChatClientArgs, SubmitInputArgs
+
+
+def ChatCloudflare(
+    *,
+    account: Optional[str] = None,
+    system_prompt: Optional[str] = None,
+    model: Optional[str] = None,
+    api_key: Optional[str] = None,
+    seed: Optional[int] | MISSING_TYPE = MISSING,
+    kwargs: Optional["ChatClientArgs"] = None,
+) -> Chat["SubmitInputArgs", ChatCompletion]:
+    """
+    Chat with a model hosted on Cloudflare Workers AI.
+
+    Cloudflare Workers AI hosts a variety of open-source AI models.
+
+    Prerequisites
+    -------------
+
+    ::: {.callout-note}
+    ## API credentials
+
+    To use the Cloudflare API, you must have an Account ID and an Access Token,
+    which you can obtain by following the instructions at
+    <https://developers.cloudflare.com/workers-ai/get-started/rest-api/>.
+    :::
+
+    Examples
+    --------
+
+    ```python
+    import os
+    from chatlas import ChatCloudflare
+
+    chat = ChatCloudflare(
+        api_key=os.getenv("CLOUDFLARE_API_KEY"),
+        account=os.getenv("CLOUDFLARE_ACCOUNT_ID"),
+    )
+    chat.chat("What is the capital of France?")
+    ```
+
+    Known limitations
+    -----------------
+
+    - Tool calling does not appear to work.
+    - Images don't appear to work.
+
+    Parameters
+    ----------
+    account
+        The Cloudflare account ID. You generally should not supply this directly,
+        but instead set the `CLOUDFLARE_ACCOUNT_ID` environment variable.
+    system_prompt
+        A system prompt to set the behavior of the assistant.
+    model
+        The model to use for the chat. The default, None, will pick a reasonable
+        default, and warn you about it. We strongly recommend explicitly choosing
+        a model for all but the most casual use.
+    api_key
+        The API key to use for authentication. You generally should not supply
+        this directly, but instead set the `CLOUDFLARE_API_KEY` environment
+        variable.
+    seed
+        Optional integer seed that ChatGPT uses to try and make output more
+        reproducible.
+    kwargs
+        Additional arguments to pass to the `openai.OpenAI()` client constructor.
+
+    Returns
+    -------
+    Chat
+        A chat object that retains the state of the conversation.
+
+    Note
+    ----
+    This function is a lightweight wrapper around [](`~chatlas.ChatOpenAI`) with
+    the defaults tweaked for Cloudflare.
+
+    Note
+    ----
+    Pasting credentials into a chat constructor (e.g.,
+    `ChatCloudflare(api_key="...", account="...")`) is the simplest way to get
+    started, and is fine for interactive use, but is problematic for code that
+    may be shared with others.
+
+    Instead, consider using environment variables or a configuration file to manage
+    your credentials. One popular way to manage credentials is to use a `.env` file
+    to store your credentials, and then use the `python-dotenv` package to load them
+    into your environment.
+
+    ```shell
+    pip install python-dotenv
+    ```
+
+    ```shell
+    # .env
+    CLOUDFLARE_API_KEY=...
+    CLOUDFLARE_ACCOUNT_ID=...
+    ```
+
+    ```python
+    from chatlas import ChatCloudflare
+    from dotenv import load_dotenv
+
+    load_dotenv()
+    chat = ChatCloudflare()
+    chat.console()
+    ```
+
+    Another, more general, solution is to load your environment variables into the shell
+    before starting Python (maybe in a `.bashrc`, `.zshrc`, etc. file):
+
+    ```shell
+    export CLOUDFLARE_API_KEY=...
+    export CLOUDFLARE_ACCOUNT_ID=...
+    ```
+    """
+    # List at https://developers.cloudflare.com/workers-ai/models/
+    # `@cf` appears to be part of the model name
+    if model is None:
+        model = log_model_default("@cf/meta/llama-3.3-70b-instruct-fp8-fast")
+
+    if api_key is None:
+        api_key = os.getenv("CLOUDFLARE_API_KEY")
+
+    if account is None:
+        account = os.getenv("CLOUDFLARE_ACCOUNT_ID")
+
+    if account is None:
+        raise ValueError(
+            "Cloudflare account ID is required. Set the CLOUDFLARE_ACCOUNT_ID "
+            "environment variable or pass the `account` parameter."
+        )
+
+    if isinstance(seed, MISSING_TYPE):
+        seed = 1014 if is_testing() else None
+
+    # https://developers.cloudflare.com/workers-ai/configuration/open-ai-compatibility/
+    cloudflare_api = "https://api.cloudflare.com/client/v4/accounts"
+    base_url = f"{cloudflare_api}/{account}/ai/v1/"
+
+    return Chat(
+        provider=OpenAIProvider(
+            api_key=api_key,
+            model=model,
+            base_url=base_url,
+            seed=seed,
+            name="Cloudflare",
+            kwargs=kwargs,
+        ),
+        system_prompt=system_prompt,
+    )
````
chatlas/{_databricks.py → _provider_databricks.py}
RENAMED

````diff
@@ -4,12 +4,12 @@ from typing import TYPE_CHECKING, Optional

 from ._chat import Chat
 from ._logging import log_model_default
-from ._openai import OpenAIProvider
+from ._provider_openai import OpenAIProvider

 if TYPE_CHECKING:
     from databricks.sdk import WorkspaceClient

-    from ._openai import ChatCompletion
+    from ._provider_openai import ChatCompletion
     from .types.openai import SubmitInputArgs


@@ -127,3 +127,14 @@ class DatabricksProvider(OpenAIProvider):
             api_key="no-token",  # A placeholder to pass validations, this will not be used
             http_client=httpx.AsyncClient(auth=client._client.auth),
         )
+
+    # Databricks doesn't support stream_options
+    def _chat_perform_args(
+        self, stream, turns, tools, data_model=None, kwargs=None
+    ) -> "SubmitInputArgs":
+        kwargs2 = super()._chat_perform_args(stream, turns, tools, data_model, kwargs)
+
+        if "stream_options" in kwargs2:
+            del kwargs2["stream_options"]
+
+        return kwargs2
````
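The effect of the new `_chat_perform_args` override, shown as a standalone sketch (the dict contents are hypothetical): per the comment in the diff, Databricks doesn't support `stream_options`, so chatlas strips it from the submit kwargs before the request goes out.

```python
# Hypothetical submit kwargs as assembled by the base OpenAI provider:
submit_kwargs = {
    "model": "databricks-dbrx-instruct",  # placeholder model name
    "stream": True,
    "stream_options": {"include_usage": True},
}

# What the override does before sending the request:
if "stream_options" in submit_kwargs:
    del submit_kwargs["stream_options"]

print(submit_kwargs)  # {'model': 'databricks-dbrx-instruct', 'stream': True}
```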
chatlas/_provider_deepseek.py
ADDED

````diff
@@ -0,0 +1,171 @@
+from __future__ import annotations
+
+import os
+from typing import TYPE_CHECKING, Optional, cast
+
+from ._chat import Chat
+from ._logging import log_model_default
+from ._provider_openai import OpenAIProvider
+from ._turn import Turn
+from ._utils import MISSING, MISSING_TYPE, is_testing
+
+if TYPE_CHECKING:
+    from openai.types.chat import ChatCompletion, ChatCompletionMessageParam
+
+    from .types.openai import ChatClientArgs, SubmitInputArgs
+
+
+def ChatDeepSeek(
+    *,
+    system_prompt: Optional[str] = None,
+    model: Optional[str] = None,
+    api_key: Optional[str] = None,
+    base_url: str = "https://api.deepseek.com",
+    seed: Optional[int] | MISSING_TYPE = MISSING,
+    kwargs: Optional["ChatClientArgs"] = None,
+) -> Chat["SubmitInputArgs", ChatCompletion]:
+    """
+    Chat with a model hosted on DeepSeek.
+
+    DeepSeek is a platform for AI inference with competitive pricing
+    and performance.
+
+    Prerequisites
+    -------------
+
+    ::: {.callout-note}
+    ## API key
+
+    Sign up at <https://platform.deepseek.com> to get an API key.
+    :::
+
+    Examples
+    --------
+
+    ```python
+    import os
+    from chatlas import ChatDeepSeek
+
+    chat = ChatDeepSeek(api_key=os.getenv("DEEPSEEK_API_KEY"))
+    chat.chat("What is the capital of France?")
+    ```
+
+    Known limitations
+    -----------------
+
+    * Structured data extraction is not supported.
+    * Images are not supported.
+
+    Parameters
+    ----------
+    system_prompt
+        A system prompt to set the behavior of the assistant.
+    model
+        The model to use for the chat. The default, None, will pick a reasonable
+        default, and warn you about it. We strongly recommend explicitly choosing
+        a model for all but the most casual use.
+    api_key
+        The API key to use for authentication. You generally should not supply
+        this directly, but instead set the `DEEPSEEK_API_KEY` environment variable.
+    base_url
+        The base URL to the endpoint; the default uses DeepSeek's API.
+    seed
+        Optional integer seed that DeepSeek uses to try and make output more
+        reproducible.
+    kwargs
+        Additional arguments to pass to the `openai.OpenAI()` client constructor.
+
+    Returns
+    -------
+    Chat
+        A chat object that retains the state of the conversation.
+
+    Note
+    ----
+    This function is a lightweight wrapper around [](`~chatlas.ChatOpenAI`) with
+    the defaults tweaked for DeepSeek.
+
+    Note
+    ----
+    Pasting an API key into a chat constructor (e.g., `ChatDeepSeek(api_key="...")`)
+    is the simplest way to get started, and is fine for interactive use, but is
+    problematic for code that may be shared with others.
+
+    Instead, consider using environment variables or a configuration file to manage
+    your credentials. One popular way to manage credentials is to use a `.env` file
+    to store your credentials, and then use the `python-dotenv` package to load them
+    into your environment.
+
+    ```shell
+    pip install python-dotenv
+    ```
+
+    ```shell
+    # .env
+    DEEPSEEK_API_KEY=...
+    ```
+
+    ```python
+    from chatlas import ChatDeepSeek
+    from dotenv import load_dotenv
+
+    load_dotenv()
+    chat = ChatDeepSeek()
+    chat.console()
+    ```
+
+    Another, more general, solution is to load your environment variables into the shell
+    before starting Python (maybe in a `.bashrc`, `.zshrc`, etc. file):
+
+    ```shell
+    export DEEPSEEK_API_KEY=...
+    ```
+    """
+    if model is None:
+        model = log_model_default("deepseek-chat")
+
+    if api_key is None:
+        api_key = os.getenv("DEEPSEEK_API_KEY")
+
+    if isinstance(seed, MISSING_TYPE):
+        seed = 1014 if is_testing() else None
+
+    return Chat(
+        provider=DeepSeekProvider(
+            api_key=api_key,
+            model=model,
+            base_url=base_url,
+            seed=seed,
+            name="DeepSeek",
+            kwargs=kwargs,
+        ),
+        system_prompt=system_prompt,
+    )
+
+
+class DeepSeekProvider(OpenAIProvider):
+    @staticmethod
+    def _as_message_param(turns: list[Turn]) -> list["ChatCompletionMessageParam"]:
+        from openai.types.chat import (
+            ChatCompletionAssistantMessageParam,
+            ChatCompletionUserMessageParam,
+        )
+
+        params = OpenAIProvider._as_message_param(turns)
+
+        # Content must be a string
+        for i, param in enumerate(params):
+            if param["role"] in ["assistant", "user"]:
+                param = cast(
+                    ChatCompletionAssistantMessageParam
+                    | ChatCompletionUserMessageParam,
+                    param,
+                )
+                contents = param.get("content", None)
+                if not isinstance(contents, list):
+                    continue
+                params[i]["content"] = "".join(
+                    content.get("text", "") for content in contents
+                )
+
+        return params
````
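A standalone sketch of what `DeepSeekProvider._as_message_param` does to list-style message content (sample data invented): DeepSeek requires `content` to be a plain string, so the text of each part is concatenated.

```python
params = [
    {
        "role": "user",
        "content": [
            {"type": "text", "text": "Hello, "},
            {"type": "text", "text": "world"},
        ],
    }
]

for i, param in enumerate(params):
    contents = param.get("content")
    if isinstance(contents, list):
        # Join the text of each content part into a single string
        params[i]["content"] = "".join(part.get("text", "") for part in contents)

print(params)  # [{'role': 'user', 'content': 'Hello, world'}]
```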
chatlas/{_github.py → _provider_github.py}
RENAMED

````diff
@@ -5,11 +5,11 @@ from typing import TYPE_CHECKING, Optional

 from ._chat import Chat
 from ._logging import log_model_default
-from ._openai import OpenAIProvider
+from ._provider_openai import OpenAIProvider
 from ._utils import MISSING, MISSING_TYPE, is_testing

 if TYPE_CHECKING:
-    from ._openai import ChatCompletion
+    from ._provider_openai import ChatCompletion
     from .types.openai import ChatClientArgs, SubmitInputArgs


````
chatlas/{_google.py → _provider_google.py}
RENAMED

````diff
@@ -426,9 +426,7 @@ class GoogleProvider(
                 )
             )
         elif isinstance(content, ContentToolResult):
-            if isinstance(
-                content, (ContentToolResultImage, ContentToolResultResource)
-            ):
+            if isinstance(content, (ContentToolResultImage, ContentToolResultResource)):
                 raise NotImplementedError(
                     "Tool results with images or resources aren't supported by Google (Gemini). "
                 )
@@ -507,11 +505,13 @@ class GoogleProvider(
             )

         usage = message.get("usage_metadata")
-        tokens = (0, 0)
+        tokens = (0, 0, 0)
         if usage:
+            cached = usage.get("cached_content_token_count") or 0
             tokens = (
-                usage.get("prompt_token_count") or 0,
+                (usage.get("prompt_token_count") or 0) - cached,
                 usage.get("candidates_token_count") or 0,
+                usage.get("cached_content_token_count") or 0,
             )

         tokens_log(self, tokens)
````