chatlas 0.9.2__py3-none-any.whl → 0.11.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of chatlas might be problematic. Click here for more details.

chatlas/__init__.py CHANGED
@@ -1,19 +1,30 @@
1
1
  from . import types
2
2
  from ._auto import ChatAuto
3
3
  from ._chat import Chat
4
- from ._content import ContentToolRequest, ContentToolResult, ContentToolResultImage
4
+ from ._content import (
5
+ ContentToolRequest,
6
+ ContentToolResult,
7
+ ContentToolResultImage,
8
+ ContentToolResultResource,
9
+ )
5
10
  from ._content_image import content_image_file, content_image_plot, content_image_url
6
11
  from ._content_pdf import content_pdf_file, content_pdf_url
7
12
  from ._interpolate import interpolate, interpolate_file
8
13
  from ._provider import Provider
9
14
  from ._provider_anthropic import ChatAnthropic, ChatBedrockAnthropic
15
+ from ._provider_cloudflare import ChatCloudflare
10
16
  from ._provider_databricks import ChatDatabricks
17
+ from ._provider_deepseek import ChatDeepSeek
11
18
  from ._provider_github import ChatGithub
12
19
  from ._provider_google import ChatGoogle, ChatVertex
13
20
  from ._provider_groq import ChatGroq
21
+ from ._provider_huggingface import ChatHuggingFace
22
+ from ._provider_mistral import ChatMistral
14
23
  from ._provider_ollama import ChatOllama
15
24
  from ._provider_openai import ChatAzureOpenAI, ChatOpenAI
25
+ from ._provider_openrouter import ChatOpenRouter
16
26
  from ._provider_perplexity import ChatPerplexity
27
+ from ._provider_portkey import ChatPortkey
17
28
  from ._provider_snowflake import ChatSnowflake
18
29
  from ._tokens import token_usage
19
30
  from ._tools import Tool, ToolRejectError
@@ -28,14 +39,20 @@ __all__ = (
28
39
  "ChatAnthropic",
29
40
  "ChatAuto",
30
41
  "ChatBedrockAnthropic",
42
+ "ChatCloudflare",
31
43
  "ChatDatabricks",
44
+ "ChatDeepSeek",
32
45
  "ChatGithub",
33
46
  "ChatGoogle",
34
47
  "ChatGroq",
48
+ "ChatHuggingFace",
49
+ "ChatMistral",
35
50
  "ChatOllama",
36
51
  "ChatOpenAI",
52
+ "ChatOpenRouter",
37
53
  "ChatAzureOpenAI",
38
54
  "ChatPerplexity",
55
+ "ChatPortkey",
39
56
  "ChatSnowflake",
40
57
  "ChatVertex",
41
58
  "Chat",
@@ -47,6 +64,7 @@ __all__ = (
47
64
  "ContentToolRequest",
48
65
  "ContentToolResult",
49
66
  "ContentToolResultImage",
67
+ "ContentToolResultResource",
50
68
  "interpolate",
51
69
  "interpolate_file",
52
70
  "Provider",
@@ -56,3 +74,9 @@ __all__ = (
56
74
  "Turn",
57
75
  "types",
58
76
  )
77
+
78
+ # Rebuild content models to resolve forward references to ToolAnnotation
79
+ ContentToolRequest.model_rebuild()
80
+ ContentToolResult.model_rebuild()
81
+ ContentToolResultImage.model_rebuild()
82
+ ContentToolResultResource.model_rebuild()
chatlas/_chat.py CHANGED
@@ -9,6 +9,7 @@ import warnings
9
9
  from pathlib import Path
10
10
  from threading import Thread
11
11
  from typing import (
12
+ TYPE_CHECKING,
12
13
  Any,
13
14
  AsyncGenerator,
14
15
  AsyncIterator,
@@ -43,13 +44,16 @@ from ._display import (
43
44
  )
44
45
  from ._logging import log_tool_error
45
46
  from ._mcp_manager import MCPSessionManager
46
- from ._provider import Provider, StandardModelParams, SubmitInputArgsT
47
+ from ._provider import ModelInfo, Provider, StandardModelParams, SubmitInputArgsT
47
48
  from ._tokens import compute_cost, get_token_pricing
48
49
  from ._tools import Tool, ToolRejectError
49
50
  from ._turn import Turn, user_turn
50
51
  from ._typing_extensions import TypedDict, TypeGuard
51
52
  from ._utils import MISSING, MISSING_TYPE, html_escape, wrap_async
52
53
 
54
+ if TYPE_CHECKING:
55
+ from mcp.types import ToolAnnotations
56
+
53
57
 
54
58
  class TokensDict(TypedDict):
55
59
  """
@@ -128,6 +132,78 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
128
132
  self._standard_model_params: StandardModelParams = {}
129
133
  self._submit_input_kwargs: Optional[SubmitInputArgsT] = None
130
134
 
135
+ def list_models(self) -> list[ModelInfo]:
136
+ """
137
+ List all models available for the provider.
138
+
139
+ This method returns detailed information about all models supported by the provider,
140
+ including model IDs, pricing information, creation dates, and other metadata. This is
141
+ useful for discovering available models and their characteristics without needing to
142
+ consult provider documentation.
143
+
144
+ Examples
145
+ --------
146
+ Get all available models:
147
+
148
+ ```python
149
+ from chatlas import ChatOpenAI
150
+
151
+ chat = ChatOpenAI()
152
+ models = chat.list_models()
153
+ print(f"Found {len(models)} models")
154
+ print(f"First model: {models[0]['id']}")
155
+ ```
156
+
157
+ View models in a table format:
158
+
159
+ ```python
160
+ import pandas as pd
161
+ from chatlas import ChatAnthropic
162
+
163
+ chat = ChatAnthropic()
164
+ df = pd.DataFrame(chat.list_models())
165
+ print(df[["id", "input", "output"]].head()) # Show pricing info
166
+ ```
167
+
168
+ Find models by criteria:
169
+
170
+ ```python
171
+ from chatlas import ChatGoogle
172
+
173
+ chat = ChatGoogle()
174
+ models = chat.list_models()
175
+
176
+ # Find cheapest input model
177
+ cheapest = min(models, key=lambda m: m.get("input", float("inf")))
178
+ print(f"Cheapest model: {cheapest['id']}")
179
+ ```
180
+
181
+ Returns
182
+ -------
183
+ list[ModelInfo]
184
+ A list of ModelInfo dictionaries containing model information. Each dictionary
185
+ contains:
186
+
187
+ - `id` (str): The model identifier to use with the Chat constructor
188
+ - `name` (str, optional): Human-readable model name
189
+ - `input` (float, optional): Cost per input token in USD per million tokens
190
+ - `output` (float, optional): Cost per output token in USD per million tokens
191
+ - `cached_input` (float, optional): Cost per cached input token in USD per million tokens
192
+ - `created_at` (date, optional): Date the model was created
193
+ - `owned_by` (str, optional): Organization that owns the model
194
+ - `provider` (str, optional): Model provider name
195
+ - `size` (int, optional): Model size in bytes
196
+ - `url` (str, optional): URL with more information about the model
197
+
198
+ The list is typically sorted by creation date (most recent first).
199
+
200
+ Note
201
+ ----
202
+ Not all providers support this method. Some providers may raise NotImplementedError
203
+ with information about where to find model listings online.
204
+ """
205
+ return self.provider.list_models()
206
+
131
207
  def get_turns(
132
208
  self,
133
209
  *,
@@ -395,8 +471,8 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
395
471
  )
396
472
 
397
473
  input_token_price = price_token["input"] / 1e6
398
- output_token_price = price_token["output"] / 1e6
399
- cached_token_price = price_token["cached_input"] / 1e6
474
+ output_token_price = price_token.get("output", 0) / 1e6
475
+ cached_token_price = price_token.get("cached_input", 0) / 1e6
400
476
 
401
477
  if len(turns_tokens) == 0:
402
478
  return 0.0
@@ -1462,6 +1538,7 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
1462
1538
  *,
1463
1539
  force: bool = False,
1464
1540
  model: Optional[type[BaseModel]] = None,
1541
+ annotations: "Optional[ToolAnnotations]" = None,
1465
1542
  ):
1466
1543
  """
1467
1544
  Register a tool (function) with the chat.
@@ -1539,13 +1616,16 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
1539
1616
  The primary reason why you might want to provide a model in
1540
1617
  Note that the name and docstring of the model takes precedence over the
1541
1618
  name and docstring of the function.
1619
+ annotations
1620
+ Additional properties that describe the tool and its behavior.
1621
+ Should be an instance of `mcp.types.ToolAnnotations`.
1542
1622
 
1543
1623
  Raises
1544
1624
  ------
1545
1625
  ValueError
1546
1626
  If a tool with the same name already exists and `force` is `False`.
1547
1627
  """
1548
- tool = Tool.from_func(func, model=model)
1628
+ tool = Tool.from_func(func, model=model, annotations=annotations)
1549
1629
  if tool.name in self._tools and not force:
1550
1630
  raise ValueError(
1551
1631
  f"Tool with name '{tool.name}' is already registered. "
@@ -1853,6 +1933,7 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
1853
1933
  all_results: list[ContentToolResult] = []
1854
1934
  for x in turn.contents:
1855
1935
  if isinstance(x, ContentToolRequest):
1936
+ x.tool = self._tools.get(x.name)
1856
1937
  if echo == "output":
1857
1938
  self._echo_content(f"\n\n{x}\n\n")
1858
1939
  if content == "all":
@@ -1913,6 +1994,7 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
1913
1994
  all_results: list[ContentToolResult] = []
1914
1995
  for x in turn.contents:
1915
1996
  if isinstance(x, ContentToolRequest):
1997
+ x.tool = self._tools.get(x.name)
1916
1998
  if echo == "output":
1917
1999
  self._echo_content(f"\n\n{x}\n\n")
1918
2000
  if content == "all":
@@ -2070,8 +2152,8 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
2070
2152
  self._turns.extend([user_turn, turn])
2071
2153
 
2072
2154
  def _invoke_tool(self, request: ContentToolRequest):
2073
- tool_def = self._tools.get(request.name, None)
2074
- func = tool_def.func if tool_def is not None else None
2155
+ tool = request.tool
2156
+ func = tool.func if tool is not None else None
2075
2157
 
2076
2158
  if func is None:
2077
2159
  yield self._handle_tool_error_result(
@@ -2118,21 +2200,20 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
2118
2200
  yield self._handle_tool_error_result(request, e)
2119
2201
 
2120
2202
  async def _invoke_tool_async(self, request: ContentToolRequest):
2121
- tool_def = self._tools.get(request.name, None)
2122
- func = None
2123
- if tool_def:
2124
- if tool_def._is_async:
2125
- func = tool_def.func
2126
- else:
2127
- func = wrap_async(tool_def.func)
2203
+ tool = request.tool
2128
2204
 
2129
- if func is None:
2205
+ if tool is None:
2130
2206
  yield self._handle_tool_error_result(
2131
2207
  request,
2132
2208
  error=RuntimeError("Unknown tool."),
2133
2209
  )
2134
2210
  return
2135
2211
 
2212
+ if tool._is_async:
2213
+ func = tool.func
2214
+ else:
2215
+ func = wrap_async(tool.func)
2216
+
2136
2217
  # First, invoke the request callbacks. If a ToolRejectError is raised,
2137
2218
  # treat it like a tool failure (i.e., gracefully handle it).
2138
2219
  result: ContentToolResult | None = None
chatlas/_content.py CHANGED
@@ -1,11 +1,14 @@
1
1
  from __future__ import annotations
2
2
 
3
3
  from pprint import pformat
4
- from typing import Any, Literal, Optional, Union
4
+ from typing import TYPE_CHECKING, Any, Literal, Optional, Union
5
5
 
6
6
  import orjson
7
7
  from pydantic import BaseModel, ConfigDict
8
8
 
9
+ if TYPE_CHECKING:
10
+ from ._tools import Tool
11
+
9
12
  ImageContentTypes = Literal[
10
13
  "image/png",
11
14
  "image/jpeg",
@@ -171,11 +174,15 @@ class ContentToolRequest(Content):
171
174
  The name of the tool/function to call.
172
175
  arguments
173
176
  The arguments to pass to the tool/function.
177
+ tool
178
+ The tool/function to be called. This is set internally by chatlas's tool
179
+ calling loop.
174
180
  """
175
181
 
176
182
  id: str
177
183
  name: str
178
184
  arguments: object
185
+ tool: Optional["Tool"] = None
179
186
 
180
187
  content_type: ContentTypeEnum = "tool_request"
181
188
 
chatlas/_provider.py CHANGED
@@ -1,6 +1,7 @@
1
1
  from __future__ import annotations
2
2
 
3
3
  from abc import ABC, abstractmethod
4
+ from datetime import date
4
5
  from typing import (
5
6
  AsyncIterable,
6
7
  Generic,
@@ -16,7 +17,7 @@ from pydantic import BaseModel
16
17
  from ._content import Content
17
18
  from ._tools import Tool
18
19
  from ._turn import Turn
19
- from ._typing_extensions import TypedDict
20
+ from ._typing_extensions import NotRequired, TypedDict
20
21
 
21
22
  ChatCompletionT = TypeVar("ChatCompletionT")
22
23
  ChatCompletionChunkT = TypeVar("ChatCompletionChunkT")
@@ -35,6 +36,40 @@ submitting input to a model provider.
35
36
  """
36
37
 
37
38
 
39
+ class ModelInfo(TypedDict):
40
+ "Information returned from the `.list_models()` method"
41
+
42
+ id: str
43
+ "The model ID (this gets passed to the `model` parameter of the `Chat` constructor)"
44
+
45
+ cached_input: NotRequired[float | None]
46
+ "The cost per user token in USD per million tokens for cached input"
47
+
48
+ input: NotRequired[float | None]
49
+ "The cost per user token in USD per million tokens"
50
+
51
+ output: NotRequired[float | None]
52
+ "The cost per assistant token in USD per million tokens"
53
+
54
+ created_at: NotRequired[date]
55
+ "The date the model was created"
56
+
57
+ name: NotRequired[str]
58
+ "The model name"
59
+
60
+ owned_by: NotRequired[str]
61
+ "The owner of the model"
62
+
63
+ size: NotRequired[int]
64
+ "The size of the model in bytes"
65
+
66
+ provider: NotRequired[str]
67
+ "The provider of the model"
68
+
69
+ url: NotRequired[str]
70
+ "A URL to learn more about the model"
71
+
72
+
38
73
  class StandardModelParams(TypedDict, total=False):
39
74
  """
40
75
  A TypedDict representing the standard model parameters that can be set
@@ -102,6 +137,13 @@ class Provider(
102
137
  """
103
138
  return self._model
104
139
 
140
+ @abstractmethod
141
+ def list_models(self) -> list[ModelInfo]:
142
+ """
143
+ List all available models for the provider.
144
+ """
145
+ pass
146
+
105
147
  @overload
106
148
  @abstractmethod
107
149
  def chat_perform(
@@ -21,8 +21,8 @@ from ._content import (
21
21
  ContentToolResultResource,
22
22
  )
23
23
  from ._logging import log_model_default
24
- from ._provider import Provider, StandardModelParamNames, StandardModelParams
25
- from ._tokens import tokens_log
24
+ from ._provider import ModelInfo, Provider, StandardModelParamNames, StandardModelParams
25
+ from ._tokens import get_token_pricing, tokens_log
26
26
  from ._tools import Tool, basemodel_to_param_schema
27
27
  from ._turn import Turn, user_turn
28
28
  from ._utils import split_http_client_kwargs
@@ -163,7 +163,7 @@ def ChatAnthropic(
163
163
  """
164
164
 
165
165
  if model is None:
166
- model = log_model_default("claude-3-7-sonnet-latest")
166
+ model = log_model_default("claude-sonnet-4-0")
167
167
 
168
168
  return Chat(
169
169
  provider=AnthropicProvider(
@@ -209,6 +209,30 @@ class AnthropicProvider(
209
209
  self._client = Anthropic(**sync_kwargs) # type: ignore
210
210
  self._async_client = AsyncAnthropic(**async_kwargs)
211
211
 
212
+ def list_models(self):
213
+ models = self._client.models.list()
214
+
215
+ res: list[ModelInfo] = []
216
+ for m in models:
217
+ pricing = get_token_pricing(self.name, m.id) or {}
218
+ info: ModelInfo = {
219
+ "id": m.id,
220
+ "name": m.display_name,
221
+ "created_at": m.created_at.date(),
222
+ "input": pricing.get("input"),
223
+ "output": pricing.get("output"),
224
+ "cached_input": pricing.get("cached_input"),
225
+ }
226
+ res.append(info)
227
+
228
+ # Sort list by created_at field (more recent first)
229
+ res.sort(
230
+ key=lambda x: x.get("created_at", 0),
231
+ reverse=True,
232
+ )
233
+
234
+ return res
235
+
212
236
  @overload
213
237
  def chat_perform(
214
238
  self,
@@ -742,8 +766,7 @@ def ChatBedrockAnthropic(
742
766
  """
743
767
 
744
768
  if model is None:
745
- # Default model from https://github.com/anthropics/anthropic-sdk-python?tab=readme-ov-file#aws-bedrock
746
- model = log_model_default("anthropic.claude-3-5-sonnet-20241022-v2:0")
769
+ model = log_model_default("us.anthropic.claude-sonnet-4-20250514-v1:0")
747
770
 
748
771
  return Chat(
749
772
  provider=AnthropicBedrockProvider(
@@ -798,3 +821,26 @@ class AnthropicBedrockProvider(AnthropicProvider):
798
821
 
799
822
  self._client = AnthropicBedrock(**kwargs_full) # type: ignore
800
823
  self._async_client = AsyncAnthropicBedrock(**kwargs_full) # type: ignore
824
+
825
+ def list_models(self):
826
+ # boto3 should come via anthropic's bedrock extras
827
+ import boto3
828
+
829
+ bedrock = boto3.client("bedrock")
830
+ resp = bedrock.list_foundation_models()
831
+ models = resp["modelSummaries"]
832
+
833
+ res: list[ModelInfo] = []
834
+ for m in models:
835
+ pricing = get_token_pricing(self.name, m["modelId"]) or {}
836
+ info: ModelInfo = {
837
+ "id": m["modelId"],
838
+ "name": m["modelName"],
839
+ "provider": m["providerName"],
840
+ "input": pricing.get("input"),
841
+ "output": pricing.get("output"),
842
+ "cached_input": pricing.get("cached_input"),
843
+ }
844
+ res.append(info)
845
+
846
+ return res
@@ -0,0 +1,173 @@
1
+ from __future__ import annotations
2
+
3
+ import os
4
+ from typing import TYPE_CHECKING, Optional
5
+
6
+ from ._chat import Chat
7
+ from ._logging import log_model_default
8
+ from ._provider_openai import OpenAIProvider
9
+ from ._utils import MISSING, MISSING_TYPE, is_testing
10
+
11
+ if TYPE_CHECKING:
12
+ from ._provider_openai import ChatCompletion
13
+ from .types.openai import ChatClientArgs, SubmitInputArgs
14
+
15
+
16
+ def ChatCloudflare(
17
+ *,
18
+ account: Optional[str] = None,
19
+ system_prompt: Optional[str] = None,
20
+ model: Optional[str] = None,
21
+ api_key: Optional[str] = None,
22
+ seed: Optional[int] | MISSING_TYPE = MISSING,
23
+ kwargs: Optional["ChatClientArgs"] = None,
24
+ ) -> Chat["SubmitInputArgs", ChatCompletion]:
25
+ """
26
+ Chat with a model hosted on Cloudflare Workers AI.
27
+
28
+ Cloudflare Workers AI hosts a variety of open-source AI models.
29
+
30
+ Prerequisites
31
+ -------------
32
+
33
+ ::: {.callout-note}
34
+ ## API credentials
35
+
36
+ To use the Cloudflare API, you must have an Account ID and an Access Token,
37
+ which you can obtain by following the instructions at
38
+ <https://developers.cloudflare.com/workers-ai/get-started/rest-api/>.
39
+ :::
40
+
41
+ Examples
42
+ --------
43
+
44
+ ```python
45
+ import os
46
+ from chatlas import ChatCloudflare
47
+
48
+ chat = ChatCloudflare(
49
+ api_key=os.getenv("CLOUDFLARE_API_KEY"),
50
+ account=os.getenv("CLOUDFLARE_ACCOUNT_ID"),
51
+ )
52
+ chat.chat("What is the capital of France?")
53
+ ```
54
+
55
+ Known limitations
56
+ -----------------
57
+
58
+ - Tool calling does not appear to work.
59
+ - Images don't appear to work.
60
+
61
+ Parameters
62
+ ----------
63
+ account
64
+ The Cloudflare account ID. You generally should not supply this directly,
65
+ but instead set the `CLOUDFLARE_ACCOUNT_ID` environment variable.
66
+ system_prompt
67
+ A system prompt to set the behavior of the assistant.
68
+ model
69
+ The model to use for the chat. The default, None, will pick a reasonable
70
+ default, and warn you about it. We strongly recommend explicitly choosing
71
+ a model for all but the most casual use.
72
+ api_key
73
+ The API key to use for authentication. You generally should not supply
74
+ this directly, but instead set the `CLOUDFLARE_API_KEY` environment
75
+ variable.
76
+ seed
77
+ Optional integer seed that the model uses to try and make output more
78
+ reproducible.
79
+ kwargs
80
+ Additional arguments to pass to the `openai.OpenAI()` client constructor.
81
+
82
+ Returns
83
+ -------
84
+ Chat
85
+ A chat object that retains the state of the conversation.
86
+
87
+ Note
88
+ ----
89
+ This function is a lightweight wrapper around [](`~chatlas.ChatOpenAI`) with
90
+ the defaults tweaked for Cloudflare.
91
+
92
+ Note
93
+ ----
94
+ Pasting credentials into a chat constructor (e.g.,
95
+ `ChatCloudflare(api_key="...", account="...")`) is the simplest way to get
96
+ started, and is fine for interactive use, but is problematic for code that
97
+ may be shared with others.
98
+
99
+ Instead, consider using environment variables or a configuration file to manage
100
+ your credentials. One popular way to manage credentials is to use a `.env` file
101
+ to store your credentials, and then use the `python-dotenv` package to load them
102
+ into your environment.
103
+
104
+ ```shell
105
+ pip install python-dotenv
106
+ ```
107
+
108
+ ```shell
109
+ # .env
110
+ CLOUDFLARE_API_KEY=...
111
+ CLOUDFLARE_ACCOUNT_ID=...
112
+ ```
113
+
114
+ ```python
115
+ from chatlas import ChatCloudflare
116
+ from dotenv import load_dotenv
117
+
118
+ load_dotenv()
119
+ chat = ChatCloudflare()
120
+ chat.console()
121
+ ```
122
+
123
+ Another, more general, solution is to load your environment variables into the shell
124
+ before starting Python (maybe in a `.bashrc`, `.zshrc`, etc. file):
125
+
126
+ ```shell
127
+ export CLOUDFLARE_API_KEY=...
128
+ export CLOUDFLARE_ACCOUNT_ID=...
129
+ ```
130
+ """
131
+ # List at https://developers.cloudflare.com/workers-ai/models/
132
+ # `@cf` appears to be part of the model name
133
+ if model is None:
134
+ model = log_model_default("@cf/meta/llama-3.3-70b-instruct-fp8-fast")
135
+
136
+ if api_key is None:
137
+ api_key = os.getenv("CLOUDFLARE_API_KEY")
138
+
139
+ if account is None:
140
+ account = os.getenv("CLOUDFLARE_ACCOUNT_ID")
141
+
142
+ if account is None:
143
+ raise ValueError(
144
+ "Cloudflare account ID is required. Set the CLOUDFLARE_ACCOUNT_ID "
145
+ "environment variable or pass the `account` parameter."
146
+ )
147
+
148
+ if isinstance(seed, MISSING_TYPE):
149
+ seed = 1014 if is_testing() else None
150
+
151
+ # https://developers.cloudflare.com/workers-ai/configuration/open-ai-compatibility/
152
+ cloudflare_api = "https://api.cloudflare.com/client/v4/accounts"
153
+ base_url = f"{cloudflare_api}/{account}/ai/v1/"
154
+
155
+ return Chat(
156
+ provider=CloudflareProvider(
157
+ api_key=api_key,
158
+ model=model,
159
+ base_url=base_url,
160
+ seed=seed,
161
+ name="Cloudflare",
162
+ kwargs=kwargs,
163
+ ),
164
+ system_prompt=system_prompt,
165
+ )
166
+
167
+
168
+ class CloudflareProvider(OpenAIProvider):
169
+ def list_models(self):
170
+ raise NotImplementedError(
171
+ ".list_models() is not yet implemented for Cloudflare. "
172
+ "To view model availability online, see https://developers.cloudflare.com/workers-ai/models/"
173
+ )
@@ -127,3 +127,21 @@ class DatabricksProvider(OpenAIProvider):
127
127
  api_key="no-token", # A placeholder to pass validations, this will not be used
128
128
  http_client=httpx.AsyncClient(auth=client._client.auth),
129
129
  )
130
+
131
+ def list_models(self):
132
+ raise NotImplementedError(
133
+ ".list_models() is not yet implemented for Databricks. "
134
+ "To view model availability online, see "
135
+ "https://docs.databricks.com/aws/en/machine-learning/model-serving/score-foundation-models#-foundation-model-types"
136
+ )
137
+
138
+ # Databricks doesn't support stream_options
139
+ def _chat_perform_args(
140
+ self, stream, turns, tools, data_model=None, kwargs=None
141
+ ) -> "SubmitInputArgs":
142
+ kwargs2 = super()._chat_perform_args(stream, turns, tools, data_model, kwargs)
143
+
144
+ if "stream_options" in kwargs2:
145
+ del kwargs2["stream_options"]
146
+
147
+ return kwargs2