chatlas 0.10.0__py3-none-any.whl → 0.11.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of chatlas might be problematic. Click here for more details.

chatlas/__init__.py CHANGED
@@ -1,7 +1,12 @@
1
1
  from . import types
2
2
  from ._auto import ChatAuto
3
3
  from ._chat import Chat
4
- from ._content import ContentToolRequest, ContentToolResult, ContentToolResultImage
4
+ from ._content import (
5
+ ContentToolRequest,
6
+ ContentToolResult,
7
+ ContentToolResultImage,
8
+ ContentToolResultResource,
9
+ )
5
10
  from ._content_image import content_image_file, content_image_plot, content_image_url
6
11
  from ._content_pdf import content_pdf_file, content_pdf_url
7
12
  from ._interpolate import interpolate, interpolate_file
@@ -59,6 +64,7 @@ __all__ = (
59
64
  "ContentToolRequest",
60
65
  "ContentToolResult",
61
66
  "ContentToolResultImage",
67
+ "ContentToolResultResource",
62
68
  "interpolate",
63
69
  "interpolate_file",
64
70
  "Provider",
@@ -68,3 +74,9 @@ __all__ = (
68
74
  "Turn",
69
75
  "types",
70
76
  )
77
+
78
+ # Rebuild content models to resolve forward references to ToolAnnotation
79
+ ContentToolRequest.model_rebuild()
80
+ ContentToolResult.model_rebuild()
81
+ ContentToolResultImage.model_rebuild()
82
+ ContentToolResultResource.model_rebuild()
chatlas/_chat.py CHANGED
@@ -9,6 +9,7 @@ import warnings
9
9
  from pathlib import Path
10
10
  from threading import Thread
11
11
  from typing import (
12
+ TYPE_CHECKING,
12
13
  Any,
13
14
  AsyncGenerator,
14
15
  AsyncIterator,
@@ -33,6 +34,7 @@ from ._content import (
33
34
  ContentText,
34
35
  ContentToolRequest,
35
36
  ContentToolResult,
37
+ ToolInfo,
36
38
  )
37
39
  from ._display import (
38
40
  EchoDisplayOptions,
@@ -43,13 +45,16 @@ from ._display import (
43
45
  )
44
46
  from ._logging import log_tool_error
45
47
  from ._mcp_manager import MCPSessionManager
46
- from ._provider import Provider, StandardModelParams, SubmitInputArgsT
48
+ from ._provider import ModelInfo, Provider, StandardModelParams, SubmitInputArgsT
47
49
  from ._tokens import compute_cost, get_token_pricing
48
50
  from ._tools import Tool, ToolRejectError
49
51
  from ._turn import Turn, user_turn
50
52
  from ._typing_extensions import TypedDict, TypeGuard
51
53
  from ._utils import MISSING, MISSING_TYPE, html_escape, wrap_async
52
54
 
55
+ if TYPE_CHECKING:
56
+ from ._content import ToolAnnotations
57
+
53
58
 
54
59
  class TokensDict(TypedDict):
55
60
  """
@@ -128,6 +133,78 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
128
133
  self._standard_model_params: StandardModelParams = {}
129
134
  self._submit_input_kwargs: Optional[SubmitInputArgsT] = None
130
135
 
136
+ def list_models(self) -> list[ModelInfo]:
137
+ """
138
+ List all models available for the provider.
139
+
140
+ This method returns detailed information about all models supported by the provider,
141
+ including model IDs, pricing information, creation dates, and other metadata. This is
142
+ useful for discovering available models and their characteristics without needing to
143
+ consult provider documentation.
144
+
145
+ Examples
146
+ --------
147
+ Get all available models:
148
+
149
+ ```python
150
+ from chatlas import ChatOpenAI
151
+
152
+ chat = ChatOpenAI()
153
+ models = chat.list_models()
154
+ print(f"Found {len(models)} models")
155
+ print(f"First model: {models[0]['id']}")
156
+ ```
157
+
158
+ View models in a table format:
159
+
160
+ ```python
161
+ import pandas as pd
162
+ from chatlas import ChatAnthropic
163
+
164
+ chat = ChatAnthropic()
165
+ df = pd.DataFrame(chat.list_models())
166
+ print(df[["id", "input", "output"]].head()) # Show pricing info
167
+ ```
168
+
169
+ Find models by criteria:
170
+
171
+ ```python
172
+ from chatlas import ChatGoogle
173
+
174
+ chat = ChatGoogle()
175
+ models = chat.list_models()
176
+
177
+ # Find cheapest input model
178
+ cheapest = min(models, key=lambda m: m.get("input", float("inf")))
179
+ print(f"Cheapest model: {cheapest['id']}")
180
+ ```
181
+
182
+ Returns
183
+ -------
184
+ list[ModelInfo]
185
+ A list of ModelInfo dictionaries containing model information. Each dictionary
186
+ contains:
187
+
188
+ - `id` (str): The model identifier to use with the Chat constructor
189
+ - `name` (str, optional): Human-readable model name
190
+ - `input` (float, optional): Cost per input token in USD per million tokens
191
+ - `output` (float, optional): Cost per output token in USD per million tokens
192
+ - `cached_input` (float, optional): Cost per cached input token in USD per million tokens
193
+ - `created_at` (date, optional): Date the model was created
194
+ - `owned_by` (str, optional): Organization that owns the model
195
+ - `provider` (str, optional): Model provider name
196
+ - `size` (int, optional): Model size in bytes
197
+ - `url` (str, optional): URL with more information about the model
198
+
199
+ The list is typically sorted by creation date (most recent first).
200
+
201
+ Note
202
+ ----
203
+ Not all providers support this method. Some providers may raise NotImplementedError
204
+ with information about where to find model listings online.
205
+ """
206
+ return self.provider.list_models()
207
+
131
208
  def get_turns(
132
209
  self,
133
210
  *,
@@ -1461,7 +1538,9 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
1461
1538
  func: Callable[..., Any] | Callable[..., Awaitable[Any]],
1462
1539
  *,
1463
1540
  force: bool = False,
1541
+ name: Optional[str] = None,
1464
1542
  model: Optional[type[BaseModel]] = None,
1543
+ annotations: "Optional[ToolAnnotations]" = None,
1465
1544
  ):
1466
1545
  """
1467
1546
  Register a tool (function) with the chat.
@@ -1533,19 +1612,24 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
1533
1612
  force
1534
1613
  If `True`, overwrite any existing tool with the same name. If `False`
1535
1614
  (the default), raise an error if a tool with the same name already exists.
1615
+ name
1616
+ The name of the tool. If not provided, the name will be inferred from the
1617
+ `func`'s name (or the `model`'s name, if provided).
1536
1618
  model
1537
1619
  A Pydantic model that describes the input parameters for the function.
1538
1620
  If not provided, the model will be inferred from the function's type hints.
1539
1621
  The primary reason why you might want to provide a model is to gain
  more control over the input schema sent to the model.
1540
1622
  Note that the name and docstring of the model take precedence over the
1541
1623
  name and docstring of the function.
1624
+ annotations
1625
+ Additional properties that describe the tool and its behavior.
1542
1626
 
1543
1627
  Raises
1544
1628
  ------
1545
1629
  ValueError
1546
1630
  If a tool with the same name already exists and `force` is `False`.
1547
1631
  """
1548
- tool = Tool.from_func(func, model=model)
1632
+ tool = Tool.from_func(func, name=name, model=model, annotations=annotations)
1549
1633
  if tool.name in self._tools and not force:
1550
1634
  raise ValueError(
1551
1635
  f"Tool with name '{tool.name}' is already registered. "
@@ -1853,6 +1937,9 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
1853
1937
  all_results: list[ContentToolResult] = []
1854
1938
  for x in turn.contents:
1855
1939
  if isinstance(x, ContentToolRequest):
1940
+ tool = self._tools.get(x.name)
1941
+ if tool is not None:
1942
+ x.tool = ToolInfo.from_tool(tool)
1856
1943
  if echo == "output":
1857
1944
  self._echo_content(f"\n\n{x}\n\n")
1858
1945
  if content == "all":
@@ -1913,6 +2000,9 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
1913
2000
  all_results: list[ContentToolResult] = []
1914
2001
  for x in turn.contents:
1915
2002
  if isinstance(x, ContentToolRequest):
2003
+ tool = self._tools.get(x.name)
2004
+ if tool is not None:
2005
+ x.tool = ToolInfo.from_tool(tool)
1916
2006
  if echo == "output":
1917
2007
  self._echo_content(f"\n\n{x}\n\n")
1918
2008
  if content == "all":
@@ -2070,8 +2160,8 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
2070
2160
  self._turns.extend([user_turn, turn])
2071
2161
 
2072
2162
  def _invoke_tool(self, request: ContentToolRequest):
2073
- tool_def = self._tools.get(request.name, None)
2074
- func = tool_def.func if tool_def is not None else None
2163
+ tool = self._tools.get(request.name)
2164
+ func = tool.func if tool is not None else None
2075
2165
 
2076
2166
  if func is None:
2077
2167
  yield self._handle_tool_error_result(
@@ -2118,21 +2208,20 @@ class Chat(Generic[SubmitInputArgsT, CompletionT]):
2118
2208
  yield self._handle_tool_error_result(request, e)
2119
2209
 
2120
2210
  async def _invoke_tool_async(self, request: ContentToolRequest):
2121
- tool_def = self._tools.get(request.name, None)
2122
- func = None
2123
- if tool_def:
2124
- if tool_def._is_async:
2125
- func = tool_def.func
2126
- else:
2127
- func = wrap_async(tool_def.func)
2211
+ tool = self._tools.get(request.name)
2128
2212
 
2129
- if func is None:
2213
+ if tool is None:
2130
2214
  yield self._handle_tool_error_result(
2131
2215
  request,
2132
2216
  error=RuntimeError("Unknown tool."),
2133
2217
  )
2134
2218
  return
2135
2219
 
2220
+ if tool._is_async:
2221
+ func = tool.func
2222
+ else:
2223
+ func = wrap_async(tool.func)
2224
+
2136
2225
  # First, invoke the request callbacks. If a ToolRejectError is raised,
2137
2226
  # treat it like a tool failure (i.e., gracefully handle it).
2138
2227
  result: ContentToolResult | None = None
chatlas/_content.py CHANGED
@@ -1,11 +1,64 @@
1
1
  from __future__ import annotations
2
2
 
3
3
  from pprint import pformat
4
- from typing import Any, Literal, Optional, Union
4
+ from typing import TYPE_CHECKING, Any, Literal, Optional, Union
5
5
 
6
6
  import orjson
7
7
  from pydantic import BaseModel, ConfigDict
8
8
 
9
+ from ._typing_extensions import NotRequired, TypedDict
10
+
11
+ if TYPE_CHECKING:
12
+ from ._tools import Tool
13
+
14
+
15
+ class ToolAnnotations(TypedDict, total=False):
16
+ """
17
+ Additional properties describing a Tool to clients.
18
+
19
+ NOTE: all properties in ToolAnnotations are **hints**.
20
+ They are not guaranteed to provide a faithful description of
21
+ tool behavior (including descriptive properties like `title`).
22
+
23
+ Clients should never make tool use decisions based on ToolAnnotations
24
+ received from untrusted servers.
25
+ """
26
+
27
+ title: NotRequired[str]
28
+ """A human-readable title for the tool."""
29
+
30
+ readOnlyHint: NotRequired[bool]
31
+ """
32
+ If true, the tool does not modify its environment.
33
+ Default: false
34
+ """
35
+
36
+ destructiveHint: NotRequired[bool]
37
+ """
38
+ If true, the tool may perform destructive updates to its environment.
39
+ If false, the tool performs only additive updates.
40
+ (This property is meaningful only when `readOnlyHint == false`)
41
+ Default: true
42
+ """
43
+
44
+ idempotentHint: NotRequired[bool]
45
+ """
46
+ If true, calling the tool repeatedly with the same arguments
47
+ will have no additional effect on its environment.
48
+ (This property is meaningful only when `readOnlyHint == false`)
49
+ Default: false
50
+ """
51
+
52
+ openWorldHint: NotRequired[bool]
53
+ """
54
+ If true, this tool may interact with an "open world" of external
55
+ entities. If false, the tool's domain of interaction is closed.
56
+ For example, the world of a web search tool is open, whereas that
57
+ of a memory tool is not.
58
+ Default: true
59
+ """
60
+
61
+
9
62
  ImageContentTypes = Literal[
10
63
  "image/png",
11
64
  "image/jpeg",
@@ -16,6 +69,45 @@ ImageContentTypes = Literal[
16
69
  Allowable content types for images.
17
70
  """
18
71
 
72
+
73
+ class ToolInfo(BaseModel):
74
+ """
75
+ Serializable tool information
76
+
77
+ This contains only the serializable parts of a Tool that are needed
78
+ for ContentToolRequest to be JSON-serializable. This allows tool
79
+ metadata to be preserved without including the non-serializable
80
+ function reference.
81
+
82
+ Parameters
83
+ ----------
84
+ name
85
+ The name of the tool.
86
+ description
87
+ A description of what the tool does.
88
+ parameters
89
+ A dictionary describing the input parameters and their types.
90
+ annotations
91
+ Additional properties that describe the tool and its behavior.
92
+ """
93
+
94
+ name: str
95
+ description: str
96
+ parameters: dict[str, Any]
97
+ annotations: Optional[ToolAnnotations] = None
98
+
99
+ @classmethod
100
+ def from_tool(cls, tool: "Tool") -> "ToolInfo":
101
+ """Create a ToolInfo from a Tool instance."""
102
+ func_schema = tool.schema["function"]
103
+ return cls(
104
+ name=tool.name,
105
+ description=func_schema.get("description", ""),
106
+ parameters=func_schema.get("parameters", {}),
107
+ annotations=tool.annotations,
108
+ )
109
+
110
+
19
111
  ContentTypeEnum = Literal[
20
112
  "text",
21
113
  "image_remote",
@@ -171,11 +263,16 @@ class ContentToolRequest(Content):
171
263
  The name of the tool/function to call.
172
264
  arguments
173
265
  The arguments to pass to the tool/function.
266
+ tool
267
+ Serializable information about the tool. This is set internally by
268
+ chatlas's tool calling loop and contains only the metadata needed
269
+ for serialization (name, description, parameters, annotations).
174
270
  """
175
271
 
176
272
  id: str
177
273
  name: str
178
274
  arguments: object
275
+ tool: Optional[ToolInfo] = None
179
276
 
180
277
  content_type: ContentTypeEnum = "tool_request"
181
278
 
chatlas/_provider.py CHANGED
@@ -1,6 +1,7 @@
1
1
  from __future__ import annotations
2
2
 
3
3
  from abc import ABC, abstractmethod
4
+ from datetime import date
4
5
  from typing import (
5
6
  AsyncIterable,
6
7
  Generic,
@@ -16,7 +17,7 @@ from pydantic import BaseModel
16
17
  from ._content import Content
17
18
  from ._tools import Tool
18
19
  from ._turn import Turn
19
- from ._typing_extensions import TypedDict
20
+ from ._typing_extensions import NotRequired, TypedDict
20
21
 
21
22
  ChatCompletionT = TypeVar("ChatCompletionT")
22
23
  ChatCompletionChunkT = TypeVar("ChatCompletionChunkT")
@@ -35,6 +36,40 @@ submitting input to a model provider.
35
36
  """
36
37
 
37
38
 
39
+ class ModelInfo(TypedDict):
40
+ "Information returned from the `.list_models()` method"
41
+
42
+ id: str
43
+ "The model ID (this gets passed to the `model` parameter of the `Chat` constructor)"
44
+
45
+ cached_input: NotRequired[float | None]
46
+ "The cost per user token in USD per million tokens for cached input"
47
+
48
+ input: NotRequired[float | None]
49
+ "The cost per user token in USD per million tokens"
50
+
51
+ output: NotRequired[float | None]
52
+ "The cost per assistant token in USD per million tokens"
53
+
54
+ created_at: NotRequired[date]
55
+ "The date the model was created"
56
+
57
+ name: NotRequired[str]
58
+ "The model name"
59
+
60
+ owned_by: NotRequired[str]
61
+ "The owner of the model"
62
+
63
+ size: NotRequired[int]
64
+ "The size of the model in bytes"
65
+
66
+ provider: NotRequired[str]
67
+ "The provider of the model"
68
+
69
+ url: NotRequired[str]
70
+ "A URL to learn more about the model"
71
+
72
+
38
73
  class StandardModelParams(TypedDict, total=False):
39
74
  """
40
75
  A TypedDict representing the standard model parameters that can be set
@@ -102,6 +137,13 @@ class Provider(
102
137
  """
103
138
  return self._model
104
139
 
140
+ @abstractmethod
141
+ def list_models(self) -> list[ModelInfo]:
142
+ """
143
+ List all available models for the provider.
144
+ """
145
+ pass
146
+
105
147
  @overload
106
148
  @abstractmethod
107
149
  def chat_perform(
@@ -21,8 +21,8 @@ from ._content import (
21
21
  ContentToolResultResource,
22
22
  )
23
23
  from ._logging import log_model_default
24
- from ._provider import Provider, StandardModelParamNames, StandardModelParams
25
- from ._tokens import tokens_log
24
+ from ._provider import ModelInfo, Provider, StandardModelParamNames, StandardModelParams
25
+ from ._tokens import get_token_pricing, tokens_log
26
26
  from ._tools import Tool, basemodel_to_param_schema
27
27
  from ._turn import Turn, user_turn
28
28
  from ._utils import split_http_client_kwargs
@@ -209,6 +209,30 @@ class AnthropicProvider(
209
209
  self._client = Anthropic(**sync_kwargs) # type: ignore
210
210
  self._async_client = AsyncAnthropic(**async_kwargs)
211
211
 
212
+ def list_models(self):
213
+ models = self._client.models.list()
214
+
215
+ res: list[ModelInfo] = []
216
+ for m in models:
217
+ pricing = get_token_pricing(self.name, m.id) or {}
218
+ info: ModelInfo = {
219
+ "id": m.id,
220
+ "name": m.display_name,
221
+ "created_at": m.created_at.date(),
222
+ "input": pricing.get("input"),
223
+ "output": pricing.get("output"),
224
+ "cached_input": pricing.get("cached_input"),
225
+ }
226
+ res.append(info)
227
+
228
+ # Sort list by created_at field (more recent first)
229
+ res.sort(
230
+ key=lambda x: x.get("created_at", 0),
231
+ reverse=True,
232
+ )
233
+
234
+ return res
235
+
212
236
  @overload
213
237
  def chat_perform(
214
238
  self,
@@ -797,3 +821,26 @@ class AnthropicBedrockProvider(AnthropicProvider):
797
821
 
798
822
  self._client = AnthropicBedrock(**kwargs_full) # type: ignore
799
823
  self._async_client = AsyncAnthropicBedrock(**kwargs_full) # type: ignore
824
+
825
+ def list_models(self):
826
+ # boto3 should come via anthropic's bedrock extras
827
+ import boto3
828
+
829
+ bedrock = boto3.client("bedrock")
830
+ resp = bedrock.list_foundation_models()
831
+ models = resp["modelSummaries"]
832
+
833
+ res: list[ModelInfo] = []
834
+ for m in models:
835
+ pricing = get_token_pricing(self.name, m["modelId"]) or {}
836
+ info: ModelInfo = {
837
+ "id": m["modelId"],
838
+ "name": m["modelName"],
839
+ "provider": m["providerName"],
840
+ "input": pricing.get("input"),
841
+ "output": pricing.get("output"),
842
+ "cached_input": pricing.get("cached_input"),
843
+ }
844
+ res.append(info)
845
+
846
+ return res
@@ -153,7 +153,7 @@ def ChatCloudflare(
153
153
  base_url = f"{cloudflare_api}/{account}/ai/v1/"
154
154
 
155
155
  return Chat(
156
- provider=OpenAIProvider(
156
+ provider=CloudflareProvider(
157
157
  api_key=api_key,
158
158
  model=model,
159
159
  base_url=base_url,
@@ -163,3 +163,11 @@ def ChatCloudflare(
163
163
  ),
164
164
  system_prompt=system_prompt,
165
165
  )
166
+
167
+
168
+ class CloudflareProvider(OpenAIProvider):
169
+ def list_models(self):
170
+ raise NotImplementedError(
171
+ ".list_models() is not yet implemented for Cloudflare. "
172
+ "To view model availability online, see https://developers.cloudflare.com/workers-ai/models/"
173
+ )
@@ -128,6 +128,13 @@ class DatabricksProvider(OpenAIProvider):
128
128
  http_client=httpx.AsyncClient(auth=client._client.auth),
129
129
  )
130
130
 
131
+ def list_models(self):
132
+ raise NotImplementedError(
133
+ ".list_models() is not yet implemented for Databricks. "
134
+ "To view model availability online, see "
135
+ "https://docs.databricks.com/aws/en/machine-learning/model-serving/score-foundation-models#-foundation-model-types"
136
+ )
137
+
131
138
  # Databricks doesn't support stream_options
132
139
  def _chat_perform_args(
133
140
  self, stream, turns, tools, data_model=None, kwargs=None
@@ -3,9 +3,11 @@ from __future__ import annotations
3
3
  import os
4
4
  from typing import TYPE_CHECKING, Optional
5
5
 
6
+ import requests
7
+
6
8
  from ._chat import Chat
7
9
  from ._logging import log_model_default
8
- from ._provider_openai import OpenAIProvider
10
+ from ._provider_openai import ModelInfo, OpenAIProvider
9
11
  from ._utils import MISSING, MISSING_TYPE, is_testing
10
12
 
11
13
  if TYPE_CHECKING:
@@ -18,7 +20,7 @@ def ChatGithub(
18
20
  system_prompt: Optional[str] = None,
19
21
  model: Optional[str] = None,
20
22
  api_key: Optional[str] = None,
21
- base_url: str = "https://models.inference.ai.azure.com/",
23
+ base_url: str = "https://models.github.ai/inference/",
22
24
  seed: Optional[int] | MISSING_TYPE = MISSING,
23
25
  kwargs: Optional["ChatClientArgs"] = None,
24
26
  ) -> Chat["SubmitInputArgs", ChatCompletion]:
@@ -125,7 +127,7 @@ def ChatGithub(
125
127
  seed = 1014 if is_testing() else None
126
128
 
127
129
  return Chat(
128
- provider=OpenAIProvider(
130
+ provider=GitHubProvider(
129
131
  api_key=api_key,
130
132
  model=model,
131
133
  base_url=base_url,
@@ -135,3 +137,61 @@ def ChatGithub(
135
137
  ),
136
138
  system_prompt=system_prompt,
137
139
  )
140
+
141
+
142
+ class GitHubProvider(OpenAIProvider):
143
+ def __init__(self, base_url: str, **kwargs):
144
+ super().__init__(**kwargs)
145
+ self._base_url = base_url
146
+
147
+ def list_models(self) -> list[ModelInfo]:
148
+ # For some reason the OpenAI SDK API fails here? So perform request manually
149
+ # models = self._client.models.list()
150
+
151
+ base_url = self._base_url
152
+ if not base_url.endswith("/"):
153
+ base_url += "/"
154
+
155
+ if "azure" in base_url:
156
+ # i.e., https://models.inference.ai.azure.com
157
+ return list_models_gh_azure(base_url)
158
+ else:
159
+ # i.e., https://models.github.ai/inference/
160
+ return list_models_gh(base_url)
161
+
162
+
163
+ def list_models_gh(base_url: str = "https://models.github.ai/inference/"):
164
+ # replace /inference endpoint with /catalog
165
+ base_url = base_url.replace("/inference", "/catalog")
166
+ response = requests.get(f"{base_url}models")
167
+ response.raise_for_status()
168
+ models = response.json()
169
+
170
+ res: list[ModelInfo] = []
171
+ for m in models:
172
+ _id = m["id"].split("/")[-1]
173
+ info: ModelInfo = {
174
+ "id": _id,
175
+ "name": m["name"],
176
+ "provider": m["publisher"],
177
+ "url": m["html_url"],
178
+ }
179
+ res.append(info)
180
+
181
+ return res
182
+
183
+
184
+ def list_models_gh_azure(base_url: str = "https://models.inference.ai.azure.com"):
185
+ response = requests.get(f"{base_url}models")
186
+ response.raise_for_status()
187
+ models = response.json()
188
+
189
+ res: list[ModelInfo] = []
190
+ for m in models:
191
+ info: ModelInfo = {
192
+ "id": m["name"],
193
+ "provider": m["publisher"]
194
+ }
195
+ res.append(info)
196
+
197
+ return res
@@ -21,8 +21,8 @@ from ._content import (
21
21
  )
22
22
  from ._logging import log_model_default
23
23
  from ._merge import merge_dicts
24
- from ._provider import Provider, StandardModelParamNames, StandardModelParams
25
- from ._tokens import tokens_log
24
+ from ._provider import ModelInfo, Provider, StandardModelParamNames, StandardModelParams
25
+ from ._tokens import get_token_pricing, tokens_log
26
26
  from ._tools import Tool
27
27
  from ._turn import Turn, user_turn
28
28
 
@@ -180,6 +180,30 @@ class GoogleProvider(
180
180
 
181
181
  self._client = genai.Client(**kwargs_full)
182
182
 
183
+ def list_models(self):
184
+ models = self._client.models.list()
185
+
186
+ res: list[ModelInfo] = []
187
+ for m in models:
188
+ name = m.name or "[unknown]"
189
+ pricing = get_token_pricing(self.name, name) or {}
190
+ info: ModelInfo = {
191
+ "id": name,
192
+ "name": m.display_name or "[unknown]",
193
+ "input": pricing.get("input"),
194
+ "output": pricing.get("output"),
195
+ "cached_input": pricing.get("cached_input"),
196
+ }
197
+ res.append(info)
198
+
199
+ # Sort by creation date (most recent first) — NOTE(review): the sort key below reads "created", but ModelInfo defines "created_at"; confirm the key, otherwise this sort is a no-op
200
+ res.sort(
201
+ key=lambda x: x.get("created", 0),
202
+ reverse=True,
203
+ )
204
+
205
+ return res
206
+
183
207
  @overload
184
208
  def chat_perform(
185
209
  self,