inspect-ai 0.3.84__py3-none-any.whl → 0.3.85__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
--- inspect_ai/agent/_agent.py
+++ inspect_ai/agent/_agent.py
@@ -225,16 +225,12 @@ def agent_with(
     name = name or info.name
     description = description or info.metadata.get(AGENT_DESCRIPTION, None)
 
-    # if the name is null then raise
-    if name is None:
-        raise ValueError("You must provide a name to agent_with")
-
     # now set registry info
     set_registry_info(
         agent,
         RegistryInfo(
             type="agent",
-            name=name,
+            name=name or "agent",
             metadata={AGENT_DESCRIPTION: description}
             if description is not None
             else {},
--- inspect_ai/model/_providers/providers.py
+++ inspect_ai/model/_providers/providers.py
@@ -253,28 +253,6 @@ def none() -> type[ModelAPI]:
     return NoModel
 
 
-@modelapi("goodfire")
-def goodfire() -> type[ModelAPI]:
-    """Get the Goodfire API provider."""
-    FEATURE = "Goodfire API"
-    PACKAGE = "goodfire"
-    MIN_VERSION = "0.3.4"  # Support for newer Llama models and OpenAI compatibility
-
-    # verify we have the package
-    try:
-        import goodfire  # noqa: F401
-    except ImportError:
-        raise pip_dependency_error(FEATURE, [PACKAGE])
-
-    # verify version
-    verify_required_version(FEATURE, PACKAGE, MIN_VERSION)
-
-    # in the clear
-    from .goodfire import GoodfireAPI
-
-    return GoodfireAPI
-
-
 def validate_openai_client(feature: str) -> None:
     FEATURE = feature
     PACKAGE = "openai"
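
The user-visible effect of removing this registration, sketched under the assumption that models are resolved via `inspect_ai.model.get_model` with a `provider/model` prefix; the model name below is hypothetical:

```python
from inspect_ai.model import get_model

# Through 0.3.84, the "goodfire" prefix resolved to GoodfireAPI (after
# verifying that the goodfire package was installed at version >= 0.3.4).
# From 0.3.85, the prefix is no longer registered, so this lookup fails.
model = get_model("goodfire/meta-llama/Meta-Llama-3.1-8B-Instruct")
```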
--- inspect_ai-0.3.84.dist-info/METADATA
+++ inspect_ai-0.3.85.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: inspect_ai
-Version: 0.3.84
+Version: 0.3.85
 Summary: Framework for large language model evaluations
 Author: UK AI Security Institute
 License: MIT License
@@ -56,7 +56,6 @@ Requires-Dist: aioboto3; extra == "dev"
 Requires-Dist: azure-ai-inference; extra == "dev"
 Requires-Dist: google-cloud-aiplatform; extra == "dev"
 Requires-Dist: google-genai; extra == "dev"
-Requires-Dist: goodfire; extra == "dev"
 Requires-Dist: griffe; extra == "dev"
 Requires-Dist: groq; extra == "dev"
 Requires-Dist: ipython; extra == "dev"
--- inspect_ai-0.3.84.dist-info/RECORD
+++ inspect_ai-0.3.85.dist-info/RECORD
@@ -450,7 +450,7 @@ inspect_ai/_view/www/src/workspace/tabs/SamplesTab.tsx,sha256=s6jt1-5_Hrgz3_ysT1
 inspect_ai/_view/www/src/workspace/tabs/grouping.ts,sha256=6lvFzReQKQ_43S20xN4kfBJN2F7Tfs2VWeSMIuHxUAI,6187
 inspect_ai/_view/www/src/workspace/tabs/types.ts,sha256=Wa1Y4tZwYO_QJr0Tg9-5xJFztmcMYCODSm6JvdzMpDw,471
 inspect_ai/agent/__init__.py,sha256=nzL9TPAARSJVZRPogWHxZ-qJriXBGmFUM9DV4NRi21o,749
-inspect_ai/agent/_agent.py,sha256=g0hw6sDTXg_4NRjs5Ohze404HzyyIyFNFDvlgGqL2Vw,7736
+inspect_ai/agent/_agent.py,sha256=5MXMrY5bsBQ4AI5y5rVaIYywp7JxQzUnhp-KqBIMF7I,7622
 inspect_ai/agent/_as_solver.py,sha256=_6H0L9JidC6JjpMaBRBAjIrgzE8GKEoygJjOC_JRoLQ,2340
 inspect_ai/agent/_as_tool.py,sha256=vT5hrcKfkyP90i4Ieuy_dx4cYsFKOMdPs-6x12cuqMk,4449
 inspect_ai/agent/_filter.py,sha256=qnT0HbT4edpDi0MwXY3Q3It2pzNRkTRXZDOqfCwMY6M,1234
@@ -547,7 +547,6 @@ inspect_ai/model/_providers/anthropic.py,sha256=PYxV0D_bt0Icp2wEWb6GMCpDb-uBFKYy
 inspect_ai/model/_providers/azureai.py,sha256=uXED_qmeyW1XAGBosbG7PJNk833RIeokKX3l_8O9gYA,14341
 inspect_ai/model/_providers/bedrock.py,sha256=rh8BvSUPWiFMh0TQwMYTlucfFrDKswtLhzozulrz7wE,24004
 inspect_ai/model/_providers/cloudflare.py,sha256=mWqBqc0zzf29UWz34biq8CxSu99a95YjpH_6A4na52g,4617
-inspect_ai/model/_providers/goodfire.py,sha256=J0nxGbF8lXBmc5YHBJCsZdF03mWT5SuWMb21d9ho3FM,8799
 inspect_ai/model/_providers/google.py,sha256=gcg8pvYAV5gYc4NXC5mLqFyuU7KuhyNrzdXIY57sYl8,28207
 inspect_ai/model/_providers/grok.py,sha256=dS88ueXiD-kHAFr0jCoTpTGLGa2VsUlB_TFP8L_2lBM,995
 inspect_ai/model/_providers/groq.py,sha256=mcRKu33e-mO5l06PGV6SjsildQd0XCti6QNXwwFWL7I,11246
@@ -561,7 +560,7 @@ inspect_ai/model/_providers/openai.py,sha256=NFdMpnI2vlmpI8h_vWnt8y4X_XaydaL9gH5
 inspect_ai/model/_providers/openai_o1.py,sha256=k-Xm_Wzn1KHKL6Z1KTHg4CTTr8ybgiHvXkLiLdjP7Os,12926
 inspect_ai/model/_providers/openai_responses.py,sha256=YPXt8KQfIEiiTpvtoQECBoNQLDLbwBW_KhBfM8vEhJk,6324
 inspect_ai/model/_providers/openrouter.py,sha256=pDimDmm_4FzS4GZx0n9z8z717mQf3IQlgEy30huzpc4,4730
-inspect_ai/model/_providers/providers.py,sha256=0WSi_FOWxW71sZ4GJ-OgJqbPS4tMIaPQqEG2hnxqfqc,6378
+inspect_ai/model/_providers/providers.py,sha256=Sd2D9OcWkukuBcl_-KDfdpxMaAShv1JZhL5KfAM87CE,5817
 inspect_ai/model/_providers/together.py,sha256=MoA3tyMKUnE0EekTqEIBBwvsaOp5c697kydLi1ZMYzE,9745
 inspect_ai/model/_providers/vertex.py,sha256=60W7kgoA83GtKdMeJgNU2IAw0N0wTscg4YCcMPu2bwo,17185
 inspect_ai/model/_providers/vllm.py,sha256=UYjCCXzw2hGJHVC3oPl-u2EI4iAm8ZncoIfYp1QJkbQ,14238
@@ -693,9 +692,9 @@ inspect_ai/util/_sandbox/docker/internal.py,sha256=c8X8TLrBPOvsfnq5TkMlb_bzTALyc
 inspect_ai/util/_sandbox/docker/prereqs.py,sha256=0j6_OauBBnVlpBleADcZavIAAQZy4WewVjbRn9c0stg,3355
 inspect_ai/util/_sandbox/docker/service.py,sha256=hhHIWH1VDFLwehdGd19aUBD_VKfDO3GCPxpw1HSwVQk,2437
 inspect_ai/util/_sandbox/docker/util.py,sha256=EeInihCNXgUWxaqZ4dNOJd719kXL2_jr63QCoXn68vA,3154
-inspect_ai-0.3.84.dist-info/licenses/LICENSE,sha256=xZPCr8gTiFIerrA_DRpLAbw-UUftnLFsHxKeW-NTtq8,1081
-inspect_ai-0.3.84.dist-info/METADATA,sha256=g-2UAMeNEN0cyQB6JUowoPVFebPlFpNsZuFiQwsxpVE,5005
-inspect_ai-0.3.84.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
-inspect_ai-0.3.84.dist-info/entry_points.txt,sha256=WGGLmzTzDWLzYfiyovSY6oEKuf-gqzSDNOb5V-hk3fM,54
-inspect_ai-0.3.84.dist-info/top_level.txt,sha256=Tp3za30CHXJEKLk8xLe9qGsW4pBzJpEIOMHOHNCXiVo,11
-inspect_ai-0.3.84.dist-info/RECORD,,
+inspect_ai-0.3.85.dist-info/licenses/LICENSE,sha256=xZPCr8gTiFIerrA_DRpLAbw-UUftnLFsHxKeW-NTtq8,1081
+inspect_ai-0.3.85.dist-info/METADATA,sha256=yNjYFfYdXBQnzqg-_uT3awl6p1mr9eOKlJrIOIPtCZs,4965
+inspect_ai-0.3.85.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
+inspect_ai-0.3.85.dist-info/entry_points.txt,sha256=WGGLmzTzDWLzYfiyovSY6oEKuf-gqzSDNOb5V-hk3fM,54
+inspect_ai-0.3.85.dist-info/top_level.txt,sha256=Tp3za30CHXJEKLk8xLe9qGsW4pBzJpEIOMHOHNCXiVo,11
+inspect_ai-0.3.85.dist-info/RECORD,,
--- inspect_ai/model/_providers/goodfire.py
+++ /dev/null
@@ -1,253 +0,0 @@
-import os
-from typing import Any, List, Literal, get_args
-
-from goodfire import AsyncClient
-from goodfire.api.chat.interfaces import ChatMessage as GoodfireChatMessage
-from goodfire.api.exceptions import (
-    InvalidRequestException,
-    RateLimitException,
-    ServerErrorException,
-)
-from goodfire.variants.variants import SUPPORTED_MODELS, Variant
-from typing_extensions import override
-
-from inspect_ai.tool._tool_choice import ToolChoice
-from inspect_ai.tool._tool_info import ToolInfo
-
-from .._chat_message import (
-    ChatMessage,
-    ChatMessageAssistant,
-    ChatMessageSystem,
-    ChatMessageTool,
-    ChatMessageUser,
-)
-from .._generate_config import GenerateConfig
-from .._model import ModelAPI
-from .._model_call import ModelCall
-from .._model_output import (
-    ChatCompletionChoice,
-    ModelOutput,
-    ModelUsage,
-)
-from .util import environment_prerequisite_error, model_base_url
-
-# Constants
-GOODFIRE_API_KEY = "GOODFIRE_API_KEY"
-DEFAULT_BASE_URL = "https://api.goodfire.ai"
-DEFAULT_MAX_TOKENS = 4096
-DEFAULT_TEMPERATURE = 1.0  # Standard sampling temperature (baseline)
-DEFAULT_TOP_P = 1.0  # No nucleus sampling truncation (baseline)
-
-
-class GoodfireAPI(ModelAPI):
-    """Goodfire API provider.
-
-    This provider implements the Goodfire API for LLM inference. It supports:
-    - Chat completions with standard message formats
-    - Basic parameter controls (temperature, top_p, etc.)
-    - Usage statistics tracking
-    - Stop reason handling
-
-    Does not currently support:
-    - Tool calls
-    - Feature analysis
-    - Streaming responses
-
-    Known limitations:
-    - Limited role support (system/user/assistant only)
-    - Tool messages converted to user messages
-    """
-
-    client: AsyncClient
-    variant: Variant
-    model_args: dict[str, Any]
-
-    def __init__(
-        self,
-        model_name: str,
-        base_url: str | None = None,
-        api_key: str | None = None,
-        config: GenerateConfig = GenerateConfig(),
-        **model_args: Any,
-    ) -> None:
-        """Initialize the Goodfire API provider.
-
-        Args:
-            model_name: Name of the model to use
-            base_url: Optional custom API base URL
-            api_key: Optional API key (will check env vars if not provided)
-            config: Generation config options
-            **model_args: Additional arguments passed to the API
-        """
-        super().__init__(
-            model_name=model_name,
-            base_url=base_url,
-            api_key=api_key,
-            api_key_vars=[GOODFIRE_API_KEY],
-            config=config,
-        )
-
-        # resolve api_key
-        if not self.api_key:
-            self.api_key = os.environ.get(GOODFIRE_API_KEY)
-        if not self.api_key:
-            raise environment_prerequisite_error("Goodfire", GOODFIRE_API_KEY)
-
-        # Validate model name against supported models
-        supported_models = list(get_args(SUPPORTED_MODELS))
-        if self.model_name not in supported_models:
-            raise ValueError(
-                f"Model {self.model_name} not supported. Supported models: {supported_models}"
-            )
-
-        # Initialize client with minimal configuration
-        base_url_val = model_base_url(base_url, "GOODFIRE_BASE_URL")
-        assert isinstance(base_url_val, str) or base_url_val is None
-
-        # Store model args for use in generate
-        self.model_args = model_args
-
-        self.client = AsyncClient(
-            api_key=self.api_key,
-            base_url=base_url_val or DEFAULT_BASE_URL,
-        )
-
-        # Initialize variant directly with model name
-        self.variant = Variant(self.model_name)  # type: ignore
-
-    def _to_goodfire_message(self, message: ChatMessage) -> GoodfireChatMessage:
-        """Convert an Inspect message to a Goodfire message format.
-
-        Args:
-            message: The message to convert
-
-        Returns:
-            The converted message in Goodfire format
-
-        Raises:
-            ValueError: If the message type is unknown
-        """
-        role: Literal["system", "user", "assistant"] = "user"
-        if isinstance(message, ChatMessageSystem):
-            role = "system"
-        elif isinstance(message, ChatMessageUser):
-            role = "user"
-        elif isinstance(message, ChatMessageAssistant):
-            role = "assistant"
-        elif isinstance(message, ChatMessageTool):
-            role = "user"  # Convert tool messages to user messages
-        else:
-            raise ValueError(f"Unknown message type: {type(message)}")
-
-        content = str(message.content)
-        if isinstance(message, ChatMessageTool):
-            content = f"Tool {message.function}: {content}"
-
-        return GoodfireChatMessage(role=role, content=content)
-
-    def handle_error(self, ex: Exception) -> ModelOutput | Exception:
-        """Handle only errors that need special treatment for retry logic or model limits."""
-        # Handle token/context length errors
-        if isinstance(ex, InvalidRequestException):
-            error_msg = str(ex).lower()
-            if "context length" in error_msg or "max tokens" in error_msg:
-                return ModelOutput.from_content(
-                    model=self.model_name,
-                    content=str(ex),
-                    stop_reason="model_length",
-                    error=error_msg,
-                )
-
-        # Let all other errors propagate
-        return ex
-
-    @override
-    def should_retry(self, ex: Exception) -> bool:
-        """Check if exception is due to rate limiting."""
-        return isinstance(ex, RateLimitException | ServerErrorException)
-
-    @override
-    def connection_key(self) -> str:
-        """Return key for connection pooling."""
-        return f"goodfire:{self.api_key}"
-
-    @override
-    def max_tokens(self) -> int | None:
-        """Return maximum tokens supported by model."""
-        return DEFAULT_MAX_TOKENS  # Let Goodfire's Variant handle model-specific limits
-
-    async def generate(
-        self,
-        input: List[ChatMessage],
-        tools: List[ToolInfo],
-        tool_choice: ToolChoice,
-        config: GenerateConfig,
-        *,
-        cache: bool = True,
-    ) -> tuple[ModelOutput | Exception, ModelCall]:
-        """Generate output from the model."""
-        # Convert messages and prepare request params
-        messages = [self._to_goodfire_message(msg) for msg in input]
-        # Build request parameters with type hints
-        params: dict[str, Any] = {
-            "model": self.variant.base_model,  # Use base_model instead of stringifying the Variant
-            "messages": messages,
-            "max_completion_tokens": int(config.max_tokens)
-            if config.max_tokens
-            else DEFAULT_MAX_TOKENS,
-            "stream": False,
-        }
-
-        # Add generation parameters from config if not in model_args
-        if "temperature" not in self.model_args and config.temperature is not None:
-            params["temperature"] = float(config.temperature)
-        elif "temperature" not in self.model_args:
-            params["temperature"] = DEFAULT_TEMPERATURE
-
-        if "top_p" not in self.model_args and config.top_p is not None:
-            params["top_p"] = float(config.top_p)
-        elif "top_p" not in self.model_args:
-            params["top_p"] = DEFAULT_TOP_P
-
-        # Add any additional model args (highest priority)
-        api_params = {
-            k: v
-            for k, v in self.model_args.items()
-            if k not in ["api_key", "base_url", "model_args"]
-        }
-        params.update(api_params)
-
-        try:
-            # Use native async client
-            response = await self.client.chat.completions.create(**params)
-            response_dict = response.model_dump()
-
-            output = ModelOutput(
-                model=self.model_name,
-                choices=[
-                    ChatCompletionChoice(
-                        message=ChatMessageAssistant(
-                            content=response_dict["choices"][0]["message"]["content"],
-                            model=self.model_name,
-                        ),
-                        stop_reason="stop",
-                    )
-                ],
-                usage=ModelUsage(**response_dict["usage"])
-                if "usage" in response_dict
-                else None,
-            )
-            model_call = ModelCall.create(request=params, response=response_dict)
-            return (output, model_call)
-        except Exception as ex:
-            result = self.handle_error(ex)
-            model_call = ModelCall.create(
-                request=params,
-                response={},  # Empty response for error case
-            )
-            return (result, model_call)
-
-    @property
-    def name(self) -> str:
-        """Get provider name."""
-        return "goodfire"