inspect-ai 0.3.75__py3-none-any.whl → 0.3.77__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (87)
  1. inspect_ai/_cli/eval.py +16 -0
  2. inspect_ai/_display/core/results.py +6 -1
  3. inspect_ai/_eval/eval.py +8 -1
  4. inspect_ai/_eval/evalset.py +6 -2
  5. inspect_ai/_eval/registry.py +3 -5
  6. inspect_ai/_eval/run.py +7 -2
  7. inspect_ai/_eval/task/run.py +4 -0
  8. inspect_ai/_util/content.py +3 -0
  9. inspect_ai/_util/logger.py +3 -0
  10. inspect_ai/_view/www/dist/assets/index.css +28 -16
  11. inspect_ai/_view/www/dist/assets/index.js +4811 -4609
  12. inspect_ai/_view/www/log-schema.json +79 -9
  13. inspect_ai/_view/www/src/samples/chat/tools/ToolCallView.tsx +22 -4
  14. inspect_ai/_view/www/src/samples/chat/tools/ToolInput.tsx +1 -1
  15. inspect_ai/_view/www/src/samples/descriptor/score/CategoricalScoreDescriptor.tsx +1 -1
  16. inspect_ai/_view/www/src/samples/descriptor/score/NumericScoreDescriptor.tsx +2 -2
  17. inspect_ai/_view/www/src/samples/sample-tools/SortFilter.tsx +1 -1
  18. inspect_ai/_view/www/src/samples/transcript/ModelEventView.module.css +2 -2
  19. inspect_ai/_view/www/src/types/log.d.ts +11 -5
  20. inspect_ai/log/_recorders/json.py +8 -0
  21. inspect_ai/log/_transcript.py +13 -4
  22. inspect_ai/model/_call_tools.py +13 -4
  23. inspect_ai/model/_chat_message.py +3 -0
  24. inspect_ai/model/_model.py +5 -1
  25. inspect_ai/model/_model_output.py +6 -1
  26. inspect_ai/model/_openai.py +78 -10
  27. inspect_ai/model/_openai_responses.py +277 -0
  28. inspect_ai/model/_providers/anthropic.py +134 -75
  29. inspect_ai/model/_providers/azureai.py +2 -2
  30. inspect_ai/model/_providers/mistral.py +29 -13
  31. inspect_ai/model/_providers/openai.py +64 -57
  32. inspect_ai/model/_providers/openai_responses.py +177 -0
  33. inspect_ai/model/_providers/openrouter.py +52 -2
  34. inspect_ai/model/_providers/providers.py +1 -1
  35. inspect_ai/model/_providers/vertex.py +5 -2
  36. inspect_ai/tool/__init__.py +6 -0
  37. inspect_ai/tool/_tool.py +23 -3
  38. inspect_ai/tool/_tool_call.py +5 -2
  39. inspect_ai/tool/_tool_support_helpers.py +200 -0
  40. inspect_ai/tool/_tools/_bash_session.py +119 -0
  41. inspect_ai/tool/_tools/_computer/_computer.py +1 -1
  42. inspect_ai/tool/_tools/_text_editor.py +121 -0
  43. inspect_ai/tool/_tools/_think.py +48 -0
  44. inspect_ai/tool/_tools/_web_browser/_back_compat.py +150 -0
  45. inspect_ai/tool/_tools/_web_browser/_web_browser.py +75 -130
  46. inspect_ai/tool/_tools/_web_search.py +1 -1
  47. inspect_ai/util/_json.py +28 -0
  48. inspect_ai/util/_sandbox/context.py +16 -7
  49. inspect_ai/util/_sandbox/docker/config.py +1 -1
  50. inspect_ai/util/_sandbox/docker/internal.py +3 -3
  51. {inspect_ai-0.3.75.dist-info → inspect_ai-0.3.77.dist-info}/METADATA +5 -2
  52. {inspect_ai-0.3.75.dist-info → inspect_ai-0.3.77.dist-info}/RECORD +56 -80
  53. {inspect_ai-0.3.75.dist-info → inspect_ai-0.3.77.dist-info}/WHEEL +1 -1
  54. inspect_ai/model/_image.py +0 -15
  55. inspect_ai/tool/_tools/_web_browser/_resources/.pylintrc +0 -8
  56. inspect_ai/tool/_tools/_web_browser/_resources/.vscode/launch.json +0 -24
  57. inspect_ai/tool/_tools/_web_browser/_resources/.vscode/settings.json +0 -25
  58. inspect_ai/tool/_tools/_web_browser/_resources/Dockerfile +0 -22
  59. inspect_ai/tool/_tools/_web_browser/_resources/README.md +0 -63
  60. inspect_ai/tool/_tools/_web_browser/_resources/accessibility_tree.py +0 -71
  61. inspect_ai/tool/_tools/_web_browser/_resources/accessibility_tree_node.py +0 -323
  62. inspect_ai/tool/_tools/_web_browser/_resources/cdp/__init__.py +0 -5
  63. inspect_ai/tool/_tools/_web_browser/_resources/cdp/a11y.py +0 -279
  64. inspect_ai/tool/_tools/_web_browser/_resources/cdp/dom.py +0 -9
  65. inspect_ai/tool/_tools/_web_browser/_resources/cdp/dom_snapshot.py +0 -293
  66. inspect_ai/tool/_tools/_web_browser/_resources/cdp/page.py +0 -94
  67. inspect_ai/tool/_tools/_web_browser/_resources/constants.py +0 -2
  68. inspect_ai/tool/_tools/_web_browser/_resources/images/usage_diagram.svg +0 -2
  69. inspect_ai/tool/_tools/_web_browser/_resources/mock_environment.py +0 -45
  70. inspect_ai/tool/_tools/_web_browser/_resources/playwright_browser.py +0 -50
  71. inspect_ai/tool/_tools/_web_browser/_resources/playwright_crawler.py +0 -48
  72. inspect_ai/tool/_tools/_web_browser/_resources/playwright_page_crawler.py +0 -280
  73. inspect_ai/tool/_tools/_web_browser/_resources/pyproject.toml +0 -65
  74. inspect_ai/tool/_tools/_web_browser/_resources/rectangle.py +0 -64
  75. inspect_ai/tool/_tools/_web_browser/_resources/rpc_client_helpers.py +0 -146
  76. inspect_ai/tool/_tools/_web_browser/_resources/scale_factor.py +0 -64
  77. inspect_ai/tool/_tools/_web_browser/_resources/test_accessibility_tree_node.py +0 -180
  78. inspect_ai/tool/_tools/_web_browser/_resources/test_playwright_crawler.py +0 -99
  79. inspect_ai/tool/_tools/_web_browser/_resources/test_rectangle.py +0 -15
  80. inspect_ai/tool/_tools/_web_browser/_resources/test_web_client.py +0 -44
  81. inspect_ai/tool/_tools/_web_browser/_resources/web_browser_rpc_types.py +0 -39
  82. inspect_ai/tool/_tools/_web_browser/_resources/web_client.py +0 -214
  83. inspect_ai/tool/_tools/_web_browser/_resources/web_client_new_session.py +0 -35
  84. inspect_ai/tool/_tools/_web_browser/_resources/web_server.py +0 -192
  85. {inspect_ai-0.3.75.dist-info → inspect_ai-0.3.77.dist-info}/entry_points.txt +0 -0
  86. {inspect_ai-0.3.75.dist-info → inspect_ai-0.3.77.dist-info/licenses}/LICENSE +0 -0
  87. {inspect_ai-0.3.75.dist-info → inspect_ai-0.3.77.dist-info}/top_level.txt +0 -0
inspect_ai/model/_openai.py

@@ -1,7 +1,9 @@
 import json
 import re
+from copy import copy
 from typing import Literal
 
+from openai import BadRequestError, OpenAIError
 from openai.types.chat import (
     ChatCompletion,
     ChatCompletionAssistantMessageParam,
@@ -26,7 +28,9 @@ from openai.types.chat.chat_completion import Choice, ChoiceLogprobs
 from openai.types.chat.chat_completion_message_tool_call import Function
 from openai.types.completion_usage import CompletionUsage
 from openai.types.shared_params.function_definition import FunctionDefinition
+from pydantic import JsonValue
 
+from inspect_ai._util.constants import BASE_64_DATA_REMOVED
 from inspect_ai._util.content import (
     Content,
     ContentAudio,
@@ -48,23 +52,39 @@ from ._chat_message import (
     ChatMessageTool,
     ChatMessageUser,
 )
-from ._model_output import ModelUsage, StopReason, as_stop_reason
+from ._model_output import ModelOutput, ModelUsage, StopReason, as_stop_reason
+
+
+class OpenAIResponseError(OpenAIError):
+    def __init__(self, code: str, message: str) -> None:
+        self.code = code
+        self.message = message
+
+    def __str__(self) -> str:
+        return f"{self.code}: {self.message}"
 
 
 def is_o_series(name: str) -> bool:
-    return bool(re.match(r"(^|.*\/)o\d+", name))
+    if bool(re.match(r"^o\d+", name)):
+        return True
+    else:
+        return not is_gpt(name) and bool(re.search(r"o\d+", name))
+
+
+def is_o1_pro(name: str) -> bool:
+    return "o1-pro" in name
 
 
 def is_o1_mini(name: str) -> bool:
-    return name.startswith("o1-mini")
+    return "o1-mini" in name
 
 
 def is_o1_preview(name: str) -> bool:
-    return name.startswith("o1-preview")
+    return "o1-preview" in name
 
 
 def is_gpt(name: str) -> bool:
-    return name.startswith("gpt")
+    return "gpt" in name
 
 
 def openai_chat_tool_call(tool_call: ToolCall) -> ChatCompletionMessageToolCall:
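
Note on the matching change: the name helpers above now match on substrings rather than prefixes, so provider- or deployment-qualified model names are still classified correctly, while the is_gpt() guard keeps names like "gpt-4o" out of the o-series bucket. A minimal sketch of the resulting behavior (the model names below are illustrative examples, not taken from the diff):

    from inspect_ai.model._openai import is_gpt, is_o1_mini, is_o_series

    assert is_o_series("o3-mini")                   # leading o<digit> still matches
    assert is_o_series("azure/my-o3-deployment")    # substring match for non-gpt names
    assert not is_o_series("gpt-4o")                # guarded by is_gpt()
    assert is_o1_mini("openai/o1-mini-2024-09-12")  # prefix no longer required
    assert is_gpt("azure/gpt-4o-mini")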
@@ -80,12 +100,13 @@ def openai_chat_tool_call_param(tool_call: ToolCall) -> ChatCompletionMessageToolCall:
 def openai_chat_tool_call_param(
     tool_call: ToolCall,
 ) -> ChatCompletionMessageToolCallParam:
+    assert tool_call.type == "function", f"Unexpected tool call type {tool_call.type}"
     return ChatCompletionMessageToolCallParam(
         id=tool_call.id,
         function=dict(
             name=tool_call.function, arguments=json.dumps(tool_call.arguments)
         ),
-        type=tool_call.type,
+        type="function",  # Type narrowing couldn't figure it out
     )
 
 
@@ -108,7 +129,8 @@ async def openai_chat_completion_part(
             image_url=dict(url=image_url, detail=detail),
         )
     elif content.type == "audio":
-        audio_data = await file_as_data_uri(content.audio)
+        audio_data_uri = await file_as_data_uri(content.audio)
+        audio_data = audio_data_uri.split("base64,")[1]
 
         return ChatCompletionContentPartInputAudioParam(
             type="input_audio", input_audio=dict(data=audio_data, format=content.format)
@@ -315,6 +337,7 @@ def chat_messages_from_openai(
             chat_messages.append(ChatMessageUser(content=content))
         elif message["role"] == "assistant":
             # resolve content
+            refusal: Literal[True] | None = None
             asst_content = message.get("content", None)
             if isinstance(asst_content, str):
                 result = parse_content_with_reasoning(asst_content)
@@ -331,6 +354,8 @@ def chat_messages_from_openai(
                 content = asst_content
             elif asst_content is None:
                 content = message.get("refusal", None) or ""
+                if content:
+                    refusal = True
             else:
                 content = []
                 for ac in asst_content:
@@ -343,7 +368,7 @@ def chat_messages_from_openai(
             )
             if reasoning is not None:
                 if isinstance(content, str):
-                    content = [ContentText(text=content)]
+                    content = [ContentText(text=content, refusal=refusal)]
                 else:
                     content.insert(0, ContentReasoning(reasoning=str(reasoning)))
 
@@ -432,7 +457,7 @@ def content_from_openai(
             )
         ]
     elif content["type"] == "refusal":
-        return [ContentText(text=content["refusal"])]
+        return [ContentText(text=content["refusal"], refusal=True)]
     else:
         content_type = content["type"]
         raise ValueError(f"Unexpected content type '{content_type}' in message.")
@@ -450,8 +475,10 @@ def chat_message_assistant_from_openai(
     if reasoning is not None:
         content: str | list[Content] = [
             ContentReasoning(reasoning=str(reasoning)),
-            ContentText(text=msg_content),
+            ContentText(text=msg_content, refusal=True if refusal else None),
         ]
+    elif refusal is not None:
+        content = [ContentText(text=msg_content, refusal=True)]
     else:
         content = msg_content
 
@@ -479,3 +506,44 @@ def chat_choices_from_openai(
         )
         for choice in choices
     ]
+
+
+def openai_handle_bad_request(
+    model_name: str, e: BadRequestError
+) -> ModelOutput | Exception:
+    # extract message
+    if isinstance(e.body, dict) and "message" in e.body.keys():
+        content = str(e.body.get("message"))
+    else:
+        content = e.message
+
+    # narrow stop_reason
+    stop_reason: StopReason | None = None
+    if e.code == "context_length_exceeded":
+        stop_reason = "model_length"
+    elif (
+        e.code == "invalid_prompt"  # seems to happen for o1/o3
+        or e.code == "content_policy_violation"  # seems to happen for vision
+        or e.code == "content_filter"  # seems to happen on azure
+    ):
+        stop_reason = "content_filter"
+
+    if stop_reason:
+        return ModelOutput.from_content(
+            model=model_name, content=content, stop_reason=stop_reason
+        )
+    else:
+        return e
+
+
+def openai_media_filter(key: JsonValue | None, value: JsonValue) -> JsonValue:
+    # remove images from raw api call
+    if key == "image_url" and isinstance(value, dict) and "url" in value:
+        url = str(value.get("url"))
+        if url.startswith("data:"):
+            value = copy(value)
+            value.update(url=BASE_64_DATA_REMOVED)
+    elif key == "input_audio" and isinstance(value, dict) and "data" in value:
+        value = copy(value)
+        value.update(data=BASE_64_DATA_REMOVED)
+    return value
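
openai_media_filter() is designed to scrub base64 image and audio payloads out of recorded API calls by rewriting values keyed by "image_url" or "input_audio". The recursive walker below is only an illustration of how such a key/value filter can be applied to a request body; it is not the traversal helper inspect_ai itself uses:

    from typing import Any

    def apply_media_filter(value: Any, key: str | None = None) -> Any:
        # give the filter a chance to rewrite the value based on its parent key,
        # then recurse into dicts and lists
        value = openai_media_filter(key, value)
        if isinstance(value, dict):
            return {k: apply_media_filter(v, k) for k, v in value.items()}
        if isinstance(value, list):
            return [apply_media_filter(v, key) for v in value]
        return value

Applied to a request dict, any data: URL under "image_url" and any "input_audio" data field is replaced with the BASE_64_DATA_REMOVED marker.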
inspect_ai/model/_openai_responses.py (new file)

@@ -0,0 +1,277 @@
+import json
+
+from openai.types.responses import (
+    FunctionToolParam,
+    Response,
+    ResponseFunctionToolCall,
+    ResponseFunctionToolCallParam,
+    ResponseInputContentParam,
+    ResponseInputImageParam,
+    ResponseInputItemParam,
+    ResponseInputMessageContentListParam,
+    ResponseInputTextParam,
+    ResponseOutputMessage,
+    ResponseOutputMessageParam,
+    ResponseOutputRefusalParam,
+    ResponseOutputText,
+    ResponseOutputTextParam,
+    ResponseReasoningItem,
+    ResponseReasoningItemParam,
+    ToolChoiceFunctionParam,
+    ToolParam,
+)
+from openai.types.responses.response_create_params import (
+    ToolChoice as ResponsesToolChoice,
+)
+from openai.types.responses.response_input_item_param import FunctionCallOutput, Message
+from openai.types.responses.response_reasoning_item_param import Summary
+
+from inspect_ai._util.content import (
+    Content,
+    ContentImage,
+    ContentReasoning,
+    ContentText,
+)
+from inspect_ai._util.images import file_as_data_uri
+from inspect_ai._util.url import is_http_url
+from inspect_ai.model._call_tools import parse_tool_call
+from inspect_ai.model._model_output import ChatCompletionChoice, StopReason
+from inspect_ai.model._openai import is_o_series
+from inspect_ai.tool._tool_call import ToolCall
+from inspect_ai.tool._tool_choice import ToolChoice
+from inspect_ai.tool._tool_info import ToolInfo
+
+from ._chat_message import ChatMessage, ChatMessageAssistant
+
+
+async def openai_responses_inputs(
+    messages: list[ChatMessage], model: str
+) -> list[ResponseInputItemParam]:
+    responses_inputs: list[ResponseInputItemParam] = []
+    for message in messages:
+        responses_inputs.extend(await openai_responses_input(message, model))
+    return responses_inputs
+
+
+async def openai_responses_input(
+    message: ChatMessage, model: str
+) -> list[ResponseInputItemParam]:
+    if message.role == "system":
+        content = await openai_responses_content_list_param(message.content)
+        if is_o_series(model):
+            return [Message(type="message", role="developer", content=content)]
+        else:
+            return [Message(type="message", role="system", content=content)]
+    elif message.role == "user":
+        return [
+            Message(
+                type="message",
+                role="user",
+                content=await openai_responses_content_list_param(message.content),
+            )
+        ]
+    elif message.role == "assistant":
+        reasoning_content = openai_responses_reasponing_content_params(message.content)
+        if message.content:
+            text_content = [
+                ResponseOutputMessageParam(
+                    type="message",
+                    role="assistant",
+                    id=str(message.id).replace("resp_", "msg_", 1),
+                    content=openai_responses_text_content_params(message.content),
+                    status="completed",
+                )
+            ]
+        else:
+            text_content = []
+        tools_content = openai_responses_tools_content_params(message.tool_calls)
+        return reasoning_content + text_content + tools_content
+    elif message.role == "tool":
+        # TODO: Return ouptut types for internal tools e.g. computer, web_search
+        if message.error is not None:
+            output = message.error.message
+        else:
+            output = message.text
+        return [
+            FunctionCallOutput(
+                type="function_call_output",
+                call_id=message.tool_call_id or str(message.function),
+                output=output,
+            )
+        ]
+    else:
+        raise ValueError(f"Unexpected message role '{message.role}'")
+
+
+async def openai_responses_content_list_param(
+    content: str | list[Content],
+) -> ResponseInputMessageContentListParam:
+    if isinstance(content, str):
+        content = [ContentText(text=content)]
+    return [await openai_responses_content_param(c) for c in content]
+
+
+async def openai_responses_content_param(content: Content) -> ResponseInputContentParam:  # type: ignore[return]
+    if isinstance(content, ContentText):
+        return ResponseInputTextParam(type="input_text", text=content.text)
+    elif isinstance(content, ContentImage):
+        image_url = content.image
+        if not is_http_url(image_url):
+            image_url = await file_as_data_uri(image_url)
+
+        return ResponseInputImageParam(
+            type="input_image", detail=content.detail, image_url=image_url
+        )
+    else:
+        # TODO: support for files (PDFs) and audio and video whenever
+        # that is supported by the responses API (was not on initial release)
+
+        # TODO: note that when doing this we should ensure that the
+        # openai_media_filter is properly screening out base64 encoded
+        # audio and video (if it exists, looks like it may all be done
+        # w/ file uploads in the responses API)
+
+        raise ValueError("Unsupported content type.")
+
+
+def openai_responses_reasponing_content_params(
+    content: str | list[Content],
+) -> list[ResponseInputItemParam]:
+    if isinstance(content, list):
+        return [
+            ResponseReasoningItemParam(
+                type="reasoning",
+                id=str(c.signature),
+                summary=[Summary(type="summary_text", text=c.reasoning)],
+            )
+            for c in content
+            if isinstance(c, ContentReasoning)
+        ]
+    else:
+        return []
+
+
+def openai_responses_text_content_params(
+    content: str | list[Content],
+) -> list[ResponseOutputTextParam | ResponseOutputRefusalParam]:
+    if isinstance(content, str):
+        content = [ContentText(text=content)]
+
+    params: list[ResponseOutputTextParam | ResponseOutputRefusalParam] = []
+
+    for c in content:
+        if isinstance(c, ContentText):
+            if c.refusal:
+                params.append(
+                    ResponseOutputRefusalParam(type="refusal", refusal=c.text)
+                )
+            else:
+                params.append(
+                    ResponseOutputTextParam(
+                        type="output_text", text=c.text, annotations=[]
+                    )
+                )
+
+    return params
+
+
+def openai_responses_tools_content_params(
+    tool_calls: list[ToolCall] | None,
+) -> list[ResponseInputItemParam]:
+    if tool_calls is not None:
+        return [
+            ResponseFunctionToolCallParam(
+                type="function_call",
+                call_id=call.id,
+                name=call.function,
+                arguments=json.dumps(call.arguments),
+                status="completed",
+            )
+            for call in tool_calls
+        ]
+    else:
+        return []
+
+
+def openai_responses_tool_choice(tool_choice: ToolChoice) -> ResponsesToolChoice:
+    match tool_choice:
+        case "none" | "auto":
+            return tool_choice
+        case "any":
+            return "required"
+        # TODO: internal tools need to be converted to ToolChoiceTypesParam
+        case _:
+            return ToolChoiceFunctionParam(type="function", name=tool_choice.name)
+
+
+def openai_responses_tools(tools: list[ToolInfo]) -> list[ToolParam]:
+    # TODO: return special types for internal tools
+    return [
+        FunctionToolParam(
+            type="function",
+            name=tool.name,
+            description=tool.description,
+            parameters=tool.parameters.model_dump(exclude_none=True),
+            strict=False,  # default parameters don't work in strict mode
+        )
+        for tool in tools
+    ]
+
+
+def openai_responses_chat_choices(
+    response: Response, tools: list[ToolInfo]
+) -> list[ChatCompletionChoice]:
+    # determine the StopReason
+    stop_reason: StopReason = "stop"
+    if response.incomplete_details is not None:
+        if response.incomplete_details.reason == "max_output_tokens":
+            stop_reason = "max_tokens"
+        elif response.incomplete_details.reason == "content_filter":
+            stop_reason = "content_filter"
+
+    # collect output and tool calls
+    message_content: list[Content] = []
+    tool_calls: list[ToolCall] = []
+    for output in response.output:
+        if isinstance(output, ResponseOutputMessage):
+            for content in output.content:
+                if isinstance(content, ResponseOutputText):
+                    message_content.append(ContentText(text=content.text))
+                else:
+                    message_content.append(
+                        ContentText(text=content.refusal, refusal=True)
+                    )
+        elif isinstance(output, ResponseReasoningItem):
+            reasoning = "\n".join([summary.text for summary in output.summary])
+            if reasoning:
+                message_content.append(
+                    ContentReasoning(signature=output.id, reasoning=reasoning)
+                )
+        else:
+            stop_reason = "tool_calls"
+            if isinstance(output, ResponseFunctionToolCall):
+                tool_calls.append(
+                    parse_tool_call(
+                        output.call_id,
+                        output.name,
+                        output.arguments,
+                        tools,
+                    )
+                )
+                pass
+            else:
+                ## TODO: implement support for internal tools
+                raise ValueError(f"Unexpected output type: {output.__class__}")
+
+    # return choice
+    return [
+        ChatCompletionChoice(
+            message=ChatMessageAssistant(
+                id=response.id,
+                content=message_content,
+                tool_calls=tool_calls if len(tool_calls) > 0 else None,
+                source="generate",
+            ),
+            stop_reason=stop_reason,
+        )
+    ]
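
Taken together, these helpers translate between Inspect's chat types and the OpenAI Responses API: openai_responses_inputs() and openai_responses_tools() build the request, and openai_responses_chat_choices() maps the Response back to ChatCompletionChoice values. The sketch below shows roughly how a provider might wire them around AsyncOpenAI().responses.create(); it is a simplified illustration under that assumption, not the actual implementation in inspect_ai/model/_providers/openai_responses.py:

    from openai import NOT_GIVEN, AsyncOpenAI

    from inspect_ai.model import ChatCompletionChoice, ChatMessage
    from inspect_ai.model._openai_responses import (
        openai_responses_chat_choices,
        openai_responses_inputs,
        openai_responses_tool_choice,
        openai_responses_tools,
    )
    from inspect_ai.tool import ToolChoice, ToolInfo


    async def generate_via_responses(
        model: str,
        messages: list[ChatMessage],
        tools: list[ToolInfo],
        tool_choice: ToolChoice,
    ) -> list[ChatCompletionChoice]:
        # simplified: omits GenerateConfig handling, retries, and usage reporting
        client = AsyncOpenAI()
        response = await client.responses.create(
            model=model,
            input=await openai_responses_inputs(messages, model),
            tools=openai_responses_tools(tools) if tools else NOT_GIVEN,
            tool_choice=openai_responses_tool_choice(tool_choice) if tools else NOT_GIVEN,
        )
        return openai_responses_chat_choices(response, tools)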