inspect-ai 0.3.72__py3-none-any.whl → 0.3.73__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (103)
  1. inspect_ai/_cli/eval.py +14 -3
  2. inspect_ai/_cli/sandbox.py +3 -3
  3. inspect_ai/_cli/score.py +6 -4
  4. inspect_ai/_cli/trace.py +53 -6
  5. inspect_ai/_display/core/config.py +1 -1
  6. inspect_ai/_display/core/display.py +2 -1
  7. inspect_ai/_display/core/footer.py +6 -6
  8. inspect_ai/_display/plain/display.py +11 -6
  9. inspect_ai/_display/rich/display.py +23 -13
  10. inspect_ai/_display/textual/app.py +10 -9
  11. inspect_ai/_display/textual/display.py +2 -2
  12. inspect_ai/_display/textual/widgets/footer.py +4 -0
  13. inspect_ai/_display/textual/widgets/samples.py +14 -5
  14. inspect_ai/_eval/context.py +1 -2
  15. inspect_ai/_eval/eval.py +54 -41
  16. inspect_ai/_eval/loader.py +9 -2
  17. inspect_ai/_eval/run.py +148 -81
  18. inspect_ai/_eval/score.py +13 -8
  19. inspect_ai/_eval/task/images.py +31 -21
  20. inspect_ai/_eval/task/run.py +62 -59
  21. inspect_ai/_eval/task/rundir.py +16 -9
  22. inspect_ai/_eval/task/sandbox.py +7 -8
  23. inspect_ai/_eval/task/util.py +7 -0
  24. inspect_ai/_util/_async.py +118 -10
  25. inspect_ai/_util/constants.py +0 -2
  26. inspect_ai/_util/file.py +15 -29
  27. inspect_ai/_util/future.py +37 -0
  28. inspect_ai/_util/http.py +3 -99
  29. inspect_ai/_util/httpx.py +60 -0
  30. inspect_ai/_util/interrupt.py +2 -2
  31. inspect_ai/_util/json.py +5 -52
  32. inspect_ai/_util/logger.py +30 -86
  33. inspect_ai/_util/retry.py +10 -61
  34. inspect_ai/_util/trace.py +2 -2
  35. inspect_ai/_view/server.py +86 -3
  36. inspect_ai/_view/www/dist/assets/index.js +25837 -13269
  37. inspect_ai/_view/www/log-schema.json +253 -186
  38. inspect_ai/_view/www/package.json +2 -2
  39. inspect_ai/_view/www/src/plan/PlanDetailView.tsx +8 -3
  40. inspect_ai/_view/www/src/samples/transcript/StepEventView.tsx +2 -3
  41. inspect_ai/_view/www/src/types/log.d.ts +122 -94
  42. inspect_ai/approval/_human/manager.py +6 -10
  43. inspect_ai/approval/_human/panel.py +2 -2
  44. inspect_ai/dataset/_sources/util.py +7 -6
  45. inspect_ai/log/__init__.py +4 -0
  46. inspect_ai/log/_file.py +35 -61
  47. inspect_ai/log/_log.py +18 -1
  48. inspect_ai/log/_recorders/eval.py +14 -23
  49. inspect_ai/log/_recorders/json.py +3 -18
  50. inspect_ai/log/_samples.py +27 -2
  51. inspect_ai/log/_transcript.py +8 -8
  52. inspect_ai/model/__init__.py +2 -1
  53. inspect_ai/model/_call_tools.py +60 -40
  54. inspect_ai/model/_chat_message.py +3 -2
  55. inspect_ai/model/_generate_config.py +25 -0
  56. inspect_ai/model/_model.py +74 -36
  57. inspect_ai/model/_openai.py +9 -1
  58. inspect_ai/model/_providers/anthropic.py +24 -26
  59. inspect_ai/model/_providers/azureai.py +11 -9
  60. inspect_ai/model/_providers/bedrock.py +33 -24
  61. inspect_ai/model/_providers/cloudflare.py +8 -9
  62. inspect_ai/model/_providers/goodfire.py +7 -3
  63. inspect_ai/model/_providers/google.py +47 -13
  64. inspect_ai/model/_providers/groq.py +15 -15
  65. inspect_ai/model/_providers/hf.py +24 -17
  66. inspect_ai/model/_providers/mistral.py +36 -20
  67. inspect_ai/model/_providers/openai.py +30 -25
  68. inspect_ai/model/_providers/openai_o1.py +1 -1
  69. inspect_ai/model/_providers/providers.py +1 -1
  70. inspect_ai/model/_providers/together.py +3 -4
  71. inspect_ai/model/_providers/util/__init__.py +2 -2
  72. inspect_ai/model/_providers/util/chatapi.py +6 -19
  73. inspect_ai/model/_providers/util/hooks.py +165 -0
  74. inspect_ai/model/_providers/vertex.py +20 -3
  75. inspect_ai/model/_providers/vllm.py +16 -19
  76. inspect_ai/scorer/_multi.py +5 -2
  77. inspect_ai/solver/_bridge/patch.py +31 -1
  78. inspect_ai/solver/_fork.py +5 -3
  79. inspect_ai/solver/_human_agent/agent.py +3 -2
  80. inspect_ai/tool/__init__.py +8 -2
  81. inspect_ai/tool/_tool_info.py +4 -90
  82. inspect_ai/tool/_tool_params.py +4 -34
  83. inspect_ai/tool/_tools/_web_search.py +30 -24
  84. inspect_ai/util/__init__.py +4 -0
  85. inspect_ai/util/_concurrency.py +5 -6
  86. inspect_ai/util/_display.py +6 -0
  87. inspect_ai/util/_json.py +170 -0
  88. inspect_ai/util/_sandbox/docker/cleanup.py +13 -9
  89. inspect_ai/util/_sandbox/docker/docker.py +5 -0
  90. inspect_ai/util/_sandbox/environment.py +56 -9
  91. inspect_ai/util/_sandbox/service.py +12 -5
  92. inspect_ai/util/_subprocess.py +94 -113
  93. inspect_ai/util/_subtask.py +2 -4
  94. {inspect_ai-0.3.72.dist-info → inspect_ai-0.3.73.dist-info}/METADATA +6 -2
  95. {inspect_ai-0.3.72.dist-info → inspect_ai-0.3.73.dist-info}/RECORD +99 -99
  96. {inspect_ai-0.3.72.dist-info → inspect_ai-0.3.73.dist-info}/WHEEL +1 -1
  97. inspect_ai/_util/timeouts.py +0 -160
  98. inspect_ai/_view/www/node_modules/flatted/python/flatted.py +0 -149
  99. inspect_ai/_view/www/node_modules/flatted/python/test.py +0 -63
  100. inspect_ai/model/_providers/util/tracker.py +0 -92
  101. {inspect_ai-0.3.72.dist-info → inspect_ai-0.3.73.dist-info}/LICENSE +0 -0
  102. {inspect_ai-0.3.72.dist-info → inspect_ai-0.3.73.dist-info}/entry_points.txt +0 -0
  103. {inspect_ai-0.3.72.dist-info → inspect_ai-0.3.73.dist-info}/top_level.txt +0 -0
inspect_ai/_eval/loader.py CHANGED
@@ -13,6 +13,7 @@ from typing import Any, Callable, Tuple, cast
  from typing_extensions import overload

  from inspect_ai._eval.task.util import task_file, task_run_dir
+ from inspect_ai._util._async import configured_async_backend
  from inspect_ai._util.decorator import parse_decorators
  from inspect_ai._util.error import PrerequisiteError
  from inspect_ai._util.logger import warn_once
@@ -306,11 +307,17 @@ def create_file_tasks(
  setattr(task, TASK_RUN_DIR_ATTR, run_dir)
  tasks.append(task)

- # warn about deprecated chdir attrib
+ # warn that chdir is deprecated
  if "chdir" in task.attribs:
+ if configured_async_backend() == "trio":
+ raise RuntimeError(
+ "The task 'chdir' attribute is not compatible with the trio async backend."
+ )
+
  warn_once(
  logger,
- "The 'chdir' task attribute is deprecated (tasks now always chdir)",
+ "The 'chdir' task attribute is deprecated and will be removed in a future release "
+ + "(you should write your tasks to not depend on their runtime working directory)",
  )

  return tasks
inspect_ai/_eval/run.py CHANGED
@@ -1,8 +1,15 @@
- import asyncio
+ import functools
  import logging
  import os
+ import sys
  from typing import Any, Awaitable, Callable, Set, cast

+ from inspect_ai._util.trace import trace_action
+
+ if sys.version_info < (3, 11):
+ from exceptiongroup import ExceptionGroup
+
+ import anyio
  from shortuuid import uuid
  from typing_extensions import Unpack

@@ -12,6 +19,7 @@ from inspect_ai._display.core.active import (
  init_task_screen,
  )
  from inspect_ai._display.core.display import TaskSpec
+ from inspect_ai._util._async import tg_collect
  from inspect_ai._util.error import PrerequisiteError, exception_message
  from inspect_ai._util.path import chdir
  from inspect_ai._util.registry import registry_unqualified_name
@@ -44,7 +52,7 @@ from .task.log import TaskLogger
  from .task.run import TaskRunOptions, task_run
  from .task.rundir import task_run_dir_switching
  from .task.sandbox import TaskSandboxEnvironment, resolve_sandbox_for_task
- from .task.util import slice_dataset, task_run_dir
+ from .task.util import slice_dataset, task_chdir, task_run_dir

  log = logging.getLogger(__name__)

@@ -67,6 +75,7 @@ async def eval_run(
  # see if we need to use run_dir switching
  run_dir = task_run_dir(tasks[0].task)
  multiple_run_dirs = any([task_run_dir(task.task) != run_dir for task in tasks])
+ tasks_chdir = any([task_chdir(task.task) is not None for task in tasks])
  has_sandbox = next((task.has_sandbox for task in tasks), None)

  # get cwd before switching to task dir
@@ -219,19 +228,25 @@
  # multiple mode is for running/displaying multiple
  # task definitions, which requires some smart scheduling
  # to ensure that we spread work among models
- if parallel > 1:
- if multiple_run_dirs:
- with task_run_dir_switching():
- return await run_multiple(task_run_options, parallel)
+ if tasks_chdir:
+ if parallel > 1:
+ if multiple_run_dirs:
+ with task_run_dir_switching():
+ return await run_multiple(task_run_options, parallel)
+ else:
+ with chdir(run_dir):
+ return await run_multiple(task_run_options, parallel)
+
+ # single mode is for a single task definitions (which
+ # could in turn be executed for multiple models)
  else:
  with chdir(run_dir):
- return await run_multiple(task_run_options, parallel)
-
- # single mode is for a single task definitions (which
- # could in turn be executed for multiple models)
+ return await run_single(task_run_options, debug_errors)
  else:
- with chdir(run_dir):
- return await run_single(task_run_options)
+ if parallel > 1:
+ return await run_multiple(task_run_options, parallel)
+ else:
+ return await run_single(task_run_options, debug_errors)

  finally:
  # shutdown sandbox environments
@@ -246,28 +261,37 @@

  # single mode -- run a single logical task (could consist of multiple
  # executable tasks if we are evaluating against multiple models)
- async def run_single(tasks: list[TaskRunOptions]) -> list[EvalLog]:
- # https://discuss.python.org/t/asyncio-cancel-a-cancellation-utility-as-a-coroutine-this-time-with-feeling/26304/3
-
+ async def run_single(tasks: list[TaskRunOptions], debug_errors: bool) -> list[EvalLog]:
  async with display().task_screen(task_specs(tasks), parallel=False) as screen:
+ # init ui
  init_task_screen(screen)
- asyncio_tasks = [asyncio.create_task(task_run(task)) for task in tasks]

+ results: list[tuple[int, EvalLog]] = []
  try:
- return await asyncio.gather(*asyncio_tasks)
- except asyncio.CancelledError:
- results: list[EvalLog] = []
- for task in asyncio_tasks:
- if task.done():
- results.append(task.result())
- else:
- task.cancel()
- await task
- results.append(task.result())
- return results
+ async with anyio.create_task_group() as tg:
+
+ async def run_task(index: int) -> None:
+ result = await task_run(tasks[index])
+ results.append((index, result))
+
+ for i in range(0, len(tasks)):
+ tg.start_soon(run_task, i)
+ # exceptions can escape when debug_errors is True and that's okay
+ except ExceptionGroup as ex:
+ if debug_errors:
+ raise ex.exceptions[0]
+ else:
+ raise
+ except anyio.get_cancelled_exc_class():
+ # child tasks have already each handled this and updated results
+ pass
  finally:
+ # clear ui
  clear_task_screen()

+ # sort results by original index and return just the values
+ return [r for _, r in sorted(results)]
+

  # multiple mode -- run multiple logical tasks (requires some smart
  # schedluing to ensure that we are spreading work among models)
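
Note: the run_single change above replaces asyncio.gather with an anyio task group that records each result alongside its index. A minimal standalone sketch of that pattern (illustrative names only, not inspect_ai code):

```python
# Sketch of the task-group pattern adopted by run_single above (illustrative
# names; not inspect_ai code). Results are tagged with their index so the
# output order matches the input order even when tasks finish out of order.
import functools
import anyio


async def gather_in_order(coros) -> list:
    results: list[tuple[int, object]] = []

    async def run_one(index: int) -> None:
        results.append((index, await coros[index]()))

    async with anyio.create_task_group() as tg:
        for i in range(len(coros)):
            tg.start_soon(run_one, i)

    # sort by original index, then drop the index
    return [value for _, value in sorted(results, key=lambda r: r[0])]


async def main() -> None:
    async def work(n: int) -> int:
        await anyio.sleep(0.01 * (3 - n))  # later items finish first
        return n

    print(await gather_in_order([functools.partial(work, n) for n in range(3)]))


anyio.run(main)  # prints [0, 1, 2]
```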
@@ -280,82 +304,125 @@ async def run_multiple(tasks: list[TaskRunOptions], parallel: int) -> list[EvalL

  # setup pending tasks, queue, and results
  pending_tasks = tasks.copy()
- queue: asyncio.Queue[TaskRunOptions] = asyncio.Queue()
  results: list[EvalLog] = []
  tasks_completed = 0
  total_tasks = len(tasks)

+ # produce/consume tasks
+ send_channel, receive_channel = anyio.create_memory_object_stream[TaskRunOptions](
+ parallel * 2
+ )
+
+ # find a task that keeps as many different models as possible running concurrently
  async def enque_next_task() -> bool:
  if tasks_completed < total_tasks:
- # find a task that keeps as many different models as possible running concurrently
- model = min(model_counts.items(), key=lambda m: m[1])[0]
- next_task = next((t for t in pending_tasks if str(t.model) == model), None)
- if next_task:
- pending_tasks.remove(next_task)
- model_counts[str(next_task.model)] += 1
- await queue.put(next_task)
- return True
- else:
+ # filter out models that have no pending tasks
+ models_with_pending = {
+ model
+ for model in model_counts
+ if any(str(t.model) == model for t in pending_tasks)
+ }
+ if not models_with_pending:
  return False
+
+ # among those models, pick one with the least usage
+ model = min(models_with_pending, key=lambda m: model_counts[m])
+
+ # now we know there’s at least one pending task for this model so it’s safe to pick it
+ next_task = next(t for t in pending_tasks if str(t.model) == model)
+ pending_tasks.remove(next_task)
+ model_counts[str(next_task.model)] += 1
+ with trace_action(
+ log, "Enque Task", f"task: {next_task.task.name} ({next_task.model})"
+ ):
+ await send_channel.send(next_task)
+ return True
  else:
  return False

  async def worker() -> None:
- # worker runs untiil cancelled
- nonlocal tasks_completed
- while True:
- # remove the task from the queue and run it
- task_options = await queue.get()
- task = asyncio.create_task(task_run(task_options))
- try:
- await task
- result = task.result()
- results.append(result)
- except asyncio.CancelledError:
- task.cancel()
- await task
- result = task.result()
- results.append(result)
- except Exception as ex:
- # errors generally don't escape from tasks (the exception being if an error
- # occurs during the final write of the log)
- log.error(
- f"Task '{task_options.task.name}' encountered an error during finalisation: {ex}"
- )
-
- # tracking
- tasks_completed += 1
- model_counts[str(task_options.model)] -= 1
- queue.task_done()
+ try:
+ nonlocal tasks_completed
+ async for task_options in receive_channel:
+ result: EvalLog | None = None
+
+ # run the task
+ try:
+ with trace_action(
+ log,
+ "Run Task",
+ f"task: {task_options.task.name} ({task_options.model})",
+ ):
+ tg_results = await tg_collect(
+ [functools.partial(task_run, task_options)]
+ )
+ # check for empty results list (indicates cancellation)
+ if len(tg_results) == 0:
+ # task was cancelled, break out of the worker loop
+ result = None
+
+ else:
+ result = tg_results[0]
+ results.append(result)
+
+ except Exception as ex:
+ # errors generally don't escape from tasks (the exception being if an error
+ # occurs during the final write of the log)
+ log.error(
+ f"Task '{task_options.task.name}' encountered an error during finalisation: {ex}"
+ )

- if result.status != "cancelled":
- await enque_next_task()
- else:
- break
+ # tracking
+ tasks_completed += 1
+ model_counts[str(task_options.model)] -= 1
+
+ # if a task was cancelled we are done
+ if not result or result.status == "cancelled":
+ break
+
+ # check if there are more tasks to process
+ if tasks_completed < total_tasks:
+ await enque_next_task()
+ elif tasks_completed == total_tasks:
+ # all tasks are complete, close the stream
+ try:
+ await send_channel.aclose()
+ except anyio.ClosedResourceError:
+ # another worker might have already closed it
+ pass
+ except anyio.EndOfStream:
+ pass

  # with task display
  async with display().task_screen(task_specs(tasks), parallel=True) as screen:
  # init screen
  init_task_screen(screen)

- # start worker tasks
- workers = [asyncio.create_task(worker()) for _ in range(0, parallel)]
-
- # enque initial set of tasks
- for _ in range(0, parallel):
- await enque_next_task()
-
- # wait for all tasks to complete
+ # Use anyio task group instead of manual task management
  try:
- await queue.join()
- except asyncio.CancelledError:
+ async with anyio.create_task_group() as tg:
+ # start worker tasks
+ for _ in range(parallel):
+ tg.start_soon(worker)
+
+ # enqueue initial set of tasks
+ for _ in range(min(parallel, total_tasks)):
+ await enque_next_task()
+ except anyio.get_cancelled_exc_class():
  pass
  finally:
- clear_task_screen()
+ # Always ensure channels are closed
+ try:
+ await send_channel.aclose()
+ except anyio.ClosedResourceError:
+ pass

- # cancel worker tasks
- for w in workers:
- w.cancel()
+ try:
+ await receive_channel.aclose()
+ except anyio.ClosedResourceError:
+ pass
+
+ clear_task_screen()

  return results

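Note: the run_multiple rewrite above swaps asyncio.Queue for an anyio memory object stream shared by several workers. A hedged, self-contained sketch of that producer/consumer shape (names and sizes are illustrative, not the package's actual values):

```python
# Sketch of the bounded producer/consumer pattern used above (illustrative
# names; not inspect_ai code). Workers drain a shared receive stream and exit
# naturally once the send side is closed.
import anyio


async def main() -> None:
    send, receive = anyio.create_memory_object_stream[int](max_buffer_size=4)

    async def worker(name: str) -> None:
        # iteration ends cleanly when the send channel is closed and drained
        async for item in receive:
            await anyio.sleep(0)  # stand-in for real work
            print(f"{name} handled item {item}")

    async with anyio.create_task_group() as tg:
        for i in range(2):
            tg.start_soon(worker, f"worker-{i}")
        for item in range(8):
            await send.send(item)
        await send.aclose()  # lets both workers finish their async for loops


anyio.run(main)
```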
inspect_ai/_eval/score.py CHANGED
@@ -1,10 +1,13 @@
- import asyncio
+ import functools
  from copy import deepcopy
  from pathlib import Path
  from typing import Any, Callable, Literal, cast

+ import anyio
+
  from inspect_ai._display import display
  from inspect_ai._eval.loader import scorer_from_spec
+ from inspect_ai._util._async import tg_collect
  from inspect_ai._util.platform import platform_init
  from inspect_ai._util.registry import registry_create, registry_unqualified_name
  from inspect_ai.log import (
@@ -53,7 +56,7 @@ def score(
  # resolve scorers into a list
  scorers = [scorers] if isinstance(scorers, Scorer) else scorers

- return asyncio.run(score_async(log, scorers, epochs_reducer, action))
+ return anyio.run(score_async, log, scorers, epochs_reducer, action)


  async def score_async(
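
For reference, the calling convention also changes in this hunk: asyncio.run takes a coroutine object, while anyio.run takes the async function plus its positional arguments. A tiny illustrative sketch (not inspect_ai code):

```python
# Both calls return 3; only the way arguments are passed differs.
import asyncio
import anyio


async def add(a: int, b: int) -> int:
    return a + b


print(asyncio.run(add(1, 2)))  # asyncio.run(coroutine object)
print(anyio.run(add, 1, 2))    # anyio.run(async function, *args)
```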
@@ -105,13 +108,15 @@ async def score_async(
  def progress() -> None:
  p.update(1)

- tasks = [
- run_score_task(state, Target(sample.target), scorers, progress)
- for (sample, state) in zip(log.samples, states)
- ]
-
  # do scoring
- scores: list[dict[str, SampleScore]] = await asyncio.gather(*tasks)
+ scores: list[dict[str, SampleScore]] = await tg_collect(
+ [
+ functools.partial(
+ run_score_task, state, Target(sample.target), scorers, progress
+ )
+ for (sample, state) in zip(log.samples, states)
+ ]
+ )

  # write them back (gather ensures that they come back in the same order)
  for index, score in enumerate(scores):
inspect_ai/_eval/task/images.py CHANGED
@@ -1,16 +1,19 @@
- import asyncio
+ import functools

+ from inspect_ai._util._async import tg_collect
  from inspect_ai._util.constants import BASE_64_DATA_REMOVED
  from inspect_ai._util.content import Content, ContentAudio, ContentImage, ContentVideo
  from inspect_ai._util.images import file_as_data_uri
  from inspect_ai._util.url import is_data_uri
  from inspect_ai.dataset import Sample
- from inspect_ai.model import ChatMessage, ChatMessageUser
+ from inspect_ai.model import ChatMessage
  from inspect_ai.solver import TaskState


  async def states_with_base64_content(states: list[TaskState]) -> list[TaskState]:
- return await asyncio.gather(*[state_with_base64_content(state) for state in states])
+ return await tg_collect(
+ [functools.partial(state_with_base64_content, state) for state in states]
+ )


  async def state_with_base64_content(state: TaskState) -> TaskState:
@@ -24,8 +27,8 @@ def state_without_base64_content(state: TaskState) -> TaskState:


  async def samples_with_base64_content(samples: list[Sample]) -> list[Sample]:
- return await asyncio.gather(
- *[sample_with_base64_content(sample) for sample in samples]
+ return await tg_collect(
+ [functools.partial(sample_with_base64_content, sample) for sample in samples]
  )


@@ -50,8 +53,11 @@ def sample_without_base64_content(sample: Sample) -> Sample:
  async def messages_with_base64_content(
  messages: list[ChatMessage],
  ) -> list[ChatMessage]:
- return await asyncio.gather(
- *[message_with_base64_content(message) for message in messages]
+ return await tg_collect(
+ [
+ functools.partial(message_with_base64_content, message)
+ for message in messages
+ ]
  )


@@ -60,27 +66,31 @@ def messages_without_base64_content(messages: list[ChatMessage]) -> list[ChatMes


  async def message_with_base64_content(message: ChatMessage) -> ChatMessage:
- if isinstance(message, ChatMessageUser) and not isinstance(message.content, str):
- return ChatMessageUser(
- content=[
- await chat_content_with_base64_content(content)
- for content in message.content
- ],
- source=message.source,
+ if not isinstance(message.content, str):
+ return message.model_copy(
+ update=dict(
+ content=[
+ await chat_content_with_base64_content(content)
+ for content in message.content
+ ]
+ )
  )
+
  else:
  return message


  def message_without_base64_content(message: ChatMessage) -> ChatMessage:
- if isinstance(message, ChatMessageUser) and not isinstance(message.content, str):
- return ChatMessageUser(
- content=[
- chat_content_without_base64_content(content)
- for content in message.content
- ],
- source=message.source,
+ if not isinstance(message.content, str):
+ return message.model_copy(
+ update=dict(
+ content=[
+ chat_content_without_base64_content(content)
+ for content in message.content
+ ]
+ )
  )
+
  else:
  return message
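
Note: the images.py hunk above stops reconstructing ChatMessageUser and instead calls message.model_copy(update=...), which works for any message subclass and preserves the remaining fields (such as source). A small illustrative Pydantic sketch (hypothetical model, not inspect_ai's ChatMessage):

```python
# Sketch of the model_copy(update=...) pattern used above (hypothetical model;
# not inspect_ai's ChatMessage). Only `content` is replaced -- every other
# field is carried over unchanged.
from typing import Optional

from pydantic import BaseModel


class Message(BaseModel):
    role: str
    content: list[str]
    source: Optional[str] = None


msg = Message(role="user", content=["data:image/png;base64,AAAA"], source="input")
updated = msg.model_copy(update=dict(content=["<base64-data-removed>"]))
print(updated.role, updated.source)  # "user input" -- preserved by the copy
```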