pydantic-ai-slim 0.3.3.tar.gz → 0.3.4.tar.gz

This diff shows the changes between publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.

Note: the registry scanner flagged this version of pydantic-ai-slim as potentially problematic; see the advisory on the registry page for details.
Files changed (81)
  1. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/PKG-INFO +4 -4
  2. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/_agent_graph.py +7 -1
  3. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/agent.py +1 -0
  4. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/messages.py +20 -11
  5. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/models/anthropic.py +7 -9
  6. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/models/instrumented.py +5 -1
  7. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/tools.py +3 -2
  8. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/.gitignore +0 -0
  9. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/LICENSE +0 -0
  10. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/README.md +0 -0
  11. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/__init__.py +0 -0
  12. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/__main__.py +0 -0
  13. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/_a2a.py +0 -0
  14. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/_cli.py +0 -0
  15. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/_function_schema.py +0 -0
  16. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/_griffe.py +0 -0
  17. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/_mcp.py +0 -0
  18. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/_output.py +0 -0
  19. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/_parts_manager.py +0 -0
  20. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/_run_context.py +0 -0
  21. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/_system_prompt.py +0 -0
  22. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/_thinking_part.py +0 -0
  23. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/_utils.py +0 -0
  24. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/common_tools/__init__.py +0 -0
  25. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/common_tools/duckduckgo.py +0 -0
  26. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/common_tools/tavily.py +0 -0
  27. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/direct.py +0 -0
  28. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/exceptions.py +0 -0
  29. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/ext/__init__.py +0 -0
  30. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/ext/langchain.py +0 -0
  31. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/format_as_xml.py +0 -0
  32. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/format_prompt.py +0 -0
  33. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/mcp.py +0 -0
  34. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/models/__init__.py +0 -0
  35. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/models/bedrock.py +0 -0
  36. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/models/cohere.py +0 -0
  37. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/models/fallback.py +0 -0
  38. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/models/function.py +0 -0
  39. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/models/gemini.py +0 -0
  40. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/models/google.py +0 -0
  41. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/models/groq.py +0 -0
  42. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/models/mcp_sampling.py +0 -0
  43. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/models/mistral.py +0 -0
  44. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/models/openai.py +0 -0
  45. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/models/test.py +0 -0
  46. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/models/wrapper.py +0 -0
  47. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/output.py +0 -0
  48. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/profiles/__init__.py +0 -0
  49. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/profiles/_json_schema.py +0 -0
  50. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/profiles/amazon.py +0 -0
  51. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/profiles/anthropic.py +0 -0
  52. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/profiles/cohere.py +0 -0
  53. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/profiles/deepseek.py +0 -0
  54. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/profiles/google.py +0 -0
  55. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/profiles/grok.py +0 -0
  56. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/profiles/meta.py +0 -0
  57. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/profiles/mistral.py +0 -0
  58. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/profiles/openai.py +0 -0
  59. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/profiles/qwen.py +0 -0
  60. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/providers/__init__.py +0 -0
  61. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/providers/anthropic.py +0 -0
  62. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/providers/azure.py +0 -0
  63. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/providers/bedrock.py +0 -0
  64. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/providers/cohere.py +0 -0
  65. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/providers/deepseek.py +0 -0
  66. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/providers/fireworks.py +0 -0
  67. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/providers/google.py +0 -0
  68. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/providers/google_gla.py +0 -0
  69. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/providers/google_vertex.py +0 -0
  70. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/providers/grok.py +0 -0
  71. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/providers/groq.py +0 -0
  72. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/providers/heroku.py +0 -0
  73. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/providers/mistral.py +0 -0
  74. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/providers/openai.py +0 -0
  75. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/providers/openrouter.py +0 -0
  76. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/providers/together.py +0 -0
  77. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/py.typed +0 -0
  78. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/result.py +0 -0
  79. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/settings.py +0 -0
  80. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pydantic_ai/usage.py +0 -0
  81. {pydantic_ai_slim-0.3.3 → pydantic_ai_slim-0.3.4}/pyproject.toml +0 -0
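
Of the 81 files, only the first seven carry substantive changes; the rest are unchanged between the two tarballs. The release threads a new include_content instrumentation setting from InstrumentationSettings through the agent graph, message serialization, and tool execution, so prompts, completions, and tool arguments can be excluded from OpenTelemetry events and span attributes. It also starts sending Anthropic thinking blocks back to the provider, and bumps the pinned pydantic-graph, fasta2a, and pydantic-evals versions to 0.3.4.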
--- pydantic_ai_slim-0.3.3/PKG-INFO
+++ pydantic_ai_slim-0.3.4/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 0.3.3
+Version: 0.3.4
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Author-email: Samuel Colvin <samuel@pydantic.dev>, Marcelo Trylesinski <marcelotryle@gmail.com>, David Montague <david@pydantic.dev>, Alex Hall <alex@pydantic.dev>
 License-Expression: MIT
@@ -30,11 +30,11 @@ Requires-Dist: exceptiongroup; python_version < '3.11'
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==0.3.3
+Requires-Dist: pydantic-graph==0.3.4
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: a2a
-Requires-Dist: fasta2a==0.3.3; extra == 'a2a'
+Requires-Dist: fasta2a==0.3.4; extra == 'a2a'
 Provides-Extra: anthropic
 Requires-Dist: anthropic>=0.52.0; extra == 'anthropic'
 Provides-Extra: bedrock
@@ -48,7 +48,7 @@ Requires-Dist: cohere>=5.13.11; (platform_system != 'Emscripten') and extra == '
 Provides-Extra: duckduckgo
 Requires-Dist: duckduckgo-search>=7.0.0; extra == 'duckduckgo'
 Provides-Extra: evals
-Requires-Dist: pydantic-evals==0.3.3; extra == 'evals'
+Requires-Dist: pydantic-evals==0.3.4; extra == 'evals'
 Provides-Extra: google
 Requires-Dist: google-genai>=1.15.0; extra == 'google'
 Provides-Extra: groq
--- pydantic_ai_slim-0.3.3/pydantic_ai/_agent_graph.py
+++ pydantic_ai_slim-0.3.4/pydantic_ai/_agent_graph.py
@@ -24,6 +24,7 @@ from .tools import RunContext, Tool, ToolDefinition, ToolsPrepareFunc

 if TYPE_CHECKING:
     from .mcp import MCPServer
+    from .models.instrumented import InstrumentationSettings

 __all__ = (
     'GraphAgentState',
@@ -112,6 +113,7 @@ class GraphAgentDeps(Generic[DepsT, OutputDataT]):
     default_retries: int

     tracer: Tracer
+    instrumentation_settings: InstrumentationSettings | None = None

     prepare_tools: ToolsPrepareFunc[DepsT] | None = None

@@ -712,6 +714,10 @@ async def process_function_tools(  # noqa C901

     user_parts: list[_messages.UserPromptPart] = []

+    include_content = (
+        ctx.deps.instrumentation_settings is not None and ctx.deps.instrumentation_settings.include_content
+    )
+
     # Run all tool tasks in parallel
     results_by_index: dict[int, _messages.ModelRequestPart] = {}
     with ctx.deps.tracer.start_as_current_span(
@@ -722,7 +728,7 @@ async def process_function_tools(  # noqa C901
         },
     ):
         tasks = [
-            asyncio.create_task(tool.run(call, run_context, ctx.deps.tracer), name=call.tool_name)
+            asyncio.create_task(tool.run(call, run_context, ctx.deps.tracer, include_content), name=call.tool_name)
             for tool, call in calls_to_run
         ]

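The fan-out above creates one named task per tool call and gathers the results afterwards, now passing the computed include_content flag into each Tool.run. A minimal self-contained sketch of the same pattern — run_tool and calls are illustrative stand-ins, not the real pydantic-ai internals:

import asyncio


async def run_tool(tool_name: str, include_content: bool) -> str:
    # Stand-in for Tool.run; the real call also takes the tool call, run context, and tracer.
    await asyncio.sleep(0)
    return f'{tool_name} ran (content recorded: {include_content})'


async def main() -> None:
    calls = ['get_weather', 'get_time']
    # One named task per tool call, mirroring the create_task(..., name=...) fan-out above.
    tasks = [asyncio.create_task(run_tool(call, True), name=call) for call in calls]
    print(await asyncio.gather(*tasks))


asyncio.run(main())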
--- pydantic_ai_slim-0.3.3/pydantic_ai/agent.py
+++ pydantic_ai_slim-0.3.4/pydantic_ai/agent.py
@@ -719,6 +719,7 @@ class Agent(Generic[AgentDepsT, OutputDataT]):
             tracer=tracer,
             prepare_tools=self._prepare_tools,
             get_instructions=get_instructions,
+            instrumentation_settings=instrumentation_settings,
         )
         start_node = _agent_graph.UserPromptNode[AgentDepsT](
             user_prompt=user_prompt,
--- pydantic_ai_slim-0.3.3/pydantic_ai/messages.py
+++ pydantic_ai_slim-0.3.4/pydantic_ai/messages.py
@@ -76,8 +76,11 @@ class SystemPromptPart:
     part_kind: Literal['system-prompt'] = 'system-prompt'
     """Part type identifier, this is available on all parts as a discriminator."""

-    def otel_event(self, _settings: InstrumentationSettings) -> Event:
-        return Event('gen_ai.system.message', body={'content': self.content, 'role': 'system'})
+    def otel_event(self, settings: InstrumentationSettings) -> Event:
+        return Event(
+            'gen_ai.system.message',
+            body={'role': 'system', **({'content': self.content} if settings.include_content else {})},
+        )

     __repr__ = _utils.dataclasses_no_defaults_repr

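The rewritten otel_event relies on a conditional dict-spread: when the flag is off, the content key is absent from the event body rather than set to None. A minimal sketch of the pattern, with illustrative names only:

def event_body(content: str, include_content: bool) -> dict[str, str]:
    # **({...} if flag else {}) merges either one key or nothing into the literal.
    return {'role': 'system', **({'content': content} if include_content else {})}


assert event_body('be terse', True) == {'role': 'system', 'content': 'be terse'}
assert event_body('be terse', False) == {'role': 'system'}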
@@ -362,12 +365,12 @@ class UserPromptPart:
            content = []
            for part in self.content:
                if isinstance(part, str):
-                    content.append(part)
+                    content.append(part if settings.include_content else {'kind': 'text'})
                elif isinstance(part, (ImageUrl, AudioUrl, DocumentUrl, VideoUrl)):
-                    content.append({'kind': part.kind, 'url': part.url})
+                    content.append({'kind': part.kind, **({'url': part.url} if settings.include_content else {})})
                elif isinstance(part, BinaryContent):
                    converted_part = {'kind': part.kind, 'media_type': part.media_type}
-                    if settings.include_binary_content:
+                    if settings.include_content and settings.include_binary_content:
                        converted_part['binary_content'] = base64.b64encode(part.data).decode()
                    content.append(converted_part)
                else:
@@ -414,10 +417,15 @@ class ToolReturnPart:
         else:
             return {'return_value': tool_return_ta.dump_python(self.content, mode='json')}

-    def otel_event(self, _settings: InstrumentationSettings) -> Event:
+    def otel_event(self, settings: InstrumentationSettings) -> Event:
         return Event(
             'gen_ai.tool.message',
-            body={'content': self.content, 'role': 'tool', 'id': self.tool_call_id, 'name': self.tool_name},
+            body={
+                **({'content': self.content} if settings.include_content else {}),
+                'role': 'tool',
+                'id': self.tool_call_id,
+                'name': self.tool_name,
+            },
         )

     __repr__ = _utils.dataclasses_no_defaults_repr
@@ -473,14 +481,14 @@ class RetryPromptPart:
             description = f'{len(self.content)} validation errors: {json_errors.decode()}'
         return f'{description}\n\nFix the errors and try again.'

-    def otel_event(self, _settings: InstrumentationSettings) -> Event:
+    def otel_event(self, settings: InstrumentationSettings) -> Event:
         if self.tool_name is None:
             return Event('gen_ai.user.message', body={'content': self.model_response(), 'role': 'user'})
         else:
             return Event(
                 'gen_ai.tool.message',
                 body={
-                    'content': self.model_response(),
+                    **({'content': self.model_response()} if settings.include_content else {}),
                     'role': 'tool',
                     'id': self.tool_call_id,
                     'name': self.tool_name,
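
Note that in the tool_name is None branch the retry text is still emitted unconditionally as a gen_ai.user.message; only the tool-message branch is gated on settings.include_content in this release.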
@@ -657,7 +665,7 @@ class ModelResponse:
     vendor_id: str | None = None
     """Vendor ID as specified by the model provider. This can be used to track the specific request to the model."""

-    def otel_events(self) -> list[Event]:
+    def otel_events(self, settings: InstrumentationSettings) -> list[Event]:
         """Return OpenTelemetry events for the response."""
         result: list[Event] = []

@@ -683,7 +691,8 @@ class ModelResponse:
             elif isinstance(part, TextPart):
                 if body.get('content'):
                     body = new_event_body()
-                body['content'] = part.content
+                if settings.include_content:
+                    body['content'] = part.content

         return result

--- pydantic_ai_slim-0.3.3/pydantic_ai/models/anthropic.py
+++ pydantic_ai_slim-0.3.4/pydantic_ai/models/anthropic.py
@@ -342,15 +342,13 @@ class AnthropicModel(Model):
                 if response_part.content:  # Only add non-empty text
                     assistant_content_params.append(BetaTextBlockParam(text=response_part.content, type='text'))
             elif isinstance(response_part, ThinkingPart):
-                # NOTE: We don't send ThinkingPart to the providers yet. If you are unsatisfied with this,
-                # please open an issue. The below code is the code to send thinking to the provider.
-                # assert response_part.signature is not None, 'Thinking part must have a signature'
-                # assistant_content_params.append(
-                #     BetaThinkingBlockParam(
-                #         thinking=response_part.content, signature=response_part.signature, type='thinking'
-                #     )
-                # )
-                pass
+                # NOTE: We only send thinking part back for Anthropic, otherwise they raise an error.
+                if response_part.signature is not None:  # pragma: no branch
+                    assistant_content_params.append(
+                        BetaThinkingBlockParam(
+                            thinking=response_part.content, signature=response_part.signature, type='thinking'
+                        )
+                    )
             else:
                 tool_use_block_param = BetaToolUseBlockParam(
                     id=_guard_tool_call_id(t=response_part),
--- pydantic_ai_slim-0.3.3/pydantic_ai/models/instrumented.py
+++ pydantic_ai_slim-0.3.4/pydantic_ai/models/instrumented.py
@@ -92,6 +92,7 @@ class InstrumentationSettings:
         meter_provider: MeterProvider | None = None,
         event_logger_provider: EventLoggerProvider | None = None,
         include_binary_content: bool = True,
+        include_content: bool = True,
     ):
         """Create instrumentation options.

@@ -109,6 +110,8 @@ class InstrumentationSettings:
                 Calling `logfire.configure()` sets the global event logger provider, so most users don't need this.
                 This is only used if `event_mode='logs'`.
             include_binary_content: Whether to include binary content in the instrumentation events.
+            include_content: Whether to include prompts, completions, and tool call arguments and responses
+                in the instrumentation events.
         """
         from pydantic_ai import __version__

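Taken together, a user can now opt out of content capture while keeping spans, events, and metrics. A hedged usage sketch — it assumes Agent accepts InstrumentationSettings via its instrument argument, as in recent pydantic-ai releases, and 'openai:gpt-4o' is only an illustrative model name:

from pydantic_ai import Agent
from pydantic_ai.models.instrumented import InstrumentationSettings

# Assumption: Agent(instrument=...) takes InstrumentationSettings; verify against
# your installed version.
settings = InstrumentationSettings(include_content=False, include_binary_content=False)
agent = Agent('openai:gpt-4o', instrument=settings)
# Prompts, completions, and tool arguments are now omitted from the emitted telemetry.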
@@ -121,6 +124,7 @@ class InstrumentationSettings:
         self.event_logger = event_logger_provider.get_event_logger(scope_name, __version__)
         self.event_mode = event_mode
         self.include_binary_content = include_binary_content
+        self.include_content = include_content

         # As specified in the OpenTelemetry GenAI metrics spec:
         # https://opentelemetry.io/docs/specs/semconv/gen-ai/gen-ai-metrics/#metric-gen_aiclienttokenusage
@@ -161,7 +165,7 @@ class InstrumentationSettings:
                 if hasattr(part, 'otel_event'):
                     message_events.append(part.otel_event(self))
             elif isinstance(message, ModelResponse):  # pragma: no branch
-                message_events = message.otel_events()
+                message_events = message.otel_events(self)
             for event in message_events:
                 event.attributes = {
                     'gen_ai.message.index': message_index,
--- pydantic_ai_slim-0.3.3/pydantic_ai/tools.py
+++ pydantic_ai_slim-0.3.4/pydantic_ai/tools.py
@@ -327,6 +327,7 @@ class Tool(Generic[AgentDepsT]):
         message: _messages.ToolCallPart,
         run_context: RunContext[AgentDepsT],
         tracer: Tracer,
+        include_content: bool = False,
     ) -> _messages.ToolReturnPart | _messages.RetryPromptPart:
         """Run the tool function asynchronously.

@@ -338,14 +339,14 @@ class Tool(Generic[AgentDepsT]):
             'gen_ai.tool.name': self.name,
             # NOTE: this means `gen_ai.tool.call.id` will be included even if it was generated by pydantic-ai
             'gen_ai.tool.call.id': message.tool_call_id,
-            'tool_arguments': message.args_as_json_str(),
+            **({'tool_arguments': message.args_as_json_str()} if include_content else {}),
             'logfire.msg': f'running tool: {self.name}',
             # add the JSON schema so these attributes are formatted nicely in Logfire
             'logfire.json_schema': json.dumps(
                 {
                     'type': 'object',
                     'properties': {
-                        'tool_arguments': {'type': 'object'},
+                        **({'tool_arguments': {'type': 'object'}} if include_content else {}),
                         'gen_ai.tool.name': {},
                         'gen_ai.tool.call.id': {},
                     },
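
Note the symmetry in this last hunk: the tool_arguments span attribute and its entry in logfire.json_schema are gated on the same flag, so the advertised schema always matches the attributes actually attached. A small sketch of that invariant, with illustrative values:

import json

include_content = False  # e.g. derived from InstrumentationSettings(include_content=False)

attributes = {
    'gen_ai.tool.name': 'get_weather',
    **({'tool_arguments': '{"city": "Paris"}'} if include_content else {}),
    'logfire.json_schema': json.dumps(
        {
            'type': 'object',
            'properties': {
                **({'tool_arguments': {'type': 'object'}} if include_content else {}),
                'gen_ai.tool.name': {},
            },
        }
    ),
}

schema_keys = json.loads(attributes['logfire.json_schema'])['properties']
# tool_arguments appears in the schema exactly when it is attached as an attribute.
assert ('tool_arguments' in attributes) == ('tool_arguments' in schema_keys)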