letta-nightly 0.6.37.dev20250311104150__py3-none-any.whl → 0.6.39.dev20250313104142__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of letta-nightly might be problematic.

Files changed (58)
  1. letta/__init__.py +1 -1
  2. letta/agent.py +83 -23
  3. letta/agents/low_latency_agent.py +3 -2
  4. letta/client/client.py +1 -50
  5. letta/constants.py +4 -1
  6. letta/functions/function_sets/base.py +1 -1
  7. letta/functions/function_sets/multi_agent.py +9 -8
  8. letta/functions/helpers.py +47 -6
  9. letta/functions/schema_generator.py +47 -0
  10. letta/helpers/mcp_helpers.py +108 -0
  11. letta/llm_api/cohere.py +1 -1
  12. letta/llm_api/google_ai_client.py +332 -0
  13. letta/llm_api/google_vertex_client.py +214 -0
  14. letta/llm_api/helpers.py +1 -2
  15. letta/llm_api/llm_api_tools.py +0 -1
  16. letta/llm_api/llm_client.py +48 -0
  17. letta/llm_api/llm_client_base.py +129 -0
  18. letta/local_llm/utils.py +30 -20
  19. letta/log.py +1 -1
  20. letta/memory.py +1 -1
  21. letta/orm/__init__.py +1 -0
  22. letta/orm/block.py +8 -0
  23. letta/orm/enums.py +2 -0
  24. letta/orm/identities_blocks.py +13 -0
  25. letta/orm/identity.py +9 -0
  26. letta/orm/sqlalchemy_base.py +4 -4
  27. letta/orm/step.py +1 -0
  28. letta/schemas/block.py +4 -48
  29. letta/schemas/identity.py +3 -0
  30. letta/schemas/letta_message.py +26 -0
  31. letta/schemas/message.py +69 -63
  32. letta/schemas/step.py +1 -0
  33. letta/schemas/tool.py +39 -2
  34. letta/serialize_schemas/agent.py +8 -1
  35. letta/server/rest_api/app.py +15 -0
  36. letta/server/rest_api/chat_completions_interface.py +2 -0
  37. letta/server/rest_api/interface.py +46 -13
  38. letta/server/rest_api/routers/openai/chat_completions/chat_completions.py +2 -7
  39. letta/server/rest_api/routers/v1/agents.py +14 -10
  40. letta/server/rest_api/routers/v1/blocks.py +5 -1
  41. letta/server/rest_api/routers/v1/steps.py +2 -0
  42. letta/server/rest_api/routers/v1/tools.py +71 -1
  43. letta/server/rest_api/routers/v1/voice.py +3 -6
  44. letta/server/server.py +102 -5
  45. letta/services/agent_manager.py +58 -3
  46. letta/services/block_manager.py +10 -1
  47. letta/services/helpers/agent_manager_helper.py +12 -1
  48. letta/services/identity_manager.py +61 -15
  49. letta/services/message_manager.py +40 -0
  50. letta/services/step_manager.py +8 -1
  51. letta/services/summarizer/summarizer.py +1 -1
  52. letta/services/tool_manager.py +6 -0
  53. letta/settings.py +11 -12
  54. {letta_nightly-0.6.37.dev20250311104150.dist-info → letta_nightly-0.6.39.dev20250313104142.dist-info}/METADATA +20 -18
  55. {letta_nightly-0.6.37.dev20250311104150.dist-info → letta_nightly-0.6.39.dev20250313104142.dist-info}/RECORD +58 -52
  56. {letta_nightly-0.6.37.dev20250311104150.dist-info → letta_nightly-0.6.39.dev20250313104142.dist-info}/LICENSE +0 -0
  57. {letta_nightly-0.6.37.dev20250311104150.dist-info → letta_nightly-0.6.39.dev20250313104142.dist-info}/WHEEL +0 -0
  58. {letta_nightly-0.6.37.dev20250311104150.dist-info → letta_nightly-0.6.39.dev20250313104142.dist-info}/entry_points.txt +0 -0
letta/llm_api/llm_client_base.py ADDED
@@ -0,0 +1,129 @@
+ from abc import abstractmethod
+ from typing import List, Optional, Union
+
+ from openai import AsyncStream, Stream
+ from openai.types.chat.chat_completion_chunk import ChatCompletionChunk
+
+ from letta.schemas.llm_config import LLMConfig
+ from letta.schemas.message import Message
+ from letta.schemas.openai.chat_completion_response import ChatCompletionResponse
+ from letta.tracing import log_event
+
+
+ class LLMClientBase:
+     """
+     Abstract base class for LLM clients, formatting the request objects,
+     handling the downstream request and parsing into chat completions response format
+     """
+
+     def __init__(
+         self,
+         agent_id: str,
+         llm_config: LLMConfig,
+         put_inner_thoughts_first: Optional[bool] = True,
+         use_structured_output: Optional[bool] = True,
+         use_tool_naming: bool = True,
+         actor_id: Optional[str] = None,
+     ):
+         self.agent_id = agent_id
+         self.llm_config = llm_config
+         self.put_inner_thoughts_first = put_inner_thoughts_first
+         self.actor_id = actor_id
+
+     def send_llm_request(
+         self,
+         messages: List[Message],
+         tools: Optional[List[dict]] = None,  # TODO: change to Tool object
+         tool_call: Optional[str] = None,
+         stream: bool = False,
+         first_message: bool = False,
+         force_tool_call: Optional[str] = None,
+     ) -> Union[ChatCompletionResponse, Stream[ChatCompletionChunk]]:
+         """
+         Issues a request to the downstream model endpoint and parses response.
+         If stream=True, returns a Stream[ChatCompletionChunk] that can be iterated over.
+         Otherwise returns a ChatCompletionResponse.
+         """
+         request_data = self.build_request_data(messages, tools, tool_call)
+         log_event(name="llm_request_sent", attributes=request_data)
+         if stream:
+             return self.stream(request_data)
+         else:
+             response_data = self.request(request_data)
+             log_event(name="llm_response_received", attributes=response_data)
+             return self.convert_response_to_chat_completion(response_data, messages)
+
+     async def send_llm_request_async(
+         self,
+         messages: List[Message],
+         tools: Optional[List[dict]] = None,  # TODO: change to Tool object
+         tool_call: Optional[str] = None,
+         stream: bool = False,
+         first_message: bool = False,
+         force_tool_call: Optional[str] = None,
+     ) -> Union[ChatCompletionResponse, AsyncStream[ChatCompletionChunk]]:
+         """
+         Issues a request to the downstream model endpoint.
+         If stream=True, returns an AsyncStream[ChatCompletionChunk] that can be async iterated over.
+         Otherwise returns a ChatCompletionResponse.
+         """
+         request_data = self.build_request_data(messages, tools, tool_call)
+         log_event(name="llm_request_sent", attributes=request_data)
+         if stream:
+             return await self.stream_async(request_data)
+         else:
+             response_data = await self.request_async(request_data)
+             log_event(name="llm_response_received", attributes=response_data)
+             return self.convert_response_to_chat_completion(response_data, messages)
+
+     @abstractmethod
+     def build_request_data(
+         self,
+         messages: List[Message],
+         tools: List[dict],
+         tool_call: Optional[str],
+     ) -> dict:
+         """
+         Constructs a request object in the expected data format for this client.
+         """
+         raise NotImplementedError
+
+     @abstractmethod
+     def request(self, request_data: dict) -> dict:
+         """
+         Performs underlying request to llm and returns raw response.
+         """
+         raise NotImplementedError
+
+     @abstractmethod
+     async def request_async(self, request_data: dict) -> dict:
+         """
+         Performs underlying request to llm and returns raw response.
+         """
+         raise NotImplementedError
+
+     @abstractmethod
+     def convert_response_to_chat_completion(
+         self,
+         response_data: dict,
+         input_messages: List[Message],
+     ) -> ChatCompletionResponse:
+         """
+         Converts custom response format from llm client into an OpenAI
+         ChatCompletionsResponse object.
+         """
+         raise NotImplementedError
+
+     @abstractmethod
+     def stream(self, request_data: dict) -> Stream[ChatCompletionChunk]:
+         """
+         Performs underlying streaming request to llm and returns raw response.
+         """
+         raise NotImplementedError(f"Streaming is not supported for {self.llm_config.model_endpoint_type}")
+
+     @abstractmethod
+     async def stream_async(self, request_data: dict) -> AsyncStream[ChatCompletionChunk]:
+         """
+         Performs underlying streaming request to llm and returns raw response.
+         """
+         raise NotImplementedError(f"Streaming is not supported for {self.llm_config.model_endpoint_type}")
letta/local_llm/utils.py CHANGED
@@ -114,26 +114,36 @@ def num_tokens_from_functions(functions: List[dict], model: str = "gpt-4"):
                      function_tokens += len(encoding.encode(propertiesKey))
                      v = parameters["properties"][propertiesKey]
                      for field in v:
-                         if field == "type":
-                             function_tokens += 2
-                             function_tokens += len(encoding.encode(v["type"]))
-                         elif field == "description":
-                             function_tokens += 2
-                             function_tokens += len(encoding.encode(v["description"]))
-                         elif field == "enum":
-                             function_tokens -= 3
-                             for o in v["enum"]:
-                                 function_tokens += 3
-                                 function_tokens += len(encoding.encode(o))
-                         elif field == "items":
-                             function_tokens += 2
-                             if isinstance(v["items"], dict) and "type" in v["items"]:
-                                 function_tokens += len(encoding.encode(v["items"]["type"]))
-                         elif field == "default":
-                             function_tokens += 2
-                             function_tokens += len(encoding.encode(str(v["default"])))
-                         else:
-                             logger.warning(f"num_tokens_from_functions: Unsupported field {field} in function {function}")
+                         try:
+                             if field == "type":
+                                 function_tokens += 2
+                                 function_tokens += len(encoding.encode(v["type"]))
+                             elif field == "description":
+                                 function_tokens += 2
+                                 function_tokens += len(encoding.encode(v["description"]))
+                             elif field == "enum":
+                                 function_tokens -= 3
+                                 for o in v["enum"]:
+                                     function_tokens += 3
+                                     function_tokens += len(encoding.encode(o))
+                             elif field == "items":
+                                 function_tokens += 2
+                                 if isinstance(v["items"], dict) and "type" in v["items"]:
+                                     function_tokens += len(encoding.encode(v["items"]["type"]))
+                             elif field == "default":
+                                 function_tokens += 2
+                                 function_tokens += len(encoding.encode(str(v["default"])))
+                             elif field == "title":
+                                 # TODO: Is this right? For MCP
+                                 continue
+                             else:
+                                 # TODO: Handle nesting here properly
+                                 # Disable this for now for MCP
+                                 continue
+                                 # logger.warning(f"num_tokens_from_functions: Unsupported field {field} in function {function}")
+                         except:
+                             logger.error(f"Failed to encode field {field} with value {v}")
+                             raise
              function_tokens += 11

      num_tokens += function_tokens
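The patched loop prices a function schema by encoding each field's string value with tiktoken and adding small fixed offsets, and it now skips unknown fields (such as MCP's `title`) instead of warning. A rough, hedged sketch of that heuristic; the helper name `approx_property_tokens` is ours, and it requires the `tiktoken` package:

```python
# Approximate token cost of one JSON-schema property, mirroring the offsets in
# the patched loop above. Illustrative only, not Letta's actual function.
import tiktoken


def approx_property_tokens(name: str, prop: dict, model: str = "gpt-4") -> int:
    encoding = tiktoken.encoding_for_model(model)
    tokens = len(encoding.encode(name))
    for field, value in prop.items():
        if field in ("type", "description"):
            tokens += 2 + len(encoding.encode(value))
        elif field == "enum":
            tokens -= 3
            for option in value:
                tokens += 3 + len(encoding.encode(option))
        # Unknown fields (e.g. MCP's "title") are skipped, as in the patch.
    return tokens


print(approx_property_tokens("city", {"type": "string", "description": "City name"}))
```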
letta/log.py CHANGED
@@ -54,7 +54,7 @@ DEVELOPMENT_LOGGING = {
          "propagate": True,  # Let logs bubble up to root
      },
      "uvicorn": {
-         "level": "INFO",
+         "level": "CRITICAL",
          "handlers": ["console"],
          "propagate": True,
      },
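For context, a dict like `DEVELOPMENT_LOGGING` is consumed by the stdlib's `dictConfig`, so raising the `uvicorn` logger to CRITICAL silences its INFO-level access logs. A minimal stdlib-only sketch of the effect (the config below is illustrative, not Letta's full logging dict):

```python
# Demonstrates why bumping a logger to CRITICAL suppresses routine output.
import logging.config

logging.config.dictConfig({
    "version": 1,
    "handlers": {"console": {"class": "logging.StreamHandler"}},
    "loggers": {"uvicorn": {"level": "CRITICAL", "handlers": ["console"]}},
})

logging.getLogger("uvicorn").info("GET /health 200")   # suppressed
logging.getLogger("uvicorn").critical("server crash")  # still emitted
```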
letta/memory.py CHANGED
@@ -36,7 +36,7 @@ def get_memory_functions(cls: Memory) -> Dict[str, Callable]:

  def _format_summary_history(message_history: List[Message]):
      # TODO use existing prompt formatters for this (eg ChatML)
-     return "\n".join([f"{m.role}: {m.text}" for m in message_history])
+     return "\n".join([f"{m.role}: {m.content[0].text}" for m in message_history])


  def summarize_messages(
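The one-line change reflects the message schema migration in this release: a message's single `.text` field was replaced by a list of content parts, so the summarizer now reads the first part's text. A simplified stand-in showing the new access pattern; the dataclasses below are not Letta's real `Message`/`TextContent` types:

```python
# Toy types mirroring the shape _format_summary_history now expects.
from dataclasses import dataclass
from typing import List


@dataclass
class TextContent:
    text: str


@dataclass
class Message:
    role: str
    content: List[TextContent]  # was a single .text field before this release


def format_summary_history(history: List[Message]) -> str:
    # Mirrors the patched line: read the first content part's text.
    return "\n".join(f"{m.role}: {m.content[0].text}" for m in history)


print(format_summary_history([Message("user", [TextContent("hello")])]))
# user: hello
```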
letta/orm/__init__.py CHANGED
@@ -5,6 +5,7 @@ from letta.orm.block import Block
  from letta.orm.blocks_agents import BlocksAgents
  from letta.orm.file import FileMetadata
  from letta.orm.identities_agents import IdentitiesAgents
+ from letta.orm.identities_blocks import IdentitiesBlocks
  from letta.orm.identity import Identity
  from letta.orm.job import Job
  from letta.orm.job_messages import JobMessage
letta/orm/block.py CHANGED
@@ -12,6 +12,7 @@ from letta.schemas.block import Human, Persona

  if TYPE_CHECKING:
      from letta.orm import Organization
+     from letta.orm.identity import Identity


  class Block(OrganizationMixin, SqlalchemyBase):
@@ -47,6 +48,13 @@ class Block(OrganizationMixin, SqlalchemyBase):
          back_populates="core_memory",
          doc="Agents associated with this block.",
      )
+     identities: Mapped[List["Identity"]] = relationship(
+         "Identity",
+         secondary="identities_blocks",
+         lazy="selectin",
+         back_populates="blocks",
+         passive_deletes=True,
+     )

      def to_pydantic(self) -> Type:
          match self.label:
letta/orm/enums.py CHANGED
@@ -8,6 +8,8 @@ class ToolType(str, Enum):
      LETTA_MULTI_AGENT_CORE = "letta_multi_agent_core"
      EXTERNAL_COMPOSIO = "external_composio"
      EXTERNAL_LANGCHAIN = "external_langchain"
+     # TODO is "external" the right name here? Since as of now, MCP is local / doesn't support remote?
+     EXTERNAL_MCP = "external_mcp"


  class JobType(str, Enum):
letta/orm/identities_blocks.py ADDED
@@ -0,0 +1,13 @@
+ from sqlalchemy import ForeignKey, String
+ from sqlalchemy.orm import Mapped, mapped_column
+
+ from letta.orm.base import Base
+
+
+ class IdentitiesBlocks(Base):
+     """Identities may have one or many blocks associated with them."""
+
+     __tablename__ = "identities_blocks"
+
+     identity_id: Mapped[str] = mapped_column(String, ForeignKey("identities.id", ondelete="CASCADE"), primary_key=True)
+     block_id: Mapped[str] = mapped_column(String, ForeignKey("block.id", ondelete="CASCADE"), primary_key=True)
letta/orm/identity.py CHANGED
@@ -40,12 +40,20 @@ class Identity(SqlalchemyBase, OrganizationMixin):
      agents: Mapped[List["Agent"]] = relationship(
          "Agent", secondary="identities_agents", lazy="selectin", passive_deletes=True, back_populates="identities"
      )
+     blocks: Mapped[List["Block"]] = relationship(
+         "Block", secondary="identities_blocks", lazy="selectin", passive_deletes=True, back_populates="identities"
+     )

      @property
      def agent_ids(self) -> List[str]:
          """Get just the agent IDs without loading the full agent objects"""
          return [agent.id for agent in self.agents]

+     @property
+     def block_ids(self) -> List[str]:
+         """Get just the block IDs without loading the full block objects"""
+         return [block.id for block in self.blocks]
+
      def to_pydantic(self) -> PydanticIdentity:
          state = {
              "id": self.id,
@@ -54,6 +62,7 @@ class Identity(SqlalchemyBase, OrganizationMixin):
              "identity_type": self.identity_type,
              "project_id": self.project_id,
              "agent_ids": self.agent_ids,
+             "block_ids": self.block_ids,
              "organization_id": self.organization_id,
              "properties": self.properties,
          }
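Together with the new `identities_blocks` join table and the `blocks`/`identities` relationships above, this gives identities and blocks a standard SQLAlchemy many-to-many. A self-contained sketch of the wiring, using simplified models and in-memory SQLite rather than Letta's actual ORM:

```python
# Minimal many-to-many mirroring the identities <-> blocks association
# (SQLAlchemy 2.0 style). Table and class names follow the diff; everything
# else is simplified for illustration.
from typing import List

from sqlalchemy import ForeignKey, String, create_engine
from sqlalchemy.orm import DeclarativeBase, Mapped, Session, mapped_column, relationship


class Base(DeclarativeBase):
    pass


class IdentitiesBlocks(Base):
    """Join table, as in the new letta/orm/identities_blocks.py."""

    __tablename__ = "identities_blocks"

    identity_id: Mapped[str] = mapped_column(String, ForeignKey("identities.id", ondelete="CASCADE"), primary_key=True)
    block_id: Mapped[str] = mapped_column(String, ForeignKey("block.id", ondelete="CASCADE"), primary_key=True)


class Identity(Base):
    __tablename__ = "identities"

    id: Mapped[str] = mapped_column(String, primary_key=True)
    blocks: Mapped[List["Block"]] = relationship("Block", secondary="identities_blocks", back_populates="identities")


class Block(Base):
    __tablename__ = "block"

    id: Mapped[str] = mapped_column(String, primary_key=True)
    identities: Mapped[List[Identity]] = relationship(Identity, secondary="identities_blocks", back_populates="blocks")


engine = create_engine("sqlite://")
Base.metadata.create_all(engine)

with Session(engine) as session:
    # Attaching blocks to an identity populates the join table automatically.
    session.add(Identity(id="identity-1", blocks=[Block(id="block-1")]))
    session.commit()
    print([b.id for b in session.get(Identity, "identity-1").blocks])  # ['block-1']
```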
letta/orm/sqlalchemy_base.py CHANGED
@@ -69,7 +69,7 @@ class SqlalchemyBase(CommonSqlalchemyMetaMixins, Base):
          join_model: Optional[Base] = None,
          join_conditions: Optional[Union[Tuple, List]] = None,
          identifier_keys: Optional[List[str]] = None,
-         identifier_id: Optional[str] = None,
+         identity_id: Optional[str] = None,
          **kwargs,
      ) -> List["SqlalchemyBase"]:
          """
@@ -148,9 +148,9 @@ class SqlalchemyBase(CommonSqlalchemyMetaMixins, Base):
          if identifier_keys and hasattr(cls, "identities"):
              query = query.join(cls.identities).filter(cls.identities.property.mapper.class_.identifier_key.in_(identifier_keys))

-         # given the identifier_id, we can find within the agents table any agents that have the identifier_id in their identity_ids
-         if identifier_id and hasattr(cls, "identities"):
-             query = query.join(cls.identities).filter(cls.identities.property.mapper.class_.id == identifier_id)
+         # given the identity_id, we can find within the agents table any agents that have the identity_id in their identity_ids
+         if identity_id and hasattr(cls, "identities"):
+             query = query.join(cls.identities).filter(cls.identities.property.mapper.class_.id == identity_id)

          # Apply filtering logic from kwargs
          for key, value in kwargs.items():
letta/orm/step.py CHANGED
@@ -33,6 +33,7 @@ class Step(SqlalchemyBase):
      job_id: Mapped[Optional[str]] = mapped_column(
          ForeignKey("jobs.id", ondelete="SET NULL"), nullable=True, doc="The unique identifier of the job run that triggered this step"
      )
+     agent_id: Mapped[Optional[str]] = mapped_column(None, nullable=True, doc="The id of the agent that performed this step.")
      provider_name: Mapped[Optional[str]] = mapped_column(None, nullable=True, doc="The name of the provider used for this step.")
      model: Mapped[Optional[str]] = mapped_column(None, nullable=True, doc="The name of the model used for this step.")
      model_endpoint: Mapped[Optional[str]] = mapped_column(None, nullable=True, doc="The model endpoint url used for this step.")
letta/schemas/block.py CHANGED
@@ -1,6 +1,6 @@
  from typing import Optional

- from pydantic import BaseModel, Field, model_validator
+ from pydantic import Field, model_validator
  from typing_extensions import Self

  from letta.constants import CORE_MEMORY_BLOCK_CHAR_LIMIT
@@ -37,7 +37,8 @@ class BaseBlock(LettaBase, validate_assignment=True):

      @model_validator(mode="after")
      def verify_char_limit(self) -> Self:
-         if self.value and len(self.value) > self.limit:
+         # self.limit can be None (e.g. from BlockUpdate, where it now defaults to None)
+         if self.limit is not None and self.value and len(self.value) > self.limit:
              error_msg = f"Edit failed: Exceeds {self.limit} character limit (requested {len(self.value)}) - {str(self)}."
              raise ValueError(error_msg)

@@ -89,61 +90,16 @@ class Persona(Block):
      label: str = "persona"


- # class CreateBlock(BaseBlock):
- #     """Create a block"""
- #
- #     is_template: bool = True
- #     label: str = Field(..., description="Label of the block.")
-
-
- class BlockLabelUpdate(BaseModel):
-     """Update the label of a block"""
-
-     current_label: str = Field(..., description="Current label of the block.")
-     new_label: str = Field(..., description="New label of the block.")
-
-
- # class CreatePersona(CreateBlock):
- #     """Create a persona block"""
- #
- #     label: str = "persona"
- #
- #
- # class CreateHuman(CreateBlock):
- #     """Create a human block"""
- #
- #     label: str = "human"
-
-
  class BlockUpdate(BaseBlock):
      """Update a block"""

-     limit: Optional[int] = Field(CORE_MEMORY_BLOCK_CHAR_LIMIT, description="Character limit of the block.")
+     limit: Optional[int] = Field(None, description="Character limit of the block.")
      value: Optional[str] = Field(None, description="Value of the block.")

      class Config:
          extra = "ignore"  # Ignores extra fields


- class BlockLimitUpdate(BaseModel):
-     """Update the limit of a block"""
-
-     label: str = Field(..., description="Label of the block.")
-     limit: int = Field(..., description="New limit of the block.")
-
-
- # class UpdatePersona(BlockUpdate):
- #     """Update a persona block"""
- #
- #     label: str = "persona"
- #
- #
- # class UpdateHuman(BlockUpdate):
- #     """Update a human block"""
- #
- #     label: str = "human"
-
-
  class CreateBlock(BaseBlock):
      """Create a block"""

letta/schemas/identity.py CHANGED
@@ -46,6 +46,7 @@ class Identity(IdentityBase):
      identity_type: IdentityType = Field(..., description="The type of the identity.")
      project_id: Optional[str] = Field(None, description="The project id of the identity, if applicable.")
      agent_ids: List[str] = Field(..., description="The IDs of the agents associated with the identity.")
+     block_ids: List[str] = Field(..., description="The IDs of the blocks associated with the identity.")
      organization_id: Optional[str] = Field(None, description="The organization id of the user")
      properties: List[IdentityProperty] = Field(default_factory=list, description="List of properties associated with the identity")
@@ -56,6 +57,7 @@ class IdentityCreate(LettaBase):
      identity_type: IdentityType = Field(..., description="The type of the identity.")
      project_id: Optional[str] = Field(None, description="The project id of the identity, if applicable.")
      agent_ids: Optional[List[str]] = Field(None, description="The agent ids that are associated with the identity.")
+     block_ids: Optional[List[str]] = Field(None, description="The IDs of the blocks associated with the identity.")
      properties: Optional[List[IdentityProperty]] = Field(None, description="List of properties associated with the identity.")
@@ -64,4 +66,5 @@ class IdentityUpdate(LettaBase):
      name: Optional[str] = Field(None, description="The name of the identity.")
      identity_type: Optional[IdentityType] = Field(None, description="The type of the identity.")
      agent_ids: Optional[List[str]] = Field(None, description="The agent ids that are associated with the identity.")
+     block_ids: Optional[List[str]] = Field(None, description="The IDs of the blocks associated with the identity.")
      properties: Optional[List[IdentityProperty]] = Field(None, description="List of properties associated with the identity.")
letta/schemas/letta_message.py CHANGED
@@ -236,6 +236,32 @@ LettaMessageUnion = Annotated[
  ]


+ class UpdateSystemMessage(BaseModel):
+     content: Union[str, List[MessageContentUnion]]
+     message_type: Literal["system_message"] = "system_message"
+
+
+ class UpdateUserMessage(BaseModel):
+     content: Union[str, List[MessageContentUnion]]
+     message_type: Literal["user_message"] = "user_message"
+
+
+ class UpdateReasoningMessage(BaseModel):
+     reasoning: Union[str, List[MessageContentUnion]]
+     message_type: Literal["reasoning_message"] = "reasoning_message"
+
+
+ class UpdateAssistantMessage(BaseModel):
+     content: Union[str, List[MessageContentUnion]]
+     message_type: Literal["assistant_message"] = "assistant_message"
+
+
+ LettaMessageUpdateUnion = Annotated[
+     Union[UpdateSystemMessage, UpdateUserMessage, UpdateReasoningMessage, UpdateAssistantMessage],
+     Field(discriminator="message_type"),
+ ]
+
+
  def create_letta_message_union_schema():
      return {
          "oneOf": [