langchain-core 0.4.0.dev0__py3-none-any.whl → 1.0.0a1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of langchain-core might be problematic. Click here for more details.

Files changed (74) hide show
  1. langchain_core/_api/beta_decorator.py +2 -2
  2. langchain_core/_api/deprecation.py +1 -1
  3. langchain_core/beta/runnables/context.py +1 -1
  4. langchain_core/callbacks/base.py +14 -23
  5. langchain_core/callbacks/file.py +13 -2
  6. langchain_core/callbacks/manager.py +74 -157
  7. langchain_core/callbacks/streaming_stdout.py +3 -4
  8. langchain_core/callbacks/usage.py +2 -12
  9. langchain_core/chat_history.py +6 -6
  10. langchain_core/documents/base.py +1 -1
  11. langchain_core/documents/compressor.py +9 -6
  12. langchain_core/indexing/base.py +2 -2
  13. langchain_core/language_models/_utils.py +230 -101
  14. langchain_core/language_models/base.py +35 -23
  15. langchain_core/language_models/chat_models.py +245 -53
  16. langchain_core/language_models/fake_chat_models.py +28 -81
  17. langchain_core/load/dump.py +3 -4
  18. langchain_core/messages/__init__.py +38 -22
  19. langchain_core/messages/ai.py +188 -30
  20. langchain_core/messages/base.py +164 -25
  21. langchain_core/messages/block_translators/__init__.py +89 -0
  22. langchain_core/messages/block_translators/anthropic.py +451 -0
  23. langchain_core/messages/block_translators/bedrock.py +45 -0
  24. langchain_core/messages/block_translators/bedrock_converse.py +47 -0
  25. langchain_core/messages/block_translators/google_genai.py +45 -0
  26. langchain_core/messages/block_translators/google_vertexai.py +47 -0
  27. langchain_core/messages/block_translators/groq.py +45 -0
  28. langchain_core/messages/block_translators/langchain_v0.py +297 -0
  29. langchain_core/messages/block_translators/ollama.py +45 -0
  30. langchain_core/messages/block_translators/openai.py +586 -0
  31. langchain_core/messages/{content_blocks.py → content.py} +346 -213
  32. langchain_core/messages/human.py +29 -9
  33. langchain_core/messages/system.py +29 -9
  34. langchain_core/messages/tool.py +94 -13
  35. langchain_core/messages/utils.py +32 -234
  36. langchain_core/output_parsers/base.py +14 -50
  37. langchain_core/output_parsers/json.py +2 -5
  38. langchain_core/output_parsers/list.py +2 -7
  39. langchain_core/output_parsers/openai_functions.py +5 -28
  40. langchain_core/output_parsers/openai_tools.py +49 -90
  41. langchain_core/output_parsers/pydantic.py +2 -3
  42. langchain_core/output_parsers/transform.py +12 -53
  43. langchain_core/output_parsers/xml.py +9 -17
  44. langchain_core/prompt_values.py +8 -112
  45. langchain_core/prompts/chat.py +1 -3
  46. langchain_core/runnables/base.py +500 -451
  47. langchain_core/runnables/branch.py +1 -1
  48. langchain_core/runnables/fallbacks.py +4 -4
  49. langchain_core/runnables/history.py +1 -1
  50. langchain_core/runnables/passthrough.py +3 -3
  51. langchain_core/runnables/retry.py +1 -1
  52. langchain_core/runnables/router.py +1 -1
  53. langchain_core/structured_query.py +3 -7
  54. langchain_core/tools/base.py +14 -41
  55. langchain_core/tools/convert.py +2 -22
  56. langchain_core/tools/retriever.py +1 -8
  57. langchain_core/tools/structured.py +2 -10
  58. langchain_core/tracers/_streaming.py +6 -7
  59. langchain_core/tracers/base.py +7 -14
  60. langchain_core/tracers/core.py +4 -27
  61. langchain_core/tracers/event_stream.py +4 -15
  62. langchain_core/tracers/langchain.py +3 -14
  63. langchain_core/tracers/log_stream.py +2 -3
  64. langchain_core/utils/_merge.py +45 -7
  65. langchain_core/utils/function_calling.py +22 -9
  66. langchain_core/utils/utils.py +29 -0
  67. langchain_core/version.py +1 -1
  68. {langchain_core-0.4.0.dev0.dist-info → langchain_core-1.0.0a1.dist-info}/METADATA +7 -9
  69. {langchain_core-0.4.0.dev0.dist-info → langchain_core-1.0.0a1.dist-info}/RECORD +71 -64
  70. langchain_core/v1/__init__.py +0 -1
  71. langchain_core/v1/chat_models.py +0 -1047
  72. langchain_core/v1/messages.py +0 -755
  73. {langchain_core-0.4.0.dev0.dist-info → langchain_core-1.0.0a1.dist-info}/WHEEL +0 -0
  74. {langchain_core-0.4.0.dev0.dist-info → langchain_core-1.0.0a1.dist-info}/entry_points.txt +0 -0
@@ -4,16 +4,14 @@ import threading
4
4
  from collections.abc import Generator
5
5
  from contextlib import contextmanager
6
6
  from contextvars import ContextVar
7
- from typing import Any, Optional, Union
7
+ from typing import Any, Optional
8
8
 
9
9
  from typing_extensions import override
10
10
 
11
11
  from langchain_core.callbacks import BaseCallbackHandler
12
12
  from langchain_core.messages import AIMessage
13
13
  from langchain_core.messages.ai import UsageMetadata, add_usage
14
- from langchain_core.messages.utils import convert_from_v1_message
15
14
  from langchain_core.outputs import ChatGeneration, LLMResult
16
- from langchain_core.v1.messages import AIMessage as AIMessageV1
17
15
 
18
16
 
19
17
  class UsageMetadataCallbackHandler(BaseCallbackHandler):
@@ -60,17 +58,9 @@ class UsageMetadataCallbackHandler(BaseCallbackHandler):
60
58
  return str(self.usage_metadata)
61
59
 
62
60
  @override
63
- def on_llm_end(
64
- self, response: Union[LLMResult, AIMessageV1], **kwargs: Any
65
- ) -> None:
61
+ def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
66
62
  """Collect token usage."""
67
63
  # Check for usage_metadata (langchain-core >= 0.2.2)
68
- if isinstance(response, AIMessageV1):
69
- response = LLMResult(
70
- generations=[
71
- [ChatGeneration(message=convert_from_v1_message(response))]
72
- ]
73
- )
74
64
  try:
75
65
  generation = response.generations[0][0]
76
66
  except IndexError:
@@ -117,9 +117,9 @@ class BaseChatMessageHistory(ABC):
117
117
  def add_user_message(self, message: Union[HumanMessage, str]) -> None:
118
118
  """Convenience method for adding a human message string to the store.
119
119
 
120
- Please note that this is a convenience method. Code should favor the
121
- bulk add_messages interface instead to save on round-trips to the underlying
122
- persistence layer.
120
+ .. note::
121
+ This is a convenience method. Code should favor the bulk ``add_messages``
122
+ interface instead to save on round-trips to the persistence layer.
123
123
 
124
124
  This method may be deprecated in a future release.
125
125
 
@@ -134,9 +134,9 @@ class BaseChatMessageHistory(ABC):
134
134
  def add_ai_message(self, message: Union[AIMessage, str]) -> None:
135
135
  """Convenience method for adding an AI message string to the store.
136
136
 
137
- Please note that this is a convenience method. Code should favor the bulk
138
- add_messages interface instead to save on round-trips to the underlying
139
- persistence layer.
137
+ .. note::
138
+ This is a convenience method. Code should favor the bulk ``add_messages``
139
+ interface instead to save on round-trips to the persistence layer.
140
140
 
141
141
  This method may be deprecated in a future release.
142
142
 
@@ -277,7 +277,7 @@ class Document(BaseMedia):
277
277
  """Pass page_content in as positional or named arg."""
278
278
  # mypy is complaining that page_content is not defined on the base class.
279
279
  # Here, we're relying on pydantic base class to handle the validation.
280
- super().__init__(page_content=page_content, **kwargs) # type: ignore[call-arg]
280
+ super().__init__(page_content=page_content, **kwargs)
281
281
 
282
282
  @classmethod
283
283
  def is_lc_serializable(cls) -> bool:
@@ -19,17 +19,18 @@ if TYPE_CHECKING:
19
19
  class BaseDocumentCompressor(BaseModel, ABC):
20
20
  """Base class for document compressors.
21
21
 
22
- This abstraction is primarily used for
23
- post-processing of retrieved documents.
22
+ This abstraction is primarily used for post-processing of retrieved documents.
24
23
 
25
24
  Documents matching a given query are first retrieved.
25
+
26
26
  Then the list of documents can be further processed.
27
27
 
28
- For example, one could re-rank the retrieved documents
29
- using an LLM.
28
+ For example, one could re-rank the retrieved documents using an LLM.
29
+
30
+ .. note::
31
+ Users should favor using a RunnableLambda instead of sub-classing from this
32
+ interface.
30
33
 
31
- **Note** users should favor using a RunnableLambda
32
- instead of sub-classing from this interface.
33
34
  """
34
35
 
35
36
  @abstractmethod
@@ -48,6 +49,7 @@ class BaseDocumentCompressor(BaseModel, ABC):
48
49
 
49
50
  Returns:
50
51
  The compressed documents.
52
+
51
53
  """
52
54
 
53
55
  async def acompress_documents(
@@ -65,6 +67,7 @@ class BaseDocumentCompressor(BaseModel, ABC):
65
67
 
66
68
  Returns:
67
69
  The compressed documents.
70
+
68
71
  """
69
72
  return await run_in_executor(
70
73
  None, self.compress_documents, documents, query, callbacks
@@ -488,8 +488,8 @@ class DeleteResponse(TypedDict, total=False):
488
488
  failed: Sequence[str]
489
489
  """The IDs that failed to be deleted.
490
490
 
491
- Please note that deleting an ID that
492
- does not exist is **NOT** considered a failure.
491
+ .. warning::
492
+ Deleting an ID that does not exist is **NOT** considered a failure.
493
493
  """
494
494
 
495
495
  num_failed: int
@@ -1,14 +1,30 @@
1
- import copy
2
1
  import re
3
2
  from collections.abc import Sequence
4
- from typing import Optional
5
-
6
- from langchain_core.messages import BaseMessage
7
- from langchain_core.v1.messages import MessageV1
3
+ from typing import (
4
+ TYPE_CHECKING,
5
+ Literal,
6
+ Optional,
7
+ TypedDict,
8
+ TypeVar,
9
+ Union,
10
+ )
11
+
12
+ if TYPE_CHECKING:
13
+ from langchain_core.messages import BaseMessage
14
+ from langchain_core.messages.content import (
15
+ ContentBlock,
16
+ )
8
17
 
9
18
 
10
19
  def _is_openai_data_block(block: dict) -> bool:
11
- """Check if the block contains multimodal data in OpenAI Chat Completions format."""
20
+ """Check if the block contains multimodal data in OpenAI Chat Completions format.
21
+
22
+ Supports both data and ID-style blocks (e.g. ``'file_data'`` and ``'file_id'``).
23
+
24
+ If additional keys are present, they are ignored / will not affect outcome as long
25
+ as the required keys are present and valid.
26
+
27
+ """
12
28
  if block.get("type") == "image_url":
13
29
  if (
14
30
  (set(block.keys()) <= {"type", "image_url", "detail"})
@@ -17,29 +33,43 @@ def _is_openai_data_block(block: dict) -> bool:
17
33
  ):
18
34
  url = image_url.get("url")
19
35
  if isinstance(url, str):
36
+ # Required per OpenAI spec
37
+ return True
38
+ # Ignore `'detail'` since it's optional and specific to OpenAI
39
+
40
+ elif block.get("type") == "input_audio":
41
+ if (audio := block.get("input_audio")) and isinstance(audio, dict):
42
+ audio_data = audio.get("data")
43
+ audio_format = audio.get("format")
44
+ # Both required per OpenAI spec
45
+ if isinstance(audio_data, str) and isinstance(audio_format, str):
20
46
  return True
21
47
 
22
48
  elif block.get("type") == "file":
23
49
  if (file := block.get("file")) and isinstance(file, dict):
24
50
  file_data = file.get("file_data")
25
- if isinstance(file_data, str):
26
- return True
27
-
28
- elif block.get("type") == "input_audio":
29
- if (input_audio := block.get("input_audio")) and isinstance(input_audio, dict):
30
- audio_data = input_audio.get("data")
31
- audio_format = input_audio.get("format")
32
- if isinstance(audio_data, str) and isinstance(audio_format, str):
51
+ file_id = file.get("file_id")
52
+ # Files can be either base64-encoded or pre-uploaded with an ID
53
+ if isinstance(file_data, str) or isinstance(file_id, str):
33
54
  return True
34
55
 
35
56
  else:
36
57
  return False
37
58
 
59
+ # Has no `'type'` key
38
60
  return False
39
61
 
40
62
 
41
- def _parse_data_uri(uri: str) -> Optional[dict]:
42
- """Parse a data URI into its components. If parsing fails, return None.
63
+ class ParsedDataUri(TypedDict):
64
+ source_type: Literal["base64"]
65
+ data: str
66
+ mime_type: str
67
+
68
+
69
+ def _parse_data_uri(uri: str) -> Optional[ParsedDataUri]:
70
+ """Parse a data URI into its components.
71
+
72
+ If parsing fails, return None. If either MIME type or data is missing, return None.
43
73
 
44
74
  Example:
45
75
 
@@ -59,118 +89,217 @@ def _parse_data_uri(uri: str) -> Optional[dict]:
59
89
  match = re.match(regex, uri)
60
90
  if match is None:
61
91
  return None
92
+
93
+ mime_type = match.group("mime_type")
94
+ data = match.group("data")
95
+ if not mime_type or not data:
96
+ return None
97
+
62
98
  return {
63
99
  "source_type": "base64",
64
- "data": match.group("data"),
65
- "mime_type": match.group("mime_type"),
100
+ "data": data,
101
+ "mime_type": mime_type,
66
102
  }
67
103
 
68
104
 
69
- def _convert_openai_format_to_data_block(block: dict) -> dict:
70
- """Convert OpenAI image content block to standard data content block.
105
+ def _normalize_messages(
106
+ messages: Sequence["BaseMessage"],
107
+ ) -> list["BaseMessage"]:
108
+ """Normalize message formats to LangChain v1 standard content blocks.
71
109
 
72
- If parsing fails, pass-through.
110
+ Chat models already implement support for:
111
+ - Images in OpenAI Chat Completions format
112
+ These will be passed through unchanged
113
+ - LangChain v1 standard content blocks
73
114
 
74
- Args:
75
- block: The OpenAI image content block to convert.
115
+ This function extends support to:
116
+ - `Audio <https://platform.openai.com/docs/api-reference/chat/create>`__ and
117
+ `file <https://platform.openai.com/docs/api-reference/files>`__ data in OpenAI
118
+ Chat Completions format
119
+ - Images are technically supported but we expect chat models to handle them
120
+ directly; this may change in the future
121
+ - LangChain v0 standard content blocks for backward compatibility
76
122
 
77
- Returns:
78
- The converted standard data content block.
79
- """
80
- if block["type"] == "image_url":
81
- parsed = _parse_data_uri(block["image_url"]["url"])
82
- if parsed is not None:
83
- parsed["type"] = "image"
84
- return parsed
85
- return block
86
-
87
- if block["type"] == "file":
88
- parsed = _parse_data_uri(block["file"]["file_data"])
89
- if parsed is not None:
90
- parsed["type"] = "file"
91
- if filename := block["file"].get("filename"):
92
- parsed["filename"] = filename
93
- return parsed
94
- return block
95
-
96
- if block["type"] == "input_audio":
97
- data = block["input_audio"].get("data")
98
- audio_format = block["input_audio"].get("format")
99
- if data and audio_format:
100
- return {
101
- "type": "audio",
102
- "source_type": "base64",
103
- "data": data,
104
- "mime_type": f"audio/{audio_format}",
123
+ .. versionchanged:: 1.0.0
124
+ In previous versions, this function returned messages in LangChain v0 format.
125
+ Now, it returns messages in LangChain v1 format, which upgraded chat models now
126
+ expect to receive when passing back in message history. For backward
127
+ compatibility, this function will convert v0 message content to v1 format.
128
+
129
+ .. dropdown:: v0 Content Block Schemas
130
+
131
+ ``URLContentBlock``:
132
+
133
+ .. code-block::
134
+
135
+ {
136
+ mime_type: NotRequired[str]
137
+ type: Literal['image', 'audio', 'file'],
138
+ source_type: Literal['url'],
139
+ url: str,
105
140
  }
106
- return block
107
141
 
108
- return block
142
+ ``Base64ContentBlock``:
143
+
144
+ .. code-block::
145
+
146
+ {
147
+ mime_type: NotRequired[str]
148
+ type: Literal['image', 'audio', 'file'],
149
+ source_type: Literal['base64'],
150
+ data: str,
151
+ }
109
152
 
153
+ ``IDContentBlock``:
110
154
 
111
- def _normalize_messages(messages: Sequence[BaseMessage]) -> list[BaseMessage]:
112
- """Extend support for message formats.
155
+ (In practice, this was never used)
156
+
157
+ .. code-block::
158
+
159
+ {
160
+ type: Literal['image', 'audio', 'file'],
161
+ source_type: Literal['id'],
162
+ id: str,
163
+ }
164
+
165
+ ``PlainTextContentBlock``:
166
+
167
+ .. code-block::
168
+
169
+ {
170
+ mime_type: NotRequired[str]
171
+ type: Literal['file'],
172
+ source_type: Literal['text'],
173
+ text: str,
174
+ }
175
+
176
+ If a v1 message is passed in, it will be returned as-is, meaning it is safe to
177
+ always pass in v1 messages to this function for assurance.
178
+
179
+ For posterity, here are the OpenAI Chat Completions schemas we expect:
180
+
181
+ Chat Completions image. Can be URL-based or base64-encoded. Supports MIME types
182
+ png, jpeg/jpg, webp, static gif:
183
+ {
184
+ "type": Literal['image_url'],
185
+ "image_url": {
186
+ "url": Union["data:$MIME_TYPE;base64,$BASE64_ENCODED_IMAGE", "$IMAGE_URL"],
187
+ "detail": Literal['low', 'high', 'auto'] = 'auto', # Supported by OpenAI
188
+ }
189
+ }
190
+
191
+ Chat Completions audio:
192
+ {
193
+ "type": Literal['input_audio'],
194
+ "input_audio": {
195
+ "format": Literal['wav', 'mp3'],
196
+ "data": str = "$BASE64_ENCODED_AUDIO",
197
+ },
198
+ }
199
+
200
+ Chat Completions files: either base64 or pre-uploaded file ID
201
+ {
202
+ "type": Literal['file'],
203
+ "file": Union[
204
+ {
205
+ "filename": Optional[str] = "$FILENAME",
206
+ "file_data": str = "$BASE64_ENCODED_FILE",
207
+ },
208
+ {
209
+ "file_id": str = "$FILE_ID", # For pre-uploaded files to OpenAI
210
+ },
211
+ ],
212
+ }
113
213
 
114
- Chat models implement support for images in OpenAI Chat Completions format, as well
115
- as other multimodal data as standard data blocks. This function extends support to
116
- audio and file data in OpenAI Chat Completions format by converting them to standard
117
- data blocks.
118
214
  """
215
+ from langchain_core.messages.block_translators.langchain_v0 import (
216
+ _convert_legacy_v0_content_block_to_v1,
217
+ _convert_openai_format_to_data_block,
218
+ )
219
+
119
220
  formatted_messages = []
120
221
  for message in messages:
222
+ # We preserve input messages - the caller may reuse them elsewhere and expects
223
+ # them to remain unchanged. We only create a copy if we need to translate.
121
224
  formatted_message = message
225
+
122
226
  if isinstance(message.content, list):
123
227
  for idx, block in enumerate(message.content):
228
+ # OpenAI Chat Completions multimodal data blocks to v1 standard
124
229
  if (
125
230
  isinstance(block, dict)
126
- # Subset to (PDF) files and audio, as most relevant chat models
127
- # support images in OAI format (and some may not yet support the
128
- # standard data block format)
129
- and block.get("type") in {"file", "input_audio"}
231
+ and block.get("type") in {"input_audio", "file"}
232
+ # Discriminate between OpenAI/LC format since they share `'type'`
130
233
  and _is_openai_data_block(block)
131
234
  ):
132
- if formatted_message is message:
133
- formatted_message = message.model_copy()
134
- # Also shallow-copy content
135
- formatted_message.content = list(formatted_message.content)
136
-
137
- formatted_message.content[idx] = ( # type: ignore[index] # mypy confused by .model_copy
138
- _convert_openai_format_to_data_block(block)
139
- )
140
- formatted_messages.append(formatted_message)
141
-
142
- return formatted_messages
235
+ formatted_message = _ensure_message_copy(message, formatted_message)
143
236
 
237
+ converted_block = _convert_openai_format_to_data_block(block)
238
+ _update_content_block(formatted_message, idx, converted_block)
144
239
 
145
- def _normalize_messages_v1(messages: Sequence[MessageV1]) -> list[MessageV1]:
146
- """Extend support for message formats.
147
-
148
- Chat models implement support for images in OpenAI Chat Completions format, as well
149
- as other multimodal data as standard data blocks. This function extends support to
150
- audio and file data in OpenAI Chat Completions format by converting them to standard
151
- data blocks.
152
- """
153
- formatted_messages = []
154
- for message in messages:
155
- formatted_message = message
156
- if isinstance(message.content, list):
157
- for idx, block in enumerate(message.content):
158
- if (
240
+ # Convert multimodal LangChain v0 to v1 standard content blocks
241
+ elif (
159
242
  isinstance(block, dict)
160
- # Subset to (PDF) files and audio, as most relevant chat models
161
- # support images in OAI format (and some may not yet support the
162
- # standard data block format)
163
- and block.get("type") in {"file", "input_audio"}
164
- and _is_openai_data_block(block) # type: ignore[arg-type]
243
+ and block.get("type")
244
+ in {
245
+ "image",
246
+ "audio",
247
+ "file",
248
+ }
249
+ and block.get("source_type") # v1 doesn't have `source_type`
250
+ in {
251
+ "url",
252
+ "base64",
253
+ "id",
254
+ "text",
255
+ }
165
256
  ):
166
- if formatted_message is message:
167
- formatted_message = copy.copy(message)
168
- # Also shallow-copy content
169
- formatted_message.content = list(formatted_message.content)
170
-
171
- formatted_message.content[idx] = ( # type: ignore[call-overload]
172
- _convert_openai_format_to_data_block(block) # type: ignore[arg-type]
173
- )
257
+ formatted_message = _ensure_message_copy(message, formatted_message)
258
+
259
+ converted_block = _convert_legacy_v0_content_block_to_v1(block)
260
+ _update_content_block(formatted_message, idx, converted_block)
261
+ continue
262
+
263
+ # else, pass through blocks that look like they have v1 format unchanged
264
+
174
265
  formatted_messages.append(formatted_message)
175
266
 
176
267
  return formatted_messages
268
+
269
+
270
+ T = TypeVar("T", bound="BaseMessage")
271
+
272
+
273
+ def _ensure_message_copy(message: T, formatted_message: T) -> T:
274
+ """Create a copy of the message if it hasn't been copied yet."""
275
+ if formatted_message is message:
276
+ formatted_message = message.model_copy()
277
+ # Shallow-copy content list to allow modifications
278
+ formatted_message.content = list(formatted_message.content)
279
+ return formatted_message
280
+
281
+
282
+ def _update_content_block(
283
+ formatted_message: "BaseMessage", idx: int, new_block: Union[ContentBlock, dict]
284
+ ) -> None:
285
+ """Update a content block at the given index, handling type issues."""
286
+ # Type ignore needed because:
287
+ # - `BaseMessage.content` is typed as `Union[str, list[Union[str, dict]]]`
288
+ # - When content is str, indexing fails (index error)
289
+ # - When content is list, the items are `Union[str, dict]` but we're assigning
290
+ # `Union[ContentBlock, dict]` where ContentBlock is richer than dict
291
+ # - This is safe because we only call this when we've verified content is a list and
292
+ # we're doing content block conversions
293
+ formatted_message.content[idx] = new_block # type: ignore[index, assignment]
294
+
295
+
296
+ def _update_message_content_to_blocks(message: T, output_version: str) -> T:
297
+ return message.model_copy(
298
+ update={
299
+ "content": message.content_blocks,
300
+ "response_metadata": {
301
+ **message.response_metadata,
302
+ "output_version": output_version,
303
+ },
304
+ }
305
+ )