noesium 0.1.0__py3-none-any.whl → 0.2.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- noesium/agents/askura_agent/__init__.py +22 -0
- noesium/agents/askura_agent/askura_agent.py +480 -0
- noesium/agents/askura_agent/conversation.py +164 -0
- noesium/agents/askura_agent/extractor.py +175 -0
- noesium/agents/askura_agent/memory.py +14 -0
- noesium/agents/askura_agent/models.py +239 -0
- noesium/agents/askura_agent/prompts.py +202 -0
- noesium/agents/askura_agent/reflection.py +234 -0
- noesium/agents/askura_agent/summarizer.py +30 -0
- noesium/agents/askura_agent/utils.py +6 -0
- noesium/agents/deep_research/__init__.py +13 -0
- noesium/agents/deep_research/agent.py +398 -0
- noesium/agents/deep_research/prompts.py +84 -0
- noesium/agents/deep_research/schemas.py +42 -0
- noesium/agents/deep_research/state.py +54 -0
- noesium/agents/search/__init__.py +5 -0
- noesium/agents/search/agent.py +474 -0
- noesium/agents/search/state.py +28 -0
- noesium/core/__init__.py +1 -1
- noesium/core/agent/base.py +10 -2
- noesium/core/goalith/decomposer/llm_decomposer.py +1 -1
- noesium/core/llm/__init__.py +1 -1
- noesium/core/llm/base.py +2 -2
- noesium/core/llm/litellm.py +42 -21
- noesium/core/llm/llamacpp.py +25 -4
- noesium/core/llm/ollama.py +43 -22
- noesium/core/llm/openai.py +25 -5
- noesium/core/llm/openrouter.py +1 -1
- noesium/core/toolify/base.py +9 -2
- noesium/core/toolify/config.py +2 -2
- noesium/core/toolify/registry.py +21 -5
- noesium/core/tracing/opik_tracing.py +7 -7
- noesium/core/vector_store/__init__.py +2 -2
- noesium/core/vector_store/base.py +1 -1
- noesium/core/vector_store/pgvector.py +10 -13
- noesium/core/vector_store/weaviate.py +2 -1
- noesium/toolkits/__init__.py +1 -0
- noesium/toolkits/arxiv_toolkit.py +310 -0
- noesium/toolkits/audio_aliyun_toolkit.py +441 -0
- noesium/toolkits/audio_toolkit.py +370 -0
- noesium/toolkits/bash_toolkit.py +332 -0
- noesium/toolkits/document_toolkit.py +454 -0
- noesium/toolkits/file_edit_toolkit.py +552 -0
- noesium/toolkits/github_toolkit.py +395 -0
- noesium/toolkits/gmail_toolkit.py +575 -0
- noesium/toolkits/image_toolkit.py +425 -0
- noesium/toolkits/memory_toolkit.py +398 -0
- noesium/toolkits/python_executor_toolkit.py +334 -0
- noesium/toolkits/search_toolkit.py +451 -0
- noesium/toolkits/serper_toolkit.py +623 -0
- noesium/toolkits/tabular_data_toolkit.py +537 -0
- noesium/toolkits/user_interaction_toolkit.py +365 -0
- noesium/toolkits/video_toolkit.py +168 -0
- noesium/toolkits/wikipedia_toolkit.py +420 -0
- noesium-0.2.1.dist-info/METADATA +253 -0
- {noesium-0.1.0.dist-info → noesium-0.2.1.dist-info}/RECORD +59 -23
- {noesium-0.1.0.dist-info → noesium-0.2.1.dist-info}/licenses/LICENSE +1 -1
- noesium-0.1.0.dist-info/METADATA +0 -525
- {noesium-0.1.0.dist-info → noesium-0.2.1.dist-info}/WHEEL +0 -0
- {noesium-0.1.0.dist-info → noesium-0.2.1.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,425 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Image analysis toolkit for visual understanding and processing.
|
|
3
|
+
|
|
4
|
+
Provides tools for image analysis, visual question answering, and image processing
|
|
5
|
+
using OpenAI's vision models and PIL for image manipulation.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import base64
|
|
9
|
+
import os
|
|
10
|
+
from io import BytesIO
|
|
11
|
+
from typing import Callable, Dict
|
|
12
|
+
from urllib.parse import urlparse
|
|
13
|
+
|
|
14
|
+
import aiohttp
|
|
15
|
+
|
|
16
|
+
from noesium.core.toolify.base import AsyncBaseToolkit
|
|
17
|
+
from noesium.core.toolify.config import ToolkitConfig
|
|
18
|
+
from noesium.core.toolify.registry import register_toolkit
|
|
19
|
+
from noesium.core.utils.logging import get_logger
|
|
20
|
+
|
|
21
|
+
logger = get_logger(__name__)

# Pillow is an optional dependency. Record its availability here so that
# ImageToolkit.__init__ can raise a helpful ImportError with install
# instructions instead of this module failing at import time.
try:
    from PIL import Image

    PIL_AVAILABLE = True
except ImportError:
    Image = None
    PIL_AVAILABLE = False
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
@register_toolkit("image")
class ImageToolkit(AsyncBaseToolkit):
    """
    Toolkit for image analysis and visual understanding.

    This toolkit provides capabilities for:
    - Visual question answering using OpenAI's vision models
    - Image analysis and description generation
    - Image format conversion and processing
    - Support for both local files and URLs
    - Batch image processing

    Features:
    - Automatic image format conversion to RGB
    - Base64 encoding for API compatibility
    - URL and local file support
    - Comprehensive error handling
    - Configurable image quality and size limits

    Required dependencies:
    - PIL (Pillow) for image processing
    - OpenAI API access for vision capabilities
    """

    def __init__(self, config: ToolkitConfig = None):
        """
        Initialize the image toolkit.

        Args:
            config: Toolkit configuration containing API keys and settings

        Raises:
            ImportError: If PIL (Pillow) is not installed
        """
        super().__init__(config)

        if not PIL_AVAILABLE:
            raise ImportError("PIL (Pillow) is required for ImageToolkit. " "Install with: pip install Pillow")

        # Configuration. NOTE: values loaded from JSON/YAML configs typically
        # deserialize sequences as lists, not tuples; _resize_image normalizes
        # whatever shape max_image_size arrives in.
        self.max_image_size = self.config.config.get("max_image_size", (1024, 1024))
        self.image_quality = self.config.config.get("image_quality", 85)
        self.supported_formats = self.config.config.get(
            "supported_formats", ["JPEG", "PNG", "GIF", "BMP", "TIFF", "WEBP"]
        )

    async def _load_image_from_url(self, url: str) -> Image.Image:
        """
        Load an image from a URL.

        Args:
            url: Image URL

        Returns:
            PIL Image object, converted to RGB mode

        Raises:
            ValueError: If the URL does not serve an image content type
            Exception: Network/HTTP/decoding errors are logged and re-raised
        """
        self.logger.info(f"Loading image from URL: {url}")

        try:
            async with aiohttp.ClientSession() as session:
                # FIX: aiohttp's `timeout` parameter expects a ClientTimeout
                # settings object; the bare int previously passed here relied
                # on deprecated behavior.
                async with session.get(url, timeout=aiohttp.ClientTimeout(total=30)) as response:
                    response.raise_for_status()

                    # Reject non-image responses early with a clear error
                    # instead of letting PIL fail on arbitrary bytes.
                    content_type = response.headers.get("content-type", "")
                    if not content_type.startswith("image/"):
                        raise ValueError(f"URL does not point to an image: {content_type}")

                    # Read fully into memory so the image is independent of
                    # the (about to close) HTTP session.
                    image_data = await response.read()
                    image = Image.open(BytesIO(image_data))

                    # Normalize to RGB so downstream JPEG encoding always works
                    # (JPEG cannot store alpha/palette modes).
                    if image.mode != "RGB":
                        image = image.convert("RGB")

                    return image

        except Exception as e:
            self.logger.error(f"Failed to load image from URL: {e}")
            raise

    def _load_image_from_file(self, file_path: str) -> Image.Image:
        """
        Load an image from a local file.

        Args:
            file_path: Path to image file

        Returns:
            PIL Image object, converted to RGB mode

        Raises:
            FileNotFoundError: If the file does not exist
            Exception: PIL decoding errors are logged and re-raised
        """
        self.logger.info(f"Loading image from file: {file_path}")

        try:
            if not os.path.exists(file_path):
                raise FileNotFoundError(f"Image file not found: {file_path}")

            image = Image.open(file_path)

            # Normalize to RGB so downstream JPEG encoding always works.
            if image.mode != "RGB":
                image = image.convert("RGB")

            return image

        except Exception as e:
            self.logger.error(f"Failed to load image from file: {e}")
            raise

    async def _load_image(self, image_path: str) -> Image.Image:
        """
        Load an image from either a URL or local file path.

        Args:
            image_path: URL or local file path to image

        Returns:
            PIL Image object in RGB mode
        """
        parsed = urlparse(image_path)

        if parsed.scheme in ("http", "https"):
            return await self._load_image_from_url(image_path)
        else:
            return self._load_image_from_file(image_path)

    def _resize_image(self, image: Image.Image, max_size: tuple = None) -> Image.Image:
        """
        Resize image if it exceeds maximum dimensions.

        Args:
            image: PIL Image object
            max_size: Maximum (width, height) pair (tuple or list) or a single
                integer applied to both dimensions; defaults to
                ``self.max_image_size``

        Returns:
            Resized PIL Image object (or the original if already small enough)
        """
        max_size = max_size or self.max_image_size

        # Normalize max_size: a single int caps both dimensions; any other
        # sequence (e.g. a list from a JSON-loaded config) is coerced to a
        # tuple. FIX: the previous code wrapped every non-tuple as
        # (value, value), which turned a list into a tuple of lists and then
        # crashed on the int-vs-list size comparison below.
        if isinstance(max_size, int):
            max_size = (max_size, max_size)
        else:
            max_size = tuple(max_size)

        if image.size[0] <= max_size[0] and image.size[1] <= max_size[1]:
            return image

        # Calculate new size maintaining aspect ratio: scale by the tighter
        # of the two dimension constraints.
        ratio = min(max_size[0] / image.size[0], max_size[1] / image.size[1])
        new_size = (int(image.size[0] * ratio), int(image.size[1] * ratio))

        self.logger.info(f"Resizing image from {image.size} to {new_size}")
        return image.resize(new_size, Image.Resampling.LANCZOS)

    def _image_to_base64(self, image: Image.Image, format: str = "JPEG") -> str:
        """
        Convert PIL Image to base64 string.

        The image is resized to ``self.max_image_size`` first to keep the
        encoded payload within vision-API limits.

        Args:
            image: PIL Image object
            format: Output format (JPEG, PNG, etc.)

        Returns:
            Base64 encoded image string
        """
        buffer = BytesIO()

        # Resize if necessary
        image = self._resize_image(image)

        # JPEG is lossy, so apply the configured quality and optimization;
        # other formats take no extra save options here.
        save_kwargs = {}
        if format.upper() == "JPEG":
            save_kwargs["quality"] = self.image_quality
            save_kwargs["optimize"] = True

        image.save(buffer, format=format.upper(), **save_kwargs)

        # Encode to base64
        base64_image = base64.b64encode(buffer.getvalue()).decode("utf-8")

        self.logger.debug(f"Converted image to base64 ({len(base64_image)} chars)")
        return base64_image

    async def analyze_image(
        self, image_path: str, prompt: str = "Describe this image in detail.", max_tokens: int = 500
    ) -> str:
        """
        Analyze an image using OpenAI's vision model.

        This tool uses advanced vision models to analyze and describe images.
        It can answer questions about image content, identify objects, read text,
        describe scenes, and provide detailed visual analysis.

        Capabilities:
        - Object and scene recognition
        - Text extraction (OCR)
        - Facial expression analysis
        - Color and composition analysis
        - Artistic style identification
        - Technical image assessment

        Args:
            image_path: Path to local image file or URL to image
            prompt: Question or instruction about the image analysis
            max_tokens: Maximum tokens in the response

        Returns:
            Detailed analysis or answer based on the image and prompt.
            NOTE: on failure this returns the error message as a string rather
            than raising, so tool callers always receive text.

        Examples:
            - "What objects can you see in this image?"
            - "Read any text visible in this image"
            - "Describe the mood and atmosphere of this scene"
            - "What colors dominate this image?"
            - "Is this image suitable for a professional presentation?"
        """
        self.logger.info(f"Analyzing image: {image_path}")
        self.logger.info(f"Prompt: {prompt}")

        try:
            # Load and process the image
            image = await self._load_image(image_path)
            base64_image = self._image_to_base64(image)

            # Prepare the vision API request (OpenAI-style multimodal content
            # with an inline data URL).
            messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": prompt},
                        {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"}},
                    ],
                }
            ]

            # Use the LLM client for vision analysis; low temperature keeps
            # descriptions factual and reproducible.
            response = await self.llm_client.completion(messages=messages, max_tokens=max_tokens, temperature=0.1)

            self.logger.info("Image analysis completed successfully")
            return response.strip()

        except Exception as e:
            error_msg = f"Image analysis failed: {str(e)}"
            self.logger.error(error_msg)
            return error_msg

    async def extract_text_from_image(self, image_path: str) -> str:
        """
        Extract text content from an image using OCR capabilities.

        This tool specializes in reading and extracting text from images,
        including documents, screenshots, signs, and handwritten content.

        Args:
            image_path: Path to local image file or URL to image

        Returns:
            Extracted text content from the image
        """
        prompt = """Please extract all visible text from this image.

Instructions:
- Transcribe all text exactly as it appears
- Maintain the original formatting and line breaks where possible
- If text is unclear or partially obscured, indicate with [unclear]
- Include any numbers, symbols, or special characters
- If no text is visible, respond with "No text detected"

Extracted text:"""

        return await self.analyze_image(image_path, prompt, max_tokens=1000)

    async def describe_image(self, image_path: str, detail_level: str = "medium") -> str:
        """
        Generate a comprehensive description of an image.

        Args:
            image_path: Path to local image file or URL to image
            detail_level: Level of detail - "brief", "medium", or "detailed"
                (unknown values fall back to "medium")

        Returns:
            Description of the image content
        """
        prompts = {
            "brief": "Provide a brief, one-sentence description of this image.",
            "medium": "Describe this image in detail, including the main subjects, setting, colors, and overall composition.",
            "detailed": """Provide a comprehensive analysis of this image including:
- Main subjects and their positions
- Setting and environment
- Colors, lighting, and mood
- Composition and artistic elements
- Any text or symbols visible
- Technical quality and style
- Overall impression and context""",
        }

        prompt = prompts.get(detail_level, prompts["medium"])
        # Token budget scales with the requested detail level.
        max_tokens = {"brief": 100, "medium": 300, "detailed": 600}.get(detail_level, 300)

        return await self.analyze_image(image_path, prompt, max_tokens)

    async def compare_images(self, image_path1: str, image_path2: str) -> str:
        """
        Compare two images and describe their similarities and differences.

        Args:
            image_path1: Path to first image
            image_path2: Path to second image

        Returns:
            Comparison analysis of the two images
        """
        # Note: This would require a multi-image capable model
        # For now, we'll analyze each image separately and provide a comparison

        desc1 = await self.describe_image(image_path1, "medium")
        desc2 = await self.describe_image(image_path2, "medium")

        comparison_prompt = f"""Based on these two image descriptions, provide a comparison analysis:

Image 1: {desc1}

Image 2: {desc2}

Please compare and contrast these images, highlighting:
- Similarities in content, style, or composition
- Key differences in subjects, colors, or mood
- Which image might be more suitable for different purposes
- Overall relationship between the images"""

        response = await self.llm_client.completion(
            messages=[{"role": "user", "content": comparison_prompt}], max_tokens=400, temperature=0.1
        )

        return response.strip()

    async def get_image_info(self, image_path: str) -> Dict:
        """
        Get technical information about an image file.

        Args:
            image_path: Path to local image file or URL to image

        Returns:
            Dictionary with image metadata and technical information, or an
            {"error": ...} dictionary on failure
        """
        try:
            image = await self._load_image(image_path)

            # Get basic image info.
            # NOTE(review): _load_image converts to RGB, and Image.convert()
            # returns a new image whose .format is None, so "format" usually
            # reports "Unknown" here — confirm whether the pre-conversion
            # format should be captured instead.
            info = {
                "path": image_path,
                "format": image.format or "Unknown",
                "mode": image.mode,
                "size": image.size,
                "width": image.size[0],
                "height": image.size[1],
                "aspect_ratio": round(image.size[0] / image.size[1], 2),
            }

            # Add file size for local files.
            # NOTE(review): Windows drive paths like "C:\\img.png" parse with
            # scheme "c", so they skip this branch — verify on Windows.
            if not urlparse(image_path).scheme:
                if os.path.exists(image_path):
                    file_size = os.path.getsize(image_path)
                    info["file_size_bytes"] = file_size
                    info["file_size_mb"] = round(file_size / (1024 * 1024), 2)

            # Add additional metadata if available
            if hasattr(image, "info"):
                info["metadata"] = dict(image.info)

            return info

        except Exception as e:
            return {"error": f"Failed to get image info: {str(e)}"}

    async def get_tools_map(self) -> Dict[str, Callable]:
        """
        Get the mapping of tool names to their implementation functions.

        Returns:
            Dictionary mapping tool names to callable functions
        """
        return {
            "analyze_image": self.analyze_image,
            "extract_text_from_image": self.extract_text_from_image,
            "describe_image": self.describe_image,
            "compare_images": self.compare_images,
            "get_image_info": self.get_image_info,
        }