fast-agent-mcp 0.1.13 → 0.2.0 (py3-none-any.whl)

This diff shows the content of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Files changed (147)
  1. {fast_agent_mcp-0.1.13.dist-info → fast_agent_mcp-0.2.0.dist-info}/METADATA +3 -4
  2. fast_agent_mcp-0.2.0.dist-info/RECORD +123 -0
  3. mcp_agent/__init__.py +75 -0
  4. mcp_agent/agents/agent.py +59 -371
  5. mcp_agent/agents/base_agent.py +522 -0
  6. mcp_agent/agents/workflow/__init__.py +1 -0
  7. mcp_agent/agents/workflow/chain_agent.py +173 -0
  8. mcp_agent/agents/workflow/evaluator_optimizer.py +362 -0
  9. mcp_agent/agents/workflow/orchestrator_agent.py +591 -0
  10. mcp_agent/{workflows/orchestrator → agents/workflow}/orchestrator_models.py +27 -11
  11. mcp_agent/agents/workflow/parallel_agent.py +182 -0
  12. mcp_agent/agents/workflow/router_agent.py +307 -0
  13. mcp_agent/app.py +3 -1
  14. mcp_agent/cli/commands/bootstrap.py +18 -7
  15. mcp_agent/cli/commands/setup.py +12 -4
  16. mcp_agent/cli/main.py +1 -1
  17. mcp_agent/cli/terminal.py +1 -1
  18. mcp_agent/config.py +24 -35
  19. mcp_agent/context.py +3 -1
  20. mcp_agent/context_dependent.py +3 -1
  21. mcp_agent/core/agent_types.py +10 -7
  22. mcp_agent/core/direct_agent_app.py +179 -0
  23. mcp_agent/core/direct_decorators.py +443 -0
  24. mcp_agent/core/direct_factory.py +476 -0
  25. mcp_agent/core/enhanced_prompt.py +15 -20
  26. mcp_agent/core/fastagent.py +151 -337
  27. mcp_agent/core/interactive_prompt.py +424 -0
  28. mcp_agent/core/mcp_content.py +19 -11
  29. mcp_agent/core/prompt.py +6 -2
  30. mcp_agent/core/validation.py +89 -16
  31. mcp_agent/executor/decorator_registry.py +6 -2
  32. mcp_agent/executor/temporal.py +35 -11
  33. mcp_agent/executor/workflow_signal.py +8 -2
  34. mcp_agent/human_input/handler.py +3 -1
  35. mcp_agent/llm/__init__.py +2 -0
  36. mcp_agent/{workflows/llm → llm}/augmented_llm.py +131 -256
  37. mcp_agent/{workflows/llm → llm}/augmented_llm_passthrough.py +35 -107
  38. mcp_agent/llm/augmented_llm_playback.py +83 -0
  39. mcp_agent/{workflows/llm → llm}/model_factory.py +26 -8
  40. mcp_agent/llm/providers/__init__.py +8 -0
  41. mcp_agent/{workflows/llm → llm/providers}/anthropic_utils.py +5 -1
  42. mcp_agent/{workflows/llm → llm/providers}/augmented_llm_anthropic.py +37 -141
  43. mcp_agent/llm/providers/augmented_llm_deepseek.py +53 -0
  44. mcp_agent/{workflows/llm → llm/providers}/augmented_llm_openai.py +112 -148
  45. mcp_agent/{workflows/llm → llm}/providers/multipart_converter_anthropic.py +78 -35
  46. mcp_agent/{workflows/llm → llm}/providers/multipart_converter_openai.py +73 -44
  47. mcp_agent/{workflows/llm → llm}/providers/openai_multipart.py +18 -4
  48. mcp_agent/{workflows/llm → llm/providers}/openai_utils.py +3 -3
  49. mcp_agent/{workflows/llm → llm}/providers/sampling_converter_anthropic.py +3 -3
  50. mcp_agent/{workflows/llm → llm}/providers/sampling_converter_openai.py +3 -3
  51. mcp_agent/{workflows/llm → llm}/sampling_converter.py +0 -21
  52. mcp_agent/{workflows/llm → llm}/sampling_format_converter.py +16 -1
  53. mcp_agent/logging/logger.py +2 -2
  54. mcp_agent/mcp/gen_client.py +9 -3
  55. mcp_agent/mcp/interfaces.py +67 -45
  56. mcp_agent/mcp/logger_textio.py +97 -0
  57. mcp_agent/mcp/mcp_agent_client_session.py +12 -4
  58. mcp_agent/mcp/mcp_agent_server.py +3 -1
  59. mcp_agent/mcp/mcp_aggregator.py +124 -93
  60. mcp_agent/mcp/mcp_connection_manager.py +21 -7
  61. mcp_agent/mcp/prompt_message_multipart.py +59 -1
  62. mcp_agent/mcp/prompt_render.py +77 -0
  63. mcp_agent/mcp/prompt_serialization.py +20 -13
  64. mcp_agent/mcp/prompts/prompt_constants.py +18 -0
  65. mcp_agent/mcp/prompts/prompt_helpers.py +327 -0
  66. mcp_agent/mcp/prompts/prompt_load.py +15 -5
  67. mcp_agent/mcp/prompts/prompt_server.py +154 -87
  68. mcp_agent/mcp/prompts/prompt_template.py +26 -35
  69. mcp_agent/mcp/resource_utils.py +3 -1
  70. mcp_agent/mcp/sampling.py +24 -15
  71. mcp_agent/mcp_server/agent_server.py +8 -5
  72. mcp_agent/mcp_server_registry.py +22 -9
  73. mcp_agent/resources/examples/{workflows → in_dev}/agent_build.py +1 -1
  74. mcp_agent/resources/examples/{data-analysis → in_dev}/slides.py +1 -1
  75. mcp_agent/resources/examples/internal/agent.py +4 -2
  76. mcp_agent/resources/examples/internal/fastagent.config.yaml +8 -2
  77. mcp_agent/resources/examples/prompting/image_server.py +3 -1
  78. mcp_agent/resources/examples/prompting/work_with_image.py +19 -0
  79. mcp_agent/ui/console_display.py +27 -7
  80. fast_agent_mcp-0.1.13.dist-info/RECORD +0 -164
  81. mcp_agent/core/agent_app.py +0 -570
  82. mcp_agent/core/agent_utils.py +0 -69
  83. mcp_agent/core/decorators.py +0 -448
  84. mcp_agent/core/factory.py +0 -422
  85. mcp_agent/core/proxies.py +0 -278
  86. mcp_agent/core/types.py +0 -22
  87. mcp_agent/eval/__init__.py +0 -0
  88. mcp_agent/mcp/stdio.py +0 -114
  89. mcp_agent/resources/examples/data-analysis/analysis-campaign.py +0 -188
  90. mcp_agent/resources/examples/data-analysis/analysis.py +0 -65
  91. mcp_agent/resources/examples/data-analysis/fastagent.config.yaml +0 -41
  92. mcp_agent/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +0 -1471
  93. mcp_agent/resources/examples/mcp_researcher/researcher-eval.py +0 -53
  94. mcp_agent/resources/examples/researcher/fastagent.config.yaml +0 -66
  95. mcp_agent/resources/examples/researcher/researcher-eval.py +0 -53
  96. mcp_agent/resources/examples/researcher/researcher-imp.py +0 -189
  97. mcp_agent/resources/examples/researcher/researcher.py +0 -39
  98. mcp_agent/resources/examples/workflows/chaining.py +0 -45
  99. mcp_agent/resources/examples/workflows/evaluator.py +0 -79
  100. mcp_agent/resources/examples/workflows/fastagent.config.yaml +0 -24
  101. mcp_agent/resources/examples/workflows/human_input.py +0 -26
  102. mcp_agent/resources/examples/workflows/orchestrator.py +0 -74
  103. mcp_agent/resources/examples/workflows/parallel.py +0 -79
  104. mcp_agent/resources/examples/workflows/router.py +0 -54
  105. mcp_agent/resources/examples/workflows/sse.py +0 -23
  106. mcp_agent/telemetry/__init__.py +0 -0
  107. mcp_agent/telemetry/usage_tracking.py +0 -19
  108. mcp_agent/workflows/__init__.py +0 -0
  109. mcp_agent/workflows/embedding/__init__.py +0 -0
  110. mcp_agent/workflows/embedding/embedding_base.py +0 -58
  111. mcp_agent/workflows/embedding/embedding_cohere.py +0 -49
  112. mcp_agent/workflows/embedding/embedding_openai.py +0 -37
  113. mcp_agent/workflows/evaluator_optimizer/__init__.py +0 -0
  114. mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +0 -447
  115. mcp_agent/workflows/intent_classifier/__init__.py +0 -0
  116. mcp_agent/workflows/intent_classifier/intent_classifier_base.py +0 -117
  117. mcp_agent/workflows/intent_classifier/intent_classifier_embedding.py +0 -130
  118. mcp_agent/workflows/intent_classifier/intent_classifier_embedding_cohere.py +0 -41
  119. mcp_agent/workflows/intent_classifier/intent_classifier_embedding_openai.py +0 -41
  120. mcp_agent/workflows/intent_classifier/intent_classifier_llm.py +0 -150
  121. mcp_agent/workflows/intent_classifier/intent_classifier_llm_anthropic.py +0 -60
  122. mcp_agent/workflows/intent_classifier/intent_classifier_llm_openai.py +0 -58
  123. mcp_agent/workflows/llm/__init__.py +0 -0
  124. mcp_agent/workflows/llm/augmented_llm_playback.py +0 -111
  125. mcp_agent/workflows/llm/providers/__init__.py +0 -8
  126. mcp_agent/workflows/orchestrator/__init__.py +0 -0
  127. mcp_agent/workflows/orchestrator/orchestrator.py +0 -535
  128. mcp_agent/workflows/parallel/__init__.py +0 -0
  129. mcp_agent/workflows/parallel/fan_in.py +0 -320
  130. mcp_agent/workflows/parallel/fan_out.py +0 -181
  131. mcp_agent/workflows/parallel/parallel_llm.py +0 -149
  132. mcp_agent/workflows/router/__init__.py +0 -0
  133. mcp_agent/workflows/router/router_base.py +0 -338
  134. mcp_agent/workflows/router/router_embedding.py +0 -226
  135. mcp_agent/workflows/router/router_embedding_cohere.py +0 -59
  136. mcp_agent/workflows/router/router_embedding_openai.py +0 -59
  137. mcp_agent/workflows/router/router_llm.py +0 -304
  138. mcp_agent/workflows/swarm/__init__.py +0 -0
  139. mcp_agent/workflows/swarm/swarm.py +0 -292
  140. mcp_agent/workflows/swarm/swarm_anthropic.py +0 -42
  141. mcp_agent/workflows/swarm/swarm_openai.py +0 -41
  142. {fast_agent_mcp-0.1.13.dist-info → fast_agent_mcp-0.2.0.dist-info}/WHEEL +0 -0
  143. {fast_agent_mcp-0.1.13.dist-info → fast_agent_mcp-0.2.0.dist-info}/entry_points.txt +0 -0
  144. {fast_agent_mcp-0.1.13.dist-info → fast_agent_mcp-0.2.0.dist-info}/licenses/LICENSE +0 -0
  145. /mcp_agent/{workflows/orchestrator → agents/workflow}/orchestrator_prompts.py +0 -0
  146. /mcp_agent/{workflows/llm → llm}/memory.py +0 -0
  147. /mcp_agent/{workflows/llm → llm}/prompt_utils.py +0 -0
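
Most of the churn above is a package restructure: `mcp_agent/workflows/llm` moves to `mcp_agent/llm` (with provider-specific modules under `llm/providers`), and the orchestrator models move to `mcp_agent/agents/workflow`. A sketch of the resulting import changes for downstream code, using module paths taken directly from the rename entries above (the symbols inside them are not shown in this diff):

    # Module paths in 0.1.13 (now removed or relocated):
    #   mcp_agent.workflows.llm.augmented_llm                  (entry 36)
    #   mcp_agent.workflows.llm.model_factory                  (entry 39)
    #   mcp_agent.workflows.orchestrator.orchestrator_models   (entry 10)

    # Equivalent paths in 0.2.0, per the rename entries:
    import mcp_agent.llm.augmented_llm
    import mcp_agent.llm.model_factory
    import mcp_agent.agents.workflow.orchestrator_models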
--- a/mcp_agent/mcp/gen_client.py
+++ b/mcp_agent/mcp/gen_client.py
@@ -28,7 +28,9 @@ async def gen_client(
     For persistent connections, use connect() or MCPConnectionManager instead.
     """
     if not server_registry:
-        raise ValueError("Server registry not found in the context. Please specify one either on this method, or in the context.")
+        raise ValueError(
+            "Server registry not found in the context. Please specify one either on this method, or in the context."
+        )
 
     async with server_registry.initialize_server(
         server_name=server_name,
@@ -51,7 +53,9 @@ async def connect(
     If required, callers can specify their own message receive loop and ClientSession class constructor to customize further.
     """
     if not server_registry:
-        raise ValueError("Server registry not found in the context. Please specify one either on this method, or in the context.")
+        raise ValueError(
+            "Server registry not found in the context. Please specify one either on this method, or in the context."
+        )
 
     server_connection = await server_registry.connection_manager.get_server(
         server_name=server_name,
@@ -69,7 +73,9 @@ async def disconnect(
     Disconnect from the specified server. If server_name is None, disconnect from all servers.
     """
     if not server_registry:
-        raise ValueError("Server registry not found in the context. Please specify one either on this method, or in the context.")
+        raise ValueError(
+            "Server registry not found in the context. Please specify one either on this method, or in the context."
+        )
 
     if server_name:
         await server_registry.connection_manager.disconnect_server(server_name=server_name)
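
All three hunks in gen_client.py are the same Black-style reflow of the registry guard. For orientation, a minimal usage sketch, assuming gen_client is an async context manager that yields an initialized ClientSession (as its docstring and the initialize_server call suggest); the server name is a placeholder:

    # Hedged sketch: usage of gen_client under the assumptions stated above.
    from mcp_agent.mcp.gen_client import gen_client

    async def show_tools(server_registry) -> None:
        # server_registry must be supplied, or the ValueError shown in the
        # hunks above is raised.
        async with gen_client("fetch", server_registry=server_registry) as session:
            result = await session.list_tools()  # standard mcp ClientSession call
            print([tool.name for tool in result.tools])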
--- a/mcp_agent/mcp/interfaces.py
+++ b/mcp_agent/mcp/interfaces.py
@@ -8,10 +8,11 @@ from typing import (
     Any,
     AsyncContextManager,
     Callable,
-    Generic,
+    Dict,
     List,
     Optional,
     Protocol,
+    Tuple,
     Type,
     TypeVar,
     Union,
@@ -19,9 +20,11 @@ from typing import (
 )
 
 from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream
-from mcp import ClientSession
-from mcp.types import PromptMessage
+from deprecated import deprecated
+from mcp import ClientSession, GetPromptResult, ReadResourceResult
+from pydantic import BaseModel
 
+from mcp_agent.core.prompt import Prompt
 from mcp_agent.core.request_params import RequestParams
 from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
 
@@ -83,71 +86,90 @@ class ServerConnection(Protocol):
     def session(self) -> ClientSession: ...
 
 
-# Regular invariant type variables
-MessageParamT = TypeVar("MessageParamT")
-MessageT = TypeVar("MessageT")
-ModelT = TypeVar("ModelT")
+ModelT = TypeVar("ModelT", bound=BaseModel)
 
-# Variance-annotated type variables
-MessageParamT_co = TypeVar("MessageParamT_co", contravariant=True)
-MessageT_co = TypeVar("MessageT_co")
 
-
-class AugmentedLLMProtocol(Protocol, Generic[MessageParamT_co, MessageT_co]):
+class AugmentedLLMProtocol(Protocol):
     """Protocol defining the interface for augmented LLMs"""
 
-    async def generate(
-        self,
-        message: Union[str, MessageParamT_co, List[MessageParamT_co]],
-        request_params: RequestParams | None = None,
-    ) -> List[MessageT_co]:
-        """Request an LLM generation, which may run multiple iterations, and return the result"""
-        ...
-
-    async def generate_str(
-        self,
-        message: Union[str, MessageParamT_co, List[MessageParamT_co]],
-        request_params: RequestParams | None = None,
-    ) -> str:
-        """Request an LLM generation and return the string representation of the result"""
-        ...
-
     async def structured(
         self,
-        prompt: Union[str, PromptMessage, PromptMessageMultipart, List[str]],
+        prompt: List[PromptMessageMultipart],
         model: Type[ModelT],
-        request_params: RequestParams | None,
-    ) -> ModelT:
+        request_params: RequestParams | None = None,
+    ) -> Tuple[ModelT | None, PromptMessageMultipart]:
         """Apply the prompt and return the result as a Pydantic model, or None if coercion fails"""
         ...
 
-    async def generate_prompt(
-        self,
-        prompt: Union[str, PromptMessage, PromptMessageMultipart, List[str]],
-        request_params: RequestParams | None,
-    ) -> str:
-        """Request an LLM generation and return a string representation of the result"""
-        ...
-
-    async def apply_prompt(
+    async def generate(
         self,
-        multipart_messages: List["PromptMessageMultipart"],
+        multipart_messages: List[PromptMessageMultipart],
         request_params: RequestParams | None = None,
-    ) -> str:
+    ) -> PromptMessageMultipart:
         """
         Apply a list of PromptMessageMultipart messages directly to the LLM.
-        This is a cleaner interface to _apply_prompt_template_provider_specific.
+
 
         Args:
             multipart_messages: List of PromptMessageMultipart objects
             request_params: Optional parameters to configure the LLM request
 
         Returns:
-            String representation of the assistant's response
+            A PromptMessageMultipart containing the Assistant response, including Tool Content
         """
         ...
 
 
+class AgentProtocol(AugmentedLLMProtocol, Protocol):
+    """Protocol defining the standard agent interface"""
+
+    name: str
+
+    async def __call__(self, message: Union[str, PromptMessageMultipart] | None = None) -> str:
+        """Make the agent callable for sending messages directly."""
+        ...
+
+    async def send(self, message: Union[str, PromptMessageMultipart]) -> str:
+        """Send a message to the agent and get a response"""
+        ...
+
+    async def prompt(self, default_prompt: str = "") -> str:
+        """Start an interactive prompt session with the agent"""
+        ...
+
+    async def apply_prompt(self, prompt_name: str, arguments: Dict[str, str] | None = None) -> str:
+        """Apply an MCP prompt template by name"""
+        ...
+
+    async def get_prompt(self, prompt_name: str) -> GetPromptResult: ...
+
+    async def list_prompts(self, server_name: str | None) -> Dict[str, List[Prompt]]: ...
+
+    async def get_resource(self, server_name: str, resource_uri: str) -> ReadResourceResult: ...
+
+    @deprecated
+    async def generate_str(self, message: str, request_params: RequestParams | None) -> str:
+        """Generate a response. Deprecated: please use send instead"""
+        ...
+
+    async def with_resource(
+        self,
+        prompt_content: Union[str, PromptMessageMultipart],
+        server_name: str,
+        resource_name: str,
+    ) -> str:
+        """Send a message with an attached MCP resource"""
+        ...
+
+    async def initialize(self) -> None:
+        """Initialize the agent and connect to MCP servers"""
+        ...
+
+    async def shutdown(self) -> None:
+        """Shut down the agent and close connections"""
+        ...
+
+
 class ModelFactoryClassProtocol(Protocol):
     """
     Protocol defining the minimal interface of the ModelFactory class needed by sampling.
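
Both protocol methods now speak PromptMessageMultipart exclusively, and structured() returns a tuple so the raw assistant message survives a failed coercion. A hedged sketch of a caller written against the new surface; Prompt.user and first_text() are helpers assumed from mcp_agent.core.prompt and prompt_message_multipart (both touched elsewhere in this diff), and WeatherReport is illustrative:

    from pydantic import BaseModel

    from mcp_agent.core.prompt import Prompt
    from mcp_agent.mcp.interfaces import AugmentedLLMProtocol


    class WeatherReport(BaseModel):
        city: str
        temperature_c: float


    async def demo(llm: AugmentedLLMProtocol) -> None:
        # generate() takes and returns PromptMessageMultipart rather than
        # provider-specific message types.
        reply = await llm.generate([Prompt.user("Summarise the forecast for Paris")])
        print(reply.first_text())

        # structured() returns (model | None, assistant_message), so the raw
        # response is still available when Pydantic coercion fails.
        report, raw = await llm.structured(
            [Prompt.user("Return the Paris forecast as JSON")], WeatherReport
        )
        if report is None:
            print("coercion failed:", raw.first_text())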
@@ -157,7 +179,7 @@ class ModelFactoryClassProtocol(Protocol):
     @classmethod
     def create_factory(
         cls, model_string: str, request_params: Optional[RequestParams] = None
-    ) -> Callable[..., AugmentedLLMProtocol[Any, Any]]:
+    ) -> Callable[..., Any]:
         """
         Creates a factory function that can be used to construct an LLM instance.
 
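With the protocol no longer generic, create_factory's return annotation loosens to Callable[..., Any]. The call shape it guarantees, per the signature above (the model string is a placeholder, and the keyword arguments the returned callable accepts are not shown in this diff):

    # Grounded in the create_factory signature above; the import path follows
    # rename entry 39 in the file list.
    from mcp_agent.llm.model_factory import ModelFactory

    factory = ModelFactory.create_factory("haiku")  # placeholder model string
    assert callable(factory)  # the protocol only promises a callable that builds an LLM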
--- /dev/null
+++ b/mcp_agent/mcp/logger_textio.py
@@ -0,0 +1,97 @@
+"""
+Utilities for MCP stdio client integration with our logging system.
+"""
+
+import io
+import sys
+from typing import TextIO
+
+from mcp_agent.logging.logger import get_logger
+
+logger = get_logger(__name__)
+
+
+class LoggerTextIO(TextIO):
+    """
+    A TextIO implementation that logs to our application logger.
+    This implements the full TextIO interface as specified by Python.
+
+    Args:
+        server_name: The name of the server to include in logs
+    """
+
+    def __init__(self, server_name: str) -> None:
+        super().__init__()
+        self.server_name = server_name
+        # Use a StringIO for buffering
+        self._buffer = io.StringIO()
+        # Keep track of complete and partial lines
+        self._line_buffer = ""
+
+    def write(self, s: str) -> int:
+        """
+        Write data to our buffer and log any complete lines.
+        """
+        if not s:
+            return 0
+
+        # Handle line buffering for clean log output
+        text = self._line_buffer + s
+        lines = text.split("\n")
+
+        # If the text ends with a newline, the last line is complete
+        if text.endswith("\n"):
+            complete_lines = lines
+            self._line_buffer = ""
+        else:
+            # Otherwise, the last line is incomplete
+            complete_lines = lines[:-1]
+            self._line_buffer = lines[-1]
+
+        # Log complete lines but at debug level instead of info to prevent console spam
+        for line in complete_lines:
+            if line.strip():  # Only log non-empty lines
+                logger.debug(f"{self.server_name} (stderr): {line}")
+
+        # Always write to the underlying buffer
+        return self._buffer.write(s)
+
+    def flush(self) -> None:
+        """Flush the internal buffer."""
+        self._buffer.flush()
+
+    def close(self) -> None:
+        """Close the stream."""
+        # Log any remaining content in the line buffer
+        if self._line_buffer and self._line_buffer.strip():
+            logger.debug(f"{self.server_name} (stderr): {self._line_buffer}")
+        self._buffer.close()
+
+    def readable(self) -> bool:
+        return False
+
+    def writable(self) -> bool:
+        return True
+
+    def seekable(self) -> bool:
+        return False
+
+    def fileno(self) -> int:
+        """
+        Return a file descriptor for this stream.
+        We use sys.stderr's fileno since TextIO is expected to return a real file descriptor.
+        """
+        return sys.stderr.fileno()
+
+
+def get_stderr_handler(server_name: str) -> TextIO:
+    """
+    Get a stderr handler that routes MCP server errors to our logger.
+
+    Args:
+        server_name: The name of the server to include in logs
+
+    Returns:
+        A TextIO object that can be used as stderr by MCP
+    """
+    return LoggerTextIO(server_name)
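
write() above is line-buffered: partial chunks accumulate until a newline arrives, so interleaved stderr output from a child process still logs as whole lines, at debug level. A small illustrative exercise of that behaviour:

    # Illustrative: exercising LoggerTextIO's line buffering.
    from mcp_agent.mcp.logger_textio import get_stderr_handler

    stderr = get_stderr_handler("fetch")  # "fetch" is a placeholder server name
    stderr.write("starting up")           # no newline yet: held in _line_buffer
    stderr.write(" ... ready\n")          # completes the line: one debug record
    stderr.write("one\ntwo\n")            # two complete lines: two debug records
    stderr.close()                        # logs any trailing partial line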
--- a/mcp_agent/mcp/mcp_agent_client_session.py
+++ b/mcp_agent/mcp/mcp_agent_client_session.py
@@ -91,7 +91,9 @@ class MCPAgentClientSession(ClientSession, ContextDependent):
             logger.error("send_notification failed", data=e)
             raise
 
-    async def _send_response(self, request_id: RequestId, response: SendResultT | ErrorData) -> None:
+    async def _send_response(
+        self, request_id: RequestId, response: SendResultT | ErrorData
+    ) -> None:
         logger.debug(
             f"send_response: request_id={request_id}, response=",
             data=response.model_dump(),
@@ -109,10 +111,16 @@ class MCPAgentClientSession(ClientSession, ContextDependent):
         )
         return await super()._received_notification(notification)
 
-    async def send_progress_notification(self, progress_token: str | int, progress: float, total: float | None = None) -> None:
+    async def send_progress_notification(
+        self, progress_token: str | int, progress: float, total: float | None = None
+    ) -> None:
         """
         Sends a progress notification for a request that is currently being
         processed.
         """
-        logger.debug("send_progress_notification: progress_token={progress_token}, progress={progress}, total={total}")
-        return await super().send_progress_notification(progress_token=progress_token, progress=progress, total=total)
+        logger.debug(
+            "send_progress_notification: progress_token={progress_token}, progress={progress}, total={total}"
+        )
+        return await super().send_progress_notification(
+            progress_token=progress_token, progress=progress, total=total
+        )
--- a/mcp_agent/mcp/mcp_agent_server.py
+++ b/mcp_agent/mcp/mcp_agent_server.py
@@ -17,7 +17,9 @@ async def run() -> None:
         await app._mcp_server.run(
             read_stream,
             write_stream,
-            app._mcp_server.create_initialization_options(notification_options=NotificationOptions(tools_changed=True, resources_changed=True)),
+            app._mcp_server.create_initialization_options(
+                notification_options=NotificationOptions(tools_changed=True, resources_changed=True)
+            ),
         )
 