fast-agent-mcp 0.0.7__py3-none-any.whl → 0.0.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of fast-agent-mcp might be problematic.

Files changed (31)
  1. {fast_agent_mcp-0.0.7.dist-info → fast_agent_mcp-0.0.8.dist-info}/METADATA +22 -57
  2. {fast_agent_mcp-0.0.7.dist-info → fast_agent_mcp-0.0.8.dist-info}/RECORD +29 -23
  3. mcp_agent/agents/agent.py +8 -4
  4. mcp_agent/app.py +5 -1
  5. mcp_agent/cli/commands/bootstrap.py +180 -121
  6. mcp_agent/cli/commands/setup.py +20 -16
  7. mcp_agent/core/__init__.py +0 -0
  8. mcp_agent/core/exceptions.py +47 -0
  9. mcp_agent/core/fastagent.py +176 -85
  10. mcp_agent/core/server_validation.py +44 -0
  11. mcp_agent/event_progress.py +4 -1
  12. mcp_agent/logging/rich_progress.py +11 -0
  13. mcp_agent/mcp/mcp_connection_manager.py +11 -2
  14. mcp_agent/resources/examples/data-analysis/analysis.py +35 -0
  15. mcp_agent/resources/examples/data-analysis/fastagent.config.yaml +20 -0
  16. mcp_agent/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +1471 -0
  17. mcp_agent/resources/examples/workflows/chaining.py +31 -0
  18. mcp_agent/resources/examples/{decorator/optimizer.py → workflows/evaluator.py} +7 -10
  19. mcp_agent/resources/examples/workflows/human_input.py +26 -0
  20. mcp_agent/resources/examples/{decorator → workflows}/orchestrator.py +20 -11
  21. mcp_agent/resources/examples/{decorator → workflows}/parallel.py +14 -18
  22. mcp_agent/resources/examples/{decorator → workflows}/router.py +9 -10
  23. mcp_agent/workflows/llm/augmented_llm_anthropic.py +48 -12
  24. mcp_agent/workflows/llm/augmented_llm_openai.py +38 -9
  25. mcp_agent/resources/examples/decorator/main.py +0 -26
  26. mcp_agent/resources/examples/decorator/tiny.py +0 -22
  27. {fast_agent_mcp-0.0.7.dist-info → fast_agent_mcp-0.0.8.dist-info}/WHEEL +0 -0
  28. {fast_agent_mcp-0.0.7.dist-info → fast_agent_mcp-0.0.8.dist-info}/entry_points.txt +0 -0
  29. {fast_agent_mcp-0.0.7.dist-info → fast_agent_mcp-0.0.8.dist-info}/licenses/LICENSE +0 -0
  30. /mcp_agent/resources/examples/mcp_researcher/{main-evalopt.py → researcher-eval.py} +0 -0
  31. /mcp_agent/resources/examples/mcp_researcher/{main.py → researcher.py} +0 -0
@@ -0,0 +1,31 @@
1
+ import asyncio
2
+ from mcp_agent.core.fastagent import FastAgent
3
+
4
+ # Create the application
5
+ fast = FastAgent("Agent Chaining")
6
+
7
+
8
+ @fast.agent(
9
+ "url_fetcher",
10
+ instruction="Given a URL, provide a complete and comprehensive summary",
11
+ servers=["fetch"],
12
+ model="haiku",
13
+ )
14
+ @fast.agent(
15
+ "social_media",
16
+ instruction="""
17
+ Write a 280 character social media post for any given text.
18
+ Respond only with the post, never use hashtags.
19
+ """,
20
+ )
21
+ async def main():
22
+ async with fast.run() as agent:
23
+ await agent.social_media(
24
+ await agent.url_fetcher("http://llmindset.co.uk/resources/mcp-hfspace/")
25
+ )
26
+
27
+ # alternative syntax for above is agent["social_media"].send(message)
28
+
29
+
30
+ if __name__ == "__main__":
31
+ asyncio.run(main())
@@ -1,5 +1,4 @@
1
1
  """
2
- Example showing how to use the evaluator-optimizer functionality with the decorator API.
3
2
  This demonstrates creating an optimizer and evaluator to iteratively improve content.
4
3
  """
5
4
 
@@ -7,22 +6,21 @@ import asyncio
7
6
  from mcp_agent.core.fastagent import FastAgent
8
7
 
9
8
  # Create the application
10
- agent_app = FastAgent("Cover Letter Writer")
11
- agent_app.app._human_input_callback = None
9
+ fast = FastAgent("Evaluator-Optimizer")
12
10
 
13
11
 
14
12
  # Define optimizer agent
15
- @agent_app.agent(
13
+ @fast.agent(
16
14
  name="optimizer",
17
15
  instruction="""You are a career coach specializing in cover letter writing.
18
16
  You are tasked with generating a compelling cover letter given the job posting,
19
17
  candidate details, and company information. Tailor the response to the company and job requirements.
20
18
  """,
21
19
  servers=["fetch"],
22
- model="gpt-4o-mini", # Using a capable model for content generation
20
+ model="gpt-4o-mini",
23
21
  )
24
22
  # Define evaluator agent
25
- @agent_app.agent(
23
+ @fast.agent(
26
24
  name="evaluator",
27
25
  instruction="""Evaluate the following response based on the criteria below:
28
26
  1. Clarity: Is the language clear, concise, and grammatically correct?
@@ -40,11 +38,10 @@ agent_app.app._human_input_callback = None
40
38
  Summarize your evaluation as a structured response with:
41
39
  - Overall quality rating.
42
40
  - Specific feedback and areas for improvement.""",
43
- servers=[], # Evaluator doesn't need special server access
44
- model="sonnet", # Using a capable model for evaluation
41
+ model="sonnet",
45
42
  )
46
43
  # Define the evaluator-optimizer workflow
47
- @agent_app.evaluator_optimizer(
44
+ @fast.evaluator_optimizer(
48
45
  name="cover_letter_writer",
49
46
  optimizer="optimizer", # Reference to optimizer agent
50
47
  evaluator="evaluator", # Reference to evaluator agent
@@ -52,7 +49,7 @@ agent_app.app._human_input_callback = None
52
49
  max_refinements=3, # Maximum iterations
53
50
  )
54
51
  async def main():
55
- async with agent_app.run() as agent:
52
+ async with fast.run() as agent:
56
53
  job_posting = (
57
54
  "Software Engineer at LastMile AI. Responsibilities include developing AI systems, "
58
55
  "collaborating with cross-functional teams, and enhancing scalability. Skills required: "
@@ -0,0 +1,26 @@
1
+ """
2
+ Agent which demonstrates Human Input tool
3
+ """
4
+
5
+ import asyncio
6
+ from mcp_agent.core.fastagent import FastAgent
7
+
8
+ # Create the application
9
+ fast = FastAgent("Human Input")
10
+
11
+
12
+ # Define the agent
13
+ @fast.agent(
14
+ instruction="An AI agent that assists with basic tasks. Request Human Input when needed.",
15
+ human_input=True,
16
+ model="gpt-4o",
17
+ )
18
+ async def main():
19
+ async with fast.run() as agent:
20
+ # this usually causes the LLM to request the Human Input Tool
21
+ await agent("print the next number in the sequence")
22
+ await agent.prompt(default="STOP")
23
+
24
+
25
+ if __name__ == "__main__":
26
+ asyncio.run(main())
@@ -1,5 +1,4 @@
1
1
  """
2
- Example showing how to use the orchestrator functionality with the decorator API.
3
2
  This demonstrates creating multiple agents and an orchestrator to coordinate them.
4
3
  """
5
4
 
@@ -7,11 +6,18 @@ import asyncio
7
6
  from mcp_agent.core.fastagent import FastAgent
8
7
 
9
8
  # Create the application
10
- agent_app = FastAgent("Orchestrator Example")
9
+ fast = FastAgent("Orchestrator-Workers")
11
10
 
12
11
 
12
+ @fast.agent(
13
+ "author",
14
+ instruction="""You are to role play a poorly skilled writer,
15
+ who makes frequent grammar, punctuations and spelling errors. You enjoy
16
+ writing short stories, but the narrative doesn't always make sense""",
17
+ servers=["filesystem"],
18
+ )
13
19
  # Define worker agents
14
- @agent_app.agent(
20
+ @fast.agent(
15
21
  name="finder",
16
22
  instruction="""You are an agent with access to the filesystem,
17
23
  as well as the ability to fetch URLs. Your job is to identify
@@ -20,15 +26,14 @@ agent_app = FastAgent("Orchestrator Example")
20
26
  servers=["fetch", "filesystem"],
21
27
  model="gpt-4o-mini",
22
28
  )
23
- @agent_app.agent(
29
+ @fast.agent(
24
30
  name="writer",
25
31
  instruction="""You are an agent that can write to the filesystem.
26
32
  You are tasked with taking the user's input, addressing it, and
27
33
  writing the result to disk in the appropriate location.""",
28
34
  servers=["filesystem"],
29
- model="gpt-4o",
30
35
  )
31
- @agent_app.agent(
36
+ @fast.agent(
32
37
  name="proofreader",
33
38
  instruction=""""Review the short story for grammar, spelling, and punctuation errors.
34
39
  Identify any awkward phrasing or structural issues that could improve clarity.
@@ -37,8 +42,8 @@ agent_app = FastAgent("Orchestrator Example")
37
42
  model="gpt-4o",
38
43
  )
39
44
  # Define the orchestrator to coordinate the other agents
40
- @agent_app.orchestrator(
41
- name="document_processor",
45
+ @fast.orchestrator(
46
+ name="orchestrate",
42
47
  instruction="""Load the student's short story from short_story.md,
43
48
  and generate a report with feedback across proofreading,
44
49
  factuality/logical consistency and style adherence. Use the style rules from
@@ -46,10 +51,14 @@ agent_app = FastAgent("Orchestrator Example")
46
51
  https://apastyle.apa.org/learn/quick-guide-on-references.
47
52
  Write the graded report to graded_report.md in the same directory as short_story.md""",
48
53
  agents=["finder", "writer", "proofreader"],
49
- model="sonnet", # Orchestrators typically need more capable models
54
+ model="sonnet",
50
55
  )
51
56
  async def main():
52
- async with agent_app.run() as agent:
57
+ async with fast.run() as agent:
58
+ await agent.author(
59
+ "write a 250 word short story about kittens discovering a castle, and save it to short_story.md"
60
+ )
61
+
53
62
  # The orchestrator can be used just like any other agent
54
63
  task = (
55
64
  """Load the student's short story from short_story.md,
@@ -61,7 +70,7 @@ async def main():
61
70
  )
62
71
 
63
72
  # Send the task
64
- await agent.send("document_processor", task)
73
+ await agent.orchestrate(task)
65
74
 
66
75
 
67
76
  if __name__ == "__main__":
@@ -1,18 +1,14 @@
1
1
  """
2
- Example MCP Agent application showing simplified agent access.
2
+ Parallel Workflow showing Fan Out and Fan In agents, using different models
3
3
  """
4
4
 
5
5
  import asyncio
6
6
  from mcp_agent.core.fastagent import FastAgent
7
7
 
8
8
  # Create the application
9
- app = FastAgent(
10
- "Parallel Workflow Example",
11
- # config={
12
- # "human_input_handler": None # Disable human input handling
13
- # },
9
+ fast = FastAgent(
10
+ "Parallel Workflow",
14
11
  )
15
- app.app._human_input_callback = None
16
12
  SHORT_STORY = """
17
13
  The Battle of Glimmerwood
18
14
 
@@ -36,45 +32,45 @@ and whispers of a hidden agenda linger among the villagers.
36
32
  """
37
33
 
38
34
 
39
- @app.agent(
35
+ @fast.agent(
40
36
  name="proofreader",
41
37
  instruction=""""Review the short story for grammar, spelling, and punctuation errors.
42
38
  Identify any awkward phrasing or structural issues that could improve clarity.
43
39
  Provide detailed feedback on corrections.""",
44
40
  )
45
- @app.agent(
41
+ @fast.agent(
46
42
  name="fact_checker",
47
- model="gpt-4o",
48
43
  instruction="""Verify the factual consistency within the story. Identify any contradictions,
49
44
  logical inconsistencies, or inaccuracies in the plot, character actions, or setting.
50
45
  Highlight potential issues with reasoning or coherence.""",
46
+ model="gpt-4o",
51
47
  )
52
- @app.agent(
48
+ @fast.agent(
53
49
  name="style_enforcer",
54
- model="sonnet",
55
50
  instruction="""Analyze the story for adherence to style guidelines.
56
51
  Evaluate the narrative flow, clarity of expression, and tone. Suggest improvements to
57
52
  enhance storytelling, readability, and engagement.""",
53
+ model="sonnet",
58
54
  )
59
- @app.agent(
55
+ @fast.agent(
60
56
  name="grader",
61
- model="o3-mini.high",
62
57
  instruction="""Compile the feedback from the Proofreader, Fact Checker, and Style Enforcer
63
58
  into a structured report. Summarize key issues and categorize them by type.
64
59
  Provide actionable recommendations for improving the story,
65
60
  and give an overall grade based on the feedback.""",
61
+ model="o3-mini.low",
66
62
  )
67
- @app.parallel(
63
+ @fast.parallel(
68
64
  fan_out=["proofreader", "fact_checker", "style_enforcer"],
69
65
  fan_in="grader",
70
66
  name="parallel",
71
67
  )
72
68
  async def main():
73
69
  # Use the app's context manager
74
- async with app.run() as agent:
75
- await agent.send("parallel", f"student short story submission: {SHORT_STORY}")
70
+ async with fast.run() as agent:
71
+ await agent.parallel(f"student short story submission: {SHORT_STORY}")
76
72
  # follow-on prompt to task agent
77
- # await agent.prompt("style_enforcer", default="STOP")
73
+ # await agent.style_enforcer.prompt(default="STOP")
78
74
 
79
75
 
80
76
  if __name__ == "__main__":
@@ -9,10 +9,9 @@ import asyncio
9
9
  from mcp_agent.core.fastagent import FastAgent
10
10
 
11
11
  # Create the application
12
- agent_app = FastAgent(
13
- "Router Workflow Example",
12
+ fast = FastAgent(
13
+ "Router Workflow",
14
14
  )
15
- agent_app.app._human_input_callback = None
16
15
 
17
16
  # Sample requests demonstrating direct tool use vs agent delegation
18
17
  SAMPLE_REQUESTS = [
@@ -22,13 +21,13 @@ SAMPLE_REQUESTS = [
22
21
  ]
23
22
 
24
23
 
25
- @agent_app.agent(
24
+ @fast.agent(
26
25
  name="fetcher",
27
26
  instruction="""You are an agent, with a tool enabling you to fetch URLs.""",
28
27
  servers=["fetch"],
29
28
  model="haiku",
30
29
  )
31
- @agent_app.agent(
30
+ @fast.agent(
32
31
  name="code_expert",
33
32
  instruction="""You are an expert in code analysis and software engineering.
34
33
  When asked about code, architecture, or development practices,
@@ -36,20 +35,20 @@ SAMPLE_REQUESTS = [
36
35
  servers=["filesystem"],
37
36
  model="gpt-4o",
38
37
  )
39
- @agent_app.agent(
38
+ @fast.agent(
40
39
  name="general_assistant",
41
40
  instruction="""You are a knowledgeable assistant that provides clear,
42
41
  well-reasoned responses about general topics, concepts, and principles.""",
43
42
  )
44
- @agent_app.router(
45
- name="llm_router",
43
+ @fast.router(
44
+ name="route",
46
45
  model="sonnet",
47
46
  agents=["code_expert", "general_assistant", "fetcher"],
48
47
  )
49
48
  async def main():
50
- async with agent_app.run() as agent:
49
+ async with fast.run() as agent:
51
50
  for request in SAMPLE_REQUESTS:
52
- await agent.send("llm_router", request)
51
+ await agent.route(request)
53
52
 
54
53
 
55
54
  if __name__ == "__main__":
@@ -1,10 +1,11 @@
1
1
  import json
2
+ import os
2
3
  from typing import Iterable, List, Type
3
4
 
4
5
  from pydantic import BaseModel
5
6
 
6
7
  import instructor
7
- from anthropic import Anthropic
8
+ from anthropic import Anthropic, AuthenticationError
8
9
  from anthropic.types import (
9
10
  ContentBlock,
10
11
  DocumentBlockParam,
@@ -36,6 +37,7 @@ from mcp_agent.workflows.llm.augmented_llm import (
36
37
  ProviderToMCPConverter,
37
38
  RequestParams,
38
39
  )
40
+ from mcp_agent.core.exceptions import ProviderKeyError
39
41
  from mcp_agent.logging.logger import get_logger
40
42
 
41
43
  DEFAULT_ANTHROPIC_MODEL = "claude-3-5-sonnet-latest"
@@ -78,9 +80,34 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
78
80
  Override this method to use a different LLM.
79
81
  """
80
82
  config = self.context.config
81
- anthropic = Anthropic(api_key=config.anthropic.api_key)
82
- messages: List[MessageParam] = []
83
- params = self.get_request_params(request_params)
83
+
84
+ api_key = None
85
+
86
+ if hasattr(config, "anthropic") and config.anthropic:
87
+ api_key = config.anthropic.api_key
88
+ if api_key == "<your-api-key-here>":
89
+ api_key = None
90
+
91
+ if api_key is None:
92
+ api_key = os.getenv("ANTHROPIC_API_KEY")
93
+
94
+ if not api_key:
95
+ raise ProviderKeyError(
96
+ "Anthropic API key not configured",
97
+ "The Anthropic API key is required but not set.\n"
98
+ "Add it to your configuration file under anthropic.api_key "
99
+ "or set the ANTHROPIC_API_KEY environment variable.",
100
+ )
101
+ try:
102
+ anthropic = Anthropic(api_key=api_key)
103
+ messages: List[MessageParam] = []
104
+ params = self.get_request_params(request_params)
105
+ except AuthenticationError as e:
106
+ raise ProviderKeyError(
107
+ "Invalid Anthropic API key",
108
+ "The configured Anthropic API key was rejected.\n"
109
+ "Please check that your API key is valid and not expired.",
110
+ ) from e
84
111
 
85
112
  if params.use_history:
86
113
  messages.extend(self.history.get())
@@ -131,15 +158,24 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
131
158
 
132
159
  response = executor_result[0]
133
160
 
134
- if isinstance(response, BaseException):
161
+ if isinstance(response, AuthenticationError):
162
+ raise ProviderKeyError(
163
+ "Invalid Anthropic API key",
164
+ "The configured Anthropic API key was rejected.\n"
165
+ "Please check that your API key is valid and not expired.",
166
+ ) from response
167
+ elif isinstance(response, BaseException):
135
168
  self.logger.error(f"Error: {executor_result}")
136
- # Don't break, instead create an error response
169
+ # Convert other errors to text response
137
170
  error_message = f"Error during generation: {str(response)}"
138
171
  response = Message(
172
+ id="error", # Required field
173
+ model="error", # Required field
139
174
  role="assistant",
140
175
  type="message",
141
176
  content=[TextBlock(type="text", text=error_message)],
142
- stop_reason="error",
177
+ stop_reason="end_turn", # Must be one of the allowed values
178
+ usage={"input_tokens": 0, "output_tokens": 0}, # Required field
143
179
  )
144
180
 
145
181
  self.logger.debug(
@@ -249,11 +285,11 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
249
285
  for content in response.content:
250
286
  if content.type == "text":
251
287
  final_text.append(content.text)
252
- elif content.type == "tool_use":
253
- final_text.append(
254
- f"[Calling tool {content.name} with args {content.input}]"
255
- )
256
-
288
+ # elif content.type == "tool_use":
289
+ # final_text.append(
290
+ # f"[Calling tool {content.name} with args {content.input}]"
291
+ # )
292
+ # TODO -- check whether this should be reinstated - OpenAI doesn't return this....
257
293
  return "\n".join(final_text)
258
294
 
259
295
  async def generate_structured(
@@ -1,8 +1,9 @@
1
1
  import json
2
+ import os
2
3
  from typing import Iterable, List, Type
3
4
 
4
5
  import instructor
5
- from openai import OpenAI
6
+ from openai import OpenAI, AuthenticationError
6
7
  from openai.types.chat import (
7
8
  ChatCompletionAssistantMessageParam,
8
9
  ChatCompletionContentPartParam,
@@ -33,6 +34,7 @@ from mcp_agent.workflows.llm.augmented_llm import (
33
34
  ProviderToMCPConverter,
34
35
  RequestParams,
35
36
  )
37
+ from mcp_agent.core.exceptions import ProviderKeyError
36
38
  from mcp_agent.logging.logger import get_logger
37
39
  from rich.text import Text
38
40
 
@@ -102,13 +104,34 @@ class OpenAIAugmentedLLM(
102
104
  Override this method to use a different LLM.
103
105
  """
104
106
  config = self.context.config
105
- if not config.openai.api_key:
106
- raise "OpenAI API key is not set"
107
- openai_client = OpenAI(
108
- api_key=config.openai.api_key, base_url=config.openai.base_url
109
- )
110
- messages: List[ChatCompletionMessageParam] = []
111
- params = self.get_request_params(request_params)
107
+ api_key = None
108
+
109
+ if hasattr(config, "openai") and config.openai:
110
+ api_key = config.openai.api_key
111
+ if api_key == "<your-api-key-here>":
112
+ api_key = None
113
+
114
+ if api_key is None:
115
+ api_key = os.getenv("OPENAI_API_KEY")
116
+
117
+ if not api_key:
118
+ raise ProviderKeyError(
119
+ "OpenAI API key not configured",
120
+ "The OpenAI API key is required but not set.\n"
121
+ "Add it to your configuration file under openai.api_key\n"
122
+ "Or set the OPENAI_API_KEY environment variable",
123
+ )
124
+
125
+ try:
126
+ openai_client = OpenAI(api_key=api_key, base_url=config.openai.base_url)
127
+ messages: List[ChatCompletionMessageParam] = []
128
+ params = self.get_request_params(request_params)
129
+ except AuthenticationError as e:
130
+ raise ProviderKeyError(
131
+ "Invalid OpenAI API key",
132
+ "The configured OpenAI API key was rejected.\n"
133
+ "Please check that your API key is valid and not expired.",
134
+ ) from e
112
135
 
113
136
  system_prompt = self.instruction or params.systemPrompt
114
137
  if system_prompt:
@@ -189,7 +212,13 @@ class OpenAIAugmentedLLM(
189
212
  data=response,
190
213
  )
191
214
 
192
- if isinstance(response, BaseException):
215
+ if isinstance(response, AuthenticationError):
216
+ raise ProviderKeyError(
217
+ "Invalid OpenAI API key",
218
+ "The configured OpenAI API key was rejected.\n"
219
+ "Please check that your API key is valid and not expired.",
220
+ ) from response
221
+ elif isinstance(response, BaseException):
193
222
  self.logger.error(f"Error: {response}")
194
223
  break
195
224
 
@@ -1,26 +0,0 @@
1
- """
2
- Example MCP Agent application showing simplified agent access.
3
- """
4
-
5
- import asyncio
6
- from mcp_agent.core.fastagent import FastAgent
7
-
8
- # Create the application
9
- agent_app = FastAgent("Interactive Agent Example")
10
-
11
-
12
- # Define the agent
13
- @agent_app.agent(
14
- instruction="A simple agent that helps with basic tasks. Request Human Input when needed.",
15
- servers=["mcp_root"],
16
- # model="gpt-4o", model override here takes precedence
17
- )
18
- async def main():
19
- # use the --model= command line switch to specify model
20
- async with agent_app.run() as agent:
21
- await agent("print the next number in the sequence")
22
- await agent.prompt(default="STOP")
23
-
24
-
25
- if __name__ == "__main__":
26
- asyncio.run(main())
@@ -1,22 +0,0 @@
1
- """
2
- Example MCP Agent application showing simplified agent access.
3
- """
4
-
5
- import asyncio
6
- from mcp_agent.core.fastagent import FastAgent
7
-
8
- # Create the application
9
- agent_app = FastAgent("Interactive Agent Example")
10
- # agent_app.app._human_input_callback = None
11
-
12
-
13
- # Define the agent
14
- @agent_app.agent()
15
- async def main():
16
- # use the --model= command line switch to specify model
17
- async with agent_app.run() as agent:
18
- await agent()
19
-
20
-
21
- if __name__ == "__main__":
22
- asyncio.run(main())