camel-ai 0.2.1__py3-none-any.whl → 0.2.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of camel-ai might be problematic. Click here for more details.

camel/__init__.py CHANGED
@@ -12,7 +12,7 @@
12
12
  # limitations under the License.
13
13
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
14
 
15
- __version__ = '0.2.1'
15
+ __version__ = '0.2.2'
16
16
 
17
17
  __all__ = [
18
18
  '__version__',
@@ -15,6 +15,8 @@ from __future__ import annotations
15
15
 
16
16
  import json
17
17
  import logging
18
+ import re
19
+ import uuid
18
20
  from collections import defaultdict
19
21
  from typing import (
20
22
  TYPE_CHECKING,
@@ -28,6 +30,7 @@ from typing import (
28
30
  )
29
31
 
30
32
  from openai.types.chat import ChatCompletionMessageToolCall
33
+ from openai.types.chat.chat_completion_message_tool_call import Function
31
34
  from pydantic import BaseModel
32
35
 
33
36
  from camel.agents.base import BaseAgent
@@ -185,12 +188,12 @@ class ChatAgent(BaseAgent):
185
188
  # the tools set from `ChatAgent` will be used.
186
189
  # This design simplifies the interface while retaining tool-running
187
190
  # capabilities for `BaseModelBackend`.
188
- if all_tools and not self.model_backend.model_config_dict['tools']:
191
+ if all_tools and not self.model_backend.model_config_dict.get("tools"):
189
192
  tool_schema_list = [
190
193
  tool.get_openai_tool_schema() for tool in all_tools
191
194
  ]
192
195
  self.model_backend.model_config_dict['tools'] = tool_schema_list
193
-
196
+ self.tool_schema_list = tool_schema_list
194
197
  self.model_config_dict = self.model_backend.model_config_dict
195
198
 
196
199
  self.model_token_limit = token_limit or self.model_backend.token_limit
@@ -206,6 +209,56 @@ class ChatAgent(BaseAgent):
206
209
  self.response_terminators = response_terminators or []
207
210
  self.init_messages()
208
211
 
212
+ # ruff: noqa: E501
213
+ def _generate_tool_prompt(self, tool_schema_list: List[Dict]) -> str:
214
+ tool_prompts = []
215
+
216
+ for tool in tool_schema_list:
217
+ tool_info = tool['function']
218
+ tool_name = tool_info['name']
219
+ tool_description = tool_info['description']
220
+ tool_json = json.dumps(tool_info, indent=4)
221
+
222
+ prompt = f"Use the function '{tool_name}' to '{tool_description}':\n{tool_json}\n"
223
+ tool_prompts.append(prompt)
224
+
225
+ tool_prompt_str = "\n".join(tool_prompts)
226
+
227
+ final_prompt = f'''
228
+ # Tool prompt
229
+ TOOL_PROMPT = f"""
230
+ You have access to the following functions:
231
+
232
+ {tool_prompt_str}
233
+
234
+ If you choose to call a function ONLY reply in the following format with no prefix or suffix:
235
+
236
+ <function=example_function_name>{{"example_name": "example_value"}}</function>
237
+
238
+ Reminder:
239
+ - Function calls MUST follow the specified format, start with <function= and end with </function>
240
+ - Required parameters MUST be specified
241
+ - Only call one function at a time
242
+ - Put the entire function call reply on one line
243
+ - If there is no function call available, answer the question like normal with your current knowledge and do not tell the user about function calls
244
+ """
245
+ '''
246
+ return final_prompt
247
+
248
+ def _parse_tool_response(self, response: str):
249
+ function_regex = r"<function=(\w+)>(.*?)</function>"
250
+ match = re.search(function_regex, response)
251
+
252
+ if match:
253
+ function_name, args_string = match.groups()
254
+ try:
255
+ args = json.loads(args_string)
256
+ return {"function": function_name, "arguments": args}
257
+ except json.JSONDecodeError as error:
258
+ print(f"Error parsing function arguments: {error}")
259
+ return None
260
+ return None
261
+
209
262
  def reset(self):
210
263
  r"""Resets the :obj:`ChatAgent` to its initial state and returns the
211
264
  stored messages.
@@ -367,89 +420,221 @@ class ChatAgent(BaseAgent):
367
420
  a boolean indicating whether the chat session has terminated,
368
421
  and information about the chat session.
369
422
  """
370
- self.update_memory(input_message, OpenAIBackendRole.USER)
423
+ if (
424
+ isinstance(self.model_type, ModelType)
425
+ and "lama" in self.model_type.value
426
+ or isinstance(self.model_type, str)
427
+ and "lama" in self.model_type
428
+ ):
429
+ if self.model_backend.model_config_dict.get("tools", None):
430
+ tool_prompt = self._generate_tool_prompt(self.tool_schema_list)
431
+
432
+ tool_sys_msg = BaseMessage.make_assistant_message(
433
+ role_name="Assistant",
434
+ content=tool_prompt,
435
+ )
371
436
 
372
- tool_call_records: List[FunctionCallingRecord] = []
373
- while True:
374
- # Check if token has exceeded
375
- try:
376
- openai_messages, num_tokens = self.memory.get_context()
377
- except RuntimeError as e:
378
- return self._step_token_exceed(
379
- e.args[1], tool_call_records, "max_tokens_exceeded"
437
+ self.update_memory(tool_sys_msg, OpenAIBackendRole.SYSTEM)
438
+
439
+ self.update_memory(input_message, OpenAIBackendRole.USER)
440
+
441
+ tool_call_records: List[FunctionCallingRecord] = []
442
+ while True:
443
+ # Check if token has exceeded
444
+ try:
445
+ openai_messages, num_tokens = self.memory.get_context()
446
+ except RuntimeError as e:
447
+ return self._step_token_exceed(
448
+ e.args[1], tool_call_records, "max_tokens_exceeded"
449
+ )
450
+
451
+ (
452
+ response,
453
+ output_messages,
454
+ finish_reasons,
455
+ usage_dict,
456
+ response_id,
457
+ ) = self._step_model_response(openai_messages, num_tokens)
458
+ # If the model response is not a function call, meaning the
459
+ # model has generated a message response, break the loop
460
+ if (
461
+ not self.is_tools_added()
462
+ or not isinstance(response, ChatCompletion)
463
+ or "</function>" not in response.choices[0].message.content # type: ignore[operator]
464
+ ):
465
+ break
466
+
467
+ parsed_content = self._parse_tool_response(
468
+ response.choices[0].message.content # type: ignore[arg-type]
380
469
  )
381
470
 
382
- (
383
- response,
471
+ response.choices[0].message.tool_calls = [
472
+ ChatCompletionMessageToolCall(
473
+ id=str(uuid.uuid4()),
474
+ function=Function(
475
+ arguments=str(parsed_content["arguments"]).replace(
476
+ "'", '"'
477
+ ),
478
+ name=str(parsed_content["function"]),
479
+ ),
480
+ type="function",
481
+ )
482
+ ]
483
+
484
+ # Check for external tool call
485
+ tool_call_request = response.choices[0].message.tool_calls[0]
486
+ if tool_call_request.function.name in self.external_tool_names:
487
+ # if model calls an external tool, directly return the
488
+ # request
489
+ info = self._step_get_info(
490
+ output_messages,
491
+ finish_reasons,
492
+ usage_dict,
493
+ response_id,
494
+ tool_call_records,
495
+ num_tokens,
496
+ tool_call_request,
497
+ )
498
+ return ChatAgentResponse(
499
+ msgs=output_messages,
500
+ terminated=self.terminated,
501
+ info=info,
502
+ )
503
+
504
+ # Normal function calling
505
+ tool_call_records.append(
506
+ self._step_tool_call_and_update(response)
507
+ )
508
+
509
+ if (
510
+ output_schema is not None
511
+ and self.model_type.supports_tool_calling
512
+ ):
513
+ (
514
+ output_messages,
515
+ finish_reasons,
516
+ usage_dict,
517
+ response_id,
518
+ tool_call,
519
+ num_tokens,
520
+ ) = self._structure_output_with_function(output_schema)
521
+ tool_call_records.append(tool_call)
522
+
523
+ info = self._step_get_info(
384
524
  output_messages,
385
525
  finish_reasons,
386
526
  usage_dict,
387
527
  response_id,
388
- ) = self._step_model_response(openai_messages, num_tokens)
528
+ tool_call_records,
529
+ num_tokens,
530
+ )
389
531
 
390
- # If the model response is not a function call, meaning the model
391
- # has generated a message response, break the loop
392
- if (
393
- not self.is_tools_added()
394
- or not isinstance(response, ChatCompletion)
395
- or response.choices[0].message.tool_calls is None
396
- ):
397
- break
532
+ if len(output_messages) == 1:
533
+ # Auto record if the output result is a single message
534
+ self.record_message(output_messages[0])
535
+ else:
536
+ logger.warning(
537
+ "Multiple messages returned in `step()`, message won't be "
538
+ "recorded automatically. Please call `record_message()` "
539
+ "to record the selected message manually."
540
+ )
398
541
 
399
- # Check for external tool call
400
- tool_call_request = response.choices[0].message.tool_calls[0]
401
- if tool_call_request.function.name in self.external_tool_names:
402
- # if model calls an external tool, directly return the request
403
- info = self._step_get_info(
542
+ return ChatAgentResponse(
543
+ msgs=output_messages, terminated=self.terminated, info=info
544
+ )
545
+
546
+ else:
547
+ self.update_memory(input_message, OpenAIBackendRole.USER)
548
+
549
+ tool_call_records: List[FunctionCallingRecord] = [] # type: ignore[no-redef]
550
+ while True:
551
+ # Check if token has exceeded
552
+ try:
553
+ openai_messages, num_tokens = self.memory.get_context()
554
+ except RuntimeError as e:
555
+ return self._step_token_exceed(
556
+ e.args[1], tool_call_records, "max_tokens_exceeded"
557
+ )
558
+
559
+ (
560
+ response,
404
561
  output_messages,
405
562
  finish_reasons,
406
563
  usage_dict,
407
564
  response_id,
408
- tool_call_records,
409
- num_tokens,
410
- tool_call_request,
411
- )
412
- return ChatAgentResponse(
413
- msgs=output_messages, terminated=self.terminated, info=info
565
+ ) = self._step_model_response(openai_messages, num_tokens)
566
+ # If the model response is not a function call, meaning the
567
+ # model has generated a message response, break the loop
568
+ if (
569
+ not self.is_tools_added()
570
+ or not isinstance(response, ChatCompletion)
571
+ or response.choices[0].message.tool_calls is None
572
+ ):
573
+ break
574
+
575
+ # Check for external tool call
576
+ tool_call_request = response.choices[0].message.tool_calls[0]
577
+
578
+ if tool_call_request.function.name in self.external_tool_names:
579
+ # if model calls an external tool, directly return the
580
+ # request
581
+ info = self._step_get_info(
582
+ output_messages,
583
+ finish_reasons,
584
+ usage_dict,
585
+ response_id,
586
+ tool_call_records,
587
+ num_tokens,
588
+ tool_call_request,
589
+ )
590
+ return ChatAgentResponse(
591
+ msgs=output_messages,
592
+ terminated=self.terminated,
593
+ info=info,
594
+ )
595
+
596
+ # Normal function calling
597
+ tool_call_records.append(
598
+ self._step_tool_call_and_update(response)
414
599
  )
415
600
 
416
- # Normal function calling
417
- tool_call_records.append(self._step_tool_call_and_update(response))
601
+ if (
602
+ output_schema is not None
603
+ and self.model_type.supports_tool_calling
604
+ ):
605
+ (
606
+ output_messages,
607
+ finish_reasons,
608
+ usage_dict,
609
+ response_id,
610
+ tool_call,
611
+ num_tokens,
612
+ ) = self._structure_output_with_function(output_schema)
613
+ tool_call_records.append(tool_call)
418
614
 
419
- if output_schema is not None and self.model_type.supports_tool_calling:
420
- (
615
+ info = self._step_get_info(
421
616
  output_messages,
422
617
  finish_reasons,
423
618
  usage_dict,
424
619
  response_id,
425
- tool_call,
620
+ tool_call_records,
426
621
  num_tokens,
427
- ) = self._structure_output_with_function(output_schema)
428
- tool_call_records.append(tool_call)
622
+ )
429
623
 
430
- info = self._step_get_info(
431
- output_messages,
432
- finish_reasons,
433
- usage_dict,
434
- response_id,
435
- tool_call_records,
436
- num_tokens,
437
- )
624
+ if len(output_messages) == 1:
625
+ # Auto record if the output result is a single message
626
+ self.record_message(output_messages[0])
627
+ else:
628
+ logger.warning(
629
+ "Multiple messages returned in `step()`, message won't be "
630
+ "recorded automatically. Please call `record_message()` "
631
+ "to record the selected message manually."
632
+ )
438
633
 
439
- if len(output_messages) == 1:
440
- # Auto record if the output result is a single message
441
- self.record_message(output_messages[0])
442
- else:
443
- logger.warning(
444
- "Multiple messages returned in `step()`, message won't be "
445
- "recorded automatically. Please call `record_message()` to "
446
- "record the selected message manually."
634
+ return ChatAgentResponse(
635
+ msgs=output_messages, terminated=self.terminated, info=info
447
636
  )
448
637
 
449
- return ChatAgentResponse(
450
- msgs=output_messages, terminated=self.terminated, info=info
451
- )
452
-
453
638
  async def step_async(
454
639
  self,
455
640
  input_message: BaseMessage,
camel/bots/__init__.py ADDED
@@ -0,0 +1,20 @@
1
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
2
+ # Licensed under the Apache License, Version 2.0 (the “License”);
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an “AS IS” BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
+ from .discord_bot import DiscordBot
15
+ from .telegram_bot import TelegramBot
16
+
17
+ __all__ = [
18
+ 'DiscordBot',
19
+ 'TelegramBot',
20
+ ]
@@ -0,0 +1,206 @@
1
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
2
+ # Licensed under the Apache License, Version 2.0 (the “License”);
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an “AS IS” BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
+ import os
15
+ from typing import TYPE_CHECKING, List, Optional, Union
16
+
17
+ from camel.agents import ChatAgent
18
+ from camel.messages import BaseMessage
19
+ from camel.retrievers import AutoRetriever
20
+ from camel.utils import dependencies_required
21
+
22
+ try:
23
+ from unstructured.documents.elements import Element
24
+ except ImportError:
25
+ Element = None
26
+
27
+ if TYPE_CHECKING:
28
+ from discord import Message
29
+
30
+
31
+ class DiscordBot:
32
+ r"""Represents a Discord bot that is powered by a CAMEL `ChatAgent`.
33
+
34
+ Attributes:
35
+ chat_agent (ChatAgent): Chat agent that will power the bot.
36
+ channel_ids (List[int], optional): The channel IDs that the bot will
37
+ listen to.
38
+ discord_token (str, optional): The bot token.
39
+ auto_retriever (AutoRetriever): AutoRetriever instance for RAG.
40
+ vector_storage_local_path (Union[str, List[str]]): The paths to the
41
+ contents for RAG.
42
+ top_k (int): Top choice for the RAG response.
43
+ return_detailed_info (bool): If show detailed info of the RAG response.
44
+ contents (Union[str, List[str], Element, List[Element]], optional):
45
+ Local file paths, remote URLs, string contents or Element objects.
46
+ """
47
+
48
+ @dependencies_required('discord')
49
+ def __init__(
50
+ self,
51
+ chat_agent: ChatAgent,
52
+ contents: Union[str, List[str], Element, List[Element]] = None,
53
+ channel_ids: Optional[List[int]] = None,
54
+ discord_token: Optional[str] = None,
55
+ auto_retriever: Optional[AutoRetriever] = None,
56
+ vector_storage_local_path: Union[str, List[str]] = "",
57
+ top_k: int = 1,
58
+ return_detailed_info: bool = True,
59
+ ) -> None:
60
+ self.chat_agent = chat_agent
61
+ self.token = discord_token or os.getenv('DISCORD_TOKEN')
62
+ self.channel_ids = channel_ids
63
+ self.auto_retriever = auto_retriever
64
+ self.vector_storage_local_path = vector_storage_local_path
65
+ self.top_k = top_k
66
+ self.return_detailed_info = return_detailed_info
67
+ self.contents = contents
68
+
69
+ if not self.token:
70
+ raise ValueError(
71
+ "`DISCORD_TOKEN` not found in environment variables. Get it"
72
+ " here: `https://discord.com/developers/applications`."
73
+ )
74
+
75
+ import discord
76
+
77
+ intents = discord.Intents.default()
78
+ intents.message_content = True
79
+ self.client = discord.Client(intents=intents)
80
+
81
+ # Register event handlers
82
+ self.client.event(self.on_ready)
83
+ self.client.event(self.on_message)
84
+
85
+ def run(self) -> None:
86
+ r"""Start the Discord bot using its token.
87
+
88
+ This method starts the Discord bot by running the client with the
89
+ provided token.
90
+ """
91
+ self.client.run(self.token) # type: ignore[arg-type]
92
+
93
+ async def on_ready(self) -> None:
94
+ r"""This method is called when the bot has successfully connected to
95
+ the Discord server.
96
+
97
+ It prints a message indicating that the bot has logged in and displays
98
+ the username of the bot.
99
+ """
100
+ print(f'We have logged in as {self.client.user}')
101
+
102
+ async def on_message(self, message: 'Message') -> None:
103
+ r"""Event handler for when a message is received.
104
+
105
+ Args:
106
+ message (discord.Message): The message object received.
107
+ """
108
+
109
+ # If the message author is the bot itself,
110
+ # do not respond to this message
111
+ if message.author == self.client.user:
112
+ return
113
+
114
+ # If allowed channel IDs are provided,
115
+ # only respond to messages in those channels
116
+ if self.channel_ids and message.channel.id not in self.channel_ids:
117
+ return
118
+
119
+ # Only respond to messages that mention the bot
120
+ if not self.client.user or not self.client.user.mentioned_in(message):
121
+ return
122
+
123
+ user_raw_msg = message.content
124
+
125
+ if self.auto_retriever:
126
+ retrieved_content = self.auto_retriever.run_vector_retriever(
127
+ query=user_raw_msg,
128
+ contents=self.contents,
129
+ top_k=self.top_k,
130
+ return_detailed_info=self.return_detailed_info,
131
+ )
132
+ user_raw_msg = (
133
+ f"Here is the query to you: {user_raw_msg}\n"
134
+ f"Based on the retrieved content: {retrieved_content}, \n"
135
+ f"answer the query from {message.author.name}"
136
+ )
137
+
138
+ user_msg = BaseMessage.make_user_message(
139
+ role_name="User", content=user_raw_msg
140
+ )
141
+ assistant_response = self.chat_agent.step(user_msg)
142
+ await message.channel.send(assistant_response.msg.content)
143
+
144
+
145
+ if __name__ == "__main__":
146
+ assistant_sys_msg = BaseMessage.make_assistant_message(
147
+ role_name="Assistant",
148
+ content='''
149
+ Objective:
150
+ You are a customer service bot designed to assist users
151
+ with inquiries related to our open-source project.
152
+ Your responses should be informative, concise, and helpful.
153
+
154
+ Instructions:
155
+ Understand User Queries: Carefully read and understand the
156
+ user's question. Focus on keywords and context to
157
+ determine the user's intent.
158
+ Search for Relevant Information: Use the provided dataset
159
+ and refer to the RAG file to find answers that
160
+ closely match the user's query. The RAG file contains
161
+ detailed interactions and should be your primary
162
+ resource for crafting responses.
163
+ Provide Clear and Concise Responses: Your answers should
164
+ be clear and to the point. Avoid overly technical
165
+ language unless the user's query indicates
166
+ familiarity with technical terms.
167
+ Encourage Engagement: Where applicable, encourage users
168
+ to contribute to the project or seek further
169
+ assistance.
170
+
171
+ Response Structure:
172
+ Greeting: Begin with a polite greeting or acknowledgment.
173
+ Main Response: Provide the main answer to the user's query.
174
+ Additional Information: Offer any extra tips or direct the
175
+ user to additional resources if necessary.
176
+ Closing: Close the response politely, encouraging
177
+ further engagement if appropriate.
178
+ bd
179
+ Tone:
180
+ Professional: Maintain a professional tone that
181
+ instills confidence in the user.
182
+ Friendly: Be approachable and friendly to make users
183
+ feel comfortable.
184
+ Helpful: Always aim to be as helpful as possible,
185
+ guiding users to solutions.
186
+ ''',
187
+ )
188
+
189
+ agent = ChatAgent(
190
+ assistant_sys_msg,
191
+ message_window_size=10,
192
+ )
193
+ # Uncommented the folowing code and offer storage information
194
+ # for RAG functionality
195
+
196
+ # auto_retriever = AutoRetriever(
197
+ # vector_storage_local_path="examples/bots",
198
+ # storage_type=StorageType.QDRANT,
199
+ # )
200
+
201
+ bot = DiscordBot(
202
+ agent,
203
+ # auto_retriever=auto_retriever,
204
+ # vector_storage_local_path=["local_data/"],
205
+ )
206
+ bot.run()
@@ -0,0 +1,82 @@
1
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
2
+ # Licensed under the Apache License, Version 2.0 (the “License”);
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an “AS IS” BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
+ import os
15
+ from typing import TYPE_CHECKING, Optional
16
+
17
+ from camel.agents import ChatAgent
18
+ from camel.messages import BaseMessage
19
+ from camel.utils import dependencies_required
20
+
21
+ # Conditionally import telebot types only for type checking
22
+ if TYPE_CHECKING:
23
+ from telebot.types import ( # type: ignore[import-untyped]
24
+ Message,
25
+ )
26
+
27
+
28
+ class TelegramBot:
29
+ r"""Represents a Telegram bot that is powered by an agent.
30
+
31
+ Attributes:
32
+ chat_agent (ChatAgent): Chat agent that will power the bot.
33
+ telegram_token (str, optional): The bot token.
34
+ """
35
+
36
+ @dependencies_required('telebot')
37
+ def __init__(
38
+ self,
39
+ chat_agent: ChatAgent,
40
+ telegram_token: Optional[str] = None,
41
+ ) -> None:
42
+ self.chat_agent = chat_agent
43
+
44
+ if not telegram_token:
45
+ self.token = os.getenv('TELEGRAM_TOKEN')
46
+ if not self.token:
47
+ raise ValueError(
48
+ "`TELEGRAM_TOKEN` not found in environment variables. "
49
+ "Get it from t.me/BotFather."
50
+ )
51
+ else:
52
+ self.token = telegram_token
53
+
54
+ import telebot # type: ignore[import-untyped]
55
+
56
+ self.bot = telebot.TeleBot(token=self.token)
57
+
58
+ # Register the message handler within the constructor
59
+ self.bot.message_handler(func=lambda message: True)(self.on_message)
60
+
61
+ def run(self) -> None:
62
+ r"""Start the Telegram bot."""
63
+ print("Telegram bot is running...")
64
+ self.bot.infinity_polling()
65
+
66
+ def on_message(self, message: 'Message') -> None:
67
+ r"""Handles incoming messages from the user.
68
+
69
+ Args:
70
+ message (types.Message): The incoming message object.
71
+ """
72
+ self.chat_agent.reset()
73
+
74
+ if not message.text:
75
+ return
76
+
77
+ user_msg = BaseMessage.make_user_message(
78
+ role_name="User", content=message.text
79
+ )
80
+ assistant_response = self.chat_agent.step(user_msg)
81
+
82
+ self.bot.reply_to(message, assistant_response.msg.content)