camel-ai 0.2.3a1__py3-none-any.whl → 0.2.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of camel-ai might be problematic. Click here for more details.

Files changed (87)
  1. camel/__init__.py +1 -1
  2. camel/agents/chat_agent.py +93 -69
  3. camel/agents/knowledge_graph_agent.py +4 -6
  4. camel/bots/__init__.py +16 -2
  5. camel/bots/discord_app.py +138 -0
  6. camel/bots/slack/__init__.py +30 -0
  7. camel/bots/slack/models.py +158 -0
  8. camel/bots/slack/slack_app.py +255 -0
  9. camel/configs/__init__.py +1 -2
  10. camel/configs/anthropic_config.py +2 -5
  11. camel/configs/base_config.py +6 -6
  12. camel/configs/groq_config.py +2 -3
  13. camel/configs/ollama_config.py +1 -2
  14. camel/configs/openai_config.py +2 -23
  15. camel/configs/samba_config.py +2 -2
  16. camel/configs/togetherai_config.py +1 -1
  17. camel/configs/vllm_config.py +1 -1
  18. camel/configs/zhipuai_config.py +2 -3
  19. camel/embeddings/openai_embedding.py +2 -2
  20. camel/loaders/__init__.py +2 -0
  21. camel/loaders/chunkr_reader.py +163 -0
  22. camel/loaders/firecrawl_reader.py +3 -3
  23. camel/loaders/unstructured_io.py +35 -33
  24. camel/messages/__init__.py +1 -0
  25. camel/models/__init__.py +2 -4
  26. camel/models/anthropic_model.py +32 -26
  27. camel/models/azure_openai_model.py +39 -36
  28. camel/models/base_model.py +31 -20
  29. camel/models/gemini_model.py +37 -29
  30. camel/models/groq_model.py +29 -23
  31. camel/models/litellm_model.py +44 -61
  32. camel/models/mistral_model.py +32 -29
  33. camel/models/model_factory.py +66 -76
  34. camel/models/nemotron_model.py +33 -23
  35. camel/models/ollama_model.py +42 -47
  36. camel/models/{openai_compatibility_model.py → openai_compatible_model.py} +31 -49
  37. camel/models/openai_model.py +48 -29
  38. camel/models/reka_model.py +30 -28
  39. camel/models/samba_model.py +82 -177
  40. camel/models/stub_model.py +2 -2
  41. camel/models/togetherai_model.py +37 -43
  42. camel/models/vllm_model.py +43 -50
  43. camel/models/zhipuai_model.py +33 -27
  44. camel/retrievers/auto_retriever.py +29 -97
  45. camel/retrievers/vector_retriever.py +58 -47
  46. camel/societies/babyagi_playing.py +6 -3
  47. camel/societies/role_playing.py +5 -3
  48. camel/storages/graph_storages/graph_element.py +2 -2
  49. camel/storages/key_value_storages/json.py +6 -1
  50. camel/toolkits/__init__.py +20 -7
  51. camel/toolkits/arxiv_toolkit.py +155 -0
  52. camel/toolkits/ask_news_toolkit.py +653 -0
  53. camel/toolkits/base.py +2 -3
  54. camel/toolkits/code_execution.py +6 -7
  55. camel/toolkits/dalle_toolkit.py +6 -6
  56. camel/toolkits/{openai_function.py → function_tool.py} +34 -11
  57. camel/toolkits/github_toolkit.py +9 -10
  58. camel/toolkits/google_maps_toolkit.py +7 -7
  59. camel/toolkits/google_scholar_toolkit.py +146 -0
  60. camel/toolkits/linkedin_toolkit.py +7 -7
  61. camel/toolkits/math_toolkit.py +8 -8
  62. camel/toolkits/open_api_toolkit.py +5 -5
  63. camel/toolkits/reddit_toolkit.py +7 -7
  64. camel/toolkits/retrieval_toolkit.py +5 -5
  65. camel/toolkits/search_toolkit.py +9 -9
  66. camel/toolkits/slack_toolkit.py +11 -11
  67. camel/toolkits/twitter_toolkit.py +378 -452
  68. camel/toolkits/weather_toolkit.py +6 -6
  69. camel/toolkits/whatsapp_toolkit.py +177 -0
  70. camel/types/__init__.py +6 -1
  71. camel/types/enums.py +40 -85
  72. camel/types/openai_types.py +3 -0
  73. camel/types/unified_model_type.py +104 -0
  74. camel/utils/__init__.py +0 -2
  75. camel/utils/async_func.py +7 -7
  76. camel/utils/commons.py +32 -3
  77. camel/utils/token_counting.py +30 -212
  78. camel/workforce/role_playing_worker.py +1 -1
  79. camel/workforce/single_agent_worker.py +1 -1
  80. camel/workforce/task_channel.py +4 -3
  81. camel/workforce/workforce.py +4 -4
  82. camel_ai-0.2.4.dist-info/LICENSE +201 -0
  83. {camel_ai-0.2.3a1.dist-info → camel_ai-0.2.4.dist-info}/METADATA +27 -56
  84. {camel_ai-0.2.3a1.dist-info → camel_ai-0.2.4.dist-info}/RECORD +85 -76
  85. {camel_ai-0.2.3a1.dist-info → camel_ai-0.2.4.dist-info}/WHEEL +1 -1
  86. camel/bots/discord_bot.py +0 -206
  87. camel/models/open_source_model.py +0 -170
camel/__init__.py CHANGED
@@ -12,7 +12,7 @@
12
12
  # limitations under the License.
13
13
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
14
 
15
- __version__ = '0.2.2'
15
+ __version__ = '0.2.4'
16
16
 
17
17
  __all__ = [
18
18
  '__version__',
@@ -34,7 +34,6 @@ from openai.types.chat.chat_completion_message_tool_call import Function
34
34
  from pydantic import BaseModel
35
35
 
36
36
  from camel.agents.base import BaseAgent
37
- from camel.configs import ChatGPTConfig
38
37
  from camel.memories import (
39
38
  AgentMemory,
40
39
  ChatHistoryMemory,
@@ -63,7 +62,7 @@ if TYPE_CHECKING:
63
62
  from openai import Stream
64
63
 
65
64
  from camel.terminators import ResponseTerminator
66
- from camel.toolkits import OpenAIFunction
65
+ from camel.toolkits import FunctionTool
67
66
 
68
67
 
69
68
  logger = logging.getLogger(__name__)
@@ -115,7 +114,8 @@ class ChatAgent(BaseAgent):
115
114
  r"""Class for managing conversations of CAMEL Chat Agents.
116
115
 
117
116
  Args:
118
- system_message (BaseMessage): The system message for the chat agent.
117
+ system_message (Union[BaseMessage, str], optional): The system message
118
+ for the chat agent.
119
119
  model (BaseModelBackend, optional): The model backend to use for
120
120
  generating responses. (default: :obj:`OpenAIModel` with
121
121
  `GPT_4O_MINI`)
@@ -131,10 +131,10 @@ class ChatAgent(BaseAgent):
131
131
  (default: :obj:`None`)
132
132
  output_language (str, optional): The language to be output by the
133
133
  agent. (default: :obj:`None`)
134
- tools (List[OpenAIFunction], optional): List of available
135
- :obj:`OpenAIFunction`. (default: :obj:`None`)
136
- external_tools (List[OpenAIFunction], optional): List of external tools
137
- (:obj:`OpenAIFunction`) bind to one chat agent. When these tools
134
+ tools (List[FunctionTool], optional): List of available
135
+ :obj:`FunctionTool`. (default: :obj:`None`)
136
+ external_tools (List[FunctionTool], optional): List of external tools
137
+ (:obj:`FunctionTool`) bind to one chat agent. When these tools
138
138
  are called, the agent will directly return the request instead of
139
139
  processing it. (default: :obj:`None`)
140
140
  response_terminators (List[ResponseTerminator], optional): List of
@@ -144,34 +144,42 @@ class ChatAgent(BaseAgent):
144
144
 
145
145
  def __init__(
146
146
  self,
147
- system_message: BaseMessage,
147
+ system_message: Optional[Union[BaseMessage, str]] = None,
148
148
  model: Optional[BaseModelBackend] = None,
149
149
  memory: Optional[AgentMemory] = None,
150
150
  message_window_size: Optional[int] = None,
151
151
  token_limit: Optional[int] = None,
152
152
  output_language: Optional[str] = None,
153
- tools: Optional[List[OpenAIFunction]] = None,
154
- external_tools: Optional[List[OpenAIFunction]] = None,
153
+ tools: Optional[List[FunctionTool]] = None,
154
+ external_tools: Optional[List[FunctionTool]] = None,
155
155
  response_terminators: Optional[List[ResponseTerminator]] = None,
156
156
  ) -> None:
157
- self.orig_sys_message: BaseMessage = system_message
158
- self.system_message = system_message
159
- self.role_name: str = system_message.role_name
160
- self.role_type: RoleType = system_message.role_type
157
+ if isinstance(system_message, str):
158
+ system_message = BaseMessage.make_assistant_message(
159
+ role_name='Assistant', content=system_message
160
+ )
161
+
162
+ self.orig_sys_message: Optional[BaseMessage] = system_message
163
+ self._system_message: Optional[BaseMessage] = system_message
164
+ self.role_name: str = (
165
+ getattr(system_message, 'role_name', None) or "assistant"
166
+ )
167
+ self.role_type: RoleType = (
168
+ getattr(system_message, 'role_type', None) or RoleType.ASSISTANT
169
+ )
161
170
  self.model_backend: BaseModelBackend = (
162
171
  model
163
172
  if model is not None
164
173
  else ModelFactory.create(
165
- model_platform=ModelPlatformType.OPENAI,
166
- model_type=ModelType.GPT_4O_MINI,
167
- model_config_dict=ChatGPTConfig().as_dict(),
174
+ model_platform=ModelPlatformType.DEFAULT,
175
+ model_type=ModelType.DEFAULT,
168
176
  )
169
177
  )
170
178
  self.output_language: Optional[str] = output_language
171
179
  if self.output_language is not None:
172
180
  self.set_output_language(self.output_language)
173
181
 
174
- self.model_type: ModelType = self.model_backend.model_type
182
+ self.model_type = self.model_backend.model_type
175
183
 
176
184
  # tool registration
177
185
  external_tools = external_tools or []
@@ -272,11 +280,12 @@ class ChatAgent(BaseAgent):
272
280
  terminator.reset()
273
281
 
274
282
  @property
275
- def system_message(self) -> BaseMessage:
283
+ def system_message(self) -> Optional[BaseMessage]:
276
284
  r"""The getter method for the property :obj:`system_message`.
277
285
 
278
286
  Returns:
279
- BaseMessage: The system message of this agent.
287
+ Optional[BaseMessage]: The system message of this agent if set,
288
+ else :obj:`None`.
280
289
  """
281
290
  return self._system_message
282
291
 
@@ -327,12 +336,22 @@ class ChatAgent(BaseAgent):
327
336
  BaseMessage: The updated system message object.
328
337
  """
329
338
  self.output_language = output_language
330
- content = self.orig_sys_message.content + (
339
+ language_prompt = (
331
340
  "\nRegardless of the input language, "
332
341
  f"you must output text in {output_language}."
333
342
  )
334
- self.system_message = self.system_message.create_new_instance(content)
335
- return self.system_message
343
+ if self.orig_sys_message is not None:
344
+ content = self.orig_sys_message.content + language_prompt
345
+ self._system_message = self.orig_sys_message.create_new_instance(
346
+ content
347
+ )
348
+ return self._system_message
349
+ else:
350
+ self._system_message = BaseMessage.make_assistant_message(
351
+ role_name="Assistant",
352
+ content=language_prompt,
353
+ )
354
+ return self._system_message
336
355
 
337
356
  def get_info(
338
357
  self,
@@ -377,12 +396,15 @@ class ChatAgent(BaseAgent):
377
396
  r"""Initializes the stored messages list with the initial system
378
397
  message.
379
398
  """
380
- system_record = MemoryRecord(
381
- message=self.system_message,
382
- role_at_backend=OpenAIBackendRole.SYSTEM,
383
- )
384
- self.memory.clear()
385
- self.memory.write_record(system_record)
399
+ if self.orig_sys_message is not None:
400
+ system_record = MemoryRecord(
401
+ message=self.orig_sys_message,
402
+ role_at_backend=OpenAIBackendRole.SYSTEM,
403
+ )
404
+ self.memory.clear()
405
+ self.memory.write_record(system_record)
406
+ else:
407
+ self.memory.clear()
386
408
 
387
409
  def record_message(self, message: BaseMessage) -> None:
388
410
  r"""Records the externally provided message into the agent memory as if
@@ -397,19 +419,19 @@ class ChatAgent(BaseAgent):
397
419
 
398
420
  def step(
399
421
  self,
400
- input_message: BaseMessage,
401
- output_schema: Optional[Type[BaseModel]] = None,
422
+ input_message: Union[BaseMessage, str],
423
+ response_format: Optional[Type[BaseModel]] = None,
402
424
  ) -> ChatAgentResponse:
403
425
  r"""Performs a single step in the chat session by generating a response
404
426
  to the input message.
405
427
 
406
428
  Args:
407
- input_message (BaseMessage): The input message to the agent.
408
- Its `role` field that specifies the role at backend may be
409
- either `user` or `assistant` but it will be set to `user`
410
- anyway since for the self agent any incoming message is
411
- external.
412
- output_schema (Optional[Type[BaseModel]], optional): A pydantic
429
+ input_message (Union[BaseMessage, str]): The input message to the
430
+ agent. For BaseMessage input, its `role` field that specifies
431
+ the role at backend may be either `user` or `assistant` but it
432
+ will be set to `user` anyway since for the self agent any
433
+ incoming message is external. For str input, the `role_name` would be `User`.
434
+ response_format (Optional[Type[BaseModel]], optional): A pydantic
413
435
  model class that includes value types and field descriptions
414
436
  used to generate a structured response by LLM. This schema
415
437
  helps in defining the expected output format. (default:
@@ -420,12 +442,12 @@ class ChatAgent(BaseAgent):
420
442
  a boolean indicating whether the chat session has terminated,
421
443
  and information about the chat session.
422
444
  """
423
- if (
424
- isinstance(self.model_type, ModelType)
425
- and "lama" in self.model_type.value
426
- or isinstance(self.model_type, str)
427
- and "lama" in self.model_type
428
- ):
445
+ if isinstance(input_message, str):
446
+ input_message = BaseMessage.make_user_message(
447
+ role_name='User', content=input_message
448
+ )
449
+
450
+ if "llama" in self.model_type.lower():
429
451
  if self.model_backend.model_config_dict.get("tools", None):
430
452
  tool_prompt = self._generate_tool_prompt(self.tool_schema_list)
431
453
 
@@ -506,10 +528,7 @@ class ChatAgent(BaseAgent):
506
528
  self._step_tool_call_and_update(response)
507
529
  )
508
530
 
509
- if (
510
- output_schema is not None
511
- and self.model_type.supports_tool_calling
512
- ):
531
+ if response_format is not None:
513
532
  (
514
533
  output_messages,
515
534
  finish_reasons,
@@ -517,7 +536,7 @@ class ChatAgent(BaseAgent):
517
536
  response_id,
518
537
  tool_call,
519
538
  num_tokens,
520
- ) = self._structure_output_with_function(output_schema)
539
+ ) = self._structure_output_with_function(response_format)
521
540
  tool_call_records.append(tool_call)
522
541
 
523
542
  info = self._step_get_info(
@@ -599,8 +618,8 @@ class ChatAgent(BaseAgent):
599
618
  )
600
619
 
601
620
  if (
602
- output_schema is not None
603
- and self.model_type.supports_tool_calling
621
+ response_format is not None
622
+ and self.model_type.support_native_tool_calling
604
623
  ):
605
624
  (
606
625
  output_messages,
@@ -609,7 +628,7 @@ class ChatAgent(BaseAgent):
609
628
  response_id,
610
629
  tool_call,
611
630
  num_tokens,
612
- ) = self._structure_output_with_function(output_schema)
631
+ ) = self._structure_output_with_function(response_format)
613
632
  tool_call_records.append(tool_call)
614
633
 
615
634
  info = self._step_get_info(
@@ -637,19 +656,19 @@ class ChatAgent(BaseAgent):
637
656
 
638
657
  async def step_async(
639
658
  self,
640
- input_message: BaseMessage,
641
- output_schema: Optional[Type[BaseModel]] = None,
659
+ input_message: Union[BaseMessage, str],
660
+ response_format: Optional[Type[BaseModel]] = None,
642
661
  ) -> ChatAgentResponse:
643
662
  r"""Performs a single step in the chat session by generating a response
644
663
  to the input message. This agent step can call async function calls.
645
664
 
646
665
  Args:
647
- input_message (BaseMessage): The input message to the agent.
648
- Its `role` field that specifies the role at backend may be
649
- either `user` or `assistant` but it will be set to `user`
650
- anyway since for the self agent any incoming message is
651
- external.
652
- output_schema (Optional[Type[BaseModel]], optional): A pydantic
666
+ input_message (Union[BaseMessage, str]): The input message to the
667
+ agent. For BaseMessage input, its `role` field that specifies
668
+ the role at backend may be either `user` or `assistant` but it
669
+ will be set to `user` anyway since for the self agent any
670
+ incoming message is external. For str input, the `role_name` would be `User`.
671
+ response_format (Optional[Type[BaseModel]], optional): A pydantic
653
672
  model class that includes value types and field descriptions
654
673
  used to generate a structured response by LLM. This schema
655
674
  helps in defining the expected output format. (default:
@@ -660,6 +679,11 @@ class ChatAgent(BaseAgent):
660
679
  a boolean indicating whether the chat session has terminated,
661
680
  and information about the chat session.
662
681
  """
682
+ if isinstance(input_message, str):
683
+ input_message = BaseMessage.make_user_message(
684
+ role_name='User', content=input_message
685
+ )
686
+
663
687
  self.update_memory(input_message, OpenAIBackendRole.USER)
664
688
 
665
689
  tool_call_records: List[FunctionCallingRecord] = []
@@ -708,7 +732,10 @@ class ChatAgent(BaseAgent):
708
732
  await self._step_tool_call_and_update_async(response)
709
733
  )
710
734
 
711
- if output_schema is not None and self.model_type.supports_tool_calling:
735
+ if (
736
+ response_format is not None
737
+ and self.model_type.support_native_tool_calling
738
+ ):
712
739
  (
713
740
  output_messages,
714
741
  finish_reasons,
@@ -716,7 +743,7 @@ class ChatAgent(BaseAgent):
716
743
  response_id,
717
744
  tool_call_record,
718
745
  num_tokens,
719
- ) = self._structure_output_with_function(output_schema)
746
+ ) = self._structure_output_with_function(response_format)
720
747
  tool_call_records.append(tool_call_record)
721
748
 
722
749
  info = self._step_get_info(
@@ -783,7 +810,7 @@ class ChatAgent(BaseAgent):
783
810
  return func_record
784
811
 
785
812
  def _structure_output_with_function(
786
- self, output_schema: Type[BaseModel]
813
+ self, response_format: Type[BaseModel]
787
814
  ) -> Tuple[
788
815
  List[BaseMessage],
789
816
  List[str],
@@ -795,12 +822,12 @@ class ChatAgent(BaseAgent):
795
822
  r"""Internal function of structuring the output of the agent based on
796
823
  the given output schema.
797
824
  """
798
- from camel.toolkits import OpenAIFunction
825
+ from camel.toolkits import FunctionTool
799
826
 
800
- schema_json = get_pydantic_object_schema(output_schema)
827
+ schema_json = get_pydantic_object_schema(response_format)
801
828
  func_str = json_to_function_code(schema_json)
802
829
  func_callable = func_string_to_callable(func_str)
803
- func = OpenAIFunction(func_callable)
830
+ func = FunctionTool(func_callable)
804
831
 
805
832
  original_func_dict = self.func_dict
806
833
  original_model_dict = self.model_backend.model_config_dict
@@ -1174,10 +1201,7 @@ class ChatAgent(BaseAgent):
1174
1201
  Returns:
1175
1202
  dict: Usage dictionary.
1176
1203
  """
1177
- if isinstance(self.model_type, ModelType):
1178
- encoding = get_model_encoding(self.model_type.value_for_tiktoken)
1179
- else:
1180
- encoding = get_model_encoding("gpt-4o-mini")
1204
+ encoding = get_model_encoding(self.model_type.value_for_tiktoken)
1181
1205
  completion_tokens = 0
1182
1206
  for message in output_messages:
1183
1207
  completion_tokens += len(encoding.encode(message.content))
@@ -11,12 +11,10 @@
11
11
  # See the License for the specific language governing permissions and
12
12
  # limitations under the License.
13
13
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
- from typing import Optional, Union
14
+ from typing import TYPE_CHECKING, Optional, Union
15
15
 
16
- try:
16
+ if TYPE_CHECKING:
17
17
  from unstructured.documents.elements import Element
18
- except ImportError:
19
- Element = None
20
18
 
21
19
  from camel.agents import ChatAgent
22
20
  from camel.messages import BaseMessage
@@ -144,13 +142,13 @@ class KnowledgeGraphAgent(ChatAgent):
144
142
 
145
143
  def run(
146
144
  self,
147
- element: Union[str, Element],
145
+ element: "Element",
148
146
  parse_graph_elements: bool = False,
149
147
  ) -> Union[str, GraphElement]:
150
148
  r"""Run the agent to extract node and relationship information.
151
149
 
152
150
  Args:
153
- element (Union[str, Element]): The input element or string.
151
+ element (Element): The input element.
154
152
  parse_graph_elements (bool, optional): Whether to parse into
155
153
  `GraphElement`. Defaults to `False`.
156
154
 
camel/bots/__init__.py CHANGED
@@ -11,10 +11,24 @@
11
11
  # See the License for the specific language governing permissions and
12
12
  # limitations under the License.
13
13
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
- from .discord_bot import DiscordBot
14
+ from .discord_app import DiscordApp
15
+ from .slack.models import (
16
+ SlackAppMentionEventBody,
17
+ SlackAppMentionEventProfile,
18
+ SlackAuthProfile,
19
+ SlackEventBody,
20
+ SlackEventProfile,
21
+ )
22
+ from .slack.slack_app import SlackApp
15
23
  from .telegram_bot import TelegramBot
16
24
 
17
25
  __all__ = [
18
- 'DiscordBot',
26
+ 'DiscordApp',
27
+ 'SlackApp',
28
+ 'SlackAppMentionEventBody',
29
+ 'SlackAppMentionEventProfile',
30
+ 'SlackAuthProfile',
31
+ 'SlackEventBody',
32
+ 'SlackEventProfile',
19
33
  'TelegramBot',
20
34
  ]
@@ -0,0 +1,138 @@
1
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an "AS IS" BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
+ import logging
15
+ import os
16
+ from typing import TYPE_CHECKING, List, Optional
17
+
18
+ from camel.utils import dependencies_required
19
+
20
+ if TYPE_CHECKING:
21
+ from discord import Message
22
+
23
+ logging.basicConfig(level=logging.INFO)
24
+ logger = logging.getLogger(__name__)
25
+
26
+
27
+ class DiscordApp:
28
+ r"""A class representing a Discord app that uses the `discord.py` library
29
+ to interact with Discord servers.
30
+
31
+ This bot can respond to messages in specific channels and only reacts to
32
+ messages that mention the bot.
33
+
34
+ Attributes:
35
+ channel_ids (Optional[List[int]]): A list of allowed channel IDs. If
36
+ provided, the bot will only respond to messages in these channels.
37
+ token (Optional[str]): The Discord bot token used for authentication.
38
+ """
39
+
40
+ @dependencies_required('discord')
41
+ def __init__(
42
+ self,
43
+ channel_ids: Optional[List[int]] = None,
44
+ token: Optional[str] = None,
45
+ ) -> None:
46
+ r"""Initialize the DiscordApp instance by setting up the Discord client
47
+ and event handlers.
48
+
49
+ Args:
50
+ channel_ids (Optional[List[int]]): A list of allowed channel IDs.
51
+ The bot will only respond to messages in these channels if
52
+ provided.
53
+ token (Optional[str]): The Discord bot token for authentication.
54
+ If not provided, the token will be retrieved from the
55
+ environment variable `DISCORD_TOKEN`.
56
+
57
+ Raises:
58
+ ValueError: If the `DISCORD_TOKEN` is not found in environment
59
+ variables.
60
+ """
61
+ self.token = token or os.getenv('DISCORD_TOKEN')
62
+ self.channel_ids = channel_ids
63
+
64
+ if not self.token:
65
+ raise ValueError(
66
+ "`DISCORD_TOKEN` not found in environment variables. Get it"
67
+ " here: `https://discord.com/developers/applications`."
68
+ )
69
+
70
+ import discord
71
+
72
+ intents = discord.Intents.default()
73
+ intents.message_content = True
74
+ self._client = discord.Client(intents=intents)
75
+
76
+ # Register event handlers
77
+ self._client.event(self.on_ready)
78
+ self._client.event(self.on_message)
79
+
80
+ async def start(self):
81
+ r"""Asynchronously start the Discord bot using its token.
82
+
83
+ This method starts the bot and logs into Discord asynchronously using
84
+ the provided token. It should be awaited when used in an async
85
+ environment.
86
+ """
87
+ await self._client.start(self.token)
88
+
89
+ def run(self) -> None:
90
+ r"""Start the Discord bot using its token.
91
+
92
+ This method starts the bot and logs into Discord synchronously using
93
+ the provided token. It blocks execution and keeps the bot running.
94
+ """
95
+ self._client.run(self.token) # type: ignore[arg-type]
96
+
97
+ async def on_ready(self) -> None:
98
+ r"""Event handler that is called when the bot has successfully
99
+ connected to the Discord server.
100
+
101
+ When the bot is ready and logged into Discord, it prints a message
102
+ displaying the bot's username.
103
+ """
104
+ logger.info(f'We have logged in as {self._client.user}')
105
+
106
+ async def on_message(self, message: 'Message') -> None:
107
+ r"""Event handler for processing incoming messages.
108
+
109
+ This method is called whenever a new message is received by the bot. It
110
+ will ignore messages sent by the bot itself, only respond to messages
111
+ in allowed channels (if specified), and only to messages that mention
112
+ the bot.
113
+
114
+ Args:
115
+ message (discord.Message): The message object received from
116
+ Discord.
117
+ """
118
+ # If the message author is the bot itself,
119
+ # do not respond to this message
120
+ if message.author == self._client.user:
121
+ return
122
+
123
+ # If allowed channel IDs are provided,
124
+ # only respond to messages in those channels
125
+ if self.channel_ids and message.channel.id not in self.channel_ids:
126
+ return
127
+
128
+ # Only respond to messages that mention the bot
129
+ if not self._client.user or not self._client.user.mentioned_in(
130
+ message
131
+ ):
132
+ return
133
+
134
+ logger.info(f"Received message: {message.content}")
135
+
136
+ @property
137
+ def client(self):
138
+ return self._client
@@ -0,0 +1,30 @@
1
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an "AS IS" BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
+ from .models import (
15
+ SlackAppMentionEventBody,
16
+ SlackAppMentionEventProfile,
17
+ SlackAuthProfile,
18
+ SlackEventBody,
19
+ SlackEventProfile,
20
+ )
21
+ from .slack_app import SlackApp
22
+
23
+ __all__ = [
24
+ 'SlackApp',
25
+ 'SlackAppMentionEventBody',
26
+ 'SlackAppMentionEventProfile',
27
+ 'SlackAuthProfile',
28
+ 'SlackEventBody',
29
+ 'SlackEventProfile',
30
+ ]