camel-ai 0.2.3__py3-none-any.whl → 0.2.3a1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of camel-ai might be problematic.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +69 -93
- camel/agents/knowledge_graph_agent.py +6 -4
- camel/bots/__init__.py +2 -16
- camel/bots/discord_bot.py +206 -0
- camel/configs/__init__.py +2 -1
- camel/configs/anthropic_config.py +5 -2
- camel/configs/base_config.py +6 -6
- camel/configs/groq_config.py +3 -2
- camel/configs/ollama_config.py +2 -1
- camel/configs/openai_config.py +23 -2
- camel/configs/samba_config.py +2 -2
- camel/configs/togetherai_config.py +1 -1
- camel/configs/vllm_config.py +1 -1
- camel/configs/zhipuai_config.py +3 -2
- camel/embeddings/openai_embedding.py +2 -2
- camel/loaders/__init__.py +0 -2
- camel/loaders/firecrawl_reader.py +3 -3
- camel/loaders/unstructured_io.py +33 -35
- camel/messages/__init__.py +0 -1
- camel/models/__init__.py +4 -2
- camel/models/anthropic_model.py +26 -32
- camel/models/azure_openai_model.py +36 -39
- camel/models/base_model.py +20 -31
- camel/models/gemini_model.py +29 -37
- camel/models/groq_model.py +23 -29
- camel/models/litellm_model.py +61 -44
- camel/models/mistral_model.py +29 -32
- camel/models/model_factory.py +76 -66
- camel/models/nemotron_model.py +23 -33
- camel/models/ollama_model.py +47 -42
- camel/models/open_source_model.py +170 -0
- camel/models/{openai_compatible_model.py → openai_compatibility_model.py} +49 -31
- camel/models/openai_model.py +29 -48
- camel/models/reka_model.py +28 -30
- camel/models/samba_model.py +177 -82
- camel/models/stub_model.py +2 -2
- camel/models/togetherai_model.py +43 -37
- camel/models/vllm_model.py +50 -43
- camel/models/zhipuai_model.py +27 -33
- camel/retrievers/auto_retriever.py +10 -28
- camel/retrievers/vector_retriever.py +47 -58
- camel/societies/babyagi_playing.py +3 -6
- camel/societies/role_playing.py +3 -5
- camel/storages/graph_storages/graph_element.py +5 -3
- camel/storages/key_value_storages/json.py +1 -6
- camel/toolkits/__init__.py +7 -20
- camel/toolkits/base.py +3 -2
- camel/toolkits/code_execution.py +7 -6
- camel/toolkits/dalle_toolkit.py +6 -6
- camel/toolkits/github_toolkit.py +10 -9
- camel/toolkits/google_maps_toolkit.py +7 -7
- camel/toolkits/linkedin_toolkit.py +7 -7
- camel/toolkits/math_toolkit.py +8 -8
- camel/toolkits/open_api_toolkit.py +5 -5
- camel/toolkits/{function_tool.py → openai_function.py} +11 -34
- camel/toolkits/reddit_toolkit.py +7 -7
- camel/toolkits/retrieval_toolkit.py +5 -5
- camel/toolkits/search_toolkit.py +9 -9
- camel/toolkits/slack_toolkit.py +11 -11
- camel/toolkits/twitter_toolkit.py +452 -378
- camel/toolkits/weather_toolkit.py +6 -6
- camel/types/__init__.py +1 -6
- camel/types/enums.py +85 -40
- camel/types/openai_types.py +0 -3
- camel/utils/__init__.py +2 -0
- camel/utils/async_func.py +7 -7
- camel/utils/commons.py +3 -32
- camel/utils/token_counting.py +212 -30
- camel/workforce/role_playing_worker.py +1 -1
- camel/workforce/single_agent_worker.py +1 -1
- camel/workforce/task_channel.py +3 -4
- camel/workforce/workforce.py +4 -4
- {camel_ai-0.2.3.dist-info → camel_ai-0.2.3a1.dist-info}/METADATA +56 -27
- {camel_ai-0.2.3.dist-info → camel_ai-0.2.3a1.dist-info}/RECORD +76 -85
- {camel_ai-0.2.3.dist-info → camel_ai-0.2.3a1.dist-info}/WHEEL +1 -1
- camel/bots/discord_app.py +0 -138
- camel/bots/slack/__init__.py +0 -30
- camel/bots/slack/models.py +0 -158
- camel/bots/slack/slack_app.py +0 -255
- camel/loaders/chunkr_reader.py +0 -163
- camel/toolkits/arxiv_toolkit.py +0 -155
- camel/toolkits/ask_news_toolkit.py +0 -653
- camel/toolkits/google_scholar_toolkit.py +0 -146
- camel/toolkits/whatsapp_toolkit.py +0 -177
- camel/types/unified_model_type.py +0 -104
- camel_ai-0.2.3.dist-info/LICENSE +0 -201
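
Before the per-file diffs, one rename dominates this pair of releases: the function-tool wrapper moves between camel/toolkits/function_tool.py (FunctionTool, in 0.2.3) and camel/toolkits/openai_function.py (OpenAIFunction, in 0.2.3a1). A minimal import shim that tolerates either version, assuming nothing beyond the two paths shown in the file list above:

# Import the tool wrapper under either version of camel-ai.
# 0.2.3 ships `FunctionTool`; the 0.2.3a1 alpha ships `OpenAIFunction`.
try:
    from camel.toolkits import FunctionTool as ToolWrapper  # 0.2.3
except ImportError:
    from camel.toolkits import OpenAIFunction as ToolWrapper  # 0.2.3a1
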
camel/__init__.py
CHANGED
camel/agents/chat_agent.py
CHANGED
@@ -34,6 +34,7 @@ from openai.types.chat.chat_completion_message_tool_call import Function
 from pydantic import BaseModel
 
 from camel.agents.base import BaseAgent
+from camel.configs import ChatGPTConfig
 from camel.memories import (
     AgentMemory,
     ChatHistoryMemory,
@@ -62,7 +63,7 @@ if TYPE_CHECKING:
     from openai import Stream
 
     from camel.terminators import ResponseTerminator
-    from camel.toolkits import FunctionTool
+    from camel.toolkits import OpenAIFunction
 
 
 logger = logging.getLogger(__name__)
@@ -114,8 +115,7 @@ class ChatAgent(BaseAgent):
     r"""Class for managing conversations of CAMEL Chat Agents.
 
     Args:
-        system_message (Union[BaseMessage, str], optional): The system message
-            for the chat agent.
+        system_message (BaseMessage): The system message for the chat agent.
         model (BaseModelBackend, optional): The model backend to use for
             generating responses. (default: :obj:`OpenAIModel` with
             `GPT_4O_MINI`)
@@ -131,10 +131,10 @@ class ChatAgent(BaseAgent):
             (default: :obj:`None`)
         output_language (str, optional): The language to be output by the
             agent. (default: :obj:`None`)
-        tools (List[FunctionTool], optional): List of available
-            :obj:`FunctionTool`. (default: :obj:`None`)
-        external_tools (List[FunctionTool], optional): List of external tools
-            (:obj:`FunctionTool`) bind to one chat agent. When these tools
+        tools (List[OpenAIFunction], optional): List of available
+            :obj:`OpenAIFunction`. (default: :obj:`None`)
+        external_tools (List[OpenAIFunction], optional): List of external tools
+            (:obj:`OpenAIFunction`) bind to one chat agent. When these tools
             are called, the agent will directly return the request instead of
             processing it. (default: :obj:`None`)
         response_terminators (List[ResponseTerminator], optional): List of
@@ -144,42 +144,34 @@ class ChatAgent(BaseAgent):
 
     def __init__(
         self,
-        system_message: Optional[Union[BaseMessage, str]] = None,
+        system_message: BaseMessage,
         model: Optional[BaseModelBackend] = None,
         memory: Optional[AgentMemory] = None,
         message_window_size: Optional[int] = None,
        token_limit: Optional[int] = None,
         output_language: Optional[str] = None,
-        tools: Optional[List[FunctionTool]] = None,
-        external_tools: Optional[List[FunctionTool]] = None,
+        tools: Optional[List[OpenAIFunction]] = None,
+        external_tools: Optional[List[OpenAIFunction]] = None,
         response_terminators: Optional[List[ResponseTerminator]] = None,
     ) -> None:
-        if isinstance(system_message, str):
-            system_message = BaseMessage.make_assistant_message(
-                role_name='Assistant', content=system_message
-            )
-
-        self.orig_sys_message: Optional[BaseMessage] = system_message
-        self._system_message: Optional[BaseMessage] = system_message
-        self.role_name: str = (
-            getattr(system_message, 'role_name', None) or "assistant"
-        )
-        self.role_type: RoleType = (
-            getattr(system_message, 'role_type', None) or RoleType.ASSISTANT
-        )
+        self.orig_sys_message: BaseMessage = system_message
+        self.system_message = system_message
+        self.role_name: str = system_message.role_name
+        self.role_type: RoleType = system_message.role_type
         self.model_backend: BaseModelBackend = (
             model
             if model is not None
             else ModelFactory.create(
-                model_platform=ModelPlatformType.DEFAULT,
-                model_type=ModelType.DEFAULT,
+                model_platform=ModelPlatformType.OPENAI,
+                model_type=ModelType.GPT_4O_MINI,
+                model_config_dict=ChatGPTConfig().as_dict(),
             )
         )
         self.output_language: Optional[str] = output_language
         if self.output_language is not None:
             self.set_output_language(self.output_language)
 
-        self.model_type = self.model_backend.model_type
+        self.model_type: ModelType = self.model_backend.model_type
 
         # tool registration
         external_tools = external_tools or []
@@ -280,12 +272,11 @@ class ChatAgent(BaseAgent):
             terminator.reset()
 
     @property
-    def system_message(self) -> Optional[BaseMessage]:
+    def system_message(self) -> BaseMessage:
         r"""The getter method for the property :obj:`system_message`.
 
         Returns:
-            Optional[BaseMessage]: The system message of this agent if set,
-                else :obj:`None`.
+            BaseMessage: The system message of this agent.
         """
         return self._system_message
 
@@ -336,22 +327,12 @@ class ChatAgent(BaseAgent):
             BaseMessage: The updated system message object.
         """
         self.output_language = output_language
-        language_prompt = (
+        content = self.orig_sys_message.content + (
             "\nRegardless of the input language, "
             f"you must output text in {output_language}."
         )
-        if self.orig_sys_message is not None:
-            content = self.orig_sys_message.content + language_prompt
-            self._system_message = self.orig_sys_message.create_new_instance(
-                content
-            )
-            return self._system_message
-        else:
-            self._system_message = BaseMessage.make_assistant_message(
-                role_name="Assistant",
-                content=language_prompt,
-            )
-            return self._system_message
+        self.system_message = self.system_message.create_new_instance(content)
+        return self.system_message
 
     def get_info(
         self,
@@ -396,15 +377,12 @@ class ChatAgent(BaseAgent):
         r"""Initializes the stored messages list with the initial system
         message.
         """
-        if self.system_message is not None:
-            system_record = MemoryRecord(
-                message=self.system_message,
-                role_at_backend=OpenAIBackendRole.SYSTEM,
-            )
-            self.memory.clear()
-            self.memory.write_record(system_record)
-        else:
-            self.memory.clear()
+        system_record = MemoryRecord(
+            message=self.system_message,
+            role_at_backend=OpenAIBackendRole.SYSTEM,
+        )
+        self.memory.clear()
+        self.memory.write_record(system_record)
 
     def record_message(self, message: BaseMessage) -> None:
         r"""Records the externally provided message into the agent memory as if
@@ -419,19 +397,19 @@ class ChatAgent(BaseAgent):
 
     def step(
         self,
-        input_message: Union[BaseMessage, str],
-        response_format: Optional[Type[BaseModel]] = None,
+        input_message: BaseMessage,
+        output_schema: Optional[Type[BaseModel]] = None,
     ) -> ChatAgentResponse:
         r"""Performs a single step in the chat session by generating a response
         to the input message.
 
         Args:
-            input_message (Union[BaseMessage, str]): The input message to
-                the agent. For BaseMessage input, its `role` field that
-                specifies the role at backend may be either `user` or
-                `assistant` but it will be set to `user` anyway since for
-                the self agent any incoming message is external.
-            response_format (Optional[Type[BaseModel]], optional): A pydantic
+            input_message (BaseMessage): The input message to the agent.
+                Its `role` field that specifies the role at backend may be
+                either `user` or `assistant` but it will be set to `user`
+                anyway since for the self agent any incoming message is
+                external.
+            output_schema (Optional[Type[BaseModel]], optional): A pydantic
                 model class that includes value types and field descriptions
                 used to generate a structured response by LLM. This schema
                 helps in defining the expected output format. (default:
@@ -442,12 +420,12 @@ class ChatAgent(BaseAgent):
             a boolean indicating whether the chat session has terminated,
             and information about the chat session.
         """
-        if isinstance(input_message, str):
-            input_message = BaseMessage.make_user_message(
-                role_name='User', content=input_message
-            )
-
-        if "llama" in self.model_type.lower():
+        if (
+            isinstance(self.model_type, ModelType)
+            and "lama" in self.model_type.value
+            or isinstance(self.model_type, str)
+            and "lama" in self.model_type
+        ):
            if self.model_backend.model_config_dict.get("tools", None):
                 tool_prompt = self._generate_tool_prompt(self.tool_schema_list)
 
@@ -528,7 +506,10 @@ class ChatAgent(BaseAgent):
                 self._step_tool_call_and_update(response)
             )
 
-        if response_format is not None:
+        if (
+            output_schema is not None
+            and self.model_type.supports_tool_calling
+        ):
             (
                 output_messages,
                 finish_reasons,
@@ -536,7 +517,7 @@ class ChatAgent(BaseAgent):
                 response_id,
                 tool_call,
                 num_tokens,
-            ) = self._structure_output_with_function(response_format)
+            ) = self._structure_output_with_function(output_schema)
             tool_call_records.append(tool_call)
 
             info = self._step_get_info(
@@ -618,8 +599,8 @@ class ChatAgent(BaseAgent):
         )
 
         if (
-            response_format is not None
-            and self.model_type.support_native_tool_calling
+            output_schema is not None
+            and self.model_type.supports_tool_calling
         ):
             (
                 output_messages,
@@ -628,7 +609,7 @@ class ChatAgent(BaseAgent):
             response_id,
             tool_call,
             num_tokens,
-        ) = self._structure_output_with_function(response_format)
+        ) = self._structure_output_with_function(output_schema)
         tool_call_records.append(tool_call)
 
         info = self._step_get_info(
@@ -656,19 +637,19 @@ class ChatAgent(BaseAgent):
 
     async def step_async(
         self,
-        input_message: Union[BaseMessage, str],
-        response_format: Optional[Type[BaseModel]] = None,
+        input_message: BaseMessage,
+        output_schema: Optional[Type[BaseModel]] = None,
     ) -> ChatAgentResponse:
         r"""Performs a single step in the chat session by generating a response
         to the input message. This agent step can call async function calls.
 
         Args:
-            input_message (Union[BaseMessage, str]): The input message to
-                the agent. For BaseMessage input, its `role` field that
-                specifies the role at backend may be either `user` or
-                `assistant` but it will be set to `user` anyway since for
-                the self agent any incoming message is external.
-            response_format (Optional[Type[BaseModel]], optional): A pydantic
+            input_message (BaseMessage): The input message to the agent.
+                Its `role` field that specifies the role at backend may be
+                either `user` or `assistant` but it will be set to `user`
+                anyway since for the self agent any incoming message is
+                external.
+            output_schema (Optional[Type[BaseModel]], optional): A pydantic
                 model class that includes value types and field descriptions
                 used to generate a structured response by LLM. This schema
                 helps in defining the expected output format. (default:
@@ -679,11 +660,6 @@ class ChatAgent(BaseAgent):
             a boolean indicating whether the chat session has terminated,
             and information about the chat session.
         """
-        if isinstance(input_message, str):
-            input_message = BaseMessage.make_user_message(
-                role_name='User', content=input_message
-            )
-
         self.update_memory(input_message, OpenAIBackendRole.USER)
 
         tool_call_records: List[FunctionCallingRecord] = []
@@ -732,10 +708,7 @@ class ChatAgent(BaseAgent):
                 await self._step_tool_call_and_update_async(response)
             )
 
-        if (
-            response_format is not None
-            and self.model_type.support_native_tool_calling
-        ):
+        if output_schema is not None and self.model_type.supports_tool_calling:
             (
                 output_messages,
                 finish_reasons,
@@ -743,7 +716,7 @@ class ChatAgent(BaseAgent):
             response_id,
             tool_call_record,
             num_tokens,
-        ) = self._structure_output_with_function(response_format)
+        ) = self._structure_output_with_function(output_schema)
         tool_call_records.append(tool_call_record)
 
         info = self._step_get_info(
@@ -810,7 +783,7 @@ class ChatAgent(BaseAgent):
         return func_record
 
     def _structure_output_with_function(
-        self, response_format: Type[BaseModel]
+        self, output_schema: Type[BaseModel]
     ) -> Tuple[
         List[BaseMessage],
         List[str],
@@ -822,12 +795,12 @@ class ChatAgent(BaseAgent):
         r"""Internal function of structuring the output of the agent based on
         the given output schema.
         """
-        from camel.toolkits import FunctionTool
+        from camel.toolkits import OpenAIFunction
 
-        schema_json = get_pydantic_object_schema(response_format)
+        schema_json = get_pydantic_object_schema(output_schema)
         func_str = json_to_function_code(schema_json)
         func_callable = func_string_to_callable(func_str)
-        func = FunctionTool(func_callable)
+        func = OpenAIFunction(func_callable)
 
         original_func_dict = self.func_dict
         original_model_dict = self.model_backend.model_config_dict
@@ -1201,7 +1174,10 @@ class ChatAgent(BaseAgent):
         Returns:
             dict: Usage dictionary.
         """
-        encoding = get_model_encoding(self.model_type.value_for_tiktoken)
+        if isinstance(self.model_type, ModelType):
+            encoding = get_model_encoding(self.model_type.value_for_tiktoken)
+        else:
+            encoding = get_model_encoding("gpt-4o-mini")
         completion_tokens = 0
         for message in output_messages:
             completion_tokens += len(encoding.encode(message.content))
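
Taken together, the chat_agent.py changes move the public API from 0.2.3's `response_format=` keyword (with plain-string inputs auto-wrapped) to the alpha's `output_schema=` keyword (BaseMessage only). A sketch of both call styles, using a hypothetical `JokeResponse` schema and assuming a configured model backend and API key:

from pydantic import BaseModel

from camel.agents import ChatAgent
from camel.messages import BaseMessage


class JokeResponse(BaseModel):  # hypothetical schema for illustration
    joke: str
    funny_level: int


sys_msg = BaseMessage.make_assistant_message(
    role_name="Assistant", content="You are a helpful assistant."
)
agent = ChatAgent(sys_msg)  # 0.2.3a1 requires a BaseMessage system message

user_msg = BaseMessage.make_user_message(
    role_name="User", content="Tell me a joke."
)
# 0.2.3a1: structured output is requested via `output_schema`.
response = agent.step(user_msg, output_schema=JokeResponse)

# 0.2.3 equivalent: plain strings are accepted and the keyword is
# `response_format`:
# response = agent.step("Tell me a joke.", response_format=JokeResponse)
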
camel/agents/knowledge_graph_agent.py
CHANGED

@@ -11,10 +11,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
-from typing import TYPE_CHECKING, Union
+from typing import Optional, Union
 
-if TYPE_CHECKING:
+try:
     from unstructured.documents.elements import Element
+except ImportError:
+    Element = None
 
 from camel.agents import ChatAgent
 from camel.messages import BaseMessage
@@ -142,13 +144,13 @@ class KnowledgeGraphAgent(ChatAgent):
 
     def run(
         self,
-        element: "Element",
+        element: Union[str, Element],
         parse_graph_elements: bool = False,
     ) -> Union[str, GraphElement]:
         r"""Run the agent to extract node and relationship information.
 
         Args:
-            element (Element): The input element.
+            element (Union[str, Element]): The input element or string.
             parse_graph_elements (bool, optional): Whether to parse into
                 `GraphElement`. Defaults to `False`.
 
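
Because the `Element` import is now guarded and `run()` accepts a raw string, `unstructured` becomes an optional dependency for this agent in the alpha. A sketch, assuming the agent's default construction (no explicit model backend) works as in both versions:

from camel.agents import KnowledgeGraphAgent

kg_agent = KnowledgeGraphAgent()
# 0.2.3a1 accepts a plain string; 0.2.3 requires an unstructured Element.
result = kg_agent.run(
    "CAMEL is a multi-agent framework.", parse_graph_elements=False
)
print(result)
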
camel/bots/__init__.py
CHANGED
@@ -11,24 +11,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
-from .discord_app import DiscordApp
-from .slack.models import (
-    SlackAppMentionEventBody,
-    SlackAppMentionEventProfile,
-    SlackAuthProfile,
-    SlackEventBody,
-    SlackEventProfile,
-)
-from .slack.slack_app import SlackApp
+from .discord_bot import DiscordBot
 from .telegram_bot import TelegramBot
 
 __all__ = [
-    'DiscordApp',
-    'SlackApp',
-    'SlackAppMentionEventBody',
-    'SlackAppMentionEventProfile',
-    'SlackAuthProfile',
-    'SlackEventBody',
-    'SlackEventProfile',
+    'DiscordBot',
     'TelegramBot',
 ]
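
The public surface of `camel.bots` shrinks accordingly; a sketch of the imports each version supports:

# 0.2.3a1 exposes only the lightweight bot wrappers:
from camel.bots import DiscordBot, TelegramBot

# 0.2.3 instead exposes the Slack integration and DiscordApp, e.g.:
# from camel.bots import DiscordApp, SlackApp, SlackEventBody
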
camel/bots/discord_bot.py
ADDED

@@ -0,0 +1,206 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+import os
+from typing import TYPE_CHECKING, List, Optional, Union
+
+from camel.agents import ChatAgent
+from camel.messages import BaseMessage
+from camel.retrievers import AutoRetriever
+from camel.utils import dependencies_required
+
+try:
+    from unstructured.documents.elements import Element
+except ImportError:
+    Element = None
+
+if TYPE_CHECKING:
+    from discord import Message
+
+
+class DiscordBot:
+    r"""Represents a Discord bot that is powered by a CAMEL `ChatAgent`.
+
+    Attributes:
+        chat_agent (ChatAgent): Chat agent that will power the bot.
+        channel_ids (List[int], optional): The channel IDs that the bot will
+            listen to.
+        discord_token (str, optional): The bot token.
+        auto_retriever (AutoRetriever): AutoRetriever instance for RAG.
+        vector_storage_local_path (Union[str, List[str]]): The paths to the
+            contents for RAG.
+        top_k (int): Top choice for the RAG response.
+        return_detailed_info (bool): If show detailed info of the RAG response.
+        contents (Union[str, List[str], Element, List[Element]], optional):
+            Local file paths, remote URLs, string contents or Element objects.
+    """
+
+    @dependencies_required('discord')
+    def __init__(
+        self,
+        chat_agent: ChatAgent,
+        contents: Union[str, List[str], Element, List[Element]] = None,
+        channel_ids: Optional[List[int]] = None,
+        discord_token: Optional[str] = None,
+        auto_retriever: Optional[AutoRetriever] = None,
+        vector_storage_local_path: Union[str, List[str]] = "",
+        top_k: int = 1,
+        return_detailed_info: bool = True,
+    ) -> None:
+        self.chat_agent = chat_agent
+        self.token = discord_token or os.getenv('DISCORD_TOKEN')
+        self.channel_ids = channel_ids
+        self.auto_retriever = auto_retriever
+        self.vector_storage_local_path = vector_storage_local_path
+        self.top_k = top_k
+        self.return_detailed_info = return_detailed_info
+        self.contents = contents
+
+        if not self.token:
+            raise ValueError(
+                "`DISCORD_TOKEN` not found in environment variables. Get it"
+                " here: `https://discord.com/developers/applications`."
+            )
+
+        import discord
+
+        intents = discord.Intents.default()
+        intents.message_content = True
+        self.client = discord.Client(intents=intents)
+
+        # Register event handlers
+        self.client.event(self.on_ready)
+        self.client.event(self.on_message)
+
+    def run(self) -> None:
+        r"""Start the Discord bot using its token.
+
+        This method starts the Discord bot by running the client with the
+        provided token.
+        """
+        self.client.run(self.token)  # type: ignore[arg-type]
+
+    async def on_ready(self) -> None:
+        r"""This method is called when the bot has successfully connected to
+        the Discord server.
+
+        It prints a message indicating that the bot has logged in and displays
+        the username of the bot.
+        """
+        print(f'We have logged in as {self.client.user}')
+
+    async def on_message(self, message: 'Message') -> None:
+        r"""Event handler for when a message is received.
+
+        Args:
+            message (discord.Message): The message object received.
+        """
+
+        # If the message author is the bot itself,
+        # do not respond to this message
+        if message.author == self.client.user:
+            return
+
+        # If allowed channel IDs are provided,
+        # only respond to messages in those channels
+        if self.channel_ids and message.channel.id not in self.channel_ids:
+            return
+
+        # Only respond to messages that mention the bot
+        if not self.client.user or not self.client.user.mentioned_in(message):
+            return
+
+        user_raw_msg = message.content
+
+        if self.auto_retriever:
+            retrieved_content = self.auto_retriever.run_vector_retriever(
+                query=user_raw_msg,
+                contents=self.contents,
+                top_k=self.top_k,
+                return_detailed_info=self.return_detailed_info,
+            )
+            user_raw_msg = (
+                f"Here is the query to you: {user_raw_msg}\n"
+                f"Based on the retrieved content: {retrieved_content}, \n"
+                f"answer the query from {message.author.name}"
+            )
+
+        user_msg = BaseMessage.make_user_message(
+            role_name="User", content=user_raw_msg
+        )
+        assistant_response = self.chat_agent.step(user_msg)
+        await message.channel.send(assistant_response.msg.content)
+
+
+if __name__ == "__main__":
+    assistant_sys_msg = BaseMessage.make_assistant_message(
+        role_name="Assistant",
+        content='''
+            Objective:
+                You are a customer service bot designed to assist users
+                with inquiries related to our open-source project.
+                Your responses should be informative, concise, and helpful.
+
+            Instructions:
+                Understand User Queries: Carefully read and understand the
+                    user's question. Focus on keywords and context to
+                    determine the user's intent.
+                Search for Relevant Information: Use the provided dataset
+                    and refer to the RAG file to find answers that
+                    closely match the user's query. The RAG file contains
+                    detailed interactions and should be your primary
+                    resource for crafting responses.
+                Provide Clear and Concise Responses: Your answers should
+                    be clear and to the point. Avoid overly technical
+                    language unless the user's query indicates
+                    familiarity with technical terms.
+                Encourage Engagement: Where applicable, encourage users
+                    to contribute to the project or seek further
+                    assistance.
+
+            Response Structure:
+                Greeting: Begin with a polite greeting or acknowledgment.
+                Main Response: Provide the main answer to the user's query.
+                Additional Information: Offer any extra tips or direct the
+                    user to additional resources if necessary.
+                Closing: Close the response politely, encouraging
+                    further engagement if appropriate.
+
+            Tone:
+                Professional: Maintain a professional tone that
+                    instills confidence in the user.
+                Friendly: Be approachable and friendly to make users
+                    feel comfortable.
+                Helpful: Always aim to be as helpful as possible,
+                    guiding users to solutions.
+            ''',
+    )
+
+    agent = ChatAgent(
+        assistant_sys_msg,
+        message_window_size=10,
+    )
+    # Uncomment the following code and offer storage information
+    # for RAG functionality
+
+    # auto_retriever = AutoRetriever(
+    #     vector_storage_local_path="examples/bots",
+    #     storage_type=StorageType.QDRANT,
+    # )
+
+    bot = DiscordBot(
+        agent,
+        # auto_retriever=auto_retriever,
+        # vector_storage_local_path=["local_data/"],
+    )
+    bot.run()
camel/configs/__init__.py
CHANGED
@@ -18,7 +18,7 @@ from .groq_config import GROQ_API_PARAMS, GroqConfig
 from .litellm_config import LITELLM_API_PARAMS, LiteLLMConfig
 from .mistral_config import MISTRAL_API_PARAMS, MistralConfig
 from .ollama_config import OLLAMA_API_PARAMS, OllamaConfig
-from .openai_config import OPENAI_API_PARAMS, ChatGPTConfig
+from .openai_config import OPENAI_API_PARAMS, ChatGPTConfig, OpenSourceConfig
 from .reka_config import REKA_API_PARAMS, RekaConfig
 from .samba_config import (
     SAMBA_CLOUD_API_PARAMS,
@@ -40,6 +40,7 @@ __all__ = [
     'ANTHROPIC_API_PARAMS',
     'GROQ_API_PARAMS',
     'GroqConfig',
+    'OpenSourceConfig',
     'LiteLLMConfig',
     'LITELLM_API_PARAMS',
     'OllamaConfig',
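
The `OpenSourceConfig` export exists only on the alpha side; `ChatGPTConfig` is shared by both versions and is what the alpha's `ChatAgent` now uses for its default backend (see the chat_agent.py diff above). A sketch of the equivalent explicit construction; `temperature` is a standard `ChatGPTConfig` field:

from camel.configs import ChatGPTConfig
from camel.models import ModelFactory
from camel.types import ModelPlatformType, ModelType

model = ModelFactory.create(
    model_platform=ModelPlatformType.OPENAI,
    model_type=ModelType.GPT_4O_MINI,
    model_config_dict=ChatGPTConfig(temperature=0.2).as_dict(),
)
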
camel/configs/anthropic_config.py
CHANGED

@@ -15,8 +15,9 @@ from __future__ import annotations
 
 from typing import List, Union
 
+from anthropic import NOT_GIVEN, NotGiven
+
 from camel.configs.base_config import BaseConfig
-from camel.types import NOT_GIVEN, NotGiven
 
 
 class AnthropicConfig(BaseConfig):
@@ -54,7 +55,9 @@ class AnthropicConfig(BaseConfig):
             (default: :obj:`5`)
         metadata: An object describing metadata about the request.
         stream (bool, optional): Whether to incrementally stream the response
-            using server-sent events. (default: :obj:`False`)
+            using server-sent events.
+            (default: :obj:`False`)
+
     """
 
     max_tokens: int = 256