camel-ai 0.1.1__py3-none-any.whl → 0.1.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of camel-ai might be problematic. Click here for more details.

Files changed (117) hide show
  1. camel/__init__.py +1 -11
  2. camel/agents/__init__.py +7 -5
  3. camel/agents/chat_agent.py +134 -86
  4. camel/agents/critic_agent.py +28 -17
  5. camel/agents/deductive_reasoner_agent.py +235 -0
  6. camel/agents/embodied_agent.py +92 -40
  7. camel/agents/knowledge_graph_agent.py +221 -0
  8. camel/agents/role_assignment_agent.py +27 -17
  9. camel/agents/task_agent.py +60 -34
  10. camel/agents/tool_agents/base.py +0 -1
  11. camel/agents/tool_agents/hugging_face_tool_agent.py +7 -4
  12. camel/configs/__init__.py +29 -0
  13. camel/configs/anthropic_config.py +73 -0
  14. camel/configs/base_config.py +22 -0
  15. camel/{configs.py → configs/openai_config.py} +37 -64
  16. camel/embeddings/__init__.py +2 -0
  17. camel/embeddings/base.py +3 -2
  18. camel/embeddings/openai_embedding.py +10 -5
  19. camel/embeddings/sentence_transformers_embeddings.py +65 -0
  20. camel/functions/__init__.py +18 -3
  21. camel/functions/google_maps_function.py +335 -0
  22. camel/functions/math_functions.py +7 -7
  23. camel/functions/open_api_function.py +380 -0
  24. camel/functions/open_api_specs/coursera/__init__.py +13 -0
  25. camel/functions/open_api_specs/coursera/openapi.yaml +82 -0
  26. camel/functions/open_api_specs/klarna/__init__.py +13 -0
  27. camel/functions/open_api_specs/klarna/openapi.yaml +87 -0
  28. camel/functions/open_api_specs/speak/__init__.py +13 -0
  29. camel/functions/open_api_specs/speak/openapi.yaml +151 -0
  30. camel/functions/openai_function.py +346 -42
  31. camel/functions/retrieval_functions.py +61 -0
  32. camel/functions/search_functions.py +100 -35
  33. camel/functions/slack_functions.py +275 -0
  34. camel/functions/twitter_function.py +484 -0
  35. camel/functions/weather_functions.py +36 -23
  36. camel/generators.py +65 -46
  37. camel/human.py +17 -11
  38. camel/interpreters/__init__.py +25 -0
  39. camel/interpreters/base.py +49 -0
  40. camel/{utils/python_interpreter.py → interpreters/internal_python_interpreter.py} +129 -48
  41. camel/interpreters/interpreter_error.py +19 -0
  42. camel/interpreters/subprocess_interpreter.py +190 -0
  43. camel/loaders/__init__.py +22 -0
  44. camel/{functions/base_io_functions.py → loaders/base_io.py} +38 -35
  45. camel/{functions/unstructured_io_fuctions.py → loaders/unstructured_io.py} +199 -110
  46. camel/memories/__init__.py +17 -7
  47. camel/memories/agent_memories.py +156 -0
  48. camel/memories/base.py +97 -32
  49. camel/memories/blocks/__init__.py +21 -0
  50. camel/memories/{chat_history_memory.py → blocks/chat_history_block.py} +34 -34
  51. camel/memories/blocks/vectordb_block.py +101 -0
  52. camel/memories/context_creators/__init__.py +3 -2
  53. camel/memories/context_creators/score_based.py +32 -20
  54. camel/memories/records.py +6 -5
  55. camel/messages/__init__.py +2 -2
  56. camel/messages/base.py +99 -16
  57. camel/messages/func_message.py +7 -4
  58. camel/models/__init__.py +6 -2
  59. camel/models/anthropic_model.py +146 -0
  60. camel/models/base_model.py +10 -3
  61. camel/models/model_factory.py +17 -11
  62. camel/models/open_source_model.py +25 -13
  63. camel/models/openai_audio_models.py +251 -0
  64. camel/models/openai_model.py +20 -13
  65. camel/models/stub_model.py +10 -5
  66. camel/prompts/__init__.py +7 -5
  67. camel/prompts/ai_society.py +21 -14
  68. camel/prompts/base.py +54 -47
  69. camel/prompts/code.py +22 -14
  70. camel/prompts/evaluation.py +8 -5
  71. camel/prompts/misalignment.py +26 -19
  72. camel/prompts/object_recognition.py +35 -0
  73. camel/prompts/prompt_templates.py +14 -8
  74. camel/prompts/role_description_prompt_template.py +16 -10
  75. camel/prompts/solution_extraction.py +9 -5
  76. camel/prompts/task_prompt_template.py +24 -21
  77. camel/prompts/translation.py +9 -5
  78. camel/responses/agent_responses.py +5 -2
  79. camel/retrievers/__init__.py +26 -0
  80. camel/retrievers/auto_retriever.py +330 -0
  81. camel/retrievers/base.py +69 -0
  82. camel/retrievers/bm25_retriever.py +140 -0
  83. camel/retrievers/cohere_rerank_retriever.py +108 -0
  84. camel/retrievers/vector_retriever.py +183 -0
  85. camel/societies/__init__.py +1 -1
  86. camel/societies/babyagi_playing.py +56 -32
  87. camel/societies/role_playing.py +188 -133
  88. camel/storages/__init__.py +18 -0
  89. camel/storages/graph_storages/__init__.py +23 -0
  90. camel/storages/graph_storages/base.py +82 -0
  91. camel/storages/graph_storages/graph_element.py +74 -0
  92. camel/storages/graph_storages/neo4j_graph.py +582 -0
  93. camel/storages/key_value_storages/base.py +1 -2
  94. camel/storages/key_value_storages/in_memory.py +1 -2
  95. camel/storages/key_value_storages/json.py +8 -13
  96. camel/storages/vectordb_storages/__init__.py +33 -0
  97. camel/storages/vectordb_storages/base.py +202 -0
  98. camel/storages/vectordb_storages/milvus.py +396 -0
  99. camel/storages/vectordb_storages/qdrant.py +373 -0
  100. camel/terminators/__init__.py +1 -1
  101. camel/terminators/base.py +2 -3
  102. camel/terminators/response_terminator.py +21 -12
  103. camel/terminators/token_limit_terminator.py +5 -3
  104. camel/toolkits/__init__.py +21 -0
  105. camel/toolkits/base.py +22 -0
  106. camel/toolkits/github_toolkit.py +245 -0
  107. camel/types/__init__.py +18 -6
  108. camel/types/enums.py +129 -15
  109. camel/types/openai_types.py +10 -5
  110. camel/utils/__init__.py +20 -13
  111. camel/utils/commons.py +170 -85
  112. camel/utils/token_counting.py +135 -15
  113. {camel_ai-0.1.1.dist-info → camel_ai-0.1.4.dist-info}/METADATA +123 -75
  114. camel_ai-0.1.4.dist-info/RECORD +119 -0
  115. {camel_ai-0.1.1.dist-info → camel_ai-0.1.4.dist-info}/WHEEL +1 -1
  116. camel/memories/context_creators/base.py +0 -72
  117. camel_ai-0.1.1.dist-info/RECORD +0 -75
camel/messages/base.py CHANGED
@@ -11,8 +11,12 @@
11
11
  # See the License for the specific language governing permissions and
12
12
  # limitations under the License.
13
13
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
+ import base64
15
+ import io
14
16
  from dataclasses import dataclass
15
- from typing import Any, Dict, List, Optional, Tuple, Union
17
+ from typing import Any, Dict, List, Literal, Optional, Tuple, Union
18
+
19
+ from PIL import Image
16
20
 
17
21
  from camel.messages import (
18
22
  OpenAIAssistantMessage,
@@ -21,7 +25,12 @@ from camel.messages import (
21
25
  OpenAIUserMessage,
22
26
  )
23
27
  from camel.prompts import CodePrompt, TextPrompt
24
- from camel.types import OpenAIBackendRole, RoleType
28
+ from camel.types import (
29
+ OpenAIBackendRole,
30
+ OpenAIImageDetailType,
31
+ OpenAIImageType,
32
+ RoleType,
33
+ )
25
34
 
26
35
 
27
36
  @dataclass
@@ -36,22 +45,49 @@ class BaseMessage:
36
45
  for the message.
37
46
  content (str): The content of the message.
38
47
  """
48
+
39
49
  role_name: str
40
50
  role_type: RoleType
41
51
  meta_dict: Optional[Dict[str, str]]
42
52
  content: str
53
+ image: Optional[Image.Image] = None
54
+ image_detail: Literal["auto", "low", "high"] = "auto"
43
55
 
44
56
  @classmethod
45
57
  def make_user_message(
46
- cls, role_name: str, content: str,
47
- meta_dict: Optional[Dict[str, str]] = None) -> 'BaseMessage':
48
- return cls(role_name, RoleType.USER, meta_dict, content)
58
+ cls,
59
+ role_name: str,
60
+ content: str,
61
+ meta_dict: Optional[Dict[str, str]] = None,
62
+ image: Optional[Image.Image] = None,
63
+ image_detail: Union[OpenAIImageDetailType, str] = "auto",
64
+ ) -> 'BaseMessage':
65
+ return cls(
66
+ role_name,
67
+ RoleType.USER,
68
+ meta_dict,
69
+ content,
70
+ image,
71
+ OpenAIImageDetailType(image_detail).value,
72
+ )
49
73
 
50
74
  @classmethod
51
75
  def make_assistant_message(
52
- cls, role_name: str, content: str,
53
- meta_dict: Optional[Dict[str, str]] = None) -> 'BaseMessage':
54
- return cls(role_name, RoleType.ASSISTANT, meta_dict, content)
76
+ cls,
77
+ role_name: str,
78
+ content: str,
79
+ meta_dict: Optional[Dict[str, str]] = None,
80
+ image: Optional[Image.Image] = None,
81
+ image_detail: Union[OpenAIImageDetailType, str] = "auto",
82
+ ) -> 'BaseMessage':
83
+ return cls(
84
+ role_name,
85
+ RoleType.ASSISTANT,
86
+ meta_dict,
87
+ content,
88
+ image,
89
+ OpenAIImageDetailType(image_detail).value,
90
+ )
55
91
 
56
92
  def create_new_instance(self, content: str) -> "BaseMessage":
57
93
  r"""Create a new instance of the :obj:`BaseMessage` with updated
@@ -63,9 +99,12 @@ class BaseMessage:
63
99
  Returns:
64
100
  BaseMessage: The new instance of :obj:`BaseMessage`.
65
101
  """
66
- return self.__class__(role_name=self.role_name,
67
- role_type=self.role_type,
68
- meta_dict=self.meta_dict, content=content)
102
+ return self.__class__(
103
+ role_name=self.role_name,
104
+ role_type=self.role_type,
105
+ meta_dict=self.meta_dict,
106
+ content=content,
107
+ )
69
108
 
70
109
  def __add__(self, other: Any) -> Union["BaseMessage", Any]:
71
110
  r"""Addition operator override for :obj:`BaseMessage`.
@@ -83,7 +122,8 @@ class BaseMessage:
83
122
  else:
84
123
  raise TypeError(
85
124
  f"Unsupported operand type(s) for +: '{type(self)}' and "
86
- f"'{type(other)}'")
125
+ f"'{type(other)}'"
126
+ )
87
127
  return self.create_new_instance(combined_content)
88
128
 
89
129
  def __mul__(self, other: Any) -> Union["BaseMessage", Any]:
@@ -101,7 +141,8 @@ class BaseMessage:
101
141
  else:
102
142
  raise TypeError(
103
143
  f"Unsupported operand type(s) for *: '{type(self)}' and "
104
- f"'{type(other)}'")
144
+ f"'{type(other)}'"
145
+ )
105
146
 
106
147
  def __len__(self) -> int:
107
148
  r"""Length operator override for :obj:`BaseMessage`.
@@ -124,7 +165,8 @@ class BaseMessage:
124
165
  return item in self.content
125
166
 
126
167
  def extract_text_and_code_prompts(
127
- self) -> Tuple[List[TextPrompt], List[CodePrompt]]:
168
+ self,
169
+ ) -> Tuple[List[TextPrompt], List[CodePrompt]]:
128
170
  r"""Extract text and code prompts from the message content.
129
171
 
130
172
  Returns:
@@ -140,7 +182,8 @@ class BaseMessage:
140
182
  start_idx = 0
141
183
  while idx < len(lines):
142
184
  while idx < len(lines) and (
143
- not lines[idx].lstrip().startswith("```")):
185
+ not lines[idx].lstrip().startswith("```")
186
+ ):
144
187
  idx += 1
145
188
  text = "\n".join(lines[start_idx:idx]).strip()
146
189
  text_prompts.append(TextPrompt(text))
@@ -198,7 +241,47 @@ class BaseMessage:
198
241
  Returns:
199
242
  OpenAIUserMessage: The converted :obj:`OpenAIUserMessage` object.
200
243
  """
201
- return {"role": "user", "content": self.content}
244
+ if self.image is None:
245
+ return {"role": "user", "content": self.content}
246
+ else:
247
+ # Encode the PIL image as a base64 data URL; a known format is required
248
+ if self.image.format is None:
249
+ raise ValueError(
250
+ f"Image's `format` is `None`, please "
251
+ f"transform the `PIL.Image.Image` to one of "
252
+ f"following supported formats, such as "
253
+ f"{list(OpenAIImageType)}"
254
+ )
255
+
256
+ image_type: str = self.image.format.lower()
257
+ if image_type not in OpenAIImageType:
258
+ raise ValueError(
259
+ f"Image type {self.image.format} "
260
+ f"is not supported by OpenAI vision model"
261
+ )
262
+ with io.BytesIO() as buffer:
263
+ self.image.save(fp=buffer, format=self.image.format)
264
+ encoded_image = base64.b64encode(buffer.getvalue()).decode(
265
+ "utf-8"
266
+ )
267
+ image_prefix = f"data:image/{image_type};base64,"
268
+
269
+ return {
270
+ "role": "user",
271
+ "content": [
272
+ {
273
+ "type": "text",
274
+ "text": self.content,
275
+ },
276
+ {
277
+ "type": "image_url",
278
+ "image_url": {
279
+ "url": f"{image_prefix}{encoded_image}",
280
+ "detail": self.image_detail,
281
+ },
282
+ },
283
+ ],
284
+ }
202
285
 
203
286
  def to_openai_assistant_message(self) -> OpenAIAssistantMessage:
204
287
  r"""Converts the message to an :obj:`OpenAIAssistantMessage` object.
@@ -36,6 +36,7 @@ class FunctionCallingMessage(BaseMessage):
36
36
  result (Optional[Any]): The result of function execution.
37
37
  (default: :obj:`None`)
38
38
  """
39
+
39
40
  func_name: Optional[str] = None
40
41
  args: Optional[Dict] = None
41
42
  result: Optional[Any] = None
@@ -67,10 +68,11 @@ class FunctionCallingMessage(BaseMessage):
67
68
  OpenAIAssistantMessage: The converted :obj:`OpenAIAssistantMessage`
68
69
  object.
69
70
  """
70
- if (not self.func_name) or (not self.args):
71
+ if (not self.func_name) or (self.args is None):
71
72
  raise ValueError(
72
73
  "Invalid request for converting into assistant message"
73
- " due to missing function name or arguments.")
74
+ " due to missing function name or arguments."
75
+ )
74
76
 
75
77
  msg_dict: OpenAIAssistantMessage = {
76
78
  "role": "assistant",
@@ -78,7 +80,7 @@ class FunctionCallingMessage(BaseMessage):
78
80
  "function_call": {
79
81
  "name": self.func_name,
80
82
  "arguments": str(self.args),
81
- }
83
+ },
82
84
  }
83
85
 
84
86
  return msg_dict
@@ -94,7 +96,8 @@ class FunctionCallingMessage(BaseMessage):
94
96
  if (not self.func_name) or (not self.result):
95
97
  raise ValueError(
96
98
  "Invalid request for converting into function message"
97
- " due to missing function name or results.")
99
+ " due to missing function name or results."
100
+ )
98
101
 
99
102
  result_content = {"result": {str(self.result)}}
100
103
  msg_dict: OpenAIFunctionMessage = {
camel/models/__init__.py CHANGED
@@ -11,16 +11,20 @@
11
11
  # See the License for the specific language governing permissions and
12
12
  # limitations under the License.
13
13
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
+ from .anthropic_model import AnthropicModel
14
15
  from .base_model import BaseModelBackend
16
+ from .model_factory import ModelFactory
17
+ from .open_source_model import OpenSourceModel
18
+ from .openai_audio_models import OpenAIAudioModels
15
19
  from .openai_model import OpenAIModel
16
20
  from .stub_model import StubModel
17
- from .open_source_model import OpenSourceModel
18
- from .model_factory import ModelFactory
19
21
 
20
22
  __all__ = [
21
23
  'BaseModelBackend',
22
24
  'OpenAIModel',
25
+ 'AnthropicModel',
23
26
  'StubModel',
24
27
  'OpenSourceModel',
25
28
  'ModelFactory',
29
+ 'OpenAIAudioModels',
26
30
  ]
@@ -0,0 +1,146 @@
1
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an "AS IS" BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
+ import os
15
+ from typing import Any, Dict, Optional
16
+
17
+ from anthropic import Anthropic
18
+ from anthropic._types import NOT_GIVEN
19
+
20
+ from camel.configs import ANTHROPIC_API_PARAMS
21
+ from camel.models.base_model import BaseModelBackend
22
+ from camel.types import ChatCompletion, ModelType
23
+ from camel.utils import AnthropicTokenCounter, BaseTokenCounter
24
+
25
+
26
+ class AnthropicModel(BaseModelBackend):
27
+ r"""Anthropic API in a unified BaseModelBackend interface."""
28
+
29
+ def __init__(
30
+ self,
31
+ model_type: ModelType,
32
+ model_config_dict: Dict[str, Any],
33
+ api_key: Optional[str] = None,
34
+ ) -> None:
35
+ r"""Constructor for Anthropic backend.
36
+
37
+ Args:
38
+ model_type (ModelType): Model for which a backend is created,
39
+ one of the Anthropic Claude series.
40
+ model_config_dict (Dict[str, Any]): A dictionary that will
41
+ be fed into Anthropic.messages.create().
42
+ api_key (Optional[str]): The API key for authenticating with the
43
+ Anthropic service. (default: :obj:`None`)
44
+ """
45
+ super().__init__(model_type, model_config_dict)
46
+ self._api_key = api_key or os.environ.get("ANTHROPIC_API_KEY")
47
+ self.client = Anthropic(api_key=self._api_key)
48
+ self._token_counter: Optional[BaseTokenCounter] = None
49
+
50
+ def _convert_response_from_anthropic_to_openai(self, response):
51
+ # openai ^1.0.0 format, reference openai/types/chat/chat_completion.py
52
+ obj = ChatCompletion.construct(
53
+ id=None,
54
+ choices=[
55
+ dict(
56
+ index=0,
57
+ message={
58
+ "role": "assistant",
59
+ "content": response.content[0].text,
60
+ },
61
+ finish_reason=response.stop_reason,
62
+ )
63
+ ],
64
+ created=None,
65
+ model=response.model,
66
+ object="chat.completion",
67
+ )
68
+ return obj
69
+
70
+ @property
71
+ def token_counter(self) -> BaseTokenCounter:
72
+ r"""Initialize the token counter for the model backend.
73
+
74
+ Returns:
75
+ BaseTokenCounter: The token counter following the model's
76
+ tokenization style.
77
+ """
78
+ if not self._token_counter:
79
+ self._token_counter = AnthropicTokenCounter(self.model_type)
80
+ return self._token_counter
81
+
82
+ def count_tokens_from_prompt(self, prompt: str) -> int:
83
+ r"""Count the number of tokens from a prompt.
84
+
85
+ Args:
86
+ prompt (str): The prompt string.
87
+
88
+ Returns:
89
+ int: The number of tokens in the prompt.
90
+ """
91
+ return self.client.count_tokens(prompt)
92
+
93
+ def run(
94
+ self,
95
+ messages,
96
+ ):
97
+ r"""Run inference of Anthropic chat completion.
98
+
99
+ Args:
100
+ messages (List[Dict]): Message list with the chat history
101
+ in OpenAI API format.
102
+
103
+ Returns:
104
+ Dict[str, Any]: Response in the OpenAI API format.
105
+ """
106
+
107
+ if messages[0]["role"] == "system":
108
+ sys_msg = messages.pop(0)["content"]
109
+ else:
110
+ sys_msg = NOT_GIVEN
111
+ response = self.client.messages.create(
112
+ model=self.model_type.value,
113
+ system=sys_msg,
114
+ messages=messages,
115
+ **self.model_config_dict,
116
+ )
117
+
118
+ # format response to openai format
119
+ response = self._convert_response_from_anthropic_to_openai(response)
120
+
121
+ return response
122
+
123
+ def check_model_config(self):
124
+ r"""Check whether the model configuration is valid for anthropic
125
+ model backends.
126
+
127
+ Raises:
128
+ ValueError: If the model configuration dictionary contains any
129
+ unexpected arguments not supported by the
130
+ Anthropic API.
131
+ """
132
+ for param in self.model_config_dict:
133
+ if param not in ANTHROPIC_API_PARAMS:
134
+ raise ValueError(
135
+ f"Unexpected argument `{param}` is "
136
+ "input into Anthropic model backend."
137
+ )
138
+
139
+ @property
140
+ def stream(self) -> bool:
141
+ r"""Returns whether the model is in stream mode,
142
+ which sends partial results each time.
143
+ Returns:
144
+ bool: Whether the model is in stream mode.
145
+ """
146
+ return self.model_config_dict.get("stream", False)
@@ -12,7 +12,7 @@
12
12
  # limitations under the License.
13
13
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
14
  from abc import ABC, abstractmethod
15
- from typing import Any, Dict, List, Union
15
+ from typing import Any, Dict, List, Optional, Union
16
16
 
17
17
  from openai import Stream
18
18
 
@@ -26,17 +26,24 @@ class BaseModelBackend(ABC):
26
26
  May be OpenAI API, a local LLM, a stub for unit tests, etc.
27
27
  """
28
28
 
29
- def __init__(self, model_type: ModelType,
30
- model_config_dict: Dict[str, Any]) -> None:
29
+ def __init__(
30
+ self,
31
+ model_type: ModelType,
32
+ model_config_dict: Dict[str, Any],
33
+ api_key: Optional[str] = None,
34
+ ) -> None:
31
35
  r"""Constructor for the model backend.
32
36
 
33
37
  Args:
34
38
  model_type (ModelType): Model for which a backend is created.
35
39
  model_config_dict (Dict[str, Any]): A config dictionary.
40
+ api_key (Optional[str]): The API key for authenticating with the
41
+ LLM service.
36
42
  """
37
43
  self.model_type = model_type
38
44
 
39
45
  self.model_config_dict = model_config_dict
46
+ self._api_key = api_key
40
47
  self.check_model_config()
41
48
 
42
49
  @property
@@ -11,14 +11,13 @@
11
11
  # See the License for the specific language governing permissions and
12
12
  # limitations under the License.
13
13
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
- from typing import Any, Dict
15
-
16
- from camel.models import (
17
- BaseModelBackend,
18
- OpenAIModel,
19
- OpenSourceModel,
20
- StubModel,
21
- )
14
+ from typing import Any, Dict, Optional
15
+
16
+ from camel.models.anthropic_model import AnthropicModel
17
+ from camel.models.base_model import BaseModelBackend
18
+ from camel.models.open_source_model import OpenSourceModel
19
+ from camel.models.openai_model import OpenAIModel
20
+ from camel.models.stub_model import StubModel
22
21
  from camel.types import ModelType
23
22
 
24
23
 
@@ -30,14 +29,19 @@ class ModelFactory:
30
29
  """
31
30
 
32
31
  @staticmethod
33
- def create(model_type: ModelType,
34
- model_config_dict: Dict) -> BaseModelBackend:
32
+ def create(
33
+ model_type: ModelType,
34
+ model_config_dict: Dict,
35
+ api_key: Optional[str] = None,
36
+ ) -> BaseModelBackend:
35
37
  r"""Creates an instance of `BaseModelBackend` of the specified type.
36
38
 
37
39
  Args:
38
40
  model_type (ModelType): Model for which a backend is created.
39
41
  model_config_dict (Dict): A dictionary that will be fed into
40
42
  the backend constructor.
43
+ api_key (Optional[str]): The API key for authenticating with the
44
+ LLM service.
41
45
 
42
46
  Raises:
43
47
  ValueError: If there is no backend for the model.
@@ -52,8 +56,10 @@ class ModelFactory:
52
56
  model_class = StubModel
53
57
  elif model_type.is_open_source:
54
58
  model_class = OpenSourceModel
59
+ elif model_type.is_anthropic:
60
+ model_class = AnthropicModel
55
61
  else:
56
62
  raise ValueError(f"Unknown model type `{model_type}` is input")
57
63
 
58
- inst = model_class(model_type, model_config_dict)
64
+ inst = model_class(model_type, model_config_dict, api_key)
59
65
  return inst
@@ -45,11 +45,13 @@ class OpenSourceModel(BaseModelBackend):
45
45
  # Check whether the input model type is open-source
46
46
  if not model_type.is_open_source:
47
47
  raise ValueError(
48
- f"Model `{model_type}` is not a supported open-source model.")
48
+ f"Model `{model_type}` is not a supported open-source model."
49
+ )
49
50
 
50
51
  # Check whether input model path is empty
51
- model_path: Optional[str] = (self.model_config_dict.get(
52
- "model_path", None))
52
+ model_path: Optional[str] = self.model_config_dict.get(
53
+ "model_path", None
54
+ )
53
55
  if not model_path:
54
56
  raise ValueError("Path to open-source model is not provided.")
55
57
  self.model_path: str = model_path
@@ -59,19 +61,23 @@ class OpenSourceModel(BaseModelBackend):
59
61
  if not self.model_type.validate_model_name(self.model_name):
60
62
  raise ValueError(
61
63
  f"Model name `{self.model_name}` does not match model type "
62
- f"`{self.model_type.value}`.")
64
+ f"`{self.model_type.value}`."
65
+ )
63
66
 
64
67
  # Load the server URL and check whether it is None
65
- server_url: Optional[str] = (self.model_config_dict.get(
66
- "server_url", None))
68
+ server_url: Optional[str] = self.model_config_dict.get(
69
+ "server_url", None
70
+ )
67
71
  if not server_url:
68
72
  raise ValueError(
69
- "URL to server running open-source LLM is not provided.")
73
+ "URL to server running open-source LLM is not provided."
74
+ )
70
75
  self.server_url: str = server_url
71
76
  self._client = OpenAI(
72
77
  base_url=self.server_url,
73
78
  timeout=60,
74
79
  max_retries=3,
80
+ api_key="fake_key",
75
81
  )
76
82
 
77
83
  # Replace `model_config_dict` with only the params to be
@@ -88,7 +94,8 @@ class OpenSourceModel(BaseModelBackend):
88
94
  """
89
95
  if not self._token_counter:
90
96
  self._token_counter = OpenSourceTokenCounter(
91
- self.model_type, self.model_path)
97
+ self.model_type, self.model_path
98
+ )
92
99
  return self._token_counter
93
100
 
94
101
  def run(
@@ -123,16 +130,21 @@ class OpenSourceModel(BaseModelBackend):
123
130
  unexpected arguments to OpenAI API, or it does not contain
124
131
  :obj:`model_path` or :obj:`server_url`.
125
132
  """
126
- if ("model_path" not in self.model_config_dict
127
- or "server_url" not in self.model_config_dict):
133
+ if (
134
+ "model_path" not in self.model_config_dict
135
+ or "server_url" not in self.model_config_dict
136
+ ):
128
137
  raise ValueError(
129
138
  "Invalid configuration for open-source model backend with "
130
- ":obj:`model_path` or :obj:`server_url` missing.")
139
+ ":obj:`model_path` or :obj:`server_url` missing."
140
+ )
131
141
 
132
142
  for param in self.model_config_dict["api_params"].__dict__:
133
143
  if param not in OPENAI_API_PARAMS:
134
- raise ValueError(f"Unexpected argument `{param}` is "
135
- "input into open-source model backend.")
144
+ raise ValueError(
145
+ f"Unexpected argument `{param}` is "
146
+ "input into open-source model backend."
147
+ )
136
148
 
137
149
  @property
138
150
  def stream(self) -> bool: