camel-ai 0.1.5.6__py3-none-any.whl → 0.1.6.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of camel-ai might be problematic. Click here for more details.

Files changed (133) hide show
  1. camel/__init__.py +1 -1
  2. camel/agents/chat_agent.py +249 -36
  3. camel/agents/critic_agent.py +18 -2
  4. camel/agents/deductive_reasoner_agent.py +16 -4
  5. camel/agents/embodied_agent.py +20 -6
  6. camel/agents/knowledge_graph_agent.py +24 -5
  7. camel/agents/role_assignment_agent.py +13 -1
  8. camel/agents/search_agent.py +16 -5
  9. camel/agents/task_agent.py +20 -5
  10. camel/configs/__init__.py +11 -9
  11. camel/configs/anthropic_config.py +5 -6
  12. camel/configs/base_config.py +50 -4
  13. camel/configs/gemini_config.py +69 -17
  14. camel/configs/groq_config.py +105 -0
  15. camel/configs/litellm_config.py +2 -8
  16. camel/configs/mistral_config.py +78 -0
  17. camel/configs/ollama_config.py +5 -7
  18. camel/configs/openai_config.py +12 -23
  19. camel/configs/vllm_config.py +102 -0
  20. camel/configs/zhipuai_config.py +5 -11
  21. camel/embeddings/__init__.py +2 -0
  22. camel/embeddings/mistral_embedding.py +89 -0
  23. camel/human.py +1 -1
  24. camel/interpreters/__init__.py +2 -0
  25. camel/interpreters/ipython_interpreter.py +167 -0
  26. camel/loaders/__init__.py +2 -0
  27. camel/loaders/firecrawl_reader.py +213 -0
  28. camel/memories/agent_memories.py +1 -4
  29. camel/memories/blocks/chat_history_block.py +6 -2
  30. camel/memories/blocks/vectordb_block.py +3 -1
  31. camel/memories/context_creators/score_based.py +6 -6
  32. camel/memories/records.py +9 -7
  33. camel/messages/base.py +1 -0
  34. camel/models/__init__.py +8 -0
  35. camel/models/anthropic_model.py +7 -2
  36. camel/models/azure_openai_model.py +152 -0
  37. camel/models/base_model.py +9 -2
  38. camel/models/gemini_model.py +14 -2
  39. camel/models/groq_model.py +131 -0
  40. camel/models/litellm_model.py +26 -4
  41. camel/models/mistral_model.py +169 -0
  42. camel/models/model_factory.py +30 -3
  43. camel/models/ollama_model.py +21 -2
  44. camel/models/open_source_model.py +13 -5
  45. camel/models/openai_model.py +7 -2
  46. camel/models/stub_model.py +4 -4
  47. camel/models/vllm_model.py +138 -0
  48. camel/models/zhipuai_model.py +7 -4
  49. camel/prompts/__init__.py +8 -1
  50. camel/prompts/image_craft.py +34 -0
  51. camel/prompts/multi_condition_image_craft.py +34 -0
  52. camel/prompts/task_prompt_template.py +10 -4
  53. camel/prompts/{descripte_video_prompt.py → video_description_prompt.py} +1 -1
  54. camel/responses/agent_responses.py +4 -3
  55. camel/retrievers/auto_retriever.py +2 -2
  56. camel/societies/babyagi_playing.py +6 -4
  57. camel/societies/role_playing.py +16 -8
  58. camel/storages/graph_storages/graph_element.py +10 -14
  59. camel/storages/graph_storages/neo4j_graph.py +5 -0
  60. camel/storages/vectordb_storages/base.py +24 -13
  61. camel/storages/vectordb_storages/milvus.py +1 -1
  62. camel/storages/vectordb_storages/qdrant.py +2 -3
  63. camel/tasks/__init__.py +22 -0
  64. camel/tasks/task.py +408 -0
  65. camel/tasks/task_prompt.py +65 -0
  66. camel/toolkits/__init__.py +39 -0
  67. camel/toolkits/base.py +4 -2
  68. camel/toolkits/code_execution.py +1 -1
  69. camel/toolkits/dalle_toolkit.py +146 -0
  70. camel/toolkits/github_toolkit.py +19 -34
  71. camel/toolkits/google_maps_toolkit.py +368 -0
  72. camel/toolkits/math_toolkit.py +79 -0
  73. camel/toolkits/open_api_toolkit.py +547 -0
  74. camel/{functions → toolkits}/openai_function.py +2 -7
  75. camel/toolkits/retrieval_toolkit.py +76 -0
  76. camel/toolkits/search_toolkit.py +326 -0
  77. camel/toolkits/slack_toolkit.py +308 -0
  78. camel/toolkits/twitter_toolkit.py +522 -0
  79. camel/toolkits/weather_toolkit.py +173 -0
  80. camel/types/enums.py +154 -35
  81. camel/utils/__init__.py +14 -2
  82. camel/utils/async_func.py +1 -1
  83. camel/utils/commons.py +152 -2
  84. camel/utils/constants.py +3 -0
  85. camel/utils/token_counting.py +148 -40
  86. camel/workforce/__init__.py +23 -0
  87. camel/workforce/base.py +50 -0
  88. camel/workforce/manager_node.py +299 -0
  89. camel/workforce/role_playing_node.py +168 -0
  90. camel/workforce/single_agent_node.py +77 -0
  91. camel/workforce/task_channel.py +173 -0
  92. camel/workforce/utils.py +97 -0
  93. camel/workforce/worker_node.py +115 -0
  94. camel/workforce/workforce.py +49 -0
  95. camel/workforce/workforce_prompt.py +125 -0
  96. {camel_ai-0.1.5.6.dist-info → camel_ai-0.1.6.1.dist-info}/METADATA +45 -3
  97. camel_ai-0.1.6.1.dist-info/RECORD +182 -0
  98. camel/functions/__init__.py +0 -51
  99. camel/functions/google_maps_function.py +0 -335
  100. camel/functions/math_functions.py +0 -61
  101. camel/functions/open_api_function.py +0 -508
  102. camel/functions/retrieval_functions.py +0 -61
  103. camel/functions/search_functions.py +0 -298
  104. camel/functions/slack_functions.py +0 -286
  105. camel/functions/twitter_function.py +0 -479
  106. camel/functions/weather_functions.py +0 -144
  107. camel_ai-0.1.5.6.dist-info/RECORD +0 -157
  108. /camel/{functions → toolkits}/open_api_specs/biztoc/__init__.py +0 -0
  109. /camel/{functions → toolkits}/open_api_specs/biztoc/ai-plugin.json +0 -0
  110. /camel/{functions → toolkits}/open_api_specs/biztoc/openapi.yaml +0 -0
  111. /camel/{functions → toolkits}/open_api_specs/coursera/__init__.py +0 -0
  112. /camel/{functions → toolkits}/open_api_specs/coursera/openapi.yaml +0 -0
  113. /camel/{functions → toolkits}/open_api_specs/create_qr_code/__init__.py +0 -0
  114. /camel/{functions → toolkits}/open_api_specs/create_qr_code/openapi.yaml +0 -0
  115. /camel/{functions → toolkits}/open_api_specs/klarna/__init__.py +0 -0
  116. /camel/{functions → toolkits}/open_api_specs/klarna/openapi.yaml +0 -0
  117. /camel/{functions → toolkits}/open_api_specs/nasa_apod/__init__.py +0 -0
  118. /camel/{functions → toolkits}/open_api_specs/nasa_apod/openapi.yaml +0 -0
  119. /camel/{functions → toolkits}/open_api_specs/outschool/__init__.py +0 -0
  120. /camel/{functions → toolkits}/open_api_specs/outschool/ai-plugin.json +0 -0
  121. /camel/{functions → toolkits}/open_api_specs/outschool/openapi.yaml +0 -0
  122. /camel/{functions → toolkits}/open_api_specs/outschool/paths/__init__.py +0 -0
  123. /camel/{functions → toolkits}/open_api_specs/outschool/paths/get_classes.py +0 -0
  124. /camel/{functions → toolkits}/open_api_specs/outschool/paths/search_teachers.py +0 -0
  125. /camel/{functions → toolkits}/open_api_specs/security_config.py +0 -0
  126. /camel/{functions → toolkits}/open_api_specs/speak/__init__.py +0 -0
  127. /camel/{functions → toolkits}/open_api_specs/speak/openapi.yaml +0 -0
  128. /camel/{functions → toolkits}/open_api_specs/web_scraper/__init__.py +0 -0
  129. /camel/{functions → toolkits}/open_api_specs/web_scraper/ai-plugin.json +0 -0
  130. /camel/{functions → toolkits}/open_api_specs/web_scraper/openapi.yaml +0 -0
  131. /camel/{functions → toolkits}/open_api_specs/web_scraper/paths/__init__.py +0 -0
  132. /camel/{functions → toolkits}/open_api_specs/web_scraper/paths/scraper.py +0 -0
  133. {camel_ai-0.1.5.6.dist-info → camel_ai-0.1.6.1.dist-info}/WHEEL +0 -0
@@ -0,0 +1,213 @@
1
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an "AS IS" BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
+
15
+ import os
16
+ from typing import Any, Dict, Optional
17
+
18
+ from pydantic import BaseModel
19
+
20
+
21
+ class Firecrawl:
22
+ r"""Firecrawl allows you to turn entire websites into LLM-ready markdown.
23
+
24
+ Args:
25
+ api_key (Optional[str]): API key for authenticating with the Firecrawl
26
+ API.
27
+ api_url (Optional[str]): Base URL for the Firecrawl API.
28
+
29
+ References:
30
+ https://docs.firecrawl.dev/introduction
31
+ """
32
+
33
+ def __init__(
34
+ self,
35
+ api_key: Optional[str] = None,
36
+ api_url: Optional[str] = None,
37
+ ) -> None:
38
+ from firecrawl import FirecrawlApp
39
+
40
+ self._api_key = api_key or os.environ.get("FIRECRAWL_API_KEY")
41
+ self._api_url = api_url or os.environ.get("FIRECRAWL_API_URL")
42
+
43
+ self.app = FirecrawlApp(api_key=self._api_key, api_url=self._api_url)
44
+
45
+ def crawl(
46
+ self,
47
+ url: str,
48
+ params: Optional[Dict[str, Any]] = None,
49
+ wait_until_done: bool = True,
50
+ **kwargs: Any,
51
+ ) -> Any:
52
+ r"""Crawl a URL and all accessible subpages. Customize the crawl by
53
+ setting different parameters, and receive the full response or a job
54
+ ID based on the specified options.
55
+
56
+ Args:
57
+ url (str): The URL to crawl.
58
+ params (Optional[Dict[str, Any]]): Additional parameters for the
59
+ crawl request. Defaults to `None`.
60
+ wait_until_done (bool): Whether to wait until the crawl job is
61
+ completed. Defaults to `True`.
62
+ **kwargs (Any): Additional keyword arguments, such as
63
+ `poll_interval`, `idempotency_key`, etc.
64
+
65
+ Returns:
66
+ Any: The list content of the URL if `wait_until_done` is True;
67
+ otherwise, a string job ID.
68
+
69
+ Raises:
70
+ RuntimeError: If the crawling process fails.
71
+ """
72
+
73
+ try:
74
+ crawl_response = self.app.crawl_url(
75
+ url=url,
76
+ params=params,
77
+ **kwargs,
78
+ wait_until_done=wait_until_done,
79
+ )
80
+ return (
81
+ crawl_response
82
+ if wait_until_done
83
+ else crawl_response.get("jobId")
84
+ )
85
+ except Exception as e:
86
+ raise RuntimeError(f"Failed to crawl the URL: {e}")
87
+
88
+ def markdown_crawl(self, url: str) -> str:
89
+ r"""Crawl a URL and all accessible subpages and return the content in
90
+ Markdown format.
91
+
92
+ Args:
93
+ url (str): The URL to crawl.
94
+
95
+ Returns:
96
+ str: The content of the URL in Markdown format.
97
+
98
+ Raises:
99
+ RuntimeError: If the crawling process fails.
100
+ """
101
+
102
+ try:
103
+ crawl_result = self.app.crawl_url(url=url)
104
+ if not isinstance(crawl_result, list):
105
+ raise ValueError("Unexpected response format")
106
+ markdown_contents = [
107
+ result.get('markdown', '') for result in crawl_result
108
+ ]
109
+ return '\n'.join(markdown_contents)
110
+ except Exception as e:
111
+ raise RuntimeError(
112
+ f"Failed to crawl the URL and retrieve markdown: {e}"
113
+ )
114
+
115
+ def check_crawl_job(self, job_id: str) -> Dict:
116
+ r"""Check the status of a crawl job.
117
+
118
+ Args:
119
+ job_id (str): The ID of the crawl job.
120
+
121
+ Returns:
122
+ Dict: The response including status of the crawl job.
123
+
124
+ Raises:
125
+ RuntimeError: If the check process fails.
126
+ """
127
+
128
+ try:
129
+ return self.app.check_crawl_status(job_id)
130
+ except Exception as e:
131
+ raise RuntimeError(f"Failed to check the crawl job status: {e}")
132
+
133
+ def scrape(
134
+ self,
135
+ url: str,
136
+ params: Optional[Dict[str, Any]] = None,
137
+ ) -> Dict:
138
+ r"""To scrape a single URL. This function supports advanced scraping
139
+ by setting different parameters and returns the full scraped data as a
140
+ dictionary.
141
+
142
+ Reference: https://docs.firecrawl.dev/advanced-scraping-guide
143
+
144
+ Args:
145
+ url (str): The URL to read.
146
+ params (Optional[Dict[str, Any]]): Additional parameters for the
147
+ scrape request.
148
+
149
+ Returns:
150
+ Dict: The scraped data.
151
+
152
+ Raises:
153
+ RuntimeError: If the scrape process fails.
154
+ """
155
+ try:
156
+ return self.app.scrape_url(url=url, params=params)
157
+ except Exception as e:
158
+ raise RuntimeError(f"Failed to scrape the URL: {e}")
159
+
160
+ def structured_scrape(self, url: str, output_schema: BaseModel) -> Dict:
161
+ r"""Use LLM to extract structured data from given URL.
162
+
163
+ Args:
164
+ url (str): The URL to read.
165
+ output_schema (BaseModel): A pydantic model
166
+ that includes value types and field descriptions used to
167
+ generate a structured response by LLM. This schema helps
168
+ in defining the expected output format.
169
+
170
+ Returns:
171
+ Dict: The content of the URL.
172
+
173
+ Raises:
174
+ RuntimeError: If the scrape process fails.
175
+ """
176
+ try:
177
+ data = self.app.scrape_url(
178
+ url,
179
+ {
180
+ 'extractorOptions': {
181
+ "mode": "llm-extraction",
182
+ "extractionPrompt": "Based on the information on "
183
+ "the page, extract the information from the schema.",
184
+ 'extractionSchema': output_schema.model_json_schema(),
185
+ },
186
+ 'pageOptions': {'onlyMainContent': True},
187
+ },
188
+ )
189
+ return data.get("llm_extraction", {})
190
+ except Exception as e:
191
+ raise RuntimeError(f"Failed to perform structured scrape: {e}")
192
+
193
+ def tidy_scrape(self, url: str) -> str:
194
+ r"""Only return the main content of the page, excluding headers,
195
+ navigation bars, footers, etc. in Markdown format.
196
+
197
+ Args:
198
+ url (str): The URL to read.
199
+
200
+ Returns:
201
+ str: The markdown content of the URL.
202
+
203
+ Raises:
204
+ RuntimeError: If the scrape process fails.
205
+ """
206
+
207
+ try:
208
+ scrape_result = self.app.scrape_url(
209
+ url, {'pageOptions': {'onlyMainContent': True}}
210
+ )
211
+ return scrape_result.get("markdown", "")
212
+ except Exception as e:
213
+ raise RuntimeError(f"Failed to perform tidy scrape: {e}")
@@ -16,10 +16,7 @@ from typing import List, Optional
16
16
 
17
17
  from camel.memories.base import AgentMemory, BaseContextCreator
18
18
  from camel.memories.blocks import ChatHistoryBlock, VectorDBBlock
19
- from camel.memories.records import (
20
- ContextRecord,
21
- MemoryRecord,
22
- )
19
+ from camel.memories.records import ContextRecord, MemoryRecord
23
20
  from camel.storages import BaseKeyValueStorage, BaseVectorStorage
24
21
  from camel.types import OpenAIBackendRole
25
22
 
@@ -84,11 +84,15 @@ class ChatHistoryBlock(MemoryBlock):
84
84
  for record in reversed(chat_records):
85
85
  if record.role_at_backend == OpenAIBackendRole.SYSTEM:
86
86
  # System messages are always kept.
87
- output_records.append(ContextRecord(record, 1.0))
87
+ output_records.append(
88
+ ContextRecord(memory_record=record, score=1.0)
89
+ )
88
90
  else:
89
91
  # Other messages' score drops down gradually
90
92
  score *= self.keep_rate
91
- output_records.append(ContextRecord(record, score))
93
+ output_records.append(
94
+ ContextRecord(memory_record=record, score=score)
95
+ )
92
96
 
93
97
  output_records.reverse()
94
98
  return output_records
@@ -67,7 +67,9 @@ class VectorDBBlock(MemoryBlock):
67
67
  vector database based on similarity to :obj:`current_state`.
68
68
  """
69
69
  query_vector = self.embedding.embed(keyword)
70
- results = self.storage.query(VectorDBQuery(query_vector, top_k=limit))
70
+ results = self.storage.query(
71
+ VectorDBQuery(query_vector=query_vector, top_k=limit)
72
+ )
71
73
  return [
72
74
  ContextRecord(
73
75
  memory_record=MemoryRecord.from_dict(result.record.payload),
@@ -11,17 +11,17 @@
11
11
  # See the License for the specific language governing permissions and
12
12
  # limitations under the License.
13
13
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
- from dataclasses import dataclass
15
14
  from typing import List, Tuple
16
15
 
16
+ from pydantic import BaseModel
17
+
17
18
  from camel.memories.base import BaseContextCreator
18
19
  from camel.memories.records import ContextRecord
19
20
  from camel.messages import OpenAIMessage
20
21
  from camel.utils import BaseTokenCounter
21
22
 
22
23
 
23
- @dataclass(frozen=True)
24
- class _ContextUnit:
24
+ class _ContextUnit(BaseModel):
25
25
  idx: int
26
26
  record: ContextRecord
27
27
  num_tokens: int
@@ -88,9 +88,9 @@ class ScoreBasedContextCreator(BaseContextCreator):
88
88
  uuid_set.add(record.memory_record.uuid)
89
89
  context_units.append(
90
90
  _ContextUnit(
91
- idx,
92
- record,
93
- self.token_counter.count_tokens_from_messages(
91
+ idx=idx,
92
+ record=record,
93
+ num_tokens=self.token_counter.count_tokens_from_messages(
94
94
  [record.memory_record.to_openai_message()]
95
95
  ),
96
96
  )
camel/memories/records.py CHANGED
@@ -12,16 +12,17 @@
12
12
  # limitations under the License.
13
13
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
14
 
15
- from dataclasses import asdict, dataclass, field
15
+ from dataclasses import asdict
16
16
  from typing import Any, ClassVar, Dict
17
17
  from uuid import UUID, uuid4
18
18
 
19
+ from pydantic import BaseModel, ConfigDict, Field
20
+
19
21
  from camel.messages import BaseMessage, FunctionCallingMessage, OpenAIMessage
20
22
  from camel.types import OpenAIBackendRole
21
23
 
22
24
 
23
- @dataclass(frozen=True)
24
- class MemoryRecord:
25
+ class MemoryRecord(BaseModel):
25
26
  r"""The basic message storing unit in the CAMEL memory system.
26
27
 
27
28
  Attributes:
@@ -38,10 +39,12 @@ class MemoryRecord:
38
39
  will be an empty `Dict`.
39
40
  """
40
41
 
42
+ model_config = ConfigDict(arbitrary_types_allowed=True)
43
+
41
44
  message: BaseMessage
42
45
  role_at_backend: OpenAIBackendRole
43
- uuid: UUID = field(default_factory=uuid4)
44
- extra_info: Dict[str, str] = field(default_factory=dict)
46
+ uuid: UUID = Field(default_factory=uuid4)
47
+ extra_info: Dict[str, str] = Field(default_factory=dict)
45
48
 
46
49
  _MESSAGE_TYPES: ClassVar[dict] = {
47
50
  "BaseMessage": BaseMessage,
@@ -85,8 +88,7 @@ class MemoryRecord:
85
88
  return self.message.to_openai_message(self.role_at_backend)
86
89
 
87
90
 
88
- @dataclass(frozen=True)
89
- class ContextRecord:
91
+ class ContextRecord(BaseModel):
90
92
  r"""The result of memory retrieving."""
91
93
 
92
94
  memory_record: MemoryRecord
camel/messages/base.py CHANGED
@@ -60,6 +60,7 @@ class BaseMessage:
60
60
  role_type: RoleType
61
61
  meta_dict: Optional[Dict[str, str]]
62
62
  content: str
63
+
63
64
  video_bytes: Optional[bytes] = None
64
65
  image_list: Optional[List[Image.Image]] = None
65
66
  image_detail: Literal["auto", "low", "high"] = "auto"
camel/models/__init__.py CHANGED
@@ -12,9 +12,12 @@
12
12
  # limitations under the License.
13
13
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
14
  from .anthropic_model import AnthropicModel
15
+ from .azure_openai_model import AzureOpenAIModel
15
16
  from .base_model import BaseModelBackend
16
17
  from .gemini_model import GeminiModel
18
+ from .groq_model import GroqModel
17
19
  from .litellm_model import LiteLLMModel
20
+ from .mistral_model import MistralModel
18
21
  from .model_factory import ModelFactory
19
22
  from .nemotron_model import NemotronModel
20
23
  from .ollama_model import OllamaModel
@@ -22,12 +25,16 @@ from .open_source_model import OpenSourceModel
22
25
  from .openai_audio_models import OpenAIAudioModels
23
26
  from .openai_model import OpenAIModel
24
27
  from .stub_model import StubModel
28
+ from .vllm_model import VLLMModel
25
29
  from .zhipuai_model import ZhipuAIModel
26
30
 
27
31
  __all__ = [
28
32
  'BaseModelBackend',
29
33
  'OpenAIModel',
34
+ 'AzureOpenAIModel',
30
35
  'AnthropicModel',
36
+ 'MistralModel',
37
+ 'GroqModel',
31
38
  'StubModel',
32
39
  'ZhipuAIModel',
33
40
  'OpenSourceModel',
@@ -36,5 +43,6 @@ __all__ = [
36
43
  'OpenAIAudioModels',
37
44
  'NemotronModel',
38
45
  'OllamaModel',
46
+ 'VLLMModel',
39
47
  'GeminiModel',
40
48
  ]
@@ -36,6 +36,7 @@ class AnthropicModel(BaseModelBackend):
36
36
  model_config_dict: Dict[str, Any],
37
37
  api_key: Optional[str] = None,
38
38
  url: Optional[str] = None,
39
+ token_counter: Optional[BaseTokenCounter] = None,
39
40
  ) -> None:
40
41
  r"""Constructor for Anthropic backend.
41
42
 
@@ -48,12 +49,16 @@ class AnthropicModel(BaseModelBackend):
48
49
  Anthropic service. (default: :obj:`None`)
49
50
  url (Optional[str]): The url to the Anthropic service. (default:
50
51
  :obj:`None`)
52
+ token_counter (Optional[BaseTokenCounter]): Token counter to use
53
+ for the model. If not provided, `AnthropicTokenCounter` will
54
+ be used.
51
55
  """
52
- super().__init__(model_type, model_config_dict, api_key, url)
56
+ super().__init__(
57
+ model_type, model_config_dict, api_key, url, token_counter
58
+ )
53
59
  self._api_key = api_key or os.environ.get("ANTHROPIC_API_KEY")
54
60
  self._url = url or os.environ.get("ANTHROPIC_API_BASE_URL")
55
61
  self.client = Anthropic(api_key=self._api_key, base_url=self._url)
56
- self._token_counter: Optional[BaseTokenCounter] = None
57
62
 
58
63
  def _convert_response_from_anthropic_to_openai(self, response):
59
64
  # openai ^1.0.0 format, reference openai/types/chat/chat_completion.py
@@ -0,0 +1,152 @@
1
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an "AS IS" BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
+ import os
15
+ from typing import Any, Dict, List, Optional, Union
16
+
17
+ from openai import AzureOpenAI, Stream
18
+
19
+ from camel.configs import OPENAI_API_PARAMS
20
+ from camel.messages import OpenAIMessage
21
+ from camel.models.base_model import BaseModelBackend
22
+ from camel.types import ChatCompletion, ChatCompletionChunk, ModelType
23
+ from camel.utils import BaseTokenCounter, OpenAITokenCounter, api_keys_required
24
+
25
+
26
+ class AzureOpenAIModel(BaseModelBackend):
27
+ r"""Azure OpenAI API in a unified BaseModelBackend interface.
28
+ Doc: https://learn.microsoft.com/en-us/azure/ai-services/openai/
29
+ """
30
+
31
+ def __init__(
32
+ self,
33
+ model_type: ModelType,
34
+ model_config_dict: Dict[str, Any],
35
+ api_key: Optional[str] = None,
36
+ url: Optional[str] = None,
37
+ api_version: Optional[str] = None,
38
+ azure_deployment_name: Optional[str] = None,
39
+ ) -> None:
40
+ r"""Constructor for OpenAI backend.
41
+
42
+ Args:
43
+ model_type (ModelType): Model for which a backend is created,
44
+ one of GPT_* series.
45
+ model_config_dict (Dict[str, Any]): A dictionary that will
46
+ be fed into openai.ChatCompletion.create().
47
+ api_key (Optional[str]): The API key for authenticating with the
48
+ OpenAI service. (default: :obj:`None`)
49
+ url (Optional[str]): The url to the OpenAI service. (default:
50
+ :obj:`None`)
51
+ api_version (Optional[str]): The api version for the model.
52
+ azure_deployment_name (Optional[str]): The deployment name you
53
+ chose when you deployed an azure model. (default: :obj:`None`)
54
+ """
55
+ super().__init__(model_type, model_config_dict, api_key, url)
56
+ self._url = url or os.environ.get("AZURE_OPENAI_ENDPOINT")
57
+ self._api_key = api_key or os.environ.get("AZURE_OPENAI_API_KEY")
58
+ self.api_version = api_version or os.environ.get("AZURE_API_VERSION")
59
+ self.azure_deployment_name = azure_deployment_name or os.environ.get(
60
+ "AZURE_DEPLOYMENT_NAME"
61
+ )
62
+
63
+ if self._url is None:
64
+ raise ValueError(
65
+ "Must provide either the `url` argument "
66
+ "or `AZURE_OPENAI_ENDPOINT` environment variable."
67
+ )
68
+ if self._api_key is None:
69
+ raise ValueError(
70
+ "Must provide either the `api_key` argument "
71
+ "or `AZURE_OPENAI_API_KEY` environment variable."
72
+ )
73
+ if self.api_version is None:
74
+ raise ValueError(
75
+ "Must provide either the `api_version` argument "
76
+ "or `AZURE_API_VERSION` environment variable."
77
+ )
78
+ if self.azure_deployment_name is None:
79
+ raise ValueError(
80
+ "Must provide either the `azure_deployment_name` argument "
81
+ "or `AZURE_DEPLOYMENT_NAME` environment variable."
82
+ )
83
+ self.model = str(self.azure_deployment_name)
84
+
85
+ self._client = AzureOpenAI(
86
+ azure_endpoint=str(self._url),
87
+ azure_deployment=self.azure_deployment_name,
88
+ api_version=self.api_version,
89
+ api_key=self._api_key,
90
+ timeout=60,
91
+ max_retries=3,
92
+ )
93
+ self._token_counter: Optional[BaseTokenCounter] = None
94
+
95
+ @property
96
+ def token_counter(self) -> BaseTokenCounter:
97
+ r"""Initialize the token counter for the model backend.
98
+
99
+ Returns:
100
+ BaseTokenCounter: The token counter following the model's
101
+ tokenization style.
102
+ """
103
+ if not self._token_counter:
104
+ self._token_counter = OpenAITokenCounter(self.model_type)
105
+ return self._token_counter
106
+
107
+ @api_keys_required("AZURE_OPENAI_API_KEY", "AZURE_API_VERSION")
108
+ def run(
109
+ self,
110
+ messages: List[OpenAIMessage],
111
+ ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
112
+ r"""Runs inference of Azure OpenAI chat completion.
113
+
114
+ Args:
115
+ messages (List[OpenAIMessage]): Message list with the chat history
116
+ in OpenAI API format.
117
+
118
+ Returns:
119
+ Union[ChatCompletion, Stream[ChatCompletionChunk]]:
120
+ `ChatCompletion` in the non-stream mode, or
121
+ `Stream[ChatCompletionChunk]` in the stream mode.
122
+ """
123
+ response = self._client.chat.completions.create(
124
+ messages=messages,
125
+ model=self.model,
126
+ **self.model_config_dict,
127
+ )
128
+ return response
129
+
130
+ def check_model_config(self):
131
+ r"""Check whether the model configuration contains any
132
+ unexpected arguments to Azure OpenAI API.
133
+
134
+ Raises:
135
+ ValueError: If the model configuration dictionary contains any
136
+ unexpected arguments to Azure OpenAI API.
137
+ """
138
+ for param in self.model_config_dict:
139
+ if param not in OPENAI_API_PARAMS:
140
+ raise ValueError(
141
+ f"Unexpected argument `{param}` is "
142
+ "input into Azure OpenAI model backend."
143
+ )
144
+
145
+ @property
146
+ def stream(self) -> bool:
147
+ r"""Returns whether the model is in stream mode,
148
+ which sends partial results each time.
149
+ Returns:
150
+ bool: Whether the model is in stream mode.
151
+ """
152
+ return self.model_config_dict.get("stream", False)
@@ -32,6 +32,7 @@ class BaseModelBackend(ABC):
32
32
  model_config_dict: Dict[str, Any],
33
33
  api_key: Optional[str] = None,
34
34
  url: Optional[str] = None,
35
+ token_counter: Optional[BaseTokenCounter] = None,
35
36
  ) -> None:
36
37
  r"""Constructor for the model backend.
37
38
 
@@ -41,13 +42,16 @@ class BaseModelBackend(ABC):
41
42
  api_key (Optional[str]): The API key for authenticating with the
42
43
  model service.
43
44
  url (Optional[str]): The url to the model service.
45
+ token_counter (Optional[BaseTokenCounter]): Token counter to use
46
+ for the model. If not provided, `OpenAITokenCounter` will
47
+ be used.
44
48
  """
45
49
  self.model_type = model_type
46
-
47
50
  self.model_config_dict = model_config_dict
48
51
  self._api_key = api_key
49
52
  self._url = url
50
53
  self.check_model_config()
54
+ self._token_counter = token_counter
51
55
 
52
56
  @property
53
57
  @abstractmethod
@@ -109,7 +113,10 @@ class BaseModelBackend(ABC):
109
113
  Returns:
110
114
  int: The maximum token limit for the given model.
111
115
  """
112
- return self.model_type.token_limit
116
+ return (
117
+ self.model_config_dict.get("max_tokens")
118
+ or self.model_type.token_limit
119
+ )
113
120
 
114
121
  @property
115
122
  def stream(self) -> bool:
@@ -44,6 +44,7 @@ class GeminiModel(BaseModelBackend):
44
44
  model_config_dict: Dict[str, Any],
45
45
  api_key: Optional[str] = None,
46
46
  url: Optional[str] = None,
47
+ token_counter: Optional[BaseTokenCounter] = None,
47
48
  ) -> None:
48
49
  r"""Constructor for Gemini backend.
49
50
 
@@ -54,17 +55,22 @@ class GeminiModel(BaseModelBackend):
54
55
  api_key (Optional[str]): The API key for authenticating with the
55
56
  gemini service. (default: :obj:`None`)
56
57
  url (Optional[str]): The url to the gemini service.
58
+ token_counter (Optional[BaseTokenCounter]): Token counter to use
59
+ for the model. If not provided, `GeminiTokenCounter` will be
60
+ used.
57
61
  """
58
62
  import os
59
63
 
60
64
  import google.generativeai as genai
61
65
  from google.generativeai.types.generation_types import GenerationConfig
62
66
 
63
- super().__init__(model_type, model_config_dict, api_key, url)
67
+ super().__init__(
68
+ model_type, model_config_dict, api_key, url, token_counter
69
+ )
64
70
  self._api_key = api_key or os.environ.get("GOOGLE_API_KEY")
65
71
  genai.configure(api_key=self._api_key)
66
72
  self._client = genai.GenerativeModel(self.model_type.value)
67
- self._token_counter: Optional[BaseTokenCounter] = None
73
+
68
74
  keys = list(self.model_config_dict.keys())
69
75
  generation_config_dict = {
70
76
  k: self.model_config_dict.pop(k)
@@ -78,6 +84,12 @@ class GeminiModel(BaseModelBackend):
78
84
 
79
85
  @property
80
86
  def token_counter(self) -> BaseTokenCounter:
87
+ r"""Initialize the token counter for the model backend.
88
+
89
+ Returns:
90
+ BaseTokenCounter: The token counter following the model's
91
+ tokenization style.
92
+ """
81
93
  if not self._token_counter:
82
94
  self._token_counter = GeminiTokenCounter(self.model_type)
83
95
  return self._token_counter