camel-ai 0.1.5.5__py3-none-any.whl → 0.1.5.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of camel-ai might be problematic.
Files changed (97)
  1. camel/__init__.py +1 -1
  2. camel/agents/chat_agent.py +3 -3
  3. camel/agents/critic_agent.py +1 -1
  4. camel/agents/deductive_reasoner_agent.py +4 -4
  5. camel/agents/embodied_agent.py +1 -1
  6. camel/agents/knowledge_graph_agent.py +13 -17
  7. camel/agents/role_assignment_agent.py +1 -1
  8. camel/agents/search_agent.py +4 -5
  9. camel/agents/task_agent.py +5 -6
  10. camel/configs/__init__.py +15 -0
  11. camel/configs/gemini_config.py +98 -0
  12. camel/configs/groq_config.py +119 -0
  13. camel/configs/litellm_config.py +1 -1
  14. camel/configs/mistral_config.py +81 -0
  15. camel/configs/ollama_config.py +1 -1
  16. camel/configs/openai_config.py +1 -1
  17. camel/configs/vllm_config.py +103 -0
  18. camel/configs/zhipuai_config.py +1 -1
  19. camel/embeddings/__init__.py +2 -0
  20. camel/embeddings/mistral_embedding.py +89 -0
  21. camel/interpreters/__init__.py +2 -0
  22. camel/interpreters/ipython_interpreter.py +167 -0
  23. camel/models/__init__.py +10 -0
  24. camel/models/anthropic_model.py +7 -2
  25. camel/models/azure_openai_model.py +152 -0
  26. camel/models/base_model.py +9 -2
  27. camel/models/gemini_model.py +215 -0
  28. camel/models/groq_model.py +131 -0
  29. camel/models/litellm_model.py +26 -4
  30. camel/models/mistral_model.py +169 -0
  31. camel/models/model_factory.py +33 -5
  32. camel/models/ollama_model.py +21 -2
  33. camel/models/open_source_model.py +11 -3
  34. camel/models/openai_model.py +7 -2
  35. camel/models/stub_model.py +4 -4
  36. camel/models/vllm_model.py +138 -0
  37. camel/models/zhipuai_model.py +7 -4
  38. camel/prompts/__init__.py +2 -2
  39. camel/prompts/task_prompt_template.py +4 -4
  40. camel/prompts/{descripte_video_prompt.py → video_description_prompt.py} +1 -1
  41. camel/retrievers/auto_retriever.py +2 -0
  42. camel/storages/graph_storages/neo4j_graph.py +5 -0
  43. camel/toolkits/__init__.py +36 -0
  44. camel/toolkits/base.py +1 -1
  45. camel/toolkits/code_execution.py +1 -1
  46. camel/toolkits/github_toolkit.py +3 -2
  47. camel/toolkits/google_maps_toolkit.py +367 -0
  48. camel/toolkits/math_toolkit.py +79 -0
  49. camel/toolkits/open_api_toolkit.py +548 -0
  50. camel/toolkits/retrieval_toolkit.py +76 -0
  51. camel/toolkits/search_toolkit.py +326 -0
  52. camel/toolkits/slack_toolkit.py +308 -0
  53. camel/toolkits/twitter_toolkit.py +522 -0
  54. camel/toolkits/weather_toolkit.py +173 -0
  55. camel/types/enums.py +163 -30
  56. camel/utils/__init__.py +4 -0
  57. camel/utils/async_func.py +1 -1
  58. camel/utils/token_counting.py +182 -40
  59. {camel_ai-0.1.5.5.dist-info → camel_ai-0.1.5.9.dist-info}/METADATA +43 -3
  60. camel_ai-0.1.5.9.dist-info/RECORD +165 -0
  61. camel/functions/__init__.py +0 -51
  62. camel/functions/google_maps_function.py +0 -335
  63. camel/functions/math_functions.py +0 -61
  64. camel/functions/open_api_function.py +0 -508
  65. camel/functions/retrieval_functions.py +0 -61
  66. camel/functions/search_functions.py +0 -298
  67. camel/functions/slack_functions.py +0 -286
  68. camel/functions/twitter_function.py +0 -479
  69. camel/functions/weather_functions.py +0 -144
  70. camel_ai-0.1.5.5.dist-info/RECORD +0 -155
  71. /camel/{functions → toolkits}/open_api_specs/biztoc/__init__.py +0 -0
  72. /camel/{functions → toolkits}/open_api_specs/biztoc/ai-plugin.json +0 -0
  73. /camel/{functions → toolkits}/open_api_specs/biztoc/openapi.yaml +0 -0
  74. /camel/{functions → toolkits}/open_api_specs/coursera/__init__.py +0 -0
  75. /camel/{functions → toolkits}/open_api_specs/coursera/openapi.yaml +0 -0
  76. /camel/{functions → toolkits}/open_api_specs/create_qr_code/__init__.py +0 -0
  77. /camel/{functions → toolkits}/open_api_specs/create_qr_code/openapi.yaml +0 -0
  78. /camel/{functions → toolkits}/open_api_specs/klarna/__init__.py +0 -0
  79. /camel/{functions → toolkits}/open_api_specs/klarna/openapi.yaml +0 -0
  80. /camel/{functions → toolkits}/open_api_specs/nasa_apod/__init__.py +0 -0
  81. /camel/{functions → toolkits}/open_api_specs/nasa_apod/openapi.yaml +0 -0
  82. /camel/{functions → toolkits}/open_api_specs/outschool/__init__.py +0 -0
  83. /camel/{functions → toolkits}/open_api_specs/outschool/ai-plugin.json +0 -0
  84. /camel/{functions → toolkits}/open_api_specs/outschool/openapi.yaml +0 -0
  85. /camel/{functions → toolkits}/open_api_specs/outschool/paths/__init__.py +0 -0
  86. /camel/{functions → toolkits}/open_api_specs/outschool/paths/get_classes.py +0 -0
  87. /camel/{functions → toolkits}/open_api_specs/outschool/paths/search_teachers.py +0 -0
  88. /camel/{functions → toolkits}/open_api_specs/security_config.py +0 -0
  89. /camel/{functions → toolkits}/open_api_specs/speak/__init__.py +0 -0
  90. /camel/{functions → toolkits}/open_api_specs/speak/openapi.yaml +0 -0
  91. /camel/{functions → toolkits}/open_api_specs/web_scraper/__init__.py +0 -0
  92. /camel/{functions → toolkits}/open_api_specs/web_scraper/ai-plugin.json +0 -0
  93. /camel/{functions → toolkits}/open_api_specs/web_scraper/openapi.yaml +0 -0
  94. /camel/{functions → toolkits}/open_api_specs/web_scraper/paths/__init__.py +0 -0
  95. /camel/{functions → toolkits}/open_api_specs/web_scraper/paths/scraper.py +0 -0
  96. /camel/{functions → toolkits}/openai_function.py +0 -0
  97. {camel_ai-0.1.5.5.dist-info → camel_ai-0.1.5.9.dist-info}/WHEEL +0 -0
camel/configs/vllm_config.py ADDED
@@ -0,0 +1,103 @@
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+ from __future__ import annotations
+
+ from dataclasses import asdict, dataclass, field
+ from typing import Sequence
+
+ from openai._types import NOT_GIVEN, NotGiven
+
+ from camel.configs.base_config import BaseConfig
+
+
+ # flake8: noqa: E501
+ @dataclass(frozen=True)
+ class VLLMConfig(BaseConfig):
+     r"""Defines the parameters for generating chat completions using the
+     OpenAI API.
+
+     Reference: https://docs.vllm.ai/en/latest/serving/openai_compatible_server.html
+
+     Args:
+         temperature (float, optional): Sampling temperature to use, between
+             :obj:`0` and :obj:`2`. Higher values make the output more random,
+             while lower values make it more focused and deterministic.
+             (default: :obj:`0.2`)
+         top_p (float, optional): An alternative to sampling with temperature,
+             called nucleus sampling, where the model considers the results of
+             the tokens with top_p probability mass. So :obj:`0.1` means only
+             the tokens comprising the top 10% probability mass are considered.
+             (default: :obj:`1.0`)
+         n (int, optional): How many chat completion choices to generate for
+             each input message. (default: :obj:`1`)
+         response_format (object, optional): An object specifying the format
+             that the model must output. Compatible with GPT-4 Turbo and all
+             GPT-3.5 Turbo models newer than gpt-3.5-turbo-1106. Setting to
+             {"type": "json_object"} enables JSON mode, which guarantees the
+             message the model generates is valid JSON. Important: when using
+             JSON mode, you must also instruct the model to produce JSON
+             yourself via a system or user message. Without this, the model
+             may generate an unending stream of whitespace until the generation
+             reaches the token limit, resulting in a long-running and seemingly
+             "stuck" request. Also note that the message content may be
+             partially cut off if finish_reason="length", which indicates the
+             generation exceeded max_tokens or the conversation exceeded the
+             max context length.
+         stream (bool, optional): If True, partial message deltas will be sent
+             as data-only server-sent events as they become available.
+             (default: :obj:`False`)
+         stop (str or list, optional): Up to :obj:`4` sequences where the API
+             will stop generating further tokens. (default: :obj:`None`)
+         max_tokens (int, optional): The maximum number of tokens to generate
+             in the chat completion. The total length of input tokens and
+             generated tokens is limited by the model's context length.
+             (default: :obj:`None`)
+         presence_penalty (float, optional): Number between :obj:`-2.0` and
+             :obj:`2.0`. Positive values penalize new tokens based on whether
+             they appear in the text so far, increasing the model's likelihood
+             to talk about new topics. See more information about frequency and
+             presence penalties. (default: :obj:`0.0`)
+         frequency_penalty (float, optional): Number between :obj:`-2.0` and
+             :obj:`2.0`. Positive values penalize new tokens based on their
+             existing frequency in the text so far, decreasing the model's
+             likelihood to repeat the same line verbatim. See more information
+             about frequency and presence penalties. (default: :obj:`0.0`)
+         logit_bias (dict, optional): Modify the likelihood of specified tokens
+             appearing in the completion. Accepts a json object that maps tokens
+             (specified by their token ID in the tokenizer) to an associated
+             bias value from :obj:`-100` to :obj:`100`. Mathematically, the bias
+             is added to the logits generated by the model prior to sampling.
+             The exact effect will vary per model, but values between :obj:`-1`
+             and :obj:`1` should decrease or increase likelihood of selection;
+             values like :obj:`-100` or :obj:`100` should result in a ban or
+             exclusive selection of the relevant token. (default: :obj:`{}`)
+         user (str, optional): A unique identifier representing your end-user,
+             which can help OpenAI to monitor and detect abuse.
+             (default: :obj:`""`)
+     """
+
+     temperature: float = 0.2  # openai default: 1.0
+     top_p: float = 1.0
+     n: int = 1
+     stream: bool = False
+     stop: str | Sequence[str] | NotGiven = NOT_GIVEN
+     max_tokens: int | NotGiven = NOT_GIVEN
+     presence_penalty: float = 0.0
+     response_format: dict | NotGiven = NOT_GIVEN
+     frequency_penalty: float = 0.0
+     logit_bias: dict = field(default_factory=dict)
+     user: str = ""
+
+
+ VLLM_API_PARAMS = {param for param in asdict(VLLMConfig()).keys()}
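For context, a minimal usage sketch of the config added above, assuming only what the file itself shows (the dataclass fields and the `VLLM_API_PARAMS` set); the concrete parameter values are illustrative:

```python
from dataclasses import asdict

from camel.configs.vllm_config import VLLM_API_PARAMS, VLLMConfig

# Frozen dataclass: unspecified fields keep the defaults defined above.
config = VLLMConfig(temperature=0.0, max_tokens=256, stop=["###"])

# asdict() yields the kwargs a backend would forward to a vLLM server's
# OpenAI-compatible endpoint; the keys match the API parameter set exactly.
kwargs = asdict(config)
assert set(kwargs) == VLLM_API_PARAMS
```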
@@ -21,7 +21,7 @@ from openai._types import NOT_GIVEN, NotGiven
  from camel.configs.base_config import BaseConfig
 
  if TYPE_CHECKING:
-     from camel.functions import OpenAIFunction
+     from camel.toolkits import OpenAIFunction
 
 
  @dataclass(frozen=True)
camel/embeddings/__init__.py CHANGED
@@ -12,6 +12,7 @@
  # limitations under the License.
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
  from .base import BaseEmbedding
+ from .mistral_embedding import MistralEmbedding
  from .openai_embedding import OpenAIEmbedding
  from .sentence_transformers_embeddings import SentenceTransformerEncoder
  from .vlm_embedding import VisionLanguageEmbedding
@@ -21,4 +22,5 @@ __all__ = [
      "OpenAIEmbedding",
      "SentenceTransformerEncoder",
      "VisionLanguageEmbedding",
+     "MistralEmbedding",
  ]
camel/embeddings/mistral_embedding.py ADDED
@@ -0,0 +1,89 @@
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+ from __future__ import annotations
+
+ import os
+ from typing import Any
+
+ from camel.embeddings.base import BaseEmbedding
+ from camel.types import EmbeddingModelType
+ from camel.utils import api_keys_required
+
+
+ class MistralEmbedding(BaseEmbedding[str]):
+     r"""Provides text embedding functionalities using Mistral's models.
+
+     Args:
+         model_type (EmbeddingModelType, optional): The model type to be
+             used for text embeddings.
+             (default: :obj:`MISTRAL_EMBED`)
+         api_key (str, optional): The API key for authenticating with the
+             Mistral service. (default: :obj:`None`)
+         dimensions (int, optional): The text embedding output dimensions.
+             (default: :obj:`None`)
+
+     Raises:
+         RuntimeError: If an unsupported model type is specified.
+     """
+
+     def __init__(
+         self,
+         model_type: EmbeddingModelType = (EmbeddingModelType.MISTRAL_EMBED),
+         api_key: str | None = None,
+         dimensions: int | None = None,
+     ) -> None:
+         from mistralai.client import MistralClient
+
+         if not model_type.is_mistral:
+             raise ValueError("Invalid Mistral embedding model type.")
+         self.model_type = model_type
+         if dimensions is None:
+             self.output_dim = model_type.output_dim
+         else:
+             assert isinstance(dimensions, int)
+             self.output_dim = dimensions
+         self._api_key = api_key or os.environ.get("MISTRAL_API_KEY")
+         self._client = MistralClient(api_key=self._api_key)
+
+     @api_keys_required("MISTRAL_API_KEY")
+     def embed_list(
+         self,
+         objs: list[str],
+         **kwargs: Any,
+     ) -> list[list[float]]:
+         r"""Generates embeddings for the given texts.
+
+         Args:
+             objs (list[str]): The texts for which to generate the embeddings.
+             **kwargs (Any): Extra kwargs passed to the embedding API.
+
+         Returns:
+             list[list[float]]: A list that represents the generated embedding
+                 as a list of floating-point numbers.
+         """
+         # TODO: count tokens
+         response = self._client.embeddings(
+             input=objs,
+             model=self.model_type.value,
+             **kwargs,
+         )
+         return [data.embedding for data in response.data]
+
+     def get_output_dim(self) -> int:
+         r"""Returns the output dimension of the embeddings.
+
+         Returns:
+             int: The dimensionality of the embedding for the current model.
+         """
+         return self.output_dim
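A hedged usage sketch for the new embedding class. It assumes the optional `mistralai` package is installed and a real `MISTRAL_API_KEY` is set in the environment; the sample text is illustrative:

```python
from camel.embeddings import MistralEmbedding

# Defaults to EmbeddingModelType.MISTRAL_EMBED; the API key is read from
# the MISTRAL_API_KEY environment variable when not passed explicitly.
embedding = MistralEmbedding()

vectors = embedding.embed_list(["CAMEL is a multi-agent framework."])
assert len(vectors[0]) == embedding.get_output_dim()
```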
camel/interpreters/__init__.py CHANGED
@@ -16,6 +16,7 @@ from .base import BaseInterpreter
  from .docker_interpreter import DockerInterpreter
  from .internal_python_interpreter import InternalPythonInterpreter
  from .interpreter_error import InterpreterError
+ from .ipython_interpreter import JupyterKernelInterpreter
  from .subprocess_interpreter import SubprocessInterpreter
 
  __all__ = [
@@ -24,4 +25,5 @@ __all__ = [
      'InternalPythonInterpreter',
      'SubprocessInterpreter',
      'DockerInterpreter',
+     'JupyterKernelInterpreter',
  ]
camel/interpreters/ipython_interpreter.py ADDED
@@ -0,0 +1,167 @@
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+
+ import queue
+ import re
+ from typing import TYPE_CHECKING, Any, Dict, List, Optional
+
+ from camel.interpreters.base import BaseInterpreter
+ from camel.interpreters.interpreter_error import InterpreterError
+
+ if TYPE_CHECKING:
+     from jupyter_client import BlockingKernelClient, KernelManager
+
+ TIMEOUT = 30
+
+
+ class JupyterKernelInterpreter(BaseInterpreter):
+     r"""A class for executing code strings in a Jupyter Kernel.
+
+     Args:
+         require_confirm (bool, optional): If `True`, prompt user before
+             running code strings for security. Defaults to `True`.
+         print_stdout (bool, optional): If `True`, print the standard
+             output of the executed code. Defaults to `False`.
+         print_stderr (bool, optional): If `True`, print the standard error
+             of the executed code. Defaults to `True`.
+     """
+
+     def __init__(
+         self,
+         require_confirm: bool = True,
+         print_stdout: bool = False,
+         print_stderr: bool = True,
+     ) -> None:
+         self.require_confirm = require_confirm
+         self.print_stdout = print_stdout
+         self.print_stderr = print_stderr
+
+         self.kernel_manager: Optional[KernelManager] = None
+         self.client: Optional[BlockingKernelClient] = None
+
+     def __del__(self) -> None:
+         r"""Clean up the kernel and client."""
+
+         if self.kernel_manager:
+             self.kernel_manager.shutdown_kernel()
+         if self.client:
+             self.client.stop_channels()
+
+     def _initialize_if_needed(self) -> None:
+         r"""Initialize the kernel manager and client if they are not already
+         initialized.
+         """
+
+         if self.kernel_manager is not None:
+             return
+
+         from jupyter_client.manager import start_new_kernel
+
+         self.kernel_manager, self.client = start_new_kernel()
+
+     @staticmethod
+     def _clean_ipython_output(output: str) -> str:
+         r"""Remove ANSI escape sequences from the output."""
+
+         ansi_escape = re.compile(r'\x1B[@-_][0-?]*[ -/]*[@-~]')
+         return ansi_escape.sub('', output)
+
+     def _execute(self, code: str, timeout: float) -> str:
+         r"""Execute the code in the Jupyter kernel and return the result."""
+
+         if not self.kernel_manager or not self.client:
+             raise InterpreterError("Jupyter client is not initialized.")
+
+         self.client.execute(code)
+         outputs = []
+         while True:
+             try:
+                 msg = self.client.get_iopub_msg(timeout=timeout)
+                 msg_content = msg["content"]
+                 msg_type = msg.get("msg_type", None)
+
+                 if msg_content.get("execution_state", None) == "idle":
+                     break
+
+                 if msg_type == "error":
+                     print(msg_content.keys())
+                     print(msg_content)
+                     traceback = "\n".join(msg_content["traceback"])
+                     outputs.append(traceback)
+                 elif msg_type == "stream":
+                     outputs.append(msg_content["text"])
+                 elif msg_type in ["execute_result", "display_data"]:
+                     outputs.append(msg_content["data"]["text/plain"])
+                     if "image/png" in msg_content["data"]:
+                         outputs.append(
+                             f"\n![image](data:image/png;base64,{msg_content['data']['image/png']})\n"
+                         )
+             except queue.Empty:
+                 outputs.append("Time out")
+                 break
+             except Exception as e:
+                 outputs.append(f"Exception occurred: {e!s}")
+                 break
+
+         exec_result = "\n".join(outputs)
+         return self._clean_ipython_output(exec_result)
+
+     def run(self, code: str, code_type: str) -> str:
+         r"""Executes the given code in the Jupyter kernel.
+
+         Args:
+             code (str): The code string to execute.
+             code_type (str): The type of code to execute (e.g., 'python',
+                 'bash').
+
+         Returns:
+             str: A string containing the captured result of the
+                 executed code.
+
+         Raises:
+             InterpreterError: If there is an error when doing code execution.
+         """
+         self._initialize_if_needed()
+
+         if code_type == "bash":
+             code = f"%%bash\n({code})"
+         try:
+             result = self._execute(code, timeout=TIMEOUT)
+         except Exception as e:
+             raise InterpreterError(f"Execution failed: {e!s}")
+
+         return result
+
+     def supported_code_types(self) -> List[str]:
+         r"""Provides supported code types by the interpreter.
+
+         Returns:
+             List[str]: Supported code types.
+         """
+         return ["python", "bash"]
+
+     def update_action_space(self, action_space: Dict[str, Any]) -> None:
+         r"""Updates the action space for the interpreter.
+
+         Args:
+             action_space (Dict[str, Any]): A dictionary representing the
+                 new or updated action space.
+
+         Raises:
+             RuntimeError: Always raised because `JupyterKernelInterpreter`
+                 does not support updating the action space.
+         """
+         raise RuntimeError(
+             "SubprocessInterpreter doesn't support " "`action_space`."
+         )
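A short sketch of the new interpreter in use. It assumes `jupyter_client` and an ipykernel are installed; the kernel is started lazily on the first `run` call, and the expected outputs in the comments are illustrative:

```python
from camel.interpreters import JupyterKernelInterpreter

interpreter = JupyterKernelInterpreter(require_confirm=False)

print(interpreter.supported_code_types())          # ['python', 'bash']
print(interpreter.run("print(21 * 2)", "python"))  # '42'
print(interpreter.run("echo hello", "bash"))       # 'hello'
```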
camel/models/__init__.py CHANGED
@@ -12,8 +12,12 @@
  # limitations under the License.
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
  from .anthropic_model import AnthropicModel
+ from .azure_openai_model import AzureOpenAIModel
  from .base_model import BaseModelBackend
+ from .gemini_model import GeminiModel
+ from .groq_model import GroqModel
  from .litellm_model import LiteLLMModel
+ from .mistral_model import MistralModel
  from .model_factory import ModelFactory
  from .nemotron_model import NemotronModel
  from .ollama_model import OllamaModel
@@ -21,12 +25,16 @@ from .open_source_model import OpenSourceModel
  from .openai_audio_models import OpenAIAudioModels
  from .openai_model import OpenAIModel
  from .stub_model import StubModel
+ from .vllm_model import VLLMModel
  from .zhipuai_model import ZhipuAIModel
 
  __all__ = [
      'BaseModelBackend',
      'OpenAIModel',
+     'AzureOpenAIModel',
      'AnthropicModel',
+     'MistralModel',
+     'GroqModel',
      'StubModel',
      'ZhipuAIModel',
      'OpenSourceModel',
@@ -35,4 +43,6 @@ __all__ = [
      'OpenAIAudioModels',
      'NemotronModel',
      'OllamaModel',
+     'VLLMModel',
+     'GeminiModel',
  ]
camel/models/anthropic_model.py CHANGED
@@ -36,6 +36,7 @@ class AnthropicModel(BaseModelBackend):
          model_config_dict: Dict[str, Any],
          api_key: Optional[str] = None,
          url: Optional[str] = None,
+         token_counter: Optional[BaseTokenCounter] = None,
      ) -> None:
          r"""Constructor for Anthropic backend.
 
@@ -48,12 +49,16 @@ class AnthropicModel(BaseModelBackend):
                  Anthropic service. (default: :obj:`None`)
              url (Optional[str]): The url to the Anthropic service. (default:
                  :obj:`None`)
+             token_counter (Optional[BaseTokenCounter]): Token counter to use
+                 for the model. If not provided, `AnthropicTokenCounter` will
+                 be used.
          """
-         super().__init__(model_type, model_config_dict, api_key, url)
+         super().__init__(
+             model_type, model_config_dict, api_key, url, token_counter
+         )
          self._api_key = api_key or os.environ.get("ANTHROPIC_API_KEY")
          self._url = url or os.environ.get("ANTHROPIC_API_BASE_URL")
          self.client = Anthropic(api_key=self._api_key, base_url=self._url)
-         self._token_counter: Optional[BaseTokenCounter] = None
 
      def _convert_response_from_anthropic_to_openai(self, response):
          # openai ^1.0.0 format, reference openai/types/chat/chat_completion.py
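The practical effect of the new `token_counter` parameter, in a hedged sketch: a caller can now inject a custom counter instead of the default `AnthropicTokenCounter`. The model choice and counter choice below are illustrative, and constructing the backend requires `ANTHROPIC_API_KEY`:

```python
from camel.models import AnthropicModel
from camel.types import ModelType
from camel.utils import OpenAITokenCounter

# Override the default token counter with a custom one (illustrative).
model = AnthropicModel(
    model_type=ModelType.CLAUDE_2_1,       # hypothetical model choice
    model_config_dict={"max_tokens": 512},
    token_counter=OpenAITokenCounter(ModelType.GPT_4),
)
```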
camel/models/azure_openai_model.py ADDED
@@ -0,0 +1,152 @@
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+ import os
+ from typing import Any, Dict, List, Optional, Union
+
+ from openai import AzureOpenAI, Stream
+
+ from camel.configs import OPENAI_API_PARAMS
+ from camel.messages import OpenAIMessage
+ from camel.models.base_model import BaseModelBackend
+ from camel.types import ChatCompletion, ChatCompletionChunk, ModelType
+ from camel.utils import BaseTokenCounter, OpenAITokenCounter, api_keys_required
+
+
+ class AzureOpenAIModel(BaseModelBackend):
+     r"""Azure OpenAI API in a unified BaseModelBackend interface.
+     Doc: https://learn.microsoft.com/en-us/azure/ai-services/openai/
+     """
+
+     def __init__(
+         self,
+         model_type: ModelType,
+         model_config_dict: Dict[str, Any],
+         api_key: Optional[str] = None,
+         url: Optional[str] = None,
+         api_version: Optional[str] = None,
+         azure_deployment_name: Optional[str] = None,
+     ) -> None:
+         r"""Constructor for OpenAI backend.
+
+         Args:
+             model_type (ModelType): Model for which a backend is created,
+                 one of GPT_* series.
+             model_config_dict (Dict[str, Any]): A dictionary that will
+                 be fed into openai.ChatCompletion.create().
+             api_key (Optional[str]): The API key for authenticating with the
+                 OpenAI service. (default: :obj:`None`)
+             url (Optional[str]): The url to the OpenAI service. (default:
+                 :obj:`None`)
+             api_version (Optional[str]): The api version for the model.
+             azure_deployment_name (Optional[str]): The deployment name you
+                 chose when you deployed an azure model. (default: :obj:`None`)
+         """
+         super().__init__(model_type, model_config_dict, api_key, url)
+         self._url = url or os.environ.get("AZURE_OPENAI_ENDPOINT")
+         self._api_key = api_key or os.environ.get("AZURE_OPENAI_API_KEY")
+         self.api_version = api_version or os.environ.get("AZURE_API_VERSION")
+         self.azure_deployment_name = azure_deployment_name or os.environ.get(
+             "AZURE_DEPLOYMENT_NAME"
+         )
+
+         if self._url is None:
+             raise ValueError(
+                 "Must provide either the `url` argument "
+                 "or `AZURE_OPENAI_ENDPOINT` environment variable."
+             )
+         if self._api_key is None:
+             raise ValueError(
+                 "Must provide either the `api_key` argument "
+                 "or `AZURE_OPENAI_API_KEY` environment variable."
+             )
+         if self.api_version is None:
+             raise ValueError(
+                 "Must provide either the `api_version` argument "
+                 "or `AZURE_API_VERSION` environment variable."
+             )
+         if self.azure_deployment_name is None:
+             raise ValueError(
+                 "Must provide either the `azure_deployment_name` argument "
+                 "or `AZURE_DEPLOYMENT_NAME` environment variable."
+             )
+         self.model = str(self.azure_deployment_name)
+
+         self._client = AzureOpenAI(
+             azure_endpoint=str(self._url),
+             azure_deployment=self.azure_deployment_name,
+             api_version=self.api_version,
+             api_key=self._api_key,
+             timeout=60,
+             max_retries=3,
+         )
+         self._token_counter: Optional[BaseTokenCounter] = None
+
+     @property
+     def token_counter(self) -> BaseTokenCounter:
+         r"""Initialize the token counter for the model backend.
+
+         Returns:
+             BaseTokenCounter: The token counter following the model's
+                 tokenization style.
+         """
+         if not self._token_counter:
+             self._token_counter = OpenAITokenCounter(self.model_type)
+         return self._token_counter
+
+     @api_keys_required("AZURE_OPENAI_API_KEY", "AZURE_API_VERSION")
+     def run(
+         self,
+         messages: List[OpenAIMessage],
+     ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+         r"""Runs inference of Azure OpenAI chat completion.
+
+         Args:
+             messages (List[OpenAIMessage]): Message list with the chat history
+                 in OpenAI API format.
+
+         Returns:
+             Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+                 `ChatCompletion` in the non-stream mode, or
+                 `Stream[ChatCompletionChunk]` in the stream mode.
+         """
+         response = self._client.chat.completions.create(
+             messages=messages,
+             model=self.model,
+             **self.model_config_dict,
+         )
+         return response
+
+     def check_model_config(self):
+         r"""Check whether the model configuration contains any
+         unexpected arguments to Azure OpenAI API.
+
+         Raises:
+             ValueError: If the model configuration dictionary contains any
+                 unexpected arguments to Azure OpenAI API.
+         """
+         for param in self.model_config_dict:
+             if param not in OPENAI_API_PARAMS:
+                 raise ValueError(
+                     f"Unexpected argument `{param}` is "
+                     "input into Azure OpenAI model backend."
+                 )
+
+     @property
+     def stream(self) -> bool:
+         r"""Returns whether the model is in stream mode,
+         which sends partial results each time.
+
+         Returns:
+             bool: Whether the model is in stream mode.
+         """
+         return self.model_config_dict.get("stream", False)
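A configuration sketch for the new Azure backend. The endpoint, key, deployment name, and API version below are placeholders; each can also come from the environment variables the constructor checks (`AZURE_OPENAI_ENDPOINT`, `AZURE_OPENAI_API_KEY`, `AZURE_API_VERSION`, `AZURE_DEPLOYMENT_NAME`):

```python
from camel.models import AzureOpenAIModel
from camel.types import ModelType

model = AzureOpenAIModel(
    model_type=ModelType.GPT_4,  # token counting follows this type
    model_config_dict={"temperature": 0.0},
    api_key="...",                                # placeholder
    url="https://my-resource.openai.azure.com/",  # placeholder
    api_version="2024-02-01",                     # placeholder
    azure_deployment_name="my-gpt-4-deployment",  # placeholder
)

# Messages use the standard OpenAI chat format; note that the `model`
# sent to Azure is the deployment name, not the model family.
response = model.run([{"role": "user", "content": "Say hello."}])
```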
camel/models/base_model.py CHANGED
@@ -32,6 +32,7 @@ class BaseModelBackend(ABC):
          model_config_dict: Dict[str, Any],
          api_key: Optional[str] = None,
          url: Optional[str] = None,
+         token_counter: Optional[BaseTokenCounter] = None,
      ) -> None:
          r"""Constructor for the model backend.
 
@@ -41,13 +42,16 @@ class BaseModelBackend(ABC):
              api_key (Optional[str]): The API key for authenticating with the
                  model service.
              url (Optional[str]): The url to the model service.
+             token_counter (Optional[BaseTokenCounter]): Token counter to use
+                 for the model. If not provided, `OpenAITokenCounter` will
+                 be used.
          """
          self.model_type = model_type
-
          self.model_config_dict = model_config_dict
          self._api_key = api_key
          self._url = url
          self.check_model_config()
+         self._token_counter = token_counter
 
      @property
      @abstractmethod
@@ -109,7 +113,10 @@ class BaseModelBackend(ABC):
          Returns:
              int: The maximum token limit for the given model.
          """
-         return self.model_type.token_limit
+         return (
+             self.model_config_dict.get("max_tokens")
+             or self.model_type.token_limit
+         )
 
      @property
      def stream(self) -> bool:
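The `token_limit` change above means an explicit `max_tokens` in the config now takes precedence over the model type's built-in limit. A sketch of the new behavior, assuming any concrete backend subclass (shown here with `OpenAIModel`, `OPENAI_API_KEY` set in the environment):

```python
from camel.models import OpenAIModel
from camel.types import ModelType

capped = OpenAIModel(ModelType.GPT_3_5_TURBO, {"max_tokens": 512})
default = OpenAIModel(ModelType.GPT_3_5_TURBO, {})

# Config-level max_tokens wins; otherwise fall back to the type's limit.
assert capped.token_limit == 512
assert default.token_limit == ModelType.GPT_3_5_TURBO.token_limit
```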