camel-ai 0.2.11__py3-none-any.whl → 0.2.12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of camel-ai might be problematic; see the registry advisory for details.

Files changed (55)
  1. camel/__init__.py +1 -1
  2. camel/agents/chat_agent.py +13 -1
  3. camel/benchmarks/__init__.py +18 -0
  4. camel/benchmarks/base.py +152 -0
  5. camel/benchmarks/gaia.py +478 -0
  6. camel/configs/__init__.py +3 -0
  7. camel/configs/ollama_config.py +4 -2
  8. camel/configs/sglang_config.py +71 -0
  9. camel/data_collector/__init__.py +19 -0
  10. camel/data_collector/alpaca_collector.py +127 -0
  11. camel/data_collector/base.py +211 -0
  12. camel/data_collector/sharegpt_collector.py +205 -0
  13. camel/datahubs/__init__.py +23 -0
  14. camel/datahubs/base.py +136 -0
  15. camel/datahubs/huggingface.py +433 -0
  16. camel/datahubs/models.py +22 -0
  17. camel/interpreters/__init__.py +2 -0
  18. camel/interpreters/e2b_interpreter.py +136 -0
  19. camel/loaders/__init__.py +3 -1
  20. camel/loaders/base_io.py +41 -41
  21. camel/messages/__init__.py +2 -0
  22. camel/models/__init__.py +2 -0
  23. camel/models/anthropic_model.py +14 -4
  24. camel/models/base_model.py +28 -0
  25. camel/models/groq_model.py +1 -1
  26. camel/models/model_factory.py +3 -0
  27. camel/models/ollama_model.py +12 -0
  28. camel/models/openai_model.py +0 -26
  29. camel/models/reward/__init__.py +22 -0
  30. camel/models/reward/base_reward_model.py +58 -0
  31. camel/models/reward/evaluator.py +63 -0
  32. camel/models/reward/nemotron_model.py +112 -0
  33. camel/models/sglang_model.py +225 -0
  34. camel/models/vllm_model.py +1 -1
  35. camel/personas/persona_hub.py +2 -2
  36. camel/schemas/openai_converter.py +2 -2
  37. camel/societies/workforce/role_playing_worker.py +2 -2
  38. camel/societies/workforce/single_agent_worker.py +2 -2
  39. camel/societies/workforce/workforce.py +3 -3
  40. camel/storages/object_storages/amazon_s3.py +2 -2
  41. camel/storages/object_storages/azure_blob.py +2 -2
  42. camel/storages/object_storages/google_cloud.py +2 -2
  43. camel/toolkits/__init__.py +2 -0
  44. camel/toolkits/code_execution.py +5 -1
  45. camel/toolkits/function_tool.py +41 -0
  46. camel/toolkits/math_toolkit.py +47 -16
  47. camel/toolkits/search_toolkit.py +154 -2
  48. camel/toolkits/stripe_toolkit.py +273 -0
  49. camel/types/__init__.py +2 -0
  50. camel/types/enums.py +27 -2
  51. camel/utils/token_counting.py +22 -10
  52. {camel_ai-0.2.11.dist-info → camel_ai-0.2.12.dist-info}/METADATA +13 -6
  53. {camel_ai-0.2.11.dist-info → camel_ai-0.2.12.dist-info}/RECORD +55 -36
  54. {camel_ai-0.2.11.dist-info → camel_ai-0.2.12.dist-info}/LICENSE +0 -0
  55. {camel_ai-0.2.11.dist-info → camel_ai-0.2.12.dist-info}/WHEEL +0 -0
@@ -0,0 +1,225 @@
1
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an "AS IS" BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
14
+ import logging
15
+ import threading
16
+ import time
17
+ from typing import Any, Dict, List, Optional, Union
18
+
19
+ from openai import OpenAI, Stream
20
+
21
+ from camel.configs import SGLANG_API_PARAMS, SGLangConfig
22
+ from camel.messages import OpenAIMessage
23
+ from camel.models import BaseModelBackend
24
+ from camel.types import (
25
+ ChatCompletion,
26
+ ChatCompletionChunk,
27
+ ModelType,
28
+ )
29
+ from camel.utils import BaseTokenCounter, OpenAITokenCounter
30
+
31
+
32
+ class SGLangModel(BaseModelBackend):
33
+ r"""SGLang service interface.
34
+
35
+ Args:
36
+ model_type (Union[ModelType, str]): Model for which a backend is
37
+ created.
38
+ model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
39
+ that will be fed into:obj:`openai.ChatCompletion.create()`. If
40
+ :obj:`None`, :obj:`SGLangConfig().as_dict()` will be used.
41
+ (default: :obj:`None`)
42
+ api_key (Optional[str], optional): The API key for authenticating with
43
+ the model service. SGLang doesn't need API key, it would be ignored
44
+ if set. (default: :obj:`None`)
45
+ url (Optional[str], optional): The url to the model service. If not
46
+ provided, :obj:`"http://127.0.0.1:30000/v1"` will be used.
47
+ (default: :obj:`None`)
48
+ token_counter (Optional[BaseTokenCounter], optional): Token counter to
49
+ use for the model. If not provided, :obj:`OpenAITokenCounter(
50
+ ModelType.GPT_4O_MINI)` will be used.
51
+ (default: :obj:`None`)
52
+
53
+ Reference: https://sgl-project.github.io/backend/openai_api_completions.html
54
+ """
55
+
56
+ def __init__(
57
+ self,
58
+ model_type: Union[ModelType, str],
59
+ model_config_dict: Optional[Dict[str, Any]] = None,
60
+ api_key: Optional[str] = None,
61
+ url: Optional[str] = None,
62
+ token_counter: Optional[BaseTokenCounter] = None,
63
+ ) -> None:
64
+ if model_config_dict is None:
65
+ model_config_dict = SGLangConfig().as_dict()
66
+
67
+ self.server_process = None
68
+ self.last_run_time: Optional[float] = (
69
+ None # Will be set when the server starts
70
+ )
71
+ self._lock = threading.Lock()
72
+ self._inactivity_thread: Optional[threading.Thread] = None
73
+
74
+ super().__init__(
75
+ model_type, model_config_dict, api_key, url, token_counter
76
+ )
77
+
78
+ self._client = None
79
+
80
+ if self._url:
81
+ # Initialize the client if an existing URL is provided
82
+ self._client = OpenAI(
83
+ timeout=60,
84
+ max_retries=3,
85
+ api_key="Set-but-ignored", # required but ignored
86
+ base_url=self._url,
87
+ )
88
+
89
+ def _start_server(self) -> None:
90
+ from sglang.utils import ( # type: ignore[import-untyped]
91
+ execute_shell_command,
92
+ wait_for_server,
93
+ )
94
+
95
+ try:
96
+ if not self._url:
97
+ cmd = (
98
+ f"python -m sglang.launch_server "
99
+ f"--model-path {self.model_type} "
100
+ f"--port 30000 "
101
+ f"--host 0.0.0.0"
102
+ )
103
+
104
+ server_process = execute_shell_command(cmd)
105
+ wait_for_server("http://localhost:30000")
106
+ self._url = "http://127.0.0.1:30000/v1"
107
+ self.server_process = server_process
108
+ # Start the inactivity monitor in a background thread
109
+ self._inactivity_thread = threading.Thread(
110
+ target=self._monitor_inactivity, daemon=True
111
+ )
112
+ self._inactivity_thread.start()
113
+ self.last_run_time = time.time()
114
+ # Initialize the client after the server starts
115
+ self._client = OpenAI(
116
+ timeout=60,
117
+ max_retries=3,
118
+ api_key="Set-but-ignored", # required but ignored
119
+ base_url=self._url,
120
+ )
121
+ except Exception as e:
122
+ raise RuntimeError(f"Failed to start SGLang server: {e}") from e
123
+
124
+ def _ensure_server_running(self) -> None:
125
+ r"""Ensures that the server is running. If not, starts the server."""
126
+ with self._lock:
127
+ if self.server_process is None:
128
+ self._start_server()
129
+
130
+ def _monitor_inactivity(self):
131
+ r"""Monitor whether the server process has been inactive for over 10
132
+ minutes.
133
+ """
134
+ from sglang.utils import terminate_process
135
+
136
+ while True:
137
+ # Check every 10 seconds
138
+ time.sleep(10)
139
+ # Over 10 minutes
140
+ with self._lock:
141
+ # Over 10 minutes
142
+ if self.last_run_time and (
143
+ time.time() - self.last_run_time > 600
144
+ ):
145
+ if self.server_process:
146
+ terminate_process(self.server_process)
147
+ self.server_process = None
148
+ self._client = None # Invalidate the client
149
+ logging.info(
150
+ "Server process terminated due to inactivity."
151
+ )
152
+ break
153
+
154
+ @property
155
+ def token_counter(self) -> BaseTokenCounter:
156
+ r"""Initialize the token counter for the model backend.
157
+
158
+ Returns:
159
+ BaseTokenCounter: The token counter following the model's
160
+ tokenization style.
161
+ """
162
+ if not self._token_counter:
163
+ self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
164
+ return self._token_counter
165
+
166
+ def check_model_config(self):
167
+ r"""Check whether the model configuration contains any
168
+ unexpected arguments to SGLang API.
169
+
170
+ Raises:
171
+ ValueError: If the model configuration dictionary contains any
172
+ unexpected arguments to OpenAI API.
173
+ """
174
+ for param in self.model_config_dict:
175
+ if param not in SGLANG_API_PARAMS:
176
+ raise ValueError(
177
+ f"Unexpected argument `{param}` is "
178
+ "input into SGLang model backend."
179
+ )
180
+
181
+ def run(
182
+ self,
183
+ messages: List[OpenAIMessage],
184
+ ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
185
+ r"""Runs inference of OpenAI chat completion.
186
+
187
+ Args:
188
+ messages (List[OpenAIMessage]): Message list with the chat history
189
+ in OpenAI API format.
190
+
191
+ Returns:
192
+ Union[ChatCompletion, Stream[ChatCompletionChunk]]:
193
+ `ChatCompletion` in the non-stream mode, or
194
+ `Stream[ChatCompletionChunk]` in the stream mode.
195
+ """
196
+
197
+ # Ensure server is running
198
+ self._ensure_server_running()
199
+
200
+ with self._lock:
201
+ # Update last run time
202
+ self.last_run_time = time.time()
203
+
204
+ if self._client is None:
205
+ raise RuntimeError(
206
+ "Client is not initialized. Ensure the server is running."
207
+ )
208
+
209
+ response = self._client.chat.completions.create(
210
+ messages=messages,
211
+ model=self.model_type,
212
+ **self.model_config_dict,
213
+ )
214
+
215
+ return response
216
+
217
+ @property
218
+ def stream(self) -> bool:
219
+ r"""Returns whether the model is in stream mode, which sends partial
220
+ results each time.
221
+
222
+ Returns:
223
+ bool: Whether the model is in stream mode.
224
+ """
225
+ return self.model_config_dict.get('stream', False)
@@ -74,7 +74,7 @@ class VLLMModel(BaseModelBackend):
74
74
  self._client = OpenAI(
75
75
  timeout=60,
76
76
  max_retries=3,
77
- api_key="Set-but-ignored", # required but ignored
77
+ api_key="EMPTY", # required but ignored
78
78
  base_url=self._url,
79
79
  )
80
80
 
@@ -11,7 +11,7 @@
11
11
  # See the License for the specific language governing permissions and
12
12
  # limitations under the License.
13
13
  # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
14
- import ast
14
+ import json
15
15
  import re
16
16
  import uuid
17
17
  from functools import lru_cache
@@ -130,7 +130,7 @@ class PersonaHub:
130
130
  text_to_persona_prompt_instruction,
131
131
  response_format=PersonaResponse, # type: ignore[arg-type]
132
132
  )
133
- parsed_content = ast.literal_eval(response.msg.content)
133
+ parsed_content = json.loads(response.msg.content)
134
134
  persona.name = parsed_content["persona_name"]
135
135
  persona.description = parsed_content["persona_description"]
136
136
  except Exception as e:
@@ -28,8 +28,8 @@ from camel.utils import (
28
28
  from .base import BaseConverter
29
29
 
30
30
  DEFAULT_CONVERTER_PROMPTS = """
31
- Extract key entities and attributes from the provided text,
32
- and convert them into a structured JSON format.
31
+ Extract key entities and attributes from the user
32
+ provided text, and convert them into a structured JSON format.
33
33
  """
34
34
 
35
35
 
@@ -13,7 +13,7 @@
13
13
  # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
14
14
  from __future__ import annotations
15
15
 
16
- import ast
16
+ import json
17
17
  from typing import Dict, List, Optional
18
18
 
19
19
  from colorama import Fore
@@ -173,7 +173,7 @@ class RolePlayingWorker(Worker):
173
173
  content=prompt,
174
174
  )
175
175
  response = self.summarize_agent.step(req, response_format=TaskResult)
176
- result_dict = ast.literal_eval(response.msg.content)
176
+ result_dict = json.loads(response.msg.content)
177
177
  task_result = TaskResult(**result_dict)
178
178
  task.result = task_result.content
179
179
 
@@ -13,7 +13,7 @@
13
13
  # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
14
14
  from __future__ import annotations
15
15
 
16
- import ast
16
+ import json
17
17
  from typing import Any, List
18
18
 
19
19
  from colorama import Fore
@@ -87,7 +87,7 @@ class SingleAgentWorker(Worker):
87
87
 
88
88
  print(f"======\n{Fore.GREEN}Reply from {self}:{Fore.RESET}")
89
89
 
90
- result_dict = ast.literal_eval(response.msg.content)
90
+ result_dict = json.loads(response.msg.content)
91
91
  task_result = TaskResult(**result_dict)
92
92
 
93
93
  color = Fore.RED if task_result.failed else Fore.GREEN
@@ -13,8 +13,8 @@
13
13
  # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
14
14
  from __future__ import annotations
15
15
 
16
- import ast
17
16
  import asyncio
17
+ import json
18
18
  import logging
19
19
  from collections import deque
20
20
  from typing import Deque, Dict, List, Optional
@@ -289,7 +289,7 @@ class Workforce(BaseNode):
289
289
  response = self.coordinator_agent.step(
290
290
  req, response_format=TaskAssignResult
291
291
  )
292
- result_dict = ast.literal_eval(response.msg.content)
292
+ result_dict = json.loads(response.msg.content)
293
293
  task_assign_result = TaskAssignResult(**result_dict)
294
294
  return task_assign_result.assignee_id
295
295
 
@@ -320,7 +320,7 @@ class Workforce(BaseNode):
320
320
  content=prompt,
321
321
  )
322
322
  response = self.coordinator_agent.step(req, response_format=WorkerConf)
323
- result_dict = ast.literal_eval(response.msg.content)
323
+ result_dict = json.loads(response.msg.content)
324
324
  new_node_conf = WorkerConf(**result_dict)
325
325
 
326
326
  new_agent = self._create_new_agent(
@@ -17,7 +17,7 @@ from pathlib import Path, PurePath
17
17
  from typing import Optional, Tuple
18
18
  from warnings import warn
19
19
 
20
- from camel.loaders import File
20
+ from camel.loaders import File, create_file_from_raw_bytes
21
21
  from camel.storages.object_storages.base import BaseObjectStorage
22
22
 
23
23
 
@@ -156,7 +156,7 @@ class AmazonS3Storage(BaseObjectStorage):
156
156
  Bucket=self._bucket_name, Key=file_key
157
157
  )
158
158
  raw_bytes = response["Body"].read()
159
- return File.create_file_from_raw_bytes(raw_bytes, filename)
159
+ return create_file_from_raw_bytes(raw_bytes, filename)
160
160
 
161
161
  def _upload_file(
162
162
  self, local_file_path: Path, remote_file_key: str
@@ -16,7 +16,7 @@ from pathlib import Path, PurePath
16
16
  from typing import Optional, Tuple
17
17
  from warnings import warn
18
18
 
19
- from camel.loaders import File
19
+ from camel.loaders import File, create_file_from_raw_bytes
20
20
  from camel.storages.object_storages.base import BaseObjectStorage
21
21
 
22
22
 
@@ -123,7 +123,7 @@ class AzureBlobStorage(BaseObjectStorage):
123
123
  File: The object from the container.
124
124
  """
125
125
  raw_bytes = self._client.download_blob(file_key).readall()
126
- file = File.create_file_from_raw_bytes(raw_bytes, filename)
126
+ file = create_file_from_raw_bytes(raw_bytes, filename)
127
127
  return file
128
128
 
129
129
  def _upload_file(
@@ -15,7 +15,7 @@ from pathlib import Path, PurePath
15
15
  from typing import Tuple
16
16
  from warnings import warn
17
17
 
18
- from camel.loaders import File
18
+ from camel.loaders import File, create_file_from_raw_bytes
19
19
  from camel.storages.object_storages.base import BaseObjectStorage
20
20
 
21
21
 
@@ -111,7 +111,7 @@ class GoogleCloudStorage(BaseObjectStorage):
111
111
  File: The object from the S3 bucket.
112
112
  """
113
113
  raw_bytes = self._client.get_blob(file_key).download_as_bytes()
114
- return File.create_file_from_raw_bytes(raw_bytes, filename)
114
+ return create_file_from_raw_bytes(raw_bytes, filename)
115
115
 
116
116
  def _upload_file(
117
117
  self, local_file_path: Path, remote_file_key: str
@@ -41,6 +41,7 @@ from .open_api_toolkit import OpenAPIToolkit
41
41
  from .retrieval_toolkit import RetrievalToolkit
42
42
  from .notion_toolkit import NotionToolkit
43
43
  from .human_toolkit import HumanToolkit
44
+ from .stripe_toolkit import StripeToolkit
44
45
  from .video_toolkit import VideoDownloaderToolkit
45
46
 
46
47
  __all__ = [
@@ -70,5 +71,6 @@ __all__ = [
70
71
  'ArxivToolkit',
71
72
  'HumanToolkit',
72
73
  'VideoDownloaderToolkit',
74
+ 'StripeToolkit',
73
75
  'MeshyToolkit',
74
76
  ]
@@ -15,6 +15,7 @@ from typing import List, Literal, Optional, Union
15
15
 
16
16
  from camel.interpreters import (
17
17
  DockerInterpreter,
18
+ E2BInterpreter,
18
19
  InternalPythonInterpreter,
19
20
  JupyterKernelInterpreter,
20
21
  SubprocessInterpreter,
@@ -41,7 +42,7 @@ class CodeExecutionToolkit(BaseToolkit):
41
42
  def __init__(
42
43
  self,
43
44
  sandbox: Literal[
44
- "internal_python", "jupyter", "docker", "subprocess"
45
+ "internal_python", "jupyter", "docker", "subprocess", "e2b"
45
46
  ] = "internal_python",
46
47
  verbose: bool = False,
47
48
  unsafe_mode: bool = False,
@@ -58,6 +59,7 @@ class CodeExecutionToolkit(BaseToolkit):
58
59
  JupyterKernelInterpreter,
59
60
  DockerInterpreter,
60
61
  SubprocessInterpreter,
62
+ E2BInterpreter,
61
63
  ]
62
64
 
63
65
  if sandbox == "internal_python":
@@ -83,6 +85,8 @@ class CodeExecutionToolkit(BaseToolkit):
83
85
  print_stdout=self.verbose,
84
86
  print_stderr=self.verbose,
85
87
  )
88
+ elif sandbox == "e2b":
89
+ self.interpreter = E2BInterpreter(require_confirm=require_confirm)
86
90
  else:
87
91
  raise RuntimeError(
88
92
  f"The sandbox type `{sandbox}` is not supported."
@@ -165,9 +165,15 @@ def get_openai_tool_schema(func: Callable) -> Dict[str, Any]:
165
165
  else:
166
166
  func_description = short_description
167
167
 
168
+ # OpenAI client.beta.chat.completions.parse for structured output has
169
+ # additional requirements for the schema, refer:
170
+ # https://platform.openai.com/docs/guides/structured-outputs/some-type-specific-keywords-are-not-yet-supported#supported-schemas
171
+ parameters_dict["additionalProperties"] = False
172
+
168
173
  openai_function_schema = {
169
174
  "name": func.__name__,
170
175
  "description": func_description,
176
+ "strict": True,
171
177
  "parameters": parameters_dict,
172
178
  }
173
179
 
@@ -175,9 +181,44 @@ def get_openai_tool_schema(func: Callable) -> Dict[str, Any]:
175
181
  "type": "function",
176
182
  "function": openai_function_schema,
177
183
  }
184
+
185
+ openai_tool_schema = sanitize_and_enforce_required(openai_tool_schema)
178
186
  return openai_tool_schema
179
187
 
180
188
 
189
def sanitize_and_enforce_required(parameters_dict):
    r"""Cleans and updates the function schema to conform with OpenAI's
    requirements:
    - Removes invalid 'default' fields from the parameters schema.
    - Ensures all fields or function parameters are marked as required.

    The schema is mutated in place and also returned for convenience.

    Args:
        parameters_dict (dict): The dictionary representing the function
            schema.

    Returns:
        dict: The updated dictionary with invalid defaults removed and all
            fields set as required.
    """
    # Only touch schemas that actually carry a 'function.parameters' section.
    if 'function' in parameters_dict:
        function_block = parameters_dict['function']
        if 'parameters' in function_block:
            params = function_block['parameters']
            props = params.get('properties', {})

            # OpenAI structured outputs reject 'default'; strip it from
            # every property.
            for spec in props.values():
                spec.pop('default', None)

            # Structured outputs require every property to be listed in
            # 'required'.
            params['required'] = list(props)

    return parameters_dict
220
+
221
+
181
222
  def generate_docstring(
182
223
  code: str,
183
224
  model: Optional[BaseModelBackend] = None,
@@ -22,44 +22,73 @@ class MathToolkit(BaseToolkit):
22
22
  r"""A class representing a toolkit for mathematical operations.
23
23
 
24
24
  This class provides methods for basic mathematical operations such as
25
- addition, subtraction, and multiplication.
25
+ addition, subtraction, multiplication, division, and rounding.
26
26
  """
27
27
 
28
- def add(self, a: int, b: int) -> int:
28
+ def add(self, a: float, b: float) -> float:
29
29
  r"""Adds two numbers.
30
30
 
31
31
  Args:
32
- a (int): The first number to be added.
33
- b (int): The second number to be added.
32
+ a (float): The first number to be added.
33
+ b (float): The second number to be added.
34
34
 
35
35
  Returns:
36
- integer: The sum of the two numbers.
36
+ float: The sum of the two numbers.
37
37
  """
38
38
  return a + b
39
39
 
40
- def sub(self, a: int, b: int) -> int:
40
+ def sub(self, a: float, b: float) -> float:
41
41
  r"""Do subtraction between two numbers.
42
42
 
43
43
  Args:
44
- a (int): The minuend in subtraction.
45
- b (int): The subtrahend in subtraction.
44
+ a (float): The minuend in subtraction.
45
+ b (float): The subtrahend in subtraction.
46
46
 
47
47
  Returns:
48
- integer: The result of subtracting :obj:`b` from :obj:`a`.
48
+ float: The result of subtracting :obj:`b` from :obj:`a`.
49
49
  """
50
50
  return a - b
51
51
 
52
- def mul(self, a: int, b: int) -> int:
53
- r"""Multiplies two integers.
52
def multiply(self, a: float, b: float, decimal_places: int = 2) -> float:
    r"""Multiplies two numbers and rounds the result.

    Args:
        a (float): The multiplier in the multiplication.
        b (float): The multiplicand in the multiplication.
        decimal_places (int, optional): The number of decimal places to
            round the product to. Defaults to 2.

    Returns:
        float: The product of the two numbers, rounded to
            :obj:`decimal_places`.
    """
    product = a * b
    return round(product, decimal_places)
65
+
66
def divide(self, a: float, b: float, decimal_places: int = 2) -> float:
    r"""Divides two numbers and rounds the result.

    Args:
        a (float): The dividend in the division.
        b (float): The divisor in the division.
        decimal_places (int, optional): The number of decimal places to
            round the quotient to. Defaults to 2.

    Returns:
        float: The result of dividing :obj:`a` by :obj:`b`, rounded to
            :obj:`decimal_places`.
    """
    quotient = a / b
    return round(quotient, decimal_places)
79
+
80
+ def round(self, a: float, decimal_places: int = 0) -> float:
81
+ r"""Rounds a number to a specified number of decimal places.
82
+
83
+ Args:
84
+ a (float): The number to be rounded.
85
+ decimal_places (int, optional): The number of decimal places
86
+ to round to. Defaults to 0.
87
+
88
+ Returns:
89
+ float: The rounded number.
90
+ """
91
+ return round(a, decimal_places)
63
92
 
64
93
  def get_tools(self) -> List[FunctionTool]:
65
94
  r"""Returns a list of FunctionTool objects representing the
@@ -72,5 +101,7 @@ class MathToolkit(BaseToolkit):
72
101
  return [
73
102
  FunctionTool(self.add),
74
103
  FunctionTool(self.sub),
75
- FunctionTool(self.mul),
104
+ FunctionTool(self.multiply),
105
+ FunctionTool(self.divide),
106
+ FunctionTool(self.round),
76
107
  ]