camel-ai 0.2.67__py3-none-any.whl → 0.2.69a1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of camel-ai might be problematic.

Files changed (43)
  1. camel/__init__.py +1 -1
  2. camel/agents/chat_agent.py +170 -11
  3. camel/configs/vllm_config.py +2 -0
  4. camel/datagen/self_improving_cot.py +1 -1
  5. camel/environments/__init__.py +12 -0
  6. camel/environments/rlcards_env.py +860 -0
  7. camel/interpreters/docker/Dockerfile +2 -5
  8. camel/loaders/firecrawl_reader.py +4 -4
  9. camel/memories/blocks/vectordb_block.py +8 -1
  10. camel/memories/context_creators/score_based.py +185 -39
  11. camel/models/anthropic_model.py +114 -2
  12. camel/runtimes/configs.py +11 -11
  13. camel/runtimes/daytona_runtime.py +4 -4
  14. camel/runtimes/docker_runtime.py +6 -6
  15. camel/runtimes/remote_http_runtime.py +5 -5
  16. camel/societies/workforce/prompts.py +55 -21
  17. camel/societies/workforce/single_agent_worker.py +274 -14
  18. camel/societies/workforce/task_channel.py +9 -2
  19. camel/societies/workforce/utils.py +10 -2
  20. camel/societies/workforce/worker.py +74 -16
  21. camel/societies/workforce/workforce.py +90 -35
  22. camel/tasks/task.py +18 -12
  23. camel/toolkits/__init__.py +2 -0
  24. camel/toolkits/aci_toolkit.py +19 -19
  25. camel/toolkits/arxiv_toolkit.py +6 -6
  26. camel/toolkits/dappier_toolkit.py +5 -5
  27. camel/toolkits/file_write_toolkit.py +10 -10
  28. camel/toolkits/github_toolkit.py +3 -3
  29. camel/toolkits/non_visual_browser_toolkit/__init__.py +18 -0
  30. camel/toolkits/non_visual_browser_toolkit/actions.py +196 -0
  31. camel/toolkits/non_visual_browser_toolkit/agent.py +278 -0
  32. camel/toolkits/non_visual_browser_toolkit/browser_non_visual_toolkit.py +363 -0
  33. camel/toolkits/non_visual_browser_toolkit/nv_browser_session.py +175 -0
  34. camel/toolkits/non_visual_browser_toolkit/snapshot.js +188 -0
  35. camel/toolkits/non_visual_browser_toolkit/snapshot.py +164 -0
  36. camel/toolkits/pptx_toolkit.py +4 -4
  37. camel/toolkits/sympy_toolkit.py +1 -1
  38. camel/toolkits/task_planning_toolkit.py +3 -3
  39. camel/toolkits/thinking_toolkit.py +1 -1
  40. {camel_ai-0.2.67.dist-info → camel_ai-0.2.69a1.dist-info}/METADATA +2 -1
  41. {camel_ai-0.2.67.dist-info → camel_ai-0.2.69a1.dist-info}/RECORD +43 -35
  42. {camel_ai-0.2.67.dist-info → camel_ai-0.2.69a1.dist-info}/WHEEL +0 -0
  43. {camel_ai-0.2.67.dist-info → camel_ai-0.2.69a1.dist-info}/licenses/LICENSE +0 -0
camel/interpreters/docker/Dockerfile CHANGED
@@ -55,11 +55,8 @@ RUN curl -fsSL https://install.python-poetry.org | python3.10 - && \
  # Upgrade pip and install base Python packages
  RUN python3.10 -m pip install --upgrade pip setuptools wheel

- # Install uv
- RUN curl -LsSf https://astral.sh/uv/install.sh | sh && \
-     mv /root/.local/bin/uv /usr/local/bin/uv && \
-     mv /root/.local/bin/uvx /usr/local/bin/uvx && \
-     chmod +x /usr/local/bin/uv /usr/local/bin/uvx
+ # Install uv using pip instead of the shell script
+ RUN pip install uv

  # Setup working directory
  WORKDIR /workspace
camel/loaders/firecrawl_reader.py CHANGED
@@ -98,8 +98,8 @@ class Firecrawl:
      def scrape(
          self,
          url: str,
-         params: Optional[Dict[str, Any]] = None,
-     ) -> Dict:
+         params: Optional[Dict[str, str]] = None,
+     ) -> Dict[str, str]:
          r"""To scrape a single URL. This function supports advanced scraping
          by setting different parameters and returns the full scraped data as a
          dictionary.
@@ -108,11 +108,11 @@ class Firecrawl:
          Args:
              url (str): The URL to read.
-             params (Optional[Dict[str, Any]]): Additional parameters for the
+             params (Optional[Dict[str, str]]): Additional parameters for the
                  scrape request.

          Returns:
-             Dict: The scraped data.
+             Dict[str, str]: The scraped data.

          Raises:
              RuntimeError: If the scrape process fails.
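
Note: the narrowed annotation constrains `params` values to strings. A minimal usage sketch under that constraint (assuming `Firecrawl` is importable from `camel.loaders` and reads `FIRECRAWL_API_KEY` from the environment; the `"formats"` key is an illustrative Firecrawl parameter, not taken from this diff):

    from camel.loaders import Firecrawl

    firecrawl = Firecrawl()  # picks up FIRECRAWL_API_KEY from the environment
    # Values must now be strings per the new Dict[str, str] annotation.
    data = firecrawl.scrape(
        url="https://www.camel-ai.org",
        params={"formats": "markdown"},
    )
    print(data.get("markdown", ""))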
camel/memories/blocks/vectordb_block.py CHANGED
@@ -89,13 +89,20 @@ class VectorDBBlock(MemoryBlock):
          records (List[MemoryRecord]): Memory records to be added to the
              memory.
      """
+     # Filter out records with empty message content
+     valid_records = [
+         record
+         for record in records
+         if record.message.content and record.message.content.strip()
+     ]
+
      v_records = [
          VectorRecord(
              vector=self.embedding.embed(record.message.content),
              payload=record.to_dict(),
              id=str(record.uuid),
          )
-         for record in records
+         for record in valid_records
      ]
      self.storage.add(v_records)

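The guard avoids sending zero-length strings to the embedding backend, which many embedding APIs reject. A self-contained sketch of the same pattern (illustrative names, not the toolkit's API):

    def filter_embeddable(texts: list[str]) -> list[str]:
        # Drop empty or whitespace-only strings before embedding, since
        # most embedding endpoints reject zero-length input.
        return [t for t in texts if t and t.strip()]

    assert filter_embeddable(["hello", "", "   "]) == ["hello"]
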
camel/memories/context_creators/score_based.py CHANGED
@@ -11,14 +11,15 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.
  # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
- from typing import List, Optional, Tuple
+ from collections import defaultdict
+ from typing import Dict, List, Optional, Tuple

  from pydantic import BaseModel

  from camel.logger import get_logger
  from camel.memories.base import BaseContextCreator
  from camel.memories.records import ContextRecord
- from camel.messages import OpenAIMessage
+ from camel.messages import FunctionCallingMessage, OpenAIMessage
  from camel.types.enums import OpenAIBackendRole
  from camel.utils import BaseTokenCounter
@@ -74,6 +75,8 @@ class ScoreBasedContextCreator(BaseContextCreator):
  3. Final output maintains chronological order and in history memory,
     the score of each message decreases according to keep_rate. The
     newer the message, the higher the score.
+ 4. Tool calls and their responses are kept together to maintain
+    API compatibility.

  Args:
      records (List[ContextRecord]): List of context records with scores
@@ -110,6 +113,11 @@ class ScoreBasedContextCreator(BaseContextCreator):
  # Process non-system messages with deduplication
  for idx, record in enumerate(records):
+     if (
+         record.memory_record.role_at_backend
+         == OpenAIBackendRole.SYSTEM
+     ):
+         continue
      if record.memory_record.uuid in seen_uuids:
          continue
      seen_uuids.add(record.memory_record.uuid)
@@ -126,12 +134,17 @@ class ScoreBasedContextCreator(BaseContextCreator):
  )

  # ======================
- # 3. Token Calculation
+ # 3. Tool Call Relationship Mapping
+ # ======================
+ tool_call_groups = self._group_tool_calls_and_responses(regular_units)
+
+ # ======================
+ # 4. Token Calculation
  # ======================
  total_tokens = system_tokens + sum(u.num_tokens for u in regular_units)

  # ======================
- # 4. Early Return if Within Limit
+ # 5. Early Return if Within Limit
  # ======================
  if total_tokens <= self.token_limit:
      sorted_units = sorted(
@@ -140,7 +153,7 @@ class ScoreBasedContextCreator(BaseContextCreator):
      return self._assemble_output(sorted_units, system_unit)

  # ======================
- # 5. Truncation Logic
+ # 6. Truncation Logic with Tool Call Awareness
  # ======================
  logger.warning(
      f"Context truncation required "
@@ -148,24 +161,12 @@ class ScoreBasedContextCreator(BaseContextCreator):
      f"pruning low-score messages."
  )

- # Sort for truncation: high scores first, older messages first at same
- # score
- sorted_for_truncation = sorted(
-     regular_units, key=self._truncation_sort_key
+ remaining_units = self._truncate_with_tool_call_awareness(
+     regular_units, tool_call_groups, system_tokens
  )

- # Reverse to process from lowest score (end of sorted list)
- remaining_units = []
- current_total = system_tokens
-
- for unit in sorted_for_truncation:
-     potential_total = current_total + unit.num_tokens
-     if potential_total <= self.token_limit:
-         remaining_units.append(unit)
-         current_total = potential_total
-
  # ======================
- # 6. Output Assembly
+ # 7. Output Assembly
  # ======================

  # In case system message is the only message in memory when sorted
@@ -180,6 +181,170 @@ class ScoreBasedContextCreator(BaseContextCreator):
  final_units = sorted(remaining_units, key=self._conversation_sort_key)
  return self._assemble_output(final_units, system_unit)

+ def _group_tool_calls_and_responses(
+     self, units: List[_ContextUnit]
+ ) -> Dict[str, List[_ContextUnit]]:
+     r"""Groups tool calls with their corresponding responses based on
+     `tool_call_id`.
+
+     This improved logic robustly gathers all messages (assistant requests
+     and tool responses, including chunks) that share a `tool_call_id`.
+
+     Args:
+         units (List[_ContextUnit]): List of context units to analyze.
+
+     Returns:
+         Dict[str, List[_ContextUnit]]: Mapping from `tool_call_id` to a
+             list of related units.
+     """
+     tool_call_groups: Dict[str, List[_ContextUnit]] = defaultdict(list)
+
+     for unit in units:
+         # FunctionCallingMessage stores tool_call_id.
+         message = unit.record.memory_record.message
+         tool_call_id = getattr(message, 'tool_call_id', None)
+
+         if tool_call_id:
+             tool_call_groups[tool_call_id].append(unit)
+
+     # Filter out empty or incomplete groups if necessary,
+     # though defaultdict and getattr handle this gracefully.
+     return dict(tool_call_groups)
+
+ def _truncate_with_tool_call_awareness(
+     self,
+     regular_units: List[_ContextUnit],
+     tool_call_groups: Dict[str, List[_ContextUnit]],
+     system_tokens: int,
+ ) -> List[_ContextUnit]:
+     r"""Truncates messages while preserving tool call-response pairs.
+     This method implements a more sophisticated truncation strategy:
+     1. It treats tool call groups (request + responses) and standalone
+        messages as individual items to be included.
+     2. It sorts all items by score and greedily adds them to the context.
+     3. **Partial Truncation**: If a complete tool group is too large to
+        fit, it attempts to add the request message and as many of the
+        most recent response chunks as the token budget allows.
+
+     Args:
+         regular_units (List[_ContextUnit]): All regular message units.
+         tool_call_groups (Dict[str, List[_ContextUnit]]): Grouped tool
+             calls.
+         system_tokens (int): Tokens used by the system message.
+
+     Returns:
+         List[_ContextUnit]: A list of units that fit within the token
+             limit.
+     """
+
+     # Create a set for quick lookup of units belonging to any tool call
+     tool_call_unit_ids = {
+         unit.record.memory_record.uuid
+         for group in tool_call_groups.values()
+         for unit in group
+     }
+
+     # Separate standalone units from tool call groups
+     standalone_units = [
+         u
+         for u in regular_units
+         if u.record.memory_record.uuid not in tool_call_unit_ids
+     ]
+
+     # Prepare all items (standalone units and groups) for sorting
+     all_potential_items: List[Dict] = []
+     for unit in standalone_units:
+         all_potential_items.append(
+             {
+                 "type": "standalone",
+                 "score": unit.record.score,
+                 "timestamp": unit.record.timestamp,
+                 "tokens": unit.num_tokens,
+                 "item": unit,
+             }
+         )
+     for group in tool_call_groups.values():
+         all_potential_items.append(
+             {
+                 "type": "group",
+                 "score": max(u.record.score for u in group),
+                 "timestamp": max(u.record.timestamp for u in group),
+                 "tokens": sum(u.num_tokens for u in group),
+                 "item": group,
+             }
+         )
+
+     # Sort all potential items by score (high to low), then timestamp
+     all_potential_items.sort(key=lambda x: (-x["score"], -x["timestamp"]))
+
+     remaining_units: List[_ContextUnit] = []
+     current_tokens = system_tokens
+
+     for item_dict in all_potential_items:
+         item_type = item_dict["type"]
+         item = item_dict["item"]
+         item_tokens = item_dict["tokens"]
+
+         if current_tokens + item_tokens <= self.token_limit:
+             # The whole item (standalone or group) fits, so add it
+             if item_type == "standalone":
+                 remaining_units.append(item)
+             else:  # item_type == "group"
+                 remaining_units.extend(item)
+             current_tokens += item_tokens
+
+         elif item_type == "group":
+             # The group does not fit completely; try partial inclusion.
+             request_unit: Optional[_ContextUnit] = None
+             response_units: List[_ContextUnit] = []
+
+             for unit in item:
+                 # Assistant msg with `args` is the request
+                 if (
+                     isinstance(
+                         unit.record.memory_record.message,
+                         FunctionCallingMessage,
+                     )
+                     and unit.record.memory_record.message.args is not None
+                 ):
+                     request_unit = unit
+                 else:
+                     response_units.append(unit)
+
+             # A group must have a request to be considered for inclusion.
+             if request_unit is None:
+                 continue
+
+             # Check if we can at least fit the request.
+             if (
+                 current_tokens + request_unit.num_tokens
+                 <= self.token_limit
+             ):
+                 units_to_add = [request_unit]
+                 tokens_to_add = request_unit.num_tokens
+
+                 # Sort responses by timestamp to add newest chunks first
+                 response_units.sort(
+                     key=lambda u: u.record.timestamp, reverse=True
+                 )
+
+                 for resp_unit in response_units:
+                     if (
+                         current_tokens
+                         + tokens_to_add
+                         + resp_unit.num_tokens
+                         <= self.token_limit
+                     ):
+                         units_to_add.append(resp_unit)
+                         tokens_to_add += resp_unit.num_tokens
+
+                 # A request must be followed by at least one response
+                 if len(units_to_add) > 1:
+                     remaining_units.extend(units_to_add)
+                     current_tokens += tokens_to_add
+
+     return remaining_units
+
  def _extract_system_message(
      self, records: List[ContextRecord]
  ) -> Tuple[Optional[_ContextUnit], List[_ContextUnit]]:
@@ -215,25 +380,6 @@ class ScoreBasedContextCreator(BaseContextCreator):
      )
      return system_message_unit, []

- def _truncation_sort_key(self, unit: _ContextUnit) -> Tuple[float, float]:
-     r"""Defines the sorting key for the truncation phase.
-
-     Sorting priority:
-     - Primary: Sort by score in descending order (higher scores first).
-     - Secondary: Sort by timestamp in ascending order (older messages
-       first when scores are equal).
-
-     Args:
-         unit (_ContextUnit): A `_ContextUnit` representing a conversation
-             record.
-
-     Returns:
-         Tuple[float, float]:
-             - Negative score for descending order sorting.
-             - Timestamp for ascending order sorting.
-     """
-     return (-unit.record.score, unit.record.timestamp)
-
  def _conversation_sort_key(
      self, unit: _ContextUnit
  ) -> Tuple[float, float]:
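
The reason groups must survive truncation intact: chat-completions APIs reject a `tool`-role message whose matching assistant tool call was pruned, and vice versa. A toy illustration of the grouping step, independent of CAMEL's internal `_ContextUnit` type (all names here are illustrative):

    from collections import defaultdict

    messages = [
        {"role": "assistant", "tool_call_id": "call_1", "args": {"q": "x"}},
        {"role": "tool", "tool_call_id": "call_1", "content": "result"},
        {"role": "user", "content": "hello"},  # standalone, no tool_call_id
    ]

    groups = defaultdict(list)
    for m in messages:
        if m.get("tool_call_id"):
            groups[m["tool_call_id"]].append(m)

    # Truncation then keeps or drops each group atomically (or keeps the
    # request plus the newest responses), never a response without its
    # request.
    assert len(groups["call_1"]) == 2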
camel/models/anthropic_model.py CHANGED
@@ -12,11 +12,14 @@
  # limitations under the License.
  # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
  import os
- from typing import Any, Dict, Optional, Union
+ from typing import Any, Dict, List, Optional, Union
+
+ from openai import AsyncStream, Stream

  from camel.configs import ANTHROPIC_API_PARAMS, AnthropicConfig
+ from camel.messages import OpenAIMessage
  from camel.models.openai_compatible_model import OpenAICompatibleModel
- from camel.types import ModelType
+ from camel.types import ChatCompletion, ChatCompletionChunk, ModelType
  from camel.utils import (
      AnthropicTokenCounter,
      BaseTokenCounter,
@@ -25,6 +28,47 @@ from camel.utils import (
  )


+ def strip_trailing_whitespace_from_messages(
+     messages: List[OpenAIMessage],
+ ) -> List[OpenAIMessage]:
+     r"""Strip trailing whitespace from all message contents in a list of
+     messages. This is necessary because the Anthropic API doesn't allow
+     trailing whitespace in message content.
+
+     Args:
+         messages (List[OpenAIMessage]): List of messages to process.
+
+     Returns:
+         List[OpenAIMessage]: The processed messages with trailing
+             whitespace removed.
+     """
+     if not messages:
+         return messages
+
+     # Create a shallow copy of each message to avoid modifying the
+     # original messages
+     processed_messages = [dict(msg) for msg in messages]
+
+     # Process each message
+     for msg in processed_messages:
+         if "content" in msg and msg["content"] is not None:
+             if isinstance(msg["content"], str):
+                 msg["content"] = msg["content"].rstrip()
+             elif isinstance(msg["content"], list):
+                 # Handle content that's a list of content parts (e.g., for
+                 # multimodal content)
+                 for i, part in enumerate(msg["content"]):
+                     if (
+                         isinstance(part, dict)
+                         and "text" in part
+                         and isinstance(part["text"], str)
+                     ):
+                         part["text"] = part["text"].rstrip()
+                     elif isinstance(part, str):
+                         msg["content"][i] = part.rstrip()
+
+     return processed_messages  # type: ignore[return-value]
+
+
  class AnthropicModel(OpenAICompatibleModel):
      r"""Anthropic API in a unified OpenAICompatibleModel interface.
@@ -89,6 +133,9 @@ class AnthropicModel(OpenAICompatibleModel):
          **kwargs,
      )

+     # Monkey patch the AnthropicTokenCounter to handle trailing whitespace
+     self._patch_anthropic_token_counter()
+
  @property
  def token_counter(self) -> BaseTokenCounter:
      r"""Initialize the token counter for the model backend.
@@ -115,3 +162,68 @@ class AnthropicModel(OpenAICompatibleModel):
          f"Unexpected argument `{param}` is "
          "input into Anthropic model backend."
      )
+
+ def _request_chat_completion(
+     self,
+     messages: List[OpenAIMessage],
+     tools: Optional[List[Dict[str, Any]]] = None,
+ ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+     # Strip trailing whitespace from all message contents to prevent
+     # Anthropic API errors
+     processed_messages = strip_trailing_whitespace_from_messages(messages)
+
+     # Call the parent class method
+     return super()._request_chat_completion(processed_messages, tools)
+
+ async def _arequest_chat_completion(
+     self,
+     messages: List[OpenAIMessage],
+     tools: Optional[List[Dict[str, Any]]] = None,
+ ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+     # Strip trailing whitespace from all message contents to prevent
+     # Anthropic API errors
+     processed_messages = strip_trailing_whitespace_from_messages(messages)
+
+     # Call the parent class method
+     return await super()._arequest_chat_completion(
+         processed_messages, tools
+     )
+
+ def _patch_anthropic_token_counter(self):
+     r"""Monkey patch the AnthropicTokenCounter class to handle trailing
+     whitespace.
+
+     This patches the count_tokens_from_messages method to strip trailing
+     whitespace from message content before sending to the Anthropic API.
+     """
+     import functools
+
+     from anthropic.types import MessageParam
+
+     from camel.utils import AnthropicTokenCounter
+
+     original_count_tokens = (
+         AnthropicTokenCounter.count_tokens_from_messages
+     )
+
+     @functools.wraps(original_count_tokens)
+     def patched_count_tokens(self, messages):
+         # Process messages to remove trailing whitespace
+         processed_messages = strip_trailing_whitespace_from_messages(
+             messages
+         )
+
+         # Use the processed messages with the original method
+         return self.client.messages.count_tokens(
+             messages=[
+                 MessageParam(
+                     content=str(msg["content"]),
+                     role="user" if msg["role"] == "user" else "assistant",
+                 )
+                 for msg in processed_messages
+             ],
+             model=self.model,
+         ).input_tokens
+
+     # Apply the monkey patch
+     AnthropicTokenCounter.count_tokens_from_messages = patched_count_tokens
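
For illustration, the stripping behavior on both string and multipart content (a standalone sketch; message shapes follow the OpenAI chat format this backend consumes):

    messages = [
        {"role": "user", "content": "hello   \n"},
        {"role": "user", "content": [{"type": "text", "text": "part one  "}]},
    ]

    for msg in messages:
        content = msg["content"]
        if isinstance(content, str):
            msg["content"] = content.rstrip()
        elif isinstance(content, list):
            for part in content:
                if isinstance(part, dict) and isinstance(part.get("text"), str):
                    part["text"] = part["text"].rstrip()

    assert messages[0]["content"] == "hello"
    assert messages[1]["content"][0]["text"] == "part one"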
camel/runtimes/configs.py CHANGED
@@ -21,22 +21,22 @@ class TaskConfig(BaseModel):
  Attributes:
      cmd (str or list): Command to be executed
-     stdout (bool): Attach to stdout. (default: :obj: `True`)
-     stderr (bool): Attach to stderr. (default: :obj: `True`)
-     stdin (bool): Attach to stdin. (default: :obj: `False`)
-     tty (bool): Allocate a pseudo-TTY. (default: :obj: `False`)
-     privileged (bool): Run as privileged. (default: :obj: `False`)
-     user (str): User to execute command as. (default: :obj: `""`)
+     stdout (bool): Attach to stdout. (default: :obj:`True`)
+     stderr (bool): Attach to stderr. (default: :obj:`True`)
+     stdin (bool): Attach to stdin. (default: :obj:`False`)
+     tty (bool): Allocate a pseudo-TTY. (default: :obj:`False`)
+     privileged (bool): Run as privileged. (default: :obj:`False`)
+     user (str): User to execute command as. (default: :obj:`""`)
      detach (bool): If true, detach from the exec command.
-         (default: :obj: `False`)
-     stream (bool): Stream response data. (default: :obj: `False`)
+         (default: :obj:`False`)
+     stream (bool): Stream response data. (default: :obj:`False`)
      socket (bool): Return the connection socket to allow custom
-         read/write operations. (default: :obj: `False`)
+         read/write operations. (default: :obj:`False`)
      environment (dict or list): A dictionary or a list of strings in
          the following format ``["PASSWORD=xxx"]`` or
-         ``{"PASSWORD": "xxx"}``. (default: :obj: `None`)
+         ``{"PASSWORD": "xxx"}``. (default: :obj:`None`)
      workdir (str): Path to working directory for this exec session.
-         (default: :obj: `None`)
+         (default: :obj:`None`)
      demux (bool): Return stdout and stderr separately. (default: :obj:
          `False`)
  """
camel/runtimes/daytona_runtime.py CHANGED
@@ -34,13 +34,13 @@ class DaytonaRuntime(BaseRuntime):
  Args:
      api_key (Optional[str]): The Daytona API key for authentication. If not
          provided, it will try to use the DAYTONA_API_KEY environment
-         variable. (default: :obj: `None`)
+         variable. (default: :obj:`None`)
      api_url (Optional[str]): The URL of the Daytona server. If not
          provided, it will try to use the DAYTONA_API_URL environment
          variable. If none is provided, it will use "http://localhost:8000".
-         (default: :obj: `None`)
+         (default: :obj:`None`)
      language (Optional[str]): The programming language for the sandbox.
-         (default: :obj: `"python"`)
+         (default: :obj:`"python"`)
  """

  def __init__(
@@ -102,7 +102,7 @@ class DaytonaRuntime(BaseRuntime):
          list of functions to add.
      entrypoint (str): The entrypoint for the function.
      arguments (Optional[Dict[str, Any]]): The arguments for the
-         function. (default: :obj: `None`)
+         function. (default: :obj:`None`)

  Returns:
      DaytonaRuntime: The current runtime.
camel/runtimes/docker_runtime.py CHANGED
@@ -45,7 +45,7 @@ class DockerRuntime(BaseRuntime):
  port (int): The port number to use for the runtime API. (default: :obj:
      `8000`)
  remove (bool): Whether to remove the container after stopping it.
-     (default: :obj: `True`)
+     (default: :obj:`True`)
  kwargs (dict): Additional keyword arguments to pass to the
      Docker client.
  """
@@ -170,7 +170,7 @@ class DockerRuntime(BaseRuntime):
  Args:
      time_out (int): The number of seconds to wait for the container to
-         start. (default: :obj: `15`)
+         start. (default: :obj:`15`)

  Returns:
      DockerRuntime: The DockerRuntime instance.
@@ -259,9 +259,9 @@ class DockerRuntime(BaseRuntime):
          list of functions to add.
      entrypoint (str): The entrypoint for the function.
      redirect_stdout (bool): Whether to return the stdout of
-         the function. (default: :obj: `False`)
+         the function. (default: :obj:`False`)
      arguments (Optional[Dict[str, Any]]): The arguments for the
-         function. (default: :obj: `None`)
+         function. (default: :obj:`None`)

  Returns:
      DockerRuntime: The DockerRuntime instance.
@@ -330,7 +330,7 @@ class DockerRuntime(BaseRuntime):
  Args:
      remove (Optional[bool]): Whether to remove the container
-         after stopping it. (default: :obj: `None`)
+         after stopping it. (default: :obj:`None`)

  Returns:
      DockerRuntime: The DockerRuntime instance.
@@ -366,7 +366,7 @@ class DockerRuntime(BaseRuntime):
  r"""Wait for the API Server to be ready.

  Args:
-     timeout (int): The number of seconds to wait. (default: :obj: `10`)
+     timeout (int): The number of seconds to wait. (default: :obj:`10`)

  Returns:
      bool: Whether the API Server is ready.
camel/runtimes/remote_http_runtime.py CHANGED
@@ -36,9 +36,9 @@ class RemoteHttpRuntime(BaseRuntime):
  Args:
      host (str): The host of the remote server.
-     port (int): The port of the remote server. (default: :obj: `8000`)
+     port (int): The port of the remote server. (default: :obj:`8000`)
      python_exec (str): The python executable to run the API server.
-         (default: :obj: `python3`)
+         (default: :obj:`python3`)
  """

  def __init__(
@@ -90,9 +90,9 @@ class RemoteHttpRuntime(BaseRuntime):
          list of functions to add.
      entrypoint (str): The entrypoint for the function.
      redirect_stdout (bool): Whether to return the stdout of
-         the function. (default: :obj: `False`)
+         the function. (default: :obj:`False`)
      arguments (Optional[Dict[str, Any]]): The arguments for the
-         function. (default: :obj: `None`)
+         function. (default: :obj:`None`)

  Returns:
      RemoteHttpRuntime: The current runtime.
@@ -162,7 +162,7 @@ class RemoteHttpRuntime(BaseRuntime):
  r"""Wait for the API Server to be ready.

  Args:
-     timeout (int): The number of seconds to wait. (default: :obj: `10`)
+     timeout (int): The number of seconds to wait. (default: :obj:`10`)

  Returns:
      bool: Whether the API Server is ready.
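
A typical call pattern implied by these docstrings (a hedged sketch: `RemoteHttpRuntime`, `add`, and `wait` appear in the fragments above, while `MathToolkit`, `get_tools`, and the `build` step are assumptions about the wider camel-ai API):

    from camel.runtimes import RemoteHttpRuntime
    from camel.toolkits import MathToolkit

    runtime = (
        RemoteHttpRuntime(host="localhost", port=8000)
        # `add` returns the current runtime per the docstring, so calls chain
        .add(MathToolkit().get_tools(), entrypoint="camel.toolkits.MathToolkit")
        .build()  # assumption: a build/start step launches the API server
    )
    if runtime.wait(timeout=10):  # documented default wait is 10 seconds
        print("API server ready")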