camel-ai 0.2.68__py3-none-any.whl → 0.2.69a1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of camel-ai might be problematic.
Files changed (36)
  1. camel/__init__.py +1 -1
  2. camel/agents/chat_agent.py +170 -11
  3. camel/configs/vllm_config.py +2 -0
  4. camel/datagen/self_improving_cot.py +1 -1
  5. camel/memories/context_creators/score_based.py +129 -87
  6. camel/runtimes/configs.py +11 -11
  7. camel/runtimes/daytona_runtime.py +4 -4
  8. camel/runtimes/docker_runtime.py +6 -6
  9. camel/runtimes/remote_http_runtime.py +5 -5
  10. camel/societies/workforce/prompts.py +13 -12
  11. camel/societies/workforce/single_agent_worker.py +252 -22
  12. camel/societies/workforce/utils.py +10 -2
  13. camel/societies/workforce/worker.py +21 -45
  14. camel/societies/workforce/workforce.py +36 -15
  15. camel/tasks/task.py +18 -12
  16. camel/toolkits/__init__.py +2 -0
  17. camel/toolkits/aci_toolkit.py +19 -19
  18. camel/toolkits/arxiv_toolkit.py +6 -6
  19. camel/toolkits/dappier_toolkit.py +5 -5
  20. camel/toolkits/file_write_toolkit.py +10 -10
  21. camel/toolkits/github_toolkit.py +3 -3
  22. camel/toolkits/non_visual_browser_toolkit/__init__.py +18 -0
  23. camel/toolkits/non_visual_browser_toolkit/actions.py +196 -0
  24. camel/toolkits/non_visual_browser_toolkit/agent.py +278 -0
  25. camel/toolkits/non_visual_browser_toolkit/browser_non_visual_toolkit.py +363 -0
  26. camel/toolkits/non_visual_browser_toolkit/nv_browser_session.py +175 -0
  27. camel/toolkits/non_visual_browser_toolkit/snapshot.js +188 -0
  28. camel/toolkits/non_visual_browser_toolkit/snapshot.py +164 -0
  29. camel/toolkits/pptx_toolkit.py +4 -4
  30. camel/toolkits/sympy_toolkit.py +1 -1
  31. camel/toolkits/task_planning_toolkit.py +3 -3
  32. camel/toolkits/thinking_toolkit.py +1 -1
  33. {camel_ai-0.2.68.dist-info → camel_ai-0.2.69a1.dist-info}/METADATA +1 -1
  34. {camel_ai-0.2.68.dist-info → camel_ai-0.2.69a1.dist-info}/RECORD +36 -29
  35. {camel_ai-0.2.68.dist-info → camel_ai-0.2.69a1.dist-info}/WHEEL +0 -0
  36. {camel_ai-0.2.68.dist-info → camel_ai-0.2.69a1.dist-info}/licenses/LICENSE +0 -0
camel/__init__.py CHANGED
@@ -14,7 +14,7 @@
 
 from camel.logger import disable_logging, enable_logging, set_log_level
 
-__version__ = '0.2.68'
+__version__ = '0.2.69a1'
 
 __all__ = [
     '__version__',
camel/agents/chat_agent.py CHANGED
@@ -54,7 +54,11 @@ from camel.memories import (
     MemoryRecord,
     ScoreBasedContextCreator,
 )
-from camel.messages import BaseMessage, FunctionCallingMessage, OpenAIMessage
+from camel.messages import (
+    BaseMessage,
+    FunctionCallingMessage,
+    OpenAIMessage,
+)
 from camel.models import (
     BaseModelBackend,
     ModelFactory,
@@ -512,27 +516,172 @@ class ChatAgent(BaseAgent):
     ) -> None:
         r"""Updates the agent memory with a new message.
 
+        If the single *message* exceeds the model's context window, it will
+        be **automatically split into multiple smaller chunks** before being
+        written into memory. This prevents later failures in
+        `ScoreBasedContextCreator` where an over-sized message cannot fit
+        into the available token budget at all.
+
+        This slicing logic handles both regular text messages (in the
+        `content` field) and long tool call results (in the `result` field of
+        a `FunctionCallingMessage`).
+
         Args:
             message (BaseMessage): The new message to add to the stored
                 messages.
             role (OpenAIBackendRole): The backend role type.
             timestamp (Optional[float], optional): Custom timestamp for the
-                memory record. If None, current timestamp will be used.
+                memory record. If `None`, the current time will be used.
                 (default: :obj:`None`)
+                (default: obj:`None`)
         """
+        import math
         import time
+        import uuid as _uuid
+
+        # 1. Helper to write a record to memory
+        def _write_single_record(
+            message: BaseMessage, role: OpenAIBackendRole, timestamp: float
+        ):
+            self.memory.write_record(
+                MemoryRecord(
+                    message=message,
+                    role_at_backend=role,
+                    timestamp=timestamp,
+                    agent_id=self.agent_id,
+                )
+            )
+
+        base_ts = (
+            timestamp
+            if timestamp is not None
+            else time.time_ns() / 1_000_000_000
+        )
+
+        # 2. Get token handling utilities, fallback if unavailable
+        try:
+            context_creator = self.memory.get_context_creator()
+            token_counter = context_creator.token_counter
+            token_limit = context_creator.token_limit
+        except AttributeError:
+            _write_single_record(message, role, base_ts)
+            return
 
-        self.memory.write_record(
-            MemoryRecord(
-                message=message,
-                role_at_backend=role,
-                timestamp=timestamp
-                if timestamp is not None
-                else time.time_ns() / 1_000_000_000,  # Nanosecond precision
-                agent_id=self.agent_id,
+        # 3. Check if slicing is necessary
+        try:
+            current_tokens = token_counter.count_tokens_from_messages(
+                [message.to_openai_message(role)]
+            )
+            _, ctx_tokens = self.memory.get_context()
+            remaining_budget = max(0, token_limit - ctx_tokens)
+
+            if current_tokens <= remaining_budget:
+                _write_single_record(message, role, base_ts)
+                return
+        except Exception as e:
+            logger.warning(
+                f"Token calculation failed before chunking, "
+                f"writing message as-is. Error: {e}"
             )
+            _write_single_record(message, role, base_ts)
+            return
+
+        # 4. Perform slicing
+        logger.warning(
+            f"Message with {current_tokens} tokens exceeds remaining budget "
+            f"of {remaining_budget}. Slicing into smaller chunks."
         )
 
+        text_to_chunk: Optional[str] = None
+        is_function_result = False
+
+        if isinstance(message, FunctionCallingMessage) and isinstance(
+            message.result, str
+        ):
+            text_to_chunk = message.result
+            is_function_result = True
+        elif isinstance(message.content, str):
+            text_to_chunk = message.content
+
+        if not text_to_chunk or not text_to_chunk.strip():
+            _write_single_record(message, role, base_ts)
+            return
+        # Encode the entire text to get a list of all token IDs
+        try:
+            all_token_ids = token_counter.encode(text_to_chunk)
+        except Exception as e:
+            logger.error(f"Failed to encode text for chunking: {e}")
+            _write_single_record(message, role, base_ts)  # Fallback
+            return
+
+        if not all_token_ids:
+            _write_single_record(message, role, base_ts)  # Nothing to chunk
+            return
+
+        # 1. Base chunk size: one-tenth of the smaller of (a) total token
+        #    limit and (b) current remaining budget. This prevents us from
+        #    creating chunks that are guaranteed to overflow the
+        #    immediate context window.
+        base_chunk_size = max(1, remaining_budget) // 10
+
+        # 2. Each chunk gets a textual prefix such as:
+        #    "[chunk 3/12 of a long message]\n"
+        #    The prefix itself consumes tokens, so if we do not subtract its
+        #    length the *total* tokens of the outgoing message (prefix + body)
+        #    can exceed the intended bound. We estimate the prefix length
+        #    with a representative example that is safely long enough for the
+        #    vast majority of cases (three-digit indices).
+        sample_prefix = "[chunk 1/1000 of a long message]\n"
+        prefix_token_len = len(token_counter.encode(sample_prefix))
+
+        # 3. The real capacity for the message body is therefore the base
+        #    chunk size minus the prefix length. Fallback to at least one
+        #    token to avoid zero or negative sizes.
+        chunk_body_limit = max(1, base_chunk_size - prefix_token_len)
+
+        # 4. Calculate how many chunks we will need with this body size.
+        num_chunks = math.ceil(len(all_token_ids) / chunk_body_limit)
+        group_id = str(_uuid.uuid4())
+
+        for i in range(num_chunks):
+            start_idx = i * chunk_body_limit
+            end_idx = start_idx + chunk_body_limit
+            chunk_token_ids = all_token_ids[start_idx:end_idx]
+
+            chunk_body = token_counter.decode(chunk_token_ids)
+
+            prefix = f"[chunk {i + 1}/{num_chunks} of a long message]\n"
+            new_body = prefix + chunk_body
+
+            if is_function_result and isinstance(
+                message, FunctionCallingMessage
+            ):
+                new_msg: BaseMessage = FunctionCallingMessage(
+                    role_name=message.role_name,
+                    role_type=message.role_type,
+                    meta_dict=message.meta_dict,
+                    content=message.content,
+                    func_name=message.func_name,
+                    args=message.args,
+                    result=new_body,
+                    tool_call_id=message.tool_call_id,
+                )
+            else:
+                new_msg = message.create_new_instance(new_body)
+
+            meta = (new_msg.meta_dict or {}).copy()
+            meta.update(
+                {
+                    "chunk_idx": i + 1,
+                    "chunk_total": num_chunks,
+                    "chunk_group_id": group_id,
+                }
+            )
+            new_msg.meta_dict = meta
+
+            # Increment timestamp slightly to maintain order
+            _write_single_record(new_msg, role, base_ts + i * 1e-6)
 
     def load_memory(self, memory: AgentMemory) -> None:
         r"""Load the provided memory into the agent.
 
@@ -652,9 +801,19 @@ class ChatAgent(BaseAgent):
         r"""Initializes the stored messages list with the current system
         message.
         """
+        import time
+
         self.memory.clear()
+        # avoid UserWarning: The `ChatHistoryMemory` is empty.
         if self.system_message is not None:
-            self.update_memory(self.system_message, OpenAIBackendRole.SYSTEM)
+            self.memory.write_record(
+                MemoryRecord(
+                    message=self.system_message,
+                    role_at_backend=OpenAIBackendRole.SYSTEM,
+                    timestamp=time.time_ns() / 1_000_000_000,
+                    agent_id=self.agent_id,
+                )
+            )
 
     def record_message(self, message: BaseMessage) -> None:
         r"""Records the externally provided message into the agent memory as if
camel/configs/vllm_config.py CHANGED
@@ -90,6 +90,7 @@ class VLLMConfig(BaseConfig):
             most likely tokens to return at each token position, each with an
             associated log probability. `logprobs` must be set to `true` if
             this parameter is used. (default: :obj:`None`)
+        extra_body: Add additional JSON properties to the request. (default: :obj:`None`)
     """
 
     temperature: Optional[float] = None  # openai default: 1.0
@@ -105,6 +106,7 @@ class VLLMConfig(BaseConfig):
     user: Optional[str] = None
     logprobs: Optional[bool] = None
     top_logprobs: Optional[int] = None
+    extra_body: Optional[dict] = None
 
 
 VLLM_API_PARAMS = {param for param in VLLMConfig.model_fields.keys()}
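The new `extra_body` field gives a pass-through for backend-specific request options when talking to a vLLM server. A short sketch, assuming the usual `BaseConfig.as_dict()` helper; the "guided_json" key is a vLLM-side option used purely as an example and is not validated by CAMEL itself:

    from camel.configs import VLLMConfig

    config_dict = VLLMConfig(
        temperature=0.2,
        # Forwarded verbatim as additional JSON properties of the request.
        extra_body={"guided_json": {"type": "object"}},
    ).as_dict()
    print(config_dict["extra_body"])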
camel/datagen/self_improving_cot.py CHANGED
@@ -116,7 +116,7 @@ class SelfImprovingCoTPipeline:
             samples to be drawn using the rejection sampling
             method, where samples are accepted or rejected based on
             a predefined condition to achieve a desired distribution.
-            (default: :obj: `None`)
+            (default: :obj:`None`)
         evaluate_agent (Optional[ChatAgent]): The chat agent used for
             evaluating reasoning traces. (default: :obj:`None`)
         reward_model (BaseRewardModel, optional): Model used to evaluate
camel/memories/context_creators/score_based.py CHANGED
@@ -11,6 +11,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+from collections import defaultdict
 from typing import Dict, List, Optional, Tuple
 
 from pydantic import BaseModel
@@ -18,7 +19,7 @@ from pydantic import BaseModel
 from camel.logger import get_logger
 from camel.memories.base import BaseContextCreator
 from camel.memories.records import ContextRecord
-from camel.messages import OpenAIMessage
+from camel.messages import FunctionCallingMessage, OpenAIMessage
 from camel.types.enums import OpenAIBackendRole
 from camel.utils import BaseTokenCounter
 
@@ -112,6 +113,11 @@ class ScoreBasedContextCreator(BaseContextCreator):
 
         # Process non-system messages with deduplication
         for idx, record in enumerate(records):
+            if (
+                record.memory_record.role_at_backend
+                == OpenAIBackendRole.SYSTEM
+            ):
+                continue
             if record.memory_record.uuid in seen_uuids:
                 continue
             seen_uuids.add(record.memory_record.uuid)
@@ -178,47 +184,32 @@ class ScoreBasedContextCreator(BaseContextCreator):
     def _group_tool_calls_and_responses(
         self, units: List[_ContextUnit]
     ) -> Dict[str, List[_ContextUnit]]:
-        r"""Groups tool calls with their corresponding responses.
+        r"""Groups tool calls with their corresponding responses based on
+        `tool_call_id`.
+
+        This improved logic robustly gathers all messages (assistant requests
+        and tool responses, including chunks) that share a `tool_call_id`.
 
         Args:
-            units (List[_ContextUnit]): List of context units to analyze
+            units (List[_ContextUnit]): List of context units to analyze.
 
         Returns:
-            Dict[str, List[_ContextUnit]]: Mapping from tool_call_id to list of
-                related units (tool call + responses)
+            Dict[str, List[_ContextUnit]]: Mapping from `tool_call_id` to a
+                list of related units.
         """
-        tool_call_groups: Dict[str, List[_ContextUnit]] = {}
+        tool_call_groups: Dict[str, List[_ContextUnit]] = defaultdict(list)
 
         for unit in units:
+            # FunctionCallingMessage stores tool_call_id.
             message = unit.record.memory_record.message
-            backend_role = unit.record.memory_record.role_at_backend
+            tool_call_id = getattr(message, 'tool_call_id', None)
 
-            # Check if this is a tool call message
-            if hasattr(message, 'func_name') and hasattr(
-                message, 'tool_call_id'
-            ):
-                tool_call_id = getattr(message, 'tool_call_id', None)
-                if tool_call_id:
-                    if tool_call_id not in tool_call_groups:
-                        tool_call_groups[tool_call_id] = []
-                    tool_call_groups[tool_call_id].append(unit)
-
-            # Check if this is a tool response message
-            elif backend_role == OpenAIBackendRole.FUNCTION:
-                tool_call_id = None
-                if hasattr(message, 'tool_call_id'):
-                    tool_call_id = getattr(message, 'tool_call_id', None)
-                elif hasattr(message, 'result') and hasattr(
-                    message, 'tool_call_id'
-                ):
-                    tool_call_id = getattr(message, 'tool_call_id', None)
-
-                if tool_call_id:
-                    if tool_call_id not in tool_call_groups:
-                        tool_call_groups[tool_call_id] = []
-                    tool_call_groups[tool_call_id].append(unit)
+            if tool_call_id:
+                tool_call_groups[tool_call_id].append(unit)
 
-        return tool_call_groups
+        # Filter out empty or incomplete groups if necessary,
+        # though defaultdict and getattr handle this gracefully.
+        return dict(tool_call_groups)
 
     def _truncate_with_tool_call_awareness(
         self,
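The rewritten grouping reduces to a single pass keyed by `tool_call_id`. A stripped-down sketch with plain dicts standing in for `_ContextUnit` objects (the ids and texts are made up):

    from collections import defaultdict

    units = [
        {"tool_call_id": "call_1", "text": "assistant tool request"},
        {"tool_call_id": "call_1", "text": "tool response chunk 1/2"},
        {"tool_call_id": "call_1", "text": "tool response chunk 2/2"},
        {"tool_call_id": None, "text": "plain assistant message"},
    ]

    groups = defaultdict(list)
    for unit in units:
        # Anything without a tool_call_id stays a standalone message.
        if unit["tool_call_id"]:
            groups[unit["tool_call_id"]].append(unit)

    print({k: len(v) for k, v in groups.items()})  # -> {'call_1': 3}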
@@ -227,60 +218,130 @@ class ScoreBasedContextCreator(BaseContextCreator):
         system_tokens: int,
     ) -> List[_ContextUnit]:
         r"""Truncates messages while preserving tool call-response pairs.
+        This method implements a more sophisticated truncation strategy:
+        1. It treats tool call groups (request + responses) and standalone
+           messages as individual items to be included.
+        2. It sorts all items by score and greedily adds them to the context.
+        3. **Partial Truncation**: If a complete tool group is too large to
+           fit,it attempts to add the request message and as many of the most
+           recent response chunks as the token budget allows.
 
         Args:
-            regular_units (List[_ContextUnit]): All regular message units
+            regular_units (List[_ContextUnit]): All regular message units.
             tool_call_groups (Dict[str, List[_ContextUnit]]): Grouped tool
-                calls
-            system_tokens (int): Tokens used by system message
+                calls.
+            system_tokens (int): Tokens used by the system message.
 
         Returns:
-            List[_ContextUnit]: Units that fit within token limit
+            List[_ContextUnit]: A list of units that fit within the token
+                limit.
         """
-        # Create sets for quick lookup of tool call related units
-        tool_call_unit_ids = set()
-        for group in tool_call_groups.values():
-            for unit in group:
-                tool_call_unit_ids.add(unit.record.memory_record.uuid)
 
-        # Separate tool call groups and standalone units
+        # Create a set for quick lookup of units belonging to any tool call
+        tool_call_unit_ids = {
+            unit.record.memory_record.uuid
+            for group in tool_call_groups.values()
+            for unit in group
+        }
+
+        # Separate standalone units from tool call groups
         standalone_units = [
            u
            for u in regular_units
            if u.record.memory_record.uuid not in tool_call_unit_ids
        ]
 
-        # Sort standalone units for truncation (high scores first)
-        standalone_units.sort(key=self._truncation_sort_key)
-
-        # Sort tool call groups by their best (highest) score
-        sorted_tool_groups = []
-        for _tool_call_id, group in tool_call_groups.items():
-            # Use the highest score in the group as the group's score
-            best_score = max(unit.record.score for unit in group)
-            latest_timestamp = max(unit.record.timestamp for unit in group)
-            group_tokens = sum(unit.num_tokens for unit in group)
-            sorted_tool_groups.append(
-                ((-best_score, -latest_timestamp), group, group_tokens)
+        # Prepare all items (standalone units and groups) for sorting
+        all_potential_items: List[Dict] = []
+        for unit in standalone_units:
+            all_potential_items.append(
+                {
+                    "type": "standalone",
+                    "score": unit.record.score,
+                    "timestamp": unit.record.timestamp,
+                    "tokens": unit.num_tokens,
+                    "item": unit,
+                }
+            )
+        for group in tool_call_groups.values():
+            all_potential_items.append(
+                {
+                    "type": "group",
+                    "score": max(u.record.score for u in group),
+                    "timestamp": max(u.record.timestamp for u in group),
+                    "tokens": sum(u.num_tokens for u in group),
+                    "item": group,
+                }
             )
 
-        sorted_tool_groups.sort(key=lambda x: x[0])
+        # Sort all potential items by score (high to low), then timestamp
+        all_potential_items.sort(key=lambda x: (-x["score"], -x["timestamp"]))
 
-        # Greedy selection to fit within token limit
-        remaining_units = []
+        remaining_units: List[_ContextUnit] = []
         current_tokens = system_tokens
 
-        # First, try to include complete tool call groups
-        for _, group, group_tokens in sorted_tool_groups:
-            if current_tokens + group_tokens <= self.token_limit:
-                remaining_units.extend(group)
-                current_tokens += group_tokens
-
-        # Then, include standalone units
-        for unit in standalone_units:
-            if current_tokens + unit.num_tokens <= self.token_limit:
-                remaining_units.append(unit)
-                current_tokens += unit.num_tokens
+        for item_dict in all_potential_items:
+            item_type = item_dict["type"]
+            item = item_dict["item"]
+            item_tokens = item_dict["tokens"]
+
+            if current_tokens + item_tokens <= self.token_limit:
+                # The whole item (standalone or group) fits, so add it
+                if item_type == "standalone":
+                    remaining_units.append(item)
+                else:  # item_type == "group"
+                    remaining_units.extend(item)
+                current_tokens += item_tokens
+
+            elif item_type == "group":
+                # The group does not fit completely; try partial inclusion.
+                request_unit: Optional[_ContextUnit] = None
+                response_units: List[_ContextUnit] = []
+
+                for unit in item:
+                    # Assistant msg with `args` is the request
+                    if (
+                        isinstance(
+                            unit.record.memory_record.message,
+                            FunctionCallingMessage,
+                        )
+                        and unit.record.memory_record.message.args is not None
+                    ):
+                        request_unit = unit
+                    else:
+                        response_units.append(unit)
+
+                # A group must have a request to be considered for inclusion.
+                if request_unit is None:
+                    continue
+
+                # Check if we can at least fit the request.
+                if (
+                    current_tokens + request_unit.num_tokens
+                    <= self.token_limit
+                ):
+                    units_to_add = [request_unit]
+                    tokens_to_add = request_unit.num_tokens
+
+                    # Sort responses by timestamp to add newest chunks first
+                    response_units.sort(
+                        key=lambda u: u.record.timestamp, reverse=True
+                    )
+
+                    for resp_unit in response_units:
+                        if (
+                            current_tokens
+                            + tokens_to_add
+                            + resp_unit.num_tokens
+                            <= self.token_limit
+                        ):
+                            units_to_add.append(resp_unit)
+                            tokens_to_add += resp_unit.num_tokens
+
+                    # A request must be followed by at least one response
+                    if len(units_to_add) > 1:
+                        remaining_units.extend(units_to_add)
+                        current_tokens += tokens_to_add
 
         return remaining_units
 
@@ -319,25 +380,6 @@ class ScoreBasedContextCreator(BaseContextCreator):
             )
             return system_message_unit, []
 
-    def _truncation_sort_key(self, unit: _ContextUnit) -> Tuple[float, float]:
-        r"""Defines the sorting key for the truncation phase.
-
-        Sorting priority:
-        - Primary: Sort by score in descending order (higher scores first).
-        - Secondary: Sort by timestamp in ascending order (older messages
-          first when scores are equal).
-
-        Args:
-            unit (_ContextUnit): A `_ContextUnit` representing a conversation
-                record.
-
-        Returns:
-            Tuple[float, float]:
-                - Negative score for descending order sorting.
-                - Timestamp for ascending order sorting.
-        """
-        return (-unit.record.score, unit.record.timestamp)
-
     def _conversation_sort_key(
         self, unit: _ContextUnit
     ) -> Tuple[float, float]:
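Taken together, the score_based.py changes replace the old two-phase selection (whole tool groups first, then standalone messages) with one greedy pass over score-sorted items, falling back to a partial group (request plus the newest response chunks) when a whole group does not fit. A toy sketch of the greedy pass only, with (score, tokens) tuples standing in for context units and a made-up budget:

    TOKEN_LIMIT = 100

    # (score, tokens) pairs for standalone messages and whole tool groups.
    items = [(0.9, 40), (0.8, 50), (0.7, 30), (0.2, 25)]

    items.sort(key=lambda x: -x[0])  # highest score first
    kept, used = [], 0
    for score, tokens in items:
        if used + tokens <= TOKEN_LIMIT:  # greedy: keep whatever still fits
            kept.append((score, tokens))
            used += tokens

    print(kept, used)  # -> [(0.9, 40), (0.8, 50)] 90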
camel/runtimes/configs.py CHANGED
@@ -21,22 +21,22 @@ class TaskConfig(BaseModel):
 
     Arttributes:
         cmd (str or list): Command to be executed
-        stdout (bool): Attach to stdout. (default: :obj: `True`)
-        stderr (bool): Attach to stderr. (default: :obj: `True`)
-        stdin (bool): Attach to stdin. (default: :obj: `False`)
-        tty (bool): Allocate a pseudo-TTY. (default: :obj: `False`)
-        privileged (bool): Run as privileged. (default: :obj: `False`)
-        user (str): User to execute command as. (default: :obj: `""`)
+        stdout (bool): Attach to stdout. (default: :obj:`True`)
+        stderr (bool): Attach to stderr. (default: :obj:`True`)
+        stdin (bool): Attach to stdin. (default: :obj:`False`)
+        tty (bool): Allocate a pseudo-TTY. (default: :obj:`False`)
+        privileged (bool): Run as privileged. (default: :obj:`False`)
+        user (str): User to execute command as. (default: :obj:`""`)
         detach (bool): If true, detach from the exec command.
-            (default: :obj: `False`)
-        stream (bool): Stream response data. (default: :obj: `False`)
+            (default: :obj:`False`)
+        stream (bool): Stream response data. (default: :obj:`False`)
         socket (bool): Return the connection socket to allow custom
-            read/write operations. (default: :obj: `False`)
+            read/write operations. (default: :obj:`False`)
         environment (dict or list): A dictionary or a list of strings in
             the following format ``["PASSWORD=xxx"]`` or
-            ``{"PASSWORD": "xxx"}``. (default: :obj: `None`)
+            ``{"PASSWORD": "xxx"}``. (default: :obj:`None`)
         workdir (str): Path to working directory for this exec session.
-            (default: :obj: `None`)
+            (default: :obj:`None`)
         demux (bool): Return stdout and stderr separately. (default: :obj:
             `False`)
     """
camel/runtimes/daytona_runtime.py CHANGED
@@ -34,13 +34,13 @@ class DaytonaRuntime(BaseRuntime):
     Args:
         api_key (Optional[str]): The Daytona API key for authentication. If not
             provided, it will try to use the DAYTONA_API_KEY environment
-            variable. (default: :obj: `None`)
+            variable. (default: :obj:`None`)
         api_url (Optional[str]): The URL of the Daytona server. If not
            provided, it will try to use the DAYTONA_API_URL environment
            variable. If none is provided, it will use "http://localhost:8000".
-            (default: :obj: `None`)
+            (default: :obj:`None`)
         language (Optional[str]): The programming language for the sandbox.
-            (default: :obj: `"python"`)
+            (default: :obj:`"python"`)
     """
 
     def __init__(
@@ -102,7 +102,7 @@ class DaytonaRuntime(BaseRuntime):
                 list of functions to add.
             entrypoint (str): The entrypoint for the function.
             arguments (Optional[Dict[str, Any]]): The arguments for the
-                function. (default: :obj: `None`)
+                function. (default: :obj:`None`)
 
         Returns:
             DaytonaRuntime: The current runtime.
camel/runtimes/docker_runtime.py CHANGED
@@ -45,7 +45,7 @@ class DockerRuntime(BaseRuntime):
         port (int): The port number to use for the runtime API. (default: :obj:
             `8000`)
         remove (bool): Whether to remove the container after stopping it. '
-            (default: :obj: `True`)
+            (default: :obj:`True`)
         kwargs (dict): Additional keyword arguments to pass to the
             Docker client.
     """
@@ -170,7 +170,7 @@ class DockerRuntime(BaseRuntime):
 
         Args:
            time_out (int): The number of seconds to wait for the container to
-                start. (default: :obj: `15`)
+                start. (default: :obj:`15`)
 
         Returns:
             DockerRuntime: The DockerRuntime instance.
@@ -259,9 +259,9 @@ class DockerRuntime(BaseRuntime):
                 list of functions to add.
             entrypoint (str): The entrypoint for the function.
             redirect_stdout (bool): Whether to return the stdout of
-                the function. (default: :obj: `False`)
+                the function. (default: :obj:`False`)
             arguments (Optional[Dict[str, Any]]): The arguments for the
-                function. (default: :obj: `None`)
+                function. (default: :obj:`None`)
 
         Returns:
             DockerRuntime: The DockerRuntime instance.
@@ -330,7 +330,7 @@ class DockerRuntime(BaseRuntime):
 
         Args:
             remove (Optional[bool]): Whether to remove the container
-                after stopping it. (default: :obj: `None`)
+                after stopping it. (default: :obj:`None`)
 
         Returns:
             DockerRuntime: The DockerRuntime instance.
@@ -366,7 +366,7 @@ class DockerRuntime(BaseRuntime):
         r"""Wait for the API Server to be ready.
 
         Args:
-            timeout (int): The number of seconds to wait. (default: :obj: `10`)
+            timeout (int): The number of seconds to wait. (default: :obj:`10`)
 
         Returns:
             bool: Whether the API Server is ready.