camel-ai 0.2.67__py3-none-any.whl → 0.2.69a1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of camel-ai has been flagged as possibly problematic by the registry.

Files changed (43)
  1. camel/__init__.py +1 -1
  2. camel/agents/chat_agent.py +170 -11
  3. camel/configs/vllm_config.py +2 -0
  4. camel/datagen/self_improving_cot.py +1 -1
  5. camel/environments/__init__.py +12 -0
  6. camel/environments/rlcards_env.py +860 -0
  7. camel/interpreters/docker/Dockerfile +2 -5
  8. camel/loaders/firecrawl_reader.py +4 -4
  9. camel/memories/blocks/vectordb_block.py +8 -1
  10. camel/memories/context_creators/score_based.py +185 -39
  11. camel/models/anthropic_model.py +114 -2
  12. camel/runtimes/configs.py +11 -11
  13. camel/runtimes/daytona_runtime.py +4 -4
  14. camel/runtimes/docker_runtime.py +6 -6
  15. camel/runtimes/remote_http_runtime.py +5 -5
  16. camel/societies/workforce/prompts.py +55 -21
  17. camel/societies/workforce/single_agent_worker.py +274 -14
  18. camel/societies/workforce/task_channel.py +9 -2
  19. camel/societies/workforce/utils.py +10 -2
  20. camel/societies/workforce/worker.py +74 -16
  21. camel/societies/workforce/workforce.py +90 -35
  22. camel/tasks/task.py +18 -12
  23. camel/toolkits/__init__.py +2 -0
  24. camel/toolkits/aci_toolkit.py +19 -19
  25. camel/toolkits/arxiv_toolkit.py +6 -6
  26. camel/toolkits/dappier_toolkit.py +5 -5
  27. camel/toolkits/file_write_toolkit.py +10 -10
  28. camel/toolkits/github_toolkit.py +3 -3
  29. camel/toolkits/non_visual_browser_toolkit/__init__.py +18 -0
  30. camel/toolkits/non_visual_browser_toolkit/actions.py +196 -0
  31. camel/toolkits/non_visual_browser_toolkit/agent.py +278 -0
  32. camel/toolkits/non_visual_browser_toolkit/browser_non_visual_toolkit.py +363 -0
  33. camel/toolkits/non_visual_browser_toolkit/nv_browser_session.py +175 -0
  34. camel/toolkits/non_visual_browser_toolkit/snapshot.js +188 -0
  35. camel/toolkits/non_visual_browser_toolkit/snapshot.py +164 -0
  36. camel/toolkits/pptx_toolkit.py +4 -4
  37. camel/toolkits/sympy_toolkit.py +1 -1
  38. camel/toolkits/task_planning_toolkit.py +3 -3
  39. camel/toolkits/thinking_toolkit.py +1 -1
  40. {camel_ai-0.2.67.dist-info → camel_ai-0.2.69a1.dist-info}/METADATA +2 -1
  41. {camel_ai-0.2.67.dist-info → camel_ai-0.2.69a1.dist-info}/RECORD +43 -35
  42. {camel_ai-0.2.67.dist-info → camel_ai-0.2.69a1.dist-info}/WHEEL +0 -0
  43. {camel_ai-0.2.67.dist-info → camel_ai-0.2.69a1.dist-info}/licenses/LICENSE +0 -0
camel/__init__.py CHANGED
@@ -14,7 +14,7 @@
 
 from camel.logger import disable_logging, enable_logging, set_log_level
 
-__version__ = '0.2.67'
+__version__ = '0.2.69a1'
 
 __all__ = [
     '__version__',

camel/agents/chat_agent.py CHANGED
@@ -54,7 +54,11 @@ from camel.memories import (
     MemoryRecord,
     ScoreBasedContextCreator,
 )
-from camel.messages import BaseMessage, FunctionCallingMessage, OpenAIMessage
+from camel.messages import (
+    BaseMessage,
+    FunctionCallingMessage,
+    OpenAIMessage,
+)
 from camel.models import (
     BaseModelBackend,
     ModelFactory,
@@ -512,27 +516,172 @@ class ChatAgent(BaseAgent):
     ) -> None:
         r"""Updates the agent memory with a new message.
 
+        If the single *message* exceeds the model's context window, it will
+        be **automatically split into multiple smaller chunks** before being
+        written into memory. This prevents later failures in
+        `ScoreBasedContextCreator` where an over-sized message cannot fit
+        into the available token budget at all.
+
+        This slicing logic handles both regular text messages (in the
+        `content` field) and long tool call results (in the `result` field of
+        a `FunctionCallingMessage`).
+
         Args:
             message (BaseMessage): The new message to add to the stored
                 messages.
             role (OpenAIBackendRole): The backend role type.
             timestamp (Optional[float], optional): Custom timestamp for the
-                memory record. If None, current timestamp will be used.
+                memory record. If `None`, the current time will be used.
                 (default: :obj:`None`)
+                    (default: obj:`None`)
         """
+        import math
         import time
+        import uuid as _uuid
+
+        # 1. Helper to write a record to memory
+        def _write_single_record(
+            message: BaseMessage, role: OpenAIBackendRole, timestamp: float
+        ):
+            self.memory.write_record(
+                MemoryRecord(
+                    message=message,
+                    role_at_backend=role,
+                    timestamp=timestamp,
+                    agent_id=self.agent_id,
+                )
+            )
+
+        base_ts = (
+            timestamp
+            if timestamp is not None
+            else time.time_ns() / 1_000_000_000
+        )
+
+        # 2. Get token handling utilities, fallback if unavailable
+        try:
+            context_creator = self.memory.get_context_creator()
+            token_counter = context_creator.token_counter
+            token_limit = context_creator.token_limit
+        except AttributeError:
+            _write_single_record(message, role, base_ts)
+            return
 
-        self.memory.write_record(
-            MemoryRecord(
-                message=message,
-                role_at_backend=role,
-                timestamp=timestamp
-                if timestamp is not None
-                else time.time_ns() / 1_000_000_000,  # Nanosecond precision
-                agent_id=self.agent_id,
+        # 3. Check if slicing is necessary
+        try:
+            current_tokens = token_counter.count_tokens_from_messages(
+                [message.to_openai_message(role)]
+            )
+            _, ctx_tokens = self.memory.get_context()
+            remaining_budget = max(0, token_limit - ctx_tokens)
+
+            if current_tokens <= remaining_budget:
+                _write_single_record(message, role, base_ts)
+                return
+        except Exception as e:
+            logger.warning(
+                f"Token calculation failed before chunking, "
+                f"writing message as-is. Error: {e}"
             )
+            _write_single_record(message, role, base_ts)
+            return
+
+        # 4. Perform slicing
+        logger.warning(
+            f"Message with {current_tokens} tokens exceeds remaining budget "
+            f"of {remaining_budget}. Slicing into smaller chunks."
         )
 
+        text_to_chunk: Optional[str] = None
+        is_function_result = False
+
+        if isinstance(message, FunctionCallingMessage) and isinstance(
+            message.result, str
+        ):
+            text_to_chunk = message.result
+            is_function_result = True
+        elif isinstance(message.content, str):
+            text_to_chunk = message.content
+
+        if not text_to_chunk or not text_to_chunk.strip():
+            _write_single_record(message, role, base_ts)
+            return
+        # Encode the entire text to get a list of all token IDs
+        try:
+            all_token_ids = token_counter.encode(text_to_chunk)
+        except Exception as e:
+            logger.error(f"Failed to encode text for chunking: {e}")
+            _write_single_record(message, role, base_ts)  # Fallback
+            return
+
+        if not all_token_ids:
+            _write_single_record(message, role, base_ts)  # Nothing to chunk
+            return
+
+        # 1. Base chunk size: one-tenth of the smaller of (a) total token
+        #    limit and (b) current remaining budget. This prevents us from
+        #    creating chunks that are guaranteed to overflow the
+        #    immediate context window.
+        base_chunk_size = max(1, remaining_budget) // 10
+
+        # 2. Each chunk gets a textual prefix such as:
+        #    "[chunk 3/12 of a long message]\n"
+        #    The prefix itself consumes tokens, so if we do not subtract its
+        #    length the *total* tokens of the outgoing message (prefix + body)
+        #    can exceed the intended bound. We estimate the prefix length
+        #    with a representative example that is safely long enough for the
+        #    vast majority of cases (three-digit indices).
+        sample_prefix = "[chunk 1/1000 of a long message]\n"
+        prefix_token_len = len(token_counter.encode(sample_prefix))
+
+        # 3. The real capacity for the message body is therefore the base
+        #    chunk size minus the prefix length. Fallback to at least one
+        #    token to avoid zero or negative sizes.
+        chunk_body_limit = max(1, base_chunk_size - prefix_token_len)
+
+        # 4. Calculate how many chunks we will need with this body size.
+        num_chunks = math.ceil(len(all_token_ids) / chunk_body_limit)
+        group_id = str(_uuid.uuid4())
+
+        for i in range(num_chunks):
+            start_idx = i * chunk_body_limit
+            end_idx = start_idx + chunk_body_limit
+            chunk_token_ids = all_token_ids[start_idx:end_idx]
+
+            chunk_body = token_counter.decode(chunk_token_ids)
+
+            prefix = f"[chunk {i + 1}/{num_chunks} of a long message]\n"
+            new_body = prefix + chunk_body
+
+            if is_function_result and isinstance(
+                message, FunctionCallingMessage
+            ):
+                new_msg: BaseMessage = FunctionCallingMessage(
+                    role_name=message.role_name,
+                    role_type=message.role_type,
+                    meta_dict=message.meta_dict,
+                    content=message.content,
+                    func_name=message.func_name,
+                    args=message.args,
+                    result=new_body,
+                    tool_call_id=message.tool_call_id,
+                )
+            else:
+                new_msg = message.create_new_instance(new_body)
+
+            meta = (new_msg.meta_dict or {}).copy()
+            meta.update(
+                {
+                    "chunk_idx": i + 1,
+                    "chunk_total": num_chunks,
+                    "chunk_group_id": group_id,
+                }
+            )
+            new_msg.meta_dict = meta
+
+            # Increment timestamp slightly to maintain order
+            _write_single_record(new_msg, role, base_ts + i * 1e-6)
+
     def load_memory(self, memory: AgentMemory) -> None:
         r"""Load the provided memory into the agent.
 
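The chunk-budget arithmetic in the hunk above is easier to see in isolation. Below is a minimal sketch of the same calculation using a fake whitespace tokenizer; the `FakeCounter` class, the 400-token budget, and the sample text are all invented for illustration. Note that although the diff's first comment mentions the total token limit, the expression itself divides only the remaining budget by ten.

```python
import math

# Stand-in for the real token counter: a whitespace "tokenizer".
class FakeCounter:
    def encode(self, text):
        return text.split()

    def decode(self, tokens):
        return " ".join(tokens)

counter = FakeCounter()
remaining_budget = 400                        # tokens still free in the window
all_token_ids = counter.encode("tok " * 100)  # a 100-token "message"

# Mirror the diff's arithmetic: chunks are ~1/10 of the remaining budget,
# minus the tokens consumed by the "[chunk i/N ...]" prefix on each piece.
base_chunk_size = max(1, remaining_budget) // 10                       # -> 40
prefix_len = len(counter.encode("[chunk 1/1000 of a long message]\n"))  # -> 6
chunk_body_limit = max(1, base_chunk_size - prefix_len)                # -> 34

num_chunks = math.ceil(len(all_token_ids) / chunk_body_limit)          # -> 3
for i in range(num_chunks):
    body = counter.decode(
        all_token_ids[i * chunk_body_limit : (i + 1) * chunk_body_limit]
    )
    # Prints chunk sizes 34, 34, 32 for this sample input.
    print(f"[chunk {i + 1}/{num_chunks} of a long message] "
          f"{len(body.split())} tokens")
```

The `max(1, ...)` guards mean that even a tiny remaining budget still yields chunks of at least one body token each, so the write path never fails outright; it may simply produce many small records.
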
@@ -652,9 +801,19 @@ class ChatAgent(BaseAgent):
         r"""Initializes the stored messages list with the current system
         message.
         """
+        import time
+
         self.memory.clear()
+        # avoid UserWarning: The `ChatHistoryMemory` is empty.
         if self.system_message is not None:
-            self.update_memory(self.system_message, OpenAIBackendRole.SYSTEM)
+            self.memory.write_record(
+                MemoryRecord(
+                    message=self.system_message,
+                    role_at_backend=OpenAIBackendRole.SYSTEM,
+                    timestamp=time.time_ns() / 1_000_000_000,
+                    agent_id=self.agent_id,
+                )
+            )
 
     def record_message(self, message: BaseMessage) -> None:
         r"""Records the externally provided message into the agent memory as if

camel/configs/vllm_config.py CHANGED
@@ -90,6 +90,7 @@ class VLLMConfig(BaseConfig):
             most likely tokens to return at each token position, each with an
             associated log probability. `logprobs` must be set to `true` if
             this parameter is used. (default: :obj:`None`)
+        extra_body: Add additional JSON properties to the request. (default: :obj:`None`)
     """
 
     temperature: Optional[float] = None  # openai default: 1.0
@@ -105,6 +106,7 @@
     user: Optional[str] = None
     logprobs: Optional[bool] = None
     top_logprobs: Optional[int] = None
+    extra_body: Optional[dict] = None
 
 
 VLLM_API_PARAMS = {param for param in VLLMConfig.model_fields.keys()}
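Because `extra_body` is passed through as extra JSON properties on the request, it provides an escape hatch for vLLM server options that have no first-class config field. A minimal sketch (the `guided_choice` payload is a vLLM server extension used purely as an example, and `as_dict()` is the serialization helper camel configs inherit from `BaseConfig`):

```python
from camel.configs import VLLMConfig

config = VLLMConfig(
    temperature=0.0,
    # Forwarded verbatim to the vLLM server alongside standard params.
    extra_body={"guided_choice": ["positive", "negative"]},
)
print(config.as_dict()["extra_body"])
# {'guided_choice': ['positive', 'negative']}
```
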

camel/datagen/self_improving_cot.py CHANGED
@@ -116,7 +116,7 @@ class SelfImprovingCoTPipeline:
                 samples to be drawn using the rejection sampling
                 method, where samples are accepted or rejected based on
                 a predefined condition to achieve a desired distribution.
-                (default: :obj: `None`)
+                (default: :obj:`None`)
             evaluate_agent (Optional[ChatAgent]): The chat agent used for
                 evaluating reasoning traces. (default: :obj:`None`)
             reward_model (BaseRewardModel, optional): Model used to evaluate

camel/environments/__init__.py CHANGED
@@ -13,6 +13,13 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 from .models import Action, Environment, Observation, StepResult
 from .multi_step import MultiStepEnv
+from .rlcards_env import (
+    ActionExtractor,
+    BlackjackEnv,
+    DoudizhuEnv,
+    LeducHoldemEnv,
+    RLCardsEnv,
+)
 from .single_step import SingleStepEnv
 from .tic_tac_toe import Opponent, TicTacToeEnv
 
@@ -25,4 +32,9 @@ __all__ = [
     "StepResult",
     "TicTacToeEnv",
     "Opponent",
+    "RLCardsEnv",
+    "BlackjackEnv",
+    "LeducHoldemEnv",
+    "ActionExtractor",
+    "DoudizhuEnv",
 ]
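With the re-exports in place, the new environments are importable straight from `camel.environments`. A quick sketch (assuming the optional `rlcard` dependency is installed; the subclass relationship is implied by the naming, not stated in this diff):

```python
from camel.environments import (
    ActionExtractor,
    BlackjackEnv,
    DoudizhuEnv,
    LeducHoldemEnv,
    RLCardsEnv,
)

# Presumably RLCardsEnv is the shared base of the game-specific classes;
# this check is illustrative, not guaranteed by the diff.
for env_cls in (BlackjackEnv, DoudizhuEnv, LeducHoldemEnv):
    print(env_cls.__name__, issubclass(env_cls, RLCardsEnv))
```
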