camel-ai 0.2.72a5__py3-none-any.whl → 0.2.72a6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

camel/__init__.py CHANGED
@@ -14,7 +14,7 @@
 
 from camel.logger import disable_logging, enable_logging, set_log_level
 
-__version__ = '0.2.72a5'
+__version__ = '0.2.72a6'
 
 __all__ = [
     '__version__',
camel/agents/chat_agent.py CHANGED
@@ -2896,21 +2896,13 @@ class ChatAgent(BaseAgent):
                     status_response
                 ) in self._execute_tools_sync_with_status_accumulator(
                     accumulated_tool_calls,
-                    content_accumulator,
-                    step_token_usage,
                     tool_call_records,
                 ):
                     yield status_response
 
-                # Yield "Sending back result to model" status
+                # Log sending status instead of adding to content
                 if tool_call_records:
-                    sending_status = self._create_tool_status_response_with_accumulator(  # noqa: E501
-                        content_accumulator,
-                        "\n------\n\nSending back result to model\n\n",
-                        "tool_sending",
-                        step_token_usage,
-                    )
-                    yield sending_status
+                    logger.info("Sending back result to model")
 
                 # Record final message only if we have content AND no tool
                 # calls. If there are tool calls, _record_tool_calling
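Note that tool status is now logged rather than streamed, so consumers that watched for these status chunks in message content will no longer see them; the messages go to the camel logger at INFO/WARNING level instead. A minimal sketch for surfacing them, assuming the `camel.logger` helpers imported in the `camel/__init__.py` diff above (agent setup omitted):

    import logging

    from camel.logger import enable_logging, set_log_level

    # Surface the tool-status messages that this release moves from
    # streamed content to logging, e.g. "Calling function: ..." and
    # "Sending back result to model".
    enable_logging()
    set_log_level(logging.INFO)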
@@ -3008,15 +3000,13 @@ class ChatAgent(BaseAgent):
     def _execute_tools_sync_with_status_accumulator(
         self,
         accumulated_tool_calls: Dict[str, Any],
-        content_accumulator: StreamContentAccumulator,
-        step_token_usage: Dict[str, int],
         tool_call_records: List[ToolCallingRecord],
     ) -> Generator[ChatAgentResponse, None, None]:
         r"""Execute multiple tools synchronously with
         proper content accumulation, using threads+queue for
         non-blocking status streaming."""
 
-        def tool_worker(tool_func, args, result_queue, tool_call_data):
+        def tool_worker(result_queue, tool_call_data):
             try:
                 tool_call_record = self._execute_tool_from_stream_data(
                     tool_call_data
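The docstring still describes the threads+queue design: a worker thread runs the tool and hands its result back through a queue while the generator stays free to yield status responses, and the timeout handling in the next hunk follows the same join/is_alive shape. A self-contained sketch of that pattern with stand-in names (not the library's internals):

    import queue
    import threading

    def tool_worker(result_queue: queue.Queue, tool_call_data: dict) -> None:
        # Run the (possibly slow) tool and hand the result back via the queue.
        result_queue.put(tool_call_data["x"] * 2)

    result_queue: queue.Queue = queue.Queue()
    thread = threading.Thread(
        target=tool_worker, args=(result_queue, {"x": 21}), daemon=True
    )
    thread.start()

    thread.join(timeout=5.0)  # wait for the tool, but only up to the timeout
    if thread.is_alive():
        # Mirrors the logger.warning branch below: detach and move on.
        print("tool timed out; detaching worker thread")
    else:
        print(result_queue.get_nowait())  # 42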
@@ -3052,36 +3042,22 @@
             )
             thread.start()
 
-            status_message = (
-                f"\nCalling function: {function_name} "
-                f"with arguments:\n{args}\n"
-            )
-            status_status = self._create_tool_status_response_with_accumulator(
-                content_accumulator,
-                status_message,
-                "tool_calling",
-                step_token_usage,
+            # Log debug info instead of adding to content
+            logger.info(
+                f"Calling function: {function_name} with arguments: {args}"
             )
-            yield status_status
+
             # wait for tool thread to finish with optional timeout
             thread.join(self.tool_execution_timeout)
 
             # If timeout occurred, mark as error and continue
             if thread.is_alive():
-                timeout_msg = (
-                    f"\nFunction '{function_name}' timed out after "
-                    f"{self.tool_execution_timeout} seconds.\n---------\n"
-                )
-                timeout_status = (
-                    self._create_tool_status_response_with_accumulator(
-                        content_accumulator,
-                        timeout_msg,
-                        "tool_timeout",
-                        step_token_usage,
-                    )
+                # Log timeout info instead of adding to content
+                logger.warning(
+                    f"Function '{function_name}' timed out after "
+                    f"{self.tool_execution_timeout} seconds"
                 )
-                yield timeout_status
-                logger.error(timeout_msg.strip())
+
                 # Detach thread (it may still finish later). Skip recording.
                 continue
 
@@ -3091,23 +3067,17 @@
                 tool_call_records.append(tool_call_record)
                 raw_result = tool_call_record.result
                 result_str = str(raw_result)
-                status_message = (
-                    f"\nFunction output: {result_str}\n---------\n"
-                )
-                output_status = (
-                    self._create_tool_status_response_with_accumulator(
-                        content_accumulator,
-                        status_message,
-                        "tool_output",
-                        step_token_usage,
-                        [tool_call_record],
-                    )
-                )
-                yield output_status
+
+                # Log debug info instead of adding to content
+                logger.info(f"Function output: {result_str}")
             else:
                 # Error already logged
                 continue
 
+        # Ensure this function remains a generator (required by type signature)
+        return
+        yield  # This line is never reached but makes this a generator function
+
     def _execute_tool_from_stream_data(
         self, tool_call_data: Dict[str, Any]
     ) -> Optional[ToolCallingRecord]:
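The `return` followed by an unreachable `yield` is the standard idiom for keeping a function a generator once every real `yield` has been removed: Python decides at compile time that any `def` containing `yield` produces a generator, so callers can keep iterating without special-casing. A minimal illustration:

    from typing import Generator

    def drained() -> Generator[int, None, None]:
        return
        yield  # unreachable, but makes this a generator function

    print(list(drained()))  # [] -- callers can still iterate normally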
@@ -3614,15 +3584,9 @@
                 ):
                     yield status_response
 
-                # Yield "Sending back result to model" status
+                # Log sending status instead of adding to content
                 if tool_call_records:
-                    sending_status = self._create_tool_status_response_with_accumulator(  # noqa: E501
-                        content_accumulator,
-                        "\n------\n\nSending back result to model\n\n",
-                        "tool_sending",
-                        step_token_usage,
-                    )
-                    yield sending_status
+                    logger.info("Sending back result to model")
 
                 # Record final message only if we have content AND no tool
                 # calls. If there are tool calls, _record_tool_calling
@@ -3669,21 +3633,10 @@
             except json.JSONDecodeError:
                 args = tool_call_data['function']['arguments']
 
-            status_message = (
-                f"\nCalling function: {function_name} "
-                f"with arguments:\n{args}\n"
-            )
-
-            # Immediately yield "Calling function" status
-            calling_status = (
-                self._create_tool_status_response_with_accumulator(
-                    content_accumulator,
-                    status_message,
-                    "tool_calling",
-                    step_token_usage,
-                )
+            # Log debug info instead of adding to content
+            logger.info(
+                f"Calling function: {function_name} with arguments: {args}"
             )
-            yield calling_status
 
             # Start tool execution asynchronously (non-blocking)
             if self.tool_execution_timeout is not None:
@@ -3716,80 +3669,25 @@
                     # Create output status message
                     raw_result = tool_call_record.result
                     result_str = str(raw_result)
-                    status_message = (
-                        f"\nFunction output: {result_str}\n---------\n"
-                    )
 
-                    # Yield "Function output" status as soon as this
-                    # tool completes
-                    output_status = (
-                        self._create_tool_status_response_with_accumulator(
-                            content_accumulator,
-                            status_message,
-                            "tool_output",
-                            step_token_usage,
-                            [tool_call_record],
-                        )
-                    )
-                    yield output_status
+                    # Log debug info instead of adding to content
+                    logger.info(f"Function output: {result_str}")
 
                 except Exception as e:
                     if isinstance(e, asyncio.TimeoutError):
-                        timeout_msg = (
-                            f"\nFunction timed out after "
-                            f"{self.tool_execution_timeout} seconds.\n"
-                            f"---------\n"
-                        )
-                        timeout_status = (
-                            self._create_tool_status_response_with_accumulator(
-                                content_accumulator,
-                                timeout_msg,
-                                "tool_timeout",
-                                step_token_usage,
-                            )
+                        # Log timeout info instead of adding to content
+                        logger.warning(
+                            f"Function timed out after "
+                            f"{self.tool_execution_timeout} seconds"
                         )
-                        yield timeout_status
-                        logger.error("Async tool execution timeout")
                     else:
                         logger.error(f"Error in async tool execution: {e}")
                         continue
 
-    def _create_tool_status_response_with_accumulator(
-        self,
-        accumulator: StreamContentAccumulator,
-        status_message: str,
-        status_type: str,
-        step_token_usage: Dict[str, int],
-        tool_calls: Optional[List[ToolCallingRecord]] = None,
-    ) -> ChatAgentResponse:
-        r"""Create a tool status response using content accumulator."""
-
-        # Add this status message to accumulator and get full content
-        accumulator.add_tool_status(status_message)
-        full_content = accumulator.get_full_content()
-
-        message = BaseMessage(
-            role_name=self.role_name,
-            role_type=self.role_type,
-            meta_dict={},
-            content=full_content,
-        )
-
-        return ChatAgentResponse(
-            msgs=[message],
-            terminated=False,
-            info={
-                "id": "",
-                "usage": step_token_usage.copy(),
-                "finish_reasons": [status_type],
-                "num_tokens": self._get_token_count(full_content),
-                "tool_calls": tool_calls or [],
-                "external_tool_requests": None,
-                "streaming": True,
-                "tool_status": status_type,
-                "partial": True,
-            },
-        )
+        # Ensure this function remains an async generator
+        return
+        # This line is never reached but makes this an async generator function
+        yield
 
     def _create_streaming_response_with_accumulator(
         self,
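The async branch catches `asyncio.TimeoutError`, the exception `asyncio.wait_for` raises when an awaited task overruns `tool_execution_timeout`. A self-contained sketch of that pattern with an illustrative tool (not the library's code):

    import asyncio

    async def slow_tool() -> str:
        await asyncio.sleep(10)
        return "done"

    async def main() -> None:
        try:
            result = await asyncio.wait_for(slow_tool(), timeout=1.0)
            print(f"Function output: {result}")
        except asyncio.TimeoutError:
            # Mirrors the logger.warning branch above.
            print("Function timed out after 1.0 seconds")

    asyncio.run(main())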
camel/models/gemini_model.py CHANGED
@@ -243,7 +243,7 @@ class GeminiModel(OpenAICompatibleModel):
             function_dict = tool.get('function', {})
             function_dict.pop("strict", None)
 
-            # Process parameters to remove anyOf
+            # Process parameters to remove anyOf and handle enum/format
            if 'parameters' in function_dict:
                 params = function_dict['parameters']
                 if 'properties' in params:
@@ -260,6 +260,20 @@ class GeminiModel(OpenAICompatibleModel):
                                 'description'
                             ] = prop_value['description']
 
+                        # Handle enum and format restrictions for Gemini
+                        # API enum: only allowed for string type
+                        if prop_value.get('type') != 'string':
+                            prop_value.pop('enum', None)
+
+                        # format: only allowed for string, integer, and
+                        # number types
+                        if prop_value.get('type') not in [
+                            'string',
+                            'integer',
+                            'number',
+                        ]:
+                            prop_value.pop('format', None)
+
         request_config["tools"] = tools
 
         return self._client.chat.completions.create(
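The net effect of the new block: `enum` survives only on string-typed properties, and `format` only on string, integer, or number. A quick demonstration of the same logic outside the class, on hypothetical property dicts:

    properties = {
        "mode": {"type": "string", "enum": ["fast", "slow"], "format": "date-time"},
        "count": {"type": "integer", "enum": [1, 2, 3], "format": "int32"},
        "flags": {"type": "array", "format": "custom"},
    }

    for prop_value in properties.values():
        if prop_value.get('type') != 'string':
            prop_value.pop('enum', None)
        if prop_value.get('type') not in ['string', 'integer', 'number']:
            prop_value.pop('format', None)

    # "mode" keeps enum and format; "count" loses enum but keeps format;
    # "flags" loses format.
    print(properties)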
@@ -283,7 +297,7 @@ class GeminiModel(OpenAICompatibleModel):
             function_dict = tool.get('function', {})
             function_dict.pop("strict", None)
 
-            # Process parameters to remove anyOf
+            # Process parameters to remove anyOf and handle enum/format
             if 'parameters' in function_dict:
                 params = function_dict['parameters']
                 if 'properties' in params:
@@ -300,6 +314,20 @@ class GeminiModel(OpenAICompatibleModel):
                                 'description'
                             ] = prop_value['description']
 
+                        # Handle enum and format restrictions for Gemini
+                        # API enum: only allowed for string type
+                        if prop_value.get('type') != 'string':
+                            prop_value.pop('enum', None)
+
+                        # format: only allowed for string, integer, and
+                        # number types
+                        if prop_value.get('type') not in [
+                            'string',
+                            'integer',
+                            'number',
+                        ]:
+                            prop_value.pop('format', None)
+
         request_config["tools"] = tools
 
         return await self._async_client.chat.completions.create(
{camel_ai-0.2.72a5.dist-info → camel_ai-0.2.72a6.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: camel-ai
-Version: 0.2.72a5
+Version: 0.2.72a6
 Summary: Communicative Agents for AI Society Study
 Project-URL: Homepage, https://www.camel-ai.org/
 Project-URL: Repository, https://github.com/camel-ai/camel
{camel_ai-0.2.72a5.dist-info → camel_ai-0.2.72a6.dist-info}/RECORD RENAMED
@@ -1,4 +1,4 @@
-camel/__init__.py,sha256=1CBJAF1mkpASoGuIu5or-PNvl4ZJZ11EMC1iSaJPBek,901
+camel/__init__.py,sha256=yyrDoeZVKnsVRSVn6NlNEv--B8opSk3W-4W__oZRROs,901
 camel/generators.py,sha256=JRqj9_m1PF4qT6UtybzTQ-KBT9MJQt18OAAYvQ_fr2o,13844
 camel/human.py,sha256=Xg8x1cS5KK4bQ1SDByiHZnzsRpvRP-KZViNvmu38xo4,5475
 camel/logger.py,sha256=WgEwael_eT6D-lVAKHpKIpwXSTjvLbny5jbV1Ab8lnA,5760
@@ -7,7 +7,7 @@ camel/agents/__init__.py,sha256=64weKqdvmpZcGWyVkO-OKASAmVUdrQjv60JApgPk_SA,1644
 camel/agents/_types.py,sha256=MeFZzay2kJA6evALQ-MbBTKW-0lu_0wBuKsxzH_4gWI,1552
 camel/agents/_utils.py,sha256=AR7Qqgbkmn4X2edYUQf1rdksGUyV5hm3iK1z-Dn0Mcg,6266
 camel/agents/base.py,sha256=c4bJYL3G3Z41SaFdMPMn8ZjLdFiFaVOFO6EQIfuCVR8,1124
-camel/agents/chat_agent.py,sha256=IAd0uY42DVsP8rV6XcgTO3G_iUREGI0NAq7KqFnZPHY,162638
+camel/agents/chat_agent.py,sha256=iA_VXclenw5EDLjsabP7XKrbb-9Ofvg7VydaBY7NtwY,158402
 camel/agents/critic_agent.py,sha256=L6cTbYjyZB0DCa51tQ6LZLA6my8kHLC4nktHySH78H4,10433
 camel/agents/deductive_reasoner_agent.py,sha256=6BZGaq1hR6hKJuQtOfoYQnk_AkZpw_Mr7mUy2MspQgs,13540
 camel/agents/embodied_agent.py,sha256=XBxBu5ZMmSJ4B2U3Z7SMwvLlgp6yNpaBe8HNQmY9CZA,7536
@@ -184,7 +184,7 @@ camel/models/cohere_model.py,sha256=6KvYnYxoBZ8kLMmRp5wcJisY65KhCTl6kiG1W3xOkJQ,
 camel/models/crynux_model.py,sha256=leEiFz1RcRtVJLJvYt_Ydyg5jkSk3VEJphgmHFz67bw,4118
 camel/models/deepseek_model.py,sha256=MXOdmpFQFbxpgh5DiTETzx3qDFUkKiFjnEjTMDeWk2k,11015
 camel/models/fish_audio_model.py,sha256=RCwORRIdCbjZXWWjjctpksPI2DnS0b68JjxunHBQ1xk,5981
-camel/models/gemini_model.py,sha256=DuOLXojfAPwIAswIz9_B6Qa9GDaGalzr1k4Pdw_TFvI,12827
+camel/models/gemini_model.py,sha256=tXElVatzgk66NirfzXxaYiene-3Iv26VKrvF2DAv_1Y,14231
 camel/models/groq_model.py,sha256=TK3zlvCy1-nYqLJrVSfIcJGKm5hKVp_k91cFcI420N0,4091
 camel/models/internlm_model.py,sha256=sbLEttl3PTaNlSU-5N047TQLe2-DQHDyFD86I26KbPQ,4691
 camel/models/litellm_model.py,sha256=IEYVfzk1u32oTkwSHPTvoSojLG1aai3Caw8ri5lzF8Y,8485
@@ -447,7 +447,7 @@ camel/verifiers/math_verifier.py,sha256=tA1D4S0sm8nsWISevxSN0hvSVtIUpqmJhzqfbuMo
 camel/verifiers/models.py,sha256=GdxYPr7UxNrR1577yW4kyroRcLGfd-H1GXgv8potDWU,2471
 camel/verifiers/physics_verifier.py,sha256=c1grrRddcrVN7szkxhv2QirwY9viIRSITWeWFF5HmLs,30187
 camel/verifiers/python_verifier.py,sha256=ogTz77wODfEcDN4tMVtiSkRQyoiZbHPY2fKybn59lHw,20558
-camel_ai-0.2.72a5.dist-info/METADATA,sha256=Wz9MWyKAIycL-cbi72RKL7fa0PGkah967IzH03-ag_o,50002
-camel_ai-0.2.72a5.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-camel_ai-0.2.72a5.dist-info/licenses/LICENSE,sha256=id0nB2my5kG0xXeimIu5zZrbHLS6EQvxvkKkzIHaT2k,11343
-camel_ai-0.2.72a5.dist-info/RECORD,,
+camel_ai-0.2.72a6.dist-info/METADATA,sha256=Zl8v1N6JRu8P11p3eRCvN2Pu9tU3lF8O9DaLLdOPzrk,50002
+camel_ai-0.2.72a6.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+camel_ai-0.2.72a6.dist-info/licenses/LICENSE,sha256=id0nB2my5kG0xXeimIu5zZrbHLS6EQvxvkKkzIHaT2k,11343
+camel_ai-0.2.72a6.dist-info/RECORD,,
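The RECORD hashes above follow the wheel convention (PEP 376/427): `sha256=` plus the urlsafe-base64 SHA-256 digest with `=` padding stripped. A sketch for recomputing one entry, e.g. to confirm which files actually changed between the two releases:

    import base64
    import hashlib

    def record_hash(path: str) -> str:
        # Matches the "sha256=<digest>,<size>" format used in RECORD
        # (minus the size field).
        with open(path, "rb") as f:
            digest = hashlib.sha256(f.read()).digest()
        return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode()

    # For the 0.2.72a6 wheel this should reproduce the RECORD line above:
    # record_hash("camel/__init__.py")
    # -> "sha256=yyrDoeZVKnsVRSVn6NlNEv--B8opSk3W-4W__oZRROs"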