camel-ai 0.2.76a3__py3-none-any.whl → 0.2.76a5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of camel-ai might be problematic.

Files changed (36)
  1. camel/__init__.py +1 -1
  2. camel/agents/chat_agent.py +276 -21
  3. camel/configs/__init__.py +3 -0
  4. camel/configs/cometapi_config.py +104 -0
  5. camel/interpreters/docker/Dockerfile +3 -12
  6. camel/memories/blocks/chat_history_block.py +4 -1
  7. camel/memories/records.py +52 -8
  8. camel/messages/base.py +1 -1
  9. camel/models/__init__.py +2 -0
  10. camel/models/cometapi_model.py +83 -0
  11. camel/models/model_factory.py +2 -0
  12. camel/retrievers/auto_retriever.py +1 -0
  13. camel/societies/workforce/workforce.py +60 -9
  14. camel/storages/key_value_storages/json.py +15 -2
  15. camel/storages/vectordb_storages/tidb.py +8 -6
  16. camel/toolkits/__init__.py +4 -0
  17. camel/toolkits/dingtalk.py +1135 -0
  18. camel/toolkits/edgeone_pages_mcp_toolkit.py +11 -31
  19. camel/toolkits/google_drive_mcp_toolkit.py +12 -31
  20. camel/toolkits/message_integration.py +3 -0
  21. camel/toolkits/notion_mcp_toolkit.py +16 -26
  22. camel/toolkits/origene_mcp_toolkit.py +8 -49
  23. camel/toolkits/playwright_mcp_toolkit.py +12 -31
  24. camel/toolkits/resend_toolkit.py +168 -0
  25. camel/toolkits/terminal_toolkit/__init__.py +18 -0
  26. camel/toolkits/terminal_toolkit/terminal_toolkit.py +909 -0
  27. camel/toolkits/terminal_toolkit/utils.py +580 -0
  28. camel/types/enums.py +109 -0
  29. camel/types/unified_model_type.py +5 -0
  30. camel/utils/commons.py +2 -0
  31. camel/utils/context_utils.py +52 -0
  32. {camel_ai-0.2.76a3.dist-info → camel_ai-0.2.76a5.dist-info}/METADATA +25 -6
  33. {camel_ai-0.2.76a3.dist-info → camel_ai-0.2.76a5.dist-info}/RECORD +35 -29
  34. camel/toolkits/terminal_toolkit.py +0 -1798
  35. {camel_ai-0.2.76a3.dist-info → camel_ai-0.2.76a5.dist-info}/WHEEL +0 -0
  36. {camel_ai-0.2.76a3.dist-info → camel_ai-0.2.76a5.dist-info}/licenses/LICENSE +0 -0
camel/__init__.py CHANGED
@@ -14,7 +14,7 @@
 
 from camel.logger import disable_logging, enable_logging, set_log_level
 
-__version__ = '0.2.76a3'
+__version__ = '0.2.76a5'
 
 __all__ = [
     '__version__',
camel/agents/chat_agent.py CHANGED
@@ -23,6 +23,7 @@ import textwrap
 import threading
 import time
 import uuid
+from datetime import datetime
 from pathlib import Path
 from typing import (
     TYPE_CHECKING,
@@ -93,6 +94,7 @@ from camel.utils import (
     model_from_json_schema,
 )
 from camel.utils.commons import dependencies_required
+from camel.utils.context_utils import ContextUtility
 
 if TYPE_CHECKING:
     from camel.terminators import ResponseTerminator
@@ -142,7 +144,7 @@ class StreamContentAccumulator:
 
     def __init__(self):
        self.base_content = ""  # Content before tool calls
-        self.current_content = ""  # Current streaming content
+        self.current_content = []  # Accumulated streaming fragments
        self.tool_status_messages = []  # Accumulated tool status messages
 
    def set_base_content(self, content: str):
@@ -151,7 +153,7 @@ class StreamContentAccumulator:
 
     def add_streaming_content(self, new_content: str):
         r"""Add new streaming content."""
-        self.current_content += new_content
+        self.current_content.append(new_content)
 
     def add_tool_status(self, status_message: str):
         r"""Add a tool status message."""
@@ -160,16 +162,18 @@ class StreamContentAccumulator:
     def get_full_content(self) -> str:
         r"""Get the complete accumulated content."""
         tool_messages = "".join(self.tool_status_messages)
-        return self.base_content + tool_messages + self.current_content
+        current = "".join(self.current_content)
+        return self.base_content + tool_messages + current
 
     def get_content_with_new_status(self, status_message: str) -> str:
         r"""Get content with a new status message appended."""
         tool_messages = "".join([*self.tool_status_messages, status_message])
-        return self.base_content + tool_messages + self.current_content
+        current = "".join(self.current_content)
+        return self.base_content + tool_messages + current
 
     def reset_streaming_content(self):
         r"""Reset only the streaming content, keep base and tool status."""
-        self.current_content = ""
+        self.current_content = []
 
 
 class StreamingChatAgentResponse:
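
Why the accumulator now collects fragments in a list: repeated str += copies the whole buffer on every chunk, so n chunks cost O(n^2) character copies, while list appends plus a single join at read time stay linear. A minimal sketch of the idea (illustrative, not from the diff):

fragments = []
for chunk in ("Hello", ", ", "stream"):
    fragments.append(chunk)  # O(1) amortized per chunk
assert "".join(fragments) == "Hello, stream"  # one O(n) pass at read time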
@@ -397,6 +401,10 @@ class ChatAgent(BaseAgent):
         step_timeout (Optional[float], optional): Timeout in seconds for the
             entire step operation. If None, no timeout is applied.
             (default: :obj:`None`)
+        stream_accumulate (bool, optional): When True, partial streaming
+            updates return accumulated content (current behavior). When False,
+            partial updates return only the incremental delta. (default:
+            :obj:`True`)
     """
 
     def __init__(
@@ -440,6 +448,7 @@ class ChatAgent(BaseAgent):
         retry_attempts: int = 3,
         retry_delay: float = 1.0,
         step_timeout: Optional[float] = None,
+        stream_accumulate: bool = True,
     ) -> None:
         if isinstance(model, ModelManager):
             self.model_backend = model
@@ -528,6 +537,9 @@ class ChatAgent(BaseAgent):
         self.retry_attempts = max(1, retry_attempts)
         self.retry_delay = max(0.0, retry_delay)
         self.step_timeout = step_timeout
+        self._context_utility: Optional[ContextUtility] = None
+        self._context_summary_agent: Optional["ChatAgent"] = None
+        self.stream_accumulate = stream_accumulate
 
     def reset(self):
         r"""Resets the :obj:`ChatAgent` to its initial state."""
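
A hedged usage sketch of the new stream_accumulate flag, assuming a model backend configured for streaming; with stream_accumulate=False each partial response carries only the newest delta, so the caller reassembles the text:

from camel.agents import ChatAgent

agent = ChatAgent(
    "You are a concise assistant.",
    stream_accumulate=False,  # partial updates carry deltas, not full text
)

chunks = []
for partial in agent.step("Say hello in five words."):  # streaming iterator
    chunks.append(partial.msgs[0].content)  # incremental delta only
print("".join(chunks))  # reassembled complete reply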
@@ -1034,6 +1046,163 @@ class ChatAgent(BaseAgent):
         json_store.save(to_save)
         logger.info(f"Memory saved to {path}")
 
+    def summarize(
+        self,
+        filename: Optional[str] = None,
+        summary_prompt: Optional[str] = None,
+        working_directory: Optional[Union[str, Path]] = None,
+    ) -> Dict[str, Any]:
+        r"""Summarize the agent's current conversation context and persist it
+        to a markdown file.
+
+        Args:
+            filename (Optional[str]): The base filename (without extension) to
+                use for the markdown file. Defaults to a timestamped name when
+                not provided.
+            summary_prompt (Optional[str]): Custom prompt for the summarizer.
+                When omitted, a default prompt highlighting key decisions,
+                action items, and open questions is used.
+            working_directory (Optional[str|Path]): Optional directory to save
+                the markdown summary file. If provided, overrides the default
+                directory used by ContextUtility.
+
+        Returns:
+            Dict[str, Any]: A dictionary containing the summary text, file
+                path, and status message.
+        """
+
+        result: Dict[str, Any] = {
+            "summary": "",
+            "file_path": None,
+            "status": "",
+        }
+
+        try:
+            if self._context_utility is None:
+                if working_directory is not None:
+                    self._context_utility = ContextUtility(
+                        working_directory=str(working_directory)
+                    )
+                else:
+                    self._context_utility = ContextUtility()
+
+            # Get conversation directly from agent's memory
+            messages, _ = self.memory.get_context()
+
+            if not messages:
+                status_message = (
+                    "No conversation context available to summarize."
+                )
+                result["status"] = status_message
+                return result
+
+            # Convert messages to conversation text
+            conversation_lines = []
+            for message in messages:
+                role = message.get('role', 'unknown')
+                content = message.get('content', '')
+                if content:
+                    conversation_lines.append(f"{role}: {content}")
+
+            conversation_text = "\n".join(conversation_lines).strip()
+
+            if not conversation_text:
+                status_message = (
+                    "Conversation context is empty; skipping summary."
+                )
+                result["status"] = status_message
+                return result
+
+            if self._context_summary_agent is None:
+                self._context_summary_agent = ChatAgent(
+                    system_message=(
+                        "You are a helpful assistant that summarizes "
+                        "conversations into concise markdown bullet lists."
+                    ),
+                    model=self.model_backend,
+                    agent_id=f"{self.agent_id}_context_summarizer",
+                )
+            else:
+                self._context_summary_agent.reset()
+
+            if summary_prompt:
+                prompt_text = (
+                    f"{summary_prompt.rstrip()}\n\n"
+                    f"Context information:\n{conversation_text}"
+                )
+            else:
+                prompt_text = (
+                    "Summarize the context information in concise markdown "
+                    "bullet points highlighting key decisions, action items.\n"
+                    f"Context information:\n{conversation_text}"
+                )
+
+            try:
+                response = self._context_summary_agent.step(prompt_text)
+            except Exception as step_exc:
+                error_message = (
+                    f"Failed to generate summary using model: {step_exc}"
+                )
+                logger.error(error_message)
+                result["status"] = error_message
+                return result
+
+            if not response.msgs:
+                status_message = (
+                    "Failed to generate summary from model response."
+                )
+                result["status"] = status_message
+                return result
+
+            summary_content = response.msgs[-1].content.strip()
+            if not summary_content:
+                status_message = "Generated summary is empty."
+                result["status"] = status_message
+                return result
+
+            base_filename = (
+                filename
+                if filename
+                else f"context_summary_{datetime.now().strftime('%Y%m%d_%H%M%S')}"  # noqa: E501
+            )
+            base_filename = Path(base_filename).with_suffix("").name
+
+            metadata = self._context_utility.get_session_metadata()
+            metadata.update(
+                {
+                    "agent_id": self.agent_id,
+                    "message_count": len(messages),
+                }
+            )
+
+            save_status = self._context_utility.save_markdown_file(
+                base_filename,
+                summary_content,
+                title="Conversation Summary",
+                metadata=metadata,
+            )
+
+            file_path = (
+                self._context_utility.get_working_directory()
+                / f"{base_filename}.md"
+            )
+
+            result.update(
+                {
+                    "summary": summary_content,
+                    "file_path": str(file_path),
+                    "status": save_status,
+                }
+            )
+            logger.info("Conversation summary saved to %s", file_path)
+            return result
+
+        except Exception as exc:
+            error_message = f"Failed to summarize conversation context: {exc}"
+            logger.error(error_message)
+            result["status"] = error_message
+            return result
+
     def clear_memory(self) -> None:
         r"""Clear the agent's memory and reset to initial state.
 
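
A minimal usage sketch of the new summarize() method (assuming a default model backend; the filename and directory are illustrative):

agent = ChatAgent("You are a planning assistant.")
agent.step("We agreed to ship v2 on Friday; Alice owns the release notes.")

result = agent.summarize(
    filename="kickoff_notes",         # saved as kickoff_notes.md
    working_directory="./summaries",  # illustrative output directory
)
print(result["status"])     # save status reported by ContextUtility
print(result["file_path"])  # e.g. ./summaries/kickoff_notes.md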
@@ -1239,6 +1408,35 @@ class ChatAgent(BaseAgent):
         # and True to indicate we used prompt formatting
         return modified_message, None, True
 
+    def _is_called_from_registered_toolkit(self) -> bool:
+        r"""Check if current step/astep call originates from a
+        RegisteredAgentToolkit.
+
+        This method uses stack inspection to detect if the current call
+        is originating from a toolkit that inherits from
+        RegisteredAgentToolkit. When detected, tools should be disabled to
+        prevent recursive calls.
+
+        Returns:
+            bool: True if called from a RegisteredAgentToolkit, False
+                otherwise.
+        """
+        import inspect
+
+        from camel.toolkits.base import RegisteredAgentToolkit
+
+        try:
+            for frame_info in inspect.stack():
+                frame_locals = frame_info.frame.f_locals
+                if 'self' in frame_locals:
+                    caller_self = frame_locals['self']
+                    if isinstance(caller_self, RegisteredAgentToolkit):
+                        return True
+
+        except Exception:
+            return False
+
+        return False
+
     def _apply_prompt_based_parsing(
         self,
         response: ModelResponse,
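
The detection above walks caller frames with inspect.stack() and tests each frame's local self. A self-contained sketch of the same pattern, using a stand-in class rather than RegisteredAgentToolkit:

import inspect

class SentinelToolkit:
    # Stand-in for RegisteredAgentToolkit in this sketch.
    def run(self):
        return called_from_sentinel()

def called_from_sentinel() -> bool:
    # Any caller frame whose local `self` is a SentinelToolkit means we
    # were (transitively) invoked by the toolkit.
    for frame_info in inspect.stack():
        maybe_self = frame_info.frame.f_locals.get('self')
        if isinstance(maybe_self, SentinelToolkit):
            return True
    return False

assert SentinelToolkit().run() is True
assert called_from_sentinel() is False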
@@ -1432,6 +1630,10 @@
         except ImportError:
             pass  # Langfuse not available
 
+        # Check if this call is from a RegisteredAgentToolkit to prevent tool
+        # use
+        disable_tools = self._is_called_from_registered_toolkit()
+
         # Handle response format compatibility with non-strict tools
         original_response_format = response_format
         input_message, response_format, used_prompt_formatting = (
@@ -1479,7 +1681,9 @@
                 num_tokens=num_tokens,
                 current_iteration=iteration_count,
                 response_format=response_format,
-                tool_schemas=self._get_full_tool_schemas(),
+                tool_schemas=[]
+                if disable_tools
+                else self._get_full_tool_schemas(),
                 prev_num_openai_messages=prev_num_openai_messages,
             )
             prev_num_openai_messages = len(openai_messages)
@@ -1644,6 +1848,10 @@
         except ImportError:
             pass  # Langfuse not available
 
+        # Check if this call is from a RegisteredAgentToolkit to prevent tool
+        # use
+        disable_tools = self._is_called_from_registered_toolkit()
+
         # Handle response format compatibility with non-strict tools
         original_response_format = response_format
         input_message, response_format, used_prompt_formatting = (
@@ -1679,13 +1887,14 @@
                 return self._step_terminate(
                     e.args[1], tool_call_records, "max_tokens_exceeded"
                 )
-
             response = await self._aget_model_response(
                 openai_messages,
                 num_tokens=num_tokens,
                 current_iteration=iteration_count,
                 response_format=response_format,
-                tool_schemas=self._get_full_tool_schemas(),
+                tool_schemas=[]
+                if disable_tools
+                else self._get_full_tool_schemas(),
                 prev_num_openai_messages=prev_num_openai_messages,
             )
             prev_num_openai_messages = len(openai_messages)
@@ -3668,15 +3877,18 @@
     ) -> ChatAgentResponse:
         r"""Create a streaming response using content accumulator."""
 
-        # Add new content to accumulator and get full content
+        # Add new content; only build full content when needed
         accumulator.add_streaming_content(new_content)
-        full_content = accumulator.get_full_content()
+        if self.stream_accumulate:
+            message_content = accumulator.get_full_content()
+        else:
+            message_content = new_content
 
         message = BaseMessage(
             role_name=self.role_name,
             role_type=self.role_type,
             meta_dict={},
-            content=full_content,
+            content=message_content,
         )
 
         return ChatAgentResponse(
@@ -3686,7 +3898,7 @@
                 "id": response_id,
                 "usage": step_token_usage.copy(),
                 "finish_reasons": ["streaming"],
-                "num_tokens": self._get_token_count(full_content),
+                "num_tokens": self._get_token_count(message_content),
                 "tool_calls": tool_call_records or [],
                 "external_tool_requests": None,
                 "streaming": True,
@@ -3759,7 +3971,7 @@
                 self.memory.get_context_creator(), "token_limit", None
             ),
             output_language=self._output_language,
-            tools=cloned_tools,
+            tools=cast(List[Union[FunctionTool, Callable]], cloned_tools),
             toolkits_to_register_agent=toolkits_to_register,
             external_tools=[
                 schema for schema in self._external_tool_schemas.values()
@@ -3773,6 +3985,7 @@
             tool_execution_timeout=self.tool_execution_timeout,
             pause_event=self.pause_event,
             prune_tool_calls_from_memory=self.prune_tool_calls_from_memory,
+            stream_accumulate=self.stream_accumulate,
         )
 
         # Copy memory if requested
@@ -3787,9 +4000,7 @@
 
     def _clone_tools(
         self,
-    ) -> Tuple[
-        List[Union[FunctionTool, Callable]], List[RegisteredAgentToolkit]
-    ]:
+    ) -> Tuple[List[FunctionTool], List[RegisteredAgentToolkit]]:
         r"""Clone tools and return toolkits that need agent registration.
 
         This method handles stateful toolkits by cloning them if they have
@@ -3841,18 +4052,62 @@
                     # Toolkit doesn't support cloning, use original
                     cloned_toolkits[toolkit_id] = toolkit_instance
 
+                if getattr(
+                    tool.func, "__message_integration_enhanced__", False
+                ):
+                    cloned_tools.append(
+                        FunctionTool(
+                            func=tool.func,
+                            openai_tool_schema=tool.get_openai_tool_schema(),
+                        )
+                    )
+                    continue
+
                 # Get the method from the cloned (or original) toolkit
                 toolkit = cloned_toolkits[toolkit_id]
                 method_name = tool.func.__name__
+
                 if hasattr(toolkit, method_name):
                     new_method = getattr(toolkit, method_name)
-                    cloned_tools.append(new_method)
+                    # Wrap cloned method into a new FunctionTool,
+                    # preserving schema
+                    try:
+                        new_tool = FunctionTool(
+                            func=new_method,
+                            openai_tool_schema=tool.get_openai_tool_schema(),
+                        )
+                        cloned_tools.append(new_tool)
+                    except Exception as e:
+                        # If wrapping fails, fallback to wrapping the original
+                        # function with its schema to maintain consistency
+                        logger.warning(
+                            f"Failed to wrap cloned toolkit "
+                            f"method '{method_name}' "
+                            f"with FunctionTool: {e}. Using original "
+                            f"function with preserved schema instead."
+                        )
+                        cloned_tools.append(
+                            FunctionTool(
+                                func=tool.func,
+                                openai_tool_schema=tool.get_openai_tool_schema(),
+                            )
+                        )
                 else:
-                    # Fallback to original function
-                    cloned_tools.append(tool.func)
+                    # Fallback to original function wrapped in FunctionTool
+                    cloned_tools.append(
+                        FunctionTool(
+                            func=tool.func,
+                            openai_tool_schema=tool.get_openai_tool_schema(),
+                        )
+                    )
             else:
-                # Not a toolkit method, just use the original function
-                cloned_tools.append(tool.func)
+                # Not a toolkit method, preserve FunctionTool schema directly
+                cloned_tools.append(
+                    FunctionTool(
+                        func=tool.func,
+                        openai_tool_schema=tool.get_openai_tool_schema(),
+                    )
+                )
 
         return cloned_tools, toolkits_to_register
 
camel/configs/__init__.py CHANGED
@@ -17,6 +17,7 @@ from .anthropic_config import ANTHROPIC_API_PARAMS, AnthropicConfig
 from .base_config import BaseConfig
 from .bedrock_config import BEDROCK_API_PARAMS, BedrockConfig
 from .cohere_config import COHERE_API_PARAMS, CohereConfig
+from .cometapi_config import COMETAPI_API_PARAMS, CometAPIConfig
 from .crynux_config import CRYNUX_API_PARAMS, CrynuxConfig
 from .deepseek_config import DEEPSEEK_API_PARAMS, DeepSeekConfig
 from .gemini_config import Gemini_API_PARAMS, GeminiConfig
@@ -90,6 +91,8 @@ __all__ = [
     'TOGETHERAI_API_PARAMS',
     'CohereConfig',
     'COHERE_API_PARAMS',
+    'CometAPIConfig',
+    'COMETAPI_API_PARAMS',
     'YiConfig',
     'YI_API_PARAMS',
     'QwenConfig',
camel/configs/cometapi_config.py ADDED
@@ -0,0 +1,104 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+from __future__ import annotations
+
+from typing import Optional, Sequence, Union
+
+from camel.configs.base_config import BaseConfig
+
+
+class CometAPIConfig(BaseConfig):
+    r"""Defines the parameters for generating chat completions using
+    CometAPI's OpenAI-compatible interface.
+
+    Reference: https://api.cometapi.com/v1/
+
+    Args:
+        temperature (float, optional): Sampling temperature to use, between
+            :obj:`0` and :obj:`2`. Higher values make the output more random,
+            while lower values make it more focused and deterministic.
+            (default: :obj:`None`)
+        top_p (float, optional): An alternative to sampling with temperature,
+            called nucleus sampling, where the model considers the results of
+            the tokens with top_p probability mass. So :obj:`0.1` means only
+            the tokens comprising the top 10% probability mass are considered.
+            (default: :obj:`None`)
+        n (int, optional): How many chat completion choices to generate for
+            each input message. (default: :obj:`None`)
+        response_format (object, optional): An object specifying the format
+            that the model must output. Compatible with GPT-4 Turbo and all
+            GPT-3.5 Turbo models newer than gpt-3.5-turbo-1106. Setting to
+            {"type": "json_object"} enables JSON mode, which guarantees the
+            message the model generates is valid JSON. Important: when using
+            JSON mode, you must also instruct the model to produce JSON
+            yourself via a system or user message. Without this, the model
+            may generate an unending stream of whitespace until the generation
+            reaches the token limit, resulting in a long-running and seemingly
+            "stuck" request. Also note that the message content may be
+            partially cut off if finish_reason="length", which indicates the
+            generation exceeded max_tokens or the conversation exceeded the
+            max context length.
+        stream (bool, optional): If True, partial message deltas will be sent
+            as data-only server-sent events as they become available.
+            (default: :obj:`None`)
+        stop (str or list, optional): Up to :obj:`4` sequences where the API
+            will stop generating further tokens. (default: :obj:`None`)
+        max_tokens (int, optional): The maximum number of tokens to generate
+            in the chat completion. The total length of input tokens and
+            generated tokens is limited by the model's context length.
+            (default: :obj:`None`)
+        presence_penalty (float, optional): Number between :obj:`-2.0` and
+            :obj:`2.0`. Positive values penalize new tokens based on whether
+            they appear in the text so far, increasing the model's likelihood
+            to talk about new topics. See more information about frequency and
+            presence penalties. (default: :obj:`None`)
+        frequency_penalty (float, optional): Number between :obj:`-2.0` and
+            :obj:`2.0`. Positive values penalize new tokens based on their
+            existing frequency in the text so far, decreasing the model's
+            likelihood to repeat the same line verbatim. See more information
+            about frequency and presence penalties. (default: :obj:`None`)
+        user (str, optional): A unique identifier representing your end-user,
+            which can help CometAPI to monitor and detect abuse.
+            (default: :obj:`None`)
+        tools (list[FunctionTool], optional): A list of tools the model may
+            call. Currently, only functions are supported as a tool. Use this
+            to provide a list of functions the model may generate JSON inputs
+            for. A max of 128 functions are supported.
+        tool_choice (Union[dict[str, str], str], optional): Controls which (if
+            any) tool is called by the model. :obj:`"none"` means the model
+            will not call any tool and instead generates a message.
+            :obj:`"auto"` means the model can pick between generating a
+            message or calling one or more tools. :obj:`"required"` means the
+            model must call one or more tools. Specifying a particular tool
+            via {"type": "function", "function": {"name": "my_function"}}
+            forces the model to call that tool. :obj:`"none"` is the default
+            when no tools are present. :obj:`"auto"` is the default if tools
+            are present.
+    """
+
+    temperature: Optional[float] = None
+    top_p: Optional[float] = None
+    n: Optional[int] = None
+    stream: Optional[bool] = None
+    stop: Optional[Union[str, Sequence[str]]] = None
+    max_tokens: Optional[int] = None
+    presence_penalty: Optional[float] = None
+    response_format: Optional[dict] = None
+    frequency_penalty: Optional[float] = None
+    user: Optional[str] = None
+    tools: Optional[list] = None
+    tool_choice: Optional[Union[dict[str, str], str]] = None
+
+
+COMETAPI_API_PARAMS = {param for param in CometAPIConfig.model_fields.keys()}
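
A hedged sketch of wiring the new config into a model. The COMETAPI platform member is an assumption based on the enum entries this release adds elsewhere, and the model id is illustrative:

from camel.configs import CometAPIConfig
from camel.models import ModelFactory
from camel.types import ModelPlatformType

model = ModelFactory.create(
    model_platform=ModelPlatformType.COMETAPI,  # assumed enum member name
    model_type="gpt-4o-mini",                   # illustrative model id
    model_config_dict=CometAPIConfig(temperature=0.2).as_dict(),
)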
camel/interpreters/docker/Dockerfile CHANGED
@@ -1,11 +1,8 @@
 # syntax=docker/dockerfile:1
-
 FROM ubuntu:22.04
 
-# Set environment variable to avoid interactive prompts
 ENV DEBIAN_FRONTEND=noninteractive
 
-# Update and install base utilities
 RUN apt-get update && apt-get install -y \
     build-essential \
     software-properties-common \
@@ -20,7 +17,6 @@ RUN apt-get update && apt-get install -y \
     && apt-get clean \
     && apt-get autoremove -y
 
-# Install Python 3.10 and its dependencies
 RUN add-apt-repository ppa:deadsnakes/ppa && \
     apt-get update && \
     apt-get install -y \
@@ -34,32 +30,27 @@ RUN add-apt-repository ppa:deadsnakes/ppa && \
     && apt-get clean \
     && apt-get autoremove -y
 
-# Install R
 RUN apt-get update && \
     apt-get install -y r-base && \
     rm -rf /var/lib/apt/lists/* && \
     apt-get clean && \
     apt-get autoremove -y
 
-# Install NodeJS 22.x
 RUN curl -fsSL https://deb.nodesource.com/setup_22.x | bash - && \
     apt-get install -y nodejs && \
     rm -rf /var/lib/apt/lists/* && \
     apt-get clean && \
     apt-get autoremove -y
 
-# Install Poetry
 RUN curl -fsSL https://install.python-poetry.org | python3.10 - && \
     ln -s ~/.local/bin/poetry /usr/local/bin/poetry
 
-# Upgrade pip and install base Python packages
 RUN python3.10 -m pip install --upgrade pip setuptools wheel
-
-# Install uv using pip instead of the shell script
 RUN pip install uv
 
-# Setup working directory
+RUN groupadd -r devuser && useradd -r -m -g devuser devuser
 WORKDIR /workspace
+RUN chown -R devuser:devuser /workspace
+USER devuser
 
-# Set default shell
 CMD ["/bin/bash"]
camel/memories/blocks/chat_history_block.py CHANGED
@@ -96,7 +96,10 @@ class ChatHistoryBlock(MemoryBlock):
             if (
                 record_dicts
                 and record_dicts[0]['role_at_backend']
-                in {OpenAIBackendRole.SYSTEM, OpenAIBackendRole.DEVELOPER}
+                in {
+                    OpenAIBackendRole.SYSTEM.value,
+                    OpenAIBackendRole.DEVELOPER.value,
+                }
             )
             else 0
         )
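
The .value fix matters because role_at_backend holds a plain string after serialization, and a plain string never compares equal to a non-str Enum member. A stand-in sketch:

from enum import Enum

class Role(Enum):  # stand-in for OpenAIBackendRole in this sketch
    SYSTEM = "system"
    DEVELOPER = "developer"

serialized = "system"  # what record_dicts[0]['role_at_backend'] holds

assert serialized not in {Role.SYSTEM, Role.DEVELOPER}  # old check never matches
assert serialized in {Role.SYSTEM.value, Role.DEVELOPER.value}  # fixed check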