camel-ai 0.2.67__py3-none-any.whl → 0.2.80a2__py3-none-any.whl
This diff covers the content of publicly released package versions as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
- camel/__init__.py +1 -1
- camel/agents/_types.py +6 -2
- camel/agents/_utils.py +38 -0
- camel/agents/chat_agent.py +4014 -410
- camel/agents/mcp_agent.py +30 -27
- camel/agents/repo_agent.py +2 -1
- camel/benchmarks/browsecomp.py +6 -6
- camel/configs/__init__.py +15 -0
- camel/configs/aihubmix_config.py +88 -0
- camel/configs/amd_config.py +70 -0
- camel/configs/cometapi_config.py +104 -0
- camel/configs/minimax_config.py +93 -0
- camel/configs/nebius_config.py +103 -0
- camel/configs/vllm_config.py +2 -0
- camel/data_collectors/alpaca_collector.py +15 -6
- camel/datagen/self_improving_cot.py +1 -1
- camel/datasets/base_generator.py +39 -10
- camel/environments/__init__.py +12 -0
- camel/environments/rlcards_env.py +860 -0
- camel/environments/single_step.py +28 -3
- camel/environments/tic_tac_toe.py +1 -1
- camel/interpreters/__init__.py +2 -0
- camel/interpreters/docker/Dockerfile +4 -16
- camel/interpreters/docker_interpreter.py +3 -2
- camel/interpreters/e2b_interpreter.py +34 -1
- camel/interpreters/internal_python_interpreter.py +51 -2
- camel/interpreters/microsandbox_interpreter.py +395 -0
- camel/loaders/__init__.py +11 -2
- camel/loaders/base_loader.py +85 -0
- camel/loaders/chunkr_reader.py +9 -0
- camel/loaders/firecrawl_reader.py +4 -4
- camel/logger.py +1 -1
- camel/memories/agent_memories.py +84 -1
- camel/memories/base.py +34 -0
- camel/memories/blocks/chat_history_block.py +122 -4
- camel/memories/blocks/vectordb_block.py +8 -1
- camel/memories/context_creators/score_based.py +29 -237
- camel/memories/records.py +88 -8
- camel/messages/base.py +166 -40
- camel/messages/func_message.py +32 -5
- camel/models/__init__.py +10 -0
- camel/models/aihubmix_model.py +83 -0
- camel/models/aiml_model.py +1 -16
- camel/models/amd_model.py +101 -0
- camel/models/anthropic_model.py +117 -18
- camel/models/aws_bedrock_model.py +2 -33
- camel/models/azure_openai_model.py +205 -91
- camel/models/base_audio_model.py +3 -1
- camel/models/base_model.py +189 -24
- camel/models/cohere_model.py +5 -17
- camel/models/cometapi_model.py +83 -0
- camel/models/crynux_model.py +1 -16
- camel/models/deepseek_model.py +6 -16
- camel/models/fish_audio_model.py +6 -0
- camel/models/gemini_model.py +71 -20
- camel/models/groq_model.py +1 -17
- camel/models/internlm_model.py +1 -16
- camel/models/litellm_model.py +49 -32
- camel/models/lmstudio_model.py +1 -17
- camel/models/minimax_model.py +83 -0
- camel/models/mistral_model.py +1 -16
- camel/models/model_factory.py +27 -1
- camel/models/model_manager.py +24 -6
- camel/models/modelscope_model.py +1 -16
- camel/models/moonshot_model.py +185 -19
- camel/models/nebius_model.py +83 -0
- camel/models/nemotron_model.py +0 -5
- camel/models/netmind_model.py +1 -16
- camel/models/novita_model.py +1 -16
- camel/models/nvidia_model.py +1 -16
- camel/models/ollama_model.py +4 -19
- camel/models/openai_compatible_model.py +171 -46
- camel/models/openai_model.py +205 -77
- camel/models/openrouter_model.py +1 -17
- camel/models/ppio_model.py +1 -16
- camel/models/qianfan_model.py +1 -16
- camel/models/qwen_model.py +1 -16
- camel/models/reka_model.py +1 -16
- camel/models/samba_model.py +34 -47
- camel/models/sglang_model.py +64 -31
- camel/models/siliconflow_model.py +1 -16
- camel/models/stub_model.py +0 -4
- camel/models/togetherai_model.py +1 -16
- camel/models/vllm_model.py +1 -16
- camel/models/volcano_model.py +0 -17
- camel/models/watsonx_model.py +1 -16
- camel/models/yi_model.py +1 -16
- camel/models/zhipuai_model.py +60 -16
- camel/parsers/__init__.py +18 -0
- camel/parsers/mcp_tool_call_parser.py +176 -0
- camel/retrievers/auto_retriever.py +1 -0
- camel/runtimes/configs.py +11 -11
- camel/runtimes/daytona_runtime.py +15 -16
- camel/runtimes/docker_runtime.py +6 -6
- camel/runtimes/remote_http_runtime.py +5 -5
- camel/services/agent_openapi_server.py +380 -0
- camel/societies/__init__.py +2 -0
- camel/societies/role_playing.py +26 -28
- camel/societies/workforce/__init__.py +2 -0
- camel/societies/workforce/events.py +122 -0
- camel/societies/workforce/prompts.py +249 -38
- camel/societies/workforce/role_playing_worker.py +82 -20
- camel/societies/workforce/single_agent_worker.py +634 -34
- camel/societies/workforce/structured_output_handler.py +512 -0
- camel/societies/workforce/task_channel.py +169 -23
- camel/societies/workforce/utils.py +176 -9
- camel/societies/workforce/worker.py +77 -23
- camel/societies/workforce/workflow_memory_manager.py +772 -0
- camel/societies/workforce/workforce.py +3168 -478
- camel/societies/workforce/workforce_callback.py +74 -0
- camel/societies/workforce/workforce_logger.py +203 -175
- camel/societies/workforce/workforce_metrics.py +33 -0
- camel/storages/__init__.py +4 -0
- camel/storages/key_value_storages/json.py +15 -2
- camel/storages/key_value_storages/mem0_cloud.py +48 -47
- camel/storages/object_storages/google_cloud.py +1 -1
- camel/storages/vectordb_storages/__init__.py +6 -0
- camel/storages/vectordb_storages/chroma.py +731 -0
- camel/storages/vectordb_storages/oceanbase.py +13 -13
- camel/storages/vectordb_storages/pgvector.py +349 -0
- camel/storages/vectordb_storages/qdrant.py +3 -3
- camel/storages/vectordb_storages/surreal.py +365 -0
- camel/storages/vectordb_storages/tidb.py +8 -6
- camel/tasks/task.py +244 -27
- camel/toolkits/__init__.py +46 -8
- camel/toolkits/aci_toolkit.py +64 -19
- camel/toolkits/arxiv_toolkit.py +6 -6
- camel/toolkits/base.py +63 -5
- camel/toolkits/code_execution.py +28 -1
- camel/toolkits/context_summarizer_toolkit.py +684 -0
- camel/toolkits/craw4ai_toolkit.py +93 -0
- camel/toolkits/dappier_toolkit.py +10 -6
- camel/toolkits/dingtalk.py +1135 -0
- camel/toolkits/edgeone_pages_mcp_toolkit.py +49 -0
- camel/toolkits/excel_toolkit.py +901 -67
- camel/toolkits/file_toolkit.py +1402 -0
- camel/toolkits/function_tool.py +30 -6
- camel/toolkits/github_toolkit.py +107 -20
- camel/toolkits/gmail_toolkit.py +1839 -0
- camel/toolkits/google_calendar_toolkit.py +38 -4
- camel/toolkits/google_drive_mcp_toolkit.py +54 -0
- camel/toolkits/human_toolkit.py +34 -10
- camel/toolkits/hybrid_browser_toolkit/__init__.py +18 -0
- camel/toolkits/hybrid_browser_toolkit/config_loader.py +185 -0
- camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit.py +246 -0
- camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit_ts.py +1973 -0
- camel/toolkits/hybrid_browser_toolkit/installer.py +203 -0
- camel/toolkits/hybrid_browser_toolkit/ts/package-lock.json +3749 -0
- camel/toolkits/hybrid_browser_toolkit/ts/package.json +32 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/browser-scripts.js +125 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/browser-session.ts +1815 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/config-loader.ts +233 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/hybrid-browser-toolkit.ts +590 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/index.ts +7 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/parent-child-filter.ts +226 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/snapshot-parser.ts +219 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/som-screenshot-injected.ts +543 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/types.ts +130 -0
- camel/toolkits/hybrid_browser_toolkit/ts/tsconfig.json +26 -0
- camel/toolkits/hybrid_browser_toolkit/ts/websocket-server.js +319 -0
- camel/toolkits/hybrid_browser_toolkit/ws_wrapper.py +1032 -0
- camel/toolkits/hybrid_browser_toolkit_py/__init__.py +17 -0
- camel/toolkits/hybrid_browser_toolkit_py/actions.py +575 -0
- camel/toolkits/hybrid_browser_toolkit_py/agent.py +311 -0
- camel/toolkits/hybrid_browser_toolkit_py/browser_session.py +787 -0
- camel/toolkits/hybrid_browser_toolkit_py/config_loader.py +490 -0
- camel/toolkits/hybrid_browser_toolkit_py/hybrid_browser_toolkit.py +2390 -0
- camel/toolkits/hybrid_browser_toolkit_py/snapshot.py +233 -0
- camel/toolkits/hybrid_browser_toolkit_py/stealth_script.js +0 -0
- camel/toolkits/hybrid_browser_toolkit_py/unified_analyzer.js +1043 -0
- camel/toolkits/image_generation_toolkit.py +390 -0
- camel/toolkits/jina_reranker_toolkit.py +3 -4
- camel/toolkits/klavis_toolkit.py +5 -1
- camel/toolkits/markitdown_toolkit.py +104 -0
- camel/toolkits/math_toolkit.py +64 -10
- camel/toolkits/mcp_toolkit.py +370 -45
- camel/toolkits/memory_toolkit.py +5 -1
- camel/toolkits/message_agent_toolkit.py +608 -0
- camel/toolkits/message_integration.py +724 -0
- camel/toolkits/minimax_mcp_toolkit.py +195 -0
- camel/toolkits/note_taking_toolkit.py +277 -0
- camel/toolkits/notion_mcp_toolkit.py +224 -0
- camel/toolkits/openbb_toolkit.py +5 -1
- camel/toolkits/origene_mcp_toolkit.py +56 -0
- camel/toolkits/playwright_mcp_toolkit.py +12 -31
- camel/toolkits/pptx_toolkit.py +25 -12
- camel/toolkits/resend_toolkit.py +168 -0
- camel/toolkits/screenshot_toolkit.py +213 -0
- camel/toolkits/search_toolkit.py +437 -142
- camel/toolkits/slack_toolkit.py +104 -50
- camel/toolkits/sympy_toolkit.py +1 -1
- camel/toolkits/task_planning_toolkit.py +3 -3
- camel/toolkits/terminal_toolkit/__init__.py +18 -0
- camel/toolkits/terminal_toolkit/terminal_toolkit.py +957 -0
- camel/toolkits/terminal_toolkit/utils.py +532 -0
- camel/toolkits/thinking_toolkit.py +1 -1
- camel/toolkits/vertex_ai_veo_toolkit.py +590 -0
- camel/toolkits/video_analysis_toolkit.py +106 -26
- camel/toolkits/video_download_toolkit.py +17 -14
- camel/toolkits/web_deploy_toolkit.py +1219 -0
- camel/toolkits/wechat_official_toolkit.py +483 -0
- camel/toolkits/zapier_toolkit.py +5 -1
- camel/types/__init__.py +2 -2
- camel/types/agents/tool_calling_record.py +4 -1
- camel/types/enums.py +316 -40
- camel/types/openai_types.py +2 -2
- camel/types/unified_model_type.py +31 -4
- camel/utils/commons.py +36 -5
- camel/utils/constants.py +3 -0
- camel/utils/context_utils.py +1003 -0
- camel/utils/mcp.py +138 -4
- camel/utils/mcp_client.py +45 -1
- camel/utils/message_summarizer.py +148 -0
- camel/utils/token_counting.py +43 -20
- camel/utils/tool_result.py +44 -0
- {camel_ai-0.2.67.dist-info → camel_ai-0.2.80a2.dist-info}/METADATA +296 -85
- {camel_ai-0.2.67.dist-info → camel_ai-0.2.80a2.dist-info}/RECORD +219 -146
- camel/loaders/pandas_reader.py +0 -368
- camel/toolkits/dalle_toolkit.py +0 -175
- camel/toolkits/file_write_toolkit.py +0 -444
- camel/toolkits/openai_agent_toolkit.py +0 -135
- camel/toolkits/terminal_toolkit.py +0 -1037
- {camel_ai-0.2.67.dist-info → camel_ai-0.2.80a2.dist-info}/WHEEL +0 -0
- {camel_ai-0.2.67.dist-info → camel_ai-0.2.80a2.dist-info}/licenses/LICENSE +0 -0
camel/models/anthropic_model.py
CHANGED
@@ -12,19 +12,63 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 import os
-from typing import Any, Dict, Optional, Union
+from typing import Any, Dict, List, Optional, Union
 
-from
+from openai import AsyncStream, Stream
+
+from camel.configs import AnthropicConfig
+from camel.messages import OpenAIMessage
 from camel.models.openai_compatible_model import OpenAICompatibleModel
-from camel.types import ModelType
+from camel.types import ChatCompletion, ChatCompletionChunk, ModelType
 from camel.utils import (
-    AnthropicTokenCounter,
     BaseTokenCounter,
+    OpenAITokenCounter,
     api_keys_required,
     dependencies_required,
 )
 
 
+def strip_trailing_whitespace_from_messages(
+    messages: List[OpenAIMessage],
+) -> List[OpenAIMessage]:
+    r"""Strip trailing whitespace from all message contents in a list of
+    messages. This is necessary because the Anthropic API doesn't allow
+    trailing whitespace in message content.
+
+    Args:
+        messages (List[OpenAIMessage]): List of messages to process
+
+    Returns:
+        List[OpenAIMessage]: The processed messages with trailing whitespace
+            removed
+    """
+    if not messages:
+        return messages
+
+    # Create a deep copy to avoid modifying the original messages
+    processed_messages = [dict(msg) for msg in messages]
+
+    # Process each message
+    for msg in processed_messages:
+        if "content" in msg and msg["content"] is not None:
+            if isinstance(msg["content"], str):
+                msg["content"] = msg["content"].rstrip()
+            elif isinstance(msg["content"], list):
+                # Handle content that's a list of content parts (e.g., for
+                # multimodal content)
+                for i, part in enumerate(msg["content"]):
+                    if (
+                        isinstance(part, dict)
+                        and "text" in part
+                        and isinstance(part["text"], str)
+                    ):
+                        part["text"] = part["text"].rstrip()
+                    elif isinstance(part, str):
+                        msg["content"][i] = part.rstrip()
+
+    return processed_messages  # type: ignore[return-value]
+
+
 class AnthropicModel(OpenAICompatibleModel):
     r"""Anthropic API in a unified OpenAICompatibleModel interface.
 
@@ -89,29 +133,84 @@ class AnthropicModel(OpenAICompatibleModel):
             **kwargs,
         )
 
+        # Monkey patch the AnthropicTokenCounter to handle trailing whitespace
+        self._patch_anthropic_token_counter()
+
     @property
     def token_counter(self) -> BaseTokenCounter:
         r"""Initialize the token counter for the model backend.
 
         Returns:
-
+            OpenAITokenCounter: The token counter following the model's
                 tokenization style.
         """
+        # TODO: use anthropic token counter
+
         if not self._token_counter:
-            self._token_counter =
+            self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
         return self._token_counter
 
-    def
-
-
+    def _request_chat_completion(
+        self,
+        messages: List[OpenAIMessage],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+        # Strip trailing whitespace from all message contents to prevent
+        # Anthropic API errors
+        processed_messages = strip_trailing_whitespace_from_messages(messages)
+
+        # Call the parent class method
+        return super()._request_chat_completion(processed_messages, tools)
+
+    async def _arequest_chat_completion(
+        self,
+        messages: List[OpenAIMessage],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        # Strip trailing whitespace from all message contents to prevent
+        # Anthropic API errors
+        processed_messages = strip_trailing_whitespace_from_messages(messages)
 
-
-
-
+        # Call the parent class method
+        return await super()._arequest_chat_completion(
+            processed_messages, tools
+        )
+
+    def _patch_anthropic_token_counter(self):
+        r"""Monkey patch the AnthropicTokenCounter class to handle trailing
+        whitespace.
+
+        This patches the count_tokens_from_messages method to strip trailing
+        whitespace from message content before sending to the Anthropic API.
         """
-
-
-
-
-
+        import functools
+
+        from anthropic.types import MessageParam
+
+        from camel.utils import AnthropicTokenCounter
+
+        original_count_tokens = (
+            AnthropicTokenCounter.count_tokens_from_messages
+        )
+
+        @functools.wraps(original_count_tokens)
+        def patched_count_tokens(self, messages):
+            # Process messages to remove trailing whitespace
+            processed_messages = strip_trailing_whitespace_from_messages(
+                messages
+            )
+
+            # Use the processed messages with the original method
+            return self.client.messages.count_tokens(
+                messages=[
+                    MessageParam(
+                        content=str(msg["content"]),
+                        role="user" if msg["role"] == "user" else "assistant",
+                    )
+                    for msg in processed_messages
+                ],
+                model=self.model,
+            ).input_tokens
+
+        # Apply the monkey patch
+        AnthropicTokenCounter.count_tokens_from_messages = patched_count_tokens
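Every Anthropic request (and, via the monkey patch, every token count) now passes through the new module-level helper. A minimal sketch of its behavior, assuming camel-ai 0.2.80a2 is installed; the message dicts are hypothetical inputs, not CAMEL fixtures:

from camel.models.anthropic_model import (
    strip_trailing_whitespace_from_messages,
)

# Hypothetical messages with the trailing whitespace Anthropic rejects.
messages = [
    {"role": "user", "content": "Hello   "},
    {
        "role": "assistant",
        "content": [{"type": "text", "text": "Hi there \n"}],
    },
]

cleaned = strip_trailing_whitespace_from_messages(messages)
assert cleaned[0]["content"] == "Hello"
assert cleaned[1]["content"][0]["text"] == "Hi there"

# Note: dict(msg) is a shallow copy, so string contents on the original
# list survive, but list-type content parts are rstripped in place on
# the originals too.
assert messages[0]["content"] == "Hello   "
assert messages[1]["content"][0]["text"] == "Hi there"

Because `_patch_anthropic_token_counter` applies the same stripping inside `AnthropicTokenCounter.count_tokens_from_messages`, token counting and the actual request see identical payloads.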
camel/models/aws_bedrock_model.py
CHANGED

@@ -13,17 +13,11 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 
 import os
-from typing import Any, Dict,
+from typing import Any, Dict, Optional, Union
 
-from
-from pydantic import BaseModel
-
-from camel.configs import BEDROCK_API_PARAMS, BedrockConfig
-from camel.messages import OpenAIMessage
+from camel.configs import BedrockConfig
 from camel.models.openai_compatible_model import OpenAICompatibleModel
 from camel.types import (
-    ChatCompletion,
-    ChatCompletionChunk,
     ModelType,
 )
 from camel.utils import BaseTokenCounter, api_keys_required
@@ -93,28 +87,3 @@ class AWSBedrockModel(OpenAICompatibleModel):
             max_retries=max_retries,
             **kwargs,
         )
-
-    async def _arun(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Optional[Type[BaseModel]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
-        raise NotImplementedError(
-            "AWS Bedrock does not support async inference."
-        )
-
-    def check_model_config(self):
-        r"""Check whether the input model configuration contains unexpected
-        arguments.
-
-        Raises:
-            ValueError: If the model configuration dictionary contains any
-                unexpected argument for this model class.
-        """
-        for param in self.model_config_dict:
-            if param not in BEDROCK_API_PARAMS:
-                raise ValueError(
-                    f"Invalid parameter '{param}' in model_config_dict. "
-                    f"Valid parameters are: {BEDROCK_API_PARAMS}"
-                )
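Two overrides disappear here: the `_arun` stub that raised `NotImplementedError` and the `check_model_config` validator, so unknown keys in `model_config_dict` are no longer rejected against `BEDROCK_API_PARAMS`, and async inference presumably falls through to the inherited `OpenAICompatibleModel` implementation. A rough sketch of what that enables; the model id and factory call follow the usual CAMEL pattern but are assumptions, not taken from this diff:

import asyncio

from camel.models import ModelFactory
from camel.types import ModelPlatformType

# Assumed Bedrock model id; requires AWS credentials configured for Bedrock.
model = ModelFactory.create(
    model_platform=ModelPlatformType.AWS_BEDROCK,
    model_type="anthropic.claude-3-5-sonnet-20240620-v1:0",
)

async def main() -> None:
    # In 0.2.67 this raised NotImplementedError ("AWS Bedrock does not
    # support async inference."); now it dispatches through the parent
    # backend's async path.
    response = await model.arun([{"role": "user", "content": "Hello"}])
    print(response.choices[0].message.content)

asyncio.run(main())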
camel/models/azure_openai_model.py
CHANGED

@@ -11,13 +11,19 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+import copy
 import os
+import warnings
 from typing import Any, Callable, Dict, List, Optional, Type, Union
 
 from openai import AsyncAzureOpenAI, AsyncStream, AzureOpenAI, Stream
+from openai.lib.streaming.chat import (
+    AsyncChatCompletionStreamManager,
+    ChatCompletionStreamManager,
+)
 from pydantic import BaseModel
 
-from camel.configs import
+from camel.configs import ChatGPTConfig
 from camel.messages import OpenAIMessage
 from camel.models.base_model import BaseModelBackend
 from camel.types import (
@@ -41,6 +47,11 @@ if os.environ.get("LANGFUSE_ENABLED", "False").lower() == "true":
         from langfuse.decorators import observe
     except ImportError:
         from camel.utils import observe
+elif os.environ.get("TRACEROOT_ENABLED", "False").lower() == "true":
+    try:
+        from traceroot import trace as observe  # type: ignore[import]
+    except ImportError:
+        from camel.utils import observe
 else:
     from camel.utils import observe
 
@@ -50,7 +61,8 @@ class AzureOpenAIModel(BaseModelBackend):
 
     Args:
         model_type (Union[ModelType, str]): Model for which a backend is
-            created,
+            created, Should be the deployment name you chose when you deployed
+            an azure model.
         model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
             that will be fed into:obj:`openai.ChatCompletion.create()`. If
             :obj:`None`, :obj:`ChatGPTConfig().as_dict()` will be used.
@@ -61,8 +73,6 @@ class AzureOpenAIModel(BaseModelBackend):
            (default: :obj:`None`)
         api_version (Optional[str], optional): The api version for the model.
             (default: :obj:`None`)
-        azure_deployment_name (Optional[str], optional): The deployment name
-            you chose when you deployed an azure model. (default: :obj:`None`)
         azure_ad_token (Optional[str], optional): Your Azure Active Directory
             token, https://www.microsoft.com/en-us/security/business/
             identity-access/microsoft-entra-id. (default: :obj:`None`)
@@ -78,8 +88,23 @@ class AzureOpenAIModel(BaseModelBackend):
             (default: :obj:`None`)
         max_retries (int, optional): Maximum number of retries for API calls.
             (default: :obj:`3`)
+        client (Optional[Any], optional): A custom synchronous AzureOpenAI
+            client instance. If provided, this client will be used instead of
+            creating a new one. Useful for RL frameworks like AReaL or rLLM
+            that provide Azure OpenAI-compatible clients. The client should
+            implement the AzureOpenAI client interface with
+            `.chat.completions.create()` and `.beta.chat.completions.parse()`
+            methods. (default: :obj:`None`)
+        async_client (Optional[Any], optional): A custom asynchronous
+            AzureOpenAI client instance. If provided, this client will be
+            used instead of creating a new one. The client should implement
+            the AsyncAzureOpenAI client interface. (default: :obj:`None`)
+        azure_deployment_name (Optional[str], optional): **Deprecated**.
+            Use `model_type` parameter instead. This parameter is kept for
+            backward compatibility and will be removed in a future version.
+            (default: :obj:`None`)
         **kwargs (Any): Additional arguments to pass to the client
-            initialization.
+            initialization. Ignored if custom clients are provided.
 
     References:
         https://learn.microsoft.com/en-us/azure/ai-services/openai/
@@ -94,12 +119,35 @@ class AzureOpenAIModel(BaseModelBackend):
         timeout: Optional[float] = None,
         token_counter: Optional[BaseTokenCounter] = None,
         api_version: Optional[str] = None,
-        azure_deployment_name: Optional[str] = None,
         azure_ad_token_provider: Optional["AzureADTokenProvider"] = None,
         azure_ad_token: Optional[str] = None,
         max_retries: int = 3,
+        client: Optional[Any] = None,
+        async_client: Optional[Any] = None,
+        azure_deployment_name: Optional[str] = None,
         **kwargs: Any,
     ) -> None:
+        # Handle deprecated azure_deployment_name parameter
+        if azure_deployment_name is not None:
+            warnings.warn(
+                "The 'azure_deployment_name' parameter is deprecated. "
+                "Please use 'model_type' parameter instead. "
+                "The 'azure_deployment_name' parameter is being ignored.",
+                DeprecationWarning,
+                stacklevel=2,
+            )
+
+        # Handle deprecated AZURE_DEPLOYMENT_NAME environment variable
+        if os.environ.get("AZURE_DEPLOYMENT_NAME") is not None:
+            warnings.warn(
+                "The 'AZURE_DEPLOYMENT_NAME' environment variable is "
+                "deprecated. Please use the 'model_type' parameter "
+                "instead. The 'AZURE_DEPLOYMENT_NAME' environment "
+                "variable is being ignored.",
+                DeprecationWarning,
+                stacklevel=2,
+            )
+
         if model_config_dict is None:
             model_config_dict = ChatGPTConfig().as_dict()
         api_key = api_key or os.environ.get("AZURE_OPENAI_API_KEY")
@@ -110,9 +158,6 @@ class AzureOpenAIModel(BaseModelBackend):
         )
 
         self.api_version = api_version or os.environ.get("AZURE_API_VERSION")
-        self._azure_deployment_name = azure_deployment_name or os.environ.get(
-            "AZURE_DEPLOYMENT_NAME"
-        )
         self._azure_ad_token = azure_ad_token or os.environ.get(
             "AZURE_AD_TOKEN"
         )
@@ -122,62 +167,73 @@ class AzureOpenAIModel(BaseModelBackend):
                 "Must provide either the `api_version` argument "
                 "or `AZURE_API_VERSION` environment variable."
             )
-        if self._azure_deployment_name is None:
-            raise ValueError(
-                "Must provide either the `azure_deployment_name` argument "
-                "or `AZURE_DEPLOYMENT_NAME` environment variable."
-            )
 
-        if
-
-
-
-            self._client = LangfuseOpenAI(
-                azure_endpoint=str(self._url),
-                azure_deployment=self._azure_deployment_name,
-                api_version=self.api_version,
-                api_key=self._api_key,
-                azure_ad_token=self._azure_ad_token,
-                azure_ad_token_provider=self.azure_ad_token_provider,
-                timeout=self._timeout,
-                max_retries=max_retries,
-                **kwargs,
-            )
-            self._async_client = LangfuseAsyncOpenAI(
-                azure_endpoint=str(self._url),
-                azure_deployment=self._azure_deployment_name,
-                api_version=self.api_version,
-                api_key=self._api_key,
-                azure_ad_token=self._azure_ad_token,
-                azure_ad_token_provider=self.azure_ad_token_provider,
-                timeout=self._timeout,
-                max_retries=max_retries,
-                **kwargs,
-            )
+        # Use custom clients if provided, otherwise create new ones
+        if client is not None:
+            # Use the provided custom sync client
+            self._client = client
         else:
-
-
-
-
-
-
-
-
-
-
-
+            # Create default sync client
+            if is_langfuse_available():
+                from langfuse.openai import AzureOpenAI as LangfuseOpenAI
+
+                self._client = LangfuseOpenAI(
+                    azure_endpoint=str(self._url),
+                    azure_deployment=str(self.model_type),
+                    api_version=self.api_version,
+                    api_key=self._api_key,
+                    azure_ad_token=self._azure_ad_token,
+                    azure_ad_token_provider=self.azure_ad_token_provider,
+                    timeout=self._timeout,
+                    max_retries=max_retries,
+                    **kwargs,
+                )
+            else:
+                self._client = AzureOpenAI(
+                    azure_endpoint=str(self._url),
+                    azure_deployment=str(self.model_type),
+                    api_version=self.api_version,
+                    api_key=self._api_key,
+                    azure_ad_token=self._azure_ad_token,
+                    azure_ad_token_provider=self.azure_ad_token_provider,
+                    timeout=self._timeout,
+                    max_retries=max_retries,
+                    **kwargs,
+                )
 
-
-
-
-
-
-
-
-
-
-
-
+        if async_client is not None:
+            # Use the provided custom async client
+            self._async_client = async_client
+        else:
+            # Create default async client
+            if is_langfuse_available():
+                from langfuse.openai import (
+                    AsyncAzureOpenAI as LangfuseAsyncOpenAI,
+                )
+
+                self._async_client = LangfuseAsyncOpenAI(
+                    azure_endpoint=str(self._url),
+                    azure_deployment=str(self.model_type),
+                    api_version=self.api_version,
+                    api_key=self._api_key,
+                    azure_ad_token=self._azure_ad_token,
+                    azure_ad_token_provider=self.azure_ad_token_provider,
+                    timeout=self._timeout,
+                    max_retries=max_retries,
+                    **kwargs,
+                )
+            else:
+                self._async_client = AsyncAzureOpenAI(
+                    azure_endpoint=str(self._url),
+                    azure_deployment=str(self.model_type),
+                    api_version=self.api_version,
+                    api_key=self._api_key,
+                    azure_ad_token=self._azure_ad_token,
+                    azure_ad_token_provider=self.azure_ad_token_provider,
+                    timeout=self._timeout,
+                    max_retries=max_retries,
+                    **kwargs,
+                )
 
     @property
     def token_counter(self) -> BaseTokenCounter:
@@ -197,7 +253,11 @@ class AzureOpenAIModel(BaseModelBackend):
         messages: List[OpenAIMessage],
         response_format: Optional[Type[BaseModel]] = None,
         tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[
+    ) -> Union[
+        ChatCompletion,
+        Stream[ChatCompletionChunk],
+        ChatCompletionStreamManager[BaseModel],
+    ]:
         r"""Runs inference of Azure OpenAI chat completion.
 
         Args:
@@ -212,6 +272,8 @@ class AzureOpenAIModel(BaseModelBackend):
            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
                 `ChatCompletion` in the non-stream mode, or
                 `Stream[ChatCompletionChunk]` in the stream mode.
+                `ChatCompletionStreamManager[BaseModel]` for
+                structured output streaming.
         """
 
         # Update Langfuse trace with current agent session and metadata
@@ -229,10 +291,14 @@ class AzureOpenAIModel(BaseModelBackend):
         response_format = response_format or self.model_config_dict.get(
             "response_format", None
         )
+        is_streaming = self.model_config_dict.get("stream", False)
         if response_format:
-
-            self.
-
+            if is_streaming:
+                return self._request_stream_parse(
+                    messages, response_format, tools
+                )
+            else:
+                return self._request_parse(messages, response_format, tools)
         else:
             result = self._request_chat_completion(messages, tools)
 
@@ -244,7 +310,11 @@ class AzureOpenAIModel(BaseModelBackend):
         messages: List[OpenAIMessage],
         response_format: Optional[Type[BaseModel]] = None,
         tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[
+    ) -> Union[
+        ChatCompletion,
+        AsyncStream[ChatCompletionChunk],
+        AsyncChatCompletionStreamManager[BaseModel],
+    ]:
         r"""Runs inference of Azure OpenAI chat completion.
 
         Args:
@@ -256,9 +326,12 @@ class AzureOpenAIModel(BaseModelBackend):
                 use for the request.
 
         Returns:
-            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]
+            Union[ChatCompletion, AsyncStream[ChatCompletionChunk],
+            AsyncChatCompletionStreamManager[BaseModel]]:
                 `ChatCompletion` in the non-stream mode, or
                 `AsyncStream[ChatCompletionChunk]` in the stream mode.
+                `AsyncChatCompletionStreamManager[BaseModel]` for
+                structured output streaming.
         """
 
         # Update Langfuse trace with current agent session and metadata
@@ -276,10 +349,16 @@ class AzureOpenAIModel(BaseModelBackend):
         response_format = response_format or self.model_config_dict.get(
             "response_format", None
         )
+        is_streaming = self.model_config_dict.get("stream", False)
         if response_format:
-
-
-
+            if is_streaming:
+                return await self._arequest_stream_parse(
+                    messages, response_format, tools
+                )
+            else:
+                return await self._arequest_parse(
+                    messages, response_format, tools
+                )
         else:
             result = await self._arequest_chat_completion(messages, tools)
 
@@ -297,7 +376,7 @@ class AzureOpenAIModel(BaseModelBackend):
 
         return self._client.chat.completions.create(
             messages=messages,
-            model=self.
+            model=str(self.model_type),
             **request_config,
         )
 
@@ -313,7 +392,7 @@ class AzureOpenAIModel(BaseModelBackend):
 
         return await self._async_client.chat.completions.create(
             messages=messages,
-            model=self.
+            model=str(self.model_type),
             **request_config,
         )
 
@@ -323,8 +402,6 @@ class AzureOpenAIModel(BaseModelBackend):
         response_format: Type[BaseModel],
         tools: Optional[List[Dict[str, Any]]] = None,
     ) -> ChatCompletion:
-        import copy
-
         request_config = copy.deepcopy(self.model_config_dict)
 
         request_config["response_format"] = response_format
@@ -336,7 +413,7 @@ class AzureOpenAIModel(BaseModelBackend):
 
         return self._client.beta.chat.completions.parse(
            messages=messages,
-            model=self.
+            model=str(self.model_type),
             **request_config,
         )
 
@@ -346,8 +423,6 @@ class AzureOpenAIModel(BaseModelBackend):
         response_format: Type[BaseModel],
         tools: Optional[List[Dict[str, Any]]] = None,
     ) -> ChatCompletion:
-        import copy
-
         request_config = copy.deepcopy(self.model_config_dict)
 
         request_config["response_format"] = response_format
@@ -359,24 +434,63 @@ class AzureOpenAIModel(BaseModelBackend):
 
         return await self._async_client.beta.chat.completions.parse(
             messages=messages,
-            model=self.
+            model=str(self.model_type),
             **request_config,
         )
 
-    def
-
-
+    def _request_stream_parse(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Type[BaseModel],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> ChatCompletionStreamManager[BaseModel]:
+        r"""Request streaming structured output parsing.
 
-
-            ValueError: If the model configuration dictionary contains any
-                unexpected arguments to Azure OpenAI API.
+        Note: This uses OpenAI's beta streaming API for structured outputs.
         """
-
-
-
-
-
-
+
+        request_config = copy.deepcopy(self.model_config_dict)
+
+        # Remove stream from config as it's handled by the stream method
+        request_config.pop("stream", None)
+
+        if tools is not None:
+            request_config["tools"] = tools
+
+        # Use the beta streaming API for structured outputs
+        return self._client.beta.chat.completions.stream(
+            messages=messages,
+            model=str(self.model_type),
+            response_format=response_format,
+            **request_config,
+        )
+
+    async def _arequest_stream_parse(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Type[BaseModel],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> AsyncChatCompletionStreamManager[BaseModel]:
+        r"""Request async streaming structured output parsing.
+
+        Note: This uses OpenAI's beta streaming API for structured outputs.
+        """
+
+        request_config = copy.deepcopy(self.model_config_dict)
+
+        # Remove stream from config as it's handled by the stream method
+        request_config.pop("stream", None)
+
+        if tools is not None:
+            request_config["tools"] = tools
+
+        # Use the beta streaming API for structured outputs
+        return self._async_client.beta.chat.completions.stream(
+            messages=messages,
+            model=str(self.model_type),
+            response_format=response_format,
+            **request_config,
+        )
 
     @property
     def stream(self) -> bool: