camel-ai 0.2.67__py3-none-any.whl → 0.2.80a2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (224)
  1. camel/__init__.py +1 -1
  2. camel/agents/_types.py +6 -2
  3. camel/agents/_utils.py +38 -0
  4. camel/agents/chat_agent.py +4014 -410
  5. camel/agents/mcp_agent.py +30 -27
  6. camel/agents/repo_agent.py +2 -1
  7. camel/benchmarks/browsecomp.py +6 -6
  8. camel/configs/__init__.py +15 -0
  9. camel/configs/aihubmix_config.py +88 -0
  10. camel/configs/amd_config.py +70 -0
  11. camel/configs/cometapi_config.py +104 -0
  12. camel/configs/minimax_config.py +93 -0
  13. camel/configs/nebius_config.py +103 -0
  14. camel/configs/vllm_config.py +2 -0
  15. camel/data_collectors/alpaca_collector.py +15 -6
  16. camel/datagen/self_improving_cot.py +1 -1
  17. camel/datasets/base_generator.py +39 -10
  18. camel/environments/__init__.py +12 -0
  19. camel/environments/rlcards_env.py +860 -0
  20. camel/environments/single_step.py +28 -3
  21. camel/environments/tic_tac_toe.py +1 -1
  22. camel/interpreters/__init__.py +2 -0
  23. camel/interpreters/docker/Dockerfile +4 -16
  24. camel/interpreters/docker_interpreter.py +3 -2
  25. camel/interpreters/e2b_interpreter.py +34 -1
  26. camel/interpreters/internal_python_interpreter.py +51 -2
  27. camel/interpreters/microsandbox_interpreter.py +395 -0
  28. camel/loaders/__init__.py +11 -2
  29. camel/loaders/base_loader.py +85 -0
  30. camel/loaders/chunkr_reader.py +9 -0
  31. camel/loaders/firecrawl_reader.py +4 -4
  32. camel/logger.py +1 -1
  33. camel/memories/agent_memories.py +84 -1
  34. camel/memories/base.py +34 -0
  35. camel/memories/blocks/chat_history_block.py +122 -4
  36. camel/memories/blocks/vectordb_block.py +8 -1
  37. camel/memories/context_creators/score_based.py +29 -237
  38. camel/memories/records.py +88 -8
  39. camel/messages/base.py +166 -40
  40. camel/messages/func_message.py +32 -5
  41. camel/models/__init__.py +10 -0
  42. camel/models/aihubmix_model.py +83 -0
  43. camel/models/aiml_model.py +1 -16
  44. camel/models/amd_model.py +101 -0
  45. camel/models/anthropic_model.py +117 -18
  46. camel/models/aws_bedrock_model.py +2 -33
  47. camel/models/azure_openai_model.py +205 -91
  48. camel/models/base_audio_model.py +3 -1
  49. camel/models/base_model.py +189 -24
  50. camel/models/cohere_model.py +5 -17
  51. camel/models/cometapi_model.py +83 -0
  52. camel/models/crynux_model.py +1 -16
  53. camel/models/deepseek_model.py +6 -16
  54. camel/models/fish_audio_model.py +6 -0
  55. camel/models/gemini_model.py +71 -20
  56. camel/models/groq_model.py +1 -17
  57. camel/models/internlm_model.py +1 -16
  58. camel/models/litellm_model.py +49 -32
  59. camel/models/lmstudio_model.py +1 -17
  60. camel/models/minimax_model.py +83 -0
  61. camel/models/mistral_model.py +1 -16
  62. camel/models/model_factory.py +27 -1
  63. camel/models/model_manager.py +24 -6
  64. camel/models/modelscope_model.py +1 -16
  65. camel/models/moonshot_model.py +185 -19
  66. camel/models/nebius_model.py +83 -0
  67. camel/models/nemotron_model.py +0 -5
  68. camel/models/netmind_model.py +1 -16
  69. camel/models/novita_model.py +1 -16
  70. camel/models/nvidia_model.py +1 -16
  71. camel/models/ollama_model.py +4 -19
  72. camel/models/openai_compatible_model.py +171 -46
  73. camel/models/openai_model.py +205 -77
  74. camel/models/openrouter_model.py +1 -17
  75. camel/models/ppio_model.py +1 -16
  76. camel/models/qianfan_model.py +1 -16
  77. camel/models/qwen_model.py +1 -16
  78. camel/models/reka_model.py +1 -16
  79. camel/models/samba_model.py +34 -47
  80. camel/models/sglang_model.py +64 -31
  81. camel/models/siliconflow_model.py +1 -16
  82. camel/models/stub_model.py +0 -4
  83. camel/models/togetherai_model.py +1 -16
  84. camel/models/vllm_model.py +1 -16
  85. camel/models/volcano_model.py +0 -17
  86. camel/models/watsonx_model.py +1 -16
  87. camel/models/yi_model.py +1 -16
  88. camel/models/zhipuai_model.py +60 -16
  89. camel/parsers/__init__.py +18 -0
  90. camel/parsers/mcp_tool_call_parser.py +176 -0
  91. camel/retrievers/auto_retriever.py +1 -0
  92. camel/runtimes/configs.py +11 -11
  93. camel/runtimes/daytona_runtime.py +15 -16
  94. camel/runtimes/docker_runtime.py +6 -6
  95. camel/runtimes/remote_http_runtime.py +5 -5
  96. camel/services/agent_openapi_server.py +380 -0
  97. camel/societies/__init__.py +2 -0
  98. camel/societies/role_playing.py +26 -28
  99. camel/societies/workforce/__init__.py +2 -0
  100. camel/societies/workforce/events.py +122 -0
  101. camel/societies/workforce/prompts.py +249 -38
  102. camel/societies/workforce/role_playing_worker.py +82 -20
  103. camel/societies/workforce/single_agent_worker.py +634 -34
  104. camel/societies/workforce/structured_output_handler.py +512 -0
  105. camel/societies/workforce/task_channel.py +169 -23
  106. camel/societies/workforce/utils.py +176 -9
  107. camel/societies/workforce/worker.py +77 -23
  108. camel/societies/workforce/workflow_memory_manager.py +772 -0
  109. camel/societies/workforce/workforce.py +3168 -478
  110. camel/societies/workforce/workforce_callback.py +74 -0
  111. camel/societies/workforce/workforce_logger.py +203 -175
  112. camel/societies/workforce/workforce_metrics.py +33 -0
  113. camel/storages/__init__.py +4 -0
  114. camel/storages/key_value_storages/json.py +15 -2
  115. camel/storages/key_value_storages/mem0_cloud.py +48 -47
  116. camel/storages/object_storages/google_cloud.py +1 -1
  117. camel/storages/vectordb_storages/__init__.py +6 -0
  118. camel/storages/vectordb_storages/chroma.py +731 -0
  119. camel/storages/vectordb_storages/oceanbase.py +13 -13
  120. camel/storages/vectordb_storages/pgvector.py +349 -0
  121. camel/storages/vectordb_storages/qdrant.py +3 -3
  122. camel/storages/vectordb_storages/surreal.py +365 -0
  123. camel/storages/vectordb_storages/tidb.py +8 -6
  124. camel/tasks/task.py +244 -27
  125. camel/toolkits/__init__.py +46 -8
  126. camel/toolkits/aci_toolkit.py +64 -19
  127. camel/toolkits/arxiv_toolkit.py +6 -6
  128. camel/toolkits/base.py +63 -5
  129. camel/toolkits/code_execution.py +28 -1
  130. camel/toolkits/context_summarizer_toolkit.py +684 -0
  131. camel/toolkits/craw4ai_toolkit.py +93 -0
  132. camel/toolkits/dappier_toolkit.py +10 -6
  133. camel/toolkits/dingtalk.py +1135 -0
  134. camel/toolkits/edgeone_pages_mcp_toolkit.py +49 -0
  135. camel/toolkits/excel_toolkit.py +901 -67
  136. camel/toolkits/file_toolkit.py +1402 -0
  137. camel/toolkits/function_tool.py +30 -6
  138. camel/toolkits/github_toolkit.py +107 -20
  139. camel/toolkits/gmail_toolkit.py +1839 -0
  140. camel/toolkits/google_calendar_toolkit.py +38 -4
  141. camel/toolkits/google_drive_mcp_toolkit.py +54 -0
  142. camel/toolkits/human_toolkit.py +34 -10
  143. camel/toolkits/hybrid_browser_toolkit/__init__.py +18 -0
  144. camel/toolkits/hybrid_browser_toolkit/config_loader.py +185 -0
  145. camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit.py +246 -0
  146. camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit_ts.py +1973 -0
  147. camel/toolkits/hybrid_browser_toolkit/installer.py +203 -0
  148. camel/toolkits/hybrid_browser_toolkit/ts/package-lock.json +3749 -0
  149. camel/toolkits/hybrid_browser_toolkit/ts/package.json +32 -0
  150. camel/toolkits/hybrid_browser_toolkit/ts/src/browser-scripts.js +125 -0
  151. camel/toolkits/hybrid_browser_toolkit/ts/src/browser-session.ts +1815 -0
  152. camel/toolkits/hybrid_browser_toolkit/ts/src/config-loader.ts +233 -0
  153. camel/toolkits/hybrid_browser_toolkit/ts/src/hybrid-browser-toolkit.ts +590 -0
  154. camel/toolkits/hybrid_browser_toolkit/ts/src/index.ts +7 -0
  155. camel/toolkits/hybrid_browser_toolkit/ts/src/parent-child-filter.ts +226 -0
  156. camel/toolkits/hybrid_browser_toolkit/ts/src/snapshot-parser.ts +219 -0
  157. camel/toolkits/hybrid_browser_toolkit/ts/src/som-screenshot-injected.ts +543 -0
  158. camel/toolkits/hybrid_browser_toolkit/ts/src/types.ts +130 -0
  159. camel/toolkits/hybrid_browser_toolkit/ts/tsconfig.json +26 -0
  160. camel/toolkits/hybrid_browser_toolkit/ts/websocket-server.js +319 -0
  161. camel/toolkits/hybrid_browser_toolkit/ws_wrapper.py +1032 -0
  162. camel/toolkits/hybrid_browser_toolkit_py/__init__.py +17 -0
  163. camel/toolkits/hybrid_browser_toolkit_py/actions.py +575 -0
  164. camel/toolkits/hybrid_browser_toolkit_py/agent.py +311 -0
  165. camel/toolkits/hybrid_browser_toolkit_py/browser_session.py +787 -0
  166. camel/toolkits/hybrid_browser_toolkit_py/config_loader.py +490 -0
  167. camel/toolkits/hybrid_browser_toolkit_py/hybrid_browser_toolkit.py +2390 -0
  168. camel/toolkits/hybrid_browser_toolkit_py/snapshot.py +233 -0
  169. camel/toolkits/hybrid_browser_toolkit_py/stealth_script.js +0 -0
  170. camel/toolkits/hybrid_browser_toolkit_py/unified_analyzer.js +1043 -0
  171. camel/toolkits/image_generation_toolkit.py +390 -0
  172. camel/toolkits/jina_reranker_toolkit.py +3 -4
  173. camel/toolkits/klavis_toolkit.py +5 -1
  174. camel/toolkits/markitdown_toolkit.py +104 -0
  175. camel/toolkits/math_toolkit.py +64 -10
  176. camel/toolkits/mcp_toolkit.py +370 -45
  177. camel/toolkits/memory_toolkit.py +5 -1
  178. camel/toolkits/message_agent_toolkit.py +608 -0
  179. camel/toolkits/message_integration.py +724 -0
  180. camel/toolkits/minimax_mcp_toolkit.py +195 -0
  181. camel/toolkits/note_taking_toolkit.py +277 -0
  182. camel/toolkits/notion_mcp_toolkit.py +224 -0
  183. camel/toolkits/openbb_toolkit.py +5 -1
  184. camel/toolkits/origene_mcp_toolkit.py +56 -0
  185. camel/toolkits/playwright_mcp_toolkit.py +12 -31
  186. camel/toolkits/pptx_toolkit.py +25 -12
  187. camel/toolkits/resend_toolkit.py +168 -0
  188. camel/toolkits/screenshot_toolkit.py +213 -0
  189. camel/toolkits/search_toolkit.py +437 -142
  190. camel/toolkits/slack_toolkit.py +104 -50
  191. camel/toolkits/sympy_toolkit.py +1 -1
  192. camel/toolkits/task_planning_toolkit.py +3 -3
  193. camel/toolkits/terminal_toolkit/__init__.py +18 -0
  194. camel/toolkits/terminal_toolkit/terminal_toolkit.py +957 -0
  195. camel/toolkits/terminal_toolkit/utils.py +532 -0
  196. camel/toolkits/thinking_toolkit.py +1 -1
  197. camel/toolkits/vertex_ai_veo_toolkit.py +590 -0
  198. camel/toolkits/video_analysis_toolkit.py +106 -26
  199. camel/toolkits/video_download_toolkit.py +17 -14
  200. camel/toolkits/web_deploy_toolkit.py +1219 -0
  201. camel/toolkits/wechat_official_toolkit.py +483 -0
  202. camel/toolkits/zapier_toolkit.py +5 -1
  203. camel/types/__init__.py +2 -2
  204. camel/types/agents/tool_calling_record.py +4 -1
  205. camel/types/enums.py +316 -40
  206. camel/types/openai_types.py +2 -2
  207. camel/types/unified_model_type.py +31 -4
  208. camel/utils/commons.py +36 -5
  209. camel/utils/constants.py +3 -0
  210. camel/utils/context_utils.py +1003 -0
  211. camel/utils/mcp.py +138 -4
  212. camel/utils/mcp_client.py +45 -1
  213. camel/utils/message_summarizer.py +148 -0
  214. camel/utils/token_counting.py +43 -20
  215. camel/utils/tool_result.py +44 -0
  216. {camel_ai-0.2.67.dist-info → camel_ai-0.2.80a2.dist-info}/METADATA +296 -85
  217. {camel_ai-0.2.67.dist-info → camel_ai-0.2.80a2.dist-info}/RECORD +219 -146
  218. camel/loaders/pandas_reader.py +0 -368
  219. camel/toolkits/dalle_toolkit.py +0 -175
  220. camel/toolkits/file_write_toolkit.py +0 -444
  221. camel/toolkits/openai_agent_toolkit.py +0 -135
  222. camel/toolkits/terminal_toolkit.py +0 -1037
  223. {camel_ai-0.2.67.dist-info → camel_ai-0.2.80a2.dist-info}/WHEEL +0 -0
  224. {camel_ai-0.2.67.dist-info → camel_ai-0.2.80a2.dist-info}/licenses/LICENSE +0 -0
camel/models/anthropic_model.py

@@ -12,19 +12,63 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 import os
-from typing import Any, Dict, Optional, Union
+from typing import Any, Dict, List, Optional, Union
 
-from camel.configs import ANTHROPIC_API_PARAMS, AnthropicConfig
+from openai import AsyncStream, Stream
+
+from camel.configs import AnthropicConfig
+from camel.messages import OpenAIMessage
 from camel.models.openai_compatible_model import OpenAICompatibleModel
-from camel.types import ModelType
+from camel.types import ChatCompletion, ChatCompletionChunk, ModelType
 from camel.utils import (
-    AnthropicTokenCounter,
     BaseTokenCounter,
+    OpenAITokenCounter,
     api_keys_required,
     dependencies_required,
 )
 
 
+def strip_trailing_whitespace_from_messages(
+    messages: List[OpenAIMessage],
+) -> List[OpenAIMessage]:
+    r"""Strip trailing whitespace from all message contents in a list of
+    messages. This is necessary because the Anthropic API doesn't allow
+    trailing whitespace in message content.
+
+    Args:
+        messages (List[OpenAIMessage]): List of messages to process
+
+    Returns:
+        List[OpenAIMessage]: The processed messages with trailing whitespace
+            removed
+    """
+    if not messages:
+        return messages
+
+    # Create a deep copy to avoid modifying the original messages
+    processed_messages = [dict(msg) for msg in messages]
+
+    # Process each message
+    for msg in processed_messages:
+        if "content" in msg and msg["content"] is not None:
+            if isinstance(msg["content"], str):
+                msg["content"] = msg["content"].rstrip()
+            elif isinstance(msg["content"], list):
+                # Handle content that's a list of content parts (e.g., for
+                # multimodal content)
+                for i, part in enumerate(msg["content"]):
+                    if (
+                        isinstance(part, dict)
+                        and "text" in part
+                        and isinstance(part["text"], str)
+                    ):
+                        part["text"] = part["text"].rstrip()
+                    elif isinstance(part, str):
+                        msg["content"][i] = part.rstrip()
+
+    return processed_messages  # type: ignore[return-value]
+
+
 class AnthropicModel(OpenAICompatibleModel):
     r"""Anthropic API in a unified OpenAICompatibleModel interface.
 
@@ -89,29 +133,84 @@ class AnthropicModel(OpenAICompatibleModel):
             **kwargs,
         )
 
+        # Monkey patch the AnthropicTokenCounter to handle trailing whitespace
+        self._patch_anthropic_token_counter()
+
     @property
     def token_counter(self) -> BaseTokenCounter:
         r"""Initialize the token counter for the model backend.
 
         Returns:
-            BaseTokenCounter: The token counter following the model's
+            OpenAITokenCounter: The token counter following the model's
                 tokenization style.
         """
+        # TODO: use anthropic token counter
+
         if not self._token_counter:
-            self._token_counter = AnthropicTokenCounter(self.model_type)
+            self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
         return self._token_counter
 
-    def check_model_config(self):
-        r"""Check whether the model configuration is valid for anthropic
-        model backends.
+    def _request_chat_completion(
+        self,
+        messages: List[OpenAIMessage],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+        # Strip trailing whitespace from all message contents to prevent
+        # Anthropic API errors
+        processed_messages = strip_trailing_whitespace_from_messages(messages)
+
+        # Call the parent class method
+        return super()._request_chat_completion(processed_messages, tools)
+
+    async def _arequest_chat_completion(
+        self,
+        messages: List[OpenAIMessage],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        # Strip trailing whitespace from all message contents to prevent
+        # Anthropic API errors
+        processed_messages = strip_trailing_whitespace_from_messages(messages)
 
-        Raises:
-            ValueError: If the model configuration dictionary contains any
-                unexpected arguments to Anthropic API.
+        # Call the parent class method
+        return await super()._arequest_chat_completion(
+            processed_messages, tools
+        )
+
+    def _patch_anthropic_token_counter(self):
+        r"""Monkey patch the AnthropicTokenCounter class to handle trailing
+        whitespace.
+
+        This patches the count_tokens_from_messages method to strip trailing
+        whitespace from message content before sending to the Anthropic API.
         """
-        for param in self.model_config_dict:
-            if param not in ANTHROPIC_API_PARAMS:
-                raise ValueError(
-                    f"Unexpected argument `{param}` is "
-                    "input into Anthropic model backend."
-                )
+        import functools
+
+        from anthropic.types import MessageParam
+
+        from camel.utils import AnthropicTokenCounter
+
+        original_count_tokens = (
+            AnthropicTokenCounter.count_tokens_from_messages
+        )
+
+        @functools.wraps(original_count_tokens)
+        def patched_count_tokens(self, messages):
+            # Process messages to remove trailing whitespace
+            processed_messages = strip_trailing_whitespace_from_messages(
+                messages
+            )
+
+            # Use the processed messages with the original method
+            return self.client.messages.count_tokens(
+                messages=[
+                    MessageParam(
+                        content=str(msg["content"]),
+                        role="user" if msg["role"] == "user" else "assistant",
+                    )
+                    for msg in processed_messages
+                ],
+                model=self.model,
+            ).input_tokens
+
+        # Apply the monkey patch
+        AnthropicTokenCounter.count_tokens_from_messages = patched_count_tokens
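
For illustration, a minimal sketch of the new helper's behavior (the example
messages are hypothetical, not taken from the package). One caveat worth
noting: dict(msg) copies only the top level of each message, so list-typed
content parts are stripped in place in the caller's objects as well.

    from camel.models.anthropic_model import (
        strip_trailing_whitespace_from_messages,
    )

    messages = [
        {"role": "user", "content": "Hello   "},
        {"role": "assistant", "content": [{"type": "text", "text": "Hi "}]},
    ]
    cleaned = strip_trailing_whitespace_from_messages(messages)
    assert cleaned[0]["content"] == "Hello"
    assert cleaned[1]["content"][0]["text"] == "Hi"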
camel/models/aws_bedrock_model.py

@@ -13,17 +13,11 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 
 import os
-from typing import Any, Dict, List, Optional, Type, Union
+from typing import Any, Dict, Optional, Union
 
-from openai import AsyncStream
-from pydantic import BaseModel
-
-from camel.configs import BEDROCK_API_PARAMS, BedrockConfig
-from camel.messages import OpenAIMessage
+from camel.configs import BedrockConfig
 from camel.models.openai_compatible_model import OpenAICompatibleModel
 from camel.types import (
-    ChatCompletion,
-    ChatCompletionChunk,
     ModelType,
 )
 from camel.utils import BaseTokenCounter, api_keys_required
@@ -93,28 +87,3 @@ class AWSBedrockModel(OpenAICompatibleModel):
             max_retries=max_retries,
             **kwargs,
         )
-
-    async def _arun(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Optional[Type[BaseModel]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
-        raise NotImplementedError(
-            "AWS Bedrock does not support async inference."
-        )
-
-    def check_model_config(self):
-        r"""Check whether the input model configuration contains unexpected
-        arguments.
-
-        Raises:
-            ValueError: If the model configuration dictionary contains any
-                unexpected argument for this model class.
-        """
-        for param in self.model_config_dict:
-            if param not in BEDROCK_API_PARAMS:
-                raise ValueError(
-                    f"Invalid parameter '{param}' in model_config_dict. "
-                    f"Valid parameters are: {BEDROCK_API_PARAMS}"
-                )
camel/models/azure_openai_model.py

@@ -11,13 +11,19 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+import copy
 import os
+import warnings
 from typing import Any, Callable, Dict, List, Optional, Type, Union
 
 from openai import AsyncAzureOpenAI, AsyncStream, AzureOpenAI, Stream
+from openai.lib.streaming.chat import (
+    AsyncChatCompletionStreamManager,
+    ChatCompletionStreamManager,
+)
 from pydantic import BaseModel
 
-from camel.configs import OPENAI_API_PARAMS, ChatGPTConfig
+from camel.configs import ChatGPTConfig
 from camel.messages import OpenAIMessage
 from camel.models.base_model import BaseModelBackend
 from camel.types import (
@@ -41,6 +47,11 @@ if os.environ.get("LANGFUSE_ENABLED", "False").lower() == "true":
         from langfuse.decorators import observe
     except ImportError:
         from camel.utils import observe
+elif os.environ.get("TRACEROOT_ENABLED", "False").lower() == "true":
+    try:
+        from traceroot import trace as observe  # type: ignore[import]
+    except ImportError:
+        from camel.utils import observe
 else:
     from camel.utils import observe
 
@@ -50,7 +61,8 @@ class AzureOpenAIModel(BaseModelBackend):
 
     Args:
         model_type (Union[ModelType, str]): Model for which a backend is
-            created, one of GPT_* series.
+            created. Should be the deployment name you chose when you
+            deployed an Azure model.
         model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
            that will be fed into:obj:`openai.ChatCompletion.create()`. If
            :obj:`None`, :obj:`ChatGPTConfig().as_dict()` will be used.
@@ -61,8 +73,6 @@ class AzureOpenAIModel(BaseModelBackend):
             (default: :obj:`None`)
         api_version (Optional[str], optional): The api version for the model.
             (default: :obj:`None`)
-        azure_deployment_name (Optional[str], optional): The deployment name
-            you chose when you deployed an azure model. (default: :obj:`None`)
         azure_ad_token (Optional[str], optional): Your Azure Active Directory
             token, https://www.microsoft.com/en-us/security/business/
             identity-access/microsoft-entra-id. (default: :obj:`None`)
@@ -78,8 +88,23 @@ class AzureOpenAIModel(BaseModelBackend):
             (default: :obj:`None`)
         max_retries (int, optional): Maximum number of retries for API calls.
             (default: :obj:`3`)
+        client (Optional[Any], optional): A custom synchronous AzureOpenAI
+            client instance. If provided, this client will be used instead of
+            creating a new one. Useful for RL frameworks like AReaL or rLLM
+            that provide Azure OpenAI-compatible clients. The client should
+            implement the AzureOpenAI client interface with
+            `.chat.completions.create()` and `.beta.chat.completions.parse()`
+            methods. (default: :obj:`None`)
+        async_client (Optional[Any], optional): A custom asynchronous
+            AzureOpenAI client instance. If provided, this client will be
+            used instead of creating a new one. The client should implement
+            the AsyncAzureOpenAI client interface. (default: :obj:`None`)
+        azure_deployment_name (Optional[str], optional): **Deprecated**.
+            Use `model_type` parameter instead. This parameter is kept for
+            backward compatibility and will be removed in a future version.
+            (default: :obj:`None`)
         **kwargs (Any): Additional arguments to pass to the client
-            initialization.
+            initialization. Ignored if custom clients are provided.
 
     References:
         https://learn.microsoft.com/en-us/azure/ai-services/openai/
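
A hedged sketch of the new client-injection path (endpoint, key, and
deployment names below are placeholders; it assumes the constructor's usual
url/api_key arguments from BaseModelBackend, which are not shown in this
hunk):

    from openai import AzureOpenAI

    from camel.models.azure_openai_model import AzureOpenAIModel

    custom_client = AzureOpenAI(
        azure_endpoint="https://example.openai.azure.com",  # placeholder
        azure_deployment="my-gpt-4o",  # placeholder deployment name
        api_version="2024-02-01",
        api_key="...",
    )
    model = AzureOpenAIModel(
        model_type="my-gpt-4o",  # deployment name, per the new docstring
        url="https://example.openai.azure.com",
        api_key="...",
        api_version="2024-02-01",  # still validated even with a client
        client=custom_client,  # used as-is; no default client is created
    )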
@@ -94,12 +119,35 @@
         timeout: Optional[float] = None,
         token_counter: Optional[BaseTokenCounter] = None,
         api_version: Optional[str] = None,
-        azure_deployment_name: Optional[str] = None,
         azure_ad_token_provider: Optional["AzureADTokenProvider"] = None,
         azure_ad_token: Optional[str] = None,
         max_retries: int = 3,
+        client: Optional[Any] = None,
+        async_client: Optional[Any] = None,
+        azure_deployment_name: Optional[str] = None,
         **kwargs: Any,
     ) -> None:
+        # Handle deprecated azure_deployment_name parameter
+        if azure_deployment_name is not None:
+            warnings.warn(
+                "The 'azure_deployment_name' parameter is deprecated. "
+                "Please use 'model_type' parameter instead. "
+                "The 'azure_deployment_name' parameter is being ignored.",
+                DeprecationWarning,
+                stacklevel=2,
+            )
+
+        # Handle deprecated AZURE_DEPLOYMENT_NAME environment variable
+        if os.environ.get("AZURE_DEPLOYMENT_NAME") is not None:
+            warnings.warn(
+                "The 'AZURE_DEPLOYMENT_NAME' environment variable is "
+                "deprecated. Please use the 'model_type' parameter "
+                "instead. The 'AZURE_DEPLOYMENT_NAME' environment "
+                "variable is being ignored.",
+                DeprecationWarning,
+                stacklevel=2,
+            )
+
         if model_config_dict is None:
             model_config_dict = ChatGPTConfig().as_dict()
         api_key = api_key or os.environ.get("AZURE_OPENAI_API_KEY")
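
A migration sketch for the deprecated parameter (the deployment name is a
placeholder): the deployment name now travels through model_type, while
azure_deployment_name is still accepted but ignored with a
DeprecationWarning.

    from camel.models.azure_openai_model import AzureOpenAIModel

    # 0.2.67 style (now emits DeprecationWarning; the value is ignored):
    # model = AzureOpenAIModel(
    #     model_type=ModelType.GPT_4O,
    #     azure_deployment_name="my-gpt-4o",
    # )

    # 0.2.80a2 style: pass the deployment name as model_type.
    model = AzureOpenAIModel(model_type="my-gpt-4o")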
@@ -110,9 +158,6 @@
         )
 
         self.api_version = api_version or os.environ.get("AZURE_API_VERSION")
-        self._azure_deployment_name = azure_deployment_name or os.environ.get(
-            "AZURE_DEPLOYMENT_NAME"
-        )
         self._azure_ad_token = azure_ad_token or os.environ.get(
             "AZURE_AD_TOKEN"
         )
@@ -122,62 +167,73 @@
                 "Must provide either the `api_version` argument "
                 "or `AZURE_API_VERSION` environment variable."
             )
-        if self._azure_deployment_name is None:
-            raise ValueError(
-                "Must provide either the `azure_deployment_name` argument "
-                "or `AZURE_DEPLOYMENT_NAME` environment variable."
-            )
 
-        if is_langfuse_available():
-            from langfuse.openai import AsyncAzureOpenAI as LangfuseAsyncOpenAI
-            from langfuse.openai import AzureOpenAI as LangfuseOpenAI
-
-            self._client = LangfuseOpenAI(
-                azure_endpoint=str(self._url),
-                azure_deployment=self._azure_deployment_name,
-                api_version=self.api_version,
-                api_key=self._api_key,
-                azure_ad_token=self._azure_ad_token,
-                azure_ad_token_provider=self.azure_ad_token_provider,
-                timeout=self._timeout,
-                max_retries=max_retries,
-                **kwargs,
-            )
-            self._async_client = LangfuseAsyncOpenAI(
-                azure_endpoint=str(self._url),
-                azure_deployment=self._azure_deployment_name,
-                api_version=self.api_version,
-                api_key=self._api_key,
-                azure_ad_token=self._azure_ad_token,
-                azure_ad_token_provider=self.azure_ad_token_provider,
-                timeout=self._timeout,
-                max_retries=max_retries,
-                **kwargs,
-            )
+        # Use custom clients if provided, otherwise create new ones
+        if client is not None:
+            # Use the provided custom sync client
+            self._client = client
         else:
-            self._client = AzureOpenAI(
-                azure_endpoint=str(self._url),
-                azure_deployment=self._azure_deployment_name,
-                api_version=self.api_version,
-                api_key=self._api_key,
-                azure_ad_token=self._azure_ad_token,
-                azure_ad_token_provider=self.azure_ad_token_provider,
-                timeout=self._timeout,
-                max_retries=max_retries,
-                **kwargs,
-            )
+            # Create default sync client
+            if is_langfuse_available():
+                from langfuse.openai import AzureOpenAI as LangfuseOpenAI
+
+                self._client = LangfuseOpenAI(
+                    azure_endpoint=str(self._url),
+                    azure_deployment=str(self.model_type),
+                    api_version=self.api_version,
+                    api_key=self._api_key,
+                    azure_ad_token=self._azure_ad_token,
+                    azure_ad_token_provider=self.azure_ad_token_provider,
+                    timeout=self._timeout,
+                    max_retries=max_retries,
+                    **kwargs,
+                )
+            else:
+                self._client = AzureOpenAI(
+                    azure_endpoint=str(self._url),
+                    azure_deployment=str(self.model_type),
+                    api_version=self.api_version,
+                    api_key=self._api_key,
+                    azure_ad_token=self._azure_ad_token,
+                    azure_ad_token_provider=self.azure_ad_token_provider,
+                    timeout=self._timeout,
+                    max_retries=max_retries,
+                    **kwargs,
+                )
 
-            self._async_client = AsyncAzureOpenAI(
-                azure_endpoint=str(self._url),
-                azure_deployment=self._azure_deployment_name,
-                api_version=self.api_version,
-                api_key=self._api_key,
-                azure_ad_token=self._azure_ad_token,
-                azure_ad_token_provider=self.azure_ad_token_provider,
-                timeout=self._timeout,
-                max_retries=max_retries,
-                **kwargs,
-            )
+        if async_client is not None:
+            # Use the provided custom async client
+            self._async_client = async_client
+        else:
+            # Create default async client
+            if is_langfuse_available():
+                from langfuse.openai import (
+                    AsyncAzureOpenAI as LangfuseAsyncOpenAI,
+                )
+
+                self._async_client = LangfuseAsyncOpenAI(
+                    azure_endpoint=str(self._url),
+                    azure_deployment=str(self.model_type),
+                    api_version=self.api_version,
+                    api_key=self._api_key,
+                    azure_ad_token=self._azure_ad_token,
+                    azure_ad_token_provider=self.azure_ad_token_provider,
+                    timeout=self._timeout,
+                    max_retries=max_retries,
+                    **kwargs,
+                )
+            else:
+                self._async_client = AsyncAzureOpenAI(
+                    azure_endpoint=str(self._url),
+                    azure_deployment=str(self.model_type),
+                    api_version=self.api_version,
+                    api_key=self._api_key,
+                    azure_ad_token=self._azure_ad_token,
+                    azure_ad_token_provider=self.azure_ad_token_provider,
+                    timeout=self._timeout,
+                    max_retries=max_retries,
+                    **kwargs,
+                )
 
     @property
     def token_counter(self) -> BaseTokenCounter:
@@ -197,7 +253,11 @@
         messages: List[OpenAIMessage],
         response_format: Optional[Type[BaseModel]] = None,
         tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+    ) -> Union[
+        ChatCompletion,
+        Stream[ChatCompletionChunk],
+        ChatCompletionStreamManager[BaseModel],
+    ]:
         r"""Runs inference of Azure OpenAI chat completion.
 
         Args:
@@ -212,6 +272,8 @@
             Union[ChatCompletion, Stream[ChatCompletionChunk]]:
                 `ChatCompletion` in the non-stream mode, or
                 `Stream[ChatCompletionChunk]` in the stream mode.
+                `ChatCompletionStreamManager[BaseModel]` for
+                structured output streaming.
         """
 
         # Update Langfuse trace with current agent session and metadata
@@ -229,10 +291,14 @@
         response_format = response_format or self.model_config_dict.get(
             "response_format", None
         )
+        is_streaming = self.model_config_dict.get("stream", False)
         if response_format:
-            result: Union[ChatCompletion, Stream[ChatCompletionChunk]] = (
-                self._request_parse(messages, response_format, tools)
-            )
+            if is_streaming:
+                return self._request_stream_parse(
+                    messages, response_format, tools
+                )
+            else:
+                return self._request_parse(messages, response_format, tools)
         else:
             result = self._request_chat_completion(messages, tools)
 
@@ -244,7 +310,11 @@
         messages: List[OpenAIMessage],
         response_format: Optional[Type[BaseModel]] = None,
         tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+    ) -> Union[
+        ChatCompletion,
+        AsyncStream[ChatCompletionChunk],
+        AsyncChatCompletionStreamManager[BaseModel],
+    ]:
         r"""Runs inference of Azure OpenAI chat completion.
 
         Args:
@@ -256,9 +326,12 @@
                 use for the request.
 
         Returns:
-            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+            Union[ChatCompletion, AsyncStream[ChatCompletionChunk],
+            AsyncChatCompletionStreamManager[BaseModel]]:
                 `ChatCompletion` in the non-stream mode, or
                 `AsyncStream[ChatCompletionChunk]` in the stream mode.
+                `AsyncChatCompletionStreamManager[BaseModel]` for
+                structured output streaming.
         """
 
         # Update Langfuse trace with current agent session and metadata
@@ -276,10 +349,16 @@
         response_format = response_format or self.model_config_dict.get(
             "response_format", None
         )
+        is_streaming = self.model_config_dict.get("stream", False)
         if response_format:
-            result: Union[
-                ChatCompletion, AsyncStream[ChatCompletionChunk]
-            ] = await self._arequest_parse(messages, response_format, tools)
+            if is_streaming:
+                return await self._arequest_stream_parse(
+                    messages, response_format, tools
+                )
+            else:
+                return await self._arequest_parse(
+                    messages, response_format, tools
+                )
         else:
             result = await self._arequest_chat_completion(messages, tools)
 
@@ -297,7 +376,7 @@
 
         return self._client.chat.completions.create(
             messages=messages,
-            model=self._azure_deployment_name,  # type:ignore[arg-type]
+            model=str(self.model_type),
             **request_config,
         )
 
@@ -313,7 +392,7 @@
 
         return await self._async_client.chat.completions.create(
             messages=messages,
-            model=self._azure_deployment_name,  # type:ignore[arg-type]
+            model=str(self.model_type),
             **request_config,
         )
 
@@ -323,8 +402,6 @@
         response_format: Type[BaseModel],
         tools: Optional[List[Dict[str, Any]]] = None,
     ) -> ChatCompletion:
-        import copy
-
         request_config = copy.deepcopy(self.model_config_dict)
 
         request_config["response_format"] = response_format
@@ -336,7 +413,7 @@
 
         return self._client.beta.chat.completions.parse(
             messages=messages,
-            model=self._azure_deployment_name,  # type:ignore[arg-type]
+            model=str(self.model_type),
             **request_config,
         )
 
@@ -346,8 +423,6 @@
         response_format: Type[BaseModel],
         tools: Optional[List[Dict[str, Any]]] = None,
     ) -> ChatCompletion:
-        import copy
-
         request_config = copy.deepcopy(self.model_config_dict)
 
         request_config["response_format"] = response_format
@@ -359,24 +434,63 @@
 
         return await self._async_client.beta.chat.completions.parse(
             messages=messages,
-            model=self._azure_deployment_name,  # type:ignore[arg-type]
+            model=str(self.model_type),
             **request_config,
         )
 
-    def check_model_config(self):
-        r"""Check whether the model configuration contains any
-        unexpected arguments to Azure OpenAI API.
+    def _request_stream_parse(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Type[BaseModel],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> ChatCompletionStreamManager[BaseModel]:
+        r"""Request streaming structured output parsing.
 
-        Raises:
-            ValueError: If the model configuration dictionary contains any
-                unexpected arguments to Azure OpenAI API.
+        Note: This uses OpenAI's beta streaming API for structured outputs.
         """
-        for param in self.model_config_dict:
-            if param not in OPENAI_API_PARAMS:
-                raise ValueError(
-                    f"Unexpected argument `{param}` is "
-                    "input into Azure OpenAI model backend."
-                )
+
+        request_config = copy.deepcopy(self.model_config_dict)
+
+        # Remove stream from config as it's handled by the stream method
+        request_config.pop("stream", None)
+
+        if tools is not None:
+            request_config["tools"] = tools
+
+        # Use the beta streaming API for structured outputs
+        return self._client.beta.chat.completions.stream(
+            messages=messages,
+            model=str(self.model_type),
+            response_format=response_format,
+            **request_config,
+        )
+
+    async def _arequest_stream_parse(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Type[BaseModel],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> AsyncChatCompletionStreamManager[BaseModel]:
+        r"""Request async streaming structured output parsing.
+
+        Note: This uses OpenAI's beta streaming API for structured outputs.
+        """
+
+        request_config = copy.deepcopy(self.model_config_dict)
+
+        # Remove stream from config as it's handled by the stream method
+        request_config.pop("stream", None)
+
+        if tools is not None:
+            request_config["tools"] = tools
+
+        # Use the beta streaming API for structured outputs
+        return self._async_client.beta.chat.completions.stream(
+            messages=messages,
+            model=str(self.model_type),
+            response_format=response_format,
+            **request_config,
+        )
 
     @property
     def stream(self) -> bool:
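
A sketch of how the new structured-output streaming path is consumed,
assuming the event API of OpenAI's beta stream manager; the schema, prompt,
and deployment name are illustrative, and _run is normally reached through
the public run()/arun() wrappers rather than called directly.

    from pydantic import BaseModel

    from camel.models.azure_openai_model import AzureOpenAIModel

    class Answer(BaseModel):
        text: str

    model = AzureOpenAIModel(
        model_type="my-gpt-4o",  # placeholder deployment name
        model_config_dict={"stream": True},
    )
    # With stream=True and a response_format, _run returns a
    # ChatCompletionStreamManager instead of a plain chunk stream.
    manager = model._run(
        [{"role": "user", "content": "Say hi"}],
        response_format=Answer,
    )
    with manager as stream:
        for event in stream:
            if event.type == "content.delta":
                print(event.delta, end="")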