camel-ai 0.2.71a3__py3-none-any.whl → 0.2.71a5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of camel-ai has been flagged as potentially problematic.

Files changed (39):
  1. camel/__init__.py +1 -1
  2. camel/agents/chat_agent.py +1482 -134
  3. camel/agents/repo_agent.py +2 -1
  4. camel/benchmarks/browsecomp.py +6 -6
  5. camel/interpreters/docker_interpreter.py +3 -2
  6. camel/loaders/base_loader.py +85 -0
  7. camel/logger.py +1 -1
  8. camel/messages/base.py +12 -1
  9. camel/models/azure_openai_model.py +96 -7
  10. camel/models/base_model.py +68 -10
  11. camel/models/deepseek_model.py +5 -0
  12. camel/models/gemini_model.py +5 -0
  13. camel/models/litellm_model.py +48 -16
  14. camel/models/model_manager.py +24 -6
  15. camel/models/openai_compatible_model.py +109 -5
  16. camel/models/openai_model.py +117 -8
  17. camel/societies/workforce/prompts.py +68 -5
  18. camel/societies/workforce/role_playing_worker.py +1 -0
  19. camel/societies/workforce/single_agent_worker.py +1 -0
  20. camel/societies/workforce/utils.py +67 -2
  21. camel/societies/workforce/workforce.py +412 -67
  22. camel/societies/workforce/workforce_logger.py +0 -8
  23. camel/tasks/task.py +2 -0
  24. camel/toolkits/__init__.py +7 -2
  25. camel/toolkits/craw4ai_toolkit.py +2 -2
  26. camel/toolkits/file_write_toolkit.py +526 -121
  27. camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit.py +9 -3
  28. camel/toolkits/hybrid_browser_toolkit/unified_analyzer.js +31 -8
  29. camel/toolkits/message_agent_toolkit.py +608 -0
  30. camel/toolkits/note_taking_toolkit.py +90 -0
  31. camel/toolkits/openai_image_toolkit.py +292 -0
  32. camel/toolkits/slack_toolkit.py +4 -4
  33. camel/toolkits/terminal_toolkit.py +223 -73
  34. camel/utils/mcp_client.py +37 -1
  35. {camel_ai-0.2.71a3.dist-info → camel_ai-0.2.71a5.dist-info}/METADATA +48 -7
  36. {camel_ai-0.2.71a3.dist-info → camel_ai-0.2.71a5.dist-info}/RECORD +38 -35
  37. camel/toolkits/dalle_toolkit.py +0 -175
  38. {camel_ai-0.2.71a3.dist-info → camel_ai-0.2.71a5.dist-info}/WHEEL +0 -0
  39. {camel_ai-0.2.71a3.dist-info → camel_ai-0.2.71a5.dist-info}/licenses/LICENSE +0 -0
camel/agents/repo_agent.py CHANGED
@@ -21,6 +21,7 @@ if TYPE_CHECKING:
     from pydantic import BaseModel
 
 from camel.agents import ChatAgent
+from camel.agents.chat_agent import StreamingChatAgentResponse
 from camel.logger import get_logger
 from camel.messages import BaseMessage
 from camel.models import BaseModelBackend, ModelFactory
@@ -442,7 +443,7 @@ class RepoAgent(ChatAgent):
 
     def step(
         self, input_message: Union[BaseMessage, str], *args, **kwargs
-    ) -> ChatAgentResponse:
+    ) -> Union[ChatAgentResponse, StreamingChatAgentResponse]:
         r"""Overrides `ChatAgent.step()` to first retrieve relevant context
         from the vector store before passing the input to the language model.
         """
camel/benchmarks/browsecomp.py CHANGED
@@ -619,20 +619,20 @@ class BrowseCompBenchmark(BaseBenchmark):
         assistant_response, user_response = pipeline.step(
             input_msg
         )
-        if assistant_response.terminated:  # type: ignore[attr-defined]
+        if assistant_response.terminated:  # type: ignore[union-attr]
             break
-        if user_response.terminated:  # type: ignore[attr-defined]
+        if user_response.terminated:  # type: ignore[union-attr]
             break
-        if "CAMEL_TASK_DONE" in user_response.msg.content:  # type: ignore[attr-defined]
+        if "CAMEL_TASK_DONE" in user_response.msg.content:  # type: ignore[union-attr]
             break
 
         chat_history.append(
-            f"AI User: {user_response.msg.content}"  # type: ignore[attr-defined]
+            f"AI User: {user_response.msg.content}"  # type: ignore[union-attr]
         )
         chat_history.append(
-            f"AI Assistant: {assistant_response.msg.content}"  # type: ignore[attr-defined]
+            f"AI Assistant: {assistant_response.msg.content}"  # type: ignore[union-attr]
         )
-        input_msg = assistant_response.msg  # type: ignore[attr-defined]
+        input_msg = assistant_response.msg  # type: ignore[union-attr]
 
     chat_history_str = "\n".join(chat_history)
     if roleplaying_summarizer:
camel/interpreters/docker_interpreter.py CHANGED
@@ -146,8 +146,9 @@ class DockerInterpreter(BaseInterpreter):
         tar_stream = io.BytesIO()
         with tarfile.open(fileobj=tar_stream, mode='w') as tar:
             tarinfo = tarfile.TarInfo(name=filename)
-            tarinfo.size = len(content)
-            tar.addfile(tarinfo, io.BytesIO(content.encode('utf-8')))
+            encoded_content = content.encode('utf-8')
+            tarinfo.size = len(encoded_content)
+            tar.addfile(tarinfo, io.BytesIO(encoded_content))
         tar_stream.seek(0)
 
         # copy the tar into the container
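
The fix above matters for non-ASCII content: Python's len() on a str counts characters, while tarfile.TarInfo.size must be the byte length of the encoded payload, and the two diverge as soon as a multibyte UTF-8 character appears. A minimal standalone illustration (not part of the diff):

    # Character count and UTF-8 byte count differ for non-ASCII text.
    content = "héllo"                  # 5 characters
    encoded = content.encode("utf-8")  # 6 bytes: 'é' encodes to two bytes

    print(len(content), len(encoded))  # 5 6

    # Setting tarinfo.size = len(content) would declare a 5-byte entry
    # for a 6-byte payload, corrupting the archive copied into the container.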
camel/loaders/base_loader.py ADDED
@@ -0,0 +1,85 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+from abc import ABC, abstractmethod
+from pathlib import Path
+from typing import Any, Dict, List, Union
+
+
+class BaseLoader(ABC):
+    r"""Abstract base class for all data loaders in CAMEL."""
+
+    @abstractmethod
+    def _load_single(self, source: Union[str, Path]) -> Dict[str, Any]:
+        r"""Load data from a single source.
+
+        Args:
+            source (Union[str, Path]): The data source to load from.
+
+        Returns:
+            Dict[str, Any]: A dictionary containing the loaded data. It is
+                recommended that the dictionary includes a "content" key with
+                the primary data and optional metadata keys.
+        """
+        pass
+
+    def load(
+        self,
+        source: Union[str, Path, List[Union[str, Path]]],
+    ) -> Dict[str, List[Dict[str, Any]]]:
+        r"""Load data from one or multiple sources.
+
+        Args:
+            source (Union[str, Path, List[Union[str, Path]]]): The data
+                source(s) to load from. Can be:
+                - A single path/URL (str or Path)
+                - A list of paths/URLs
+
+        Returns:
+            Dict[str, List[Dict[str, Any]]]: A dictionary with a single key
+                "contents" containing a list of loaded data. If a single source
+                is provided, the list will contain a single item.
+
+        Raises:
+            ValueError: If no sources are provided.
+            RuntimeError: If loading fails for any source.
+        """
+        if not source:
+            raise ValueError("At least one source must be provided")
+
+        # Convert single source to list for uniform processing
+        sources = [source] if isinstance(source, (str, Path)) else list(source)
+
+        # Process all sources
+        results = []
+        for i, src in enumerate(sources, 1):
+            try:
+                content = self._load_single(src)
+                results.append(content)
+            except Exception as e:
+                raise RuntimeError(
+                    f"Error loading source {i}/{len(sources)}: {src}"
+                ) from e
+
+        return {"contents": results}
+
+    @property
+    @abstractmethod
+    def supported_formats(self) -> set[str]:
+        r"""Get the set of supported file formats or data sources.
+
+        Returns:
+            set[str]: A set of strings representing the supported formats/
+                sources.
+        """
+        pass
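
For orientation, a minimal sketch of a concrete loader built on this interface; TextFileLoader, its format set, and the import path (assumed from the file's location) are illustrative, not part of the package:

    from pathlib import Path
    from typing import Any, Dict, Union

    from camel.loaders.base_loader import BaseLoader

    class TextFileLoader(BaseLoader):
        r"""Hypothetical loader for plain-text files."""

        def _load_single(self, source: Union[str, Path]) -> Dict[str, Any]:
            path = Path(source)
            # "content" is the recommended primary-data key
            return {
                "content": path.read_text(encoding="utf-8"),
                "metadata": {"source": str(path)},
            }

        @property
        def supported_formats(self) -> set[str]:
            return {"txt", "md"}

    loader = TextFileLoader()
    result = loader.load(["notes.txt", "README.md"])
    # result == {"contents": [{"content": "...", "metadata": {...}}, ...]}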
camel/logger.py CHANGED
@@ -155,7 +155,7 @@ def set_log_level(level):
     _logger.debug(f"Logging level set to: {logging.getLevelName(level)}")
 
 
-def get_logger(name):
+def get_logger(name: str):
     r"""Get a logger with the specified name, prefixed with 'camel.'.
 
 
camel/messages/base.py CHANGED
@@ -533,7 +533,18 @@ class BaseMessage:
             OpenAIAssistantMessage: The converted :obj:`OpenAIAssistantMessage`
                 object.
         """
-        return {"role": "assistant", "content": self.content}
+        message_dict: Dict[str, Any] = {
+            "role": "assistant",
+            "content": self.content,
+        }
+
+        # Check if meta_dict contains tool_calls
+        if self.meta_dict and "tool_calls" in self.meta_dict:
+            tool_calls = self.meta_dict["tool_calls"]
+            if tool_calls:
+                message_dict["tool_calls"] = tool_calls
+
+        return message_dict  # type: ignore[return-value]
 
     def to_dict(self) -> Dict:
         r"""Converts the message to a dictionary.
camel/models/azure_openai_model.py CHANGED
@@ -11,10 +11,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+import copy
 import os
 from typing import Any, Callable, Dict, List, Optional, Type, Union
 
 from openai import AsyncAzureOpenAI, AsyncStream, AzureOpenAI, Stream
+from openai.lib.streaming.chat import (
+    AsyncChatCompletionStreamManager,
+    ChatCompletionStreamManager,
+)
 from pydantic import BaseModel
 
 from camel.configs import OPENAI_API_PARAMS, ChatGPTConfig
@@ -41,6 +46,11 @@ if os.environ.get("LANGFUSE_ENABLED", "False").lower() == "true":
         from langfuse.decorators import observe
     except ImportError:
         from camel.utils import observe
+elif os.environ.get("TRACEROOT_ENABLED", "False").lower() == "true":
+    try:
+        from traceroot import trace as observe  # type: ignore[import]
+    except ImportError:
+        from camel.utils import observe
 else:
     from camel.utils import observe
 
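The TRACEROOT_ENABLED branch mirrors the existing LANGFUSE_ENABLED one: if the tracer is not installed, camel.utils.observe stands in. For that fallback to be safe it has to behave as a pass-through decorator; a sketch of such a no-op (an assumption about camel.utils.observe, not its actual source):

    from typing import Any, Callable

    def observe(*args: Any, **kwargs: Any) -> Callable:
        # Works both bare (@observe) and parameterized (@observe(name=...)),
        # returning the wrapped function unchanged in either case.
        if len(args) == 1 and callable(args[0]) and not kwargs:
            return args[0]

        def decorator(func: Callable) -> Callable:
            return func

        return decorator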
@@ -197,7 +207,11 @@ class AzureOpenAIModel(BaseModelBackend):
         messages: List[OpenAIMessage],
         response_format: Optional[Type[BaseModel]] = None,
         tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+    ) -> Union[
+        ChatCompletion,
+        Stream[ChatCompletionChunk],
+        ChatCompletionStreamManager[BaseModel],
+    ]:
         r"""Runs inference of Azure OpenAI chat completion.
 
         Args:
@@ -212,6 +226,8 @@ class AzureOpenAIModel(BaseModelBackend):
             Union[ChatCompletion, Stream[ChatCompletionChunk]]:
                 `ChatCompletion` in the non-stream mode, or
                 `Stream[ChatCompletionChunk]` in the stream mode.
+                `ChatCompletionStreamManager[BaseModel]` for
+                structured output streaming.
         """
 
         # Update Langfuse trace with current agent session and metadata
@@ -229,10 +245,17 @@ class AzureOpenAIModel(BaseModelBackend):
         response_format = response_format or self.model_config_dict.get(
             "response_format", None
         )
+        is_streaming = self.model_config_dict.get("stream", False)
         if response_format:
             result: Union[ChatCompletion, Stream[ChatCompletionChunk]] = (
                 self._request_parse(messages, response_format, tools)
             )
+            if is_streaming:
+                return self._request_stream_parse(
+                    messages, response_format, tools
+                )
+            else:
+                return self._request_parse(messages, response_format, tools)
         else:
             result = self._request_chat_completion(messages, tools)
 
@@ -244,7 +267,11 @@ class AzureOpenAIModel(BaseModelBackend):
         messages: List[OpenAIMessage],
         response_format: Optional[Type[BaseModel]] = None,
         tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+    ) -> Union[
+        ChatCompletion,
+        AsyncStream[ChatCompletionChunk],
+        AsyncChatCompletionStreamManager[BaseModel],
+    ]:
         r"""Runs inference of Azure OpenAI chat completion.
 
         Args:
@@ -256,9 +283,12 @@ class AzureOpenAIModel(BaseModelBackend):
                 use for the request.
 
         Returns:
-            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+            Union[ChatCompletion, AsyncStream[ChatCompletionChunk],
+                AsyncChatCompletionStreamManager[BaseModel]]:
                 `ChatCompletion` in the non-stream mode, or
                 `AsyncStream[ChatCompletionChunk]` in the stream mode.
+                `AsyncChatCompletionStreamManager[BaseModel]` for
+                structured output streaming.
         """
 
         # Update Langfuse trace with current agent session and metadata
@@ -276,10 +306,19 @@ class AzureOpenAIModel(BaseModelBackend):
         response_format = response_format or self.model_config_dict.get(
             "response_format", None
         )
+        is_streaming = self.model_config_dict.get("stream", False)
         if response_format:
             result: Union[
                 ChatCompletion, AsyncStream[ChatCompletionChunk]
             ] = await self._arequest_parse(messages, response_format, tools)
+            if is_streaming:
+                return await self._arequest_stream_parse(
+                    messages, response_format, tools
+                )
+            else:
+                return await self._arequest_parse(
+                    messages, response_format, tools
+                )
         else:
             result = await self._arequest_chat_completion(messages, tools)
 
@@ -323,8 +362,6 @@ class AzureOpenAIModel(BaseModelBackend):
         response_format: Type[BaseModel],
         tools: Optional[List[Dict[str, Any]]] = None,
     ) -> ChatCompletion:
-        import copy
-
         request_config = copy.deepcopy(self.model_config_dict)
 
         request_config["response_format"] = response_format
@@ -346,8 +383,6 @@ class AzureOpenAIModel(BaseModelBackend):
         response_format: Type[BaseModel],
         tools: Optional[List[Dict[str, Any]]] = None,
     ) -> ChatCompletion:
-        import copy
-
         request_config = copy.deepcopy(self.model_config_dict)
 
         request_config["response_format"] = response_format
@@ -363,6 +398,60 @@ class AzureOpenAIModel(BaseModelBackend):
             **request_config,
         )
 
+    def _request_stream_parse(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Type[BaseModel],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> ChatCompletionStreamManager[BaseModel]:
+        r"""Request streaming structured output parsing.
+
+        Note: This uses OpenAI's beta streaming API for structured outputs.
+        """
+
+        request_config = copy.deepcopy(self.model_config_dict)
+
+        # Remove stream from config as it's handled by the stream method
+        request_config.pop("stream", None)
+
+        if tools is not None:
+            request_config["tools"] = tools
+
+        # Use the beta streaming API for structured outputs
+        return self._client.beta.chat.completions.stream(
+            messages=messages,
+            model=self.model_type,
+            response_format=response_format,
+            **request_config,
+        )
+
+    async def _arequest_stream_parse(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Type[BaseModel],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> AsyncChatCompletionStreamManager[BaseModel]:
+        r"""Request async streaming structured output parsing.
+
+        Note: This uses OpenAI's beta streaming API for structured outputs.
+        """
+
+        request_config = copy.deepcopy(self.model_config_dict)
+
+        # Remove stream from config as it's handled by the stream method
+        request_config.pop("stream", None)
+
+        if tools is not None:
+            request_config["tools"] = tools
+
+        # Use the beta streaming API for structured outputs
+        return self._async_client.beta.chat.completions.stream(
+            messages=messages,
+            model=self.model_type,
+            response_format=response_format,
+            **request_config,
+        )
+
     def check_model_config(self):
         r"""Check whether the model configuration contains any
         unexpected arguments to Azure OpenAI API.
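
The two new methods return the stream manager unopened; the caller drives it. A sketch of how the openai-python beta stream manager is typically consumed (event names follow the openai SDK; the WeatherReport model and the `model` variable are illustrative):

    from pydantic import BaseModel

    class WeatherReport(BaseModel):
        city: str
        celsius: float

    # An AzureOpenAIModel configured with stream=True plus a response_format
    # now gets a ChatCompletionStreamManager back instead of a plain Stream.
    manager = model.run(messages, response_format=WeatherReport)

    with manager as stream:                    # opens the HTTP stream
        for event in stream:
            if event.type == "content.delta":  # incremental text
                print(event.delta, end="")
        completion = stream.get_final_completion()
        report = completion.choices[0].message.parsed  # WeatherReport instance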
camel/models/base_model.py CHANGED
@@ -18,6 +18,10 @@ from abc import ABC, abstractmethod
 from typing import Any, Dict, List, Optional, Type, Union
 
 from openai import AsyncStream, Stream
+from openai.lib.streaming.chat import (
+    AsyncChatCompletionStreamManager,
+    ChatCompletionStreamManager,
+)
 from pydantic import BaseModel
 
 from camel.messages import OpenAIMessage
@@ -307,7 +311,28 @@ class BaseModelBackend(ABC, metaclass=ModelBackendMeta):
         messages: List[OpenAIMessage],
         response_format: Optional[Type[BaseModel]] = None,
         tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+    ) -> Union[
+        ChatCompletion,
+        Stream[ChatCompletionChunk],
+        ChatCompletionStreamManager[BaseModel],
+    ]:
+        r"""Runs the query to the backend model in a non-stream mode.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+            response_format (Optional[Type[BaseModel]]): The format of the
+                response.
+            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
+                use for the request.
+
+        Returns:
+            Union[ChatCompletion, Stream[ChatCompletionChunk], Any]:
+                `ChatCompletion` in the non-stream mode, or
+                `Stream[ChatCompletionChunk]` in the stream mode,
+                or `ChatCompletionStreamManager[BaseModel]` in the structured
+                stream mode.
+        """
         pass
 
     @abstractmethod
@@ -316,7 +341,28 @@ class BaseModelBackend(ABC, metaclass=ModelBackendMeta):
         messages: List[OpenAIMessage],
         response_format: Optional[Type[BaseModel]] = None,
         tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+    ) -> Union[
+        ChatCompletion,
+        AsyncStream[ChatCompletionChunk],
+        AsyncChatCompletionStreamManager[BaseModel],
+    ]:
+        r"""Runs the query to the backend model in async non-stream mode.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+            response_format (Optional[Type[BaseModel]]): The format of the
+                response.
+            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
+                use for the request.
+
+        Returns:
+            Union[ChatCompletion, AsyncStream[ChatCompletionChunk], Any]:
+                `ChatCompletion` in the non-stream mode, or
+                `AsyncStream[ChatCompletionChunk]` in the stream mode,
+                or `AsyncChatCompletionStreamManager[BaseModel]` in the
+                structured stream mode.
+        """
         pass
 
     def run(
@@ -324,7 +370,11 @@ class BaseModelBackend(ABC, metaclass=ModelBackendMeta):
         messages: List[OpenAIMessage],
         response_format: Optional[Type[BaseModel]] = None,
         tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+    ) -> Union[
+        ChatCompletion,
+        Stream[ChatCompletionChunk],
+        ChatCompletionStreamManager[BaseModel],
+    ]:
         r"""Runs the query to the backend model.
 
         Args:
@@ -338,9 +388,11 @@ class BaseModelBackend(ABC, metaclass=ModelBackendMeta):
                 (default: :obj:`None`)
 
         Returns:
-            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-                `ChatCompletion` in the non-stream mode, or
-                `Stream[ChatCompletionChunk]` in the stream mode.
+            Union[ChatCompletion, Stream[ChatCompletionChunk], Any]:
+                `ChatCompletion` in the non-stream mode,
+                `Stream[ChatCompletionChunk]` in the stream mode, or
+                `ChatCompletionStreamManager[BaseModel]` in the structured
+                stream mode.
         """
         # Log the request if logging is enabled
         log_path = self._log_request(messages)
@@ -365,7 +417,11 @@ class BaseModelBackend(ABC, metaclass=ModelBackendMeta):
         messages: List[OpenAIMessage],
         response_format: Optional[Type[BaseModel]] = None,
         tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+    ) -> Union[
+        ChatCompletion,
+        AsyncStream[ChatCompletionChunk],
+        AsyncChatCompletionStreamManager[BaseModel],
+    ]:
         r"""Runs the query to the backend model asynchronously.
 
         Args:
@@ -379,9 +435,11 @@ class BaseModelBackend(ABC, metaclass=ModelBackendMeta):
                 (default: :obj:`None`)
 
         Returns:
-            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
-                `ChatCompletion` in the non-stream mode, or
-                `AsyncStream[ChatCompletionChunk]` in the stream mode.
+            Union[ChatCompletion, AsyncStream[ChatCompletionChunk], Any]:
+                `ChatCompletion` in the non-stream mode,
+                `AsyncStream[ChatCompletionChunk]` in the stream mode, or
+                `AsyncChatCompletionStreamManager[BaseModel]` in the structured
+                stream mode.
         """
         # Log the request if logging is enabled
         log_path = self._log_request(messages)
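
Because run() and arun() can now hand back three different types, callers narrow the union at runtime. A sketch of the dispatch (the `backend` and `messages` variables are illustrative):

    from openai import Stream
    from openai.lib.streaming.chat import ChatCompletionStreamManager

    result = backend.run(messages)

    if isinstance(result, ChatCompletionStreamManager):
        # Structured-output streaming: open and drain the managed stream.
        with result as stream:
            final = stream.get_final_completion()
    elif isinstance(result, Stream):
        # Plain token streaming: iterate ChatCompletionChunk objects.
        for chunk in result:
            ...
    else:
        # Non-streaming ChatCompletion.
        final = result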
camel/models/deepseek_model.py CHANGED
@@ -40,6 +40,11 @@ if os.environ.get("LANGFUSE_ENABLED", "False").lower() == "true":
         from langfuse.decorators import observe
     except ImportError:
         from camel.utils import observe
+elif os.environ.get("TRACEROOT_ENABLED", "False").lower() == "true":
+    try:
+        from traceroot import trace as observe  # type: ignore[import]
+    except ImportError:
+        from camel.utils import observe
 else:
     from camel.utils import observe
 
camel/models/gemini_model.py CHANGED
@@ -37,6 +37,11 @@ if os.environ.get("LANGFUSE_ENABLED", "False").lower() == "true":
         from langfuse.decorators import observe
     except ImportError:
         from camel.utils import observe
+elif os.environ.get("TRACEROOT_ENABLED", "False").lower() == "true":
+    try:
+        from traceroot import trace as observe  # type: ignore[import]
+    except ImportError:
+        from camel.utils import observe
 else:
     from camel.utils import observe
 
camel/models/litellm_model.py CHANGED
@@ -12,6 +12,7 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 import os
+import uuid
 from typing import Any, Dict, List, Optional, Type, Union
 
 from pydantic import BaseModel
@@ -98,23 +99,47 @@ class LiteLLMModel(BaseModelBackend):
         Returns:
             ChatCompletion: The response object in OpenAI's format.
         """
-        return ChatCompletion.construct(
-            id=response.id,
-            choices=[
+
+        converted_choices = []
+        for choice in response.choices:
+            # Build the assistant message dict
+            msg_dict: Dict[str, Any] = {
+                "role": choice.message.role,
+                "content": choice.message.content,
+            }
+
+            if getattr(choice.message, "tool_calls", None):
+                msg_dict["tool_calls"] = choice.message.tool_calls
+
+            elif getattr(choice.message, "function_call", None):
+                func_call = choice.message.function_call
+                msg_dict["tool_calls"] = [
+                    {
+                        "id": f"call_{uuid.uuid4().hex[:24]}",
+                        "type": "function",
+                        "function": {
+                            "name": getattr(func_call, "name", None),
+                            "arguments": getattr(func_call, "arguments", "{}"),
+                        },
+                    }
+                ]
+
+            converted_choices.append(
                 {
-                    "index": response.choices[0].index,
-                    "message": {
-                        "role": response.choices[0].message.role,
-                        "content": response.choices[0].message.content,
-                    },
-                    "finish_reason": response.choices[0].finish_reason,
+                    "index": choice.index,
+                    "message": msg_dict,
+                    "finish_reason": choice.finish_reason,
                 }
-            ],
-            created=response.created,
-            model=response.model,
-            object=response.object,
-            system_fingerprint=response.system_fingerprint,
-            usage=response.usage,
+            )
+
+        return ChatCompletion.construct(
+            id=response.id,
+            choices=converted_choices,
+            created=getattr(response, "created", None),
+            model=getattr(response, "model", None),
+            object=getattr(response, "object", None),
+            system_fingerprint=getattr(response, "system_fingerprint", None),
+            usage=getattr(response, "usage", None),
        )
 
     @property
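
The rewritten converter walks every choice, keeps native tool_calls when present, and upgrades a legacy function_call into the tool_calls shape with a synthesized id. A standalone sketch of that normalization (hypothetical helper, mirroring the id format used above):

    import uuid

    def synthesize_tool_call(name: str, arguments: str) -> dict:
        # "call_" plus 24 hex chars, matching the converter's synthetic ids
        return {
            "id": f"call_{uuid.uuid4().hex[:24]}",
            "type": "function",
            "function": {"name": name, "arguments": arguments},
        }

    print(synthesize_tool_call("search", '{"q": "camel"}'))
    # {'id': 'call_1f0c...', 'type': 'function', 'function': {...}}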
@@ -148,6 +173,13 @@ class LiteLLMModel(BaseModelBackend):
         Returns:
             ChatCompletion
         """
+
+        request_config = self.model_config_dict.copy()
+        if tools:
+            request_config['tools'] = tools
+        if response_format:
+            request_config['response_format'] = response_format
+
         update_current_observation(
             input={
                 "messages": messages,
@@ -176,7 +208,7 @@ class LiteLLMModel(BaseModelBackend):
             base_url=self._url,
             model=self.model_type,
             messages=messages,
-            **self.model_config_dict,
+            **request_config,
             **self.kwargs,
         )
         response = self._convert_response_from_litellm_to_openai(response)
camel/models/model_manager.py CHANGED
@@ -27,6 +27,10 @@ from typing import (
 )
 
 from openai import AsyncStream, Stream
+from openai.lib.streaming.chat import (
+    AsyncChatCompletionStreamManager,
+    ChatCompletionStreamManager,
+)
 from pydantic import BaseModel
 
 from camel.messages import OpenAIMessage
@@ -196,7 +200,11 @@ class ModelManager:
         messages: List[OpenAIMessage],
         response_format: Optional[Type[BaseModel]] = None,
         tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+    ) -> Union[
+        ChatCompletion,
+        Stream[ChatCompletionChunk],
+        ChatCompletionStreamManager[BaseModel],
+    ]:
         r"""Process a list of messages by selecting a model based on
         the scheduling strategy.
         Sends the entire list of messages to the selected model,
@@ -207,9 +215,12 @@ class ModelManager:
                 history in OpenAI API format.
 
         Returns:
-            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+            Union[ChatCompletion, Stream[ChatCompletionChunk],
+                ChatCompletionStreamManager[BaseModel]]:
                 `ChatCompletion` in the non-stream mode, or
-                `Stream[ChatCompletionChunk]` in the stream mode.
+                `Stream[ChatCompletionChunk]` in the stream mode, or
+                `ChatCompletionStreamManager[BaseModel]` for
+                structured-output stream.
         """
         self.current_model = self.scheduling_strategy()
 
@@ -233,7 +244,11 @@ class ModelManager:
         messages: List[OpenAIMessage],
         response_format: Optional[Type[BaseModel]] = None,
         tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+    ) -> Union[
+        ChatCompletion,
+        AsyncStream[ChatCompletionChunk],
+        AsyncChatCompletionStreamManager[BaseModel],
+    ]:
         r"""Process a list of messages by selecting a model based on
         the scheduling strategy.
         Sends the entire list of messages to the selected model,
@@ -244,9 +259,12 @@ class ModelManager:
                 history in OpenAI API format.
 
         Returns:
-            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+            Union[ChatCompletion, AsyncStream[ChatCompletionChunk],
+                AsyncChatCompletionStreamManager[BaseModel]]:
                 `ChatCompletion` in the non-stream mode, or
-                `AsyncStream[ChatCompletionChunk]` in the stream mode.
+                `AsyncStream[ChatCompletionChunk]` in the stream mode, or
+                `AsyncChatCompletionStreamManager[BaseModel]` for
+                structured-output stream.
         """
         async with self.lock:
             self.current_model = self.scheduling_strategy()