camel-ai 0.1.5.2__py3-none-any.whl → 0.1.5.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

@@ -32,6 +32,7 @@ from camel.responses import ChatAgentResponse
 from camel.types import (
     ChatCompletion,
     ChatCompletionChunk,
+    ModelPlatformType,
     ModelType,
     OpenAIBackendRole,
     RoleType,
@@ -41,7 +42,6 @@ from camel.utils import get_model_encoding
 if TYPE_CHECKING:
     from openai import Stream
 
-    from camel.configs import BaseConfig
     from camel.functions import OpenAIFunction
     from camel.terminators import ResponseTerminator
 
@@ -80,10 +80,9 @@ class ChatAgent(BaseAgent):
 
     Args:
         system_message (BaseMessage): The system message for the chat agent.
-        model_type (ModelType, optional): The LLM model to use for generating
-            responses. (default :obj:`ModelType.GPT_3_5_TURBO`)
-        model_config (BaseConfig, optional): Configuration options for the
-            LLM model. (default: :obj:`None`)
+        model (BaseModelBackend, optional): The model backend to use for
+            generating responses. (default: :obj:`OpenAIModel` with
+            `GPT_3_5_TURBO`)
         api_key (str, optional): The API key for authenticating with the
             LLM service. Only OpenAI and Anthropic model supported (default:
             :obj:`None`)
@@ -109,8 +108,7 @@ class ChatAgent(BaseAgent):
     def __init__(
         self,
         system_message: BaseMessage,
-        model_type: Optional[ModelType] = None,
-        model_config: Optional[BaseConfig] = None,
+        model: Optional[BaseModelBackend] = None,
         api_key: Optional[str] = None,
         memory: Optional[AgentMemory] = None,
         message_window_size: Optional[int] = None,
@@ -123,24 +121,30 @@ class ChatAgent(BaseAgent):
         self.system_message = system_message
         self.role_name: str = system_message.role_name
         self.role_type: RoleType = system_message.role_type
+        self._api_key = api_key
+        self.model_backend: BaseModelBackend = (
+            model
+            if model is not None
+            else ModelFactory.create(
+                model_platform=ModelPlatformType.OPENAI,
+                model_type=ModelType.GPT_3_5_TURBO,
+                model_config_dict=ChatGPTConfig().__dict__,
+                api_key=self._api_key,
+            )
+        )
         self.output_language: Optional[str] = output_language
         if self.output_language is not None:
             self.set_output_language(self.output_language)
 
-        self.model_type: ModelType = (
-            model_type if model_type is not None else ModelType.GPT_3_5_TURBO
-        )
+        self.model_type: ModelType = self.model_backend.model_type
 
         self.func_dict: Dict[str, Callable] = {}
         if tools is not None:
             for func in tools:
                 self.func_dict[func.get_function_name()] = func.func
 
-        self.model_config = model_config or ChatGPTConfig()
-        self._api_key = api_key
-        self.model_backend: BaseModelBackend = ModelFactory.create(
-            self.model_type, self.model_config.__dict__, self._api_key
-        )
+        self.model_config_dict = self.model_backend.model_config_dict
+
         self.model_token_limit = token_limit or self.model_backend.token_limit
         context_creator = ScoreBasedContextCreator(
             self.model_backend.token_counter,
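
The hunks above are the heart of this release: `ChatAgent` drops the `model_type`/`model_config` pair in favor of a single injected `model` backend, and when none is supplied it builds a default OpenAI `GPT_3_5_TURBO` backend through `ModelFactory`. A minimal migration sketch, assuming `ModelFactory` lives in `camel.models` alongside the `BaseModelBackend` imported above, and using a hypothetical `temperature` override:

    from camel.agents import ChatAgent
    from camel.configs import ChatGPTConfig
    from camel.messages import BaseMessage
    from camel.models import ModelFactory
    from camel.types import ModelPlatformType, ModelType, RoleType

    sys_msg = BaseMessage(
        role_name="Assistant",
        role_type=RoleType.ASSISTANT,
        meta_dict=None,
        content="You are a helpful assistant.",
    )

    # 0.1.5.2 style: ChatAgent(sys_msg, model_type=..., model_config=...)
    # 0.1.5.3 style: build the backend first, then inject it.
    model = ModelFactory.create(
        model_platform=ModelPlatformType.OPENAI,
        model_type=ModelType.GPT_3_5_TURBO,
        model_config_dict=ChatGPTConfig(temperature=0.2).__dict__,
    )
    agent = ChatAgent(sys_msg, model=model)  # omit model= for the default backend

Since the agent now reads `model_type` and `model_config_dict` off the backend, any `BaseModelBackend` implementation can be dropped in, not just OpenAI's.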
@@ -643,7 +647,7 @@ class ChatAgent(BaseAgent):
         func = self.func_dict[func_name]
 
         args_str: str = choice.message.tool_calls[0].function.arguments
-        args = json.loads(args_str.replace("'", "\""))
+        args = json.loads(args_str)
 
         # Pass the extracted arguments to the indicated function
         try:
@@ -702,7 +706,7 @@ class ChatAgent(BaseAgent):
         func = self.func_dict[func_name]
 
         args_str: str = choice.message.tool_calls[0].function.arguments
-        args = json.loads(args_str.replace("'", "\""))
+        args = json.loads(args_str)
 
         # Pass the extracted arguments to the indicated function
         try:
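
Dropping the quote-swapping hack is a genuine bug fix, not just cleanup: tool-call arguments arrive as a JSON string already, and blindly replacing every apostrophe with a double quote corrupts any argument whose text contains one. A small illustration:

    import json

    args_str = '{"query": "What\'s the weather?"}'  # valid JSON from a tool call

    json.loads(args_str)                     # OK: {'query': "What's the weather?"}
    json.loads(args_str.replace("'", '"'))   # raises json.JSONDecodeError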
@@ -20,8 +20,8 @@ from colorama import Fore
 from camel.agents.chat_agent import ChatAgent
 from camel.memories import AgentMemory
 from camel.messages import BaseMessage
+from camel.models import BaseModelBackend
 from camel.responses import ChatAgentResponse
-from camel.types import ModelType
 from camel.utils import get_first_int, print_text_animated
 
 
@@ -31,10 +31,9 @@ class CriticAgent(ChatAgent):
     Args:
         system_message (BaseMessage): The system message for the critic
            agent.
-        model_type (ModelType, optional): The LLM model to use for generating
-            responses. (default :obj:`ModelType.GPT_3_5_TURBO`)
-        model_config (Any, optional): Configuration options for the LLM model.
-            (default: :obj:`None`)
+        model (BaseModelBackend, optional): The model backend to use for
+            generating responses. (default: :obj:`OpenAIModel` with
+            `GPT_3_5_TURBO`)
         message_window_size (int, optional): The maximum number of previous
             messages to include in the context window. If `None`, no windowing
             is performed. (default: :obj:`6`)
@@ -48,8 +47,7 @@ class CriticAgent(ChatAgent):
     def __init__(
         self,
         system_message: BaseMessage,
-        model_type: ModelType = ModelType.GPT_3_5_TURBO,
-        model_config: Optional[Any] = None,
+        model: Optional[BaseModelBackend] = None,
         memory: Optional[AgentMemory] = None,
         message_window_size: int = 6,
         retry_attempts: int = 2,
@@ -58,8 +56,7 @@ class CriticAgent(ChatAgent):
     ) -> None:
         super().__init__(
             system_message,
-            model_type=model_type,
-            model_config=model_config,
+            model=model,
             memory=memory,
             message_window_size=message_window_size,
         )
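
The same mechanical change repeats for every `ChatAgent` subclass in the rest of this diff: drop `model_type`/`model_config`, accept `model`, and pass it through as a keyword. Constructing the updated `CriticAgent` might look like this (a sketch; the import path and `RoleType.CRITIC` mirror usage seen elsewhere in the library, and the factory call mirrors the `ChatAgent` default above):

    from camel.agents import CriticAgent
    from camel.configs import ChatGPTConfig
    from camel.messages import BaseMessage
    from camel.models import ModelFactory
    from camel.types import ModelPlatformType, ModelType, RoleType

    critic_msg = BaseMessage(
        role_name="critic",
        role_type=RoleType.CRITIC,
        meta_dict=None,
        content="You pick the most promising option.",
    )

    critic = CriticAgent(critic_msg)  # model=None -> default GPT_3_5_TURBO backend

    # Or inject any BaseModelBackend explicitly, e.g. a GPT-4 backend:
    critic = CriticAgent(
        critic_msg,
        model=ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI,
            model_type=ModelType.GPT_4,
            model_config_dict=ChatGPTConfig().__dict__,
        ),
    )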
@@ -15,10 +15,10 @@ import re
 from typing import Dict, List, Optional, Union
 
 from camel.agents.chat_agent import ChatAgent
-from camel.configs import BaseConfig
 from camel.messages import BaseMessage
+from camel.models import BaseModelBackend
 from camel.prompts import TextPrompt
-from camel.types import ModelType, RoleType
+from camel.types import RoleType
 
 
 class DeductiveReasonerAgent(ChatAgent):
@@ -33,16 +33,14 @@ class DeductiveReasonerAgent(ChatAgent):
     - L represents the path or process from A to B.
 
     Args:
-        model_type (ModelType, optional): The type of model to use for the
-            agent. (default: :obj: `None`)
-        model_config (BaseConfig, optional): The configuration for the model.
-            (default: :obj:`None`)
+        model (BaseModelBackend, optional): The model backend to use for
+            generating responses. (default: :obj:`OpenAIModel` with
+            `GPT_3_5_TURBO`)
     """
 
     def __init__(
         self,
-        model_type: Optional[ModelType] = None,
-        model_config: Optional[BaseConfig] = None,
+        model: Optional[BaseModelBackend] = None,
     ) -> None:
         system_message = BaseMessage(
             role_name="Insight Agent",
@@ -50,7 +48,7 @@ class DeductiveReasonerAgent(ChatAgent):
             meta_dict=None,
             content="You assign roles based on tasks.",
         )
-        super().__init__(system_message, model_type, model_config)
+        super().__init__(system_message, model=model)
 
     def deduce_conditions_and_quality(
         self,
@@ -23,8 +23,8 @@ from camel.interpreters import (
     SubprocessInterpreter,
 )
 from camel.messages import BaseMessage
+from camel.models import BaseModelBackend
 from camel.responses import ChatAgentResponse
-from camel.types import ModelType
 from camel.utils import print_text_animated
 
 
@@ -33,10 +33,9 @@ class EmbodiedAgent(ChatAgent):
 
     Args:
         system_message (BaseMessage): The system message for the chat agent.
-        model_type (ModelType, optional): The LLM model to use for generating
-            responses. (default :obj:`ModelType.GPT_4`)
-        model_config (Any, optional): Configuration options for the LLM model.
-            (default: :obj:`None`)
+        model (BaseModelBackend, optional): The model backend to use for
+            generating responses. (default: :obj:`OpenAIModel` with
+            `GPT_3_5_TURBO`)
         message_window_size (int, optional): The maximum number of previous
             messages to include in the context window. If `None`, no windowing
             is performed. (default: :obj:`None`)
@@ -55,8 +54,7 @@ class EmbodiedAgent(ChatAgent):
     def __init__(
         self,
         system_message: BaseMessage,
-        model_type: ModelType = ModelType.GPT_4,
-        model_config: Optional[Any] = None,
+        model: Optional[BaseModelBackend] = None,
         message_window_size: Optional[int] = None,
         tool_agents: Optional[List[BaseToolAgent]] = None,
         code_interpreter: Optional[BaseInterpreter] = None,
@@ -78,8 +76,7 @@ class EmbodiedAgent(ChatAgent):
         self.logger_color = logger_color
         super().__init__(
             system_message=system_message,
-            model_type=model_type,
-            model_config=model_config,
+            model=model,
             message_window_size=message_window_size,
         )
 
@@ -11,19 +11,20 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
-from typing import Any, Optional, Union
+from typing import Optional, Union
 
 from unstructured.documents.elements import Element
 
 from camel.agents import ChatAgent
 from camel.messages import BaseMessage
+from camel.models import BaseModelBackend
 from camel.prompts import TextPrompt
 from camel.storages.graph_storages.graph_element import (
     GraphElement,
     Node,
     Relationship,
 )
-from camel.types import ModelType, RoleType
+from camel.types import RoleType
 
 text_prompt = """
 You are tasked with extracting nodes and relationships from given content and
@@ -105,16 +106,14 @@ class KnowledgeGraphAgent(ChatAgent):
 
     def __init__(
         self,
-        model_type: ModelType = ModelType.GPT_3_5_TURBO,
-        model_config: Optional[Any] = None,
+        model: Optional[BaseModelBackend] = None,
     ) -> None:
         r"""Initialize the `KnowledgeGraphAgent`.
 
         Args:
-            model_type (ModelType, optional): The type of model to use for the
-                agent. Defaults to `ModelType.GPT_3_5_TURBO`.
-            model_config (Any, optional): The configuration for the model.
-                Defaults to `None`.
+            model (BaseModelBackend, optional): The model backend to use for
+                generating responses. (default: :obj:`OpenAIModel` with
+                `GPT_3_5_TURBO`)
         """
         system_message = BaseMessage(
             role_name="Graphify",
@@ -126,7 +125,7 @@ class KnowledgeGraphAgent(ChatAgent):
             "illuminate the hidden connections within the chaos of "
             "information.",
         )
-        super().__init__(system_message, model_type, model_config)
+        super().__init__(system_message, model=model)
 
     def run(
         self,
@@ -12,32 +12,31 @@
 # limitations under the License.
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
 import re
-from typing import Any, Dict, Optional, Union
+from typing import Dict, Optional, Union
 
 from camel.agents.chat_agent import ChatAgent
 from camel.messages import BaseMessage
+from camel.models import BaseModelBackend
 from camel.prompts import TextPrompt
-from camel.types import ModelType, RoleType
+from camel.types import RoleType
 
 
 class RoleAssignmentAgent(ChatAgent):
     r"""An agent that generates role names based on the task prompt.
 
+    Args:
+        model (BaseModelBackend, optional): The model backend to use for
+            generating responses. (default: :obj:`OpenAIModel` with
+            `GPT_3_5_TURBO`)
+
     Attributes:
         role_assignment_prompt (TextPrompt): A prompt for the agent to generate
             role names.
-
-    Args:
-        model_type (ModelType, optional): The type of model to use for the
-            agent. (default: :obj:`ModelType.GPT_3_5_TURBO`)
-        model_config (Any, optional): The configuration for the model.
-            (default: :obj:`None`)
     """
 
     def __init__(
         self,
-        model_type: ModelType = ModelType.GPT_3_5_TURBO,
-        model_config: Optional[Any] = None,
+        model: Optional[BaseModelBackend] = None,
     ) -> None:
         system_message = BaseMessage(
             role_name="Role Assigner",
@@ -45,7 +44,7 @@ class RoleAssignmentAgent(ChatAgent):
             meta_dict=None,
             content="You assign roles based on tasks.",
         )
-        super().__init__(system_message, model_type, model_config)
+        super().__init__(system_message, model=model)
 
     def run(
         self,
@@ -11,12 +11,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
-from typing import Any, Optional
+from typing import Optional
 
 from camel.agents.chat_agent import ChatAgent
 from camel.messages import BaseMessage
+from camel.models import BaseModelBackend
 from camel.prompts import TextPrompt
-from camel.types import ModelType, RoleType
+from camel.types import RoleType
 from camel.utils import create_chunks
 
 
@@ -33,8 +34,7 @@ class SearchAgent(ChatAgent):
 
     def __init__(
         self,
-        model_type: ModelType = ModelType.GPT_3_5_TURBO,
-        model_config: Optional[Any] = None,
+        model: Optional[BaseModelBackend] = None,
     ) -> None:
         system_message = BaseMessage(
             role_name="Assistant",
@@ -42,7 +42,7 @@ class SearchAgent(ChatAgent):
             meta_dict=None,
             content="You are a helpful assistant.",
         )
-        super().__init__(system_message, model_type, model_config)
+        super().__init__(system_message, model=model)
 
     def summarize_text(self, text: str, query: str) -> str:
         r"""Summarize the information from the text, base on the query.
@@ -14,10 +14,10 @@
 from typing import Any, Dict, List, Optional, Union
 
 from camel.agents.chat_agent import ChatAgent
-from camel.configs import ChatGPTConfig
 from camel.messages import BaseMessage
+from camel.models import BaseModelBackend
 from camel.prompts import PromptTemplateGenerator, TextPrompt
-from camel.types import ModelType, RoleType, TaskType
+from camel.types import RoleType, TaskType
 from camel.utils import get_task_list
 
 
@@ -30,27 +30,25 @@ class TaskSpecifyAgent(ChatAgent):
        task_specify_prompt (TextPrompt): The prompt for specifying the task.
 
    Args:
-        model_type (ModelType, optional): The type of model to use for the
-            agent. (default: :obj:`ModelType.GPT_3_5_TURBO`)
+        model (BaseModelBackend, optional): The model backend to use for
+            generating responses. (default: :obj:`OpenAIModel` with
+            `GPT_3_5_TURBO`)
         task_type (TaskType, optional): The type of task for which to generate
             a prompt. (default: :obj:`TaskType.AI_SOCIETY`)
-        model_config (Any, optional): The configuration for the model.
-            (default: :obj:`None`)
         task_specify_prompt (Union[str, TextPrompt], optional): The prompt for
             specifying the task. (default: :obj:`None`)
         word_limit (int, optional): The word limit for the task prompt.
             (default: :obj:`50`)
         output_language (str, optional): The language to be output by the
-          agent. (default: :obj:`None`)
+            agent. (default: :obj:`None`)
     """
 
     DEFAULT_WORD_LIMIT = 50
 
     def __init__(
         self,
-        model_type: Optional[ModelType] = None,
+        model: Optional[BaseModelBackend] = None,
         task_type: TaskType = TaskType.AI_SOCIETY,
-        model_config: Optional[Any] = None,
         task_specify_prompt: Optional[Union[str, TextPrompt]] = None,
         word_limit: int = DEFAULT_WORD_LIMIT,
         output_language: Optional[str] = None,
@@ -67,8 +65,6 @@ class TaskSpecifyAgent(ChatAgent):
         else:
             self.task_specify_prompt = TextPrompt(task_specify_prompt)
 
-        model_config = model_config or ChatGPTConfig(temperature=1.0)
-
         system_message = BaseMessage(
             role_name="Task Specifier",
             role_type=RoleType.ASSISTANT,
@@ -78,8 +74,7 @@ class TaskSpecifyAgent(ChatAgent):
 
         super().__init__(
             system_message,
-            model_type=model_type,
-            model_config=model_config,
+            model=model,
             output_language=output_language,
         )
 
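Note a subtle behavioral change here: with the deleted line `model_config = model_config or ChatGPTConfig(temperature=1.0)`, `TaskSpecifyAgent` no longer forces `temperature=1.0` by default; it now uses whatever configuration its model backend carries. A sketch of restoring the old sampling behavior explicitly, under the same `ModelFactory` keywords shown earlier in this diff:

    from camel.agents import TaskSpecifyAgent
    from camel.configs import ChatGPTConfig
    from camel.models import ModelFactory
    from camel.types import ModelPlatformType, ModelType, TaskType

    # Recreate the pre-0.1.5.3 default of temperature=1.0 explicitly.
    model = ModelFactory.create(
        model_platform=ModelPlatformType.OPENAI,
        model_type=ModelType.GPT_3_5_TURBO,
        model_config_dict=ChatGPTConfig(temperature=1.0).__dict__,
    )
    task_specifier = TaskSpecifyAgent(model=model, task_type=TaskType.AI_SOCIETY)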
@@ -130,18 +125,16 @@ class TaskPlannerAgent(ChatAgent):
         the task into subtasks.
 
     Args:
-        model_type (ModelType, optional): The type of model to use for the
-            agent. (default: :obj:`ModelType.GPT_3_5_TURBO`)
-        model_config (Any, optional): The configuration for the model.
-            (default: :obj:`None`)
+        model (BaseModelBackend, optional): The model backend to use for
+            generating responses. (default: :obj:`OpenAIModel` with
+            `GPT_3_5_TURBO`)
         output_language (str, optional): The language to be output by the
-          agent. (default: :obj:`None`)
+            agent. (default: :obj:`None`)
     """
 
     def __init__(
         self,
-        model_type: Optional[ModelType] = None,
-        model_config: Optional[Any] = None,
+        model: Optional[BaseModelBackend] = None,
         output_language: Optional[str] = None,
     ) -> None:
         self.task_planner_prompt = TextPrompt(
@@ -156,8 +149,7 @@ class TaskPlannerAgent(ChatAgent):
 
         super().__init__(
             system_message,
-            model_type,
-            model_config,
+            model=model,
             output_language=output_language,
         )
 
@@ -208,10 +200,9 @@ class TaskCreationAgent(ChatAgent):
         role_name (str): The role name of the Agent to create the task.
         objective (Union[str, TextPrompt]): The objective of the Agent to
             perform the task.
-        model_type (ModelType, optional): The type of model to use for the
-            agent. (default: :obj:`ModelType.GPT_3_5_TURBO`)
-        model_config (Any, optional): The configuration for the model.
-            (default: :obj:`None`)
+        model (BaseModelBackend, optional): The LLM backend to use for
+            generating responses. (default: :obj:`OpenAIModel` with
+            `GPT_3_5_TURBO`)
         output_language (str, optional): The language to be output by the
             agent. (default: :obj:`None`)
         message_window_size (int, optional): The maximum number of previous
@@ -225,8 +216,7 @@ class TaskCreationAgent(ChatAgent):
         self,
         role_name: str,
         objective: Union[str, TextPrompt],
-        model_type: Optional[ModelType] = None,
-        model_config: Optional[Any] = None,
+        model: Optional[BaseModelBackend] = None,
         output_language: Optional[str] = None,
         message_window_size: Optional[int] = None,
         max_task_num: Optional[int] = 3,
@@ -266,8 +256,7 @@ Be concrete.
 
         super().__init__(
             system_message,
-            model_type,
-            model_config,
+            model=model,
             output_language=output_language,
             message_window_size=message_window_size,
         )
@@ -282,6 +271,7 @@ Be concrete.
         Args:
             task_list (List[str]): The completed or in-progress
                 tasks which should not overlap with new created tasks.
+
         Returns:
             List[str]: The new task list generated by the Agent.
         """
@@ -321,10 +311,9 @@ class TaskPrioritizationAgent(ChatAgent):
     Args:
         objective (Union[str, TextPrompt]): The objective of the Agent to
             perform the task.
-        model_type (ModelType, optional): The type of model to use for the
-            agent. (default: :obj:`ModelType.GPT_3_5_TURBO`)
-        model_config (Any, optional): The configuration for the model.
-            (default: :obj:`None`)
+        model (BaseModelBackend, optional): The LLM backend to use for
+            generating responses. (default: :obj:`OpenAIModel` with
+            `GPT_3_5_TURBO`)
         output_language (str, optional): The language to be output by the
             agent. (default: :obj:`None`)
         message_window_size (int, optional): The maximum number of previous
@@ -335,8 +324,7 @@ class TaskPrioritizationAgent(ChatAgent):
     def __init__(
         self,
         objective: Union[str, TextPrompt],
-        model_type: Optional[ModelType] = None,
-        model_config: Optional[Any] = None,
+        model: Optional[BaseModelBackend] = None,
         output_language: Optional[str] = None,
         message_window_size: Optional[int] = None,
     ) -> None:
@@ -372,8 +360,7 @@ with any other output."""
 
         super().__init__(
             system_message,
-            model_type,
-            model_config,
+            model=model,
             output_language=output_language,
             message_window_size=message_window_size,
         )
@@ -386,6 +373,7 @@ with any other output."""
 
         Args:
             task_list (List[str]): The unprioritized tasks of agent.
+
         Returns:
             List[str]: The new prioritized task list generated by the Agent.
         """
@@ -41,6 +41,19 @@ class ChatGPTConfig(BaseConfig):
             (default: :obj:`1.0`)
         n (int, optional): How many chat completion choices to generate for
             each input message. (default: :obj:`1`)
+        response_format (object, optional): An object specifying the format
+            that the model must output. Compatible with GPT-4 Turbo and all
+            GPT-3.5 Turbo models newer than gpt-3.5-turbo-1106. Setting to
+            {"type": "json_object"} enables JSON mode, which guarantees the
+            message the model generates is valid JSON. Important: when using
+            JSON mode, you must also instruct the model to produce JSON
+            yourself via a system or user message. Without this, the model
+            may generate an unending stream of whitespace until the generation
+            reaches the token limit, resulting in a long-running and seemingly
+            "stuck" request. Also note that the message content may be
+            partially cut off if finish_reason="length", which indicates the
+            generation exceeded max_tokens or the conversation exceeded the
+            max context length.
         stream (bool, optional): If True, partial message deltas will be sent
             as data-only server-sent events as they become available.
             (default: :obj:`False`)
@@ -95,6 +108,7 @@ class ChatGPTConfig(BaseConfig):
     stop: str | Sequence[str] | NotGiven = NOT_GIVEN
     max_tokens: int | NotGiven = NOT_GIVEN
     presence_penalty: float = 0.0
+    response_format: dict | NotGiven = NOT_GIVEN
     frequency_penalty: float = 0.0
     logit_bias: dict = field(default_factory=dict)
     user: str = ""
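
With the new `response_format` field, OpenAI's JSON mode can be requested through `ChatGPTConfig`. A minimal sketch; as the docstring added above warns, the prompt must still explicitly instruct the model to emit JSON:

    from camel.agents import ChatAgent
    from camel.configs import ChatGPTConfig
    from camel.messages import BaseMessage
    from camel.models import ModelFactory
    from camel.types import ModelPlatformType, ModelType, RoleType

    config = ChatGPTConfig(response_format={"type": "json_object"})
    model = ModelFactory.create(
        model_platform=ModelPlatformType.OPENAI,
        model_type=ModelType.GPT_3_5_TURBO,
        model_config_dict=config.__dict__,
    )
    sys_msg = BaseMessage(
        role_name="Assistant",
        role_type=RoleType.ASSISTANT,
        meta_dict=None,
        # JSON mode still requires an explicit instruction to produce JSON.
        content="You are a helpful assistant. Always answer with valid JSON.",
    )
    agent = ChatAgent(sys_msg, model=model)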
@@ -13,16 +13,15 @@
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
 import json
 import os
-from typing import Any, Callable, Dict, List, Tuple
+from typing import Any, Callable, Dict, List, Optional, Tuple
 
-import prance
 import requests
 
 from camel.functions import OpenAIFunction, openapi_security_config
 from camel.types import OpenAPIName
 
 
-def parse_openapi_file(openapi_spec_path: str) -> Dict[str, Any]:
+def parse_openapi_file(openapi_spec_path: str) -> Optional[Dict[str, Any]]:
     r"""Load and parse an OpenAPI specification file.
 
     This function utilizes the `prance.ResolvingParser` to parse and resolve
@@ -34,8 +33,14 @@ def parse_openapi_file(openapi_spec_path: str) -> Dict[str, Any]:
         specification.
 
     Returns:
-        Dict[str, Any]: The parsed OpenAPI specification as a dictionary.
+        Optional[Dict[str, Any]]: The parsed OpenAPI specification
+            as a dictionary. :obj:`None` if the package is not installed.
     """
+    try:
+        import prance
+    except Exception:
+        return None
+
     # Load the OpenAPI spec
     parser = prance.ResolvingParser(
         openapi_spec_path, backend="openapi-spec-validator", strict=False
@@ -451,6 +456,8 @@ def apinames_filepaths_to_funs_schemas(
         )
 
         openapi_spec = parse_openapi_file(file_path)
+        if openapi_spec is None:
+            return [], []
 
         # Generate and merge function schemas
         openapi_functions_schemas = openapi_spec_to_openai_schemas(
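
Together these hunks make `prance` an optional dependency: `parse_openapi_file` now returns `None` when the import fails instead of crashing at module import time, and `apinames_filepaths_to_funs_schemas` bails out with empty lists. Callers should therefore treat an empty result as "OpenAPI support unavailable". A usage sketch (the module path and spec path are assumptions, not shown in this diff):

    # Module path assumed; the diff shows only the function bodies.
    from camel.functions.open_api_function import parse_openapi_file

    spec = parse_openapi_file("specs/petstore.yaml")  # hypothetical spec file
    if spec is None:
        print("Install `prance` to enable OpenAPI function loading.")
    else:
        print(spec["info"]["title"])  # every OpenAPI spec carries an info block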