camel-ai 0.1.3__py3-none-any.whl → 0.1.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of camel-ai might be problematic. Click here for more details.

Files changed (45) hide show
  1. camel/__init__.py +1 -1
  2. camel/agents/__init__.py +2 -0
  3. camel/agents/chat_agent.py +40 -53
  4. camel/agents/knowledge_graph_agent.py +221 -0
  5. camel/configs/__init__.py +29 -0
  6. camel/configs/anthropic_config.py +73 -0
  7. camel/configs/base_config.py +22 -0
  8. camel/configs/openai_config.py +132 -0
  9. camel/embeddings/openai_embedding.py +7 -2
  10. camel/functions/__init__.py +13 -8
  11. camel/functions/open_api_function.py +380 -0
  12. camel/functions/open_api_specs/coursera/__init__.py +13 -0
  13. camel/functions/open_api_specs/coursera/openapi.yaml +82 -0
  14. camel/functions/open_api_specs/klarna/__init__.py +13 -0
  15. camel/functions/open_api_specs/klarna/openapi.yaml +87 -0
  16. camel/functions/open_api_specs/speak/__init__.py +13 -0
  17. camel/functions/open_api_specs/speak/openapi.yaml +151 -0
  18. camel/functions/openai_function.py +3 -1
  19. camel/functions/retrieval_functions.py +61 -0
  20. camel/functions/slack_functions.py +275 -0
  21. camel/models/__init__.py +2 -0
  22. camel/models/anthropic_model.py +16 -2
  23. camel/models/base_model.py +8 -2
  24. camel/models/model_factory.py +7 -3
  25. camel/models/openai_audio_models.py +251 -0
  26. camel/models/openai_model.py +12 -4
  27. camel/models/stub_model.py +5 -1
  28. camel/retrievers/__init__.py +2 -0
  29. camel/retrievers/auto_retriever.py +47 -36
  30. camel/retrievers/base.py +42 -37
  31. camel/retrievers/bm25_retriever.py +10 -19
  32. camel/retrievers/cohere_rerank_retriever.py +108 -0
  33. camel/retrievers/vector_retriever.py +43 -26
  34. camel/storages/vectordb_storages/qdrant.py +3 -1
  35. camel/toolkits/__init__.py +21 -0
  36. camel/toolkits/base.py +22 -0
  37. camel/toolkits/github_toolkit.py +245 -0
  38. camel/types/__init__.py +6 -0
  39. camel/types/enums.py +44 -3
  40. camel/utils/__init__.py +4 -2
  41. camel/utils/commons.py +97 -173
  42. {camel_ai-0.1.3.dist-info → camel_ai-0.1.4.dist-info}/METADATA +9 -3
  43. {camel_ai-0.1.3.dist-info → camel_ai-0.1.4.dist-info}/RECORD +44 -26
  44. camel/configs.py +0 -271
  45. {camel_ai-0.1.3.dist-info → camel_ai-0.1.4.dist-info}/WHEEL +0 -0
camel/configs.py DELETED
@@ -1,271 +0,0 @@
1
- # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
2
- # Licensed under the Apache License, Version 2.0 (the "License");
3
- # you may not use this file except in compliance with the License.
4
- # You may obtain a copy of the License at
5
- #
6
- # http://www.apache.org/licenses/LICENSE-2.0
7
- #
8
- # Unless required by applicable law or agreed to in writing, software
9
- # distributed under the License is distributed on an "AS IS" BASIS,
10
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
- # See the License for the specific language governing permissions and
12
- # limitations under the License.
13
- # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
- from __future__ import annotations
15
-
16
- from abc import ABC
17
- from dataclasses import asdict, dataclass, field
18
- from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Union
19
-
20
- from anthropic._types import NOT_GIVEN, NotGiven
21
-
22
- if TYPE_CHECKING:
23
- from camel.functions import OpenAIFunction
24
-
25
-
26
- @dataclass(frozen=True)
27
- class BaseConfig(ABC): # noqa: B024
28
- pass
29
-
30
-
31
- @dataclass(frozen=True)
32
- class ChatGPTConfig(BaseConfig):
33
- r"""Defines the parameters for generating chat completions using the
34
- OpenAI API.
35
-
36
- Args:
37
- temperature (float, optional): Sampling temperature to use, between
38
- :obj:`0` and :obj:`2`. Higher values make the output more random,
39
- while lower values make it more focused and deterministic.
40
- (default: :obj:`0.2`)
41
- top_p (float, optional): An alternative to sampling with temperature,
42
- called nucleus sampling, where the model considers the results of
43
- the tokens with top_p probability mass. So :obj:`0.1` means only
44
- the tokens comprising the top 10% probability mass are considered.
45
- (default: :obj:`1.0`)
46
- n (int, optional): How many chat completion choices to generate for
47
- each input message. (default: :obj:`1`)
48
- stream (bool, optional): If True, partial message deltas will be sent
49
- as data-only server-sent events as they become available.
50
- (default: :obj:`False`)
51
- stop (str or list, optional): Up to :obj:`4` sequences where the API
52
- will stop generating further tokens. (default: :obj:`None`)
53
- max_tokens (int, optional): The maximum number of tokens to generate
54
- in the chat completion. The total length of input tokens and
55
- generated tokens is limited by the model's context length.
56
- (default: :obj:`None`)
57
- presence_penalty (float, optional): Number between :obj:`-2.0` and
58
- :obj:`2.0`. Positive values penalize new tokens based on whether
59
- they appear in the text so far, increasing the model's likelihood
60
- to talk about new topics. See more information about frequency and
61
- presence penalties. (default: :obj:`0.0`)
62
- frequency_penalty (float, optional): Number between :obj:`-2.0` and
63
- :obj:`2.0`. Positive values penalize new tokens based on their
64
- existing frequency in the text so far, decreasing the model's
65
- likelihood to repeat the same line verbatim. See more information
66
- about frequency and presence penalties. (default: :obj:`0.0`)
67
- logit_bias (dict, optional): Modify the likelihood of specified tokens
68
- appearing in the completion. Accepts a json object that maps tokens
69
- (specified by their token ID in the tokenizer) to an associated
70
- bias value from :obj:`-100` to :obj:`100`. Mathematically, the bias
71
- is added to the logits generated by the model prior to sampling.
72
- The exact effect will vary per model, but values between :obj:`-1`
73
- and :obj:`1` should decrease or increase likelihood of selection;
74
- values like :obj:`-100` or :obj:`100` should result in a ban or
75
- exclusive selection of the relevant token. (default: :obj:`{}`)
76
- user (str, optional): A unique identifier representing your end-user,
77
- which can help OpenAI to monitor and detect abuse.
78
- (default: :obj:`""`)
79
- """
80
-
81
- temperature: float = 0.2 # openai default: 1.0
82
- top_p: float = 1.0
83
- n: int = 1
84
- stream: bool = False
85
- stop: Optional[Union[str, Sequence[str]]] = None
86
- max_tokens: Optional[int] = None
87
- presence_penalty: float = 0.0
88
- frequency_penalty: float = 0.0
89
- logit_bias: Dict = field(default_factory=dict)
90
- user: str = ""
91
-
92
-
93
- @dataclass(frozen=True)
94
- class ChatGPTVisionConfig(BaseConfig):
95
- r"""Defines the parameters for generating chat completions with
96
- vision model using the OpenAI API. The vision config here is the
97
- subset of the :class:`ChatGPTConfig`.
98
-
99
- Args:
100
- temperature (float, optional): Sampling temperature to use, between
101
- :obj:`0` and :obj:`2`. Higher values make the output more random,
102
- while lower values make it more focused and deterministic.
103
- (default: :obj:`0.2`)
104
- top_p (float, optional): An alternative to sampling with temperature,
105
- called nucleus sampling, where the model considers the results of
106
- the tokens with top_p probability mass. So :obj:`0.1` means only
107
- the tokens comprising the top 10% probability mass are considered.
108
- (default: :obj:`1.0`)
109
- n (int, optional): How many chat completion choices to generate for
110
- each input message. (default: :obj:`1`)
111
- stream (bool, optional): If True, partial message deltas will be sent
112
- as data-only server-sent events as they become available.
113
- (default: :obj:`False`)
114
- max_tokens (int, optional): The maximum number of tokens to generate
115
- in the chat completion. The total length of input tokens and
116
- generated tokens is limited by the model's context length.
117
- (default: :obj:`4096`)
118
- presence_penalty (float, optional): Number between :obj:`-2.0` and
119
- :obj:`2.0`. Positive values penalize new tokens based on whether
120
- they appear in the text so far, increasing the model's likelihood
121
- to talk about new topics. See more information about frequency and
122
- presence penalties. (default: :obj:`0.0`)
123
- frequency_penalty (float, optional): Number between :obj:`-2.0` and
124
- :obj:`2.0`. Positive values penalize new tokens based on their
125
- existing frequency in the text so far, decreasing the model's
126
- likelihood to repeat the same line verbatim. See more information
127
- about frequency and presence penalties. (default: :obj:`0.0`)
128
- user (str, optional): A unique identifier representing your end-user,
129
- which can help OpenAI to monitor and detect abuse.
130
- (default: :obj:`""`)
131
- """
132
-
133
- temperature: float = 0.2 # openai default: 1.0
134
- top_p: float = 1.0
135
- n: int = 1
136
- stream: bool = False
137
- max_tokens: int = 4096
138
- presence_penalty: float = 0.0
139
- frequency_penalty: float = 0.0
140
- user: str = ""
141
-
142
-
143
- @dataclass(frozen=True)
144
- class FunctionCallingConfig(ChatGPTConfig):
145
- r"""Defines the parameters for generating chat completions using the
146
- OpenAI API with functions included.
147
-
148
- Args:
149
- functions (List[Dict[str, Any]]): A list of functions the model may
150
- generate JSON inputs for.
151
- function_call (Union[Dict[str, str], str], optional): Controls how the
152
- model responds to function calls. :obj:`"none"` means the model
153
- does not call a function, and responds to the end-user.
154
- :obj:`"auto"` means the model can pick between an end-user or
155
- calling a function. Specifying a particular function via
156
- :obj:`{"name": "my_function"}` forces the model to call that
157
- function. (default: :obj:`"auto"`)
158
- """
159
-
160
- functions: List[Dict[str, Any]] = field(default_factory=list)
161
- function_call: Union[Dict[str, str], str] = "auto"
162
-
163
- @classmethod
164
- def from_openai_function_list(
165
- cls,
166
- function_list: List[OpenAIFunction],
167
- function_call: Union[Dict[str, str], str] = "auto",
168
- kwargs: Optional[Dict[str, Any]] = None,
169
- ):
170
- r"""Class method for creating an instance given the function-related
171
- arguments.
172
-
173
- Args:
174
- function_list (List[OpenAIFunction]): The list of function objects
175
- to be loaded into this configuration and passed to the model.
176
- function_call (Union[Dict[str, str], str], optional): Controls how
177
- the model responds to function calls, as specified in the
178
- creator's documentation.
179
- kwargs (Optional[Dict[str, Any]]): The extra modifications to be
180
- made on the original settings defined in :obj:`ChatGPTConfig`.
181
-
182
- Return:
183
- FunctionCallingConfig: A new instance which loads the given
184
- function list into a list of dictionaries and the input
185
- :obj:`function_call` argument.
186
- """
187
- return cls(
188
- functions=[
189
- func.get_openai_function_schema() for func in function_list
190
- ],
191
- function_call=function_call,
192
- **(kwargs or {}),
193
- )
194
-
195
-
196
- @dataclass(frozen=True)
197
- class OpenSourceConfig(BaseConfig):
198
- r"""Defines parameters for setting up open-source models and includes
199
- parameters to be passed to chat completion function of OpenAI API.
200
-
201
- Args:
202
- model_path (str): The path to a local folder containing the model
203
- files or the model card in HuggingFace hub.
204
- server_url (str): The URL to the server running the model inference
205
- which will be used as the API base of OpenAI API.
206
- api_params (ChatGPTConfig): An instance of :obj:ChatGPTConfig to
207
- contain the arguments to be passed to OpenAI API.
208
- """
209
-
210
- model_path: str
211
- server_url: str
212
- api_params: ChatGPTConfig = field(default_factory=ChatGPTConfig)
213
-
214
-
215
- OPENAI_API_PARAMS = {param for param in asdict(ChatGPTConfig()).keys()}
216
- OPENAI_API_PARAMS_WITH_FUNCTIONS = {
217
- param for param in asdict(FunctionCallingConfig()).keys()
218
- }
219
-
220
-
221
- @dataclass(frozen=True)
222
- class AnthropicConfig(BaseConfig):
223
- r"""Defines the parameters for generating chat completions using the
224
- Anthropic API.
225
-
226
- See: https://docs.anthropic.com/claude/reference/complete_post
227
- Args:
228
- max_tokens (int, optional): The maximum number of tokens to
229
- generate before stopping. Note that Anthropic models may stop
230
- before reaching this maximum. This parameter only specifies the
231
- absolute maximum number of tokens to generate.
232
- (default: :obj:`256`)
233
- stop_sequences (List[str], optional): Sequences that will cause the
234
- model to stop generating completion text. Anthropic models stop
235
- on "\n\nHuman:", and may include additional built-in stop sequences
236
- in the future. By providing the stop_sequences parameter, you may
237
- include additional strings that will cause the model to stop
238
- generating.
239
- temperature (float, optional): Amount of randomness injected into the
240
- response. Defaults to 1. Ranges from 0 to 1. Use temp closer to 0
241
- for analytical / multiple choice, and closer to 1 for creative
242
- and generative tasks.
243
- (default: :obj:`1`)
244
- top_p (float, optional): Use nucleus sampling. In nucleus sampling, we
245
- compute the cumulative distribution over all the options for each
246
- subsequent token in decreasing probability order and cut it off
247
- once it reaches a particular probability specified by `top_p`.
248
- You should either alter `temperature` or `top_p`,
249
- but not both.
250
- (default: :obj:`0.7`)
251
- top_k (int, optional): Only sample from the top K options for each
252
- subsequent token. Used to remove "long tail" low probability
253
- responses.
254
- (default: :obj:`5`)
255
- metadata: An object describing metadata about the request.
256
- stream (bool, optional): Whether to incrementally stream the response
257
- using server-sent events.
258
- (default: :obj:`False`)
259
-
260
- """
261
-
262
- max_tokens: int = 256
263
- stop_sequences: Union[List[str], NotGiven] = NOT_GIVEN
264
- temperature: float = 1
265
- top_p: Union[float, NotGiven] = NOT_GIVEN
266
- top_k: Union[int, NotGiven] = NOT_GIVEN
267
- metadata: NotGiven = NOT_GIVEN
268
- stream: bool = False
269
-
270
-
271
- ANTHROPIC_API_PARAMS = {param for param in asdict(AnthropicConfig()).keys()}