camel-ai 0.1.6.6__py3-none-any.whl → 0.1.6.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +0 -6
- camel/configs/__init__.py +6 -0
- camel/configs/samba_config.py +50 -0
- camel/configs/togetherai_config.py +107 -0
- camel/models/__init__.py +4 -0
- camel/models/groq_model.py +5 -5
- camel/models/litellm_model.py +1 -1
- camel/models/model_factory.py +9 -0
- camel/models/ollama_model.py +6 -4
- camel/models/openai_compatibility_model.py +3 -3
- camel/models/samba_model.py +291 -0
- camel/models/togetherai_model.py +148 -0
- camel/models/vllm_model.py +7 -5
- camel/models/zhipuai_model.py +2 -2
- camel/retrievers/auto_retriever.py +2 -27
- camel/toolkits/__init__.py +3 -0
- camel/toolkits/linkedin_toolkit.py +230 -0
- camel/types/enums.py +31 -5
- camel/utils/__init__.py +2 -0
- camel/utils/commons.py +22 -0
- {camel_ai-0.1.6.6.dist-info → camel_ai-0.1.6.7.dist-info}/METADATA +2 -2
- {camel_ai-0.1.6.6.dist-info → camel_ai-0.1.6.7.dist-info}/RECORD +24 -19
- {camel_ai-0.1.6.6.dist-info → camel_ai-0.1.6.7.dist-info}/WHEEL +0 -0
camel/models/togetherai_model.py
ADDED

@@ -0,0 +1,148 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+
+import os
+from typing import Any, Dict, List, Optional, Union
+
+from openai import OpenAI, Stream
+
+from camel.configs import TOGETHERAI_API_PARAMS
+from camel.messages import OpenAIMessage
+from camel.types import ChatCompletion, ChatCompletionChunk, ModelType
+from camel.utils import (
+    BaseTokenCounter,
+    OpenAITokenCounter,
+    api_keys_required,
+)
+
+
+class TogetherAIModel:
+    r"""Constructor for Together AI backend with OpenAI compatibility.
+    TODO: Add function calling support
+    """
+
+    def __init__(
+        self,
+        model_type: str,
+        model_config_dict: Dict[str, Any],
+        api_key: Optional[str] = None,
+        url: Optional[str] = None,
+        token_counter: Optional[BaseTokenCounter] = None,
+    ) -> None:
+        r"""Constructor for TogetherAI backend.
+
+        Args:
+            model_type (str): Model for which a backend is created; supported
+                models can be found here: https://docs.together.ai/docs/chat-models
+            model_config_dict (Dict[str, Any]): A dictionary that will
+                be fed into openai.ChatCompletion.create().
+            api_key (Optional[str]): The API key for authenticating with the
+                Together service. (default: :obj:`None`)
+            url (Optional[str]): The url to the Together AI service. (default:
+                :obj:`"https://api.together.xyz/v1"`)
+            token_counter (Optional[BaseTokenCounter]): Token counter to use
+                for the model. If not provided, `OpenAITokenCounter(ModelType.
+                GPT_4O_MINI)` will be used.
+        """
+        self.model_type = model_type
+        self.model_config_dict = model_config_dict
+        self._token_counter = token_counter
+        self._api_key = api_key or os.environ.get("TOGETHER_API_KEY")
+        self._url = url or os.environ.get("TOGETHER_API_BASE_URL")
+
+        self._client = OpenAI(
+            timeout=60,
+            max_retries=3,
+            api_key=self._api_key,
+            base_url=self._url or "https://api.together.xyz/v1",
+        )
+
+    @api_keys_required("TOGETHER_API_KEY")
+    def run(
+        self,
+        messages: List[OpenAIMessage],
+    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+        r"""Runs inference of OpenAI chat completion.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+
+        Returns:
+            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `Stream[ChatCompletionChunk]` in the stream mode.
+        """
+        # Use the OpenAI client as the interface to call Together AI
+        # Reference: https://docs.together.ai/docs/openai-api-compatibility
+        response = self._client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **self.model_config_dict,
+        )
+        return response
+
+    @property
+    def token_counter(self) -> BaseTokenCounter:
+        r"""Initialize the token counter for the model backend.
+
+        Returns:
+            OpenAITokenCounter: The token counter following the model's
+                tokenization style.
+        """
+
+        if not self._token_counter:
+            self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
+        return self._token_counter
+
+    def check_model_config(self):
+        r"""Check whether the model configuration contains any
+        unexpected arguments to TogetherAI API.
+
+        Raises:
+            ValueError: If the model configuration dictionary contains any
+                unexpected arguments to TogetherAI API.
+        """
+        for param in self.model_config_dict:
+            if param not in TOGETHERAI_API_PARAMS:
+                raise ValueError(
+                    f"Unexpected argument `{param}` is "
+                    "input into TogetherAI model backend."
+                )
+
+    @property
+    def stream(self) -> bool:
+        r"""Returns whether the model is in stream mode, which sends partial
+        results each time.
+
+        Returns:
+            bool: Whether the model is in stream mode.
+        """
+        return self.model_config_dict.get('stream', False)
+
+    @property
+    def token_limit(self) -> int:
+        r"""Returns the maximum token limit for the given model.
+
+        Returns:
+            int: The maximum token limit for the given model.
+        """
+        max_tokens = self.model_config_dict.get("max_tokens")
+        if isinstance(max_tokens, int):
+            return max_tokens
+        print(
+            "Must set `max_tokens` as an integer in `model_config_dict` when"
+            " setting up the model. Using 4096 as default value."
+        )
+        return 4096
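A minimal usage sketch of the new backend, assuming `TOGETHER_API_KEY` is set and that `temperature` and `max_tokens` are among the accepted `TOGETHERAI_API_PARAMS` (the model name is an illustrative entry from Together's catalog, not taken from this diff):

import os
from camel.models.togetherai_model import TogetherAIModel

os.environ.setdefault("TOGETHER_API_KEY", "<your-key>")  # checked by @api_keys_required

model = TogetherAIModel(
    model_type="meta-llama/Llama-3-8b-chat-hf",  # illustrative catalog entry
    model_config_dict={"temperature": 0.7, "max_tokens": 1024},
)
response = model.run(
    [{"role": "user", "content": "Say hello in one sentence."}]
)
print(response.choices[0].message.content)

Because `max_tokens` is set, `model.token_limit` resolves to 1024 rather than printing the warning and falling back to 4096.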
camel/models/vllm_model.py
CHANGED

@@ -11,6 +11,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+import os
 from typing import Any, Dict, List, Optional, Union

 from openai import OpenAI, Stream
@@ -42,20 +43,21 @@ class VLLMModel:
             model_config_dict (Dict[str, Any]): A dictionary that will
                 be fed into openai.ChatCompletion.create().
             url (Optional[str]): The url to the model service. (default:
-                :obj:`
+                :obj:`"http://localhost:8000/v1"`)
             api_key (Optional[str]): The API key for authenticating with the
                 model service.
             token_counter (Optional[BaseTokenCounter]): Token counter to use
                 for the model. If not provided, `OpenAITokenCounter(ModelType.
-
+                GPT_4O_MINI)` will be used.
         """
         self.model_type = model_type
         self.model_config_dict = model_config_dict
+        self._url = url or os.environ.get("VLLM_BASE_URL")
         # Use the OpenAI client as the interface to call vLLM
         self._client = OpenAI(
             timeout=60,
             max_retries=3,
-            base_url=
+            base_url=self._url or "http://localhost:8000/v1",
             api_key=api_key,
         )
         self._token_counter = token_counter
@@ -70,7 +72,7 @@ class VLLMModel:
                 tokenization style.
         """
         if not self._token_counter:
-            self._token_counter = OpenAITokenCounter(ModelType.
+            self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
         return self._token_counter

     def check_model_config(self):
@@ -113,7 +115,7 @@ class VLLMModel:

     @property
     def token_limit(self) -> int:
-        """Returns the maximum token limit for the given model.
+        r"""Returns the maximum token limit for the given model.

         Returns:
             int: The maximum token limit for the given model.
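The added `VLLM_BASE_URL` lookup gives the backend a three-level URL fallback: an explicit `url` argument wins, then the environment variable, then the hard-coded `http://localhost:8000/v1`. A sketch under those assumptions (host and model name are hypothetical):

import os
from camel.models.vllm_model import VLLMModel

os.environ["VLLM_BASE_URL"] = "http://10.0.0.5:8000/v1"  # hypothetical vLLM server

# With no explicit `url`, the environment variable is used; if it were
# unset as well, the client would fall back to "http://localhost:8000/v1".
model = VLLMModel(
    model_type="Qwen/Qwen2-7B-Instruct",  # hypothetical served model
    model_config_dict={"max_tokens": 512},
)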
camel/models/zhipuai_model.py
CHANGED

@@ -52,7 +52,7 @@ class ZhipuAIModel(BaseModelBackend):
                 :obj:`None`)
             token_counter (Optional[BaseTokenCounter]): Token counter to use
                 for the model. If not provided, `OpenAITokenCounter(ModelType.
-
+                GPT_4O_MINI)` will be used.
         """
         super().__init__(
             model_type, model_config_dict, api_key, url, token_counter
@@ -105,7 +105,7 @@ class ZhipuAIModel(BaseModelBackend):
         """

         if not self._token_counter:
-            self._token_counter = OpenAITokenCounter(ModelType.
+            self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
         return self._token_counter

     def check_model_config(self):
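Both this backend and vLLM above now lazily default to `OpenAITokenCounter(ModelType.GPT_4O_MINI)`. A quick sketch of using that counter directly, assuming `BaseTokenCounter` exposes a `count_tokens_from_messages` method (the method name is not shown in this diff):

from camel.types import ModelType
from camel.utils import OpenAITokenCounter

counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
# Count tokens for an OpenAI-format message list.
n = counter.count_tokens_from_messages(
    [{"role": "user", "content": "Hello, world"}]
)
print(n)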
camel/retrievers/auto_retriever.py
CHANGED

@@ -14,9 +14,7 @@
 import datetime
 import os
 import re
-from pathlib import Path
 from typing import Collection, List, Optional, Sequence, Tuple, Union
-from urllib.parse import urlparse

 from camel.embeddings import BaseEmbedding, OpenAIEmbedding
 from camel.retrievers.vector_retriever import VectorRetriever
@@ -112,35 +110,12 @@ class AutoRetriever:
         Returns:
             str: A sanitized, valid collection name suitable for use.
         """
+
         if isinstance(content, Element):
             content = content.metadata.file_directory

-
-        parsed_url = urlparse(content)
-        is_url = all([parsed_url.scheme, parsed_url.netloc])
-
-        # Convert given path into a collection name, ensuring it only
-        # contains numbers, letters, and underscores
-        if is_url:
-            # For URLs, remove https://, replace /, and any characters not
-            # allowed by Milvus with _
-            collection_name = re.sub(
-                r'[^0-9a-zA-Z]+',
-                '_',
-                content.replace("https://", ""),
-            )
-        elif os.path.exists(content):
-            # For file paths, get the stem and replace spaces with _, also
-            # ensuring only allowed characters are present
-            collection_name = re.sub(r'[^0-9a-zA-Z]+', '_', Path(content).stem)
-        else:
-            # the content is string input
-            collection_name = content[:10]
+        collection_name = re.sub(r'[^a-zA-Z0-9]', '', content)[:20]

-        # Ensure the collection name does not start or end with underscore
-        collection_name = collection_name.strip("_")
-        # Limit the maximum length of the collection name to 30 characters
-        collection_name = collection_name[:30]
         return collection_name

     def _get_file_modified_date_from_file(
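The rewrite collapses the old URL/path/string branches into one rule: drop every non-alphanumeric character, then keep at most 20 characters. A standalone sketch of the new behaviour (the helper name is for illustration only):

import re

def sanitize(content: str) -> str:
    # Mirrors the new one-liner in AutoRetriever's collection-name helper.
    return re.sub(r'[^a-zA-Z0-9]', '', content)[:20]

print(sanitize("https://docs.camel-ai.org/intro"))  # httpsdocscamelaiorgi
print(sanitize("local file.pdf"))                   # localfilepdf

One consequence worth noting: two inputs that share their first 20 alphanumeric characters now map to the same collection name.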
camel/toolkits/__init__.py
CHANGED

@@ -28,6 +28,7 @@ from .twitter_toolkit import TWITTER_FUNCS, TwitterToolkit
 from .weather_toolkit import WEATHER_FUNCS, WeatherToolkit
 from .slack_toolkit import SLACK_FUNCS, SlackToolkit
 from .dalle_toolkit import DALLE_FUNCS, DalleToolkit
+from .linkedin_toolkit import LINKEDIN_FUNCS, LinkedInToolkit

 from .base import BaseToolkit
 from .code_execution import CodeExecutionToolkit
@@ -47,6 +48,7 @@ __all__ = [
     'WEATHER_FUNCS',
     'SLACK_FUNCS',
     'DALLE_FUNCS',
+    'LINKEDIN_FUNCS',
     'BaseToolkit',
     'GithubToolkit',
     'MathToolkit',
@@ -58,5 +60,6 @@ __all__ = [
     'WeatherToolkit',
     'RetrievalToolkit',
     'OpenAPIToolkit',
+    'LinkedInToolkit',
     'CodeExecutionToolkit',
 ]
camel/toolkits/linkedin_toolkit.py
ADDED

@@ -0,0 +1,230 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+
+import json
+import os
+from http import HTTPStatus
+from typing import List
+
+import requests
+
+from camel.toolkits import OpenAIFunction
+from camel.toolkits.base import BaseToolkit
+from camel.utils import handle_http_error
+
+LINKEDIN_POST_LIMIT = 1300
+
+
+class LinkedInToolkit(BaseToolkit):
+    r"""A class representing a toolkit for LinkedIn operations.
+
+    This class provides methods for creating a post, deleting a post, and
+    retrieving the authenticated user's profile information.
+    """
+
+    def __init__(self):
+        self._access_token = self._get_access_token()
+
+    def create_post(self, text: str) -> dict:
+        r"""Creates a post on LinkedIn for the authenticated user.
+
+        Args:
+            text (str): The content of the post to be created.
+
+        Returns:
+            dict: A dictionary containing the post ID and the content of
+                the post. If the post creation fails, the values will be None.
+
+        Raises:
+            Exception: If the post creation fails due to
+                an error response from LinkedIn API.
+        """
+        url = 'https://api.linkedin.com/v2/ugcPosts'
+        urn = self.get_profile(include_id=True)
+
+        headers = {
+            'X-Restli-Protocol-Version': '2.0.0',
+            'Content-Type': 'application/json',
+            'Authorization': f'Bearer {self._access_token}',
+        }
+
+        post_data = {
+            "author": urn['id'],
+            "lifecycleState": "PUBLISHED",
+            "specificContent": {
+                "com.linkedin.ugc.ShareContent": {
+                    "shareCommentary": {"text": text},
+                    "shareMediaCategory": "NONE",
+                }
+            },
+            "visibility": {
+                "com.linkedin.ugc.MemberNetworkVisibility": "PUBLIC"
+            },
+        }
+
+        response = requests.post(
+            url, headers=headers, data=json.dumps(post_data)
+        )
+        if response.status_code == 201:
+            post_response = response.json()
+            post_id = post_response.get('id', None)  # Get the ID of the post
+            return {'Post ID': post_id, 'Text': text}
+        else:
+            raise Exception(
+                f"Failed to create post. Status code: {response.status_code}, "
+                f"Response: {response.text}"
+            )
+
+    def delete_post(self, post_id: str) -> str:
+        r"""Deletes a LinkedIn post with the specified ID
+        for an authorized user.
+
+        This function sends a DELETE request to the LinkedIn API to delete
+        a post with the specified ID. Before sending the request, it
+        prompts the user to confirm the deletion.
+
+        Args:
+            post_id (str): The ID of the post to delete.
+
+        Returns:
+            str: A message indicating the result of the deletion. If the
+                deletion was successful, the message includes the ID of the
+                deleted post. If the deletion was not successful, the message
+                includes an error message.
+
+        Reference:
+            https://docs.microsoft.com/en-us/linkedin/marketing/integrations/community-management/shares/ugc-post-api
+        """
+        print(
+            "You are going to delete a LinkedIn post "
+            f"with the following ID: {post_id}"
+        )
+
+        confirm = input(
+            "Are you sure you want to delete this post? (yes/no): "
+        )
+        if confirm.lower() != "yes":
+            return "Execution cancelled by the user."
+
+        headers = {
+            "Authorization": f"Bearer {self._access_token}",
+            "Content-Type": "application/json",
+        }
+
+        response = requests.delete(
+            f"https://api.linkedin.com/v2/ugcPosts/{post_id}",
+            headers=headers,
+        )
+
+        if response.status_code != HTTPStatus.NO_CONTENT:
+            error_type = handle_http_error(response)
+            return (
+                f"Request returned a(n) {error_type!s}: "
+                f"{response.status_code!s} {response.text}"
+            )
+
+        return f"Post deleted successfully. Post ID: {post_id}."
+
+    def get_profile(self, include_id: bool = False) -> dict:
+        r"""Retrieves the authenticated user's LinkedIn profile info.
+
+        This function sends a GET request to the LinkedIn API to retrieve the
+        authenticated user's profile information. Optionally, it also returns
+        the user's LinkedIn ID.
+
+        Args:
+            include_id (bool): Whether to include the LinkedIn profile ID in
+                the response.
+
+        Returns:
+            dict: A dictionary containing the user's LinkedIn profile
+                information. If `include_id` is True, the dictionary will also
+                include the profile ID.
+
+        Raises:
+            Exception: If the profile retrieval fails due to an error response
+                from LinkedIn API.
+        """
+        headers = {
+            "Authorization": f"Bearer {self._access_token}",
+            'Connection': 'Keep-Alive',
+            'Content-Type': 'application/json',
+            "X-Restli-Protocol-Version": "2.0.0",
+        }
+
+        response = requests.get(
+            "https://api.linkedin.com/v2/userinfo",
+            headers=headers,
+        )
+
+        if response.status_code != HTTPStatus.OK:
+            raise Exception(
+                f"Failed to retrieve profile. "
+                f"Status code: {response.status_code}, "
+                f"Response: {response.text}"
+            )
+
+        json_response = response.json()
+
+        locale = json_response.get('locale', {})
+        country = locale.get('country', 'N/A')
+        language = locale.get('language', 'N/A')
+
+        profile_report = {
+            "Country": country,
+            "Language": language,
+            "First Name": json_response.get('given_name'),
+            "Last Name": json_response.get('family_name'),
+            "Email": json_response.get('email'),
+        }
+
+        if include_id:
+            profile_report['id'] = f"urn:li:person:{json_response['sub']}"
+
+        return profile_report
+
+    def get_tools(self) -> List[OpenAIFunction]:
+        r"""Returns a list of OpenAIFunction objects representing the
+        functions in the toolkit.
+
+        Returns:
+            List[OpenAIFunction]: A list of OpenAIFunction objects
+                representing the functions in the toolkit.
+        """
+        return [
+            OpenAIFunction(self.create_post),
+            OpenAIFunction(self.delete_post),
+            OpenAIFunction(self.get_profile),
+        ]
+
+    def _get_access_token(self) -> str:
+        r"""Fetches the access token required for making LinkedIn API requests.
+
+        Returns:
+            str: The OAuth 2.0 access token, or a warning message if the
+                environment variable `LINKEDIN_ACCESS_TOKEN` is not set or is
+                empty.
+
+        Reference:
+            You can apply for your personal LinkedIn API access token through
+            the link below:
+            https://www.linkedin.com/developers/apps
+        """
+        token = os.getenv("LINKEDIN_ACCESS_TOKEN")
+        if not token:
+            return "Access token not found. Please set LINKEDIN_ACCESS_TOKEN."
+        return token
+
+
+LINKEDIN_FUNCS: List[OpenAIFunction] = LinkedInToolkit().get_tools()
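A minimal usage sketch, assuming a valid OAuth token in `LINKEDIN_ACCESS_TOKEN` (the module declares `LINKEDIN_POST_LIMIT` as a 1300-character cap, though enforcement is not shown in this diff):

import os
from camel.toolkits import LinkedInToolkit

os.environ["LINKEDIN_ACCESS_TOKEN"] = "<oauth-token>"  # placeholder

toolkit = LinkedInToolkit()
profile = toolkit.get_profile(include_id=True)
print(profile["First Name"], profile["id"])  # id is a urn:li:person:... URN

result = toolkit.create_post(text="Hello from CAMEL!")
print(result)  # {'Post ID': ..., 'Text': 'Hello from CAMEL!'}

Note that `LINKEDIN_FUNCS` instantiates the toolkit at import time, so the token lookup happens as soon as the module is imported.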
camel/types/enums.py
CHANGED

@@ -81,13 +81,16 @@ class ModelType(Enum):
     MISTRAL_MIXTRAL_8x22B = "open-mixtral-8x22b"
     MISTRAL_CODESTRAL_MAMBA = "open-codestral-mamba"

+    # SambaNova Model
+    SAMBA_LLAMA_3_1_405B = "llama3-405b"
+    SAMBA_LLAMA_3_1_70B = "llama3-70b"
+    SAMBA_LLAMA_3_1_8B = "llama3-8b"
+
     @property
     def value_for_tiktoken(self) -> str:
-
-            self.value
-
-            else "gpt-3.5-turbo"
-        )
+        if self.is_openai:
+            return self.value
+        return "gpt-3.5-turbo"

     @property
     def is_openai(self) -> bool:
@@ -192,6 +195,14 @@
     def is_gemini(self) -> bool:
         return self in {ModelType.GEMINI_1_5_FLASH, ModelType.GEMINI_1_5_PRO}

+    @property
+    def is_samba(self) -> bool:
+        return self in {
+            ModelType.SAMBA_LLAMA_3_1_405B,
+            ModelType.SAMBA_LLAMA_3_1_70B,
+            ModelType.SAMBA_LLAMA_3_1_8B,
+        }
+
     @property
     def token_limit(self) -> int:
         r"""Returns the maximum token limit for a given model.
@@ -251,6 +262,9 @@
             ModelType.GROQ_LLAMA_3_1_8B,
             ModelType.GROQ_LLAMA_3_1_70B,
             ModelType.GROQ_LLAMA_3_1_405B,
+            ModelType.SAMBA_LLAMA_3_1_405B,
+            ModelType.SAMBA_LLAMA_3_1_70B,
+            ModelType.SAMBA_LLAMA_3_1_8B,
         }:
             return 131_072
         elif self in {
@@ -444,7 +458,9 @@ class ModelPlatformType(Enum):
     GEMINI = "gemini"
     VLLM = "vllm"
     MISTRAL = "mistral"
+    TOGETHER = "together"
     OPENAI_COMPATIBILITY_MODEL = "openai-compatibility-model"
+    SAMBA = "samba-nova"

     @property
     def is_openai(self) -> bool:
@@ -476,6 +492,11 @@ class ModelPlatformType(Enum):
         r"""Returns whether this platform is vllm."""
         return self is ModelPlatformType.VLLM

+    @property
+    def is_together(self) -> bool:
+        r"""Returns whether this platform is together."""
+        return self is ModelPlatformType.TOGETHER
+
     @property
     def is_litellm(self) -> bool:
         r"""Returns whether this platform is litellm."""
@@ -507,6 +528,11 @@ class ModelPlatformType(Enum):
         r"""Returns whether this platform is Gemini."""
         return self is ModelPlatformType.GEMINI

+    @property
+    def is_samba(self) -> bool:
+        r"""Returns whether this platform is Samba Nova."""
+        return self is ModelPlatformType.SAMBA
+

 class AudioModelType(Enum):
     TTS_1 = "tts-1"
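The new members plug into the existing properties. A quick sketch, assuming both enums are re-exported from `camel.types` as `ModelType` already is elsewhere in this diff:

from camel.types import ModelPlatformType, ModelType

m = ModelType.SAMBA_LLAMA_3_1_70B
print(m.is_samba)            # True
print(m.token_limit)         # 131072, grouped with the other Llama 3.1 models
print(m.value_for_tiktoken)  # "gpt-3.5-turbo", the non-OpenAI fallback

print(ModelPlatformType.SAMBA.is_samba)        # True
print(ModelPlatformType.TOGETHER.is_together)  # True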
camel/utils/__init__.py
CHANGED

@@ -27,6 +27,7 @@ from .commons import (
     get_pydantic_object_schema,
     get_system_information,
     get_task_list,
+    handle_http_error,
     is_docker_running,
     json_to_function_code,
     print_text_animated,
@@ -76,4 +77,5 @@ __all__ = [
     'agentops_decorator',
     'AgentOpsMeta',
     'track_agent',
+    'handle_http_error',
 ]
camel/utils/commons.py
CHANGED

@@ -20,6 +20,7 @@ import subprocess
 import time
 import zipfile
 from functools import wraps
+from http import HTTPStatus
 from typing import (
     Any,
     Callable,
@@ -547,3 +548,24 @@ def track_agent(*args, **kwargs):
         return f

     return noop
+
+
+def handle_http_error(response: requests.Response) -> str:
+    r"""Handles the HTTP errors based on the status code of the response.
+
+    Args:
+        response (requests.Response): The HTTP response from the API call.
+
+    Returns:
+        str: The error type, based on the status code.
+    """
+    if response.status_code == HTTPStatus.UNAUTHORIZED:
+        return "Unauthorized. Check your access token."
+    elif response.status_code == HTTPStatus.FORBIDDEN:
+        return "Forbidden. You do not have permission to perform this action."
+    elif response.status_code == HTTPStatus.NOT_FOUND:
+        return "Not Found. The resource could not be located."
+    elif response.status_code == HTTPStatus.TOO_MANY_REQUESTS:
+        return "Too Many Requests. You have hit the rate limit."
+    else:
+        return "HTTP Error"
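Since `handle_http_error` only inspects `status_code` (and `HTTPStatus` members compare equal to plain ints), it can be exercised without a live request:

import requests

from camel.utils import handle_http_error

resp = requests.Response()
resp.status_code = 404  # simulate a failed API call
print(handle_http_error(resp))  # Not Found. The resource could not be located.

resp.status_code = 429
print(handle_http_error(resp))  # Too Many Requests. You have hit the rate limit.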
{camel_ai-0.1.6.6.dist-info → camel_ai-0.1.6.7.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: camel-ai
-Version: 0.1.6.6
+Version: 0.1.6.7
 Summary: Communicative Agents for AI Society Study
 Home-page: https://www.camel-ai.org/
 License: Apache-2.0
@@ -202,7 +202,7 @@ conda create --name camel python=3.9
 conda activate camel

 # Clone github repo
-git clone -b v0.1.6.6 https://github.com/camel-ai/camel.git
+git clone -b v0.1.6.7 https://github.com/camel-ai/camel.git

 # Change directory into project directory
 cd camel