camel-ai 0.1.6.6__py3-none-any.whl → 0.1.6.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of camel-ai might be problematic. Click here for more details.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +44 -9
- camel/agents/critic_agent.py +0 -1
- camel/configs/__init__.py +9 -0
- camel/configs/reka_config.py +74 -0
- camel/configs/samba_config.py +50 -0
- camel/configs/togetherai_config.py +107 -0
- camel/models/__init__.py +6 -0
- camel/models/groq_model.py +5 -5
- camel/models/litellm_model.py +1 -1
- camel/models/model_factory.py +12 -0
- camel/models/ollama_model.py +6 -4
- camel/models/openai_compatibility_model.py +3 -3
- camel/models/reka_model.py +232 -0
- camel/models/samba_model.py +291 -0
- camel/models/togetherai_model.py +148 -0
- camel/models/vllm_model.py +7 -5
- camel/models/zhipuai_model.py +2 -2
- camel/retrievers/auto_retriever.py +2 -27
- camel/societies/babyagi_playing.py +0 -3
- camel/societies/role_playing.py +18 -2
- camel/storages/object_storages/amazon_s3.py +12 -10
- camel/toolkits/__init__.py +3 -0
- camel/toolkits/linkedin_toolkit.py +230 -0
- camel/types/enums.py +64 -6
- camel/utils/__init__.py +2 -0
- camel/utils/commons.py +22 -0
- {camel_ai-0.1.6.6.dist-info → camel_ai-0.1.6.8.dist-info}/METADATA +19 -10
- {camel_ai-0.1.6.6.dist-info → camel_ai-0.1.6.8.dist-info}/RECORD +30 -23
- {camel_ai-0.1.6.6.dist-info → camel_ai-0.1.6.8.dist-info}/WHEEL +0 -0
camel/models/vllm_model.py
CHANGED
|
@@ -11,6 +11,7 @@
|
|
|
11
11
|
# See the License for the specific language governing permissions and
|
|
12
12
|
# limitations under the License.
|
|
13
13
|
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
|
|
14
|
+
import os
|
|
14
15
|
from typing import Any, Dict, List, Optional, Union
|
|
15
16
|
|
|
16
17
|
from openai import OpenAI, Stream
|
|
@@ -42,20 +43,21 @@ class VLLMModel:
|
|
|
42
43
|
model_config_dict (Dict[str, Any]): A dictionary that will
|
|
43
44
|
be fed into openai.ChatCompletion.create().
|
|
44
45
|
url (Optional[str]): The url to the model service. (default:
|
|
45
|
-
:obj:`
|
|
46
|
+
:obj:`"http://localhost:8000/v1"`)
|
|
46
47
|
api_key (Optional[str]): The API key for authenticating with the
|
|
47
48
|
model service.
|
|
48
49
|
token_counter (Optional[BaseTokenCounter]): Token counter to use
|
|
49
50
|
for the model. If not provided, `OpenAITokenCounter(ModelType.
|
|
50
|
-
|
|
51
|
+
GPT_4O_MINI)` will be used.
|
|
51
52
|
"""
|
|
52
53
|
self.model_type = model_type
|
|
53
54
|
self.model_config_dict = model_config_dict
|
|
55
|
+
self._url = url or os.environ.get("VLLM_BASE_URL")
|
|
54
56
|
# Use OpenAI client as interface to call vLLM
|
|
55
57
|
self._client = OpenAI(
|
|
56
58
|
timeout=60,
|
|
57
59
|
max_retries=3,
|
|
58
|
-
base_url=
|
|
60
|
+
base_url=self._url or "http://localhost:8000/v1",
|
|
59
61
|
api_key=api_key,
|
|
60
62
|
)
|
|
61
63
|
self._token_counter = token_counter
|
|
@@ -70,7 +72,7 @@ class VLLMModel:
|
|
|
70
72
|
tokenization style.
|
|
71
73
|
"""
|
|
72
74
|
if not self._token_counter:
|
|
73
|
-
self._token_counter = OpenAITokenCounter(ModelType.
|
|
75
|
+
self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
|
|
74
76
|
return self._token_counter
|
|
75
77
|
|
|
76
78
|
def check_model_config(self):
|
|
@@ -113,7 +115,7 @@ class VLLMModel:
|
|
|
113
115
|
|
|
114
116
|
@property
|
|
115
117
|
def token_limit(self) -> int:
|
|
116
|
-
"""Returns the maximum token limit for the given model.
|
|
118
|
+
r"""Returns the maximum token limit for the given model.
|
|
117
119
|
|
|
118
120
|
Returns:
|
|
119
121
|
int: The maximum token limit for the given model.
|
camel/models/zhipuai_model.py
CHANGED
|
@@ -52,7 +52,7 @@ class ZhipuAIModel(BaseModelBackend):
|
|
|
52
52
|
:obj:`None`)
|
|
53
53
|
token_counter (Optional[BaseTokenCounter]): Token counter to use
|
|
54
54
|
for the model. If not provided, `OpenAITokenCounter(ModelType.
|
|
55
|
-
|
|
55
|
+
GPT_4O_MINI)` will be used.
|
|
56
56
|
"""
|
|
57
57
|
super().__init__(
|
|
58
58
|
model_type, model_config_dict, api_key, url, token_counter
|
|
@@ -105,7 +105,7 @@ class ZhipuAIModel(BaseModelBackend):
|
|
|
105
105
|
"""
|
|
106
106
|
|
|
107
107
|
if not self._token_counter:
|
|
108
|
-
self._token_counter = OpenAITokenCounter(ModelType.
|
|
108
|
+
self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
|
|
109
109
|
return self._token_counter
|
|
110
110
|
|
|
111
111
|
def check_model_config(self):
|
|
@@ -14,9 +14,7 @@
|
|
|
14
14
|
import datetime
|
|
15
15
|
import os
|
|
16
16
|
import re
|
|
17
|
-
from pathlib import Path
|
|
18
17
|
from typing import Collection, List, Optional, Sequence, Tuple, Union
|
|
19
|
-
from urllib.parse import urlparse
|
|
20
18
|
|
|
21
19
|
from camel.embeddings import BaseEmbedding, OpenAIEmbedding
|
|
22
20
|
from camel.retrievers.vector_retriever import VectorRetriever
|
|
@@ -112,35 +110,12 @@ class AutoRetriever:
|
|
|
112
110
|
Returns:
|
|
113
111
|
str: A sanitized, valid collection name suitable for use.
|
|
114
112
|
"""
|
|
113
|
+
|
|
115
114
|
if isinstance(content, Element):
|
|
116
115
|
content = content.metadata.file_directory
|
|
117
116
|
|
|
118
|
-
|
|
119
|
-
parsed_url = urlparse(content)
|
|
120
|
-
is_url = all([parsed_url.scheme, parsed_url.netloc])
|
|
121
|
-
|
|
122
|
-
# Convert given path into a collection name, ensuring it only
|
|
123
|
-
# contains numbers, letters, and underscores
|
|
124
|
-
if is_url:
|
|
125
|
-
# For URLs, remove https://, replace /, and any characters not
|
|
126
|
-
# allowed by Milvus with _
|
|
127
|
-
collection_name = re.sub(
|
|
128
|
-
r'[^0-9a-zA-Z]+',
|
|
129
|
-
'_',
|
|
130
|
-
content.replace("https://", ""),
|
|
131
|
-
)
|
|
132
|
-
elif os.path.exists(content):
|
|
133
|
-
# For file paths, get the stem and replace spaces with _, also
|
|
134
|
-
# ensuring only allowed characters are present
|
|
135
|
-
collection_name = re.sub(r'[^0-9a-zA-Z]+', '_', Path(content).stem)
|
|
136
|
-
else:
|
|
137
|
-
# the content is string input
|
|
138
|
-
collection_name = content[:10]
|
|
117
|
+
collection_name = re.sub(r'[^a-zA-Z0-9]', '', content)[:20]
|
|
139
118
|
|
|
140
|
-
# Ensure the collection name does not start or end with underscore
|
|
141
|
-
collection_name = collection_name.strip("_")
|
|
142
|
-
# Limit the maximum length of the collection name to 30 characters
|
|
143
|
-
collection_name = collection_name[:30]
|
|
144
119
|
return collection_name
|
|
145
120
|
|
|
146
121
|
def _get_file_modified_date_from_file(
|
|
@@ -243,9 +243,6 @@ class BabyAGI:
|
|
|
243
243
|
|
|
244
244
|
assistant_response = self.assistant_agent.step(assistant_msg_msg)
|
|
245
245
|
assistant_msg = assistant_response.msgs[0]
|
|
246
|
-
self.assistant_agent.record_message(assistant_msg)
|
|
247
|
-
self.task_creation_agent.record_message(assistant_msg)
|
|
248
|
-
self.task_prioritization_agent.record_message(assistant_msg)
|
|
249
246
|
|
|
250
247
|
self.solved_subtasks.append(task_name)
|
|
251
248
|
past_tasks = self.solved_subtasks + list(self.subtasks)
|
camel/societies/role_playing.py
CHANGED
|
@@ -486,7 +486,15 @@ class RolePlaying:
|
|
|
486
486
|
),
|
|
487
487
|
)
|
|
488
488
|
user_msg = self._reduce_message_options(user_response.msgs)
|
|
489
|
-
|
|
489
|
+
|
|
490
|
+
# To prevent recording the same memory more than once (once in chat
|
|
491
|
+
# step and once in role play), and the model generates only one
|
|
492
|
+
# response when multi-response support is enabled.
|
|
493
|
+
if (
|
|
494
|
+
'n' in self.user_agent.model_config_dict.keys()
|
|
495
|
+
and self.user_agent.model_config_dict['n'] > 1
|
|
496
|
+
):
|
|
497
|
+
self.user_agent.record_message(user_msg)
|
|
490
498
|
|
|
491
499
|
assistant_response = self.assistant_agent.step(user_msg)
|
|
492
500
|
if assistant_response.terminated or assistant_response.msgs is None:
|
|
@@ -501,7 +509,15 @@ class RolePlaying:
|
|
|
501
509
|
),
|
|
502
510
|
)
|
|
503
511
|
assistant_msg = self._reduce_message_options(assistant_response.msgs)
|
|
504
|
-
|
|
512
|
+
|
|
513
|
+
# To prevent recording the same memory more than once (once in chat
|
|
514
|
+
# step and once in role play), and the model generates only one
|
|
515
|
+
# response when multi-response support is enabled.
|
|
516
|
+
if (
|
|
517
|
+
'n' in self.assistant_agent.model_config_dict.keys()
|
|
518
|
+
and self.assistant_agent.model_config_dict['n'] > 1
|
|
519
|
+
):
|
|
520
|
+
self.assistant_agent.record_message(assistant_msg)
|
|
505
521
|
|
|
506
522
|
return (
|
|
507
523
|
ChatAgentResponse(
|
|
@@ -70,18 +70,20 @@ class AmazonS3Storage(BaseObjectStorage):
|
|
|
70
70
|
aws_key_id = None
|
|
71
71
|
aws_secret_key = None
|
|
72
72
|
|
|
73
|
-
import
|
|
73
|
+
import botocore.session
|
|
74
74
|
from botocore import UNSIGNED
|
|
75
75
|
from botocore.config import Config
|
|
76
76
|
|
|
77
|
+
session = botocore.session.get_session()
|
|
78
|
+
|
|
77
79
|
if not anonymous:
|
|
78
|
-
self._client =
|
|
80
|
+
self._client = session.create_client(
|
|
79
81
|
"s3",
|
|
80
82
|
aws_access_key_id=aws_key_id,
|
|
81
83
|
aws_secret_access_key=aws_secret_key,
|
|
82
84
|
)
|
|
83
85
|
else:
|
|
84
|
-
self._client =
|
|
86
|
+
self._client = session.create_client(
|
|
85
87
|
"s3", config=Config(signature_version=UNSIGNED)
|
|
86
88
|
)
|
|
87
89
|
|
|
@@ -165,11 +167,10 @@ class AmazonS3Storage(BaseObjectStorage):
|
|
|
165
167
|
local_file_path (Path): The path to the local file to be uploaded.
|
|
166
168
|
remote_file_key (str): The path to the object in the bucket.
|
|
167
169
|
"""
|
|
168
|
-
|
|
169
|
-
|
|
170
|
-
|
|
171
|
-
|
|
172
|
-
)
|
|
170
|
+
with open(local_file_path, "rb") as f:
|
|
171
|
+
self._client.put_object(
|
|
172
|
+
Bucket=self._bucket_name, Key=remote_file_key, Body=f
|
|
173
|
+
)
|
|
173
174
|
|
|
174
175
|
def _download_file(
|
|
175
176
|
self,
|
|
@@ -182,11 +183,12 @@ class AmazonS3Storage(BaseObjectStorage):
|
|
|
182
183
|
local_file_path (Path): The path to the local file to be saved.
|
|
183
184
|
remote_file_key (str): The key of the object in the bucket.
|
|
184
185
|
"""
|
|
185
|
-
self._client.
|
|
186
|
+
file = self._client.get_object(
|
|
186
187
|
Bucket=self._bucket_name,
|
|
187
188
|
Key=remote_file_key,
|
|
188
|
-
Filename=local_file_path,
|
|
189
189
|
)
|
|
190
|
+
with open(local_file_path, "wb") as f:
|
|
191
|
+
f.write(file["Body"].read())
|
|
190
192
|
|
|
191
193
|
def _object_exists(self, file_key: str) -> bool:
|
|
192
194
|
r"""
|
camel/toolkits/__init__.py
CHANGED
|
@@ -28,6 +28,7 @@ from .twitter_toolkit import TWITTER_FUNCS, TwitterToolkit
|
|
|
28
28
|
from .weather_toolkit import WEATHER_FUNCS, WeatherToolkit
|
|
29
29
|
from .slack_toolkit import SLACK_FUNCS, SlackToolkit
|
|
30
30
|
from .dalle_toolkit import DALLE_FUNCS, DalleToolkit
|
|
31
|
+
from .linkedin_toolkit import LINKEDIN_FUNCS, LinkedInToolkit
|
|
31
32
|
|
|
32
33
|
from .base import BaseToolkit
|
|
33
34
|
from .code_execution import CodeExecutionToolkit
|
|
@@ -47,6 +48,7 @@ __all__ = [
|
|
|
47
48
|
'WEATHER_FUNCS',
|
|
48
49
|
'SLACK_FUNCS',
|
|
49
50
|
'DALLE_FUNCS',
|
|
51
|
+
'LINKEDIN_FUNCS',
|
|
50
52
|
'BaseToolkit',
|
|
51
53
|
'GithubToolkit',
|
|
52
54
|
'MathToolkit',
|
|
@@ -58,5 +60,6 @@ __all__ = [
|
|
|
58
60
|
'WeatherToolkit',
|
|
59
61
|
'RetrievalToolkit',
|
|
60
62
|
'OpenAPIToolkit',
|
|
63
|
+
'LinkedInToolkit',
|
|
61
64
|
'CodeExecutionToolkit',
|
|
62
65
|
]
|
|
@@ -0,0 +1,230 @@
|
|
|
1
|
+
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
|
|
2
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
3
|
+
# you may not use this file except in compliance with the License.
|
|
4
|
+
# You may obtain a copy of the License at
|
|
5
|
+
#
|
|
6
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
7
|
+
#
|
|
8
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
9
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
10
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
11
|
+
# See the License for the specific language governing permissions and
|
|
12
|
+
# limitations under the License.
|
|
13
|
+
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
|
|
14
|
+
|
|
15
|
+
import json
|
|
16
|
+
import os
|
|
17
|
+
from http import HTTPStatus
|
|
18
|
+
from typing import List
|
|
19
|
+
|
|
20
|
+
import requests
|
|
21
|
+
|
|
22
|
+
from camel.toolkits import OpenAIFunction
|
|
23
|
+
from camel.toolkits.base import BaseToolkit
|
|
24
|
+
from camel.utils import handle_http_error
|
|
25
|
+
|
|
26
|
+
LINKEDIN_POST_LIMIT = 1300
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
class LinkedInToolkit(BaseToolkit):
|
|
30
|
+
r"""A class representing a toolkit for LinkedIn operations.
|
|
31
|
+
|
|
32
|
+
This class provides methods for creating a post, deleting a post, and
|
|
33
|
+
retrieving the authenticated user's profile information.
|
|
34
|
+
"""
|
|
35
|
+
|
|
36
|
+
def __init__(self):
|
|
37
|
+
self._access_token = self._get_access_token()
|
|
38
|
+
|
|
39
|
+
def create_post(self, text: str) -> dict:
|
|
40
|
+
r"""Creates a post on LinkedIn for the authenticated user.
|
|
41
|
+
|
|
42
|
+
Args:
|
|
43
|
+
text (str): The content of the post to be created.
|
|
44
|
+
|
|
45
|
+
Returns:
|
|
46
|
+
dict: A dictionary containing the post ID and the content of
|
|
47
|
+
the post. If the post creation fails, the values will be None.
|
|
48
|
+
|
|
49
|
+
Raises:
|
|
50
|
+
Exception: If the post creation fails due to
|
|
51
|
+
an error response from LinkedIn API.
|
|
52
|
+
"""
|
|
53
|
+
url = 'https://api.linkedin.com/v2/ugcPosts'
|
|
54
|
+
urn = self.get_profile(include_id=True)
|
|
55
|
+
|
|
56
|
+
headers = {
|
|
57
|
+
'X-Restli-Protocol-Version': '2.0.0',
|
|
58
|
+
'Content-Type': 'application/json',
|
|
59
|
+
'Authorization': f'Bearer {self._access_token}',
|
|
60
|
+
}
|
|
61
|
+
|
|
62
|
+
post_data = {
|
|
63
|
+
"author": urn['id'],
|
|
64
|
+
"lifecycleState": "PUBLISHED",
|
|
65
|
+
"specificContent": {
|
|
66
|
+
"com.linkedin.ugc.ShareContent": {
|
|
67
|
+
"shareCommentary": {"text": text},
|
|
68
|
+
"shareMediaCategory": "NONE",
|
|
69
|
+
}
|
|
70
|
+
},
|
|
71
|
+
"visibility": {
|
|
72
|
+
"com.linkedin.ugc.MemberNetworkVisibility": "PUBLIC"
|
|
73
|
+
},
|
|
74
|
+
}
|
|
75
|
+
|
|
76
|
+
response = requests.post(
|
|
77
|
+
url, headers=headers, data=json.dumps(post_data)
|
|
78
|
+
)
|
|
79
|
+
if response.status_code == 201:
|
|
80
|
+
post_response = response.json()
|
|
81
|
+
post_id = post_response.get('id', None) # Get the ID of the post
|
|
82
|
+
return {'Post ID': post_id, 'Text': text}
|
|
83
|
+
else:
|
|
84
|
+
raise Exception(
|
|
85
|
+
f"Failed to create post. Status code: {response.status_code}, "
|
|
86
|
+
f"Response: {response.text}"
|
|
87
|
+
)
|
|
88
|
+
|
|
89
|
+
def delete_post(self, post_id: str) -> str:
|
|
90
|
+
r"""Deletes a LinkedIn post with the specified ID
|
|
91
|
+
for an authorized user.
|
|
92
|
+
|
|
93
|
+
This function sends a DELETE request to the LinkedIn API to delete
|
|
94
|
+
a post with the specified ID. Before sending the request, it
|
|
95
|
+
prompts the user to confirm the deletion.
|
|
96
|
+
|
|
97
|
+
Args:
|
|
98
|
+
post_id (str): The ID of the post to delete.
|
|
99
|
+
|
|
100
|
+
Returns:
|
|
101
|
+
str: A message indicating the result of the deletion. If the
|
|
102
|
+
deletion was successful, the message includes the ID of the
|
|
103
|
+
deleted post. If the deletion was not successful, the message
|
|
104
|
+
includes an error message.
|
|
105
|
+
|
|
106
|
+
Reference:
|
|
107
|
+
https://docs.microsoft.com/en-us/linkedin/marketing/integrations/community-management/shares/ugc-post-api
|
|
108
|
+
"""
|
|
109
|
+
print(
|
|
110
|
+
"You are going to delete a LinkedIn post "
|
|
111
|
+
f"with the following ID: {post_id}"
|
|
112
|
+
)
|
|
113
|
+
|
|
114
|
+
confirm = input(
|
|
115
|
+
"Are you sure you want to delete this post? (yes/no): "
|
|
116
|
+
)
|
|
117
|
+
if confirm.lower() != "yes":
|
|
118
|
+
return "Execution cancelled by the user."
|
|
119
|
+
|
|
120
|
+
headers = {
|
|
121
|
+
"Authorization": f"Bearer {self._access_token}",
|
|
122
|
+
"Content-Type": "application/json",
|
|
123
|
+
}
|
|
124
|
+
|
|
125
|
+
response = requests.delete(
|
|
126
|
+
f"https://api.linkedin.com/v2/ugcPosts/{post_id}",
|
|
127
|
+
headers=headers,
|
|
128
|
+
)
|
|
129
|
+
|
|
130
|
+
if response.status_code != HTTPStatus.NO_CONTENT:
|
|
131
|
+
error_type = handle_http_error(response)
|
|
132
|
+
return (
|
|
133
|
+
f"Request returned a(n) {error_type!s}: "
|
|
134
|
+
f"{response.status_code!s} {response.text}"
|
|
135
|
+
)
|
|
136
|
+
|
|
137
|
+
return f"Post deleted successfully. Post ID: {post_id}."
|
|
138
|
+
|
|
139
|
+
def get_profile(self, include_id: bool = False) -> dict:
|
|
140
|
+
r"""Retrieves the authenticated user's LinkedIn profile info.
|
|
141
|
+
|
|
142
|
+
This function sends a GET request to the LinkedIn API to retrieve the
|
|
143
|
+
authenticated user's profile information. Optionally, it also returns
|
|
144
|
+
the user's LinkedIn ID.
|
|
145
|
+
|
|
146
|
+
Args:
|
|
147
|
+
include_id (bool): Whether to include the LinkedIn profile ID in
|
|
148
|
+
the response.
|
|
149
|
+
|
|
150
|
+
Returns:
|
|
151
|
+
dict: A dictionary containing the user's LinkedIn profile
|
|
152
|
+
information. If `include_id` is True, the dictionary will also
|
|
153
|
+
include the profile ID.
|
|
154
|
+
|
|
155
|
+
Raises:
|
|
156
|
+
Exception: If the profile retrieval fails due to an error response
|
|
157
|
+
from LinkedIn API.
|
|
158
|
+
"""
|
|
159
|
+
headers = {
|
|
160
|
+
"Authorization": f"Bearer {self._access_token}",
|
|
161
|
+
'Connection': 'Keep-Alive',
|
|
162
|
+
'Content-Type': 'application/json',
|
|
163
|
+
"X-Restli-Protocol-Version": "2.0.0",
|
|
164
|
+
}
|
|
165
|
+
|
|
166
|
+
response = requests.get(
|
|
167
|
+
"https://api.linkedin.com/v2/userinfo",
|
|
168
|
+
headers=headers,
|
|
169
|
+
)
|
|
170
|
+
|
|
171
|
+
if response.status_code != HTTPStatus.OK:
|
|
172
|
+
raise Exception(
|
|
173
|
+
f"Failed to retrieve profile. "
|
|
174
|
+
f"Status code: {response.status_code}, "
|
|
175
|
+
f"Response: {response.text}"
|
|
176
|
+
)
|
|
177
|
+
|
|
178
|
+
json_response = response.json()
|
|
179
|
+
|
|
180
|
+
locale = json_response.get('locale', {})
|
|
181
|
+
country = locale.get('country', 'N/A')
|
|
182
|
+
language = locale.get('language', 'N/A')
|
|
183
|
+
|
|
184
|
+
profile_report = {
|
|
185
|
+
"Country": country,
|
|
186
|
+
"Language": language,
|
|
187
|
+
"First Name": json_response.get('given_name'),
|
|
188
|
+
"Last Name": json_response.get('family_name'),
|
|
189
|
+
"Email": json_response.get('email'),
|
|
190
|
+
}
|
|
191
|
+
|
|
192
|
+
if include_id:
|
|
193
|
+
profile_report['id'] = f"urn:li:person:{json_response['sub']}"
|
|
194
|
+
|
|
195
|
+
return profile_report
|
|
196
|
+
|
|
197
|
+
def get_tools(self) -> List[OpenAIFunction]:
|
|
198
|
+
r"""Returns a list of OpenAIFunction objects representing the
|
|
199
|
+
functions in the toolkit.
|
|
200
|
+
|
|
201
|
+
Returns:
|
|
202
|
+
List[OpenAIFunction]: A list of OpenAIFunction objects
|
|
203
|
+
representing the functions in the toolkit.
|
|
204
|
+
"""
|
|
205
|
+
return [
|
|
206
|
+
OpenAIFunction(self.create_post),
|
|
207
|
+
OpenAIFunction(self.delete_post),
|
|
208
|
+
OpenAIFunction(self.get_profile),
|
|
209
|
+
]
|
|
210
|
+
|
|
211
|
+
def _get_access_token(self) -> str:
|
|
212
|
+
r"""Fetches the access token required for making LinkedIn API requests.
|
|
213
|
+
|
|
214
|
+
Returns:
|
|
215
|
+
str: The OAuth 2.0 access token or warning message if the
|
|
216
|
+
environment variable `LINKEDIN_ACCESS_TOKEN` is not set or is
|
|
217
|
+
empty.
|
|
218
|
+
|
|
219
|
+
Reference:
|
|
220
|
+
You can apply for your personal LinkedIn API access token through
|
|
221
|
+
the link below:
|
|
222
|
+
https://www.linkedin.com/developers/apps
|
|
223
|
+
"""
|
|
224
|
+
token = os.getenv("LINKEDIN_ACCESS_TOKEN")
|
|
225
|
+
if not token:
|
|
226
|
+
return "Access token not found. Please set LINKEDIN_ACCESS_TOKEN."
|
|
227
|
+
return token
|
|
228
|
+
|
|
229
|
+
|
|
230
|
+
LINKEDIN_FUNCS: List[OpenAIFunction] = LinkedInToolkit().get_tools()
|
camel/types/enums.py
CHANGED
|
@@ -72,7 +72,7 @@ class ModelType(Enum):
|
|
|
72
72
|
GEMINI_1_5_FLASH = "gemini-1.5-flash"
|
|
73
73
|
GEMINI_1_5_PRO = "gemini-1.5-pro"
|
|
74
74
|
|
|
75
|
-
# Mistral AI
|
|
75
|
+
# Mistral AI models
|
|
76
76
|
MISTRAL_LARGE = "mistral-large-latest"
|
|
77
77
|
MISTRAL_NEMO = "open-mistral-nemo"
|
|
78
78
|
MISTRAL_CODESTRAL = "codestral-latest"
|
|
@@ -81,13 +81,21 @@ class ModelType(Enum):
|
|
|
81
81
|
MISTRAL_MIXTRAL_8x22B = "open-mixtral-8x22b"
|
|
82
82
|
MISTRAL_CODESTRAL_MAMBA = "open-codestral-mamba"
|
|
83
83
|
|
|
84
|
+
# Reka models
|
|
85
|
+
REKA_CORE = "reka-core"
|
|
86
|
+
REKA_FLASH = "reka-flash"
|
|
87
|
+
REKA_EDGE = "reka-edge"
|
|
88
|
+
|
|
89
|
+
# SambaNova Model
|
|
90
|
+
SAMBA_LLAMA_3_1_405B = "llama3-405b"
|
|
91
|
+
SAMBA_LLAMA_3_1_70B = "llama3-70b"
|
|
92
|
+
SAMBA_LLAMA_3_1_8B = "llama3-8b"
|
|
93
|
+
|
|
84
94
|
@property
|
|
85
95
|
def value_for_tiktoken(self) -> str:
|
|
86
|
-
|
|
87
|
-
self.value
|
|
88
|
-
|
|
89
|
-
else "gpt-3.5-turbo"
|
|
90
|
-
)
|
|
96
|
+
if self.is_openai:
|
|
97
|
+
return self.value
|
|
98
|
+
return "gpt-3.5-turbo"
|
|
91
99
|
|
|
92
100
|
@property
|
|
93
101
|
def is_openai(self) -> bool:
|
|
@@ -190,8 +198,34 @@ class ModelType(Enum):
|
|
|
190
198
|
|
|
191
199
|
@property
|
|
192
200
|
def is_gemini(self) -> bool:
|
|
201
|
+
r"""Returns whether this type of model is a Gemini model.
|
|
202
|
+
|
|
203
|
+
Returns:
|
|
204
|
+
bool: Whether this type of model is a Gemini model.
|
|
205
|
+
"""
|
|
193
206
|
return self in {ModelType.GEMINI_1_5_FLASH, ModelType.GEMINI_1_5_PRO}
|
|
194
207
|
|
|
208
|
+
@property
|
|
209
|
+
def is_reka(self) -> bool:
|
|
210
|
+
r"""Returns whether this type of model is a Reka model.
|
|
211
|
+
|
|
212
|
+
Returns:
|
|
213
|
+
bool: Whether this type of model is a Reka model.
|
|
214
|
+
"""
|
|
215
|
+
return self in {
|
|
216
|
+
ModelType.REKA_CORE,
|
|
217
|
+
ModelType.REKA_EDGE,
|
|
218
|
+
ModelType.REKA_FLASH,
|
|
219
|
+
}
|
|
220
|
+
|
|
221
|
+
@property
|
|
222
|
+
def is_samba(self) -> bool:
|
|
223
|
+
return self in {
|
|
224
|
+
ModelType.SAMBA_LLAMA_3_1_405B,
|
|
225
|
+
ModelType.SAMBA_LLAMA_3_1_70B,
|
|
226
|
+
ModelType.SAMBA_LLAMA_3_1_8B,
|
|
227
|
+
}
|
|
228
|
+
|
|
195
229
|
@property
|
|
196
230
|
def token_limit(self) -> int:
|
|
197
231
|
r"""Returns the maximum token limit for a given model.
|
|
@@ -208,6 +242,9 @@ class ModelType(Enum):
|
|
|
208
242
|
ModelType.LLAMA_2,
|
|
209
243
|
ModelType.NEMOTRON_4_REWARD,
|
|
210
244
|
ModelType.STUB,
|
|
245
|
+
ModelType.REKA_CORE,
|
|
246
|
+
ModelType.REKA_EDGE,
|
|
247
|
+
ModelType.REKA_FLASH,
|
|
211
248
|
}:
|
|
212
249
|
return 4_096
|
|
213
250
|
elif self in {
|
|
@@ -251,6 +288,9 @@ class ModelType(Enum):
|
|
|
251
288
|
ModelType.GROQ_LLAMA_3_1_8B,
|
|
252
289
|
ModelType.GROQ_LLAMA_3_1_70B,
|
|
253
290
|
ModelType.GROQ_LLAMA_3_1_405B,
|
|
291
|
+
ModelType.SAMBA_LLAMA_3_1_405B,
|
|
292
|
+
ModelType.SAMBA_LLAMA_3_1_70B,
|
|
293
|
+
ModelType.SAMBA_LLAMA_3_1_8B,
|
|
254
294
|
}:
|
|
255
295
|
return 131_072
|
|
256
296
|
elif self in {
|
|
@@ -444,7 +484,10 @@ class ModelPlatformType(Enum):
|
|
|
444
484
|
GEMINI = "gemini"
|
|
445
485
|
VLLM = "vllm"
|
|
446
486
|
MISTRAL = "mistral"
|
|
487
|
+
REKA = "reka"
|
|
488
|
+
TOGETHER = "together"
|
|
447
489
|
OPENAI_COMPATIBILITY_MODEL = "openai-compatibility-model"
|
|
490
|
+
SAMBA = "samba-nova"
|
|
448
491
|
|
|
449
492
|
@property
|
|
450
493
|
def is_openai(self) -> bool:
|
|
@@ -476,6 +519,11 @@ class ModelPlatformType(Enum):
|
|
|
476
519
|
r"""Returns whether this platform is vllm."""
|
|
477
520
|
return self is ModelPlatformType.VLLM
|
|
478
521
|
|
|
522
|
+
@property
|
|
523
|
+
def is_together(self) -> bool:
|
|
524
|
+
r"""Returns whether this platform is together."""
|
|
525
|
+
return self is ModelPlatformType.TOGETHER
|
|
526
|
+
|
|
479
527
|
@property
|
|
480
528
|
def is_litellm(self) -> bool:
|
|
481
529
|
r"""Returns whether this platform is litellm."""
|
|
@@ -507,6 +555,16 @@ class ModelPlatformType(Enum):
|
|
|
507
555
|
r"""Returns whether this platform is Gemini."""
|
|
508
556
|
return self is ModelPlatformType.GEMINI
|
|
509
557
|
|
|
558
|
+
@property
|
|
559
|
+
def is_reka(self) -> bool:
|
|
560
|
+
r"""Returns whether this platform is Reka."""
|
|
561
|
+
return self is ModelPlatformType.REKA
|
|
562
|
+
|
|
563
|
+
@property
|
|
564
|
+
def is_samba(self) -> bool:
|
|
565
|
+
r"""Returns whether this platform is Samba Nova."""
|
|
566
|
+
return self is ModelPlatformType.SAMBA
|
|
567
|
+
|
|
510
568
|
|
|
511
569
|
class AudioModelType(Enum):
|
|
512
570
|
TTS_1 = "tts-1"
|
camel/utils/__init__.py
CHANGED
|
@@ -27,6 +27,7 @@ from .commons import (
|
|
|
27
27
|
get_pydantic_object_schema,
|
|
28
28
|
get_system_information,
|
|
29
29
|
get_task_list,
|
|
30
|
+
handle_http_error,
|
|
30
31
|
is_docker_running,
|
|
31
32
|
json_to_function_code,
|
|
32
33
|
print_text_animated,
|
|
@@ -76,4 +77,5 @@ __all__ = [
|
|
|
76
77
|
'agentops_decorator',
|
|
77
78
|
'AgentOpsMeta',
|
|
78
79
|
'track_agent',
|
|
80
|
+
'handle_http_error',
|
|
79
81
|
]
|
camel/utils/commons.py
CHANGED
|
@@ -20,6 +20,7 @@ import subprocess
|
|
|
20
20
|
import time
|
|
21
21
|
import zipfile
|
|
22
22
|
from functools import wraps
|
|
23
|
+
from http import HTTPStatus
|
|
23
24
|
from typing import (
|
|
24
25
|
Any,
|
|
25
26
|
Callable,
|
|
@@ -547,3 +548,24 @@ def track_agent(*args, **kwargs):
|
|
|
547
548
|
return f
|
|
548
549
|
|
|
549
550
|
return noop
|
|
551
|
+
|
|
552
|
+
|
|
553
|
+
def handle_http_error(response: requests.Response) -> str:
|
|
554
|
+
r"""Handles the HTTP errors based on the status code of the response.
|
|
555
|
+
|
|
556
|
+
Args:
|
|
557
|
+
response (requests.Response): The HTTP response from the API call.
|
|
558
|
+
|
|
559
|
+
Returns:
|
|
560
|
+
str: The error type, based on the status code.
|
|
561
|
+
"""
|
|
562
|
+
if response.status_code == HTTPStatus.UNAUTHORIZED:
|
|
563
|
+
return "Unauthorized. Check your access token."
|
|
564
|
+
elif response.status_code == HTTPStatus.FORBIDDEN:
|
|
565
|
+
return "Forbidden. You do not have permission to perform this action."
|
|
566
|
+
elif response.status_code == HTTPStatus.NOT_FOUND:
|
|
567
|
+
return "Not Found. The resource could not be located."
|
|
568
|
+
elif response.status_code == HTTPStatus.TOO_MANY_REQUESTS:
|
|
569
|
+
return "Too Many Requests. You have hit the rate limit."
|
|
570
|
+
else:
|
|
571
|
+
return "HTTP Error"
|