camel-ai 0.2.19__py3-none-any.whl → 0.2.20a0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of camel-ai might be problematic.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +27 -27
- camel/configs/__init__.py +6 -0
- camel/configs/moonshot_config.py +63 -0
- camel/configs/siliconflow_config.py +91 -0
- camel/datahubs/huggingface.py +3 -3
- camel/embeddings/jina_embedding.py +6 -1
- camel/models/__init__.py +2 -0
- camel/models/model_factory.py +6 -0
- camel/models/moonshot_model.py +138 -0
- camel/models/siliconflow_model.py +142 -0
- camel/toolkits/__init__.py +2 -0
- camel/toolkits/search_toolkit.py +12 -0
- camel/toolkits/semantic_scholar_toolkit.py +308 -0
- camel/types/enums.py +108 -5
- camel/types/unified_model_type.py +5 -0
- {camel_ai-0.2.19.dist-info → camel_ai-0.2.20a0.dist-info}/METADATA +5 -2
- {camel_ai-0.2.19.dist-info → camel_ai-0.2.20a0.dist-info}/RECORD +20 -15
- {camel_ai-0.2.19.dist-info → camel_ai-0.2.20a0.dist-info}/LICENSE +0 -0
- {camel_ai-0.2.19.dist-info → camel_ai-0.2.20a0.dist-info}/WHEEL +0 -0
camel/models/siliconflow_model.py
ADDED
@@ -0,0 +1,142 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+import os
+from typing import Any, Dict, List, Optional, Union
+
+from openai import OpenAI, Stream
+
+from camel.configs import SILICONFLOW_API_PARAMS, SiliconFlowConfig
+from camel.messages import OpenAIMessage
+from camel.models import BaseModelBackend
+from camel.types import (
+    ChatCompletion,
+    ChatCompletionChunk,
+    ModelType,
+)
+from camel.utils import (
+    BaseTokenCounter,
+    OpenAITokenCounter,
+    api_keys_required,
+)
+
+
+class SiliconFlowModel(BaseModelBackend):
+    r"""SiliconFlow API in a unified BaseModelBackend interface.
+
+    Args:
+        model_type (Union[ModelType, str]): Model for which a backend is
+            created.
+        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
+            that will be fed into OpenAI client. If :obj:`None`,
+            :obj:`SiliconFlowConfig().as_dict()` will be used.
+            (default: :obj:`None`)
+        api_key (Optional[str], optional): The API key for authenticating with
+            the SiliconFlow service. (default: :obj:`None`)
+        url (Optional[str], optional): The URL to the SiliconFlow service. If
+            not provided, :obj:`https://api.siliconflow.cn/v1/` will be used.
+            (default: :obj:`None`)
+        token_counter (Optional[BaseTokenCounter], optional): Token counter to
+            use for the model. If not provided, :obj:`OpenAITokenCounter(
+            ModelType.GPT_4O_MINI)` will be used.
+            (default: :obj:`None`)
+    """
+
+    @api_keys_required(
+        [
+            ("api_key", 'SILICONFLOW_API_KEY'),
+        ]
+    )
+    def __init__(
+        self,
+        model_type: Union[ModelType, str],
+        model_config_dict: Optional[Dict[str, Any]] = None,
+        api_key: Optional[str] = None,
+        url: Optional[str] = None,
+        token_counter: Optional[BaseTokenCounter] = None,
+    ) -> None:
+        if model_config_dict is None:
+            model_config_dict = SiliconFlowConfig().as_dict()
+        api_key = api_key or os.environ.get("SILICONFLOW_API_KEY")
+        url = url or os.environ.get(
+            "SILICONFLOW_API_BASE_URL",
+            "https://api.siliconflow.cn/v1/",
+        )
+        super().__init__(
+            model_type, model_config_dict, api_key, url, token_counter
+        )
+        self._client = OpenAI(
+            timeout=180,
+            max_retries=3,
+            api_key=self._api_key,
+            base_url=self._url,
+        )
+
+    def run(
+        self,
+        messages: List[OpenAIMessage],
+    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+        r"""Runs inference of SiliconFlow chat completion.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+
+        Returns:
+            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `Stream[ChatCompletionChunk]` in the stream mode.
+        """
+        response = self._client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **self.model_config_dict,
+        )
+        return response
+
+    @property
+    def token_counter(self) -> BaseTokenCounter:
+        r"""Initialize the token counter for the model backend.
+
+        Returns:
+            BaseTokenCounter: The token counter following the model's
+                tokenization style.
+        """
+        if not self._token_counter:
+            self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
+        return self._token_counter
+
+    def check_model_config(self):
+        r"""Check whether the model configuration contains any
+        unexpected arguments to SiliconFlow API.
+
+        Raises:
+            ValueError: If the model configuration dictionary contains any
+                unexpected arguments to SiliconFlow API.
+        """
+        for param in self.model_config_dict:
+            if param not in SILICONFLOW_API_PARAMS:
+                raise ValueError(
+                    f"Unexpected argument `{param}` is "
+                    "input into SiliconFlow model backend."
+                )
+
+    @property
+    def stream(self) -> bool:
+        """Returns whether the model is in stream mode, which sends partial
+        results each time.
+
+        Returns:
+            bool: Whether the model is in stream mode.
+        """
+        return self.model_config_dict.get('stream', False)
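
A minimal usage sketch for the new backend follows. This is not part of the diff: it assumes a valid SILICONFLOW_API_KEY is exported, and "deepseek-ai/DeepSeek-V2.5" is only an illustrative model name.

import os

from camel.models.siliconflow_model import SiliconFlowModel

# Assumes the key is already exported, e.g. `export SILICONFLOW_API_KEY=sk-...`.
assert os.environ.get("SILICONFLOW_API_KEY")

# Illustrative model name; any model served by SiliconFlow can be passed as a str.
model = SiliconFlowModel(model_type="deepseek-ai/DeepSeek-V2.5")
model.check_model_config()  # raises ValueError on unknown config keys

# With the default SiliconFlowConfig (stream disabled), run() returns a ChatCompletion.
response = model.run([{"role": "user", "content": "Say hello in one sentence."}])
print(response.choices[0].message.content)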
camel/toolkits/__init__.py
CHANGED
@@ -45,6 +45,7 @@ from .human_toolkit import HumanToolkit
 from .stripe_toolkit import StripeToolkit
 from .video_toolkit import VideoDownloaderToolkit
 from .dappier_toolkit import DappierToolkit
+from .semantic_scholar_toolkit import SemanticScholarToolkit
 
 __all__ = [
     'BaseToolkit',
@@ -77,4 +78,5 @@ __all__ = [
     'MeshyToolkit',
     'OpenBBToolkit',
     'DappierToolkit',
+    'SemanticScholarToolkit',
 ]
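
With this re-export, the new toolkit is importable from the package root. A short sketch using only the API shown in this diff:

from camel.toolkits import SemanticScholarToolkit

toolkit = SemanticScholarToolkit()
tools = toolkit.get_tools()
print(len(tools))  # 5 FunctionTool wrappers, one per fetch_* method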
camel/toolkits/search_toolkit.py
CHANGED
@@ -580,6 +580,18 @@ class SearchToolkit(BaseToolkit):
                 "image_url": image_url,
             }
 
+            # For Results pod, collect all plaintext values from subpods
+            if pod.get("@title") == "Results":
+                results_text = []
+                if isinstance(subpod_data, list):
+                    for subpod in subpod_data:
+                        if subpod.get("plaintext"):
+                            results_text.append(subpod["plaintext"])
+                else:
+                    if description:
+                        results_text.append(description)
+                pod_info["description"] = "\n".join(results_text)
+
             # Add to steps list
             output["pod_info"].append(pod_info)
 
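
The added branch only changes how the Wolfram|Alpha "Results" pod is summarized. A standalone illustration of that logic on hypothetical pod data (the "@title"/"subpod"/"plaintext" field names mirror the Wolfram|Alpha full-results API; the values are made up):

# Hypothetical "Results" pod with two subpods, shaped like the Wolfram|Alpha response.
pod = {
    "@title": "Results",
    "subpod": [
        {"plaintext": "x = 2"},
        {"plaintext": "x = -3"},
    ],
}
subpod_data = pod["subpod"]

# Same joining behaviour as the new branch: keep every non-empty plaintext value.
results_text = [s["plaintext"] for s in subpod_data if s.get("plaintext")]
print("\n".join(results_text))  # -> "x = 2\nx = -3"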
camel/toolkits/semantic_scholar_toolkit.py
ADDED
@@ -0,0 +1,308 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+import json
+from typing import List, Optional
+
+import requests
+
+from camel.toolkits import FunctionTool
+from camel.toolkits.base import BaseToolkit
+
+
+class SemanticScholarToolkit(BaseToolkit):
+    r"""A toolkit for interacting with the Semantic Scholar
+    API to fetch paper and author data.
+    """
+
+    def __init__(self):
+        r"""Initializes the SemanticScholarToolkit."""
+        self.base_url = "https://api.semanticscholar.org/graph/v1"
+
+    def fetch_paper_data_title(
+        self,
+        paper_title: str,
+        fields: Optional[List[str]] = None,
+    ) -> dict:
+        r"""Fetches a SINGLE paper from the Semantic Scholar
+        API based on a paper title.
+
+        Args:
+            paper_title (str): The title of the paper to fetch.
+            fields (Optional[List[str]], optional): The fields to include in
+                the response (default: :obj:`None`). If not provided defaults
+                to ["title", "abstract", "authors", "year", "citationCount",
+                "publicationTypes", "publicationDate", "openAccessPdf"].
+
+        Returns:
+            dict: The response data from the API or error information if the
+                request fails.
+        """
+        if fields is None:
+            fields = [
+                "title",
+                "abstract",
+                "authors",
+                "year",
+                "citationCount",
+                "publicationTypes",
+                "publicationDate",
+                "openAccessPdf",
+            ]
+
+        url = f"{self.base_url}/paper/search"
+        query_params = {"query": paper_title, "fields": ",".join(fields)}
+        try:
+            response = requests.get(url, params=query_params)
+            response.raise_for_status()
+            return response.json()
+        except requests.exceptions.RequestException as e:
+            return {
+                "error": f"Request failed: {e!s}",
+                "message": str(e),
+            }
+        except ValueError:
+            return {
+                "error": "Response is not valid JSON",
+                "message": response.text,
+            }
+
+    def fetch_paper_data_id(
+        self,
+        paper_id: str,
+        fields: Optional[List[str]] = None,
+    ) -> dict:
+        r"""Fetches a SINGLE paper from the Semantic Scholar
+        API based on a paper ID.
+
+        Args:
+            paper_id (str): The ID of the paper to fetch.
+            fields (Optional[List[str]], optional): The fields to include in
+                the response (default: :obj:`None`). If not provided defaults
+                to ["title", "abstract", "authors", "year", "citationCount",
+                "publicationTypes", "publicationDate", "openAccessPdf"].
+
+        Returns:
+            dict: The response data from the API or error information
+                if the request fails.
+        """
+        if fields is None:
+            fields = [
+                "title",
+                "abstract",
+                "authors",
+                "year",
+                "citationCount",
+                "publicationTypes",
+                "publicationDate",
+                "openAccessPdf",
+            ]
+
+        url = f"{self.base_url}/paper/{paper_id}"
+        query_params = {"fields": ",".join(fields)}
+        try:
+            response = requests.get(url, params=query_params)
+            response.raise_for_status()
+            return response.json()
+        except requests.exceptions.RequestException as e:
+            return {
+                "error": f"Request failed: {e!s}",
+                "message": str(e),
+            }
+        except ValueError:
+            return {
+                "error": "Response is not valid JSON",
+                "message": response.text,
+            }
+
+    def fetch_bulk_paper_data(
+        self,
+        query: str,
+        year: str = "2023-",
+        fields: Optional[List[str]] = None,
+    ) -> dict:
+        r"""Fetches MULTIPLE papers at once from the Semantic Scholar
+        API based on a related topic.
+
+        Args:
+            query (str): The text query to match against the paper's title and
+                abstract. For example, you can use the following operators and
+                techniques to construct your query: Example 1: ((cloud
+                computing) | virtualization) +security -privacy This will
+                match papers whose title or abstract contains "cloud" and
+                "computing", or contains the word "virtualization". The papers
+                must also include the term "security" but exclude papers that
+                contain the word "privacy".
+            year (str, optional): The year filter for papers (default:
+                :obj:`"2023-"`).
+            fields (Optional[List[str]], optional): The fields to include in
+                the response (default: :obj:`None`). If not provided defaults
+                to ["title", "url", "publicationTypes", "publicationDate",
+                "openAccessPdf"].
+
+        Returns:
+            dict: The response data from the API or error information if the
+                request fails.
+        """
+        if fields is None:
+            fields = [
+                "title",
+                "url",
+                "publicationTypes",
+                "publicationDate",
+                "openAccessPdf",
+            ]
+
+        url = f"{self.base_url}/paper/search/bulk"
+        query_params = {
+            "query": query,
+            "fields": ",".join(fields),
+            "year": year,
+        }
+        try:
+            response = requests.get(url, params=query_params)
+            response.raise_for_status()
+            return response.json()
+        except requests.exceptions.RequestException as e:
+            return {
+                "error": f"Request failed: {e!s}",
+                "message": str(e),
+            }
+        except ValueError:
+            return {
+                "error": "Response is not valid JSON",
+                "message": response.text,
+            }
+
+    def fetch_recommended_papers(
+        self,
+        positive_paper_ids: List[str],
+        negative_paper_ids: List[str],
+        fields: Optional[List[str]] = None,
+        limit: int = 500,
+        save_to_file: bool = False,
+    ) -> dict:
+        r"""Fetches recommended papers from the Semantic Scholar
+        API based on the positive and negative paper IDs.
+
+        Args:
+            positive_paper_ids (list): A list of paper IDs (as strings)
+                that are positively correlated to the recommendation.
+            negative_paper_ids (list): A list of paper IDs (as strings)
+                that are negatively correlated to the recommendation.
+            fields (Optional[List[str]], optional): The fields to include in
+                the response (default: :obj:`None`). If not provided defaults
+                to ["title", "url", "citationCount", "authors",
+                "publicationTypes", "publicationDate", "openAccessPdf"].
+            limit (int, optional): The maximum number of recommended papers to
+                return (default: :obj:`500`).
+            save_to_file (bool, optional): If True, saves the response data to
+                a file (default: :obj:`False`).
+
+        Returns:
+            dict: A dictionary containing recommended papers sorted by
+                citation count.
+        """
+        if fields is None:
+            fields = [
+                "title",
+                "url",
+                "citationCount",
+                "authors",
+                "publicationTypes",
+                "publicationDate",
+                "openAccessPdf",
+            ]
+
+        url = "https://api.semanticscholar.org/recommendations/v1/papers"
+        query_params = {"fields": ",".join(fields), "limit": str(limit)}
+        data = {
+            "positive_paper_ids": positive_paper_ids,
+            "negative_paper_ids": negative_paper_ids,
+        }
+        try:
+            response = requests.post(url, params=query_params, json=data)
+            response.raise_for_status()
+            papers = response.json()
+            if save_to_file:
+                with open('recommended_papers.json', 'w') as output:
+                    json.dump(papers, output)
+            return papers
+        except requests.exceptions.RequestException as e:
+            return {"error": str(e)}
+        except ValueError:
+            return {
+                "error": "Response is not valid JSON",
+                "message": response.text,
+            }
+
+    def fetch_author_data(
+        self,
+        ids: List[str],
+        fields: Optional[List[str]] = None,
+        save_to_file: bool = False,
+    ) -> dict:
+        r"""Fetches author information from the Semantic Scholar
+        API based on author IDs.
+
+        Args:
+            ids (list): A list of author IDs (as strings) to fetch
+                data for.
+            fields (Optional[List[str]], optional): The fields to include in
+                the response (default: :obj:`None`). If not provided defaults
+                to ["name", "url", "paperCount", "hIndex", "papers"].
+            save_to_file (bool, optional): Whether to save the results to a
+                file (default: :obj:`False`).
+
+        Returns:
+            dict: The response data from the API or error information if
+                the request fails.
+        """
+        if fields is None:
+            fields = ["name", "url", "paperCount", "hIndex", "papers"]
+
+        url = f"{self.base_url}/author/batch"
+        query_params = {"fields": ",".join(fields)}
+        data = {"ids": ids}
+        try:
+            response = requests.post(url, params=query_params, json=data)
+            response.raise_for_status()
+            response_data = response.json()
+            if save_to_file:
+                with open('author_information.json', 'w') as output:
+                    json.dump(response_data, output)
+            return response_data
+        except requests.exceptions.RequestException as e:
+            return {"error": str(e)}
+        except ValueError:
+            return {
+                "error": "Response is not valid JSON",
+                "message": response.text,
+            }
+
+    def get_tools(self) -> List[FunctionTool]:
+        r"""Returns a list of FunctionTool objects representing the
+        functions in the toolkit.
+
+        Returns:
+            List[FunctionTool]: A list of FunctionTool objects
+                representing the functions in the toolkit.
+        """
+        return [
+            FunctionTool(self.fetch_paper_data_title),
+            FunctionTool(self.fetch_paper_data_id),
+            FunctionTool(self.fetch_bulk_paper_data),
+            FunctionTool(self.fetch_recommended_papers),
+            FunctionTool(self.fetch_author_data),
+        ]
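
A short usage sketch for the new toolkit. The title string is an arbitrary example, and the "data" key in the search response follows the public Semantic Scholar API rather than anything in this diff:

from camel.toolkits.semantic_scholar_toolkit import SemanticScholarToolkit

toolkit = SemanticScholarToolkit()

# Title search; "Attention Is All You Need" is an illustrative title.
result = toolkit.fetch_paper_data_title("Attention Is All You Need")

# Every method returns parsed JSON on success or an {"error": ...} dict on failure,
# so callers branch on the "error" key instead of catching exceptions.
if "error" in result:
    print(result["error"])
else:
    first = result.get("data", [{}])[0]
    print(first.get("title"), first.get("year"))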