camel-ai 0.2.16__py3-none-any.whl → 0.2.17__py3-none-any.whl

This diff shows the contents of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only and reflects the changes between the two versions.

Potentially problematic release.

Files changed (49)
  1. camel/__init__.py +1 -1
  2. camel/agents/chat_agent.py +18 -4
  3. camel/agents/multi_hop_generator_agent.py +85 -0
  4. camel/agents/programmed_agent_instruction.py +148 -0
  5. camel/benchmarks/__init__.py +2 -0
  6. camel/benchmarks/apibank.py +5 -0
  7. camel/benchmarks/apibench.py +8 -4
  8. camel/benchmarks/gaia.py +2 -2
  9. camel/benchmarks/ragbench.py +333 -0
  10. camel/bots/__init__.py +1 -1
  11. camel/bots/discord/__init__.py +26 -0
  12. camel/bots/discord/discord_app.py +384 -0
  13. camel/bots/discord/discord_installation.py +64 -0
  14. camel/bots/discord/discord_store.py +160 -0
  15. camel/configs/__init__.py +3 -0
  16. camel/configs/anthropic_config.py +17 -15
  17. camel/configs/internlm_config.py +60 -0
  18. camel/data_collector/base.py +5 -5
  19. camel/data_collector/sharegpt_collector.py +2 -2
  20. camel/datagen/self_instruct/self_instruct.py +1 -1
  21. camel/datagen/self_instruct/templates.py +12 -14
  22. camel/loaders/__init__.py +2 -0
  23. camel/loaders/panda_reader.py +337 -0
  24. camel/messages/__init__.py +10 -4
  25. camel/messages/conversion/conversation_models.py +5 -0
  26. camel/messages/func_message.py +30 -22
  27. camel/models/__init__.py +2 -0
  28. camel/models/anthropic_model.py +1 -22
  29. camel/models/cohere_model.py +8 -0
  30. camel/models/gemini_model.py +10 -1
  31. camel/models/internlm_model.py +143 -0
  32. camel/models/mistral_model.py +14 -7
  33. camel/models/model_factory.py +3 -0
  34. camel/models/reward/__init__.py +2 -0
  35. camel/models/reward/skywork_model.py +88 -0
  36. camel/synthetic_datagen/source2synth/data_processor.py +373 -0
  37. camel/synthetic_datagen/source2synth/models.py +68 -0
  38. camel/synthetic_datagen/source2synth/user_data_processor_config.py +73 -0
  39. camel/toolkits/google_scholar_toolkit.py +9 -0
  40. camel/types/__init__.py +4 -2
  41. camel/types/enums.py +34 -1
  42. camel/types/openai_types.py +6 -4
  43. camel/types/unified_model_type.py +5 -0
  44. camel/utils/token_counting.py +3 -3
  45. {camel_ai-0.2.16.dist-info → camel_ai-0.2.17.dist-info}/METADATA +158 -187
  46. {camel_ai-0.2.16.dist-info → camel_ai-0.2.17.dist-info}/RECORD +48 -35
  47. {camel_ai-0.2.16.dist-info → camel_ai-0.2.17.dist-info}/WHEEL +1 -1
  48. camel/bots/discord_app.py +0 -138
  49. {camel_ai-0.2.16.dist-info → camel_ai-0.2.17.dist-info}/LICENSE +0 -0
camel/benchmarks/ragbench.py ADDED
@@ -0,0 +1,333 @@
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+ from typing import Any, Callable, Dict, List, Literal, Optional, Sequence
+
+ import numpy as np
+ from datasets import Dataset, load_dataset
+
+ from camel.agents import ChatAgent
+ from camel.benchmarks import BaseBenchmark
+ from camel.logger import get_logger
+ from camel.retrievers import AutoRetriever
+
+ logger = get_logger(__name__)
+
+
+ class RagasFields:
+     r"""Constants for RAGAS evaluation field names."""
+
+     INPUT_CONTEXT = "contexts"
+     INPUT_QUESTION = "question"
+     INPUT_ANSWER = "answer"
+
+
+ def annotate_dataset(
+     dataset: Dataset,
+     context_call: Optional[Callable[[Dict[str, Any]], List[str]]],
+     answer_call: Optional[Callable[[Dict[str, Any]], str]],
+ ) -> Dataset:
+     r"""Annotate the dataset by adding context and answers using the provided
+     functions.
+
+     Args:
+         dataset (Dataset): The input dataset to annotate.
+         context_call (Optional[Callable[[Dict[str, Any]], List[str]]]):
+             Function to generate context for each example.
+         answer_call (Optional[Callable[[Dict[str, Any]], str]]): Function to
+             generate answer for each example.
+
+     Returns:
+         Dataset: The annotated dataset with added contexts and/or answers.
+     """
+
+     def process_example(example: Dict[str, Any]) -> Dict[str, Any]:
+         if context_call:
+             example["contexts"] = context_call(example)
+         if answer_call:
+             example["answer"] = answer_call(example)
+         return example
+
+     return dataset.map(process_example)
+
+
+ def rmse(
+     input_trues: Sequence[float],
+     input_preds: Sequence[float],
+ ) -> Optional[float]:
+     r"""Calculate Root Mean Squared Error (RMSE).
+
+     Args:
+         input_trues (Sequence[float]): Ground truth values.
+         input_preds (Sequence[float]): Predicted values.
+
+     Returns:
+         Optional[float]: RMSE value, or None if inputs have different lengths.
+     """
+     if len(input_trues) != len(input_preds):
+         logger.warning("Input lengths mismatch in RMSE calculation")
+         return None
+
+     trues = np.array(input_trues)
+     preds = np.array(input_preds, dtype=float)
+
+     # Ignore NaN values in predictions
+     eval_idx = ~np.isnan(preds)
+     if not np.any(eval_idx):
+         logger.warning("No valid predictions for RMSE calculation")
+         return None
+
+     trues = trues[eval_idx]
+     preds = preds[eval_idx]
+
+     return float(np.sqrt(np.mean((preds - trues) ** 2)))
+
+
+ def auroc(trues: Sequence[bool], preds: Sequence[float]) -> float:
+     r"""Calculate Area Under Receiver Operating Characteristic Curve (AUROC).
+
+     Args:
+         trues (Sequence[bool]): Ground truth binary values.
+         preds (Sequence[float]): Predicted probability values.
+
+     Returns:
+         float: AUROC score.
+     """
+     from sklearn.metrics import roc_auc_score  # type: ignore[import-untyped]
+
+     eval_idx = ~np.isnan(preds)
+     if not np.any(eval_idx):
+         logger.warning("No valid predictions for AUROC calculation")
+         return 0.5  # Return random classifier score
+
+     return float(
+         roc_auc_score(np.array(trues)[eval_idx], np.array(preds)[eval_idx])
+     )
+
+
+ def ragas_calculate_metrics(
+     dataset: Dataset,
+     pred_context_relevance_field: Optional[str],
+     pred_faithfulness_field: Optional[str],
+     metrics_to_evaluate: Optional[List[str]] = None,
+     ground_truth_context_relevance_field: str = "relevance_score",
+     ground_truth_faithfulness_field: str = "adherence_score",
+ ) -> Dict[str, Optional[float]]:
+     r"""Calculate RAGAS evaluation metrics.
+
+     Args:
+         dataset (Dataset): The dataset containing predictions and ground truth.
+         pred_context_relevance_field (Optional[str]): Field name for predicted
+             context relevance.
+         pred_faithfulness_field (Optional[str]): Field name for predicted
+             faithfulness.
+         metrics_to_evaluate (Optional[List[str]]): List of metrics to evaluate.
+         ground_truth_context_relevance_field (str): Field name for ground truth
+             relevance.
+         ground_truth_faithfulness_field (str): Field name for ground truth
+             adherence.
+
+     Returns:
+         Dict[str, Optional[float]]: Dictionary of calculated metrics.
+     """
+     metrics_to_evaluate = metrics_to_evaluate or [
+         "context_relevancy",
+         "faithfulness",
+     ]
+     calculated_metrics: Dict[str, Optional[float]] = {}
+
+     if (
+         "context_relevancy" in metrics_to_evaluate
+         and pred_context_relevance_field
+     ):
+         trues_relevance = dataset[ground_truth_context_relevance_field]
+         preds_relevance = dataset[pred_context_relevance_field]
+         calculated_metrics["relevance_rmse"] = rmse(
+             trues_relevance, preds_relevance
+         )
+
+     if "faithfulness" in metrics_to_evaluate and pred_faithfulness_field:
+         trues_hallucination = ~np.array(
+             dataset[ground_truth_faithfulness_field]
+         )
+         preds_hallucination = 1 - np.array(
+             dataset[pred_faithfulness_field], dtype=float
+         )
+         calculated_metrics["hallucination_auroc"] = auroc(
+             trues_hallucination.tolist(), preds_hallucination.tolist()
+         )
+
+     return calculated_metrics
+
+
+ def ragas_evaluate_dataset(
+     dataset: Dataset,
+     contexts_field_name: Optional[str],
+     answer_field_name: Optional[str],
+     metrics_to_evaluate: Optional[List[str]] = None,
+ ) -> Dataset:
+     r"""Evaluate the dataset using RAGAS metrics.
+
+     Args:
+         dataset (Dataset): Input dataset to evaluate.
+         contexts_field_name (Optional[str]): Field name containing contexts.
+         answer_field_name (Optional[str]): Field name containing answers.
+         metrics_to_evaluate (Optional[List[str]]): List of metrics to evaluate.
+
+     Returns:
+         Dataset: Dataset with added evaluation metrics.
+     """
+     from ragas import evaluate
+     from ragas.metrics import (  # type: ignore[import-untyped]
+         context_relevancy,
+         faithfulness,
+     )
+
+     metrics_to_evaluate = metrics_to_evaluate or [
+         "context_relevancy",
+         "faithfulness",
+     ]
+
+     # Rename fields if necessary
+     if (
+         contexts_field_name
+         and contexts_field_name != RagasFields.INPUT_CONTEXT
+     ):
+         dataset = dataset.rename_column(
+             contexts_field_name, RagasFields.INPUT_CONTEXT
+         )
+     if answer_field_name and answer_field_name != RagasFields.INPUT_ANSWER:
+         dataset = dataset.rename_column(
+             answer_field_name, RagasFields.INPUT_ANSWER
+         )
+
+     metrics = []
+     if "context_relevancy" in metrics_to_evaluate:
+         metrics.append(context_relevancy)
+     if "faithfulness" in metrics_to_evaluate:
+         metrics.append(faithfulness)
+
+     ragas_result = evaluate(dataset, metrics=metrics)
+     return Dataset.from_pandas(ragas_result.to_pandas())
+
+
+ class RAGBenchBenchmark(BaseBenchmark):
+     r"""RAGBench Benchmark for evaluating RAG performance.
+
+     This benchmark uses the rungalileo/ragbench dataset to evaluate
+     retrieval-augmented generation (RAG) systems. It measures context
+     relevancy and faithfulness metrics as described in
+     https://arxiv.org/abs/2407.11005.
+
+     Args:
+         processes (int, optional): Number of processes for parallel processing.
+         subset (str, optional): Dataset subset to use (e.g., "hotpotqa").
+         split (str, optional): Dataset split to use (e.g., "test").
+     """
+
+     def __init__(
+         self,
+         processes: int = 1,
+         subset: Literal[
+             "covidqa",
+             "cuad",
+             "delucionqa",
+             "emanual",
+             "expertqa",
+             "finqa",
+             "hagrid",
+             "hotpotqa",
+             "msmarco",
+             "pubmedqa",
+             "tatqa",
+             "techqa",
+         ] = "hotpotqa",
+         split: Literal["train", "test", "validation"] = "test",
+     ) -> None:
+         super().__init__("ragbench", "rag_bench", "", processes)
+         self.subset = subset
+         self.split = split
+         self.dataset: Optional[Dataset] = None
+
+     def download(self):
+         r"""Download the RAGBench dataset."""
+         try:
+             self.dataset = load_dataset(
+                 "rungalileo/ragbench", self.subset, split=self.split
+             )
+         except Exception as e:
+             logger.error(f"Failed to download dataset: {e}")
+             raise
+
+     def load(self, force_download: bool = False):
+         r"""Load the RAGBench dataset.
+
+         Args:
+             force_download (bool, optional): Whether to force download the
+                 data.
+         """
+         if force_download or self.dataset is None:
+             logger.info(
+                 "%s dataset",
+                 "Force downloading" if force_download else "Loading",
+             )
+             self.download()
+
+     def run(  # type: ignore[override, return]
+         self,
+         agent: ChatAgent,
+         auto_retriever: AutoRetriever,
+     ) -> Dict[str, Optional[float]]:
+         r"""Run the benchmark evaluation.
+
+         Args:
+             agent (ChatAgent): Chat agent for generating answers.
+             auto_retriever (AutoRetriever): Retriever for finding relevant
+                 contexts.
+
+         Returns:
+             Dict[str, Optional[float]]: Dictionary of evaluation metrics.
+         """
+
+         def context_call(example):
+             retrieved_info = auto_retriever.run_vector_retriever(
+                 query=example['question'],
+                 contents=example['documents'],
+                 top_k=1,
+                 return_detailed_info=True,
+                 similarity_threshold=0.5,
+             )
+             return [c['text'] for c in retrieved_info['Retrieved Context']]
+
+         def answer_call(example: Dict[str, Any]) -> str:
+             user_msg = str(example)
+             assistant_response = agent.step(user_msg)
+             return assistant_response.msg.content
+
+         # Annotate the dataset
+         annotated_ds = annotate_dataset(
+             self.dataset, context_call, answer_call
+         )
+         evaluated_ds = ragas_evaluate_dataset(
+             annotated_ds,
+             contexts_field_name="contexts",
+             answer_field_name="answer",
+             metrics_to_evaluate=["context_relevancy", "faithfulness"],
+         )
+
+         return ragas_calculate_metrics(
+             evaluated_ds,
+             pred_context_relevance_field="context_relevancy",
+             pred_faithfulness_field="faithfulness",
+         )
camel/bots/__init__.py CHANGED
@@ -11,7 +11,7 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.
  # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
- from .discord_app import DiscordApp
+ from .discord import DiscordApp
  from .slack.models import (
      SlackAppMentionEventBody,
      SlackAppMentionEventProfile,
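
Because the package root now re-exports DiscordApp from the new subpackage, downstream imports through camel.bots keep working; only the defining module moved. A quick sketch of the equivalence, using only the imports shown in this diff:

from camel.bots import DiscordApp
from camel.bots.discord import DiscordApp as DiscordAppFromSubpackage

# Both paths resolve to the same class object in 0.2.17.
assert DiscordApp is DiscordAppFromSubpackage
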
camel/bots/discord/__init__.py ADDED
@@ -0,0 +1,26 @@
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+ from .discord_app import DiscordApp
+ from .discord_installation import DiscordInstallation
+ from .discord_store import (
+     DiscordBaseInstallationStore,
+     DiscordSQLiteInstallationStore,
+ )
+
+ __all__ = [
+     "DiscordApp",
+     "DiscordInstallation",
+     "DiscordSQLiteInstallationStore",
+     "DiscordBaseInstallationStore",
+ ]
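
With the old single-module camel/bots/discord_app.py removed (file 48 above), this __init__ defines the subpackage's public surface. A sketch of the imports it supports, matching its __all__; constructor signatures and methods of these classes are not shown in this diff:

from camel.bots.discord import (
    DiscordApp,
    DiscordBaseInstallationStore,
    DiscordInstallation,
    DiscordSQLiteInstallationStore,
)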