camel-ai 0.2.37__py3-none-any.whl → 0.2.38__py3-none-any.whl

This diff shows the changes between publicly released package versions as they appear in their public registries, and is provided for informational purposes only.


Files changed (55)
  1. camel/__init__.py +1 -1
  2. camel/datagen/evol_instruct/__init__.py +20 -0
  3. camel/datagen/evol_instruct/evol_instruct.py +424 -0
  4. camel/datagen/evol_instruct/scorer.py +166 -0
  5. camel/datagen/evol_instruct/templates.py +268 -0
  6. camel/environments/models.py +10 -4
  7. camel/environments/single_step.py +91 -17
  8. camel/interpreters/docker_interpreter.py +1 -1
  9. camel/interpreters/e2b_interpreter.py +1 -1
  10. camel/interpreters/subprocess_interpreter.py +1 -1
  11. camel/loaders/__init__.py +2 -2
  12. camel/loaders/{panda_reader.py → pandas_reader.py} +61 -30
  13. camel/memories/context_creators/score_based.py +198 -67
  14. camel/models/aiml_model.py +9 -3
  15. camel/models/anthropic_model.py +11 -3
  16. camel/models/azure_openai_model.py +9 -3
  17. camel/models/base_audio_model.py +6 -0
  18. camel/models/base_model.py +4 -0
  19. camel/models/deepseek_model.py +9 -3
  20. camel/models/gemini_model.py +9 -3
  21. camel/models/groq_model.py +9 -3
  22. camel/models/internlm_model.py +8 -2
  23. camel/models/model_factory.py +4 -0
  24. camel/models/moonshot_model.py +8 -2
  25. camel/models/nemotron_model.py +9 -3
  26. camel/models/nvidia_model.py +9 -3
  27. camel/models/ollama_model.py +9 -3
  28. camel/models/openai_audio_models.py +5 -3
  29. camel/models/openai_compatible_model.py +9 -3
  30. camel/models/openai_model.py +9 -3
  31. camel/models/openrouter_model.py +9 -3
  32. camel/models/qwen_model.py +9 -3
  33. camel/models/samba_model.py +9 -3
  34. camel/models/sglang_model.py +11 -4
  35. camel/models/siliconflow_model.py +8 -2
  36. camel/models/stub_model.py +2 -1
  37. camel/models/togetherai_model.py +9 -3
  38. camel/models/vllm_model.py +9 -3
  39. camel/models/yi_model.py +9 -3
  40. camel/models/zhipuai_model.py +9 -3
  41. camel/retrievers/auto_retriever.py +14 -0
  42. camel/storages/__init__.py +2 -0
  43. camel/storages/vectordb_storages/__init__.py +2 -0
  44. camel/storages/vectordb_storages/tidb.py +332 -0
  45. camel/toolkits/__init__.py +5 -0
  46. camel/toolkits/browser_toolkit.py +84 -61
  47. camel/toolkits/openai_agent_toolkit.py +131 -0
  48. camel/toolkits/searxng_toolkit.py +207 -0
  49. camel/toolkits/thinking_toolkit.py +168 -12
  50. camel/types/enums.py +1 -0
  51. camel/verifiers/python_verifier.py +12 -4
  52. {camel_ai-0.2.37.dist-info → camel_ai-0.2.38.dist-info}/METADATA +52 -4
  53. {camel_ai-0.2.37.dist-info → camel_ai-0.2.38.dist-info}/RECORD +55 -48
  54. {camel_ai-0.2.37.dist-info → camel_ai-0.2.38.dist-info}/WHEEL +0 -0
  55. {camel_ai-0.2.37.dist-info → camel_ai-0.2.38.dist-info}/licenses/LICENSE +0 -0
camel/datagen/evol_instruct/templates.py ADDED
@@ -0,0 +1,268 @@
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+ from abc import ABC, abstractmethod
+ from dataclasses import dataclass
+ from typing import Dict, List, Union
+
+
+ # flake8: noqa
+ @dataclass(frozen=True)
+ class BaseEvolInstructTemplates(ABC):
+     r"""Abstract base class for evolution instruction templates.
+
+     This class defines a required structure for prompt transformation templates
+     - `EVOL_METHODS`: A dictionary mapping method keys to their descriptions.
+     - `STRATEGY`: A dictionary defining strategies and associated methods.
+
+     Subclasses should define concrete templates for specific domains.
+     """
+
+     @property
+     @abstractmethod
+     def EVOL_METHODS(self) -> Dict[str, str]:
+         r"""A dictionary mapping evolution method keys to their descriptions."""
+         pass
+
+     @property
+     @abstractmethod
+     def STRATEGY(self) -> Dict[str, Dict[str, Union[str, List[str]]]]:
+         r"""A dictionary defining strategies and their corresponding methods."""
+         pass
+
+
+ # flake8: noqa
+ @dataclass(frozen=True)
+ class EvolInstructTemplates(BaseEvolInstructTemplates):
+     r"""Contains templates for EvolInstruct prompt transformations.
+
+     References:
+     - WizardLM: Empowering Large Language Models to Follow Complex
+       Instructions
+       https://arxiv.org/pdf/2304.12244
+     - eva: Evolving Alignment via Asymmetric Self-Play
+       https://arxiv.org/abs/2411.00062
+     """
+
+     # High-level instructions on in-depth/in-breadth evolving
+     INST_IN_DEPTH = (
+         "Please act as an expert Prompt Creator.\n"
+         "Your objective is to rewrite a given prompt into a more complex "
+         "version to make those large language models (e.g., gemini) a bit "
+         "harder to handle.\n"
+         "But the rewritten prompt must be reasonable and must be understood "
+         "and responded by humans.\n"
+         "Your rewriting cannot omit the non-text parts such as the table and "
+         "code in #Given Prompt#, if there is any."
+         "You should try your best not to make the #Rewritten Prompt# become "
+         "verbose, "
+         "The #Rewritten Prompt# should be roughly the similar length or a "
+         "little bit more than that of #Given Prompt#.\n"
+         "The #Rewritten Prompt# must sound like a real human user's prompt; "
+         "DON'T make it like sound machine-generated."
+         "Specifically, you SHOULD complicate the given prompt using the "
+         "following method: "
+         "\n{method}\n"
+         "The rewritten prompt should reflect meaningful changes across its "
+         "structure, ensuring the entire sentence feels sufficiently different "
+         "from the original. "
+         "Again, make sure the rewritten prompt is more CHALLENGING."
+         "Respond with your rewritten prompt directly. "
+         "#Given Prompt#:\n{prompt}\n"
+         "#Rewritten Prompt#:\n"
+     ).lstrip()
+
+     INST_IN_BREADTH = (
+         "Please act as an expert Prompt Creator.\n"
+         "Your objective is to generate a brand-new prompt based on the #Given "
+         "Prompt#. "
+         "The purpose of this task is to promote diversity and generality of "
+         "training prompts for language models, helping it practice with "
+         "varied challenges and perspectives.\n"
+         "The LENGTH and complexity of the #Created Prompt# should be similar "
+         "to that of the #Given Prompt#.\n"
+         "The #Created Prompt# must be reasonable, interpretable, and solvable "
+         "by humans.\n"
+         "The #Created Prompt# must sound like a real human user's prompt; "
+         "DON'T make it sound like machine-generated."
+         "Follow the method described below to guide your creation:\n"
+         "{method}\n"
+         "The created prompt should reflect meaningful changes across its "
+         "structure, ensuring the entire sentence feels sufficiently different "
+         "from the original. "
+         "Respond with your created prompt directly.\n"
+         "#Given Prompt#:\n{prompt}\n"
+         "#Created Prompt#:\n"
+     ).lstrip()
+
+     # Sub-method instructions (following the eva paper setting)
+     IN_BREADTH_KEYS = [
+         'persona',
+         'shift-in',
+         'shift-out',
+         'mix',
+         'abstract',
+     ]
+
+     IN_DEPTH_KEYS = [
+         'constraints',
+         'deepening',
+         'concretizing',
+         'reasoning',
+         'expansion',
+     ]
+
+     STRATEGY = {
+         "IN-DEPTH": {
+             'meta_instruction': INST_IN_DEPTH,
+             'methods': IN_DEPTH_KEYS,
+         },
+         "IN-BREADTH": {
+             'meta_instruction': INST_IN_BREADTH,
+             'methods': IN_BREADTH_KEYS,
+         },
+     }
+
+     EVOL_METHODS = {
+         "persona": (
+             "Reframe the #Given Prompt# as if written by a user with a "
+             "completely different persona, background, or expertise. Adjust "
+             "the tone, style, phrasing, or anything you feel proper to "
+             "reflect this change. The changes should make the prompt feel "
+             "like it was authored by someone entirely new."
+         ),
+         "shift-in": (
+             "Shift the high-level idea of the #Given Prompt# to explore a "
+             "different subdomain or context within the same domain. Ensure "
+             "the new topic still challenges the model to reason or provide "
+             "knowledge relevant to the domain."
+         ),
+         "shift-out": (
+             "Shift the high-level idea of the #Given Prompt# to a completely "
+             "different topic in a different setting. The new topic may "
+             "challenge the model with similar reasoning or contextual "
+             "understanding but in a novel way."
+         ),
+         "mix": (
+             "Combine the high-level concept of the #Given Prompt# with "
+             "elements from a different domain. Introduce novel scenarios or "
+             "contexts to create diversity while maintaining relevance to the "
+             "original idea."
+         ),
+         "abstract": (
+             "Turn the #Given Prompt# into a more abstract or generalized "
+             "version, removing specific details while preserving its intent. "
+             "Ensure the new prompt encourages broader, principle-driven "
+             "reasoning."
+         ),
+         "constraints": (
+             "Add one or more significant constraints or requirements into the "
+             "'#Given Prompt#'. The added constraints must meaningfully alter "
+             "how the model would respond. For example, specify additional "
+             "rules, contexts, or limitations that demand creative adjustments."
+         ),
+         "deepening": (
+             "If the #Given Prompt# contains inquiries about certain issues, "
+             "increase the depth and breadth of the inquiry. Make the question "
+             "require a more detailed, multi-layered, or comprehensive response"
+             ". For instance, break the problem into sub-problems or require "
+             "connections between unrelated concepts."
+         ),
+         "concretizing": (
+             "Replace general concepts in the #Given Prompt# with more specific"
+             " and detailed concepts. Ensure that the change makes the problem "
+             "more defined and concrete, leaving less room for ambiguity. For "
+             "example, replace 'a device' with 'a wearable fitness tracker "
+             "with GPS'."
+         ),
+         "reasoning": (
+             "Add one or more reasoning steps into the '#Given Prompt#'. "
+             "Explicitly rewrite it to demand multi-step reasoning or justify "
+             "intermediate steps in the solution. For instance, if the original"
+             " prompt is a simple query, make the response require a "
+             "step-by-step breakdown of logic or calculations."
+         ),
+         "expansion": (
+             "Expand the #Given Prompt# by including additional perspectives, "
+             "domains, or layers of complexity. For example, if the original "
+             "prompt focuses on a single scenario, add related scenarios or ask"
+             " the model to compare different situations."
+         ),
+     }
+
+
+ # flake8: noqa
+ @dataclass(frozen=True)
+ class MathEvolInstructTemplates(BaseEvolInstructTemplates):
+     r"""Contains templates for MathEvolInstruct prompt transformations."""
+
+     # Meta-instructions for in-depth evolving
+     INST_IN_DEPTH = (
+         "Please act as a math expert. Your objective is to create a new math "
+         "problem that is more challenging yet concise than the given math "
+         "problem. Ensure that the mathematical content (including any "
+         "equations or figures) is preserved, and rephrase the problem to "
+         "increase its complexity and depth. The generated problem should be "
+         "clearly stated, strictly mathematical, and suitable for solving with "
+         "symbolic computation (e.g., using sympy). You will be given a method "
+         "to guide your creation. Make sure to follow the method strictly. "
+         "Consolidate any multiple parts into one integrated question that "
+         "ask for one definitive answer. Respond with your generated problem "
+         "directly. "
+         "#Original Problem#:\n{prompt}\n"
+         "#Generated Problem#:\n"
+     ).lstrip()
+
+     EVOL_METHODS = {
+         "constraints": (
+             "Add one or more significant constraints or requirements into the "
+             "'#Given Prompt#'. The added constraints must meaningfully alter "
+             "how the model would respond. For example, specify additional "
+             "rules, contexts, or limitations that demand creative adjustments."
+         ),
+         "deepening": (
+             "Increase the difficulty of the #Given Prompt# by integrating "
+             "additional layers of reasoning and rigor. Refine the problem so "
+             "that all added difficulty is consolidated into a single coherent "
+             "question requiring one final answer, avoiding fragmentation into "
+             "multiple sub-problems."
+         ),
+         "expansion": (
+             "Expand the #Given Prompt# by incorporating additional "
+             "perspectives or layers of complexity into the problem statement. "
+             "Ensure that the revised problem remains a single, unified "
+             "question with one final answer, rather than a series of separate "
+             "sub-questions."
+         ),
+         "condense": (
+             "Reformulate the given math problem into a well-structured and "
+             "formally stated mathematical question.\n"
+             "- Present the problem in a structured and rigorous mathematical "
+             "format.\n"
+             "- Removing unnecessary instructions, explanations, or hints.\n"
+             "- If the given problem contains several sub-questions, make "
+             "necessary changes to let the problem could be answered with one "
+             "number or expression by removing the sub-questions or combining "
+             "them into one."
+         ),
+     }
+
+     IN_DEPTH_KEYS = ['constraints', 'deepening', 'expansion']
+
+     STRATEGY = {
+         "IN-DEPTH": {
+             'meta_instruction': INST_IN_DEPTH,
+             'methods': IN_DEPTH_KEYS,
+         },
+     }
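
To see how these templates are meant to be combined, here is a minimal sketch (not part of the release; the sample prompt string is invented) that picks a strategy, samples one of its methods, and fills the meta-instruction's {method} and {prompt} placeholders:

import random

from camel.datagen.evol_instruct.templates import EvolInstructTemplates

# Pick a strategy and one of its evolution methods.
strategy = EvolInstructTemplates.STRATEGY["IN-DEPTH"]
method_key = random.choice(strategy["methods"])  # e.g. "reasoning"

# Fill the meta-instruction with the method description and a seed prompt.
evol_prompt = strategy["meta_instruction"].format(
    method=EvolInstructTemplates.EVOL_METHODS[method_key],
    prompt="Explain why the sum of two odd numbers is even.",  # invented seed
)
print(evol_prompt)  # this text would then be sent to an LLM

The evolved prompt an LLM returns can then presumably be scored (see the new scorer.py above) or fed back in for further evolution rounds.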
camel/environments/models.py CHANGED
@@ -13,7 +13,7 @@
  # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 
  from datetime import datetime, timezone
- from typing import Any, Dict, Optional, Protocol
+ from typing import Any, Dict, Optional, Protocol, Tuple
 
  from pydantic import BaseModel, Field
 
@@ -33,9 +33,8 @@ class Action(BaseModel):
              generated (UTC).
      """
 
-     index: int = Field(
-         ..., description="Index of the state this action is performed upon"
-     )
+     index: Optional[int] = Field(default=None, description="...")
+
      llm_response: str = Field(description="Generated response from the LLM")
      metadata: Dict[str, Any] = Field(
          default_factory=dict,
@@ -87,6 +86,13 @@ class StepResult(BaseModel):
          description="Additional information about the step",
      )
 
+     def as_tuple(
+         self,
+     ) -> Tuple[Observation, float, bool, Dict[str, Any]]:
+         r"""Returns all fields of the model as a tuple, in declaration order"""
+         self.info["rewards_dict"] = self.rewards_dict
+         return (self.observation, self.reward, self.done, self.info)
+
 
  class Environment(Protocol):
      async def reset(self) -> Observation:
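
The net effect of these model changes is a gym-style interface: Action.index becomes optional, and StepResult.as_tuple() flattens a result into the familiar (observation, reward, done, info) 4-tuple. A minimal sketch of consuming it (assuming step_result is a StepResult produced by the environment):

# Unpack the gym-style 4-tuple defined by as_tuple() above.
obs, reward, done, info = step_result.as_tuple()

# as_tuple() copies the per-component rewards into info, so the
# breakdown stays available alongside the scalar reward.
per_component = info["rewards_dict"]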
camel/environments/single_step.py CHANGED
@@ -126,6 +126,7 @@ class SingleStepEnv:
              await self.verifier.cleanup()
              self._states = []
              self._states_done = []
+             self.current_batch_size = 0
              logger.info('Environment closed successfully')
          except Exception as e:
              logger.error(f'Failed to close environment: {e}')
@@ -157,6 +158,8 @@
              ValueError: If batch size exceeds dataset size.
              TypeError: If the dataset is of an unsupported type.
          """
+         if batch_size <= 0:
+             raise ValueError("Batch size must be positive")
 
          if not self._is_setup:
              logger.warning(
@@ -212,16 +215,18 @@
 
      async def step(
          self, action: Union[Action, List[Action]]
-     ) -> Union[StepResult, List[StepResult]]:
-         r"""Process actions for a subset of states and update their
-         finished status.
+     ) -> Union[
+         Tuple[Observation, float, bool, Dict[str, Any]],
+         List[Tuple[Observation, float, bool, Dict[str, Any]]],
+     ]:
+         r"""Process actions for a subset of states and update their finished
+         status.
 
          Args:
-             action: Single action or list of actions, where each action
-                 contains an index indicating which state it corresponds to.
-                 The index must be a valid position in the internal _states list
-                 that was populated during the reset() call.
-
+             action: Single action (for batch_size=1 or micro-batch of size 1)
+                 or list of actions (for batch_size>=2 with multiple actions).
+                 Each action must have an index for batch_size>=2, indicating
+                 which state it corresponds to.
 
          Returns:
              Union[StepResult, List[StepResult]]: StepResult or list of
@@ -241,10 +246,62 @@
          if not self._states:
              raise RuntimeError("No current observation. Call reset() first.")
 
-         # Normalize everything to list
-         actions = [action] if isinstance(action, Action) else action
-         indices = [act.index for act in actions]
+         # Normalize actions into a list for uniform processing
+         if self.current_batch_size == 1:
+             if isinstance(action, list):
+                 if len(action) != 1 or not isinstance(action[0], Action):
+                     raise ValueError(
+                         "For batch_size=1, expect a single Action or a "
+                         "list containing exactly one Action"
+                     )
+             elif not isinstance(action, Action):
+                 raise ValueError(
+                     "For batch_size=1, expect a single Action or a "
+                     "list containing exactly one Action"
+                 )
+             if isinstance(action, Action):
+                 actions = [action]
+             else:
+                 actions = action
+             if actions[0].index is None:
+                 actions[0].index = 0
+             if actions[0].index != 0:
+                 raise ValueError("For batch_size=1, index must be None or 0")
+
+         else:  # batch_size >= 2
+             if isinstance(action, Action):
+                 if action.index is None:
+                     raise ValueError(
+                         "For batch_size>=2, each Action must have an index"
+                     )
+                 if not isinstance(action.index, int):
+                     raise ValueError("Index must be an integer")
+                 actions = [action]
+             elif isinstance(action, list):
+                 if not action:  # Empty list
+                     raise ValueError("Action list cannot be empty")
+                 actions = action
+                 for act in actions:
+                     if not isinstance(act, Action):
+                         raise ValueError(
+                             "All elements in list must be Action objects"
+                         )
+                     if act.index is None:
+                         raise ValueError(
+                             "For batch_size>=2, each Action must have an index"
+                         )
+                     if not isinstance(act.index, int):
+                         raise ValueError("Index must be an integer")
+             else:
+                 raise ValueError(
+                     "For batch_size>=2, expect an Action or list of Actions"
+                 )
 
+         # Validate indices
+         indices: List[int] = []
+         for act in actions:
+             assert act.index is not None
+             indices.append(act.index)
          if len(set(indices)) != len(indices):
              raise ValueError("Duplicate state indices in actions.")
          for idx in indices:
@@ -254,7 +311,6 @@
              raise ValueError(f"State at index {idx} is already finished.")
 
          num_actions = len(actions)
-
          if self.current_batch_size % num_actions != 0:
              logger.warning(
                  f"Number of actions ({num_actions}) is not a divisor of "
@@ -262,9 +318,9 @@
              )
 
          proposed_solutions = [act.llm_response for act in actions]
-         ground_truths: List[str] = [
-             self._states[idx].final_answer for idx in indices
-         ]
+         ground_truths: List[str] = []
+         for idx in indices:
+             ground_truths.append(self._states[idx].final_answer)
 
          verification_results = await self.verifier.verify_batch(
              solutions=proposed_solutions,
@@ -276,9 +332,10 @@
              proposed_solutions, verification_results
          )
 
+         # TODO Batch this
          step_results = []
-         # TODO: batch this
          for i, action in enumerate(actions):
+             assert action.index is not None
              idx = action.index
              step_result = StepResult(
                  observation=self.PLACEHOLDER_OBS,
@@ -291,7 +348,7 @@
                      "state": self._states[idx],
                  },
              )
-             step_results.append(step_result)
+             step_results.append(step_result.as_tuple())
              self._states_done[idx] = True
 
          return step_results[0] if len(step_results) == 1 else step_results
@@ -315,6 +372,12 @@
              - List of total rewards for each solution.
              - List of reward component dictionaries for each solution.
          """
+         if len(proposed_solutions) != len(verification_results):
+             raise ValueError(
+                 f"Length mismatch: {len(proposed_solutions)} solutions vs "
+                 f"{len(verification_results)} verification results"
+             )
+
          total_rewards = []
          rewards_dicts = []
 
@@ -355,9 +418,20 @@
              return {}
 
      def _batch_done(self) -> bool:
+         r"""Check if all states in the current batch are done.
+
+         Returns:
+             bool: True if all states are marked as done, False otherwise.
+         """
          return all(self._states_done)
 
      def _batch_started(self) -> bool:
+         r"""Check if any state in the current batch is done.
+
+         Returns:
+             bool: True if at least one state is marked as done, False
+                 otherwise.
+         """
          return any(self._states_done)
 
      @property
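
Taken together, these single_step.py changes let step() drive a standard RL-style loop. A hedged sketch of the batched flow (env construction, dataset, and verifier are assumed to be set up elsewhere; reset(), step(), Action, and the tuple return shape come from the diff above):

from camel.environments.models import Action

async def rollout(env, batch_size: int = 2) -> None:
    # reset() now rejects non-positive batch sizes and (as before) batch
    # sizes larger than the dataset.
    observations = await env.reset(batch_size=batch_size)

    # For batch_size >= 2, every Action must carry the index of the state
    # it answers; duplicate or out-of-range indices raise ValueError.
    actions = [
        Action(index=i, llm_response=f"proposed answer for state {i}")
        for i in range(batch_size)
    ]

    # With multiple actions, step() returns a list of gym-style tuples.
    for obs, reward, done, info in await env.step(actions):
        print(reward, done, info["rewards_dict"])

With batch_size=1, the same call accepts a bare Action without an index and returns a single tuple rather than a list.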
camel/interpreters/docker_interpreter.py CHANGED
@@ -210,7 +210,7 @@ class DockerInterpreter(BaseInterpreter):
          if self.require_confirm:
              logger.info(
                  f"The following {code_type} code will run on your "
-                 "computer: {code}"
+                 f"computer: {code}"
              )
              while True:
                  choice = input("Running code? [Y/n]:").lower()
camel/interpreters/e2b_interpreter.py CHANGED
@@ -99,7 +99,7 @@ class E2BInterpreter(BaseInterpreter):
          if self.require_confirm:
              logger.info(
                  f"The following {code_type} code will run on your "
-                 "e2b sandbox: {code}"
+                 f"e2b sandbox: {code}"
              )
              while True:
                  choice = input("Running code? [Y/n]:").lower()
camel/interpreters/subprocess_interpreter.py CHANGED
@@ -292,7 +292,7 @@ class SubprocessInterpreter(BaseInterpreter):
          if self.require_confirm:
              logger.info(
                  f"The following {code_type} code will run on your "
-                 "computer: {code}"
+                 f"computer: {code}"
              )
              while True:
                  choice = input("Running code? [Y/n]:").lower().strip()
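
The three interpreter changes fix the same logging bug: with implicit string concatenation, only fragments prefixed with f are interpolated, so the second fragment previously logged a literal {code}. A standalone illustration:

code_type = "python"
code = "print('hi')"

# Before: the second fragment lacks the f-prefix, so {code} stays literal.
before = (
    f"The following {code_type} code will run on your "
    "computer: {code}"
)

# After: both fragments are f-strings, so the code is interpolated.
after = (
    f"The following {code_type} code will run on your "
    f"computer: {code}"
)

print(before)  # The following python code will run on your computer: {code}
print(after)   # The following python code will run on your computer: print('hi')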
camel/loaders/__init__.py CHANGED
@@ -18,7 +18,7 @@ from .chunkr_reader import ChunkrReader
  from .firecrawl_reader import Firecrawl
  from .jina_url_reader import JinaURLReader
  from .mineru_extractor import MinerU
- from .panda_reader import PandaReader
+ from .pandas_reader import PandasReader
  from .unstructured_io import UnstructuredIO
 
  __all__ = [
@@ -30,6 +30,6 @@ __all__ = [
      'Firecrawl',
      'Apify',
      'ChunkrReader',
-     'PandaReader',
+     'PandasReader',
      'MinerU',
  ]
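
For downstream code, the PandaReader → PandasReader rename is a one-line import change; a sketch (assuming 0.2.38 keeps no compatibility alias for the old name, which this diff suggests):

# 0.2.37:
# from camel.loaders import PandaReader
# 0.2.38:
from camel.loaders import PandasReader

reader = PandasReader()  # constructor arguments, if any, are assumed defaults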