camel-ai 0.2.71a11__py3-none-any.whl → 0.2.72__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This release has been flagged as potentially problematic.

Files changed (46)
  1. camel/__init__.py +1 -1
  2. camel/agents/chat_agent.py +261 -489
  3. camel/memories/agent_memories.py +39 -0
  4. camel/memories/base.py +8 -0
  5. camel/models/gemini_model.py +30 -2
  6. camel/models/moonshot_model.py +36 -4
  7. camel/models/openai_model.py +29 -15
  8. camel/societies/workforce/prompts.py +25 -15
  9. camel/societies/workforce/role_playing_worker.py +1 -1
  10. camel/societies/workforce/single_agent_worker.py +9 -7
  11. camel/societies/workforce/worker.py +1 -1
  12. camel/societies/workforce/workforce.py +97 -34
  13. camel/storages/vectordb_storages/__init__.py +1 -0
  14. camel/storages/vectordb_storages/surreal.py +415 -0
  15. camel/tasks/task.py +9 -5
  16. camel/toolkits/__init__.py +10 -1
  17. camel/toolkits/base.py +57 -1
  18. camel/toolkits/human_toolkit.py +5 -1
  19. camel/toolkits/hybrid_browser_toolkit/config_loader.py +127 -414
  20. camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit.py +783 -1626
  21. camel/toolkits/hybrid_browser_toolkit/ws_wrapper.py +489 -0
  22. camel/toolkits/markitdown_toolkit.py +2 -2
  23. camel/toolkits/message_integration.py +592 -0
  24. camel/toolkits/note_taking_toolkit.py +195 -26
  25. camel/toolkits/openai_image_toolkit.py +5 -5
  26. camel/toolkits/origene_mcp_toolkit.py +97 -0
  27. camel/toolkits/screenshot_toolkit.py +213 -0
  28. camel/toolkits/search_toolkit.py +161 -79
  29. camel/toolkits/terminal_toolkit.py +379 -165
  30. camel/toolkits/video_analysis_toolkit.py +13 -13
  31. camel/toolkits/video_download_toolkit.py +11 -11
  32. camel/toolkits/web_deploy_toolkit.py +1024 -0
  33. camel/types/enums.py +6 -3
  34. camel/types/unified_model_type.py +16 -4
  35. camel/utils/mcp_client.py +8 -0
  36. camel/utils/tool_result.py +1 -1
  37. {camel_ai-0.2.71a11.dist-info → camel_ai-0.2.72.dist-info}/METADATA +6 -3
  38. {camel_ai-0.2.71a11.dist-info → camel_ai-0.2.72.dist-info}/RECORD +40 -40
  39. camel/toolkits/hybrid_browser_toolkit/actions.py +0 -417
  40. camel/toolkits/hybrid_browser_toolkit/agent.py +0 -311
  41. camel/toolkits/hybrid_browser_toolkit/browser_session.py +0 -739
  42. camel/toolkits/hybrid_browser_toolkit/snapshot.py +0 -227
  43. camel/toolkits/hybrid_browser_toolkit/stealth_script.js +0 -0
  44. camel/toolkits/hybrid_browser_toolkit/unified_analyzer.js +0 -1002
  45. {camel_ai-0.2.71a11.dist-info → camel_ai-0.2.72.dist-info}/WHEEL +0 -0
  46. {camel_ai-0.2.71a11.dist-info → camel_ai-0.2.72.dist-info}/licenses/LICENSE +0 -0
camel/toolkits/search_toolkit.py

@@ -32,6 +32,29 @@ class SearchToolkit(BaseToolkit):
     search engines like Google, DuckDuckGo, Wikipedia and Wolfram Alpha, Brave.
     """
 
+    def __init__(
+        self,
+        timeout: Optional[float] = None,
+        number_of_result_pages: int = 10,
+        exclude_domains: Optional[List[str]] = None,
+    ):
+        r"""Initializes the SearchToolkit with the specified timeout,
+        number of result pages, and excluded domains.
+
+        Args:
+            timeout (float): Timeout for API requests in seconds.
+                (default: :obj:`None`)
+            number_of_result_pages (int): The number of result pages to
+                retrieve. (default: :obj:`10`)
+            exclude_domains (Optional[List[str]]): List of domains to
+                exclude from search results. Currently only supported
+                by the `search_google` function.
+                (default: :obj:`None`)
+        """
+        super().__init__(timeout=timeout)
+        self.number_of_result_pages = number_of_result_pages
+        self.exclude_domains = exclude_domains
+
     @dependencies_required("wikipedia")
     def search_wiki(self, entity: str) -> str:
         r"""Search the entity in WikiPedia and return the summary of the
@@ -144,7 +167,7 @@ class SearchToolkit(BaseToolkit):
 
     @dependencies_required("duckduckgo_search")
     def search_duckduckgo(
-        self, query: str, source: str = "text", max_results: int = 5
+        self, query: str, source: str = "text"
     ) -> List[Dict[str, Any]]:
         r"""Use DuckDuckGo search engine to search information for
         the given query.
@@ -157,7 +180,6 @@ class SearchToolkit(BaseToolkit):
             query (str): The query to be searched.
             source (str): The type of information to query (e.g., "text",
                 "images", "videos"). Defaults to "text".
-            max_results (int): Max number of results, defaults to `5`.
 
         Returns:
             List[Dict[str, Any]]: A list of dictionaries where each dictionary
@@ -171,7 +193,9 @@ class SearchToolkit(BaseToolkit):
 
         if source == "text":
             try:
-                results = ddgs.text(keywords=query, max_results=max_results)
+                results = ddgs.text(
+                    keywords=query, max_results=self.number_of_result_pages
+                )
             except RequestException as e:
                 # Handle specific exceptions or general request exceptions
                 responses.append({"error": f"duckduckgo search failed.{e}"})
@@ -189,7 +213,9 @@ class SearchToolkit(BaseToolkit):
 
         elif source == "images":
             try:
-                results = ddgs.images(keywords=query, max_results=max_results)
+                results = ddgs.images(
+                    keywords=query, max_results=self.number_of_result_pages
+                )
             except RequestException as e:
                 # Handle specific exceptions or general request exceptions
                 responses.append({"error": f"duckduckgo search failed.{e}"})
@@ -208,7 +234,9 @@ class SearchToolkit(BaseToolkit):
 
         elif source == "videos":
             try:
-                results = ddgs.videos(keywords=query, max_results=max_results)
+                results = ddgs.videos(
+                    keywords=query, max_results=self.number_of_result_pages
+                )
             except RequestException as e:
                 # Handle specific exceptions or general request exceptions
                 responses.append({"error": f"duckduckgo search failed.{e}"})
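The same substitution runs through the "text", "images", and "videos" branches: `max_results` disappears from the signature and `self.number_of_result_pages` takes its place. A hedged before/after sketch of a caller migrating to 0.2.72:

    # Before 0.2.72: limit passed per call
    #   results = toolkit.search_duckduckgo("camel ai", source="text", max_results=5)
    # From 0.2.72: limit comes from the constructor
    toolkit = SearchToolkit(number_of_result_pages=5)
    results = toolkit.search_duckduckgo("camel ai", source="text")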
@@ -241,7 +269,6 @@ class SearchToolkit(BaseToolkit):
         country: str = "US",
         search_lang: str = "en",
         ui_lang: str = "en-US",
-        count: int = 20,
         offset: int = 0,
         safesearch: str = "moderate",
         freshness: Optional[str] = None,
@@ -277,10 +304,6 @@ class SearchToolkit(BaseToolkit):
                 Format: '<language_code>-<country_code>'. Common examples:
                 'en-US', 'en-GB', 'jp-JP', 'zh-hans-CN', 'zh-hant-TW',
                 'de-DE', 'fr-FR', 'es-ES', 'pt-BR', 'ru-RU', 'ko-KR'.
-            count (int): The number of search results returned in response.
-                The maximum is 20. The actual number delivered may be less than
-                requested. Combine this parameter with offset to paginate
-                search results.
             offset (int): The zero based offset that indicates number of search
                 results per page (count) to skip before returning the result.
                 The maximum is 9. The actual number delivered may be less than
@@ -368,7 +391,7 @@ class SearchToolkit(BaseToolkit):
             "country": country,
             "search_lang": search_lang,
             "ui_lang": ui_lang,
-            "count": count,
+            "count": self.number_of_result_pages,
             "offset": offset,
             "safesearch": safesearch,
             "freshness": freshness,
@@ -418,25 +441,38 @@ class SearchToolkit(BaseToolkit):
         ]
     )
     def search_google(
-        self, query: str, num_result_pages: int = 5
+        self,
+        query: str,
+        search_type: str = "web",
     ) -> List[Dict[str, Any]]:
         r"""Use Google search engine to search information for the given query.
 
         Args:
             query (str): The query to be searched.
-            num_result_pages (int): The number of result pages to retrieve.
+            search_type (str): The type of search to perform. Either "web" for
+                web pages or "image" for image search. (default: "web")
 
         Returns:
             List[Dict[str, Any]]: A list of dictionaries where each dictionary
-                represents a website.
-                Each dictionary contains the following keys:
+                represents a search result.
+
+                For web search, each dictionary contains:
                 - 'result_id': A number in order.
                 - 'title': The title of the website.
                 - 'description': A brief description of the website.
                 - 'long_description': More detail of the website.
                 - 'url': The URL of the website.
 
-        Example:
+                For image search, each dictionary contains:
+                - 'result_id': A number in order.
+                - 'title': The title of the image.
+                - 'image_url': The URL of the image.
+                - 'display_link': The website hosting the image.
+                - 'context_url': The URL of the page containing the image.
+                - 'width': Image width in pixels (if available).
+                - 'height': Image height in pixels (if available).
+
+        Example web result:
             {
                 'result_id': 1,
                 'title': 'OpenAI',
@@ -448,7 +484,17 @@ class SearchToolkit(BaseToolkit):
                 benefit humanity as a whole',
                 'url': 'https://www.openai.com'
             }
-            title, description, url of a website.
+
+        Example image result:
+            {
+                'result_id': 1,
+                'title': 'Beautiful Sunset',
+                'image_url': 'https://example.com/image.jpg',
+                'display_link': 'example.com',
+                'context_url': 'https://example.com/page.html',
+                'width': 800,
+                'height': 600
+            }
         """
         import requests
 
@@ -461,16 +507,30 @@ class SearchToolkit(BaseToolkit):
         start_page_idx = 1
         # Different language may get different result
         search_language = "en"
-        # How many pages to return
-        num_result_pages = num_result_pages
+
+        modified_query = query
+        if self.exclude_domains:
+            # Use Google's -site: operator to exclude domains
+            exclusion_terms = " ".join(
+                [f"-site:{domain}" for domain in self.exclude_domains]
+            )
+            modified_query = f"{query} {exclusion_terms}"
+            logger.debug(f"Excluded domains, modified query: {modified_query}")
+
         # Constructing the URL
         # Doc: https://developers.google.com/custom-search/v1/using_rest
-        url = (
+        base_url = (
             f"https://www.googleapis.com/customsearch/v1?"
-            f"key={GOOGLE_API_KEY}&cx={SEARCH_ENGINE_ID}&q={query}&start="
-            f"{start_page_idx}&lr={search_language}&num={num_result_pages}"
+            f"key={GOOGLE_API_KEY}&cx={SEARCH_ENGINE_ID}&q={modified_query}&start="
+            f"{start_page_idx}&lr={search_language}&num={self.number_of_result_pages}"
         )
 
+        # Add searchType parameter for image search
+        if search_type == "image":
+            url = base_url + "&searchType=image"
+        else:
+            url = base_url
+
         responses = []
         # Fetch the results given the URL
         try:
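The new exclusion block rewrites the query with Google's `-site:` operator before the Custom Search URL is assembled. A standalone sketch of that rewriting, with assumed example inputs:

    # Equivalent of the exclusion logic above (assumed inputs).
    query = "open source agents"
    exclude_domains = ["example.com", "ads.example.net"]
    exclusion_terms = " ".join(f"-site:{d}" for d in exclude_domains)
    modified_query = f"{query} {exclusion_terms}"
    # modified_query == "open source agents -site:example.com -site:ads.example.net"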
@@ -482,37 +542,68 @@ class SearchToolkit(BaseToolkit):
             if "items" in data:
                 search_items = data.get("items")
 
-                # Iterate over 10 results found
+                # Iterate over results found
                 for i, search_item in enumerate(search_items, start=1):
-                    # Check metatags are present
-                    if "pagemap" not in search_item:
-                        continue
-                    if "metatags" not in search_item["pagemap"]:
-                        continue
-                    if (
-                        "og:description"
-                        in search_item["pagemap"]["metatags"][0]
-                    ):
-                        long_description = search_item["pagemap"]["metatags"][
-                            0
-                        ]["og:description"]
+                    if search_type == "image":
+                        # Process image search results
+                        title = search_item.get("title")
+                        image_url = search_item.get("link")
+                        display_link = search_item.get("displayLink")
+
+                        # Get context URL (page containing the image)
+                        image_info = search_item.get("image", {})
+                        context_url = image_info.get("contextLink", "")
+
+                        # Get image dimensions if available
+                        width = image_info.get("width")
+                        height = image_info.get("height")
+
+                        response = {
+                            "result_id": i,
+                            "title": title,
+                            "image_url": image_url,
+                            "display_link": display_link,
+                            "context_url": context_url,
+                        }
+
+                        # Add dimensions if available
+                        if width:
+                            response["width"] = int(width)
+                        if height:
+                            response["height"] = int(height)
+
+                        responses.append(response)
                     else:
-                        long_description = "N/A"
-                    # Get the page title
-                    title = search_item.get("title")
-                    # Page snippet
-                    snippet = search_item.get("snippet")
-
-                    # Extract the page url
-                    link = search_item.get("link")
-                    response = {
-                        "result_id": i,
-                        "title": title,
-                        "description": snippet,
-                        "long_description": long_description,
-                        "url": link,
-                    }
-                    responses.append(response)
+                        # Process web search results (existing logic)
+                        # Check metatags are present
+                        if "pagemap" not in search_item:
+                            continue
+                        if "metatags" not in search_item["pagemap"]:
+                            continue
+                        if (
+                            "og:description"
+                            in search_item["pagemap"]["metatags"][0]
+                        ):
+                            long_description = search_item["pagemap"][
+                                "metatags"
+                            ][0]["og:description"]
+                        else:
+                            long_description = "N/A"
+                        # Get the page title
+                        title = search_item.get("title")
+                        # Page snippet
+                        snippet = search_item.get("snippet")
+
+                        # Extract the page url
+                        link = search_item.get("link")
+                        response = {
+                            "result_id": i,
+                            "title": title,
+                            "description": snippet,
+                            "long_description": long_description,
+                            "url": link,
+                        }
+                        responses.append(response)
             else:
                 error_info = data.get("error", {})
                 logger.error(
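With this branch in place, `search_google` returns either web or image result dictionaries depending on `search_type`. A hedged call sketch for the new image mode (the query and printed fields are illustrative; the keys come from the diff above):

    toolkit = SearchToolkit(number_of_result_pages=10)
    images = toolkit.search_google("sunset", search_type="image")
    for item in images:
        # 'width' and 'height' are present only when Google returns dimensions
        print(item["title"], item["image_url"], item.get("width"))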
@@ -529,15 +620,11 @@ class SearchToolkit(BaseToolkit):
             responses.append({"error": f"google search failed: {e!s}"})
         return responses
 
-    def tavily_search(
-        self, query: str, num_results: int = 5, **kwargs
-    ) -> List[Dict[str, Any]]:
+    def tavily_search(self, query: str, **kwargs) -> List[Dict[str, Any]]:
         r"""Use Tavily Search API to search information for the given query.
 
         Args:
             query (str): The query to be searched.
-            num_results (int): The number of search results to retrieve
-                (default is `5`).
             **kwargs: Additional optional parameters supported by Tavily's API:
                 - search_depth (str): "basic" or "advanced" search depth.
                 - topic (str): The search category, e.g., "general" or "news."
@@ -573,7 +660,9 @@ class SearchToolkit(BaseToolkit):
         client = TavilyClient(Tavily_API_KEY)
 
         try:
-            results = client.search(query, max_results=num_results, **kwargs)
+            results = client.search(
+                query, max_results=self.number_of_result_pages, **kwargs
+            )
             return results
         except Exception as e:
             return [{"error": f"An unexpected error occurred: {e!s}"}]
@@ -584,7 +673,6 @@ class SearchToolkit(BaseToolkit):
         query: str,
         freshness: str = "noLimit",
         summary: bool = False,
-        count: int = 10,
         page: int = 1,
     ) -> Dict[str, Any]:
         r"""Query the Bocha AI search API and return search results.
@@ -600,7 +688,6 @@ class SearchToolkit(BaseToolkit):
                 - 'oneYear': past year.
             summary (bool): Whether to include text summaries in results.
                 Default is False.
-            count (int): Number of results to return (1-50). Default is 10.
             page (int): Page number of results. Default is 1.
 
         Returns:
@@ -623,7 +710,7 @@ class SearchToolkit(BaseToolkit):
                 "query": query,
                 "freshness": freshness,
                 "summary": summary,
-                "count": count,
+                "count": self.number_of_result_pages,
                 "page": page,
             },
             ensure_ascii=False,
@@ -641,15 +728,13 @@ class SearchToolkit(BaseToolkit):
         except requests.exceptions.RequestException as e:
             return {"error": f"Bocha AI search failed: {e!s}"}
 
-    def search_baidu(self, query: str, max_results: int = 5) -> Dict[str, Any]:
+    def search_baidu(self, query: str) -> Dict[str, Any]:
         r"""Search Baidu using web scraping to retrieve relevant search
         results. This method queries Baidu's search engine and extracts search
         results including titles, descriptions, and URLs.
 
         Args:
             query (str): Search query string to submit to Baidu.
-            max_results (int): Maximum number of results to return.
-                (default: :obj:`5`)
 
         Returns:
             Dict[str, Any]: A dictionary containing search results or error
@@ -667,7 +752,7 @@ class SearchToolkit(BaseToolkit):
             ),
             "Referer": "https://www.baidu.com",
         }
-        params = {"wd": query, "rn": str(max_results)}
+        params = {"wd": query, "rn": str(self.number_of_result_pages)}
 
         response = requests.get(url, headers=headers, params=params)
         response.encoding = "utf-8"
@@ -696,7 +781,7 @@ class SearchToolkit(BaseToolkit):
                         "url": link,
                     }
                 )
-                if len(results) >= max_results:
+                if len(results) >= self.number_of_result_pages:
                     break
 
         if not results:
@@ -710,7 +795,7 @@ class SearchToolkit(BaseToolkit):
         except Exception as e:
             return {"error": f"Baidu scraping error: {e!s}"}
 
-    def search_bing(self, query: str, max_results: int = 5) -> Dict[str, Any]:
+    def search_bing(self, query: str) -> Dict[str, Any]:
         r"""Use Bing search engine to search information for the given query.
 
         This function queries the Chinese version of Bing search engine (cn.
@@ -722,8 +807,6 @@ class SearchToolkit(BaseToolkit):
         Args:
             query (str): The search query string to submit to Bing. Works best
                 with Chinese queries or when Chinese results are preferred.
-            max_results (int): Maximum number of results to return.
-                (default: :obj:`5`)
 
         Returns:
             Dict ([str, Any]): A dictionary containing either:
@@ -773,7 +856,9 @@ class SearchToolkit(BaseToolkit):
         result_items = b_results_tag.find_all("li")
 
         results: List[Dict[str, Any]] = []
-        for i in range(min(len(result_items), max_results)):
+        for i in range(
+            min(len(result_items), self.number_of_result_pages)
+        ):
             row = result_items[i]
             if not isinstance(row, Tag):
                 continue
@@ -838,7 +923,6 @@ class SearchToolkit(BaseToolkit):
                 "financial report",
             ]
         ] = None,
-        num_results: int = 10,
         include_text: Optional[List[str]] = None,
         exclude_text: Optional[List[str]] = None,
         use_autoprompt: bool = True,
@@ -854,8 +938,6 @@ class SearchToolkit(BaseToolkit):
                 and neural search. (default: :obj:`"auto"`)
             category (Optional[Literal]): Category to focus the search on, such
                 as "research paper" or "news". (default: :obj:`None`)
-            num_results (int): Number of results to return (max 100).
-                (default: :obj:`10`)
             include_text (Optional[List[str]]): Strings that must be present in
                 webpage text. Limited to 1 string of up to 5 words.
                 (default: :obj:`None`)
@@ -884,7 +966,10 @@ class SearchToolkit(BaseToolkit):
         try:
             exa = Exa(EXA_API_KEY)
 
-            if num_results is not None and not 0 < num_results <= 100:
+            if (
+                self.number_of_result_pages is not None
+                and not 0 < self.number_of_result_pages <= 100
+            ):
                 raise ValueError("num_results must be between 1 and 100")
 
             if include_text is not None:
@@ -911,7 +996,7 @@ class SearchToolkit(BaseToolkit):
                 query=query,
                 type=search_type,
                 category=category,
-                num_results=num_results,
+                num_results=self.number_of_result_pages,
                 include_text=include_text,
                 exclude_text=exclude_text,
                 use_autoprompt=use_autoprompt,
@@ -925,7 +1010,7 @@ class SearchToolkit(BaseToolkit):
                 query=query,
                 type=search_type,
                 category=category,
-                num_results=num_results,
+                num_results=self.number_of_result_pages,
                 include_text=include_text,
                 exclude_text=exclude_text,
                 use_autoprompt=use_autoprompt,
@@ -955,7 +1040,6 @@ class SearchToolkit(BaseToolkit):
                 "news_center",
             ]
         ] = None,
-        page: int = 1,
         return_main_text: bool = False,
         return_markdown_text: bool = True,
         enable_rerank: bool = True,
@@ -980,8 +1064,6 @@ class SearchToolkit(BaseToolkit):
                 results from sites in the specified industries. Multiple
                 industries can be comma-separated.
                 (default: :obj:`None`)
-            page (int): Page number for results pagination.
-                (default: :obj:`1`)
             return_main_text (bool): Whether to include the main text of the
                 webpage in results. (default: :obj:`True`)
             return_markdown_text (bool): Whether to include markdown formatted
@@ -1014,7 +1096,7 @@ class SearchToolkit(BaseToolkit):
         params: Dict[str, Union[str, int]] = {
             "query": query,
             "timeRange": time_range,
-            "page": page,
+            "page": self.number_of_result_pages,
             "returnMainText": str(return_main_text).lower(),
             "returnMarkdownText": str(return_markdown_text).lower(),
             "enableRerank": str(enable_rerank).lower(),