camel-ai 0.1.8__tar.gz → 0.2.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of camel-ai might be problematic. Click here for more details.

Files changed (197) hide show
  1. {camel_ai-0.1.8 → camel_ai-0.2.0}/PKG-INFO +4 -3
  2. {camel_ai-0.1.8 → camel_ai-0.2.0}/README.md +1 -1
  3. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/__init__.py +1 -1
  4. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/agents/chat_agent.py +10 -0
  5. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/configs/__init__.py +4 -0
  6. camel_ai-0.2.0/camel/configs/samba_config.py +207 -0
  7. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/models/ollama_model.py +2 -2
  8. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/models/openai_model.py +16 -0
  9. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/models/samba_model.py +45 -6
  10. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/models/vllm_model.py +2 -2
  11. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/toolkits/__init__.py +15 -24
  12. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/toolkits/google_maps_toolkit.py +99 -165
  13. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/toolkits/linkedin_toolkit.py +0 -3
  14. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/toolkits/open_api_toolkit.py +0 -3
  15. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/toolkits/reddit_toolkit.py +0 -3
  16. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/toolkits/retrieval_toolkit.py +0 -4
  17. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/toolkits/slack_toolkit.py +0 -3
  18. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/toolkits/twitter_toolkit.py +2 -5
  19. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/types/enums.py +6 -0
  20. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/utils/token_counting.py +11 -2
  21. {camel_ai-0.1.8 → camel_ai-0.2.0}/pyproject.toml +6 -5
  22. camel_ai-0.1.8/camel/configs/samba_config.py +0 -109
  23. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/agents/__init__.py +0 -0
  24. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/agents/base.py +0 -0
  25. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/agents/critic_agent.py +0 -0
  26. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/agents/deductive_reasoner_agent.py +0 -0
  27. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/agents/embodied_agent.py +0 -0
  28. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/agents/knowledge_graph_agent.py +0 -0
  29. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/agents/role_assignment_agent.py +0 -0
  30. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/agents/search_agent.py +0 -0
  31. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/agents/task_agent.py +0 -0
  32. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/agents/tool_agents/__init__.py +0 -0
  33. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/agents/tool_agents/base.py +0 -0
  34. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/agents/tool_agents/hugging_face_tool_agent.py +0 -0
  35. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/configs/anthropic_config.py +0 -0
  36. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/configs/base_config.py +0 -0
  37. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/configs/gemini_config.py +0 -0
  38. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/configs/groq_config.py +0 -0
  39. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/configs/litellm_config.py +0 -0
  40. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/configs/mistral_config.py +0 -0
  41. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/configs/ollama_config.py +0 -0
  42. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/configs/openai_config.py +0 -0
  43. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/configs/reka_config.py +0 -0
  44. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/configs/togetherai_config.py +0 -0
  45. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/configs/vllm_config.py +0 -0
  46. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/configs/zhipuai_config.py +0 -0
  47. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/embeddings/__init__.py +0 -0
  48. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/embeddings/base.py +0 -0
  49. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/embeddings/mistral_embedding.py +0 -0
  50. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/embeddings/openai_embedding.py +0 -0
  51. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/embeddings/sentence_transformers_embeddings.py +0 -0
  52. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/embeddings/vlm_embedding.py +0 -0
  53. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/generators.py +0 -0
  54. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/human.py +0 -0
  55. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/interpreters/__init__.py +0 -0
  56. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/interpreters/base.py +0 -0
  57. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/interpreters/docker_interpreter.py +0 -0
  58. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/interpreters/internal_python_interpreter.py +0 -0
  59. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/interpreters/interpreter_error.py +0 -0
  60. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/interpreters/ipython_interpreter.py +0 -0
  61. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/interpreters/subprocess_interpreter.py +0 -0
  62. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/loaders/__init__.py +0 -0
  63. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/loaders/base_io.py +0 -0
  64. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/loaders/firecrawl_reader.py +0 -0
  65. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/loaders/jina_url_reader.py +0 -0
  66. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/loaders/unstructured_io.py +0 -0
  67. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/memories/__init__.py +0 -0
  68. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/memories/agent_memories.py +0 -0
  69. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/memories/base.py +0 -0
  70. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/memories/blocks/__init__.py +0 -0
  71. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/memories/blocks/chat_history_block.py +0 -0
  72. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/memories/blocks/vectordb_block.py +0 -0
  73. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/memories/context_creators/__init__.py +0 -0
  74. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/memories/context_creators/score_based.py +0 -0
  75. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/memories/records.py +0 -0
  76. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/messages/__init__.py +0 -0
  77. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/messages/base.py +0 -0
  78. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/messages/func_message.py +0 -0
  79. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/models/__init__.py +0 -0
  80. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/models/anthropic_model.py +0 -0
  81. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/models/azure_openai_model.py +0 -0
  82. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/models/base_model.py +0 -0
  83. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/models/gemini_model.py +0 -0
  84. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/models/groq_model.py +0 -0
  85. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/models/litellm_model.py +0 -0
  86. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/models/mistral_model.py +0 -0
  87. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/models/model_factory.py +0 -0
  88. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/models/nemotron_model.py +0 -0
  89. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/models/open_source_model.py +0 -0
  90. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/models/openai_audio_models.py +0 -0
  91. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/models/openai_compatibility_model.py +0 -0
  92. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/models/reka_model.py +0 -0
  93. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/models/stub_model.py +0 -0
  94. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/models/togetherai_model.py +0 -0
  95. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/models/zhipuai_model.py +0 -0
  96. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/prompts/__init__.py +0 -0
  97. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/prompts/ai_society.py +0 -0
  98. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/prompts/base.py +0 -0
  99. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/prompts/code.py +0 -0
  100. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/prompts/evaluation.py +0 -0
  101. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/prompts/generate_text_embedding_data.py +0 -0
  102. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/prompts/image_craft.py +0 -0
  103. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/prompts/misalignment.py +0 -0
  104. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/prompts/multi_condition_image_craft.py +0 -0
  105. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/prompts/object_recognition.py +0 -0
  106. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/prompts/prompt_templates.py +0 -0
  107. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/prompts/role_description_prompt_template.py +0 -0
  108. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/prompts/solution_extraction.py +0 -0
  109. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/prompts/task_prompt_template.py +0 -0
  110. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/prompts/translation.py +0 -0
  111. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/prompts/video_description_prompt.py +0 -0
  112. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/responses/__init__.py +0 -0
  113. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/responses/agent_responses.py +0 -0
  114. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/retrievers/__init__.py +0 -0
  115. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/retrievers/auto_retriever.py +0 -0
  116. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/retrievers/base.py +0 -0
  117. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/retrievers/bm25_retriever.py +0 -0
  118. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/retrievers/cohere_rerank_retriever.py +0 -0
  119. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/retrievers/vector_retriever.py +0 -0
  120. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/societies/__init__.py +0 -0
  121. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/societies/babyagi_playing.py +0 -0
  122. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/societies/role_playing.py +0 -0
  123. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/storages/__init__.py +0 -0
  124. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/storages/graph_storages/__init__.py +0 -0
  125. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/storages/graph_storages/base.py +0 -0
  126. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/storages/graph_storages/graph_element.py +0 -0
  127. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/storages/graph_storages/neo4j_graph.py +0 -0
  128. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/storages/key_value_storages/__init__.py +0 -0
  129. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/storages/key_value_storages/base.py +0 -0
  130. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/storages/key_value_storages/in_memory.py +0 -0
  131. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/storages/key_value_storages/json.py +0 -0
  132. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/storages/key_value_storages/redis.py +0 -0
  133. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/storages/object_storages/__init__.py +0 -0
  134. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/storages/object_storages/amazon_s3.py +0 -0
  135. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/storages/object_storages/azure_blob.py +0 -0
  136. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/storages/object_storages/base.py +0 -0
  137. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/storages/object_storages/google_cloud.py +0 -0
  138. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/storages/vectordb_storages/__init__.py +0 -0
  139. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/storages/vectordb_storages/base.py +0 -0
  140. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/storages/vectordb_storages/milvus.py +0 -0
  141. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/storages/vectordb_storages/qdrant.py +0 -0
  142. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/tasks/__init__.py +0 -0
  143. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/tasks/task.py +0 -0
  144. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/tasks/task_prompt.py +0 -0
  145. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/terminators/__init__.py +0 -0
  146. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/terminators/base.py +0 -0
  147. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/terminators/response_terminator.py +0 -0
  148. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/terminators/token_limit_terminator.py +0 -0
  149. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/toolkits/base.py +0 -0
  150. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/toolkits/code_execution.py +0 -0
  151. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/toolkits/dalle_toolkit.py +0 -0
  152. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/toolkits/github_toolkit.py +0 -0
  153. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/toolkits/math_toolkit.py +0 -0
  154. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/toolkits/open_api_specs/biztoc/__init__.py +0 -0
  155. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/toolkits/open_api_specs/biztoc/ai-plugin.json +0 -0
  156. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/toolkits/open_api_specs/biztoc/openapi.yaml +0 -0
  157. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/toolkits/open_api_specs/coursera/__init__.py +0 -0
  158. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/toolkits/open_api_specs/coursera/openapi.yaml +0 -0
  159. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/toolkits/open_api_specs/create_qr_code/__init__.py +0 -0
  160. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/toolkits/open_api_specs/create_qr_code/openapi.yaml +0 -0
  161. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/toolkits/open_api_specs/klarna/__init__.py +0 -0
  162. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/toolkits/open_api_specs/klarna/openapi.yaml +0 -0
  163. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/toolkits/open_api_specs/nasa_apod/__init__.py +0 -0
  164. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/toolkits/open_api_specs/nasa_apod/openapi.yaml +0 -0
  165. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/toolkits/open_api_specs/outschool/__init__.py +0 -0
  166. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/toolkits/open_api_specs/outschool/ai-plugin.json +0 -0
  167. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/toolkits/open_api_specs/outschool/openapi.yaml +0 -0
  168. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/toolkits/open_api_specs/outschool/paths/__init__.py +0 -0
  169. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/toolkits/open_api_specs/outschool/paths/get_classes.py +0 -0
  170. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/toolkits/open_api_specs/outschool/paths/search_teachers.py +0 -0
  171. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/toolkits/open_api_specs/security_config.py +0 -0
  172. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/toolkits/open_api_specs/speak/__init__.py +0 -0
  173. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/toolkits/open_api_specs/speak/openapi.yaml +0 -0
  174. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/toolkits/open_api_specs/web_scraper/__init__.py +0 -0
  175. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/toolkits/open_api_specs/web_scraper/ai-plugin.json +0 -0
  176. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/toolkits/open_api_specs/web_scraper/openapi.yaml +0 -0
  177. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/toolkits/open_api_specs/web_scraper/paths/__init__.py +0 -0
  178. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/toolkits/open_api_specs/web_scraper/paths/scraper.py +0 -0
  179. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/toolkits/openai_function.py +0 -0
  180. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/toolkits/search_toolkit.py +0 -0
  181. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/toolkits/weather_toolkit.py +0 -0
  182. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/types/__init__.py +0 -0
  183. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/types/openai_types.py +0 -0
  184. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/utils/__init__.py +0 -0
  185. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/utils/async_func.py +0 -0
  186. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/utils/commons.py +0 -0
  187. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/utils/constants.py +0 -0
  188. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/workforce/__init__.py +0 -0
  189. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/workforce/base.py +0 -0
  190. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/workforce/manager_node.py +0 -0
  191. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/workforce/role_playing_node.py +0 -0
  192. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/workforce/single_agent_node.py +0 -0
  193. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/workforce/task_channel.py +0 -0
  194. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/workforce/utils.py +0 -0
  195. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/workforce/worker_node.py +0 -0
  196. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/workforce/workforce.py +0 -0
  197. {camel_ai-0.1.8 → camel_ai-0.2.0}/camel/workforce/workforce_prompt.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: camel-ai
3
- Version: 0.1.8
3
+ Version: 0.2.0
4
4
  Summary: Communicative Agents for AI Society Study
5
5
  Home-page: https://www.camel-ai.org/
6
6
  License: Apache-2.0
@@ -56,9 +56,10 @@ Requires-Dist: neo4j (>=5.18.0,<6.0.0) ; extra == "graph-storages" or extra == "
56
56
  Requires-Dist: newspaper3k (>=0.2.8,<0.3.0) ; extra == "tools" or extra == "all"
57
57
  Requires-Dist: nltk (==3.8.1) ; extra == "tools" or extra == "all"
58
58
  Requires-Dist: numpy (>=1,<2)
59
- Requires-Dist: openai (>=1.2.3,<2.0.0)
59
+ Requires-Dist: openai (>=1.45.0,<2.0.0)
60
60
  Requires-Dist: openapi-spec-validator (>=0.7.1,<0.8.0) ; extra == "tools" or extra == "all"
61
61
  Requires-Dist: opencv-python (>=4,<5) ; extra == "huggingface-agent" or extra == "all"
62
+ Requires-Dist: pandoc
62
63
  Requires-Dist: pathlib (>=1.0.1,<2.0.0)
63
64
  Requires-Dist: pillow (>=10.2.0,<11.0.0) ; extra == "tools" or extra == "all"
64
65
  Requires-Dist: prance (>=23.6.21.0,<24.0.0.0) ; extra == "tools" or extra == "all"
@@ -213,7 +214,7 @@ conda create --name camel python=3.10
213
214
  conda activate camel
214
215
 
215
216
  # Clone github repo
216
- git clone -b v0.1.8 https://github.com/camel-ai/camel.git
217
+ git clone -b v0.2.0 https://github.com/camel-ai/camel.git
217
218
 
218
219
  # Change directory into project directory
219
220
  cd camel
@@ -119,7 +119,7 @@ conda create --name camel python=3.10
119
119
  conda activate camel
120
120
 
121
121
  # Clone github repo
122
- git clone -b v0.1.8 https://github.com/camel-ai/camel.git
122
+ git clone -b v0.2.0 https://github.com/camel-ai/camel.git
123
123
 
124
124
  # Change directory into project directory
125
125
  cd camel
@@ -12,7 +12,7 @@
12
12
  # limitations under the License.
13
13
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
14
 
15
- __version__ = '0.1.8'
15
+ __version__ = '0.2.0'
16
16
 
17
17
  __all__ = [
18
18
  '__version__',
@@ -181,6 +181,16 @@ class ChatAgent(BaseAgent):
181
181
  tool.get_function_name(): tool.func for tool in all_tools
182
182
  }
183
183
 
184
+ # If the user hasn't configured tools in `BaseModelBackend`,
185
+ # the tools set from `ChatAgent` will be used.
186
+ # This design simplifies the interface while retaining tool-running
187
+ # capabilities for `BaseModelBackend`.
188
+ if all_tools and not self.model_backend.model_config_dict['tools']:
189
+ tool_schema_list = [
190
+ tool.get_openai_tool_schema() for tool in all_tools
191
+ ]
192
+ self.model_backend.model_config_dict['tools'] = tool_schema_list
193
+
184
194
  self.model_config_dict = self.model_backend.model_config_dict
185
195
 
186
196
  self.model_token_limit = token_limit or self.model_backend.token_limit
@@ -21,8 +21,10 @@ from .ollama_config import OLLAMA_API_PARAMS, OllamaConfig
21
21
  from .openai_config import OPENAI_API_PARAMS, ChatGPTConfig, OpenSourceConfig
22
22
  from .reka_config import REKA_API_PARAMS, RekaConfig
23
23
  from .samba_config import (
24
+ SAMBA_CLOUD_API_PARAMS,
24
25
  SAMBA_FAST_API_PARAMS,
25
26
  SAMBA_VERSE_API_PARAMS,
27
+ SambaCloudAPIConfig,
26
28
  SambaFastAPIConfig,
27
29
  SambaVerseAPIConfig,
28
30
  )
@@ -57,6 +59,8 @@ __all__ = [
57
59
  'SAMBA_FAST_API_PARAMS',
58
60
  'SambaVerseAPIConfig',
59
61
  'SAMBA_VERSE_API_PARAMS',
62
+ 'SambaCloudAPIConfig',
63
+ 'SAMBA_CLOUD_API_PARAMS',
60
64
  'TogetherAIConfig',
61
65
  'TOGETHERAI_API_PARAMS',
62
66
  ]
@@ -0,0 +1,207 @@
1
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an "AS IS" BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
+ from __future__ import annotations
15
+
16
+ from typing import Any, Dict, Optional, Sequence, Union
17
+
18
+ from openai._types import NOT_GIVEN, NotGiven
19
+ from pydantic import Field
20
+
21
+ from camel.configs.base_config import BaseConfig
22
+
23
+
24
+ class SambaFastAPIConfig(BaseConfig):
25
+ r"""Defines the parameters for generating chat completions using the
26
+ SambaNova Fast API.
27
+
28
+ Args:
29
+ max_tokens (Optional[int], optional): the maximum number of tokens to
30
+ generate, e.g. 100.
31
+ (default: :obj:`2048`)
32
+ stop (Optional[Union[str,list[str]]]): Stop generation if this token
33
+ is detected. Or if one of these tokens is detected when providing
34
+ a string list.
35
+ (default: :obj:`None`)
36
+ stream (Optional[bool]): If True, partial message deltas will be sent
37
+ as data-only server-sent events as they become available.
38
+ Currently SambaNova Fast API only support stream mode.
39
+ (default: :obj:`True`)
40
+ stream_options (Optional[Dict]): Additional options for streaming.
41
+ (default: :obj:`{"include_usage": True}`)
42
+ """
43
+
44
+ max_tokens: Optional[int] = 2048
45
+ stop: Optional[Union[str, list[str]]] = None
46
+ stream: Optional[bool] = True
47
+ stream_options: Optional[Dict] = {"include_usage": True} # noqa: RUF012
48
+
49
+ def as_dict(self) -> dict[str, Any]:
50
+ config_dict = super().as_dict()
51
+ if "tools" in config_dict:
52
+ del config_dict["tools"] # SambaNova does not support tool calling
53
+ return config_dict
54
+
55
+
56
+ SAMBA_FAST_API_PARAMS = {
57
+ param for param in SambaFastAPIConfig().model_fields.keys()
58
+ }
59
+
60
+
61
+ class SambaVerseAPIConfig(BaseConfig):
62
+ r"""Defines the parameters for generating chat completions using the
63
+ SambaVerse API.
64
+
65
+ Args:
66
+ temperature (float, optional): Sampling temperature to use, between
67
+ :obj:`0` and :obj:`2`. Higher values make the output more random,
68
+ while lower values make it more focused and deterministic.
69
+ (default: :obj:`0.7`)
70
+ top_p (float, optional): An alternative to sampling with temperature,
71
+ called nucleus sampling, where the model considers the results of
72
+ the tokens with top_p probability mass. So :obj:`0.1` means only
73
+ the tokens comprising the top 10% probability mass are considered.
74
+ (default: :obj:`0.95`)
75
+ top_k (int, optional): Only sample from the top K options for each
76
+ subsequent token. Used to remove "long tail" low probability
77
+ responses.
78
+ (default: :obj:`50`)
79
+ max_tokens (Optional[int], optional): The maximum number of tokens to
80
+ generate, e.g. 100.
81
+ (default: :obj:`2048`)
82
+ repetition_penalty (Optional[float], optional): The parameter for
83
+ repetition penalty. 1.0 means no penalty.
84
+ (default: :obj:`1.0`)
85
+ stop (Optional[Union[str,list[str]]]): Stop generation if this token
86
+ is detected. Or if one of these tokens is detected when providing
87
+ a string list.
88
+ (default: :obj:`""`)
89
+ stream (Optional[bool]): If True, partial message deltas will be sent
90
+ as data-only server-sent events as they become available.
91
+ Currently SambaVerse API doesn't support stream mode.
92
+ (default: :obj:`False`)
93
+ """
94
+
95
+ temperature: Optional[float] = 0.7
96
+ top_p: Optional[float] = 0.95
97
+ top_k: Optional[int] = 50
98
+ max_tokens: Optional[int] = 2048
99
+ repetition_penalty: Optional[float] = 1.0
100
+ stop: Optional[Union[str, list[str]]] = ""
101
+ stream: Optional[bool] = False
102
+
103
+ def as_dict(self) -> dict[str, Any]:
104
+ config_dict = super().as_dict()
105
+ if "tools" in config_dict:
106
+ del config_dict["tools"] # SambaNova does not support tool calling
107
+ return config_dict
108
+
109
+
110
+ SAMBA_VERSE_API_PARAMS = {
111
+ param for param in SambaVerseAPIConfig().model_fields.keys()
112
+ }
113
+
114
+
115
+ class SambaCloudAPIConfig(BaseConfig):
116
+ r"""Defines the parameters for generating chat completions using the
117
+ OpenAI API.
118
+
119
+ Args:
120
+ temperature (float, optional): Sampling temperature to use, between
121
+ :obj:`0` and :obj:`2`. Higher values make the output more random,
122
+ while lower values make it more focused and deterministic.
123
+ (default: :obj:`0.2`)
124
+ top_p (float, optional): An alternative to sampling with temperature,
125
+ called nucleus sampling, where the model considers the results of
126
+ the tokens with top_p probability mass. So :obj:`0.1` means only
127
+ the tokens comprising the top 10% probability mass are considered.
128
+ (default: :obj:`1.0`)
129
+ n (int, optional): How many chat completion choices to generate for
130
+ each input message. (default: :obj:`1`)
131
+ response_format (object, optional): An object specifying the format
132
+ that the model must output. Compatible with GPT-4 Turbo and all
133
+ GPT-3.5 Turbo models newer than gpt-3.5-turbo-1106. Setting to
134
+ {"type": "json_object"} enables JSON mode, which guarantees the
135
+ message the model generates is valid JSON. Important: when using
136
+ JSON mode, you must also instruct the model to produce JSON
137
+ yourself via a system or user message. Without this, the model
138
+ may generate an unending stream of whitespace until the generation
139
+ reaches the token limit, resulting in a long-running and seemingly
140
+ "stuck" request. Also note that the message content may be
141
+ partially cut off if finish_reason="length", which indicates the
142
+ generation exceeded max_tokens or the conversation exceeded the
143
+ max context length.
144
+ stream (bool, optional): If True, partial message deltas will be sent
145
+ as data-only server-sent events as they become available.
146
+ (default: :obj:`False`)
147
+ stop (str or list, optional): Up to :obj:`4` sequences where the API
148
+ will stop generating further tokens. (default: :obj:`None`)
149
+ max_tokens (int, optional): The maximum number of tokens to generate
150
+ in the chat completion. The total length of input tokens and
151
+ generated tokens is limited by the model's context length.
152
+ (default: :obj:`None`)
153
+ presence_penalty (float, optional): Number between :obj:`-2.0` and
154
+ :obj:`2.0`. Positive values penalize new tokens based on whether
155
+ they appear in the text so far, increasing the model's likelihood
156
+ to talk about new topics. See more information about frequency and
157
+ presence penalties. (default: :obj:`0.0`)
158
+ frequency_penalty (float, optional): Number between :obj:`-2.0` and
159
+ :obj:`2.0`. Positive values penalize new tokens based on their
160
+ existing frequency in the text so far, decreasing the model's
161
+ likelihood to repeat the same line verbatim. See more information
162
+ about frequency and presence penalties. (default: :obj:`0.0`)
163
+ logit_bias (dict, optional): Modify the likelihood of specified tokens
164
+ appearing in the completion. Accepts a json object that maps tokens
165
+ (specified by their token ID in the tokenizer) to an associated
166
+ bias value from :obj:`-100` to :obj:`100`. Mathematically, the bias
167
+ is added to the logits generated by the model prior to sampling.
168
+ The exact effect will vary per model, but values between :obj:`-1`
169
+ and :obj:`1` should decrease or increase likelihood of selection;
170
+ values like :obj:`-100` or :obj:`100` should result in a ban or
171
+ exclusive selection of the relevant token. (default: :obj:`{}`)
172
+ user (str, optional): A unique identifier representing your end-user,
173
+ which can help OpenAI to monitor and detect abuse.
174
+ (default: :obj:`""`)
175
+ tools (list[OpenAIFunction], optional): A list of tools the model may
176
+ call. Currently, only functions are supported as a tool. Use this
177
+ to provide a list of functions the model may generate JSON inputs
178
+ for. A max of 128 functions are supported.
179
+ tool_choice (Union[dict[str, str], str], optional): Controls which (if
180
+ any) tool is called by the model. :obj:`"none"` means the model
181
+ will not call any tool and instead generates a message.
182
+ :obj:`"auto"` means the model can pick between generating a
183
+ message or calling one or more tools. :obj:`"required"` means the
184
+ model must call one or more tools. Specifying a particular tool
185
+ via {"type": "function", "function": {"name": "my_function"}}
186
+ forces the model to call that tool. :obj:`"none"` is the default
187
+ when no tools are present. :obj:`"auto"` is the default if tools
188
+ are present.
189
+ """
190
+
191
+ temperature: float = 0.2 # openai default: 1.0
192
+ top_p: float = 1.0
193
+ n: int = 1
194
+ stream: bool = False
195
+ stop: Union[str, Sequence[str], NotGiven] = NOT_GIVEN
196
+ max_tokens: Union[int, NotGiven] = NOT_GIVEN
197
+ presence_penalty: float = 0.0
198
+ response_format: Union[dict, NotGiven] = NOT_GIVEN
199
+ frequency_penalty: float = 0.0
200
+ logit_bias: dict = Field(default_factory=dict)
201
+ user: str = ""
202
+ tool_choice: Optional[Union[dict[str, str], str]] = None
203
+
204
+
205
+ SAMBA_CLOUD_API_PARAMS = {
206
+ param for param in SambaCloudAPIConfig().model_fields.keys()
207
+ }
@@ -76,10 +76,10 @@ class OllamaModel:
76
76
  )
77
77
  print(
78
78
  f"Ollama server started on http://localhost:11434/v1 "
79
- f"for {self.model_type} model"
79
+ f"for {self.model_type} model."
80
80
  )
81
81
  except Exception as e:
82
- print(f"Failed to start Ollama server: {e}")
82
+ print(f"Failed to start Ollama server: {e}.")
83
83
 
84
84
  @property
85
85
  def token_counter(self) -> BaseTokenCounter:
@@ -93,6 +93,22 @@ class OpenAIModel(BaseModelBackend):
93
93
  `ChatCompletion` in the non-stream mode, or
94
94
  `Stream[ChatCompletionChunk]` in the stream mode.
95
95
  """
96
+ # o1-preview and o1-mini have Beta limitations
97
+ # reference: https://platform.openai.com/docs/guides/reasoning
98
+ if self.model_type in [ModelType.O1_MINI, ModelType.O1_PREVIEW]:
99
+ # Remove system message that is not supported in o1 model.
100
+ messages = [msg for msg in messages if msg.get("role") != "system"]
101
+
102
+ # Remove unsupported parameters and reset the fixed parameters
103
+ del self.model_config_dict["stream"]
104
+ del self.model_config_dict["tools"]
105
+ del self.model_config_dict["tool_choice"]
106
+ self.model_config_dict["temperature"] = 1.0
107
+ self.model_config_dict["top_p"] = 1.0
108
+ self.model_config_dict["n"] = 1.0
109
+ self.model_config_dict["presence_penalty"] = 0.0
110
+ self.model_config_dict["frequency_penalty"] = 0.0
111
+
96
112
  response = self._client.chat.completions.create(
97
113
  messages=messages,
98
114
  model=self.model_type.value,
@@ -20,7 +20,11 @@ from typing import Any, Dict, List, Optional, Union
20
20
  import httpx
21
21
  from openai import OpenAI, Stream
22
22
 
23
- from camel.configs import SAMBA_FAST_API_PARAMS, SAMBA_VERSE_API_PARAMS
23
+ from camel.configs import (
24
+ SAMBA_CLOUD_API_PARAMS,
25
+ SAMBA_FAST_API_PARAMS,
26
+ SAMBA_VERSE_API_PARAMS,
27
+ )
24
28
  from camel.messages import OpenAIMessage
25
29
  from camel.types import (
26
30
  ChatCompletion,
@@ -59,9 +63,10 @@ class SambaModel:
59
63
  SambaNova service. (default: :obj:`None`)
60
64
  url (Optional[str]): The url to the SambaNova service. Current
61
65
  support SambaNova Fast API: :obj:`"https://fast-api.snova.ai/
62
- v1/chat/ completions"` and SambaVerse API: :obj:`"https://
63
- sambaverse.sambanova.ai/api/predict"`. (default::obj:`"https://
64
- fast-api.snova.ai/v1/chat/completions"`)
66
+ v1/chat/ completions"`, SambaVerse API: :obj:`"https://
67
+ sambaverse.sambanova.ai/api/predict"` and SambaNova Cloud:
68
+ :obj:`"https://api.sambanova.ai/v1"`
69
+ (default::obj:`"https://fast-api.snova.ai/v1/chat/completions"`)
65
70
  token_counter (Optional[BaseTokenCounter]): Token counter to use
66
71
  for the model. If not provided, `OpenAITokenCounter(ModelType.
67
72
  GPT_4O_MINI)` will be used.
@@ -76,6 +81,14 @@ class SambaModel:
76
81
  self.model_config_dict = model_config_dict
77
82
  self.check_model_config()
78
83
 
84
+ if self._url == "https://api.sambanova.ai/v1":
85
+ self._client = OpenAI(
86
+ timeout=60,
87
+ max_retries=3,
88
+ base_url=self._url,
89
+ api_key=self._api_key,
90
+ )
91
+
79
92
  @property
80
93
  def token_counter(self) -> BaseTokenCounter:
81
94
  r"""Initialize the token counter for the model backend.
@@ -111,6 +124,14 @@ class SambaModel:
111
124
  "input into SambaVerse API."
112
125
  )
113
126
 
127
+ elif self._url == "https://api.sambanova.ai/v1":
128
+ for param in self.model_config_dict:
129
+ if param not in SAMBA_CLOUD_API_PARAMS:
130
+ raise ValueError(
131
+ f"Unexpected argument `{param}` is "
132
+ "input into SambaCloud API."
133
+ )
134
+
114
135
  else:
115
136
  raise ValueError(
116
137
  f"{self._url} is not supported, please check the url to the"
@@ -141,7 +162,7 @@ class SambaModel:
141
162
  def _run_streaming( # type: ignore[misc]
142
163
  self, messages: List[OpenAIMessage]
143
164
  ) -> Stream[ChatCompletionChunk]:
144
- r"""Handles streaming inference with SambaNova FastAPI.
165
+ r"""Handles streaming inference with SambaNova's API.
145
166
 
146
167
  Args:
147
168
  messages (List[OpenAIMessage]): A list of messages representing the
@@ -189,6 +210,15 @@ class SambaModel:
189
210
  except httpx.HTTPError as e:
190
211
  raise RuntimeError(f"HTTP request failed: {e!s}")
191
212
 
213
+ # Handle SambaNova's Cloud API
214
+ elif self._url == "https://api.sambanova.ai/v1":
215
+ response = self._client.chat.completions.create(
216
+ messages=messages,
217
+ model=self.model_type,
218
+ **self.model_config_dict,
219
+ )
220
+ return response
221
+
192
222
  elif self._url == "https://sambaverse.sambanova.ai/api/predict":
193
223
  raise ValueError(
194
224
  "https://sambaverse.sambanova.ai/api/predict doesn't support"
@@ -198,7 +228,7 @@ class SambaModel:
198
228
  def _run_non_streaming(
199
229
  self, messages: List[OpenAIMessage]
200
230
  ) -> ChatCompletion:
201
- r"""Handles non-streaming inference with SambaNova FastAPI.
231
+ r"""Handles non-streaming inference with SambaNova's API.
202
232
 
203
233
  Args:
204
234
  messages (List[OpenAIMessage]): A list of messages representing the
@@ -251,6 +281,15 @@ class SambaModel:
251
281
  except json.JSONDecodeError as e:
252
282
  raise ValueError(f"Failed to decode JSON response: {e!s}")
253
283
 
284
+ # Handle SambaNova's Cloud API
285
+ elif self._url == "https://api.sambanova.ai/v1":
286
+ response = self._client.chat.completions.create(
287
+ messages=messages,
288
+ model=self.model_type,
289
+ **self.model_config_dict,
290
+ )
291
+ return response
292
+
254
293
  # Handle SambaNova's Sambaverse API
255
294
  else:
256
295
  headers = {
@@ -80,10 +80,10 @@ class VLLMModel:
80
80
  )
81
81
  print(
82
82
  f"vllm server started on http://localhost:8000/v1 "
83
- f"for {self.model_type} model"
83
+ f"for {self.model_type} model."
84
84
  )
85
85
  except Exception as e:
86
- print(f"Failed to start vllm server: {e}")
86
+ print(f"Failed to start vllm server: {e}.")
87
87
 
88
88
  @property
89
89
  def token_counter(self) -> BaseTokenCounter:
@@ -19,19 +19,18 @@ from .openai_function import (
19
19
  )
20
20
  from .open_api_specs.security_config import openapi_security_config
21
21
 
22
- from .google_maps_toolkit import MAP_FUNCS, GoogleMapsToolkit
23
- from .math_toolkit import MATH_FUNCS, MathToolkit
24
- from .open_api_toolkit import OPENAPI_FUNCS, OpenAPIToolkit
25
- from .retrieval_toolkit import RETRIEVAL_FUNCS, RetrievalToolkit
26
- from .search_toolkit import SEARCH_FUNCS, SearchToolkit
27
- from .twitter_toolkit import TWITTER_FUNCS, TwitterToolkit
28
- from .weather_toolkit import WEATHER_FUNCS, WeatherToolkit
29
- from .slack_toolkit import SLACK_FUNCS, SlackToolkit
30
- from .dalle_toolkit import DALLE_FUNCS, DalleToolkit
31
- from .linkedin_toolkit import LINKEDIN_FUNCS, LinkedInToolkit
32
- from .reddit_toolkit import REDDIT_FUNCS, RedditToolkit
22
+ from .google_maps_toolkit import GoogleMapsToolkit
23
+ from .math_toolkit import MathToolkit, MATH_FUNCS
24
+ from .open_api_toolkit import OpenAPIToolkit
25
+ from .retrieval_toolkit import RetrievalToolkit
26
+ from .search_toolkit import SearchToolkit, SEARCH_FUNCS
27
+ from .twitter_toolkit import TwitterToolkit
28
+ from .weather_toolkit import WeatherToolkit, WEATHER_FUNCS
29
+ from .slack_toolkit import SlackToolkit
30
+ from .dalle_toolkit import DalleToolkit, DALLE_FUNCS
31
+ from .linkedin_toolkit import LinkedInToolkit
32
+ from .reddit_toolkit import RedditToolkit
33
33
 
34
- from .base import BaseToolkit
35
34
  from .code_execution import CodeExecutionToolkit
36
35
  from .github_toolkit import GithubToolkit
37
36
 
@@ -40,18 +39,6 @@ __all__ = [
40
39
  'get_openai_function_schema',
41
40
  'get_openai_tool_schema',
42
41
  'openapi_security_config',
43
- 'MATH_FUNCS',
44
- 'MAP_FUNCS',
45
- 'OPENAPI_FUNCS',
46
- 'RETRIEVAL_FUNCS',
47
- 'SEARCH_FUNCS',
48
- 'TWITTER_FUNCS',
49
- 'WEATHER_FUNCS',
50
- 'SLACK_FUNCS',
51
- 'DALLE_FUNCS',
52
- 'LINKEDIN_FUNCS',
53
- 'REDDIT_FUNCS',
54
- 'BaseToolkit',
55
42
  'GithubToolkit',
56
43
  'MathToolkit',
57
44
  'GoogleMapsToolkit',
@@ -65,4 +52,8 @@ __all__ = [
65
52
  'LinkedInToolkit',
66
53
  'RedditToolkit',
67
54
  'CodeExecutionToolkit',
55
+ 'MATH_FUNCS',
56
+ 'SEARCH_FUNCS',
57
+ 'WEATHER_FUNCS',
58
+ 'DALLE_FUNCS',
68
59
  ]