camel-ai 0.2.59__py3-none-any.whl → 0.2.82__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- camel/__init__.py +3 -3
- camel/agents/__init__.py +2 -2
- camel/agents/_types.py +9 -4
- camel/agents/_utils.py +40 -2
- camel/agents/base.py +2 -2
- camel/agents/chat_agent.py +5012 -902
- camel/agents/critic_agent.py +2 -2
- camel/agents/deductive_reasoner_agent.py +56 -56
- camel/agents/embodied_agent.py +2 -2
- camel/agents/knowledge_graph_agent.py +20 -20
- camel/agents/mcp_agent.py +39 -36
- camel/agents/multi_hop_generator_agent.py +3 -3
- camel/agents/programmed_agent_instruction.py +2 -2
- camel/agents/repo_agent.py +4 -3
- camel/agents/role_assignment_agent.py +2 -2
- camel/agents/search_agent.py +2 -2
- camel/agents/task_agent.py +2 -2
- camel/agents/tool_agents/__init__.py +2 -2
- camel/agents/tool_agents/base.py +2 -2
- camel/agents/tool_agents/hugging_face_tool_agent.py +3 -3
- camel/benchmarks/__init__.py +2 -2
- camel/benchmarks/apibank.py +5 -5
- camel/benchmarks/apibench.py +2 -2
- camel/benchmarks/base.py +2 -2
- camel/benchmarks/browsecomp.py +44 -33
- camel/benchmarks/gaia.py +17 -13
- camel/benchmarks/mock_website/README.md +94 -0
- camel/benchmarks/mock_website/mock_web.py +299 -0
- camel/benchmarks/mock_website/requirements.txt +3 -0
- camel/benchmarks/mock_website/shopping_mall/app.py +465 -0
- camel/benchmarks/mock_website/task.json +104 -0
- camel/benchmarks/nexus.py +3 -3
- camel/benchmarks/ragbench.py +2 -2
- camel/bots/__init__.py +2 -2
- camel/bots/discord/__init__.py +2 -2
- camel/bots/discord/discord_app.py +2 -2
- camel/bots/discord/discord_installation.py +2 -2
- camel/bots/discord/discord_store.py +3 -3
- camel/bots/slack/__init__.py +2 -2
- camel/bots/slack/models.py +4 -4
- camel/bots/slack/slack_app.py +2 -2
- camel/bots/telegram_bot.py +2 -2
- camel/configs/__init__.py +26 -2
- camel/configs/aihubmix_config.py +90 -0
- camel/configs/aiml_config.py +2 -2
- camel/configs/amd_config.py +70 -0
- camel/configs/anthropic_config.py +8 -7
- camel/configs/base_config.py +2 -2
- camel/configs/bedrock_config.py +5 -3
- camel/configs/cerebras_config.py +98 -0
- camel/configs/cohere_config.py +3 -3
- camel/configs/cometapi_config.py +106 -0
- camel/configs/crynux_config.py +94 -0
- camel/configs/deepseek_config.py +9 -8
- camel/configs/gemini_config.py +6 -4
- camel/configs/groq_config.py +6 -4
- camel/configs/internlm_config.py +6 -4
- camel/configs/litellm_config.py +2 -2
- camel/configs/lmstudio_config.py +6 -4
- camel/configs/minimax_config.py +95 -0
- camel/configs/mistral_config.py +3 -3
- camel/configs/modelscope_config.py +5 -3
- camel/configs/moonshot_config.py +2 -2
- camel/configs/nebius_config.py +105 -0
- camel/configs/netmind_config.py +2 -2
- camel/configs/novita_config.py +2 -2
- camel/configs/nvidia_config.py +2 -2
- camel/configs/ollama_config.py +2 -2
- camel/configs/openai_config.py +8 -3
- camel/configs/openrouter_config.py +6 -4
- camel/configs/ppio_config.py +2 -2
- camel/configs/qianfan_config.py +85 -0
- camel/configs/qwen_config.py +2 -2
- camel/configs/reka_config.py +3 -3
- camel/configs/samba_config.py +8 -6
- camel/configs/sglang_config.py +2 -2
- camel/configs/siliconflow_config.py +2 -2
- camel/configs/togetherai_config.py +2 -2
- camel/configs/vllm_config.py +4 -2
- camel/configs/watsonx_config.py +2 -2
- camel/configs/yi_config.py +6 -4
- camel/configs/zhipuai_config.py +6 -4
- camel/{data_collector → data_collectors}/__init__.py +2 -2
- camel/{data_collector → data_collectors}/alpaca_collector.py +19 -10
- camel/{data_collector → data_collectors}/base.py +2 -2
- camel/{data_collector → data_collectors}/sharegpt_collector.py +3 -3
- camel/datagen/__init__.py +2 -2
- camel/datagen/cot_datagen.py +32 -37
- camel/datagen/evol_instruct/__init__.py +2 -2
- camel/datagen/evol_instruct/evol_instruct.py +2 -2
- camel/datagen/evol_instruct/scorer.py +24 -25
- camel/datagen/evol_instruct/templates.py +48 -48
- camel/datagen/self_improving_cot.py +5 -5
- camel/datagen/self_instruct/__init__.py +2 -2
- camel/datagen/self_instruct/filter/__init__.py +2 -2
- camel/datagen/self_instruct/filter/filter_function.py +2 -2
- camel/datagen/self_instruct/filter/filter_registry.py +2 -2
- camel/datagen/self_instruct/filter/instruction_filter.py +2 -2
- camel/datagen/self_instruct/self_instruct.py +2 -2
- camel/datagen/self_instruct/templates.py +47 -47
- camel/datagen/source2synth/__init__.py +2 -2
- camel/datagen/source2synth/data_processor.py +2 -2
- camel/datagen/source2synth/models.py +2 -2
- camel/datagen/source2synth/user_data_processor_config.py +2 -2
- camel/datahubs/__init__.py +2 -2
- camel/datahubs/base.py +2 -2
- camel/datahubs/huggingface.py +2 -2
- camel/datahubs/models.py +2 -2
- camel/datasets/__init__.py +2 -2
- camel/datasets/base_generator.py +41 -12
- camel/datasets/few_shot_generator.py +18 -18
- camel/datasets/models.py +3 -3
- camel/datasets/self_instruct_generator.py +2 -2
- camel/datasets/static_dataset.py +152 -2
- camel/embeddings/__init__.py +2 -2
- camel/embeddings/azure_embedding.py +2 -2
- camel/embeddings/base.py +2 -2
- camel/embeddings/gemini_embedding.py +2 -2
- camel/embeddings/jina_embedding.py +10 -3
- camel/embeddings/mistral_embedding.py +2 -2
- camel/embeddings/openai_compatible_embedding.py +2 -2
- camel/embeddings/openai_embedding.py +2 -2
- camel/embeddings/sentence_transformers_embeddings.py +4 -4
- camel/embeddings/together_embedding.py +2 -2
- camel/embeddings/vlm_embedding.py +11 -4
- camel/environments/__init__.py +14 -2
- camel/environments/models.py +2 -2
- camel/environments/multi_step.py +2 -2
- camel/environments/rlcards_env.py +860 -0
- camel/environments/single_step.py +30 -5
- camel/environments/tic_tac_toe.py +3 -3
- camel/extractors/__init__.py +2 -2
- camel/extractors/base.py +2 -2
- camel/extractors/python_strategies.py +2 -2
- camel/generators.py +2 -2
- camel/human.py +2 -2
- camel/interpreters/__init__.py +4 -2
- camel/interpreters/base.py +16 -3
- camel/interpreters/docker/Dockerfile +53 -7
- camel/interpreters/docker_interpreter.py +70 -11
- camel/interpreters/e2b_interpreter.py +59 -11
- camel/interpreters/internal_python_interpreter.py +81 -4
- camel/interpreters/interpreter_error.py +2 -2
- camel/interpreters/ipython_interpreter.py +23 -5
- camel/interpreters/microsandbox_interpreter.py +395 -0
- camel/interpreters/subprocess_interpreter.py +36 -4
- camel/loaders/__init__.py +17 -5
- camel/loaders/apify_reader.py +2 -2
- camel/loaders/base_io.py +2 -2
- camel/loaders/base_loader.py +85 -0
- camel/loaders/chunkr_reader.py +128 -93
- camel/loaders/crawl4ai_reader.py +2 -2
- camel/loaders/firecrawl_reader.py +6 -6
- camel/loaders/jina_url_reader.py +2 -2
- camel/loaders/markitdown.py +2 -2
- camel/loaders/mineru_extractor.py +2 -2
- camel/loaders/mistral_reader.py +148 -0
- camel/loaders/scrapegraph_reader.py +2 -2
- camel/loaders/unstructured_io.py +2 -2
- camel/logger.py +5 -5
- camel/memories/__init__.py +2 -2
- camel/memories/agent_memories.py +86 -3
- camel/memories/base.py +36 -2
- camel/memories/blocks/__init__.py +2 -2
- camel/memories/blocks/chat_history_block.py +126 -9
- camel/memories/blocks/vectordb_block.py +10 -3
- camel/memories/context_creators/__init__.py +2 -2
- camel/memories/context_creators/score_based.py +31 -239
- camel/memories/records.py +98 -13
- camel/messages/__init__.py +2 -2
- camel/messages/base.py +193 -46
- camel/messages/conversion/__init__.py +2 -2
- camel/messages/conversion/alpaca.py +2 -2
- camel/messages/conversion/conversation_models.py +2 -2
- camel/messages/conversion/sharegpt/__init__.py +2 -2
- camel/messages/conversion/sharegpt/function_call_formatter.py +2 -2
- camel/messages/conversion/sharegpt/hermes/__init__.py +2 -2
- camel/messages/conversion/sharegpt/hermes/hermes_function_formatter.py +2 -2
- camel/messages/func_message.py +54 -17
- camel/models/__init__.py +18 -2
- camel/models/_utils.py +3 -3
- camel/models/aihubmix_model.py +83 -0
- camel/models/aiml_model.py +11 -18
- camel/models/amd_model.py +101 -0
- camel/models/anthropic_model.py +127 -20
- camel/models/aws_bedrock_model.py +12 -35
- camel/models/azure_openai_model.py +263 -63
- camel/models/base_audio_model.py +5 -3
- camel/models/base_model.py +195 -26
- camel/models/cerebras_model.py +83 -0
- camel/models/cohere_model.py +81 -21
- camel/models/cometapi_model.py +83 -0
- camel/models/crynux_model.py +87 -0
- camel/models/deepseek_model.py +61 -59
- camel/models/fish_audio_model.py +8 -2
- camel/models/gemini_model.py +439 -30
- camel/models/groq_model.py +11 -19
- camel/models/internlm_model.py +11 -18
- camel/models/litellm_model.py +94 -34
- camel/models/lmstudio_model.py +17 -20
- camel/models/minimax_model.py +83 -0
- camel/models/mistral_model.py +84 -19
- camel/models/model_factory.py +49 -6
- camel/models/model_manager.py +33 -11
- camel/models/modelscope_model.py +13 -193
- camel/models/moonshot_model.py +195 -21
- camel/models/nebius_model.py +83 -0
- camel/models/nemotron_model.py +19 -9
- camel/models/netmind_model.py +11 -18
- camel/models/novita_model.py +11 -18
- camel/models/nvidia_model.py +11 -18
- camel/models/ollama_model.py +14 -21
- camel/models/openai_audio_models.py +2 -2
- camel/models/openai_compatible_model.py +234 -27
- camel/models/openai_model.py +255 -39
- camel/models/openrouter_model.py +11 -19
- camel/models/ppio_model.py +11 -18
- camel/models/qianfan_model.py +89 -0
- camel/models/qwen_model.py +13 -193
- camel/models/reka_model.py +90 -21
- camel/models/reward/__init__.py +2 -2
- camel/models/reward/base_reward_model.py +2 -2
- camel/models/reward/evaluator.py +2 -2
- camel/models/reward/nemotron_model.py +2 -2
- camel/models/reward/skywork_model.py +2 -2
- camel/models/samba_model.py +117 -49
- camel/models/sglang_model.py +162 -42
- camel/models/siliconflow_model.py +12 -35
- camel/models/stub_model.py +10 -7
- camel/models/togetherai_model.py +11 -18
- camel/models/vllm_model.py +10 -18
- camel/models/volcano_model.py +16 -20
- camel/models/watsonx_model.py +69 -19
- camel/models/yi_model.py +11 -18
- camel/models/zhipuai_model.py +70 -18
- camel/parsers/__init__.py +18 -0
- camel/parsers/mcp_tool_call_parser.py +176 -0
- camel/personas/__init__.py +2 -2
- camel/personas/persona.py +2 -2
- camel/personas/persona_hub.py +2 -2
- camel/prompts/__init__.py +2 -2
- camel/prompts/ai_society.py +2 -2
- camel/prompts/base.py +2 -2
- camel/prompts/code.py +2 -2
- camel/prompts/evaluation.py +2 -2
- camel/prompts/generate_text_embedding_data.py +2 -2
- camel/prompts/image_craft.py +2 -2
- camel/prompts/misalignment.py +2 -2
- camel/prompts/multi_condition_image_craft.py +2 -2
- camel/prompts/object_recognition.py +2 -2
- camel/prompts/persona_hub.py +3 -3
- camel/prompts/prompt_templates.py +2 -2
- camel/prompts/role_description_prompt_template.py +2 -2
- camel/prompts/solution_extraction.py +8 -8
- camel/prompts/task_prompt_template.py +2 -2
- camel/prompts/translation.py +2 -2
- camel/prompts/video_description_prompt.py +3 -3
- camel/responses/__init__.py +2 -2
- camel/responses/agent_responses.py +2 -2
- camel/retrievers/__init__.py +2 -2
- camel/retrievers/auto_retriever.py +23 -3
- camel/retrievers/base.py +2 -2
- camel/retrievers/bm25_retriever.py +3 -4
- camel/retrievers/cohere_rerank_retriever.py +2 -2
- camel/retrievers/hybrid_retrival.py +4 -4
- camel/retrievers/vector_retriever.py +2 -2
- camel/runtimes/Dockerfile.multi-toolkit +90 -0
- camel/{runtime → runtimes}/__init__.py +2 -2
- camel/runtimes/api.py +153 -0
- camel/{runtime → runtimes}/base.py +2 -2
- camel/{runtime → runtimes}/configs.py +13 -13
- camel/{runtime → runtimes}/daytona_runtime.py +18 -19
- camel/{runtime → runtimes}/docker_runtime.py +13 -13
- camel/{runtime → runtimes}/llm_guard_runtime.py +28 -28
- camel/{runtime → runtimes}/remote_http_runtime.py +12 -12
- camel/{runtime → runtimes}/ubuntu_docker_runtime.py +3 -3
- camel/{runtime → runtimes}/utils/__init__.py +2 -2
- camel/{runtime → runtimes}/utils/function_risk_toolkit.py +2 -2
- camel/{runtime → runtimes}/utils/ignore_risk_toolkit.py +2 -2
- camel/schemas/__init__.py +2 -2
- camel/schemas/base.py +2 -2
- camel/schemas/openai_converter.py +3 -3
- camel/schemas/outlines_converter.py +2 -2
- camel/services/agent_openapi_server.py +380 -0
- camel/societies/__init__.py +4 -2
- camel/societies/babyagi_playing.py +2 -2
- camel/societies/role_playing.py +201 -80
- camel/societies/workforce/__init__.py +10 -3
- camel/societies/workforce/base.py +9 -5
- camel/societies/workforce/events.py +143 -0
- camel/societies/workforce/prompts.py +258 -33
- camel/societies/workforce/role_playing_worker.py +95 -30
- camel/societies/workforce/single_agent_worker.py +659 -30
- camel/societies/workforce/structured_output_handler.py +512 -0
- camel/societies/workforce/task_channel.py +182 -38
- camel/societies/workforce/utils.py +784 -18
- camel/societies/workforce/worker.py +96 -28
- camel/societies/workforce/workflow_memory_manager.py +1746 -0
- camel/societies/workforce/workforce.py +5730 -366
- camel/societies/workforce/workforce_callback.py +103 -0
- camel/societies/workforce/workforce_logger.py +647 -0
- camel/societies/workforce/workforce_metrics.py +33 -0
- camel/storages/__init__.py +10 -2
- camel/storages/graph_storages/__init__.py +2 -2
- camel/storages/graph_storages/base.py +2 -2
- camel/storages/graph_storages/graph_element.py +2 -2
- camel/storages/graph_storages/nebula_graph.py +4 -4
- camel/storages/graph_storages/neo4j_graph.py +7 -7
- camel/storages/key_value_storages/__init__.py +2 -2
- camel/storages/key_value_storages/base.py +2 -2
- camel/storages/key_value_storages/in_memory.py +2 -2
- camel/storages/key_value_storages/json.py +17 -4
- camel/storages/key_value_storages/mem0_cloud.py +50 -49
- camel/storages/key_value_storages/redis.py +2 -2
- camel/storages/object_storages/__init__.py +2 -2
- camel/storages/object_storages/amazon_s3.py +2 -2
- camel/storages/object_storages/azure_blob.py +2 -2
- camel/storages/object_storages/base.py +2 -2
- camel/storages/object_storages/google_cloud.py +3 -3
- camel/storages/vectordb_storages/__init__.py +12 -2
- camel/storages/vectordb_storages/base.py +2 -2
- camel/storages/vectordb_storages/chroma.py +731 -0
- camel/storages/vectordb_storages/faiss.py +712 -0
- camel/storages/vectordb_storages/milvus.py +2 -2
- camel/storages/vectordb_storages/oceanbase.py +16 -17
- camel/storages/vectordb_storages/pgvector.py +349 -0
- camel/storages/vectordb_storages/qdrant.py +6 -6
- camel/storages/vectordb_storages/surreal.py +372 -0
- camel/storages/vectordb_storages/tidb.py +11 -8
- camel/storages/vectordb_storages/weaviate.py +714 -0
- camel/tasks/__init__.py +2 -2
- camel/tasks/task.py +366 -27
- camel/tasks/task_prompt.py +3 -3
- camel/terminators/__init__.py +2 -2
- camel/terminators/base.py +2 -2
- camel/terminators/response_terminator.py +2 -2
- camel/terminators/token_limit_terminator.py +2 -2
- camel/toolkits/__init__.py +58 -10
- camel/toolkits/aci_toolkit.py +66 -21
- camel/toolkits/arxiv_toolkit.py +8 -8
- camel/toolkits/ask_news_toolkit.py +2 -2
- camel/toolkits/async_browser_toolkit.py +174 -575
- camel/toolkits/audio_analysis_toolkit.py +3 -3
- camel/toolkits/base.py +65 -7
- camel/toolkits/bohrium_toolkit.py +318 -0
- camel/toolkits/browser_toolkit.py +306 -566
- camel/toolkits/browser_toolkit_commons.py +568 -0
- camel/toolkits/code_execution.py +67 -11
- camel/toolkits/context_summarizer_toolkit.py +684 -0
- camel/toolkits/craw4ai_toolkit.py +93 -0
- camel/toolkits/dappier_toolkit.py +12 -8
- camel/toolkits/data_commons_toolkit.py +2 -2
- camel/toolkits/dingtalk.py +1135 -0
- camel/toolkits/earth_science_toolkit.py +5367 -0
- camel/toolkits/edgeone_pages_mcp_toolkit.py +49 -0
- camel/toolkits/excel_toolkit.py +910 -70
- camel/toolkits/file_toolkit.py +1402 -0
- camel/toolkits/function_tool.py +128 -20
- camel/toolkits/github_toolkit.py +148 -43
- camel/toolkits/gmail_toolkit.py +1839 -0
- camel/toolkits/google_calendar_toolkit.py +40 -6
- camel/toolkits/google_drive_mcp_toolkit.py +54 -0
- camel/toolkits/google_maps_toolkit.py +2 -2
- camel/toolkits/google_scholar_toolkit.py +2 -2
- camel/toolkits/human_toolkit.py +36 -12
- camel/toolkits/hybrid_browser_toolkit/__init__.py +18 -0
- camel/toolkits/hybrid_browser_toolkit/config_loader.py +185 -0
- camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit.py +246 -0
- camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit_ts.py +1973 -0
- camel/toolkits/hybrid_browser_toolkit/installer.py +203 -0
- camel/toolkits/hybrid_browser_toolkit/ts/package-lock.json +4589 -0
- camel/toolkits/hybrid_browser_toolkit/ts/package.json +33 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/browser-scripts.js +125 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/browser-session.ts +1929 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/config-loader.ts +233 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/hybrid-browser-toolkit.ts +589 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/index.ts +7 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/parent-child-filter.ts +226 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/snapshot-parser.ts +219 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/som-screenshot-injected.ts +543 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/types.ts +129 -0
- camel/toolkits/hybrid_browser_toolkit/ts/tsconfig.json +27 -0
- camel/toolkits/hybrid_browser_toolkit/ts/websocket-server.js +319 -0
- camel/toolkits/hybrid_browser_toolkit/ws_wrapper.py +1037 -0
- camel/toolkits/hybrid_browser_toolkit_py/__init__.py +17 -0
- camel/toolkits/hybrid_browser_toolkit_py/actions.py +575 -0
- camel/toolkits/hybrid_browser_toolkit_py/agent.py +311 -0
- camel/toolkits/hybrid_browser_toolkit_py/browser_session.py +787 -0
- camel/toolkits/hybrid_browser_toolkit_py/config_loader.py +490 -0
- camel/toolkits/hybrid_browser_toolkit_py/hybrid_browser_toolkit.py +2390 -0
- camel/toolkits/hybrid_browser_toolkit_py/snapshot.py +233 -0
- camel/toolkits/hybrid_browser_toolkit_py/stealth_script.js +0 -0
- camel/toolkits/hybrid_browser_toolkit_py/unified_analyzer.js +1043 -0
- camel/toolkits/image_analysis_toolkit.py +3 -3
- camel/toolkits/image_generation_toolkit.py +390 -0
- camel/toolkits/jina_reranker_toolkit.py +195 -79
- camel/toolkits/klavis_toolkit.py +7 -3
- camel/toolkits/linkedin_toolkit.py +2 -2
- camel/toolkits/markitdown_toolkit.py +104 -0
- camel/toolkits/math_toolkit.py +66 -12
- camel/toolkits/mcp_toolkit.py +841 -600
- camel/toolkits/memory_toolkit.py +7 -3
- camel/toolkits/meshy_toolkit.py +2 -2
- camel/toolkits/message_agent_toolkit.py +608 -0
- camel/toolkits/message_integration.py +724 -0
- camel/toolkits/mineru_toolkit.py +2 -2
- camel/toolkits/minimax_mcp_toolkit.py +195 -0
- camel/toolkits/networkx_toolkit.py +2 -2
- camel/toolkits/note_taking_toolkit.py +277 -0
- camel/toolkits/notion_mcp_toolkit.py +224 -0
- camel/toolkits/notion_toolkit.py +2 -2
- camel/toolkits/open_api_specs/biztoc/__init__.py +2 -2
- camel/toolkits/open_api_specs/biztoc/ai-plugin.json +1 -1
- camel/toolkits/open_api_specs/coursera/__init__.py +2 -2
- camel/toolkits/open_api_specs/create_qr_code/__init__.py +2 -2
- camel/toolkits/open_api_specs/klarna/__init__.py +2 -2
- camel/toolkits/open_api_specs/nasa_apod/__init__.py +2 -2
- camel/toolkits/open_api_specs/outschool/__init__.py +2 -2
- camel/toolkits/open_api_specs/outschool/ai-plugin.json +1 -1
- camel/toolkits/open_api_specs/outschool/openapi.yaml +1 -1
- camel/toolkits/open_api_specs/outschool/paths/__init__.py +2 -2
- camel/toolkits/open_api_specs/outschool/paths/get_classes.py +2 -2
- camel/toolkits/open_api_specs/outschool/paths/search_teachers.py +2 -2
- camel/toolkits/open_api_specs/security_config.py +2 -2
- camel/toolkits/open_api_specs/speak/__init__.py +2 -2
- camel/toolkits/open_api_specs/web_scraper/__init__.py +2 -2
- camel/toolkits/open_api_specs/web_scraper/ai-plugin.json +1 -1
- camel/toolkits/open_api_specs/web_scraper/paths/__init__.py +2 -2
- camel/toolkits/open_api_specs/web_scraper/paths/scraper.py +2 -2
- camel/toolkits/open_api_toolkit.py +2 -2
- camel/toolkits/openbb_toolkit.py +7 -3
- camel/toolkits/origene_mcp_toolkit.py +56 -0
- camel/toolkits/page_script.js +86 -74
- camel/toolkits/playwright_mcp_toolkit.py +27 -32
- camel/toolkits/pptx_toolkit.py +790 -0
- camel/toolkits/pubmed_toolkit.py +2 -2
- camel/toolkits/pulse_mcp_search_toolkit.py +2 -2
- camel/toolkits/pyautogui_toolkit.py +2 -2
- camel/toolkits/reddit_toolkit.py +2 -2
- camel/toolkits/resend_toolkit.py +168 -0
- camel/toolkits/retrieval_toolkit.py +2 -2
- camel/toolkits/screenshot_toolkit.py +213 -0
- camel/toolkits/search_toolkit.py +539 -146
- camel/toolkits/searxng_toolkit.py +2 -2
- camel/toolkits/semantic_scholar_toolkit.py +2 -2
- camel/toolkits/slack_toolkit.py +108 -58
- camel/toolkits/sql_toolkit.py +712 -0
- camel/toolkits/stripe_toolkit.py +2 -2
- camel/toolkits/sympy_toolkit.py +3 -3
- camel/toolkits/task_planning_toolkit.py +134 -0
- camel/toolkits/terminal_toolkit/__init__.py +18 -0
- camel/toolkits/terminal_toolkit/terminal_toolkit.py +1070 -0
- camel/toolkits/terminal_toolkit/utils.py +532 -0
- camel/toolkits/thinking_toolkit.py +3 -3
- camel/toolkits/twitter_toolkit.py +8 -3
- camel/toolkits/vertex_ai_veo_toolkit.py +590 -0
- camel/toolkits/video_analysis_toolkit.py +112 -29
- camel/toolkits/video_download_toolkit.py +22 -16
- camel/toolkits/weather_toolkit.py +2 -2
- camel/toolkits/web_deploy_toolkit.py +1219 -0
- camel/toolkits/wechat_official_toolkit.py +483 -0
- camel/toolkits/whatsapp_toolkit.py +2 -2
- camel/toolkits/wolfram_alpha_toolkit.py +53 -25
- camel/toolkits/zapier_toolkit.py +7 -3
- camel/types/__init__.py +4 -4
- camel/types/agents/__init__.py +2 -2
- camel/types/agents/tool_calling_record.py +6 -3
- camel/types/enums.py +454 -35
- camel/types/mcp_registries.py +2 -2
- camel/types/openai_types.py +4 -4
- camel/types/unified_model_type.py +43 -6
- camel/utils/__init__.py +20 -2
- camel/utils/async_func.py +2 -2
- camel/utils/chunker/__init__.py +2 -2
- camel/utils/chunker/base.py +2 -2
- camel/utils/chunker/code_chunker.py +2 -2
- camel/utils/chunker/uio_chunker.py +2 -2
- camel/utils/commons.py +65 -7
- camel/utils/constants.py +5 -2
- camel/utils/context_utils.py +1134 -0
- camel/utils/deduplication.py +2 -2
- camel/utils/filename.py +2 -2
- camel/utils/langfuse.py +258 -0
- camel/utils/mcp.py +140 -6
- camel/utils/mcp_client.py +1056 -0
- camel/utils/message_summarizer.py +148 -0
- camel/utils/response_format.py +2 -2
- camel/utils/token_counting.py +45 -22
- camel/utils/tool_result.py +44 -0
- camel/verifiers/__init__.py +2 -2
- camel/verifiers/base.py +2 -2
- camel/verifiers/math_verifier.py +2 -2
- camel/verifiers/models.py +2 -2
- camel/verifiers/physics_verifier.py +2 -2
- camel/verifiers/python_verifier.py +2 -2
- {camel_ai-0.2.59.dist-info → camel_ai-0.2.82.dist-info}/METADATA +349 -108
- camel_ai-0.2.82.dist-info/RECORD +507 -0
- {camel_ai-0.2.59.dist-info → camel_ai-0.2.82.dist-info}/WHEEL +1 -1
- {camel_ai-0.2.59.dist-info → camel_ai-0.2.82.dist-info}/licenses/LICENSE +1 -1
- camel/loaders/pandas_reader.py +0 -368
- camel/runtime/api.py +0 -97
- camel/toolkits/dalle_toolkit.py +0 -171
- camel/toolkits/file_write_toolkit.py +0 -395
- camel/toolkits/openai_agent_toolkit.py +0 -135
- camel/toolkits/terminal_toolkit.py +0 -1037
- camel_ai-0.2.59.dist-info/RECORD +0 -410
camel/models/base_model.py
CHANGED
@@ -1,4 +1,4 @@
-# ========= Copyright 2023-
+# ========= Copyright 2023-2025 @ CAMEL-AI.org. All Rights Reserved. =========
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
@@ -10,15 +10,21 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# ========= Copyright 2023-
+# ========= Copyright 2023-2025 @ CAMEL-AI.org. All Rights Reserved. =========
 import abc
+import os
 import re
 from abc import ABC, abstractmethod
 from typing import Any, Dict, List, Optional, Type, Union
 
 from openai import AsyncStream, Stream
+from openai.lib.streaming.chat import (
+    AsyncChatCompletionStreamManager,
+    ChatCompletionStreamManager,
+)
 from pydantic import BaseModel
 
+from camel.logger import get_logger as camel_get_logger
 from camel.messages import OpenAIMessage
 from camel.types import (
     ChatCompletion,
@@ -27,7 +33,22 @@ from camel.types import (
     ParsedChatCompletion,
     UnifiedModelType,
 )
-from camel.utils import BaseTokenCounter
+from camel.utils import BaseTokenCounter, Constants
+
+if os.environ.get("TRACEROOT_ENABLED", "False").lower() == "true":
+    try:
+        from traceroot import get_logger  # type: ignore[import]
+        from traceroot import trace as observe  # type: ignore[import]
+
+        logger = get_logger('base_model')
+    except ImportError:
+        from camel.utils import observe
+
+        logger = camel_get_logger('base_model')
+else:
+    from camel.utils import observe
+
+    logger = camel_get_logger('base_model')
 
 
 class ModelBackendMeta(abc.ABCMeta):
@@ -71,6 +92,8 @@ class BaseModelBackend(ABC, metaclass=ModelBackendMeta):
             :obj:`OpenAITokenCounter` will be used. (default: :obj:`None`)
         timeout (Optional[float], optional): The timeout value in seconds for
             API calls. (default: :obj:`None`)
+        max_retries (int, optional): Maximum number of retries
+            for API calls. (default: :obj:`3`)
     """
 
     def __init__(
@@ -80,7 +103,8 @@ class BaseModelBackend(ABC, metaclass=ModelBackendMeta):
         api_key: Optional[str] = None,
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
-        timeout: Optional[float] =
+        timeout: Optional[float] = Constants.TIMEOUT_THRESHOLD,
+        max_retries: int = 3,
     ) -> None:
         self.model_type: UnifiedModelType = UnifiedModelType(model_type)
         if model_config_dict is None:
@@ -90,7 +114,13 @@ class BaseModelBackend(ABC, metaclass=ModelBackendMeta):
         self._url = url
         self._token_counter = token_counter
         self._timeout = timeout
-        self.
+        self._max_retries = max_retries
+        # Initialize logging configuration
+        self._log_enabled = (
+            os.environ.get("CAMEL_MODEL_LOG_ENABLED", "False").lower()
+            == "true"
+        )
+        self._log_dir = os.environ.get("CAMEL_LOG_DIR", "camel_logs")
 
     @property
     @abstractmethod
@@ -228,13 +258,96 @@ class BaseModelBackend(ABC, metaclass=ModelBackendMeta):
 
         return formatted_messages
 
+    def _log_request(self, messages: List[OpenAIMessage]) -> Optional[str]:
+        r"""Log the request messages to a JSON file if logging is enabled.
+
+        Args:
+            messages (List[OpenAIMessage]): The messages to log.
+
+        Returns:
+            Optional[str]: The path to the log file if logging is enabled,
+                None otherwise.
+        """
+        if not self._log_enabled:
+            return None
+
+        import json
+        from datetime import datetime
+
+        os.makedirs(self._log_dir, exist_ok=True)
+
+        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S_%f')
+        log_file_path = os.path.join(self._log_dir, f"conv_{timestamp}.json")
+
+        log_entry = {
+            "request_timestamp": datetime.now().isoformat(),
+            "model": str(self.model_type),
+            "request": {"messages": messages},
+        }
+
+        with open(log_file_path, "w") as f:
+            json.dump(log_entry, f, indent=4)
+
+        return log_file_path
+
+    def _log_response(self, log_path: str, response: Any) -> None:
+        r"""Log the response to the existing log file.
+
+        Args:
+            log_path (str): The path to the log file.
+            response (Any): The response to log.
+        """
+        if not self._log_enabled or not log_path:
+            return
+
+        import json
+        from datetime import datetime
+
+        with open(log_path, "r+") as f:
+            log_data = json.load(f)
+
+            log_data["response_timestamp"] = datetime.now().isoformat()
+            if isinstance(response, BaseModel):
+                log_data["response"] = response.model_dump()
+            else:
+                try:
+                    json.dumps(response)
+                    log_data["response"] = response
+                except TypeError:
+                    log_data["response"] = str(response)
+
+            f.seek(0)
+            json.dump(log_data, f, indent=4)
+            f.truncate()
+
     @abstractmethod
     def _run(
         self,
         messages: List[OpenAIMessage],
         response_format: Optional[Type[BaseModel]] = None,
         tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[
+    ) -> Union[
+        ChatCompletion,
+        Stream[ChatCompletionChunk],
+        ChatCompletionStreamManager[BaseModel],
+    ]:
+        r"""Runs the query to the backend model in a non-stream mode.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+            response_format (Optional[Type[BaseModel]]): The format of the
+                response.
+            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
+                use for the request.
+
+        Returns:
+            Union[ChatCompletion, Stream[ChatCompletionChunk], Any]:
+                `ChatCompletion` in the non-stream mode, or
+                `Stream[ChatCompletionChunk]` in the stream mode,
+                or `ChatCompletionStreamManager[BaseModel]` in the structured
+                stream mode.
+        """
         pass
 
     @abstractmethod
@@ -243,15 +356,41 @@ class BaseModelBackend(ABC, metaclass=ModelBackendMeta):
         messages: List[OpenAIMessage],
         response_format: Optional[Type[BaseModel]] = None,
         tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[
+    ) -> Union[
+        ChatCompletion,
+        AsyncStream[ChatCompletionChunk],
+        AsyncChatCompletionStreamManager[BaseModel],
+    ]:
+        r"""Runs the query to the backend model in async non-stream mode.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+            response_format (Optional[Type[BaseModel]]): The format of the
+                response.
+            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
+                use for the request.
+
+        Returns:
+            Union[ChatCompletion, AsyncStream[ChatCompletionChunk], Any]:
+                `ChatCompletion` in the non-stream mode, or
+                `AsyncStream[ChatCompletionChunk]` in the stream mode,
+                or `AsyncChatCompletionStreamManager[BaseModel]` in the
+                structured stream mode.
+        """
         pass
 
+    @observe()
     def run(
         self,
         messages: List[OpenAIMessage],
         response_format: Optional[Type[BaseModel]] = None,
         tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[
+    ) -> Union[
+        ChatCompletion,
+        Stream[ChatCompletionChunk],
+        ChatCompletionStreamManager[BaseModel],
+    ]:
         r"""Runs the query to the backend model.
 
         Args:
@@ -265,24 +404,47 @@ class BaseModelBackend(ABC, metaclass=ModelBackendMeta):
                 (default: :obj:`None`)
 
         Returns:
-            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-                `ChatCompletion` in the non-stream mode,
-                `Stream[ChatCompletionChunk]` in the stream mode
+            Union[ChatCompletion, Stream[ChatCompletionChunk], Any]:
+                `ChatCompletion` in the non-stream mode,
+                `Stream[ChatCompletionChunk]` in the stream mode, or
+                `ChatCompletionStreamManager[BaseModel]` in the structured
+                stream mode.
         """
+        # Log the request if logging is enabled
+        log_path = self._log_request(messages)
+
         # None -> use default tools
         if tools is None:
             tools = self.model_config_dict.get("tools", None)
         # Empty -> use no tools
         elif not tools:
             tools = None
-        return self._run(messages, response_format, tools)
 
+        logger.info("Running model: %s", self.model_type)
+        logger.info("Messages: %s", messages)
+        logger.info("Response format: %s", response_format)
+        logger.info("Tools: %s", tools)
+
+        result = self._run(messages, response_format, tools)
+        logger.info("Result: %s", result)
+
+        # Log the response if logging is enabled
+        if log_path:
+            self._log_response(log_path, result)
+
+        return result
+
+    @observe()
     async def arun(
         self,
         messages: List[OpenAIMessage],
         response_format: Optional[Type[BaseModel]] = None,
         tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[
+    ) -> Union[
+        ChatCompletion,
+        AsyncStream[ChatCompletionChunk],
+        AsyncChatCompletionStreamManager[BaseModel],
+    ]:
         r"""Runs the query to the backend model asynchronously.
 
         Args:
@@ -296,26 +458,33 @@ class BaseModelBackend(ABC, metaclass=ModelBackendMeta):
                 (default: :obj:`None`)
 
         Returns:
-            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
-                `ChatCompletion` in the non-stream mode,
-                `AsyncStream[ChatCompletionChunk]` in the stream mode
+            Union[ChatCompletion, AsyncStream[ChatCompletionChunk], Any]:
+                `ChatCompletion` in the non-stream mode,
+                `AsyncStream[ChatCompletionChunk]` in the stream mode, or
+                `AsyncChatCompletionStreamManager[BaseModel]` in the structured
+                stream mode.
        """
+        # Log the request if logging is enabled
+        log_path = self._log_request(messages)
+
         if tools is None:
             tools = self.model_config_dict.get("tools", None)
         elif not tools:
             tools = None
-        return await self._arun(messages, response_format, tools)
 
-
-
-
-
+        logger.info("Running model: %s", self.model_type)
+        logger.info("Messages: %s", messages)
+        logger.info("Response format: %s", response_format)
+        logger.info("Tools: %s", tools)
 
-
-
-
-
-
+        result = await self._arun(messages, response_format, tools)
+        logger.info("Result: %s", result)
+
+        # Log the response if logging is enabled
+        if log_path:
+            self._log_response(log_path, result)
+
+        return result
 
     def count_tokens_from_messages(self, messages: List[OpenAIMessage]) -> int:
         r"""Count the number of tokens in the messages using the specific
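The logging added to BaseModelBackend above is driven entirely by environment variables read at construction time (CAMEL_MODEL_LOG_ENABLED and CAMEL_LOG_DIR), and max_retries becomes a constructor argument defaulting to 3. Below is a minimal sketch of turning the JSON conversation logs on, assuming the usual ModelFactory.create entry point and that OPENAI_API_KEY is already exported; the platform and model choices are illustrative, not taken from this diff.

import os

# Enable per-request JSON logging before any backend is constructed; each
# run()/arun() call then writes camel_logs/conv_<timestamp>.json with the
# request messages and, once available, the response.
os.environ["CAMEL_MODEL_LOG_ENABLED"] = "true"
os.environ["CAMEL_LOG_DIR"] = "camel_logs"  # same value as the default

from camel.models import ModelFactory
from camel.types import ModelPlatformType, ModelType

model = ModelFactory.create(
    model_platform=ModelPlatformType.OPENAI,  # any supported platform
    model_type=ModelType.GPT_4O_MINI,
)
response = model.run([{"role": "user", "content": "Hello!"}])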
camel/models/cerebras_model.py
ADDED
@@ -0,0 +1,83 @@
+# ========= Copyright 2023-2025 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2025 @ CAMEL-AI.org. All Rights Reserved. =========
+import os
+from typing import Any, Dict, Optional, Union
+
+from camel.configs import CerebrasConfig
+from camel.models.openai_compatible_model import OpenAICompatibleModel
+from camel.types import ModelType
+from camel.utils import (
+    BaseTokenCounter,
+    api_keys_required,
+)
+
+
+class CerebrasModel(OpenAICompatibleModel):
+    r"""LLM API served by Cerebras in a unified
+    OpenAICompatibleModel interface.
+
+    Args:
+        model_type (Union[ModelType, str]): Model for which a backend is
+            created.
+        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
+            that will be fed into:obj:`openai.ChatCompletion.create()`.
+            If:obj:`None`, :obj:`CerebrasConfig().as_dict()` will be used.
+            (default: :obj:`None`)
+        api_key (Optional[str], optional): The API key for authenticating
+            with the Cerebras service. (default: :obj:`None`).
+        url (Optional[str], optional): The url to the Cerebras service.
+            (default: :obj:`None`)
+        token_counter (Optional[BaseTokenCounter], optional): Token counter to
+            use for the model. If not provided, :obj:`OpenAITokenCounter(
+            ModelType.GPT_4O_MINI)` will be used.
+            (default: :obj:`None`)
+        timeout (Optional[float], optional): The timeout value in seconds for
+            API calls. If not provided, will fall back to the MODEL_TIMEOUT
+            environment variable or default to 180 seconds.
+            (default: :obj:`None`)
+        max_retries (int, optional): Maximum number of retries for API calls.
+            (default: :obj:`3`)
+        **kwargs (Any): Additional arguments to pass to the client
+            initialization.
+    """
+
+    @api_keys_required([("api_key", "CEREBRAS_API_KEY")])
+    def __init__(
+        self,
+        model_type: Union[ModelType, str],
+        model_config_dict: Optional[Dict[str, Any]] = None,
+        api_key: Optional[str] = None,
+        url: Optional[str] = None,
+        token_counter: Optional[BaseTokenCounter] = None,
+        timeout: Optional[float] = None,
+        max_retries: int = 3,
+        **kwargs: Any,
+    ) -> None:
+        if model_config_dict is None:
+            model_config_dict = CerebrasConfig().as_dict()
+        api_key = api_key or os.environ.get("CEREBRAS_API_KEY")
+        url = url or os.environ.get(
+            "CEREBRAS_API_BASE_URL", "https://api.cerebras.ai/v1"
+        )
+        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
+        super().__init__(
+            model_type=model_type,
+            model_config_dict=model_config_dict,
+            api_key=api_key,
+            url=url,
+            token_counter=token_counter,
+            timeout=timeout,
+            max_retries=max_retries,
+            **kwargs,
+        )
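Since CerebrasModel only layers Cerebras-specific defaults (the CEREBRAS_API_KEY and CEREBRAS_API_BASE_URL environment variables plus CerebrasConfig) on top of OpenAICompatibleModel, it is used like any other backend. A minimal sketch, assuming CEREBRAS_API_KEY is exported; the model name string is a placeholder, not taken from this diff.

from camel.models.cerebras_model import CerebrasModel

# model_type accepts a plain string; "llama3.1-8b" is only a placeholder.
model = CerebrasModel(model_type="llama3.1-8b", max_retries=5)

# run() comes from the OpenAI-compatible base class and returns a standard
# ChatCompletion in non-stream mode.
completion = model.run(
    [{"role": "user", "content": "Give a one-line summary of CAMEL-AI."}]
)
print(completion.choices[0].message.content)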
camel/models/cohere_model.py
CHANGED
@@ -1,4 +1,4 @@
-# ========= Copyright 2023-
+# ========= Copyright 2023-2025 @ CAMEL-AI.org. All Rights Reserved. =========
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
@@ -10,7 +10,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# ========= Copyright 2023-
+# ========= Copyright 2023-2025 @ CAMEL-AI.org. All Rights Reserved. =========
 import ast
 import json
 import logging
@@ -21,9 +21,12 @@ from typing import TYPE_CHECKING, Any, Dict, List, Optional, Type, Union
 from pydantic import BaseModel
 
 if TYPE_CHECKING:
-    from cohere.types import
+    from cohere.types import (  # type: ignore[attr-defined]
+        ChatMessageV2,
+        ChatResponse,
+    )
 
-from camel.configs import
+from camel.configs import CohereConfig
 from camel.messages import OpenAIMessage
 from camel.models import BaseModelBackend
 from camel.models._utils import try_modify_message_with_format
@@ -32,8 +35,19 @@ from camel.utils import (
     BaseTokenCounter,
     OpenAITokenCounter,
     api_keys_required,
+    get_current_agent_session_id,
+    update_current_observation,
+    update_langfuse_trace,
 )
 
+if os.environ.get("LANGFUSE_ENABLED", "False").lower() == "true":
+    try:
+        from langfuse.decorators import observe
+    except ImportError:
+        from camel.utils import observe
+else:
+    from camel.utils import observe
+
 try:
     if os.getenv("AGENTOPS_API_KEY") is not None:
         from agentops import LLMEvent, record
@@ -65,6 +79,8 @@ class CohereModel(BaseModelBackend):
             API calls. If not provided, will fall back to the MODEL_TIMEOUT
             environment variable or default to 180 seconds.
             (default: :obj:`None`)
+        **kwargs (Any): Additional arguments to pass to the client
+            initialization.
     """
 
     @api_keys_required(
@@ -80,6 +96,7 @@ class CohereModel(BaseModelBackend):
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
         timeout: Optional[float] = None,
+        **kwargs: Any,
     ):
         import cohere
 
@@ -94,10 +111,14 @@ class CohereModel(BaseModelBackend):
             model_type, model_config_dict, api_key, url, token_counter, timeout
         )
         self._client = cohere.ClientV2(
-            timeout=self._timeout,
+            timeout=self._timeout,
+            api_key=self._api_key,
+            **kwargs,
         )
         self._async_client = cohere.AsyncClientV2(
-            timeout=self._timeout,
+            timeout=self._timeout,
+            api_key=self._api_key,
+            **kwargs,
         )
 
     def _to_openai_response(self, response: 'ChatResponse') -> ChatCompletion:
@@ -271,6 +292,7 @@ class CohereModel(BaseModelBackend):
 
         return request_config
 
+    @observe(as_type="generation")
     def _run(
         self,
         messages: List[OpenAIMessage],
@@ -285,6 +307,28 @@ class CohereModel(BaseModelBackend):
         Returns:
             ChatCompletion.
         """
+        update_current_observation(
+            input={
+                "messages": messages,
+                "tools": tools,
+            },
+            model=str(self.model_type),
+            model_parameters=self.model_config_dict,
+        )
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "source": "camel",
+                    "agent_id": agent_session_id,
+                    "agent_type": "camel_chat_agent",
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
+
         from cohere.core.api_error import ApiError
 
         request_config = self._prepare_request(
@@ -309,6 +353,10 @@ class CohereModel(BaseModelBackend):
 
         openai_response = self._to_openai_response(response)
 
+        update_current_observation(
+            usage=openai_response.usage,
+        )
+
         # Add AgentOps LLM Event tracking
         if LLMEvent:
             llm_event = LLMEvent(
@@ -325,6 +373,7 @@ class CohereModel(BaseModelBackend):
 
         return openai_response
 
+    @observe(as_type="generation")
     async def _arun(
         self,
         messages: List[OpenAIMessage],
@@ -339,6 +388,28 @@ class CohereModel(BaseModelBackend):
         Returns:
             ChatCompletion.
         """
+        update_current_observation(
+            input={
+                "messages": messages,
+                "tools": tools,
+            },
+            model=str(self.model_type),
+            model_parameters=self.model_config_dict,
+        )
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "source": "camel",
+                    "agent_id": agent_session_id,
+                    "agent_type": "camel_chat_agent",
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
+
         from cohere.core.api_error import ApiError
 
         request_config = self._prepare_request(
@@ -363,6 +434,10 @@ class CohereModel(BaseModelBackend):
 
         openai_response = self._to_openai_response(response)
 
+        update_current_observation(
+            usage=openai_response.usage,
+        )
+
         # Add AgentOps LLM Event tracking
         if LLMEvent:
             llm_event = LLMEvent(
@@ -379,21 +454,6 @@ class CohereModel(BaseModelBackend):
 
         return openai_response
 
-    def check_model_config(self):
-        r"""Check whether the model configuration contains any unexpected
-        arguments to Cohere API.
-
-        Raises:
-            ValueError: If the model configuration dictionary contains any
-                unexpected arguments to Cohere API.
-        """
-        for param in self.model_config_dict:
-            if param not in COHERE_API_PARAMS:
-                raise ValueError(
-                    f"Unexpected argument `{param}` is "
-                    "input into Cohere model backend."
-                )
-
     @property
     def stream(self) -> bool:
         r"""Returns whether the model is in stream mode, which sends partial
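The Langfuse hooks added to CohereModel above are opt-in: the @observe(as_type="generation") decorator and the trace updates only do real work when LANGFUSE_ENABLED is truthy and the langfuse package can be imported; otherwise the no-op observe from camel.utils is used. A rough sketch of enabling it follows; the credential variable names are the Langfuse SDK's own conventions, not something this diff configures, and the Cohere model name is a placeholder.

import os

# Turn on the conditional Langfuse code path before importing the model,
# since the flag is checked at module import time.
os.environ["LANGFUSE_ENABLED"] = "true"

# Standard Langfuse SDK credentials (assumed names, not set by this diff).
os.environ["LANGFUSE_PUBLIC_KEY"] = "pk-..."
os.environ["LANGFUSE_SECRET_KEY"] = "sk-..."
os.environ["LANGFUSE_HOST"] = "https://cloud.langfuse.com"

from camel.models.cohere_model import CohereModel

# Cohere credentials (COHERE_API_KEY) are assumed to be set; "command-r"
# is a placeholder model name.
model = CohereModel(model_type="command-r")
completion = model.run([{"role": "user", "content": "Hello from CAMEL."}])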
camel/models/cometapi_model.py
ADDED
@@ -0,0 +1,83 @@
+# ========= Copyright 2023-2025 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2025 @ CAMEL-AI.org. All Rights Reserved. =========
+import os
+from typing import Any, Dict, Optional, Union
+
+from camel.configs import CometAPIConfig
+from camel.models.openai_compatible_model import OpenAICompatibleModel
+from camel.types import ModelType
+from camel.utils import (
+    BaseTokenCounter,
+    api_keys_required,
+)
+
+
+class CometAPIModel(OpenAICompatibleModel):
+    r"""LLM API served by CometAPI in a unified OpenAICompatibleModel
+    interface.
+
+    Args:
+        model_type (Union[ModelType, str]): Model for which a backend is
+            created.
+        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
+            that will be fed into:obj:`openai.ChatCompletion.create()`.
+            If:obj:`None`, :obj:`CometAPIConfig().as_dict()` will be used.
+            (default: :obj:`None`)
+        api_key (Optional[str], optional): The API key for authenticating
+            with the CometAPI service. (default: :obj:`None`).
+        url (Optional[str], optional): The url to the CometAPI service.
+            (default: :obj:`None`)
+        token_counter (Optional[BaseTokenCounter], optional): Token counter to
+            use for the model. If not provided, :obj:`OpenAITokenCounter(
+            ModelType.GPT_4O_MINI)` will be used.
+            (default: :obj:`None`)
+        timeout (Optional[float], optional): The timeout value in seconds for
+            API calls. If not provided, will fall back to the MODEL_TIMEOUT
+            environment variable or default to 180 seconds.
+            (default: :obj:`None`)
+        max_retries (int, optional): Maximum number of retries for API calls.
+            (default: :obj:`3`)
+        **kwargs (Any): Additional arguments to pass to the client
+            initialization.
+    """
+
+    @api_keys_required([("api_key", "COMETAPI_KEY")])
+    def __init__(
+        self,
+        model_type: Union[ModelType, str],
+        model_config_dict: Optional[Dict[str, Any]] = None,
+        api_key: Optional[str] = None,
+        url: Optional[str] = None,
+        token_counter: Optional[BaseTokenCounter] = None,
+        timeout: Optional[float] = None,
+        max_retries: int = 3,
+        **kwargs: Any,
+    ) -> None:
+        if model_config_dict is None:
+            model_config_dict = CometAPIConfig().as_dict()
+        api_key = api_key or os.environ.get("COMETAPI_KEY")
+        url = url or os.environ.get(
+            "COMETAPI_API_BASE_URL", "https://api.cometapi.com/v1"
+        )
+        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
+        super().__init__(
+            model_type=model_type,
+            model_config_dict=model_config_dict,
+            api_key=api_key,
+            url=url,
+            token_counter=token_counter,
+            timeout=timeout,
+            max_retries=max_retries,
+            **kwargs,
+        )
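CometAPIModel follows the same pattern as the Cerebras backend: authentication comes from COMETAPI_KEY, the endpoint defaults to https://api.cometapi.com/v1 and can be overridden with COMETAPI_API_BASE_URL, and the timeout falls back to MODEL_TIMEOUT (180 seconds) when not passed explicitly. A short configuration sketch follows; the model identifier is a placeholder, not a claim about CometAPI's catalogue.

import os

# Environment knobs read in CometAPIModel.__init__ (see the hunk above).
os.environ["COMETAPI_KEY"] = "sk-..."   # required for authentication
os.environ["MODEL_TIMEOUT"] = "120"     # optional; the default is 180 seconds

from camel.models.cometapi_model import CometAPIModel

# "gpt-4o-mini" is a placeholder identifier routed through CometAPI.
model = CometAPIModel(model_type="gpt-4o-mini")
completion = model.run([{"role": "user", "content": "ping"}])
print(completion.choices[0].message.content)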