camel-ai 0.2.59__py3-none-any.whl → 0.2.82__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of camel-ai might be problematic.
- camel/__init__.py +3 -3
- camel/agents/__init__.py +2 -2
- camel/agents/_types.py +9 -4
- camel/agents/_utils.py +40 -2
- camel/agents/base.py +2 -2
- camel/agents/chat_agent.py +5012 -902
- camel/agents/critic_agent.py +2 -2
- camel/agents/deductive_reasoner_agent.py +56 -56
- camel/agents/embodied_agent.py +2 -2
- camel/agents/knowledge_graph_agent.py +20 -20
- camel/agents/mcp_agent.py +39 -36
- camel/agents/multi_hop_generator_agent.py +3 -3
- camel/agents/programmed_agent_instruction.py +2 -2
- camel/agents/repo_agent.py +4 -3
- camel/agents/role_assignment_agent.py +2 -2
- camel/agents/search_agent.py +2 -2
- camel/agents/task_agent.py +2 -2
- camel/agents/tool_agents/__init__.py +2 -2
- camel/agents/tool_agents/base.py +2 -2
- camel/agents/tool_agents/hugging_face_tool_agent.py +3 -3
- camel/benchmarks/__init__.py +2 -2
- camel/benchmarks/apibank.py +5 -5
- camel/benchmarks/apibench.py +2 -2
- camel/benchmarks/base.py +2 -2
- camel/benchmarks/browsecomp.py +44 -33
- camel/benchmarks/gaia.py +17 -13
- camel/benchmarks/mock_website/README.md +94 -0
- camel/benchmarks/mock_website/mock_web.py +299 -0
- camel/benchmarks/mock_website/requirements.txt +3 -0
- camel/benchmarks/mock_website/shopping_mall/app.py +465 -0
- camel/benchmarks/mock_website/task.json +104 -0
- camel/benchmarks/nexus.py +3 -3
- camel/benchmarks/ragbench.py +2 -2
- camel/bots/__init__.py +2 -2
- camel/bots/discord/__init__.py +2 -2
- camel/bots/discord/discord_app.py +2 -2
- camel/bots/discord/discord_installation.py +2 -2
- camel/bots/discord/discord_store.py +3 -3
- camel/bots/slack/__init__.py +2 -2
- camel/bots/slack/models.py +4 -4
- camel/bots/slack/slack_app.py +2 -2
- camel/bots/telegram_bot.py +2 -2
- camel/configs/__init__.py +26 -2
- camel/configs/aihubmix_config.py +90 -0
- camel/configs/aiml_config.py +2 -2
- camel/configs/amd_config.py +70 -0
- camel/configs/anthropic_config.py +8 -7
- camel/configs/base_config.py +2 -2
- camel/configs/bedrock_config.py +5 -3
- camel/configs/cerebras_config.py +98 -0
- camel/configs/cohere_config.py +3 -3
- camel/configs/cometapi_config.py +106 -0
- camel/configs/crynux_config.py +94 -0
- camel/configs/deepseek_config.py +9 -8
- camel/configs/gemini_config.py +6 -4
- camel/configs/groq_config.py +6 -4
- camel/configs/internlm_config.py +6 -4
- camel/configs/litellm_config.py +2 -2
- camel/configs/lmstudio_config.py +6 -4
- camel/configs/minimax_config.py +95 -0
- camel/configs/mistral_config.py +3 -3
- camel/configs/modelscope_config.py +5 -3
- camel/configs/moonshot_config.py +2 -2
- camel/configs/nebius_config.py +105 -0
- camel/configs/netmind_config.py +2 -2
- camel/configs/novita_config.py +2 -2
- camel/configs/nvidia_config.py +2 -2
- camel/configs/ollama_config.py +2 -2
- camel/configs/openai_config.py +8 -3
- camel/configs/openrouter_config.py +6 -4
- camel/configs/ppio_config.py +2 -2
- camel/configs/qianfan_config.py +85 -0
- camel/configs/qwen_config.py +2 -2
- camel/configs/reka_config.py +3 -3
- camel/configs/samba_config.py +8 -6
- camel/configs/sglang_config.py +2 -2
- camel/configs/siliconflow_config.py +2 -2
- camel/configs/togetherai_config.py +2 -2
- camel/configs/vllm_config.py +4 -2
- camel/configs/watsonx_config.py +2 -2
- camel/configs/yi_config.py +6 -4
- camel/configs/zhipuai_config.py +6 -4
- camel/{data_collector → data_collectors}/__init__.py +2 -2
- camel/{data_collector → data_collectors}/alpaca_collector.py +19 -10
- camel/{data_collector → data_collectors}/base.py +2 -2
- camel/{data_collector → data_collectors}/sharegpt_collector.py +3 -3
- camel/datagen/__init__.py +2 -2
- camel/datagen/cot_datagen.py +32 -37
- camel/datagen/evol_instruct/__init__.py +2 -2
- camel/datagen/evol_instruct/evol_instruct.py +2 -2
- camel/datagen/evol_instruct/scorer.py +24 -25
- camel/datagen/evol_instruct/templates.py +48 -48
- camel/datagen/self_improving_cot.py +5 -5
- camel/datagen/self_instruct/__init__.py +2 -2
- camel/datagen/self_instruct/filter/__init__.py +2 -2
- camel/datagen/self_instruct/filter/filter_function.py +2 -2
- camel/datagen/self_instruct/filter/filter_registry.py +2 -2
- camel/datagen/self_instruct/filter/instruction_filter.py +2 -2
- camel/datagen/self_instruct/self_instruct.py +2 -2
- camel/datagen/self_instruct/templates.py +47 -47
- camel/datagen/source2synth/__init__.py +2 -2
- camel/datagen/source2synth/data_processor.py +2 -2
- camel/datagen/source2synth/models.py +2 -2
- camel/datagen/source2synth/user_data_processor_config.py +2 -2
- camel/datahubs/__init__.py +2 -2
- camel/datahubs/base.py +2 -2
- camel/datahubs/huggingface.py +2 -2
- camel/datahubs/models.py +2 -2
- camel/datasets/__init__.py +2 -2
- camel/datasets/base_generator.py +41 -12
- camel/datasets/few_shot_generator.py +18 -18
- camel/datasets/models.py +3 -3
- camel/datasets/self_instruct_generator.py +2 -2
- camel/datasets/static_dataset.py +152 -2
- camel/embeddings/__init__.py +2 -2
- camel/embeddings/azure_embedding.py +2 -2
- camel/embeddings/base.py +2 -2
- camel/embeddings/gemini_embedding.py +2 -2
- camel/embeddings/jina_embedding.py +10 -3
- camel/embeddings/mistral_embedding.py +2 -2
- camel/embeddings/openai_compatible_embedding.py +2 -2
- camel/embeddings/openai_embedding.py +2 -2
- camel/embeddings/sentence_transformers_embeddings.py +4 -4
- camel/embeddings/together_embedding.py +2 -2
- camel/embeddings/vlm_embedding.py +11 -4
- camel/environments/__init__.py +14 -2
- camel/environments/models.py +2 -2
- camel/environments/multi_step.py +2 -2
- camel/environments/rlcards_env.py +860 -0
- camel/environments/single_step.py +30 -5
- camel/environments/tic_tac_toe.py +3 -3
- camel/extractors/__init__.py +2 -2
- camel/extractors/base.py +2 -2
- camel/extractors/python_strategies.py +2 -2
- camel/generators.py +2 -2
- camel/human.py +2 -2
- camel/interpreters/__init__.py +4 -2
- camel/interpreters/base.py +16 -3
- camel/interpreters/docker/Dockerfile +53 -7
- camel/interpreters/docker_interpreter.py +70 -11
- camel/interpreters/e2b_interpreter.py +59 -11
- camel/interpreters/internal_python_interpreter.py +81 -4
- camel/interpreters/interpreter_error.py +2 -2
- camel/interpreters/ipython_interpreter.py +23 -5
- camel/interpreters/microsandbox_interpreter.py +395 -0
- camel/interpreters/subprocess_interpreter.py +36 -4
- camel/loaders/__init__.py +17 -5
- camel/loaders/apify_reader.py +2 -2
- camel/loaders/base_io.py +2 -2
- camel/loaders/base_loader.py +85 -0
- camel/loaders/chunkr_reader.py +128 -93
- camel/loaders/crawl4ai_reader.py +2 -2
- camel/loaders/firecrawl_reader.py +6 -6
- camel/loaders/jina_url_reader.py +2 -2
- camel/loaders/markitdown.py +2 -2
- camel/loaders/mineru_extractor.py +2 -2
- camel/loaders/mistral_reader.py +148 -0
- camel/loaders/scrapegraph_reader.py +2 -2
- camel/loaders/unstructured_io.py +2 -2
- camel/logger.py +5 -5
- camel/memories/__init__.py +2 -2
- camel/memories/agent_memories.py +86 -3
- camel/memories/base.py +36 -2
- camel/memories/blocks/__init__.py +2 -2
- camel/memories/blocks/chat_history_block.py +126 -9
- camel/memories/blocks/vectordb_block.py +10 -3
- camel/memories/context_creators/__init__.py +2 -2
- camel/memories/context_creators/score_based.py +31 -239
- camel/memories/records.py +98 -13
- camel/messages/__init__.py +2 -2
- camel/messages/base.py +193 -46
- camel/messages/conversion/__init__.py +2 -2
- camel/messages/conversion/alpaca.py +2 -2
- camel/messages/conversion/conversation_models.py +2 -2
- camel/messages/conversion/sharegpt/__init__.py +2 -2
- camel/messages/conversion/sharegpt/function_call_formatter.py +2 -2
- camel/messages/conversion/sharegpt/hermes/__init__.py +2 -2
- camel/messages/conversion/sharegpt/hermes/hermes_function_formatter.py +2 -2
- camel/messages/func_message.py +54 -17
- camel/models/__init__.py +18 -2
- camel/models/_utils.py +3 -3
- camel/models/aihubmix_model.py +83 -0
- camel/models/aiml_model.py +11 -18
- camel/models/amd_model.py +101 -0
- camel/models/anthropic_model.py +127 -20
- camel/models/aws_bedrock_model.py +12 -35
- camel/models/azure_openai_model.py +263 -63
- camel/models/base_audio_model.py +5 -3
- camel/models/base_model.py +195 -26
- camel/models/cerebras_model.py +83 -0
- camel/models/cohere_model.py +81 -21
- camel/models/cometapi_model.py +83 -0
- camel/models/crynux_model.py +87 -0
- camel/models/deepseek_model.py +61 -59
- camel/models/fish_audio_model.py +8 -2
- camel/models/gemini_model.py +439 -30
- camel/models/groq_model.py +11 -19
- camel/models/internlm_model.py +11 -18
- camel/models/litellm_model.py +94 -34
- camel/models/lmstudio_model.py +17 -20
- camel/models/minimax_model.py +83 -0
- camel/models/mistral_model.py +84 -19
- camel/models/model_factory.py +49 -6
- camel/models/model_manager.py +33 -11
- camel/models/modelscope_model.py +13 -193
- camel/models/moonshot_model.py +195 -21
- camel/models/nebius_model.py +83 -0
- camel/models/nemotron_model.py +19 -9
- camel/models/netmind_model.py +11 -18
- camel/models/novita_model.py +11 -18
- camel/models/nvidia_model.py +11 -18
- camel/models/ollama_model.py +14 -21
- camel/models/openai_audio_models.py +2 -2
- camel/models/openai_compatible_model.py +234 -27
- camel/models/openai_model.py +255 -39
- camel/models/openrouter_model.py +11 -19
- camel/models/ppio_model.py +11 -18
- camel/models/qianfan_model.py +89 -0
- camel/models/qwen_model.py +13 -193
- camel/models/reka_model.py +90 -21
- camel/models/reward/__init__.py +2 -2
- camel/models/reward/base_reward_model.py +2 -2
- camel/models/reward/evaluator.py +2 -2
- camel/models/reward/nemotron_model.py +2 -2
- camel/models/reward/skywork_model.py +2 -2
- camel/models/samba_model.py +117 -49
- camel/models/sglang_model.py +162 -42
- camel/models/siliconflow_model.py +12 -35
- camel/models/stub_model.py +10 -7
- camel/models/togetherai_model.py +11 -18
- camel/models/vllm_model.py +10 -18
- camel/models/volcano_model.py +16 -20
- camel/models/watsonx_model.py +69 -19
- camel/models/yi_model.py +11 -18
- camel/models/zhipuai_model.py +70 -18
- camel/parsers/__init__.py +18 -0
- camel/parsers/mcp_tool_call_parser.py +176 -0
- camel/personas/__init__.py +2 -2
- camel/personas/persona.py +2 -2
- camel/personas/persona_hub.py +2 -2
- camel/prompts/__init__.py +2 -2
- camel/prompts/ai_society.py +2 -2
- camel/prompts/base.py +2 -2
- camel/prompts/code.py +2 -2
- camel/prompts/evaluation.py +2 -2
- camel/prompts/generate_text_embedding_data.py +2 -2
- camel/prompts/image_craft.py +2 -2
- camel/prompts/misalignment.py +2 -2
- camel/prompts/multi_condition_image_craft.py +2 -2
- camel/prompts/object_recognition.py +2 -2
- camel/prompts/persona_hub.py +3 -3
- camel/prompts/prompt_templates.py +2 -2
- camel/prompts/role_description_prompt_template.py +2 -2
- camel/prompts/solution_extraction.py +8 -8
- camel/prompts/task_prompt_template.py +2 -2
- camel/prompts/translation.py +2 -2
- camel/prompts/video_description_prompt.py +3 -3
- camel/responses/__init__.py +2 -2
- camel/responses/agent_responses.py +2 -2
- camel/retrievers/__init__.py +2 -2
- camel/retrievers/auto_retriever.py +23 -3
- camel/retrievers/base.py +2 -2
- camel/retrievers/bm25_retriever.py +3 -4
- camel/retrievers/cohere_rerank_retriever.py +2 -2
- camel/retrievers/hybrid_retrival.py +4 -4
- camel/retrievers/vector_retriever.py +2 -2
- camel/runtimes/Dockerfile.multi-toolkit +90 -0
- camel/{runtime → runtimes}/__init__.py +2 -2
- camel/runtimes/api.py +153 -0
- camel/{runtime → runtimes}/base.py +2 -2
- camel/{runtime → runtimes}/configs.py +13 -13
- camel/{runtime → runtimes}/daytona_runtime.py +18 -19
- camel/{runtime → runtimes}/docker_runtime.py +13 -13
- camel/{runtime → runtimes}/llm_guard_runtime.py +28 -28
- camel/{runtime → runtimes}/remote_http_runtime.py +12 -12
- camel/{runtime → runtimes}/ubuntu_docker_runtime.py +3 -3
- camel/{runtime → runtimes}/utils/__init__.py +2 -2
- camel/{runtime → runtimes}/utils/function_risk_toolkit.py +2 -2
- camel/{runtime → runtimes}/utils/ignore_risk_toolkit.py +2 -2
- camel/schemas/__init__.py +2 -2
- camel/schemas/base.py +2 -2
- camel/schemas/openai_converter.py +3 -3
- camel/schemas/outlines_converter.py +2 -2
- camel/services/agent_openapi_server.py +380 -0
- camel/societies/__init__.py +4 -2
- camel/societies/babyagi_playing.py +2 -2
- camel/societies/role_playing.py +201 -80
- camel/societies/workforce/__init__.py +10 -3
- camel/societies/workforce/base.py +9 -5
- camel/societies/workforce/events.py +143 -0
- camel/societies/workforce/prompts.py +258 -33
- camel/societies/workforce/role_playing_worker.py +95 -30
- camel/societies/workforce/single_agent_worker.py +659 -30
- camel/societies/workforce/structured_output_handler.py +512 -0
- camel/societies/workforce/task_channel.py +182 -38
- camel/societies/workforce/utils.py +784 -18
- camel/societies/workforce/worker.py +96 -28
- camel/societies/workforce/workflow_memory_manager.py +1746 -0
- camel/societies/workforce/workforce.py +5730 -366
- camel/societies/workforce/workforce_callback.py +103 -0
- camel/societies/workforce/workforce_logger.py +647 -0
- camel/societies/workforce/workforce_metrics.py +33 -0
- camel/storages/__init__.py +10 -2
- camel/storages/graph_storages/__init__.py +2 -2
- camel/storages/graph_storages/base.py +2 -2
- camel/storages/graph_storages/graph_element.py +2 -2
- camel/storages/graph_storages/nebula_graph.py +4 -4
- camel/storages/graph_storages/neo4j_graph.py +7 -7
- camel/storages/key_value_storages/__init__.py +2 -2
- camel/storages/key_value_storages/base.py +2 -2
- camel/storages/key_value_storages/in_memory.py +2 -2
- camel/storages/key_value_storages/json.py +17 -4
- camel/storages/key_value_storages/mem0_cloud.py +50 -49
- camel/storages/key_value_storages/redis.py +2 -2
- camel/storages/object_storages/__init__.py +2 -2
- camel/storages/object_storages/amazon_s3.py +2 -2
- camel/storages/object_storages/azure_blob.py +2 -2
- camel/storages/object_storages/base.py +2 -2
- camel/storages/object_storages/google_cloud.py +3 -3
- camel/storages/vectordb_storages/__init__.py +12 -2
- camel/storages/vectordb_storages/base.py +2 -2
- camel/storages/vectordb_storages/chroma.py +731 -0
- camel/storages/vectordb_storages/faiss.py +712 -0
- camel/storages/vectordb_storages/milvus.py +2 -2
- camel/storages/vectordb_storages/oceanbase.py +16 -17
- camel/storages/vectordb_storages/pgvector.py +349 -0
- camel/storages/vectordb_storages/qdrant.py +6 -6
- camel/storages/vectordb_storages/surreal.py +372 -0
- camel/storages/vectordb_storages/tidb.py +11 -8
- camel/storages/vectordb_storages/weaviate.py +714 -0
- camel/tasks/__init__.py +2 -2
- camel/tasks/task.py +366 -27
- camel/tasks/task_prompt.py +3 -3
- camel/terminators/__init__.py +2 -2
- camel/terminators/base.py +2 -2
- camel/terminators/response_terminator.py +2 -2
- camel/terminators/token_limit_terminator.py +2 -2
- camel/toolkits/__init__.py +58 -10
- camel/toolkits/aci_toolkit.py +66 -21
- camel/toolkits/arxiv_toolkit.py +8 -8
- camel/toolkits/ask_news_toolkit.py +2 -2
- camel/toolkits/async_browser_toolkit.py +174 -575
- camel/toolkits/audio_analysis_toolkit.py +3 -3
- camel/toolkits/base.py +65 -7
- camel/toolkits/bohrium_toolkit.py +318 -0
- camel/toolkits/browser_toolkit.py +306 -566
- camel/toolkits/browser_toolkit_commons.py +568 -0
- camel/toolkits/code_execution.py +67 -11
- camel/toolkits/context_summarizer_toolkit.py +684 -0
- camel/toolkits/craw4ai_toolkit.py +93 -0
- camel/toolkits/dappier_toolkit.py +12 -8
- camel/toolkits/data_commons_toolkit.py +2 -2
- camel/toolkits/dingtalk.py +1135 -0
- camel/toolkits/earth_science_toolkit.py +5367 -0
- camel/toolkits/edgeone_pages_mcp_toolkit.py +49 -0
- camel/toolkits/excel_toolkit.py +910 -70
- camel/toolkits/file_toolkit.py +1402 -0
- camel/toolkits/function_tool.py +128 -20
- camel/toolkits/github_toolkit.py +148 -43
- camel/toolkits/gmail_toolkit.py +1839 -0
- camel/toolkits/google_calendar_toolkit.py +40 -6
- camel/toolkits/google_drive_mcp_toolkit.py +54 -0
- camel/toolkits/google_maps_toolkit.py +2 -2
- camel/toolkits/google_scholar_toolkit.py +2 -2
- camel/toolkits/human_toolkit.py +36 -12
- camel/toolkits/hybrid_browser_toolkit/__init__.py +18 -0
- camel/toolkits/hybrid_browser_toolkit/config_loader.py +185 -0
- camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit.py +246 -0
- camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit_ts.py +1973 -0
- camel/toolkits/hybrid_browser_toolkit/installer.py +203 -0
- camel/toolkits/hybrid_browser_toolkit/ts/package-lock.json +4589 -0
- camel/toolkits/hybrid_browser_toolkit/ts/package.json +33 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/browser-scripts.js +125 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/browser-session.ts +1929 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/config-loader.ts +233 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/hybrid-browser-toolkit.ts +589 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/index.ts +7 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/parent-child-filter.ts +226 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/snapshot-parser.ts +219 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/som-screenshot-injected.ts +543 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/types.ts +129 -0
- camel/toolkits/hybrid_browser_toolkit/ts/tsconfig.json +27 -0
- camel/toolkits/hybrid_browser_toolkit/ts/websocket-server.js +319 -0
- camel/toolkits/hybrid_browser_toolkit/ws_wrapper.py +1037 -0
- camel/toolkits/hybrid_browser_toolkit_py/__init__.py +17 -0
- camel/toolkits/hybrid_browser_toolkit_py/actions.py +575 -0
- camel/toolkits/hybrid_browser_toolkit_py/agent.py +311 -0
- camel/toolkits/hybrid_browser_toolkit_py/browser_session.py +787 -0
- camel/toolkits/hybrid_browser_toolkit_py/config_loader.py +490 -0
- camel/toolkits/hybrid_browser_toolkit_py/hybrid_browser_toolkit.py +2390 -0
- camel/toolkits/hybrid_browser_toolkit_py/snapshot.py +233 -0
- camel/toolkits/hybrid_browser_toolkit_py/stealth_script.js +0 -0
- camel/toolkits/hybrid_browser_toolkit_py/unified_analyzer.js +1043 -0
- camel/toolkits/image_analysis_toolkit.py +3 -3
- camel/toolkits/image_generation_toolkit.py +390 -0
- camel/toolkits/jina_reranker_toolkit.py +195 -79
- camel/toolkits/klavis_toolkit.py +7 -3
- camel/toolkits/linkedin_toolkit.py +2 -2
- camel/toolkits/markitdown_toolkit.py +104 -0
- camel/toolkits/math_toolkit.py +66 -12
- camel/toolkits/mcp_toolkit.py +841 -600
- camel/toolkits/memory_toolkit.py +7 -3
- camel/toolkits/meshy_toolkit.py +2 -2
- camel/toolkits/message_agent_toolkit.py +608 -0
- camel/toolkits/message_integration.py +724 -0
- camel/toolkits/mineru_toolkit.py +2 -2
- camel/toolkits/minimax_mcp_toolkit.py +195 -0
- camel/toolkits/networkx_toolkit.py +2 -2
- camel/toolkits/note_taking_toolkit.py +277 -0
- camel/toolkits/notion_mcp_toolkit.py +224 -0
- camel/toolkits/notion_toolkit.py +2 -2
- camel/toolkits/open_api_specs/biztoc/__init__.py +2 -2
- camel/toolkits/open_api_specs/biztoc/ai-plugin.json +1 -1
- camel/toolkits/open_api_specs/coursera/__init__.py +2 -2
- camel/toolkits/open_api_specs/create_qr_code/__init__.py +2 -2
- camel/toolkits/open_api_specs/klarna/__init__.py +2 -2
- camel/toolkits/open_api_specs/nasa_apod/__init__.py +2 -2
- camel/toolkits/open_api_specs/outschool/__init__.py +2 -2
- camel/toolkits/open_api_specs/outschool/ai-plugin.json +1 -1
- camel/toolkits/open_api_specs/outschool/openapi.yaml +1 -1
- camel/toolkits/open_api_specs/outschool/paths/__init__.py +2 -2
- camel/toolkits/open_api_specs/outschool/paths/get_classes.py +2 -2
- camel/toolkits/open_api_specs/outschool/paths/search_teachers.py +2 -2
- camel/toolkits/open_api_specs/security_config.py +2 -2
- camel/toolkits/open_api_specs/speak/__init__.py +2 -2
- camel/toolkits/open_api_specs/web_scraper/__init__.py +2 -2
- camel/toolkits/open_api_specs/web_scraper/ai-plugin.json +1 -1
- camel/toolkits/open_api_specs/web_scraper/paths/__init__.py +2 -2
- camel/toolkits/open_api_specs/web_scraper/paths/scraper.py +2 -2
- camel/toolkits/open_api_toolkit.py +2 -2
- camel/toolkits/openbb_toolkit.py +7 -3
- camel/toolkits/origene_mcp_toolkit.py +56 -0
- camel/toolkits/page_script.js +86 -74
- camel/toolkits/playwright_mcp_toolkit.py +27 -32
- camel/toolkits/pptx_toolkit.py +790 -0
- camel/toolkits/pubmed_toolkit.py +2 -2
- camel/toolkits/pulse_mcp_search_toolkit.py +2 -2
- camel/toolkits/pyautogui_toolkit.py +2 -2
- camel/toolkits/reddit_toolkit.py +2 -2
- camel/toolkits/resend_toolkit.py +168 -0
- camel/toolkits/retrieval_toolkit.py +2 -2
- camel/toolkits/screenshot_toolkit.py +213 -0
- camel/toolkits/search_toolkit.py +539 -146
- camel/toolkits/searxng_toolkit.py +2 -2
- camel/toolkits/semantic_scholar_toolkit.py +2 -2
- camel/toolkits/slack_toolkit.py +108 -58
- camel/toolkits/sql_toolkit.py +712 -0
- camel/toolkits/stripe_toolkit.py +2 -2
- camel/toolkits/sympy_toolkit.py +3 -3
- camel/toolkits/task_planning_toolkit.py +134 -0
- camel/toolkits/terminal_toolkit/__init__.py +18 -0
- camel/toolkits/terminal_toolkit/terminal_toolkit.py +1070 -0
- camel/toolkits/terminal_toolkit/utils.py +532 -0
- camel/toolkits/thinking_toolkit.py +3 -3
- camel/toolkits/twitter_toolkit.py +8 -3
- camel/toolkits/vertex_ai_veo_toolkit.py +590 -0
- camel/toolkits/video_analysis_toolkit.py +112 -29
- camel/toolkits/video_download_toolkit.py +22 -16
- camel/toolkits/weather_toolkit.py +2 -2
- camel/toolkits/web_deploy_toolkit.py +1219 -0
- camel/toolkits/wechat_official_toolkit.py +483 -0
- camel/toolkits/whatsapp_toolkit.py +2 -2
- camel/toolkits/wolfram_alpha_toolkit.py +53 -25
- camel/toolkits/zapier_toolkit.py +7 -3
- camel/types/__init__.py +4 -4
- camel/types/agents/__init__.py +2 -2
- camel/types/agents/tool_calling_record.py +6 -3
- camel/types/enums.py +454 -35
- camel/types/mcp_registries.py +2 -2
- camel/types/openai_types.py +4 -4
- camel/types/unified_model_type.py +43 -6
- camel/utils/__init__.py +20 -2
- camel/utils/async_func.py +2 -2
- camel/utils/chunker/__init__.py +2 -2
- camel/utils/chunker/base.py +2 -2
- camel/utils/chunker/code_chunker.py +2 -2
- camel/utils/chunker/uio_chunker.py +2 -2
- camel/utils/commons.py +65 -7
- camel/utils/constants.py +5 -2
- camel/utils/context_utils.py +1134 -0
- camel/utils/deduplication.py +2 -2
- camel/utils/filename.py +2 -2
- camel/utils/langfuse.py +258 -0
- camel/utils/mcp.py +140 -6
- camel/utils/mcp_client.py +1056 -0
- camel/utils/message_summarizer.py +148 -0
- camel/utils/response_format.py +2 -2
- camel/utils/token_counting.py +45 -22
- camel/utils/tool_result.py +44 -0
- camel/verifiers/__init__.py +2 -2
- camel/verifiers/base.py +2 -2
- camel/verifiers/math_verifier.py +2 -2
- camel/verifiers/models.py +2 -2
- camel/verifiers/physics_verifier.py +2 -2
- camel/verifiers/python_verifier.py +2 -2
- {camel_ai-0.2.59.dist-info → camel_ai-0.2.82.dist-info}/METADATA +349 -108
- camel_ai-0.2.82.dist-info/RECORD +507 -0
- {camel_ai-0.2.59.dist-info → camel_ai-0.2.82.dist-info}/WHEEL +1 -1
- {camel_ai-0.2.59.dist-info → camel_ai-0.2.82.dist-info}/licenses/LICENSE +1 -1
- camel/loaders/pandas_reader.py +0 -368
- camel/runtime/api.py +0 -97
- camel/toolkits/dalle_toolkit.py +0 -171
- camel/toolkits/file_write_toolkit.py +0 -395
- camel/toolkits/openai_agent_toolkit.py +0 -135
- camel/toolkits/terminal_toolkit.py +0 -1037
- camel_ai-0.2.59.dist-info/RECORD +0 -410
camel/models/internlm_model.py
CHANGED
@@ -1,4 +1,4 @@
-# ========= Copyright 2023-
+# ========= Copyright 2023-2025 @ CAMEL-AI.org. All Rights Reserved. =========
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
@@ -10,7 +10,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# ========= Copyright 2023-
+# ========= Copyright 2023-2025 @ CAMEL-AI.org. All Rights Reserved. =========
 
 import os
 from typing import Any, Dict, List, Optional, Type, Union
@@ -18,7 +18,7 @@ from typing import Any, Dict, List, Optional, Type, Union
 from openai import AsyncStream
 from pydantic import BaseModel
 
-from camel.configs import
+from camel.configs import InternLMConfig
 from camel.messages import OpenAIMessage
 from camel.models.openai_compatible_model import OpenAICompatibleModel
 from camel.types import (
@@ -54,6 +54,10 @@ class InternLMModel(OpenAICompatibleModel):
             API calls. If not provided, will fall back to the MODEL_TIMEOUT
             environment variable or default to 180 seconds.
             (default: :obj:`None`)
+        max_retries (int, optional): Maximum number of retries for API calls.
+            (default: :obj:`3`)
+        **kwargs (Any): Additional arguments to pass to the client
+            initialization.
     """
 
     @api_keys_required(
@@ -69,6 +73,8 @@ class InternLMModel(OpenAICompatibleModel):
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
         timeout: Optional[float] = None,
+        max_retries: int = 3,
+        **kwargs: Any,
     ) -> None:
         self.model_config = model_config_dict or InternLMConfig().as_dict()
         api_key = api_key or os.environ.get("INTERNLM_API_KEY")
@@ -84,6 +90,8 @@ class InternLMModel(OpenAICompatibleModel):
             url=url,
             token_counter=token_counter,
             timeout=timeout,
+            max_retries=max_retries,
+            **kwargs,
         )
 
     async def _arun(
@@ -93,18 +101,3 @@ class InternLMModel(OpenAICompatibleModel):
         tools: Optional[List[Dict[str, Any]]] = None,
     ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
         raise NotImplementedError("InternLM does not support async inference.")
-
-    def check_model_config(self):
-        r"""Check whether the model configuration contains any
-        unexpected arguments to InternLM API.
-
-        Raises:
-            ValueError: If the model configuration dictionary contains any
-                unexpected arguments to InternLM API.
-        """
-        for param in self.model_config_dict:
-            if param not in INTERNLM_API_PARAMS:
-                raise ValueError(
-                    f"Unexpected argument `{param}` is "
-                    "input into InternLM model backend."
-                )
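This hunk shows the pattern repeated across the model backends in this release: the constructor gains a `max_retries` argument and pass-through `**kwargs` that are forwarded to the OpenAI-compatible client, while the old `check_model_config` validation is removed, so unexpected config keys are no longer rejected up front. A rough usage sketch of the new surface (the model name and key value are placeholders, not taken from the diff):

```python
# Hedged sketch: constructing the backend with the retry/client kwargs added
# in 0.2.82. The model name and API key below are illustrative placeholders.
import os

from camel.models.internlm_model import InternLMModel

os.environ.setdefault("INTERNLM_API_KEY", "sk-placeholder")

model = InternLMModel(
    model_type="internlm3-latest",  # assumed model identifier
    timeout=60.0,                   # seconds before an API call is aborted
    max_retries=5,                  # new in 0.2.82; defaults to 3
    # any further kwargs are forwarded to the underlying client initialization
)
```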
camel/models/litellm_model.py
CHANGED
@@ -1,4 +1,4 @@
-# ========= Copyright 2023-
+# ========= Copyright 2023-2025 @ CAMEL-AI.org. All Rights Reserved. =========
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
@@ -10,13 +10,14 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# ========= Copyright 2023-
+# ========= Copyright 2023-2025 @ CAMEL-AI.org. All Rights Reserved. =========
 import os
+import uuid
 from typing import Any, Dict, List, Optional, Type, Union
 
 from pydantic import BaseModel
 
-from camel.configs import
+from camel.configs import LiteLLMConfig
 from camel.messages import OpenAIMessage
 from camel.models import BaseModelBackend
 from camel.types import ChatCompletion, ModelType
@@ -24,8 +25,19 @@ from camel.utils import (
     BaseTokenCounter,
     LiteLLMTokenCounter,
     dependencies_required,
+    get_current_agent_session_id,
+    update_current_observation,
+    update_langfuse_trace,
 )
 
+if os.environ.get("LANGFUSE_ENABLED", "False").lower() == "true":
+    try:
+        from langfuse.decorators import observe
+    except ImportError:
+        from camel.utils import observe
+else:
+    from camel.utils import observe
+
 
 class LiteLLMModel(BaseModelBackend):
     r"""Constructor for LiteLLM backend with OpenAI compatibility.
@@ -48,6 +60,8 @@ class LiteLLMModel(BaseModelBackend):
             API calls. If not provided, will fall back to the MODEL_TIMEOUT
             environment variable or default to 180 seconds.
             (default: :obj:`None`)
+        **kwargs (Any): Additional arguments to pass to the client
+            initialization.
     """
 
     # NOTE: Currently stream mode is not supported.
@@ -61,6 +75,7 @@ class LiteLLMModel(BaseModelBackend):
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
         timeout: Optional[float] = None,
+        **kwargs: Any,
     ) -> None:
         from litellm import completion
 
@@ -71,6 +86,7 @@ class LiteLLMModel(BaseModelBackend):
             model_type, model_config_dict, api_key, url, token_counter, timeout
         )
         self.client = completion
+        self.kwargs = kwargs
 
     def _convert_response_from_litellm_to_openai(
         self, response
@@ -83,23 +99,47 @@ class LiteLLMModel(BaseModelBackend):
         Returns:
             ChatCompletion: The response object in OpenAI's format.
         """
-
-
-
+
+        converted_choices = []
+        for choice in response.choices:
+            # Build the assistant message dict
+            msg_dict: Dict[str, Any] = {
+                "role": choice.message.role,
+                "content": choice.message.content,
+            }
+
+            if getattr(choice.message, "tool_calls", None):
+                msg_dict["tool_calls"] = choice.message.tool_calls
+
+            elif getattr(choice.message, "function_call", None):
+                func_call = choice.message.function_call
+                msg_dict["tool_calls"] = [
+                    {
+                        "id": f"call_{uuid.uuid4().hex[:24]}",
+                        "type": "function",
+                        "function": {
+                            "name": getattr(func_call, "name", None),
+                            "arguments": getattr(func_call, "arguments", "{}"),
+                        },
+                    }
+                ]
+
+            converted_choices.append(
                 {
-                    "index":
-                    "message":
-
-                        "content": response.choices[0].message.content,
-                    },
-                    "finish_reason": response.choices[0].finish_reason,
+                    "index": choice.index,
+                    "message": msg_dict,
+                    "finish_reason": choice.finish_reason,
                 }
-
-
-
-
-
-
+            )
+
+        return ChatCompletion.construct(
+            id=response.id,
+            choices=converted_choices,
+            created=getattr(response, "created", None),
+            model=getattr(response, "model", None),
+            object=getattr(response, "object", None),
+            system_fingerprint=getattr(response, "system_fingerprint", None),
+            usage=getattr(response, "usage", None),
         )
 
     @property
@@ -117,6 +157,7 @@ class LiteLLMModel(BaseModelBackend):
     async def _arun(self) -> None:  # type: ignore[override]
         raise NotImplementedError
 
+    @observe(as_type='generation')
     def _run(
         self,
         messages: List[OpenAIMessage],
@@ -132,28 +173,47 @@ class LiteLLMModel(BaseModelBackend):
         Returns:
             ChatCompletion
         """
+
+        request_config = self.model_config_dict.copy()
+        if tools:
+            request_config['tools'] = tools
+        if response_format:
+            request_config['response_format'] = response_format
+
+        update_current_observation(
+            input={
+                "messages": messages,
+                "tools": tools,
+            },
+            model=str(self.model_type),
+            model_parameters=self.model_config_dict,
+        )
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "source": "camel",
+                    "agent_id": agent_session_id,
+                    "agent_type": "camel_chat_agent",
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
+
         response = self.client(
             timeout=self._timeout,
             api_key=self._api_key,
             base_url=self._url,
             model=self.model_type,
             messages=messages,
-            **
+            **request_config,
+            **self.kwargs,
         )
         response = self._convert_response_from_litellm_to_openai(response)
-        return response
-
-    def check_model_config(self):
-        r"""Check whether the model configuration contains any unexpected
-        arguments to LiteLLM API.
 
-
-
-
-
-        for param in self.model_config_dict:
-            if param not in LITELLM_API_PARAMS:
-                raise ValueError(
-                    f"Unexpected argument `{param}` is "
-                    "input into LiteLLM model backend."
-                )
+        update_current_observation(
+            usage=response.usage,
+        )
+        return response
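The rewritten `_convert_response_from_litellm_to_openai` now preserves tool calls: native `tool_calls` are copied through, and a legacy `function_call` is wrapped into the `tool_calls` shape with a synthetic id. A standalone sketch of that conversion rule (plain dicts stand in for the LiteLLM message object; this mirrors the diff's logic rather than reproducing the package code):

```python
# Hedged, self-contained illustration of the function_call fallback above;
# it uses plain dicts instead of LiteLLM response objects.
import uuid
from typing import Any, Dict, List


def function_call_to_tool_calls(function_call: Dict[str, Any]) -> List[Dict[str, Any]]:
    """Wrap a legacy function_call payload into the newer tool_calls shape."""
    return [
        {
            "id": f"call_{uuid.uuid4().hex[:24]}",  # synthetic id, as in the diff
            "type": "function",
            "function": {
                "name": function_call.get("name"),
                "arguments": function_call.get("arguments", "{}"),
            },
        }
    ]


print(function_call_to_tool_calls({"name": "add", "arguments": '{"a": 1, "b": 2}'}))
```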
camel/models/lmstudio_model.py
CHANGED
@@ -1,4 +1,4 @@
-# ========= Copyright 2023-
+# ========= Copyright 2023-2025 @ CAMEL-AI.org. All Rights Reserved. =========
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
@@ -10,11 +10,11 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# ========= Copyright 2023-
+# ========= Copyright 2023-2025 @ CAMEL-AI.org. All Rights Reserved. =========
 import os
 from typing import Any, Dict, Optional, Union
 
-from camel.configs import
+from camel.configs import LMStudioConfig
 from camel.models.openai_compatible_model import OpenAICompatibleModel
 from camel.types import ModelType
 from camel.utils import BaseTokenCounter
@@ -43,6 +43,10 @@ class LMStudioModel(OpenAICompatibleModel):
             API calls. If not provided, will fall back to the MODEL_TIMEOUT
             environment variable or default to 180 seconds.
             (default: :obj:`None`)
+        max_retries (int, optional): Maximum number of retries for API calls.
+            (default: :obj:`3`)
+        **kwargs (Any): Additional arguments to pass to the client
+            initialization.
     """
 
     def __init__(
@@ -53,6 +57,8 @@ class LMStudioModel(OpenAICompatibleModel):
        url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
         timeout: Optional[float] = None,
+        max_retries: int = 3,
+        **kwargs: Any,
     ) -> None:
         if model_config_dict is None:
             model_config_dict = LMStudioConfig().as_dict()
@@ -62,21 +68,12 @@ class LMStudioModel(OpenAICompatibleModel):
         )
         timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
         super().__init__(
-            model_type,
+            model_type,
+            model_config_dict,
+            api_key,
+            url,
+            token_counter,
+            timeout,
+            max_retries=max_retries,
+            **kwargs,
         )
-
-    def check_model_config(self):
-        r"""Check whether the model configuration contains any unexpected
-        arguments to LMStudio API. But LMStudio API does not have any
-        additional arguments to check.
-
-        Raises:
-            ValueError: If the model configuration dictionary contains any
-                unexpected arguments to LMStudio API.
-        """
-        for param in self.model_config_dict:
-            if param not in LMSTUDIO_API_PARAMS:
-                raise ValueError(
-                    f"Unexpected argument `{param}` is "
-                    "input into LMStudio model backend."
-                )
camel/models/minimax_model.py
ADDED
@@ -0,0 +1,83 @@
+# ========= Copyright 2023-2025 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2025 @ CAMEL-AI.org. All Rights Reserved. =========
+import os
+from typing import Any, Dict, Optional, Union
+
+from camel.configs import MinimaxConfig
+from camel.models.openai_compatible_model import OpenAICompatibleModel
+from camel.types import ModelType
+from camel.utils import (
+    BaseTokenCounter,
+    api_keys_required,
+)
+
+
+class MinimaxModel(OpenAICompatibleModel):
+    r"""LLM API served by Minimax in a unified OpenAICompatibleModel
+    interface.
+
+    Args:
+        model_type (Union[ModelType, str]): Model for which a backend is
+            created.
+        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
+            that will be fed into:obj:`openai.ChatCompletion.create()`.
+            If:obj:`None`, :obj:`MinimaxConfig().as_dict()` will be used.
+            (default: :obj:`None`)
+        api_key (Optional[str], optional): The API key for authenticating
+            with the Minimax service. (default: :obj:`None`).
+        url (Optional[str], optional): The url to the Minimax M2 service.
+            (default: :obj:`None`)
+        token_counter (Optional[BaseTokenCounter], optional): Token counter to
+            use for the model. If not provided, :obj:`OpenAITokenCounter(
+            ModelType.GPT_4O_MINI)` will be used.
+            (default: :obj:`None`)
+        timeout (Optional[float], optional): The timeout value in seconds for
+            API calls. If not provided, will fall back to the MODEL_TIMEOUT
+            environment variable or default to 180 seconds.
+            (default: :obj:`None`)
+        max_retries (int, optional): Maximum number of retries for API calls.
+            (default: :obj:`3`)
+        **kwargs (Any): Additional arguments to pass to the client
+            initialization.
+    """
+
+    @api_keys_required([("api_key", "MINIMAX_API_KEY")])
+    def __init__(
+        self,
+        model_type: Union[ModelType, str],
+        model_config_dict: Optional[Dict[str, Any]] = None,
+        api_key: Optional[str] = None,
+        url: Optional[str] = None,
+        token_counter: Optional[BaseTokenCounter] = None,
+        timeout: Optional[float] = None,
+        max_retries: int = 3,
+        **kwargs: Any,
+    ) -> None:
+        if model_config_dict is None:
+            model_config_dict = MinimaxConfig().as_dict()
+        api_key = api_key or os.environ.get("MINIMAX_API_KEY")
+        url = url or os.environ.get(
+            "MINIMAX_API_BASE_URL", "https://api.minimaxi.com/v1"
+        )
+        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
+        super().__init__(
+            model_type=model_type,
+            model_config_dict=model_config_dict,
+            api_key=api_key,
+            url=url,
+            token_counter=token_counter,
+            timeout=timeout,
+            max_retries=max_retries,
+            **kwargs,
+        )
camel/models/mistral_model.py
CHANGED
@@ -1,4 +1,4 @@
-# ========= Copyright 2023-
+# ========= Copyright 2023-2025 @ CAMEL-AI.org. All Rights Reserved. =========
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
@@ -10,7 +10,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# ========= Copyright 2023-
+# ========= Copyright 2023-2025 @ CAMEL-AI.org. All Rights Reserved. =========
 import os
 from typing import TYPE_CHECKING, Any, Dict, List, Optional, Type, Union
 
@@ -24,7 +24,7 @@ if TYPE_CHECKING:
 
 from openai import AsyncStream
 
-from camel.configs import
+from camel.configs import MistralConfig
 from camel.logger import get_logger
 from camel.messages import OpenAIMessage
 from camel.models import BaseModelBackend
@@ -35,6 +35,9 @@ from camel.utils import (
     OpenAITokenCounter,
     api_keys_required,
     dependencies_required,
+    get_current_agent_session_id,
+    update_current_observation,
+    update_langfuse_trace,
 )
 
 logger = get_logger(__name__)
@@ -47,6 +50,14 @@ try:
 except (ImportError, AttributeError):
     LLMEvent = None
 
+if os.environ.get("LANGFUSE_ENABLED", "False").lower() == "true":
+    try:
+        from langfuse.decorators import observe
+    except ImportError:
+        from camel.utils import observe
+else:
+    from camel.utils import observe
+
 
 class MistralModel(BaseModelBackend):
     r"""Mistral API in a unified BaseModelBackend interface.
@@ -69,6 +80,10 @@ class MistralModel(BaseModelBackend):
             API calls. If not provided, will fall back to the MODEL_TIMEOUT
             environment variable or default to 180 seconds.
             (default: :obj:`None`)
+        max_retries (int, optional): Maximum number of retries
+            for API calls. (default: :obj:`3`)
+        **kwargs (Any): Additional arguments to pass to the client
+            initialization.
     """
 
     @api_keys_required(
@@ -85,6 +100,8 @@ class MistralModel(BaseModelBackend):
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
         timeout: Optional[float] = None,
+        max_retries: int = 3,
+        **kwargs: Any,
     ) -> None:
         from mistralai import Mistral
 
@@ -95,7 +112,14 @@ class MistralModel(BaseModelBackend):
         url = url or os.environ.get("MISTRAL_API_BASE_URL")
         timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
         super().__init__(
-            model_type,
+            model_type,
+            model_config_dict,
+            api_key,
+            url,
+            token_counter,
+            timeout,
+            max_retries,
+            **kwargs,
         )
         self._client = Mistral(
             timeout_ms=int(self._timeout * 1000)
@@ -103,6 +127,7 @@ class MistralModel(BaseModelBackend):
             else None,
             api_key=self._api_key,
             server_url=self._url,
+            **kwargs,
         )
 
     def _to_openai_response(
@@ -232,6 +257,7 @@ class MistralModel(BaseModelBackend):
             )
         return self._token_counter
 
+    @observe(as_type="generation")
     async def _arun(
         self,
         messages: List[OpenAIMessage],
@@ -242,6 +268,29 @@ class MistralModel(BaseModelBackend):
             "Mistral does not support async inference, using sync "
             "inference instead."
         )
+        update_current_observation(
+            input={
+                "messages": messages,
+                "response_format": response_format,
+                "tools": tools,
+            },
+            model=str(self.model_type),
+            model_parameters=self.model_config_dict,
+        )
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "source": "camel",
+                    "agent_id": agent_session_id,
+                    "agent_type": "camel_chat_agent",
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
+
         request_config = self._prepare_request(
             messages, response_format, tools
         )
@@ -255,6 +304,10 @@ class MistralModel(BaseModelBackend):
 
         openai_response = self._to_openai_response(response)  # type: ignore[arg-type]
 
+        update_current_observation(
+            usage=openai_response.usage,
+        )
+
         # Add AgentOps LLM Event tracking
         if LLMEvent:
             llm_event = LLMEvent(
@@ -271,6 +324,7 @@ class MistralModel(BaseModelBackend):
 
         return openai_response
 
+    @observe(as_type="generation")
     def _run(
         self,
         messages: List[OpenAIMessage],
@@ -290,6 +344,28 @@ class MistralModel(BaseModelBackend):
         Returns:
             ChatCompletion: The response from the model.
         """
+        update_current_observation(
+            input={
+                "messages": messages,
+                "tools": tools,
+            },
+            model=str(self.model_type),
+            model_parameters=self.model_config_dict,
+        )
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "source": "camel",
+                    "agent_id": agent_session_id,
+                    "agent_type": "camel_chat_agent",
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
+
         request_config = self._prepare_request(
             messages, response_format, tools
         )
@@ -303,6 +379,10 @@ class MistralModel(BaseModelBackend):
 
         openai_response = self._to_openai_response(response)  # type: ignore[arg-type]
 
+        update_current_observation(
+            usage=openai_response.usage,
+        )
+
         # Add AgentOps LLM Event tracking
         if LLMEvent:
             llm_event = LLMEvent(
@@ -334,21 +414,6 @@ class MistralModel(BaseModelBackend):
 
         return request_config
 
-    def check_model_config(self):
-        r"""Check whether the model configuration contains any
-        unexpected arguments to Mistral API.
-
-        Raises:
-            ValueError: If the model configuration dictionary contains any
-                unexpected arguments to Mistral API.
-        """
-        for param in self.model_config_dict:
-            if param not in MISTRAL_API_PARAMS:
-                raise ValueError(
-                    f"Unexpected argument `{param}` is "
-                    "input into Mistral model backend."
-                )
-
     @property
     def stream(self) -> bool:
         r"""Returns whether the model is in stream mode, which sends partial