sycommon-python-lib 0.2.0b30__tar.gz → 0.2.0b32__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/PKG-INFO +1 -1
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/pyproject.toml +1 -1
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/llm/get_llm.py +9 -2
- sycommon_python_lib-0.2.0b32/src/sycommon/llm/native_with_fallback_runnable.py +201 -0
- sycommon_python_lib-0.2.0b32/src/sycommon/llm/output_fixing_runnable.py +216 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/llm/token_usage_mysql_service.py +2 -2
- sycommon_python_lib-0.2.0b32/src/sycommon/llm/usage_token.py +186 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/models/token_usage_mysql.py +2 -2
- sycommon_python_lib-0.2.0b32/src/sycommon/tests/deep_agent_server.py +535 -0
- sycommon_python_lib-0.2.0b32/src/sycommon/tests/skills/web-search/helpers/formatter.py +207 -0
- sycommon_python_lib-0.2.0b32/src/sycommon/tests/skills/web-search/helpers/query_builder.py +144 -0
- sycommon_python_lib-0.2.0b32/src/sycommon/tests/test_deep_agent.py +130 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon_python_lib.egg-info/PKG-INFO +1 -1
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon_python_lib.egg-info/SOURCES.txt +6 -2
- sycommon_python_lib-0.2.0b30/src/sycommon/llm/native_smart_retry_runnable.py +0 -176
- sycommon_python_lib-0.2.0b30/src/sycommon/llm/smart_retry_runnable.py +0 -159
- sycommon_python_lib-0.2.0b30/src/sycommon/llm/usage_token.py +0 -249
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/README.md +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/setup.cfg +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/command/__init__.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/command/cli.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/command/core/__init__.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/command/core/console.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/command/core/models.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/command/core/project.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/command/core/utils.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/command/templates/__init__.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/command/templates/agent/__init__.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/command/templates/base/__init__.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/command/templates/web/__init__.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/__init__.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/agent/__init__.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/agent/examples/01_basic_agent.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/agent/examples/02_tool_agent.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/agent/examples/03_structured_output.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/agent/examples/04_memory_agent.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/agent/examples/05_streaming.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/agent/examples/06_multi_agent.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/agent/examples/07_skills_agent.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/agent/examples/08_middleware.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/agent/examples/09_interrupt.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/agent/examples/10_custom_llm.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/agent/examples/11_complex_workflow.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/agent/examples/12_batch_processing.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/agent/examples/__init__.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/agent/examples/middleware/01_basic_monitoring.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/agent/examples/middleware/02_permission_control.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/agent/examples/middleware/03_tool_skill_filter.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/agent/examples/middleware/04_caching_retry.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/agent/examples/middleware/05_sanitization.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/agent/examples/middleware/06_tracking.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/agent/examples/middleware/07_advanced.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/agent/examples/middleware/08_progressive_skills.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/agent/examples/middleware/__init__.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/agent/examples/middleware/override_examples.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/agent/examples/virtual_employee_demo.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/agent/exports.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/agent/get_agent.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/agent/skills/__init__.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/agent/skills/examples/faq_handler/scripts/search.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/agent/skills/exports.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/agent/virtual_employee.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/config/Config.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/config/DatabaseConfig.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/config/ElasticsearchConfig.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/config/EmbeddingConfig.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/config/LLMConfig.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/config/LangfuseConfig.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/config/MQConfig.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/config/RedisConfig.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/config/RerankerConfig.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/config/SentryConfig.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/config/__init__.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/database/async_base_db_service.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/database/async_database_service.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/database/base_db_service.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/database/database_service.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/database/elasticsearch_service.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/database/redis_service.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/database/token_usage_db_service.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/health/__init__.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/health/health_check.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/health/metrics.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/health/ping.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/heartbeat_process/__init__.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/heartbeat_process/heartbeat_config.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/heartbeat_process/heartbeat_process_manager.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/heartbeat_process/heartbeat_process_worker.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/llm/__init__.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/llm/embedding.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/llm/llm_logger.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/llm/llm_tokens.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/llm/llm_with_token_tracking.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/llm/struct_token.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/llm/sy_langfuse.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/llm/token_usage_es_service.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/logging/__init__.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/logging/async_sql_logger.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/logging/kafka_log.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/logging/logger_levels.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/logging/logger_wrapper.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/logging/process_logger.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/logging/sql_logger.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/middleware/__init__.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/middleware/context.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/middleware/cors.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/middleware/docs.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/middleware/exception.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/middleware/middleware.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/middleware/monitor_memory.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/middleware/mq.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/middleware/timeout.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/middleware/traceid.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/models/__init__.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/models/base_http.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/models/log.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/models/mqlistener_config.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/models/mqmsg_model.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/models/mqsend_config.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/models/sso_user.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/models/token_usage.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/notice/__init__.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/notice/uvicorn_monitor.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/rabbitmq/process_pool_consumer.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/rabbitmq/rabbitmq_client.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/rabbitmq/rabbitmq_pool.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/rabbitmq/rabbitmq_service.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/rabbitmq/rabbitmq_service_client_manager.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/rabbitmq/rabbitmq_service_connection_monitor.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/rabbitmq/rabbitmq_service_consumer_manager.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/rabbitmq/rabbitmq_service_core.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/rabbitmq/rabbitmq_service_producer_manager.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/sentry/__init__.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/sentry/sy_sentry.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/services.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/sse/__init__.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/sse/event.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/sse/sse.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/synacos/__init__.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/synacos/example.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/synacos/example2.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/synacos/feign.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/synacos/feign_client.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/synacos/nacos_client_base.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/synacos/nacos_config_manager.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/synacos/nacos_heartbeat_manager.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/synacos/nacos_service.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/synacos/nacos_service_discovery.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/synacos/nacos_service_registration.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/synacos/param.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/tests/test_email.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/tests/test_mq.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/tools/__init__.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/tools/async_utils.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/tools/docs.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/tools/env.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/tools/merge_headers.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/tools/snowflake.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/tools/syemail.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon/tools/timing.py +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon_python_lib.egg-info/dependency_links.txt +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon_python_lib.egg-info/entry_points.txt +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon_python_lib.egg-info/requires.txt +0 -0
- {sycommon_python_lib-0.2.0b30 → sycommon_python_lib-0.2.0b32}/src/sycommon_python_lib.egg-info/top_level.txt +0 -0
|
@@ -59,9 +59,15 @@ def get_llm(
|
|
|
59
59
|
# 指令模式(默认)
|
|
60
60
|
llm = get_llm("Qwen3.5-122B-A10B", thinking=False)
|
|
61
61
|
|
|
62
|
-
# 结构化输出
|
|
62
|
+
# 结构化输出 - 默认模式(推荐)
|
|
63
|
+
# use_native=False(默认):使用 OutputFixingRunnable,更稳定
|
|
63
64
|
llm = get_llm("Qwen3.5-122B-A10B")
|
|
64
|
-
chain = llm.with_structured_output(MyModel)
|
|
65
|
+
chain = llm.with_structured_output(MyModel) # use_native=False 默认
|
|
66
|
+
result = await chain.ainvoke([HumanMessage(content="你好")])
|
|
67
|
+
|
|
68
|
+
# 使用原生模式(需模型支持 function calling)
|
|
69
|
+
llm = get_llm("Qwen3.5-122B-A10B")
|
|
70
|
+
chain = llm.with_structured_output(MyModel, use_native=True)
|
|
65
71
|
result = await chain.ainvoke([HumanMessage(content="你好")])
|
|
66
72
|
print(result._token_usage_) # Token 统计
|
|
67
73
|
|
|
@@ -146,6 +152,7 @@ def get_llm(
|
|
|
146
152
|
"streaming": streaming,
|
|
147
153
|
"timeout": timeout,
|
|
148
154
|
"max_retries": max_retries,
|
|
155
|
+
"model_kwargs": {"response_format": {"type": "json_object"}},
|
|
149
156
|
}
|
|
150
157
|
|
|
151
158
|
# 合并其他透传参数(包括 presence_penalty, extra_body, top_p 等)
|
|
# -*- coding: utf-8 -*-
"""
Native structured output with fallback-fix Runnable.

The first attempt goes through the model's native ``with_structured_output``
runnable; if that fails to produce a valid instance of the target Pydantic
model, subsequent attempts fall back to a repair loop that feeds the error
message and the failed output back to the LLM and asks it to emit schema-
conforming JSON.
"""

import json
import re
from typing import Any, Optional, Type

from pydantic import BaseModel, ValidationError
from langchain_core.language_models import BaseChatModel
from langchain_core.runnables import Runnable, RunnableConfig
from langchain_core.output_parsers import PydanticOutputParser
from langchain_core.messages import HumanMessage


class NativeWithFallbackRunnable(Runnable):
    """
    Native structured output with LLM-driven repair fallback.

    Attempt 0 invokes the wrapped native structured-output runnable; every
    later attempt (up to ``max_retries`` total) asks the LLM to fix its own
    malformed output against the target schema.
    """

    def __init__(
        self,
        native_runnable: Runnable,
        llm: BaseChatModel,
        output_model: Type[BaseModel],
        max_retries: int = 3,
    ):
        """
        Args:
            native_runnable: The runnable produced by native
                ``with_structured_output`` (tried first).
            llm: Chat model used for the repair requests on fallback.
            output_model: Target Pydantic model for the structured result.
            max_retries: Total number of attempts (native + repair rounds).
        """
        super().__init__()
        self.native_runnable = native_runnable
        self.llm = llm
        self.output_model = output_model
        self.max_retries = max_retries
        self.parser = PydanticOutputParser(pydantic_object=output_model)

    def _try_parse_result(self, result: Any) -> BaseModel:
        """Coerce a native-runnable result into the target Pydantic model.

        Accepts an already-built model instance, a dict of field values, or a
        raw JSON string; anything else raises ``ValueError``.
        """
        if isinstance(result, self.output_model):
            return result

        if isinstance(result, dict):
            return self.output_model(**result)

        if isinstance(result, str):
            processed = self._process_content(result)
            data = json.loads(processed)
            return self.output_model(**data)

        raise ValueError(f"无法将结果转换为 {self.output_model.__name__}")

    def _process_content(self, content: str) -> str:
        """Normalize raw LLM text into a parseable JSON string."""
        # Remove markdown code fences. The previous implementation used
        # content.strip("```json"), which strips any of the CHARACTERS
        # ` j s o n from both ends and can corrupt payloads that merely
        # start or end with those letters; use fence-aware regexes instead.
        content = re.sub(r'^\s*```(?:json)?\s*', '', content)
        content = re.sub(r'\s*```\s*$', '', content)
        content = content.strip()
        # Keep only the outermost {...} span to drop any surrounding chatter.
        json_match = re.search(r'\{[\s\S]*\}', content)
        if json_match:
            content = json_match.group(0)
        # Map Python-style None spellings to JSON null. NOTE(review): plain
        # .replace also rewrites occurrences inside string values — behavior
        # preserved from the original heuristic.
        content = content.replace("None", "null").replace("none", "null").replace("NONE", "null")
        # Normalize full-width (Chinese) double quotes to ASCII quotes,
        # spelled with escapes so the intent survives any re-encoding.
        content = content.replace('\u201c', '"').replace('\u201d', '"')
        return content

    def _extract_content(self, response: Any) -> str:
        """Pull the text payload out of an LLM response object."""
        if hasattr(response, 'content'):
            return response.content
        return str(response)

    def _build_fix_prompt(self, failed_output: str, error_message: str) -> str:
        """Build the repair prompt shared by the sync and async fix paths."""
        return f"""上一次输出解析失败,请根据错误信息修正后重新输出。

**重要提示**:
1. 必须严格按照指定的 JSON schema 输出
2. 不能遗漏任何必需字段(required fields)
3. 不能添加 schema 中未定义的字段
4. 字段类型必须匹配(字符串、数字、列表、对象等)

错误信息:
{error_message}

上一次输出:
{failed_output}

输出格式,请严格按照以下 JSON schema 输出,不要输出任何多余内容:
{self.parser.get_format_instructions()}"""

    def _request_fix(self, messages: list, failed_output: str, error_message: str, config=None) -> str:
        """Synchronously ask the LLM to repair a failed output."""
        fix_prompt = self._build_fix_prompt(failed_output, error_message)
        fix_messages = messages + [HumanMessage(content=fix_prompt)]
        response = self.llm.invoke(fix_messages, config=config)
        return self._extract_content(response)

    async def _arequest_fix(self, messages: list, failed_output: str, error_message: str, config=None) -> str:
        """Asynchronously ask the LLM to repair a failed output."""
        fix_prompt = self._build_fix_prompt(failed_output, error_message)
        fix_messages = messages + [HumanMessage(content=fix_prompt)]
        response = await self.llm.ainvoke(fix_messages, config=config)
        return self._extract_content(response)

    def _parse_fixed(self, content: str) -> BaseModel:
        """Parse repaired LLM text, falling back from the parser to raw JSON."""
        processed = self._process_content(content)
        try:
            return self.parser.parse(processed)
        except Exception:
            # PydanticOutputParser can be stricter than necessary; retry with
            # a plain JSON decode + model construction.
            data = json.loads(processed)
            return self.output_model(**data)

    def invoke(self, input: Any, config: Optional[RunnableConfig] = None) -> BaseModel:
        """Synchronous invocation: native attempt first, then repair rounds.

        Raises:
            ValueError: if all ``max_retries`` attempts fail to parse.
        """
        messages = input if isinstance(input, list) else input.get("messages", [])
        last_error: Optional[Exception] = None
        last_content: Optional[str] = None

        for attempt in range(self.max_retries):
            # Tracked explicitly so the except-branch can recover the raw
            # native output without introspecting local names via dir().
            native_result: Any = None
            try:
                if attempt == 0:
                    # First attempt: the native structured-output runnable.
                    native_result = self.native_runnable.invoke(input, config=config)
                    return self._try_parse_result(native_result)

                # Repair mode: feed the error and failed output back to the LLM.
                from sycommon.logging.kafka_log import SYLogger
                SYLogger.warning(f"[Native降级修正] 第 {attempt} 次尝试,错误: {last_error}")

                last_content = self._request_fix(
                    messages, last_content or "", str(last_error), config
                )
                return self._parse_fixed(last_content)

            except (ValidationError, json.JSONDecodeError, ValueError, TypeError) as e:
                last_error = e
                from sycommon.logging.kafka_log import SYLogger
                if attempt == 0:
                    # Preserve whatever the native attempt produced (if the
                    # invoke itself raised, there is nothing to preserve).
                    last_content = "" if native_result is None else str(native_result)
                    SYLogger.warning(f"[Native降级修正] 原生模式失败,降级到修正模式: {e}")

                if attempt == self.max_retries - 1:
                    raise ValueError(f"经过 {self.max_retries} 次尝试仍无法解析: {last_error}")

        raise ValueError("未知错误")

    async def ainvoke(self, input: Any, config: Optional[RunnableConfig] = None) -> BaseModel:
        """Asynchronous invocation: native attempt first, then repair rounds.

        Raises:
            ValueError: if all ``max_retries`` attempts fail to parse.
        """
        messages = input if isinstance(input, list) else input.get("messages", [])
        last_error: Optional[Exception] = None
        last_content: Optional[str] = None

        for attempt in range(self.max_retries):
            # Tracked explicitly so the except-branch can recover the raw
            # native output without introspecting local names via dir().
            native_result: Any = None
            try:
                if attempt == 0:
                    # First attempt: the native structured-output runnable.
                    native_result = await self.native_runnable.ainvoke(input, config=config)
                    return self._try_parse_result(native_result)

                # Repair mode: feed the error and failed output back to the LLM.
                from sycommon.logging.kafka_log import SYLogger
                SYLogger.warning(f"[Native降级修正] 第 {attempt} 次尝试,错误: {last_error}")

                last_content = await self._arequest_fix(
                    messages, last_content or "", str(last_error), config
                )
                return self._parse_fixed(last_content)

            except (ValidationError, json.JSONDecodeError, ValueError, TypeError) as e:
                last_error = e
                from sycommon.logging.kafka_log import SYLogger
                if attempt == 0:
                    # Preserve whatever the native attempt produced (if the
                    # ainvoke itself raised, there is nothing to preserve).
                    last_content = "" if native_result is None else str(native_result)
                    SYLogger.warning(f"[Native降级修正] 原生模式失败,降级到修正模式: {e}")

                if attempt == self.max_retries - 1:
                    raise ValueError(f"经过 {self.max_retries} 次尝试仍无法解析: {last_error}")

        raise ValueError("未知错误")
@@ -0,0 +1,216 @@
|
|
|
1
|
+
# -*- coding: utf-8 -*-
|
|
2
|
+
"""
|
|
3
|
+
结构化输出自动修正 Runnable
|
|
4
|
+
|
|
5
|
+
当 JSON 解析失败时,自动将错误信息和原始输出发送给 LLM 请求修正。
|
|
6
|
+
类似于 LangChain 旧版的 OutputFixingParser 功能。
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
import json
|
|
10
|
+
import re
|
|
11
|
+
from typing import Type, Optional, Any
|
|
12
|
+
|
|
13
|
+
from pydantic import BaseModel, ValidationError
|
|
14
|
+
from langchain_core.language_models import BaseChatModel
|
|
15
|
+
from langchain_core.runnables import Runnable, RunnableConfig
|
|
16
|
+
from langchain_core.output_parsers import PydanticOutputParser
|
|
17
|
+
from langchain_core.messages import HumanMessage
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class OutputFixingRunnable(Runnable):
|
|
21
|
+
"""
|
|
22
|
+
结构化输出自动修正 Runnable
|
|
23
|
+
|
|
24
|
+
特点:
|
|
25
|
+
- 当解析失败时,自动将错误信息和原始输出发送给 LLM 请求修正
|
|
26
|
+
- 支持自定义最大重试次数
|
|
27
|
+
- 自动处理常见的 JSON 格式问题(中文引号、代码块标记等)
|
|
28
|
+
|
|
29
|
+
适用场景:
|
|
30
|
+
- 输出偶尔格式错误
|
|
31
|
+
- 需要稳定可靠的 JSON 解析
|
|
32
|
+
"""
|
|
33
|
+
|
|
34
|
+
def __init__(
|
|
35
|
+
self,
|
|
36
|
+
base_chain: Runnable,
|
|
37
|
+
llm: BaseChatModel,
|
|
38
|
+
output_model: Type[BaseModel],
|
|
39
|
+
max_retries: int = 3
|
|
40
|
+
):
|
|
41
|
+
super().__init__()
|
|
42
|
+
self.base_chain = base_chain
|
|
43
|
+
self.llm = llm
|
|
44
|
+
self.output_model = output_model
|
|
45
|
+
self.max_retries = max_retries
|
|
46
|
+
self.parser = PydanticOutputParser(pydantic_object=output_model)
|
|
47
|
+
|
|
48
|
+
def invoke(self, input: Any, config: Optional[RunnableConfig] = None) -> BaseModel:
|
|
49
|
+
"""同步调用"""
|
|
50
|
+
# 获取 LLM 响应(通过 base_chain,包含 prompt)
|
|
51
|
+
response = self.base_chain.invoke(input, config=config)
|
|
52
|
+
content = self._extract_content(response)
|
|
53
|
+
|
|
54
|
+
last_error = None
|
|
55
|
+
last_content = content
|
|
56
|
+
|
|
57
|
+
for attempt in range(self.max_retries):
|
|
58
|
+
try:
|
|
59
|
+
# 处理内容并尝试解析
|
|
60
|
+
processed = self._process_content(last_content)
|
|
61
|
+
# 先尝试直接用 PydanticOutputParser 解析
|
|
62
|
+
try:
|
|
63
|
+
result = self.parser.parse(processed)
|
|
64
|
+
return result
|
|
65
|
+
except Exception:
|
|
66
|
+
# 如果失败,尝试用 json.loads 解析后再实例化
|
|
67
|
+
data = json.loads(processed)
|
|
68
|
+
result = self.output_model(**data)
|
|
69
|
+
return result
|
|
70
|
+
except (ValidationError, json.JSONDecodeError, ValueError, TypeError) as e:
|
|
71
|
+
last_error = e
|
|
72
|
+
from sycommon.logging.kafka_log import SYLogger
|
|
73
|
+
SYLogger.warning(f"[OutputFixing] 第 {attempt + 1} 次解析失败: {e}")
|
|
74
|
+
|
|
75
|
+
if attempt < self.max_retries - 1:
|
|
76
|
+
# 请求 LLM 修正
|
|
77
|
+
last_content = self._request_fix(
|
|
78
|
+
original_input=input,
|
|
79
|
+
failed_output=last_content,
|
|
80
|
+
error_message=str(e),
|
|
81
|
+
config=config
|
|
82
|
+
)
|
|
83
|
+
|
|
84
|
+
raise ValueError(f"经过 {self.max_retries} 次尝试仍无法解析: {last_error}")
|
|
85
|
+
|
|
86
|
+
async def ainvoke(self, input: Any, config: Optional[RunnableConfig] = None) -> BaseModel:
|
|
87
|
+
"""异步调用"""
|
|
88
|
+
# 获取 LLM 响应(通过 base_chain,包含 prompt)
|
|
89
|
+
response = await self.base_chain.ainvoke(input, config=config)
|
|
90
|
+
content = self._extract_content(response)
|
|
91
|
+
|
|
92
|
+
last_error = None
|
|
93
|
+
last_content = content
|
|
94
|
+
|
|
95
|
+
for attempt in range(self.max_retries):
|
|
96
|
+
try:
|
|
97
|
+
# 处理内容并尝试解析
|
|
98
|
+
processed = self._process_content(last_content)
|
|
99
|
+
# 先尝试直接用 PydanticOutputParser 解析
|
|
100
|
+
try:
|
|
101
|
+
result = self.parser.parse(processed)
|
|
102
|
+
return result
|
|
103
|
+
except Exception:
|
|
104
|
+
# 如果失败,尝试用 json.loads 解析后再实例化
|
|
105
|
+
data = json.loads(processed)
|
|
106
|
+
result = self.output_model(**data)
|
|
107
|
+
return result
|
|
108
|
+
except (ValidationError, json.JSONDecodeError, ValueError, TypeError) as e:
|
|
109
|
+
last_error = e
|
|
110
|
+
from sycommon.logging.kafka_log import SYLogger
|
|
111
|
+
SYLogger.warning(f"[OutputFixing] 第 {attempt + 1} 次解析失败: {e}")
|
|
112
|
+
|
|
113
|
+
if attempt < self.max_retries - 1:
|
|
114
|
+
# 请求 LLM 修正
|
|
115
|
+
last_content = await self._arequest_fix(
|
|
116
|
+
original_input=input,
|
|
117
|
+
failed_output=last_content,
|
|
118
|
+
error_message=str(e),
|
|
119
|
+
config=config
|
|
120
|
+
)
|
|
121
|
+
|
|
122
|
+
raise ValueError(f"经过 {self.max_retries} 次尝试仍无法解析: {last_error}")
|
|
123
|
+
|
|
124
|
+
def _extract_content(self, response) -> str:
|
|
125
|
+
"""从 LLM 响应中提取文本内容"""
|
|
126
|
+
if hasattr(response, 'content'):
|
|
127
|
+
return response.content
|
|
128
|
+
return str(response)
|
|
129
|
+
|
|
130
|
+
def _process_content(self, content: str) -> str:
|
|
131
|
+
"""处理内容(移除代码块标记、规范化 JSON)"""
|
|
132
|
+
# 移除 markdown 代码块标记
|
|
133
|
+
content = content.strip("```json").strip("```").strip()
|
|
134
|
+
|
|
135
|
+
# 尝试提取 JSON 对象(处理前后有多余文本的情况)
|
|
136
|
+
json_match = re.search(r'\{[\s\S]*\}', content)
|
|
137
|
+
if json_match:
|
|
138
|
+
content = json_match.group(0)
|
|
139
|
+
|
|
140
|
+
# 规范化 null 值
|
|
141
|
+
content = content.replace("None", "null").replace(
|
|
142
|
+
"none", "null").replace("NONE", "null")
|
|
143
|
+
|
|
144
|
+
# 规范化中文引号为英文引号(只处理键和字符串值外部的引号)
|
|
145
|
+
content = content.replace('"', '"').replace('"', '"')
|
|
146
|
+
|
|
147
|
+
# 处理单引号(谨慎处理,只替换明显不是内容中的单引号)
|
|
148
|
+
# 不再简单替换所有单引号为双引号,因为这可能破坏内容
|
|
149
|
+
|
|
150
|
+
return content
|
|
151
|
+
|
|
152
|
+
def _build_fix_messages(
    self,
    original_input: Any,
    failed_output: str,
    error_message: str
) -> list:
    """Build the message list asking the LLM to repair a failed output.

    Shared by the sync and async fix paths (the two previously duplicated
    this prompt verbatim). Prior conversation messages are preserved when
    ``original_input`` is a dict with a ``messages`` key, and the correction
    instruction is appended as the final human message.
    """
    fix_prompt = f"""上一次输出解析失败,请根据错误信息修正后重新输出。

**重要提示**:
1. 必须严格按照指定的 JSON schema 输出
2. 不能遗漏任何必需字段(required fields)
3. 不能添加 schema 中未定义的字段
4. 字段类型必须匹配(字符串、数字、列表、对象等)

错误信息:
{error_message}

上一次输出:
{failed_output}

输出格式,请严格按照以下 JSON schema 输出,不要输出任何多余内容:
{self.parser.get_format_instructions()}"""

    # Keep prior conversation context so the model knows what was asked.
    messages = original_input.get("messages", []) if isinstance(
        original_input, dict) else []
    return messages + [HumanMessage(content=fix_prompt)]

def _request_fix(
    self,
    original_input: Any,
    failed_output: str,
    error_message: str,
    config: Optional[RunnableConfig] = None
) -> str:
    """Synchronously ask the LLM to repair an unparseable output.

    Args:
        original_input: The original chain input (dict with ``messages``).
        failed_output: The raw text that failed to parse.
        error_message: The parser/validation error description.
        config: Optional runnable config forwarded to the LLM call.

    Returns:
        The corrected raw text extracted from the LLM response.
    """
    fix_messages = self._build_fix_messages(
        original_input, failed_output, error_message)
    response = self.llm.invoke(fix_messages, config=config)
    return self._extract_content(response)

async def _arequest_fix(
    self,
    original_input: Any,
    failed_output: str,
    error_message: str,
    config: Optional[RunnableConfig] = None
) -> str:
    """Asynchronously ask the LLM to repair an unparseable output.

    Mirrors :meth:`_request_fix` using ``ainvoke``.
    """
    fix_messages = self._build_fix_messages(
        original_input, failed_output, error_message)
    response = await self.llm.ainvoke(fix_messages, config=config)
    return self._extract_content(response)
|
|
@@ -101,7 +101,7 @@ class TokenUsageMySQLService(metaclass=SingletonMeta):
|
|
|
101
101
|
# 创建表(如果不存在)
|
|
102
102
|
await session.execute(text(f"""
|
|
103
103
|
CREATE TABLE IF NOT EXISTS token_usage_daily (
|
|
104
|
-
id INT AUTO_INCREMENT PRIMARY KEY,
|
|
104
|
+
id INT AUTO_INCREMENT PRIMARY KEY COMMENT '主键ID',
|
|
105
105
|
user_id VARCHAR(128) NULL COMMENT '用户ID',
|
|
106
106
|
tenant_id VARCHAR(128) NULL COMMENT '租户ID',
|
|
107
107
|
service_name VARCHAR(128) NULL COMMENT '服务名称',
|
|
@@ -113,7 +113,7 @@ class TokenUsageMySQLService(metaclass=SingletonMeta):
|
|
|
113
113
|
usage_date DATE NOT NULL COMMENT '使用日期',
|
|
114
114
|
created_at DATETIME DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
|
|
115
115
|
updated_at DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
|
|
116
|
-
UNIQUE KEY
|
|
116
|
+
UNIQUE KEY uniq_user_service_env_date (user_id, service_name, system_env, usage_date),
|
|
117
117
|
INDEX idx_user_id (user_id),
|
|
118
118
|
INDEX idx_service_name (service_name),
|
|
119
119
|
INDEX idx_tenant_id (tenant_id),
|
|
@@ -0,0 +1,186 @@
|
|
|
1
|
+
# -*- coding: utf-8 -*-
|
|
2
|
+
"""
|
|
3
|
+
LLM Token 统计和结构化输出模块
|
|
4
|
+
|
|
5
|
+
支持两种结构化输出模式:
|
|
6
|
+
1. native: 使用模型原生的 with_structured_output,失败后降级到修正逻辑
|
|
7
|
+
2. fixing: 直接使用 OutputFixingRunnable(默认模式,更稳定)
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
from typing import Type, Optional
|
|
11
|
+
|
|
12
|
+
from langfuse import Langfuse
|
|
13
|
+
from langchain_core.language_models import BaseChatModel
|
|
14
|
+
from langchain_core.runnables import Runnable
|
|
15
|
+
from langchain_core.output_parsers import PydanticOutputParser
|
|
16
|
+
from langchain_core.messages import HumanMessage
|
|
17
|
+
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
|
|
18
|
+
from pydantic import BaseModel, Field
|
|
19
|
+
|
|
20
|
+
from sycommon.config.LLMConfig import LLMConfig
|
|
21
|
+
from sycommon.llm.struct_token import StructuredRunnableWithToken
|
|
22
|
+
from sycommon.llm.output_fixing_runnable import OutputFixingRunnable
|
|
23
|
+
from sycommon.llm.native_with_fallback_runnable import NativeWithFallbackRunnable
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
class LLMWithAutoTokenUsage(BaseChatModel):
    """LLM wrapper that automatically adds token-usage reporting to structured-output calls.

    Wraps a ``BaseChatModel`` and exposes ``with_structured_output`` variants
    whose returned runnables carry token accounting and Langfuse tracing.
    All plain chat-model methods delegate to the wrapped ``llm``.
    """

    # Wrapped chat model; all generation calls are delegated to it.
    llm: BaseChatModel = Field(default=None)
    # Langfuse client used for tracing; excluded from pydantic serialization.
    langfuse: Optional[Langfuse] = Field(default=None, exclude=True)
    # Model configuration (supplies the model name for usage records).
    llmConfig: Optional[LLMConfig] = Field(default=None, exclude=True)
    # Summary prompt forwarded to the token-accounting wrapper.
    summary_prompt: Optional[str] = Field(default=None, exclude=True)
    # Default number of parse/fix attempts for structured output.
    max_retries: int = Field(default=3, exclude=True)

    def __init__(self, llm: BaseChatModel, langfuse: Langfuse, llmConfig: LLMConfig, summary_prompt: str, max_retries: int = 3, **kwargs):
        """Store the wrapped model and its tracing/config collaborators via the pydantic initializer."""
        super().__init__(llm=llm, langfuse=langfuse, llmConfig=llmConfig,
                         summary_prompt=summary_prompt, max_retries=max_retries, **kwargs)

    def with_structured_output(
        self,
        output_model: Type[BaseModel],
        max_retries: Optional[int] = None,
        is_extract: bool = False,
        override_prompt: Optional[ChatPromptTemplate] = None,
        use_native: bool = False
    ) -> Runnable:
        """Return a structured-output Runnable with automatic token accounting.

        Args:
            output_model: Pydantic model describing the expected output.
            max_retries: Maximum parse/fix attempts; falls back to
                ``self.max_retries`` when ``None``.
            is_extract: Whether to use the extraction-oriented prompt
                (adds accuracy-scoring rules to the instruction).
            override_prompt: Custom prompt template (non-extract mode only).
            use_native: Which structured-output strategy to use (default False):
                - False: OutputFixingRunnable (default, more stable)
                - True: the model's native with_structured_output, degrading
                  to the fixing logic on failure.

        Returns:
            Runnable: a Runnable producing ``output_model`` instances.
        """
        if max_retries is None:
            max_retries = self.max_retries

        if use_native:
            return self._with_native_structured_output(output_model, max_retries)
        else:
            return self._with_fixing_structured_output(
                output_model, max_retries, is_extract, override_prompt
            )

    def _with_native_structured_output(
        self,
        output_model: Type[BaseModel],
        max_retries: int
    ) -> Runnable:
        """Native mode with degradation to the fix-and-retry logic.

        Tries the model's native ``with_structured_output`` first; on failure
        the NativeWithFallbackRunnable falls back to correction logic.
        """
        native_runnable = self.llm.with_structured_output(output_model)

        native_chain = NativeWithFallbackRunnable(
            native_runnable=native_runnable,
            llm=self.llm,
            output_model=output_model,
            max_retries=max_retries
        )

        # Wrap for token accounting and Langfuse tracing.
        return StructuredRunnableWithToken(
            retry_chain=native_chain,
            langfuse=self.langfuse,
            llmConfig=self.llmConfig,
            summary_prompt=self.summary_prompt,
            # NOTE(review): "Qwen2.5-72B" is only a fallback label used when
            # no llmConfig is provided — confirm it matches the deployment.
            model_name=self.llmConfig.model if self.llmConfig else "Qwen2.5-72B",
            is_native_mode=True
        )

    def _with_fixing_structured_output(
        self,
        output_model: Type[BaseModel],
        max_retries: int,
        is_extract: bool = False,
        override_prompt: Optional[ChatPromptTemplate] = None
    ) -> Runnable:
        """Prompt-based structured output repaired by OutputFixingRunnable.

        Characteristics:
        - On parse failure the error and raw output are sent back to the
          LLM for correction.
        - Stable: does not rely on native function calling.

        Suitable when output is only occasionally malformed, reliable JSON
        parsing is needed, or the model lacks native function calling.
        """
        parser = PydanticOutputParser(pydantic_object=output_model)

        # Prompt template: extraction mode appends accuracy-scoring rules.
        if is_extract:
            # Runtime prompt text (Chinese) — part of the model-facing contract.
            accuracy_instructions = """
字段值的抽取准确率(0~1之间),评分规则:
1.0(完全准确):直接从原文提取,无需任何加工,且格式与原文完全一致
0.9(轻微处理):数据来源明确,但需进行格式标准化或冗余信息剔除(不改变原始数值)
0.8(有限推断):数据需通过上下文关联或简单计算得出,仍有明确依据
0.8以下(不可靠):数据需大量推测、存在歧义或来源不明,处理方式:直接忽略该数据,设置为None
"""
            prompt = ChatPromptTemplate.from_messages([
                MessagesPlaceholder(variable_name="messages"),
                HumanMessage(content=f"""
请提取信息并遵循以下规则:
1. 准确率要求:{accuracy_instructions.strip()}
2. 输出格式,请严格按照以下JSON格式输出,不要输出任何多余内容,不要省略任何字段:{parser.get_format_instructions()}
""")
            ])
        else:
            prompt = override_prompt or ChatPromptTemplate.from_messages([
                MessagesPlaceholder(variable_name="messages"),
                HumanMessage(content=f"""
输出格式,请严格按照以下JSON格式输出,不要输出任何多余内容,不要省略任何字段:{parser.get_format_instructions()}
""")
            ])

        # Build the LLM invocation chain.
        chain = prompt | self.llm

        # Wrap with OutputFixingRunnable for parse-failure correction.
        fixing_chain = OutputFixingRunnable(
            base_chain=chain,
            llm=self.llm,
            output_model=output_model,
            max_retries=max_retries
        )

        # Wrap for token accounting and Langfuse tracing.
        return StructuredRunnableWithToken(
            retry_chain=fixing_chain,
            langfuse=self.langfuse,
            llmConfig=self.llmConfig,
            summary_prompt=self.summary_prompt,
            # NOTE(review): fallback model label — see native-mode note.
            model_name=self.llmConfig.model if self.llmConfig else "Qwen2.5-72B",
            is_native_mode=False
        )

    # ========== BaseChatModel abstract-method implementations ==========
    def _generate(self, messages, stop=None, run_manager=None, **kwargs):
        """Synchronous generation — delegates to the wrapped LLM."""
        return self.llm._generate(messages, stop=stop, run_manager=run_manager, **kwargs)

    async def _agenerate(self, messages, stop=None, run_manager=None, **kwargs):
        """Asynchronous generation — delegates to the wrapped LLM."""
        return await self.llm._agenerate(messages, stop=stop, run_manager=run_manager, **kwargs)

    def _stream(self, messages, stop=None, run_manager=None, **kwargs):
        """Streaming generation — delegates to the wrapped LLM."""
        yield from self.llm._stream(messages, stop=stop, run_manager=run_manager, **kwargs)

    async def _astream(self, messages, stop=None, run_manager=None, **kwargs):
        """Asynchronous streaming generation — delegates to the wrapped LLM."""
        async for chunk in self.llm._astream(messages, stop=stop, run_manager=run_manager, **kwargs):
            yield chunk

    @property
    def _llm_type(self) -> str:
        # Report the underlying model's type string.
        return self.llm._llm_type
|
|
@@ -21,7 +21,7 @@ class TokenUsageMySQL(Base):
|
|
|
21
21
|
__tablename__ = "token_usage_daily"
|
|
22
22
|
|
|
23
23
|
# 主键
|
|
24
|
-
id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True)
|
|
24
|
+
id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True, comment="主键ID")
|
|
25
25
|
|
|
26
26
|
# 业务字段
|
|
27
27
|
user_id: Mapped[Optional[str]] = mapped_column(String(128), nullable=True, comment="用户ID")
|
|
@@ -46,7 +46,7 @@ class TokenUsageMySQL(Base):
|
|
|
46
46
|
|
|
47
47
|
# 联合唯一索引:用户+服务名+环境+日期
|
|
48
48
|
__table_args__ = (
|
|
49
|
-
UniqueConstraint('user_id', 'service_name', 'system_env', 'usage_date', name='
|
|
49
|
+
UniqueConstraint('user_id', 'service_name', 'system_env', 'usage_date', name='uniq_user_service_env_date'),
|
|
50
50
|
Index('idx_user_id', 'user_id'),
|
|
51
51
|
Index('idx_service_name', 'service_name'),
|
|
52
52
|
Index('idx_tenant_id', 'tenant_id'),
|