genesis_flow-1.0.0-py3-none-any.whl
This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- genesis_flow-1.0.0.dist-info/METADATA +822 -0
- genesis_flow-1.0.0.dist-info/RECORD +645 -0
- genesis_flow-1.0.0.dist-info/WHEEL +5 -0
- genesis_flow-1.0.0.dist-info/entry_points.txt +19 -0
- genesis_flow-1.0.0.dist-info/licenses/LICENSE.txt +202 -0
- genesis_flow-1.0.0.dist-info/top_level.txt +1 -0
- mlflow/__init__.py +367 -0
- mlflow/__main__.py +3 -0
- mlflow/ag2/__init__.py +56 -0
- mlflow/ag2/ag2_logger.py +294 -0
- mlflow/anthropic/__init__.py +40 -0
- mlflow/anthropic/autolog.py +129 -0
- mlflow/anthropic/chat.py +144 -0
- mlflow/artifacts/__init__.py +268 -0
- mlflow/autogen/__init__.py +144 -0
- mlflow/autogen/chat.py +142 -0
- mlflow/azure/__init__.py +26 -0
- mlflow/azure/auth_handler.py +257 -0
- mlflow/azure/client.py +319 -0
- mlflow/azure/config.py +120 -0
- mlflow/azure/connection_factory.py +340 -0
- mlflow/azure/exceptions.py +27 -0
- mlflow/azure/stores.py +327 -0
- mlflow/azure/utils.py +183 -0
- mlflow/bedrock/__init__.py +45 -0
- mlflow/bedrock/_autolog.py +202 -0
- mlflow/bedrock/chat.py +122 -0
- mlflow/bedrock/stream.py +160 -0
- mlflow/bedrock/utils.py +43 -0
- mlflow/cli.py +707 -0
- mlflow/client.py +12 -0
- mlflow/config/__init__.py +56 -0
- mlflow/crewai/__init__.py +79 -0
- mlflow/crewai/autolog.py +253 -0
- mlflow/crewai/chat.py +29 -0
- mlflow/data/__init__.py +75 -0
- mlflow/data/artifact_dataset_sources.py +170 -0
- mlflow/data/code_dataset_source.py +40 -0
- mlflow/data/dataset.py +123 -0
- mlflow/data/dataset_registry.py +168 -0
- mlflow/data/dataset_source.py +110 -0
- mlflow/data/dataset_source_registry.py +219 -0
- mlflow/data/delta_dataset_source.py +167 -0
- mlflow/data/digest_utils.py +108 -0
- mlflow/data/evaluation_dataset.py +562 -0
- mlflow/data/filesystem_dataset_source.py +81 -0
- mlflow/data/http_dataset_source.py +145 -0
- mlflow/data/huggingface_dataset.py +258 -0
- mlflow/data/huggingface_dataset_source.py +118 -0
- mlflow/data/meta_dataset.py +104 -0
- mlflow/data/numpy_dataset.py +223 -0
- mlflow/data/pandas_dataset.py +231 -0
- mlflow/data/polars_dataset.py +352 -0
- mlflow/data/pyfunc_dataset_mixin.py +31 -0
- mlflow/data/schema.py +76 -0
- mlflow/data/sources.py +1 -0
- mlflow/data/spark_dataset.py +406 -0
- mlflow/data/spark_dataset_source.py +74 -0
- mlflow/data/spark_delta_utils.py +118 -0
- mlflow/data/tensorflow_dataset.py +350 -0
- mlflow/data/uc_volume_dataset_source.py +81 -0
- mlflow/db.py +27 -0
- mlflow/dspy/__init__.py +17 -0
- mlflow/dspy/autolog.py +197 -0
- mlflow/dspy/callback.py +398 -0
- mlflow/dspy/constant.py +1 -0
- mlflow/dspy/load.py +93 -0
- mlflow/dspy/save.py +393 -0
- mlflow/dspy/util.py +109 -0
- mlflow/dspy/wrapper.py +226 -0
- mlflow/entities/__init__.py +104 -0
- mlflow/entities/_mlflow_object.py +52 -0
- mlflow/entities/assessment.py +545 -0
- mlflow/entities/assessment_error.py +80 -0
- mlflow/entities/assessment_source.py +141 -0
- mlflow/entities/dataset.py +92 -0
- mlflow/entities/dataset_input.py +51 -0
- mlflow/entities/dataset_summary.py +62 -0
- mlflow/entities/document.py +48 -0
- mlflow/entities/experiment.py +109 -0
- mlflow/entities/experiment_tag.py +35 -0
- mlflow/entities/file_info.py +45 -0
- mlflow/entities/input_tag.py +35 -0
- mlflow/entities/lifecycle_stage.py +35 -0
- mlflow/entities/logged_model.py +228 -0
- mlflow/entities/logged_model_input.py +26 -0
- mlflow/entities/logged_model_output.py +32 -0
- mlflow/entities/logged_model_parameter.py +46 -0
- mlflow/entities/logged_model_status.py +74 -0
- mlflow/entities/logged_model_tag.py +33 -0
- mlflow/entities/metric.py +200 -0
- mlflow/entities/model_registry/__init__.py +29 -0
- mlflow/entities/model_registry/_model_registry_entity.py +13 -0
- mlflow/entities/model_registry/model_version.py +243 -0
- mlflow/entities/model_registry/model_version_deployment_job_run_state.py +44 -0
- mlflow/entities/model_registry/model_version_deployment_job_state.py +70 -0
- mlflow/entities/model_registry/model_version_search.py +25 -0
- mlflow/entities/model_registry/model_version_stages.py +25 -0
- mlflow/entities/model_registry/model_version_status.py +35 -0
- mlflow/entities/model_registry/model_version_tag.py +35 -0
- mlflow/entities/model_registry/prompt.py +73 -0
- mlflow/entities/model_registry/prompt_version.py +244 -0
- mlflow/entities/model_registry/registered_model.py +175 -0
- mlflow/entities/model_registry/registered_model_alias.py +35 -0
- mlflow/entities/model_registry/registered_model_deployment_job_state.py +39 -0
- mlflow/entities/model_registry/registered_model_search.py +25 -0
- mlflow/entities/model_registry/registered_model_tag.py +35 -0
- mlflow/entities/multipart_upload.py +74 -0
- mlflow/entities/param.py +49 -0
- mlflow/entities/run.py +97 -0
- mlflow/entities/run_data.py +84 -0
- mlflow/entities/run_info.py +188 -0
- mlflow/entities/run_inputs.py +59 -0
- mlflow/entities/run_outputs.py +43 -0
- mlflow/entities/run_status.py +41 -0
- mlflow/entities/run_tag.py +36 -0
- mlflow/entities/source_type.py +31 -0
- mlflow/entities/span.py +774 -0
- mlflow/entities/span_event.py +96 -0
- mlflow/entities/span_status.py +102 -0
- mlflow/entities/trace.py +317 -0
- mlflow/entities/trace_data.py +71 -0
- mlflow/entities/trace_info.py +220 -0
- mlflow/entities/trace_info_v2.py +162 -0
- mlflow/entities/trace_location.py +173 -0
- mlflow/entities/trace_state.py +39 -0
- mlflow/entities/trace_status.py +68 -0
- mlflow/entities/view_type.py +51 -0
- mlflow/environment_variables.py +866 -0
- mlflow/evaluation/__init__.py +16 -0
- mlflow/evaluation/assessment.py +369 -0
- mlflow/evaluation/evaluation.py +411 -0
- mlflow/evaluation/evaluation_tag.py +61 -0
- mlflow/evaluation/fluent.py +48 -0
- mlflow/evaluation/utils.py +201 -0
- mlflow/exceptions.py +213 -0
- mlflow/experiments.py +140 -0
- mlflow/gemini/__init__.py +81 -0
- mlflow/gemini/autolog.py +186 -0
- mlflow/gemini/chat.py +261 -0
- mlflow/genai/__init__.py +71 -0
- mlflow/genai/datasets/__init__.py +67 -0
- mlflow/genai/datasets/evaluation_dataset.py +131 -0
- mlflow/genai/evaluation/__init__.py +3 -0
- mlflow/genai/evaluation/base.py +411 -0
- mlflow/genai/evaluation/constant.py +23 -0
- mlflow/genai/evaluation/utils.py +244 -0
- mlflow/genai/judges/__init__.py +21 -0
- mlflow/genai/judges/databricks.py +404 -0
- mlflow/genai/label_schemas/__init__.py +153 -0
- mlflow/genai/label_schemas/label_schemas.py +209 -0
- mlflow/genai/labeling/__init__.py +159 -0
- mlflow/genai/labeling/labeling.py +250 -0
- mlflow/genai/optimize/__init__.py +13 -0
- mlflow/genai/optimize/base.py +198 -0
- mlflow/genai/optimize/optimizers/__init__.py +4 -0
- mlflow/genai/optimize/optimizers/base_optimizer.py +38 -0
- mlflow/genai/optimize/optimizers/dspy_mipro_optimizer.py +221 -0
- mlflow/genai/optimize/optimizers/dspy_optimizer.py +91 -0
- mlflow/genai/optimize/optimizers/utils/dspy_mipro_callback.py +76 -0
- mlflow/genai/optimize/optimizers/utils/dspy_mipro_utils.py +18 -0
- mlflow/genai/optimize/types.py +75 -0
- mlflow/genai/optimize/util.py +30 -0
- mlflow/genai/prompts/__init__.py +206 -0
- mlflow/genai/scheduled_scorers.py +431 -0
- mlflow/genai/scorers/__init__.py +26 -0
- mlflow/genai/scorers/base.py +492 -0
- mlflow/genai/scorers/builtin_scorers.py +765 -0
- mlflow/genai/scorers/scorer_utils.py +138 -0
- mlflow/genai/scorers/validation.py +165 -0
- mlflow/genai/utils/data_validation.py +146 -0
- mlflow/genai/utils/enum_utils.py +23 -0
- mlflow/genai/utils/trace_utils.py +211 -0
- mlflow/groq/__init__.py +42 -0
- mlflow/groq/_groq_autolog.py +74 -0
- mlflow/johnsnowlabs/__init__.py +888 -0
- mlflow/langchain/__init__.py +24 -0
- mlflow/langchain/api_request_parallel_processor.py +330 -0
- mlflow/langchain/autolog.py +147 -0
- mlflow/langchain/chat_agent_langgraph.py +340 -0
- mlflow/langchain/constant.py +1 -0
- mlflow/langchain/constants.py +1 -0
- mlflow/langchain/databricks_dependencies.py +444 -0
- mlflow/langchain/langchain_tracer.py +597 -0
- mlflow/langchain/model.py +919 -0
- mlflow/langchain/output_parsers.py +142 -0
- mlflow/langchain/retriever_chain.py +153 -0
- mlflow/langchain/runnables.py +527 -0
- mlflow/langchain/utils/chat.py +402 -0
- mlflow/langchain/utils/logging.py +671 -0
- mlflow/langchain/utils/serialization.py +36 -0
- mlflow/legacy_databricks_cli/__init__.py +0 -0
- mlflow/legacy_databricks_cli/configure/__init__.py +0 -0
- mlflow/legacy_databricks_cli/configure/provider.py +482 -0
- mlflow/litellm/__init__.py +175 -0
- mlflow/llama_index/__init__.py +22 -0
- mlflow/llama_index/autolog.py +55 -0
- mlflow/llama_index/chat.py +43 -0
- mlflow/llama_index/constant.py +1 -0
- mlflow/llama_index/model.py +577 -0
- mlflow/llama_index/pyfunc_wrapper.py +332 -0
- mlflow/llama_index/serialize_objects.py +188 -0
- mlflow/llama_index/tracer.py +561 -0
- mlflow/metrics/__init__.py +479 -0
- mlflow/metrics/base.py +39 -0
- mlflow/metrics/genai/__init__.py +25 -0
- mlflow/metrics/genai/base.py +101 -0
- mlflow/metrics/genai/genai_metric.py +771 -0
- mlflow/metrics/genai/metric_definitions.py +450 -0
- mlflow/metrics/genai/model_utils.py +371 -0
- mlflow/metrics/genai/prompt_template.py +68 -0
- mlflow/metrics/genai/prompts/__init__.py +0 -0
- mlflow/metrics/genai/prompts/v1.py +422 -0
- mlflow/metrics/genai/utils.py +6 -0
- mlflow/metrics/metric_definitions.py +619 -0
- mlflow/mismatch.py +34 -0
- mlflow/mistral/__init__.py +34 -0
- mlflow/mistral/autolog.py +71 -0
- mlflow/mistral/chat.py +135 -0
- mlflow/ml_package_versions.py +452 -0
- mlflow/models/__init__.py +97 -0
- mlflow/models/auth_policy.py +83 -0
- mlflow/models/cli.py +354 -0
- mlflow/models/container/__init__.py +294 -0
- mlflow/models/container/scoring_server/__init__.py +0 -0
- mlflow/models/container/scoring_server/nginx.conf +39 -0
- mlflow/models/dependencies_schemas.py +287 -0
- mlflow/models/display_utils.py +158 -0
- mlflow/models/docker_utils.py +211 -0
- mlflow/models/evaluation/__init__.py +23 -0
- mlflow/models/evaluation/_shap_patch.py +64 -0
- mlflow/models/evaluation/artifacts.py +194 -0
- mlflow/models/evaluation/base.py +1811 -0
- mlflow/models/evaluation/calibration_curve.py +109 -0
- mlflow/models/evaluation/default_evaluator.py +996 -0
- mlflow/models/evaluation/deprecated.py +23 -0
- mlflow/models/evaluation/evaluator_registry.py +80 -0
- mlflow/models/evaluation/evaluators/classifier.py +704 -0
- mlflow/models/evaluation/evaluators/default.py +233 -0
- mlflow/models/evaluation/evaluators/regressor.py +96 -0
- mlflow/models/evaluation/evaluators/shap.py +296 -0
- mlflow/models/evaluation/lift_curve.py +178 -0
- mlflow/models/evaluation/utils/metric.py +123 -0
- mlflow/models/evaluation/utils/trace.py +179 -0
- mlflow/models/evaluation/validation.py +434 -0
- mlflow/models/flavor_backend.py +93 -0
- mlflow/models/flavor_backend_registry.py +53 -0
- mlflow/models/model.py +1639 -0
- mlflow/models/model_config.py +150 -0
- mlflow/models/notebook_resources/agent_evaluation_template.html +235 -0
- mlflow/models/notebook_resources/eval_with_dataset_example.py +22 -0
- mlflow/models/notebook_resources/eval_with_synthetic_example.py +22 -0
- mlflow/models/python_api.py +369 -0
- mlflow/models/rag_signatures.py +128 -0
- mlflow/models/resources.py +321 -0
- mlflow/models/signature.py +662 -0
- mlflow/models/utils.py +2054 -0
- mlflow/models/wheeled_model.py +280 -0
- mlflow/openai/__init__.py +57 -0
- mlflow/openai/_agent_tracer.py +364 -0
- mlflow/openai/api_request_parallel_processor.py +131 -0
- mlflow/openai/autolog.py +509 -0
- mlflow/openai/constant.py +1 -0
- mlflow/openai/model.py +824 -0
- mlflow/openai/utils/chat_schema.py +367 -0
- mlflow/optuna/__init__.py +3 -0
- mlflow/optuna/storage.py +646 -0
- mlflow/plugins/__init__.py +72 -0
- mlflow/plugins/base.py +358 -0
- mlflow/plugins/builtin/__init__.py +24 -0
- mlflow/plugins/builtin/pytorch_plugin.py +150 -0
- mlflow/plugins/builtin/sklearn_plugin.py +158 -0
- mlflow/plugins/builtin/transformers_plugin.py +187 -0
- mlflow/plugins/cli.py +321 -0
- mlflow/plugins/discovery.py +340 -0
- mlflow/plugins/manager.py +465 -0
- mlflow/plugins/registry.py +316 -0
- mlflow/plugins/templates/framework_plugin_template.py +329 -0
- mlflow/prompt/constants.py +20 -0
- mlflow/prompt/promptlab_model.py +197 -0
- mlflow/prompt/registry_utils.py +248 -0
- mlflow/promptflow/__init__.py +495 -0
- mlflow/protos/__init__.py +0 -0
- mlflow/protos/assessments_pb2.py +174 -0
- mlflow/protos/databricks_artifacts_pb2.py +489 -0
- mlflow/protos/databricks_filesystem_service_pb2.py +196 -0
- mlflow/protos/databricks_managed_catalog_messages_pb2.py +95 -0
- mlflow/protos/databricks_managed_catalog_service_pb2.py +86 -0
- mlflow/protos/databricks_pb2.py +267 -0
- mlflow/protos/databricks_trace_server_pb2.py +374 -0
- mlflow/protos/databricks_uc_registry_messages_pb2.py +1249 -0
- mlflow/protos/databricks_uc_registry_service_pb2.py +170 -0
- mlflow/protos/facet_feature_statistics_pb2.py +296 -0
- mlflow/protos/internal_pb2.py +77 -0
- mlflow/protos/mlflow_artifacts_pb2.py +336 -0
- mlflow/protos/model_registry_pb2.py +1073 -0
- mlflow/protos/scalapb/__init__.py +0 -0
- mlflow/protos/scalapb/scalapb_pb2.py +104 -0
- mlflow/protos/service_pb2.py +2600 -0
- mlflow/protos/unity_catalog_oss_messages_pb2.py +457 -0
- mlflow/protos/unity_catalog_oss_service_pb2.py +130 -0
- mlflow/protos/unity_catalog_prompt_messages_pb2.py +447 -0
- mlflow/protos/unity_catalog_prompt_messages_pb2_grpc.py +24 -0
- mlflow/protos/unity_catalog_prompt_service_pb2.py +164 -0
- mlflow/protos/unity_catalog_prompt_service_pb2_grpc.py +785 -0
- mlflow/py.typed +0 -0
- mlflow/pydantic_ai/__init__.py +57 -0
- mlflow/pydantic_ai/autolog.py +173 -0
- mlflow/pyfunc/__init__.py +3844 -0
- mlflow/pyfunc/_mlflow_pyfunc_backend_predict.py +61 -0
- mlflow/pyfunc/backend.py +523 -0
- mlflow/pyfunc/context.py +78 -0
- mlflow/pyfunc/dbconnect_artifact_cache.py +144 -0
- mlflow/pyfunc/loaders/__init__.py +7 -0
- mlflow/pyfunc/loaders/chat_agent.py +117 -0
- mlflow/pyfunc/loaders/chat_model.py +125 -0
- mlflow/pyfunc/loaders/code_model.py +31 -0
- mlflow/pyfunc/loaders/responses_agent.py +112 -0
- mlflow/pyfunc/mlserver.py +46 -0
- mlflow/pyfunc/model.py +1473 -0
- mlflow/pyfunc/scoring_server/__init__.py +604 -0
- mlflow/pyfunc/scoring_server/app.py +7 -0
- mlflow/pyfunc/scoring_server/client.py +146 -0
- mlflow/pyfunc/spark_model_cache.py +48 -0
- mlflow/pyfunc/stdin_server.py +44 -0
- mlflow/pyfunc/utils/__init__.py +3 -0
- mlflow/pyfunc/utils/data_validation.py +224 -0
- mlflow/pyfunc/utils/environment.py +22 -0
- mlflow/pyfunc/utils/input_converter.py +47 -0
- mlflow/pyfunc/utils/serving_data_parser.py +11 -0
- mlflow/pytorch/__init__.py +1171 -0
- mlflow/pytorch/_lightning_autolog.py +580 -0
- mlflow/pytorch/_pytorch_autolog.py +50 -0
- mlflow/pytorch/pickle_module.py +35 -0
- mlflow/rfunc/__init__.py +42 -0
- mlflow/rfunc/backend.py +134 -0
- mlflow/runs.py +89 -0
- mlflow/server/__init__.py +302 -0
- mlflow/server/auth/__init__.py +1224 -0
- mlflow/server/auth/__main__.py +4 -0
- mlflow/server/auth/basic_auth.ini +6 -0
- mlflow/server/auth/cli.py +11 -0
- mlflow/server/auth/client.py +537 -0
- mlflow/server/auth/config.py +34 -0
- mlflow/server/auth/db/__init__.py +0 -0
- mlflow/server/auth/db/cli.py +18 -0
- mlflow/server/auth/db/migrations/__init__.py +0 -0
- mlflow/server/auth/db/migrations/alembic.ini +110 -0
- mlflow/server/auth/db/migrations/env.py +76 -0
- mlflow/server/auth/db/migrations/versions/8606fa83a998_initial_migration.py +51 -0
- mlflow/server/auth/db/migrations/versions/__init__.py +0 -0
- mlflow/server/auth/db/models.py +67 -0
- mlflow/server/auth/db/utils.py +37 -0
- mlflow/server/auth/entities.py +165 -0
- mlflow/server/auth/logo.py +14 -0
- mlflow/server/auth/permissions.py +65 -0
- mlflow/server/auth/routes.py +18 -0
- mlflow/server/auth/sqlalchemy_store.py +263 -0
- mlflow/server/graphql/__init__.py +0 -0
- mlflow/server/graphql/autogenerated_graphql_schema.py +353 -0
- mlflow/server/graphql/graphql_custom_scalars.py +24 -0
- mlflow/server/graphql/graphql_errors.py +15 -0
- mlflow/server/graphql/graphql_no_batching.py +89 -0
- mlflow/server/graphql/graphql_schema_extensions.py +74 -0
- mlflow/server/handlers.py +3217 -0
- mlflow/server/prometheus_exporter.py +17 -0
- mlflow/server/validation.py +30 -0
- mlflow/shap/__init__.py +691 -0
- mlflow/sklearn/__init__.py +1994 -0
- mlflow/sklearn/utils.py +1041 -0
- mlflow/smolagents/__init__.py +66 -0
- mlflow/smolagents/autolog.py +139 -0
- mlflow/smolagents/chat.py +29 -0
- mlflow/store/__init__.py +10 -0
- mlflow/store/_unity_catalog/__init__.py +1 -0
- mlflow/store/_unity_catalog/lineage/__init__.py +1 -0
- mlflow/store/_unity_catalog/lineage/constants.py +2 -0
- mlflow/store/_unity_catalog/registry/__init__.py +6 -0
- mlflow/store/_unity_catalog/registry/prompt_info.py +75 -0
- mlflow/store/_unity_catalog/registry/rest_store.py +1740 -0
- mlflow/store/_unity_catalog/registry/uc_oss_rest_store.py +507 -0
- mlflow/store/_unity_catalog/registry/utils.py +121 -0
- mlflow/store/artifact/__init__.py +0 -0
- mlflow/store/artifact/artifact_repo.py +472 -0
- mlflow/store/artifact/artifact_repository_registry.py +154 -0
- mlflow/store/artifact/azure_blob_artifact_repo.py +275 -0
- mlflow/store/artifact/azure_data_lake_artifact_repo.py +295 -0
- mlflow/store/artifact/cli.py +141 -0
- mlflow/store/artifact/cloud_artifact_repo.py +332 -0
- mlflow/store/artifact/databricks_artifact_repo.py +729 -0
- mlflow/store/artifact/databricks_artifact_repo_resources.py +301 -0
- mlflow/store/artifact/databricks_logged_model_artifact_repo.py +93 -0
- mlflow/store/artifact/databricks_models_artifact_repo.py +216 -0
- mlflow/store/artifact/databricks_sdk_artifact_repo.py +134 -0
- mlflow/store/artifact/databricks_sdk_models_artifact_repo.py +97 -0
- mlflow/store/artifact/dbfs_artifact_repo.py +240 -0
- mlflow/store/artifact/ftp_artifact_repo.py +132 -0
- mlflow/store/artifact/gcs_artifact_repo.py +296 -0
- mlflow/store/artifact/hdfs_artifact_repo.py +209 -0
- mlflow/store/artifact/http_artifact_repo.py +218 -0
- mlflow/store/artifact/local_artifact_repo.py +142 -0
- mlflow/store/artifact/mlflow_artifacts_repo.py +94 -0
- mlflow/store/artifact/models_artifact_repo.py +259 -0
- mlflow/store/artifact/optimized_s3_artifact_repo.py +356 -0
- mlflow/store/artifact/presigned_url_artifact_repo.py +173 -0
- mlflow/store/artifact/r2_artifact_repo.py +70 -0
- mlflow/store/artifact/runs_artifact_repo.py +265 -0
- mlflow/store/artifact/s3_artifact_repo.py +330 -0
- mlflow/store/artifact/sftp_artifact_repo.py +141 -0
- mlflow/store/artifact/uc_volume_artifact_repo.py +76 -0
- mlflow/store/artifact/unity_catalog_models_artifact_repo.py +168 -0
- mlflow/store/artifact/unity_catalog_oss_models_artifact_repo.py +168 -0
- mlflow/store/artifact/utils/__init__.py +0 -0
- mlflow/store/artifact/utils/models.py +148 -0
- mlflow/store/db/__init__.py +0 -0
- mlflow/store/db/base_sql_model.py +3 -0
- mlflow/store/db/db_types.py +10 -0
- mlflow/store/db/utils.py +314 -0
- mlflow/store/db_migrations/__init__.py +0 -0
- mlflow/store/db_migrations/alembic.ini +74 -0
- mlflow/store/db_migrations/env.py +84 -0
- mlflow/store/db_migrations/versions/0584bdc529eb_add_cascading_deletion_to_datasets_from_experiments.py +88 -0
- mlflow/store/db_migrations/versions/0a8213491aaa_drop_duplicate_killed_constraint.py +49 -0
- mlflow/store/db_migrations/versions/0c779009ac13_add_deleted_time_field_to_runs_table.py +24 -0
- mlflow/store/db_migrations/versions/181f10493468_allow_nulls_for_metric_values.py +35 -0
- mlflow/store/db_migrations/versions/27a6a02d2cf1_add_model_version_tags_table.py +38 -0
- mlflow/store/db_migrations/versions/2b4d017a5e9b_add_model_registry_tables_to_db.py +77 -0
- mlflow/store/db_migrations/versions/2d6e25af4d3e_increase_max_param_val_length.py +33 -0
- mlflow/store/db_migrations/versions/3500859a5d39_add_model_aliases_table.py +50 -0
- mlflow/store/db_migrations/versions/39d1c3be5f05_add_is_nan_constraint_for_metrics_tables_if_necessary.py +41 -0
- mlflow/store/db_migrations/versions/400f98739977_add_logged_model_tables.py +123 -0
- mlflow/store/db_migrations/versions/4465047574b1_increase_max_dataset_schema_size.py +38 -0
- mlflow/store/db_migrations/versions/451aebb31d03_add_metric_step.py +35 -0
- mlflow/store/db_migrations/versions/5b0e9adcef9c_add_cascade_deletion_to_trace_tables_fk.py +40 -0
- mlflow/store/db_migrations/versions/6953534de441_add_step_to_inputs_table.py +25 -0
- mlflow/store/db_migrations/versions/728d730b5ebd_add_registered_model_tags_table.py +38 -0
- mlflow/store/db_migrations/versions/7ac759974ad8_update_run_tags_with_larger_limit.py +36 -0
- mlflow/store/db_migrations/versions/7f2a7d5fae7d_add_datasets_inputs_input_tags_tables.py +82 -0
- mlflow/store/db_migrations/versions/84291f40a231_add_run_link_to_model_version.py +26 -0
- mlflow/store/db_migrations/versions/867495a8f9d4_add_trace_tables.py +90 -0
- mlflow/store/db_migrations/versions/89d4b8295536_create_latest_metrics_table.py +169 -0
- mlflow/store/db_migrations/versions/90e64c465722_migrate_user_column_to_tags.py +64 -0
- mlflow/store/db_migrations/versions/97727af70f4d_creation_time_last_update_time_experiments.py +25 -0
- mlflow/store/db_migrations/versions/__init__.py +0 -0
- mlflow/store/db_migrations/versions/a8c4a736bde6_allow_nulls_for_run_id.py +27 -0
- mlflow/store/db_migrations/versions/acf3f17fdcc7_add_storage_location_field_to_model_.py +29 -0
- mlflow/store/db_migrations/versions/bd07f7e963c5_create_index_on_run_uuid.py +26 -0
- mlflow/store/db_migrations/versions/bda7b8c39065_increase_model_version_tag_value_limit.py +38 -0
- mlflow/store/db_migrations/versions/c48cb773bb87_reset_default_value_for_is_nan_in_metrics_table_for_mysql.py +41 -0
- mlflow/store/db_migrations/versions/cbc13b556ace_add_v3_trace_schema_columns.py +31 -0
- mlflow/store/db_migrations/versions/cc1f77228345_change_param_value_length_to_500.py +34 -0
- mlflow/store/db_migrations/versions/cfd24bdc0731_update_run_status_constraint_with_killed.py +78 -0
- mlflow/store/db_migrations/versions/df50e92ffc5e_add_experiment_tags_table.py +38 -0
- mlflow/store/db_migrations/versions/f5a4f2784254_increase_run_tag_value_limit.py +36 -0
- mlflow/store/entities/__init__.py +3 -0
- mlflow/store/entities/paged_list.py +18 -0
- mlflow/store/model_registry/__init__.py +10 -0
- mlflow/store/model_registry/abstract_store.py +1081 -0
- mlflow/store/model_registry/base_rest_store.py +44 -0
- mlflow/store/model_registry/databricks_workspace_model_registry_rest_store.py +37 -0
- mlflow/store/model_registry/dbmodels/__init__.py +0 -0
- mlflow/store/model_registry/dbmodels/models.py +206 -0
- mlflow/store/model_registry/file_store.py +1091 -0
- mlflow/store/model_registry/rest_store.py +481 -0
- mlflow/store/model_registry/sqlalchemy_store.py +1286 -0
- mlflow/store/tracking/__init__.py +23 -0
- mlflow/store/tracking/abstract_store.py +816 -0
- mlflow/store/tracking/dbmodels/__init__.py +0 -0
- mlflow/store/tracking/dbmodels/initial_models.py +243 -0
- mlflow/store/tracking/dbmodels/models.py +1073 -0
- mlflow/store/tracking/file_store.py +2438 -0
- mlflow/store/tracking/postgres_managed_identity.py +146 -0
- mlflow/store/tracking/rest_store.py +1131 -0
- mlflow/store/tracking/sqlalchemy_store.py +2785 -0
- mlflow/system_metrics/__init__.py +61 -0
- mlflow/system_metrics/metrics/__init__.py +0 -0
- mlflow/system_metrics/metrics/base_metrics_monitor.py +32 -0
- mlflow/system_metrics/metrics/cpu_monitor.py +23 -0
- mlflow/system_metrics/metrics/disk_monitor.py +21 -0
- mlflow/system_metrics/metrics/gpu_monitor.py +71 -0
- mlflow/system_metrics/metrics/network_monitor.py +34 -0
- mlflow/system_metrics/metrics/rocm_monitor.py +123 -0
- mlflow/system_metrics/system_metrics_monitor.py +198 -0
- mlflow/tracing/__init__.py +16 -0
- mlflow/tracing/assessment.py +356 -0
- mlflow/tracing/client.py +531 -0
- mlflow/tracing/config.py +125 -0
- mlflow/tracing/constant.py +105 -0
- mlflow/tracing/destination.py +81 -0
- mlflow/tracing/display/__init__.py +40 -0
- mlflow/tracing/display/display_handler.py +196 -0
- mlflow/tracing/export/async_export_queue.py +186 -0
- mlflow/tracing/export/inference_table.py +138 -0
- mlflow/tracing/export/mlflow_v3.py +137 -0
- mlflow/tracing/export/utils.py +70 -0
- mlflow/tracing/fluent.py +1417 -0
- mlflow/tracing/processor/base_mlflow.py +199 -0
- mlflow/tracing/processor/inference_table.py +175 -0
- mlflow/tracing/processor/mlflow_v3.py +47 -0
- mlflow/tracing/processor/otel.py +73 -0
- mlflow/tracing/provider.py +487 -0
- mlflow/tracing/trace_manager.py +200 -0
- mlflow/tracing/utils/__init__.py +616 -0
- mlflow/tracing/utils/artifact_utils.py +28 -0
- mlflow/tracing/utils/copy.py +55 -0
- mlflow/tracing/utils/environment.py +55 -0
- mlflow/tracing/utils/exception.py +21 -0
- mlflow/tracing/utils/once.py +35 -0
- mlflow/tracing/utils/otlp.py +63 -0
- mlflow/tracing/utils/processor.py +54 -0
- mlflow/tracing/utils/search.py +292 -0
- mlflow/tracing/utils/timeout.py +250 -0
- mlflow/tracing/utils/token.py +19 -0
- mlflow/tracing/utils/truncation.py +124 -0
- mlflow/tracing/utils/warning.py +76 -0
- mlflow/tracking/__init__.py +39 -0
- mlflow/tracking/_model_registry/__init__.py +1 -0
- mlflow/tracking/_model_registry/client.py +764 -0
- mlflow/tracking/_model_registry/fluent.py +853 -0
- mlflow/tracking/_model_registry/registry.py +67 -0
- mlflow/tracking/_model_registry/utils.py +251 -0
- mlflow/tracking/_tracking_service/__init__.py +0 -0
- mlflow/tracking/_tracking_service/client.py +883 -0
- mlflow/tracking/_tracking_service/registry.py +56 -0
- mlflow/tracking/_tracking_service/utils.py +275 -0
- mlflow/tracking/artifact_utils.py +179 -0
- mlflow/tracking/client.py +5900 -0
- mlflow/tracking/context/__init__.py +0 -0
- mlflow/tracking/context/abstract_context.py +35 -0
- mlflow/tracking/context/databricks_cluster_context.py +15 -0
- mlflow/tracking/context/databricks_command_context.py +15 -0
- mlflow/tracking/context/databricks_job_context.py +49 -0
- mlflow/tracking/context/databricks_notebook_context.py +41 -0
- mlflow/tracking/context/databricks_repo_context.py +43 -0
- mlflow/tracking/context/default_context.py +51 -0
- mlflow/tracking/context/git_context.py +32 -0
- mlflow/tracking/context/registry.py +98 -0
- mlflow/tracking/context/system_environment_context.py +15 -0
- mlflow/tracking/default_experiment/__init__.py +1 -0
- mlflow/tracking/default_experiment/abstract_context.py +43 -0
- mlflow/tracking/default_experiment/databricks_notebook_experiment_provider.py +44 -0
- mlflow/tracking/default_experiment/registry.py +75 -0
- mlflow/tracking/fluent.py +3595 -0
- mlflow/tracking/metric_value_conversion_utils.py +93 -0
- mlflow/tracking/multimedia.py +206 -0
- mlflow/tracking/registry.py +86 -0
- mlflow/tracking/request_auth/__init__.py +0 -0
- mlflow/tracking/request_auth/abstract_request_auth_provider.py +34 -0
- mlflow/tracking/request_auth/registry.py +60 -0
- mlflow/tracking/request_header/__init__.py +0 -0
- mlflow/tracking/request_header/abstract_request_header_provider.py +36 -0
- mlflow/tracking/request_header/databricks_request_header_provider.py +38 -0
- mlflow/tracking/request_header/default_request_header_provider.py +17 -0
- mlflow/tracking/request_header/registry.py +79 -0
- mlflow/transformers/__init__.py +2982 -0
- mlflow/transformers/flavor_config.py +258 -0
- mlflow/transformers/hub_utils.py +83 -0
- mlflow/transformers/llm_inference_utils.py +468 -0
- mlflow/transformers/model_io.py +301 -0
- mlflow/transformers/peft.py +51 -0
- mlflow/transformers/signature.py +183 -0
- mlflow/transformers/torch_utils.py +55 -0
- mlflow/types/__init__.py +21 -0
- mlflow/types/agent.py +270 -0
- mlflow/types/chat.py +240 -0
- mlflow/types/llm.py +935 -0
- mlflow/types/responses.py +139 -0
- mlflow/types/responses_helpers.py +416 -0
- mlflow/types/schema.py +1505 -0
- mlflow/types/type_hints.py +647 -0
- mlflow/types/utils.py +753 -0
- mlflow/utils/__init__.py +283 -0
- mlflow/utils/_capture_modules.py +256 -0
- mlflow/utils/_capture_transformers_modules.py +75 -0
- mlflow/utils/_spark_utils.py +201 -0
- mlflow/utils/_unity_catalog_oss_utils.py +97 -0
- mlflow/utils/_unity_catalog_utils.py +479 -0
- mlflow/utils/annotations.py +218 -0
- mlflow/utils/arguments_utils.py +16 -0
- mlflow/utils/async_logging/__init__.py +1 -0
- mlflow/utils/async_logging/async_artifacts_logging_queue.py +258 -0
- mlflow/utils/async_logging/async_logging_queue.py +366 -0
- mlflow/utils/async_logging/run_artifact.py +38 -0
- mlflow/utils/async_logging/run_batch.py +58 -0
- mlflow/utils/async_logging/run_operations.py +49 -0
- mlflow/utils/autologging_utils/__init__.py +737 -0
- mlflow/utils/autologging_utils/client.py +432 -0
- mlflow/utils/autologging_utils/config.py +33 -0
- mlflow/utils/autologging_utils/events.py +294 -0
- mlflow/utils/autologging_utils/logging_and_warnings.py +328 -0
- mlflow/utils/autologging_utils/metrics_queue.py +71 -0
- mlflow/utils/autologging_utils/safety.py +1104 -0
- mlflow/utils/autologging_utils/versioning.py +95 -0
- mlflow/utils/checkpoint_utils.py +206 -0
- mlflow/utils/class_utils.py +6 -0
- mlflow/utils/cli_args.py +257 -0
- mlflow/utils/conda.py +354 -0
- mlflow/utils/credentials.py +231 -0
- mlflow/utils/data_utils.py +17 -0
- mlflow/utils/databricks_utils.py +1436 -0
- mlflow/utils/docstring_utils.py +477 -0
- mlflow/utils/doctor.py +133 -0
- mlflow/utils/download_cloud_file_chunk.py +43 -0
- mlflow/utils/env_manager.py +16 -0
- mlflow/utils/env_pack.py +131 -0
- mlflow/utils/environment.py +1009 -0
- mlflow/utils/exception_utils.py +14 -0
- mlflow/utils/file_utils.py +978 -0
- mlflow/utils/git_utils.py +77 -0
- mlflow/utils/gorilla.py +797 -0
- mlflow/utils/import_hooks/__init__.py +363 -0
- mlflow/utils/lazy_load.py +51 -0
- mlflow/utils/logging_utils.py +168 -0
- mlflow/utils/mime_type_utils.py +58 -0
- mlflow/utils/mlflow_tags.py +103 -0
- mlflow/utils/model_utils.py +486 -0
- mlflow/utils/name_utils.py +346 -0
- mlflow/utils/nfs_on_spark.py +62 -0
- mlflow/utils/openai_utils.py +164 -0
- mlflow/utils/os.py +12 -0
- mlflow/utils/oss_registry_utils.py +29 -0
- mlflow/utils/plugins.py +17 -0
- mlflow/utils/process.py +182 -0
- mlflow/utils/promptlab_utils.py +146 -0
- mlflow/utils/proto_json_utils.py +743 -0
- mlflow/utils/pydantic_utils.py +54 -0
- mlflow/utils/request_utils.py +279 -0
- mlflow/utils/requirements_utils.py +704 -0
- mlflow/utils/rest_utils.py +673 -0
- mlflow/utils/search_logged_model_utils.py +127 -0
- mlflow/utils/search_utils.py +2111 -0
- mlflow/utils/secure_loading.py +221 -0
- mlflow/utils/security_validation.py +384 -0
- mlflow/utils/server_cli_utils.py +61 -0
- mlflow/utils/spark_utils.py +15 -0
- mlflow/utils/string_utils.py +138 -0
- mlflow/utils/thread_utils.py +63 -0
- mlflow/utils/time.py +54 -0
- mlflow/utils/timeout.py +42 -0
- mlflow/utils/uri.py +572 -0
- mlflow/utils/validation.py +662 -0
- mlflow/utils/virtualenv.py +458 -0
- mlflow/utils/warnings_utils.py +25 -0
- mlflow/utils/yaml_utils.py +179 -0
- mlflow/version.py +24 -0
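Note that `top_level.txt` declares a single top-level package and every source file above sits under `mlflow/`, so installing this wheel provides the `mlflow` import namespace itself. A minimal sketch of how one might verify the installation (assuming the wheel has been installed into the current environment; the printed values are illustrative, not part of the diff):

```python
# Sketch: confirm the genesis-flow distribution is installed and that it
# provides the top-level `mlflow` package listed in the RECORD above.
import importlib.metadata

import mlflow  # resolved from this wheel, per top_level.txt

print(importlib.metadata.version("genesis-flow"))  # expected to be "1.0.0"
print(mlflow.__file__)  # path inside the genesis-flow installation
```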
mlflow/openai/api_request_parallel_processor.py
ADDED
@@ -0,0 +1,131 @@
# Based ons: https://github.com/openai/openai-cookbook/blob/6df6ceff470eeba26a56de131254e775292eac22/examples/api_request_parallel_processor.py
# Several changes were made to make it work with MLflow.

"""
API REQUEST PARALLEL PROCESSOR

Using the OpenAI API to process lots of text quickly takes some care.
If you trickle in a million API requests one by one, they'll take days to complete.
If you flood a million API requests in parallel, they'll exceed the rate limits and fail with
errors. To maximize throughput, parallel requests need to be throttled to stay under rate limits.

This script parallelizes requests to the OpenAI API

Features:
- Makes requests concurrently, to maximize throughput
- Retries failed requests up to {max_attempts} times, to avoid missing data
- Logs errors, to diagnose problems with requests
"""

from __future__ import annotations

import logging
import threading
from concurrent.futures import FIRST_EXCEPTION, ThreadPoolExecutor, wait
from dataclasses import dataclass
from typing import Any, Callable

import mlflow

_logger = logging.getLogger(__name__)


@dataclass
class StatusTracker:
    """Stores metadata about the script's progress. Only one instance is created."""

    num_tasks_started: int = 0
    num_tasks_in_progress: int = 0  # script ends when this reaches 0
    num_tasks_succeeded: int = 0
    num_tasks_failed: int = 0
    num_rate_limit_errors: int = 0
    lock: threading.Lock = threading.Lock()
    error = None

    def start_task(self):
        with self.lock:
            self.num_tasks_started += 1
            self.num_tasks_in_progress += 1

    def complete_task(self, *, success: bool):
        with self.lock:
            self.num_tasks_in_progress -= 1
            if success:
                self.num_tasks_succeeded += 1
            else:
                self.num_tasks_failed += 1

    def increment_num_rate_limit_errors(self):
        with self.lock:
            self.num_rate_limit_errors += 1


def call_api(
    index: int,
    results: list[tuple[int, Any]],
    task: Callable[[], Any],
    status_tracker: StatusTracker,
):
    import openai

    status_tracker.start_task()
    try:
        result = task()
        _logger.debug(f"Request #{index} succeeded")
        status_tracker.complete_task(success=True)
        results.append((index, result))
    except openai.RateLimitError as e:
        status_tracker.complete_task(success=False)
        _logger.debug(f"Request #{index} failed with: {e}")
        status_tracker.increment_num_rate_limit_errors()
        status_tracker.error = mlflow.MlflowException(
            f"Request #{index} failed with rate limit: {e}."
        )
    except Exception as e:
        status_tracker.complete_task(success=False)
        _logger.debug(f"Request #{index} failed with: {e}")
        status_tracker.error = mlflow.MlflowException(
            f"Request #{index} failed with: {e.__cause__}"
        )


def process_api_requests(
    request_tasks: list[Callable[[], Any]],
    max_workers: int = 10,
):
    """Processes API requests in parallel"""
    # initialize trackers
    status_tracker = StatusTracker()  # single instance to track a collection of variables

    results: list[tuple[int, Any]] = []
    request_tasks_iter = enumerate(request_tasks)
    _logger.debug(f"Request pool executor will run {len(request_tasks)} requests")
    with ThreadPoolExecutor(
        max_workers=max_workers, thread_name_prefix="MlflowOpenAiApi"
    ) as executor:
        futures = [
            executor.submit(
                call_api,
                index=index,
                task=task,
                results=results,
                status_tracker=status_tracker,
            )
            for index, task in request_tasks_iter
        ]
        wait(futures, return_when=FIRST_EXCEPTION)

    # after finishing, log final status
    if status_tracker.num_tasks_failed > 0:
        if status_tracker.num_tasks_failed == 1:
            raise status_tracker.error
        raise mlflow.MlflowException(
            f"{status_tracker.num_tasks_failed} tasks failed. See logs for details."
        )
    if status_tracker.num_rate_limit_errors > 0:
        _logger.debug(
            f"{status_tracker.num_rate_limit_errors} rate limit errors received. "
            "Consider running at a lower rate."
        )

    return [res for _, res in sorted(results)]
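The module above fans zero-argument callables out to a thread pool, tracks progress in `StatusTracker`, and returns results ordered by submission index, raising an `MlflowException` when tasks fail. A minimal usage sketch follows; the `tasks` list is illustrative only, and it assumes the `openai` package is importable because `call_api` imports it:

```python
# Illustrative sketch: each callable would normally wrap an OpenAI API call.
from mlflow.openai.api_request_parallel_processor import process_api_requests

tasks = [
    lambda i=i: {"index": i, "echo": f"payload-{i}"}  # stand-in for a real request
    for i in range(5)
]

# Results are returned in submission order; any failure raises MlflowException.
results = process_api_requests(tasks, max_workers=2)
print(results)
```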
mlflow/openai/autolog.py
ADDED
@@ -0,0 +1,509 @@
import functools
import importlib.metadata
import json
import logging
import warnings
from typing import Any, AsyncIterator, Iterator

from packaging.version import Version

import mlflow
from mlflow.entities import SpanType
from mlflow.entities.span import LiveSpan
from mlflow.entities.span_event import SpanEvent
from mlflow.entities.span_status import SpanStatusCode
from mlflow.exceptions import MlflowException
from mlflow.openai.constant import FLAVOR_NAME
from mlflow.openai.utils.chat_schema import set_span_chat_attributes
from mlflow.tracing.constant import (
    STREAM_CHUNK_EVENT_NAME_FORMAT,
    STREAM_CHUNK_EVENT_VALUE_KEY,
    SpanAttributeKey,
    TokenUsageKey,
    TraceMetadataKey,
)
from mlflow.tracing.fluent import start_span_no_context
from mlflow.tracing.trace_manager import InMemoryTraceManager
from mlflow.tracing.utils import TraceJSONEncoder
from mlflow.utils.annotations import experimental
from mlflow.utils.autologging_utils import autologging_integration
from mlflow.utils.autologging_utils.config import AutoLoggingConfig
from mlflow.utils.autologging_utils.safety import safe_patch

_logger = logging.getLogger(__name__)


@experimental(version="2.14.0")
def autolog(
    disable=False,
    exclusive=False,
    disable_for_unsupported_versions=False,
    silent=False,
    log_traces=True,
):
    """
    Enables (or disables) and configures autologging from OpenAI to MLflow.
    Raises :py:class:`MlflowException <mlflow.exceptions.MlflowException>`
    if the OpenAI version < 1.0.

    Args:
        disable: If ``True``, disables the OpenAI autologging integration. If ``False``,
            enables the OpenAI autologging integration.
        exclusive: If ``True``, autologged content is not logged to user-created fluent runs.
            If ``False``, autologged content is logged to the active fluent run,
            which may be user-created.
        disable_for_unsupported_versions: If ``True``, disable autologging for versions of
            OpenAI that have not been tested against this version of the MLflow
            client or are incompatible.
        silent: If ``True``, suppress all event logs and warnings from MLflow during OpenAI
            autologging. If ``False``, show all events and warnings during OpenAI
            autologging.
        log_traces: If ``True``, traces are logged for OpenAI models. If ``False``, no traces are
            collected during inference. Default to ``True``.
    """
    if Version(importlib.metadata.version("openai")).major < 1:
        raise MlflowException("OpenAI autologging is only supported for openai >= 1.0.0")

    # This needs to be called before doing any safe-patching (otherwise safe-patch will be no-op).
    # TODO: since this implementation is inconsistent, explore a universal way to solve the issue.
    _autolog(
        disable=disable,
        exclusive=exclusive,
        disable_for_unsupported_versions=disable_for_unsupported_versions,
        silent=silent,
        log_traces=log_traces,
    )

    # Tracing OpenAI Agent SDK. This has to be done outside the function annotated with
    # `@autologging_integration` because the function is not executed when `disable=True`.
    try:
        from mlflow.openai._agent_tracer import (
            add_mlflow_trace_processor,
            remove_mlflow_trace_processor,
        )

        if log_traces and not disable:
            add_mlflow_trace_processor()
        else:
            remove_mlflow_trace_processor()
    except ImportError:
        pass


# This is required by mlflow.autolog()
autolog.integration_name = FLAVOR_NAME


# NB: The @autologging_integration annotation must be applied here, and the callback injection
# needs to happen outside the annotated function. This is because the annotated function is NOT
# executed when disable=True is passed. This prevents us from removing our callback and patching
# when autologging is turned off.
@autologging_integration(FLAVOR_NAME)
def _autolog(
    disable=False,
    exclusive=False,
    disable_for_unsupported_versions=False,
    silent=False,
    log_traces=True,
):
    from openai.resources.chat.completions import AsyncCompletions as AsyncChatCompletions
    from openai.resources.chat.completions import Completions as ChatCompletions
    from openai.resources.completions import AsyncCompletions, Completions
    from openai.resources.embeddings import AsyncEmbeddings, Embeddings

    for task in (ChatCompletions, Completions, Embeddings):
        safe_patch(FLAVOR_NAME, task, "create", patched_call)

    if hasattr(ChatCompletions, "parse"):
        # In openai>=1.92.0, `ChatCompletions` has a `parse` method:
        # https://github.com/openai/openai-python/commit/0e358ed66b317038705fb38958a449d284f3cb88
        safe_patch(FLAVOR_NAME, ChatCompletions, "parse", patched_call)

    for task in (AsyncChatCompletions, AsyncCompletions, AsyncEmbeddings):
        safe_patch(FLAVOR_NAME, task, "create", async_patched_call)

    if hasattr(AsyncChatCompletions, "parse"):
        # In openai>=1.92.0, `AsyncChatCompletions` has a `parse` method:
        # https://github.com/openai/openai-python/commit/0e358ed66b317038705fb38958a449d284f3cb88
        safe_patch(FLAVOR_NAME, AsyncChatCompletions, "parse", async_patched_call)

    try:
        from openai.resources.beta.chat.completions import AsyncCompletions, Completions
    except ImportError:
        pass
    else:
        safe_patch(FLAVOR_NAME, Completions, "parse", patched_call)
        safe_patch(FLAVOR_NAME, AsyncCompletions, "parse", async_patched_call)

    try:
        from openai.resources.responses import AsyncResponses, Responses
    except ImportError:
        pass
    else:
        safe_patch(FLAVOR_NAME, Responses, "create", patched_call)
        safe_patch(FLAVOR_NAME, AsyncResponses, "create", async_patched_call)
        safe_patch(FLAVOR_NAME, AsyncResponses, "parse", async_patched_call)
        safe_patch(FLAVOR_NAME, Responses, "parse", patched_call)

    # Patch Swarm agent to generate traces
    try:
        from swarm import Swarm

        warnings.warn(
            "Autologging for OpenAI Swarm is deprecated and will be removed in a future release. "
            "OpenAI Agent SDK is drop-in replacement for agent building and is supported by "
            "MLflow autologging. Please refer to the OpenAI Agent SDK documentation "
            "(https://github.com/openai/openai-agents-python) for more details.",
            category=FutureWarning,
            stacklevel=2,
        )

        safe_patch(
            FLAVOR_NAME,
            Swarm,
            "get_chat_completion",
            patched_agent_get_chat_completion,
        )

        safe_patch(
            FLAVOR_NAME,
            Swarm,
            "run",
            patched_swarm_run,
        )
    except ImportError:
        pass


def _get_span_type(task: type) -> str:
    from openai.resources.chat.completions import AsyncCompletions as AsyncChatCompletions
    from openai.resources.chat.completions import Completions as ChatCompletions
    from openai.resources.completions import AsyncCompletions, Completions
    from openai.resources.embeddings import AsyncEmbeddings, Embeddings

    span_type_mapping = {
        ChatCompletions: SpanType.CHAT_MODEL,
        AsyncChatCompletions: SpanType.CHAT_MODEL,
        Completions: SpanType.LLM,
        AsyncCompletions: SpanType.LLM,
        Embeddings: SpanType.EMBEDDING,
        AsyncEmbeddings: SpanType.EMBEDDING,
    }

    try:
        # Only available in openai>=1.40.0
        from openai.resources.beta.chat.completions import (
            AsyncCompletions as BetaAsyncChatCompletions,
        )
        from openai.resources.beta.chat.completions import Completions as BetaChatCompletions

        span_type_mapping[BetaChatCompletions] = SpanType.CHAT_MODEL
        span_type_mapping[BetaAsyncChatCompletions] = SpanType.CHAT_MODEL
    except ImportError:
        _logger.debug(
            "Failed to import `BetaChatCompletions` or `BetaAsyncChatCompletions`", exc_info=True
        )

    try:
        # Responses API only available in openai>=1.66.0
        from openai.resources.responses import AsyncResponses, Responses

        span_type_mapping[Responses] = SpanType.CHAT_MODEL
        span_type_mapping[AsyncResponses] = SpanType.CHAT_MODEL
    except ImportError:
        pass

    return span_type_mapping.get(task, SpanType.UNKNOWN)


def _try_parse_raw_response(response: Any) -> Any:
    """
    As documented at https://github.com/openai/openai-python/tree/52357cff50bee57ef442e94d78a0de38b4173fc2?tab=readme-ov-file#accessing-raw-response-data-eg-headers,
    a `LegacyAPIResponse` (https://github.com/openai/openai-python/blob/52357cff50bee57ef442e94d78a0de38b4173fc2/src/openai/_legacy_response.py#L45)
    object is returned when the `create` method is invoked with `with_raw_response`.
    """
    try:
        from openai._legacy_response import LegacyAPIResponse
    except ImportError:
        _logger.debug("Failed to import `LegacyAPIResponse` from `openai._legacy_response`")
        return response
    if isinstance(response, LegacyAPIResponse):
        try:
            # `parse` returns either a `pydantic.BaseModel` or a `openai.Stream` object
            # depending on whether the request has a `stream` parameter set to `True`.
            return response.parse()
        except Exception as e:
            _logger.debug(f"Failed to parse {response} (type: {response.__class__}): {e}")

    return response


def patched_call(original, self, *args, **kwargs):
    config = AutoLoggingConfig.init(flavor_name=mlflow.openai.FLAVOR_NAME)
    active_run = mlflow.active_run()
    run_id = active_run.info.run_id if active_run else None

    if config.log_traces:
        span = _start_span(self, kwargs, run_id)

    # Execute the original function
    try:
        raw_result = original(self, *args, **kwargs)
    except Exception as e:
        if config.log_traces:
            _end_span_on_exception(span, e)
        raise

    if config.log_traces:
        _end_span_on_success(span, kwargs, raw_result)

    return raw_result


async def async_patched_call(original, self, *args, **kwargs):
    config = AutoLoggingConfig.init(flavor_name=mlflow.openai.FLAVOR_NAME)
    active_run = mlflow.active_run()
    run_id = active_run.info.run_id if active_run else None

    if config.log_traces:
        span = _start_span(self, kwargs, run_id)

    # Execute the original function
    try:
        raw_result = await original(self, *args, **kwargs)
    except Exception as e:
        if config.log_traces:
            _end_span_on_exception(span, e)
        raise

    if config.log_traces:
        _end_span_on_success(span, kwargs, raw_result)

    return raw_result


def _start_span(
    instance: Any,
    inputs: dict[str, Any],
    run_id: str,
):
    # Record input parameters to attributes
    attributes = {k: v for k, v in inputs.items() if k not in ("messages", "input")}

    # If there is an active span, create a child span under it, otherwise create a new trace
    span = start_span_no_context(
        name=instance.__class__.__name__,
        span_type=_get_span_type(instance.__class__),
        inputs=inputs,
        attributes=attributes,
    )

    # Associate run ID to the trace manually, because if a new run is created by
    # autologging, it is not set as the active run thus not automatically
    # associated with the trace.
    if run_id is not None:
        tm = InMemoryTraceManager().get_instance()
        tm.set_trace_metadata(span.trace_id, TraceMetadataKey.SOURCE_RUN, run_id)

    return span


def _end_span_on_success(span: LiveSpan, inputs: dict[str, Any], raw_result: Any):
    from openai import AsyncStream, Stream

    result = _try_parse_raw_response(raw_result)

    if isinstance(result, Stream):
        # If the output is a stream, we add a hook to store the intermediate chunks
        # and then log the outputs as a single artifact when the stream ends
        def _stream_output_logging_hook(stream: Iterator) -> Iterator:
            output = []
            for i, chunk in enumerate(stream):
                _add_span_event(span, i, chunk)
                output.append(chunk)
                yield chunk
            _process_last_chunk(span, chunk, inputs, output)

        result._iterator = _stream_output_logging_hook(result._iterator)
    elif isinstance(result, AsyncStream):

        async def _stream_output_logging_hook(stream: AsyncIterator) -> AsyncIterator:
            output = []
            async for chunk in stream:
                _add_span_event(span, len(output), chunk)
                output.append(chunk)
                yield chunk
            _process_last_chunk(span, chunk, inputs, output)

        result._iterator = _stream_output_logging_hook(result._iterator)
    else:
        try:
            set_span_chat_attributes(span, inputs, result)
            span.end(outputs=result)
        except Exception as e:
            _logger.warning(f"Encountered unexpected error when ending trace: {e}", exc_info=True)


def _process_last_chunk(span: LiveSpan, chunk: Any, inputs: dict[str, Any], output: list[Any]):
    if _is_responses_final_event(chunk):
        output = chunk.response
    else:
        # Reconstruct a completion object from streaming chunks
        output = _reconstruct_completion_from_stream(output)

    # Set usage information on span if available
    if usage := getattr(chunk, "usage", None):
        usage_dict = {
            TokenUsageKey.INPUT_TOKENS: usage.prompt_tokens,
            TokenUsageKey.OUTPUT_TOKENS: usage.completion_tokens,
            TokenUsageKey.TOTAL_TOKENS: usage.total_tokens,
        }
        span.set_attribute(SpanAttributeKey.CHAT_USAGE, usage_dict)

    _end_span_on_success(span, inputs, output)


def _reconstruct_completion_from_stream(chunks: list[Any]) -> Any:
    """
    Reconstruct a completion object from streaming chunks.

    This preserves the structure and metadata that would be present in a non-streaming
    completion response, including ID, model, timestamps, usage, etc.
    """
    if not chunks:
        return None

    if chunks[0].object == "text_completion":
        # Handling for the deprecated Completions API. Keep the legacy behavior for now.
        def _extract_content(chunk: Any) -> str:
            if not chunk.choices:
                return ""
            return chunk.choices[0].text or ""

        return "".join(map(_extract_content, chunks))

    if chunks[0].object != "chat.completion.chunk":
        return chunks  # Ignore non-chat chunks

    from openai.types.chat import ChatCompletion
    from openai.types.chat.chat_completion import Choice
    from openai.types.chat.chat_completion_message import ChatCompletionMessage

    # Build the base message
    def _extract_content(chunk: Any) -> str:
        if not chunk.choices:
            return ""
        return chunk.choices[0].delta.content or ""

    message = ChatCompletionMessage(
        role="assistant", content="".join(map(_extract_content, chunks))
    )

    # Extract metadata from the last chunk
    last_chunk = chunks[-1]
    finish_reason = "stop"
    if choices := getattr(last_chunk, "choices", None):
        if chunk_choice := choices[0]:
            finish_reason = getattr(chunk_choice, "finish_reason") or finish_reason

    choice = Choice(index=0, message=message, finish_reason=finish_reason)

    # Build the completion dict
    return ChatCompletion(
        id=last_chunk.id,
        choices=[choice],
        created=last_chunk.created,
        model=last_chunk.model,
        object="chat.completion",
        system_fingerprint=last_chunk.system_fingerprint,
        usage=last_chunk.usage,
    )


def _is_responses_final_event(chunk: Any) -> bool:
    try:
        from openai.types.responses import ResponseCompletedEvent

        return isinstance(chunk, ResponseCompletedEvent)
    except ImportError:
        return False


def _end_span_on_exception(span: LiveSpan, e: Exception):
    try:
        span.add_event(SpanEvent.from_exception(e))
        span.end(status=SpanStatusCode.ERROR)
    except Exception as inner_e:
        _logger.warning(f"Encountered unexpected error when ending trace: {inner_e}")


def _add_span_event(span: LiveSpan, index: int, chunk: Any):
    span.add_event(
        SpanEvent(
            name=STREAM_CHUNK_EVENT_NAME_FORMAT.format(index=index),
            # OpenTelemetry SpanEvent only support str-str key-value pairs for attributes
            attributes={STREAM_CHUNK_EVENT_VALUE_KEY: json.dumps(chunk, cls=TraceJSONEncoder)},
        )
    )


def patched_agent_get_chat_completion(original, self, *args, **kwargs):
    """
    Patch the `get_chat_completion` method of the ChatCompletion object.
    OpenAI autolog already handles the raw completion request, but tracing
    the swarm's method is useful to track other parameters like agent name.
    """
    agent = kwargs.get("agent") or args[0]

    # Patch agent's functions to generate traces. Function calls only happen
    # after the first completion is generated because of the design of
    # function calling. Therefore, we can safely patch the tool functions here
    # within get_chat_completion() hook.
    # We cannot patch functions during the agent's initialization because the
    # agent's functions can be modified after the agent is created.
    def function_wrapper(fn):
        if "context_variables" in fn.__code__.co_varnames:

            def wrapper(*args, **kwargs):
                # NB: Swarm uses `func.__code__.co_varnames` to inspect if the provided
                # tool function includes 'context_variables' parameter in the signature
                # and ingest the global context variables if so. Wrapping the function
                # with mlflow.trace() will break this.
                # The co_varnames is determined based on the local variables of the
                # function, so we workaround this by declaring it here as a local variable.
                context_variables = kwargs.get("context_variables", {})  # noqa: F841
                return mlflow.trace(
                    fn,
                    name=f"{agent.name}.{fn.__name__}",
                    span_type=SpanType.TOOL,
                )(*args, **kwargs)
        else:

            def wrapper(*args, **kwargs):
                return mlflow.trace(
                    fn,
                    name=f"{agent.name}.{fn.__name__}",
                    span_type=SpanType.TOOL,
                )(*args, **kwargs)

        wrapped = functools.wraps(fn)(wrapper)
        wrapped._is_mlflow_traced = True  # Marker to avoid double tracing
        return wrapped

    agent.functions = [
        function_wrapper(fn) if not hasattr(fn, "_is_mlflow_traced") else fn
        for fn in agent.functions
    ]

    traced_fn = mlflow.trace(
        original, name=f"{agent.name}.get_chat_completion", span_type=SpanType.CHAIN
    )
    return traced_fn(self, *args, **kwargs)


def patched_swarm_run(original, self, *args, **kwargs):
    """
    Patched version of `run` method of the Swarm object.
    """
    traced_fn = mlflow.trace(original, span_type=SpanType.AGENT)
    return traced_fn(self, *args, **kwargs)
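Once `mlflow.openai.autolog()` is called, every `create`/`parse` call on the patched OpenAI client resources is captured as a trace span with its inputs, outputs, and, where available, token usage. A short usage sketch, assuming `openai>=1.0` is installed and `OPENAI_API_KEY` is set in the environment; the model name and prompt are illustrative, not part of the package:

```python
import mlflow
import openai

mlflow.openai.autolog()  # enables the safe-patches defined above

client = openai.OpenAI()  # reads OPENAI_API_KEY from the environment
response = client.chat.completions.create(
    model="gpt-4o-mini",  # illustrative model name
    messages=[{"role": "user", "content": "Say hello in one word."}],
)
# The call is recorded as a CHAT_MODEL span on an MLflow trace.
print(response.choices[0].message.content)
```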
mlflow/openai/constant.py
ADDED
@@ -0,0 +1 @@
FLAVOR_NAME = "openai"