flowcept 0.9.7__tar.gz → 0.9.9__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {flowcept-0.9.7 → flowcept-0.9.9}/PKG-INFO +5 -1
- {flowcept-0.9.7 → flowcept-0.9.9}/docs/index.rst +1 -0
- flowcept-0.9.9/docs/prov_query.rst +197 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/pyproject.toml +1 -1
- {flowcept-0.9.7 → flowcept-0.9.9}/resources/sample_settings.yaml +1 -1
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/agents/gui/agent_gui.py +5 -3
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/agents/prompts/general_prompts.py +1 -1
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/agents/prompts/in_memory_query_prompts.py +3 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/agents/tools/general_tools.py +57 -2
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/commons/daos/mq_dao/mq_dao_redis.py +2 -1
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/flowcept_api/flowcept_controller.py +37 -22
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/version.py +1 -1
- {flowcept-0.9.7 → flowcept-0.9.9}/.github/workflows/checks.yml +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/.github/workflows/create-release-n-publish.yml +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/.github/workflows/run-llm-tests.yml +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/.github/workflows/run-tests-all-dbs.yml +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/.github/workflows/run-tests-in-container.yml +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/.github/workflows/run-tests-kafka.yml +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/.github/workflows/run-tests-py313.yml +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/.github/workflows/run-tests-simple.yml +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/.github/workflows/run-tests.yml +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/.github/workflows/run_examples.sh +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/.github/workflows/version_bumper.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/.gitignore +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/.readthedocs.yaml +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/CONTRIBUTING.md +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/LICENSE +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/Makefile +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/README.md +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/deployment/Dockerfile +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/deployment/compose-grafana.yml +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/deployment/compose-kafka.yml +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/deployment/compose-mofka.yml +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/deployment/compose-mongo.yml +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/deployment/compose.yml +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/docs/api-reference.rst +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/docs/architecture.rst +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/docs/cli-reference.rst +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/docs/conf.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/docs/contributing.rst +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/docs/img/architecture-diagram.png +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/docs/img/flowcept-logo-dark.png +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/docs/img/flowcept-logo.png +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/docs/prov_capture.rst +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/docs/prov_storage.rst +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/docs/quick_start.rst +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/docs/schemas.rst +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/docs/setup.rst +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/docs/task_schema.rst +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/docs/telemetry_capture.rst +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/docs/workflow_schema.rst +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/examples/agents/a2a/README.md +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/examples/agents/a2a/agent1.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/examples/agents/a2a/agent2.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/examples/agents/aec_agent_context_manager.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/examples/agents/aec_agent_mock.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/examples/agents/aec_prompts.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/examples/agents/opt_driver_mock.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/examples/consumers/ping_pong_example.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/examples/consumers/simple_consumer.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/examples/consumers/simple_publisher.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/examples/convergence_loop_example.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/examples/dask_example.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/examples/distributed_consumer_example.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/examples/instrumented_loop_example.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/examples/instrumented_simple_example.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/examples/llm_complex/README.md +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/examples/llm_complex/custom_provenance_id_mapping.yaml +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/examples/llm_complex/llm_dataprep.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/examples/llm_complex/llm_main_example.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/examples/llm_complex/llm_model.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/examples/llm_complex/llm_test_runner.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/examples/mlflow_example.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/examples/mqtt_example.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/examples/single_layer_perceptron_example.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/examples/start_here.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/examples/tensorboard_example.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/examples/unmanaged/main.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/examples/unmanaged/simple_task.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/notebooks/analytics.ipynb +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/notebooks/dask.ipynb +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/notebooks/dask_from_CLI.ipynb +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/notebooks/mlflow.ipynb +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/notebooks/reset_dask_nb_exec_counts.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/notebooks/tensorboard.ipynb +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/resources/mofka/bedrock_setup.sh +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/resources/mofka/consumer.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/resources/mofka/mofka-requirements.yaml +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/resources/mofka/mofka_config.json +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/resources/simple_redis_consumer.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/__init__.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/agents/__init__.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/agents/agent_client.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/agents/agents_utils.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/agents/dynamic_schema_tracker.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/agents/flowcept_agent.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/agents/flowcept_ctx_manager.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/agents/gui/__init__.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/agents/gui/audio_utils.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/agents/gui/gui_utils.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/agents/llms/__init__.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/agents/llms/claude_gcp.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/agents/llms/gemini25.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/agents/prompts/__init__.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/agents/tools/__init__.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/agents/tools/in_memory_queries/__init__.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/agents/tools/in_memory_queries/in_memory_queries_tools.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/agents/tools/in_memory_queries/pandas_agent_utils.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/analytics/__init__.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/analytics/analytics_utils.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/analytics/data_augmentation.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/analytics/plot.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/cli.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/commons/__init__.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/commons/autoflush_buffer.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/commons/daos/__init__.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/commons/daos/docdb_dao/__init__.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/commons/daos/docdb_dao/docdb_dao_base.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/commons/daos/docdb_dao/lmdb_dao.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/commons/daos/docdb_dao/mongodb_dao.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/commons/daos/keyvalue_dao.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/commons/daos/mq_dao/__init__.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/commons/daos/mq_dao/mq_dao_base.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/commons/daos/mq_dao/mq_dao_kafka.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/commons/daos/mq_dao/mq_dao_mofka.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/commons/daos/redis_conn.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/commons/flowcept_dataclasses/__init__.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/commons/flowcept_dataclasses/base_settings_dataclasses.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/commons/flowcept_dataclasses/task_object.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/commons/flowcept_dataclasses/telemetry.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/commons/flowcept_dataclasses/workflow_object.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/commons/flowcept_logger.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/commons/query_utils.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/commons/settings_factory.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/commons/task_data_preprocess.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/commons/utils.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/commons/vocabulary.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/configs.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/flowcept_api/__init__.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/flowcept_api/db_api.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/flowcept_api/task_query_api.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/flowcept_webserver/__init__.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/flowcept_webserver/app.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/flowcept_webserver/resources/__init__.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/flowcept_webserver/resources/query_rsrc.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/flowcept_webserver/resources/task_messages_rsrc.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/flowceptor/__init__.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/flowceptor/adapters/__init__.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/flowceptor/adapters/base_interceptor.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/flowceptor/adapters/brokers/__init__.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/flowceptor/adapters/brokers/mqtt_interceptor.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/flowceptor/adapters/dask/__init__.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/flowceptor/adapters/dask/dask_dataclasses.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/flowceptor/adapters/dask/dask_interceptor.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/flowceptor/adapters/dask/dask_plugins.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/flowceptor/adapters/instrumentation_interceptor.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/flowceptor/adapters/interceptor_state_manager.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/flowceptor/adapters/mlflow/__init__.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/flowceptor/adapters/mlflow/interception_event_handler.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/flowceptor/adapters/mlflow/mlflow_dao.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/flowceptor/adapters/mlflow/mlflow_dataclasses.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/flowceptor/adapters/mlflow/mlflow_interceptor.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/flowceptor/adapters/tensorboard/__init__.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/flowceptor/adapters/tensorboard/tensorboard_dataclasses.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/flowceptor/adapters/tensorboard/tensorboard_interceptor.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/flowceptor/consumers/__init__.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/flowceptor/consumers/agent/__init__.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/flowceptor/consumers/agent/base_agent_context_manager.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/flowceptor/consumers/base_consumer.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/flowceptor/consumers/consumer_utils.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/flowceptor/consumers/document_inserter.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/flowceptor/telemetry_capture.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/instrumentation/__init__.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/instrumentation/flowcept_agent_task.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/instrumentation/flowcept_decorator.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/instrumentation/flowcept_loop.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/instrumentation/flowcept_task.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/instrumentation/flowcept_torch.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/instrumentation/task_capture.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/tests/__init__.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/tests/adapters/__init__.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/tests/adapters/dask_test_utils.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/tests/adapters/test_broker.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/tests/adapters/test_dask.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/tests/adapters/test_dask_with_context_mgmt.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/tests/adapters/test_file_observer.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/tests/adapters/test_mlflow.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/tests/adapters/test_tensorboard.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/tests/api/__init__.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/tests/api/db_api_test.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/tests/api/flowcept_api_test.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/tests/api/sample_data.json +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/tests/api/sample_data_with_telemetry_and_rai.json +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/tests/api/task_query_api_test.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/tests/doc_db_inserter/__init__.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/tests/doc_db_inserter/doc_db_inserter_test.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/tests/instrumentation_tests/__init__.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/tests/instrumentation_tests/flowcept_explicit_tasks.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/tests/instrumentation_tests/flowcept_loop_test.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/tests/instrumentation_tests/flowcept_task_decorator_test.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/tests/instrumentation_tests/ml_tests/__init__.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/tests/instrumentation_tests/ml_tests/dl_trainer.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/tests/instrumentation_tests/ml_tests/ml_decorator_dask_test.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/tests/instrumentation_tests/ml_tests/ml_decorator_test.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/tests/misc_tests/__init__.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/tests/misc_tests/log_test.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/tests/misc_tests/singleton_test.py +0 -0
- {flowcept-0.9.7 → flowcept-0.9.9}/tests/misc_tests/telemetry_test.py +0 -0

{flowcept-0.9.7 → flowcept-0.9.9}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: flowcept
-Version: 0.9.7
+Version: 0.9.9
 Summary: Capture and query workflow provenance data using data observability
 Author: Oak Ridge National Laboratory
 License-Expression: MIT
@@ -27,6 +27,7 @@ Requires-Dist: jupyterlab; extra == 'all'
 Requires-Dist: langchain-community; extra == 'all'
 Requires-Dist: langchain-openai; extra == 'all'
 Requires-Dist: lmdb; extra == 'all'
+Requires-Dist: matplotlib; extra == 'all'
 Requires-Dist: mcp[cli]; extra == 'all'
 Requires-Dist: mlflow-skinny; extra == 'all'
 Requires-Dist: nbmake; extra == 'all'
@@ -88,6 +89,7 @@ Requires-Dist: confluent-kafka<=2.8.0; extra == 'kafka'
 Provides-Extra: llm-agent
 Requires-Dist: langchain-community; extra == 'llm-agent'
 Requires-Dist: langchain-openai; extra == 'llm-agent'
+Requires-Dist: matplotlib; extra == 'llm-agent'
 Requires-Dist: mcp[cli]; extra == 'llm-agent'
 Requires-Dist: pymupdf; extra == 'llm-agent'
 Requires-Dist: streamlit; extra == 'llm-agent'
@@ -95,6 +97,7 @@ Provides-Extra: llm-agent-audio
 Requires-Dist: gtts; extra == 'llm-agent-audio'
 Requires-Dist: langchain-community; extra == 'llm-agent-audio'
 Requires-Dist: langchain-openai; extra == 'llm-agent-audio'
+Requires-Dist: matplotlib; extra == 'llm-agent-audio'
 Requires-Dist: mcp[cli]; extra == 'llm-agent-audio'
 Requires-Dist: pydub; extra == 'llm-agent-audio'
 Requires-Dist: pymupdf; extra == 'llm-agent-audio'
@@ -105,6 +108,7 @@ Provides-Extra: llm-google
 Requires-Dist: google-genai; extra == 'llm-google'
 Requires-Dist: langchain-community; extra == 'llm-google'
 Requires-Dist: langchain-openai; extra == 'llm-google'
+Requires-Dist: matplotlib; extra == 'llm-google'
 Requires-Dist: mcp[cli]; extra == 'llm-google'
 Requires-Dist: pymupdf; extra == 'llm-google'
 Requires-Dist: streamlit; extra == 'llm-google'
flowcept-0.9.9/docs/prov_query.rst

@@ -0,0 +1,197 @@
+Provenance Querying
+====================
+
+Flowcept captures detailed provenance about workflows, tasks, and artifacts. Once captured, there are multiple ways to query this provenance depending on your needs. This guide summarizes the main mechanisms available for querying Flowcept data.
+
+.. note::
+
+   Persistence is optional in Flowcept. You can configure Flowcept to use LMDB, MongoDB or both. LMDB is a lightweight file-based database ideal for simple tests; MongoDB provides advanced query support and is required for Flowcept's database API and CLI queries. Flowcept can write to both backends if both are enabled.
+
+
+Querying with the Command-Line Interface
+----------------------------------------
+
+Flowcept provides a small CLI for quick database queries. The CLI requires MongoDB to be enabled and accessible from your environment. After installing Flowcept, run `flowcept --help` to see all available commands and options. The usage pattern is:
+
+.. code-block:: console
+
+   flowcept --<function-name-with-dashes> [--<arg-name-with-dashes>=<value>]
+
+Important query-oriented commands include:
+
+* ``workflow-count`` – count tasks, workflows and objects for a given workflow ID.
+* ``query`` – run a MongoDB query against the tasks collection, with optional projection, sorting and limit.
+* ``get-task`` – fetch a single task document by its ID.
+
+Here is an example session:
+
+.. code-block:: console
+
+   # count the number of tasks, workflows and objects for a workflow
+   flowcept --workflow-count --workflow-id=123e4567-e89b-12d3-a456-426614174000
+
+   # query tasks where status is COMPLETED and only return `activity_id` and `status`
+   flowcept --query --filter='{"status": "COMPLETED"}' \
+       --project='{"activity_id": 1, "status": 1, "_id": 0}' \
+       --sort='[["started_at", -1]]' --limit=10
+
+   # fetch a task by ID
+   flowcept --get-task --task-id=24aa4e52-9aec-4ef6-8cb7-cbd7c72d436e
+
+The CLI prints JSON results to stdout. For full usage details see the official CLI reference.
+
+Querying via the Python API (`Flowcept.db`)
+-------------------------------------------
+
+For programmatic access inside scripts and notebooks, Flowcept exposes a database API via the ``Flowcept.db`` property. When MongoDB is enabled this property returns an instance of the internal `DBAPI` class. You can call any of the following methods:
+
+* ``task_query(filter, projection=None, limit=0, sort=None)`` – query the `tasks` collection with an optional projection, sort and limit.
+* ``workflow_query(filter)`` – query the `workflows` collection.
+* ``get_workflow_object(workflow_id)`` – fetch a workflow and return a `WorkflowObject`.
+* ``insert_or_update_task(task_object)`` – insert or update a task.
+* ``save_or_update_object(object, type, custom_metadata, …)`` – persist binary objects such as models or large artifacts.
+
+Below is a typical usage pattern:
+
+.. code-block:: python
+
+   from flowcept import Flowcept
+
+   # query tasks for the current workflow
+   tasks = Flowcept.db.get_tasks_from_current_workflow()
+   print(f"Tasks captured in current workflow: {len(tasks)}")
+
+   # find all tasks marked with a "math" tag
+   math_tasks = Flowcept.db.task_query(filter={"tags": "math"})
+   for t in math_tasks:
+       print(f"{t['task_id']} – {t['activity_id']}: {t['status']}")
+
+   # fetch a workflow object and inspect its arguments
+   wf = Flowcept.db.get_workflow_object(workflow_id="123e4567-e89b-12d3-a456-426614174000")
+   print(wf.workflow_args)
+
+The `DBAPI` exposes many other methods, such as `get_tasks_recursive` to retrieve all descendants of a task, or `dump_tasks_to_file_recursive` to export tasks to Parquet. See the API reference for details.
+
+Accessing the In-Memory Buffer
+------------------------------
+
+During runtime Flowcept stores captured messages in an in-memory buffer (`Flowcept.buffer`). This buffer is useful for debugging or lightweight scripts because it provides immediate access to the latest tasks and workflows without any additional services. In the example below we create two tasks that attach binary data and then inspect the buffer:
+
+.. code-block:: python
+
+   from pathlib import Path
+   from flowcept import Flowcept
+   from flowcept.instrumentation.task import FlowceptTask
+
+   with Flowcept() as f:
+       used_args = {"a": 1}
+       # first task – attach a PDF
+       with FlowceptTask(used=used_args) as t:
+           img_path = Path("docs/img/architecture.pdf")
+           with open(img_path, "rb") as fp:
+               img_data = fp.read()
+           t.end(generated={"b": 2},
+                 data=img_data,
+                 custom_metadata={
+                     "mime_type": "application/pdf",
+                     "file_name": "architecture.pdf",
+                     "file_extension": "pdf"})
+           t.send()
+       # second task – attach a PNG
+       with FlowceptTask(used=used_args) as t:
+           img_path = Path("docs/img/flowcept-logo.png")
+           with open(img_path, "rb") as fp:
+               img_data = fp.read()
+           t.end(generated={"c": 2},
+                 data=img_data,
+                 custom_metadata={
+                     "mime_type": "image/png",
+                     "file_name": "flowcept-logo.png",
+                     "file_extension": "png"})
+           t.send()
+
+   # inspect the buffer
+   assert len(Flowcept.buffer) == 3  # includes the workflow message
+   assert Flowcept.buffer[1]["data"]  # binary data is captured as bytes
+
+At any point inside the running workflow you can access `Flowcept.buffer` to retrieve a list of dictionaries representing messages. Each element contains the original JSON payload plus any binary `data` field. Because the buffer lives in memory, it reflects the most recent state of the workflow and is cleared when the process ends.
+
+Working Offline: Reading a Messages File
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+When persistence is enabled in offline mode, Flowcept dumps the buffer to a JSONL file. Use :func:`Flowcept.read_messages_file` to load these messages later. If you pass `return_df=True`, Flowcept will normalise nested fields into dot-separated columns and return a pandas DataFrame. This is handy for ad-hoc analysis with pandas.
+
+.. code-block:: python
+
+   from flowcept import Flowcept
+
+   # read JSON into a list of dicts
+   msgs = Flowcept.read_messages_file("offline_buffer.jsonl")
+   print(f"{len(msgs)} messages")
+
+   # read JSON into a pandas DataFrame
+   df = Flowcept.read_messages_file("offline_buffer.jsonl", return_df=True)
+   # dot-notation columns allow easy selection; e.g., outputs of attention layers
+   print("generated.attention" in df.columns)
+
+Keep in mind that the JSONL file is only created when using fully offline mode. The path is configured in the settings file under ``DUMP_BUFFER_PATH``. If the file does not exist, `read_messages_file` will raise an error.
+
+
+Working Directly with MongoDB
+-----------------------------
+
+If MongoDB is enabled in your settings you may prefer to query the database directly, especially for complex aggregation pipelines. Flowcept stores tasks in the ``tasks`` collection, workflows in ``workflows``, and binary objects in ``objects``. You can use any MongoDB tool or client library, such as:
+
+* **PyMongo** – Python driver for MongoDB; perfect for custom scripts.
+* **MongoDB Compass** – graphical UI for ad-hoc queries and visualisation.
+* **mongo shell** or **mongosh** – CLI for interactive queries.
+
+For example, using PyMongo:
+
+.. code-block:: python
+
+   import pymongo
+
+   client = pymongo.MongoClient("mongodb://localhost:27017")
+   db = client["flowcept"]
+   # find the 20 most recent tasks for a workflow
+   tasks = db.tasks.find(
+       {"workflow_id": "123e4567-e89b-12d3-a456-426614174000"},
+       {"_id": 0, "activity_id": 1, "status": 1}
+   ).sort("started_at", pymongo.DESCENDING).limit(20)
+   for t in tasks:
+       print(t)
+
+The connection string, database name and authentication credentials are configured in the Flowcept settings file.
+
+Working with LMDB
+-----------------
+
+If LMDB is enabled instead of MongoDB, Flowcept stores data in a directory (default: ``flowcept_lmdb``). LMDB is a file-based key–value store; it does not support ad-hoc queries out of the box, but you can read the data programmatically. Flowcept's `DBAPI` can export LMDB data into pandas DataFrames, allowing you to analyse offline runs without MongoDB:
+
+.. code-block:: python
+
+   from flowcept import Flowcept
+
+   # export LMDB tasks to a DataFrame
+   df = Flowcept.db.to_df(collection="tasks")
+   print(df.head())
+
+Alternatively, you can use the `lmdb` Python library to iterate over raw key–value pairs. The LMDB environment is located under the directory configured in your settings file (commonly named ``flowcept_lmdb``). Because LMDB stores binary values, you will need to serialise and deserialise JSON messages yourself.
+
+Monitoring Provenance with Grafana
+----------------------------------
+
+Flowcept supports streaming provenance into monitoring dashboards. A sample Docker compose file (`deployment/compose-grafana.yml`) runs Grafana along with MongoDB and Redis. Grafana is configured with a pre-built MongoDB-Grafana image and exposes a port (3000) for the dashboard. To configure Grafana to query Flowcept's MongoDB, create a new data source with the URL `mongodb://flowcept_mongo:27017` and specify the database name (usually `flowcept`). The compose file sets environment variables for the admin user and password so you can log in and create your own panels.
+
+Grafana can also connect directly to Redis or Kafka for near-real-time streaming. See the Grafana documentation for instructions on configuring those plugins.
+
+Querying via the LLM-based Flowcept Agent
+-----------------------------------------
+
+Flowcept's agentic querying (powered by language models) is under active development. The agent will allow natural-language queries over provenance data, with interactive guidance and summarisation. Documentation will be released in a future version. In the meantime, use the CLI or Python API for querying tasks and workflows.
+
+Conclusion
+----------
+
+Flowcept offers several ways to query provenance data depending on your environment and requirements. For quick inspection, use the in-memory buffer or offline message files. For interactive scripts or notebooks, `Flowcept.db` provides a high-level API to MongoDB or LMDB. For more sophisticated queries, connect directly to MongoDB using the CLI or standard MongoDB tools. Grafana integration lets you build dashboards on live data. As Flowcept evolves, additional capabilities such as LLM-based query agents will expand the ways you can explore your provenance.
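The new guide recommends going straight to MongoDB for complex aggregation pipelines but only shows a plain find(). As a rough illustration of that route (not part of the package), here is a minimal PyMongo aggregation sketch; the connection URL and workflow ID are the placeholders used in the guide, and the field names (workflow_id, activity_id, status, ended_at) are the ones the guide lists.

    import pymongo

    client = pymongo.MongoClient("mongodb://localhost:27017")
    db = client["flowcept"]

    # Group one workflow's tasks by activity and status, most recently finished groups first.
    pipeline = [
        {"$match": {"workflow_id": "123e4567-e89b-12d3-a456-426614174000"}},
        {"$group": {
            "_id": {"activity": "$activity_id", "status": "$status"},
            "n_tasks": {"$sum": 1},
            "last_ended_at": {"$max": "$ended_at"},
        }},
        {"$sort": {"last_ended_at": -1}},
    ]
    for row in db.tasks.aggregate(pipeline):
        print(row["_id"]["activity"], row["_id"]["status"], row["n_tasks"])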
{flowcept-0.9.7 → flowcept-0.9.9}/pyproject.toml

@@ -65,7 +65,7 @@ mlflow = ["mlflow-skinny", "SQLAlchemy", "alembic", "watchdog", "cryptography"]
 nvidia = ["nvidia-ml-py"]
 mqtt = ["paho-mqtt"]
 tensorboard = ["tensorboard", "tensorflow", "tbparse"]
-llm_agent = ["mcp[cli]", "langchain_community", "langchain_openai", "streamlit", "PyMuPDF"]
+llm_agent = ["mcp[cli]", "langchain_community", "langchain_openai", "streamlit", "PyMuPDF", "matplotlib"]
 llm_google = ["flowcept[llm_agent]", "google-genai"]
 llm_agent_audio = ["flowcept[llm_agent]", "streamlit-mic-recorder", "SpeechRecognition", "pydub", "gTTS"]
 # System dependency (required for pydub)
{flowcept-0.9.7 → flowcept-0.9.9}/resources/sample_settings.yaml

@@ -1,4 +1,4 @@
-flowcept_version: 0.9.7
+flowcept_version: 0.9.9 # Version of the Flowcept package. This setting file is compatible with this version.
 
 project:
   debug: true # Toggle debug mode. This will add a property `debug: true` to all saved data, making it easier to retrieve/delete them later.
{flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/agents/gui/agent_gui.py

@@ -37,9 +37,11 @@ display_ai_msg(GREETING)
 def main():
     """Main Agent GUI function."""
     st.caption(
-        "💡
-        "
-        "
+        "💡 Quick help\n"
+        "Ask about workflow metrics, plots, or summaries.\n\n"
+        "Inputs → used, outputs → generated.\n"
+        "Commands: @record <note> add • @show records list • reset context clear • save context save.\n"
+        "Tip: set result = df to run ad-hoc Python on the in-memory DataFrame."
     )
 
     user_input = st.chat_input("Send a message")
{flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/agents/prompts/general_prompts.py

@@ -23,9 +23,9 @@ ROUTING_PROMPT = (
     "You are a routing assistant for a provenance AI agent. "
     "Given the following user message, classify it into one of the following routes:\n"
     "- small_talk: if it's casual conversation or some random word (e.g., 'hausdn', 'a', hello, how are you, what can you do, what's your name)\n"
+    "- in_context_query: if the user is querying the provenance data questions about tasks or data in running workflow (or a workflow that ran recently) or if the user mentions the in-memory 'df' or a dataframe. I expect that most of the interactions will fall in this category.\n"
     "- plot: if user is requesting plots (e.g., plot, chart, visualize)\n"
     # "- in_context_query: if the user asks questions about tasks or data in running workflow (or a workflow that ran recently) or if the user mentions the in-memory 'df' or a dataframe.\n"
-    "- in_context_query: if the user is querying the provenance data questions about tasks or data in running workflow (or a workflow that ran recently) or if the user mentions the in-memory 'df' or a dataframe.\n"
     # "- historical_prov_query: if the user wants to query historical provenance data\n"
     "- in_chat_query: if the user appears to be asking about something that has said recently in this chat.\n"
     "- unknown: if you don't know.\n"
{flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/agents/prompts/in_memory_query_prompts.py

@@ -176,9 +176,12 @@ QUERY_GUIDELINES = """
 -To select the first (or earliest) N workflow executions, use or adapt the following: `df.groupby('workflow_id', as_index=False).agg({{"started_at": 'min'}}).sort_values(by='started_at', ascending=True).head(N)['workflow_id']` - utilize `started_at` to sort!
 -To select the last (or latest or most recent) N workflow executions, use or adapt the following: `df.groupby('workflow_id', as_index=False).agg({{"ended_at": 'max'}}).sort_values(by='ended_at', ascending=False).head(N)['workflow_id']` - utilize `ended_at` to sort!
 
+-If the user does not ask for a specific workflow run, do not use `workflow_id` in your query.
 -To select the first or earliest or initial tasks, use or adapt the following: `df.sort_values(by='started_at', ascending=True)`
 -To select the last or final or most recent tasks, use or adapt the following: `df.sort_values(by='ended_at', ascending=False)`
 
+-If user explicitly asks to display or show all columns or fields, do not project on any particular field or column. Just show all of them.
+
 -WHEN the user requests a "summary" of activities, you must incorporate relevant summary statistics such as min, max, and mean, into the code you generate.
 -Do NOT use df[0] or df[integer value] or df[df[<field name>].idxmax()] or df[df[<field name>].idxmin()] because these are obviously not valid Pandas Code!
 -**Do NOT use any of those: df[df['started_at'].idxmax()], df[df['started_at'].idxmin()], df[df['ended_at'].idxmin()], df[df['ended_at'].idxmax()]. Those are not valid Pandas Code.**
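For context on why the guidelines keep forbidding df[df['ended_at'].idxmax()]: idxmax() returns an index label, so wrapping it in plain df[...] performs a column lookup and fails, whereas the recommended sort_values pattern (or .loc) selects rows. A small standalone pandas sketch with made-up values:

    import pandas as pd

    # Toy frame with the columns the guidelines refer to (values are illustrative only).
    df = pd.DataFrame({
        "workflow_id": ["wf1", "wf1", "wf2"],
        "activity_id": ["load", "train", "train"],
        "started_at": [1.0, 2.0, 3.0],
        "ended_at": [1.5, 4.0, 3.5],
    })

    # Recommended: sort by timestamp, then take the head.
    print(df.sort_values(by="ended_at", ascending=False).head(1))

    # idxmax() returns an index label (1 here); row selection needs .loc.
    print(df.loc[df["ended_at"].idxmax()])

    # df[df["ended_at"].idxmax()] would look up a *column* named 1 and raise a KeyError.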
{flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/agents/tools/general_tools.py

@@ -73,6 +73,57 @@ def record_guidance(message: str) -> ToolResult:
     return ToolResult(code=201, result=f"Ok. I recorded in my memory: {message}")
 
 
+@mcp_flowcept.tool()
+def show_records() -> ToolResult:
+    """
+    Lists all recorded user guidance.
+    """
+    try:
+        ctx = mcp_flowcept.get_context()
+        custom_guidance: List = ctx.request_context.lifespan_context.custom_guidance
+        if not custom_guidance:
+            message = "There is no recorded user guidance."
+        else:
+            message = "This is the list of custom guidance I have in my memory:\n"
+            message += "\n".join(f" - {msg}" for msg in custom_guidance)
+
+        return ToolResult(code=201, result=message)
+    except Exception as e:
+        return ToolResult(code=499, result=str(e))
+
+
+@mcp_flowcept.tool()
+def reset_records() -> ToolResult:
+    """
+    Resets all recorded user guidance.
+    """
+    try:
+        ctx = mcp_flowcept.get_context()
+        ctx.request_context.lifespan_context.custom_guidance = []
+        return ToolResult(code=201, result="Custom guidance reset.")
+    except Exception as e:
+        return ToolResult(code=499, result=str(e))
+
+@mcp_flowcept.tool()
+def show_records() -> ToolResult:
+    """
+    Lists all recorded user guidance.
+    """
+    try:
+        ctx = mcp_flowcept.get_context()
+        custom_guidance: List = ctx.request_context.lifespan_context.custom_guidance
+        if not custom_guidance:
+            message = "There is no recorded user guidance."
+        else:
+            message = "This is the list of custom guidance I have in my memory:"
+            message += "\n".join(f"- {msg}" for msg in custom_guidance)
+
+        return ToolResult(code=201, result=message)
+    except Exception as e:
+        return ToolResult(code=499, result=str(e))
+
+
+
 @mcp_flowcept.tool()
 def prompt_handler(message: str) -> ToolResult:
     """
@@ -95,6 +146,10 @@ def prompt_handler(message: str) -> ToolResult:
 
     if "@record" in message:
         return record_guidance(message)
+    if "@show records" in message:
+        return show_records()
+    if "@reset records" in message:
+        return reset_records(message)
 
     llm = build_llm_model()
 
@@ -105,12 +160,12 @@ def prompt_handler(message: str) -> ToolResult:
         prompt = SMALL_TALK_PROMPT + message
         response = llm.invoke(prompt)
         return ToolResult(code=201, result=response)
+    elif route == "in_context_query":
+        return run_df_query(llm, message, plot=False)
     elif route == "plot":
        return run_df_query(llm, message, plot=True)
     elif route == "historical_prov_query":
         return ToolResult(code=201, result="We need to query the Provenance Database. Feature coming soon.")
-    elif route == "in_context_query":
-        return run_df_query(llm, message, plot=False)
     elif route == "in_chat_query":
         prompt = SMALL_TALK_PROMPT + message
         response = llm.invoke(prompt)
{flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/commons/daos/mq_dao/mq_dao_redis.py

@@ -70,6 +70,7 @@ class MQDaoRedis(MQDao):
                     except Exception as e:
                         self.logger.error(f"Failed to process message {message}")
                         self.logger.exception(e)
+                        continue
 
                 current_trials = 0
             except (redis.exceptions.ConnectionError, redis.exceptions.TimeoutError) as e:
@@ -78,7 +79,7 @@ class MQDaoRedis(MQDao):
                 sleep(3)
             except Exception as e:
                 self.logger.exception(e)
-
+                continue
 
     def send_message(self, message: dict, channel=MQ_CHANNEL, serializer=msgpack.dumps):
         """Send the message."""
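The two continue statements added above make the Redis listener skip a message it cannot process instead of letting the exception handler fall through. A generic, self-contained sketch of that pattern (not the actual MQDaoRedis code):

    import logging

    import msgpack

    logger = logging.getLogger("consumer")

    def consume(raw_messages):
        """Decode messages, skipping any that fail instead of aborting the whole loop."""
        decoded = []
        for raw in raw_messages:
            try:
                decoded.append(msgpack.loads(raw))
            except Exception:
                logger.exception("Failed to process message %r", raw)
                continue  # keep consuming the remaining messages
        return decoded

    # One malformed payload no longer prevents the rest from being processed.
    msgs = [msgpack.dumps({"task_id": "t1"}), b"\xc1", msgpack.dumps({"task_id": "t2"})]
    print(consume(msgs))  # [{'task_id': 't1'}, {'task_id': 't2'}]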
{flowcept-0.9.7 → flowcept-0.9.9}/src/flowcept/flowcept_api/flowcept_controller.py

@@ -1,7 +1,6 @@
 """Controller module."""
 
-import os
-from typing import List, Dict
+from typing import List, Dict, Any
 from uuid import uuid4
 
 from flowcept.commons.autoflush_buffer import AutoflushBuffer
@@ -175,25 +174,31 @@ class Flowcept(object):
         self._interceptor_instances[0]._mq_dao.bulk_publish(self.buffer)
 
     @staticmethod
-    def read_messages_file(file_path: str = None):
+    def read_messages_file(file_path: str | None = None, return_df: bool = False):
         """
         Read a JSON Lines (JSONL) file containing captured Flowcept messages.
 
         This function loads a file where each line is a serialized JSON object.
         It joins the lines into a single JSON array and parses them efficiently
-        with ``orjson``.
+        with ``orjson``. If ``return_df`` is True, it returns a pandas DataFrame
+        created via ``pandas.json_normalize(..., sep='.')`` so nested fields become
+        dot-separated columns (for example, ``generated.attention``).
 
         Parameters
         ----------
         file_path : str, optional
-            Path to the messages file. If not provided, defaults to the
-
-
+            Path to the messages file. If not provided, defaults to the value of
+            ``DUMP_BUFFER_PATH`` from the configuration. If neither is provided,
+            an assertion error is raised.
+        return_df : bool, default False
+            If True, return a normalized pandas DataFrame. If False, return the
+            parsed list of dictionaries.
 
         Returns
        -------
-
-            A list of message objects
+        list of dict or pandas.DataFrame
+            A list of message objects when ``return_df`` is False,
+            otherwise a normalized DataFrame with dot-separated columns.
 
         Raises
         ------
@@ -203,35 +208,45 @@ class Flowcept(object):
             If the specified file does not exist.
         orjson.JSONDecodeError
             If the file contents cannot be parsed as valid JSON.
+        ModuleNotFoundError
+            If ``return_df`` is True but pandas is not installed.
 
         Examples
         --------
-        Read messages
+        Read messages as a list:
 
         >>> msgs = read_messages_file("offline_buffer.jsonl")
-        >>>
-
+        >>> len(msgs) > 0
+        True
 
-
+        Read messages as a normalized DataFrame:
 
-        >>>
-        >>>
-
-        task_start wf_123
-        task_end wf_123
+        >>> df = read_messages_file("offline_buffer.jsonl", return_df=True)
+        >>> "generated.attention" in df.columns
+        True
         """
+        import os
         import orjson
 
-        _buffer = []
         if file_path is None:
             file_path = DUMP_BUFFER_PATH
         assert file_path is not None, "Please indicate file_path either in the argument or in the config file."
         if not os.path.exists(file_path):
-            raise f"File {file_path}
+            raise FileNotFoundError(f"File '{file_path}' was not found. It is created only in fully offline mode.")
+
         with open(file_path, "rb") as f:
             lines = [ln for ln in f.read().splitlines() if ln]
-
-
+
+        buffer: List[Dict[str, Any]] = orjson.loads(b"[" + b",".join(lines) + b"]")
+
+        if return_df:
+            try:
+                import pandas as pd
+            except ModuleNotFoundError as e:
+                raise ModuleNotFoundError("pandas is required when return_df=True. Please install pandas.") from e
+            return pd.json_normalize(buffer, sep=".")
+
+        return buffer
 
     def save_workflow(self, interceptor: str, interceptor_instance: BaseInterceptor):
         """
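One note on the raise fix in read_messages_file above: in Python 3, raising a plain f-string is itself a TypeError ("exceptions must derive from BaseException"), so the old code could never deliver its message, while the new FileNotFoundError does. A tiny standalone demonstration:

    # Raising a non-exception object fails before any message reaches the caller.
    try:
        raise "offline_buffer.jsonl was not found"
    except TypeError as e:
        print(e)  # exceptions must derive from BaseException

    # Raising a proper exception type carries the message as intended.
    try:
        raise FileNotFoundError("offline_buffer.jsonl was not found")
    except FileNotFoundError as e:
        print(e)  # offline_buffer.jsonl was not found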