flowcept 0.8.10__tar.gz → 0.8.12__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {flowcept-0.8.10 → flowcept-0.8.12}/.github/workflows/run-llm-tests.yml +1 -1
- {flowcept-0.8.10 → flowcept-0.8.12}/.github/workflows/run-tests-py313.yml +3 -5
- {flowcept-0.8.10 → flowcept-0.8.12}/.github/workflows/run-tests.yml +13 -4
- {flowcept-0.8.10 → flowcept-0.8.12}/.github/workflows/run_examples.sh +4 -1
- {flowcept-0.8.10 → flowcept-0.8.12}/.gitignore +1 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/Makefile +8 -1
- {flowcept-0.8.10 → flowcept-0.8.12}/PKG-INFO +48 -11
- flowcept-0.8.12/examples/agents/a2a/README.md +27 -0
- flowcept-0.8.12/examples/agents/a2a/agent1.py +60 -0
- flowcept-0.8.12/examples/agents/a2a/agent2.py +60 -0
- flowcept-0.8.12/examples/agents/aec_agent_context_manager.py +67 -0
- flowcept-0.8.12/examples/agents/aec_agent_mock.py +110 -0
- flowcept-0.8.12/examples/agents/aec_prompts.py +113 -0
- flowcept-0.8.12/examples/agents/opt_driver_mock.py +193 -0
- flowcept-0.8.12/examples/consumers/ping_pong_example.py +65 -0
- flowcept-0.8.12/examples/consumers/simple_consumer.py +34 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/examples/llm_complex/llm_main_example.py +1 -1
- {flowcept-0.8.10 → flowcept-0.8.12}/examples/llm_complex/llm_model.py +3 -1
- {flowcept-0.8.10 → flowcept-0.8.12}/examples/mlflow_example.py +1 -1
- flowcept-0.8.12/examples/start_here.py +51 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/notebooks/mlflow.ipynb +2 -2
- {flowcept-0.8.10 → flowcept-0.8.12}/pyproject.toml +30 -18
- {flowcept-0.8.10 → flowcept-0.8.12}/resources/sample_settings.yaml +46 -14
- flowcept-0.8.12/resources/simple_redis_consumer.py +33 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/__init__.py +7 -4
- flowcept-0.8.12/src/flowcept/agents/__init__.py +5 -0
- flowcept-0.8.12/src/flowcept/agents/agent_client.py +58 -0
- flowcept-0.8.12/src/flowcept/agents/agents_utils.py +181 -0
- flowcept-0.8.12/src/flowcept/agents/dynamic_schema_tracker.py +191 -0
- flowcept-0.8.12/src/flowcept/agents/flowcept_agent.py +30 -0
- flowcept-0.8.12/src/flowcept/agents/flowcept_ctx_manager.py +175 -0
- flowcept-0.8.12/src/flowcept/agents/gui/__init__.py +5 -0
- flowcept-0.8.12/src/flowcept/agents/gui/agent_gui.py +76 -0
- flowcept-0.8.12/src/flowcept/agents/gui/gui_utils.py +239 -0
- flowcept-0.8.12/src/flowcept/agents/llms/__init__.py +1 -0
- flowcept-0.8.12/src/flowcept/agents/llms/claude_gcp.py +139 -0
- flowcept-0.8.12/src/flowcept/agents/llms/gemini25.py +119 -0
- flowcept-0.8.12/src/flowcept/agents/prompts/__init__.py +1 -0
- flowcept-0.8.12/src/flowcept/agents/prompts/general_prompts.py +69 -0
- flowcept-0.8.12/src/flowcept/agents/prompts/in_memory_query_prompts.py +297 -0
- flowcept-0.8.12/src/flowcept/agents/tools/__init__.py +1 -0
- flowcept-0.8.12/src/flowcept/agents/tools/general_tools.py +102 -0
- flowcept-0.8.12/src/flowcept/agents/tools/in_memory_queries/__init__.py +1 -0
- flowcept-0.8.12/src/flowcept/agents/tools/in_memory_queries/in_memory_queries_tools.py +704 -0
- flowcept-0.8.12/src/flowcept/agents/tools/in_memory_queries/pandas_agent_utils.py +309 -0
- flowcept-0.8.12/src/flowcept/cli.py +702 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/commons/daos/docdb_dao/mongodb_dao.py +47 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/commons/daos/keyvalue_dao.py +19 -23
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/commons/daos/mq_dao/mq_dao_base.py +49 -38
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/commons/daos/mq_dao/mq_dao_kafka.py +20 -3
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/commons/daos/mq_dao/mq_dao_mofka.py +4 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/commons/daos/mq_dao/mq_dao_redis.py +38 -5
- flowcept-0.8.12/src/flowcept/commons/daos/redis_conn.py +47 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/commons/flowcept_dataclasses/task_object.py +50 -27
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/commons/flowcept_dataclasses/workflow_object.py +9 -1
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/commons/settings_factory.py +2 -4
- flowcept-0.8.12/src/flowcept/commons/task_data_preprocess.py +400 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/commons/utils.py +26 -7
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/configs.py +48 -29
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/flowcept_api/flowcept_controller.py +102 -18
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/flowceptor/adapters/base_interceptor.py +24 -11
- flowcept-0.8.12/src/flowcept/flowceptor/adapters/brokers/__init__.py +1 -0
- flowcept-0.8.12/src/flowcept/flowceptor/adapters/brokers/mqtt_interceptor.py +132 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/flowceptor/adapters/mlflow/mlflow_interceptor.py +3 -3
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/flowceptor/adapters/tensorboard/tensorboard_interceptor.py +3 -3
- flowcept-0.8.12/src/flowcept/flowceptor/consumers/agent/__init__.py +1 -0
- flowcept-0.8.12/src/flowcept/flowceptor/consumers/agent/base_agent_context_manager.py +125 -0
- flowcept-0.8.12/src/flowcept/flowceptor/consumers/base_consumer.py +94 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/flowceptor/consumers/consumer_utils.py +5 -4
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/flowceptor/consumers/document_inserter.py +135 -36
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/flowceptor/telemetry_capture.py +6 -3
- flowcept-0.8.12/src/flowcept/instrumentation/flowcept_agent_task.py +294 -0
- flowcept-0.8.12/src/flowcept/instrumentation/flowcept_decorator.py +43 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/instrumentation/flowcept_loop.py +3 -3
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/instrumentation/flowcept_task.py +64 -24
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/instrumentation/flowcept_torch.py +5 -5
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/instrumentation/task_capture.py +87 -4
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/version.py +1 -1
- flowcept-0.8.12/tests/adapters/test_broker.py +83 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/tests/adapters/test_dask.py +1 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/tests/adapters/test_mlflow.py +1 -1
- {flowcept-0.8.10 → flowcept-0.8.12}/tests/instrumentation_tests/flowcept_task_decorator_test.py +43 -9
- flowcept-0.8.10/resources/simple_redis_consumer.py +0 -24
- flowcept-0.8.10/src/flowcept/cli.py +0 -260
- flowcept-0.8.10/src/flowcept/flowceptor/adapters/zambeze/__init__.py +0 -1
- flowcept-0.8.10/src/flowcept/flowceptor/adapters/zambeze/zambeze_dataclasses.py +0 -41
- flowcept-0.8.10/src/flowcept/flowceptor/adapters/zambeze/zambeze_interceptor.py +0 -102
- {flowcept-0.8.10 → flowcept-0.8.12}/.github/workflows/checks.yml +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/.github/workflows/create-release-n-publish.yml +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/.github/workflows/run-tests-all-dbs.yml +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/.github/workflows/run-tests-in-container.yml +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/.github/workflows/run-tests-kafka.yml +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/.github/workflows/run-tests-simple.yml +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/.github/workflows/version_bumper.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/.readthedocs.yaml +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/CONTRIBUTING.md +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/LICENSE +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/README.md +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/deployment/Dockerfile +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/deployment/compose-grafana.yml +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/deployment/compose-kafka.yml +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/deployment/compose-mofka.yml +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/deployment/compose-mongo.yml +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/deployment/compose.yml +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/docs/api-reference.rst +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/docs/conf.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/docs/contributing.rst +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/docs/getstarted.rst +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/docs/index.rst +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/docs/schemas.rst +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/docs/task_schema.rst +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/docs/workflow_schema.rst +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/examples/dask_example.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/examples/distributed_consumer_example.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/examples/instrumented_loop_example.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/examples/instrumented_simple_example.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/examples/llm_complex/README.md +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/examples/llm_complex/custom_provenance_id_mapping.yaml +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/examples/llm_complex/llm_dataprep.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/examples/llm_complex/llm_test_runner.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/examples/single_layer_perceptron_example.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/examples/tensorboard_example.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/examples/unmanaged/main.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/examples/unmanaged/simple_task.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/notebooks/analytics.ipynb +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/notebooks/dask.ipynb +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/notebooks/dask_from_CLI.ipynb +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/notebooks/reset_dask_nb_exec_counts.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/notebooks/tensorboard.ipynb +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/resources/mofka/bedrock_setup.sh +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/resources/mofka/consumer.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/resources/mofka/mofka-requirements.yaml +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/resources/mofka/mofka_config.json +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/analytics/__init__.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/analytics/analytics_utils.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/analytics/data_augmentation.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/analytics/plot.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/commons/__init__.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/commons/autoflush_buffer.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/commons/daos/__init__.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/commons/daos/docdb_dao/__init__.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/commons/daos/docdb_dao/docdb_dao_base.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/commons/daos/docdb_dao/lmdb_dao.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/commons/daos/mq_dao/__init__.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/commons/flowcept_dataclasses/__init__.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/commons/flowcept_dataclasses/base_settings_dataclasses.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/commons/flowcept_dataclasses/telemetry.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/commons/flowcept_logger.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/commons/query_utils.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/commons/vocabulary.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/flowcept_api/__init__.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/flowcept_api/db_api.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/flowcept_api/task_query_api.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/flowcept_webserver/__init__.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/flowcept_webserver/app.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/flowcept_webserver/resources/__init__.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/flowcept_webserver/resources/query_rsrc.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/flowcept_webserver/resources/task_messages_rsrc.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/flowceptor/__init__.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/flowceptor/adapters/__init__.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/flowceptor/adapters/dask/__init__.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/flowceptor/adapters/dask/dask_dataclasses.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/flowceptor/adapters/dask/dask_interceptor.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/flowceptor/adapters/dask/dask_plugins.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/flowceptor/adapters/instrumentation_interceptor.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/flowceptor/adapters/interceptor_state_manager.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/flowceptor/adapters/mlflow/__init__.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/flowceptor/adapters/mlflow/interception_event_handler.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/flowceptor/adapters/mlflow/mlflow_dao.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/flowceptor/adapters/mlflow/mlflow_dataclasses.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/flowceptor/adapters/tensorboard/__init__.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/flowceptor/adapters/tensorboard/tensorboard_dataclasses.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/flowceptor/consumers/__init__.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/src/flowcept/instrumentation/__init__.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/tests/__init__.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/tests/adapters/__init__.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/tests/adapters/dask_test_utils.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/tests/adapters/test_dask_with_context_mgmt.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/tests/adapters/test_file_observer.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/tests/adapters/test_tensorboard.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/tests/api/__init__.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/tests/api/db_api_test.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/tests/api/flowcept_api_test.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/tests/api/sample_data.json +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/tests/api/sample_data_with_telemetry_and_rai.json +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/tests/api/task_query_api_test.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/tests/doc_db_inserter/__init__.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/tests/doc_db_inserter/doc_db_inserter_test.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/tests/instrumentation_tests/__init__.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/tests/instrumentation_tests/flowcept_explicit_tasks.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/tests/instrumentation_tests/flowcept_loop_test.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/tests/instrumentation_tests/ml_tests/__init__.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/tests/instrumentation_tests/ml_tests/dl_trainer.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/tests/instrumentation_tests/ml_tests/ml_decorator_dask_test.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/tests/instrumentation_tests/ml_tests/ml_decorator_test.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/tests/misc_tests/__init__.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/tests/misc_tests/log_test.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/tests/misc_tests/singleton_test.py +0 -0
- {flowcept-0.8.10 → flowcept-0.8.12}/tests/misc_tests/telemetry_test.py +0 -0
|
@@ -37,7 +37,7 @@ jobs:
|
|
|
37
37
|
python --version
|
|
38
38
|
|
|
39
39
|
- name: Install dependencies
|
|
40
|
-
run: pip install .[ml_dev,dask,mongo]
|
|
40
|
+
run: pip install .[extras,ml_dev,dask,mongo]
|
|
41
41
|
|
|
42
42
|
- name: Test LLM with Dask cluster
|
|
43
43
|
run: python examples/llm_complex/llm_main_example.py --start-dask-cluster
|
|
@@ -36,18 +36,16 @@ jobs:
|
|
|
36
36
|
python -m pip install --upgrade pip
|
|
37
37
|
python --version
|
|
38
38
|
|
|
39
|
-
- name: Install dependencies
|
|
39
|
+
- name: Install dependencies
|
|
40
40
|
run: |
|
|
41
|
-
pip install .
|
|
42
|
-
pip install flask-restful msgpack omegaconf pandas psutil py-cpuinfo redis requests pyarrow
|
|
43
|
-
pip install .[mongo,analytics,dask,docs,kafka,mlflow,dev]
|
|
41
|
+
pip install .[all]
|
|
44
42
|
|
|
45
43
|
- name: List installed packages
|
|
46
44
|
run: pip list
|
|
47
45
|
|
|
48
46
|
- name: Test with pytest and redis, ignoring the ones that (as of Dec 21 2024) don't work on py3.13)
|
|
49
47
|
run: |
|
|
50
|
-
pytest --ignore=tests/adapters/test_tensorboard.py --ignore tests/instrumentation_tests/ml_tests/
|
|
48
|
+
pytest --ignore=tests/adapters/test_tensorboard.py --ignore=tests/adapters/test_broker.py --ignore tests/instrumentation_tests/ml_tests/
|
|
51
49
|
|
|
52
50
|
- name: Shut down docker compose
|
|
53
51
|
run: make services-stop-mongo
|
|
@@ -29,16 +29,25 @@ jobs:
|
|
|
29
29
|
cache: "pip"
|
|
30
30
|
|
|
31
31
|
- name: Show OS Info
|
|
32
|
-
run: '
|
|
33
|
-
|
|
34
|
-
- name: Start docker compose with redis
|
|
35
|
-
run: make services-mongo
|
|
32
|
+
run: 'case "${OSTYPE:-}" in linux*) echo "OS Type: Linux"; (command -v lsb_release >/dev/null 2>&1 && lsb_release -a || cat /etc/os-release); uname -r ;; darwin*) echo "OS Type: macOS"; sw_vers || true; uname -r ;; *) echo "Unsupported OS type: ${OSTYPE:-unknown}" ;; esac'
|
|
36
33
|
|
|
37
34
|
- name: Upgrade pip
|
|
38
35
|
run: |
|
|
39
36
|
python -m pip install --upgrade pip
|
|
40
37
|
python --version
|
|
41
38
|
|
|
39
|
+
- name: Test basic example without any external service
|
|
40
|
+
run: |
|
|
41
|
+
pip install .
|
|
42
|
+
pip list
|
|
43
|
+
flowcept --init-settings
|
|
44
|
+
python examples/start_here.py
|
|
45
|
+
pip uninstall flowcept -y
|
|
46
|
+
rm ~/.flowcept/settings.yaml
|
|
47
|
+
|
|
48
|
+
- name: Start docker compose with redis
|
|
49
|
+
run: make services-mongo
|
|
50
|
+
|
|
42
51
|
- name: Test examples
|
|
43
52
|
run: bash .github/workflows/run_examples.sh examples true # with mongo
|
|
44
53
|
|
|
@@ -34,6 +34,8 @@ run_test() {
|
|
|
34
34
|
pip uninstall flowcept -y > /dev/null 2>&1 || true # Ignore errors during uninstall
|
|
35
35
|
|
|
36
36
|
pip install . > /dev/null 2>&1
|
|
37
|
+
pip install .[extras] > /dev/null 2>&1
|
|
38
|
+
pip list
|
|
37
39
|
|
|
38
40
|
if [[ "$with_mongo" == "true" ]]; then
|
|
39
41
|
pip install .[mongo] > /dev/null 2>&1
|
|
@@ -81,7 +83,8 @@ echo "Using examples directory: $EXAMPLES_DIR"
|
|
|
81
83
|
echo "With Mongo? ${WITH_MONGO}"
|
|
82
84
|
|
|
83
85
|
# Define the test cases
|
|
84
|
-
default_tests=("instrumented_simple_example.py" "instrumented_loop_example.py" "distributed_consumer_example.py" "dask_example.py" "mlflow_example.py" "
|
|
86
|
+
default_tests=("instrumented_simple_example.py" "instrumented_loop_example.py" "distributed_consumer_example.py" "dask_example.py" "mlflow_example.py" "single_layer_perceptron_example.py" "llm_complex/llm_main_example.py" "unmanaged/main.py")
|
|
87
|
+
# Removing tensorboard_example.py from the list above while the dataset link is not fixed.
|
|
85
88
|
|
|
86
89
|
# Use the third argument if provided, otherwise use default tests
|
|
87
90
|
if [[ -n "$3" ]]; then
|
|
@@ -83,10 +83,17 @@ tests-in-container-kafka:
|
|
|
83
83
|
liveness:
|
|
84
84
|
python -c 'from flowcept import Flowcept; print(Flowcept.services_alive())'
|
|
85
85
|
|
|
86
|
+
dev_agent:
|
|
87
|
+
mcp dev src/flowcept/flowceptor/adapters/agents/flowcept_agent.py
|
|
88
|
+
|
|
89
|
+
install_dev_agent: # Run this to fix python env problems in the MCP studio env
|
|
90
|
+
mcp install src/flowcept/flowceptor/adapters/agents/flowcept_agent.py
|
|
91
|
+
|
|
92
|
+
|
|
86
93
|
# Run unit tests using pytest
|
|
87
94
|
.PHONY: tests
|
|
88
95
|
tests:
|
|
89
|
-
pytest
|
|
96
|
+
pytest --ignore=tests/adapters/test_tensorboard.py
|
|
90
97
|
|
|
91
98
|
.PHONY: tests-notebooks
|
|
92
99
|
tests-notebooks:
|
|
@@ -1,44 +1,52 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: flowcept
|
|
3
|
-
Version: 0.8.
|
|
3
|
+
Version: 0.8.12
|
|
4
4
|
Summary: Capture and query workflow provenance data using data observability
|
|
5
|
-
Project-URL: GitHub, https://github.com/ORNL/flowcept
|
|
6
5
|
Author: Oak Ridge National Laboratory
|
|
7
6
|
License-Expression: MIT
|
|
8
7
|
License-File: LICENSE
|
|
9
|
-
Keywords: ai,big-data,dask,data-analytics,data-integration,databases,lineage,machine-learning,ml,mlflow,model-management,parallel-processing,provenance,reproducibility,responsible-ai,scientific-workflows,tensorboard,workflows
|
|
8
|
+
Keywords: agentic-ai,agentic-workflows,ai,big-data,dask,data-analytics,data-integration,databases,lineage,llm,machine-learning,ml,mlflow,model-management,parallel-processing,provenance,reproducibility,responsible-ai,scientific-workflows,tensorboard,workflows
|
|
10
9
|
Classifier: License :: OSI Approved :: MIT License
|
|
11
10
|
Classifier: Operating System :: OS Independent
|
|
12
11
|
Classifier: Programming Language :: Python :: 3
|
|
13
12
|
Requires-Python: >=3.10
|
|
14
|
-
Requires-Dist: flask-restful
|
|
15
|
-
Requires-Dist: lmdb
|
|
16
13
|
Requires-Dist: msgpack
|
|
14
|
+
Requires-Dist: numpy
|
|
17
15
|
Requires-Dist: omegaconf
|
|
18
|
-
Requires-Dist:
|
|
19
|
-
Requires-Dist: psutil>=6.1.1
|
|
20
|
-
Requires-Dist: py-cpuinfo
|
|
21
|
-
Requires-Dist: pyarrow
|
|
22
|
-
Requires-Dist: redis
|
|
23
|
-
Requires-Dist: requests
|
|
16
|
+
Requires-Dist: orjson
|
|
24
17
|
Provides-Extra: all
|
|
25
18
|
Requires-Dist: alembic; extra == 'all'
|
|
26
19
|
Requires-Dist: confluent-kafka<=2.8.0; extra == 'all'
|
|
20
|
+
Requires-Dist: cryptography; extra == 'all'
|
|
27
21
|
Requires-Dist: dask[distributed]<=2024.10.0; extra == 'all'
|
|
22
|
+
Requires-Dist: flask-restful; extra == 'all'
|
|
28
23
|
Requires-Dist: furo; extra == 'all'
|
|
24
|
+
Requires-Dist: gitpython; extra == 'all'
|
|
25
|
+
Requires-Dist: google-genai; extra == 'all'
|
|
29
26
|
Requires-Dist: jupyterlab; extra == 'all'
|
|
27
|
+
Requires-Dist: langchain-community; extra == 'all'
|
|
28
|
+
Requires-Dist: lmdb; extra == 'all'
|
|
29
|
+
Requires-Dist: mcp[cli]; extra == 'all'
|
|
30
30
|
Requires-Dist: mlflow-skinny; extra == 'all'
|
|
31
31
|
Requires-Dist: nbmake; extra == 'all'
|
|
32
|
+
Requires-Dist: paho-mqtt; extra == 'all'
|
|
33
|
+
Requires-Dist: pandas; extra == 'all'
|
|
32
34
|
Requires-Dist: pika; extra == 'all'
|
|
33
35
|
Requires-Dist: plotly; extra == 'all'
|
|
36
|
+
Requires-Dist: psutil>=6.1.1; extra == 'all'
|
|
37
|
+
Requires-Dist: py-cpuinfo; extra == 'all'
|
|
38
|
+
Requires-Dist: pyarrow; extra == 'all'
|
|
34
39
|
Requires-Dist: pymongo; extra == 'all'
|
|
35
40
|
Requires-Dist: pytest; extra == 'all'
|
|
36
41
|
Requires-Dist: pyyaml; extra == 'all'
|
|
42
|
+
Requires-Dist: redis; extra == 'all'
|
|
43
|
+
Requires-Dist: requests; extra == 'all'
|
|
37
44
|
Requires-Dist: ruff; extra == 'all'
|
|
38
45
|
Requires-Dist: scipy; extra == 'all'
|
|
39
46
|
Requires-Dist: seaborn; extra == 'all'
|
|
40
47
|
Requires-Dist: sphinx; extra == 'all'
|
|
41
48
|
Requires-Dist: sqlalchemy; extra == 'all'
|
|
49
|
+
Requires-Dist: streamlit; extra == 'all'
|
|
42
50
|
Requires-Dist: tbparse; extra == 'all'
|
|
43
51
|
Requires-Dist: tensorboard; extra == 'all'
|
|
44
52
|
Requires-Dist: tensorflow; extra == 'all'
|
|
@@ -63,8 +71,28 @@ Requires-Dist: sphinx; extra == 'dev'
|
|
|
63
71
|
Provides-Extra: docs
|
|
64
72
|
Requires-Dist: furo; extra == 'docs'
|
|
65
73
|
Requires-Dist: sphinx; extra == 'docs'
|
|
74
|
+
Provides-Extra: extras
|
|
75
|
+
Requires-Dist: flask-restful; extra == 'extras'
|
|
76
|
+
Requires-Dist: gitpython; extra == 'extras'
|
|
77
|
+
Requires-Dist: lmdb; extra == 'extras'
|
|
78
|
+
Requires-Dist: pandas; extra == 'extras'
|
|
79
|
+
Requires-Dist: psutil>=6.1.1; extra == 'extras'
|
|
80
|
+
Requires-Dist: py-cpuinfo; extra == 'extras'
|
|
81
|
+
Requires-Dist: redis; extra == 'extras'
|
|
82
|
+
Requires-Dist: requests; extra == 'extras'
|
|
66
83
|
Provides-Extra: kafka
|
|
67
84
|
Requires-Dist: confluent-kafka<=2.8.0; extra == 'kafka'
|
|
85
|
+
Provides-Extra: llm-agent
|
|
86
|
+
Requires-Dist: langchain-community; extra == 'llm-agent'
|
|
87
|
+
Requires-Dist: mcp[cli]; extra == 'llm-agent'
|
|
88
|
+
Requires-Dist: streamlit; extra == 'llm-agent'
|
|
89
|
+
Provides-Extra: llm-google
|
|
90
|
+
Requires-Dist: google-genai; extra == 'llm-google'
|
|
91
|
+
Requires-Dist: langchain-community; extra == 'llm-google'
|
|
92
|
+
Requires-Dist: mcp[cli]; extra == 'llm-google'
|
|
93
|
+
Requires-Dist: streamlit; extra == 'llm-google'
|
|
94
|
+
Provides-Extra: lmdb
|
|
95
|
+
Requires-Dist: lmdb; extra == 'lmdb'
|
|
68
96
|
Provides-Extra: ml-dev
|
|
69
97
|
Requires-Dist: datasets==2.17.0; extra == 'ml-dev'
|
|
70
98
|
Requires-Dist: nltk; extra == 'ml-dev'
|
|
@@ -75,13 +103,22 @@ Requires-Dist: torchtext==0.17.2; extra == 'ml-dev'
|
|
|
75
103
|
Requires-Dist: torchvision==0.17.2; extra == 'ml-dev'
|
|
76
104
|
Provides-Extra: mlflow
|
|
77
105
|
Requires-Dist: alembic; extra == 'mlflow'
|
|
106
|
+
Requires-Dist: cryptography; extra == 'mlflow'
|
|
78
107
|
Requires-Dist: mlflow-skinny; extra == 'mlflow'
|
|
79
108
|
Requires-Dist: sqlalchemy; extra == 'mlflow'
|
|
80
109
|
Requires-Dist: watchdog; extra == 'mlflow'
|
|
81
110
|
Provides-Extra: mongo
|
|
111
|
+
Requires-Dist: pyarrow; extra == 'mongo'
|
|
82
112
|
Requires-Dist: pymongo; extra == 'mongo'
|
|
113
|
+
Provides-Extra: mqtt
|
|
114
|
+
Requires-Dist: paho-mqtt; extra == 'mqtt'
|
|
83
115
|
Provides-Extra: nvidia
|
|
84
116
|
Requires-Dist: nvidia-ml-py; extra == 'nvidia'
|
|
117
|
+
Provides-Extra: redis
|
|
118
|
+
Requires-Dist: redis; extra == 'redis'
|
|
119
|
+
Provides-Extra: telemetry
|
|
120
|
+
Requires-Dist: psutil>=6.1.1; extra == 'telemetry'
|
|
121
|
+
Requires-Dist: py-cpuinfo; extra == 'telemetry'
|
|
85
122
|
Provides-Extra: tensorboard
|
|
86
123
|
Requires-Dist: tbparse; extra == 'tensorboard'
|
|
87
124
|
Requires-Dist: tensorboard; extra == 'tensorboard'
|
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
This simple example shows communication between two agents through Flowcept's provenance messages.
|
|
2
|
+
|
|
3
|
+
### Setup:
|
|
4
|
+
|
|
5
|
+
- Agent1 runs on localhost:8000
|
|
6
|
+
- Agent2 runs on localhost:8001
|
|
7
|
+
- Client talks to agent1
|
|
8
|
+
|
|
9
|
+
### Running
|
|
10
|
+
|
|
11
|
+
1. Run Agent1
|
|
12
|
+
2. Run Agent2
|
|
13
|
+
3. Activate agent2 (this step has to be skipped but couldn't find out how yet): curl -X POST http://localhost:8001/mcp/agent/Agent2/action/liveness -H "Content-Type: application/json"
|
|
14
|
+
|
|
15
|
+
The sequence begins by running
|
|
16
|
+
|
|
17
|
+
`flowcept --agent-client --tool-name agent_task1`
|
|
18
|
+
|
|
19
|
+
### Sequence of Message Passing
|
|
20
|
+
|
|
21
|
+
1. Client starts by calling Agent 1's tool: agent_task1
|
|
22
|
+
2. Instrumented Agent Action agent_task1 sends its completion message
|
|
23
|
+
3. Agent 1 sends "call_agent_task" message to call agent_task2
|
|
24
|
+
4. Agent 2 receives it
|
|
25
|
+
5. Agent 2 runs its tool agent_task2
|
|
26
|
+
6. Instrumented Agent Action agent_task2 sends its completion message (subtype 'agent_task')
|
|
27
|
+
7. Agent 1 receives it. Prints and finishes.
|
|
@@ -0,0 +1,60 @@
|
|
|
1
|
+
import random
|
|
2
|
+
from typing import Dict
|
|
3
|
+
|
|
4
|
+
import uvicorn
|
|
5
|
+
from flowcept.flowceptor.consumers.agent.base_agent_context_manager import BaseAgentContextManager
|
|
6
|
+
from flowcept.instrumentation.flowcept_agent_task import agent_flowcept_task
|
|
7
|
+
from flowcept.instrumentation.flowcept_task import flowcept_task
|
|
8
|
+
from mcp.server.fastmcp import FastMCP
|
|
9
|
+
|
|
10
|
+
from flowcept.configs import AGENT
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
@flowcept_task(subtype="call_agent_task")
|
|
14
|
+
def agent_task2(**kwargs):
|
|
15
|
+
return
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class Agent1ContextManager(BaseAgentContextManager):
|
|
19
|
+
|
|
20
|
+
def __init__(self):
|
|
21
|
+
super().__init__()
|
|
22
|
+
|
|
23
|
+
def message_handler(self, msg_obj: Dict) -> bool:
|
|
24
|
+
if msg_obj.get('type', '') == 'task':
|
|
25
|
+
subtype = msg_obj.get("subtype", '')
|
|
26
|
+
if subtype == 'agent_task':
|
|
27
|
+
print(msg_obj)
|
|
28
|
+
tool_name = msg_obj["activity_id"]
|
|
29
|
+
generated = msg_obj["generated"]
|
|
30
|
+
if tool_name == 'agent_task1':
|
|
31
|
+
self.logger.debug(f"Ok, Agent 1 executed agent_task1. Now going to send Message to Agent 2")
|
|
32
|
+
agent_task2(**generated)
|
|
33
|
+
elif tool_name == "agent_task2":
|
|
34
|
+
self.logger.debug(f"Ok, Agent 2 executed agent_task2. All good. Its output was: {generated}")
|
|
35
|
+
return True
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
agent_controller = Agent1ContextManager()
|
|
39
|
+
mcp = FastMCP("Agent1", require_session=True, lifespan=agent_controller.lifespan)
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
@mcp.tool()
|
|
43
|
+
@agent_flowcept_task # Must be in this order. @mcp.tool then @flowcept_task
|
|
44
|
+
def agent_task1(campaign_id=None):
|
|
45
|
+
return {
|
|
46
|
+
"msg": "I'm agent 1 and I wish to talk to agent 2!",
|
|
47
|
+
"data": random.randint(0, 350)
|
|
48
|
+
}
|
|
49
|
+
|
|
50
|
+
def main():
|
|
51
|
+
"""
|
|
52
|
+
Start the MCP server.
|
|
53
|
+
"""
|
|
54
|
+
uvicorn.run(
|
|
55
|
+
mcp.streamable_http_app, host=AGENT.get("mcp_host", "0.0.0.0"), port=AGENT.get("mcp_port", 8000), lifespan="on"
|
|
56
|
+
)
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
if __name__ == "__main__":
|
|
60
|
+
main()
|
|
@@ -0,0 +1,60 @@
|
|
|
1
|
+
from typing import Dict
|
|
2
|
+
|
|
3
|
+
import uvicorn
|
|
4
|
+
from flowcept.flowceptor.consumers.agent.base_agent_context_manager import BaseAgentContextManager
|
|
5
|
+
from flowcept.flowceptor.consumers.agent.client_agent import run_tool
|
|
6
|
+
from flowcept.instrumentation.flowcept_agent_task import agent_flowcept_task
|
|
7
|
+
from mcp.server.fastmcp import FastMCP
|
|
8
|
+
|
|
9
|
+
from flowcept.configs import AGENT
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class Agent2ContextManager(BaseAgentContextManager):
|
|
13
|
+
|
|
14
|
+
def __init__(self):
|
|
15
|
+
super().__init__()
|
|
16
|
+
|
|
17
|
+
def message_handler(self, msg_obj: Dict) -> bool:
|
|
18
|
+
print(msg_obj)
|
|
19
|
+
if msg_obj.get('type', '') == 'task':
|
|
20
|
+
subtype = msg_obj.get("subtype", '')
|
|
21
|
+
if subtype == 'call_agent_task':
|
|
22
|
+
tool_name = msg_obj["activity_id"]
|
|
23
|
+
args = msg_obj["used"]
|
|
24
|
+
if tool_name == 'agent_task2':
|
|
25
|
+
self.logger.debug(f"I am Agent 2. I saw that Agent 1 executed agent_task1 and sent its message to the MQ.\nNow I'm going to execute my action.")
|
|
26
|
+
self.logger.debug(f"Args for agent_task2: {args}")
|
|
27
|
+
r = run_tool(tool_name="agent_task2", kwargs={"data": args["data"]}, port=8001)
|
|
28
|
+
self.logger.info(f"Response: {r}")
|
|
29
|
+
return True
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
agent_controller = Agent2ContextManager()
|
|
33
|
+
mcp = FastMCP("Agent2", require_session=True, lifespan=agent_controller.lifespan)
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
@mcp.tool()
@agent_flowcept_task  # Must be in this order. @mcp.tool then @flowcept_task
def agent_task2(data):
    """Consume the payload produced by Agent 1 and acknowledge execution."""
    print(f"This is the data generated by Agent 1: {data}")
    return {"msg": "I'm agent 2 and I executed!"}
|
|
43
|
+
|
|
44
|
+
@mcp.tool()
def liveness():
    """Liveness probe: returns a fixed string so callers can confirm the server is up."""
    return "Im alive"
|
|
47
|
+
|
|
48
|
+
|
|
49
|
+
def main():
    """Start Agent 2's MCP server (fixed port 8001, so it does not clash with Agent 1)."""
    host = AGENT.get("mcp_host", "0.0.0.0")
    uvicorn.run(mcp.streamable_http_app, host=host, port=8001, lifespan="on")


if __name__ == "__main__":
    main()
|
|
60
|
+
|
|
@@ -0,0 +1,67 @@
|
|
|
1
|
+
from dataclasses import dataclass
|
|
2
|
+
from typing import Dict, List
|
|
3
|
+
import json
|
|
4
|
+
|
|
5
|
+
from flowcept.flowceptor.consumers.agent.base_agent_context_manager import BaseAgentContextManager
|
|
6
|
+
from flowcept.agents.agent_client import run_tool
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
@dataclass
class AeCContext:
    """
    Container for storing agent context data during the lifespan of an application session.

    Attributes
    ----------
    history : list of dict
        Decision history: one entry per evaluated layer, holding that layer's
        scores plus the chosen option and the LLM's explanation.
    """

    history: List[Dict]
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
class AdamantineAeCContextManager(BaseAgentContextManager):
    """
    Context manager for the Adamantine AeC mock agent.

    Listens to the MQ and, on 'call_agent_task' messages, invokes the requested
    tool. Results of 'choose_option' calls are appended to the in-memory
    decision history (self.context.history) so later prompts can use them.
    """

    def __init__(self):
        super().__init__()
        self.reset_context()

    def message_handler(self, msg_obj: Dict) -> bool:
        """Dispatch one MQ message. Always returns True so the consumer keeps listening."""
        if msg_obj.get('type', '') == 'task':
            subtype = msg_obj.get("subtype", '')
            if subtype == 'call_agent_task':
                print(msg_obj)
                tool_name = msg_obj["activity_id"]
                campaign_id = msg_obj.get("campaign_id", None)
                tool_args = msg_obj.get("used", {})
                tool_args["campaign_id"] = campaign_id
                self.logger.debug(f"Going to run {tool_name}, {tool_args}")
                tool_result = run_tool(tool_name, kwargs=tool_args)
                if len(tool_result):
                    if tool_name == 'choose_option':
                        # Record this decision: the simulation scores plus the
                        # LLM's chosen option and its explanation.
                        this_history = dict()
                        tool_result = tool_result[0]
                        this_history.update(tool_args["scores"])
                        tool_result = json.loads(tool_result)
                        this_history["chosen_option"] = tool_result["option"]
                        this_history["explanation"] = tool_result["explanation"]
                        self.context.history.append(this_history)
                else:
                    self.logger.error(f"Something wrong happened when running tool {tool_name}.")
            elif subtype == 'agent_task':
                print('Tool result', msg_obj["activity_id"])
            if msg_obj.get("subtype", '') == "llm_query":
                print("Msg from agent.")
                #
                # msg_output = msg_obj.get("generated", {})["response"]
                #
                # simulation_output = simulate_layer(self._layers_count, msg_output)
                #
                # run_tool_async("ask_agent", simulation_output)

        else:
            # Bug fix: the original nested double quotes inside a double-quoted
            # f-string expression, which is a SyntaxError on Python < 3.12.
            print(f"We got a msg with different type: {msg_obj.get('type', None)}")
        return True

    def reset_context(self):
        """Start a fresh AeCContext with an empty decision history."""
        self.context = AeCContext(history=[])
|
|
@@ -0,0 +1,110 @@
|
|
|
1
|
+
import json
|
|
2
|
+
from typing import Dict, List
|
|
3
|
+
|
|
4
|
+
import numpy as np
|
|
5
|
+
import uvicorn
|
|
6
|
+
from flowcept.instrumentation.flowcept_agent_task import agent_flowcept_task
|
|
7
|
+
from mcp.server.fastmcp import FastMCP
|
|
8
|
+
|
|
9
|
+
from flowcept.configs import AGENT
|
|
10
|
+
from flowcept.agents.agents_utils import build_llm_model
|
|
11
|
+
|
|
12
|
+
from examples.agents.aec_agent_context_manager import AdamantineAeCContextManager
|
|
13
|
+
from examples.agents.aec_prompts import choose_option_prompt, generate_options_set_prompt
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
# Module-level wiring: the AeC context manager consumes MQ messages for the
# duration of the server lifespan; FastMCP exposes this mock agent's tools.
agent_controller = AdamantineAeCContextManager()
mcp = FastMCP("AnC_Agent_mock", require_session=True, lifespan=agent_controller.lifespan)


#################################################
# TOOLS
#################################################
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
@mcp.tool()
@agent_flowcept_task  # Must be in this order. @mcp.tool then @flowcept_task
def generate_options_set(layer: int, planned_controls, number_of_options=4, campaign_id=None):
    """
    Ask the LLM to generate `number_of_options` control options for a layer.

    Parameters
    ----------
    layer : int
        Index of the layer being printed.
    planned_controls
        Pre-calculated control plan (one entry per layer), passed to the prompt.
    number_of_options : int, optional
        Exact number of candidate options the LLM must produce (default 4).
    campaign_id : optional
        Campaign identifier propagated for provenance; not used in the body.

    Returns
    -------
    dict
        {"control_options": list} parsed from the LLM's JSON response.

    Raises
    ------
    Exception
        If the LLM response is not valid JSON.
    ValueError
        If the LLM returned a different number of options than requested.
    """
    llm = build_llm_model()
    ctx = mcp.get_context()
    # The lifespan context holds the decision history accumulated so far.
    history = ctx.request_context.lifespan_context.history
    messages = generate_options_set_prompt(layer, planned_controls, history, number_of_options)
    response = llm.invoke(messages)

    try:
        control_options = json.loads(response)
    except Exception as e:
        raise Exception(f"Could not parse json in generate_options_set. Error {e}. Likely an LLM output problem. "
                        f"This is the JSON we tried to parse: {response}")

    # Explicit check instead of `assert`, which is stripped under `python -O`.
    if len(control_options) != number_of_options:
        raise ValueError(
            f"LLM returned {len(control_options)} options, expected {number_of_options}: {control_options}"
        )
    return {"control_options": control_options}
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
@mcp.tool()
@agent_flowcept_task  # Must be in this order. @mcp.tool then @flowcept_task
def choose_option(scores: Dict, planned_controls: List[Dict], campaign_id: str = None):
    """
    Ask the LLM to pick the best control option given simulation scores.

    Parameters
    ----------
    scores : dict
        Simulation output; the body reads scores["scores"] (list of floats) and
        the prompt also uses the layer index and candidate options it carries.
    planned_controls : list of dict
        Pre-calculated control plan, passed to the prompt.
    campaign_id : str, optional
        Campaign identifier propagated for provenance; not used in the body.

    Returns
    -------
    dict
        The LLM's parsed JSON decision, augmented with:
        - "human_option": index of the lowest-score option (argmin baseline);
        - "attention": True when the LLM disagrees with that baseline.

    Raises
    ------
    Exception
        If the LLM response is not valid JSON.
    """
    llm = build_llm_model()
    ctx = mcp.get_context()
    history = ctx.request_context.lifespan_context.history
    messages = choose_option_prompt(scores, planned_controls, history)
    response = llm.invoke(messages)
    try:
        result = json.loads(response)
    except Exception as e:
        raise Exception(f"Could not parse json in choose_option. Error {e}. Likely an LLM output problem. "
                        f"This is the JSON we tried to parse: {response}")

    # The "human" baseline simply picks the option with the lowest score.
    human_option = int(np.argmin(scores["scores"]))

    result["human_option"] = human_option
    # Flag for human attention when the LLM disagrees with the baseline.
    # (Direct boolean expression instead of the redundant `True if ... else False`.)
    result["attention"] = human_option != result["option"]

    return result
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
@mcp.tool()
def get_latest(n: int = None) -> str:
    """
    Return the latest task(s) as a JSON string.

    Parameters
    ----------
    n : int, optional
        When given, return the latest `n` tasks as a JSON list; when omitted,
        return only the most recent task as a JSON object.
    """
    ctx = mcp.get_context()
    # NOTE(review): the lifespan context built by AdamantineAeCContextManager is
    # an AeCContext, which exposes `history`, not `tasks` — confirm this
    # attribute exists for the context manager actually wired to this server.
    tasks = ctx.request_context.lifespan_context.tasks
    if not tasks:
        return "No tasks available."
    if n is None:
        return json.dumps(tasks[-1])
    # Bug fix: `tasks[-n]` returned a single task (the n-th from the end),
    # silently dropping the newer ones; return the latest n tasks instead.
    return json.dumps(tasks[-n:])
|
|
79
|
+
|
|
80
|
+
|
|
81
|
+
@mcp.tool()
def check_liveness() -> str:
    """
    Check if the agent is running.

    Returns
    -------
    str
        A readiness message that includes this MCP server's name.
    """

    return f"I'm {mcp.name} and I'm ready!"
|
|
88
|
+
|
|
89
|
+
|
|
90
|
+
@mcp.tool()
def check_llm() -> str:
    """
    Check if the agent can talk to the LLM service.

    Returns
    -------
    str
        The LLM's reply to a trivial greeting prompt.
    """
    llm = build_llm_model()
    # NOTE(review): assumes llm.invoke returns a plain string (the declared
    # return type); confirm against build_llm_model's implementation.
    response = llm.invoke("Hello, are you there?")
    return response
|
|
98
|
+
|
|
99
|
+
|
|
100
|
+
def main():
    """
    Start the MCP server.
    """
    # Host/port come from the AGENT config section, with local defaults.
    uvicorn.run(
        mcp.streamable_http_app, host=AGENT.get("mcp_host", "0.0.0.0"), port=AGENT.get("mcp_port", 8000), lifespan="on"
    )


if __name__ == "__main__":
    main()
|
|
@@ -0,0 +1,113 @@
|
|
|
1
|
+
from typing import Dict, List
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
def generate_options_set_prompt(layer: int, planned_controls: List[Dict], history: List[Dict] = None,
                                number_of_options=4) -> str:
    """
    Build the LLM prompt asking for `number_of_options` control options for a layer.

    Parameters
    ----------
    layer : int
        Index of the layer to generate options for.
    planned_controls : list of dict
        Pre-calculated control plan; its length is reported as the total layer count.
    history : list of dict, optional
        Prior decisions (options, scores, choices); embedded in the prompt when present.
    number_of_options : int, optional
        Exact number of options the LLM must output (default 4).

    Returns
    -------
    str
        The assembled prompt text, instructing the LLM to output only a JSON list.
    """
    max_layers = len(planned_controls)
    prompt_str = f"""
Role: You are a decision-making expert in Advanced Additive Manufacturing Technologies at the Manufacturing Demonstration Facility.
Background: You are analyzing layers being printed in a 3D printer. A control decision must be made for each layer to optimize printing outcomes.

Task: Generate exactly {number_of_options} control options for layer {layer} in a print job with {max_layers} layers.

Domain Constraints: Each control option must be a JSON object with the following fields: 'power', 'dwell_0', and 'dwell_1'.
Domain Constraints: 'power' is a float in the range [0, 350]. 'dwell_0' and 'dwell_1' are integers from 10 to 120, divisible by 5.
"""

    # The context clause depends on whether a decision history is available.
    if history:
        prompt_str += """
Context: Use both the original pre-calculated control plan and the full history of previously generated options to inform your decision.
"""
    else:
        prompt_str += """
Context: Use only the original pre-calculated control plan to inform your decision.
"""

    prompt_str += f"""
Input - Pre-calculated Control Plan:
{planned_controls}
"""

    if history:
        prompt_str += f"""
Input - Full Decision History:
{history}

History Format: The history is a list of JSON objects. Each entry includes: the layer index, control options generated for that layer, and the calculated score for each option.
Scoring Note: Typically (but not always), a lower score indicates a better option.
"""

    prompt_str += f"""
Format Constraints: Output a list with exactly {number_of_options} elements.
Format Constraints: Each element must be a JSON object with the format: {{'power': float, 'dwell_0': int, 'dwell_1': int}}.
Output Restriction: DO NOT WRITE ANYTHING ELSE. Only output the JSON list. The response will be parsed automatically.
"""
    return prompt_str
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
def choose_option_prompt(scores: Dict, planned_controls: List[Dict], history=None) -> str:
    """
    Build the LLM prompt that asks it to choose the best control option for a layer.

    (The previous docstring was leftover meta-instructions addressed to an LLM,
    not documentation; replaced with the function's actual contract.)

    Parameters
    ----------
    scores : dict
        Simulation output with keys 'layer' (current layer index),
        'control_options' (list of candidate option dicts), and 'scores'
        (list of floats, where scores[i] scores control_options[i]).
    planned_controls : list of dict
        Pre-calculated control plan, embedded verbatim in the prompt.
    history : list of dict, optional
        Prior layers' decisions and scores; embedded in the prompt when present.

    Returns
    -------
    str
        Prompt text instructing the LLM to reply with a single JSON object of
        the form {"option": int, "explanation": str, "agent_label": "expected"|"surprise"}.
    """
    current_layer = scores.get("layer")
    # Removed: the original computed `max_layers = len(planned_controls)` here
    # but never used it in this prompt.

    prompt_str = f"""
Role: You are a decision-making expert in Advanced Additive Manufacturing Technologies at the Manufacturing Demonstration Facility.
Background: You are analyzing a layer-by-layer 3D printing process to help select optimal control decisions based on simulation scores.
Task: Choose the best control option for layer {current_layer} out of a set of possible options. You will receive the scores and control options computed by an HPC simulation.

Domain Constraints: Each control option is a dictionary with the fields 'power', 'dwell_0', and 'dwell_1'.
Domain Constraints: 'power' is a float between 0 and 350. 'dwell_0' and 'dwell_1' are integers between 10 and 120, and must be divisible by 5.

Score Format: The input is a dictionary with:
- 'layer': current layer index
- 'control_options': a list of candidate control option dictionaries
- 'scores': a list of floats of same length, where scores[i] is the score of control_options[i].

Scoring Hint: Typically (but not always), a lower score indicates better quality. For example, in [5, 10], option 0 is preferred since 5 < 10.
⚠️ Caution: Do NOT hallucinate reasoning. For example, if scores = [2, 3, 5], do NOT say 5 is the lowest. Use correct comparisons only.
Labeling: If your chosen option has the lowest score, label it 'expected'. If it does not, label it 'surprise' and explain your reasoning.

Input - Scores for layer {current_layer}:
{scores}

Input - Pre-calculated Control Plan:
{planned_controls}
"""

    if history:
        prompt_str += f"""

Input - Full History of previous control decisions and scores:
{history}

History Format: Each item in the history is a dictionary with keys: 'layer', 'control_options', and 'scores'. Use this to reason based on past behavior.
"""
    else:
        prompt_str += "\nContext: No prior decision history is available. Use only the current inputs.\n"

    # Plain (non-f) string: the literal braces below must not be interpolated.
    prompt_str += """
Format Constraints: Return a JSON object like:
{"option": index_of_best_option, "explanation": your_reasoning, "agent_label": "expected" or "surprise"}

Output Restriction:
- DO NOT SAY 'Here is the output'
- ONLY WRITE THE VALID JSON. NO EXPLANATIONS AT ALL.
- YOUR OUTPUT MUST BE A VALID JSON! Your output will be parsed programmatically.
"""
    return prompt_str