flowcept 0.8.9__tar.gz → 0.8.11__tar.gz

This diff shows the changes between two publicly released versions of this package, as published to one of the supported registries. It is provided for informational purposes only and reflects the package contents exactly as they appear in their public registry.
Files changed (178)
  1. {flowcept-0.8.9 → flowcept-0.8.11}/.github/workflows/checks.yml +1 -1
  2. {flowcept-0.8.9 → flowcept-0.8.11}/.github/workflows/run-tests-py313.yml +1 -1
  3. {flowcept-0.8.9 → flowcept-0.8.11}/.github/workflows/run_examples.sh +2 -1
  4. {flowcept-0.8.9 → flowcept-0.8.11}/.gitignore +1 -0
  5. {flowcept-0.8.9 → flowcept-0.8.11}/Makefile +8 -1
  6. {flowcept-0.8.9 → flowcept-0.8.11}/PKG-INFO +18 -6
  7. {flowcept-0.8.9 → flowcept-0.8.11}/README.md +8 -5
  8. flowcept-0.8.11/docs/index.rst +18 -0
  9. flowcept-0.8.11/examples/agents/aec_agent_mock.py +144 -0
  10. flowcept-0.8.11/examples/agents/opt_driver_mock.py +49 -0
  11. flowcept-0.8.11/examples/consumers/simple_consumer.py +23 -0
  12. {flowcept-0.8.9 → flowcept-0.8.11}/examples/mlflow_example.py +1 -1
  13. {flowcept-0.8.9 → flowcept-0.8.11}/notebooks/mlflow.ipynb +2 -2
  14. {flowcept-0.8.9 → flowcept-0.8.11}/pyproject.toml +14 -1
  15. {flowcept-0.8.9 → flowcept-0.8.11}/resources/sample_settings.yaml +44 -23
  16. flowcept-0.8.11/resources/simple_redis_consumer.py +33 -0
  17. flowcept-0.8.11/src/flowcept/cli.py +460 -0
  18. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/commons/daos/keyvalue_dao.py +19 -23
  19. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/commons/daos/mq_dao/mq_dao_base.py +29 -29
  20. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/commons/daos/mq_dao/mq_dao_kafka.py +4 -3
  21. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/commons/daos/mq_dao/mq_dao_mofka.py +4 -0
  22. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/commons/daos/mq_dao/mq_dao_redis.py +38 -5
  23. flowcept-0.8.11/src/flowcept/commons/daos/redis_conn.py +47 -0
  24. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/commons/flowcept_dataclasses/task_object.py +36 -8
  25. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/commons/settings_factory.py +2 -4
  26. flowcept-0.8.11/src/flowcept/commons/task_data_preprocess.py +200 -0
  27. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/commons/utils.py +1 -1
  28. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/configs.py +11 -9
  29. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/flowcept_api/flowcept_controller.py +30 -13
  30. flowcept-0.8.11/src/flowcept/flowceptor/adapters/agents/__init__.py +1 -0
  31. flowcept-0.8.11/src/flowcept/flowceptor/adapters/agents/agents_utils.py +89 -0
  32. flowcept-0.8.11/src/flowcept/flowceptor/adapters/agents/flowcept_agent.py +292 -0
  33. flowcept-0.8.11/src/flowcept/flowceptor/adapters/agents/flowcept_llm_prov_capture.py +186 -0
  34. flowcept-0.8.11/src/flowcept/flowceptor/adapters/agents/prompts.py +51 -0
  35. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/flowceptor/adapters/base_interceptor.py +17 -19
  36. flowcept-0.8.11/src/flowcept/flowceptor/adapters/brokers/__init__.py +1 -0
  37. flowcept-0.8.11/src/flowcept/flowceptor/adapters/brokers/mqtt_interceptor.py +132 -0
  38. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/flowceptor/adapters/mlflow/mlflow_interceptor.py +3 -3
  39. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/flowceptor/adapters/tensorboard/tensorboard_interceptor.py +3 -3
  40. flowcept-0.8.11/src/flowcept/flowceptor/consumers/agent/__init__.py +1 -0
  41. flowcept-0.8.11/src/flowcept/flowceptor/consumers/agent/base_agent_context_manager.py +101 -0
  42. flowcept-0.8.11/src/flowcept/flowceptor/consumers/agent/client_agent.py +48 -0
  43. flowcept-0.8.11/src/flowcept/flowceptor/consumers/agent/flowcept_agent_context_manager.py +145 -0
  44. flowcept-0.8.11/src/flowcept/flowceptor/consumers/agent/flowcept_qa_manager.py +112 -0
  45. flowcept-0.8.11/src/flowcept/flowceptor/consumers/base_consumer.py +90 -0
  46. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/flowceptor/consumers/document_inserter.py +138 -53
  47. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/flowceptor/telemetry_capture.py +1 -1
  48. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/instrumentation/task_capture.py +19 -9
  49. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/version.py +1 -1
  50. flowcept-0.8.11/tests/adapters/test_broker.py +83 -0
  51. {flowcept-0.8.9 → flowcept-0.8.11}/tests/adapters/test_mlflow.py +1 -1
  52. flowcept-0.8.9/docs/index.rst +0 -17
  53. flowcept-0.8.9/resources/simple_redis_consumer.py +0 -24
  54. flowcept-0.8.9/src/flowcept/flowceptor/adapters/zambeze/__init__.py +0 -1
  55. flowcept-0.8.9/src/flowcept/flowceptor/adapters/zambeze/zambeze_dataclasses.py +0 -41
  56. flowcept-0.8.9/src/flowcept/flowceptor/adapters/zambeze/zambeze_interceptor.py +0 -102
  57. {flowcept-0.8.9 → flowcept-0.8.11}/.github/workflows/create-release-n-publish.yml +0 -0
  58. {flowcept-0.8.9 → flowcept-0.8.11}/.github/workflows/run-llm-tests.yml +0 -0
  59. {flowcept-0.8.9 → flowcept-0.8.11}/.github/workflows/run-tests-all-dbs.yml +0 -0
  60. {flowcept-0.8.9 → flowcept-0.8.11}/.github/workflows/run-tests-in-container.yml +0 -0
  61. {flowcept-0.8.9 → flowcept-0.8.11}/.github/workflows/run-tests-kafka.yml +0 -0
  62. {flowcept-0.8.9 → flowcept-0.8.11}/.github/workflows/run-tests-simple.yml +0 -0
  63. {flowcept-0.8.9 → flowcept-0.8.11}/.github/workflows/run-tests.yml +0 -0
  64. {flowcept-0.8.9 → flowcept-0.8.11}/.github/workflows/version_bumper.py +0 -0
  65. {flowcept-0.8.9 → flowcept-0.8.11}/.readthedocs.yaml +0 -0
  66. {flowcept-0.8.9 → flowcept-0.8.11}/CONTRIBUTING.md +0 -0
  67. {flowcept-0.8.9 → flowcept-0.8.11}/LICENSE +0 -0
  68. {flowcept-0.8.9 → flowcept-0.8.11}/deployment/Dockerfile +0 -0
  69. {flowcept-0.8.9 → flowcept-0.8.11}/deployment/compose-grafana.yml +0 -0
  70. {flowcept-0.8.9 → flowcept-0.8.11}/deployment/compose-kafka.yml +0 -0
  71. {flowcept-0.8.9 → flowcept-0.8.11}/deployment/compose-mofka.yml +0 -0
  72. {flowcept-0.8.9 → flowcept-0.8.11}/deployment/compose-mongo.yml +0 -0
  73. {flowcept-0.8.9 → flowcept-0.8.11}/deployment/compose.yml +0 -0
  74. {flowcept-0.8.9 → flowcept-0.8.11}/docs/api-reference.rst +0 -0
  75. {flowcept-0.8.9 → flowcept-0.8.11}/docs/conf.py +0 -0
  76. {flowcept-0.8.9 → flowcept-0.8.11}/docs/contributing.rst +0 -0
  77. {flowcept-0.8.9 → flowcept-0.8.11}/docs/getstarted.rst +0 -0
  78. {flowcept-0.8.9 → flowcept-0.8.11}/docs/schemas.rst +0 -0
  79. {flowcept-0.8.9 → flowcept-0.8.11}/docs/task_schema.rst +0 -0
  80. {flowcept-0.8.9 → flowcept-0.8.11}/docs/workflow_schema.rst +0 -0
  81. {flowcept-0.8.9 → flowcept-0.8.11}/examples/dask_example.py +0 -0
  82. {flowcept-0.8.9 → flowcept-0.8.11}/examples/distributed_consumer_example.py +0 -0
  83. {flowcept-0.8.9 → flowcept-0.8.11}/examples/instrumented_loop_example.py +0 -0
  84. {flowcept-0.8.9 → flowcept-0.8.11}/examples/instrumented_simple_example.py +0 -0
  85. {flowcept-0.8.9 → flowcept-0.8.11}/examples/llm_complex/README.md +0 -0
  86. {flowcept-0.8.9 → flowcept-0.8.11}/examples/llm_complex/custom_provenance_id_mapping.yaml +0 -0
  87. {flowcept-0.8.9 → flowcept-0.8.11}/examples/llm_complex/llm_dataprep.py +0 -0
  88. {flowcept-0.8.9 → flowcept-0.8.11}/examples/llm_complex/llm_main_example.py +0 -0
  89. {flowcept-0.8.9 → flowcept-0.8.11}/examples/llm_complex/llm_model.py +0 -0
  90. {flowcept-0.8.9 → flowcept-0.8.11}/examples/llm_complex/llm_test_runner.py +0 -0
  91. {flowcept-0.8.9 → flowcept-0.8.11}/examples/single_layer_perceptron_example.py +0 -0
  92. {flowcept-0.8.9 → flowcept-0.8.11}/examples/tensorboard_example.py +0 -0
  93. {flowcept-0.8.9 → flowcept-0.8.11}/examples/unmanaged/main.py +0 -0
  94. {flowcept-0.8.9 → flowcept-0.8.11}/examples/unmanaged/simple_task.py +0 -0
  95. {flowcept-0.8.9 → flowcept-0.8.11}/notebooks/analytics.ipynb +0 -0
  96. {flowcept-0.8.9 → flowcept-0.8.11}/notebooks/dask.ipynb +0 -0
  97. {flowcept-0.8.9 → flowcept-0.8.11}/notebooks/dask_from_CLI.ipynb +0 -0
  98. {flowcept-0.8.9 → flowcept-0.8.11}/notebooks/reset_dask_nb_exec_counts.py +0 -0
  99. {flowcept-0.8.9 → flowcept-0.8.11}/notebooks/tensorboard.ipynb +0 -0
  100. {flowcept-0.8.9 → flowcept-0.8.11}/resources/mofka/bedrock_setup.sh +0 -0
  101. {flowcept-0.8.9 → flowcept-0.8.11}/resources/mofka/consumer.py +0 -0
  102. {flowcept-0.8.9 → flowcept-0.8.11}/resources/mofka/mofka-requirements.yaml +0 -0
  103. {flowcept-0.8.9 → flowcept-0.8.11}/resources/mofka/mofka_config.json +0 -0
  104. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/__init__.py +0 -0
  105. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/analytics/__init__.py +0 -0
  106. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/analytics/analytics_utils.py +0 -0
  107. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/analytics/data_augmentation.py +0 -0
  108. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/analytics/plot.py +0 -0
  109. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/commons/__init__.py +0 -0
  110. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/commons/autoflush_buffer.py +0 -0
  111. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/commons/daos/__init__.py +0 -0
  112. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/commons/daos/docdb_dao/__init__.py +0 -0
  113. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/commons/daos/docdb_dao/docdb_dao_base.py +0 -0
  114. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/commons/daos/docdb_dao/lmdb_dao.py +0 -0
  115. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/commons/daos/docdb_dao/mongodb_dao.py +0 -0
  116. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/commons/daos/mq_dao/__init__.py +0 -0
  117. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/commons/flowcept_dataclasses/__init__.py +0 -0
  118. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/commons/flowcept_dataclasses/base_settings_dataclasses.py +0 -0
  119. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/commons/flowcept_dataclasses/telemetry.py +0 -0
  120. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/commons/flowcept_dataclasses/workflow_object.py +0 -0
  121. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/commons/flowcept_logger.py +0 -0
  122. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/commons/query_utils.py +0 -0
  123. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/commons/vocabulary.py +0 -0
  124. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/flowcept_api/__init__.py +0 -0
  125. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/flowcept_api/db_api.py +0 -0
  126. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/flowcept_api/task_query_api.py +0 -0
  127. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/flowcept_webserver/__init__.py +0 -0
  128. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/flowcept_webserver/app.py +0 -0
  129. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/flowcept_webserver/resources/__init__.py +0 -0
  130. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/flowcept_webserver/resources/query_rsrc.py +0 -0
  131. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/flowcept_webserver/resources/task_messages_rsrc.py +0 -0
  132. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/flowceptor/__init__.py +0 -0
  133. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/flowceptor/adapters/__init__.py +0 -0
  134. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/flowceptor/adapters/dask/__init__.py +0 -0
  135. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/flowceptor/adapters/dask/dask_dataclasses.py +0 -0
  136. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/flowceptor/adapters/dask/dask_interceptor.py +0 -0
  137. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/flowceptor/adapters/dask/dask_plugins.py +0 -0
  138. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/flowceptor/adapters/instrumentation_interceptor.py +0 -0
  139. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/flowceptor/adapters/interceptor_state_manager.py +0 -0
  140. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/flowceptor/adapters/mlflow/__init__.py +0 -0
  141. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/flowceptor/adapters/mlflow/interception_event_handler.py +0 -0
  142. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/flowceptor/adapters/mlflow/mlflow_dao.py +0 -0
  143. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/flowceptor/adapters/mlflow/mlflow_dataclasses.py +0 -0
  144. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/flowceptor/adapters/tensorboard/__init__.py +0 -0
  145. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/flowceptor/adapters/tensorboard/tensorboard_dataclasses.py +0 -0
  146. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/flowceptor/consumers/__init__.py +0 -0
  147. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/flowceptor/consumers/consumer_utils.py +0 -0
  148. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/instrumentation/__init__.py +0 -0
  149. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/instrumentation/flowcept_loop.py +0 -0
  150. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/instrumentation/flowcept_task.py +0 -0
  151. {flowcept-0.8.9 → flowcept-0.8.11}/src/flowcept/instrumentation/flowcept_torch.py +0 -0
  152. {flowcept-0.8.9 → flowcept-0.8.11}/tests/__init__.py +0 -0
  153. {flowcept-0.8.9 → flowcept-0.8.11}/tests/adapters/__init__.py +0 -0
  154. {flowcept-0.8.9 → flowcept-0.8.11}/tests/adapters/dask_test_utils.py +0 -0
  155. {flowcept-0.8.9 → flowcept-0.8.11}/tests/adapters/test_dask.py +0 -0
  156. {flowcept-0.8.9 → flowcept-0.8.11}/tests/adapters/test_dask_with_context_mgmt.py +0 -0
  157. {flowcept-0.8.9 → flowcept-0.8.11}/tests/adapters/test_file_observer.py +0 -0
  158. {flowcept-0.8.9 → flowcept-0.8.11}/tests/adapters/test_tensorboard.py +0 -0
  159. {flowcept-0.8.9 → flowcept-0.8.11}/tests/api/__init__.py +0 -0
  160. {flowcept-0.8.9 → flowcept-0.8.11}/tests/api/db_api_test.py +0 -0
  161. {flowcept-0.8.9 → flowcept-0.8.11}/tests/api/flowcept_api_test.py +0 -0
  162. {flowcept-0.8.9 → flowcept-0.8.11}/tests/api/sample_data.json +0 -0
  163. {flowcept-0.8.9 → flowcept-0.8.11}/tests/api/sample_data_with_telemetry_and_rai.json +0 -0
  164. {flowcept-0.8.9 → flowcept-0.8.11}/tests/api/task_query_api_test.py +0 -0
  165. {flowcept-0.8.9 → flowcept-0.8.11}/tests/doc_db_inserter/__init__.py +0 -0
  166. {flowcept-0.8.9 → flowcept-0.8.11}/tests/doc_db_inserter/doc_db_inserter_test.py +0 -0
  167. {flowcept-0.8.9 → flowcept-0.8.11}/tests/instrumentation_tests/__init__.py +0 -0
  168. {flowcept-0.8.9 → flowcept-0.8.11}/tests/instrumentation_tests/flowcept_explicit_tasks.py +0 -0
  169. {flowcept-0.8.9 → flowcept-0.8.11}/tests/instrumentation_tests/flowcept_loop_test.py +0 -0
  170. {flowcept-0.8.9 → flowcept-0.8.11}/tests/instrumentation_tests/flowcept_task_decorator_test.py +0 -0
  171. {flowcept-0.8.9 → flowcept-0.8.11}/tests/instrumentation_tests/ml_tests/__init__.py +0 -0
  172. {flowcept-0.8.9 → flowcept-0.8.11}/tests/instrumentation_tests/ml_tests/dl_trainer.py +0 -0
  173. {flowcept-0.8.9 → flowcept-0.8.11}/tests/instrumentation_tests/ml_tests/ml_decorator_dask_test.py +0 -0
  174. {flowcept-0.8.9 → flowcept-0.8.11}/tests/instrumentation_tests/ml_tests/ml_decorator_test.py +0 -0
  175. {flowcept-0.8.9 → flowcept-0.8.11}/tests/misc_tests/__init__.py +0 -0
  176. {flowcept-0.8.9 → flowcept-0.8.11}/tests/misc_tests/log_test.py +0 -0
  177. {flowcept-0.8.9 → flowcept-0.8.11}/tests/misc_tests/singleton_test.py +0 -0
  178. {flowcept-0.8.9 → flowcept-0.8.11}/tests/misc_tests/telemetry_test.py +0 -0
@@ -1,4 +1,4 @@
-name: Linter, formatter, and docs checks
+name: Code and doc checks
 
 on: pull_request
 
@@ -47,7 +47,7 @@ jobs:
       - name: Test with pytest and redis, ignoring the ones that (as of Dec 21 2024) don't work on py3.13
         run: |
-          pytest --ignore=tests/adapters/test_tensorboard.py --ignore tests/instrumentation_tests/ml_tests/
+          pytest --ignore=tests/adapters/test_tensorboard.py --ignore=tests/adapters/test_broker.py --ignore tests/instrumentation_tests/ml_tests/
 
       - name: Shut down docker compose
         run: make services-stop-mongo
@@ -81,7 +81,8 @@ echo "Using examples directory: $EXAMPLES_DIR"
 echo "With Mongo? ${WITH_MONGO}"
 
 # Define the test cases
-default_tests=("instrumented_simple_example.py" "instrumented_loop_example.py" "distributed_consumer_example.py" "dask_example.py" "mlflow_example.py" "tensorboard_example.py" "single_layer_perceptron_example.py" "llm_complex/llm_main_example.py" "unmanaged/main.py")
+default_tests=("instrumented_simple_example.py" "instrumented_loop_example.py" "distributed_consumer_example.py" "dask_example.py" "mlflow_example.py" "single_layer_perceptron_example.py" "llm_complex/llm_main_example.py" "unmanaged/main.py")
+# Removing tensorboard_example.py from the list above while the dataset link is not fixed.
 
 # Use the third argument if provided, otherwise use default tests
 if [[ -n "$3" ]]; then
@@ -31,3 +31,4 @@ launch.json
 core.*
 *.csv
 
+uv.lock
@@ -83,10 +83,17 @@ tests-in-container-kafka:
 liveness:
 	python -c 'from flowcept import Flowcept; print(Flowcept.services_alive())'
 
+dev_agent:
+	mcp dev src/flowcept/flowceptor/adapters/agents/flowcept_agent.py
+
+install_dev_agent: # Run this to fix python env problems in the MCP studio env
+	mcp install src/flowcept/flowceptor/adapters/agents/flowcept_agent.py
+
+
 # Run unit tests using pytest
 .PHONY: tests
 tests:
-	pytest
+	pytest --ignore=tests/adapters/test_tensorboard.py
 
 .PHONY: tests-notebooks
 tests-notebooks:
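The `liveness` target above is a thin wrapper around Flowcept's Python API, so the same check can be run programmatically. A minimal sketch, using only the call that appears in the Makefile:

```python
# Equivalent of `make liveness`: ask Flowcept whether its backing services
# (MQ and, if enabled, the database) are reachable.
from flowcept import Flowcept

if not Flowcept.services_alive():
    raise RuntimeError("Flowcept services unreachable; start them first (e.g., via deployment/compose.yml)")
```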
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: flowcept
-Version: 0.8.9
+Version: 0.8.11
 Summary: Capture and query workflow provenance data using data observability
 Project-URL: GitHub, https://github.com/ORNL/flowcept
 Author: Oak Ridge National Laboratory
@@ -29,6 +29,7 @@ Requires-Dist: furo; extra == 'all'
 Requires-Dist: jupyterlab; extra == 'all'
 Requires-Dist: mlflow-skinny; extra == 'all'
 Requires-Dist: nbmake; extra == 'all'
+Requires-Dist: paho-mqtt; extra == 'all'
 Requires-Dist: pika; extra == 'all'
 Requires-Dist: plotly; extra == 'all'
 Requires-Dist: pymongo; extra == 'all'
@@ -65,6 +66,12 @@ Requires-Dist: furo; extra == 'docs'
 Requires-Dist: sphinx; extra == 'docs'
 Provides-Extra: kafka
 Requires-Dist: confluent-kafka<=2.8.0; extra == 'kafka'
+Provides-Extra: llm-agent
+Requires-Dist: faiss-cpu; extra == 'llm-agent'
+Requires-Dist: langchain-community; extra == 'llm-agent'
+Requires-Dist: mcp[cli]; extra == 'llm-agent'
+Requires-Dist: sentence-transformers; extra == 'llm-agent'
+Requires-Dist: tiktoken; extra == 'llm-agent'
 Provides-Extra: ml-dev
 Requires-Dist: datasets==2.17.0; extra == 'ml-dev'
 Requires-Dist: nltk; extra == 'ml-dev'
@@ -80,6 +87,8 @@ Requires-Dist: sqlalchemy; extra == 'mlflow'
 Requires-Dist: watchdog; extra == 'mlflow'
 Provides-Extra: mongo
 Requires-Dist: pymongo; extra == 'mongo'
+Provides-Extra: mqtt
+Requires-Dist: paho-mqtt; extra == 'mqtt'
 Provides-Extra: nvidia
 Requires-Dist: nvidia-ml-py; extra == 'nvidia'
 Provides-Extra: tensorboard
@@ -112,7 +121,9 @@ Description-Content-Type: text/markdown
 
 ## Overview
 
-Flowcept is a runtime data integration system that captures and queries workflow provenance with minimal or no code changes. It unifies data across diverse workflows and tools, enabling integrated analysis and insights, especially in federated environments. Designed for scenarios involving critical data from multiple workflows, Flowcept seamlessly integrates data at runtime, providing a unified view for end-to-end monitoring and analysis, and enhanced support for Machine Learning (ML) workflows.
+Flowcept is a runtime data integration system that captures and queries workflow provenance with minimal or no code changes. It unifies data from diverse workflows and tools, enabling integrated analysis and insights, especially in federated environments.
+
+Designed for scenarios involving critical data from multiple workflows, Flowcept supports end-to-end monitoring, analysis, querying, and enhanced support for Machine Learning (ML) workflows.
 
 ## Features
 
@@ -135,8 +146,9 @@ Notes:
   - TensorBoard
 - Python scripts can be easily instrumented via `@decorators` using `@flowcept_task` (for generic Python method) or `@torch_task` (for methods that encapsulate PyTorch model manipulation, such as training or evaluation).
 - Currently supported MQ systems:
-  - Kafka
-  - Redis
+  - [Kafka](https://kafka.apache.org)
+  - [Redis](https://redis.io)
+  - [Mofka](https://mofka.readthedocs.io)
 - Currently supported database systems:
   - MongoDB
   - Lightning Memory-Mapped Database (lightweight file-only database system)
@@ -181,7 +193,7 @@ If you want to install all optional dependencies, use:
 pip install flowcept[all]
 ```
 
-This is a convenient way to ensure all adapters are available, but it may install dependencies you don't need.
+This is useful mostly for Flowcept developers. Please avoid installing like this if you can, as it may install several dependencies you will never use.
 
 ### 4. Installing from Source
 To install Flowcept from the source repository:
@@ -363,7 +375,7 @@ Some unit tests utilize `torch==2.2.2`, `torchtext=0.17.2`, and `torchvision==0.
 
 ## Documentation
 
-Full documentation is available on [Read the Docs](https://myproject.readthedocs.io/en/latest/).
+Full documentation is available on [Read the Docs](https://flowcept.readthedocs.io/).
 
 ## Cite us
 
@@ -22,7 +22,9 @@
 
 ## Overview
 
-Flowcept is a runtime data integration system that captures and queries workflow provenance with minimal or no code changes. It unifies data across diverse workflows and tools, enabling integrated analysis and insights, especially in federated environments. Designed for scenarios involving critical data from multiple workflows, Flowcept seamlessly integrates data at runtime, providing a unified view for end-to-end monitoring and analysis, and enhanced support for Machine Learning (ML) workflows.
+Flowcept is a runtime data integration system that captures and queries workflow provenance with minimal or no code changes. It unifies data from diverse workflows and tools, enabling integrated analysis and insights, especially in federated environments.
+
+Designed for scenarios involving critical data from multiple workflows, Flowcept supports end-to-end monitoring, analysis, querying, and enhanced support for Machine Learning (ML) workflows.
 
 ## Features
 
@@ -45,8 +47,9 @@ Notes:
   - TensorBoard
 - Python scripts can be easily instrumented via `@decorators` using `@flowcept_task` (for generic Python method) or `@torch_task` (for methods that encapsulate PyTorch model manipulation, such as training or evaluation).
 - Currently supported MQ systems:
-  - Kafka
-  - Redis
+  - [Kafka](https://kafka.apache.org)
+  - [Redis](https://redis.io)
+  - [Mofka](https://mofka.readthedocs.io)
 - Currently supported database systems:
   - MongoDB
   - Lightning Memory-Mapped Database (lightweight file-only database system)
@@ -91,7 +94,7 @@ If you want to install all optional dependencies, use:
 pip install flowcept[all]
 ```
 
-This is a convenient way to ensure all adapters are available, but it may install dependencies you don't need.
+This is useful mostly for Flowcept developers. Please avoid installing like this if you can, as it may install several dependencies you will never use.
 
 ### 4. Installing from Source
 To install Flowcept from the source repository:
@@ -273,7 +276,7 @@ Some unit tests utilize `torch==2.2.2`, `torchtext=0.17.2`, and `torchvision==0.
 
 ## Documentation
 
-Full documentation is available on [Read the Docs](https://myproject.readthedocs.io/en/latest/).
+Full documentation is available on [Read the Docs](https://flowcept.readthedocs.io/).
 
 ## Cite us
 
@@ -0,0 +1,18 @@
+Flowcept
+========
+
+GitHub Repository: https://github.com/ornl/flowcept
+
+Flowcept is a runtime data integration system that captures and queries workflow provenance with minimal or no code changes. It unifies data from diverse workflows and tools, enabling integrated analysis and insights, especially in federated environments.
+
+Designed for scenarios involving critical data from multiple workflows, Flowcept supports end-to-end monitoring, analysis, querying, and enhanced support for Machine Learning (ML) workflows.
+
+.. toctree::
+   :maxdepth: 2
+   :caption: Contents:
+
+   getstarted
+   schemas
+   contributing
+   api-reference
+
@@ -0,0 +1,144 @@
+import json
+import os
+from typing import Dict
+import textwrap
+
+import uvicorn
+from flowcept.flowceptor.consumers.agent.base_agent_context_manager import BaseAgentContextManager
+from mcp.server.fastmcp import FastMCP
+from mcp.server.fastmcp.prompts import base
+
+from flowcept.configs import AGENT
+from flowcept.flowcept_api.flowcept_controller import Flowcept
+from flowcept.flowceptor.adapters.agents.agents_utils import convert_mcp_to_langchain
+from flowcept.flowceptor.adapters.agents.flowcept_llm_prov_capture import invoke_llm, add_preamble_to_response
+from flowcept.flowceptor.adapters.agents.prompts import get_question_prompt, BASE_SINGLETASK_PROMPT
+from flowcept.commons.utils import get_utc_now
+
+os.environ["SAMBASTUDIO_URL"] = AGENT.get("llm_server_url")
+os.environ["SAMBASTUDIO_API_KEY"] = AGENT.get("api_key")
+
+agent_controller = BaseAgentContextManager()
+mcp = FastMCP("AnC_Agent_mock", require_session=True, lifespan=agent_controller.lifespan)
+
+
+#################################################
+# PROMPTS
+#################################################
+
+@mcp.prompt()
+def single_task_used_generated_prompt(task_data: Dict, question: str) -> list[base.Message]:
+    """
+    Generates a prompt to ask about one particular task.
+    """
+    msgs = BASE_SINGLETASK_PROMPT.copy()
+    msgs.append(get_question_prompt(question))
+    msgs.append(base.UserMessage(f"This is the task object I need you to focus on: \n {task_data}\n"))
+    return msgs
+
+
+@mcp.prompt()
+def adamantine_prompt(layer: int, simulation_output: Dict, question: str) -> list[base.Message]:
+    control_options = simulation_output.get("control_options")
+    l2_error = simulation_output.get("l2_error")
+
+    control_options_str = ""
+    for o in range(len(control_options)):
+        control_options_str += f"Option {o + 1}: {control_options[o]}\n"
+
+    l2_error_str = ""
+    for o in range(len(l2_error)):
+        l2_error_str += f"Option {o + 1}: {l2_error[o]}\n"
+
+    prompt = textwrap.dedent(f"""\
+        SUMMARY OF CURRENT STATE: Currently, the printer is printing layer {layer}. You need to make a control decision for layer {layer + 2}. It is currently {get_utc_now()}.
+
+        CONTROL OPTIONS:
+        {control_options_str}
+
+        AUTOMATED ANALYSIS FROM SIMULATIONS:
+        Full volume L2 error (lower is better)
+
+        {l2_error_str}
+        """).strip()
+
+    return [
+        base.UserMessage(prompt),
+        base.UserMessage(f"Based on this provided information, here is the question: {question}")
+    ]
+
+
+#################################################
+# TOOLS
+#################################################
+
+@mcp.tool()
+def get_latest(n: int = None) -> str:
+    """
+    Return the latest task(s) as a JSON string.
+    """
+    ctx = mcp.get_context()
+    tasks = ctx.request_context.lifespan_context.tasks
+    if not tasks:
+        return "No tasks available."
+    if n is None:
+        return json.dumps(tasks[-1])
+    return json.dumps(tasks[-n])
+
+
+@mcp.tool()
+def check_liveness() -> str:
+    """
+    Check if the agent is running.
+    """
+    return f"I'm {mcp.name} and I'm ready!"
+
+
+@mcp.tool()
+def check_llm() -> str:
+    """
+    Check if the agent can talk to the LLM service.
+    """
+    messages = [base.UserMessage(f"Hi, are you working properly?")]
+
+    langchain_messages = convert_mcp_to_langchain(messages)
+    response = invoke_llm(langchain_messages)
+    result = add_preamble_to_response(response, mcp)
+
+    return result
+
+
+@mcp.tool()
+def adamantine_ask_about_latest_iteration(question) -> str:
+    ctx = mcp.get_context()
+    tasks = ctx.request_context.lifespan_context.tasks
+    if not tasks:
+        return "No tasks available."
+    task_data = tasks[-1]
+
+    layer = task_data.get('used').get('layer_number', 0)
+    simulation_output = task_data.get('generated')
+
+    messages = adamantine_prompt(layer, simulation_output, question)
+
+    langchain_messages = convert_mcp_to_langchain(messages)
+
+    response = invoke_llm(langchain_messages)
+    result = add_preamble_to_response(response, mcp, task_data)
+    return result
+
+
+def main():
+    """
+    Start the MCP server.
+    """
+    f = Flowcept(start_persistence=False, save_workflow=False, check_safe_stops=False).start()
+    f.logger.info(f"This section's workflow_id={Flowcept.current_workflow_id}")
+    setattr(mcp, "workflow_id", f.current_workflow_id)
+    uvicorn.run(mcp.streamable_http_app, host="0.0.0.0", port=8000, lifespan="on")
+
+
+if __name__ == "__main__":
+    main()
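Once this mock agent is serving on port 8000, its zero-argument tools can be exercised from another process with the `run_tool` client helper added in this release (see `client_agent.py` in the file list). A minimal sketch, assuming the server above is already running:

```python
# Hedged client sketch for the mock agent above; run_tool comes from
# src/flowcept/flowceptor/consumers/agent/client_agent.py (new in 0.8.11).
from flowcept.flowceptor.consumers.agent.client_agent import run_tool

print(run_tool("check_liveness"))  # e.g., "I'm AnC_Agent_mock and I'm ready!"
print(run_tool("get_latest"))      # latest captured task as a JSON string, or "No tasks available."
```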
@@ -0,0 +1,49 @@
+from time import sleep
+import random
+from flowcept.flowcept_api.flowcept_controller import Flowcept
+from flowcept.flowceptor.consumers.agent.client_agent import run_tool
+from flowcept.instrumentation.flowcept_task import flowcept_task
+
+
+@flowcept_task
+def simulate_layer(layer_number: int):
+    power_arr = [0, 15, 25, 50, 75, 100, 125, 150, 175, 200, 250, 300, 350]  # floating number from 0 to 350
+    dwell_arr = list(range(10, 121, 5))
+
+    control_options = [
+        f"{power_arr[random.randint(0, len(power_arr)-1)]}W power reheat pass, {power_arr[random.randint(0, len(power_arr)-1)]}s dwell",
+        f"{dwell_arr[random.randint(0, len(dwell_arr)-1)]}s dwell, {dwell_arr[random.randint(0, len(dwell_arr)-1)]}s dwell",
+        f"{dwell_arr[random.randint(0, len(dwell_arr)-1)]}s dwell, {power_arr[random.randint(0, len(power_arr)-1)]}W power reheat pass",
+        f"{power_arr[random.randint(0, len(power_arr)-1)]}W power reheat pass, {power_arr[random.randint(0, len(power_arr)-1)]}W power reheat pass"
+    ]
+    l2_error = [
+        random.randint(100, 350),
+        random.randint(100, 500),
+        random.randint(100, 500),
+        random.randint(100, 600)
+    ]
+    sleep(5/(layer_number+1))
+    return {"control_options": control_options, "l2_error": l2_error}
+
+
+try:
+    print(run_tool("check_liveness"))
+except Exception as e:
+    print(e)
+    pass
+
+
+def adaptive_control_workflow(config):
+    for i in range(config["max_layers"]):
+        print()
+        print(f"Starting simulation for Layer {i}; ", end='')
+        simulation_output = simulate_layer(layer_number=i)
+        print(simulation_output)
+
+
+if __name__ == "__main__":
+    with Flowcept(start_persistence=False, save_workflow=False, check_safe_stops=False):
+        config = {"max_layers": 5}
+        adaptive_control_workflow(config)
+
+
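The driver above leans on `@flowcept_task` to record each `simulate_layer` call as a provenance task: the keyword arguments become the task's `used` data and the returned dict its `generated` data, which is how `aec_agent_mock.py` reads them back. Stripped of the simulation details, the capture pattern is just this sketch:

```python
# Minimal @flowcept_task sketch using only constructs from the example above.
from flowcept.flowcept_api.flowcept_controller import Flowcept
from flowcept.instrumentation.flowcept_task import flowcept_task


@flowcept_task
def add(a: int, b: int):
    return {"sum": a + b}  # becomes the task's `generated` data


if __name__ == "__main__":
    with Flowcept(start_persistence=False, save_workflow=False, check_safe_stops=False):
        add(a=1, b=2)  # a and b become the task's `used` data
```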
@@ -0,0 +1,23 @@
+from typing import Dict
+
+from flowcept.flowceptor.consumers.base_consumer import BaseConsumer
+
+
+class MyConsumer(BaseConsumer):
+
+    def __init__(self):
+        super().__init__()
+
+    def message_handler(self, msg_obj: Dict) -> bool:
+        if msg_obj.get('type', '') == 'task':
+            print(msg_obj)
+        else:
+            print(f"We got a msg with different type: {msg_obj.get("type", None)}")
+        return True
+
+
+if __name__ == "__main__":
+
+    print("Starting consumer indefinitely. Press ctrl+c to stop")
+    consumer = MyConsumer()
+    consumer.start(daemon=False)
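A variation on the consumer above that keys off the `task_subtype` configured for the new MQTT broker adapter; note that the `subtype` and `activity_id` field names are assumptions inferred from the sample settings later in this diff, not a documented message contract:

```python
from typing import Dict

from flowcept.flowceptor.consumers.base_consumer import BaseConsumer


class IntersectTaskConsumer(BaseConsumer):
    """Hypothetical consumer that only prints broker-captured tasks."""

    def message_handler(self, msg_obj: Dict) -> bool:
        # "subtype" is assumed to carry the adapter's task_subtype value.
        if msg_obj.get("type") == "task" and msg_obj.get("subtype") == "intersect_msg":
            print(msg_obj.get("activity_id"), msg_obj.get("used"))
        return True  # keep consuming


if __name__ == "__main__":
    IntersectTaskConsumer().start(daemon=False)
```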
@@ -31,7 +31,7 @@ if __name__ == "__main__":
         mlflow.log_params({"param1": 1})
         mlflow.log_params({"param2": 2})
         mlflow.log_metric("metric1", 10)
-        run_id = run.info.run_uuid
+        run_id = run.info.run_id
     task = Flowcept.db.query(filter={"task_id": run_id})[0]
     assert task["status"] == "FINISHED"
     assert "param1" in task["used"]
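This tracks MLflow's own API: `RunInfo.run_uuid` is a long-deprecated alias of `run_id`, and the two return the same value, so the `task_id` query is unaffected. For illustration:

```python
import mlflow

with mlflow.start_run() as run:
    # run_uuid is kept by MLflow only for backward compatibility.
    assert run.info.run_id == run.info.run_uuid
```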
@@ -97,7 +97,7 @@
     "    # Actual training code would go here\n",
     "    print(\"Generated training metadata.\")\n",
     "    mlflow.log_metric(\"loss\", 0.04)\n",
-    "    return run.info.run_uuid"
+    "    return run.info.run_id"
    ]
   },
   {
@@ -413,7 +413,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.10.15"
+   "version": "3.12.9"
   }
  },
 "nbformat": 4,
@@ -16,7 +16,7 @@ dependencies = [
     "redis",
     "requests",
     "lmdb",
-    "pyarrow"
+    "pyarrow",
 ]
 authors = [{name = "Oak Ridge National Laboratory"}]
 description = "Capture and query workflow provenance data using data observability"
@@ -62,7 +62,10 @@ docs = ["sphinx", "furo"]
 kafka = ["confluent-kafka<=2.8.0"] # As of today, 2/28/2025, version 2.8.1 is stale. When this gets fixed, let's remove the version constraint. https://pypi.org/project/confluent-kafka/#history
 mlflow = ["mlflow-skinny", "SQLAlchemy", "alembic", "watchdog"]
 nvidia = ["nvidia-ml-py"]
+mqtt = ["paho-mqtt"]
 tensorboard = ["tensorboard", "tensorflow", "tbparse"]
+llm_agent = ["mcp[cli]", "langchain_community", "sentence-transformers", "tiktoken", "faiss-cpu"]
+
 dev = [
     "flowcept[docs]",
     "jupyterlab",
@@ -88,6 +91,7 @@ all = [
     "flowcept[dask]",
     "flowcept[kafka]",
     "flowcept[mlflow]",
+    "flowcept[mqtt]",
     "flowcept[tensorboard]",
     "flowcept[dev]",
 ]
@@ -105,8 +109,17 @@ ignore = ["D200", "D212", "D105", "D401", "D205", "D100"]
 [tool.ruff.lint.pydocstyle]
 convention = "numpy"
 
+[tool.uv.workspace]
+members = [
+    "tmp_tests/mcp-server-demo",
+    "tmp_tests/mcp-server-demo/mcp-server-demo",
+]
+
 [tool.hatch.build.targets.wheel]
 packages = ["src/flowcept"]
 
 [tool.hatch.build.targets.wheel.force-include]
 "resources/sample_settings.yaml" = "resources/sample_settings.yaml"
+
+[project.scripts]
+flowcept = "flowcept.cli:main"
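The new `[project.scripts]` table is what puts a `flowcept` command on the PATH after installation; the console script is just a shim around `flowcept.cli:main`, so invoking it is equivalent to this sketch:

```python
# What the installed `flowcept` console script effectively runs.
from flowcept.cli import main

if __name__ == "__main__":
    main()
```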
@@ -1,4 +1,4 @@
-flowcept_version: 0.8.9 # Version of the Flowcept package. This setting file is compatible with this version.
+flowcept_version: 0.8.11 # Version of the Flowcept package. This setting file is compatible with this version.
 
 project:
   debug: true # Toggle debug mode. This will add a property `debug: true` to all saved data, making it easier to retrieve/delete them later.
@@ -25,7 +25,6 @@ telemetry_capture: # This toggles each individual type of telemetry capture. GPU
 
 instrumentation:
   enabled: true # This toggles data capture for instrumentation.
-  singleton: true # Use a single instrumentation instance per process. Defaults to true
   torch:
     what: parent_and_children # Scope of instrumentation: "parent_only" -- will capture only at the main model level, "parent_and_children" -- will capture the inner layers, or ~ (disable).
     children_mode: telemetry_and_tensor_inspection # What to capture if parent_and_children is chosen in the scope. Possible values: "tensor_inspection" (i.e., tensor metadata), "telemetry", "telemetry_and_tensor_inspection"
@@ -40,18 +39,22 @@ experiment:
 mq:
   type: redis # or kafka or mofka; Please adjust the port (kafka's default is 9092; redis is 6379). If mofka, adjust the group_file.
   host: localhost
-  # instances: ["localhost:6379"] # We can have multiple redis instances being accessed by the consumers but each interceptor will currently access one single redis.
+  # uri: ?
+  # instances: ["localhost:6379"] # We can have multiple MQ instances being accessed by the consumers but each interceptor will currently access one single MQ.
   port: 6379
   # group_file: mofka.json
   channel: interception
   buffer_size: 50
   insertion_buffer_time_secs: 5
   timing: false
+  # uri: use Redis connection uri here
   chunk_size: -1 # use 0 or -1 to disable this. Or simply omit this from the config file.
+  same_as_kvdb: false # Set this to true if you are using the same Redis instance both as an MQ and as the KV_DB. In that case, no need to repeat connection parameters in MQ. Use only what you define in KV_DB.
 
 kv_db:
   host: localhost
   port: 6379
+  enabled: true
   # uri: use Redis connection uri here
 
 web_server:
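These MQ/KV settings surface as module-level constants in `flowcept.configs`; the names below are taken from the imports in the new `resources/simple_redis_consumer.py` shown at the end of this diff:

```python
from flowcept.configs import MQ_HOST, MQ_PORT, MQ_CHANNEL, KVDB_URI

print(f"MQ: {MQ_HOST}:{MQ_PORT}, channel={MQ_CHANNEL}")
print(f"KV DB URI: {KVDB_URI}")  # presumably unset unless kv_db.uri is configured
```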
@@ -59,9 +62,9 @@ web_server:
   port: 5000
 
 sys_metadata:
-  environment_id: "laptop"
+  environment_id: "laptop" # We use this to keep track of the environment used to run an experiment. Typical values include the cluster name, but it can be anything that you think will help identify your experimentation environment.
 
-extra_metadata:
+extra_metadata: # We use this to store any extra metadata you want to keep track of during an experiment.
   place_holder: ""
 
 analytics:
@@ -70,13 +73,20 @@ analytics:
   generated.accuracy: maximum_first
 
 db_buffer:
-  adaptive_buffer_size: true
-  insertion_buffer_time_secs: 5
-  max_buffer_size: 50
-  min_buffer_size: 10
-  remove_empty_fields: false
-  stop_max_trials: 240
-  stop_trials_sleep: 0.01
+  insertion_buffer_time_secs: 5 # Time interval (in seconds) to buffer incoming records before flushing to the database
+  buffer_size: 50 # Maximum number of records to hold in the buffer before forcing a flush
+  remove_empty_fields: false # If true, fields with null/empty values will be removed before insertion
+  stop_max_trials: 240 # Maximum number of trials before giving up when waiting for a fully safe stop (i.e., all records have been inserted as expected).
+  stop_trials_sleep: 0.01 # Sleep duration (in seconds) between trials when waiting for a fully safe stop.
+
+agent:
+  enabled: false
+  mcp_host: localhost
+  mcp_port: 8000
+  llm_server_url: '?'
+  api_key: '?'
+  model: '?'
+  model_kwargs: {}
 
 databases:
 
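The new `agent` block is read through the `AGENT` mapping in `flowcept.configs`, which is how `aec_agent_mock.py` picks up the LLM server URL and API key. A minimal sketch:

```python
from flowcept.configs import AGENT

# Keys mirror the sample settings above; llm_server_url and api_key feed the
# SambaStudio environment variables exported in aec_agent_mock.py.
if AGENT.get("enabled"):
    print(AGENT.get("mcp_host"), AGENT.get("mcp_port"))
    print(AGENT.get("model"), AGENT.get("model_kwargs"))
```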
@@ -89,20 +99,30 @@ databases:
     host: localhost
     port: 27017
     db: flowcept
-    create_collection_index: true
+    create_collection_index: true # Whether flowcept should create collection indices if they haven't been created yet. This is done only at the Flowcept start up.
 
 adapters:
   # For each key below, you can have multiple instances. Like mlflow1, mlflow2; zambeze1, zambeze2. Use an empty dict, {}, if you won't use any adapter.
-  zambeze:
-    kind: zambeze
-    host: localhost
-    port: 5672
-    queue_names:
-      - hello
-      - hello2
-    # key_values_to_filter:
-    #   - key: activity_status
-    #     value: CREATED
+
+  broker_mqtt:
+    kind: broker
+    host: h
+    port: 30011
+    protocol: mqtt3.1.1
+    queues: ["#"]
+    username: postman
+    password: p
+    qos: 2
+    task_subtype: intersect_msg
+    tracked_keys:
+      used: payload
+      generated: ~
+      custom_metadata: [headers, msgId]
+      activity_id: operationId
+      submitted_at: ~
+      started_at: ~
+      ended_at: ~
+      registered_at: ~
 
   mlflow:
     kind: mlflow
@@ -125,3 +145,4 @@ adapters:
     worker_should_get_output: true
     scheduler_create_timestamps: true
     worker_create_timestamps: false
+
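With `broker_mqtt` configured as above, messages published to the broker are intercepted and mapped into tasks via `tracked_keys` (payload -> `used`, operationId -> `activity_id`). A hypothetical publisher that such a deployment would capture, reusing the placeholder host and credentials from the sample (paho-mqtt 1.x constructor shown; 2.x additionally takes a callback API version):

```python
import json

import paho.mqtt.client as mqtt

# Placeholder connection values copied from the sample settings above.
client = mqtt.Client(protocol=mqtt.MQTTv311)  # matches protocol: mqtt3.1.1
client.username_pw_set("postman", "p")
client.connect("h", 30011)
client.publish(
    "some/topic",  # the adapter subscribes to "#", i.e., all topics
    json.dumps({"operationId": "print_layer", "payload": {"layer": 1}}),
    qos=2,
)
client.disconnect()
```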
@@ -0,0 +1,33 @@
+import redis
+import msgpack
+
+from flowcept.commons.daos.mq_dao.mq_dao_redis import MQDaoRedis
+from flowcept.configs import MQ_HOST, MQ_PORT, MQ_CHANNEL, KVDB_URI
+# Connect to Redis
+redis_client = (
+    redis.from_url(KVDB_URI) if KVDB_URI else redis.Redis(host=MQ_HOST, port=MQ_PORT, db=0)
+)
+# Subscribe to a channel
+pubsub = redis_client.pubsub()
+pubsub.subscribe(MQ_CHANNEL)
+
+print("Listening for messages...")
+
+
+for message in pubsub.listen():
+    print()
+    print("Received a message!", end=' ')
+    if message and message["type"] in MQDaoRedis.MESSAGE_TYPES_IGNORE:
+        continue
+
+    if not isinstance(message["data"], (bytes, bytearray)):
+        print(
+            f"Skipping message with unexpected data type: {type(message["data"])} - {message["data"]}")
+        continue
+
+    try:
+        msg_obj = msgpack.loads(message["data"], strict_map_key=False)
+        msg_type = msg_obj.get("type", None)
+        print(msg_type)
+    except Exception as e:
+        print(e)