flowcept 0.9.18__tar.gz → 0.9.19__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (224)
  1. {flowcept-0.9.18 → flowcept-0.9.19}/Makefile +3 -3
  2. {flowcept-0.9.18 → flowcept-0.9.19}/PKG-INFO +1 -1
  3. {flowcept-0.9.18 → flowcept-0.9.19}/deployment/Dockerfile +2 -5
  4. flowcept-0.9.19/docs/agent.rst +43 -0
  5. {flowcept-0.9.18 → flowcept-0.9.19}/docs/index.rst +1 -0
  6. {flowcept-0.9.18 → flowcept-0.9.19}/docs/prov_capture.rst +1 -1
  7. {flowcept-0.9.18 → flowcept-0.9.19}/pyproject.toml +8 -1
  8. {flowcept-0.9.18 → flowcept-0.9.19}/resources/sample_settings.yaml +1 -1
  9. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/agents/agent_client.py +10 -4
  10. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/agents/agents_utils.py +12 -19
  11. flowcept-0.9.19/src/flowcept/agents/flowcept_agent.py +134 -0
  12. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/agents/flowcept_ctx_manager.py +31 -24
  13. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/commons/daos/keyvalue_dao.py +12 -3
  14. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/commons/daos/mq_dao/mq_dao_base.py +37 -20
  15. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/configs.py +13 -2
  16. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/flowceptor/consumers/agent/base_agent_context_manager.py +2 -2
  17. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/flowceptor/consumers/base_consumer.py +22 -4
  18. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/flowceptor/consumers/document_inserter.py +22 -1
  19. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/version.py +1 -1
  20. flowcept-0.9.19/tests/agent/agent_tests.py +95 -0
  21. flowcept-0.9.18/src/flowcept/agents/flowcept_agent.py +0 -33
  22. flowcept-0.9.18/tests/agent/agent_tests.py +0 -21
  23. {flowcept-0.9.18 → flowcept-0.9.19}/.github/workflows/checks.yml +0 -0
  24. {flowcept-0.9.18 → flowcept-0.9.19}/.github/workflows/create-release-n-publish.yml +0 -0
  25. {flowcept-0.9.18 → flowcept-0.9.19}/.github/workflows/run-llm-tests.yml +0 -0
  26. {flowcept-0.9.18 → flowcept-0.9.19}/.github/workflows/run-tests-all-dbs.yml +0 -0
  27. {flowcept-0.9.18 → flowcept-0.9.19}/.github/workflows/run-tests-in-container.yml +0 -0
  28. {flowcept-0.9.18 → flowcept-0.9.19}/.github/workflows/run-tests-kafka.yml +0 -0
  29. {flowcept-0.9.18 → flowcept-0.9.19}/.github/workflows/run-tests-py313.yml +0 -0
  30. {flowcept-0.9.18 → flowcept-0.9.19}/.github/workflows/run-tests-simple.yml +0 -0
  31. {flowcept-0.9.18 → flowcept-0.9.19}/.github/workflows/run-tests.yml +0 -0
  32. {flowcept-0.9.18 → flowcept-0.9.19}/.github/workflows/run_examples.sh +0 -0
  33. {flowcept-0.9.18 → flowcept-0.9.19}/.github/workflows/version_bumper.py +0 -0
  34. {flowcept-0.9.18 → flowcept-0.9.19}/.gitignore +0 -0
  35. {flowcept-0.9.18 → flowcept-0.9.19}/.readthedocs.yaml +0 -0
  36. {flowcept-0.9.18 → flowcept-0.9.19}/CONTRIBUTING.md +0 -0
  37. {flowcept-0.9.18 → flowcept-0.9.19}/LICENSE +0 -0
  38. {flowcept-0.9.18 → flowcept-0.9.19}/README.md +0 -0
  39. {flowcept-0.9.18 → flowcept-0.9.19}/deployment/compose-grafana.yml +0 -0
  40. {flowcept-0.9.18 → flowcept-0.9.19}/deployment/compose-kafka.yml +0 -0
  41. {flowcept-0.9.18 → flowcept-0.9.19}/deployment/compose-mofka.yml +0 -0
  42. {flowcept-0.9.18 → flowcept-0.9.19}/deployment/compose-mongo.yml +0 -0
  43. {flowcept-0.9.18 → flowcept-0.9.19}/deployment/compose.yml +0 -0
  44. {flowcept-0.9.18 → flowcept-0.9.19}/docs/api-reference.rst +0 -0
  45. {flowcept-0.9.18 → flowcept-0.9.19}/docs/architecture.rst +0 -0
  46. {flowcept-0.9.18 → flowcept-0.9.19}/docs/cli-reference.rst +0 -0
  47. {flowcept-0.9.18 → flowcept-0.9.19}/docs/conf.py +0 -0
  48. {flowcept-0.9.18 → flowcept-0.9.19}/docs/contributing.rst +0 -0
  49. {flowcept-0.9.18 → flowcept-0.9.19}/docs/img/PROV-AGENT.svg +0 -0
  50. {flowcept-0.9.18 → flowcept-0.9.19}/docs/img/architecture-diagram.png +0 -0
  51. {flowcept-0.9.18 → flowcept-0.9.19}/docs/img/flowcept-logo-dark.png +0 -0
  52. {flowcept-0.9.18 → flowcept-0.9.19}/docs/img/flowcept-logo.png +0 -0
  53. {flowcept-0.9.18 → flowcept-0.9.19}/docs/large_data.rst +0 -0
  54. {flowcept-0.9.18 → flowcept-0.9.19}/docs/prov_query.rst +0 -0
  55. {flowcept-0.9.18 → flowcept-0.9.19}/docs/prov_storage.rst +0 -0
  56. {flowcept-0.9.18 → flowcept-0.9.19}/docs/publications/README.md +0 -0
  57. {flowcept-0.9.18 → flowcept-0.9.19}/docs/quick_start.rst +0 -0
  58. {flowcept-0.9.18 → flowcept-0.9.19}/docs/schemas.rst +0 -0
  59. {flowcept-0.9.18 → flowcept-0.9.19}/docs/setup.rst +0 -0
  60. {flowcept-0.9.18 → flowcept-0.9.19}/docs/task_schema.rst +0 -0
  61. {flowcept-0.9.18 → flowcept-0.9.19}/docs/telemetry_capture.rst +0 -0
  62. {flowcept-0.9.18 → flowcept-0.9.19}/docs/workflow_schema.rst +0 -0
  63. {flowcept-0.9.18 → flowcept-0.9.19}/examples/agents/a2a/README.md +0 -0
  64. {flowcept-0.9.18 → flowcept-0.9.19}/examples/agents/a2a/agent1.py +0 -0
  65. {flowcept-0.9.18 → flowcept-0.9.19}/examples/agents/a2a/agent2.py +0 -0
  66. {flowcept-0.9.18 → flowcept-0.9.19}/examples/agents/aec_agent_context_manager.py +0 -0
  67. {flowcept-0.9.18 → flowcept-0.9.19}/examples/agents/aec_agent_mock.py +0 -0
  68. {flowcept-0.9.18 → flowcept-0.9.19}/examples/agents/aec_prompts.py +0 -0
  69. {flowcept-0.9.18 → flowcept-0.9.19}/examples/agents/opt_driver_mock.py +0 -0
  70. {flowcept-0.9.18 → flowcept-0.9.19}/examples/consumers/ping_pong_example.py +0 -0
  71. {flowcept-0.9.18 → flowcept-0.9.19}/examples/consumers/simple_consumer.py +0 -0
  72. {flowcept-0.9.18 → flowcept-0.9.19}/examples/consumers/simple_publisher.py +0 -0
  73. {flowcept-0.9.18 → flowcept-0.9.19}/examples/convergence_loop_example.py +0 -0
  74. {flowcept-0.9.18 → flowcept-0.9.19}/examples/dask_example.py +0 -0
  75. {flowcept-0.9.18 → flowcept-0.9.19}/examples/distributed_consumer_example.py +0 -0
  76. {flowcept-0.9.18 → flowcept-0.9.19}/examples/instrumented_loop_example.py +0 -0
  77. {flowcept-0.9.18 → flowcept-0.9.19}/examples/instrumented_simple_example.py +0 -0
  78. {flowcept-0.9.18 → flowcept-0.9.19}/examples/llm_complex/README.md +0 -0
  79. {flowcept-0.9.18 → flowcept-0.9.19}/examples/llm_complex/custom_provenance_id_mapping.yaml +0 -0
  80. {flowcept-0.9.18 → flowcept-0.9.19}/examples/llm_complex/llm_dataprep.py +0 -0
  81. {flowcept-0.9.18 → flowcept-0.9.19}/examples/llm_complex/llm_main_example.py +0 -0
  82. {flowcept-0.9.18 → flowcept-0.9.19}/examples/llm_complex/llm_model.py +0 -0
  83. {flowcept-0.9.18 → flowcept-0.9.19}/examples/llm_complex/llm_test_runner.py +0 -0
  84. {flowcept-0.9.18 → flowcept-0.9.19}/examples/llm_tutorial/README.md +0 -0
  85. {flowcept-0.9.18 → flowcept-0.9.19}/examples/llm_tutorial/analysis.ipynb +0 -0
  86. {flowcept-0.9.18 → flowcept-0.9.19}/examples/llm_tutorial/llm_dataprep.py +0 -0
  87. {flowcept-0.9.18 → flowcept-0.9.19}/examples/llm_tutorial/llm_model.py +0 -0
  88. {flowcept-0.9.18 → flowcept-0.9.19}/examples/llm_tutorial/llm_train_campaign.py +0 -0
  89. {flowcept-0.9.18 → flowcept-0.9.19}/examples/mlflow_example.py +0 -0
  90. {flowcept-0.9.18 → flowcept-0.9.19}/examples/mqtt_example.py +0 -0
  91. {flowcept-0.9.18 → flowcept-0.9.19}/examples/single_layer_perceptron_example.py +0 -0
  92. {flowcept-0.9.18 → flowcept-0.9.19}/examples/start_here.py +0 -0
  93. {flowcept-0.9.18 → flowcept-0.9.19}/examples/tensorboard_example.py +0 -0
  94. {flowcept-0.9.18 → flowcept-0.9.19}/examples/unmanaged/main.py +0 -0
  95. {flowcept-0.9.18 → flowcept-0.9.19}/examples/unmanaged/simple_task.py +0 -0
  96. {flowcept-0.9.18 → flowcept-0.9.19}/notebooks/analytics.ipynb +0 -0
  97. {flowcept-0.9.18 → flowcept-0.9.19}/notebooks/dask.ipynb +0 -0
  98. {flowcept-0.9.18 → flowcept-0.9.19}/notebooks/dask_from_CLI.ipynb +0 -0
  99. {flowcept-0.9.18 → flowcept-0.9.19}/notebooks/mlflow.ipynb +0 -0
  100. {flowcept-0.9.18 → flowcept-0.9.19}/notebooks/reset_dask_nb_exec_counts.py +0 -0
  101. {flowcept-0.9.18 → flowcept-0.9.19}/notebooks/tensorboard.ipynb +0 -0
  102. {flowcept-0.9.18 → flowcept-0.9.19}/resources/mofka/bedrock_setup.sh +0 -0
  103. {flowcept-0.9.18 → flowcept-0.9.19}/resources/mofka/consumer.py +0 -0
  104. {flowcept-0.9.18 → flowcept-0.9.19}/resources/mofka/mofka-requirements.yaml +0 -0
  105. {flowcept-0.9.18 → flowcept-0.9.19}/resources/mofka/mofka_config.json +0 -0
  106. {flowcept-0.9.18 → flowcept-0.9.19}/resources/ontology/catalog-v001.xml +0 -0
  107. {flowcept-0.9.18 → flowcept-0.9.19}/resources/ontology/prov_agent.owl +0 -0
  108. {flowcept-0.9.18 → flowcept-0.9.19}/resources/simple_redis_consumer.py +0 -0
  109. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/__init__.py +0 -0
  110. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/agents/__init__.py +0 -0
  111. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/agents/dynamic_schema_tracker.py +0 -0
  112. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/agents/gui/__init__.py +0 -0
  113. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/agents/gui/agent_gui.py +0 -0
  114. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/agents/gui/audio_utils.py +0 -0
  115. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/agents/gui/gui_utils.py +0 -0
  116. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/agents/llms/__init__.py +0 -0
  117. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/agents/llms/claude_gcp.py +0 -0
  118. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/agents/llms/gemini25.py +0 -0
  119. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/agents/prompts/__init__.py +0 -0
  120. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/agents/prompts/general_prompts.py +0 -0
  121. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/agents/prompts/in_memory_query_prompts.py +0 -0
  122. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/agents/tools/__init__.py +0 -0
  123. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/agents/tools/general_tools.py +0 -0
  124. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/agents/tools/in_memory_queries/__init__.py +0 -0
  125. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/agents/tools/in_memory_queries/in_memory_queries_tools.py +0 -0
  126. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/agents/tools/in_memory_queries/pandas_agent_utils.py +0 -0
  127. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/analytics/__init__.py +0 -0
  128. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/analytics/analytics_utils.py +0 -0
  129. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/analytics/data_augmentation.py +0 -0
  130. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/analytics/plot.py +0 -0
  131. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/cli.py +0 -0
  132. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/commons/__init__.py +0 -0
  133. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/commons/autoflush_buffer.py +0 -0
  134. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/commons/daos/__init__.py +0 -0
  135. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/commons/daos/docdb_dao/__init__.py +0 -0
  136. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/commons/daos/docdb_dao/docdb_dao_base.py +0 -0
  137. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/commons/daos/docdb_dao/lmdb_dao.py +0 -0
  138. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/commons/daos/docdb_dao/mongodb_dao.py +0 -0
  139. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/commons/daos/mq_dao/__init__.py +0 -0
  140. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/commons/daos/mq_dao/mq_dao_kafka.py +0 -0
  141. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/commons/daos/mq_dao/mq_dao_mofka.py +0 -0
  142. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/commons/daos/mq_dao/mq_dao_redis.py +0 -0
  143. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/commons/daos/redis_conn.py +0 -0
  144. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/commons/flowcept_dataclasses/__init__.py +0 -0
  145. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/commons/flowcept_dataclasses/base_settings_dataclasses.py +0 -0
  146. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/commons/flowcept_dataclasses/task_object.py +0 -0
  147. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/commons/flowcept_dataclasses/telemetry.py +0 -0
  148. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/commons/flowcept_dataclasses/workflow_object.py +0 -0
  149. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/commons/flowcept_logger.py +0 -0
  150. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/commons/query_utils.py +0 -0
  151. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/commons/settings_factory.py +0 -0
  152. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/commons/task_data_preprocess.py +0 -0
  153. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/commons/utils.py +0 -0
  154. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/commons/vocabulary.py +0 -0
  155. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/flowcept_api/__init__.py +0 -0
  156. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/flowcept_api/db_api.py +0 -0
  157. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/flowcept_api/flowcept_controller.py +0 -0
  158. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/flowcept_api/task_query_api.py +0 -0
  159. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/flowcept_webserver/__init__.py +0 -0
  160. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/flowcept_webserver/app.py +0 -0
  161. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/flowcept_webserver/resources/__init__.py +0 -0
  162. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/flowcept_webserver/resources/query_rsrc.py +0 -0
  163. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/flowcept_webserver/resources/task_messages_rsrc.py +0 -0
  164. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/flowceptor/__init__.py +0 -0
  165. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/flowceptor/adapters/__init__.py +0 -0
  166. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/flowceptor/adapters/base_interceptor.py +0 -0
  167. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/flowceptor/adapters/brokers/__init__.py +0 -0
  168. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/flowceptor/adapters/brokers/mqtt_interceptor.py +0 -0
  169. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/flowceptor/adapters/dask/__init__.py +0 -0
  170. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/flowceptor/adapters/dask/dask_dataclasses.py +0 -0
  171. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/flowceptor/adapters/dask/dask_interceptor.py +0 -0
  172. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/flowceptor/adapters/dask/dask_plugins.py +0 -0
  173. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/flowceptor/adapters/instrumentation_interceptor.py +0 -0
  174. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/flowceptor/adapters/interceptor_state_manager.py +0 -0
  175. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/flowceptor/adapters/mlflow/__init__.py +0 -0
  176. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/flowceptor/adapters/mlflow/interception_event_handler.py +0 -0
  177. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/flowceptor/adapters/mlflow/mlflow_dao.py +0 -0
  178. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/flowceptor/adapters/mlflow/mlflow_dataclasses.py +0 -0
  179. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/flowceptor/adapters/mlflow/mlflow_interceptor.py +0 -0
  180. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/flowceptor/adapters/tensorboard/__init__.py +0 -0
  181. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/flowceptor/adapters/tensorboard/tensorboard_dataclasses.py +0 -0
  182. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/flowceptor/adapters/tensorboard/tensorboard_interceptor.py +0 -0
  183. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/flowceptor/consumers/__init__.py +0 -0
  184. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/flowceptor/consumers/agent/__init__.py +0 -0
  185. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/flowceptor/consumers/consumer_utils.py +0 -0
  186. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/flowceptor/telemetry_capture.py +0 -0
  187. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/instrumentation/__init__.py +0 -0
  188. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/instrumentation/flowcept_agent_task.py +0 -0
  189. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/instrumentation/flowcept_decorator.py +0 -0
  190. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/instrumentation/flowcept_loop.py +0 -0
  191. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/instrumentation/flowcept_task.py +0 -0
  192. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/instrumentation/flowcept_torch.py +0 -0
  193. {flowcept-0.9.18 → flowcept-0.9.19}/src/flowcept/instrumentation/task_capture.py +0 -0
  194. {flowcept-0.9.18 → flowcept-0.9.19}/tests/__init__.py +0 -0
  195. {flowcept-0.9.18 → flowcept-0.9.19}/tests/adapters/__init__.py +0 -0
  196. {flowcept-0.9.18 → flowcept-0.9.19}/tests/adapters/dask_test_utils.py +0 -0
  197. {flowcept-0.9.18 → flowcept-0.9.19}/tests/adapters/test_broker.py +0 -0
  198. {flowcept-0.9.18 → flowcept-0.9.19}/tests/adapters/test_dask.py +0 -0
  199. {flowcept-0.9.18 → flowcept-0.9.19}/tests/adapters/test_dask_with_context_mgmt.py +0 -0
  200. {flowcept-0.9.18 → flowcept-0.9.19}/tests/adapters/test_file_observer.py +0 -0
  201. {flowcept-0.9.18 → flowcept-0.9.19}/tests/adapters/test_mlflow.py +0 -0
  202. {flowcept-0.9.18 → flowcept-0.9.19}/tests/adapters/test_tensorboard.py +0 -0
  203. {flowcept-0.9.18 → flowcept-0.9.19}/tests/agent/__init__.py +0 -0
  204. {flowcept-0.9.18 → flowcept-0.9.19}/tests/api/__init__.py +0 -0
  205. {flowcept-0.9.18 → flowcept-0.9.19}/tests/api/db_api_test.py +0 -0
  206. {flowcept-0.9.18 → flowcept-0.9.19}/tests/api/flowcept_api_test.py +0 -0
  207. {flowcept-0.9.18 → flowcept-0.9.19}/tests/api/sample_data.json +0 -0
  208. {flowcept-0.9.18 → flowcept-0.9.19}/tests/api/sample_data_with_telemetry_and_rai.json +0 -0
  209. {flowcept-0.9.18 → flowcept-0.9.19}/tests/api/task_query_api_test.py +0 -0
  210. {flowcept-0.9.18 → flowcept-0.9.19}/tests/conftest.py +0 -0
  211. {flowcept-0.9.18 → flowcept-0.9.19}/tests/doc_db_inserter/__init__.py +0 -0
  212. {flowcept-0.9.18 → flowcept-0.9.19}/tests/doc_db_inserter/doc_db_inserter_test.py +0 -0
  213. {flowcept-0.9.18 → flowcept-0.9.19}/tests/instrumentation_tests/__init__.py +0 -0
  214. {flowcept-0.9.18 → flowcept-0.9.19}/tests/instrumentation_tests/flowcept_explicit_tasks.py +0 -0
  215. {flowcept-0.9.18 → flowcept-0.9.19}/tests/instrumentation_tests/flowcept_loop_test.py +0 -0
  216. {flowcept-0.9.18 → flowcept-0.9.19}/tests/instrumentation_tests/flowcept_task_decorator_test.py +0 -0
  217. {flowcept-0.9.18 → flowcept-0.9.19}/tests/instrumentation_tests/ml_tests/__init__.py +0 -0
  218. {flowcept-0.9.18 → flowcept-0.9.19}/tests/instrumentation_tests/ml_tests/dl_trainer.py +0 -0
  219. {flowcept-0.9.18 → flowcept-0.9.19}/tests/instrumentation_tests/ml_tests/ml_decorator_dask_test.py +0 -0
  220. {flowcept-0.9.18 → flowcept-0.9.19}/tests/instrumentation_tests/ml_tests/ml_decorator_test.py +0 -0
  221. {flowcept-0.9.18 → flowcept-0.9.19}/tests/misc_tests/__init__.py +0 -0
  222. {flowcept-0.9.18 → flowcept-0.9.19}/tests/misc_tests/log_test.py +0 -0
  223. {flowcept-0.9.18 → flowcept-0.9.19}/tests/misc_tests/singleton_test.py +0 -0
  224. {flowcept-0.9.18 → flowcept-0.9.19}/tests/misc_tests/telemetry_test.py +0 -0
@@ -77,13 +77,13 @@ run:
77
77
  docker run --rm -v $(shell pwd):/flowcept -e KVDB_HOST=flowcept_redis -e MQ_HOST=flowcept_redis -e MONGO_HOST=flowcept_mongo --network flowcept_default -it flowcept
78
78
 
79
79
  tests-in-container-mongo:
80
- docker run --rm -v $(shell pwd):/flowcept -e KVDB_HOST=flowcept_redis -e MQ_HOST=flowcept_redis -e MONGO_HOST=flowcept_mongo -e MONGO_ENABLED=true -e LMDB_ENABLED=false --network flowcept_default flowcept /opt/conda/envs/flowcept/bin/pytest --ignore=tests/instrumentation_tests/ml_tests
80
+ docker run --rm -v $(shell pwd):/flowcept -e KVDB_HOST=flowcept_redis -e MQ_HOST=flowcept_redis -e MONGO_HOST=flowcept_mongo -e MONGO_ENABLED=true -e LMDB_ENABLED=false --network flowcept_default flowcept /opt/conda/envs/flowcept/bin/pytest tests --ignore=tests/instrumentation_tests/ml_tests
81
81
 
82
82
  tests-in-container:
83
- docker run --rm -v $(shell pwd):/flowcept -e KVDB_HOST=flowcept_redis -e MQ_HOST=flowcept_redis -e MONGO_ENABLED=false -e LMDB_ENABLED=true --network flowcept_default flowcept /opt/conda/envs/flowcept/bin/pytest --ignore=tests/instrumentation_tests/ml_tests
83
+ docker run --rm -v $(shell pwd):/flowcept -e KVDB_HOST=flowcept_redis -e MQ_HOST=flowcept_redis -e MONGO_ENABLED=false -e LMDB_ENABLED=true --network flowcept_default flowcept /opt/conda/envs/flowcept/bin/pytest tests --ignore=tests/instrumentation_tests/ml_tests
84
84
 
85
85
  tests-in-container-kafka:
86
- docker run --rm -v $(shell pwd):/flowcept -e KVDB_HOST=flowcept_redis -e MQ_HOST=kafka -e MONGO_HOST=flowcept_mongo -e MQ_PORT=29092 -e MQ_TYPE=kafka -e MONGO_ENABLED=true -e LMDB_ENABLED=false --network flowcept_default flowcept /opt/conda/envs/flowcept/bin/pytest --ignore=tests/instrumentation_tests/ml_tests
86
+ docker run --rm -v $(shell pwd):/flowcept -e KVDB_HOST=flowcept_redis -e MQ_HOST=kafka -e MONGO_HOST=flowcept_mongo -e MQ_PORT=29092 -e MQ_TYPE=kafka -e MONGO_ENABLED=true -e LMDB_ENABLED=false --network flowcept_default flowcept /opt/conda/envs/flowcept/bin/pytest tests --ignore=tests/instrumentation_tests/ml_tests
87
87
 
88
88
  # This command can be removed once we have our CLI
89
89
  liveness:
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: flowcept
3
- Version: 0.9.18
3
+ Version: 0.9.19
4
4
  Summary: Capture and query workflow provenance data using data observability
5
5
  Author: Oak Ridge National Laboratory
6
6
  License-Expression: MIT
@@ -1,9 +1,6 @@
1
1
  # Use the command `make build` to build this image.
2
- FROM miniconda:local
3
-
4
- RUN apt-get update && \
5
- apt-get install -y vim curl wget make patch gcc \
6
- && rm -rf /var/lib/apt/lists/*
2
+ # Use conda-forge base image consistently across local/CI.
3
+ FROM condaforge/miniforge3:23.11.0-0
7
4
 
8
5
  WORKDIR /flowcept
9
6
 
@@ -0,0 +1,43 @@
1
+ Flowcept Agent
2
+ ==============
3
+
4
+ The Flowcept Agent is an MCP-powered LLM interface for querying provenance data. It exposes a small set of tools
5
+ (e.g., ``prompt_handler``) that route natural-language questions to in-memory queries over captured task summaries.
6
+
7
+ Online-first design
8
+ -------------------
9
+ Like Flowcept as a whole, the agent is designed to run **while a workflow is still executing**. In online mode,
10
+ it consumes messages from the MQ (typically Redis) so it can respond to queries in near real time. This is the
11
+ recommended setup for interactive RAG/MCP analysis during live runs.
12
+
13
+ Offline (file-based) queries
14
+ ----------------------------
15
+ For simple tests or disconnected environments, the agent can also be initialized from a **JSONL buffer file**.
16
+ In this mode, Flowcept writes messages to disk (``dump_buffer``), and the agent loads the file once at startup
17
+ before serving queries.
18
+
19
+ This is a minimal offline example:
20
+
21
+ .. code-block:: python
22
+
23
+ import json
24
+ from flowcept import Flowcept, flowcept_task
25
+ from flowcept.agents.flowcept_agent import FlowceptAgent
26
+
27
+ @flowcept_task
28
+ def sum_one(x):
29
+ return x + 1
30
+
31
+ # Run a small workflow and dump the buffer to disk
32
+ with Flowcept(start_persistence=False, save_workflow=False, check_safe_stops=False) as f:
33
+ sum_one(1)
34
+ f.dump_buffer("flowcept_buffer.jsonl")
35
+
36
+ # Start the agent from the buffer file and query it
37
+ agent = FlowceptAgent(buffer_path="flowcept_buffer.jsonl")
38
+ agent.start()
39
+ resp = agent.query("how many tasks?")
40
+ print(json.loads(resp))
41
+ agent.stop()
42
+
43
+ In the future, this page will include a full **online** example (live MQ + Redis) and deployment guidance.
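The online example is deferred in the new page above; purely as an illustrative sketch of what live usage might look like (assuming ``mq.enabled`` is true in the settings, so that ``FlowceptAgent.start()`` in the new ``flowcept_agent.py`` below skips the buffer load and the agent consumes messages as they are published; the exact ``Flowcept`` arguments for an online run are not prescribed by this diff):

    from flowcept import Flowcept, flowcept_task
    from flowcept.agents.flowcept_agent import FlowceptAgent

    @flowcept_task
    def sum_one(x):
        return x + 1

    # Hypothetical online flow: start the agent first, run the workflow with MQ enabled,
    # then query while (or after) task messages stream in.
    agent = FlowceptAgent().start()
    with Flowcept():
        sum_one(1)
    print(agent.query("how many tasks?"))
    agent.stop()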
@@ -57,6 +57,7 @@ Flowcept
57
57
  quick_start
58
58
  architecture
59
59
  setup
60
+ agent
60
61
  prov_capture
61
62
  telemetry_capture
62
63
  prov_storage
@@ -124,7 +124,7 @@ Optional Arguments
124
124
 
125
125
  When creating a ``Flowcept`` instance (with or without a context manager), you can pass:
126
126
 
127
- - **interceptors**: list of interceptors (e.g., ``"instrumentation"``, ``"dask"``, ``"mlflow"``). Defaults to ``["instrumentation"]`` if enabled.
127
+ - **interceptors**: list of interceptors (e.g., ``"instrumentation"``, ``"dask"``, ``"mlflow"``). Defaults to ``["instrumentation"]`` if enabled. Instrumentation defaults to enabled unless explicitly set to ``false`` in settings.
128
128
  - **bundle_exec_id**: identifier for grouping interceptors. Defaults to ``id(self)``.
129
129
  - **campaign_id**: unique identifier for the campaign. Defaults to a generated UUID.
130
130
  - **workflow_id**: unique identifier for the workflow. Defaults to a generated UUID.
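For reference, a minimal sketch of passing these optional arguments (parameter names are taken from the documentation above; the identifier values are illustrative):

    from flowcept import Flowcept

    # Pin identifiers explicitly instead of relying on the generated defaults.
    with Flowcept(
        interceptors=["instrumentation"],
        campaign_id="campaign-2024-eval",   # hypothetical campaign identifier
        workflow_id="my-workflow-id",       # hypothetical workflow identifier
    ):
        pass  # instrumented work would go here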
@@ -130,5 +130,12 @@ packages = ["src/flowcept"]
130
130
  [tool.hatch.build.targets.wheel.force-include]
131
131
  "resources/sample_settings.yaml" = "resources/sample_settings.yaml"
132
132
 
133
+ [tool.pytest.ini_options]
134
+ filterwarnings = [
135
+ "ignore:websockets\\.legacy is deprecated:DeprecationWarning",
136
+ "ignore:websockets\\.server\\.WebSocketServerProtocol is deprecated:DeprecationWarning",
137
+ "ignore:Use `streamable_http_client` instead\\.:DeprecationWarning",
138
+ ]
139
+
133
140
  [project.scripts]
134
- flowcept = "flowcept.cli:main"
141
+ flowcept = "flowcept.cli:main"
@@ -1,4 +1,4 @@
1
- flowcept_version: 0.9.18 # Version of the Flowcept package. This setting file is compatible with this version.
1
+ flowcept_version: 0.9.19 # Version of the Flowcept package. This setting file is compatible with this version.
2
2
 
3
3
  project:
4
4
  debug: true # Toggle debug mode. This will add a property `debug: true` to all saved data, making it easier to retrieve/delete them later.
@@ -1,4 +1,6 @@
1
1
  import asyncio
2
+ import json
3
+ import re
2
4
  from typing import Dict, List, Callable
3
5
 
4
6
  from flowcept.configs import AGENT_HOST, AGENT_PORT
@@ -48,10 +50,14 @@ def run_tool(
48
50
  result: List[TextContent] = await session.call_tool(tool_name, arguments=kwargs)
49
51
  actual_result = []
50
52
  for r in result.content:
51
- if isinstance(r, str):
52
- actual_result.append(r)
53
- else:
54
- actual_result.append(r.text)
53
+ text = r if isinstance(r, str) else r.text
54
+ try:
55
+ json.loads(text)
56
+ actual_result.append(text)
57
+ except Exception:
58
+ match = re.search(r"Error code:\s*(\d+)", text)
59
+ code = int(match.group(1)) if match else 400
60
+ actual_result.append(json.dumps({"code": code, "result": text, "tool_name": tool_name}))
55
61
 
56
62
  return actual_result
57
63
 
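In effect, tool outputs that are not already valid JSON are now wrapped in a small JSON envelope, with an error code guessed from the text. A standalone re-implementation of just that branch, for illustration (``_normalize`` is a hypothetical helper, not part of the client):

    import json
    import re

    def _normalize(text: str, tool_name: str = "some_tool") -> str:
        try:
            json.loads(text)  # already JSON: pass through unchanged
            return text
        except Exception:
            match = re.search(r"Error code:\s*(\d+)", text)
            code = int(match.group(1)) if match else 400
            return json.dumps({"code": code, "result": text, "tool_name": tool_name})

    print(_normalize('{"code": 201, "result": "ok"}'))        # passes through as-is
    print(_normalize("Error code: 429 Too Many Requests"))    # wrapped with code 429
    print(_normalize("something went wrong"))                 # wrapped with default code 400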
@@ -139,8 +139,8 @@ def build_llm_model(
139
139
  if _service_provider == "sambanova":
140
140
  from langchain_community.llms.sambanova import SambaStudio
141
141
 
142
- os.environ["SAMBASTUDIO_URL"] = AGENT.get("llm_server_url")
143
- os.environ["SAMBASTUDIO_API_KEY"] = AGENT.get("api_key")
142
+ os.environ["SAMBASTUDIO_URL"] = os.environ.get("SAMBASTUDIO_URL", AGENT.get("llm_server_url"))
143
+ os.environ["SAMBASTUDIO_API_KEY"] = os.environ.get("SAMBASTUDIO_API_KEY", AGENT.get("api_key"))
144
144
 
145
145
  llm = SambaStudio(model_kwargs=_model_kwargs)
146
146
  elif _service_provider == "azure":
@@ -155,7 +155,16 @@ def build_llm_model(
155
155
  from langchain_openai import ChatOpenAI
156
156
 
157
157
  api_key = os.environ.get("OPENAI_API_KEY", AGENT.get("api_key", None))
158
- llm = ChatOpenAI(openai_api_key=api_key, **model_kwargs)
158
+ base_url = os.environ.get("OPENAI_BASE_URL", AGENT.get("llm_server_url") or None)
159
+ org = os.environ.get("OPENAI_ORG_ID", AGENT.get("organization", None))
160
+
161
+ init_kwargs = {"api_key": api_key}
162
+ if base_url:
163
+ init_kwargs["base_url"] = base_url
164
+ if org:
165
+ init_kwargs["organization"] = org
166
+
167
+ llm = ChatOpenAI(**init_kwargs, **_model_kwargs)
159
168
  elif _service_provider == "google":
160
169
  if "claude" in _model_kwargs["model"]:
161
170
  api_key = os.environ.get("GOOGLE_API_KEY", AGENT.get("api_key", None))
@@ -168,22 +177,6 @@ def build_llm_model(
168
177
  from flowcept.agents.llms.gemini25 import Gemini25LLM
169
178
 
170
179
  llm = Gemini25LLM(**_model_kwargs)
171
- elif _service_provider == "openai":
172
- from langchain_openai import ChatOpenAI
173
-
174
- api_key = os.environ.get("OPENAI_API_KEY", AGENT.get("api_key"))
175
- base_url = os.environ.get("OPENAI_BASE_URL", AGENT.get("llm_server_url") or None) # optional
176
- org = os.environ.get("OPENAI_ORG_ID", AGENT.get("organization", None)) # optional
177
-
178
- init_kwargs = {"api_key": api_key}
179
- if base_url:
180
- init_kwargs["base_url"] = base_url
181
- if org:
182
- init_kwargs["organization"] = org
183
-
184
- # IMPORTANT: use the merged kwargs so `model` and temps flow through
185
- llm = ChatOpenAI(**init_kwargs, **_model_kwargs)
186
-
187
180
  else:
188
181
  raise Exception("Currently supported providers are sambanova, openai, azure, and google.")
189
182
  if track_tools:
@@ -0,0 +1,134 @@
1
+ import json
2
+ import os
3
+ from threading import Thread
4
+
5
+ from flowcept.agents import check_liveness
6
+ from flowcept.agents.agents_utils import ToolResult
7
+ from flowcept.agents.tools.general_tools import prompt_handler
8
+ from flowcept.agents.agent_client import run_tool
9
+ from flowcept.agents.flowcept_ctx_manager import mcp_flowcept, ctx_manager
10
+ from flowcept.commons.flowcept_logger import FlowceptLogger
11
+ from flowcept.configs import AGENT_HOST, AGENT_PORT, DUMP_BUFFER_PATH, MQ_ENABLED
12
+ from flowcept.flowceptor.consumers.agent.base_agent_context_manager import BaseAgentContextManager
13
+ from uuid import uuid4
14
+
15
+ import uvicorn
16
+
17
+
18
+ class FlowceptAgent:
19
+ """
20
+ Flowcept agent server wrapper with optional offline buffer loading.
21
+ """
22
+
23
+ def __init__(self, buffer_path: str | None = None):
24
+ """
25
+ Initialize a FlowceptAgent.
26
+
27
+ Parameters
28
+ ----------
29
+ buffer_path : str or None
30
+ Optional path to a JSONL buffer file. When MQ is disabled, the agent
31
+ loads this file once at startup.
32
+ """
33
+ self.buffer_path = buffer_path
34
+ self.logger = FlowceptLogger()
35
+ self._server_thread: Thread | None = None
36
+ self._server = None
37
+
38
+ def _load_buffer_once(self) -> int:
39
+ """
40
+ Load messages from a JSONL buffer file into the agent context.
41
+
42
+ Returns
43
+ -------
44
+ int
45
+ Number of messages loaded.
46
+ """
47
+ path = self.buffer_path or DUMP_BUFFER_PATH
48
+ if not os.path.exists(path):
49
+ raise FileNotFoundError(f"Buffer file not found: {path}")
50
+
51
+ count = 0
52
+ self.logger.info(f"Loading agent buffer from {path}")
53
+ if ctx_manager.agent_id is None:
54
+ agent_id = str(uuid4())
55
+ BaseAgentContextManager.agent_id = agent_id
56
+ ctx_manager.agent_id = agent_id
57
+ with open(path, "r") as handle:
58
+ for line in handle:
59
+ line = line.strip()
60
+ if not line:
61
+ continue
62
+ msg_obj = json.loads(line)
63
+ ctx_manager.message_handler(msg_obj)
64
+ count += 1
65
+ self.logger.info(f"Loaded {count} messages from buffer.")
66
+ return count
67
+
68
+ def _run_server(self):
69
+ """Run the MCP server (blocking call)."""
70
+ config = uvicorn.Config(mcp_flowcept.streamable_http_app, host=AGENT_HOST, port=AGENT_PORT, lifespan="on")
71
+ self._server = uvicorn.Server(config)
72
+ self._server.run()
73
+
74
+ def start(self):
75
+ """
76
+ Start the agent server in a background thread.
77
+
78
+ Returns
79
+ -------
80
+ FlowceptAgent
81
+ The current instance.
82
+ """
83
+ if not MQ_ENABLED:
84
+ self._load_buffer_once()
85
+
86
+ self._server_thread = Thread(target=self._run_server, daemon=False)
87
+ self._server_thread.start()
88
+ self.logger.info(f"Flowcept agent server started on {AGENT_HOST}:{AGENT_PORT}")
89
+ return self
90
+
91
+ def stop(self):
92
+ """Stop the agent server and wait briefly for shutdown."""
93
+ if self._server is not None:
94
+ self._server.should_exit = True
95
+ if self._server_thread is not None:
96
+ self._server_thread.join(timeout=5)
97
+
98
+ def wait(self):
99
+ """Block until the server thread exits."""
100
+ if self._server_thread is not None:
101
+ self._server_thread.join()
102
+
103
+ def query(self, message: str) -> ToolResult:
104
+ """
105
+ Send a prompt to the agent's main router tool and return the response.
106
+ """
107
+ try:
108
+ resp = run_tool(tool_name=prompt_handler, kwargs={"message": message})[0]
109
+ except Exception as e:
110
+ return ToolResult(code=400, result=f"Error executing tool prompt_handler: {e}", tool_name="prompt_handler")
111
+
112
+ try:
113
+ return ToolResult(**json.loads(resp))
114
+ except Exception as e:
115
+ return ToolResult(
116
+ code=499,
117
+ result=f"Could not parse tool response as JSON: {resp}",
118
+ extra=str(e),
119
+ tool_name="prompt_handler",
120
+ )
121
+
122
+
123
+ def main():
124
+ """
125
+ Start the MCP server.
126
+ """
127
+ agent = FlowceptAgent().start()
128
+ # Wake up tool call
129
+ print(run_tool(check_liveness, host=AGENT_HOST, port=AGENT_PORT)[0])
130
+ agent.wait()
131
+
132
+
133
+ if __name__ == "__main__":
134
+ main()
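Because ``query()`` returns a ``ToolResult`` even on failure (codes 400 and 499 above), callers can branch on the code. A small sketch, assuming ``ToolResult`` exposes ``code`` and ``result`` as attributes (consistent with how it is constructed in this file, but not verified beyond this diff):

    agent = FlowceptAgent(buffer_path="flowcept_buffer.jsonl").start()
    resp = agent.query("how many tasks?")
    if resp.code >= 400:
        print("agent error:", resp.result)   # tool or parsing failure
    else:
        print("answer:", resp.result)
    agent.stop()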
@@ -103,7 +103,7 @@ class FlowceptAgentContextManager(BaseAgentContextManager):
103
103
  self.schema_tracker = DynamicSchemaTracker(**self.tracker_config)
104
104
  self.msgs_counter = 0
105
105
  self.context_chunk_size = 1 # Should be in the settings
106
- super().__init__()
106
+ super().__init__(allow_mq_disabled=True)
107
107
 
108
108
  def message_handler(self, msg_obj: Dict):
109
109
  """
@@ -133,12 +133,15 @@ class FlowceptAgentContextManager(BaseAgentContextManager):
133
133
  if task_msg.activity_id == "reset_user_context":
134
134
  self.context.reset_context()
135
135
  self.msgs_counter = 0
136
- FlowceptTask(
137
- agent_id=self.agent_id,
138
- generated={"msg": "Provenance Agent reset context."},
139
- subtype="agent_task",
140
- activity_id="reset_user_context",
141
- ).send()
136
+ if self._mq_dao is None:
137
+ self.logger.warning("MQ is disabled; skipping reset_user_context response message.")
138
+ else:
139
+ FlowceptTask(
140
+ agent_id=self.agent_id,
141
+ generated={"msg": "Provenance Agent reset context."},
142
+ subtype="agent_task",
143
+ activity_id="reset_user_context",
144
+ ).send()
142
145
  return True
143
146
  elif task_msg.activity_id == "provenance_query":
144
147
  self.logger.info("Received a prov query message!")
@@ -161,14 +164,17 @@ class FlowceptAgentContextManager(BaseAgentContextManager):
161
164
  status = Status.ERROR
162
165
  error = f"Could not convert the following into a ToolResult:\n{resp}\nException: {e}"
163
166
  generated = {"text": str(resp)}
164
- FlowceptTask(
165
- agent_id=self.agent_id,
166
- generated=generated,
167
- stderr=error,
168
- status=status,
169
- subtype="agent_task",
170
- activity_id="provenance_query_response",
171
- ).send()
167
+ if self._mq_dao is None:
168
+ self.logger.warning("MQ is disabled; skipping provenance_query response message.")
169
+ else:
170
+ FlowceptTask(
171
+ agent_id=self.agent_id,
172
+ generated=generated,
173
+ stderr=error,
174
+ status=status,
175
+ subtype="agent_task",
176
+ activity_id="provenance_query_response",
177
+ ).send()
172
178
 
173
179
  return True
174
180
 
@@ -200,12 +206,10 @@ class FlowceptAgentContextManager(BaseAgentContextManager):
200
206
  ]
201
207
  )
202
208
  except Exception as e:
203
- self.logger.error(
204
- f"Could not add these tasks to buffer!\n"
205
- f"{
206
- self.context.task_summaries[self.msgs_counter - self.context_chunk_size : self.msgs_counter]
207
- }"
208
- )
209
+ task_slice = self.context.task_summaries[
210
+ self.msgs_counter - self.context_chunk_size : self.msgs_counter
211
+ ]
212
+ self.logger.error(f"Could not add these tasks to buffer!\n{task_slice}")
209
213
  self.logger.exception(e)
210
214
 
211
215
  # self.monitor_chunk()
@@ -232,9 +236,12 @@ class FlowceptAgentContextManager(BaseAgentContextManager):
232
236
  if len(result):
233
237
  content = result[0].text
234
238
  if content != "Error executing tool":
235
- msg = {"type": "flowcept_agent", "info": "monitor", "content": content}
236
- self._mq_dao.send_message(msg)
237
- self.logger.debug(str(content))
239
+ if self._mq_dao is None:
240
+ self.logger.warning("MQ is disabled; skipping monitor message.")
241
+ else:
242
+ msg = {"type": "flowcept_agent", "info": "monitor", "content": content}
243
+ self._mq_dao.send_message(msg)
244
+ self.logger.debug(str(content))
238
245
  else:
239
246
  self.logger.error(content)
240
247
 
@@ -1,7 +1,5 @@
1
1
  """Key value module."""
2
2
 
3
- from flowcept.commons.daos.redis_conn import RedisConn
4
-
5
3
  from flowcept.commons.flowcept_logger import FlowceptLogger
6
4
  from flowcept.configs import (
7
5
  KVDB_HOST,
@@ -26,12 +24,23 @@ class KeyValueDAO:
26
24
 
27
25
  def __init__(self):
28
26
  if not hasattr(self, "_initialized"):
29
- self._initialized = True
30
27
  self.logger = FlowceptLogger()
28
+ from flowcept.commons.daos.redis_conn import RedisConn
29
+
31
30
  self.redis_conn = RedisConn.build_redis_conn_pool(
32
31
  host=KVDB_HOST, port=KVDB_PORT, password=KVDB_PASSWORD, uri=KVDB_URI
33
32
  )
34
33
 
34
+ self._initialized = True
35
+
36
+ @staticmethod
37
+ def get_set_name(set_id: str, exec_bundle_id=None) -> str:
38
+ """Return a consistent set name for KVDB sets."""
39
+ set_name = set_id
40
+ if exec_bundle_id is not None:
41
+ set_name += "_" + str(exec_bundle_id)
42
+ return set_name
43
+
35
44
  def delete_set(self, set_name: str):
36
45
  """Delete it."""
37
46
  self.redis_conn.delete(set_name)
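The new ``get_set_name`` helper simply appends the bundle id when one is given; for illustration:

    from flowcept.commons.daos.keyvalue_dao import KeyValueDAO

    KeyValueDAO.get_set_name("started_mq_thread_execution")       # "started_mq_thread_execution"
    KeyValueDAO.get_set_name("started_mq_thread_execution", 42)   # "started_mq_thread_execution_42"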
@@ -7,6 +7,7 @@ import msgpack
7
7
  from time import time
8
8
  import flowcept.commons
9
9
  from flowcept.commons.autoflush_buffer import AutoflushBuffer
10
+ from flowcept.commons.daos.keyvalue_dao import KeyValueDAO
10
11
  from flowcept.commons.utils import chunked
11
12
  from flowcept.commons.flowcept_logger import FlowceptLogger
12
13
  from flowcept.configs import (
@@ -29,6 +30,8 @@ class MQDao(object):
29
30
 
30
31
  ENCODER = GenericJSONEncoder if JSON_SERIALIZER == "complex" else None
31
32
  # TODO we don't have a unit test to cover complex dict!
33
+ MQ_THREAD_SET_ID = "started_mq_thread_execution"
34
+ MQ_FLUSH_COMPLETE_SET_ID = "pending_mq_flush_complete"
32
35
 
33
36
  @staticmethod
34
37
  def build(*args, **kwargs) -> "MQDao":
@@ -51,20 +54,6 @@ class MQDao(object):
51
54
  else:
52
55
  raise NotImplementedError
53
56
 
54
- @staticmethod
55
- def _get_set_name(exec_bundle_id=None):
56
- """Get the set name.
57
-
58
- :param exec_bundle_id: A way to group one or many interceptors, and
59
- treat each group as a bundle to control when their time_based
60
- threads started and ended.
61
- :return:
62
- """
63
- set_id = "started_mq_thread_execution"
64
- if exec_bundle_id is not None:
65
- set_id += "_" + str(exec_bundle_id)
66
- return set_id
67
-
68
57
  def __init__(self, adapter_settings=None):
69
58
  self.logger = FlowceptLogger()
70
59
  self.started = False
@@ -103,22 +92,36 @@ class MQDao(object):
103
92
 
104
93
  def register_time_based_thread_init(self, interceptor_instance_id: str, exec_bundle_id=None):
105
94
  """Register the time."""
106
- set_name = MQDao._get_set_name(exec_bundle_id)
95
+ set_name = KeyValueDAO.get_set_name(MQDao.MQ_THREAD_SET_ID, exec_bundle_id)
107
96
  # self.logger.info(
108
97
  # f"Register start of time_based MQ flush thread {set_name}.{interceptor_instance_id}"
109
98
  # )
110
99
  self._keyvalue_dao.add_key_into_set(set_name, interceptor_instance_id)
100
+ flush_set_name = KeyValueDAO.get_set_name(MQDao.MQ_FLUSH_COMPLETE_SET_ID, exec_bundle_id)
101
+ self._keyvalue_dao.add_key_into_set(flush_set_name, interceptor_instance_id)
111
102
 
112
103
  def register_time_based_thread_end(self, interceptor_instance_id: str, exec_bundle_id=None):
113
104
  """Register time."""
114
- set_name = MQDao._get_set_name(exec_bundle_id)
105
+ set_name = KeyValueDAO.get_set_name(MQDao.MQ_THREAD_SET_ID, exec_bundle_id)
115
106
  self.logger.info(f"Registering end of time_based MQ flush thread {set_name}.{interceptor_instance_id}")
116
107
  self._keyvalue_dao.remove_key_from_set(set_name, interceptor_instance_id)
117
108
  self.logger.info(f"Done registering time_based MQ flush thread {set_name}.{interceptor_instance_id}")
118
109
 
119
110
  def all_time_based_threads_ended(self, exec_bundle_id=None):
120
111
  """Get all time."""
121
- set_name = MQDao._get_set_name(exec_bundle_id)
112
+ set_name = KeyValueDAO.get_set_name(MQDao.MQ_THREAD_SET_ID, exec_bundle_id)
113
+ return self._keyvalue_dao.set_is_empty(set_name)
114
+
115
+ def register_flush_complete(self, interceptor_instance_id: str, exec_bundle_id=None):
116
+ """Register a flush-complete signal for an interceptor."""
117
+ set_name = KeyValueDAO.get_set_name(MQDao.MQ_FLUSH_COMPLETE_SET_ID, exec_bundle_id)
118
+ self.logger.info(f"Registering flush completion {set_name}.{interceptor_instance_id}")
119
+ self._keyvalue_dao.remove_key_from_set(set_name, interceptor_instance_id)
120
+ self.logger.info(f"Done registering flush completion {set_name}.{interceptor_instance_id}")
121
+
122
+ def all_flush_complete_received(self, exec_bundle_id=None):
123
+ """Return True when all interceptors in the bundle reported flush completion."""
124
+ set_name = KeyValueDAO.get_set_name(MQDao.MQ_FLUSH_COMPLETE_SET_ID, exec_bundle_id)
122
125
  return self._keyvalue_dao.set_is_empty(set_name)
123
126
 
124
127
  def set_campaign_id(self, campaign_id=None):
@@ -172,11 +175,14 @@ class MQDao(object):
172
175
  if self._time_based_flushing_started:
173
176
  self.buffer.stop()
174
177
  self._time_based_flushing_started = False
178
+ self.logger.debug("MQ time-based flushed for the last time!")
175
179
  else:
176
180
  self.logger.error("MQ time-based flushing is not started")
177
181
  else:
178
182
  self.buffer = list()
179
183
 
184
+ self.logger.debug("Buffer closed.")
185
+
180
186
  def _stop_timed(self, interceptor_instance_id: str, check_safe_stops: bool = True, bundle_exec_id: int = None):
181
187
  t1 = time()
182
188
  self._stop(interceptor_instance_id, check_safe_stops, bundle_exec_id)
@@ -190,10 +196,12 @@ class MQDao(object):
190
196
 
191
197
  def _stop(self, interceptor_instance_id: str = None, check_safe_stops: bool = True, bundle_exec_id: int = None):
192
198
  """Stop MQ publisher."""
193
- self.logger.debug(f"MQ pub received stop sign: bundle={bundle_exec_id}, interceptor={interceptor_instance_id}")
194
199
  self._close_buffer()
195
- self.logger.debug("Flushed MQ for the last time!")
196
- if check_safe_stops:
200
+ if check_safe_stops and MQ_ENABLED:
201
+ self.logger.debug(
202
+ f"Sending flush-complete msg. Bundle: {bundle_exec_id}; interceptor id: {interceptor_instance_id}"
203
+ )
204
+ self._send_mq_dao_flush_complete(interceptor_instance_id, bundle_exec_id)
197
205
  self.logger.debug(f"Sending stop msg. Bundle: {bundle_exec_id}; interceptor id: {interceptor_instance_id}")
198
206
  self._send_mq_dao_time_thread_stop(interceptor_instance_id, bundle_exec_id)
199
207
  self.started = False
@@ -210,6 +218,15 @@ class MQDao(object):
210
218
  # self.logger.info("Control msg sent: " + str(msg))
211
219
  self.send_message(msg)
212
220
 
221
+ def _send_mq_dao_flush_complete(self, interceptor_instance_id, exec_bundle_id=None):
222
+ msg = {
223
+ "type": "flowcept_control",
224
+ "info": "mq_flush_complete",
225
+ "interceptor_instance_id": interceptor_instance_id,
226
+ "exec_bundle_id": exec_bundle_id,
227
+ }
228
+ self.send_message(msg)
229
+
213
230
  def send_document_inserter_stop(self, exec_bundle_id=None):
214
231
  """Send the document."""
215
232
  # These control_messages are handled by the document inserter
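Taken together, the new ``pending_mq_flush_complete`` set mirrors the existing thread-registration handshake: initialization adds an interceptor to both KV sets, and shutdown now emits an ``mq_flush_complete`` control message in addition to the thread-stop message. A rough sketch of the lifecycle from the publisher's side (that the consumer, e.g. the document inserter, reacts to the control message by calling ``register_flush_complete`` is an assumption; its hunk is not shown above):

    mq = MQDao.build()

    # Startup: the interceptor joins both KV sets.
    mq.register_time_based_thread_init("interceptor-1", exec_bundle_id=7)

    # ... workflow runs; buffered messages are flushed to the MQ ...

    # Shutdown: _stop() sends the "mq_flush_complete" and thread-stop control messages.
    # Once every interceptor in the bundle has been cleared from its set:
    mq.all_flush_complete_received(exec_bundle_id=7)    # True when the flush set is empty
    mq.all_time_based_threads_ended(exec_bundle_id=7)   # True when the thread set is empty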
@@ -9,7 +9,7 @@ from flowcept.version import __version__
9
9
  PROJECT_NAME = "flowcept"
10
10
 
11
11
  DEFAULT_SETTINGS = {
12
- "version": __version__,
12
+ "flowcept_version": __version__,
13
13
  "log": {"log_file_level": "disable", "log_stream_level": "disable"},
14
14
  "project": {"dump_buffer": {"enabled": True}},
15
15
  "telemetry_capture": {},
@@ -81,7 +81,7 @@ FLOWCEPT_USER = settings["experiment"].get("user", "blank_user")
81
81
 
82
82
  MQ_INSTANCES = settings["mq"].get("instances", None)
83
83
  MQ_SETTINGS = settings["mq"]
84
- MQ_ENABLED = os.getenv("MQ_ENABLED", settings["mq"].get("enabled", True))
84
+ MQ_ENABLED = os.getenv("MQ_ENABLED", str(settings["mq"].get("enabled", True))).strip().lower() in _TRUE_VALUES
85
85
  MQ_TYPE = os.getenv("MQ_TYPE", settings["mq"].get("type", "redis"))
86
86
  MQ_CHANNEL = os.getenv("MQ_CHANNEL", settings["mq"].get("channel", "interception"))
87
87
  MQ_PASSWORD = settings["mq"].get("password", None)
@@ -103,6 +103,11 @@ KVDB_PORT = int(os.getenv("KVDB_PORT", settings["kv_db"].get("port", "6379")))
103
103
  KVDB_URI = os.getenv("KVDB_URI", settings["kv_db"].get("uri", None))
104
104
  KVDB_ENABLED = settings["kv_db"].get("enabled", False)
105
105
 
106
+ if MQ_ENABLED and not KVDB_ENABLED:
107
+ raise ValueError(
108
+ "Invalid configuration: MQ is enabled but kv_db is disabled. "
109
+ "Enable kv_db.enabled (and KVDB) when MQ is enabled."
110
+ )
106
111
 
107
112
  DATABASES = settings.get("databases", {})
108
113
 
@@ -160,6 +165,12 @@ JSON_SERIALIZER = settings["project"].get("json_serializer", "default")
160
165
  REPLACE_NON_JSON_SERIALIZABLE = settings["project"].get("replace_non_json_serializable", True)
161
166
  ENRICH_MESSAGES = settings["project"].get("enrich_messages", True)
162
167
 
168
+ if DB_FLUSH_MODE == "online" and not MQ_ENABLED:
169
+ raise ValueError(
170
+ "Invalid configuration: project.db_flush_mode is 'online' but MQ is disabled. "
171
+ "Enable mq.enabled (or MQ_ENABLED=true) or set project.db_flush_mode to 'offline'."
172
+ )
173
+
163
174
  # Default: enable dump buffer only when running in offline flush mode.
164
175
  _DEFAULT_DUMP_BUFFER_ENABLED = DB_FLUSH_MODE == "offline"
165
176
  DUMP_BUFFER_ENABLED = (
@@ -45,12 +45,12 @@ class BaseAgentContextManager(BaseConsumer):
45
45
 
46
46
  agent_id = None
47
47
 
48
- def __init__(self):
48
+ def __init__(self, allow_mq_disabled: bool = False):
49
49
  """
50
50
  Initializes the agent and resets its context state.
51
51
  """
52
52
  self._started = False
53
- super().__init__()
53
+ super().__init__(allow_mq_disabled=allow_mq_disabled)
54
54
  # self.context = BaseAppContext(tasks=[])
55
55
  self.agent_id = BaseAgentContextManager.agent_id
56
56