flowcept 0.8.11.tar.gz → 0.9.1.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (203)
  1. {flowcept-0.8.11 → flowcept-0.9.1}/.github/workflows/run-llm-tests.yml +1 -1
  2. {flowcept-0.8.11 → flowcept-0.9.1}/.github/workflows/run-tests-py313.yml +2 -4
  3. {flowcept-0.8.11 → flowcept-0.9.1}/.github/workflows/run-tests.yml +13 -4
  4. {flowcept-0.8.11 → flowcept-0.9.1}/.github/workflows/run_examples.sh +2 -0
  5. {flowcept-0.8.11 → flowcept-0.9.1}/PKG-INFO +42 -14
  6. flowcept-0.9.1/examples/agents/a2a/README.md +27 -0
  7. flowcept-0.9.1/examples/agents/a2a/agent1.py +60 -0
  8. flowcept-0.9.1/examples/agents/a2a/agent2.py +60 -0
  9. flowcept-0.9.1/examples/agents/aec_agent_context_manager.py +67 -0
  10. flowcept-0.9.1/examples/agents/aec_agent_mock.py +110 -0
  11. flowcept-0.9.1/examples/agents/aec_prompts.py +113 -0
  12. flowcept-0.9.1/examples/agents/opt_driver_mock.py +193 -0
  13. flowcept-0.9.1/examples/consumers/ping_pong_example.py +65 -0
  14. {flowcept-0.8.11 → flowcept-0.9.1}/examples/consumers/simple_consumer.py +12 -1
  15. {flowcept-0.8.11 → flowcept-0.9.1}/examples/llm_complex/llm_main_example.py +1 -1
  16. {flowcept-0.8.11 → flowcept-0.9.1}/examples/llm_complex/llm_model.py +3 -1
  17. flowcept-0.9.1/examples/start_here.py +51 -0
  18. {flowcept-0.8.11 → flowcept-0.9.1}/pyproject.toml +27 -25
  19. {flowcept-0.8.11 → flowcept-0.9.1}/resources/sample_settings.yaml +12 -4
  20. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/__init__.py +7 -4
  21. flowcept-0.9.1/src/flowcept/agents/__init__.py +5 -0
  22. flowcept-0.8.11/src/flowcept/flowceptor/consumers/agent/client_agent.py → flowcept-0.9.1/src/flowcept/agents/agent_client.py +22 -12
  23. flowcept-0.9.1/src/flowcept/agents/agents_utils.py +181 -0
  24. flowcept-0.9.1/src/flowcept/agents/dynamic_schema_tracker.py +191 -0
  25. flowcept-0.9.1/src/flowcept/agents/flowcept_agent.py +30 -0
  26. flowcept-0.9.1/src/flowcept/agents/flowcept_ctx_manager.py +175 -0
  27. flowcept-0.9.1/src/flowcept/agents/gui/__init__.py +5 -0
  28. flowcept-0.9.1/src/flowcept/agents/gui/agent_gui.py +76 -0
  29. flowcept-0.9.1/src/flowcept/agents/gui/gui_utils.py +239 -0
  30. flowcept-0.9.1/src/flowcept/agents/llms/__init__.py +1 -0
  31. flowcept-0.9.1/src/flowcept/agents/llms/claude_gcp.py +139 -0
  32. flowcept-0.9.1/src/flowcept/agents/llms/gemini25.py +119 -0
  33. flowcept-0.9.1/src/flowcept/agents/prompts/__init__.py +1 -0
  34. flowcept-0.8.11/src/flowcept/flowceptor/adapters/agents/prompts.py → flowcept-0.9.1/src/flowcept/agents/prompts/general_prompts.py +18 -0
  35. flowcept-0.9.1/src/flowcept/agents/prompts/in_memory_query_prompts.py +297 -0
  36. flowcept-0.9.1/src/flowcept/agents/tools/__init__.py +1 -0
  37. flowcept-0.9.1/src/flowcept/agents/tools/general_tools.py +102 -0
  38. flowcept-0.9.1/src/flowcept/agents/tools/in_memory_queries/__init__.py +1 -0
  39. flowcept-0.9.1/src/flowcept/agents/tools/in_memory_queries/in_memory_queries_tools.py +704 -0
  40. flowcept-0.9.1/src/flowcept/agents/tools/in_memory_queries/pandas_agent_utils.py +309 -0
  41. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/cli.py +286 -44
  42. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/commons/daos/docdb_dao/mongodb_dao.py +47 -0
  43. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/commons/daos/mq_dao/mq_dao_base.py +24 -13
  44. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/commons/daos/mq_dao/mq_dao_kafka.py +18 -2
  45. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/commons/flowcept_dataclasses/task_object.py +16 -21
  46. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/commons/flowcept_dataclasses/workflow_object.py +9 -1
  47. flowcept-0.9.1/src/flowcept/commons/task_data_preprocess.py +400 -0
  48. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/commons/utils.py +25 -6
  49. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/configs.py +41 -26
  50. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/flowcept_api/flowcept_controller.py +73 -6
  51. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/flowceptor/adapters/base_interceptor.py +11 -5
  52. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/flowceptor/consumers/agent/base_agent_context_manager.py +25 -1
  53. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/flowceptor/consumers/base_consumer.py +4 -0
  54. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/flowceptor/consumers/consumer_utils.py +5 -4
  55. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/flowceptor/consumers/document_inserter.py +2 -2
  56. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/flowceptor/telemetry_capture.py +5 -2
  57. flowcept-0.9.1/src/flowcept/instrumentation/flowcept_agent_task.py +294 -0
  58. flowcept-0.9.1/src/flowcept/instrumentation/flowcept_decorator.py +43 -0
  59. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/instrumentation/flowcept_loop.py +3 -3
  60. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/instrumentation/flowcept_task.py +64 -24
  61. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/instrumentation/flowcept_torch.py +5 -5
  62. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/instrumentation/task_capture.py +83 -6
  63. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/version.py +1 -1
  64. {flowcept-0.8.11 → flowcept-0.9.1}/tests/adapters/test_dask.py +1 -0
  65. {flowcept-0.8.11 → flowcept-0.9.1}/tests/instrumentation_tests/flowcept_task_decorator_test.py +43 -9
  66. flowcept-0.8.11/examples/agents/aec_agent_mock.py +0 -144
  67. flowcept-0.8.11/examples/agents/opt_driver_mock.py +0 -49
  68. flowcept-0.8.11/src/flowcept/commons/task_data_preprocess.py +0 -200
  69. flowcept-0.8.11/src/flowcept/flowceptor/adapters/agents/__init__.py +0 -1
  70. flowcept-0.8.11/src/flowcept/flowceptor/adapters/agents/agents_utils.py +0 -89
  71. flowcept-0.8.11/src/flowcept/flowceptor/adapters/agents/flowcept_agent.py +0 -292
  72. flowcept-0.8.11/src/flowcept/flowceptor/adapters/agents/flowcept_llm_prov_capture.py +0 -186
  73. flowcept-0.8.11/src/flowcept/flowceptor/consumers/agent/flowcept_agent_context_manager.py +0 -145
  74. flowcept-0.8.11/src/flowcept/flowceptor/consumers/agent/flowcept_qa_manager.py +0 -112
  75. {flowcept-0.8.11 → flowcept-0.9.1}/.github/workflows/checks.yml +0 -0
  76. {flowcept-0.8.11 → flowcept-0.9.1}/.github/workflows/create-release-n-publish.yml +0 -0
  77. {flowcept-0.8.11 → flowcept-0.9.1}/.github/workflows/run-tests-all-dbs.yml +0 -0
  78. {flowcept-0.8.11 → flowcept-0.9.1}/.github/workflows/run-tests-in-container.yml +0 -0
  79. {flowcept-0.8.11 → flowcept-0.9.1}/.github/workflows/run-tests-kafka.yml +0 -0
  80. {flowcept-0.8.11 → flowcept-0.9.1}/.github/workflows/run-tests-simple.yml +0 -0
  81. {flowcept-0.8.11 → flowcept-0.9.1}/.github/workflows/version_bumper.py +0 -0
  82. {flowcept-0.8.11 → flowcept-0.9.1}/.gitignore +0 -0
  83. {flowcept-0.8.11 → flowcept-0.9.1}/.readthedocs.yaml +0 -0
  84. {flowcept-0.8.11 → flowcept-0.9.1}/CONTRIBUTING.md +0 -0
  85. {flowcept-0.8.11 → flowcept-0.9.1}/LICENSE +0 -0
  86. {flowcept-0.8.11 → flowcept-0.9.1}/Makefile +0 -0
  87. {flowcept-0.8.11 → flowcept-0.9.1}/README.md +0 -0
  88. {flowcept-0.8.11 → flowcept-0.9.1}/deployment/Dockerfile +0 -0
  89. {flowcept-0.8.11 → flowcept-0.9.1}/deployment/compose-grafana.yml +0 -0
  90. {flowcept-0.8.11 → flowcept-0.9.1}/deployment/compose-kafka.yml +0 -0
  91. {flowcept-0.8.11 → flowcept-0.9.1}/deployment/compose-mofka.yml +0 -0
  92. {flowcept-0.8.11 → flowcept-0.9.1}/deployment/compose-mongo.yml +0 -0
  93. {flowcept-0.8.11 → flowcept-0.9.1}/deployment/compose.yml +0 -0
  94. {flowcept-0.8.11 → flowcept-0.9.1}/docs/api-reference.rst +0 -0
  95. {flowcept-0.8.11 → flowcept-0.9.1}/docs/conf.py +0 -0
  96. {flowcept-0.8.11 → flowcept-0.9.1}/docs/contributing.rst +0 -0
  97. {flowcept-0.8.11 → flowcept-0.9.1}/docs/getstarted.rst +0 -0
  98. {flowcept-0.8.11 → flowcept-0.9.1}/docs/index.rst +0 -0
  99. {flowcept-0.8.11 → flowcept-0.9.1}/docs/schemas.rst +0 -0
  100. {flowcept-0.8.11 → flowcept-0.9.1}/docs/task_schema.rst +0 -0
  101. {flowcept-0.8.11 → flowcept-0.9.1}/docs/workflow_schema.rst +0 -0
  102. {flowcept-0.8.11 → flowcept-0.9.1}/examples/dask_example.py +0 -0
  103. {flowcept-0.8.11 → flowcept-0.9.1}/examples/distributed_consumer_example.py +0 -0
  104. {flowcept-0.8.11 → flowcept-0.9.1}/examples/instrumented_loop_example.py +0 -0
  105. {flowcept-0.8.11 → flowcept-0.9.1}/examples/instrumented_simple_example.py +0 -0
  106. {flowcept-0.8.11 → flowcept-0.9.1}/examples/llm_complex/README.md +0 -0
  107. {flowcept-0.8.11 → flowcept-0.9.1}/examples/llm_complex/custom_provenance_id_mapping.yaml +0 -0
  108. {flowcept-0.8.11 → flowcept-0.9.1}/examples/llm_complex/llm_dataprep.py +0 -0
  109. {flowcept-0.8.11 → flowcept-0.9.1}/examples/llm_complex/llm_test_runner.py +0 -0
  110. {flowcept-0.8.11 → flowcept-0.9.1}/examples/mlflow_example.py +0 -0
  111. {flowcept-0.8.11 → flowcept-0.9.1}/examples/single_layer_perceptron_example.py +0 -0
  112. {flowcept-0.8.11 → flowcept-0.9.1}/examples/tensorboard_example.py +0 -0
  113. {flowcept-0.8.11 → flowcept-0.9.1}/examples/unmanaged/main.py +0 -0
  114. {flowcept-0.8.11 → flowcept-0.9.1}/examples/unmanaged/simple_task.py +0 -0
  115. {flowcept-0.8.11 → flowcept-0.9.1}/notebooks/analytics.ipynb +0 -0
  116. {flowcept-0.8.11 → flowcept-0.9.1}/notebooks/dask.ipynb +0 -0
  117. {flowcept-0.8.11 → flowcept-0.9.1}/notebooks/dask_from_CLI.ipynb +0 -0
  118. {flowcept-0.8.11 → flowcept-0.9.1}/notebooks/mlflow.ipynb +0 -0
  119. {flowcept-0.8.11 → flowcept-0.9.1}/notebooks/reset_dask_nb_exec_counts.py +0 -0
  120. {flowcept-0.8.11 → flowcept-0.9.1}/notebooks/tensorboard.ipynb +0 -0
  121. {flowcept-0.8.11 → flowcept-0.9.1}/resources/mofka/bedrock_setup.sh +0 -0
  122. {flowcept-0.8.11 → flowcept-0.9.1}/resources/mofka/consumer.py +0 -0
  123. {flowcept-0.8.11 → flowcept-0.9.1}/resources/mofka/mofka-requirements.yaml +0 -0
  124. {flowcept-0.8.11 → flowcept-0.9.1}/resources/mofka/mofka_config.json +0 -0
  125. {flowcept-0.8.11 → flowcept-0.9.1}/resources/simple_redis_consumer.py +0 -0
  126. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/analytics/__init__.py +0 -0
  127. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/analytics/analytics_utils.py +0 -0
  128. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/analytics/data_augmentation.py +0 -0
  129. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/analytics/plot.py +0 -0
  130. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/commons/__init__.py +0 -0
  131. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/commons/autoflush_buffer.py +0 -0
  132. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/commons/daos/__init__.py +0 -0
  133. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/commons/daos/docdb_dao/__init__.py +0 -0
  134. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/commons/daos/docdb_dao/docdb_dao_base.py +0 -0
  135. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/commons/daos/docdb_dao/lmdb_dao.py +0 -0
  136. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/commons/daos/keyvalue_dao.py +0 -0
  137. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/commons/daos/mq_dao/__init__.py +0 -0
  138. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/commons/daos/mq_dao/mq_dao_mofka.py +0 -0
  139. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/commons/daos/mq_dao/mq_dao_redis.py +0 -0
  140. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/commons/daos/redis_conn.py +0 -0
  141. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/commons/flowcept_dataclasses/__init__.py +0 -0
  142. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/commons/flowcept_dataclasses/base_settings_dataclasses.py +0 -0
  143. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/commons/flowcept_dataclasses/telemetry.py +0 -0
  144. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/commons/flowcept_logger.py +0 -0
  145. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/commons/query_utils.py +0 -0
  146. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/commons/settings_factory.py +0 -0
  147. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/commons/vocabulary.py +0 -0
  148. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/flowcept_api/__init__.py +0 -0
  149. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/flowcept_api/db_api.py +0 -0
  150. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/flowcept_api/task_query_api.py +0 -0
  151. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/flowcept_webserver/__init__.py +0 -0
  152. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/flowcept_webserver/app.py +0 -0
  153. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/flowcept_webserver/resources/__init__.py +0 -0
  154. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/flowcept_webserver/resources/query_rsrc.py +0 -0
  155. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/flowcept_webserver/resources/task_messages_rsrc.py +0 -0
  156. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/flowceptor/__init__.py +0 -0
  157. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/flowceptor/adapters/__init__.py +0 -0
  158. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/flowceptor/adapters/brokers/__init__.py +0 -0
  159. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/flowceptor/adapters/brokers/mqtt_interceptor.py +0 -0
  160. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/flowceptor/adapters/dask/__init__.py +0 -0
  161. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/flowceptor/adapters/dask/dask_dataclasses.py +0 -0
  162. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/flowceptor/adapters/dask/dask_interceptor.py +0 -0
  163. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/flowceptor/adapters/dask/dask_plugins.py +0 -0
  164. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/flowceptor/adapters/instrumentation_interceptor.py +0 -0
  165. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/flowceptor/adapters/interceptor_state_manager.py +0 -0
  166. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/flowceptor/adapters/mlflow/__init__.py +0 -0
  167. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/flowceptor/adapters/mlflow/interception_event_handler.py +0 -0
  168. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/flowceptor/adapters/mlflow/mlflow_dao.py +0 -0
  169. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/flowceptor/adapters/mlflow/mlflow_dataclasses.py +0 -0
  170. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/flowceptor/adapters/mlflow/mlflow_interceptor.py +0 -0
  171. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/flowceptor/adapters/tensorboard/__init__.py +0 -0
  172. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/flowceptor/adapters/tensorboard/tensorboard_dataclasses.py +0 -0
  173. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/flowceptor/adapters/tensorboard/tensorboard_interceptor.py +0 -0
  174. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/flowceptor/consumers/__init__.py +0 -0
  175. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/flowceptor/consumers/agent/__init__.py +0 -0
  176. {flowcept-0.8.11 → flowcept-0.9.1}/src/flowcept/instrumentation/__init__.py +0 -0
  177. {flowcept-0.8.11 → flowcept-0.9.1}/tests/__init__.py +0 -0
  178. {flowcept-0.8.11 → flowcept-0.9.1}/tests/adapters/__init__.py +0 -0
  179. {flowcept-0.8.11 → flowcept-0.9.1}/tests/adapters/dask_test_utils.py +0 -0
  180. {flowcept-0.8.11 → flowcept-0.9.1}/tests/adapters/test_broker.py +0 -0
  181. {flowcept-0.8.11 → flowcept-0.9.1}/tests/adapters/test_dask_with_context_mgmt.py +0 -0
  182. {flowcept-0.8.11 → flowcept-0.9.1}/tests/adapters/test_file_observer.py +0 -0
  183. {flowcept-0.8.11 → flowcept-0.9.1}/tests/adapters/test_mlflow.py +0 -0
  184. {flowcept-0.8.11 → flowcept-0.9.1}/tests/adapters/test_tensorboard.py +0 -0
  185. {flowcept-0.8.11 → flowcept-0.9.1}/tests/api/__init__.py +0 -0
  186. {flowcept-0.8.11 → flowcept-0.9.1}/tests/api/db_api_test.py +0 -0
  187. {flowcept-0.8.11 → flowcept-0.9.1}/tests/api/flowcept_api_test.py +0 -0
  188. {flowcept-0.8.11 → flowcept-0.9.1}/tests/api/sample_data.json +0 -0
  189. {flowcept-0.8.11 → flowcept-0.9.1}/tests/api/sample_data_with_telemetry_and_rai.json +0 -0
  190. {flowcept-0.8.11 → flowcept-0.9.1}/tests/api/task_query_api_test.py +0 -0
  191. {flowcept-0.8.11 → flowcept-0.9.1}/tests/doc_db_inserter/__init__.py +0 -0
  192. {flowcept-0.8.11 → flowcept-0.9.1}/tests/doc_db_inserter/doc_db_inserter_test.py +0 -0
  193. {flowcept-0.8.11 → flowcept-0.9.1}/tests/instrumentation_tests/__init__.py +0 -0
  194. {flowcept-0.8.11 → flowcept-0.9.1}/tests/instrumentation_tests/flowcept_explicit_tasks.py +0 -0
  195. {flowcept-0.8.11 → flowcept-0.9.1}/tests/instrumentation_tests/flowcept_loop_test.py +0 -0
  196. {flowcept-0.8.11 → flowcept-0.9.1}/tests/instrumentation_tests/ml_tests/__init__.py +0 -0
  197. {flowcept-0.8.11 → flowcept-0.9.1}/tests/instrumentation_tests/ml_tests/dl_trainer.py +0 -0
  198. {flowcept-0.8.11 → flowcept-0.9.1}/tests/instrumentation_tests/ml_tests/ml_decorator_dask_test.py +0 -0
  199. {flowcept-0.8.11 → flowcept-0.9.1}/tests/instrumentation_tests/ml_tests/ml_decorator_test.py +0 -0
  200. {flowcept-0.8.11 → flowcept-0.9.1}/tests/misc_tests/__init__.py +0 -0
  201. {flowcept-0.8.11 → flowcept-0.9.1}/tests/misc_tests/log_test.py +0 -0
  202. {flowcept-0.8.11 → flowcept-0.9.1}/tests/misc_tests/singleton_test.py +0 -0
  203. {flowcept-0.8.11 → flowcept-0.9.1}/tests/misc_tests/telemetry_test.py +0 -0
.github/workflows/run-llm-tests.yml
@@ -37,7 +37,7 @@ jobs:
          python --version

      - name: Install dependencies
-       run: pip install .[ml_dev,dask,mongo]
+       run: pip install .[extras,ml_dev,dask,mongo]

      - name: Test LLM with Dask cluster
        run: python examples/llm_complex/llm_main_example.py --start-dask-cluster
.github/workflows/run-tests-py313.yml
@@ -36,11 +36,9 @@ jobs:
          python -m pip install --upgrade pip
          python --version

-     - name: Install dependencies that work on py3.13
+     - name: Install dependencies
        run: |
-         pip install . --no-deps
-         pip install flask-restful msgpack omegaconf pandas psutil py-cpuinfo redis requests pyarrow
-         pip install .[mongo,analytics,dask,docs,kafka,mlflow,dev]
+         pip install .[all]

      - name: List installed packages
        run: pip list
.github/workflows/run-tests.yml
@@ -29,16 +29,25 @@ jobs:
          cache: "pip"

      - name: Show OS Info
-       run: '[[ "$OSTYPE" == "linux-gnu"* ]] && { echo "OS Type: Linux"; (command -v lsb_release &> /dev/null && lsb_release -a) || cat /etc/os-release; uname -r; } || [[ "$OSTYPE" == "darwin"* ]] && { echo "OS Type: macOS"; sw_vers; uname -r; } || echo "Unsupported OS type: $OSTYPE"'
-
-     - name: Start docker compose with redis
-       run: make services-mongo
+       run: 'case "${OSTYPE:-}" in linux*) echo "OS Type: Linux"; (command -v lsb_release >/dev/null 2>&1 && lsb_release -a || cat /etc/os-release); uname -r ;; darwin*) echo "OS Type: macOS"; sw_vers || true; uname -r ;; *) echo "Unsupported OS type: ${OSTYPE:-unknown}" ;; esac'

      - name: Upgrade pip
        run: |
          python -m pip install --upgrade pip
          python --version

+     - name: Test basic example without any external service
+       run: |
+         pip install .
+         pip list
+         flowcept --init-settings
+         python examples/start_here.py
+         pip uninstall flowcept -y
+         rm ~/.flowcept/settings.yaml
+
+     - name: Start docker compose with redis
+       run: make services-mongo
+
      - name: Test examples
        run: bash .github/workflows/run_examples.sh examples true # with mongo

.github/workflows/run_examples.sh
@@ -34,6 +34,8 @@ run_test() {
    pip uninstall flowcept -y > /dev/null 2>&1 || true # Ignore errors during uninstall

    pip install . > /dev/null 2>&1
+   pip install .[extras] > /dev/null 2>&1
+   pip list

    if [[ "$with_mongo" == "true" ]]; then
        pip install .[mongo] > /dev/null 2>&1
PKG-INFO
@@ -1,45 +1,52 @@
  Metadata-Version: 2.4
  Name: flowcept
- Version: 0.8.11
+ Version: 0.9.1
  Summary: Capture and query workflow provenance data using data observability
- Project-URL: GitHub, https://github.com/ORNL/flowcept
  Author: Oak Ridge National Laboratory
  License-Expression: MIT
  License-File: LICENSE
- Keywords: ai,big-data,dask,data-analytics,data-integration,databases,lineage,machine-learning,ml,mlflow,model-management,parallel-processing,provenance,reproducibility,responsible-ai,scientific-workflows,tensorboard,workflows
+ Keywords: agentic-ai,agentic-workflows,ai,big-data,dask,data-analytics,data-integration,databases,lineage,llm,machine-learning,ml,mlflow,model-management,parallel-processing,provenance,reproducibility,responsible-ai,scientific-workflows,tensorboard,workflows
  Classifier: License :: OSI Approved :: MIT License
  Classifier: Operating System :: OS Independent
  Classifier: Programming Language :: Python :: 3
  Requires-Python: >=3.10
- Requires-Dist: flask-restful
- Requires-Dist: lmdb
  Requires-Dist: msgpack
+ Requires-Dist: numpy
  Requires-Dist: omegaconf
- Requires-Dist: pandas
- Requires-Dist: psutil>=6.1.1
- Requires-Dist: py-cpuinfo
- Requires-Dist: pyarrow
- Requires-Dist: redis
- Requires-Dist: requests
+ Requires-Dist: orjson
  Provides-Extra: all
  Requires-Dist: alembic; extra == 'all'
  Requires-Dist: confluent-kafka<=2.8.0; extra == 'all'
+ Requires-Dist: cryptography; extra == 'all'
  Requires-Dist: dask[distributed]<=2024.10.0; extra == 'all'
+ Requires-Dist: flask-restful; extra == 'all'
  Requires-Dist: furo; extra == 'all'
+ Requires-Dist: gitpython; extra == 'all'
+ Requires-Dist: google-genai; extra == 'all'
  Requires-Dist: jupyterlab; extra == 'all'
+ Requires-Dist: langchain-community; extra == 'all'
+ Requires-Dist: lmdb; extra == 'all'
+ Requires-Dist: mcp[cli]; extra == 'all'
  Requires-Dist: mlflow-skinny; extra == 'all'
  Requires-Dist: nbmake; extra == 'all'
  Requires-Dist: paho-mqtt; extra == 'all'
+ Requires-Dist: pandas; extra == 'all'
  Requires-Dist: pika; extra == 'all'
  Requires-Dist: plotly; extra == 'all'
+ Requires-Dist: psutil>=6.1.1; extra == 'all'
+ Requires-Dist: py-cpuinfo; extra == 'all'
+ Requires-Dist: pyarrow; extra == 'all'
  Requires-Dist: pymongo; extra == 'all'
  Requires-Dist: pytest; extra == 'all'
  Requires-Dist: pyyaml; extra == 'all'
+ Requires-Dist: redis; extra == 'all'
+ Requires-Dist: requests; extra == 'all'
  Requires-Dist: ruff; extra == 'all'
  Requires-Dist: scipy; extra == 'all'
  Requires-Dist: seaborn; extra == 'all'
  Requires-Dist: sphinx; extra == 'all'
  Requires-Dist: sqlalchemy; extra == 'all'
+ Requires-Dist: streamlit; extra == 'all'
  Requires-Dist: tbparse; extra == 'all'
  Requires-Dist: tensorboard; extra == 'all'
  Requires-Dist: tensorflow; extra == 'all'
@@ -64,14 +71,28 @@ Requires-Dist: sphinx; extra == 'dev'
  Provides-Extra: docs
  Requires-Dist: furo; extra == 'docs'
  Requires-Dist: sphinx; extra == 'docs'
+ Provides-Extra: extras
+ Requires-Dist: flask-restful; extra == 'extras'
+ Requires-Dist: gitpython; extra == 'extras'
+ Requires-Dist: lmdb; extra == 'extras'
+ Requires-Dist: pandas; extra == 'extras'
+ Requires-Dist: psutil>=6.1.1; extra == 'extras'
+ Requires-Dist: py-cpuinfo; extra == 'extras'
+ Requires-Dist: redis; extra == 'extras'
+ Requires-Dist: requests; extra == 'extras'
  Provides-Extra: kafka
  Requires-Dist: confluent-kafka<=2.8.0; extra == 'kafka'
  Provides-Extra: llm-agent
- Requires-Dist: faiss-cpu; extra == 'llm-agent'
  Requires-Dist: langchain-community; extra == 'llm-agent'
  Requires-Dist: mcp[cli]; extra == 'llm-agent'
- Requires-Dist: sentence-transformers; extra == 'llm-agent'
- Requires-Dist: tiktoken; extra == 'llm-agent'
+ Requires-Dist: streamlit; extra == 'llm-agent'
+ Provides-Extra: llm-google
+ Requires-Dist: google-genai; extra == 'llm-google'
+ Requires-Dist: langchain-community; extra == 'llm-google'
+ Requires-Dist: mcp[cli]; extra == 'llm-google'
+ Requires-Dist: streamlit; extra == 'llm-google'
+ Provides-Extra: lmdb
+ Requires-Dist: lmdb; extra == 'lmdb'
  Provides-Extra: ml-dev
  Requires-Dist: datasets==2.17.0; extra == 'ml-dev'
  Requires-Dist: nltk; extra == 'ml-dev'
@@ -82,15 +103,22 @@ Requires-Dist: torchtext==0.17.2; extra == 'ml-dev'
  Requires-Dist: torchvision==0.17.2; extra == 'ml-dev'
  Provides-Extra: mlflow
  Requires-Dist: alembic; extra == 'mlflow'
+ Requires-Dist: cryptography; extra == 'mlflow'
  Requires-Dist: mlflow-skinny; extra == 'mlflow'
  Requires-Dist: sqlalchemy; extra == 'mlflow'
  Requires-Dist: watchdog; extra == 'mlflow'
  Provides-Extra: mongo
+ Requires-Dist: pyarrow; extra == 'mongo'
  Requires-Dist: pymongo; extra == 'mongo'
  Provides-Extra: mqtt
  Requires-Dist: paho-mqtt; extra == 'mqtt'
  Provides-Extra: nvidia
  Requires-Dist: nvidia-ml-py; extra == 'nvidia'
+ Provides-Extra: redis
+ Requires-Dist: redis; extra == 'redis'
+ Provides-Extra: telemetry
+ Requires-Dist: psutil>=6.1.1; extra == 'telemetry'
+ Requires-Dist: py-cpuinfo; extra == 'telemetry'
  Provides-Extra: tensorboard
  Requires-Dist: tbparse; extra == 'tensorboard'
  Requires-Dist: tensorboard; extra == 'tensorboard'
examples/agents/a2a/README.md (new file)
@@ -0,0 +1,27 @@
+This simple example shows communication between two agents through Flowcept's provenance messages.
+
+### Setup:
+
+- Agent1 runs on localhost:8000
+- Agent2 runs on localhost:8001
+- The client talks to Agent1
+
+### Running
+
+1. Run Agent1
+2. Run Agent2
+3. Activate Agent2 (this step should not be necessary, but we have not yet found a way to skip it): curl -X POST http://localhost:8001/mcp/agent/Agent2/action/liveness -H "Content-Type: application/json"
+
+The sequence begins by running
+
+`flowcept --agent-client --tool-name agent_task1`
+
+### Sequence of Message Passing
+
+1. The client starts by calling Agent 1's tool: agent_task1
+2. The instrumented agent action agent_task1 sends its completion message
+3. Agent 1 sends a "call_agent_task" message to call agent_task2
+4. Agent 2 receives it
+5. Agent 2 runs its tool agent_task2
+6. The instrumented agent action agent_task2 sends its completion message (subtype 'agent_task')
+7. Agent 1 receives it, prints the result, and finishes.
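The README above drives the message-passing sequence through the `flowcept --agent-client` CLI. For readers who want to see the equivalent call from Python, a minimal client sketch follows. It assumes the `run_tool` helper from `flowcept.agents.agent_client` (the module renamed from `client_agent.py` in this release) accepts `tool_name`, `kwargs`, and `port` as it is used in `agent2.py` below; treat it as an illustration, not the package's documented client API.

```python
# Illustrative sketch only: kick off the a2a sequence by calling Agent 1's MCP tool from Python.
# Assumes run_tool(tool_name, kwargs=..., port=...) behaves as in examples/agents/a2a/agent2.py.
from flowcept.agents.agent_client import run_tool

if __name__ == "__main__":
    # Agent1 listens on localhost:8000 (see Setup above); agent_task1 takes an optional campaign_id.
    result = run_tool(tool_name="agent_task1", kwargs={"campaign_id": None}, port=8000)
    print(result)
```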
examples/agents/a2a/agent1.py (new file)
@@ -0,0 +1,60 @@
+import random
+from typing import Dict
+
+import uvicorn
+from flowcept.flowceptor.consumers.agent.base_agent_context_manager import BaseAgentContextManager
+from flowcept.instrumentation.flowcept_agent_task import agent_flowcept_task
+from flowcept.instrumentation.flowcept_task import flowcept_task
+from mcp.server.fastmcp import FastMCP
+
+from flowcept.configs import AGENT
+
+
+@flowcept_task(subtype="call_agent_task")
+def agent_task2(**kwargs):
+    return
+
+
+class Agent1ContextManager(BaseAgentContextManager):
+
+    def __init__(self):
+        super().__init__()
+
+    def message_handler(self, msg_obj: Dict) -> bool:
+        if msg_obj.get('type', '') == 'task':
+            subtype = msg_obj.get("subtype", '')
+            if subtype == 'agent_task':
+                print(msg_obj)
+                tool_name = msg_obj["activity_id"]
+                generated = msg_obj["generated"]
+                if tool_name == 'agent_task1':
+                    self.logger.debug(f"Ok, Agent 1 executed agent_task1. Now going to send Message to Agent 2")
+                    agent_task2(**generated)
+                elif tool_name == "agent_task2":
+                    self.logger.debug(f"Ok, Agent 2 executed agent_task2. All good. Its output was: {generated}")
+        return True
+
+
+agent_controller = Agent1ContextManager()
+mcp = FastMCP("Agent1", require_session=True, lifespan=agent_controller.lifespan)
+
+
+@mcp.tool()
+@agent_flowcept_task  # Must be in this order. @mcp.tool then @flowcept_task
+def agent_task1(campaign_id=None):
+    return {
+        "msg": "I'm agent 1 and I wish to talk to agent 2!",
+        "data": random.randint(0, 350)
+    }
+
+def main():
+    """
+    Start the MCP server.
+    """
+    uvicorn.run(
+        mcp.streamable_http_app, host=AGENT.get("mcp_host", "0.0.0.0"), port=AGENT.get("mcp_port", 8000), lifespan="on"
+    )
+
+
+if __name__ == "__main__":
+    main()
examples/agents/a2a/agent2.py (new file)
@@ -0,0 +1,60 @@
+from typing import Dict
+
+import uvicorn
+from flowcept.flowceptor.consumers.agent.base_agent_context_manager import BaseAgentContextManager
+from flowcept.flowceptor.consumers.agent.client_agent import run_tool
+from flowcept.instrumentation.flowcept_agent_task import agent_flowcept_task
+from mcp.server.fastmcp import FastMCP
+
+from flowcept.configs import AGENT
+
+
+class Agent2ContextManager(BaseAgentContextManager):
+
+    def __init__(self):
+        super().__init__()
+
+    def message_handler(self, msg_obj: Dict) -> bool:
+        print(msg_obj)
+        if msg_obj.get('type', '') == 'task':
+            subtype = msg_obj.get("subtype", '')
+            if subtype == 'call_agent_task':
+                tool_name = msg_obj["activity_id"]
+                args = msg_obj["used"]
+                if tool_name == 'agent_task2':
+                    self.logger.debug(f"I am Agent 2. I saw that Agent 1 executed agent_task1 and sent its message to the MQ.\nNow I'm going to execute my action.")
+                    self.logger.debug(f"Args for agent_task2: {args}")
+                    r = run_tool(tool_name="agent_task2", kwargs={"data": args["data"]}, port=8001)
+                    self.logger.info(f"Response: {r}")
+        return True
+
+
+agent_controller = Agent2ContextManager()
+mcp = FastMCP("Agent2", require_session=True, lifespan=agent_controller.lifespan)
+
+
+@mcp.tool()
+@agent_flowcept_task  # Must be in this order. @mcp.tool then @flowcept_task
+def agent_task2(data):
+    print(f"This is the data generated by Agent 1: {data}")
+    return {
+        "msg": "I'm agent 2 and I executed!",
+    }
+
+@mcp.tool()
+def liveness():
+    return "Im alive"
+
+
+def main():
+    """
+    Start the MCP server.
+    """
+    uvicorn.run(
+        mcp.streamable_http_app, host=AGENT.get("mcp_host", "0.0.0.0"), port=8001, lifespan="on"
+    )
+
+
+if __name__ == "__main__":
+    main()
+
examples/agents/aec_agent_context_manager.py (new file)
@@ -0,0 +1,67 @@
+from dataclasses import dataclass
+from typing import Dict, List
+import json
+
+from flowcept.flowceptor.consumers.agent.base_agent_context_manager import BaseAgentContextManager
+from flowcept.agents.agent_client import run_tool
+
+
+@dataclass
+class AeCContext:
+    """
+    Container for storing agent context data during the lifespan of an application session.
+
+    Attributes
+    ----------
+    tasks : list of dict
+        A list of task messages received from the message queue. Each task message is stored as a dictionary.
+    """
+
+    history: List[Dict]
+
+
+class AdamantineAeCContextManager(BaseAgentContextManager):
+
+    def __init__(self):
+        super().__init__()
+        self.reset_context()
+
+    def message_handler(self, msg_obj: Dict) -> bool:
+        if msg_obj.get('type', '') == 'task':
+            subtype = msg_obj.get("subtype", '')
+            if subtype == 'call_agent_task':
+                print(msg_obj)
+                tool_name = msg_obj["activity_id"]
+                campaign_id = msg_obj.get("campaign_id", None)
+                tool_args = msg_obj.get("used", {})
+                tool_args["campaign_id"] = campaign_id
+                self.logger.debug(f"Going to run {tool_name}, {tool_args}")
+                tool_result = run_tool(tool_name, kwargs=tool_args)
+                if len(tool_result):
+                    if tool_name == 'choose_option':
+                        this_history = dict()
+                        tool_result = tool_result[0]
+                        this_history.update(tool_args["scores"])
+                        tool_result = json.loads(tool_result)
+                        this_history["chosen_option"] = tool_result["option"]
+                        this_history["explanation"] = tool_result["explanation"]
+                        self.context.history.append(this_history)
+                else:
+                    self.logger.error(f"Something wrong happened when running tool {tool_name}.")
+            elif subtype == 'agent_task':
+                print('Tool result', msg_obj["activity_id"])
+            if msg_obj.get("subtype", '') == "llm_query":
+                print("Msg from agent.")
+                #
+                # msg_output = msg_obj.get("generated", {})["response"]
+                #
+                # simulation_output = simulate_layer(self._layers_count, msg_output)
+                #
+                # run_tool_async("ask_agent", simulation_output)
+
+        else:
+            print(f"We got a msg with different type: {msg_obj.get("type", None)}")
+        return True
+
+    def reset_context(self):
+        self.context = AeCContext(history=[])
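In this context manager, `message_handler` reacts to task messages of subtype `call_agent_task`: the tool to run comes from `activity_id`, its arguments from `used`, and the result of `choose_option` calls is accumulated in the context's `history`. For orientation, a hedged sketch of the driver side that would produce such a message is shown below; it reuses the decorator pattern from `agent_task2` in `examples/agents/a2a/agent1.py`, and the function name and arguments here are only illustrative.

```python
# Illustrative driver-side stub (not part of the package): the provenance message emitted when
# this decorated function completes is what AdamantineAeCContextManager.message_handler reacts to.
from flowcept.instrumentation.flowcept_task import flowcept_task


@flowcept_task(subtype="call_agent_task")
def choose_option(scores=None, planned_controls=None):
    # The body can stay empty: the completion message carries the function name in 'activity_id'
    # and these keyword arguments in 'used', which the handler forwards to run_tool("choose_option", ...).
    return
```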
examples/agents/aec_agent_mock.py (new file)
@@ -0,0 +1,110 @@
+import json
+from typing import Dict, List
+
+import numpy as np
+import uvicorn
+from flowcept.instrumentation.flowcept_agent_task import agent_flowcept_task
+from mcp.server.fastmcp import FastMCP
+
+from flowcept.configs import AGENT
+from flowcept.agents.agents_utils import build_llm_model
+
+from examples.agents.aec_agent_context_manager import AdamantineAeCContextManager
+from examples.agents.aec_prompts import choose_option_prompt, generate_options_set_prompt
+
+
+agent_controller = AdamantineAeCContextManager()
+mcp = FastMCP("AnC_Agent_mock", require_session=True, lifespan=agent_controller.lifespan)
+
+
+
+#################################################
+# TOOLS
+#################################################
+
+
+@mcp.tool()
+@agent_flowcept_task  # Must be in this order. @mcp.tool then @flowcept_task
+def generate_options_set(layer: int, planned_controls, number_of_options=4, campaign_id=None):
+    llm = build_llm_model()
+    ctx = mcp.get_context()
+    history = ctx.request_context.lifespan_context.history
+    messages = generate_options_set_prompt(layer, planned_controls, history, number_of_options)
+    response = llm.invoke(messages)
+
+    try:
+        control_options = json.loads(response)
+    except Exception as e:
+        raise Exception(f"Could not parse json in generate_options_set. Error {e}. Likely an LLM output problem. "
+                        f"This is the JSON we tried to parse: {response}")
+
+    assert len(control_options) == number_of_options
+    return {"control_options": control_options}
+
+
+@mcp.tool()
+@agent_flowcept_task  # Must be in this order. @mcp.tool then @flowcept_task
+def choose_option(scores: Dict, planned_controls: List[Dict], campaign_id: str=None):
+    llm = build_llm_model()
+    ctx = mcp.get_context()
+    history = ctx.request_context.lifespan_context.history
+    messages = choose_option_prompt(scores, planned_controls, history)
+    response = llm.invoke(messages)
+    try:
+        result = json.loads(response)
+    except Exception as e:
+        raise Exception(f"Could not parse json in choose_option. Error {e}. Likely an LLM output problem. "
+                        f"This is the JSON we tried to parse: {response}")
+
+    human_option = int(np.argmin(scores["scores"]))
+
+    result["human_option"] = human_option
+    result["attention"] = True if human_option != result["option"] else False
+
+    return result
+
+
+@mcp.tool()
+def get_latest(n: int = None) -> str:
+    """
+    Return the latest task(s) as a JSON string.
+    """
+    ctx = mcp.get_context()
+    tasks = ctx.request_context.lifespan_context.tasks
+    if not tasks:
+        return "No tasks available."
+    if n is None:
+        return json.dumps(tasks[-1])
+    return json.dumps(tasks[-n])
+
+
+@mcp.tool()
+def check_liveness() -> str:
+    """
+    Check if the agent is running.
+    """
+
+    return f"I'm {mcp.name} and I'm ready!"
+
+
+@mcp.tool()
+def check_llm() -> str:
+    """
+    Check if the agent can talk to the LLM service.
+    """
+    llm = build_llm_model()
+    response = llm.invoke("Hello, are you there?")
+    return response
+
+
+def main():
+    """
+    Start the MCP server.
+    """
+    uvicorn.run(
+        mcp.streamable_http_app, host=AGENT.get("mcp_host", "0.0.0.0"), port=AGENT.get("mcp_port", 8000), lifespan="on"
+    )
+
+
+if __name__ == "__main__":
+    main()
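The mock agent exposes its functionality as MCP tools, so they are typically invoked through the client helper rather than imported directly. A hedged sketch of such an invocation follows; it assumes `run_tool` from `flowcept.agents.agent_client` as used in `aec_agent_context_manager.py` above, and the planned-control values are made up for illustration.

```python
# Illustrative sketch only: call the mock agent's generate_options_set tool through the client helper.
# Argument names follow the tool signature above; the control values are invented for the example.
from flowcept.agents.agent_client import run_tool

planned_controls = [{"power": 200.0, "dwell_0": 30, "dwell_1": 60} for _ in range(5)]

# Ask the agent for 4 candidate control options for layer 0.
options = run_tool(
    tool_name="generate_options_set",
    kwargs={"layer": 0, "planned_controls": planned_controls, "number_of_options": 4},
)
print(options)
```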
examples/agents/aec_prompts.py (new file)
@@ -0,0 +1,113 @@
+from typing import Dict, List
+
+
+def generate_options_set_prompt(layer: int, planned_controls: List[Dict], history: List[Dict] = None,
+                                number_of_options=4) -> str:
+    max_layers = len(planned_controls)
+    prompt_str = f"""
+    Role: You are a decision-making expert in Advanced Additive Manufacturing Technologies at the Manufacturing Demonstration Facility.
+    Background: You are analyzing layers being printed in a 3D printer. A control decision must be made for each layer to optimize printing outcomes.
+
+    Task: Generate exactly {number_of_options} control options for layer {layer} in a print job with {max_layers} layers.
+
+    Domain Constraints: Each control option must be a JSON object with the following fields: 'power', 'dwell_0', and 'dwell_1'.
+    Domain Constraints: 'power' is a float in the range [0, 350]. 'dwell_0' and 'dwell_1' are integers from 10 to 120, divisible by 5.
+    """
+
+    if history:
+        prompt_str += """
+        Context: Use both the original pre-calculated control plan and the full history of previously generated options to inform your decision.
+        """
+    else:
+        prompt_str += """
+        Context: Use only the original pre-calculated control plan to inform your decision.
+        """
+
+    prompt_str += f"""
+    Input - Pre-calculated Control Plan:
+    {planned_controls}
+    """
+
+    if history:
+        prompt_str += f"""
+        Input - Full Decision History:
+        {history}
+
+        History Format: The history is a list of JSON objects. Each entry includes: the layer index, control options generated for that layer, and the calculated score for each option.
+        Scoring Note: Typically (but not always), a lower score indicates a better option.
+        """
+
+    prompt_str += f"""
+    Format Constraints: Output a list with exactly {number_of_options} elements.
+    Format Constraints: Each element must be a JSON object with the format: {{'power': float, 'dwell_0': int, 'dwell_1': int}}.
+    Output Restriction: DO NOT WRITE ANYTHING ELSE. Only output the JSON list. The response will be parsed automatically.
+    """
+    return prompt_str
+
+
+def choose_option_prompt(scores: Dict, planned_controls: List[Dict], history=None) -> str:
+    """
+    Prompt: Write the prompt following according to these:
+
+    1. Analyze the prompt. Perform any improvements that you believe will help the LLM model answer better. Improve the prompt to try to make it more clear, less redundant for the LLM model.
+    2. Additionally, the LLM model is often making confusion because it analyzes arrays like [2, 3, 5] to choose a score from it, but it hallucinates saying that it chose the score 5 because it's the lowest score, when it's clearly not the case. Improve the prompt to try to make it more clear for the LLM model.
+    3. Analyze each message in the list and use your judgement to identify whether this message is better classified as "human" or as "system.
+    4. Rewrite the function to return an array of pairs ("role", "message"), i.e., each element in the array is a tuple of two strings, where "role" should be either "human" or "system". Do not use any MCP message stuff.
+    5. Reorganize the function for better readability. Check "if history" only once. Use array.extends to reduce the number of appends. It's fine to use more than 120 characters per line.
+    6. Adjust the prompts to better structure the messages themselves, e.g., informing when the message is defining a role, the message content should be "Role: rest of the message"
+    Clearly label :
+    tasks,
+    background,
+    format constraints,
+    domain constraints,
+
+    """
+    current_layer = scores.get("layer")
+    max_layers = len(planned_controls)
+
+    prompt_str = f"""
+    Role: You are a decision-making expert in Advanced Additive Manufacturing Technologies at the Manufacturing Demonstration Facility.
+    Background: You are analyzing a layer-by-layer 3D printing process to help select optimal control decisions based on simulation scores.
+    Task: Choose the best control option for layer {current_layer} out of a set of possible options. You will receive the scores and control options computed by an HPC simulation.
+
+    Domain Constraints: Each control option is a dictionary with the fields 'power', 'dwell_0', and 'dwell_1'.
+    Domain Constraints: 'power' is a float between 0 and 350. 'dwell_0' and 'dwell_1' are integers between 10 and 120, and must be divisible by 5.
+
+    Score Format: The input is a dictionary with:
+    - 'layer': current layer index
+    - 'control_options': a list of candidate control option dictionaries
+    - 'scores': a list of floats of same length, where scores[i] is the score of control_options[i].
+
+    Scoring Hint: Typically (but not always), a lower score indicates better quality. For example, in [5, 10], option 0 is preferred since 5 < 10.
+    ⚠️ Caution: Do NOT hallucinate reasoning. For example, if scores = [2, 3, 5], do NOT say 5 is the lowest. Use correct comparisons only.
+    Labeling: If your chosen option has the lowest score, label it 'expected'. If it does not, label it 'surprise' and explain your reasoning.
+
+    Input - Scores for layer {current_layer}:
+    {scores}
+
+    Input - Pre-calculated Control Plan:
+    {planned_controls}
+    """
+
+    if history:
+        prompt_str += f"""
+
+        Input - Full History of previous control decisions and scores:
+        {history}
+
+        History Format: Each item in the history is a dictionary with keys: 'layer', 'control_options', and 'scores'. Use this to reason based on past behavior.
+        """
+
+    else:
+        prompt_str += "\nContext: No prior decision history is available. Use only the current inputs.\n"
+    prompt_str += """
+    Format Constraints: Return a JSON object like:
+    {"option": index_of_best_option, "explanation": your_reasoning, "agent_label": "expected" or "surprise"}
+
+    Output Restriction:
+    - DO NOT SAY 'Here is the output'
+    - ONLY WRITE THE VALID JSON. NO EXPLANATIONS AT ALL.
+    - YOUR OUTPUT MUST BE A VALID JSON! Your output will be parsed programmatically.
+    """
+    return prompt_str
+
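To make the format constraints in `choose_option_prompt` concrete, the snippet below shows a reply that satisfies them and how it is parsed, mirroring what `choose_option` in `aec_agent_mock.py` does; the values are illustrative only.

```python
import json

# Illustrative only: a well-formed LLM reply for choose_option_prompt, parsed as in choose_option.
raw_response = '{"option": 0, "explanation": "Option 0 has the lowest score (2 < 3 < 5).", "agent_label": "expected"}'
result = json.loads(raw_response)
assert result["agent_label"] in ("expected", "surprise")
print(result["option"], result["explanation"])
```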