autobyteus 1.2.1__py3-none-any.whl → 1.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (472):
  1. autobyteus/agent/agent.py +15 -5
  2. autobyteus/agent/bootstrap_steps/__init__.py +3 -3
  3. autobyteus/agent/bootstrap_steps/agent_bootstrapper.py +5 -59
  4. autobyteus/agent/bootstrap_steps/base_bootstrap_step.py +1 -4
  5. autobyteus/agent/bootstrap_steps/mcp_server_prewarming_step.py +1 -3
  6. autobyteus/agent/bootstrap_steps/system_prompt_processing_step.py +16 -13
  7. autobyteus/agent/bootstrap_steps/working_context_snapshot_restore_step.py +38 -0
  8. autobyteus/agent/bootstrap_steps/workspace_context_initialization_step.py +2 -4
  9. autobyteus/agent/context/agent_config.py +47 -20
  10. autobyteus/agent/context/agent_context.py +23 -18
  11. autobyteus/agent/context/agent_runtime_state.py +21 -19
  12. autobyteus/agent/events/__init__.py +16 -1
  13. autobyteus/agent/events/agent_events.py +43 -3
  14. autobyteus/agent/events/agent_input_event_queue_manager.py +79 -26
  15. autobyteus/agent/events/event_store.py +57 -0
  16. autobyteus/agent/events/notifiers.py +69 -59
  17. autobyteus/agent/events/worker_event_dispatcher.py +21 -64
  18. autobyteus/agent/factory/agent_factory.py +83 -6
  19. autobyteus/agent/handlers/__init__.py +2 -0
  20. autobyteus/agent/handlers/approved_tool_invocation_event_handler.py +51 -34
  21. autobyteus/agent/handlers/bootstrap_event_handler.py +155 -0
  22. autobyteus/agent/handlers/inter_agent_message_event_handler.py +10 -0
  23. autobyteus/agent/handlers/lifecycle_event_logger.py +19 -11
  24. autobyteus/agent/handlers/llm_complete_response_received_event_handler.py +10 -15
  25. autobyteus/agent/handlers/llm_user_message_ready_event_handler.py +188 -48
  26. autobyteus/agent/handlers/tool_execution_approval_event_handler.py +0 -10
  27. autobyteus/agent/handlers/tool_invocation_request_event_handler.py +53 -48
  28. autobyteus/agent/handlers/tool_result_event_handler.py +7 -8
  29. autobyteus/agent/handlers/user_input_message_event_handler.py +10 -3
  30. autobyteus/agent/input_processor/memory_ingest_input_processor.py +44 -0
  31. autobyteus/agent/lifecycle/__init__.py +12 -0
  32. autobyteus/agent/lifecycle/base_processor.py +109 -0
  33. autobyteus/agent/lifecycle/events.py +35 -0
  34. autobyteus/agent/lifecycle/processor_definition.py +36 -0
  35. autobyteus/agent/lifecycle/processor_registry.py +106 -0
  36. autobyteus/agent/llm_request_assembler.py +98 -0
  37. autobyteus/agent/llm_response_processor/__init__.py +1 -8
  38. autobyteus/agent/message/context_file_type.py +1 -1
  39. autobyteus/agent/runtime/agent_runtime.py +29 -21
  40. autobyteus/agent/runtime/agent_worker.py +98 -19
  41. autobyteus/agent/shutdown_steps/__init__.py +2 -0
  42. autobyteus/agent/shutdown_steps/agent_shutdown_orchestrator.py +2 -0
  43. autobyteus/agent/shutdown_steps/tool_cleanup_step.py +58 -0
  44. autobyteus/agent/status/__init__.py +14 -0
  45. autobyteus/agent/status/manager.py +93 -0
  46. autobyteus/agent/status/status_deriver.py +96 -0
  47. autobyteus/agent/{phases/phase_enum.py → status/status_enum.py} +16 -16
  48. autobyteus/agent/status/status_update_utils.py +73 -0
  49. autobyteus/agent/streaming/__init__.py +52 -5
  50. autobyteus/agent/streaming/adapters/__init__.py +18 -0
  51. autobyteus/agent/streaming/adapters/invocation_adapter.py +184 -0
  52. autobyteus/agent/streaming/adapters/tool_call_parsing.py +163 -0
  53. autobyteus/agent/streaming/adapters/tool_syntax_registry.py +67 -0
  54. autobyteus/agent/streaming/agent_event_stream.py +3 -183
  55. autobyteus/agent/streaming/api_tool_call/__init__.py +16 -0
  56. autobyteus/agent/streaming/api_tool_call/file_content_streamer.py +56 -0
  57. autobyteus/agent/streaming/api_tool_call/json_string_field_extractor.py +175 -0
  58. autobyteus/agent/streaming/api_tool_call_streaming_response_handler.py +4 -0
  59. autobyteus/agent/streaming/events/__init__.py +6 -0
  60. autobyteus/agent/streaming/events/stream_event_payloads.py +284 -0
  61. autobyteus/agent/streaming/events/stream_events.py +141 -0
  62. autobyteus/agent/streaming/handlers/__init__.py +15 -0
  63. autobyteus/agent/streaming/handlers/api_tool_call_streaming_response_handler.py +303 -0
  64. autobyteus/agent/streaming/handlers/parsing_streaming_response_handler.py +107 -0
  65. autobyteus/agent/streaming/handlers/pass_through_streaming_response_handler.py +107 -0
  66. autobyteus/agent/streaming/handlers/streaming_handler_factory.py +177 -0
  67. autobyteus/agent/streaming/handlers/streaming_response_handler.py +58 -0
  68. autobyteus/agent/streaming/parser/__init__.py +61 -0
  69. autobyteus/agent/streaming/parser/event_emitter.py +181 -0
  70. autobyteus/agent/streaming/parser/events.py +4 -0
  71. autobyteus/agent/streaming/parser/invocation_adapter.py +4 -0
  72. autobyteus/agent/streaming/parser/json_parsing_strategies/__init__.py +19 -0
  73. autobyteus/agent/streaming/parser/json_parsing_strategies/base.py +32 -0
  74. autobyteus/agent/streaming/parser/json_parsing_strategies/default.py +34 -0
  75. autobyteus/agent/streaming/parser/json_parsing_strategies/gemini.py +31 -0
  76. autobyteus/agent/streaming/parser/json_parsing_strategies/openai.py +64 -0
  77. autobyteus/agent/streaming/parser/json_parsing_strategies/registry.py +75 -0
  78. autobyteus/agent/streaming/parser/parser_context.py +227 -0
  79. autobyteus/agent/streaming/parser/parser_factory.py +132 -0
  80. autobyteus/agent/streaming/parser/sentinel_format.py +7 -0
  81. autobyteus/agent/streaming/parser/state_factory.py +62 -0
  82. autobyteus/agent/streaming/parser/states/__init__.py +1 -0
  83. autobyteus/agent/streaming/parser/states/base_state.py +60 -0
  84. autobyteus/agent/streaming/parser/states/custom_xml_tag_run_bash_parsing_state.py +38 -0
  85. autobyteus/agent/streaming/parser/states/custom_xml_tag_write_file_parsing_state.py +55 -0
  86. autobyteus/agent/streaming/parser/states/delimited_content_state.py +146 -0
  87. autobyteus/agent/streaming/parser/states/json_initialization_state.py +144 -0
  88. autobyteus/agent/streaming/parser/states/json_tool_parsing_state.py +137 -0
  89. autobyteus/agent/streaming/parser/states/sentinel_content_state.py +30 -0
  90. autobyteus/agent/streaming/parser/states/sentinel_initialization_state.py +117 -0
  91. autobyteus/agent/streaming/parser/states/text_state.py +78 -0
  92. autobyteus/agent/streaming/parser/states/xml_patch_file_tool_parsing_state.py +328 -0
  93. autobyteus/agent/streaming/parser/states/xml_run_bash_tool_parsing_state.py +129 -0
  94. autobyteus/agent/streaming/parser/states/xml_tag_initialization_state.py +151 -0
  95. autobyteus/agent/streaming/parser/states/xml_tool_parsing_state.py +63 -0
  96. autobyteus/agent/streaming/parser/states/xml_write_file_tool_parsing_state.py +343 -0
  97. autobyteus/agent/streaming/parser/strategies/__init__.py +17 -0
  98. autobyteus/agent/streaming/parser/strategies/base.py +24 -0
  99. autobyteus/agent/streaming/parser/strategies/json_tool_strategy.py +26 -0
  100. autobyteus/agent/streaming/parser/strategies/registry.py +28 -0
  101. autobyteus/agent/streaming/parser/strategies/sentinel_strategy.py +23 -0
  102. autobyteus/agent/streaming/parser/strategies/xml_tag_strategy.py +21 -0
  103. autobyteus/agent/streaming/parser/stream_scanner.py +167 -0
  104. autobyteus/agent/streaming/parser/streaming_parser.py +212 -0
  105. autobyteus/agent/streaming/parser/tool_call_parsing.py +4 -0
  106. autobyteus/agent/streaming/parser/tool_constants.py +7 -0
  107. autobyteus/agent/streaming/parser/tool_syntax_registry.py +4 -0
  108. autobyteus/agent/streaming/parser/xml_tool_parsing_state_registry.py +55 -0
  109. autobyteus/agent/streaming/parsing_streaming_response_handler.py +4 -0
  110. autobyteus/agent/streaming/pass_through_streaming_response_handler.py +4 -0
  111. autobyteus/agent/streaming/queue_streamer.py +3 -57
  112. autobyteus/agent/streaming/segments/__init__.py +5 -0
  113. autobyteus/agent/streaming/segments/segment_events.py +82 -0
  114. autobyteus/agent/streaming/stream_event_payloads.py +2 -223
  115. autobyteus/agent/streaming/stream_events.py +3 -140
  116. autobyteus/agent/streaming/streaming_handler_factory.py +4 -0
  117. autobyteus/agent/streaming/streaming_response_handler.py +4 -0
  118. autobyteus/agent/streaming/streams/__init__.py +5 -0
  119. autobyteus/agent/streaming/streams/agent_event_stream.py +197 -0
  120. autobyteus/agent/streaming/utils/__init__.py +5 -0
  121. autobyteus/agent/streaming/utils/queue_streamer.py +59 -0
  122. autobyteus/agent/system_prompt_processor/__init__.py +2 -0
  123. autobyteus/agent/system_prompt_processor/available_skills_processor.py +96 -0
  124. autobyteus/agent/system_prompt_processor/base_processor.py +1 -1
  125. autobyteus/agent/system_prompt_processor/processor_meta.py +15 -2
  126. autobyteus/agent/system_prompt_processor/tool_manifest_injector_processor.py +39 -58
  127. autobyteus/agent/token_budget.py +56 -0
  128. autobyteus/agent/tool_execution_result_processor/memory_ingest_tool_result_processor.py +29 -0
  129. autobyteus/agent/tool_invocation.py +16 -40
  130. autobyteus/agent/tool_invocation_preprocessor/__init__.py +9 -0
  131. autobyteus/agent/tool_invocation_preprocessor/base_preprocessor.py +45 -0
  132. autobyteus/agent/tool_invocation_preprocessor/processor_definition.py +15 -0
  133. autobyteus/agent/tool_invocation_preprocessor/processor_meta.py +33 -0
  134. autobyteus/agent/tool_invocation_preprocessor/processor_registry.py +60 -0
  135. autobyteus/agent/utils/wait_for_idle.py +12 -14
  136. autobyteus/agent/workspace/base_workspace.py +6 -27
  137. autobyteus/agent_team/agent_team.py +3 -3
  138. autobyteus/agent_team/agent_team_builder.py +1 -41
  139. autobyteus/agent_team/bootstrap_steps/__init__.py +0 -4
  140. autobyteus/agent_team/bootstrap_steps/agent_configuration_preparation_step.py +8 -18
  141. autobyteus/agent_team/bootstrap_steps/agent_team_bootstrapper.py +4 -16
  142. autobyteus/agent_team/bootstrap_steps/base_agent_team_bootstrap_step.py +1 -2
  143. autobyteus/agent_team/bootstrap_steps/coordinator_initialization_step.py +1 -2
  144. autobyteus/agent_team/bootstrap_steps/task_notifier_initialization_step.py +1 -2
  145. autobyteus/agent_team/bootstrap_steps/team_context_initialization_step.py +4 -4
  146. autobyteus/agent_team/context/agent_team_config.py +6 -3
  147. autobyteus/agent_team/context/agent_team_context.py +25 -3
  148. autobyteus/agent_team/context/agent_team_runtime_state.py +9 -6
  149. autobyteus/agent_team/events/__init__.py +11 -0
  150. autobyteus/agent_team/events/agent_team_event_dispatcher.py +22 -9
  151. autobyteus/agent_team/events/agent_team_events.py +16 -0
  152. autobyteus/agent_team/events/event_store.py +57 -0
  153. autobyteus/agent_team/factory/agent_team_factory.py +8 -0
  154. autobyteus/agent_team/handlers/inter_agent_message_request_event_handler.py +18 -2
  155. autobyteus/agent_team/handlers/lifecycle_agent_team_event_handler.py +21 -5
  156. autobyteus/agent_team/handlers/process_user_message_event_handler.py +17 -8
  157. autobyteus/agent_team/handlers/tool_approval_team_event_handler.py +19 -4
  158. autobyteus/agent_team/runtime/agent_team_runtime.py +41 -10
  159. autobyteus/agent_team/runtime/agent_team_worker.py +69 -5
  160. autobyteus/agent_team/status/__init__.py +14 -0
  161. autobyteus/agent_team/status/agent_team_status.py +18 -0
  162. autobyteus/agent_team/status/agent_team_status_manager.py +33 -0
  163. autobyteus/agent_team/status/status_deriver.py +62 -0
  164. autobyteus/agent_team/status/status_update_utils.py +42 -0
  165. autobyteus/agent_team/streaming/__init__.py +2 -2
  166. autobyteus/agent_team/streaming/agent_team_event_notifier.py +6 -6
  167. autobyteus/agent_team/streaming/agent_team_stream_event_payloads.py +4 -4
  168. autobyteus/agent_team/streaming/agent_team_stream_events.py +3 -3
  169. autobyteus/agent_team/system_prompt_processor/__init__.py +6 -0
  170. autobyteus/agent_team/system_prompt_processor/team_manifest_injector_processor.py +76 -0
  171. autobyteus/agent_team/task_notification/task_notification_mode.py +19 -0
  172. autobyteus/agent_team/utils/wait_for_idle.py +4 -4
  173. autobyteus/cli/agent_cli.py +18 -10
  174. autobyteus/cli/agent_team_tui/app.py +14 -11
  175. autobyteus/cli/agent_team_tui/state.py +13 -15
  176. autobyteus/cli/agent_team_tui/widgets/agent_list_sidebar.py +15 -15
  177. autobyteus/cli/agent_team_tui/widgets/focus_pane.py +143 -36
  178. autobyteus/cli/agent_team_tui/widgets/renderables.py +1 -1
  179. autobyteus/cli/agent_team_tui/widgets/shared.py +25 -25
  180. autobyteus/cli/cli_display.py +193 -44
  181. autobyteus/cli/workflow_tui/app.py +9 -10
  182. autobyteus/cli/workflow_tui/state.py +14 -16
  183. autobyteus/cli/workflow_tui/widgets/agent_list_sidebar.py +15 -15
  184. autobyteus/cli/workflow_tui/widgets/focus_pane.py +137 -35
  185. autobyteus/cli/workflow_tui/widgets/renderables.py +1 -1
  186. autobyteus/cli/workflow_tui/widgets/shared.py +25 -25
  187. autobyteus/clients/autobyteus_client.py +94 -1
  188. autobyteus/events/event_types.py +11 -18
  189. autobyteus/llm/api/autobyteus_llm.py +33 -29
  190. autobyteus/llm/api/claude_llm.py +142 -36
  191. autobyteus/llm/api/gemini_llm.py +163 -59
  192. autobyteus/llm/api/grok_llm.py +1 -1
  193. autobyteus/llm/api/minimax_llm.py +26 -0
  194. autobyteus/llm/api/mistral_llm.py +113 -87
  195. autobyteus/llm/api/ollama_llm.py +9 -42
  196. autobyteus/llm/api/openai_compatible_llm.py +127 -91
  197. autobyteus/llm/api/openai_llm.py +3 -3
  198. autobyteus/llm/api/openai_responses_llm.py +324 -0
  199. autobyteus/llm/api/zhipu_llm.py +21 -2
  200. autobyteus/llm/autobyteus_provider.py +70 -60
  201. autobyteus/llm/base_llm.py +85 -81
  202. autobyteus/llm/converters/__init__.py +14 -0
  203. autobyteus/llm/converters/anthropic_tool_call_converter.py +37 -0
  204. autobyteus/llm/converters/gemini_tool_call_converter.py +57 -0
  205. autobyteus/llm/converters/mistral_tool_call_converter.py +37 -0
  206. autobyteus/llm/converters/openai_tool_call_converter.py +38 -0
  207. autobyteus/llm/extensions/base_extension.py +6 -12
  208. autobyteus/llm/extensions/token_usage_tracking_extension.py +45 -18
  209. autobyteus/llm/llm_factory.py +282 -204
  210. autobyteus/llm/lmstudio_provider.py +60 -49
  211. autobyteus/llm/models.py +35 -2
  212. autobyteus/llm/ollama_provider.py +60 -49
  213. autobyteus/llm/ollama_provider_resolver.py +0 -1
  214. autobyteus/llm/prompt_renderers/__init__.py +19 -0
  215. autobyteus/llm/prompt_renderers/anthropic_prompt_renderer.py +104 -0
  216. autobyteus/llm/prompt_renderers/autobyteus_prompt_renderer.py +19 -0
  217. autobyteus/llm/prompt_renderers/base_prompt_renderer.py +10 -0
  218. autobyteus/llm/prompt_renderers/gemini_prompt_renderer.py +63 -0
  219. autobyteus/llm/prompt_renderers/mistral_prompt_renderer.py +87 -0
  220. autobyteus/llm/prompt_renderers/ollama_prompt_renderer.py +51 -0
  221. autobyteus/llm/prompt_renderers/openai_chat_renderer.py +97 -0
  222. autobyteus/llm/prompt_renderers/openai_responses_renderer.py +101 -0
  223. autobyteus/llm/providers.py +1 -3
  224. autobyteus/llm/token_counter/claude_token_counter.py +56 -25
  225. autobyteus/llm/token_counter/mistral_token_counter.py +12 -8
  226. autobyteus/llm/token_counter/openai_token_counter.py +24 -5
  227. autobyteus/llm/token_counter/token_counter_factory.py +12 -5
  228. autobyteus/llm/utils/llm_config.py +6 -12
  229. autobyteus/llm/utils/media_payload_formatter.py +27 -20
  230. autobyteus/llm/utils/messages.py +55 -3
  231. autobyteus/llm/utils/response_types.py +3 -0
  232. autobyteus/llm/utils/tool_call_delta.py +31 -0
  233. autobyteus/memory/__init__.py +35 -0
  234. autobyteus/memory/compaction/__init__.py +9 -0
  235. autobyteus/memory/compaction/compaction_result.py +8 -0
  236. autobyteus/memory/compaction/compactor.py +89 -0
  237. autobyteus/memory/compaction/summarizer.py +11 -0
  238. autobyteus/memory/compaction_snapshot_builder.py +84 -0
  239. autobyteus/memory/memory_manager.py +205 -0
  240. autobyteus/memory/models/__init__.py +14 -0
  241. autobyteus/memory/models/episodic_item.py +41 -0
  242. autobyteus/memory/models/memory_types.py +7 -0
  243. autobyteus/memory/models/raw_trace_item.py +79 -0
  244. autobyteus/memory/models/semantic_item.py +41 -0
  245. autobyteus/memory/models/tool_interaction.py +20 -0
  246. autobyteus/memory/path_resolver.py +27 -0
  247. autobyteus/memory/policies/__init__.py +5 -0
  248. autobyteus/memory/policies/compaction_policy.py +16 -0
  249. autobyteus/memory/restore/__init__.py +1 -0
  250. autobyteus/memory/restore/working_context_snapshot_bootstrapper.py +61 -0
  251. autobyteus/memory/retrieval/__init__.py +7 -0
  252. autobyteus/memory/retrieval/memory_bundle.py +11 -0
  253. autobyteus/memory/retrieval/retriever.py +13 -0
  254. autobyteus/memory/store/__init__.py +9 -0
  255. autobyteus/memory/store/base_store.py +14 -0
  256. autobyteus/memory/store/file_store.py +98 -0
  257. autobyteus/memory/store/working_context_snapshot_store.py +28 -0
  258. autobyteus/memory/tool_interaction_builder.py +46 -0
  259. autobyteus/memory/turn_tracker.py +9 -0
  260. autobyteus/memory/working_context_snapshot.py +69 -0
  261. autobyteus/memory/working_context_snapshot_serializer.py +135 -0
  262. autobyteus/multimedia/audio/api/autobyteus_audio_client.py +19 -5
  263. autobyteus/multimedia/audio/api/gemini_audio_client.py +109 -16
  264. autobyteus/multimedia/audio/audio_client_factory.py +47 -9
  265. autobyteus/multimedia/audio/audio_model.py +2 -1
  266. autobyteus/multimedia/image/api/autobyteus_image_client.py +19 -5
  267. autobyteus/multimedia/image/api/gemini_image_client.py +39 -17
  268. autobyteus/multimedia/image/api/openai_image_client.py +125 -43
  269. autobyteus/multimedia/image/autobyteus_image_provider.py +2 -1
  270. autobyteus/multimedia/image/image_client_factory.py +47 -15
  271. autobyteus/multimedia/image/image_model.py +5 -2
  272. autobyteus/multimedia/providers.py +3 -2
  273. autobyteus/skills/loader.py +71 -0
  274. autobyteus/skills/model.py +11 -0
  275. autobyteus/skills/registry.py +70 -0
  276. autobyteus/task_management/tools/todo_tools/add_todo.py +2 -2
  277. autobyteus/task_management/tools/todo_tools/create_todo_list.py +2 -2
  278. autobyteus/task_management/tools/todo_tools/update_todo_status.py +2 -2
  279. autobyteus/tools/__init__.py +34 -47
  280. autobyteus/tools/base_tool.py +7 -0
  281. autobyteus/tools/file/__init__.py +2 -6
  282. autobyteus/tools/file/patch_file.py +149 -0
  283. autobyteus/tools/file/read_file.py +36 -5
  284. autobyteus/tools/file/write_file.py +4 -1
  285. autobyteus/tools/functional_tool.py +43 -6
  286. autobyteus/tools/mcp/__init__.py +2 -0
  287. autobyteus/tools/mcp/config_service.py +5 -1
  288. autobyteus/tools/mcp/server/__init__.py +2 -0
  289. autobyteus/tools/mcp/server/http_managed_mcp_server.py +1 -1
  290. autobyteus/tools/mcp/server/websocket_managed_mcp_server.py +141 -0
  291. autobyteus/tools/mcp/server_instance_manager.py +8 -1
  292. autobyteus/tools/mcp/types.py +61 -0
  293. autobyteus/tools/multimedia/audio_tools.py +70 -17
  294. autobyteus/tools/multimedia/download_media_tool.py +18 -4
  295. autobyteus/tools/multimedia/image_tools.py +246 -62
  296. autobyteus/tools/operation_executor/journal_manager.py +107 -0
  297. autobyteus/tools/operation_executor/operation_event_buffer.py +57 -0
  298. autobyteus/tools/operation_executor/operation_event_producer.py +29 -0
  299. autobyteus/tools/operation_executor/operation_executor.py +58 -0
  300. autobyteus/tools/registry/tool_definition.py +43 -2
  301. autobyteus/tools/skill/load_skill.py +50 -0
  302. autobyteus/tools/terminal/__init__.py +45 -0
  303. autobyteus/tools/terminal/ansi_utils.py +32 -0
  304. autobyteus/tools/terminal/background_process_manager.py +233 -0
  305. autobyteus/tools/terminal/output_buffer.py +105 -0
  306. autobyteus/tools/terminal/prompt_detector.py +63 -0
  307. autobyteus/tools/terminal/pty_session.py +241 -0
  308. autobyteus/tools/terminal/session_factory.py +20 -0
  309. autobyteus/tools/terminal/terminal_session_manager.py +226 -0
  310. autobyteus/tools/terminal/tools/__init__.py +13 -0
  311. autobyteus/tools/terminal/tools/get_process_output.py +81 -0
  312. autobyteus/tools/terminal/tools/run_bash.py +109 -0
  313. autobyteus/tools/terminal/tools/start_background_process.py +104 -0
  314. autobyteus/tools/terminal/tools/stop_background_process.py +67 -0
  315. autobyteus/tools/terminal/types.py +54 -0
  316. autobyteus/tools/terminal/wsl_tmux_session.py +221 -0
  317. autobyteus/tools/terminal/wsl_utils.py +156 -0
  318. autobyteus/tools/transaction_management/backup_handler.py +48 -0
  319. autobyteus/tools/transaction_management/operation_lifecycle_manager.py +62 -0
  320. autobyteus/tools/usage/__init__.py +1 -2
  321. autobyteus/tools/usage/formatters/__init__.py +17 -1
  322. autobyteus/tools/usage/formatters/base_formatter.py +8 -0
  323. autobyteus/tools/usage/formatters/default_xml_schema_formatter.py +2 -2
  324. autobyteus/tools/usage/formatters/mistral_json_schema_formatter.py +18 -0
  325. autobyteus/tools/usage/formatters/patch_file_xml_example_formatter.py +64 -0
  326. autobyteus/tools/usage/formatters/patch_file_xml_schema_formatter.py +31 -0
  327. autobyteus/tools/usage/formatters/run_bash_xml_example_formatter.py +32 -0
  328. autobyteus/tools/usage/formatters/run_bash_xml_schema_formatter.py +36 -0
  329. autobyteus/tools/usage/formatters/write_file_xml_example_formatter.py +53 -0
  330. autobyteus/tools/usage/formatters/write_file_xml_schema_formatter.py +31 -0
  331. autobyteus/tools/usage/providers/tool_manifest_provider.py +10 -10
  332. autobyteus/tools/usage/registries/__init__.py +1 -3
  333. autobyteus/tools/usage/registries/tool_formatting_registry.py +115 -8
  334. autobyteus/tools/usage/tool_schema_provider.py +51 -0
  335. autobyteus/tools/web/__init__.py +4 -0
  336. autobyteus/tools/web/read_url_tool.py +80 -0
  337. autobyteus/utils/diff_utils.py +271 -0
  338. autobyteus/utils/download_utils.py +109 -0
  339. autobyteus/utils/file_utils.py +57 -2
  340. autobyteus/utils/gemini_helper.py +64 -0
  341. autobyteus/utils/gemini_model_mapping.py +71 -0
  342. autobyteus/utils/llm_output_formatter.py +75 -0
  343. autobyteus/utils/tool_call_format.py +36 -0
  344. autobyteus/workflow/agentic_workflow.py +3 -3
  345. autobyteus/workflow/bootstrap_steps/agent_tool_injection_step.py +2 -2
  346. autobyteus/workflow/bootstrap_steps/base_workflow_bootstrap_step.py +2 -2
  347. autobyteus/workflow/bootstrap_steps/coordinator_initialization_step.py +2 -2
  348. autobyteus/workflow/bootstrap_steps/coordinator_prompt_preparation_step.py +3 -9
  349. autobyteus/workflow/bootstrap_steps/workflow_bootstrapper.py +6 -6
  350. autobyteus/workflow/bootstrap_steps/workflow_runtime_queue_initialization_step.py +2 -2
  351. autobyteus/workflow/context/workflow_context.py +3 -3
  352. autobyteus/workflow/context/workflow_runtime_state.py +5 -5
  353. autobyteus/workflow/events/workflow_event_dispatcher.py +5 -5
  354. autobyteus/workflow/handlers/lifecycle_workflow_event_handler.py +3 -3
  355. autobyteus/workflow/handlers/process_user_message_event_handler.py +5 -5
  356. autobyteus/workflow/handlers/tool_approval_workflow_event_handler.py +2 -2
  357. autobyteus/workflow/runtime/workflow_runtime.py +8 -8
  358. autobyteus/workflow/runtime/workflow_worker.py +3 -3
  359. autobyteus/workflow/status/__init__.py +11 -0
  360. autobyteus/workflow/status/workflow_status.py +19 -0
  361. autobyteus/workflow/status/workflow_status_manager.py +48 -0
  362. autobyteus/workflow/streaming/__init__.py +2 -2
  363. autobyteus/workflow/streaming/workflow_event_notifier.py +7 -7
  364. autobyteus/workflow/streaming/workflow_stream_event_payloads.py +4 -4
  365. autobyteus/workflow/streaming/workflow_stream_events.py +3 -3
  366. autobyteus/workflow/utils/wait_for_idle.py +4 -4
  367. autobyteus-1.3.0.dist-info/METADATA +293 -0
  368. autobyteus-1.3.0.dist-info/RECORD +606 -0
  369. {autobyteus-1.2.1.dist-info → autobyteus-1.3.0.dist-info}/WHEEL +1 -1
  370. {autobyteus-1.2.1.dist-info → autobyteus-1.3.0.dist-info}/top_level.txt +0 -1
  371. autobyteus/agent/bootstrap_steps/agent_runtime_queue_initialization_step.py +0 -57
  372. autobyteus/agent/hooks/__init__.py +0 -16
  373. autobyteus/agent/hooks/base_phase_hook.py +0 -78
  374. autobyteus/agent/hooks/hook_definition.py +0 -36
  375. autobyteus/agent/hooks/hook_meta.py +0 -37
  376. autobyteus/agent/hooks/hook_registry.py +0 -106
  377. autobyteus/agent/llm_response_processor/provider_aware_tool_usage_processor.py +0 -103
  378. autobyteus/agent/phases/__init__.py +0 -18
  379. autobyteus/agent/phases/discover.py +0 -53
  380. autobyteus/agent/phases/manager.py +0 -265
  381. autobyteus/agent/phases/transition_decorator.py +0 -40
  382. autobyteus/agent/phases/transition_info.py +0 -33
  383. autobyteus/agent/remote_agent.py +0 -244
  384. autobyteus/agent/workspace/workspace_definition.py +0 -36
  385. autobyteus/agent/workspace/workspace_meta.py +0 -37
  386. autobyteus/agent/workspace/workspace_registry.py +0 -72
  387. autobyteus/agent_team/bootstrap_steps/agent_team_runtime_queue_initialization_step.py +0 -25
  388. autobyteus/agent_team/bootstrap_steps/coordinator_prompt_preparation_step.py +0 -85
  389. autobyteus/agent_team/phases/__init__.py +0 -11
  390. autobyteus/agent_team/phases/agent_team_operational_phase.py +0 -19
  391. autobyteus/agent_team/phases/agent_team_phase_manager.py +0 -48
  392. autobyteus/llm/api/bedrock_llm.py +0 -92
  393. autobyteus/llm/api/groq_llm.py +0 -94
  394. autobyteus/llm/api/nvidia_llm.py +0 -108
  395. autobyteus/llm/utils/token_pricing_config.py +0 -87
  396. autobyteus/rpc/__init__.py +0 -73
  397. autobyteus/rpc/client/__init__.py +0 -17
  398. autobyteus/rpc/client/abstract_client_connection.py +0 -124
  399. autobyteus/rpc/client/client_connection_manager.py +0 -153
  400. autobyteus/rpc/client/sse_client_connection.py +0 -306
  401. autobyteus/rpc/client/stdio_client_connection.py +0 -280
  402. autobyteus/rpc/config/__init__.py +0 -13
  403. autobyteus/rpc/config/agent_server_config.py +0 -153
  404. autobyteus/rpc/config/agent_server_registry.py +0 -152
  405. autobyteus/rpc/hosting.py +0 -244
  406. autobyteus/rpc/protocol.py +0 -244
  407. autobyteus/rpc/server/__init__.py +0 -20
  408. autobyteus/rpc/server/agent_server_endpoint.py +0 -181
  409. autobyteus/rpc/server/base_method_handler.py +0 -40
  410. autobyteus/rpc/server/method_handlers.py +0 -259
  411. autobyteus/rpc/server/sse_server_handler.py +0 -182
  412. autobyteus/rpc/server/stdio_server_handler.py +0 -151
  413. autobyteus/rpc/server_main.py +0 -198
  414. autobyteus/rpc/transport_type.py +0 -13
  415. autobyteus/tools/bash/__init__.py +0 -2
  416. autobyteus/tools/bash/bash_executor.py +0 -100
  417. autobyteus/tools/browser/__init__.py +0 -2
  418. autobyteus/tools/browser/session_aware/browser_session_aware_navigate_to.py +0 -75
  419. autobyteus/tools/browser/session_aware/browser_session_aware_tool.py +0 -30
  420. autobyteus/tools/browser/session_aware/browser_session_aware_web_element_trigger.py +0 -154
  421. autobyteus/tools/browser/session_aware/browser_session_aware_webpage_reader.py +0 -89
  422. autobyteus/tools/browser/session_aware/browser_session_aware_webpage_screenshot_taker.py +0 -107
  423. autobyteus/tools/browser/session_aware/factory/browser_session_aware_web_element_trigger_factory.py +0 -14
  424. autobyteus/tools/browser/session_aware/factory/browser_session_aware_webpage_reader_factory.py +0 -26
  425. autobyteus/tools/browser/session_aware/factory/browser_session_aware_webpage_screenshot_taker_factory.py +0 -14
  426. autobyteus/tools/browser/session_aware/shared_browser_session.py +0 -11
  427. autobyteus/tools/browser/session_aware/shared_browser_session_manager.py +0 -25
  428. autobyteus/tools/browser/session_aware/web_element_action.py +0 -20
  429. autobyteus/tools/browser/standalone/__init__.py +0 -6
  430. autobyteus/tools/browser/standalone/factory/__init__.py +0 -0
  431. autobyteus/tools/browser/standalone/factory/webpage_reader_factory.py +0 -25
  432. autobyteus/tools/browser/standalone/factory/webpage_screenshot_taker_factory.py +0 -14
  433. autobyteus/tools/browser/standalone/navigate_to.py +0 -84
  434. autobyteus/tools/browser/standalone/web_page_pdf_generator.py +0 -101
  435. autobyteus/tools/browser/standalone/webpage_image_downloader.py +0 -169
  436. autobyteus/tools/browser/standalone/webpage_reader.py +0 -105
  437. autobyteus/tools/browser/standalone/webpage_screenshot_taker.py +0 -105
  438. autobyteus/tools/file/edit_file.py +0 -200
  439. autobyteus/tools/file/list_directory.py +0 -168
  440. autobyteus/tools/file/search_files.py +0 -188
  441. autobyteus/tools/timer.py +0 -175
  442. autobyteus/tools/usage/parsers/__init__.py +0 -22
  443. autobyteus/tools/usage/parsers/_json_extractor.py +0 -99
  444. autobyteus/tools/usage/parsers/_string_decoders.py +0 -18
  445. autobyteus/tools/usage/parsers/anthropic_xml_tool_usage_parser.py +0 -10
  446. autobyteus/tools/usage/parsers/base_parser.py +0 -41
  447. autobyteus/tools/usage/parsers/default_json_tool_usage_parser.py +0 -83
  448. autobyteus/tools/usage/parsers/default_xml_tool_usage_parser.py +0 -316
  449. autobyteus/tools/usage/parsers/exceptions.py +0 -13
  450. autobyteus/tools/usage/parsers/gemini_json_tool_usage_parser.py +0 -77
  451. autobyteus/tools/usage/parsers/openai_json_tool_usage_parser.py +0 -149
  452. autobyteus/tools/usage/parsers/provider_aware_tool_usage_parser.py +0 -59
  453. autobyteus/tools/usage/registries/tool_usage_parser_registry.py +0 -62
  454. autobyteus/workflow/phases/__init__.py +0 -11
  455. autobyteus/workflow/phases/workflow_operational_phase.py +0 -19
  456. autobyteus/workflow/phases/workflow_phase_manager.py +0 -48
  457. autobyteus-1.2.1.dist-info/METADATA +0 -205
  458. autobyteus-1.2.1.dist-info/RECORD +0 -511
  459. examples/__init__.py +0 -1
  460. examples/agent_team/__init__.py +0 -1
  461. examples/discover_phase_transitions.py +0 -104
  462. examples/run_agentic_software_engineer.py +0 -239
  463. examples/run_browser_agent.py +0 -262
  464. examples/run_google_slides_agent.py +0 -287
  465. examples/run_mcp_browser_client.py +0 -174
  466. examples/run_mcp_google_slides_client.py +0 -270
  467. examples/run_mcp_list_tools.py +0 -189
  468. examples/run_poem_writer.py +0 -284
  469. examples/run_sqlite_agent.py +0 -295
  470. /autobyteus/{tools/browser/session_aware → skills}/__init__.py +0 -0
  471. /autobyteus/tools/{browser/session_aware/factory → skill}/__init__.py +0 -0
  472. {autobyteus-1.2.1.dist-info → autobyteus-1.3.0.dist-info}/licenses/LICENSE +0 -0
@@ -1,85 +1,59 @@
1
1
  import logging
2
+ import inspect
2
3
  import os
3
4
  from abc import ABC
4
5
  from typing import Optional, List, AsyncGenerator, Dict, Any
5
6
  from openai import OpenAI
6
7
  from openai.types.completion_usage import CompletionUsage
7
8
  from openai.types.chat import ChatCompletionChunk
8
- import asyncio
9
9
 
10
10
  from autobyteus.llm.base_llm import BaseLLM
11
11
  from autobyteus.llm.models import LLMModel
12
12
  from autobyteus.llm.utils.llm_config import LLMConfig
13
- from autobyteus.llm.utils.media_payload_formatter import image_source_to_base64, create_data_uri, get_mime_type, is_valid_image_path
14
13
  from autobyteus.llm.utils.token_usage import TokenUsage
15
14
  from autobyteus.llm.utils.response_types import CompleteResponse, ChunkResponse
16
- from autobyteus.llm.user_message import LLMUserMessage
17
15
  from autobyteus.llm.utils.messages import Message
16
+ from autobyteus.llm.prompt_renderers.openai_chat_renderer import OpenAIChatRenderer
18
17
 
19
18
  logger = logging.getLogger(__name__)
20
19
 
21
- async def _format_openai_history(messages: List[Message]) -> List[Dict[str, Any]]:
22
- """A local async function to format history for the OpenAI SDK, handling image processing."""
23
- formatted_messages = []
24
- for msg in messages:
25
- # For multimodal messages, build the content list of parts
26
- if msg.image_urls or msg.audio_urls or msg.video_urls:
27
- content_parts: List[Dict[str, Any]] = []
28
- if msg.content:
29
- content_parts.append({"type": "text", "text": msg.content})
30
-
31
- image_tasks = []
32
- if msg.image_urls:
33
- for url in msg.image_urls:
34
- # Create an async task for each image to process them concurrently
35
- image_tasks.append(image_source_to_base64(url))
36
-
37
- try:
38
- base64_images = await asyncio.gather(*image_tasks)
39
- for i, b64_image in enumerate(base64_images):
40
- original_url = msg.image_urls[i]
41
- # Determine mime type from original path if possible, otherwise default
42
- mime_type = get_mime_type(original_url) if is_valid_image_path(original_url) else "image/jpeg"
43
- content_parts.append(create_data_uri(mime_type, b64_image))
44
- except Exception as e:
45
- logger.error(f"Error processing one or more images: {e}")
46
-
47
- # Placeholder for future audio/video processing
48
- if msg.audio_urls:
49
- logger.warning("OpenAI compatible layer does not yet support audio; skipping.")
50
- if msg.video_urls:
51
- logger.warning("OpenAI compatible layer does not yet support video; skipping.")
52
-
53
- formatted_messages.append({"role": msg.role.value, "content": content_parts})
54
- else:
55
- # For text-only messages, use the simple string format
56
- formatted_messages.append({"role": msg.role.value, "content": msg.content})
57
- return formatted_messages
58
-
59
-
60
20
class OpenAICompatibleLLM(BaseLLM, ABC):
    """Base client for OpenAI-compatible chat-completions providers.

    Builds the effective LLMConfig (model defaults merged with caller
    overrides), resolves the API key (environment first, then an optional
    default), and prepares the OpenAI SDK client plus the chat renderer.
    """

    def __init__(
        self,
        model: LLMModel,
        api_key_env_var: str,
        base_url: str,
        llm_config: Optional[LLMConfig] = None,
        api_key_default: Optional[str] = None
    ):
        # Start from the model's default config (if it declares one) and
        # overlay the caller-supplied config so explicit values win.
        model_default_config = model.default_config if hasattr(model, "default_config") else None
        if model_default_config:
            effective_config = LLMConfig.from_dict(model_default_config.to_dict())
            if llm_config:
                effective_config.merge_with(llm_config)
        else:
            effective_config = llm_config or LLMConfig()

        # Resolve the API key: environment variable first, then the optional
        # default supplied by the subclass.
        api_key = os.getenv(api_key_env_var)
        if not api_key and api_key_default is not None:
            api_key = api_key_default
            # Security: log only the fact that a fallback was used — never the
            # key material itself (the previous message interpolated the key).
            logger.info(f"{api_key_env_var} not set, falling back to the provided default key.")

        if not api_key:
            # Do not embed the default key value in the error; exception text
            # frequently ends up in logs and bug reports.
            logger.error(f"{api_key_env_var} environment variable is not set and no default provided.")
            raise ValueError(f"{api_key_env_var} environment variable is not set and no usable default key was provided.")

        self.client = OpenAI(api_key=api_key, base_url=base_url)
        logger.info(f"Initialized OpenAI compatible client with base_url: {base_url}")

        super().__init__(model=model, llm_config=effective_config)
        # Respect the user/configured limit; None lets the provider default apply.
        self.max_tokens = effective_config.max_tokens
        self._renderer = OpenAIChatRenderer()
83
57
 
84
58
  def _create_token_usage(self, usage_data: Optional[CompletionUsage]) -> Optional[TokenUsage]:
85
59
  if not usage_data:
@@ -90,29 +64,32 @@ class OpenAICompatibleLLM(BaseLLM, ABC):
90
64
  total_tokens=usage_data.total_tokens
91
65
  )
92
66
 
93
- async def _send_user_message_to_llm(
94
- self, user_message: LLMUserMessage, **kwargs
67
+ async def _send_messages_to_llm(
68
+ self, messages: List[Message], **kwargs
95
69
  ) -> CompleteResponse:
96
- self.add_user_message(user_message)
97
-
98
70
  try:
99
- formatted_messages = await _format_openai_history(self.messages)
100
- logger.info(f"Sending request to {self.model.provider.value} API")
101
-
71
+ formatted_messages = await self._renderer.render(messages)
72
+ logger.info("Sending request to %s API", self.model.provider.value)
73
+
102
74
  params: Dict[str, Any] = {
103
75
  "model": self.model.value,
104
76
  "messages": formatted_messages,
105
77
  }
106
78
 
107
- if self.config.uses_max_completion_tokens:
79
+ if self.max_tokens is not None:
80
+ # For OpenAI-compatible APIs, prefer max_completion_tokens; legacy max_tokens removed.
108
81
  params["max_completion_tokens"] = self.max_tokens
109
- else:
110
- params["max_tokens"] = self.max_tokens
82
+ if self.config.extra_params:
83
+ self._apply_extra_params(params, self.config.extra_params)
84
+
85
+ if kwargs.get("tools"):
86
+ params["tools"] = kwargs["tools"]
87
+ if kwargs.get("tool_choice") is not None:
88
+ params["tool_choice"] = kwargs["tool_choice"]
111
89
 
112
90
  response = self.client.chat.completions.create(**params)
113
91
  full_message = response.choices[0].message
114
92
 
115
- # --- PRESERVED ORIGINAL LOGIC ---
116
93
  reasoning = None
117
94
  if hasattr(full_message, "reasoning_content") and full_message.reasoning_content:
118
95
  reasoning = full_message.reasoning_content
@@ -124,34 +101,30 @@ class OpenAICompatibleLLM(BaseLLM, ABC):
124
101
  main_content = full_message.content
125
102
  elif "content" in full_message and full_message["content"]:
126
103
  main_content = full_message["content"]
127
- # --- END PRESERVED LOGIC ---
128
-
129
- self.add_assistant_message(main_content, reasoning_content=reasoning)
130
104
 
131
105
  token_usage = self._create_token_usage(response.usage)
132
- logger.info(f"Received response from {self.model.provider.value} API with usage data")
133
-
106
+ logger.info("Received response from %s API with usage data", self.model.provider.value)
107
+
134
108
  return CompleteResponse(
135
109
  content=main_content,
136
110
  reasoning=reasoning,
137
- usage=token_usage
111
+ usage=token_usage,
138
112
  )
139
113
  except Exception as e:
140
- logger.error(f"Error in {self.model.provider.value} API request: {str(e)}")
114
+ logger.error("Error in %s API request: %s", self.model.provider.value, str(e))
141
115
  raise ValueError(f"Error in {self.model.provider.value} API request: {str(e)}")
142
116
 
143
- async def _stream_user_message_to_llm(
144
- self, user_message: LLMUserMessage, **kwargs
117
+ async def _stream_messages_to_llm(
118
+ self, messages: List[Message], **kwargs
145
119
  ) -> AsyncGenerator[ChunkResponse, None]:
146
- self.add_user_message(user_message)
147
-
148
120
  accumulated_reasoning = ""
149
121
  accumulated_content = ""
122
+ tool_calls_logged = False
150
123
 
151
124
  try:
152
- formatted_messages = await _format_openai_history(self.messages)
153
- logger.info(f"Starting streaming request to {self.model.provider.value} API")
154
-
125
+ formatted_messages = await self._renderer.render(messages)
126
+ logger.info("Starting streaming request to %s API", self.model.provider.value)
127
+
155
128
  params: Dict[str, Any] = {
156
129
  "model": self.model.value,
157
130
  "messages": formatted_messages,
@@ -159,10 +132,15 @@ class OpenAICompatibleLLM(BaseLLM, ABC):
159
132
  "stream_options": {"include_usage": True},
160
133
  }
161
134
 
162
- if self.config.uses_max_completion_tokens:
135
+ if self.max_tokens is not None:
163
136
  params["max_completion_tokens"] = self.max_tokens
164
- else:
165
- params["max_tokens"] = self.max_tokens
137
+ if self.config.extra_params:
138
+ self._apply_extra_params(params, self.config.extra_params)
139
+
140
+ if kwargs.get("tools"):
141
+ params["tools"] = kwargs["tools"]
142
+ if kwargs.get("tool_choice") is not None:
143
+ params["tool_choice"] = kwargs["tool_choice"]
166
144
 
167
145
  stream = self.client.chat.completions.create(**params)
168
146
 
@@ -170,25 +148,41 @@ class OpenAICompatibleLLM(BaseLLM, ABC):
170
148
  chunk: ChatCompletionChunk
171
149
  if not chunk.choices:
172
150
  continue
173
-
151
+
174
152
  delta = chunk.choices[0].delta
175
153
 
176
- # --- PRESERVED ORIGINAL LOGIC (adapted for streaming) ---
177
154
  reasoning_chunk = None
178
155
  if hasattr(delta, "reasoning_content") and delta.reasoning_content:
179
156
  reasoning_chunk = delta.reasoning_content
180
157
  elif isinstance(delta, dict) and "reasoning_content" in delta and delta["reasoning_content"]:
181
158
  reasoning_chunk = delta["reasoning_content"]
182
-
159
+
183
160
  if reasoning_chunk:
184
161
  accumulated_reasoning += reasoning_chunk
185
162
  yield ChunkResponse(content="", reasoning=reasoning_chunk)
186
- # --- END PRESERVED LOGIC ---
163
+
164
+ tool_call_deltas = None
165
+ if hasattr(delta, "tool_calls") and delta.tool_calls:
166
+ from autobyteus.llm.converters.openai_tool_call_converter import convert_openai_tool_calls
167
+ tool_call_deltas = convert_openai_tool_calls(delta.tool_calls)
168
+ if tool_call_deltas and not tool_calls_logged:
169
+ logger.info(
170
+ "Streaming tool call deltas received from %s (count=%d).",
171
+ self.model.provider.value,
172
+ len(tool_call_deltas),
173
+ )
174
+ tool_calls_logged = True
187
175
 
188
176
  main_token = delta.content
189
- if main_token:
190
- accumulated_content += main_token
191
- yield ChunkResponse(content=main_token, reasoning=None)
177
+
178
+ if main_token or tool_call_deltas:
179
+ if main_token:
180
+ accumulated_content += main_token
181
+ yield ChunkResponse(
182
+ content=main_token or "",
183
+ reasoning=None,
184
+ tool_calls=tool_call_deltas,
185
+ )
192
186
 
193
187
  if hasattr(chunk, "usage") and chunk.usage is not None:
194
188
  token_usage = self._create_token_usage(chunk.usage)
@@ -196,15 +190,57 @@ class OpenAICompatibleLLM(BaseLLM, ABC):
196
190
  content="",
197
191
  reasoning=None,
198
192
  is_complete=True,
199
- usage=token_usage
193
+ usage=token_usage,
200
194
  )
201
-
202
- self.add_assistant_message(accumulated_content, reasoning_content=accumulated_reasoning)
203
- logger.info(f"Completed streaming response from {self.model.provider.value} API")
195
+
196
+ logger.info("Completed streaming response from %s API", self.model.provider.value)
204
197
 
205
198
  except Exception as e:
206
- logger.error(f"Error in {self.model.provider.value} API streaming: {str(e)}")
199
+ logger.error("Error in %s API streaming: %s", self.model.provider.value, str(e))
207
200
  raise ValueError(f"Error in {self.model.provider.value} API streaming: {str(e)}")
208
201
 
202
+ def _apply_extra_params(self, params: Dict[str, Any], extra_params: Dict[str, Any]) -> None:
203
+ # Use extra_body for provider-specific fields not in the OpenAI client signature.
204
+ if not extra_params:
205
+ return
206
+ extra = dict(extra_params)
207
+ allowed = self._get_chat_completion_param_names()
208
+
209
+ if any(key not in allowed for key in extra.keys()):
210
+ existing_body = params.get("extra_body")
211
+ if isinstance(existing_body, dict):
212
+ merged = dict(existing_body)
213
+ merged.update(extra)
214
+ params["extra_body"] = merged
215
+ else:
216
+ params["extra_body"] = extra
217
+ else:
218
+ params.update(extra)
219
+
220
+ def _get_chat_completion_param_names(self) -> set:
221
+ try:
222
+ return self._chat_completion_param_names
223
+ except AttributeError:
224
+ allowed = set(inspect.signature(self.client.chat.completions.create).parameters.keys())
225
+ self._chat_completion_param_names = allowed
226
+ return allowed
227
+
228
+
229
+ class OpenAIChatCompletionsLLM(OpenAICompatibleLLM):
230
+ """Strict OpenAI Chat Completions client: rejects unsupported extra params."""
231
+
232
+ def _apply_extra_params(self, params: Dict[str, Any], extra_params: Dict[str, Any]) -> None:
233
+ if not extra_params:
234
+ return
235
+ extra = dict(extra_params)
236
+ allowed = self._get_chat_completion_param_names()
237
+ unknown = [key for key in extra.keys() if key not in allowed]
238
+ if unknown:
239
+ raise ValueError(
240
+ "Unsupported OpenAI chat.completions params: "
241
+ + ", ".join(sorted(unknown))
242
+ )
243
+ params.update(extra)
244
+
209
245
async def cleanup(self):
    """Release resources held by this client; delegates to the base class."""
    await super().cleanup()
@@ -2,15 +2,15 @@ import logging
2
2
  from typing import Optional
3
3
  from autobyteus.llm.models import LLMModel
4
4
  from autobyteus.llm.utils.llm_config import LLMConfig
5
- from autobyteus.llm.api.openai_compatible_llm import OpenAICompatibleLLM
5
+ from autobyteus.llm.api.openai_responses_llm import OpenAIResponsesLLM
6
6
 
7
7
  logger = logging.getLogger(__name__)
8
8
 
9
- class OpenAILLM(OpenAICompatibleLLM):
9
+ class OpenAILLM(OpenAIResponsesLLM):
10
10
  def __init__(self, model: LLMModel = None, llm_config: LLMConfig = None):
11
11
  # Provide defaults if not specified
12
12
  if model is None:
13
- model = LLMModel['gpt-4o'] # Use factory access
13
+ model = LLMModel['gpt-5.2'] # Default to latest OpenAI model
14
14
  if llm_config is None:
15
15
  llm_config = LLMConfig()
16
16
 
@@ -0,0 +1,324 @@
1
+ import logging
2
+ import os
3
+ from typing import Optional, List, AsyncGenerator, Dict, Any
4
+
5
+ from openai import AsyncOpenAI
6
+ from openai.types.responses import ResponseStreamEvent
7
+
8
+ from autobyteus.llm.base_llm import BaseLLM
9
+ from autobyteus.llm.models import LLMModel
10
+ from autobyteus.llm.utils.llm_config import LLMConfig
11
+ from autobyteus.llm.utils.messages import Message
12
+ from autobyteus.llm.prompt_renderers.openai_responses_renderer import OpenAIResponsesRenderer
13
+ from autobyteus.llm.utils.response_types import CompleteResponse, ChunkResponse
14
+ from autobyteus.llm.utils.token_usage import TokenUsage
15
+ from autobyteus.llm.utils.tool_call_delta import ToolCallDelta
16
+
17
+ logger = logging.getLogger(__name__)
18
+
19
+
20
class OpenAIResponsesLLM(BaseLLM):
    """LLM client built on the OpenAI Responses API (``client.responses.create``)."""

    def __init__(
        self,
        model: LLMModel,
        api_key_env_var: str,
        base_url: str,
        llm_config: Optional[LLMConfig] = None,
        api_key_default: Optional[str] = None,
    ):
        # Merge the model's default config (if it declares one) with the
        # caller's overrides; explicit caller values win via merge_with.
        model_default_config = model.default_config if hasattr(model, "default_config") else None
        if model_default_config:
            effective_config = LLMConfig.from_dict(model_default_config.to_dict())
            if llm_config:
                effective_config.merge_with(llm_config)
        else:
            effective_config = llm_config or LLMConfig()

        # API key resolution: environment variable first, then the optional
        # default supplied by the subclass/caller.
        api_key = os.getenv(api_key_env_var)
        if not api_key:
            api_key = api_key_default

        if not api_key:
            raise ValueError(f"Missing API key. Set env var {api_key_env_var} or provide api_key_default.")

        # Async client: all request methods on this class are coroutines.
        self.client = AsyncOpenAI(api_key=api_key, base_url=base_url)
        logger.info(f"Initialized OpenAI Responses client with base_url: {base_url}")

        super().__init__(model=model, llm_config=effective_config)
        # None means "let the provider decide" (no max_output_tokens sent).
        self.max_tokens = effective_config.max_tokens
        self._renderer = OpenAIResponsesRenderer()
50
+
51
+ def _create_token_usage(self, usage_data) -> Optional[TokenUsage]:
52
+ if not usage_data:
53
+ return None
54
+ return TokenUsage(
55
+ prompt_tokens=usage_data.input_tokens,
56
+ completion_tokens=usage_data.output_tokens,
57
+ total_tokens=usage_data.total_tokens,
58
+ )
59
+
60
+ @staticmethod
61
+ def _extract_output_content(output_items: List[Any]) -> (str, Optional[str]):
62
+ content_chunks: List[str] = []
63
+ reasoning_chunks: List[str] = []
64
+
65
+ for item in output_items:
66
+ item_type = getattr(item, "type", None)
67
+ if item_type == "message":
68
+ for content_part in getattr(item, "content", []) or []:
69
+ if getattr(content_part, "type", None) == "output_text":
70
+ content_chunks.append(content_part.text)
71
+ elif item_type == "reasoning":
72
+ for summary in getattr(item, "summary", []) or []:
73
+ if getattr(summary, "type", None) == "summary_text":
74
+ reasoning_chunks.append(summary.text)
75
+
76
+ content = "".join(content_chunks)
77
+ reasoning = "".join(reasoning_chunks) if reasoning_chunks else None
78
+ return content, reasoning
79
+
80
+ def _build_reasoning_param(self) -> Optional[Dict[str, Any]]:
81
+ if not self.config.extra_params:
82
+ return None
83
+
84
+ reasoning_effort = self.config.extra_params.get("reasoning_effort")
85
+ reasoning_summary = self.config.extra_params.get("reasoning_summary")
86
+
87
+ reasoning: Dict[str, Any] = {}
88
+ if reasoning_effort:
89
+ reasoning["effort"] = reasoning_effort
90
+ if reasoning_summary and reasoning_summary != "none":
91
+ reasoning["summary"] = reasoning_summary
92
+
93
+ return reasoning or None
94
+
95
+ def _filter_extra_params(self) -> Dict[str, Any]:
96
+ if not self.config.extra_params:
97
+ return {}
98
+ filtered = dict(self.config.extra_params)
99
+ filtered.pop("reasoning_effort", None)
100
+ filtered.pop("reasoning_summary", None)
101
+ return filtered
102
+
103
+ @staticmethod
104
+ def _normalize_tools(tools: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
105
+ normalized: List[Dict[str, Any]] = []
106
+ for tool in tools:
107
+ if tool.get("type") == "function" and isinstance(tool.get("function"), dict):
108
+ fn = tool["function"]
109
+ normalized.append({
110
+ "type": "function",
111
+ "name": fn.get("name"),
112
+ "description": fn.get("description"),
113
+ "parameters": fn.get("parameters"),
114
+ })
115
+ else:
116
+ normalized.append(tool)
117
+ return normalized
118
+
119
async def _send_messages_to_llm(self, messages: List[Message], **kwargs) -> CompleteResponse:
    """Send a non-streaming request to the Responses API.

    Args:
        messages: Conversation history; rendered into Responses input items.
        **kwargs: Optional ``tools`` (Chat-Completions-style specs, normalized
            before sending) and ``tool_choice`` passthroughs.

    Returns:
        A CompleteResponse with content, optional reasoning summary, and usage.

    Raises:
        ValueError: wrapping any exception raised by the underlying API call.
    """
    try:
        formatted_messages = await self._renderer.render(messages)
        logger.info("Sending request to %s Responses API", self.model.provider.value)

        params: Dict[str, Any] = {
            "model": self.model.value,
            "input": formatted_messages,
        }

        # None means "let the provider decide" — omit the limit entirely.
        if self.max_tokens is not None:
            params["max_output_tokens"] = self.max_tokens

        # reasoning_* extra-param keys become the structured "reasoning" param...
        reasoning_param = self._build_reasoning_param()
        if reasoning_param:
            params["reasoning"] = reasoning_param

        # ...and are stripped from the remaining extras before merging.
        extra_params = self._filter_extra_params()
        if extra_params:
            params.update(extra_params)

        if kwargs.get("tools"):
            params["tools"] = self._normalize_tools(kwargs["tools"])
        if kwargs.get("tool_choice") is not None:
            params["tool_choice"] = kwargs["tool_choice"]

        response = await self.client.responses.create(**params)

        content, reasoning = self._extract_output_content(response.output)

        token_usage = self._create_token_usage(response.usage)
        logger.info("Received response from %s Responses API", self.model.provider.value)

        return CompleteResponse(content=content, reasoning=reasoning, usage=token_usage)
    except Exception as e:
        logger.error("Error in %s Responses API request: %s", self.model.provider.value, str(e))
        raise ValueError(f"Error in {self.model.provider.value} Responses API request: {str(e)}")
156
+
157
async def _stream_messages_to_llm(
    self, messages: List[Message], **kwargs
) -> AsyncGenerator[ChunkResponse, None]:
    """Stream a Responses-API request, yielding ChunkResponse deltas.

    Yields text deltas, reasoning-summary deltas, and tool-call deltas as
    they arrive; the final chunk carries ``is_complete=True`` plus usage.

    State tracked across events:
      - text_delta_seen / summary_delta_seen: item_ids whose incremental
        deltas were already forwarded, so the matching ``.done`` event is
        not double-emitted.
      - tool_call_state: per output_index bookkeeping ("emitted" header,
        "args_seen" arguments) so ``response.completed`` can backfill any
        tool call whose events were missed.

    Raises:
        ValueError: wrapping any exception raised during streaming.
    """
    accumulated_content = ""
    accumulated_reasoning = ""
    tool_call_state: Dict[int, Dict[str, Any]] = {}
    text_delta_seen: set[str] = set()
    summary_delta_seen: set[str] = set()

    try:
        formatted_messages = await self._renderer.render(messages)
        logger.info("Starting streaming request to %s Responses API", self.model.provider.value)

        params: Dict[str, Any] = {
            "model": self.model.value,
            "input": formatted_messages,
            "stream": True,
        }

        if self.max_tokens is not None:
            params["max_output_tokens"] = self.max_tokens

        reasoning_param = self._build_reasoning_param()
        if reasoning_param:
            params["reasoning"] = reasoning_param

        extra_params = self._filter_extra_params()
        if extra_params:
            params.update(extra_params)

        if kwargs.get("tools"):
            params["tools"] = self._normalize_tools(kwargs["tools"])
        if kwargs.get("tool_choice") is not None:
            params["tool_choice"] = kwargs["tool_choice"]

        stream = await self.client.responses.create(**params)

        async for event in stream:
            event: ResponseStreamEvent
            event_type = getattr(event, "type", None)

            if event_type == "response.output_text.delta":
                # Remember the item so its ".done" event is not re-emitted.
                text_delta_seen.add(event.item_id)
                accumulated_content += event.delta
                yield ChunkResponse(content=event.delta, reasoning=None)
                continue

            if event_type == "response.output_text.done":
                # Only emit if no deltas arrived for this item (fallback path).
                if event.item_id not in text_delta_seen:
                    accumulated_content += event.text
                    yield ChunkResponse(content=event.text, reasoning=None)
                continue

            if event_type == "response.reasoning_summary_text.delta":
                summary_delta_seen.add(event.item_id)
                accumulated_reasoning += event.delta
                yield ChunkResponse(content="", reasoning=event.delta)
                continue

            if event_type == "response.reasoning_summary_text.done":
                if event.item_id not in summary_delta_seen:
                    accumulated_reasoning += event.text
                    yield ChunkResponse(content="", reasoning=event.text)
                continue

            if event_type == "response.output_item.added":
                item = event.item
                # Announce a new tool call (name + call_id) as soon as it appears.
                if getattr(item, "type", None) == "function_call":
                    tool_call_state[event.output_index] = {
                        "call_id": item.call_id,
                        "name": item.name,
                        "args_seen": False,
                        "emitted": True,
                    }
                    yield ChunkResponse(
                        content="",
                        reasoning=None,
                        tool_calls=[
                            ToolCallDelta(
                                index=event.output_index,
                                call_id=item.call_id,
                                name=item.name,
                            )
                        ],
                    )
                continue

            if event_type == "response.function_call_arguments.delta":
                state = tool_call_state.get(event.output_index)
                if state:
                    state["args_seen"] = True
                # NOTE(review): the delta is forwarded even without prior
                # state for this index — confirm this matches the original
                # indentation; the diff rendering loses it.
                yield ChunkResponse(
                    content="",
                    reasoning=None,
                    tool_calls=[
                        ToolCallDelta(
                            index=event.output_index,
                            arguments_delta=event.delta,
                        )
                    ],
                )
                continue

            if event_type == "response.function_call_arguments.done":
                # Emit full arguments only if no incremental deltas were seen.
                state = tool_call_state.get(event.output_index)
                if state and not state.get("args_seen"):
                    yield ChunkResponse(
                        content="",
                        reasoning=None,
                        tool_calls=[
                            ToolCallDelta(
                                index=event.output_index,
                                arguments_delta=event.arguments,
                            )
                        ],
                    )
                continue

            if event_type == "response.completed":
                # Backfill: make sure every function_call in the final output
                # was announced and had its arguments emitted at least once.
                response = event.response
                for idx, item in enumerate(response.output or []):
                    if getattr(item, "type", None) != "function_call":
                        continue
                    state = tool_call_state.get(idx)
                    if not state or not state.get("emitted"):
                        yield ChunkResponse(
                            content="",
                            reasoning=None,
                            tool_calls=[
                                ToolCallDelta(
                                    index=idx,
                                    call_id=item.call_id,
                                    name=item.name,
                                )
                            ],
                        )
                        tool_call_state[idx] = {
                            "call_id": item.call_id,
                            "name": item.name,
                            "args_seen": False,
                            "emitted": True,
                        }
                        state = tool_call_state[idx]

                    if not state.get("args_seen"):
                        yield ChunkResponse(
                            content="",
                            reasoning=None,
                            tool_calls=[
                                ToolCallDelta(
                                    index=idx,
                                    arguments_delta=item.arguments,
                                )
                            ],
                        )
                        state["args_seen"] = True

                # Terminal chunk: completion flag plus token usage.
                token_usage = self._create_token_usage(response.usage)
                yield ChunkResponse(content="", reasoning=None, is_complete=True, usage=token_usage)

        logger.info("Completed streaming response from %s Responses API", self.model.provider.value)

    except Exception as e:
        logger.error("Error in %s Responses API streaming: %s", self.model.provider.value, str(e))
        raise ValueError(f"Error in {self.model.provider.value} Responses API streaming: {str(e)}")
322
+
323
async def cleanup(self):
    """Release resources held by this client; delegates to the base class."""
    await super().cleanup()