mojentic 0.7.4__tar.gz → 0.8.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (150)
  1. {mojentic-0.7.4 → mojentic-0.8.1}/PKG-INFO +23 -5
  2. {mojentic-0.7.4 → mojentic-0.8.1}/README.md +22 -4
  3. {mojentic-0.7.4 → mojentic-0.8.1}/pyproject.toml +1 -1
  4. {mojentic-0.7.4 → mojentic-0.8.1}/src/_examples/broker_examples.py +22 -3
  5. mojentic-0.8.1/src/_examples/fetch_openai_models.py +104 -0
  6. mojentic-0.8.1/src/_examples/openai_gateway_enhanced_demo.py +140 -0
  7. mojentic-0.8.1/src/mojentic/llm/gateways/openai.py +347 -0
  8. mojentic-0.8.1/src/mojentic/llm/gateways/openai_model_registry.py +351 -0
  9. mojentic-0.8.1/src/mojentic/llm/gateways/openai_model_registry_spec.py +181 -0
  10. mojentic-0.8.1/src/mojentic/llm/gateways/openai_temperature_handling_spec.py +245 -0
  11. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic.egg-info/PKG-INFO +23 -5
  12. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic.egg-info/SOURCES.txt +5 -0
  13. mojentic-0.7.4/src/mojentic/llm/gateways/openai.py +0 -214
  14. {mojentic-0.7.4 → mojentic-0.8.1}/LICENSE.md +0 -0
  15. {mojentic-0.7.4 → mojentic-0.8.1}/setup.cfg +0 -0
  16. {mojentic-0.7.4 → mojentic-0.8.1}/src/_examples/__init__.py +0 -0
  17. {mojentic-0.7.4 → mojentic-0.8.1}/src/_examples/async_dispatcher_example.py +0 -0
  18. {mojentic-0.7.4 → mojentic-0.8.1}/src/_examples/async_llm_example.py +0 -0
  19. {mojentic-0.7.4 → mojentic-0.8.1}/src/_examples/broker_as_tool.py +0 -0
  20. {mojentic-0.7.4 → mojentic-0.8.1}/src/_examples/broker_image_examples.py +0 -0
  21. {mojentic-0.7.4 → mojentic-0.8.1}/src/_examples/characterize_ollama.py +0 -0
  22. {mojentic-0.7.4 → mojentic-0.8.1}/src/_examples/characterize_openai.py +0 -0
  23. {mojentic-0.7.4 → mojentic-0.8.1}/src/_examples/chat_session.py +0 -0
  24. {mojentic-0.7.4 → mojentic-0.8.1}/src/_examples/chat_session_with_tool.py +0 -0
  25. {mojentic-0.7.4 → mojentic-0.8.1}/src/_examples/coding_file_tool.py +0 -0
  26. {mojentic-0.7.4 → mojentic-0.8.1}/src/_examples/current_datetime_tool_example.py +0 -0
  27. {mojentic-0.7.4 → mojentic-0.8.1}/src/_examples/design_analysis.py +0 -0
  28. {mojentic-0.7.4 → mojentic-0.8.1}/src/_examples/embeddings.py +0 -0
  29. {mojentic-0.7.4 → mojentic-0.8.1}/src/_examples/ensures_files_exist.py +0 -0
  30. {mojentic-0.7.4 → mojentic-0.8.1}/src/_examples/ephemeral_task_manager_example.py +0 -0
  31. {mojentic-0.7.4 → mojentic-0.8.1}/src/_examples/file_deduplication.py +0 -0
  32. {mojentic-0.7.4 → mojentic-0.8.1}/src/_examples/file_tool.py +0 -0
  33. {mojentic-0.7.4 → mojentic-0.8.1}/src/_examples/image_analysis.py +0 -0
  34. {mojentic-0.7.4 → mojentic-0.8.1}/src/_examples/image_broker.py +0 -0
  35. {mojentic-0.7.4 → mojentic-0.8.1}/src/_examples/image_broker_splat.py +0 -0
  36. {mojentic-0.7.4 → mojentic-0.8.1}/src/_examples/iterative_solver.py +0 -0
  37. {mojentic-0.7.4 → mojentic-0.8.1}/src/_examples/list_models.py +0 -0
  38. {mojentic-0.7.4 → mojentic-0.8.1}/src/_examples/model_characterization.py +0 -0
  39. {mojentic-0.7.4 → mojentic-0.8.1}/src/_examples/oversized_embeddings.py +0 -0
  40. {mojentic-0.7.4 → mojentic-0.8.1}/src/_examples/raw.py +0 -0
  41. {mojentic-0.7.4 → mojentic-0.8.1}/src/_examples/react/__init__.py +0 -0
  42. {mojentic-0.7.4 → mojentic-0.8.1}/src/_examples/react/agents/__init__.py +0 -0
  43. {mojentic-0.7.4 → mojentic-0.8.1}/src/_examples/react/agents/decisioning_agent.py +0 -0
  44. {mojentic-0.7.4 → mojentic-0.8.1}/src/_examples/react/agents/thinking_agent.py +0 -0
  45. {mojentic-0.7.4 → mojentic-0.8.1}/src/_examples/react/formatters.py +0 -0
  46. {mojentic-0.7.4 → mojentic-0.8.1}/src/_examples/react/models/__init__.py +0 -0
  47. {mojentic-0.7.4 → mojentic-0.8.1}/src/_examples/react/models/base.py +0 -0
  48. {mojentic-0.7.4 → mojentic-0.8.1}/src/_examples/react/models/events.py +0 -0
  49. {mojentic-0.7.4 → mojentic-0.8.1}/src/_examples/react.py +0 -0
  50. {mojentic-0.7.4 → mojentic-0.8.1}/src/_examples/recursive_agent.py +0 -0
  51. {mojentic-0.7.4 → mojentic-0.8.1}/src/_examples/routed_send_response.py +0 -0
  52. {mojentic-0.7.4 → mojentic-0.8.1}/src/_examples/simple_llm.py +0 -0
  53. {mojentic-0.7.4 → mojentic-0.8.1}/src/_examples/simple_llm_repl.py +0 -0
  54. {mojentic-0.7.4 → mojentic-0.8.1}/src/_examples/simple_structured.py +0 -0
  55. {mojentic-0.7.4 → mojentic-0.8.1}/src/_examples/simple_tool.py +0 -0
  56. {mojentic-0.7.4 → mojentic-0.8.1}/src/_examples/solver_chat_session.py +0 -0
  57. {mojentic-0.7.4 → mojentic-0.8.1}/src/_examples/streaming.py +0 -0
  58. {mojentic-0.7.4 → mojentic-0.8.1}/src/_examples/tell_user_example.py +0 -0
  59. {mojentic-0.7.4 → mojentic-0.8.1}/src/_examples/tracer_demo.py +0 -0
  60. {mojentic-0.7.4 → mojentic-0.8.1}/src/_examples/working_memory.py +0 -0
  61. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/__init__.py +0 -0
  62. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/agents/__init__.py +0 -0
  63. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/agents/agent_broker.py +0 -0
  64. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/agents/async_aggregator_agent.py +0 -0
  65. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/agents/async_aggregator_agent_spec.py +0 -0
  66. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/agents/async_llm_agent.py +0 -0
  67. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/agents/async_llm_agent_spec.py +0 -0
  68. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/agents/base_agent.py +0 -0
  69. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/agents/base_async_agent.py +0 -0
  70. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/agents/base_llm_agent.py +0 -0
  71. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/agents/base_llm_agent_spec.py +0 -0
  72. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/agents/correlation_aggregator_agent.py +0 -0
  73. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/agents/iterative_problem_solver.py +0 -0
  74. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/agents/output_agent.py +0 -0
  75. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/agents/simple_recursive_agent.py +0 -0
  76. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/async_dispatcher.py +0 -0
  77. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/async_dispatcher_spec.py +0 -0
  78. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/context/__init__.py +0 -0
  79. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/context/shared_working_memory.py +0 -0
  80. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/dispatcher.py +0 -0
  81. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/event.py +0 -0
  82. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/llm/__init__.py +0 -0
  83. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/llm/chat_session.py +0 -0
  84. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/llm/chat_session_spec.py +0 -0
  85. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/llm/gateways/__init__.py +0 -0
  86. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/llm/gateways/anthropic.py +0 -0
  87. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/llm/gateways/anthropic_messages_adapter.py +0 -0
  88. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/llm/gateways/embeddings_gateway.py +0 -0
  89. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/llm/gateways/file_gateway.py +0 -0
  90. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/llm/gateways/llm_gateway.py +0 -0
  91. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/llm/gateways/models.py +0 -0
  92. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/llm/gateways/ollama.py +0 -0
  93. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/llm/gateways/ollama_messages_adapter.py +0 -0
  94. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/llm/gateways/ollama_messages_adapter_spec.py +0 -0
  95. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/llm/gateways/openai_message_adapter_spec.py +0 -0
  96. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/llm/gateways/openai_messages_adapter.py +0 -0
  97. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/llm/gateways/tokenizer_gateway.py +0 -0
  98. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/llm/llm_broker.py +0 -0
  99. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/llm/llm_broker_spec.py +0 -0
  100. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/llm/message_composers.py +0 -0
  101. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/llm/message_composers_spec.py +0 -0
  102. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/llm/registry/__init__.py +0 -0
  103. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/llm/registry/llm_registry.py +0 -0
  104. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/llm/registry/models.py +0 -0
  105. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/llm/registry/populate_registry_from_ollama.py +0 -0
  106. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/llm/tools/__init__.py +0 -0
  107. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/llm/tools/ask_user_tool.py +0 -0
  108. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/llm/tools/current_datetime.py +0 -0
  109. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/llm/tools/date_resolver.py +0 -0
  110. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/llm/tools/date_resolver_spec.py +0 -0
  111. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/llm/tools/ephemeral_task_manager/__init__.py +0 -0
  112. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/llm/tools/ephemeral_task_manager/append_task_tool.py +0 -0
  113. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/llm/tools/ephemeral_task_manager/append_task_tool_spec.py +0 -0
  114. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/llm/tools/ephemeral_task_manager/clear_tasks_tool.py +0 -0
  115. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/llm/tools/ephemeral_task_manager/clear_tasks_tool_spec.py +0 -0
  116. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/llm/tools/ephemeral_task_manager/complete_task_tool.py +0 -0
  117. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/llm/tools/ephemeral_task_manager/complete_task_tool_spec.py +0 -0
  118. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/llm/tools/ephemeral_task_manager/ephemeral_task_list.py +0 -0
  119. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/llm/tools/ephemeral_task_manager/ephemeral_task_list_spec.py +0 -0
  120. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/llm/tools/ephemeral_task_manager/insert_task_after_tool.py +0 -0
  121. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/llm/tools/ephemeral_task_manager/insert_task_after_tool_spec.py +0 -0
  122. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/llm/tools/ephemeral_task_manager/list_tasks_tool.py +0 -0
  123. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/llm/tools/ephemeral_task_manager/list_tasks_tool_spec.py +0 -0
  124. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/llm/tools/ephemeral_task_manager/prepend_task_tool.py +0 -0
  125. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/llm/tools/ephemeral_task_manager/prepend_task_tool_spec.py +0 -0
  126. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/llm/tools/ephemeral_task_manager/start_task_tool.py +0 -0
  127. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/llm/tools/ephemeral_task_manager/start_task_tool_spec.py +0 -0
  128. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/llm/tools/file_manager.py +0 -0
  129. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/llm/tools/file_manager_spec.py +0 -0
  130. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/llm/tools/llm_tool.py +0 -0
  131. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/llm/tools/llm_tool_spec.py +0 -0
  132. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/llm/tools/organic_web_search.py +0 -0
  133. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/llm/tools/tell_user_tool.py +0 -0
  134. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/llm/tools/tool_wrapper.py +0 -0
  135. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/llm/tools/tool_wrapper_spec.py +0 -0
  136. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/router.py +0 -0
  137. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/router_spec.py +0 -0
  138. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/tracer/__init__.py +0 -0
  139. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/tracer/event_store.py +0 -0
  140. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/tracer/event_store_spec.py +0 -0
  141. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/tracer/null_tracer.py +0 -0
  142. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/tracer/tracer_events.py +0 -0
  143. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/tracer/tracer_events_spec.py +0 -0
  144. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/tracer/tracer_system.py +0 -0
  145. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/tracer/tracer_system_spec.py +0 -0
  146. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/utils/__init__.py +0 -0
  147. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic/utils/formatting.py +0 -0
  148. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic.egg-info/dependency_links.txt +0 -0
  149. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic.egg-info/requires.txt +0 -0
  150. {mojentic-0.7.4 → mojentic-0.8.1}/src/mojentic.egg-info/top_level.txt +0 -0

{mojentic-0.7.4 → mojentic-0.8.1}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: mojentic
- Version: 0.7.4
+ Version: 0.8.1
  Summary: Mojentic is an agentic framework that aims to provide a simple and flexible way to assemble teams of agents to solve complex problems.
  Author-email: Stacey Vetzal <stacey@vetzal.com>
  Project-URL: Homepage, https://github.com/svetzal/mojentic
@@ -39,7 +39,7 @@ Dynamic: license-file

  # Mojentic

- Mojentic is a framework that provides a simple and flexible way to interact with Large Language Models (LLMs). It offers integration with various LLM providers and includes tools for structured output generation, task automation, and more. The future direction is to facilitate a team of agents, but the current focus is on robust LLM interaction capabilities.
+ Mojentic is a framework that provides a simple and flexible way to interact with Large Language Models (LLMs). It offers integration with various LLM providers and includes tools for structured output generation, task automation, and more. With comprehensive support for all OpenAI models including GPT-5 and automatic parameter adaptation, Mojentic handles the complexities of different model types seamlessly. The future direction is to facilitate a team of agents, but the current focus is on robust LLM interaction capabilities.

  [![GitHub](https://img.shields.io/github/license/svetzal/mojentic)](LICENSE.md)
  [![Python Version](https://img.shields.io/badge/python-3.11%2B-blue)](https://www.python.org/downloads/)
@@ -48,6 +48,8 @@ Mojentic is a framework that provides a simple and flexible way to interact with
  ## 🚀 Features

  - **LLM Integration**: Support for multiple LLM providers (OpenAI, Ollama)
+ - **Latest OpenAI Models**: Full support for GPT-5, GPT-4.1, and all reasoning models (o1, o3, o4 series)
+ - **Automatic Model Adaptation**: Seamless parameter handling across different OpenAI model types
  - **Structured Output**: Generate structured data from LLM responses using Pydantic models
  - **Tools Integration**: Utilities for date resolution, image analysis, and more
  - **Multi-modal Capabilities**: Process and analyze images alongside text
@@ -84,8 +86,9 @@ from mojentic.llm.gateways.models import LLMMessage
  from mojentic.llm.tools.date_resolver import ResolveDateTool
  from pydantic import BaseModel, Field

- # Initialize with OpenAI
- openai_llm = LLMBroker(model="gpt-4o", gateway=OpenAIGateway(api_key="your_api_key"))
+ # Initialize with OpenAI (supports all models including GPT-5, GPT-4.1, reasoning models)
+ openai_llm = LLMBroker(model="gpt-5", gateway=OpenAIGateway(api_key="your_api_key"))
+ # Or use other models: "gpt-4o", "gpt-4.1", "o1-mini", "o3-mini", etc.

  # Or use Ollama for local LLMs
  ollama_llm = LLMBroker(model="llama3")
@@ -99,7 +102,7 @@ class Sentiment(BaseModel):
  label: str = Field(..., description="Label for the sentiment")

  sentiment = openai_llm.generate_object(
- messages=[LLMMessage(content="Hello, how are you?")],
+ messages=[LLMMessage(content="Hello, how are you?")],
  object_model=Sentiment
  )
  print(sentiment.label)
@@ -118,6 +121,21 @@ result = openai_llm.generate(messages=[
  print(result)
  ```

+ ## 🤖 OpenAI Model Support
+
+ The framework automatically handles parameter differences between model types, so you can switch between any models without code changes.
+
+ ### Model-Specific Limitations
+
+ Some models have specific parameter restrictions that are automatically handled:
+
+ - **GPT-5 Series**: Only supports `temperature=1.0` (default). Other temperature values are automatically adjusted with a warning.
+ - **o1 & o4 Series**: Only supports `temperature=1.0` (default). Other temperature values are automatically adjusted with a warning.
+ - **o3 Series**: Does not support the `temperature` parameter at all. The parameter is automatically removed with a warning.
+ - **All Reasoning Models** (o1, o3, o4, GPT-5): Use `max_completion_tokens` instead of `max_tokens`, and have limited tool support.
+
+ The framework will automatically adapt parameters and log warnings when unsupported values are provided.
+
  ## 🏗️ Project Structure

  ```

{mojentic-0.7.4 → mojentic-0.8.1}/README.md
@@ -1,6 +1,6 @@
  # Mojentic

- Mojentic is a framework that provides a simple and flexible way to interact with Large Language Models (LLMs). It offers integration with various LLM providers and includes tools for structured output generation, task automation, and more. The future direction is to facilitate a team of agents, but the current focus is on robust LLM interaction capabilities.
+ Mojentic is a framework that provides a simple and flexible way to interact with Large Language Models (LLMs). It offers integration with various LLM providers and includes tools for structured output generation, task automation, and more. With comprehensive support for all OpenAI models including GPT-5 and automatic parameter adaptation, Mojentic handles the complexities of different model types seamlessly. The future direction is to facilitate a team of agents, but the current focus is on robust LLM interaction capabilities.

  [![GitHub](https://img.shields.io/github/license/svetzal/mojentic)](LICENSE.md)
  [![Python Version](https://img.shields.io/badge/python-3.11%2B-blue)](https://www.python.org/downloads/)
@@ -9,6 +9,8 @@ Mojentic is a framework that provides a simple and flexible way to interact with
  ## 🚀 Features

  - **LLM Integration**: Support for multiple LLM providers (OpenAI, Ollama)
+ - **Latest OpenAI Models**: Full support for GPT-5, GPT-4.1, and all reasoning models (o1, o3, o4 series)
+ - **Automatic Model Adaptation**: Seamless parameter handling across different OpenAI model types
  - **Structured Output**: Generate structured data from LLM responses using Pydantic models
  - **Tools Integration**: Utilities for date resolution, image analysis, and more
  - **Multi-modal Capabilities**: Process and analyze images alongside text
@@ -45,8 +47,9 @@ from mojentic.llm.gateways.models import LLMMessage
  from mojentic.llm.tools.date_resolver import ResolveDateTool
  from pydantic import BaseModel, Field

- # Initialize with OpenAI
- openai_llm = LLMBroker(model="gpt-4o", gateway=OpenAIGateway(api_key="your_api_key"))
+ # Initialize with OpenAI (supports all models including GPT-5, GPT-4.1, reasoning models)
+ openai_llm = LLMBroker(model="gpt-5", gateway=OpenAIGateway(api_key="your_api_key"))
+ # Or use other models: "gpt-4o", "gpt-4.1", "o1-mini", "o3-mini", etc.

  # Or use Ollama for local LLMs
  ollama_llm = LLMBroker(model="llama3")
@@ -60,7 +63,7 @@ class Sentiment(BaseModel):
  label: str = Field(..., description="Label for the sentiment")

  sentiment = openai_llm.generate_object(
- messages=[LLMMessage(content="Hello, how are you?")],
+ messages=[LLMMessage(content="Hello, how are you?")],
  object_model=Sentiment
  )
  print(sentiment.label)
@@ -79,6 +82,21 @@ result = openai_llm.generate(messages=[
  print(result)
  ```

+ ## 🤖 OpenAI Model Support
+
+ The framework automatically handles parameter differences between model types, so you can switch between any models without code changes.
+
+ ### Model-Specific Limitations
+
+ Some models have specific parameter restrictions that are automatically handled:
+
+ - **GPT-5 Series**: Only supports `temperature=1.0` (default). Other temperature values are automatically adjusted with a warning.
+ - **o1 & o4 Series**: Only supports `temperature=1.0` (default). Other temperature values are automatically adjusted with a warning.
+ - **o3 Series**: Does not support the `temperature` parameter at all. The parameter is automatically removed with a warning.
+ - **All Reasoning Models** (o1, o3, o4, GPT-5): Use `max_completion_tokens` instead of `max_tokens`, and have limited tool support.
+
+ The framework will automatically adapt parameters and log warnings when unsupported values are provided.
+
  ## 🏗️ Project Structure

  ```
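
The "Model-Specific Limitations" section added above describes behaviour that lives inside the gateway, so caller code stays the same across model families. Below is a minimal sketch of that (not part of the diff), using only the API shown in the README excerpt plus the `LLMBroker` module listed in the file table; the warning and adjustment behaviour is the library's own and is not reproduced here.

```python
import os

from mojentic.llm.gateways.models import LLMMessage
from mojentic.llm.gateways.openai import OpenAIGateway
from mojentic.llm.llm_broker import LLMBroker

gateway = OpenAIGateway(os.getenv("OPENAI_API_KEY"))

# Identical calling code for a chat model and two reasoning models; per the
# README above, the gateway adjusts or drops unsupported parameters (e.g.
# temperature handling, max_tokens vs max_completion_tokens) and logs warnings.
for model in ("gpt-4o", "gpt-5", "o3-mini"):
    llm = LLMBroker(model=model, gateway=gateway)
    reply = llm.generate(messages=[LLMMessage(content="Say hello in five words.")])
    print(f"{model}: {reply}")
```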

{mojentic-0.7.4 → mojentic-0.8.1}/pyproject.toml
@@ -1,6 +1,6 @@
  [project]
  name = "mojentic"
- version = "0.7.4"
+ version = "0.8.1"
  authors = [
  { name = "Stacey Vetzal", email = "stacey@vetzal.com" },
  ]

{mojentic-0.7.4 → mojentic-0.8.1}/src/_examples/broker_examples.py
@@ -14,7 +14,7 @@ from mojentic.llm.gateways.models import LLMMessage
  from mojentic.llm.tools.date_resolver import ResolveDateTool


- def openai_llm(model="gpt-4o"):
+ def openai_llm(model="gpt-5"):
      api_key = os.getenv("OPENAI_API_KEY")
      gateway = OpenAIGateway(api_key)
      llm = LLMBroker(model=model, gateway=gateway)
@@ -60,7 +60,26 @@ check_structured_output(openai_llm(model="o4-mini"))
  check_tool_use(openai_llm(model="o4-mini"))
  check_image_analysis(openai_llm(model="gpt-4o"))

- check_simple_textgen(ollama_llm())
- check_structured_output(ollama_llm())
+ # check_simple_textgen(ollama_llm())
+ # check_structured_output(ollama_llm())
  check_tool_use(ollama_llm(model="qwen3:32b"))
  check_image_analysis(ollama_llm(model="gemma3:27b"))
+
+ # Test all GPT-5 model variants to confirm they're all reasoning models
+ print("\n=== Testing GPT-5 Model Variants ===")
+ gpt5_models = [
+     "gpt-5",
+     "gpt-5-2025-08-07",
+     "gpt-5-chat-latest",
+     "gpt-5-mini",
+     "gpt-5-mini-2025-08-07",
+     "gpt-5-nano",
+     "gpt-5-nano-2025-08-07"
+ ]
+
+ for model in gpt5_models:
+     print(f"\n--- Testing {model} ---")
+     try:
+         check_simple_textgen(openai_llm(model=model))
+     except Exception as e:
+         print(f"Error with {model}: {e}")

mojentic-0.8.1/src/_examples/fetch_openai_models.py
@@ -0,0 +1,104 @@
+ """
+ Script to fetch current OpenAI models and update the registry with up-to-date model lists.
+ """
+
+ import os
+ from mojentic.llm.gateways.openai import OpenAIGateway
+
+ def fetch_current_openai_models():
+     """Fetch the current list of OpenAI models."""
+     api_key = os.getenv("OPENAI_API_KEY")
+     if not api_key:
+         print("ERROR: OPENAI_API_KEY environment variable not set")
+         return None
+
+     try:
+         gateway = OpenAIGateway(api_key)
+         models = gateway.get_available_models()
+         return models
+     except Exception as e:
+         print(f"ERROR: Failed to fetch models from OpenAI API: {e}")
+         return None
+
+ def categorize_models(models):
+     """Categorize models by type based on naming patterns."""
+     reasoning_models = []
+     chat_models = []
+     embedding_models = []
+     other_models = []
+
+     for model in models:
+         model_lower = model.lower()
+
+         # Reasoning models: o1, o3, o4, and gpt-5 series
+         if (any(pattern in model_lower for pattern in ['o1-', 'o3-', 'o4-', 'gpt-5']) or
+                 model_lower in ['o1', 'o3', 'o4', 'gpt-5']):
+             reasoning_models.append(model)
+         elif 'embedding' in model_lower:
+             embedding_models.append(model)
+         elif any(pattern in model_lower for pattern in ['gpt-4', 'gpt-3.5']):
+             chat_models.append(model)
+         else:
+             other_models.append(model)
+
+     return {
+         'reasoning': sorted(reasoning_models),
+         'chat': sorted(chat_models),
+         'embedding': sorted(embedding_models),
+         'other': sorted(other_models)
+     }
+
+ def print_model_lists(categorized_models):
+     """Print the categorized models in a format ready for the registry."""
+     print("=== Current OpenAI Models ===\n")
+
+     print("# Reasoning Models (o1, o3, o4, gpt-5 series)")
+     print("reasoning_models = [")
+     for model in categorized_models['reasoning']:
+         print(f' "{model}",')
+     print("]\n")
+
+     print("# Chat Models (GPT-4 and GPT-4.1 series)")
+     print("gpt4_and_newer_models = [")
+     gpt4_and_newer = [m for m in categorized_models['chat'] if 'gpt-4' in m.lower()]
+     for model in gpt4_and_newer:
+         print(f' "{model}",')
+     print("]\n")
+
+     print("# Chat Models (GPT-3.5 series)")
+     print("gpt35_models = [")
+     gpt35 = [m for m in categorized_models['chat'] if 'gpt-3.5' in m.lower()]
+     for model in gpt35:
+         print(f' "{model}",')
+     print("]\n")
+
+     print("# Embedding Models")
+     print("embedding_models = [")
+     for model in categorized_models['embedding']:
+         print(f' "{model}",')
+     print("]\n")
+
+     print("# Other Models (for reference)")
+     print("# other_models = [")
+     for model in categorized_models['other']:
+         print(f'# "{model}",')
+     print("# ]\n")
+
+ if __name__ == "__main__":
+     print("Fetching current OpenAI models...")
+     models = fetch_current_openai_models()
+
+     if models:
+         print(f"Found {len(models)} models\n")
+         categorized = categorize_models(models)
+         print_model_lists(categorized)
+
+         print("\n=== Summary ===")
+         print(f"Reasoning models: {len(categorized['reasoning'])}")
+         print(f"Chat models: {len(categorized['chat'])}")
+         print(f"Embedding models: {len(categorized['embedding'])}")
+         print(f"Other models: {len(categorized['other'])}")
+
+         print("\nCopy the model lists above and update the _initialize_default_models() method in openai_model_registry.py")
+     else:
+         print("Failed to fetch models. Please check your API key and try again.")

mojentic-0.8.1/src/_examples/openai_gateway_enhanced_demo.py
@@ -0,0 +1,140 @@
+ """
+ Demonstration of the enhanced OpenAI gateway with model registry system.
+
+ This script shows how the new infrastructure automatically handles parameter adaptation
+ for reasoning models vs chat models, provides detailed logging, and offers better
+ error handling.
+ """
+
+ import os
+ from mojentic.llm.gateways.openai import OpenAIGateway
+ from mojentic.llm.gateways.openai_model_registry import get_model_registry
+ from mojentic.llm.gateways.models import LLMMessage, MessageRole
+
+ def demonstrate_model_registry():
+     """Demonstrate the model registry capabilities."""
+     print("=== Model Registry Demonstration ===")
+
+     registry = get_model_registry()
+
+     print("\n1. Registry contains default models:")
+     registered_models = registry.get_registered_models()
+     reasoning_models = [m for m in registered_models if registry.is_reasoning_model(m)]
+     chat_models = [m for m in registered_models if not registry.is_reasoning_model(m) and not m.startswith('text-')]
+
+     print(f" Reasoning models: {reasoning_models[:3]}...") # Show first 3
+     print(f" Chat models: {chat_models[:3]}...") # Show first 3
+
+     print("\n2. Model capability detection:")
+     for model in ["o1-mini", "gpt-4o"]:
+         capabilities = registry.get_model_capabilities(model)
+         token_param = capabilities.get_token_limit_param()
+         print(f" {model}: type={capabilities.model_type.value}, token_param={token_param}")
+
+     # Handle unknown model separately to show the warning works
+     print("\n3. Unknown model handling:")
+     print(" unknown-future-model: (will default to chat model with warning)")
+     capabilities = registry.get_model_capabilities("unknown-future-model")
+     token_param = capabilities.get_token_limit_param()
+     print(f" → Defaulted to: type={capabilities.model_type.value}, token_param={token_param}")
+
+ def demonstrate_parameter_adaptation():
+     """Demonstrate parameter adaptation for different model types."""
+     print("\n=== Parameter Adaptation Demonstration ===")
+
+     # This would normally require an API key, but we're just showing the adaptation logic
+     gateway = OpenAIGateway("fake-key-for-demo")
+
+     print("\n1. Reasoning model parameter adaptation (o1-mini):")
+     original_args = {
+         'model': 'o1-mini',
+         'messages': [LLMMessage(role=MessageRole.User, content="Hello")],
+         'max_tokens': 1000,
+         'tools': [] # Tools will be removed for reasoning models
+     }
+
+     adapted_args = gateway._adapt_parameters_for_model('o1-mini', original_args)
+     print(f" Original: max_tokens={original_args.get('max_tokens')}, has_tools={'tools' in original_args}")
+     print(f" Adapted: max_completion_tokens={adapted_args.get('max_completion_tokens')}, has_tools={'tools' in adapted_args}")
+
+     print("\n2. Chat model parameter adaptation (gpt-4o):")
+     original_args = {
+         'model': 'gpt-4o',
+         'messages': [LLMMessage(role=MessageRole.User, content="Hello")],
+         'max_tokens': 1000,
+         'tools': []
+     }
+
+     adapted_args = gateway._adapt_parameters_for_model('gpt-4o', original_args)
+     print(f" Original: max_tokens={original_args.get('max_tokens')}, has_tools={'tools' in original_args}")
+     print(f" Adapted: max_tokens={adapted_args.get('max_tokens')}, has_tools={'tools' in adapted_args}")
+
+ def demonstrate_model_validation():
+     """Demonstrate model parameter validation."""
+     print("\n=== Model Validation Demonstration ===")
+
+     gateway = OpenAIGateway("fake-key-for-demo")
+
+     print("\n1. Validating parameters for reasoning model:")
+     args = {
+         'model': 'o1-mini',
+         'messages': [LLMMessage(role=MessageRole.User, content="Hello")],
+         'max_tokens': 50000, # High token count - will show warning
+         'tools': [] # Tools for reasoning model - will show warning
+     }
+
+     try:
+         gateway._validate_model_parameters('o1-mini', args)
+         print(" Validation completed (check logs above for warnings)")
+     except Exception as e:
+         print(f" Validation error: {e}")
+
+ def demonstrate_registry_extensibility():
+     """Demonstrate how to extend the registry with new models."""
+     print("\n=== Registry Extensibility Demonstration ===")
+
+     registry = get_model_registry()
+
+     print("\n1. Adding a new model to the registry:")
+     from mojentic.llm.gateways.openai_model_registry import ModelCapabilities, ModelType
+
+     new_capabilities = ModelCapabilities(
+         model_type=ModelType.REASONING,
+         supports_tools=True, # Hypothetical future reasoning model with tools
+         supports_streaming=True,
+         max_output_tokens=100000
+     )
+
+     registry.register_model("o5-preview", new_capabilities)
+     print(f" Registered o5-preview as reasoning model")
+
+     # Test the new model
+     capabilities = registry.get_model_capabilities("o5-preview")
+     print(f" o5-preview: type={capabilities.model_type.value}, supports_tools={capabilities.supports_tools}")
+
+     print("\n2. Adding a new pattern for model detection:")
+     registry.register_pattern("claude", ModelType.CHAT)
+     print(" Registered 'claude' pattern for chat models")
+
+     # Test pattern matching
+     capabilities = registry.get_model_capabilities("claude-3-opus")
+     print(f" claude-3-opus (inferred): type={capabilities.model_type.value}")
+
+ if __name__ == "__main__":
+     print("OpenAI Gateway Enhanced Infrastructure Demo")
+     print("=" * 50)
+
+     demonstrate_model_registry()
+     demonstrate_parameter_adaptation()
+     demonstrate_model_validation()
+     demonstrate_registry_extensibility()
+
+     print("\n" + "=" * 50)
+     print("Demo completed!")
+     print("\nKey Benefits of the New Infrastructure:")
+     print("✓ Registry-based model management (easy to extend)")
+     print("✓ Automatic parameter adaptation (max_tokens ↔ max_completion_tokens)")
+     print("✓ Enhanced logging for debugging")
+     print("✓ Parameter validation with helpful warnings")
+     print("✓ Pattern matching for unknown models")
+     print("✓ Comprehensive test coverage")