mojentic 0.8.0__py3-none-any.whl → 0.8.2__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.

mojentic/llm/chat_session.py
@@ -73,7 +73,7 @@ class ChatSession:
             The response from the LLM.
         """
         self.insert_message(LLMMessage(role=MessageRole.User, content=query))
-        response = self.llm.generate(self.messages, tools=self.tools, temperature=0.1)
+        response = self.llm.generate(self.messages, tools=self.tools, temperature=self.temperature)
         self._ensure_all_messages_are_sized()
         self.insert_message(LLMMessage(role=MessageRole.Assistant, content=response))
         return response
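
The only functional change here: ChatSession stops hardcoding temperature=0.1 and forwards whatever the session was configured with. A minimal sketch of the pattern, assuming ChatSession receives the value through its constructor (the diff only shows the self.temperature read, so the parameter name is an assumption):

```python
# Hypothetical sketch of the pattern this hunk introduces; Session is a
# stand-in, not mojentic's actual ChatSession.
class FakeLLM:
    def generate(self, query: str, temperature: float) -> str:
        return f"(t={temperature}) echo: {query}"

class Session:
    def __init__(self, llm, temperature: float = 1.0):
        self.llm = llm
        self.temperature = temperature  # configured once, used on every send

    def send(self, query: str) -> str:
        # Previously the temperature was pinned to 0.1 at this call site;
        # now the session-level setting flows through to the LLM call.
        return self.llm.generate(query, temperature=self.temperature)

print(Session(FakeLLM(), temperature=0.7).send("hello"))  # (t=0.7) echo: hello
```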

mojentic/llm/gateways/openai.py
@@ -91,6 +91,27 @@ class OpenAIGateway(LLMGateway):
                            num_tools=len(adapted_args['tools']))
             adapted_args['tools'] = None  # Set to None instead of removing the key
 
+        # Handle temperature restrictions for specific models
+        if 'temperature' in adapted_args:
+            temperature = adapted_args['temperature']
+
+            # Check if model supports temperature parameter at all
+            if capabilities.supported_temperatures == []:
+                # Model doesn't support temperature parameter at all - remove it
+                logger.warning("Model does not support temperature parameter, removing it",
+                               model=model,
+                               requested_temperature=temperature)
+                adapted_args.pop('temperature', None)
+            elif not capabilities.supports_temperature(temperature):
+                # Model supports temperature but not this specific value - use default
+                default_temp = 1.0
+                logger.warning("Model does not support requested temperature, using default",
+                               model=model,
+                               requested_temperature=temperature,
+                               default_temperature=default_temp,
+                               supported_temperatures=capabilities.supported_temperatures)
+                adapted_args['temperature'] = default_temp
+
         return adapted_args
 
     def _validate_model_parameters(self, model: str, args: dict) -> None:
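
The new block gives the argument adapter three outcomes for temperature: pass it through, clamp it to the 1.0 default, or drop it entirely. A condensed, self-contained sketch of that decision table (adapt_temperature and supported are stand-in names, with supported mirroring ModelCapabilities.supported_temperatures):

```python
from typing import List, Optional

# Condensed stand-in for the gateway logic above. supported mirrors
# ModelCapabilities.supported_temperatures: None = unrestricted,
# [] = parameter not accepted, [1.0] = only the default allowed.
def adapt_temperature(args: dict, supported: Optional[List[float]]) -> dict:
    if 'temperature' in args:
        if supported == []:
            args.pop('temperature')    # o3-style: remove the parameter
        elif supported is not None and args['temperature'] not in supported:
            args['temperature'] = 1.0  # GPT-5/o1/o4-style: fall back to 1.0
    return args

assert adapt_temperature({'temperature': 0.1}, []) == {}
assert adapt_temperature({'temperature': 0.1}, [1.0]) == {'temperature': 1.0}
assert adapt_temperature({'temperature': 0.1}, None) == {'temperature': 0.1}
```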

mojentic/llm/gateways/openai_model_registry.py
@@ -34,6 +34,7 @@ class ModelCapabilities:
     supports_vision: bool = False
     max_context_tokens: Optional[int] = None
     max_output_tokens: Optional[int] = None
+    supported_temperatures: Optional[List[float]] = None  # None means all temperatures supported
 
     def get_token_limit_param(self) -> str:
         """Get the correct parameter name for token limits based on model type."""

@@ -41,6 +42,14 @@ class ModelCapabilities:
             return "max_completion_tokens"
         return "max_tokens"
 
+    def supports_temperature(self, temperature: float) -> bool:
+        """Check if the model supports a specific temperature value."""
+        if self.supported_temperatures is None:
+            return True  # All temperatures supported if not restricted
+        if self.supported_temperatures == []:
+            return False  # No temperature values supported (parameter not allowed)
+        return temperature in self.supported_temperatures
+
 
 class OpenAIModelRegistry:
     """

@@ -74,6 +83,9 @@ class OpenAIModelRegistry:
         # Deep research models and GPT-5 might have different capabilities
         is_deep_research = "deep-research" in model
         is_gpt5 = "gpt-5" in model
+        is_o1_series = model.startswith("o1")
+        is_o3_series = model.startswith("o3")
+        is_o4_series = model.startswith("o4")
         is_mini_or_nano = ("mini" in model or "nano" in model)
 
         # GPT-5 models may support more features than o1/o3/o4

@@ -91,13 +103,25 @@ class OpenAIModelRegistry:
             context_tokens = 128000
             output_tokens = 32768
 
+        # Temperature restrictions based on model series
+        if is_gpt5 or is_o1_series or is_o4_series:
+            # GPT-5, o1, and o4 series only support temperature=1.0
+            supported_temps = [1.0]
+        elif is_o3_series:
+            # o3 series doesn't support temperature parameter at all
+            supported_temps = []
+        else:
+            # Other reasoning models support all temperatures
+            supported_temps = None
+
         self._models[model] = ModelCapabilities(
             model_type=ModelType.REASONING,
             supports_tools=supports_tools,
             supports_streaming=supports_streaming,
             supports_vision=False,  # Vision support would need to be confirmed for GPT-5
             max_context_tokens=context_tokens,
-            max_output_tokens=output_tokens
+            max_output_tokens=output_tokens,
+            supported_temperatures=supported_temps
         )
 
         # Chat Models (GPT-4 and GPT-4.1 series) - Updated 2025-09-28
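
Because the series flags use startswith, dated and suffixed variants (o1-2024-12-17, o4-mini-deep-research, and so on) inherit the same restrictions. A small sketch of the resulting mapping, mirroring the branch above (temps_for is a hypothetical helper, not registry code):

```python
from typing import List, Optional

def temps_for(model: str) -> Optional[List[float]]:
    if "gpt-5" in model or model.startswith(("o1", "o4")):
        return [1.0]  # only the default temperature is accepted
    if model.startswith("o3"):
        return []     # temperature parameter not accepted at all
    return None       # unrestricted

assert temps_for("gpt-5-mini") == [1.0]
assert temps_for("o1-2024-12-17") == [1.0]
assert temps_for("o3-deep-research") == []
assert temps_for("o4-mini-deep-research") == [1.0]
```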

mojentic/llm/gateways/openai_temperature_handling_spec.py (new file)
@@ -0,0 +1,245 @@
+import pytest
+from unittest.mock import MagicMock
+
+from mojentic.llm.gateways.openai import OpenAIGateway
+from mojentic.llm.gateways.openai_model_registry import get_model_registry
+from mojentic.llm.gateways.models import LLMMessage, MessageRole, LLMGatewayResponse
+
+
+@pytest.fixture
+def mock_openai_client(mocker):
+    """Mock the OpenAI client."""
+    mock_client = MagicMock()
+    mock_client.chat.completions.create.return_value = MagicMock()
+    mock_client.chat.completions.create.return_value.choices = [MagicMock()]
+    mock_client.chat.completions.create.return_value.choices[0].message.content = "Test response"
+    mock_client.chat.completions.create.return_value.choices[0].message.tool_calls = None
+    return mock_client
+
+
+@pytest.fixture
+def openai_gateway(mocker, mock_openai_client):
+    """Create an OpenAI gateway with mocked client."""
+    mocker.patch('mojentic.llm.gateways.openai.OpenAI', return_value=mock_openai_client)
+    return OpenAIGateway(api_key="test_key")
+
+
+class DescribeOpenAIGatewayTemperatureHandling:
+    """
+    Specification for OpenAI gateway temperature parameter handling.
+    """
+
+    class DescribeGPT5TemperatureRestrictions:
+        """
+        Specifications for GPT-5 model temperature restrictions.
+        """
+
+        def should_automatically_adjust_unsupported_temperature_for_gpt5(self, openai_gateway, mock_openai_client):
+            """
+            Given a GPT-5 model that only supports temperature=1.0
+            When calling complete with temperature=0.1 (unsupported)
+            Then it should automatically adjust to temperature=1.0
+            """
+            messages = [LLMMessage(role=MessageRole.User, content="Test message")]
+
+            openai_gateway.complete(
+                model="gpt-5",
+                messages=messages,
+                temperature=0.1
+            )
+
+            # Verify the API was called with temperature=1.0, not 0.1
+            call_args = mock_openai_client.chat.completions.create.call_args
+            assert call_args[1]['temperature'] == 1.0
+
+        def should_preserve_supported_temperature_for_gpt5(self, openai_gateway, mock_openai_client):
+            """
+            Given a GPT-5 model that supports temperature=1.0
+            When calling complete with temperature=1.0 (supported)
+            Then it should preserve the temperature value
+            """
+            messages = [LLMMessage(role=MessageRole.User, content="Test message")]
+
+            openai_gateway.complete(
+                model="gpt-5",
+                messages=messages,
+                temperature=1.0
+            )
+
+            # Verify the API was called with temperature=1.0
+            call_args = mock_openai_client.chat.completions.create.call_args
+            assert call_args[1]['temperature'] == 1.0
+
+        def should_preserve_any_temperature_for_gpt4o(self, openai_gateway, mock_openai_client):
+            """
+            Given a GPT-4o model that supports all temperatures
+            When calling complete with temperature=0.1
+            Then it should preserve the original temperature value
+            """
+            messages = [LLMMessage(role=MessageRole.User, content="Test message")]
+
+            openai_gateway.complete(
+                model="gpt-4o",
+                messages=messages,
+                temperature=0.1
+            )
+
+            # Verify the API was called with temperature=0.1
+            call_args = mock_openai_client.chat.completions.create.call_args
+            assert call_args[1]['temperature'] == 0.1
+
+        def should_automatically_adjust_unsupported_temperature_for_o1_mini(self, openai_gateway, mock_openai_client):
+            """
+            Given an o1-mini model that only supports temperature=1.0
+            When calling complete with temperature=0.1 (unsupported)
+            Then it should automatically adjust to temperature=1.0
+            """
+            messages = [LLMMessage(role=MessageRole.User, content="Test message")]
+
+            openai_gateway.complete(
+                model="o1-mini",
+                messages=messages,
+                temperature=0.1
+            )
+
+            # Verify the API was called with temperature=1.0, not 0.1
+            call_args = mock_openai_client.chat.completions.create.call_args
+            assert call_args[1]['temperature'] == 1.0
+
+        def should_automatically_adjust_unsupported_temperature_for_o4_mini(self, openai_gateway, mock_openai_client):
+            """
+            Given an o4-mini model that only supports temperature=1.0
+            When calling complete with temperature=0.1 (unsupported)
+            Then it should automatically adjust to temperature=1.0
+            """
+            messages = [LLMMessage(role=MessageRole.User, content="Test message")]
+
+            openai_gateway.complete(
+                model="o4-mini",
+                messages=messages,
+                temperature=0.1
+            )
+
+            # Verify the API was called with temperature=1.0, not 0.1
+            call_args = mock_openai_client.chat.completions.create.call_args
+            assert call_args[1]['temperature'] == 1.0
+
+        def should_remove_temperature_parameter_for_o3_mini(self, openai_gateway, mock_openai_client):
+            """
+            Given an o3-mini model that doesn't support the temperature parameter at all
+            When calling complete with temperature=0.1
+            Then it should remove the temperature parameter entirely
+            """
+            messages = [LLMMessage(role=MessageRole.User, content="Test message")]
+
+            openai_gateway.complete(
+                model="o3-mini",
+                messages=messages,
+                temperature=0.1
+            )
+
+            # Verify the API was called without the temperature parameter
+            call_args = mock_openai_client.chat.completions.create.call_args
+            assert 'temperature' not in call_args[1]
+
+
+class DescribeModelCapabilitiesTemperatureRestrictions:
+    """
+    Specification for model capabilities temperature restriction checks.
+    """
+
+    def should_identify_gpt5_temperature_restrictions(self):
+        """
+        Given the model registry
+        When checking GPT-5 model capabilities
+        Then it should indicate temperature=1.0 is supported and temperature=0.1 is not
+        """
+        registry = get_model_registry()
+        capabilities = registry.get_model_capabilities("gpt-5")
+
+        assert capabilities.supports_temperature(1.0) is True
+        assert capabilities.supports_temperature(0.1) is False
+        assert capabilities.supported_temperatures == [1.0]
+
+    def should_allow_all_temperatures_for_gpt4o(self):
+        """
+        Given the model registry
+        When checking GPT-4o model capabilities
+        Then it should indicate all temperature values are supported
+        """
+        registry = get_model_registry()
+        capabilities = registry.get_model_capabilities("gpt-4o")
+
+        assert capabilities.supports_temperature(1.0) is True
+        assert capabilities.supports_temperature(0.1) is True
+        assert capabilities.supports_temperature(0.7) is True
+        assert capabilities.supported_temperatures is None
+
+    def should_identify_all_gpt5_variants_temperature_restrictions(self):
+        """
+        Given the model registry
+        When checking all GPT-5 variant models
+        Then they should all restrict temperature to 1.0 only
+        """
+        registry = get_model_registry()
+        gpt5_models = [
+            "gpt-5",
+            "gpt-5-2025-08-07",
+            "gpt-5-chat-latest",
+            "gpt-5-codex",
+            "gpt-5-mini",
+            "gpt-5-mini-2025-08-07",
+            "gpt-5-nano",
+            "gpt-5-nano-2025-08-07"
+        ]
+
+        for model in gpt5_models:
+            capabilities = registry.get_model_capabilities(model)
+            assert capabilities.supports_temperature(1.0) is True
+            assert capabilities.supports_temperature(0.1) is False
+            assert capabilities.supported_temperatures == [1.0]
+
+    def should_identify_o1_series_temperature_restrictions(self):
+        """
+        Given the model registry
+        When checking o1 series models
+        Then they should restrict temperature to 1.0 only
+        """
+        registry = get_model_registry()
+        o1_models = ["o1", "o1-mini", "o1-pro", "o1-2024-12-17"]
+
+        for model in o1_models:
+            capabilities = registry.get_model_capabilities(model)
+            assert capabilities.supports_temperature(1.0) is True
+            assert capabilities.supports_temperature(0.1) is False
+            assert capabilities.supported_temperatures == [1.0]
+
+    def should_identify_o3_series_no_temperature_support(self):
+        """
+        Given the model registry
+        When checking o3 series models
+        Then they should not support the temperature parameter at all
+        """
+        registry = get_model_registry()
+        o3_models = ["o3", "o3-mini", "o3-pro", "o3-deep-research"]
+
+        for model in o3_models:
+            capabilities = registry.get_model_capabilities(model)
+            assert capabilities.supports_temperature(1.0) is False
+            assert capabilities.supports_temperature(0.1) is False
+            assert capabilities.supported_temperatures == []
+
+    def should_identify_o4_series_temperature_restrictions(self):
+        """
+        Given the model registry
+        When checking o4 series models
+        Then they should restrict temperature to 1.0 only
+        """
+        registry = get_model_registry()
+        o4_models = ["o4-mini", "o4-mini-2025-04-16", "o4-mini-deep-research"]
+
+        for model in o4_models:
+            capabilities = registry.get_model_capabilities(model)
+            assert capabilities.supports_temperature(1.0) is True
+            assert capabilities.supports_temperature(0.1) is False
+            assert capabilities.supported_temperatures == [1.0]
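
These specs rely on the mocker fixture, which is provided by the pytest-mock plugin. They exercise both halves of the feature: the gateway specs assert on the keyword arguments that actually reach the mocked OpenAI client, while the capabilities specs query the registry metadata directly.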

mojentic-0.8.2.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: mojentic
-Version: 0.8.0
+Version: 0.8.2
 Summary: Mojentic is an agentic framework that aims to provide a simple and flexible way to assemble teams of agents to solve complex problems.
 Author-email: Stacey Vetzal <stacey@vetzal.com>
 Project-URL: Homepage, https://github.com/svetzal/mojentic

@@ -125,6 +125,17 @@ print(result)
 
 The framework automatically handles parameter differences between model types, so you can switch between any models without code changes.
 
+### Model-Specific Limitations
+
+Some models have specific parameter restrictions that are automatically handled:
+
+- **GPT-5 Series**: Only supports `temperature=1.0` (default). Other temperature values are automatically adjusted with a warning.
+- **o1 & o4 Series**: Only support `temperature=1.0` (default). Other temperature values are automatically adjusted with a warning.
+- **o3 Series**: Does not support the `temperature` parameter at all. The parameter is automatically removed with a warning.
+- **All Reasoning Models** (o1, o3, o4, GPT-5): Use `max_completion_tokens` instead of `max_tokens`, and have limited tool support.
+
+The framework will automatically adapt parameters and log warnings when unsupported values are provided.
+
 ## 🏗️ Project Structure
 
 ```
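
A short usage sketch of the behavior this section describes. The LLMBroker construction and import paths are assumptions inferred from the rest of the package (the new spec file imports LLMMessage and MessageRole from mojentic.llm.gateways.models, and ChatSession calls llm.generate with a temperature keyword):

```python
# Hedged sketch: LLMBroker(model=...) and the generate(...) call are inferred
# from ChatSession's call site above, not taken from documented examples.
from mojentic.llm import LLMBroker
from mojentic.llm.gateways.models import LLMMessage, MessageRole

llm = LLMBroker(model="gpt-5")  # GPT-5 accepts only temperature=1.0

# Requesting temperature=0.2 is not an error: the OpenAI gateway logs a
# warning and sends temperature=1.0 instead. On an o3 model the parameter
# would be removed entirely.
response = llm.generate(
    [LLMMessage(role=MessageRole.User, content="Hello!")],
    temperature=0.2,
)
print(response)
```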

mojentic-0.8.2.dist-info/RECORD
@@ -70,7 +70,7 @@ mojentic/agents/simple_recursive_agent.py,sha256=B3QcOeIuamoLp0MsAwCjJLgVCaODsEi
 mojentic/context/__init__.py,sha256=PZwMnDnn_ziGniAI6murl4dkkbBp_agkUps5EPGTFN4,136
 mojentic/context/shared_working_memory.py,sha256=Zt9MNGErEkDIUAaHvyhEOiTaEobI9l0MV4Z59lQFBr0,396
 mojentic/llm/__init__.py,sha256=rTUTMjAe524evcH09wKSpL5ymnwHFlM1fWvGKd3mSis,414
-mojentic/llm/chat_session.py,sha256=H2gY0mZYVym8jC69VHsmKaRZ9T87Suyw0-TW5r850nA,3992
+mojentic/llm/chat_session.py,sha256=0lhCZDxGfq3ma4m-SmJqfQYdyxPQElSFPQOcqAacFlQ,4005
 mojentic/llm/chat_session_spec.py,sha256=8-jj-EHV2WwWuvo3t8I75kSEAYiG1nR-OEwkkLTi_z0,3872
 mojentic/llm/llm_broker.py,sha256=wNBGQD9GwuZoj3liZZEfCFN4sb2_TN8HDHZvZc7-PMQ,9726
 mojentic/llm/llm_broker_spec.py,sha256=40lzmYm_6Zje6z5MQ7_o3gSBThLsNW_l_1mZTUVll6A,5342

@@ -86,11 +86,12 @@ mojentic/llm/gateways/models.py,sha256=OyIaMHKrrx6dHo5FbC8qOFct7PRql9wqbe_BJlgDS
 mojentic/llm/gateways/ollama.py,sha256=OUUImBNzPte52Gsf-e7TBjDHRvYW5flU9ddxwG2zlzk,7909
 mojentic/llm/gateways/ollama_messages_adapter.py,sha256=kUN_p2FyN88_trXMcL-Xsn9xPBU7pGKlJwTUEUCf6G4,1404
 mojentic/llm/gateways/ollama_messages_adapter_spec.py,sha256=gVRbWDrHOa1EiZ0CkEWe0pGn-GKRqdGb-x56HBQeYSE,4981
-mojentic/llm/gateways/openai.py,sha256=42A-8etuDRBSy18q5Qp6S1yndyOJpu3p8Pu0Dd1orFU,12806
+mojentic/llm/gateways/openai.py,sha256=ru156JpPW8-Zs3_O7BCBuztu_PkP88uD0qbM1Y-RMU4,14032
 mojentic/llm/gateways/openai_message_adapter_spec.py,sha256=ITBSV5njldV_x0NPgjmg8Okf9KzevQJ8dTXM-t6ubcg,6612
 mojentic/llm/gateways/openai_messages_adapter.py,sha256=Scal68JKKdBHB35ok1c5DeWYdD6Wra5oXSsPxJyyXSQ,3947
-mojentic/llm/gateways/openai_model_registry.py,sha256=4BIWQOl-5yAbug3UHUtpbj3kpkadNoy4sMgThyPi-i8,12858
+mojentic/llm/gateways/openai_model_registry.py,sha256=CPfbwBhdZ94jzLjmaH9dRXGZFk4OD2pOUlW6RFWVAPM,14101
 mojentic/llm/gateways/openai_model_registry_spec.py,sha256=rCyXhiCOKMewkZjdZoawALoEk62yjENeYTpjYuMuXDM,6711
+mojentic/llm/gateways/openai_temperature_handling_spec.py,sha256=PxQpI57RGaWpt1Dj6z2uLeFcP-dRZTHkai-igZTZc9M,9947
 mojentic/llm/gateways/tokenizer_gateway.py,sha256=ztuqfunlJ6xmyUPPHcC_69-kegiNJD6jdSEde7hDh2w,485
 mojentic/llm/registry/__init__.py,sha256=P2MHlptrtRPMSWbWl9ojXPmjMwkW0rIn6jwzCkSgnhE,164
 mojentic/llm/registry/llm_registry.py,sha256=beyrgGrkXx5ZckUJzC1nQ461vra0fF6s_qRaEdi5bsg,2508

@@ -136,8 +137,8 @@ mojentic/tracer/tracer_system.py,sha256=7CPy_2tlsHtXQ4DcO5oo52N9a9WS0GH-mjeINzu6
 mojentic/tracer/tracer_system_spec.py,sha256=TNm0f9LV__coBx0JGEKyzzNN9mFjCSG_SSrRISO8Xeg,8632
 mojentic/utils/__init__.py,sha256=lqECkkoFvHFttDnafRE1vvh0Dmna_lwupMToP5VvX5k,115
 mojentic/utils/formatting.py,sha256=bPrwwdluXdQ8TsFxfWtHNOeMWKNvAfABSoUnnA1g7c8,947
-mojentic-0.8.0.dist-info/licenses/LICENSE.md,sha256=txSgV8n5zY1W3NiF5HHsCwlaW0e8We1cSC6TuJUqxXA,1060
-mojentic-0.8.0.dist-info/METADATA,sha256=EpjGKSzORxFrSG94rw5rUvm4vztoEMfGCTkXON-W49k,6154
-mojentic-0.8.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-mojentic-0.8.0.dist-info/top_level.txt,sha256=Q-BvPQ8Eu1jnEqK8Xkr6A9C8Xa1z38oPZRHuA5MCTqg,19
-mojentic-0.8.0.dist-info/RECORD,,
+mojentic-0.8.2.dist-info/licenses/LICENSE.md,sha256=txSgV8n5zY1W3NiF5HHsCwlaW0e8We1cSC6TuJUqxXA,1060
+mojentic-0.8.2.dist-info/METADATA,sha256=UxE4SNTvP4TVKYfQz0suITnrZ-DsoCqBwXOu97pd4_A,6896
+mojentic-0.8.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+mojentic-0.8.2.dist-info/top_level.txt,sha256=Q-BvPQ8Eu1jnEqK8Xkr6A9C8Xa1z38oPZRHuA5MCTqg,19
+mojentic-0.8.2.dist-info/RECORD,,