mojentic 0.7.3__py3-none-any.whl → 0.7.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- _examples/model_characterization.py +73 -0
- mojentic/llm/gateways/openai.py +66 -8
- {mojentic-0.7.3.dist-info → mojentic-0.7.4.dist-info}/METADATA +1 -1
- {mojentic-0.7.3.dist-info → mojentic-0.7.4.dist-info}/RECORD +7 -6
- {mojentic-0.7.3.dist-info → mojentic-0.7.4.dist-info}/WHEEL +0 -0
- {mojentic-0.7.3.dist-info → mojentic-0.7.4.dist-info}/licenses/LICENSE.md +0 -0
- {mojentic-0.7.3.dist-info → mojentic-0.7.4.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,73 @@
|
|
|
1
|
+
import os

from mojentic.llm.gateways.openai import OpenAIGateway
from mojentic.llm.gateways.models import LLMMessage, MessageRole


def check_model_characterization():
    """
    Test the model characterization functionality with different OpenAI models.

    This demonstrates how the gateway adapts parameters based on model type:
    chat models keep ``max_tokens`` while reasoning models get it renamed to
    ``max_completion_tokens``. Requires OPENAI_API_KEY; otherwise it exits
    early without making API calls.
    """
    api_key = os.getenv("OPENAI_API_KEY")
    if not api_key:
        print("OPENAI_API_KEY environment variable not set. Skipping actual API calls.")
        return

    gateway = OpenAIGateway(api_key)

    # Test messages for chat models
    chat_messages = [
        LLMMessage(role=MessageRole.System, content="You are a helpful assistant."),
        LLMMessage(role=MessageRole.User, content="What is 2 + 2? Give a brief answer.")
    ]

    # Test messages for reasoning models (no system message supported)
    reasoning_messages = [
        LLMMessage(role=MessageRole.User, content="What is 2 + 2? Give a brief answer.")
    ]

    # Test with different model types
    test_models = [
        ("gpt-4o", "chat model"),
        ("gpt-4o-mini", "chat model"),
        ("o1-mini", "reasoning model"),
        ("o1-preview", "reasoning model")
    ]

    print("Testing model characterization and parameter adaptation:")
    print("=" * 60)

    for model, model_type in test_models:
        print(f"\nTesting {model} ({model_type}):")

        # Test model classification once and reuse the result below
        # (the original queried the gateway a second time for the same answer).
        is_reasoning = gateway._is_reasoning_model(model)
        print(f"  Classified as reasoning model: {is_reasoning}")

        # Use appropriate messages based on model type
        messages = reasoning_messages if is_reasoning else chat_messages

        # Test parameter adaptation
        original_args = {
            'model': model,
            'messages': messages,
            'max_tokens': 100
        }

        adapted_args = gateway._adapt_parameters_for_model(model, original_args)

        if 'max_tokens' in adapted_args:
            print(f"  Using parameter: max_tokens = {adapted_args['max_tokens']}")
        elif 'max_completion_tokens' in adapted_args:
            print(f"  Using parameter: max_completion_tokens = {adapted_args['max_completion_tokens']}")

        try:
            response = gateway.complete(**adapted_args)
            print(f"  Response: {response.content[:50]}...")
        except Exception as e:
            print(f"  Error: {str(e)}")

    print("\n" + "=" * 60)
    print("Model characterization test completed!")


if __name__ == "__main__":
    check_model_characterization()
|
mojentic/llm/gateways/openai.py
CHANGED
|
@@ -27,6 +27,58 @@ class OpenAIGateway(LLMGateway):
|
|
|
27
27
|
def __init__(self, api_key: str, base_url: str = None):
|
|
28
28
|
self.client = OpenAI(api_key=api_key, base_url=base_url)
|
|
29
29
|
|
|
30
|
+
def _is_reasoning_model(self, model: str) -> bool:
|
|
31
|
+
"""
|
|
32
|
+
Determine if a model is a reasoning model that requires max_completion_tokens.
|
|
33
|
+
|
|
34
|
+
Parameters
|
|
35
|
+
----------
|
|
36
|
+
model : str
|
|
37
|
+
The model name to classify.
|
|
38
|
+
|
|
39
|
+
Returns
|
|
40
|
+
-------
|
|
41
|
+
bool
|
|
42
|
+
True if the model is a reasoning model, False if it's a chat model.
|
|
43
|
+
"""
|
|
44
|
+
# OpenAI reasoning models typically start with "o1" or contain "o4"
|
|
45
|
+
reasoning_model_patterns = [
|
|
46
|
+
"o1-",
|
|
47
|
+
"o3-",
|
|
48
|
+
"o4-",
|
|
49
|
+
"o1",
|
|
50
|
+
"o3"
|
|
51
|
+
]
|
|
52
|
+
|
|
53
|
+
model_lower = model.lower()
|
|
54
|
+
return any(pattern in model_lower for pattern in reasoning_model_patterns)
|
|
55
|
+
|
|
56
|
+
def _adapt_parameters_for_model(self, model: str, args: dict) -> dict:
    """
    Adapt call arguments to the parameter names the target model expects.

    Parameters
    ----------
    model : str
        The model name.
    args : dict
        The original arguments.

    Returns
    -------
    dict
        A copy of ``args`` with the token-limit key renamed for reasoning
        models; the input dict is never mutated.
    """
    result = dict(args)

    if self._is_reasoning_model(model) and 'max_tokens' in result:
        # Reasoning models take max_completion_tokens in place of max_tokens.
        limit = result.pop('max_tokens')
        result['max_completion_tokens'] = limit
        logger.debug("Adapted max_tokens to max_completion_tokens for reasoning model",
                     model=model, max_completion_tokens=limit)

    return result
|
|
81
|
+
|
|
30
82
|
def complete(self, **args) -> LLMGatewayResponse:
|
|
31
83
|
"""
|
|
32
84
|
Complete the LLM request by delegating to the OpenAI service.
|
|
@@ -56,22 +108,28 @@ class OpenAIGateway(LLMGateway):
|
|
|
56
108
|
LLMGatewayResponse
|
|
57
109
|
The response from the OpenAI service.
|
|
58
110
|
"""
|
|
111
|
+
# Adapt parameters based on model type
|
|
112
|
+
adapted_args = self._adapt_parameters_for_model(args['model'], args)
|
|
113
|
+
|
|
59
114
|
openai_args = {
|
|
60
|
-
'model':
|
|
61
|
-
'messages': adapt_messages_to_openai(
|
|
115
|
+
'model': adapted_args['model'],
|
|
116
|
+
'messages': adapt_messages_to_openai(adapted_args['messages']),
|
|
62
117
|
}
|
|
63
118
|
|
|
64
119
|
completion = self.client.chat.completions.create
|
|
65
120
|
|
|
66
|
-
if 'object_model' in
|
|
121
|
+
if 'object_model' in adapted_args and adapted_args['object_model'] is not None:
|
|
67
122
|
completion = self.client.beta.chat.completions.parse
|
|
68
|
-
openai_args['response_format'] =
|
|
123
|
+
openai_args['response_format'] = adapted_args['object_model']
|
|
69
124
|
|
|
70
|
-
if 'tools' in
|
|
71
|
-
openai_args['tools'] = [t.descriptor for t in
|
|
125
|
+
if 'tools' in adapted_args and adapted_args['tools'] is not None:
|
|
126
|
+
openai_args['tools'] = [t.descriptor for t in adapted_args['tools']]
|
|
72
127
|
|
|
73
|
-
|
|
74
|
-
|
|
128
|
+
# Handle both max_tokens (for chat models) and max_completion_tokens (for reasoning models)
|
|
129
|
+
if 'max_tokens' in adapted_args:
|
|
130
|
+
openai_args['max_tokens'] = adapted_args['max_tokens']
|
|
131
|
+
elif 'max_completion_tokens' in adapted_args:
|
|
132
|
+
openai_args['max_completion_tokens'] = adapted_args['max_completion_tokens']
|
|
75
133
|
|
|
76
134
|
response = completion(**openai_args)
|
|
77
135
|
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: mojentic
|
|
3
|
-
Version: 0.7.
|
|
3
|
+
Version: 0.7.4
|
|
4
4
|
Summary: Mojentic is an agentic framework that aims to provide a simple and flexible way to assemble teams of agents to solve complex problems.
|
|
5
5
|
Author-email: Stacey Vetzal <stacey@vetzal.com>
|
|
6
6
|
Project-URL: Homepage, https://github.com/svetzal/mojentic
|
|
@@ -21,6 +21,7 @@ _examples/image_broker.py,sha256=SoIphNj98zk1i7EQc7M2n2O_9ShDHwErmJ6jf67mzX0,355
|
|
|
21
21
|
_examples/image_broker_splat.py,sha256=O7rzTFUka32if4G4VuXvhu1O-2lRMWfi0r8gjIE8-0Y,1934
|
|
22
22
|
_examples/iterative_solver.py,sha256=ANGdC74ymHosVt6xUBjplkJl_W3ALTGxOkDpPLEDcm8,1331
|
|
23
23
|
_examples/list_models.py,sha256=8noMpGeXOdX5Pf0NXCt_CRurOKEg_5luhWveGntBhe8,578
|
|
24
|
+
_examples/model_characterization.py,sha256=XwLiUP1ZIrNs4ZLmjLDW-nJQsB66H-BV0bWgBgT3N7k,2571
|
|
24
25
|
_examples/oversized_embeddings.py,sha256=_z2JoqZn0g7VtRsFVWIkngVqzjhQQvCEUYWVxs1I7MM,284
|
|
25
26
|
_examples/raw.py,sha256=Y2wvgynFuoUs28agE4ijsLYec8VRjiReklqlCH2lERs,442
|
|
26
27
|
_examples/react.py,sha256=VQ-5MmjUXoHzBFPTV_JrocuOkDzZ8oyUUSYLlEToJ_0,939
|
|
@@ -83,7 +84,7 @@ mojentic/llm/gateways/models.py,sha256=OyIaMHKrrx6dHo5FbC8qOFct7PRql9wqbe_BJlgDS
|
|
|
83
84
|
mojentic/llm/gateways/ollama.py,sha256=OUUImBNzPte52Gsf-e7TBjDHRvYW5flU9ddxwG2zlzk,7909
|
|
84
85
|
mojentic/llm/gateways/ollama_messages_adapter.py,sha256=kUN_p2FyN88_trXMcL-Xsn9xPBU7pGKlJwTUEUCf6G4,1404
|
|
85
86
|
mojentic/llm/gateways/ollama_messages_adapter_spec.py,sha256=gVRbWDrHOa1EiZ0CkEWe0pGn-GKRqdGb-x56HBQeYSE,4981
|
|
86
|
-
mojentic/llm/gateways/openai.py,sha256=
|
|
87
|
+
mojentic/llm/gateways/openai.py,sha256=Wxx_WfG2czOv9Ng8q4JCLFIHGqNs3vaMtE5ggSLBjHk,7787
|
|
87
88
|
mojentic/llm/gateways/openai_message_adapter_spec.py,sha256=ITBSV5njldV_x0NPgjmg8Okf9KzevQJ8dTXM-t6ubcg,6612
|
|
88
89
|
mojentic/llm/gateways/openai_messages_adapter.py,sha256=Scal68JKKdBHB35ok1c5DeWYdD6Wra5oXSsPxJyyXSQ,3947
|
|
89
90
|
mojentic/llm/gateways/tokenizer_gateway.py,sha256=ztuqfunlJ6xmyUPPHcC_69-kegiNJD6jdSEde7hDh2w,485
|
|
@@ -131,8 +132,8 @@ mojentic/tracer/tracer_system.py,sha256=7CPy_2tlsHtXQ4DcO5oo52N9a9WS0GH-mjeINzu6
|
|
|
131
132
|
mojentic/tracer/tracer_system_spec.py,sha256=TNm0f9LV__coBx0JGEKyzzNN9mFjCSG_SSrRISO8Xeg,8632
|
|
132
133
|
mojentic/utils/__init__.py,sha256=lqECkkoFvHFttDnafRE1vvh0Dmna_lwupMToP5VvX5k,115
|
|
133
134
|
mojentic/utils/formatting.py,sha256=bPrwwdluXdQ8TsFxfWtHNOeMWKNvAfABSoUnnA1g7c8,947
|
|
134
|
-
mojentic-0.7.
|
|
135
|
-
mojentic-0.7.
|
|
136
|
-
mojentic-0.7.
|
|
137
|
-
mojentic-0.7.
|
|
138
|
-
mojentic-0.7.
|
|
135
|
+
mojentic-0.7.4.dist-info/licenses/LICENSE.md,sha256=txSgV8n5zY1W3NiF5HHsCwlaW0e8We1cSC6TuJUqxXA,1060
|
|
136
|
+
mojentic-0.7.4.dist-info/METADATA,sha256=shFHb00Ezpkzo6QJOp1vAIhXGONL2jFH7pzIJimPBao,5475
|
|
137
|
+
mojentic-0.7.4.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
|
|
138
|
+
mojentic-0.7.4.dist-info/top_level.txt,sha256=Q-BvPQ8Eu1jnEqK8Xkr6A9C8Xa1z38oPZRHuA5MCTqg,19
|
|
139
|
+
mojentic-0.7.4.dist-info/RECORD,,
|
|
File without changes
|
|
File without changes
|
|
File without changes
|