mojentic 0.7.2__tar.gz → 0.7.4__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {mojentic-0.7.2/src/mojentic.egg-info → mojentic-0.7.4}/PKG-INFO +1 -1
- {mojentic-0.7.2 → mojentic-0.7.4}/pyproject.toml +1 -1
- mojentic-0.7.4/src/_examples/model_characterization.py +73 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/gateways/anthropic.py +1 -1
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/gateways/llm_gateway.py +3 -1
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/gateways/ollama.py +6 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/gateways/openai.py +69 -6
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/llm_broker.py +42 -24
- {mojentic-0.7.2 → mojentic-0.7.4/src/mojentic.egg-info}/PKG-INFO +1 -1
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic.egg-info/SOURCES.txt +1 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/LICENSE.md +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/README.md +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/setup.cfg +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/_examples/__init__.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/_examples/async_dispatcher_example.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/_examples/async_llm_example.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/_examples/broker_as_tool.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/_examples/broker_examples.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/_examples/broker_image_examples.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/_examples/characterize_ollama.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/_examples/characterize_openai.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/_examples/chat_session.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/_examples/chat_session_with_tool.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/_examples/coding_file_tool.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/_examples/current_datetime_tool_example.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/_examples/design_analysis.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/_examples/embeddings.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/_examples/ensures_files_exist.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/_examples/ephemeral_task_manager_example.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/_examples/file_deduplication.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/_examples/file_tool.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/_examples/image_analysis.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/_examples/image_broker.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/_examples/image_broker_splat.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/_examples/iterative_solver.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/_examples/list_models.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/_examples/oversized_embeddings.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/_examples/raw.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/_examples/react/__init__.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/_examples/react/agents/__init__.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/_examples/react/agents/decisioning_agent.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/_examples/react/agents/thinking_agent.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/_examples/react/formatters.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/_examples/react/models/__init__.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/_examples/react/models/base.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/_examples/react/models/events.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/_examples/react.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/_examples/recursive_agent.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/_examples/routed_send_response.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/_examples/simple_llm.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/_examples/simple_llm_repl.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/_examples/simple_structured.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/_examples/simple_tool.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/_examples/solver_chat_session.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/_examples/streaming.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/_examples/tell_user_example.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/_examples/tracer_demo.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/_examples/working_memory.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/__init__.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/agents/__init__.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/agents/agent_broker.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/agents/async_aggregator_agent.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/agents/async_aggregator_agent_spec.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/agents/async_llm_agent.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/agents/async_llm_agent_spec.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/agents/base_agent.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/agents/base_async_agent.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/agents/base_llm_agent.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/agents/base_llm_agent_spec.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/agents/correlation_aggregator_agent.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/agents/iterative_problem_solver.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/agents/output_agent.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/agents/simple_recursive_agent.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/async_dispatcher.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/async_dispatcher_spec.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/context/__init__.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/context/shared_working_memory.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/dispatcher.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/event.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/__init__.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/chat_session.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/chat_session_spec.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/gateways/__init__.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/gateways/anthropic_messages_adapter.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/gateways/embeddings_gateway.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/gateways/file_gateway.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/gateways/models.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/gateways/ollama_messages_adapter.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/gateways/ollama_messages_adapter_spec.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/gateways/openai_message_adapter_spec.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/gateways/openai_messages_adapter.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/gateways/tokenizer_gateway.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/llm_broker_spec.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/message_composers.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/message_composers_spec.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/registry/__init__.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/registry/llm_registry.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/registry/models.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/registry/populate_registry_from_ollama.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/tools/__init__.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/tools/ask_user_tool.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/tools/current_datetime.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/tools/date_resolver.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/tools/date_resolver_spec.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/tools/ephemeral_task_manager/__init__.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/tools/ephemeral_task_manager/append_task_tool.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/tools/ephemeral_task_manager/append_task_tool_spec.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/tools/ephemeral_task_manager/clear_tasks_tool.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/tools/ephemeral_task_manager/clear_tasks_tool_spec.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/tools/ephemeral_task_manager/complete_task_tool.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/tools/ephemeral_task_manager/complete_task_tool_spec.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/tools/ephemeral_task_manager/ephemeral_task_list.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/tools/ephemeral_task_manager/ephemeral_task_list_spec.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/tools/ephemeral_task_manager/insert_task_after_tool.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/tools/ephemeral_task_manager/insert_task_after_tool_spec.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/tools/ephemeral_task_manager/list_tasks_tool.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/tools/ephemeral_task_manager/list_tasks_tool_spec.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/tools/ephemeral_task_manager/prepend_task_tool.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/tools/ephemeral_task_manager/prepend_task_tool_spec.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/tools/ephemeral_task_manager/start_task_tool.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/tools/ephemeral_task_manager/start_task_tool_spec.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/tools/file_manager.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/tools/file_manager_spec.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/tools/llm_tool.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/tools/llm_tool_spec.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/tools/organic_web_search.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/tools/tell_user_tool.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/tools/tool_wrapper.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/tools/tool_wrapper_spec.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/router.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/router_spec.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/tracer/__init__.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/tracer/event_store.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/tracer/event_store_spec.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/tracer/null_tracer.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/tracer/tracer_events.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/tracer/tracer_events_spec.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/tracer/tracer_system.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/tracer/tracer_system_spec.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/utils/__init__.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/utils/formatting.py +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic.egg-info/dependency_links.txt +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic.egg-info/requires.txt +0 -0
- {mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic.egg-info/top_level.txt +0 -0
{mojentic-0.7.2/src/mojentic.egg-info → mojentic-0.7.4}/PKG-INFO
```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: mojentic
-Version: 0.7.2
+Version: 0.7.4
 Summary: Mojentic is an agentic framework that aims to provide a simple and flexible way to assemble teams of agents to solve complex problems.
 Author-email: Stacey Vetzal <stacey@vetzal.com>
 Project-URL: Homepage, https://github.com/svetzal/mojentic
```
mojentic-0.7.4/src/_examples/model_characterization.py (new file)
```diff
@@ -0,0 +1,73 @@
+import os
+from mojentic.llm.gateways.openai import OpenAIGateway
+from mojentic.llm.gateways.models import LLMMessage, MessageRole
+
+def check_model_characterization():
+    """
+    Test the model characterization functionality with different OpenAI models.
+    This demonstrates how the gateway adapts parameters based on model type.
+    """
+    api_key = os.getenv("OPENAI_API_KEY")
+    if not api_key:
+        print("OPENAI_API_KEY environment variable not set. Skipping actual API calls.")
+        return
+
+    gateway = OpenAIGateway(api_key)
+
+    # Test messages for chat models
+    chat_messages = [
+        LLMMessage(role=MessageRole.System, content="You are a helpful assistant."),
+        LLMMessage(role=MessageRole.User, content="What is 2 + 2? Give a brief answer.")
+    ]
+
+    # Test messages for reasoning models (no system message supported)
+    reasoning_messages = [
+        LLMMessage(role=MessageRole.User, content="What is 2 + 2? Give a brief answer.")
+    ]
+
+    # Test with different model types
+    test_models = [
+        ("gpt-4o", "chat model"),
+        ("gpt-4o-mini", "chat model"),
+        ("o1-mini", "reasoning model"),
+        ("o1-preview", "reasoning model")
+    ]
+
+    print("Testing model characterization and parameter adaptation:")
+    print("=" * 60)
+
+    for model, model_type in test_models:
+        print(f"\nTesting {model} ({model_type}):")
+
+        # Test model classification
+        is_reasoning = gateway._is_reasoning_model(model)
+        print(f"  Classified as reasoning model: {is_reasoning}")
+
+        # Use appropriate messages based on model type
+        messages = reasoning_messages if gateway._is_reasoning_model(model) else chat_messages
+
+        # Test parameter adaptation
+        original_args = {
+            'model': model,
+            'messages': messages,
+            'max_tokens': 100
+        }
+
+        adapted_args = gateway._adapt_parameters_for_model(model, original_args)
+
+        if 'max_tokens' in adapted_args:
+            print(f"  Using parameter: max_tokens = {adapted_args['max_tokens']}")
+        elif 'max_completion_tokens' in adapted_args:
+            print(f"  Using parameter: max_completion_tokens = {adapted_args['max_completion_tokens']}")
+
+        try:
+            response = gateway.complete(**adapted_args)
+            print(f"  Response: {response.content[:50]}...")
+        except Exception as e:
+            print(f"  Error: {str(e)}")
+
+    print("\n" + "=" * 60)
+    print("Model characterization test completed!")
+
+if __name__ == "__main__":
+    check_model_characterization()
```
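The new example is self-contained and skips the API calls when no key is present. A minimal sketch of driving it from a REPL, assuming you run from a checkout or unpacked sdist so that `src/_examples` is importable:

```python
# A minimal sketch, assuming the mojentic source layout above; the sys.path
# tweak makes the src/ directory (and thus _examples) importable.
import sys
sys.path.insert(0, "src")

from _examples.model_characterization import check_model_characterization

# Prints a per-model classification and parameter report; with no
# OPENAI_API_KEY set it announces that and returns early.
check_model_characterization()
```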
{mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/gateways/anthropic.py
```diff
@@ -29,7 +29,7 @@ class AnthropicGateway(LLMGateway):
         response = self.client.messages.create(
             **anthropic_args,
             temperature=args.get('temperature', 1.0),
-            max_tokens=args.get('num_predict', 2000),
+            max_tokens=args.get('max_tokens', args.get('num_predict', 2000)),
             # thinking={
             #     "type": "enabled",
             #     "budget_tokens": 32768,
```
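The effect of the one-line change: an explicit `max_tokens` now takes precedence, with the legacy `num_predict` kept as a fallback. A standalone sketch of the lookup chain (not the gateway source itself):

```python
# Same nested-get pattern as the hunk above, shown in isolation.
def resolve_max_tokens(args: dict) -> int:
    return args.get('max_tokens', args.get('num_predict', 2000))

assert resolve_max_tokens({}) == 2000                                         # default
assert resolve_max_tokens({'num_predict': 4096}) == 4096                      # legacy fallback
assert resolve_max_tokens({'num_predict': 4096, 'max_tokens': 1024}) == 1024  # explicit wins
```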
{mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/gateways/llm_gateway.py
```diff
@@ -19,7 +19,7 @@ class LLMGateway:
                  object_model: Optional[Type[BaseModel]] = None,
                  tools: Optional[List[LLMTool]] = None,
                  temperature: float = 1.0,
-                 num_ctx: int = 32768,
+                 num_ctx: int = 32768, max_tokens: int = 16384,
                  num_predict: int = -1) -> LLMGatewayResponse:
         """
         Complete the LLM request.
```
```diff
@@ -39,6 +39,8 @@ class LLMGateway:
             The temperature to use for the response. Defaults to 1.0.
         num_ctx : int
             The number of context tokens to use. Defaults to 32768.
+        max_tokens : int
+            The maximum number of tokens to generate. Defaults to 16384.
         num_predict : int
             The number of tokens to predict. Defaults to no limit.
 
```
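Taken together, the two hunks widen the abstract `complete()` contract. A hedged usage sketch, assuming `gateway` is any concrete `LLMGateway` implementation and the model name is a placeholder:

```python
from mojentic.llm.gateways.models import LLMMessage, MessageRole

# `gateway` is assumed to be a concrete LLMGateway (Ollama, OpenAI, Anthropic).
response = gateway.complete(
    model="some-model",   # placeholder name
    messages=[LLMMessage(role=MessageRole.User, content="Hello")],
    temperature=1.0,
    num_ctx=32768,
    max_tokens=16384,     # new in 0.7.4
    num_predict=-1,
)
```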
{mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/gateways/ollama.py
```diff
@@ -35,6 +35,8 @@ class OllamaGateway(LLMGateway):
         )
         if args.get('num_predict', 0) > 0:
             options.num_predict = args['num_predict']
+        if 'max_tokens' in args:
+            options.num_predict = args['max_tokens']
         return options
 
     def complete(self, **args) -> LLMGatewayResponse:
```
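Ollama has no native `max_tokens` option, so the gateway folds it into `num_predict`; because the `max_tokens` branch runs second, it wins when both are supplied. A standalone sketch of that ordering:

```python
# Mirrors the option-building logic in the hunk above, outside the gateway.
def effective_num_predict(args: dict):
    num_predict = None
    if args.get('num_predict', 0) > 0:
        num_predict = args['num_predict']
    if 'max_tokens' in args:
        num_predict = args['max_tokens']
    return num_predict

assert effective_num_predict({'num_predict': 512}) == 512
assert effective_num_predict({'num_predict': 512, 'max_tokens': 128}) == 128
```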
{mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/gateways/ollama.py (continued)
```diff
@@ -56,6 +58,8 @@ class OllamaGateway(LLMGateway):
             The temperature to use for the response. Defaults to 1.0.
         num_ctx : int, optional
             The number of context tokens to use. Defaults to 32768.
+        max_tokens : int, optional
+            The maximum number of tokens to generate. Defaults to 16384.
         num_predict : int, optional
             The number of tokens to predict. Defaults to no limit.
 
```
```diff
@@ -120,6 +124,8 @@ class OllamaGateway(LLMGateway):
             The temperature to use for the response. Defaults to 1.0.
         num_ctx : int, optional
             The number of context tokens to use. Defaults to 32768.
+        max_tokens : int, optional
+            The maximum number of tokens to generate. Defaults to 16384.
         num_predict : int, optional
             The number of tokens to predict. Defaults to no limit.
 
```
{mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/gateways/openai.py
```diff
@@ -27,6 +27,58 @@ class OpenAIGateway(LLMGateway):
     def __init__(self, api_key: str, base_url: str = None):
         self.client = OpenAI(api_key=api_key, base_url=base_url)
 
+    def _is_reasoning_model(self, model: str) -> bool:
+        """
+        Determine if a model is a reasoning model that requires max_completion_tokens.
+
+        Parameters
+        ----------
+        model : str
+            The model name to classify.
+
+        Returns
+        -------
+        bool
+            True if the model is a reasoning model, False if it's a chat model.
+        """
+        # OpenAI reasoning models typically start with "o1" or contain "o4"
+        reasoning_model_patterns = [
+            "o1-",
+            "o3-",
+            "o4-",
+            "o1",
+            "o3"
+        ]
+
+        model_lower = model.lower()
+        return any(pattern in model_lower for pattern in reasoning_model_patterns)
+
+    def _adapt_parameters_for_model(self, model: str, args: dict) -> dict:
+        """
+        Adapt parameters based on the model type.
+
+        Parameters
+        ----------
+        model : str
+            The model name.
+        args : dict
+            The original arguments.
+
+        Returns
+        -------
+        dict
+            The adapted arguments with correct parameter names for the model type.
+        """
+        adapted_args = args.copy()
+
+        if self._is_reasoning_model(model) and 'max_tokens' in adapted_args:
+            # For reasoning models, use max_completion_tokens instead of max_tokens
+            adapted_args['max_completion_tokens'] = adapted_args.pop('max_tokens')
+            logger.debug("Adapted max_tokens to max_completion_tokens for reasoning model",
+                         model=model, max_completion_tokens=adapted_args['max_completion_tokens'])
+
+        return adapted_args
+
     def complete(self, **args) -> LLMGatewayResponse:
         """
         Complete the LLM request by delegating to the OpenAI service.
```
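The classification is a case-insensitive substring test. Restated outside the class, with the patterns copied from the hunk, to show how a few names classify:

```python
reasoning_model_patterns = ["o1-", "o3-", "o4-", "o1", "o3"]

def is_reasoning_model(model: str) -> bool:
    model_lower = model.lower()
    return any(pattern in model_lower for pattern in reasoning_model_patterns)

assert is_reasoning_model("o1-mini")
assert is_reasoning_model("o3")
assert not is_reasoning_model("gpt-4o")       # "4o" is not "o4-"
assert not is_reasoning_model("gpt-4o-mini")
```

Note that the bare `o1`/`o3` patterns subsume the hyphenated ones, and any model name containing those substrings anywhere will classify as a reasoning model.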
{mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/gateways/openai.py (continued)
```diff
@@ -46,6 +98,8 @@ class OpenAIGateway(LLMGateway):
             The temperature to use for the response. Defaults to 1.0.
         num_ctx : int, optional
             The number of context tokens to use. Defaults to 32768.
+        max_tokens : int, optional
+            The maximum number of tokens to generate. Defaults to 16384.
         num_predict : int, optional
             The number of tokens to predict. Defaults to no limit.
 
```
```diff
@@ -54,19 +108,28 @@ class OpenAIGateway(LLMGateway):
         LLMGatewayResponse
             The response from the OpenAI service.
         """
+        # Adapt parameters based on model type
+        adapted_args = self._adapt_parameters_for_model(args['model'], args)
+
         openai_args = {
-            'model': args['model'],
-            'messages': adapt_messages_to_openai(args['messages']),
+            'model': adapted_args['model'],
+            'messages': adapt_messages_to_openai(adapted_args['messages']),
         }
 
         completion = self.client.chat.completions.create
 
-        if 'object_model' in args and args['object_model'] is not None:
+        if 'object_model' in adapted_args and adapted_args['object_model'] is not None:
             completion = self.client.beta.chat.completions.parse
-            openai_args['response_format'] = args['object_model']
+            openai_args['response_format'] = adapted_args['object_model']
+
+        if 'tools' in adapted_args and adapted_args['tools'] is not None:
+            openai_args['tools'] = [t.descriptor for t in adapted_args['tools']]
 
-        if 'tools' in args and args['tools'] is not None:
-            openai_args['tools'] = [t.descriptor for t in args['tools']]
+        # Handle both max_tokens (for chat models) and max_completion_tokens (for reasoning models)
+        if 'max_tokens' in adapted_args:
+            openai_args['max_tokens'] = adapted_args['max_tokens']
+        elif 'max_completion_tokens' in adapted_args:
+            openai_args['max_completion_tokens'] = adapted_args['max_completion_tokens']
 
         response = completion(**openai_args)
```
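End to end, `complete()` now renames the token-budget parameter before building the request. A sketch of the visible behaviour, reusing `is_reasoning_model` from the sketch above:

```python
# Mirrors _adapt_parameters_for_model from the hunk, without the class or logging.
def adapt_parameters_for_model(model: str, args: dict) -> dict:
    adapted = args.copy()
    if is_reasoning_model(model) and 'max_tokens' in adapted:
        adapted['max_completion_tokens'] = adapted.pop('max_tokens')
    return adapted

chat = adapt_parameters_for_model("gpt-4o", {"max_tokens": 100})
reasoning = adapt_parameters_for_model("o1-mini", {"max_tokens": 100})

assert chat == {"max_tokens": 100}                  # chat models: untouched
assert reasoning == {"max_completion_tokens": 100}  # reasoning models: renamed
```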
{mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/llm_broker.py
```diff
@@ -5,18 +5,19 @@ from typing import List, Optional, Type
 import structlog
 from pydantic import BaseModel
 
-from mojentic.tracer.tracer_system import TracerSystem
 from mojentic.llm.gateways.llm_gateway import LLMGateway
 from mojentic.llm.gateways.models import MessageRole, LLMMessage, LLMGatewayResponse
 from mojentic.llm.gateways.ollama import OllamaGateway
 from mojentic.llm.gateways.tokenizer_gateway import TokenizerGateway
+from mojentic.tracer.tracer_system import TracerSystem
 
 logger = structlog.get_logger()
 
 
 class LLMBroker():
     """
-    This class is responsible for managing interaction with a Large Language Model. It abstracts the user
+    This class is responsible for managing interaction with a Large Language Model. It abstracts
+    the user
     from the specific mechanics of the LLM and provides a common interface for generating responses.
     """
 
```
```diff
@@ -25,7 +26,8 @@ class LLMBroker():
     model: str
     tracer: Optional[TracerSystem]
 
-    def __init__(self, model: str, gateway: Optional[LLMGateway] = None, tokenizer: Optional[TokenizerGateway] = None,
+    def __init__(self, model: str, gateway: Optional[LLMGateway] = None,
+                 tokenizer: Optional[TokenizerGateway] = None,
                  tracer: Optional[TracerSystem] = None):
         """
         Create an instance of the LLMBroker.
```
```diff
@@ -35,10 +37,12 @@ class LLMBroker():
         model
             The name of the model to use.
         gateway
-            The gateway to use for communication with the LLM. If None, a gateway is created that will utilize a local
+            The gateway to use for communication with the LLM. If None, a gateway is created that
+            will utilize a local
             Ollama server.
         tokenizer
-            The gateway to use for tokenization. This is used to log approximate token counts for the LLM calls. If
+            The gateway to use for tokenization. This is used to log approximate token counts for
+            the LLM calls. If
             None, `mxbai-embed-large` is used on a local Ollama server.
         tracer
             Optional tracer system to record LLM calls and responses.
```
```diff
@@ -58,8 +62,9 @@ class LLMBroker():
         else:
             self.adapter = gateway
 
-    def generate(self, messages: List[LLMMessage], tools=None, temperature=1.0, num_ctx=32768, num_predict=-1,
-                 correlation_id: str = None) -> str:
+    def generate(self, messages: List[LLMMessage], tools=None, temperature=1.0, num_ctx=32768,
+                 num_predict=-1, max_tokens=16384,
+                 correlation_id: str = None) -> str:
         """
         Generate a text response from the LLM.
 
```
```diff
@@ -68,7 +73,8 @@ class LLMBroker():
         messages : LLMMessage
             A list of messages to send to the LLM.
         tools : List[Tool]
-            A list of tools to use with the LLM. If a tool call is requested, the tool will be called and the output
+            A list of tools to use with the LLM. If a tool call is requested, the tool will be
+            called and the output
             will be included in the response.
         temperature : float
             The temperature to use for the response. Defaults to 1.0
```
```diff
@@ -91,10 +97,11 @@ class LLMBroker():
         messages_for_tracer = [m.model_dump() for m in messages]
 
         # Record LLM call in tracer
-        tools_for_tracer = [{"name": t.name, "description": t.description} for t in tools] if tools else None
+        tools_for_tracer = [{"name": t.name, "description": t.description} for t in
+                            tools] if tools else None
         self.tracer.record_llm_call(
-            self.model,
-            messages_for_tracer,
+            self.model,
+            messages_for_tracer,
             temperature,
             tools=tools_for_tracer,
             source=type(self),
```
```diff
@@ -110,12 +117,14 @@ class LLMBroker():
             tools=tools,
             temperature=temperature,
             num_ctx=num_ctx,
-            num_predict=num_predict)
+            num_predict=num_predict,
+            max_tokens=max_tokens)
 
         call_duration_ms = (time.time() - start_time) * 1000
 
         # Record LLM response in tracer
-        tool_calls_for_tracer = [tc.model_dump() for tc in result.tool_calls] if result.tool_calls else None
+        tool_calls_for_tracer = [tc.model_dump() for tc in
+                                 result.tool_calls] if result.tool_calls else None
         self.tracer.record_llm_response(
             self.model,
             result.content,
@@ -153,13 +162,17 @@ class LLMBroker():
                     logger.info('Function output', output=output)
                     messages.append(LLMMessage(role=MessageRole.Assistant, tool_calls=[tool_call]))
                     messages.append(
-                        LLMMessage(role=MessageRole.Tool, content=json.dumps(output), tool_calls=[tool_call]))
-                    # {'role': 'tool', 'content': str(output), 'name': tool_call.name, 'tool_call_id': tool_call.id})
-                    return self.generate(messages, tools, temperature, num_ctx, num_predict, correlation_id=correlation_id)
+                        LLMMessage(role=MessageRole.Tool, content=json.dumps(output),
+                                   tool_calls=[tool_call]))
+                    # {'role': 'tool', 'content': str(output), 'name': tool_call.name,
+                    # 'tool_call_id': tool_call.id})
+                    return self.generate(messages, tools, temperature, num_ctx, num_predict,
+                                         correlation_id=correlation_id)
                 else:
                     logger.warn('Function not found', function=tool_call.name)
                     logger.info('Expected usage of missing function', expected_usage=tool_call)
-                    # raise Exception('Unknown tool function requested:', requested_tool.function.name)
+                    # raise Exception('Unknown tool function requested:',
+                    #                 requested_tool.function.name)
 
         return result.content
 
```
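For callers, the broker-level change is just one more keyword. A hedged usage sketch (the model name is a placeholder, and the default gateway will try to reach a local Ollama server at call time):

```python
from mojentic.llm.llm_broker import LLMBroker
from mojentic.llm.gateways.models import LLMMessage, MessageRole

broker = LLMBroker(model="some-local-model")  # placeholder; defaults to Ollama
text = broker.generate(
    [LLMMessage(role=MessageRole.User, content="Summarize this in one line.")],
    max_tokens=256,   # new in 0.7.4; forwarded to the gateway's complete()
)
```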
{mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic/llm/llm_broker.py (continued)
```diff
@@ -170,8 +183,9 @@ class LLMBroker():
             content += message.content
         return content
 
-    def generate_object(self, messages: List[LLMMessage], object_model: Type[BaseModel], temperature=1.0, num_ctx=32768,
-                 num_predict=-1, correlation_id: str = None) -> BaseModel:
+    def generate_object(self, messages: List[LLMMessage], object_model: Type[BaseModel],
+                        temperature=1.0, num_ctx=32768, num_predict=-1, max_tokens=16384,
+                        correlation_id: str = None) -> BaseModel:
         """
         Generate a structured response from the LLM and return it as an object.
 
```
```diff
@@ -203,8 +217,8 @@ class LLMBroker():
 
         # Record LLM call in tracer
         self.tracer.record_llm_call(
-            self.model,
-            messages_for_tracer,
+            self.model,
+            messages_for_tracer,
             temperature,
             tools=None,
             source=type(self),
```
```diff
@@ -214,14 +228,18 @@ class LLMBroker():
         # Measure call duration for audit
         start_time = time.time()
 
-        result = self.adapter.complete(model=self.model, messages=messages, object_model=object_model,
-                                       temperature=temperature, num_ctx=num_ctx, num_predict=num_predict)
+        result = self.adapter.complete(model=self.model, messages=messages,
+                                       object_model=object_model,
+                                       temperature=temperature, num_ctx=num_ctx,
+                                       num_predict=num_predict, max_tokens=max_tokens)
 
         call_duration_ms = (time.time() - start_time) * 1000
 
         # Record LLM response in tracer with object representation
         # Convert object to string for tracer
-        object_str = str(result.object.model_dump()) if hasattr(result.object, "model_dump") else str(result.object)
+        object_str = str(result.object.model_dump()) if hasattr(result.object,
+                                                                "model_dump") else str(
+            result.object)
         self.tracer.record_llm_response(
             self.model,
             f"Structured response: {object_str}",
```
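`generate_object()` gains the same keyword. Continuing the sketch above, with an illustrative Pydantic model:

```python
from pydantic import BaseModel

class Answer(BaseModel):
    value: int

# `broker`, `LLMMessage`, and `MessageRole` as in the previous sketch.
answer = broker.generate_object(
    [LLMMessage(role=MessageRole.User, content="What is 2 + 2?")],
    object_model=Answer,
    max_tokens=64,    # also threaded through to the gateway in 0.7.4
)
```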
{mojentic-0.7.2 → mojentic-0.7.4/src/mojentic.egg-info}/PKG-INFO
```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: mojentic
-Version: 0.7.2
+Version: 0.7.4
 Summary: Mojentic is an agentic framework that aims to provide a simple and flexible way to assemble teams of agents to solve complex problems.
 Author-email: Stacey Vetzal <stacey@vetzal.com>
 Project-URL: Homepage, https://github.com/svetzal/mojentic
```
{mojentic-0.7.2 → mojentic-0.7.4}/src/mojentic.egg-info/SOURCES.txt
```diff
@@ -25,6 +25,7 @@ src/_examples/image_broker.py
 src/_examples/image_broker_splat.py
 src/_examples/iterative_solver.py
 src/_examples/list_models.py
+src/_examples/model_characterization.py
 src/_examples/oversized_embeddings.py
 src/_examples/raw.py
 src/_examples/react.py
```