mojentic 0.8.3__tar.gz → 0.9.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {mojentic-0.8.3 → mojentic-0.9.0}/PKG-INFO +35 -4
- {mojentic-0.8.3 → mojentic-0.9.0}/README.md +32 -3
- {mojentic-0.8.3 → mojentic-0.9.0}/pyproject.toml +3 -1
- {mojentic-0.8.3 → mojentic-0.9.0}/src/_examples/broker_as_tool.py +3 -3
- {mojentic-0.8.3 → mojentic-0.9.0}/src/_examples/broker_examples.py +1 -1
- {mojentic-0.8.3 → mojentic-0.9.0}/src/_examples/broker_image_examples.py +1 -1
- {mojentic-0.8.3 → mojentic-0.9.0}/src/_examples/characterize_ollama.py +2 -2
- {mojentic-0.8.3 → mojentic-0.9.0}/src/_examples/chat_session.py +1 -1
- {mojentic-0.8.3 → mojentic-0.9.0}/src/_examples/chat_session_with_tool.py +1 -1
- {mojentic-0.8.3 → mojentic-0.9.0}/src/_examples/coding_file_tool.py +16 -16
- {mojentic-0.8.3 → mojentic-0.9.0}/src/_examples/current_datetime_tool_example.py +1 -1
- {mojentic-0.8.3 → mojentic-0.9.0}/src/_examples/ephemeral_task_manager_example.py +2 -2
- {mojentic-0.8.3 → mojentic-0.9.0}/src/_examples/file_tool.py +5 -5
- {mojentic-0.8.3 → mojentic-0.9.0}/src/_examples/iterative_solver.py +1 -1
- {mojentic-0.8.3 → mojentic-0.9.0}/src/_examples/react.py +1 -1
- {mojentic-0.8.3 → mojentic-0.9.0}/src/_examples/recursive_agent.py +1 -1
- {mojentic-0.8.3 → mojentic-0.9.0}/src/_examples/simple_llm.py +3 -3
- {mojentic-0.8.3 → mojentic-0.9.0}/src/_examples/simple_llm_repl.py +1 -1
- {mojentic-0.8.3 → mojentic-0.9.0}/src/_examples/simple_structured.py +1 -1
- {mojentic-0.8.3 → mojentic-0.9.0}/src/_examples/simple_tool.py +2 -2
- {mojentic-0.8.3 → mojentic-0.9.0}/src/_examples/solver_chat_session.py +4 -4
- mojentic-0.9.0/src/_examples/streaming.py +49 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/_examples/tell_user_example.py +1 -1
- {mojentic-0.8.3 → mojentic-0.9.0}/src/_examples/tracer_demo.py +3 -3
- {mojentic-0.8.3 → mojentic-0.9.0}/src/_examples/working_memory.py +1 -1
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/gateways/ollama.py +21 -18
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/gateways/openai.py +208 -4
- mojentic-0.9.0/src/mojentic/llm/gateways/openai_spec.py +99 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/llm_broker.py +161 -3
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/llm_broker_spec.py +69 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/registry/populate_registry_from_ollama.py +2 -2
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic.egg-info/PKG-INFO +35 -4
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic.egg-info/SOURCES.txt +1 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic.egg-info/requires.txt +2 -0
- mojentic-0.8.3/src/_examples/streaming.py +0 -34
- {mojentic-0.8.3 → mojentic-0.9.0}/LICENSE.md +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/setup.cfg +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/_examples/__init__.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/_examples/async_dispatcher_example.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/_examples/async_llm_example.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/_examples/characterize_openai.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/_examples/design_analysis.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/_examples/embeddings.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/_examples/ensures_files_exist.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/_examples/fetch_openai_models.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/_examples/file_deduplication.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/_examples/image_analysis.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/_examples/image_broker.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/_examples/image_broker_splat.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/_examples/list_models.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/_examples/model_characterization.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/_examples/openai_gateway_enhanced_demo.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/_examples/oversized_embeddings.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/_examples/raw.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/_examples/react/__init__.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/_examples/react/agents/__init__.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/_examples/react/agents/decisioning_agent.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/_examples/react/agents/thinking_agent.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/_examples/react/formatters.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/_examples/react/models/__init__.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/_examples/react/models/base.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/_examples/react/models/events.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/_examples/routed_send_response.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/_examples/tracer_qt_viewer.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/__init__.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/agents/__init__.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/agents/agent_broker.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/agents/async_aggregator_agent.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/agents/async_aggregator_agent_spec.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/agents/async_llm_agent.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/agents/async_llm_agent_spec.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/agents/base_agent.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/agents/base_async_agent.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/agents/base_llm_agent.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/agents/base_llm_agent_spec.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/agents/correlation_aggregator_agent.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/agents/iterative_problem_solver.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/agents/output_agent.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/agents/simple_recursive_agent.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/async_dispatcher.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/async_dispatcher_spec.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/context/__init__.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/context/shared_working_memory.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/dispatcher.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/event.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/__init__.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/chat_session.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/chat_session_spec.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/gateways/__init__.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/gateways/anthropic.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/gateways/anthropic_messages_adapter.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/gateways/embeddings_gateway.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/gateways/file_gateway.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/gateways/llm_gateway.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/gateways/models.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/gateways/ollama_messages_adapter.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/gateways/ollama_messages_adapter_spec.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/gateways/openai_message_adapter_spec.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/gateways/openai_messages_adapter.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/gateways/openai_model_registry.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/gateways/openai_model_registry_spec.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/gateways/openai_temperature_handling_spec.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/gateways/tokenizer_gateway.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/message_composers.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/message_composers_spec.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/registry/__init__.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/registry/llm_registry.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/registry/models.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/tools/__init__.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/tools/ask_user_tool.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/tools/current_datetime.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/tools/date_resolver.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/tools/date_resolver_spec.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/tools/ephemeral_task_manager/__init__.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/tools/ephemeral_task_manager/append_task_tool.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/tools/ephemeral_task_manager/append_task_tool_spec.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/tools/ephemeral_task_manager/clear_tasks_tool.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/tools/ephemeral_task_manager/clear_tasks_tool_spec.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/tools/ephemeral_task_manager/complete_task_tool.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/tools/ephemeral_task_manager/complete_task_tool_spec.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/tools/ephemeral_task_manager/ephemeral_task_list.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/tools/ephemeral_task_manager/ephemeral_task_list_spec.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/tools/ephemeral_task_manager/insert_task_after_tool.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/tools/ephemeral_task_manager/insert_task_after_tool_spec.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/tools/ephemeral_task_manager/list_tasks_tool.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/tools/ephemeral_task_manager/list_tasks_tool_spec.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/tools/ephemeral_task_manager/prepend_task_tool.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/tools/ephemeral_task_manager/prepend_task_tool_spec.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/tools/ephemeral_task_manager/start_task_tool.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/tools/ephemeral_task_manager/start_task_tool_spec.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/tools/file_manager.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/tools/file_manager_spec.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/tools/llm_tool.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/tools/llm_tool_spec.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/tools/organic_web_search.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/tools/tell_user_tool.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/tools/tool_wrapper.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/llm/tools/tool_wrapper_spec.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/router.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/router_spec.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/tracer/__init__.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/tracer/event_store.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/tracer/event_store_spec.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/tracer/null_tracer.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/tracer/tracer_events.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/tracer/tracer_events_spec.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/tracer/tracer_system.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/tracer/tracer_system_spec.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/utils/__init__.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic/utils/formatting.py +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic.egg-info/dependency_links.txt +0 -0
- {mojentic-0.8.3 → mojentic-0.9.0}/src/mojentic.egg-info/top_level.txt +0 -0
````diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: mojentic
-Version: 0.8.3
+Version: 0.9.0
 Summary: Mojentic is an agentic framework that aims to provide a simple and flexible way to assemble teams of agents to solve complex problems.
 Author-email: Stacey Vetzal <stacey@vetzal.com>
 Project-URL: Homepage, https://github.com/svetzal/mojentic
@@ -29,6 +29,8 @@ Requires-Dist: pytest-spec; extra == "dev"
 Requires-Dist: pytest-cov; extra == "dev"
 Requires-Dist: pytest-mock>=3.10.0; extra == "dev"
 Requires-Dist: flake8>=6.0.0; extra == "dev"
+Requires-Dist: bandit>=1.7.0; extra == "dev"
+Requires-Dist: pip-audit>=2.0.0; extra == "dev"
 Requires-Dist: mkdocs; extra == "dev"
 Requires-Dist: mkdocs-material; extra == "dev"
 Requires-Dist: mkdocs-llmstxt; extra == "dev"
@@ -91,7 +93,7 @@ openai_llm = LLMBroker(model="gpt-5", gateway=OpenAIGateway(api_key="your_api_ke
 # Or use other models: "gpt-4o", "gpt-4.1", "o1-mini", "o3-mini", etc.
 
 # Or use Ollama for local LLMs
-ollama_llm = LLMBroker(model="
+ollama_llm = LLMBroker(model="qwen3:32b")
 
 # Simple text generation
 result = openai_llm.generate(messages=[LLMMessage(content='Hello, how are you?')])
@@ -121,6 +123,35 @@ result = openai_llm.generate(messages=[
 print(result)
 ```
 
+## 🔑 OpenAI configuration
+
+OpenAIGateway now supports environment-variable defaults so you can get started without hardcoding secrets:
+
+- If you omit `api_key`, it will use the `OPENAI_API_KEY` environment variable.
+- If you omit `base_url`, it will use the `OPENAI_API_ENDPOINT` environment variable (useful for custom endpoints like Azure/OpenAI-compatible proxies).
+- Precedence: values you pass explicitly to `OpenAIGateway(api_key=..., base_url=...)` always override environment variables.
+
+Examples:
+
+```python
+from mojentic.llm import LLMBroker
+from mojentic.llm.gateways import OpenAIGateway
+
+# 1) Easiest: rely on environment variables
+# export OPENAI_API_KEY=sk-...
+# export OPENAI_API_ENDPOINT=https://api.openai.com/v1  # optional
+llm = LLMBroker(
+    model="gpt-4o-mini",
+    gateway=OpenAIGateway()  # picks up OPENAI_API_KEY/OPENAI_API_ENDPOINT automatically
+)
+
+# 2) Explicitly override one or both values
+llm = LLMBroker(
+    model="gpt-4o-mini",
+    gateway=OpenAIGateway(api_key="your_key", base_url="https://api.openai.com/v1")
+)
+```
+
 ## 🤖 OpenAI Model Support
 
 The framework automatically handles parameter differences between model types, so you can switch between any models without code changes.
````
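The precedence rule documented in the new "🔑 OpenAI configuration" section is easy to picture in code. As a minimal sketch of how the gateway presumably resolves its settings (an illustration assumed from the documented behaviour, not code taken from this diff; `resolve_openai_settings` is a hypothetical helper):

```python
import os
from typing import Optional


def resolve_openai_settings(api_key: Optional[str] = None,
                            base_url: Optional[str] = None) -> dict:
    # Hypothetical helper mirroring the documented precedence:
    # explicit arguments win, environment variables are the fallback.
    return {
        "api_key": api_key if api_key is not None else os.environ.get("OPENAI_API_KEY"),
        "base_url": base_url if base_url is not None else os.environ.get("OPENAI_API_ENDPOINT"),
    }


# With no arguments, values come from the environment (or stay None):
print(resolve_openai_settings())
# An explicit argument always wins over the environment:
print(resolve_openai_settings(api_key="sk-explicit"))
```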
````diff
@@ -170,9 +201,9 @@ pip install -e ".[dev]"
 pytest
 ```
 
-## 
+## ✅ Project Status
 
-
+The agentic aspects of this framework are in the highest state of flux. The first layer has stabilized, as have the simpler parts of the second layer, and we're working on the stability of the asynchronous pubsub architecture. We expect Python 3.14 will be the real enabler for the async aspects of the second layer.
 
 ## 📄 License
 
@@ -52,7 +52,7 @@ openai_llm = LLMBroker(model="gpt-5", gateway=OpenAIGateway(api_key="your_api_ke
 # Or use other models: "gpt-4o", "gpt-4.1", "o1-mini", "o3-mini", etc.
 
 # Or use Ollama for local LLMs
-ollama_llm = LLMBroker(model="
+ollama_llm = LLMBroker(model="qwen3:32b")
 
 # Simple text generation
 result = openai_llm.generate(messages=[LLMMessage(content='Hello, how are you?')])
@@ -82,6 +82,35 @@ result = openai_llm.generate(messages=[
 print(result)
 ```
 
+## 🔑 OpenAI configuration
+
+OpenAIGateway now supports environment-variable defaults so you can get started without hardcoding secrets:
+
+- If you omit `api_key`, it will use the `OPENAI_API_KEY` environment variable.
+- If you omit `base_url`, it will use the `OPENAI_API_ENDPOINT` environment variable (useful for custom endpoints like Azure/OpenAI-compatible proxies).
+- Precedence: values you pass explicitly to `OpenAIGateway(api_key=..., base_url=...)` always override environment variables.
+
+Examples:
+
+```python
+from mojentic.llm import LLMBroker
+from mojentic.llm.gateways import OpenAIGateway
+
+# 1) Easiest: rely on environment variables
+# export OPENAI_API_KEY=sk-...
+# export OPENAI_API_ENDPOINT=https://api.openai.com/v1  # optional
+llm = LLMBroker(
+    model="gpt-4o-mini",
+    gateway=OpenAIGateway()  # picks up OPENAI_API_KEY/OPENAI_API_ENDPOINT automatically
+)
+
+# 2) Explicitly override one or both values
+llm = LLMBroker(
+    model="gpt-4o-mini",
+    gateway=OpenAIGateway(api_key="your_key", base_url="https://api.openai.com/v1")
+)
+```
+
 ## 🤖 OpenAI Model Support
 
 The framework automatically handles parameter differences between model types, so you can switch between any models without code changes.
@@ -131,9 +160,9 @@ pip install -e ".[dev]"
 pytest
 ```
 
-## 
+## ✅ Project Status
 
-
+The agentic aspects of this framework are in the highest state of flux. The first layer has stabilized, as have the simpler parts of the second layer, and we're working on the stability of the asynchronous pubsub architecture. We expect Python 3.14 will be the real enabler for the async aspects of the second layer.
 
 ## 📄 License
 
````
````diff
@@ -1,6 +1,6 @@
 [project]
 name = "mojentic"
-version = "0.8.3"
+version = "0.9.0"
 authors = [
     { name = "Stacey Vetzal", email = "stacey@vetzal.com" },
 ]
@@ -35,6 +35,8 @@ dev = [
     "pytest-cov",
     "pytest-mock>=3.10.0",
     "flake8>=6.0.0",
+    "bandit>=1.7.0",
+    "pip-audit>=2.0.0",
     "mkdocs",
     "mkdocs-material",
     "mkdocs-llmstxt",
````
````diff
@@ -10,7 +10,7 @@ from mojentic.llm.tools.tool_wrapper import ToolWrapper
 #
 
 temporal_specialist = BaseLLMAgent(
-    llm=LLMBroker(model="
+    llm=LLMBroker(model="qwen3:7b"),
     tools=[ResolveDateTool()],
     behaviour="You are a historian and sociologist who focuses on sorting out temporal events, determining what happened or will happen when."
 )
@@ -22,7 +22,7 @@ if not os.path.exists("local"):
 fs = FilesystemGateway(base_path="local")
 
 knowledge_specialist = BaseLLMAgent(
-    llm=LLMBroker(model="
+    llm=LLMBroker(model="qwen3:32b"),
     tools=[
         ListFilesTool(fs),
         ReadFileTool(fs),
@@ -34,7 +34,7 @@ knowledge_specialist = BaseLLMAgent(
 
 
 coordinator = BaseLLMAgent(
-    llm=LLMBroker(model="
+    llm=LLMBroker(model="qwen3:32b"),
    behaviour="You are a coordinator who can manage multiple agents and delegate tasks to them to solve problems.",
     tools=[
         ToolWrapper(temporal_specialist, "temporal_specialist", "A historian and sociologist who focuses on sorting out temporal events, figuring out dates, determining what happened or will happen when."),
@@ -13,7 +13,7 @@ def check_ollama_gateway():
         label: str = Field(..., description="The label describing the feeling.")
 
     response = gateway.complete(
-        model="
+        model="qwen3:7b",
         messages=[LLMMessage(content="Hello, how are you?")],
         object_model=Feeling,
         temperature=1.0,
@@ -25,7 +25,7 @@ def check_ollama_gateway():
 
 def check_tools_call():
     response = chat(
-        model="
+        model="qwen3:32b",
         messages=[
             # {
             #     'role': 'user',
````
````diff
@@ -1,7 +1,7 @@
 from mojentic.llm import ChatSession, LLMBroker
 from mojentic.llm.tools.date_resolver import ResolveDateTool
 
-llm_broker = LLMBroker(model="
+llm_broker = LLMBroker(model="qwen3:32b")
 chat_session = ChatSession(llm_broker, tools=[ResolveDateTool()])
 
 while True:
@@ -38,8 +38,8 @@ api_key = os.getenv("OPENAI_API_KEY")
 gateway = OpenAIGateway(api_key)
 llm = LLMBroker(model="o4-mini", gateway=gateway)
 
-# llm = LLMBroker("
-
+# llm = LLMBroker("qwen3-coder:30b")
+llm = LLMBroker("qwen3-coder:30b")
 # llm = LLMBroker(model="qwen3-128k:32b")
 
 # Create a filesystem gateway for the sandbox
````
````diff
@@ -75,28 +75,28 @@ solver = IterativeProblemSolver(
     system_prompt="""
 # 0 - Project Identity & Context
 
-You are an expert and principled software engineer, well versed in writing Python games. You work
-carefully and purposefully and always check your work with an eye to testability and correctness.
-You know that every line of code you write is a liability, and you take care that every line
+You are an expert and principled software engineer, well versed in writing Python games. You work
+carefully and purposefully and always check your work with an eye to testability and correctness.
+You know that every line of code you write is a liability, and you take care that every line
 matters.
 
 # 1 - Universal Engineering Principles
 
 * **Code is communication** — optimise for the next human reader.
-* **Simple Design Heuristics** — guiding principles, not iron laws; consult the user when you
+* **Simple Design Heuristics** — guiding principles, not iron laws; consult the user when you
   need to break them.
   1. **All tests pass** — correctness is non‑negotiable.
   2. **Reveals intent** — code should read like an explanation.
-  3. **No *****knowledge***** duplication** — avoid multiple spots that must change together;
+  3. **No *****knowledge***** duplication** — avoid multiple spots that must change together;
      identical code is only a smell when it hides duplicate *decisions*.
   4. **Minimal entities** — remove unnecessary indirection, classes, or parameters.
 * **Small, safe increments** — single‑reason commits; avoid speculative work (**YAGNI**).
 * **Tests are the executable spec** — red first, green always; test behaviour not implementation.
 * **Compose over inherit**; favour pure functions where practical, avoid side-effects.
-* **Functional core, imperative shell** — isolate pure business logic from I/O and side effects;
+* **Functional core, imperative shell** — isolate pure business logic from I/O and side effects;
   push mutations to the system boundaries, build mockable gateways at those boundaries.
 * **Psychological safety** — review code, not colleagues; critique ideas, not authors.
-* **Version‑control etiquette** — descriptive commit messages, branch from `main`, PRs require
+* **Version‑control etiquette** — descriptive commit messages, branch from `main`, PRs require
   green CI.
 
 # 2 - Python‑Specific Conventions
@@ -115,21 +115,21 @@ green CI.
 
 ## 2.2 Core Libraries
 
-Mandatory: pydantic, structlog, pytest, pytest-spec, pytest-cov, pytest-mock, flake8, black,
-pre‑commit, mkdocs‑material. Add new libs only when they eliminate **significant** boilerplate or
+Mandatory: pydantic, structlog, pytest, pytest-spec, pytest-cov, pytest-mock, flake8, black,
+pre‑commit, mkdocs‑material. Add new libs only when they eliminate **significant** boilerplate or
 risk.
 
 ## 2.3 Project Structure & Imports
 
 * **src‑layout**: code in `src/<package_name>/`; tests live beside code as `*_spec.py`.
-* Import order: 1) stdlib, 2) third‑party, 3) first‑party — each group alphabetised with a blank
+* Import order: 1) stdlib, 2) third‑party, 3) first‑party — each group alphabetised with a blank
   line.
 
 ## 2.4 Naming & Style
 
 * `snake_case` for functions & vars, `PascalCase` for classes, `UPPER_SNAKE` for constants.
 * Prefix intentionally unused vars/args with `_`.
-* **flake8** (with plugins) handles linting, and **black** auto‑formats code. Max line length
+* **flake8** (with plugins) handles linting, and **black** auto‑formats code. Max line length
   **100**.
 * Cyclomatic complexity cap: **10** (flake8 `C901`).
 * Use **f‑strings**; avoid magic numbers.
@@ -144,14 +144,14 @@ line.
 
 * Configure **structlog** for JSON output by default.
 * Never use `print` for diagnostics; reserve for user‑facing CLI UX.
-* Log levels: `DEBUG` (dev detail) → `INFO` (lifecycle) → `WARNING` (recoverable) → `ERROR` (user
+* Log levels: `DEBUG` (dev detail) → `INFO` (lifecycle) → `WARNING` (recoverable) → `ERROR` (user
   visible).
 
 ## 2.7 Testing Strategy
 
 * **pytest** with **pytest-spec** for specification-style output.
 * Test files end with `_spec.py` and live in the same folder as the code under test.
-* Use **Arrange / Act / Assert** blocks separated by a blank line (no comments) **or** BDD
+* Use **Arrange / Act / Assert** blocks separated by a blank line (no comments) **or** BDD
   `describe/should` classes.
 * Function names: use `should_*` and BDD-style specifications.
 * Class names: use `Describe*` and BDD-style test suites.
@@ -162,7 +162,7 @@ visible).
 # 3 - Planning and Goal Tracking
 
 - Use the provided task manager tools to create your plans and work through them step by step.
-- Before declaring yourself finished list all tasks, ensure they are all complete, and that you
+- Before declaring yourself finished list all tasks, ensure they are all complete, and that you
   have not missed any steps
 - If you've missed or forgotten some steps, add them to the task list and continue
 - When all tasks are complete, and you can think of no more to add, declare yourself finished.
````
````diff
@@ -4,7 +4,7 @@ from mojentic.llm.tools.current_datetime import CurrentDateTimeTool
 
 # Create an LLM broker with a specified model
 # You can change the model to any supported model
-llm = LLMBroker(model="
+llm = LLMBroker(model="qwen3:7b")  # Using the same model as in simple_tool.py
 
 # Create our custom tool
 datetime_tool = CurrentDateTimeTool()
@@ -26,8 +26,8 @@ from mojentic.llm.tools.tell_user_tool import TellUserTool
 
 # llm = LLMBroker(model="qwen3:30b-a3b-q4_K_M")
 # llm = LLMBroker(model="qwen3:32b")
-llm = LLMBroker(model="
-# llm = LLMBroker(model="
+llm = LLMBroker(model="qwen3:7b")
+# llm = LLMBroker(model="qwen3:72b")
 # llm = LLMBroker(model="o4-mini", gateway=OpenAIGateway(os.environ["OPENAI_API_KEY"]))
 message = LLMMessage(
     content="I want you to count from 1 to 10. Break that request down into individual tasks, track them using available tools, and perform them one by one until you're finished. Interrupt me to tell the user as you complete every task.")
@@ -52,11 +52,11 @@ This is an unfinished story about Ernie, the most adorable and colourful caterpi
 #
 
 
-# llm = LLMBroker("
-# llm = LLMBroker("
-# llm = LLMBroker("
-llm = LLMBroker("
-# llm = LLMBroker("
+# llm = LLMBroker("qwen3:32b")
+# llm = LLMBroker("qwen3:32b")
+# llm = LLMBroker("qwen3:7b")
+llm = LLMBroker("qwen3:7b")
+# llm = LLMBroker("qwen3:32b")
 # api_key = os.getenv("OPENAI_API_KEY")
 # gateway = OpenAIGateway(api_key)
 # llm = LLMBroker(model="gpt-4o-mini", gateway=gateway)
@@ -18,7 +18,7 @@ from mojentic.llm import LLMBroker
 def main():
     # Initialize the LLM broker with your preferred model
     # Uncomment one of the following lines or modify as needed:
-    # llm = LLMBroker(model="
+    # llm = LLMBroker(model="qwen3:32b")  # Ollama model
     # llm = LLMBroker(model="gpt-4o")  # OpenAI model
     llm = LLMBroker(model="qwq")  # Default model for example
 
@@ -6,7 +6,7 @@ from mojentic import Router, Dispatcher
 from mojentic.agents import OutputAgent
 from mojentic.llm import LLMBroker
 
-# llm = LLMBroker("
+# llm = LLMBroker("qwen3:32b")
 llm = LLMBroker("deepseek-r1:70b")
 thinking_agent = ThinkingAgent(llm)
 decisioning_agent = DecisioningAgent(llm)
@@ -21,7 +21,7 @@ async def demonstrate_async():
     3. Running multiple problem-solving tasks concurrently
     """
     # Initialize the LLM broker with your preferred model
-    # llm = LLMBroker(model="
+    # llm = LLMBroker(model="qwen3:32b")
     llm = LLMBroker(model="qwen3:30b-a3b-q4_K_M")
 
     # Create the agent with a maximum of 3 iterations
@@ -25,9 +25,9 @@ class RequestAgent(BaseLLMAgent):
 
 
 # llm = LLMBroker("deepseek-r1:70b")
-# llm = LLMBroker("
-llm = LLMBroker("
-# llm = LLMBroker("
+# llm = LLMBroker("qwen3:14b")
+llm = LLMBroker("qwen3:0.5b")
+# llm = LLMBroker("qwen3:7b", gateway=OllamaGateway(host="http://odin.local:11434"))
 request_agent = RequestAgent(llm)
 output_agent = OutputAgent()
 
@@ -32,7 +32,7 @@ class RequestAgent(BaseLLMAgent):
         return [ResponseEvent(source=type(self), correlation_id=event.correlation_id, capitol=response)]
 
 
-llm = LLMBroker("
+llm = LLMBroker("qwen3:14b")
 request_agent = RequestAgent(llm)
 output_agent = OutputAgent()
 
@@ -33,8 +33,8 @@ class RequestAgent(BaseLLMAgent):
 
 
 # llm = LLMBroker("deepseek-r1:70b")
-# llm = LLMBroker("
-llm = LLMBroker("
+# llm = LLMBroker("qwen3:32b")
+llm = LLMBroker("qwen3:7b")
 request_agent = RequestAgent(llm)
 output_agent = OutputAgent()
 
@@ -43,12 +43,12 @@ class IterativeProblemSolverTool(LLMTool):
 
 def main():
     # llm = LLMBroker(model="MFDoom/deepseek-r1-tool-calling:14b")
-    # llm = LLMBroker(model="
-    # llm = LLMBroker(model="
-    # llm = LLMBroker(model="
+    # llm = LLMBroker(model="qwen3:14b")
+    # llm = LLMBroker(model="qwen3:14b")
+    # llm = LLMBroker(model="qwen3:7b")
     llm = LLMBroker(model="qwq")
     # llm = LLMBroker(model="qwq:32b-fp16")
-    # llm = LLMBroker(model="
+    # llm = LLMBroker(model="qwen3:32b")
 
     tools = [
         ResolveDateTool(),
````
````diff
@@ -0,0 +1,49 @@
+import os
+from mojentic.llm.llm_broker import LLMBroker
+from mojentic.llm.gateways.models import LLMMessage
+from mojentic.llm.gateways.ollama import OllamaGateway
+from mojentic.llm.gateways.openai import OpenAIGateway
+from mojentic.llm.tools.date_resolver import ResolveDateTool
+
+
+def main():
+    """
+    Demonstrates streaming text generation with tool calling support.
+
+    This example shows how generate_stream() handles tool calls seamlessly:
+    1. Streams content as it arrives
+    2. Detects tool calls in the stream
+    3. Executes tools
+    4. Recursively streams the LLM's response after tool execution
+    """
+    gateway = OllamaGateway()
+    # gateway = OpenAIGateway(api_key=os.getenv("OPENAI_API_KEY"))
+    broker = LLMBroker(
+        model="qwen3:32b",
+        # model="gpt-5",
+        gateway=gateway
+    )
+
+    date_tool = ResolveDateTool()
+
+    print("Streaming response with tool calling enabled...\n")
+
+    stream = broker.generate_stream(
+        messages=[
+            LLMMessage(content="Tell me a short story about a dragon. In your story, reference several dates relative to today, "
+                               "like 'three days from now' or 'last week'.")
+        ],
+        tools=[date_tool],
+        temperature=0.7,
+        num_ctx=32768,
+        num_predict=-1
+    )
+
+    for chunk in stream:
+        print(chunk, end='', flush=True)
+
+    print("\n\nDone!")
+
+
+if __name__ == "__main__":
+    main()
````
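The docstring in this rewritten streaming example describes the loop that `generate_stream()` performs: stream content, detect tool calls, execute tools, then recursively stream the follow-up. A rough sketch of that flow, written from the docstring's description rather than from the shipped `llm_broker.py` code (the gateway method name `complete_stream` is assumed, and `run_tool` is a hypothetical helper):

```python
from typing import Iterator, List


def stream_with_tools(gateway, messages: List, tools: List) -> Iterator[str]:
    """Hypothetical outline of the documented flow; not the shipped implementation."""
    pending_calls = []
    for chunk in gateway.complete_stream(messages=messages, tools=tools):
        if chunk.content:            # 1. stream content as it arrives
            yield chunk.content
        if chunk.tool_calls:         # 2. detect tool calls in the stream
            pending_calls.extend(chunk.tool_calls)
    if pending_calls:
        for call in pending_calls:   # 3. execute tools, feed results back as messages
            messages.append(run_tool(call, tools))
        # 4. recursively stream the LLM's response after tool execution
        yield from stream_with_tools(gateway, messages, tools)


def run_tool(call, tools):
    """Hypothetical helper: dispatch one tool call and wrap its result as a message."""
    raise NotImplementedError("illustrative placeholder only")
```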
````diff
@@ -17,7 +17,7 @@ from mojentic.llm import LLMBroker
 def main():
     # Initialize the LLM broker with your preferred model
     # Uncomment one of the following lines or modify as needed:
-    # llm = LLMBroker(model="
+    # llm = LLMBroker(model="qwen3:32b")  # Ollama model
     # llm = LLMBroker(model="gpt-4o")  # OpenAI model
     llm = LLMBroker(model="qwq")  # Default model for example
 
@@ -1,8 +1,8 @@
 """
 Example script demonstrating the tracer system with ChatSession and tools.
 
-This example shows how to use the tracer system to monitor an interactive
-chat session with LLMBroker and tools. When the user exits the session,
+This example shows how to use the tracer system to monitor an interactive
+chat session with LLMBroker and tools. When the user exits the session,
 the script displays a summary of all traced events.
 
 It also demonstrates how correlation_id is used to trace related events
@@ -35,7 +35,7 @@ def main():
     tracer = TracerSystem()
 
     # Create an LLM broker with the tracer
-    llm_broker = LLMBroker(model="
+    llm_broker = LLMBroker(model="gpt-oss:20b", tracer=tracer)
 
     # Create a date resolver tool that will also use the tracer
     date_tool = ResolveDateTool(llm_broker=llm_broker, tracer=tracer)
````
````diff
@@ -1,4 +1,4 @@
-from typing import List, Iterator
+from typing import List, Iterator, Optional
 import structlog
 from ollama import Client, Options, ChatResponse
 from pydantic import BaseModel
@@ -10,8 +10,18 @@ from mojentic.llm.gateways.ollama_messages_adapter import adapt_messages_to_olla
 logger = structlog.get_logger()
 
 class StreamingResponse(BaseModel):
-    """
-
+    """
+    Wrapper for streaming response chunks.
+
+    Attributes
+    ----------
+    content : Optional[str]
+        Text content chunk from the LLM response.
+    tool_calls : Optional[List]
+        Tool calls from the LLM response (raw ollama format).
+    """
+    content: Optional[str] = None
+    tool_calls: Optional[List] = None
 
 class OllamaGateway(LLMGateway):
     """
````
````diff
@@ -144,28 +154,21 @@ class OllamaGateway(LLMGateway):
             'stream': True
         }
 
-        #
-
-
-        # when using tools. This limits our ability to explore streaming capabilities
-        # in the mojentic API, so I'm pausing this work for now until this is resolved.
-        # https://github.com/ollama/ollama/issues/7886
-        #
-
-        # if 'tools' in args and args['tools'] is not None:
-        #     ollama_args['tools'] = [t.descriptor for t in args['tools']]
+        # Enable tool support if tools are provided
+        if 'tools' in args and args['tools'] is not None:
+            ollama_args['tools'] = [t.descriptor for t in args['tools']]
 
         stream = self.client.chat(**ollama_args)
 
         for chunk in stream:
             if chunk.message:
+                # Yield content chunks as they arrive
                 if chunk.message.content:
                     yield StreamingResponse(content=chunk.message.content)
-
-                #
-
-
-                # )
+
+                # Yield tool calls when they arrive
+                if chunk.message.tool_calls:
+                    yield StreamingResponse(tool_calls=chunk.message.tool_calls)
 
     def get_available_models(self) -> List[str]:
         """
````
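With tool support enabled, the stream now interleaves text chunks and tool-call chunks, so a direct consumer of the gateway has to branch on which `StreamingResponse` field is set. A minimal consumer sketch (the `complete_stream` method name and its keyword arguments are assumptions based on the example code earlier in this diff; only the `StreamingResponse` fields are confirmed by the hunks above):

```python
from mojentic.llm.gateways.models import LLMMessage
from mojentic.llm.gateways.ollama import OllamaGateway
from mojentic.llm.tools.date_resolver import ResolveDateTool

gateway = OllamaGateway()
collected_tool_calls = []

# Assumed call signature, mirroring the streaming example earlier in this diff.
for chunk in gateway.complete_stream(
    model="qwen3:32b",
    messages=[LLMMessage(content="What is the date three days from now?")],
    tools=[ResolveDateTool()],
):
    if chunk.content:
        # Text arrives incrementally; render it as it streams in.
        print(chunk.content, end="", flush=True)
    if chunk.tool_calls:
        # Tool calls arrive in raw ollama format; collect them for execution.
        collected_tool_calls.extend(chunk.tool_calls)
```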