mojentic 0.8.4__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (106)
  1. _examples/async_dispatcher_example.py +12 -4
  2. _examples/async_llm_example.py +1 -2
  3. _examples/broker_as_tool.py +42 -17
  4. _examples/broker_examples.py +5 -7
  5. _examples/broker_image_examples.py +1 -1
  6. _examples/characterize_ollama.py +3 -3
  7. _examples/characterize_openai.py +1 -1
  8. _examples/chat_session.py +2 -2
  9. _examples/chat_session_with_tool.py +2 -2
  10. _examples/coding_file_tool.py +16 -18
  11. _examples/current_datetime_tool_example.py +2 -2
  12. _examples/embeddings.py +1 -1
  13. _examples/ephemeral_task_manager_example.py +15 -11
  14. _examples/fetch_openai_models.py +10 -3
  15. _examples/file_deduplication.py +6 -6
  16. _examples/file_tool.py +5 -5
  17. _examples/image_analysis.py +2 -3
  18. _examples/image_broker.py +1 -1
  19. _examples/image_broker_splat.py +1 -1
  20. _examples/iterative_solver.py +3 -3
  21. _examples/model_characterization.py +2 -0
  22. _examples/openai_gateway_enhanced_demo.py +15 -5
  23. _examples/raw.py +1 -1
  24. _examples/react/agents/decisioning_agent.py +173 -15
  25. _examples/react/agents/summarization_agent.py +89 -0
  26. _examples/react/agents/thinking_agent.py +84 -14
  27. _examples/react/agents/tool_call_agent.py +83 -0
  28. _examples/react/formatters.py +38 -4
  29. _examples/react/models/base.py +60 -11
  30. _examples/react/models/events.py +76 -8
  31. _examples/react.py +71 -21
  32. _examples/recursive_agent.py +2 -2
  33. _examples/simple_llm.py +3 -3
  34. _examples/simple_llm_repl.py +1 -1
  35. _examples/simple_structured.py +1 -1
  36. _examples/simple_tool.py +2 -2
  37. _examples/solver_chat_session.py +5 -11
  38. _examples/streaming.py +36 -18
  39. _examples/tell_user_example.py +4 -4
  40. _examples/tracer_demo.py +18 -20
  41. _examples/tracer_qt_viewer.py +49 -46
  42. _examples/working_memory.py +1 -1
  43. mojentic/__init__.py +3 -3
  44. mojentic/agents/__init__.py +26 -8
  45. mojentic/agents/{agent_broker.py → agent_event_adapter.py} +3 -3
  46. mojentic/agents/async_aggregator_agent_spec.py +32 -33
  47. mojentic/agents/async_llm_agent.py +9 -5
  48. mojentic/agents/async_llm_agent_spec.py +21 -22
  49. mojentic/agents/base_async_agent.py +2 -2
  50. mojentic/agents/base_llm_agent.py +6 -2
  51. mojentic/agents/iterative_problem_solver.py +11 -5
  52. mojentic/agents/simple_recursive_agent.py +11 -10
  53. mojentic/agents/simple_recursive_agent_spec.py +423 -0
  54. mojentic/async_dispatcher.py +0 -1
  55. mojentic/async_dispatcher_spec.py +1 -1
  56. mojentic/context/__init__.py +0 -2
  57. mojentic/dispatcher.py +7 -8
  58. mojentic/llm/__init__.py +5 -5
  59. mojentic/llm/gateways/__init__.py +19 -18
  60. mojentic/llm/gateways/anthropic.py +1 -0
  61. mojentic/llm/gateways/anthropic_messages_adapter.py +0 -1
  62. mojentic/llm/gateways/llm_gateway.py +1 -1
  63. mojentic/llm/gateways/ollama.py +23 -18
  64. mojentic/llm/gateways/openai.py +243 -44
  65. mojentic/llm/gateways/openai_message_adapter_spec.py +3 -3
  66. mojentic/llm/gateways/openai_model_registry.py +7 -6
  67. mojentic/llm/gateways/openai_model_registry_spec.py +1 -2
  68. mojentic/llm/gateways/openai_temperature_handling_spec.py +2 -2
  69. mojentic/llm/llm_broker.py +162 -2
  70. mojentic/llm/llm_broker_spec.py +76 -2
  71. mojentic/llm/message_composers.py +6 -3
  72. mojentic/llm/message_composers_spec.py +5 -1
  73. mojentic/llm/registry/__init__.py +0 -3
  74. mojentic/llm/registry/populate_registry_from_ollama.py +2 -2
  75. mojentic/llm/tools/__init__.py +0 -9
  76. mojentic/llm/tools/ask_user_tool.py +11 -5
  77. mojentic/llm/tools/current_datetime.py +9 -6
  78. mojentic/llm/tools/date_resolver.py +10 -4
  79. mojentic/llm/tools/date_resolver_spec.py +0 -1
  80. mojentic/llm/tools/ephemeral_task_manager/append_task_tool.py +4 -1
  81. mojentic/llm/tools/ephemeral_task_manager/ephemeral_task_list.py +1 -1
  82. mojentic/llm/tools/ephemeral_task_manager/insert_task_after_tool.py +4 -1
  83. mojentic/llm/tools/ephemeral_task_manager/prepend_task_tool.py +5 -2
  84. mojentic/llm/tools/file_manager.py +131 -28
  85. mojentic/llm/tools/file_manager_spec.py +0 -3
  86. mojentic/llm/tools/llm_tool.py +1 -1
  87. mojentic/llm/tools/llm_tool_spec.py +0 -2
  88. mojentic/llm/tools/organic_web_search.py +4 -2
  89. mojentic/llm/tools/tell_user_tool.py +6 -2
  90. mojentic/llm/tools/tool_wrapper.py +2 -2
  91. mojentic/tracer/__init__.py +1 -10
  92. mojentic/tracer/event_store.py +7 -8
  93. mojentic/tracer/event_store_spec.py +1 -2
  94. mojentic/tracer/null_tracer.py +37 -43
  95. mojentic/tracer/tracer_events.py +8 -2
  96. mojentic/tracer/tracer_events_spec.py +6 -7
  97. mojentic/tracer/tracer_system.py +37 -36
  98. mojentic/tracer/tracer_system_spec.py +21 -6
  99. mojentic/utils/__init__.py +1 -1
  100. mojentic/utils/formatting.py +1 -0
  101. {mojentic-0.8.4.dist-info → mojentic-1.0.0.dist-info}/METADATA +76 -27
  102. mojentic-1.0.0.dist-info/RECORD +149 -0
  103. mojentic-0.8.4.dist-info/RECORD +0 -146
  104. {mojentic-0.8.4.dist-info → mojentic-1.0.0.dist-info}/WHEEL +0 -0
  105. {mojentic-0.8.4.dist-info → mojentic-1.0.0.dist-info}/licenses/LICENSE.md +0 -0
  106. {mojentic-0.8.4.dist-info → mojentic-1.0.0.dist-info}/top_level.txt +0 -0
--- a/mojentic/llm/gateways/openai_model_registry_spec.py
+++ b/mojentic/llm/gateways/openai_model_registry_spec.py
@@ -2,7 +2,6 @@
 Tests for the OpenAI Model Registry system.
 """
 
-import pytest
 from mojentic.llm.gateways.openai_model_registry import (
     OpenAIModelRegistry,
     ModelType,
@@ -178,4 +177,4 @@ class DescribeOpenAIModelRegistry:
         assert ModelType.REASONING.value == "reasoning"
         assert ModelType.CHAT.value == "chat"
         assert ModelType.EMBEDDING.value == "embedding"
-        assert ModelType.MODERATION.value == "moderation"
+        assert ModelType.MODERATION.value == "moderation"
--- a/mojentic/llm/gateways/openai_temperature_handling_spec.py
+++ b/mojentic/llm/gateways/openai_temperature_handling_spec.py
@@ -3,7 +3,7 @@ from unittest.mock import MagicMock
 
 from mojentic.llm.gateways.openai import OpenAIGateway
 from mojentic.llm.gateways.openai_model_registry import get_model_registry
-from mojentic.llm.gateways.models import LLMMessage, MessageRole, LLMGatewayResponse
+from mojentic.llm.gateways.models import LLMMessage, MessageRole
 
 
 @pytest.fixture
@@ -242,4 +242,4 @@ class DescribeModelCapabilitiesTemperatureRestrictions:
         capabilities = registry.get_model_capabilities(model)
         assert capabilities.supports_temperature(1.0) is True
         assert capabilities.supports_temperature(0.1) is False
-        assert capabilities.supported_temperatures == [1.0]
+        assert capabilities.supported_temperatures == [1.0]
--- a/mojentic/llm/llm_broker.py
+++ b/mojentic/llm/llm_broker.py
@@ -1,12 +1,12 @@
 import json
 import time
-from typing import List, Optional, Type
+from typing import List, Optional, Type, Iterator
 
 import structlog
 from pydantic import BaseModel
 
 from mojentic.llm.gateways.llm_gateway import LLMGateway
-from mojentic.llm.gateways.models import MessageRole, LLMMessage, LLMGatewayResponse
+from mojentic.llm.gateways.models import MessageRole, LLMMessage, LLMGatewayResponse, LLMToolCall
 from mojentic.llm.gateways.ollama import OllamaGateway
 from mojentic.llm.gateways.tokenizer_gateway import TokenizerGateway
 from mojentic.tracer.tracer_system import TracerSystem
@@ -182,6 +182,166 @@ class LLMBroker():
 
         return result.content
 
+    def generate_stream(self, messages: List[LLMMessage], tools=None, temperature=1.0, num_ctx=32768,
+                        num_predict=-1, max_tokens=16384,
+                        correlation_id: str = None) -> Iterator[str]:
+        """
+        Generate a streaming text response from the LLM.
+
+        This method mirrors generate() but yields content chunks as they arrive from the LLM,
+        providing a better user experience for long-running requests. When tool calls are
+        detected, tools are executed and the LLM is called recursively, with the new response
+        also being streamed.
+
+        Parameters
+        ----------
+        messages : List[LLMMessage]
+            A list of messages to send to the LLM.
+        tools : List[Tool]
+            A list of tools to use with the LLM. If a tool call is requested, the tool will be
+            called and the output will be included in the response.
+        temperature : float
+            The temperature to use for the response. Defaults to 1.0
+        num_ctx : int
+            The number of context tokens to use. Defaults to 32768.
+        num_predict : int
+            The number of tokens to predict. Defaults to no limit.
+        max_tokens : int
+            The maximum number of tokens to generate. Defaults to 16384.
+        correlation_id : str
+            UUID string that is copied from cause-to-affect for tracing events.
+
+        Yields
+        ------
+        str
+            Content chunks as they arrive from the LLM.
+        """
+        # Check if gateway supports streaming
+        if not hasattr(self.adapter, 'complete_stream'):
+            raise NotImplementedError(f"Gateway {type(self.adapter).__name__} does not support streaming")
+
+        approximate_tokens = len(self.tokenizer.encode(self._content_to_count(messages)))
+        logger.info(f"Requesting streaming llm response with approx {approximate_tokens} tokens")
+
+        # Convert messages to serializable dict for audit
+        messages_for_tracer = [m.model_dump() for m in messages]
+
+        # Record LLM call in tracer
+        tools_for_tracer = [{"name": t.name, "description": t.description} for t in
+                            tools] if tools else None
+        self.tracer.record_llm_call(
+            self.model,
+            messages_for_tracer,
+            temperature,
+            tools=tools_for_tracer,
+            source=type(self),
+            correlation_id=correlation_id
+        )
+
+        # Measure call duration for audit
+        start_time = time.time()
+
+        # Accumulate content and tool calls from stream
+        accumulated_content = ""
+        accumulated_tool_calls = []
+
+        stream = self.adapter.complete_stream(
+            model=self.model,
+            messages=messages,
+            tools=tools,
+            temperature=temperature,
+            num_ctx=num_ctx,
+            num_predict=num_predict,
+            max_tokens=max_tokens
+        )
+
+        for chunk in stream:
+            # Handle content chunks
+            if hasattr(chunk, 'content') and chunk.content:
+                accumulated_content += chunk.content
+                yield chunk.content
+
+            # Handle tool calls if present
+            if hasattr(chunk, 'tool_calls') and chunk.tool_calls:
+                accumulated_tool_calls.extend(chunk.tool_calls)
+
+        call_duration_ms = (time.time() - start_time) * 1000
+
+        # Record LLM response in tracer
+        tool_calls_for_tracer = [tc.model_dump() if hasattr(tc, 'model_dump') else tc for tc in
+                                 accumulated_tool_calls] if accumulated_tool_calls else None
+        self.tracer.record_llm_response(
+            self.model,
+            accumulated_content,
+            tool_calls=tool_calls_for_tracer,
+            call_duration_ms=call_duration_ms,
+            source=type(self),
+            correlation_id=correlation_id
+        )
+
+        # Process tool calls if any were accumulated
+        if accumulated_tool_calls and tools is not None:
+            logger.info("Tool call requested in streaming response")
+            for tool_call in accumulated_tool_calls:
+                # Handle both LLMToolCall objects and raw tool call data
+                if hasattr(tool_call, 'name'):
+                    tool_name = tool_call.name
+                    tool_arguments = tool_call.arguments
+                else:
+                    # Handle ollama's tool call format
+                    tool_name = tool_call.function.name
+                    tool_arguments = tool_call.function.arguments
+
+                if tool := next((t for t in tools if t.matches(tool_name)), None):
+                    logger.info('Calling function', function=tool_name)
+                    logger.info('Arguments:', arguments=tool_arguments)
+
+                    # Measure tool execution time
+                    tool_start_time = time.time()
+
+                    # Call the tool
+                    output = tool.run(**tool_arguments)
+
+                    tool_duration_ms = (time.time() - tool_start_time) * 1000
+
+                    # Record tool call in tracer
+                    self.tracer.record_tool_call(
+                        tool_name,
+                        tool_arguments,
+                        output,
+                        caller="LLMBroker.generate_stream",
+                        call_duration_ms=tool_duration_ms,
+                        source=type(self),
+                        correlation_id=correlation_id
+                    )
+
+                    logger.info('Function output', output=output)
+
+                    # Convert to LLMToolCall if needed, preserving the ID if it exists
+                    if not isinstance(tool_call, LLMToolCall):
+                        # Extract ID if available from the tool_call object
+                        tool_call_id = None
+                        if hasattr(tool_call, 'id'):
+                            tool_call_id = tool_call.id
+                        elif hasattr(tool_call, 'function') and hasattr(tool_call.function, 'id'):
+                            tool_call_id = tool_call.function.id
+
+                        tool_call = LLMToolCall(id=tool_call_id, name=tool_name, arguments=tool_arguments)
+
+                    messages.append(LLMMessage(role=MessageRole.Assistant, tool_calls=[tool_call]))
+                    messages.append(
+                        LLMMessage(role=MessageRole.Tool, content=json.dumps(output),
+                                   tool_calls=[tool_call]))
+
+                    # Recursively stream the response after tool execution
+                    yield from self.generate_stream(
+                        messages, tools, temperature, num_ctx, num_predict,
+                        max_tokens, correlation_id=correlation_id
+                    )
+                    return  # Exit after recursive call
+                else:
+                    logger.warn('Function not found', function=tool_name)
+
     def _content_to_count(self, messages: List[LLMMessage]):
         content = ""
         for message in messages:
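
The hunk above adds streaming to LLMBroker. As a hedged orientation sketch (not part of the diff), here is how a caller might consume generate_stream(); the broker constructor arguments and the model name are assumptions, while the method signature, LLMMessage, and MessageRole come from the code above:

from mojentic.llm.gateways.models import LLMMessage, MessageRole
from mojentic.llm.gateways.ollama import OllamaGateway
from mojentic.llm.llm_broker import LLMBroker

# Hypothetical setup: constructor arguments are assumed, not shown in this diff.
broker = LLMBroker(model="llama3.1", gateway=OllamaGateway())

messages = [LLMMessage(role=MessageRole.User, content="Tell me a story")]

# generate_stream() yields content chunks as they arrive; if the model requests a
# tool call, the tool runs and the follow-up response streams through the same iterator.
for chunk in broker.generate_stream(messages):
    print(chunk, end="", flush=True)
print()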

--- a/mojentic/llm/llm_broker_spec.py
+++ b/mojentic/llm/llm_broker_spec.py
@@ -1,4 +1,3 @@
-from unittest.mock import MagicMock
 
 import pytest
 from pydantic import BaseModel
@@ -11,10 +10,12 @@ class SimpleModel(BaseModel):
     text: str
     number: int
 
+
 class NestedModel(BaseModel):
     title: str
     details: SimpleModel
 
+
 class ComplexModel(BaseModel):
     name: str
     items: list[SimpleModel]
@@ -120,7 +121,11 @@ class DescribeLLMBroker:
             metadata={"key1": "value1", "key2": "value2"}
         )
         mock_gateway.complete.return_value = LLMGatewayResponse(
-            content='{"name": "test", "items": [{"text": "item1", "number": 1}, {"text": "item2", "number": 2}], "metadata": {"key1": "value1", "key2": "value2"}}',
+            content=(
+                '{"name": "test", "items": [{"text": "item1", "number": 1}, '
+                '{"text": "item2", "number": 2}], '
+                '"metadata": {"key1": "value1", "key2": "value2"}}'
+            ),
             object=mock_object,
             tool_calls=[]
         )
@@ -135,3 +140,72 @@ class DescribeLLMBroker:
         assert result.items[1].number == 2
         assert result.metadata == {"key1": "value1", "key2": "value2"}
         mock_gateway.complete.assert_called_once()
+
+    class DescribeStreamingGeneration:
+
+        def should_stream_simple_response(self, llm_broker, mock_gateway, mocker):
+            from mojentic.llm.gateways.ollama import StreamingResponse
+
+            messages = [LLMMessage(role=MessageRole.User, content="Tell me a story")]
+
+            # Mock the complete_stream method to yield chunks
+            mock_gateway.complete_stream = mocker.MagicMock()
+            mock_gateway.complete_stream.return_value = iter([
+                StreamingResponse(content="Once "),
+                StreamingResponse(content="upon "),
+                StreamingResponse(content="a "),
+                StreamingResponse(content="time...")
+            ])
+
+            result_chunks = list(llm_broker.generate_stream(messages))
+
+            assert result_chunks == ["Once ", "upon ", "a ", "time..."]
+            mock_gateway.complete_stream.assert_called_once()
+
+        def should_handle_tool_calls_during_streaming(self, llm_broker, mock_gateway, mocker):
+            from mojentic.llm.gateways.ollama import StreamingResponse
+
+            messages = [LLMMessage(role=MessageRole.User, content="What is the date on Friday?")]
+            tool_call = mocker.create_autospec(LLMToolCall, instance=True)
+            tool_call.name = "resolve_date"
+            tool_call.arguments = {"date": "Friday"}
+
+            # First stream has tool call, second stream has the response after tool execution
+            mock_gateway.complete_stream = mocker.MagicMock()
+            mock_gateway.complete_stream.side_effect = [
+                iter([
+                    StreamingResponse(content="Let "),
+                    StreamingResponse(content="me "),
+                    StreamingResponse(content="check..."),
+                    StreamingResponse(tool_calls=[tool_call])
+                ]),
+                iter([
+                    StreamingResponse(content="The "),
+                    StreamingResponse(content="date "),
+                    StreamingResponse(content="is "),
+                    StreamingResponse(content="2024-11-15")
+                ])
+            ]
+
+            mock_tool = mocker.MagicMock()
+            mock_tool.matches.return_value = True
+            mock_tool.run.return_value = {"resolved_date": "2024-11-15"}
+
+            result_chunks = list(llm_broker.generate_stream(messages, tools=[mock_tool]))
+
+            # Should get chunks from first response, then chunks from second response after tool execution
+            assert result_chunks == ["Let ", "me ", "check...", "The ", "date ", "is ", "2024-11-15"]
+            assert mock_gateway.complete_stream.call_count == 2
+            mock_tool.run.assert_called_once_with(date="Friday")
+
+        def should_raise_error_if_gateway_does_not_support_streaming(self, llm_broker, mock_gateway):
+            messages = [LLMMessage(role=MessageRole.User, content="Hello")]
+
+            # Remove complete_stream method to simulate unsupported gateway
+            if hasattr(mock_gateway, 'complete_stream'):
+                delattr(mock_gateway, 'complete_stream')
+
+            with pytest.raises(NotImplementedError) as exc_info:
+                list(llm_broker.generate_stream(messages))
+
+            assert "does not support streaming" in str(exc_info.value)

--- a/mojentic/llm/message_composers.py
+++ b/mojentic/llm/message_composers.py
@@ -12,7 +12,7 @@ class FileTypeSensor:
         """
         Initialize the TypeSensor with a default mapping of file extensions to language declarations.
 
-        The TypeSensor is used to determine the appropriate language syntax highlighting 
+        The TypeSensor is used to determine the appropriate language syntax highlighting
         for code blocks in markdown based on file extensions.
         """
         self.extension_map: Dict[str, str] = {
@@ -129,7 +129,6 @@ class MessageBuilder():
                 f"{content.strip()}\n"
                 f"```\n")
 
-
     def add_image(self, image_path: Union[str, Path]) -> "MessageBuilder":
         """
         Add a single image to the message.
@@ -253,7 +252,11 @@ class MessageBuilder():
 
         return self
 
-    def load_content(self, file_path: Union[str, Path], template_values: Optional[Dict[str, Union[str, Path]]] = None) -> "MessageBuilder":
+    def load_content(
+        self,
+        file_path: Union[str, Path],
+        template_values: Optional[Dict[str, Union[str, Path]]] = None
+    ) -> "MessageBuilder":
         """
         Load content from a file into the content field of the MessageBuilder.
 
--- a/mojentic/llm/message_composers_spec.py
+++ b/mojentic/llm/message_composers_spec.py
@@ -14,10 +14,12 @@ def file_gateway(mocker):
     file_gateway.is_binary.return_value = False
     return file_gateway
 
+
 @pytest.fixture
 def file_path():
     return Path("/path/to/file.txt")
 
+
 @pytest.fixture
 def whitespace_file_content():
     return "\n\n \n test file content with whitespace \n\n \n"
@@ -122,7 +124,9 @@ class DescribeMessageBuilder:
         assert "test file content" in result
         assert "```" in result
 
-    def should_strip_whitespace_from_file_content(self, message_builder, file_gateway, file_path, whitespace_file_content, mocker):
+    def should_strip_whitespace_from_file_content(
+        self, message_builder, file_gateway, file_path, whitespace_file_content, mocker
+    ):
         # Use the fixtures instead of creating file path and content directly
         file_gateway.read.return_value = whitespace_file_content
         mocker.patch.object(message_builder.type_sensor, 'get_language', return_value='text')
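
As a hedged sketch of the reflowed load_content() signature in use (not part of the diff; the MessageBuilder constructor call and the template-substitution semantics are assumptions, while the method signatures and the fluent return of self appear in the hunks above):

from pathlib import Path
from mojentic.llm.message_composers import MessageBuilder

builder = MessageBuilder()  # construction details are assumed; not shown in this diff
builder.load_content(
    Path("prompts/review.md"),                # file text becomes the message content
    template_values={"project": "mojentic"},  # hypothetical placeholder substitution
).add_image(Path("diagrams/overview.png"))    # chaining works because both methods return self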

--- a/mojentic/llm/registry/__init__.py
+++ b/mojentic/llm/registry/__init__.py
@@ -1,6 +1,3 @@
 """
 Mojentic LLM registry module for managing model registrations.
 """
-
-from .llm_registry import LLMRegistry
-from .models import ModelInfo, Modality, Quantization

--- a/mojentic/llm/registry/populate_registry_from_ollama.py
+++ b/mojentic/llm/registry/populate_registry_from_ollama.py
@@ -24,9 +24,9 @@ def register_llms_from_ollama(url: str, registry: LLMRegistry):
     #     'quantization_level': 'Q4_K_M'
     #   },
     #   'digest': '4bd6cbf2d094264457a17aab6bd6acd1ed7a72fb8f8be3cfb193f63c78dd56df',
-    #   'model': 'qwen2.5-coder:32b',
+    #   'model': 'qwen3-coder:32b',
     #   'modified_at': '2025-01-29T22:37:29.191797577-05:00',
-    #   'name': 'qwen2.5-coder:32b',
+    #   'name': 'qwen3-coder:32b',
     #   'size': 19851349856
     # }
 
--- a/mojentic/llm/tools/__init__.py
+++ b/mojentic/llm/tools/__init__.py
@@ -3,16 +3,7 @@ Mojentic LLM tools module for extending LLM capabilities.
 """
 
 # Base tool class
-from .llm_tool import LLMTool
-from .tool_wrapper import ToolWrapper
 
 # Common tools
-from .ask_user_tool import AskUserTool
-from .current_datetime import CurrentDateTimeTool
-from .date_resolver import ResolveDateTool
-from .organic_web_search import OrganicWebSearchTool
-from .tell_user_tool import TellUserTool
 
 # Import tool modules
-from . import file_manager
-from . import ephemeral_task_manager

--- a/mojentic/llm/tools/ask_user_tool.py
+++ b/mojentic/llm/tools/ask_user_tool.py
@@ -3,8 +3,8 @@ from mojentic.llm.tools.llm_tool import LLMTool
 
 class AskUserTool(LLMTool):
     def run(self, user_request: str) -> str:
-        print(f"\n\n\nI NEED YOUR HELP!\n{user_request}")
-        return input(f"Your response: ")
+        print("\n\n\nI NEED YOUR HELP!\n{user_request}")
+        return input("Your response: ")
 
     @property
     def descriptor(self):
@@ -12,16 +12,22 @@ class AskUserTool(LLMTool):
             "type": "function",
             "function": {
                 "name": "ask_user",
-                "description": "If you do not know how to proceed, ask the user a question, or ask them for help or to do something for you.",
+                "description": (
+                    "If you do not know how to proceed, ask the user a question, or ask them for "
+                    "help or to do something for you."
+                ),
                 "parameters": {
                     "type": "object",
                     "properties": {
                         "user_request": {
                             "type": "string",
-                            "description": "The question you need the user to answer, or the task you need the user to do for you."
+                            "description": (
+                                "The question you need the user to answer, or the task you need the user to "
+                                "do for you."
+                            )
                         }
                     },
                     "required": ["user_request"]
                },
            },
-        }
+        }
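
All of the descriptor reflows in this diff share the same OpenAI-style function schema. As a hedged reference sketch (not part of the diff), a hypothetical tool following that pattern; only the run()/descriptor contract is taken from AskUserTool above:

from mojentic.llm.tools.llm_tool import LLMTool


class EchoTool(LLMTool):  # hypothetical example tool, not part of the package
    def run(self, text: str) -> str:
        # Tools return plain values; LLMBroker JSON-encodes tool output into a Tool message.
        return text

    @property
    def descriptor(self):
        return {
            "type": "function",
            "function": {
                "name": "echo",
                "description": "Repeat the provided text back to the caller.",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "text": {
                            "type": "string",
                            "description": "The text to echo back."
                        }
                    },
                    "required": ["text"]
                },
            },
        }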

--- a/mojentic/llm/tools/current_datetime.py
+++ b/mojentic/llm/tools/current_datetime.py
@@ -6,12 +6,12 @@ class CurrentDateTimeTool(LLMTool):
     def run(self, format_string: str = "%Y-%m-%d %H:%M:%S") -> dict:
         """
         Returns the current date and time.
-        
+
         Parameters
         ----------
         format_string : str, optional
             The format string for the datetime, by default "%Y-%m-%d %H:%M:%S"
-        
+
         Returns
         -------
         dict
@@ -19,13 +19,13 @@ class CurrentDateTimeTool(LLMTool):
         """
         current_time = datetime.now()
         formatted_time = current_time.strftime(format_string)
-        
+
         return {
             "current_datetime": formatted_time,
             "timestamp": current_time.timestamp(),
             "timezone": datetime.now().astimezone().tzname()
         }
-    
+
     @property
     def descriptor(self):
         return {
@@ -38,10 +38,13 @@ class CurrentDateTimeTool(LLMTool):
                 "properties": {
                     "format_string": {
                         "type": "string",
-                        "description": "Format string for the datetime (e.g., '%Y-%m-%d %H:%M:%S', '%A, %B %d, %Y'). Default is ISO format."
+                        "description": (
+                            "Format string for the datetime (e.g., '%Y-%m-%d %H:%M:%S', '%A, %B %d, %Y'). "
+                            "Default is ISO format."
+                        )
                    }
                },
                "required": []
            }
        }
-    }
+    }

--- a/mojentic/llm/tools/date_resolver.py
+++ b/mojentic/llm/tools/date_resolver.py
@@ -7,7 +7,7 @@ from mojentic.llm.tools.llm_tool import LLMTool
 
 # Avoid circular imports with TYPE_CHECKING
 if TYPE_CHECKING:
-    from mojentic.llm.llm_broker import LLMBroker
+    pass
 
 
 class ResolveDateTool(LLMTool):
@@ -34,7 +34,10 @@ class ResolveDateTool(LLMTool):
             "type": "function",
             "function": {
                 "name": "resolve_date",
-                "description": "Take text that specifies a relative date, and output an absolute date. If no reference date is available, the current date is used.",
+                "description": (
+                    "Take text that specifies a relative date, and output an absolute date. If no "
+                    "reference date is available, the current date is used."
+                ),
                 "parameters": {
                     "type": "object",
                     "properties": {
@@ -44,8 +47,11 @@ class ResolveDateTool(LLMTool):
                         },
                         "reference_date_in_iso8601": {
                             "type": "string",
-                            "description": "The date from which the resolved date should be calculated, in YYYY-MM-DD"
-                                           " format. Do not provide if you weren't provided one, I will assume the current date."
+                            "description": (
+                                "The date from which the resolved date should be calculated, in YYYY-MM-DD "
+                                "format. Do not provide if you weren't provided one, I will assume the "
+                                "current date."
+                            )
                         }
                     },
                     "additionalProperties": False,

--- a/mojentic/llm/tools/date_resolver_spec.py
+++ b/mojentic/llm/tools/date_resolver_spec.py
@@ -20,7 +20,6 @@ class DescribeResolveDateTool:
         assert result["relative_date"] == "next Friday"
         assert "resolved_date" in result
 
-
     def should_resolve_date_with_reference(self, date_resolver):
         """
         Given a date resolver

--- a/mojentic/llm/tools/ephemeral_task_manager/append_task_tool.py
+++ b/mojentic/llm/tools/ephemeral_task_manager/append_task_tool.py
@@ -61,7 +61,10 @@ class AppendTaskTool(LLMTool):
             "type": "function",
             "function": {
                 "name": "append_task",
-                "description": "Append a new task to the end of the task list with a description. The task will start with 'pending' status.",
+                "description": (
+                    "Append a new task to the end of the task list with a description. The task will "
+                    "start with 'pending' status."
+                ),
                 "parameters": {
                     "type": "object",
                     "properties": {

--- a/mojentic/llm/tools/ephemeral_task_manager/ephemeral_task_list.py
+++ b/mojentic/llm/tools/ephemeral_task_manager/ephemeral_task_list.py
@@ -5,7 +5,7 @@ This module provides a class for managing a list of tasks with state transitions
 """
 
 from enum import Enum
-from typing import List, Optional
+from typing import List
 from pydantic import BaseModel
 
 
--- a/mojentic/llm/tools/ephemeral_task_manager/insert_task_after_tool.py
+++ b/mojentic/llm/tools/ephemeral_task_manager/insert_task_after_tool.py
@@ -64,7 +64,10 @@ class InsertTaskAfterTool(LLMTool):
             "type": "function",
             "function": {
                 "name": "insert_task_after",
-                "description": "Insert a new task after an existing task in the task list. The task will start with 'pending' status.",
+                "description": (
+                    "Insert a new task after an existing task in the task list. The task will start with "
+                    "'pending' status."
+                ),
                 "parameters": {
                     "type": "object",
                     "properties": {

--- a/mojentic/llm/tools/ephemeral_task_manager/prepend_task_tool.py
+++ b/mojentic/llm/tools/ephemeral_task_manager/prepend_task_tool.py
@@ -61,7 +61,10 @@ class PrependTaskTool(LLMTool):
             "type": "function",
             "function": {
                 "name": "prepend_task",
-                "description": "Prepend a new task to the beginning of the task list with a description. The task will start with 'pending' status.",
+                "description": (
+                    "Prepend a new task to the beginning of the task list with a description. The task "
+                    "will start with 'pending' status."
+                ),
                 "parameters": {
                     "type": "object",
                     "properties": {
@@ -74,4 +77,4 @@ class PrependTaskTool(LLMTool):
                     "additionalProperties": False
                 }
             }
-        }
+        }