mojentic 0.5.3__py3-none-any.whl → 0.5.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. _examples/broker_examples.py +12 -22
  2. _examples/broker_image_examples.py +41 -0
  3. _examples/ephemeral_task_manager_example.py +48 -0
  4. _examples/oversized_embeddings.py +9 -0
  5. _examples/streaming.py +1 -1
  6. _examples/tell_user_example.py +43 -0
  7. mojentic/agents/iterative_problem_solver.py +5 -2
  8. mojentic/agents/simple_recursive_agent.py +2 -2
  9. mojentic/llm/gateways/openai.py +29 -6
  10. mojentic/llm/llm_broker_spec.py +0 -49
  11. mojentic/llm/message_composers_spec.py +0 -80
  12. mojentic/llm/tools/ask_user_tool.py +1 -1
  13. mojentic/llm/tools/ephemeral_task_manager/__init__.py +27 -0
  14. mojentic/llm/tools/ephemeral_task_manager/append_task_tool.py +77 -0
  15. mojentic/llm/tools/ephemeral_task_manager/append_task_tool_spec.py +34 -0
  16. mojentic/llm/tools/ephemeral_task_manager/clear_tasks_tool.py +57 -0
  17. mojentic/llm/tools/ephemeral_task_manager/clear_tasks_tool_spec.py +32 -0
  18. mojentic/llm/tools/ephemeral_task_manager/complete_task_tool.py +81 -0
  19. mojentic/llm/tools/ephemeral_task_manager/complete_task_tool_spec.py +43 -0
  20. mojentic/llm/tools/ephemeral_task_manager/ephemeral_task_list.py +202 -0
  21. mojentic/llm/tools/ephemeral_task_manager/ephemeral_task_list_spec.py +137 -0
  22. mojentic/llm/tools/ephemeral_task_manager/insert_task_after_tool.py +84 -0
  23. mojentic/llm/tools/ephemeral_task_manager/insert_task_after_tool_spec.py +42 -0
  24. mojentic/llm/tools/ephemeral_task_manager/list_tasks_tool.py +80 -0
  25. mojentic/llm/tools/ephemeral_task_manager/list_tasks_tool_spec.py +38 -0
  26. mojentic/llm/tools/ephemeral_task_manager/prepend_task_tool.py +77 -0
  27. mojentic/llm/tools/ephemeral_task_manager/prepend_task_tool_spec.py +34 -0
  28. mojentic/llm/tools/ephemeral_task_manager/start_task_tool.py +81 -0
  29. mojentic/llm/tools/ephemeral_task_manager/start_task_tool_spec.py +43 -0
  30. mojentic/llm/tools/organic_web_search.py +37 -0
  31. mojentic/llm/tools/tell_user_tool.py +27 -0
  32. {mojentic-0.5.3.dist-info → mojentic-0.5.5.dist-info}/METADATA +2 -1
  33. {mojentic-0.5.3.dist-info → mojentic-0.5.5.dist-info}/RECORD +36 -14
  34. {mojentic-0.5.3.dist-info → mojentic-0.5.5.dist-info}/WHEEL +1 -1
  35. mojentic/llm/tools/web_search.py +0 -35
  36. {mojentic-0.5.3.dist-info → mojentic-0.5.5.dist-info}/licenses/LICENSE.md +0 -0
  37. {mojentic-0.5.3.dist-info → mojentic-0.5.5.dist-info}/top_level.txt +0 -0
@@ -1,6 +1,9 @@
1
1
  import logging
2
2
  import os
3
3
 
4
+ from mojentic.llm import LLMBroker
5
+ from mojentic.llm.gateways import OpenAIGateway
6
+
4
7
  logging.basicConfig(level=logging.WARN)
5
8
 
6
9
  from pathlib import Path
@@ -8,8 +11,6 @@ from pathlib import Path
8
11
  from pydantic import BaseModel, Field
9
12
 
10
13
  from mojentic.llm.gateways.models import LLMMessage
11
- from mojentic.llm.gateways.openai import OpenAIGateway
12
- from mojentic.llm.llm_broker import LLMBroker
13
14
  from mojentic.llm.tools.date_resolver import ResolveDateTool
14
15
 
15
16
 
@@ -43,6 +44,7 @@ def check_tool_use(llm):
43
44
  tools=[ResolveDateTool()])
44
45
  print(result)
45
46
 
47
+
46
48
  def check_image_analysis(llm, image_path: Path = None):
47
49
  if image_path is None:
48
50
  image_path = Path.cwd() / 'images' / 'flash_rom.jpg'
@@ -52,25 +54,13 @@ def check_image_analysis(llm, image_path: Path = None):
52
54
  ])
53
55
  print(result)
54
56
 
55
- models = ["gpt-4o", "gpt-4.1", "o3", "gpt-4.5-preview", "o4-mini"]
56
- images = [
57
- Path.cwd() / 'images' / 'flash_rom.jpg',
58
- Path.cwd() / 'images' / 'screen_cap.png',
59
- Path.cwd() / 'images' / 'xbox-one.jpg',
60
- ]
61
-
62
- for image in images:
63
- for model in models:
64
- print(f"Checking {model} with {str(image)}")
65
- check_image_analysis(openai_llm(model=model), image)
66
-
67
57
 
68
- # check_simple_textgen(openai_llm(model="o4-mini"))
69
- # check_structured_output(openai_llm(model="o4-mini"))
70
- # check_tool_use(openai_llm(model="o4-mini"))
71
- # check_image_analysis(openai_llm(model="gpt-4o"))
58
+ check_simple_textgen(openai_llm(model="o4-mini"))
59
+ check_structured_output(openai_llm(model="o4-mini"))
60
+ check_tool_use(openai_llm(model="o4-mini"))
61
+ check_image_analysis(openai_llm(model="gpt-4o"))
72
62
 
73
- # check_simple_textgen(ollama_llm())
74
- # check_structured_output(ollama_llm())
75
- # check_tool_use(ollama_llm())
76
- # check_image_analysis(ollama_llm(model="gemma3:27b"))
63
+ check_simple_textgen(ollama_llm())
64
+ check_structured_output(ollama_llm())
65
+ check_tool_use(ollama_llm(model="qwen3:32b"))
66
+ check_image_analysis(ollama_llm(model="gemma3:27b"))
@@ -0,0 +1,41 @@
1
+ import os
2
+ from pathlib import Path
3
+
4
+ from mojentic.llm import LLMBroker
5
+ from mojentic.llm.gateways import OpenAIGateway
6
+ from mojentic.llm.gateways.models import LLMMessage
7
+
8
+
9
+ def openai_llm(model="gpt-4o"):
10
+ api_key = os.getenv("OPENAI_API_KEY")
11
+ gateway = OpenAIGateway(api_key)
12
+ llm = LLMBroker(model=model, gateway=gateway)
13
+ return llm
14
+
15
+
16
+ def ollama_llm(model="llama3.3-70b-32k"):
17
+ llm = LLMBroker(model=model)
18
+ return llm
19
+
20
+
21
+ def check_image_analysis(llm, image_path: Path = None):
22
+ if image_path is None:
23
+ image_path = Path.cwd() / 'images' / 'flash_rom.jpg'
24
+ result = llm.generate(messages=[
25
+ (LLMMessage(content='What is in this image?',
26
+ image_paths=[str(image_path)]))
27
+ ])
28
+ print(result)
29
+
30
+
31
+ models = ["gpt-4o", "gpt-4.1", "o3", "gpt-4.5-preview", "o4-mini"]
32
+ images = [
33
+ Path.cwd() / 'images' / 'flash_rom.jpg',
34
+ Path.cwd() / 'images' / 'screen_cap.png',
35
+ Path.cwd() / 'images' / 'xbox-one.jpg',
36
+ ]
37
+
38
+ for image in images:
39
+ for model in models:
40
+ print(f"Checking {model} with {str(image)}")
41
+ check_image_analysis(openai_llm(model=model), image)
@@ -0,0 +1,48 @@
1
+ """
2
+ Example script demonstrating the usage of the ephemeral task manager tools.
3
+ """
4
+ import logging
5
+ import os
6
+
7
+ from mojentic.llm.gateways import OpenAIGateway
8
+
9
+ logging.basicConfig(
10
+ level=logging.WARN
11
+ )
12
+
13
+ from mojentic.llm import LLMBroker
14
+ from mojentic.llm.gateways.models import LLMMessage
15
+ from mojentic.llm.tools.ephemeral_task_manager import (
16
+ EphemeralTaskList,
17
+ AppendTaskTool,
18
+ PrependTaskTool,
19
+ InsertTaskAfterTool,
20
+ StartTaskTool,
21
+ CompleteTaskTool,
22
+ ListTasksTool,
23
+ ClearTasksTool
24
+ )
25
+ from mojentic.llm.tools.tell_user_tool import TellUserTool
26
+
27
+ # llm = LLMBroker(model="qwen3:30b-a3b-q4_K_M")
28
+ # llm = LLMBroker(model="qwen3:32b")
29
+ llm = LLMBroker(model="qwen2.5:7b")
30
+ # llm = LLMBroker(model="qwen2.5:72b")
31
+ # llm = LLMBroker(model="o4-mini", gateway=OpenAIGateway(os.environ["OPENAI_API_KEY"]))
32
+ message = LLMMessage(
33
+ content="I want you to count from 1 to 10. Break that request down into individual tasks, track them using available tools, and perform them one by one until you're finished. Interrupt me to tell the user as you complete every task.")
34
+ task_list = EphemeralTaskList()
35
+ tools = [
36
+ AppendTaskTool(task_list),
37
+ PrependTaskTool(task_list),
38
+ InsertTaskAfterTool(task_list),
39
+ StartTaskTool(task_list),
40
+ CompleteTaskTool(task_list),
41
+ ListTasksTool(task_list),
42
+ ClearTasksTool(task_list),
43
+ TellUserTool(),
44
+ ]
45
+
46
+ result = llm.generate(messages=[message], tools=tools, temperature=0.0)
47
+ print(result)
48
+ print(task_list.list_tasks())
@@ -0,0 +1,9 @@
1
+ import os
2
+
3
+ from mojentic.llm.gateways import OllamaGateway, OpenAIGateway
4
+
5
+ ollama = OllamaGateway()
6
+ print(len(ollama.calculate_embeddings("Hello, world! " * 5000)))
7
+
8
+ openai = OpenAIGateway(os.environ["OPENAI_API_KEY"])
9
+ print(len(openai.calculate_embeddings("Hello, world! " * 5000)))
_examples/streaming.py CHANGED
@@ -16,7 +16,7 @@ def main():
16
16
  date_tool = ResolveDateTool()
17
17
 
18
18
  stream = ollama.complete_stream(
19
- model="MFDoom/deepseek-r1-tool-calling:70b",
19
+ model="qwen2.5:7b",
20
20
  messages=[
21
21
  LLMMessage(content="Tell me a story about a dragon. In your story, reference several dates relative to today, "
22
22
  "like 'three days from now' or 'last week'.")
@@ -0,0 +1,43 @@
1
+ """
2
+ Example script demonstrating how to use the TellUserTool.
3
+
4
+ This script shows how to create and run an IterativeProblemSolver with the TellUserTool
5
+ to display messages to the user without expecting a response.
6
+ """
7
+
8
+ import logging
9
+
10
+ logging.basicConfig(level=logging.WARN)
11
+
12
+ from mojentic.agents.iterative_problem_solver import IterativeProblemSolver
13
+ from mojentic.llm.tools.tell_user_tool import TellUserTool
14
+ from mojentic.llm import LLMBroker
15
+
16
+
17
+ def main():
18
+ # Initialize the LLM broker with your preferred model
19
+ # Uncomment one of the following lines or modify as needed:
20
+ # llm = LLMBroker(model="llama3.3-70b-32k") # Ollama model
21
+ # llm = LLMBroker(model="gpt-4o") # OpenAI model
22
+ llm = LLMBroker(model="qwq") # Default model for example
23
+
24
+ # Define a simple user request
25
+ user_request = "Tell me about the benefits of exercise."
26
+
27
+ # Create the problem solver with necessary tools
28
+ solver = IterativeProblemSolver(
29
+ llm=llm,
30
+ available_tools=[TellUserTool()],
31
+ max_iterations=3
32
+ )
33
+
34
+ # Run the solver and get the result
35
+ result = solver.solve(user_request)
36
+
37
+ # Display the results
38
+ print(f"User Request:\n{user_request}\n")
39
+ print(f"Agent Response:\n{result}\n")
40
+
41
+
42
+ if __name__ == "__main__":
43
+ main()
@@ -26,7 +26,8 @@ class IterativeProblemSolver:
26
26
  max_iterations: int
27
27
  chat: ChatSession
28
28
 
29
- def __init__(self, llm: LLMBroker, available_tools: Optional[List[LLMTool]] = None, max_iterations: int = 3):
29
+ def __init__(self, llm: LLMBroker, available_tools: Optional[List[LLMTool]] = None, max_iterations: int = 3,
30
+ system_prompt: Optional[str] = None):
30
31
  """Initialize the IterativeProblemSolver.
31
32
 
32
33
  Parameters
@@ -42,7 +43,9 @@ class IterativeProblemSolver:
42
43
  self.available_tools = available_tools or []
43
44
  self.chat = ChatSession(
44
45
  llm=llm,
45
- system_prompt="You are a helpful assistant, working on behalf of the user on a specific user request.",
46
+ system_prompt=system_prompt or "You are a problem-solving assistant that can solve complex problems step by step. "
47
+ "You analyze problems, break them down into smaller parts, and solve them systematically. "
48
+ "If you cannot solve a problem completely in one step, you make progress and identify what to do next.",
46
49
  tools=self.available_tools,
47
50
  )
48
51
 
@@ -140,7 +140,7 @@ class SimpleRecursiveAgent:
140
140
  emitter: EventEmitter
141
141
  chat: ChatSession
142
142
 
143
- def __init__(self, llm: LLMBroker, available_tools: Optional[List[LLMTool]] = None, max_iterations: int = 5):
143
+ def __init__(self, llm: LLMBroker, available_tools: Optional[List[LLMTool]] = None, max_iterations: int = 5, system_prompt: Optional[str] = None):
144
144
  """
145
145
  Initialize the SimpleRecursiveAgent.
146
146
 
@@ -161,7 +161,7 @@ class SimpleRecursiveAgent:
161
161
  # Initialize the chat session
162
162
  self.chat = ChatSession(
163
163
  llm=llm,
164
- system_prompt="You are a problem-solving assistant that can solve complex problems step by step. "
164
+ system_prompt=system_prompt or "You are a problem-solving assistant that can solve complex problems step by step. "
165
165
  "You analyze problems, break them down into smaller parts, and solve them systematically. "
166
166
  "If you cannot solve a problem completely in one step, you make progress and identify what to do next.",
167
167
  tools=self.available_tools
@@ -1,12 +1,15 @@
1
1
  import json
2
- from typing import Type, List
2
+ from itertools import islice
3
+ from typing import Type, List, Iterable
3
4
 
5
+ import numpy as np
4
6
  import structlog
5
7
  from openai import OpenAI
6
8
 
7
9
  from mojentic.llm.gateways.llm_gateway import LLMGateway
8
10
  from mojentic.llm.gateways.models import LLMToolCall, LLMGatewayResponse
9
11
  from mojentic.llm.gateways.openai_messages_adapter import adapt_messages_to_openai
12
+ from mojentic.llm.gateways.tokenizer_gateway import TokenizerGateway
10
13
 
11
14
  logger = structlog.get_logger()
12
15
 
@@ -121,8 +124,28 @@ class OpenAIGateway(LLMGateway):
121
124
  The embeddings for the text.
122
125
  """
123
126
  logger.debug("calculate_embeddings", text=text, model=model)
124
- response = self.client.embeddings.create(
125
- model=model,
126
- input=text
127
- )
128
- return response.data[0].embedding
127
+
128
+ embeddings = [self.client.embeddings.create(model=model, input=chunk).data[0].embedding
129
+ for chunk in self._chunked_tokens(text, 8191)]
130
+ lengths = [len(embedding) for embedding in embeddings]
131
+
132
+ average = np.average(embeddings, axis=0, weights=lengths)
133
+ average = average / np.linalg.norm(average)
134
+ average = average.tolist()
135
+
136
+ return average
137
+
138
+ def _batched(self, iterable: Iterable, n: int):
139
+ """Batch data into tuples of length n. The last batch may be shorter."""
140
+ # batched('ABCDEFG', 3) --> ABC DEF G
141
+ if n < 1:
142
+ raise ValueError('n must be at least one')
143
+ it = iter(iterable)
144
+ while batch := tuple(islice(it, n)):
145
+ yield batch
146
+
147
+ def _chunked_tokens(self, text, chunk_length):
148
+ tokenizer = TokenizerGateway()
149
+ tokens = tokenizer.encode(text)
150
+ chunks_iterator = self._batched(tokens, chunk_length)
151
+ yield from chunks_iterator
@@ -32,22 +32,10 @@ def llm_broker(mock_gateway):
32
32
 
33
33
 
34
34
  class DescribeLLMBroker:
35
- """
36
- Specification for the LLMBroker class which handles interactions with Language Learning Models.
37
- """
38
35
 
39
36
  class DescribeMessageGeneration:
40
- """
41
- Specifications for generating messages through the LLM broker
42
- """
43
37
 
44
38
  def should_generate_simple_response_for_user_message(self, llm_broker, mock_gateway):
45
- """
46
- Given a simple user message
47
- When generating a response
48
- Then it should return the LLM's response content
49
- """
50
- # Given
51
39
  test_response_content = "I am fine, thank you!"
52
40
  messages = [LLMMessage(role=MessageRole.User, content="Hello, how are you?")]
53
41
  mock_gateway.complete.return_value = LLMGatewayResponse(
@@ -56,20 +44,12 @@ class DescribeLLMBroker:
56
44
  tool_calls=[]
57
45
  )
58
46
 
59
- # When
60
47
  result = llm_broker.generate(messages)
61
48
 
62
- # Then
63
49
  assert result == test_response_content
64
50
  mock_gateway.complete.assert_called_once()
65
51
 
66
52
  def should_handle_tool_calls_during_generation(self, llm_broker, mock_gateway, mocker):
67
- """
68
- Given a message that requires tool usage
69
- When generating a response
70
- Then it should properly handle tool calls and return final response
71
- """
72
- # Given
73
53
  messages = [LLMMessage(role=MessageRole.User, content="What is the date on Friday?")]
74
54
  tool_call = mocker.create_autospec(LLMToolCall, instance=True)
75
55
  tool_call.name = "resolve_date"
@@ -84,26 +64,15 @@ class DescribeLLMBroker:
84
64
  mock_tool.matches.return_value = True
85
65
  mock_tool.run.return_value = {"resolved_date": "Friday"}
86
66
 
87
- # When
88
67
  result = llm_broker.generate(messages, tools=[mock_tool])
89
68
 
90
- # Then
91
69
  assert result == "The date is Friday."
92
70
  assert mock_gateway.complete.call_count == 2
93
71
  mock_tool.run.assert_called_once_with(date="Friday")
94
72
 
95
73
  class DescribeObjectGeneration:
96
- """
97
- Specifications for generating structured objects through the LLM broker
98
- """
99
74
 
100
75
  def should_generate_simple_model(self, llm_broker, mock_gateway):
101
- """
102
- Given messages requiring a simple structured output
103
- When generating an object with SimpleModel
104
- Then it should return the validated SimpleModel object
105
- """
106
- # Given
107
76
  messages = [LLMMessage(role=MessageRole.User, content="Generate a simple object")]
108
77
  mock_object = SimpleModel(text="test", number=42)
109
78
  mock_gateway.complete.return_value = LLMGatewayResponse(
@@ -112,22 +81,14 @@ class DescribeLLMBroker:
112
81
  tool_calls=[]
113
82
  )
114
83
 
115
- # When
116
84
  result = llm_broker.generate_object(messages, object_model=SimpleModel)
117
85
 
118
- # Then
119
86
  assert isinstance(result, SimpleModel)
120
87
  assert result.text == "test"
121
88
  assert result.number == 42
122
89
  mock_gateway.complete.assert_called_once()
123
90
 
124
91
  def should_generate_nested_model(self, llm_broker, mock_gateway):
125
- """
126
- Given messages requiring a nested structured output
127
- When generating an object with NestedModel
128
- Then it should return the validated NestedModel object
129
- """
130
- # Given
131
92
  messages = [LLMMessage(role=MessageRole.User, content="Generate a nested object")]
132
93
  mock_object = NestedModel(
133
94
  title="main",
@@ -139,10 +100,8 @@ class DescribeLLMBroker:
139
100
  tool_calls=[]
140
101
  )
141
102
 
142
- # When
143
103
  result = llm_broker.generate_object(messages, object_model=NestedModel)
144
104
 
145
- # Then
146
105
  assert isinstance(result, NestedModel)
147
106
  assert result.title == "main"
148
107
  assert isinstance(result.details, SimpleModel)
@@ -151,12 +110,6 @@ class DescribeLLMBroker:
151
110
  mock_gateway.complete.assert_called_once()
152
111
 
153
112
  def should_generate_complex_model(self, llm_broker, mock_gateway):
154
- """
155
- Given messages requiring a complex structured output
156
- When generating an object with ComplexModel
157
- Then it should return the validated ComplexModel object
158
- """
159
- # Given
160
113
  messages = [LLMMessage(role=MessageRole.User, content="Generate a complex object")]
161
114
  mock_object = ComplexModel(
162
115
  name="test",
@@ -172,10 +125,8 @@ class DescribeLLMBroker:
172
125
  tool_calls=[]
173
126
  )
174
127
 
175
- # When
176
128
  result = llm_broker.generate_object(messages, object_model=ComplexModel)
177
129
 
178
- # Then
179
130
  assert isinstance(result, ComplexModel)
180
131
  assert result.name == "test"
181
132
  assert len(result.items) == 2
@@ -61,11 +61,6 @@ class DescribeMessageBuilder:
61
61
  """
62
62
 
63
63
  def should_build_message_with_content(self, message_builder):
64
- """
65
- Given a MessageBuilder with content
66
- When build is called
67
- Then it should return a message with that content
68
- """
69
64
  message_builder.content = "Test content"
70
65
 
71
66
  message = message_builder.build()
@@ -76,11 +71,6 @@ class DescribeMessageBuilder:
76
71
  assert message.image_paths == []
77
72
 
78
73
  def should_build_message_with_role(self, message_builder):
79
- """
80
- Given a MessageBuilder with a specific role
81
- When build is called
82
- Then it should return a message with that role
83
- """
84
74
  message_builder.role = MessageRole.Assistant
85
75
 
86
76
  message = message_builder.build()
@@ -88,11 +78,6 @@ class DescribeMessageBuilder:
88
78
  assert message.role == MessageRole.Assistant
89
79
 
90
80
  def should_build_message_with_image_paths(self, message_builder):
91
- """
92
- Given a MessageBuilder with image paths
93
- When build is called
94
- Then it should return a message with those image paths
95
- """
96
81
  message_builder.image_paths = [Path("/path/to/image1.jpg"), Path("/path/to/image2.jpg")]
97
82
 
98
83
  message = message_builder.build()
@@ -100,11 +85,6 @@ class DescribeMessageBuilder:
100
85
  assert message.image_paths == ["/path/to/image1.jpg", "/path/to/image2.jpg"]
101
86
 
102
87
  def should_build_message_with_file_content(self, message_builder, file_gateway):
103
- """
104
- Given a MessageBuilder with file paths
105
- When build is called
106
- Then it should return a message with the file contents
107
- """
108
88
  file_path = Path("/path/to/file.txt")
109
89
  message_builder.file_paths = [file_path]
110
90
 
@@ -115,11 +95,6 @@ class DescribeMessageBuilder:
115
95
  assert "File: /path/to/file.txt" in message.content
116
96
 
117
97
  def should_build_message_with_multiple_file_contents(self, message_builder, file_gateway):
118
- """
119
- Given a MessageBuilder with multiple file paths
120
- When build is called
121
- Then it should return a message with all file contents
122
- """
123
98
  file_path1 = Path("/path/to/file1.txt")
124
99
  file_path2 = Path("/path/to/file2.txt")
125
100
  message_builder.file_paths = [file_path1, file_path2]
@@ -136,11 +111,6 @@ class DescribeMessageBuilder:
136
111
  """
137
112
 
138
113
  def should_format_file_content_with_language(self, message_builder, file_gateway, mocker):
139
- """
140
- Given a MessageBuilder
141
- When _file_content_partial is called
142
- Then it should format the file content with the correct language
143
- """
144
114
  file_path = Path("/path/to/file.py")
145
115
  mocker.patch.object(message_builder.type_sensor, 'get_language', return_value='python')
146
116
 
@@ -153,11 +123,6 @@ class DescribeMessageBuilder:
153
123
  assert "```" in result
154
124
 
155
125
  def should_strip_whitespace_from_file_content(self, message_builder, file_gateway, file_path, whitespace_file_content, mocker):
156
- """
157
- Given a MessageBuilder
158
- When _file_content_partial is called with content that has whitespace above and below
159
- Then it should strip the whitespace when putting it in code fences
160
- """
161
126
  # Use the fixtures instead of creating file path and content directly
162
127
  file_gateway.read.return_value = whitespace_file_content
163
128
  mocker.patch.object(message_builder.type_sensor, 'get_language', return_value='text')
@@ -182,11 +147,6 @@ class DescribeMessageBuilder:
182
147
  """
183
148
 
184
149
  def should_add_image_path_to_list(self, message_builder):
185
- """
186
- Given a MessageBuilder
187
- When add_image is called with a path
188
- Then it should add the path to the image_paths list
189
- """
190
150
  image_path = Path("/path/to/image.jpg")
191
151
 
192
152
  result = message_builder.add_image(image_path)
@@ -195,11 +155,6 @@ class DescribeMessageBuilder:
195
155
  assert result is message_builder # Returns self for method chaining
196
156
 
197
157
  def should_convert_string_path_to_path_object(self, message_builder):
198
- """
199
- Given a MessageBuilder
200
- When add_image is called with a string path
201
- Then it should convert the string to a Path object
202
- """
203
158
  image_path_str = "/path/to/image.jpg"
204
159
 
205
160
  message_builder.add_image(image_path_str)
@@ -212,11 +167,6 @@ class DescribeMessageBuilder:
212
167
  """
213
168
 
214
169
  def should_add_multiple_specific_images(self, message_builder):
215
- """
216
- Given a MessageBuilder
217
- When add_images is called with multiple specific image paths
218
- Then it should add all paths to the image_paths list
219
- """
220
170
  image_path1 = Path("/path/to/image1.jpg")
221
171
  image_path2 = Path("/path/to/image2.jpg")
222
172
 
@@ -227,11 +177,6 @@ class DescribeMessageBuilder:
227
177
  assert result is message_builder # Returns self for method chaining
228
178
 
229
179
  def should_add_all_jpg_images_from_directory(self, message_builder, mocker):
230
- """
231
- Given a MessageBuilder
232
- When add_images is called with a directory path
233
- Then it should add all JPG images in that directory
234
- """
235
180
  dir_path = Path("/path/to/images")
236
181
  jpg_files = [Path("/path/to/images/image1.jpg"), Path("/path/to/images/image2.jpg")]
237
182
 
@@ -246,11 +191,6 @@ class DescribeMessageBuilder:
246
191
  assert jpg_files[1] in message_builder.image_paths
247
192
 
248
193
  def should_add_images_matching_glob_pattern(self, message_builder, mocker):
249
- """
250
- Given a MessageBuilder
251
- When add_images is called with a path containing a wildcard
252
- Then it should add all matching files
253
- """
254
194
  pattern_path = Path("/path/to/*.jpg")
255
195
  matching_files = [Path("/path/to/image1.jpg"), Path("/path/to/image2.jpg")]
256
196
 
@@ -276,11 +216,6 @@ class DescribeMessageBuilder:
276
216
  """
277
217
 
278
218
  def should_load_content_from_file(self, message_builder, file_gateway, file_path):
279
- """
280
- Given a MessageBuilder
281
- When load_content is called with a file path
282
- Then it should load the content from the file and set it as the content
283
- """
284
219
  result = message_builder.load_content(file_path)
285
220
 
286
221
  file_gateway.read.assert_called_once_with(file_path)
@@ -288,11 +223,6 @@ class DescribeMessageBuilder:
288
223
  assert result is message_builder # Returns self for method chaining
289
224
 
290
225
  def should_convert_string_path_to_path_object(self, message_builder, file_gateway):
291
- """
292
- Given a MessageBuilder
293
- When load_content is called with a string path
294
- Then it should convert the string to a Path object
295
- """
296
226
  file_path_str = "/path/to/file.txt"
297
227
 
298
228
  message_builder.load_content(file_path_str)
@@ -300,22 +230,12 @@ class DescribeMessageBuilder:
300
230
  file_gateway.read.assert_called_once_with(Path(file_path_str))
301
231
 
302
232
  def should_raise_error_if_file_not_found(self, message_builder, file_gateway, file_path):
303
- """
304
- Given a MessageBuilder
305
- When load_content is called with a non-existent file
306
- Then it should raise a FileNotFoundError
307
- """
308
233
  file_gateway.exists.return_value = False
309
234
 
310
235
  with pytest.raises(FileNotFoundError):
311
236
  message_builder.load_content(file_path)
312
237
 
313
238
  def should_replace_placeholders_with_template_values(self, message_builder, file_gateway, file_path):
314
- """
315
- Given a MessageBuilder
316
- When load_content is called with a file path and template values
317
- Then it should replace placeholders in the content with the corresponding values
318
- """
319
239
  # Set up the file content with placeholders
320
240
  file_gateway.read.return_value = "Hello, {name}! Today is {day}."
321
241
 
@@ -12,7 +12,7 @@ class AskUserTool(LLMTool):
12
12
  "type": "function",
13
13
  "function": {
14
14
  "name": "ask_user",
15
- "description": "If you do not know how to proceed, ask the user a question, or ask them to do something for you.",
15
+ "description": "If you do not know how to proceed, ask the user a question, or ask them for help or to do something for you.",
16
16
  "parameters": {
17
17
  "type": "object",
18
18
  "properties": {
@@ -0,0 +1,27 @@
1
+ """
2
+ Ephemeral Task Manager tools for managing a list of tasks.
3
+
4
+ This module provides tools for appending, prepending, inserting, starting, completing, and listing tasks.
5
+ Tasks follow a state machine that transitions from PENDING through IN_PROGRESS to COMPLETED.
6
+ """
7
+
8
+ from mojentic.llm.tools.ephemeral_task_manager.append_task_tool import AppendTaskTool
9
+ from mojentic.llm.tools.ephemeral_task_manager.clear_tasks_tool import ClearTasksTool
10
+ from mojentic.llm.tools.ephemeral_task_manager.complete_task_tool import CompleteTaskTool
11
+ from mojentic.llm.tools.ephemeral_task_manager.insert_task_after_tool import InsertTaskAfterTool
12
+ from mojentic.llm.tools.ephemeral_task_manager.list_tasks_tool import ListTasksTool
13
+ from mojentic.llm.tools.ephemeral_task_manager.prepend_task_tool import PrependTaskTool
14
+ from mojentic.llm.tools.ephemeral_task_manager.start_task_tool import StartTaskTool
15
+ from mojentic.llm.tools.ephemeral_task_manager.ephemeral_task_list import EphemeralTaskList, Task
16
+
17
+ __all__ = [
18
+ "EphemeralTaskList",
19
+ "Task",
20
+ "AppendTaskTool",
21
+ "PrependTaskTool",
22
+ "InsertTaskAfterTool",
23
+ "StartTaskTool",
24
+ "CompleteTaskTool",
25
+ "ListTasksTool",
26
+ "ClearTasksTool",
27
+ ]