mojentic 0.8.3__py3-none-any.whl → 0.9.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -10,7 +10,7 @@ from mojentic.llm.tools.tool_wrapper import ToolWrapper
#

temporal_specialist = BaseLLMAgent(
- llm=LLMBroker(model="qwen2.5:7b"),
+ llm=LLMBroker(model="qwen3:7b"),
tools=[ResolveDateTool()],
behaviour="You are a historian and sociologist who focuses on sorting out temporal events, determining what happened or will happen when."
)
@@ -22,7 +22,7 @@ if not os.path.exists("local"):
fs = FilesystemGateway(base_path="local")

knowledge_specialist = BaseLLMAgent(
- llm=LLMBroker(model="llama3.3-70b-32k"),
+ llm=LLMBroker(model="qwen3:32b"),
tools=[
ListFilesTool(fs),
ReadFileTool(fs),
@@ -34,7 +34,7 @@ knowledge_specialist = BaseLLMAgent(


coordinator = BaseLLMAgent(
- llm=LLMBroker(model="llama3.3-70b-32k"),
+ llm=LLMBroker(model="qwen3:32b"),
behaviour="You are a coordinator who can manage multiple agents and delegate tasks to them to solve problems.",
tools=[
ToolWrapper(temporal_specialist, "temporal_specialist", "A historian and sociologist who focuses on sorting out temporal events, figuring out dates, determining what happened or will happen when."),
@@ -21,7 +21,7 @@ def openai_llm(model="gpt-5"):
return llm


- def ollama_llm(model="llama3.3-70b-32k"):
+ def ollama_llm(model="qwen3:32b"):
llm = LLMBroker(model=model)
return llm

@@ -13,7 +13,7 @@ def openai_llm(model="gpt-4o"):
return llm


- def ollama_llm(model="llama3.3-70b-32k"):
+ def ollama_llm(model="qwen3:32b"):
llm = LLMBroker(model=model)
return llm

@@ -13,7 +13,7 @@ def check_ollama_gateway():
label: str = Field(..., description="The label describing the feeling.")

response = gateway.complete(
- model="llama3.2:1b",
+ model="qwen3:7b",
messages=[LLMMessage(content="Hello, how are you?")],
object_model=Feeling,
temperature=1.0,
@@ -25,7 +25,7 @@ def check_ollama_gateway():

def check_tools_call():
response = chat(
- model="llama3.3-70b-32k",
+ model="qwen3:32b",
messages=[
# {
#     'role': 'user',
_examples/chat_session.py CHANGED
@@ -1,6 +1,6 @@
from mojentic.llm import ChatSession, LLMBroker

- llm_broker = LLMBroker(model="llama3.3-70b-32k")
+ llm_broker = LLMBroker(model="qwen3:32b")
chat_session = ChatSession(llm_broker)

while True:
@@ -1,7 +1,7 @@
from mojentic.llm import ChatSession, LLMBroker
from mojentic.llm.tools.date_resolver import ResolveDateTool

- llm_broker = LLMBroker(model="llama3.3-70b-32k")
+ llm_broker = LLMBroker(model="qwen3:32b")
chat_session = ChatSession(llm_broker, tools=[ResolveDateTool()])

while True:
@@ -38,8 +38,8 @@ api_key = os.getenv("OPENAI_API_KEY")
gateway = OpenAIGateway(api_key)
llm = LLMBroker(model="o4-mini", gateway=gateway)

- # llm = LLMBroker("qwen2.5-coder:32b")
- # llm = LLMBroker("llama3.3")
+ # llm = LLMBroker("qwen3-coder:30b")
+ llm = LLMBroker("qwen3-coder:30b")
# llm = LLMBroker(model="qwen3-128k:32b")

# Create a filesystem gateway for the sandbox
@@ -75,28 +75,28 @@ solver = IterativeProblemSolver(
system_prompt="""
# 0 - Project Identity & Context

- You are an expert and principled software engineer, well versed in writing Python games. You work
- carefully and purposefully and always check your work with an eye to testability and correctness.
- You know that every line of code you write is a liability, and you take care that every line
+ You are an expert and principled software engineer, well versed in writing Python games. You work
+ carefully and purposefully and always check your work with an eye to testability and correctness.
+ You know that every line of code you write is a liability, and you take care that every line
matters.

# 1 - Universal Engineering Principles

* **Code is communication** — optimise for the next human reader.
- * **Simple Design Heuristics** — guiding principles, not iron laws; consult the user when you
+ * **Simple Design Heuristics** — guiding principles, not iron laws; consult the user when you
need to break them.
1. **All tests pass** — correctness is non‑negotiable.
2. **Reveals intent** — code should read like an explanation.
- 3. **No *****knowledge***** duplication** — avoid multiple spots that must change together;
+ 3. **No *****knowledge***** duplication** — avoid multiple spots that must change together;
identical code is only a smell when it hides duplicate *decisions*.
4. **Minimal entities** — remove unnecessary indirection, classes, or parameters.
* **Small, safe increments** — single‑reason commits; avoid speculative work (**YAGNI**).
* **Tests are the executable spec** — red first, green always; test behaviour not implementation.
* **Compose over inherit**; favour pure functions where practical, avoid side-effects.
- * **Functional core, imperative shell** — isolate pure business logic from I/O and side effects;
+ * **Functional core, imperative shell** — isolate pure business logic from I/O and side effects;
push mutations to the system boundaries, build mockable gateways at those boundaries.
* **Psychological safety** — review code, not colleagues; critique ideas, not authors.
- * **Version‑control etiquette** — descriptive commit messages, branch from `main`, PRs require
+ * **Version‑control etiquette** — descriptive commit messages, branch from `main`, PRs require
green CI.

# 2 - Python‑Specific Conventions
@@ -115,21 +115,21 @@ green CI.

## 2.2 Core Libraries

- Mandatory: pydantic, structlog, pytest, pytest-spec, pytest-cov, pytest-mock, flake8, black,
- pre‑commit, mkdocs‑material. Add new libs only when they eliminate **significant** boilerplate or
+ Mandatory: pydantic, structlog, pytest, pytest-spec, pytest-cov, pytest-mock, flake8, black,
+ pre‑commit, mkdocs‑material. Add new libs only when they eliminate **significant** boilerplate or
risk.

## 2.3 Project Structure & Imports

* **src‑layout**: code in `src/<package_name>/`; tests live beside code as `*_spec.py`.
- * Import order: 1) stdlib, 2) third‑party, 3) first‑party — each group alphabetised with a blank
+ * Import order: 1) stdlib, 2) third‑party, 3) first‑party — each group alphabetised with a blank
line.

## 2.4 Naming & Style

* `snake_case` for functions & vars, `PascalCase` for classes, `UPPER_SNAKE` for constants.
* Prefix intentionally unused vars/args with `_`.
- * **flake8** (with plugins) handles linting, and **black** auto‑formats code. Max line length
+ * **flake8** (with plugins) handles linting, and **black** auto‑formats code. Max line length
**100**.
* Cyclomatic complexity cap: **10** (flake8 `C901`).
* Use **f‑strings**; avoid magic numbers.
@@ -144,14 +144,14 @@ line.

* Configure **structlog** for JSON output by default.
* Never use `print` for diagnostics; reserve for user‑facing CLI UX.
- * Log levels: `DEBUG` (dev detail) → `INFO` (lifecycle) → `WARNING` (recoverable) → `ERROR` (user
+ * Log levels: `DEBUG` (dev detail) → `INFO` (lifecycle) → `WARNING` (recoverable) → `ERROR` (user
visible).

## 2.7 Testing Strategy

* **pytest** with **pytest-spec** for specification-style output.
* Test files end with `_spec.py` and live in the same folder as the code under test.
- * Use **Arrange / Act / Assert** blocks separated by a blank line (no comments) **or** BDD
+ * Use **Arrange / Act / Assert** blocks separated by a blank line (no comments) **or** BDD
`describe/should` classes.
* Function names: use `should_*` and BDD-style specifications.
* Class names: use `Describe*` and BDD-style test suites.
@@ -162,7 +162,7 @@ visible).
# 3 - Planning and Goal Tracking

- Use the provided task manager tools to create your plans and work through them step by step.
- - Before declaring yourself finished list all tasks, ensure they are all complete, and that you
+ - Before declaring yourself finished list all tasks, ensure they are all complete, and that you
have not missed any steps
- If you've missed or forgotten some steps, add them to the task list and continue
- When all tasks are complete, and you can think of no more to add, declare yourself finished.
@@ -4,7 +4,7 @@ from mojentic.llm.tools.current_datetime import CurrentDateTimeTool

# Create an LLM broker with a specified model
# You can change the model to any supported model
- llm = LLMBroker(model="qwen2.5:7b")  # Using the same model as in simple_tool.py
+ llm = LLMBroker(model="qwen3:7b")  # Using the same model as in simple_tool.py

# Create our custom tool
datetime_tool = CurrentDateTimeTool()
@@ -26,8 +26,8 @@ from mojentic.llm.tools.tell_user_tool import TellUserTool

# llm = LLMBroker(model="qwen3:30b-a3b-q4_K_M")
# llm = LLMBroker(model="qwen3:32b")
- llm = LLMBroker(model="qwen2.5:7b")
- # llm = LLMBroker(model="qwen2.5:72b")
+ llm = LLMBroker(model="qwen3:7b")
+ # llm = LLMBroker(model="qwen3:72b")
# llm = LLMBroker(model="o4-mini", gateway=OpenAIGateway(os.environ["OPENAI_API_KEY"]))
message = LLMMessage(
content="I want you to count from 1 to 10. Break that request down into individual tasks, track them using available tools, and perform them one by one until you're finished. Interrupt me to tell the user as you complete every task.")
_examples/file_tool.py CHANGED
@@ -52,11 +52,11 @@ This is an unfinished story about Ernie, the most adorable and colourful caterpi
#


- # llm = LLMBroker("llama3.3-70b-32k")
- # llm = LLMBroker("llama3.1:70b")
- # llm = LLMBroker("llama3.1:8b")
- llm = LLMBroker("qwen2.5:7b")
- # llm = LLMBroker("llama3.3")
+ # llm = LLMBroker("qwen3:32b")
+ # llm = LLMBroker("qwen3:32b")
+ # llm = LLMBroker("qwen3:7b")
+ llm = LLMBroker("qwen3:7b")
+ # llm = LLMBroker("qwen3:32b")
# api_key = os.getenv("OPENAI_API_KEY")
# gateway = OpenAIGateway(api_key)
# llm = LLMBroker(model="gpt-4o-mini", gateway=gateway)
@@ -18,7 +18,7 @@ from mojentic.llm import LLMBroker
def main():
# Initialize the LLM broker with your preferred model
# Uncomment one of the following lines or modify as needed:
- # llm = LLMBroker(model="llama3.3-70b-32k")  # Ollama model
+ # llm = LLMBroker(model="qwen3:32b")  # Ollama model
# llm = LLMBroker(model="gpt-4o")  # OpenAI model
llm = LLMBroker(model="qwq")  # Default model for example

_examples/react.py CHANGED
@@ -6,7 +6,7 @@ from mojentic import Router, Dispatcher
from mojentic.agents import OutputAgent
from mojentic.llm import LLMBroker

- # llm = LLMBroker("llama3.3-70b-32k")
+ # llm = LLMBroker("qwen3:32b")
llm = LLMBroker("deepseek-r1:70b")
thinking_agent = ThinkingAgent(llm)
decisioning_agent = DecisioningAgent(llm)
@@ -21,7 +21,7 @@ async def demonstrate_async():
3. Running multiple problem-solving tasks concurrently
"""
# Initialize the LLM broker with your preferred model
- # llm = LLMBroker(model="llama3.3-70b-32k")
+ # llm = LLMBroker(model="qwen3:32b")
llm = LLMBroker(model="qwen3:30b-a3b-q4_K_M")

# Create the agent with a maximum of 3 iterations
_examples/simple_llm.py CHANGED
@@ -25,9 +25,9 @@ class RequestAgent(BaseLLMAgent):


# llm = LLMBroker("deepseek-r1:70b")
- # llm = LLMBroker("llama3.1-instruct-8b-32k")
- llm = LLMBroker("qwen2.5:0.5b")
- # llm = LLMBroker("llama3.1:8b", gateway=OllamaGateway(host="http://odin.local:11434"))
+ # llm = LLMBroker("qwen3:14b")
+ llm = LLMBroker("qwen3:0.5b")
+ # llm = LLMBroker("qwen3:7b", gateway=OllamaGateway(host="http://odin.local:11434"))
request_agent = RequestAgent(llm)
output_agent = OutputAgent()

@@ -27,7 +27,7 @@ class ChatOutputAgent(BaseAgent):
return []


- llm = LLMBroker("llama3.1-instruct-8b-32k")
+ llm = LLMBroker("qwen3:14b")
request_agent = ChatAgent(llm)
output_agent = ChatOutputAgent()

@@ -32,7 +32,7 @@ class RequestAgent(BaseLLMAgent):
return [ResponseEvent(source=type(self), correlation_id=event.correlation_id, capitol=response)]


- llm = LLMBroker("llama3.1-instruct-8b-32k")
+ llm = LLMBroker("qwen3:14b")
request_agent = RequestAgent(llm)
output_agent = OutputAgent()

_examples/simple_tool.py CHANGED
@@ -33,8 +33,8 @@ class RequestAgent(BaseLLMAgent):


# llm = LLMBroker("deepseek-r1:70b")
- # llm = LLMBroker("llama3.3-70b-32k")
- llm = LLMBroker("qwen2.5:7b")
+ # llm = LLMBroker("qwen3:32b")
+ llm = LLMBroker("qwen3:7b")
request_agent = RequestAgent(llm)
output_agent = OutputAgent()

@@ -43,12 +43,12 @@ class IterativeProblemSolverTool(LLMTool):

def main():
# llm = LLMBroker(model="MFDoom/deepseek-r1-tool-calling:14b")
- # llm = LLMBroker(model="qwen2.5:14b")
- # llm = LLMBroker(model="qwen2.5:14b")
- # llm = LLMBroker(model="qwen2.5:7b")
+ # llm = LLMBroker(model="qwen3:14b")
+ # llm = LLMBroker(model="qwen3:14b")
+ # llm = LLMBroker(model="qwen3:7b")
llm = LLMBroker(model="qwq")
# llm = LLMBroker(model="qwq:32b-fp16")
- # llm = LLMBroker(model="llama3.3-70b-32k")
+ # llm = LLMBroker(model="qwen3:32b")

tools = [
ResolveDateTool(),
_examples/streaming.py CHANGED
@@ -1,34 +1,49 @@
+ import os
+ from mojentic.llm.llm_broker import LLMBroker
from mojentic.llm.gateways.models import LLMMessage
from mojentic.llm.gateways.ollama import OllamaGateway
+ from mojentic.llm.gateways.openai import OpenAIGateway
from mojentic.llm.tools.date_resolver import ResolveDateTool

- #
- # This is here 2025-02-21 to demonstrate a deficiency in Ollama/llama tool calling
- # using the Stream option. We can't get chunk by chunk responses from the LLM
- # when using tools. This limits our ability to explore streaming capabilities
- # in the mojentic API, so I'm pausing this work for now until this is resolved.
- # https://github.com/ollama/ollama/issues/7886
- #
-

def main():
- ollama = OllamaGateway()
+ """
+ Demonstrates streaming text generation with tool calling support.
+
+ This example shows how generate_stream() handles tool calls seamlessly:
+ 1. Streams content as it arrives
+ 2. Detects tool calls in the stream
+ 3. Executes tools
+ 4. Recursively streams the LLM's response after tool execution
+ """
+ gateway = OllamaGateway()
+ # gateway = OpenAIGateway(api_key=os.getenv("OPENAI_API_KEY"))
+ broker = LLMBroker(
+ model="qwen3:32b",
+ # model="gpt-5",
+ gateway=gateway
+ )
+
date_tool = ResolveDateTool()
-
- stream = ollama.complete_stream(
- model="qwen2.5:7b",
+
+ print("Streaming response with tool calling enabled...\n")
+
+ stream = broker.generate_stream(
messages=[
- LLMMessage(content="Tell me a story about a dragon. In your story, reference several dates relative to today, "
+ LLMMessage(content="Tell me a short story about a dragon. In your story, reference several dates relative to today, "
"like 'three days from now' or 'last week'.")
],
tools=[date_tool],
- temperature=0.5,
+ temperature=0.7,
num_ctx=32768,
num_predict=-1
)
-
+
for chunk in stream:
- print(chunk.content, end='', flush=True)
+ print(chunk, end='', flush=True)
+
+ print("\n\nDone!")
+

if __name__ == "__main__":
- main()
+ main()
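The rewritten example above streams through LLMBroker.generate_stream() rather than calling the Ollama gateway directly, with the broker handling any tool calls before the stream continues. A minimal consumer might look like the sketch below; it assumes, as the example's print(chunk, ...) suggests, that generate_stream() yields plain text chunks, and the accumulation into full_text is illustrative rather than part of the package.

# Hedged usage sketch: assumes generate_stream() yields text chunks as in the example above
from mojentic.llm import LLMBroker
from mojentic.llm.gateways.models import LLMMessage
from mojentic.llm.tools.date_resolver import ResolveDateTool

broker = LLMBroker(model="qwen3:32b")  # assumes this model is available to your local Ollama
chunks = []
for chunk in broker.generate_stream(
        messages=[LLMMessage(content="What date is a week from next Friday?")],
        tools=[ResolveDateTool()]):
    print(chunk, end="", flush=True)  # display each text chunk as it arrives
    chunks.append(chunk)              # keep the pieces for later
full_text = "".join(chunks)           # the complete response once the stream ends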
@@ -17,7 +17,7 @@ from mojentic.llm import LLMBroker
def main():
# Initialize the LLM broker with your preferred model
# Uncomment one of the following lines or modify as needed:
- # llm = LLMBroker(model="llama3.3-70b-32k")  # Ollama model
+ # llm = LLMBroker(model="qwen3:32b")  # Ollama model
# llm = LLMBroker(model="gpt-4o")  # OpenAI model
llm = LLMBroker(model="qwq")  # Default model for example

_examples/tracer_demo.py CHANGED
@@ -1,8 +1,8 @@
"""
Example script demonstrating the tracer system with ChatSession and tools.

- This example shows how to use the tracer system to monitor an interactive
- chat session with LLMBroker and tools. When the user exits the session,
+ This example shows how to use the tracer system to monitor an interactive
+ chat session with LLMBroker and tools. When the user exits the session,
the script displays a summary of all traced events.

It also demonstrates how correlation_id is used to trace related events
@@ -35,7 +35,7 @@ def main():
tracer = TracerSystem()

# Create an LLM broker with the tracer
- llm_broker = LLMBroker(model="llama3.3-70b-32k", tracer=tracer)
+ llm_broker = LLMBroker(model="gpt-oss:20b", tracer=tracer)

# Create a date resolver tool that will also use the tracer
date_tool = ResolveDateTool(llm_broker=llm_broker, tracer=tracer)
@@ -51,7 +51,7 @@ memory = SharedWorkingMemory({
})

llm = LLMBroker("deepseek-r1:70b")
- # llm = LLMBroker("llama3.3-instruct-70b-32k")
+ # llm = LLMBroker("qwen3:32b-instruct-70b-32k")
request_agent = RequestAgent(llm, memory)
output_agent = OutputAgent()

@@ -1,4 +1,4 @@
- from typing import List, Iterator
+ from typing import List, Iterator, Optional
import structlog
from ollama import Client, Options, ChatResponse
from pydantic import BaseModel
@@ -10,8 +10,18 @@ from mojentic.llm.gateways.ollama_messages_adapter import adapt_messages_to_olla
logger = structlog.get_logger()

class StreamingResponse(BaseModel):
- """Simple wrapper for streaming response content"""
- content: str
+ """
+ Wrapper for streaming response chunks.
+
+ Attributes
+ ----------
+ content : Optional[str]
+ Text content chunk from the LLM response.
+ tool_calls : Optional[List]
+ Tool calls from the LLM response (raw ollama format).
+ """
+ content: Optional[str] = None
+ tool_calls: Optional[List] = None

class OllamaGateway(LLMGateway):
"""
@@ -144,28 +154,21 @@ class OllamaGateway(LLMGateway):
'stream': True
}

- #
- # This is here 2025-02-21 to demonstrate a deficiency in Ollama tool calling
- # using the Stream option. We can't get chunk by chunk responses from the LLM
- # when using tools. This limits our ability to explore streaming capabilities
- # in the mojentic API, so I'm pausing this work for now until this is resolved.
- # https://github.com/ollama/ollama/issues/7886
- #
-
- # if 'tools' in args and args['tools'] is not None:
- #     ollama_args['tools'] = [t.descriptor for t in args['tools']]
+ # Enable tool support if tools are provided
+ if 'tools' in args and args['tools'] is not None:
+ ollama_args['tools'] = [t.descriptor for t in args['tools']]

stream = self.client.chat(**ollama_args)

for chunk in stream:
if chunk.message:
+ # Yield content chunks as they arrive
if chunk.message.content:
yield StreamingResponse(content=chunk.message.content)
- # if chunk.message.tool_calls:
- #     for tool_call in chunk.message.tool_calls:
- #         yield StreamingResponse(
- #             content=f"\nTOOL CALL: {tool_call.function.name}({tool_call.function.arguments})\n"
- #         )
+
+ # Yield tool calls when they arrive
+ if chunk.message.tool_calls:
+ yield StreamingResponse(tool_calls=chunk.message.tool_calls)

def get_available_models(self) -> List[str]:
"""