goose-py 0.11.17.tar.gz → 0.11.19.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {goose_py-0.11.17 → goose_py-0.11.19}/PKG-INFO +2 -2
- {goose_py-0.11.17 → goose_py-0.11.19}/goose/_internal/agent.py +27 -16
- {goose_py-0.11.17 → goose_py-0.11.19}/goose/_internal/task.py +9 -6
- {goose_py-0.11.17 → goose_py-0.11.19}/goose/_internal/types/telemetry.py +13 -17
- {goose_py-0.11.17 → goose_py-0.11.19}/pyproject.toml +2 -2
- {goose_py-0.11.17 → goose_py-0.11.19}/tests/test_agent.py +8 -2
- {goose_py-0.11.17 → goose_py-0.11.19}/tests/test_ask.py +15 -4
- {goose_py-0.11.17 → goose_py-0.11.19}/tests/test_refining.py +8 -1
- {goose_py-0.11.17 → goose_py-0.11.19}/tests/test_state.py +1 -0
- {goose_py-0.11.17 → goose_py-0.11.19}/uv.lock +5 -5
- {goose_py-0.11.17 → goose_py-0.11.19}/.envrc +0 -0
- {goose_py-0.11.17 → goose_py-0.11.19}/.github/workflows/publish.yml +0 -0
- {goose_py-0.11.17 → goose_py-0.11.19}/.gitignore +0 -0
- {goose_py-0.11.17 → goose_py-0.11.19}/.python-version +0 -0
- {goose_py-0.11.17 → goose_py-0.11.19}/.stubs/jsonpath_ng/__init__.pyi +0 -0
- {goose_py-0.11.17 → goose_py-0.11.19}/Makefile +0 -0
- {goose_py-0.11.17 → goose_py-0.11.19}/README.md +0 -0
- {goose_py-0.11.17 → goose_py-0.11.19}/goose/__init__.py +0 -0
- {goose_py-0.11.17 → goose_py-0.11.19}/goose/_internal/conversation.py +0 -0
- {goose_py-0.11.17 → goose_py-0.11.19}/goose/_internal/flow.py +0 -0
- {goose_py-0.11.17 → goose_py-0.11.19}/goose/_internal/result.py +0 -0
- {goose_py-0.11.17 → goose_py-0.11.19}/goose/_internal/state.py +0 -0
- {goose_py-0.11.17 → goose_py-0.11.19}/goose/_internal/store.py +0 -0
- {goose_py-0.11.17 → goose_py-0.11.19}/goose/_internal/types/__init__.py +0 -0
- {goose_py-0.11.17 → goose_py-0.11.19}/goose/errors.py +0 -0
- {goose_py-0.11.17 → goose_py-0.11.19}/goose/flow.py +0 -0
- {goose_py-0.11.17 → goose_py-0.11.19}/goose/py.typed +0 -0
- {goose_py-0.11.17 → goose_py-0.11.19}/goose/runs.py +0 -0
- {goose_py-0.11.17 → goose_py-0.11.19}/goose/task.py +0 -0
- {goose_py-0.11.17 → goose_py-0.11.19}/tests/__init__.py +0 -0
- {goose_py-0.11.17 → goose_py-0.11.19}/tests/test_downstream_task.py +0 -0
- {goose_py-0.11.17 → goose_py-0.11.19}/tests/test_hashing.py +0 -0
- {goose_py-0.11.17 → goose_py-0.11.19}/tests/test_looping.py +0 -0
- {goose_py-0.11.17 → goose_py-0.11.19}/tests/test_regenerate.py +0 -0
{goose_py-0.11.17 → goose_py-0.11.19}/PKG-INFO

@@ -1,10 +1,10 @@
 Metadata-Version: 2.4
 Name: goose-py
-Version: 0.11.17
+Version: 0.11.19
 Summary: A tool for AI workflows based on human-computer collaboration and structured output.
 Author-email: Nash Taylor <nash@chelle.ai>, Joshua Cook <joshua@chelle.ai>, Michael Sankur <michael@chelle.ai>
 Requires-Python: >=3.12
-Requires-Dist: aikernel==0.1.
+Requires-Dist: aikernel==0.1.39
 Requires-Dist: jsonpath-ng>=1.7.0
 Requires-Dist: pydantic>=2.8.2
 Description-Content-Type: text/markdown
{goose_py-0.11.17 → goose_py-0.11.19}/goose/_internal/agent.py

@@ -4,10 +4,11 @@ from typing import Any, Literal, Protocol, overload
 
 from aikernel import (
     LLMAssistantMessage,
-
+    LLMModelAlias,
     LLMSystemMessage,
     LLMToolMessage,
     LLMUserMessage,
+    Router,
     llm_structured,
     llm_unstructured,
 )
@@ -40,18 +41,21 @@ class Agent:
         self,
         *,
         messages: list[LLMUserMessage | LLMAssistantMessage | LLMSystemMessage],
-        model:
+        model: LLMModelAlias,
         task_name: str,
+        router: Router,
         response_model: type[R] = TextResult,
     ) -> R:
         start_time = datetime.now()
         typed_messages: list[ExpectedMessage] = [*messages]
 
         if response_model is TextResult:
-            response = await llm_unstructured(model=model, messages=typed_messages)
+            response = await llm_unstructured(model=model, messages=typed_messages, router=router)
             parsed_response = response_model.model_validate({"text": response.text})
         else:
-            response = await llm_structured(
+            response = await llm_structured(
+                model=model, messages=typed_messages, response_model=response_model, router=router
+            )
             parsed_response = response.structured_response
 
         end_time = datetime.now()
@@ -88,12 +92,13 @@ class Agent:
         self,
         *,
         messages: list[LLMUserMessage | LLMAssistantMessage | LLMSystemMessage],
-        model:
+        model: LLMModelAlias,
         task_name: str,
+        router: Router,
     ) -> str:
         start_time = datetime.now()
         typed_messages: list[ExpectedMessage] = [*messages]
-        response = await llm_unstructured(model=model, messages=typed_messages)
+        response = await llm_unstructured(model=model, messages=typed_messages, router=router)
         end_time = datetime.now()
 
         if isinstance(messages[0], LLMSystemMessage):
@@ -128,14 +133,15 @@ class Agent:
         self,
         *,
         messages: list[LLMUserMessage | LLMAssistantMessage | LLMSystemMessage],
-        model:
+        model: LLMModelAlias,
+        router: Router,
         task_name: str,
         response_model: type[R],
     ) -> R:
         start_time = datetime.now()
         typed_messages: list[ExpectedMessage] = [*messages]
         find_replace_response = await llm_structured(
-            model=model, messages=typed_messages, response_model=FindReplaceResponse
+            model=model, messages=typed_messages, response_model=FindReplaceResponse, router=router
         )
         parsed_find_replace_response = find_replace_response.structured_response
         end_time = datetime.now()
@@ -179,7 +185,8 @@ class Agent:
         self,
         *,
         messages: list[LLMUserMessage | LLMAssistantMessage | LLMSystemMessage],
-        model:
+        model: LLMModelAlias,
+        router: Router,
         task_name: str,
         mode: Literal["generate"],
         response_model: type[R],
@@ -190,7 +197,8 @@ class Agent:
         self,
         *,
         messages: list[LLMUserMessage | LLMAssistantMessage | LLMSystemMessage],
-        model:
+        model: LLMModelAlias,
+        router: Router,
         task_name: str,
         mode: Literal["ask"],
         response_model: type[R] = TextResult,
@@ -201,7 +209,8 @@ class Agent:
         self,
         *,
         messages: list[LLMUserMessage | LLMAssistantMessage | LLMSystemMessage],
-        model:
+        model: LLMModelAlias,
+        router: Router,
         task_name: str,
         response_model: type[R],
         mode: Literal["refine"],
@@ -212,7 +221,8 @@ class Agent:
         self,
         *,
         messages: list[LLMUserMessage | LLMAssistantMessage | LLMSystemMessage],
-        model:
+        model: LLMModelAlias,
+        router: Router,
         task_name: str,
         response_model: type[R],
     ) -> R: ...
@@ -221,7 +231,8 @@ class Agent:
         self,
         *,
         messages: list[LLMUserMessage | LLMAssistantMessage | LLMSystemMessage],
-        model:
+        model: LLMModelAlias,
+        router: Router,
         task_name: str,
         response_model: type[R] = TextResult,
         mode: Literal["generate", "ask", "refine"] = "generate",
@@ -229,13 +240,13 @@ class Agent:
         match mode:
             case "generate":
                 return await self.generate(
-                    messages=messages, model=model, task_name=task_name, response_model=response_model
+                    messages=messages, model=model, task_name=task_name, router=router, response_model=response_model
                 )
             case "ask":
-                return await self.ask(messages=messages, model=model, task_name=task_name)
+                return await self.ask(messages=messages, model=model, task_name=task_name, router=router)
             case "refine":
                 return await self.refine(
-                    messages=messages, model=model, task_name=task_name, response_model=response_model
+                    messages=messages, model=model, task_name=task_name, router=router, response_model=response_model
                )
 
     def __apply_find_replace[R: Result](
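The practical upshot of the agent.py hunks: `model` is now an `LLMModelAlias` string rather than an enum member, and every code path threads a required aikernel `Router` through to `llm_structured`/`llm_unstructured`. A minimal sketch of the new calling convention, adapted from tests/test_agent.py later in this diff (the flow wiring that produces the `agent` instance is assumed):

```python
# Sketch of the 0.11.19 calling convention; the Router construction mirrors
# tests/test_agent.py below, and the surrounding flow wiring is assumed.
from aikernel import LLMMessagePart, LLMUserMessage, Router

from goose import Agent, TextResult


async def greet(agent: Agent) -> TextResult:
    # A Router maps a model alias to a concrete litellm model spec.
    router = Router(
        model_list=[
            {"model_name": "gemini-2.0-flash-lite", "litellm_params": {"model": "gemini/gemini-2.0-flash-lite"}}
        ]
    )
    return await agent(
        messages=[LLMUserMessage(parts=[LLMMessagePart(content="Hello")])],
        model="gemini-2.0-flash-lite",  # LLMModelAlias string replaces the old enum value
        task_name="greet",
        mode="generate",
        response_model=TextResult,
        router=router,  # new required keyword in 0.11.19
    )
```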
{goose_py-0.11.17 → goose_py-0.11.19}/goose/_internal/task.py

@@ -2,7 +2,7 @@ import hashlib
 from collections.abc import Awaitable, Callable
 from typing import Any, overload
 
-from aikernel import
+from aikernel import LLMModelAlias, LLMSystemMessage, LLMUserMessage, Router
 from pydantic import BaseModel
 
 from goose._internal.agent import Agent
@@ -18,11 +18,11 @@ class Task[**P, R: Result]:
         /,
         *,
         retries: int = 0,
-        refinement_model:
+        refinement_model: LLMModelAlias = "gemini-2.0-flash-lite",
     ) -> None:
         self._generator = generator
         self._retries = retries
-        self._refinement_model = refinement_model
+        self._refinement_model: LLMModelAlias = refinement_model
 
     @property
     def result_type(self) -> type[R]:
@@ -45,7 +45,7 @@ class Task[**P, R: Result]:
         return self.result_type.model_validate_json(state.raw_result)
 
     async def ask(
-        self, *, user_message: LLMUserMessage, context: LLMSystemMessage | None = None, index: int = 0
+        self, *, user_message: LLMUserMessage, router: Router, context: LLMSystemMessage | None = None, index: int = 0
     ) -> str:
         flow_run = self.__get_current_flow_run()
         node_state = flow_run.get_state(task=self, index=index)
@@ -62,6 +62,7 @@ class Task[**P, R: Result]:
             model=self._refinement_model,
             task_name=f"ask--{self.name}",
             mode="ask",
+            router=router,
         )
         node_state.add_answer(answer=answer)
         flow_run.upsert_node_state(node_state)
@@ -72,6 +73,7 @@ class Task[**P, R: Result]:
         self,
         *,
         user_message: LLMUserMessage,
+        router: Router,
         context: LLMSystemMessage | None = None,
         index: int = 0,
     ) -> R:
@@ -91,6 +93,7 @@ class Task[**P, R: Result]:
             task_name=f"refine--{self.name}",
             response_model=self.result_type,
             mode="refine",
+            router=router,
         )
         node_state.add_result(result=result.model_dump_json())
         flow_run.upsert_node_state(node_state)
@@ -154,14 +157,14 @@ class Task[**P, R: Result]:
 def task[**P, R: Result](generator: Callable[P, Awaitable[R]], /) -> Task[P, R]: ...
 @overload
 def task[**P, R: Result](
-    *, retries: int = 0, refinement_model:
+    *, retries: int = 0, refinement_model: LLMModelAlias = "gemini-2.0-flash-lite"
 ) -> Callable[[Callable[P, Awaitable[R]]], Task[P, R]]: ...
 def task[**P, R: Result](
     generator: Callable[P, Awaitable[R]] | None = None,
     /,
     *,
     retries: int = 0,
-    refinement_model:
+    refinement_model: LLMModelAlias = "gemini-2.0-flash-lite",
 ) -> Task[P, R] | Callable[[Callable[P, Awaitable[R]]], Task[P, R]]:
     if generator is None:
 
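At the task layer the same `Router` must now be passed to `Task.ask` and `Task.refine`, and `refinement_model` defaults to the `"gemini-2.0-flash-lite"` alias string. A hypothetical migration sketch under those signatures (the task definition and its body are illustrative, not taken from the package; the Router mirrors the module-level ones in the test files):

```python
# Hypothetical caller migration for Task.ask in 0.11.19; summarize() is an
# illustrative task, and ROUTER mirrors the constants in the tests below.
from aikernel import LLMMessagePart, LLMUserMessage, Router

from goose import Agent, TextResult, task

ROUTER = Router(
    model_list=[{"model_name": "gemini-2.0-flash-lite", "litellm_params": {"model": "gemini/gemini-2.0-flash-lite"}}]
)


@task(retries=1, refinement_model="gemini-2.0-flash-lite")  # alias string, not an enum
async def summarize(*, agent: Agent) -> TextResult:
    ...


async def follow_up() -> str:
    # ask() now requires router= alongside the user message.
    return await summarize.ask(
        user_message=LLMUserMessage(parts=[LLMMessagePart(content="Why that summary?")]),
        router=ROUTER,
    )
```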
{goose_py-0.11.17 → goose_py-0.11.19}/goose/_internal/types/telemetry.py

@@ -2,7 +2,7 @@ import json
 from datetime import datetime
 from typing import ClassVar, TypedDict
 
-from aikernel import LiteLLMMessage,
+from aikernel import LiteLLMMessage, LLMModelAlias
 from pydantic import BaseModel, computed_field
 
 from goose.errors import Honk
@@ -27,28 +27,24 @@ class AgentResponseDump(TypedDict):
 
 
 class AgentResponse[R: BaseModel | str](BaseModel):
-    INPUT_DOLLARS_PER_MILLION_TOKENS: ClassVar[dict[
-
-
-
-
-        LLMModel.GEMINI_2_0_FLASH_LITE: 0.15,
-        LLMModel.GEMINI_2_0_PRO_EXP_02_05: 5.00,
+    INPUT_DOLLARS_PER_MILLION_TOKENS: ClassVar[dict[LLMModelAlias, float]] = {
+        "gemini-2.0-flash": 0.30,
+        "gemini-2.0-flash-lite": 0.15,
+        "claude-3.5-sonnet": 3.00,
+        "claude-3.7-sonnet": 3.00,
     }
-    OUTPUT_DOLLARS_PER_MILLION_TOKENS: ClassVar[dict[
-
-
-
-
-        LLMModel.GEMINI_2_0_FLASH_LITE: 0.15,
-        LLMModel.GEMINI_2_0_PRO_EXP_02_05: 5.00,
+    OUTPUT_DOLLARS_PER_MILLION_TOKENS: ClassVar[dict[LLMModelAlias, float]] = {
+        "gemini-2.0-flash": 0.30,
+        "gemini-2.0-flash-lite": 0.15,
+        "claude-3.5-sonnet": 15.00,
+        "claude-3.7-sonnet": 15.00,
     }
 
     response: R
     run_id: str
     flow_name: str
     task_name: str
-    model:
+    model: LLMModelAlias
     system: LiteLLMMessage | None = None
     input_messages: list[LiteLLMMessage]
     input_tokens: int
@@ -102,7 +98,7 @@ class AgentResponse[R: BaseModel | str](BaseModel):
             "run_id": self.run_id,
             "flow_name": self.flow_name,
             "task_name": self.task_name,
-            "model": self.model
+            "model": self.model,
             "system_message": minimized_system_message,
             "input_messages": minimized_input_messages,
             "output_message": output_message,
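The telemetry change swaps the enum-keyed price tables for alias-keyed ones and adds Claude entries. As a worked example of what the new rates imply (the dicts below restate the ClassVars from the hunk above; `dollar_cost` is an assumed illustration of tokens/1M × rate pricing, not AgentResponse's actual computed field, which this diff does not show):

```python
# Worked example against the 0.11.19 price tables. dollar_cost() is an
# assumption about how a call would be priced, shown for scale only.
INPUT_DOLLARS_PER_MILLION_TOKENS: dict[str, float] = {
    "gemini-2.0-flash": 0.30,
    "gemini-2.0-flash-lite": 0.15,
    "claude-3.5-sonnet": 3.00,
    "claude-3.7-sonnet": 3.00,
}
OUTPUT_DOLLARS_PER_MILLION_TOKENS: dict[str, float] = {
    "gemini-2.0-flash": 0.30,
    "gemini-2.0-flash-lite": 0.15,
    "claude-3.5-sonnet": 15.00,
    "claude-3.7-sonnet": 15.00,
}


def dollar_cost(model: str, input_tokens: int, output_tokens: int) -> float:
    # Rates are per million tokens, so scale the counts down before multiplying.
    return (
        input_tokens / 1_000_000 * INPUT_DOLLARS_PER_MILLION_TOKENS[model]
        + output_tokens / 1_000_000 * OUTPUT_DOLLARS_PER_MILLION_TOKENS[model]
    )


# 2,000 input + 500 output tokens on claude-3.7-sonnet:
# 0.002 * 3.00 + 0.0005 * 15.00 = 0.006 + 0.0075 ≈ $0.0135
print(dollar_cost("claude-3.7-sonnet", 2_000, 500))
```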
{goose_py-0.11.17 → goose_py-0.11.19}/pyproject.toml

@@ -1,6 +1,6 @@
 [project]
 name = "goose-py"
-version = "0.11.17"
+version = "0.11.19"
 description = "A tool for AI workflows based on human-computer collaboration and structured output."
 readme = "README.md"
 authors = [
@@ -11,7 +11,7 @@ authors = [
 requires-python = ">=3.12"
 dependencies = [
     "jsonpath-ng>=1.7.0",
-    "aikernel==0.1.
+    "aikernel==0.1.39",
     "pydantic>=2.8.2",
 ]
 
{goose_py-0.11.17 → goose_py-0.11.19}/tests/test_agent.py

@@ -1,7 +1,7 @@
 from unittest.mock import Mock
 
 import pytest
-from aikernel import LLMMessagePart,
+from aikernel import LLMMessagePart, LLMUserMessage, Router
 from pytest_mock import MockerFixture
 
 from goose import Agent, AgentResponse, FlowArguments, TextResult, flow, task
@@ -22,12 +22,18 @@ def mock_llm_unstructured(mocker: MockerFixture) -> Mock:
 
 @task
 async def use_agent(*, agent: Agent) -> TextResult:
+    router = Router(
+        model_list=[
+            {"model_name": "gemini-2.0-flash-lite", "litellm_params": {"model": "gemini/gemini-2.0-flash-lite"}}
+        ]
+    )
     return await agent(
         messages=[LLMUserMessage(parts=[LLMMessagePart(content="Hello")])],
-        model=
+        model="gemini-2.0-flash-lite",
         task_name="greet",
         mode="generate",
         response_model=TextResult,
+        router=router,
     )
 
 
{goose_py-0.11.17 → goose_py-0.11.19}/tests/test_ask.py

@@ -1,13 +1,17 @@
 from unittest.mock import Mock
 
 import pytest
-from aikernel import LLMAssistantMessage, LLMMessagePart, LLMUserMessage
+from aikernel import LLMAssistantMessage, LLMMessagePart, LLMUserMessage, Router
 from pytest_mock import MockerFixture
 
 from goose import Agent, FlowArguments, flow, task
 from goose._internal.result import TextResult
 from goose.errors import Honk
 
+ROUTER = Router(
+    model_list=[{"model_name": "gemini-2.0-flash-lite", "litellm_params": {"model": "gemini/gemini-2.0-flash-lite"}}]
+)
+
 
 class MockLiteLLMResponse:
     def __init__(self, *, response: str, prompt_tokens: int, completion_tokens: int) -> None:
@@ -51,7 +55,8 @@ async def test_ask_adds_to_conversation():
 
     # Ask a follow-up question
     response = await basic_task.ask(
-        user_message=LLMUserMessage(parts=[LLMMessagePart(content="Can you explain how you got that?")])
+        user_message=LLMUserMessage(parts=[LLMMessagePart(content="Can you explain how you got that?")]),
+        router=ROUTER,
     )
 
     # Verify the response exists and makes sense
@@ -85,7 +90,10 @@ async def test_ask_requires_completed_task():
 
     # Try to ask before running the task
     with pytest.raises(Honk, match="Cannot ask about a task that has not been initially generated"):
-        await basic_task.ask(
+        await basic_task.ask(
+            user_message=LLMUserMessage(parts=[LLMMessagePart(content="Can you explain?")]),
+            router=ROUTER,
+        )
 
 
 @pytest.mark.asyncio
@@ -101,7 +109,10 @@ async def test_ask_multiple_questions():
     questions = ["Why is that the answer?", "Can you explain it differently?", "What if we added 1 more?"]
 
     for question in questions:
-        response = await basic_task.ask(
+        response = await basic_task.ask(
+            user_message=LLMUserMessage(parts=[LLMMessagePart(content=question)]),
+            router=ROUTER,
+        )
         responses.append(response)
 
     # Verify we got responses for all questions
{goose_py-0.11.17 → goose_py-0.11.19}/tests/test_refining.py

@@ -3,13 +3,17 @@ import string
 from unittest.mock import Mock
 
 import pytest
-from aikernel import LLMMessagePart, LLMSystemMessage, LLMUserMessage
+from aikernel import LLMMessagePart, LLMSystemMessage, LLMUserMessage, Router
 from pytest_mock import MockerFixture
 
 from goose import Agent, FlowArguments, Result, flow, task
 from goose._internal.result import FindReplaceResponse, Replacement
 from goose.errors import Honk
 
+ROUTER = Router(
+    model_list=[{"model_name": "gemini-2.0-flash-lite", "litellm_params": {"model": "gemini/gemini-2.0-flash-lite"}}]
+)
+
 
 class MyFlowArguments(FlowArguments):
     pass
@@ -63,6 +67,7 @@ async def test_refining() -> None:
             index=0,
             user_message=LLMUserMessage(parts=[LLMMessagePart(content="Change it")]),
             context=LLMSystemMessage(parts=[LLMMessagePart(content="Extra info")]),
+            router=ROUTER,
         )
 
     initial_random_words = first_run.get_all_results(task=generate_random_word)
@@ -73,6 +78,7 @@ async def test_refining() -> None:
         result = await generate_random_word.refine(
             user_message=LLMUserMessage(parts=[LLMMessagePart(content="Change it")]),
             context=LLMSystemMessage(parts=[LLMMessagePart(content="Extra info")]),
+            router=ROUTER,
         )
         # Since refine now directly returns the result from the agent call
         assert isinstance(result, GeneratedWord)
@@ -91,4 +97,5 @@ async def test_refining_before_generate_fails() -> None:
         await generate_random_word.refine(
             user_message=LLMUserMessage(parts=[LLMMessagePart(content="Change it")]),
             context=LLMSystemMessage(parts=[LLMMessagePart(content="Extra info")]),
+            router=ROUTER,
         )
{goose_py-0.11.17 → goose_py-0.11.19}/tests/test_state.py

@@ -75,6 +75,7 @@ async def test_state_undo() -> None:
             index=0,
             user_message=LLMUserMessage(parts=[LLMMessagePart(content="Change it")]),
             context=LLMSystemMessage(parts=[LLMMessagePart(content="Extra info")]),
+            router=Mock(),
         )
 
     async with with_state.start_run(run_id="2") as run:
{goose_py-0.11.17 → goose_py-0.11.19}/uv.lock

@@ -3,15 +3,15 @@ requires-python = ">=3.12"
 
 [[package]]
 name = "aikernel"
-version = "0.1.
+version = "0.1.39"
 source = { registry = "https://pypi.org/simple" }
 dependencies = [
     { name = "litellm" },
     { name = "pydantic" },
 ]
-sdist = { url = "https://files.pythonhosted.org/packages/
+sdist = { url = "https://files.pythonhosted.org/packages/ab/7e/1da152279e50e6d651b4f29cc6ee950f4e5aad5c64b8b4889df036c20acd/aikernel-0.1.39.tar.gz", hash = "sha256:13e21eaadd66499d3fb00c82d42e6d1ae4022e69017133d1d04da58636a8e399", size = 72243 }
 wheels = [
-    { url = "https://files.pythonhosted.org/packages/
+    { url = "https://files.pythonhosted.org/packages/2f/86/d55e7c00a1d2df6507ab3b41fb5933be2e24c5ad53c8e2879fff8c2008fc/aikernel-0.1.39-py3-none-any.whl", hash = "sha256:604426ae04880e227cffdef491f3ee305661b957f46153465c8e69740313b368", size = 9972 },
 ]
 
 [[package]]
@@ -345,7 +345,7 @@ wheels = [
 
 [[package]]
 name = "goose-py"
-version = "0.11.17"
+version = "0.11.19"
 source = { editable = "." }
 dependencies = [
     { name = "aikernel" },
@@ -365,7 +365,7 @@ dev = [
 
 [package.metadata]
 requires-dist = [
-    { name = "aikernel", specifier = "==0.1.
+    { name = "aikernel", specifier = "==0.1.39" },
     { name = "jsonpath-ng", specifier = ">=1.7.0" },
     { name = "pydantic", specifier = ">=2.8.2" },
 ]