goose-py 0.11.23__tar.gz → 0.11.24__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {goose_py-0.11.23 → goose_py-0.11.24}/PKG-INFO +1 -1
- {goose_py-0.11.23 → goose_py-0.11.24}/goose/_internal/task.py +11 -14
- {goose_py-0.11.23 → goose_py-0.11.24}/pyproject.toml +1 -1
- {goose_py-0.11.23 → goose_py-0.11.24}/tests/test_ask.py +3 -0
- {goose_py-0.11.23 → goose_py-0.11.24}/tests/test_refining.py +3 -0
- {goose_py-0.11.23 → goose_py-0.11.24}/tests/test_state.py +1 -0
- {goose_py-0.11.23 → goose_py-0.11.24}/uv.lock +1 -1
- {goose_py-0.11.23 → goose_py-0.11.24}/.envrc +0 -0
- {goose_py-0.11.23 → goose_py-0.11.24}/.github/workflows/publish.yml +0 -0
- {goose_py-0.11.23 → goose_py-0.11.24}/.gitignore +0 -0
- {goose_py-0.11.23 → goose_py-0.11.24}/.python-version +0 -0
- {goose_py-0.11.23 → goose_py-0.11.24}/.stubs/jsonpath_ng/__init__.pyi +0 -0
- {goose_py-0.11.23 → goose_py-0.11.24}/Makefile +0 -0
- {goose_py-0.11.23 → goose_py-0.11.24}/README.md +0 -0
- {goose_py-0.11.23 → goose_py-0.11.24}/goose/__init__.py +0 -0
- {goose_py-0.11.23 → goose_py-0.11.24}/goose/_internal/agent.py +0 -0
- {goose_py-0.11.23 → goose_py-0.11.24}/goose/_internal/conversation.py +0 -0
- {goose_py-0.11.23 → goose_py-0.11.24}/goose/_internal/flow.py +0 -0
- {goose_py-0.11.23 → goose_py-0.11.24}/goose/_internal/result.py +0 -0
- {goose_py-0.11.23 → goose_py-0.11.24}/goose/_internal/state.py +0 -0
- {goose_py-0.11.23 → goose_py-0.11.24}/goose/_internal/store.py +0 -0
- {goose_py-0.11.23 → goose_py-0.11.24}/goose/_internal/types/__init__.py +0 -0
- {goose_py-0.11.23 → goose_py-0.11.24}/goose/_internal/types/telemetry.py +0 -0
- {goose_py-0.11.23 → goose_py-0.11.24}/goose/errors.py +0 -0
- {goose_py-0.11.23 → goose_py-0.11.24}/goose/flow.py +0 -0
- {goose_py-0.11.23 → goose_py-0.11.24}/goose/py.typed +0 -0
- {goose_py-0.11.23 → goose_py-0.11.24}/goose/runs.py +0 -0
- {goose_py-0.11.23 → goose_py-0.11.24}/goose/task.py +0 -0
- {goose_py-0.11.23 → goose_py-0.11.24}/tests/__init__.py +0 -0
- {goose_py-0.11.23 → goose_py-0.11.24}/tests/test_agent.py +0 -0
- {goose_py-0.11.23 → goose_py-0.11.24}/tests/test_downstream_task.py +0 -0
- {goose_py-0.11.23 → goose_py-0.11.24}/tests/test_hashing.py +0 -0
- {goose_py-0.11.23 → goose_py-0.11.24}/tests/test_looping.py +0 -0
- {goose_py-0.11.23 → goose_py-0.11.24}/tests/test_regenerate.py +0 -0
{goose_py-0.11.23 → goose_py-0.11.24}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: goose-py
-Version: 0.11.23
+Version: 0.11.24
 Summary: A tool for AI workflows based on human-computer collaboration and structured output.
 Author-email: Nash Taylor <nash@chelle.ai>, Joshua Cook <joshua@chelle.ai>, Michael Sankur <michael@chelle.ai>
 Requires-Python: >=3.12
{goose_py-0.11.23 → goose_py-0.11.24}/goose/_internal/task.py

@@ -18,11 +18,9 @@ class Task[**P, R: Result]:
         /,
         *,
         retries: int = 0,
-        refinement_model: LLMModelAlias = "gemini-2.0-flash-lite",
     ) -> None:
         self._generator = generator
         self._retries = retries
-        self._refinement_model: LLMModelAlias = refinement_model

     @property
     def result_type(self) -> type[R]:
@@ -44,11 +42,12 @@ class Task[**P, R: Result]:
         else:
             return self.result_type.model_validate_json(state.raw_result)

-    async def ask(
+    async def ask[M: LLMModelAlias](
         self,
         *,
         user_message: LLMUserMessage,
-        router: LLMRouter[LLMModelAlias],
+        router: LLMRouter[M],
+        model: M,
         context: LLMSystemMessage | None = None,
         index: int = 0,
     ) -> str:
@@ -64,7 +63,7 @@ class Task[**P, R: Result]:

         answer = await flow_run.agent(
             messages=node_state.conversation.render(),
-            model=self._refinement_model,
+            model=model,
             task_name=f"ask--{self.name}",
             mode="ask",
             router=router,
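Together, these two hunks change the calling convention for `ask`: there is no longer a model stored on the `Task`, so every call site has to name one explicitly. A minimal sketch of the new call shape, modeled on the test updates further down (`basic_task` and `ROUTER` are stand-ins borrowed from those tests, not definitions from this diff):

```python
# Hypothetical call site under the 0.11.24 signature; basic_task and ROUTER
# are assumed to exist as in the test diffs below.
async def example() -> str:
    return await basic_task.ask(
        user_message=LLMUserMessage(parts=[LLMMessagePart(content="Can you explain?")]),
        router=ROUTER,
        model="gemini-2.0-flash-lite",  # now required on every call; no stored default
    )
```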
@@ -74,11 +73,12 @@ class Task[**P, R: Result]:

         return answer

-    async def refine(
+    async def refine[M: LLMModelAlias](
         self,
         *,
         user_message: LLMUserMessage,
-        router: LLMRouter[LLMModelAlias],
+        router: LLMRouter[M],
+        model: M,
         context: LLMSystemMessage | None = None,
         index: int = 0,
     ) -> R:
@@ -94,7 +94,7 @@ class Task[**P, R: Result]:

         result = await flow_run.agent(
             messages=node_state.conversation.render(),
-            model=self._refinement_model,
+            model=model,
             task_name=f"refine--{self.name}",
             response_model=self.result_type,
             mode="refine",
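`refine` gets the same generic treatment as `ask`, and the shared type parameter is the point: because `router: LLMRouter[M]` and `model: M` reference the same `M`, a type checker can reject a model alias the router was not built for. A self-contained sketch of the underlying PEP 695 pattern on Python 3.12+ (the `ModelAlias` values and `Router` class here are illustrative stand-ins, not goose-py's real types):

```python
from typing import Literal

# Stand-in for goose-py's LLMModelAlias; the real alias union may differ.
type ModelAlias = Literal["gemini-2.0-flash-lite", "gemini-2.0-flash"]

class Router[M: ModelAlias]:
    """Toy stand-in for LLMRouter, parameterized by the aliases it can serve."""

async def refine[M: ModelAlias](*, router: Router[M], model: M) -> None:
    # Both parameters share M, so router and model must agree at the call site.
    ...
```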
@@ -161,21 +161,18 @@ class Task[**P, R: Result]:
 @overload
 def task[**P, R: Result](generator: Callable[P, Awaitable[R]], /) -> Task[P, R]: ...
 @overload
-def task[**P, R: Result](
-    *, retries: int = 0, refinement_model: LLMModelAlias = "gemini-2.0-flash-lite"
-) -> Callable[[Callable[P, Awaitable[R]]], Task[P, R]]: ...
+def task[**P, R: Result](*, retries: int = 0) -> Callable[[Callable[P, Awaitable[R]]], Task[P, R]]: ...
 def task[**P, R: Result](
     generator: Callable[P, Awaitable[R]] | None = None,
     /,
     *,
     retries: int = 0,
-    refinement_model: LLMModelAlias = "gemini-2.0-flash-lite",
 ) -> Task[P, R] | Callable[[Callable[P, Awaitable[R]]], Task[P, R]]:
     if generator is None:

         def decorator(fn: Callable[P, Awaitable[R]]) -> Task[P, R]:
-            return Task(fn, retries=retries, refinement_model=refinement_model)
+            return Task(fn, retries=retries)

         return decorator

-    return Task(generator, retries=retries, refinement_model=refinement_model)
+    return Task(generator, retries=retries)
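At the decorator level, the same change removes `refinement_model` from `@task`: the model is no longer fixed when a task is defined. A hedged before/after sketch (`make_word` is a hypothetical task; `GeneratedWord` is the result type used in the package's own tests):

```python
# Sketch only; assumes `task` from goose and a GeneratedWord result type
# as used in this package's tests.

# 0.11.23: the refinement model was pinned when the task was defined.
# @task(retries=1, refinement_model="gemini-2.0-flash-lite")

# 0.11.24: the decorator only configures retries; the model is instead
# passed to each ask()/refine() call, as the test updates below show.
@task(retries=1)
async def make_word() -> GeneratedWord: ...
```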
{goose_py-0.11.23 → goose_py-0.11.24}/tests/test_ask.py

@@ -58,6 +58,7 @@ async def test_ask_adds_to_conversation():
     response = await basic_task.ask(
         user_message=LLMUserMessage(parts=[LLMMessagePart(content="Can you explain how you got that?")]),
         router=ROUTER,
+        model="gemini-2.0-flash-lite",
     )

     # Verify the response exists and makes sense
@@ -94,6 +95,7 @@ async def test_ask_requires_completed_task():
         await basic_task.ask(
             user_message=LLMUserMessage(parts=[LLMMessagePart(content="Can you explain?")]),
             router=ROUTER,
+            model="gemini-2.0-flash-lite",
         )


@@ -113,6 +115,7 @@ async def test_ask_multiple_questions():
         response = await basic_task.ask(
             user_message=LLMUserMessage(parts=[LLMMessagePart(content=question)]),
             router=ROUTER,
+            model="gemini-2.0-flash-lite",
         )
         responses.append(response)

{goose_py-0.11.23 → goose_py-0.11.24}/tests/test_refining.py

@@ -69,6 +69,7 @@ async def test_refining() -> None:
         user_message=LLMUserMessage(parts=[LLMMessagePart(content="Change it")]),
         context=LLMSystemMessage(parts=[LLMMessagePart(content="Extra info")]),
         router=ROUTER,
+        model="gemini-2.0-flash-lite",
     )

     initial_random_words = first_run.get_all_results(task=generate_random_word)
@@ -80,6 +81,7 @@ async def test_refining() -> None:
         user_message=LLMUserMessage(parts=[LLMMessagePart(content="Change it")]),
         context=LLMSystemMessage(parts=[LLMMessagePart(content="Extra info")]),
         router=ROUTER,
+        model="gemini-2.0-flash-lite",
     )
     # Since refine now directly returns the result from the agent call
     assert isinstance(result, GeneratedWord)
@@ -99,4 +101,5 @@ async def test_refining_before_generate_fails() -> None:
         user_message=LLMUserMessage(parts=[LLMMessagePart(content="Change it")]),
         context=LLMSystemMessage(parts=[LLMMessagePart(content="Extra info")]),
         router=ROUTER,
+        model="gemini-2.0-flash-lite",
     )
{goose_py-0.11.23 → goose_py-0.11.24}/tests/test_state.py

@@ -76,6 +76,7 @@ async def test_state_undo() -> None:
         user_message=LLMUserMessage(parts=[LLMMessagePart(content="Change it")]),
         context=LLMSystemMessage(parts=[LLMMessagePart(content="Extra info")]),
         router=Mock(),
+        model="gemini-2.0-flash-lite",
     )

     async with with_state.start_run(run_id="2") as run: