goose-py 0.7.0__py3-none-any.whl → 0.7.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- goose/_internal/agent.py +7 -25
- goose/_internal/conversation.py +10 -16
- goose/_internal/flow.py +3 -4
- goose/_internal/state.py +9 -15
- goose/_internal/task.py +11 -12
- {goose_py-0.7.0.dist-info → goose_py-0.7.2.dist-info}/METADATA +1 -1
- {goose_py-0.7.0.dist-info → goose_py-0.7.2.dist-info}/RECORD +8 -8
- {goose_py-0.7.0.dist-info → goose_py-0.7.2.dist-info}/WHEEL +0 -0
goose/_internal/agent.py
CHANGED
@@ -65,20 +65,12 @@ class AgentResponse[R: BaseModel | str](BaseModel):
     @computed_field
     @property
     def input_cost(self) -> float:
-        return (
-            self.INPUT_CENTS_PER_MILLION_TOKENS[self.model]
-            * self.input_tokens
-            / 1_000_000
-        )
+        return self.INPUT_CENTS_PER_MILLION_TOKENS[self.model] * self.input_tokens / 1_000_000
 
     @computed_field
     @property
     def output_cost(self) -> float:
-        return (
-            self.OUTPUT_CENTS_PER_MILLION_TOKENS[self.model]
-            * self.output_tokens
-            / 1_000_000
-        )
+        return self.OUTPUT_CENTS_PER_MILLION_TOKENS[self.model] * self.output_tokens / 1_000_000
 
     @computed_field
     @property
@@ -100,15 +92,9 @@ class AgentResponse[R: BaseModel | str](BaseModel):
             for part in message["content"]:
                 if part["type"] == "image_url":
                     part["image_url"] = "__MEDIA__"
-        minimized_input_messages = [
-            json.dumps(message) for message in minimized_input_messages
-        ]
-
-        output_message = (
-            self.response.model_dump_json()
-            if isinstance(self.response, BaseModel)
-            else self.response
-        )
+        minimized_input_messages = [json.dumps(message) for message in minimized_input_messages]
+
+        output_message = self.response.model_dump_json() if isinstance(self.response, BaseModel) else self.response
 
         return {
             "run_id": self.run_id,
@@ -161,9 +147,7 @@ class Agent:
 
         if response_model is TextResult:
             response = await acompletion(model=model.value, messages=rendered_messages)
-            parsed_response = response_model.model_validate(
-                {"text": response.choices[0].message.content}
-            )
+            parsed_response = response_model.model_validate({"text": response.choices[0].message.content})
         else:
             response = await acompletion(
                 model=model.value,
@@ -174,9 +158,7 @@ class Agent:
                     "enforce_validation": True,
                 },
             )
-            parsed_response = response_model.model_validate_json(
-                response.choices[0].message.content
-            )
+            parsed_response = response_model.model_validate_json(response.choices[0].message.content)
 
         end_time = datetime.now()
         agent_response = AgentResponse(
goose/_internal/conversation.py
CHANGED
@@ -1,12 +1,9 @@
+from typing import Self
+
 from pydantic import BaseModel
 
 from goose._internal.result import Result
-from goose._internal.types.agent import (
-    AssistantMessage,
-    LLMMessage,
-    SystemMessage,
-    UserMessage,
-)
+from goose._internal.types.agent import AssistantMessage, LLMMessage, SystemMessage, UserMessage
 
 
 class Conversation[R: Result](BaseModel):
@@ -24,18 +21,15 @@ class Conversation[R: Result](BaseModel):
             messages.append(self.context.render())
 
         for message_index in range(len(self.user_messages)):
-            messages.append(
-                AssistantMessage(
-                    text=self.result_messages[message_index].model_dump_json()
-                ).render()
-            )
+            messages.append(AssistantMessage(text=self.result_messages[message_index].model_dump_json()).render())
             messages.append(self.user_messages[message_index].render())
 
         if len(self.result_messages) > len(self.user_messages):
-            messages.append(
-                AssistantMessage(
-                    text=self.result_messages[-1].model_dump_json()
-                ).render()
-            )
+            messages.append(AssistantMessage(text=self.result_messages[-1].model_dump_json()).render())
 
         return messages
+
+    def undo(self) -> Self:
+        self.user_messages.pop()
+        self.result_messages.pop()
+        return self
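Beyond the line-length reformatting, this file gains a new `Conversation.undo` method that discards the most recent user/result exchange. The sketch below is a minimal, standalone illustration of that pop-the-last-pair behavior under the assumption that `user_messages` and `result_messages` are kept in lockstep; `SimpleConversation` is a hypothetical stand-in, not the real pydantic model.

```python
from dataclasses import dataclass, field
from typing import Self


@dataclass
class SimpleConversation:
    # Hypothetical stand-in mirroring only the user_messages/result_messages bookkeeping.
    user_messages: list[str] = field(default_factory=list)
    result_messages: list[str] = field(default_factory=list)

    def undo(self) -> Self:
        # Same shape as the new Conversation.undo: drop the latest user/result pair.
        self.user_messages.pop()
        self.result_messages.pop()
        return self


conversation = SimpleConversation(
    user_messages=["make it shorter"],
    result_messages=['{"text": "draft 1"}', '{"text": "draft 2"}'],
)
conversation.undo()
assert conversation.user_messages == []
assert conversation.result_messages == ['{"text": "draft 1"}']
```

Note that, as written in the diff, `undo` pops both lists unconditionally, so calling it on a conversation with no prior exchange would raise an `IndexError`.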
goose/_internal/flow.py
CHANGED
@@ -1,6 +1,7 @@
+from collections.abc import AsyncIterator, Awaitable, Callable
 from contextlib import asynccontextmanager
 from types import CodeType
-from typing import
+from typing import Protocol, overload
 
 from goose._internal.agent import Agent, IAgentLogger
 from goose._internal.conversation import Conversation
@@ -13,9 +14,7 @@ from goose.errors import Honk
 class IAdapter[ResultT: Result](Protocol):
     __code__: CodeType
 
-    async def __call__(
-        self, *, conversation: Conversation[ResultT], agent: Agent
-    ) -> ResultT: ...
+    async def __call__(self, *, conversation: Conversation[ResultT], agent: Agent) -> ResultT: ...
 
 
 class Flow[**P]:
goose/_internal/state.py
CHANGED
@@ -61,6 +61,10 @@ class NodeState[ResultT: Result](BaseModel):
         self.conversation.user_messages.append(message)
         return self
 
+    def undo(self) -> Self:
+        self.conversation.undo()
+        return self
+
 
 class FlowRun:
     def __init__(self) -> None:
@@ -97,23 +101,17 @@ class FlowRun:
         matching_nodes: list[NodeState[R]] = []
         for key, node_state in self._node_states.items():
             if key[0] == task.name:
-                matching_nodes.append(
-                    NodeState[task.result_type].model_validate_json(node_state)
-                )
+                matching_nodes.append(NodeState[task.result_type].model_validate_json(node_state))
         return sorted(matching_nodes, key=lambda node: node.index)
 
     def get[R: Result](self, *, task: "Task[Any, R]", index: int = 0) -> NodeState[R]:
-        if (
-            existing_node_state := self._node_states.get((task.name, index))
-        ) is not None:
+        if (existing_node_state := self._node_states.get((task.name, index))) is not None:
             return NodeState[task.result_type].model_validate_json(existing_node_state)
         else:
             return NodeState[task.result_type](
                 task_name=task.name,
                 index=index,
-                conversation=Conversation[task.result_type](
-                    user_messages=[], result_messages=[]
-                ),
+                conversation=Conversation[task.result_type](user_messages=[], result_messages=[]),
                 last_hash=0,
             )
 
@@ -143,9 +141,7 @@ class FlowRun:
         self._last_requested_indices = {}
         self._flow_name = flow_name
         self._id = run_id
-        self._agent = Agent(
-            flow_name=self.flow_name, run_id=self.id, logger=agent_logger
-        )
+        self._agent = Agent(flow_name=self.flow_name, run_id=self.id, logger=agent_logger)
 
     def end(self) -> None:
         self._last_requested_indices = {}
@@ -177,9 +173,7 @@ class FlowRun:
         return flow_run
 
 
-_current_flow_run: ContextVar[FlowRun | None] = ContextVar(
-    "current_flow_run", default=None
-)
+_current_flow_run: ContextVar[FlowRun | None] = ContextVar("current_flow_run", default=None)
 
 
 def get_current_flow_run() -> FlowRun | None:
goose/_internal/task.py
CHANGED
@@ -1,4 +1,5 @@
-from
+from collections.abc import Awaitable, Callable
+from typing import overload
 
 from goose._internal.agent import Agent, GeminiModel, SystemMessage, UserMessage
 from goose._internal.conversation import Conversation
@@ -33,9 +34,7 @@ class Task[**P, R: Result]:
     def name(self) -> str:
         return self._generator.__name__
 
-    async def generate(
-        self, state: NodeState[R], *args: P.args, **kwargs: P.kwargs
-    ) -> R:
+    async def generate(self, state: NodeState[R], *args: P.args, **kwargs: P.kwargs) -> R:
         state_hash = self.__hash_task_call(*args, **kwargs)
         if state_hash != state.last_hash:
             result = await self._generator(*args, **kwargs)
@@ -58,14 +57,18 @@ class Task[**P, R: Result]:
         node_state.set_context(context=context)
         node_state.add_user_message(message=user_message)
 
-        result = await self.__adapt(
-            conversation=node_state.conversation, agent=flow_run.agent
-        )
+        result = await self.__adapt(conversation=node_state.conversation, agent=flow_run.agent)
         node_state.add_result(result=result)
         flow_run.add_node_state(node_state)
 
         return result
 
+    def undo(self, *, index: int = 0) -> None:
+        flow_run = self.__get_current_flow_run()
+        node_state = flow_run.get(task=self, index=index)
+        node_state.undo()
+        flow_run.add_node_state(node_state)
+
     async def __call__(self, *args: P.args, **kwargs: P.kwargs) -> R:
         flow_run = self.__get_current_flow_run()
         node_state = flow_run.get_next(task=self)
@@ -97,11 +100,7 @@ class Task[**P, R: Result]:
 
     def __hash_task_call(self, *args: P.args, **kwargs: P.kwargs) -> int:
         try:
-            to_hash = str(
-                tuple(args)
-                + tuple(kwargs.values())
-                + (self._generator.__code__, self._adapter_model)
-            )
+            to_hash = str(tuple(args) + tuple(kwargs.values()) + (self._generator.__code__, self._adapter_model))
             return hash(to_hash)
         except TypeError:
             raise Honk(f"Unhashable argument to task {self.name}: {args} {kwargs}")
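The new `Task.undo` ties the undo support together: it looks up the node's state in the current flow run, rolls back its conversation, and writes the state back. Below is a minimal, self-contained sketch of that delegation chain; `StubTask`, `StubFlowRun`, and `StubNodeState` are simplified illustrative stand-ins, not the goose API, and the node state here collapses the conversation into two plain message lists.

```python
from dataclasses import dataclass, field


@dataclass
class StubNodeState:
    # Stand-in for NodeState: the "conversation" is reduced to paired message lists.
    user_messages: list[str] = field(default_factory=list)
    result_messages: list[str] = field(default_factory=list)

    def undo(self) -> "StubNodeState":
        # Mirrors NodeState.undo -> Conversation.undo: drop the latest exchange.
        self.user_messages.pop()
        self.result_messages.pop()
        return self


class StubFlowRun:
    # Stand-in for FlowRun: node states keyed by (task name, index).
    def __init__(self) -> None:
        self._node_states: dict[tuple[str, int], StubNodeState] = {}

    def get(self, *, task_name: str, index: int = 0) -> StubNodeState:
        return self._node_states[(task_name, index)]

    def add_node_state(self, task_name: str, index: int, state: StubNodeState) -> None:
        self._node_states[(task_name, index)] = state


class StubTask:
    def __init__(self, name: str, run: StubFlowRun) -> None:
        self.name = name
        self._run = run

    def undo(self, *, index: int = 0) -> None:
        # Mirrors Task.undo in the diff: fetch the node state, roll it back, persist it.
        node_state = self._run.get(task_name=self.name, index=index)
        node_state.undo()
        self._run.add_node_state(self.name, index, node_state)


run = StubFlowRun()
run.add_node_state("summarize", 0, StubNodeState(user_messages=["shorter"], result_messages=["v1", "v2"]))
StubTask("summarize", run).undo()
assert run.get(task_name="summarize").result_messages == ["v1"]
```

The design keeps the rollback logic in `Conversation`/`NodeState` and leaves `Task.undo` as a thin coordinator over the current flow run's stored state.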
{goose_py-0.7.0.dist-info → goose_py-0.7.2.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: goose-py
-Version: 0.7.0
+Version: 0.7.2
 Summary: A tool for AI workflows based on human-computer collaboration and structured output.
 Author-email: Nash Taylor <nash@chelle.ai>, Joshua Cook <joshua@chelle.ai>, Michael Sankur <michael@chelle.ai>
 Requires-Python: >=3.12
{goose_py-0.7.0.dist-info → goose_py-0.7.2.dist-info}/RECORD
CHANGED
@@ -4,15 +4,15 @@ goose/errors.py,sha256=-0OyZQJWYTRw5YgnCB2_uorVaUsL6Z0QYQO2FqzCiyg,32
 goose/flow.py,sha256=A1bzNIjnoVXRFm6LGhQglxVnKMP0vEVfvTubTol7Kww,58
 goose/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 goose/runs.py,sha256=HHcky_IbmY9HWBjpXJgOcH2Ko0N39qADsGIPR8QYpek,160
-goose/_internal/agent.py,sha256=
-goose/_internal/conversation.py,sha256=
-goose/_internal/flow.py,sha256=
+goose/_internal/agent.py,sha256=l0pKfShovrs238sKAr-zubtcacYm82TGwQHcBWVJm2g,5875
+goose/_internal/conversation.py,sha256=1OZQ_N6QZE7L_ZpXG2bjoWkVQ-G7h0JvKkqswmQWG58,1202
+goose/_internal/flow.py,sha256=KGT6NpkMY8q_N1yKwWrxfTbhwcu5AwdHtgCPqCdL3F8,3266
 goose/_internal/result.py,sha256=-eZJn-2sPo7rHZ38Sz6IAHXqiJ-Ss39esEoFGimJEBI,155
-goose/_internal/state.py,sha256=
+goose/_internal/state.py,sha256=NTF9L2hrO7xXKPNNJjmOhHiXSTW5mnDULjvm81aqGLQ,5785
 goose/_internal/store.py,sha256=vIxPIpechF_lEQlQ8JT1NDySDfHe3-eMHEWeTqVbscg,946
-goose/_internal/task.py,sha256=
+goose/_internal/task.py,sha256=k_DuFTnaugbkFXz_xvZOrmwFV-MlKf0RZCXfR7M_6ko,4910
 goose/_internal/types/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 goose/_internal/types/agent.py,sha256=rNVt2gEr_m4_8tGFgcdichpPp8xhOS5GY0kN2C4tiE8,2153
-goose_py-0.7.
-goose_py-0.7.
-goose_py-0.7.
+goose_py-0.7.2.dist-info/METADATA,sha256=FVAKqtk6EqumBkzOEtbFn9GA1mLiAinNF-nJ7-K6dC0,441
+goose_py-0.7.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+goose_py-0.7.2.dist-info/RECORD,,
{goose_py-0.7.0.dist-info → goose_py-0.7.2.dist-info}/WHEEL
File without changes