goose-py 0.10.2__py3-none-any.whl → 0.11.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- goose/__init__.py +6 -5
- goose/_internal/agent.py +76 -84
- goose/_internal/conversation.py +12 -17
- goose/_internal/flow.py +7 -7
- goose/_internal/state.py +51 -35
- goose/_internal/store.py +1 -1
- goose/_internal/task.py +25 -22
- goose/_internal/types/telemetry.py +18 -19
- {goose_py-0.10.2.dist-info → goose_py-0.11.0.dist-info}/METADATA +2 -2
- goose_py-0.11.0.dist-info/RECORD +18 -0
- goose/_internal/types/agent.py +0 -101
- goose/agent.py +0 -26
- goose_py-0.10.2.dist-info/RECORD +0 -20
- {goose_py-0.10.2.dist-info → goose_py-0.11.0.dist-info}/WHEEL +0 -0
goose/__init__.py
CHANGED
@@ -1,6 +1,7 @@
-from ._internal.agent import Agent
-from ._internal.flow import FlowArguments, flow
-from ._internal.result import Result, TextResult
-from ._internal.task import task
+from goose._internal.agent import Agent
+from goose._internal.flow import FlowArguments, flow
+from goose._internal.result import Result, TextResult
+from goose._internal.task import task
+from goose._internal.types.telemetry import AgentResponse
 
-__all__ = ["Agent", "flow", "FlowArguments", "Result", "TextResult", "task"]
+__all__ = ["Agent", "flow", "FlowArguments", "Result", "TextResult", "task", "AgentResponse"]
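With goose/agent.py removed (see below), the telemetry response type now ships from the package root. A minimal import sketch of the resulting 0.11.0 surface:

    from goose import Agent, AgentResponse, FlowArguments, Result, TextResult, flow, task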
goose/_internal/agent.py
CHANGED
@@ -2,11 +2,18 @@ import logging
 from datetime import datetime
 from typing import Any, Literal, Protocol, overload
 
-from
+from aikernel import (
+    LLMAssistantMessage,
+    LLMModel,
+    LLMSystemMessage,
+    LLMUserMessage,
+    llm_structured,
+    llm_unstructured,
+    render_message,
+)
 from pydantic import ValidationError
 
 from goose._internal.result import FindReplaceResponse, Result, TextResult
-from goose._internal.types.agent import AIModel, AssistantMessage, SystemMessage, UserMessage
 from goose._internal.types.telemetry import AgentResponse
 from goose.errors import Honk
 
@@ -30,42 +37,39 @@ class Agent:
     async def generate[R: Result](
         self,
         *,
-        messages: list[
-        model:
+        messages: list[LLMUserMessage | LLMAssistantMessage | LLMSystemMessage],
+        model: LLMModel,
         task_name: str,
         response_model: type[R] = TextResult,
-        system: SystemMessage | None = None,
     ) -> R:
-        rendered_messages = [message.render() for message in messages]
-        rendered_system = system.render() if system is not None else None
-
-        completion_messages = (
-            [rendered_system] + rendered_messages if rendered_system is not None else rendered_messages
-        )
-
         start_time = datetime.now()
+
         if response_model is TextResult:
-            response = await
-            parsed_response = response_model.model_validate({"text": response.
+            response = await llm_unstructured(model=model, messages=messages)
+            parsed_response = response_model.model_validate({"text": response.text})
         else:
-            response = await
-
-                messages=completion_messages,
-                response_format=response_model,
-            )
-            parsed_response = response_model.model_validate_json(response.choices[0].message.content)
+            response = await llm_structured(model=model, messages=messages, response_model=response_model)
+            parsed_response = response.structured_response
 
         end_time = datetime.now()
+
+        if isinstance(messages[0], LLMSystemMessage):
+            system = render_message(messages[0])
+            input_messages = [render_message(message) for message in messages[1:]]
+        else:
+            system = None
+            input_messages = [render_message(message) for message in messages]
+
         agent_response = AgentResponse(
             response=parsed_response,
             run_id=self.run_id,
             flow_name=self.flow_name,
             task_name=task_name,
             model=model,
-            system=
-            input_messages=
-            input_tokens=response.usage.
-            output_tokens=response.usage.
+            system=system,
+            input_messages=input_messages,
+            input_tokens=response.usage.input_tokens,
+            output_tokens=response.usage.output_tokens,
             start_time=start_time,
             end_time=end_time,
         )
@@ -80,32 +84,31 @@ class Agent:
     async def ask(
         self,
         *,
-        messages: list[
-        model:
+        messages: list[LLMUserMessage | LLMAssistantMessage | LLMSystemMessage],
+        model: LLMModel,
         task_name: str,
-        system: SystemMessage | None = None,
     ) -> str:
-        rendered_messages = [message.render() for message in messages]
-        rendered_system = system.render() if system is not None else None
-
-        completion_messages = (
-            [rendered_system] + rendered_messages if rendered_system is not None else rendered_messages
-        )
-
         start_time = datetime.now()
-        response = await
-
+        response = await llm_unstructured(model=model, messages=messages)
         end_time = datetime.now()
+
+        if isinstance(messages[0], LLMSystemMessage):
+            system = render_message(messages[0])
+            input_messages = [render_message(message) for message in messages[1:]]
+        else:
+            system = None
+            input_messages = [render_message(message) for message in messages]
+
         agent_response = AgentResponse(
-            response=response.
+            response=response.text,
             run_id=self.run_id,
             flow_name=self.flow_name,
             task_name=task_name,
             model=model,
-            system=
-            input_messages=
-            input_tokens=response.usage.
-            output_tokens=response.usage.
+            system=system,
+            input_messages=input_messages,
+            input_tokens=response.usage.input_tokens,
+            output_tokens=response.usage.output_tokens,
             start_time=start_time,
             end_time=end_time,
         )
@@ -115,44 +118,38 @@ class Agent:
         else:
             logging.info(agent_response.model_dump())
 
-        return response.
+        return response.text
 
     async def refine[R: Result](
         self,
         *,
-        messages: list[
-        model:
+        messages: list[LLMUserMessage | LLMAssistantMessage | LLMSystemMessage],
+        model: LLMModel,
         task_name: str,
         response_model: type[R],
-        system: SystemMessage | None = None,
     ) -> R:
         start_time = datetime.now()
+        find_replace_response = await llm_structured(model=model, messages=messages, response_model=FindReplaceResponse)
+        parsed_find_replace_response = find_replace_response.structured_response
+        end_time = datetime.now()
 
-
-
-
-
-
-
-
-        find_replace_response = await acompletion(
-            model=model.value, messages=completion_messages, response_format=FindReplaceResponse
-        )
-        parsed_find_replace_response = FindReplaceResponse.model_validate_json(
-            find_replace_response.choices[0].message.content
-        )
+        if isinstance(messages[0], LLMSystemMessage):
+            system = render_message(messages[0])
+            input_messages = [render_message(message) for message in messages[1:]]
+        else:
+            system = None
+            input_messages = [render_message(message) for message in messages]
 
-        end_time = datetime.now()
         agent_response = AgentResponse(
             response=parsed_find_replace_response,
             run_id=self.run_id,
             flow_name=self.flow_name,
             task_name=task_name,
             model=model,
-            system=
-            input_messages=
-            input_tokens=find_replace_response.usage.
-            output_tokens=find_replace_response.usage.
+            system=system,
+            input_messages=input_messages,
+            input_tokens=find_replace_response.usage.input_tokens,
+            output_tokens=find_replace_response.usage.output_tokens,
             start_time=start_time,
             end_time=end_time,
         )
@@ -174,69 +171,64 @@ class Agent:
     async def __call__[R: Result](
         self,
         *,
-        messages: list[
-        model:
+        messages: list[LLMUserMessage | LLMAssistantMessage | LLMSystemMessage],
+        model: LLMModel,
         task_name: str,
         mode: Literal["generate"],
         response_model: type[R],
-        system: SystemMessage | None = None,
     ) -> R: ...
 
     @overload
     async def __call__[R: Result](
         self,
         *,
-        messages: list[
-        model:
+        messages: list[LLMUserMessage | LLMAssistantMessage | LLMSystemMessage],
+        model: LLMModel,
        task_name: str,
         mode: Literal["ask"],
         response_model: type[R] = TextResult,
-        system: SystemMessage | None = None,
     ) -> str: ...
 
     @overload
     async def __call__[R: Result](
         self,
         *,
-        messages: list[
-        model:
+        messages: list[LLMUserMessage | LLMAssistantMessage | LLMSystemMessage],
+        model: LLMModel,
         task_name: str,
         response_model: type[R],
         mode: Literal["refine"],
-        system: SystemMessage | None = None,
     ) -> R: ...
 
     @overload
     async def __call__[R: Result](
         self,
         *,
-        messages: list[
-        model:
+        messages: list[LLMUserMessage | LLMAssistantMessage | LLMSystemMessage],
+        model: LLMModel,
         task_name: str,
         response_model: type[R],
-        system: SystemMessage | None = None,
     ) -> R: ...
 
     async def __call__[R: Result](
         self,
         *,
-        messages: list[
-        model:
+        messages: list[LLMUserMessage | LLMAssistantMessage | LLMSystemMessage],
+        model: LLMModel,
         task_name: str,
         response_model: type[R] = TextResult,
         mode: Literal["generate", "ask", "refine"] = "generate",
-        system: SystemMessage | None = None,
     ) -> R | str:
         match mode:
             case "generate":
                 return await self.generate(
-                    messages=messages, model=model, task_name=task_name, response_model=response_model
+                    messages=messages, model=model, task_name=task_name, response_model=response_model
                 )
             case "ask":
-                return await self.ask(messages=messages, model=model, task_name=task_name
+                return await self.ask(messages=messages, model=model, task_name=task_name)
             case "refine":
                 return await self.refine(
-                    messages=messages, model=model, task_name=task_name, response_model=response_model
+                    messages=messages, model=model, task_name=task_name, response_model=response_model
                 )
 
     def __apply_find_replace[R: Result](
@@ -249,12 +241,12 @@ class Agent:
         return response_model.model_validate_json(dumped_result)
 
     def __find_last_result[R: Result](
-        self, *, messages: list[
+        self, *, messages: list[LLMUserMessage | LLMAssistantMessage | LLMSystemMessage], response_model: type[R]
     ) -> R:
         for message in reversed(messages):
-            if isinstance(message,
+            if isinstance(message, LLMAssistantMessage):
                 try:
-                    return response_model.model_validate_json(message.
+                    return response_model.model_validate_json(message.parts[0].content)
                 except ValidationError:
                     continue
 
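Net effect of the agent.py changes: litellm-style completion calls are replaced by aikernel's llm_unstructured/llm_structured, and the separate system= parameter is gone; a system prompt now travels as the first message and is only split back out (via isinstance plus render_message) for telemetry. A usage sketch under those assumptions, where msgs is a prebuilt aikernel message list and Plan is a hypothetical Result subclass:

    # mode="generate" (default): parse the response into a Result subclass
    plan = await agent(messages=msgs, model=LLMModel.GEMINI_2_0_FLASH, task_name="plan", response_model=Plan)
    # mode="ask": free-form text, returns str
    answer = await agent(messages=msgs, model=LLMModel.GEMINI_2_0_FLASH, task_name="plan", mode="ask")
    # mode="refine": structured find/replace refinement of the last result
    refined = await agent(messages=msgs, model=LLMModel.GEMINI_2_0_FLASH, task_name="plan", response_model=Plan, mode="refine")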
goose/_internal/conversation.py
CHANGED
@@ -1,38 +1,33 @@
 from typing import Self
 
+from aikernel import LLMAssistantMessage, LLMSystemMessage, LLMUserMessage
 from pydantic import BaseModel
 
-from goose._internal.result import Result
-from goose._internal.types.agent import AssistantMessage, SystemMessage, UserMessage
 from goose.errors import Honk
 
 
-class Conversation
-    user_messages: list[
-    assistant_messages: list[
-    context:
+class Conversation(BaseModel):
+    user_messages: list[LLMUserMessage]
+    assistant_messages: list[LLMAssistantMessage]
+    context: LLMSystemMessage | None = None
 
     @property
     def awaiting_response(self) -> bool:
         return len(self.user_messages) == len(self.assistant_messages)
 
-    def
-        messages: list[
+    def render(self) -> list[LLMSystemMessage | LLMUserMessage | LLMAssistantMessage]:
+        messages: list[LLMSystemMessage | LLMUserMessage | LLMAssistantMessage] = []
+        if self.context is not None:
+            messages.append(self.context)
+
         for message_index in range(len(self.user_messages)):
             message = self.assistant_messages[message_index]
-
-                messages.append(AssistantMessage(text=message))
-            else:
-                messages.append(AssistantMessage(text=message.model_dump_json()))
-
+            messages.append(message)
             messages.append(self.user_messages[message_index])
 
         if len(self.assistant_messages) > len(self.user_messages):
             message = self.assistant_messages[-1]
-
-                messages.append(AssistantMessage(text=message))
-            else:
-                messages.append(AssistantMessage(text=message.model_dump_json()))
+            messages.append(message)
 
         return messages
 
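Conversation is now a plain pydantic model over aikernel message types, and the new render() prepends the optional system context to the interleaved history instead of re-wrapping strings in AssistantMessage. A small sketch (it assumes LLMAssistantMessage.from_text, which this diff uses in state.py):

    from aikernel import LLMAssistantMessage
    from goose._internal.conversation import Conversation

    convo = Conversation(user_messages=[], assistant_messages=[])
    convo.assistant_messages.append(LLMAssistantMessage.from_text('{"text": "draft"}'))
    messages = convo.render()  # [assistant message]; context would come first if set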
goose/_internal/flow.py
CHANGED
@@ -3,12 +3,12 @@ from contextlib import asynccontextmanager
 from types import CodeType
 from typing import Protocol, overload
 
-from
-from
-from
-from
-from
-from
+from ..errors import Honk
+from .agent import Agent, IAgentLogger
+from .conversation import Conversation
+from .result import Result
+from .state import FlowArguments, FlowRun, get_current_flow_run, set_current_flow_run
+from .store import IFlowRunStore, InMemoryFlowRunStore
 
 
 class IGenerator[FlowArgumentsT: FlowArguments](Protocol):
@@ -20,7 +20,7 @@ class IGenerator[FlowArgumentsT: FlowArguments](Protocol):
 class IAdapter[ResultT: Result](Protocol):
     __code__: CodeType
 
-    async def __call__(self, *, conversation: Conversation
+    async def __call__(self, *, conversation: Conversation, agent: Agent) -> ResultT: ...
 
 
 class Flow[FlowArgumentsT: FlowArguments]:
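The IAdapter protocol now receives the Agent alongside the conversation. A conforming adapter might look like the following sketch (model choice and task name are illustrative):

    from aikernel import LLMModel
    from goose import TextResult

    async def summarize_adapter(*, conversation: Conversation, agent: Agent) -> TextResult:
        answer = await agent.ask(
            messages=conversation.render(),
            model=LLMModel.GEMINI_2_0_FLASH,
            task_name="summarize-adapter",
        )
        return TextResult(text=answer)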
goose/_internal/state.py
CHANGED
@@ -2,12 +2,12 @@ import json
 from contextvars import ContextVar
 from typing import TYPE_CHECKING, Any, NewType, Self
 
+from aikernel import LLMAssistantMessage, LLMSystemMessage, LLMUserMessage
 from pydantic import BaseModel, ConfigDict
 
 from goose._internal.agent import Agent, IAgentLogger
 from goose._internal.conversation import Conversation
 from goose._internal.result import Result
-from goose._internal.types.agent import SystemMessage, UserMessage
 from goose.errors import Honk
 
 if TYPE_CHECKING:
@@ -20,55 +20,55 @@ class FlowArguments(BaseModel):
     model_config = ConfigDict(frozen=True)
 
 
-class NodeState
+class NodeState(BaseModel):
     task_name: str
     index: int
-    conversation: Conversation
+    conversation: Conversation
     last_hash: int
 
     @property
-    def
+    def raw_result(self) -> str:
         for message in reversed(self.conversation.assistant_messages):
-            if
-                return message
+            if self.__message_is_result(message):
+                return message.parts[0].content
 
         raise Honk("Node awaiting response, has no result")
 
-    def set_context(self, *, context:
+    def set_context(self, *, context: LLMSystemMessage) -> Self:
         self.conversation.context = context
         return self
 
     def add_result(
         self,
         *,
-        result:
+        result: str,
         new_hash: int | None = None,
         overwrite: bool = False,
     ) -> Self:
         if overwrite and len(self.conversation.assistant_messages) > 0:
-            self.conversation.assistant_messages[-1] = result
+            self.conversation.assistant_messages[-1] = LLMAssistantMessage.from_text(result)
         else:
-            self.conversation.assistant_messages.append(result)
+            self.conversation.assistant_messages.append(LLMAssistantMessage.from_text(result))
         if new_hash is not None:
             self.last_hash = new_hash
         return self
 
     def add_answer(self, *, answer: str) -> Self:
-        self.conversation.assistant_messages.append(answer)
+        self.conversation.assistant_messages.append(LLMAssistantMessage.from_text(answer))
         return self
 
-    def add_user_message(self, *, message:
+    def add_user_message(self, *, message: LLMUserMessage) -> Self:
         self.conversation.user_messages.append(message)
         return self
 
-    def edit_last_result(self, *, result:
+    def edit_last_result(self, *, result: str) -> Self:
         if len(self.conversation.assistant_messages) == 0:
             raise Honk("Node awaiting response, has no result")
 
         for message_index, message in enumerate(reversed(self.conversation.assistant_messages)):
-            if
+            if self.__message_is_result(message):
                 index = len(self.conversation.assistant_messages) - message_index - 1
-                self.conversation.assistant_messages[index] = result
+                self.conversation.assistant_messages[index] = LLMAssistantMessage.from_text(result)
                 return self
 
         raise Honk("Node awaiting response, has no result")
@@ -77,6 +77,13 @@ class NodeState[ResultT: Result](BaseModel):
         self.conversation.undo()
         return self
 
+    def __message_is_result(self, message: LLMAssistantMessage, /) -> bool:
+        try:
+            _ = json.loads(message.parts[0].content)
+            return True
+        except json.JSONDecodeError:
+            return False
+
 
 class FlowRun[FlowArgumentsT: FlowArguments]:
     def __init__(self, *, flow_arguments_model: type[FlowArgumentsT]) -> None:
@@ -109,38 +116,47 @@ class FlowRun[FlowArgumentsT: FlowArguments]:
 
         return self._flow_arguments
 
-    def
-        matching_nodes: list[NodeState[R]] = []
-        for key, node_state in self._node_states.items():
-            if key[0] == task.name:
-                matching_nodes.append(NodeState[task.result_type].model_validate_json(node_state))
-        return sorted(matching_nodes, key=lambda node: node.index)
-
-    def get[R: Result](self, *, task: "Task[Any, R]", index: int = 0) -> NodeState[R]:
+    def get_state(self, *, task: "Task[Any, Any]", index: int = 0) -> NodeState:
         if (existing_node_state := self._node_states.get((task.name, index))) is not None:
-            return NodeState
+            return NodeState.model_validate_json(existing_node_state)
         else:
-            return NodeState
+            return NodeState(
                 task_name=task.name,
                 index=index,
-                conversation=Conversation
+                conversation=Conversation(user_messages=[], assistant_messages=[]),
                 last_hash=0,
             )
 
-    def
-        self._flow_arguments = flow_arguments
-
-    def upsert_node_state(self, node_state: NodeState[Any], /) -> None:
-        key = (node_state.task_name, node_state.index)
-        self._node_states[key] = node_state.model_dump_json()
-
-    def get_next[R: Result](self, *, task: "Task[Any, R]") -> NodeState[R]:
+    def get_next_state(self, *, task: "Task[Any, Any]", index: int = 0) -> NodeState:
         if task.name not in self._last_requested_indices:
             self._last_requested_indices[task.name] = 0
         else:
             self._last_requested_indices[task.name] += 1
 
-        return self.
+        return self.get_state(task=task, index=self._last_requested_indices[task.name])
+
+    def get_all_results[R: Result](self, *, task: "Task[Any, R]") -> list[R]:
+        matching_nodes: list[NodeState] = []
+        for key, node_state in self._node_states.items():
+            if key[0] == task.name:
+                matching_nodes.append(NodeState.model_validate_json(node_state))
+
+        sorted_nodes = sorted(matching_nodes, key=lambda node: node.index)
+        return [task.result_type.model_validate_json(node.raw_result) for node in sorted_nodes]
+
+    def get_result[R: Result](self, *, task: "Task[Any, R]", index: int = 0) -> R:
+        if (existing_node_state := self._node_states.get((task.name, index))) is not None:
+            parsed_node_state = NodeState.model_validate_json(existing_node_state)
+            return task.result_type.model_validate_json(parsed_node_state.raw_result)
+        else:
+            raise Honk(f"No result found for task {task.name} at index {index}")
+
+    def set_flow_arguments(self, flow_arguments: FlowArgumentsT, /) -> None:
+        self._flow_arguments = flow_arguments
+
+    def upsert_node_state(self, node_state: NodeState, /) -> None:
+        key = (node_state.task_name, node_state.index)
+        self._node_states[key] = node_state.model_dump_json()
 
     def start(
         self,
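NodeState now stores every assistant turn as an LLMAssistantMessage, so the new private __message_is_result separates structured results from conversational answers by whether the message body parses as JSON. The heuristic in isolation:

    import json

    def message_is_result(content: str) -> bool:
        try:
            json.loads(content)
            return True
        except json.JSONDecodeError:
            return False

    assert message_is_result('{"summary": "done"}')          # serialized Result
    assert not message_is_result("a conversational answer")  # ask()-style text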
goose/_internal/store.py
CHANGED
goose/_internal/task.py
CHANGED
@@ -2,12 +2,12 @@ import hashlib
 from collections.abc import Awaitable, Callable
 from typing import Any, overload
 
+from aikernel import LLMModel, LLMSystemMessage, LLMUserMessage
 from pydantic import BaseModel
 
-from goose._internal.agent import Agent
+from goose._internal.agent import Agent
 from goose._internal.result import Result
 from goose._internal.state import FlowRun, NodeState, get_current_flow_run
-from goose._internal.types.agent import SystemMessage, UserMessage
 from goose.errors import Honk
 
 
@@ -18,7 +18,7 @@ class Task[**P, R: Result]:
         /,
         *,
         retries: int = 0,
-        refinement_model:
+        refinement_model: LLMModel = LLMModel.GEMINI_2_0_FLASH,
     ) -> None:
         self._generator = generator
         self._retries = retries
@@ -35,28 +35,32 @@ class Task[**P, R: Result]:
     def name(self) -> str:
         return self._generator.__name__
 
-    async def generate(self, state: NodeState
+    async def generate(self, state: NodeState, *args: P.args, **kwargs: P.kwargs) -> R:
         state_hash = self.__hash_task_call(*args, **kwargs)
         if state_hash != state.last_hash:
             result = await self._generator(*args, **kwargs)
-            state.add_result(result=result, new_hash=state_hash, overwrite=True)
+            state.add_result(result=result.model_dump_json(), new_hash=state_hash, overwrite=True)
             return result
         else:
-            return state.
+            return self.result_type.model_validate_json(state.raw_result)
 
-    async def ask(
+    async def ask(
+        self, *, user_message: LLMUserMessage, context: LLMSystemMessage | None = None, index: int = 0
+    ) -> str:
         flow_run = self.__get_current_flow_run()
-        node_state = flow_run.
+        node_state = flow_run.get_state(task=self, index=index)
 
         if len(node_state.conversation.assistant_messages) == 0:
             raise Honk("Cannot ask about a task that has not been initially generated")
 
+        if context is not None:
+            node_state.set_context(context=context)
         node_state.add_user_message(message=user_message)
+
         answer = await flow_run.agent(
-            messages=node_state.conversation.
+            messages=node_state.conversation.render(),
             model=self._refinement_model,
             task_name=f"ask--{self.name}",
-            system=context,
             mode="ask",
         )
         node_state.add_answer(answer=answer)
@@ -67,12 +71,12 @@ class Task[**P, R: Result]:
     async def refine(
         self,
         *,
-        user_message:
-        context:
+        user_message: LLMUserMessage,
+        context: LLMSystemMessage | None = None,
         index: int = 0,
     ) -> R:
         flow_run = self.__get_current_flow_run()
-        node_state = flow_run.
+        node_state = flow_run.get_state(task=self, index=index)
 
         if len(node_state.conversation.assistant_messages) == 0:
             raise Honk("Cannot refine a task that has not been initially generated")
@@ -82,33 +86,32 @@ class Task[**P, R: Result]:
         node_state.add_user_message(message=user_message)
 
         result = await flow_run.agent(
-            messages=node_state.conversation.
+            messages=node_state.conversation.render(),
             model=self._refinement_model,
             task_name=f"refine--{self.name}",
-            system=context,
             response_model=self.result_type,
             mode="refine",
         )
-        node_state.add_result(result=result)
+        node_state.add_result(result=result.model_dump_json())
         flow_run.upsert_node_state(node_state)
 
         return result
 
     def edit(self, *, result: R, index: int = 0) -> None:
         flow_run = self.__get_current_flow_run()
-        node_state = flow_run.
-        node_state.edit_last_result(result=result)
+        node_state = flow_run.get_state(task=self, index=index)
+        node_state.edit_last_result(result=result.model_dump_json())
         flow_run.upsert_node_state(node_state)
 
     def undo(self, *, index: int = 0) -> None:
         flow_run = self.__get_current_flow_run()
-        node_state = flow_run.
+        node_state = flow_run.get_state(task=self, index=index)
         node_state.undo()
         flow_run.upsert_node_state(node_state)
 
     async def __call__(self, *args: P.args, **kwargs: P.kwargs) -> R:
         flow_run = self.__get_current_flow_run()
-        node_state = flow_run.
+        node_state = flow_run.get_next_state(task=self)
         result = await self.generate(node_state, *args, **kwargs)
         flow_run.upsert_node_state(node_state)
         return result
@@ -151,14 +154,14 @@ class Task[**P, R: Result]:
 def task[**P, R: Result](generator: Callable[P, Awaitable[R]], /) -> Task[P, R]: ...
 @overload
 def task[**P, R: Result](
-    *, retries: int = 0, refinement_model:
+    *, retries: int = 0, refinement_model: LLMModel = LLMModel.GEMINI_2_0_FLASH
 ) -> Callable[[Callable[P, Awaitable[R]]], Task[P, R]]: ...
 def task[**P, R: Result](
     generator: Callable[P, Awaitable[R]] | None = None,
     /,
     *,
     retries: int = 0,
-    refinement_model:
+    refinement_model: LLMModel = LLMModel.GEMINI_2_0_FLASH,
 ) -> Task[P, R] | Callable[[Callable[P, Awaitable[R]]], Task[P, R]]:
     if generator is None:
 
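Results are now serialized with model_dump_json() on write and revalidated with result_type.model_validate_json() on read, but task definitions keep the same decorator shape. A sketch (the Summary model and generator body are hypothetical):

    from aikernel import LLMModel
    from goose import Result, task

    class Summary(Result):
        text: str

    @task(retries=1, refinement_model=LLMModel.GEMINI_2_0_FLASH)
    async def summarize(document: str) -> Summary:
        return Summary(text=document[:100])  # stand-in for a real agent call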
goose/_internal/types/telemetry.py
CHANGED
@@ -2,10 +2,9 @@ import json
 from datetime import datetime
 from typing import ClassVar, TypedDict
 
+from aikernel import LiteLLMMessage, LLMModel
 from pydantic import BaseModel, computed_field
 
-from ..types.agent import AIModel, LLMMessage
-
 
 class AgentResponseDump(TypedDict):
     run_id: str
@@ -26,30 +25,30 @@ class AgentResponseDump(TypedDict):
 
 
 class AgentResponse[R: BaseModel | str](BaseModel):
-    INPUT_DOLLARS_PER_MILLION_TOKENS: ClassVar[dict[
-
-
-
-
-
-
+    INPUT_DOLLARS_PER_MILLION_TOKENS: ClassVar[dict[LLMModel, float]] = {
+        LLMModel.VERTEX_GEMINI_2_0_FLASH: 0.30,
+        LLMModel.VERTEX_GEMINI_2_0_FLASH_LITE: 0.15,
+        LLMModel.VERTEX_GEMINI_2_0_PRO_EXP_02_05: 5.00,
+        LLMModel.GEMINI_2_0_FLASH: 0.30,
+        LLMModel.GEMINI_2_0_FLASH_LITE: 0.15,
+        LLMModel.GEMINI_2_0_PRO_EXP_02_05: 5.00,
     }
-    OUTPUT_DOLLARS_PER_MILLION_TOKENS: ClassVar[dict[
-
-
-
-
-
-
+    OUTPUT_DOLLARS_PER_MILLION_TOKENS: ClassVar[dict[LLMModel, float]] = {
+        LLMModel.VERTEX_GEMINI_2_0_FLASH: 0.30,
+        LLMModel.VERTEX_GEMINI_2_0_FLASH_LITE: 0.15,
+        LLMModel.VERTEX_GEMINI_2_0_PRO_EXP_02_05: 5.00,
+        LLMModel.GEMINI_2_0_FLASH: 0.30,
+        LLMModel.GEMINI_2_0_FLASH_LITE: 0.15,
+        LLMModel.GEMINI_2_0_PRO_EXP_02_05: 5.00,
     }
 
     response: R
     run_id: str
     flow_name: str
     task_name: str
-    model:
-    system:
-    input_messages: list[
+    model: LLMModel
+    system: LiteLLMMessage | None = None
+    input_messages: list[LiteLLMMessage]
     input_tokens: int
     output_tokens: int
     start_time: datetime
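Both tables are priced in dollars per million tokens, so the implied per-call cost is a straight proration. A worked example at the GEMINI_2_0_FLASH rates above ($0.30 in, $0.30 out):

    input_tokens, output_tokens = 200_000, 50_000
    input_cost = input_tokens * 0.30 / 1_000_000    # $0.06
    output_cost = output_tokens * 0.30 / 1_000_000  # $0.015
    total_cost = input_cost + output_cost           # $0.075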
{goose_py-0.10.2.dist-info → goose_py-0.11.0.dist-info}/METADATA
CHANGED
@@ -1,11 +1,11 @@
 Metadata-Version: 2.4
 Name: goose-py
-Version: 0.10.2
+Version: 0.11.0
 Summary: A tool for AI workflows based on human-computer collaboration and structured output.
 Author-email: Nash Taylor <nash@chelle.ai>, Joshua Cook <joshua@chelle.ai>, Michael Sankur <michael@chelle.ai>
 Requires-Python: >=3.12
+Requires-Dist: aikernel>=0.1.5
 Requires-Dist: jsonpath-ng>=1.7.0
-Requires-Dist: litellm>=1.56.5
 Requires-Dist: pydantic>=2.8.2
 Description-Content-Type: text/markdown
 
goose_py-0.11.0.dist-info/RECORD
ADDED
@@ -0,0 +1,18 @@
+goose/__init__.py,sha256=Muw7HCImZHk3kLCTWhV9Lg-Sfmhnwf_Tae-zCj7woyY,338
+goose/errors.py,sha256=-0OyZQJWYTRw5YgnCB2_uorVaUsL6Z0QYQO2FqzCiyg,32
+goose/flow.py,sha256=YsZLBa5I1W27_P6LYGWbtFX8ZYx9vJG3KtENYChHm5E,111
+goose/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+goose/runs.py,sha256=ub-r_gzbUbaIzWXX-jc-dncNxEh6zTfzIkmnDfCSbRI,160
+goose/task.py,sha256=95rspdxETJoY12IHBl3KjnVIdqQnf1jDKlnGWNWOTvQ,53
+goose/_internal/agent.py,sha256=hpg8xXo7lhFDE3ezr8C0-BC61xWoXl1KChxu2n9o8Aw,8449
+goose/_internal/conversation.py,sha256=vhJwe1pHk2lV60DaB9Tz9KbpzQo7_thRYInPjbIoUTE,1437
+goose/_internal/flow.py,sha256=8MJxlhHYSAzUHZefpF_sRJc37o532OF0X7l3KRopDmc,4115
+goose/_internal/result.py,sha256=vtJMfBxb9skfl8st2tn4hBmEq6qmXiJTme_B5QTgu2M,538
+goose/_internal/state.py,sha256=jGKKZKeDcKaCY-uqgXLOnMNwyzR5qEH5m9afOtixsLk,7923
+goose/_internal/store.py,sha256=tWmKfa1-yq1jU6lT3l6kSOmVt2m3H7I1xLMTrxnUDI8,889
+goose/_internal/task.py,sha256=X_eRZxZlf6SwyvF1nIyjoneyqD_TISXqESyxluk63mE,6416
+goose/_internal/types/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+goose/_internal/types/telemetry.py,sha256=wDQz1C60KxlftC0aQCXIjswrSYGI1KNaM2wtnlP1Q7k,3823
+goose_py-0.11.0.dist-info/METADATA,sha256=LZvLxXhKOj-edk54qSemq8dxnplsUmSzPRPIrmAxO-w,442
+goose_py-0.11.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+goose_py-0.11.0.dist-info/RECORD,,
goose/_internal/types/agent.py
DELETED
@@ -1,101 +0,0 @@
-import base64
-from enum import StrEnum
-from typing import Literal, NotRequired, TypedDict
-
-from pydantic import BaseModel
-
-
-class AIModel(StrEnum):
-    # vertex (production Google, requires GCP environment)
-    VERTEX_PRO = "vertex_ai/gemini-1.5-pro"
-    VERTEX_FLASH = "vertex_ai/gemini-1.5-flash"
-    VERTEX_FLASH_8B = "vertex_ai/gemini-1.5-flash-8b"
-    VERTEX_FLASH_2_0 = "vertex_ai/gemini-2.0-flash"
-
-    # gemini (publicly available, no GCP environment required)
-    GEMINI_PRO = "gemini/gemini-1.5-pro"
-    GEMINI_FLASH = "gemini/gemini-1.5-flash"
-    GEMINI_FLASH_8B = "gemini/gemini-1.5-flash-8b"
-    GEMINI_FLASH_2_0 = "gemini/gemini-2.0-flash"
-
-
-class ContentType(StrEnum):
-    # text
-    TEXT = "text/plain"
-
-    # images
-    JPEG = "image/jpeg"
-    PNG = "image/png"
-    WEBP = "image/webp"
-
-    # audio
-    MP3 = "audio/mp3"
-    WAV = "audio/wav"
-
-    # files
-    PDF = "application/pdf"
-
-
-class LLMTextMessagePart(TypedDict):
-    type: Literal["text"]
-    text: str
-
-
-class LLMMediaMessagePart(TypedDict):
-    type: Literal["image_url"]
-    image_url: str
-
-
-class CacheControl(TypedDict):
-    type: Literal["ephemeral"]
-
-
-class LLMMessage(TypedDict):
-    role: Literal["user", "assistant", "system"]
-    content: list[LLMTextMessagePart | LLMMediaMessagePart]
-    cache_control: NotRequired[CacheControl]
-
-
-class MessagePart(BaseModel):
-    content: str
-    content_type: ContentType = ContentType.TEXT
-
-    @classmethod
-    def from_media(cls, *, content: bytes, content_type: ContentType) -> "MessagePart":
-        return cls(content=base64.b64encode(content).decode(), content_type=content_type)
-
-    def render(self) -> LLMTextMessagePart | LLMMediaMessagePart:
-        if self.content_type == ContentType.TEXT:
-            return {"type": "text", "text": self.content}
-        else:
-            return {"type": "image_url", "image_url": f"data:{self.content_type};base64,{self.content}"}
-
-
-class UserMessage(BaseModel):
-    parts: list[MessagePart]
-
-    def render(self) -> LLMMessage:
-        content: LLMMessage = {
-            "role": "user",
-            "content": [part.render() for part in self.parts],
-        }
-        if any(part.content_type != ContentType.TEXT for part in self.parts):
-            content["cache_control"] = {"type": "ephemeral"}
-        return content
-
-
-class AssistantMessage(BaseModel):
-    text: str
-
-    def render(self) -> LLMMessage:
-        return {"role": "assistant", "content": [{"type": "text", "text": self.text}]}
-
-
-class SystemMessage(BaseModel):
-    parts: list[MessagePart]
-
-    def render(self) -> LLMMessage:
-        return {
-            "role": "system",
-            "content": [part.render() for part in self.parts],
-        }
goose/agent.py
DELETED
@@ -1,26 +0,0 @@
-from ._internal.agent import AgentResponse, IAgentLogger
-from ._internal.types.agent import (
-    AIModel,
-    AssistantMessage,
-    ContentType,
-    LLMMediaMessagePart,
-    LLMMessage,
-    LLMTextMessagePart,
-    MessagePart,
-    SystemMessage,
-    UserMessage,
-)
-
-__all__ = [
-    "AgentResponse",
-    "AIModel",
-    "IAgentLogger",
-    "AssistantMessage",
-    "LLMMediaMessagePart",
-    "LLMMessage",
-    "LLMTextMessagePart",
-    "SystemMessage",
-    "MessagePart",
-    "ContentType",
-    "UserMessage",
-]
goose_py-0.10.2.dist-info/RECORD
DELETED
@@ -1,20 +0,0 @@
-goose/__init__.py,sha256=wjGDgWzKcD6S8loVr0n-rLCpRwg-ZKAixcUaw1wobMc,243
-goose/agent.py,sha256=u6daAnn4fPgP4Jk9cHANyCEku3RmUqKLdqtyGSr8ljI,510
-goose/errors.py,sha256=-0OyZQJWYTRw5YgnCB2_uorVaUsL6Z0QYQO2FqzCiyg,32
-goose/flow.py,sha256=YsZLBa5I1W27_P6LYGWbtFX8ZYx9vJG3KtENYChHm5E,111
-goose/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-goose/runs.py,sha256=ub-r_gzbUbaIzWXX-jc-dncNxEh6zTfzIkmnDfCSbRI,160
-goose/task.py,sha256=95rspdxETJoY12IHBl3KjnVIdqQnf1jDKlnGWNWOTvQ,53
-goose/_internal/agent.py,sha256=yNkX0lZueKiGxYsSwbZRNn88HfDBYbVpJLDfyr6Var0,8893
-goose/_internal/conversation.py,sha256=I0Ru5D7piOOQlmFhUpwEeQUQxOVq59yyHEDrmYhwPMI,1695
-goose/_internal/flow.py,sha256=05U2f5i8ofQWDjghhomwuuEPMk-ftzXn7BVl_s7pIf8,4203
-goose/_internal/result.py,sha256=vtJMfBxb9skfl8st2tn4hBmEq6qmXiJTme_B5QTgu2M,538
-goose/_internal/state.py,sha256=U4gM0K4MAlRFTpqenCYHX9TYGuhWVKIfa4yBeZ9Qc9s,7090
-goose/_internal/store.py,sha256=GMW0wBpxESmRBLfL_lFKEi9x2P6Wd6-gZ7AWjWBTUmA,904
-goose/_internal/task.py,sha256=MXBVLepFSphkSbGgzh7U7QMRXDvz_4MNet_EsrlAKTQ,6244
-goose/_internal/types/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-goose/_internal/types/agent.py,sha256=g0KD-aPWZlUGBx72AwQd3LeniFxHATeflZ7191QjFZA,2696
-goose/_internal/types/telemetry.py,sha256=7zeqyDDxf95puirNM6Gr9VFuxoDshXcV1__V0tiMswE,3663
-goose_py-0.10.2.dist-info/METADATA,sha256=VoLeEDnKj2PfjzVyJCt8eSQQygbFyWFihpADvJ4DTLw,442
-goose_py-0.10.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-goose_py-0.10.2.dist-info/RECORD,,
{goose_py-0.10.2.dist-info → goose_py-0.11.0.dist-info}/WHEEL
File without changes