goose-py 0.10.1__py3-none-any.whl → 0.10.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
goose/_internal/agent.py CHANGED
@@ -5,12 +5,11 @@ from typing import Any, Literal, Protocol, overload
  from litellm import acompletion
  from pydantic import ValidationError

+ from goose._internal.result import FindReplaceResponse, Result, TextResult
+ from goose._internal.types.agent import AIModel, AssistantMessage, SystemMessage, UserMessage
  from goose._internal.types.telemetry import AgentResponse
  from goose.errors import Honk

- from .result import FindReplaceResponse, Result, TextResult
- from .types.agent import AIModel, LLMMessage
-

  class IAgentLogger(Protocol):
      async def __call__(self, *, response: AgentResponse[Any]) -> None: ...
@@ -31,23 +30,27 @@ class Agent:
      async def generate[R: Result](
          self,
          *,
-         messages: list[LLMMessage],
+         messages: list[UserMessage | AssistantMessage],
          model: AIModel,
          task_name: str,
          response_model: type[R] = TextResult,
-         system: LLMMessage | None = None,
+         system: SystemMessage | None = None,
      ) -> R:
-         start_time = datetime.now()
-         if system is not None:
-             messages.insert(0, system)
+         rendered_messages = [message.render() for message in messages]
+         rendered_system = system.render() if system is not None else None
+
+         completion_messages = (
+             [rendered_system] + rendered_messages if rendered_system is not None else rendered_messages
+         )

+         start_time = datetime.now()
          if response_model is TextResult:
-             response = await acompletion(model=model.value, messages=messages)
+             response = await acompletion(model=model.value, messages=completion_messages)
              parsed_response = response_model.model_validate({"text": response.choices[0].message.content})
          else:
              response = await acompletion(
                  model=model.value,
-                 messages=messages,
+                 messages=completion_messages,
                  response_format=response_model,
              )
              parsed_response = response_model.model_validate_json(response.choices[0].message.content)
@@ -59,8 +62,8 @@ class Agent:
              flow_name=self.flow_name,
              task_name=task_name,
              model=model,
-             system=system,
-             input_messages=messages,
+             system=rendered_system,
+             input_messages=rendered_messages,
              input_tokens=response.usage.prompt_tokens,
              output_tokens=response.usage.completion_tokens,
              start_time=start_time,
@@ -75,13 +78,22 @@ class Agent:
          return parsed_response

      async def ask(
-         self, *, messages: list[LLMMessage], model: AIModel, task_name: str, system: LLMMessage | None = None
+         self,
+         *,
+         messages: list[UserMessage | AssistantMessage],
+         model: AIModel,
+         task_name: str,
+         system: SystemMessage | None = None,
      ) -> str:
-         start_time = datetime.now()
+         rendered_messages = [message.render() for message in messages]
+         rendered_system = system.render() if system is not None else None
+
+         completion_messages = (
+             [rendered_system] + rendered_messages if rendered_system is not None else rendered_messages
+         )

-         if system is not None:
-             messages.insert(0, system)
-         response = await acompletion(model=model.value, messages=messages)
+         start_time = datetime.now()
+         response = await acompletion(model=model.value, messages=completion_messages)

          end_time = datetime.now()
          agent_response = AgentResponse(
@@ -90,8 +102,8 @@ class Agent:
              flow_name=self.flow_name,
              task_name=task_name,
              model=model,
-             system=system,
-             input_messages=messages,
+             system=rendered_system,
+             input_messages=rendered_messages,
              input_tokens=response.usage.prompt_tokens,
              output_tokens=response.usage.completion_tokens,
              start_time=start_time,
@@ -108,19 +120,23 @@ class Agent:
      async def refine[R: Result](
          self,
          *,
-         messages: list[LLMMessage],
+         messages: list[UserMessage | AssistantMessage],
          model: AIModel,
          task_name: str,
          response_model: type[R],
-         system: LLMMessage | None = None,
+         system: SystemMessage | None = None,
      ) -> R:
          start_time = datetime.now()

-         if system is not None:
-             messages.insert(0, system)
+         rendered_messages = [message.render() for message in messages]
+         rendered_system = system.render() if system is not None else None
+
+         completion_messages = (
+             [rendered_system] + rendered_messages if rendered_system is not None else rendered_messages
+         )

          find_replace_response = await acompletion(
-             model=model.value, messages=messages, response_format=FindReplaceResponse
+             model=model.value, messages=completion_messages, response_format=FindReplaceResponse
          )
          parsed_find_replace_response = FindReplaceResponse.model_validate_json(
              find_replace_response.choices[0].message.content
@@ -133,8 +149,8 @@ class Agent:
              flow_name=self.flow_name,
              task_name=task_name,
              model=model,
-             system=system,
-             input_messages=messages,
+             system=rendered_system,
+             input_messages=rendered_messages,
              input_tokens=find_replace_response.usage.prompt_tokens,
              output_tokens=find_replace_response.usage.completion_tokens,
              start_time=start_time,
@@ -158,58 +174,58 @@ class Agent:
      async def __call__[R: Result](
          self,
          *,
-         messages: list[LLMMessage],
+         messages: list[UserMessage | AssistantMessage],
          model: AIModel,
          task_name: str,
          mode: Literal["generate"],
          response_model: type[R],
-         system: LLMMessage | None = None,
+         system: SystemMessage | None = None,
      ) -> R: ...

      @overload
      async def __call__[R: Result](
          self,
          *,
-         messages: list[LLMMessage],
+         messages: list[UserMessage | AssistantMessage],
          model: AIModel,
          task_name: str,
          mode: Literal["ask"],
          response_model: type[R] = TextResult,
-         system: LLMMessage | None = None,
+         system: SystemMessage | None = None,
      ) -> str: ...

      @overload
      async def __call__[R: Result](
          self,
          *,
-         messages: list[LLMMessage],
+         messages: list[UserMessage | AssistantMessage],
          model: AIModel,
          task_name: str,
          response_model: type[R],
          mode: Literal["refine"],
-         system: LLMMessage | None = None,
+         system: SystemMessage | None = None,
      ) -> R: ...

      @overload
      async def __call__[R: Result](
          self,
          *,
-         messages: list[LLMMessage],
+         messages: list[UserMessage | AssistantMessage],
          model: AIModel,
          task_name: str,
          response_model: type[R],
-         system: LLMMessage | None = None,
+         system: SystemMessage | None = None,
      ) -> R: ...

      async def __call__[R: Result](
          self,
          *,
-         messages: list[LLMMessage],
+         messages: list[UserMessage | AssistantMessage],
          model: AIModel,
          task_name: str,
          response_model: type[R] = TextResult,
          mode: Literal["generate", "ask", "refine"] = "generate",
-         system: LLMMessage | None = None,
+         system: SystemMessage | None = None,
      ) -> R | str:
          match mode:
              case "generate":
@@ -232,13 +248,13 @@ class Agent:

          return response_model.model_validate_json(dumped_result)

-     def __find_last_result[R: Result](self, *, messages: list[LLMMessage], response_model: type[R]) -> R:
+     def __find_last_result[R: Result](
+         self, *, messages: list[UserMessage | AssistantMessage], response_model: type[R]
+     ) -> R:
          for message in reversed(messages):
-             if message["role"] == "assistant":
+             if isinstance(message, AssistantMessage):
                  try:
-                     only_part = message["content"][0]
-                     if only_part["type"] == "text":
-                         return response_model.model_validate_json(only_part["text"])
+                     return response_model.model_validate_json(message.text)
                  except ValidationError:
                      continue

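The net effect in agent.py: generate, ask, and refine now accept typed UserMessage | AssistantMessage objects plus an optional SystemMessage, render them to litellm-style dicts at the acompletion boundary, and build a fresh completion_messages list rather than mutating the caller's list via messages.insert(0, system); the telemetry payload correspondingly records the rendered forms. A minimal sketch of the pattern, using hypothetical SimpleUserMessage/SimpleSystemMessage stand-ins; only the render() contract (typed model producing a role/content dict) is taken from the diff:

from pydantic import BaseModel


class SimpleUserMessage(BaseModel):
    text: str

    def render(self) -> dict[str, str]:
        return {"role": "user", "content": self.text}


class SimpleSystemMessage(BaseModel):
    text: str

    def render(self) -> dict[str, str]:
        return {"role": "system", "content": self.text}


def build_completion_messages(
    messages: list[SimpleUserMessage],
    system: SimpleSystemMessage | None = None,
) -> list[dict[str, str]]:
    # Render the typed messages, then prepend the rendered system message.
    # Building a new list (instead of 0.10.1's messages.insert(0, system))
    # leaves the caller's conversation untouched, so repeated calls cannot
    # accumulate duplicate system messages.
    rendered_messages = [message.render() for message in messages]
    rendered_system = system.render() if system is not None else None
    return [rendered_system] + rendered_messages if rendered_system is not None else rendered_messages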
goose/_internal/conversation.py CHANGED
@@ -2,11 +2,10 @@ from typing import Self

  from pydantic import BaseModel

+ from goose._internal.result import Result
+ from goose._internal.types.agent import AssistantMessage, SystemMessage, UserMessage
  from goose.errors import Honk

- from .result import Result
- from .types.agent import AssistantMessage, LLMMessage, SystemMessage, UserMessage
-

  class Conversation[R: Result](BaseModel):
      user_messages: list[UserMessage]
@@ -17,26 +16,23 @@ class Conversation[R: Result](BaseModel):
      def awaiting_response(self) -> bool:
          return len(self.user_messages) == len(self.assistant_messages)

-     def render(self) -> list[LLMMessage]:
-         messages: list[LLMMessage] = []
-         if self.context is not None:
-             messages.append(self.context.render())
-
+     def get_messages(self) -> list[UserMessage | AssistantMessage]:
+         messages: list[UserMessage | AssistantMessage] = []
          for message_index in range(len(self.user_messages)):
              message = self.assistant_messages[message_index]
              if isinstance(message, str):
-                 messages.append(AssistantMessage(text=message).render())
+                 messages.append(AssistantMessage(text=message))
              else:
-                 messages.append(AssistantMessage(text=message.model_dump_json()).render())
+                 messages.append(AssistantMessage(text=message.model_dump_json()))

-             messages.append(self.user_messages[message_index].render())
+             messages.append(self.user_messages[message_index])

          if len(self.assistant_messages) > len(self.user_messages):
              message = self.assistant_messages[-1]
              if isinstance(message, str):
-                 messages.append(AssistantMessage(text=message).render())
+                 messages.append(AssistantMessage(text=message))
              else:
-                 messages.append(AssistantMessage(text=message.model_dump_json()).render())
+                 messages.append(AssistantMessage(text=message.model_dump_json()))

          return messages

goose/_internal/flow.py CHANGED
@@ -3,12 +3,12 @@ from contextlib import asynccontextmanager
  from types import CodeType
  from typing import Protocol, overload

- from ..errors import Honk
- from .agent import Agent, IAgentLogger
- from .conversation import Conversation
- from .result import Result
- from .state import FlowArguments, FlowRun, get_current_flow_run, set_current_flow_run
- from .store import IFlowRunStore, InMemoryFlowRunStore
+ from goose._internal.agent import Agent, IAgentLogger
+ from goose._internal.conversation import Conversation
+ from goose._internal.result import Result
+ from goose._internal.state import FlowArguments, FlowRun, get_current_flow_run, set_current_flow_run
+ from goose._internal.store import IFlowRunStore, InMemoryFlowRunStore
+ from goose.errors import Honk


  class IGenerator[FlowArgumentsT: FlowArguments](Protocol):
goose/_internal/store.py CHANGED
@@ -2,7 +2,7 @@ from __future__ import annotations

  from typing import Protocol

- from .state import SerializedFlowRun
+ from goose._internal.state import SerializedFlowRun


  class IFlowRunStore(Protocol):
goose/_internal/task.py CHANGED
@@ -4,11 +4,11 @@ from typing import Any, overload

  from pydantic import BaseModel

- from ..errors import Honk
- from .agent import Agent, AIModel
- from .result import Result
- from .state import FlowRun, NodeState, get_current_flow_run
- from .types.agent import SystemMessage, UserMessage
+ from goose._internal.agent import Agent, AIModel
+ from goose._internal.result import Result
+ from goose._internal.state import FlowRun, NodeState, get_current_flow_run
+ from goose._internal.types.agent import SystemMessage, UserMessage
+ from goose.errors import Honk


  class Task[**P, R: Result]:
@@ -53,10 +53,10 @@ class Task[**P, R: Result]:

          node_state.add_user_message(message=user_message)
          answer = await flow_run.agent(
-             messages=node_state.conversation.render(),
+             messages=node_state.conversation.get_messages(),
              model=self._refinement_model,
              task_name=f"ask--{self.name}",
-             system=context.render() if context is not None else None,
+             system=context,
              mode="ask",
          )
          node_state.add_answer(answer=answer)
@@ -82,10 +82,10 @@ class Task[**P, R: Result]:
          node_state.add_user_message(message=user_message)

          result = await flow_run.agent(
-             messages=node_state.conversation.render(),
+             messages=node_state.conversation.get_messages(),
              model=self._refinement_model,
              task_name=f"refine--{self.name}",
-             system=context.render() if context is not None else None,
+             system=context,
              response_model=self.result_type,
              mode="refine",
          )
goose_py-0.10.1.dist-info/METADATA → goose_py-0.10.2.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: goose-py
- Version: 0.10.1
+ Version: 0.10.2
  Summary: A tool for AI workflows based on human-computer collaboration and structured output.
  Author-email: Nash Taylor <nash@chelle.ai>, Joshua Cook <joshua@chelle.ai>, Michael Sankur <michael@chelle.ai>
  Requires-Python: >=3.12
goose_py-0.10.1.dist-info/RECORD → goose_py-0.10.2.dist-info/RECORD RENAMED
@@ -5,16 +5,16 @@ goose/flow.py,sha256=YsZLBa5I1W27_P6LYGWbtFX8ZYx9vJG3KtENYChHm5E,111
  goose/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  goose/runs.py,sha256=ub-r_gzbUbaIzWXX-jc-dncNxEh6zTfzIkmnDfCSbRI,160
  goose/task.py,sha256=95rspdxETJoY12IHBl3KjnVIdqQnf1jDKlnGWNWOTvQ,53
- goose/_internal/agent.py,sha256=v_nzQLM_vyKWOc-g1Ke8TwlThkThjraE4HFHNywHMv0,7915
- goose/_internal/conversation.py,sha256=zvKqLxJSCIIuhD7gjcSFhleYsLabu-ALl9woWFy3mQU,1766
- goose/_internal/flow.py,sha256=RShMsxgt49g1fZJ3rlwDHtI1j39lZzewx8hZ7DGN5kg,4124
+ goose/_internal/agent.py,sha256=yNkX0lZueKiGxYsSwbZRNn88HfDBYbVpJLDfyr6Var0,8893
+ goose/_internal/conversation.py,sha256=I0Ru5D7piOOQlmFhUpwEeQUQxOVq59yyHEDrmYhwPMI,1695
+ goose/_internal/flow.py,sha256=05U2f5i8ofQWDjghhomwuuEPMk-ftzXn7BVl_s7pIf8,4203
  goose/_internal/result.py,sha256=vtJMfBxb9skfl8st2tn4hBmEq6qmXiJTme_B5QTgu2M,538
  goose/_internal/state.py,sha256=U4gM0K4MAlRFTpqenCYHX9TYGuhWVKIfa4yBeZ9Qc9s,7090
- goose/_internal/store.py,sha256=tWmKfa1-yq1jU6lT3l6kSOmVt2m3H7I1xLMTrxnUDI8,889
- goose/_internal/task.py,sha256=mhcmKDTBl993P3HP3PlNvQtl4gMYy4FMYeQ9xrg5aPk,6252
+ goose/_internal/store.py,sha256=GMW0wBpxESmRBLfL_lFKEi9x2P6Wd6-gZ7AWjWBTUmA,904
+ goose/_internal/task.py,sha256=MXBVLepFSphkSbGgzh7U7QMRXDvz_4MNet_EsrlAKTQ,6244
  goose/_internal/types/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  goose/_internal/types/agent.py,sha256=g0KD-aPWZlUGBx72AwQd3LeniFxHATeflZ7191QjFZA,2696
  goose/_internal/types/telemetry.py,sha256=7zeqyDDxf95puirNM6Gr9VFuxoDshXcV1__V0tiMswE,3663
- goose_py-0.10.1.dist-info/METADATA,sha256=L2yzL8ZW09_75wmrK5YSeEZ2H0RkrODL0zWm1nWW-uA,442
- goose_py-0.10.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- goose_py-0.10.1.dist-info/RECORD,,
+ goose_py-0.10.2.dist-info/METADATA,sha256=VoLeEDnKj2PfjzVyJCt8eSQQygbFyWFihpADvJ4DTLw,442
+ goose_py-0.10.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ goose_py-0.10.2.dist-info/RECORD,,