goose-py 0.10.2__py3-none-any.whl → 0.11.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
goose/__init__.py CHANGED
@@ -1,6 +1,7 @@
- from ._internal.agent import Agent
- from ._internal.flow import FlowArguments, flow
- from ._internal.result import Result, TextResult
- from ._internal.task import task
+ from goose._internal.agent import Agent
+ from goose._internal.flow import FlowArguments, flow
+ from goose._internal.result import Result, TextResult
+ from goose._internal.task import task
+ from goose._internal.types.telemetry import AgentResponse
 
- __all__ = ["Agent", "flow", "FlowArguments", "Result", "TextResult", "task"]
+ __all__ = ["Agent", "flow", "FlowArguments", "Result", "TextResult", "task", "AgentResponse"]
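
The package root now also exports the telemetry response type. A minimal sketch of the 0.11.1 import surface, limited to the names in the new __all__ above:

    # All of these names are re-exported by goose/__init__.py in 0.11.1;
    # AgentResponse is the newly public addition.
    from goose import Agent, AgentResponse, FlowArguments, Result, TextResult, flow, task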
goose/_internal/agent.py CHANGED
@@ -2,11 +2,17 @@ import logging
  from datetime import datetime
  from typing import Any, Literal, Protocol, overload
 
- from litellm import acompletion
+ from aikernel import (
+     LLMAssistantMessage,
+     LLMModel,
+     LLMSystemMessage,
+     LLMUserMessage,
+     llm_structured,
+     llm_unstructured,
+ )
  from pydantic import ValidationError
 
  from goose._internal.result import FindReplaceResponse, Result, TextResult
- from goose._internal.types.agent import AIModel, AssistantMessage, SystemMessage, UserMessage
  from goose._internal.types.telemetry import AgentResponse
  from goose.errors import Honk
 
@@ -30,42 +36,39 @@ class Agent:
      async def generate[R: Result](
          self,
          *,
-         messages: list[UserMessage | AssistantMessage],
-         model: AIModel,
+         messages: list[LLMUserMessage | LLMAssistantMessage | LLMSystemMessage],
+         model: LLMModel,
          task_name: str,
          response_model: type[R] = TextResult,
-         system: SystemMessage | None = None,
      ) -> R:
-         rendered_messages = [message.render() for message in messages]
-         rendered_system = system.render() if system is not None else None
-
-         completion_messages = (
-             [rendered_system] + rendered_messages if rendered_system is not None else rendered_messages
-         )
-
          start_time = datetime.now()
+
          if response_model is TextResult:
-             response = await acompletion(model=model.value, messages=completion_messages)
-             parsed_response = response_model.model_validate({"text": response.choices[0].message.content})
+             response = await llm_unstructured(model=model, messages=messages)
+             parsed_response = response_model.model_validate({"text": response.text})
          else:
-             response = await acompletion(
-                 model=model.value,
-                 messages=completion_messages,
-                 response_format=response_model,
-             )
-             parsed_response = response_model.model_validate_json(response.choices[0].message.content)
+             response = await llm_structured(model=model, messages=messages, response_model=response_model)
+             parsed_response = response.structured_response
 
          end_time = datetime.now()
+
+         if isinstance(messages[0], LLMSystemMessage):
+             system = messages[0].render()
+             input_messages = [message.render() for message in messages[1:]]
+         else:
+             system = None
+             input_messages = [message.render() for message in messages]
+
          agent_response = AgentResponse(
              response=parsed_response,
              run_id=self.run_id,
              flow_name=self.flow_name,
              task_name=task_name,
              model=model,
-             system=rendered_system,
-             input_messages=rendered_messages,
-             input_tokens=response.usage.prompt_tokens,
-             output_tokens=response.usage.completion_tokens,
+             system=system,
+             input_messages=input_messages,
+             input_tokens=response.usage.input_tokens,
+             output_tokens=response.usage.output_tokens,
              start_time=start_time,
              end_time=end_time,
          )
@@ -80,32 +83,31 @@ class Agent:
      async def ask(
          self,
          *,
-         messages: list[UserMessage | AssistantMessage],
-         model: AIModel,
+         messages: list[LLMUserMessage | LLMAssistantMessage | LLMSystemMessage],
+         model: LLMModel,
          task_name: str,
-         system: SystemMessage | None = None,
      ) -> str:
-         rendered_messages = [message.render() for message in messages]
-         rendered_system = system.render() if system is not None else None
-
-         completion_messages = (
-             [rendered_system] + rendered_messages if rendered_system is not None else rendered_messages
-         )
-
          start_time = datetime.now()
-         response = await acompletion(model=model.value, messages=completion_messages)
-
+         response = await llm_unstructured(model=model, messages=messages)
          end_time = datetime.now()
+
+         if isinstance(messages[0], LLMSystemMessage):
+             system = messages[0].render()
+             input_messages = [message.render() for message in messages[1:]]
+         else:
+             system = None
+             input_messages = [message.render() for message in messages]
+
          agent_response = AgentResponse(
-             response=response.choices[0].message.content,
+             response=response.text,
              run_id=self.run_id,
              flow_name=self.flow_name,
              task_name=task_name,
              model=model,
-             system=rendered_system,
-             input_messages=rendered_messages,
-             input_tokens=response.usage.prompt_tokens,
-             output_tokens=response.usage.completion_tokens,
+             system=system,
+             input_messages=input_messages,
+             input_tokens=response.usage.input_tokens,
+             output_tokens=response.usage.output_tokens,
              start_time=start_time,
              end_time=end_time,
          )
@@ -115,44 +117,38 @@ class Agent:
          else:
              logging.info(agent_response.model_dump())
 
-         return response.choices[0].message.content
+         return response.text
 
      async def refine[R: Result](
          self,
          *,
-         messages: list[UserMessage | AssistantMessage],
-         model: AIModel,
+         messages: list[LLMUserMessage | LLMAssistantMessage | LLMSystemMessage],
+         model: LLMModel,
          task_name: str,
          response_model: type[R],
-         system: SystemMessage | None = None,
      ) -> R:
          start_time = datetime.now()
+         find_replace_response = await llm_structured(model=model, messages=messages, response_model=FindReplaceResponse)
+         parsed_find_replace_response = find_replace_response.structured_response
+         end_time = datetime.now()
 
-         rendered_messages = [message.render() for message in messages]
-         rendered_system = system.render() if system is not None else None
-
-         completion_messages = (
-             [rendered_system] + rendered_messages if rendered_system is not None else rendered_messages
-         )
-
-         find_replace_response = await acompletion(
-             model=model.value, messages=completion_messages, response_format=FindReplaceResponse
-         )
-         parsed_find_replace_response = FindReplaceResponse.model_validate_json(
-             find_replace_response.choices[0].message.content
-         )
+         if isinstance(messages[0], LLMSystemMessage):
+             system = messages[0].render()
+             input_messages = [message.render() for message in messages[1:]]
+         else:
+             system = None
+             input_messages = [message.render() for message in messages]
 
-         end_time = datetime.now()
          agent_response = AgentResponse(
              response=parsed_find_replace_response,
              run_id=self.run_id,
              flow_name=self.flow_name,
              task_name=task_name,
              model=model,
-             system=rendered_system,
-             input_messages=rendered_messages,
-             input_tokens=find_replace_response.usage.prompt_tokens,
-             output_tokens=find_replace_response.usage.completion_tokens,
+             system=system,
+             input_messages=input_messages,
+             input_tokens=find_replace_response.usage.input_tokens,
+             output_tokens=find_replace_response.usage.output_tokens,
              start_time=start_time,
              end_time=end_time,
          )
@@ -174,69 +170,64 @@ class Agent:
      async def __call__[R: Result](
          self,
          *,
-         messages: list[UserMessage | AssistantMessage],
-         model: AIModel,
+         messages: list[LLMUserMessage | LLMAssistantMessage | LLMSystemMessage],
+         model: LLMModel,
          task_name: str,
          mode: Literal["generate"],
          response_model: type[R],
-         system: SystemMessage | None = None,
      ) -> R: ...
 
      @overload
      async def __call__[R: Result](
          self,
          *,
-         messages: list[UserMessage | AssistantMessage],
-         model: AIModel,
+         messages: list[LLMUserMessage | LLMAssistantMessage | LLMSystemMessage],
+         model: LLMModel,
          task_name: str,
          mode: Literal["ask"],
          response_model: type[R] = TextResult,
-         system: SystemMessage | None = None,
      ) -> str: ...
 
      @overload
      async def __call__[R: Result](
          self,
          *,
-         messages: list[UserMessage | AssistantMessage],
-         model: AIModel,
+         messages: list[LLMUserMessage | LLMAssistantMessage | LLMSystemMessage],
+         model: LLMModel,
          task_name: str,
          response_model: type[R],
          mode: Literal["refine"],
-         system: SystemMessage | None = None,
      ) -> R: ...
 
      @overload
      async def __call__[R: Result](
          self,
          *,
-         messages: list[UserMessage | AssistantMessage],
-         model: AIModel,
+         messages: list[LLMUserMessage | LLMAssistantMessage | LLMSystemMessage],
+         model: LLMModel,
          task_name: str,
          response_model: type[R],
-         system: SystemMessage | None = None,
      ) -> R: ...
 
      async def __call__[R: Result](
          self,
          *,
-         messages: list[UserMessage | AssistantMessage],
-         model: AIModel,
+         messages: list[LLMUserMessage | LLMAssistantMessage | LLMSystemMessage],
+         model: LLMModel,
          task_name: str,
          response_model: type[R] = TextResult,
          mode: Literal["generate", "ask", "refine"] = "generate",
-         system: SystemMessage | None = None,
      ) -> R | str:
          match mode:
              case "generate":
                  return await self.generate(
-                     messages=messages, model=model, task_name=task_name, response_model=response_model, system=system
+                     messages=messages, model=model, task_name=task_name, response_model=response_model
                  )
              case "ask":
-                 return await self.ask(messages=messages, model=model, task_name=task_name, system=system)
+                 return await self.ask(messages=messages, model=model, task_name=task_name)
              case "refine":
                  return await self.refine(
-                     messages=messages, model=model, task_name=task_name, response_model=response_model, system=system
+                     messages=messages, model=model, task_name=task_name, response_model=response_model
                  )
 
      def __apply_find_replace[R: Result](
@@ -249,12 +240,12 @@ class Agent:
          return response_model.model_validate_json(dumped_result)
 
      def __find_last_result[R: Result](
-         self, *, messages: list[UserMessage | AssistantMessage], response_model: type[R]
+         self, *, messages: list[LLMUserMessage | LLMAssistantMessage | LLMSystemMessage], response_model: type[R]
      ) -> R:
          for message in reversed(messages):
-             if isinstance(message, AssistantMessage):
+             if isinstance(message, LLMAssistantMessage):
                  try:
-                     return response_model.model_validate_json(message.text)
+                     return response_model.model_validate_json(message.parts[0].content)
                  except ValidationError:
                      continue
 
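
Net effect of the agent.py changes: litellm's acompletion is replaced by aikernel's llm_unstructured/llm_structured, the separate system= parameter is gone (an LLMSystemMessage now travels as the first element of messages), and usage is read from input_tokens/output_tokens rather than litellm's prompt_tokens/completion_tokens. A hypothetical call-site migration sketch; this diff does not show how aikernel messages are constructed, so system_msg, user_msg, and MyResult below are placeholders:

    from aikernel import LLMModel

    # 0.10.2 call site:
    #     await agent.generate(messages=[user_msg], model=AIModel.GEMINI_FLASH,
    #                          task_name="draft", system=system_msg)
    # 0.11.1 call site -- the system message rides at the front of `messages`:
    result = await agent.generate(
        messages=[system_msg, user_msg],
        model=LLMModel.GEMINI_2_0_FLASH,
        task_name="draft",
        response_model=MyResult,  # placeholder Result subclass
    )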
goose/_internal/conversation.py CHANGED
@@ -1,38 +1,33 @@
  from typing import Self
 
+ from aikernel import LLMAssistantMessage, LLMSystemMessage, LLMUserMessage
  from pydantic import BaseModel
 
- from goose._internal.result import Result
- from goose._internal.types.agent import AssistantMessage, SystemMessage, UserMessage
  from goose.errors import Honk
 
 
- class Conversation[R: Result](BaseModel):
-     user_messages: list[UserMessage]
-     assistant_messages: list[R | str]
-     context: SystemMessage | None = None
+ class Conversation(BaseModel):
+     user_messages: list[LLMUserMessage]
+     assistant_messages: list[LLMAssistantMessage]
+     context: LLMSystemMessage | None = None
 
      @property
      def awaiting_response(self) -> bool:
          return len(self.user_messages) == len(self.assistant_messages)
 
-     def get_messages(self) -> list[UserMessage | AssistantMessage]:
-         messages: list[UserMessage | AssistantMessage] = []
+     def render(self) -> list[LLMSystemMessage | LLMUserMessage | LLMAssistantMessage]:
+         messages: list[LLMSystemMessage | LLMUserMessage | LLMAssistantMessage] = []
+         if self.context is not None:
+             messages.append(self.context)
+
          for message_index in range(len(self.user_messages)):
              message = self.assistant_messages[message_index]
-             if isinstance(message, str):
-                 messages.append(AssistantMessage(text=message))
-             else:
-                 messages.append(AssistantMessage(text=message.model_dump_json()))
-
+             messages.append(message)
              messages.append(self.user_messages[message_index])
 
          if len(self.assistant_messages) > len(self.user_messages):
              message = self.assistant_messages[-1]
-             if isinstance(message, str):
-                 messages.append(AssistantMessage(text=message))
-             else:
-                 messages.append(AssistantMessage(text=message.model_dump_json()))
+             messages.append(message)
 
          return messages
 
goose/_internal/flow.py CHANGED
@@ -3,12 +3,12 @@ from contextlib import asynccontextmanager
  from types import CodeType
  from typing import Protocol, overload
 
- from goose._internal.agent import Agent, IAgentLogger
- from goose._internal.conversation import Conversation
- from goose._internal.result import Result
- from goose._internal.state import FlowArguments, FlowRun, get_current_flow_run, set_current_flow_run
- from goose._internal.store import IFlowRunStore, InMemoryFlowRunStore
- from goose.errors import Honk
+ from ..errors import Honk
+ from .agent import Agent, IAgentLogger
+ from .conversation import Conversation
+ from .result import Result
+ from .state import FlowArguments, FlowRun, get_current_flow_run, set_current_flow_run
+ from .store import IFlowRunStore, InMemoryFlowRunStore
 
 
  class IGenerator[FlowArgumentsT: FlowArguments](Protocol):
@@ -20,7 +20,7 @@ class IGenerator[FlowArgumentsT: FlowArguments](Protocol):
  class IAdapter[ResultT: Result](Protocol):
      __code__: CodeType
 
-     async def __call__(self, *, conversation: Conversation[ResultT], agent: Agent) -> ResultT: ...
+     async def __call__(self, *, conversation: Conversation, agent: Agent) -> ResultT: ...
 
 
  class Flow[FlowArgumentsT: FlowArguments]:
goose/_internal/state.py CHANGED
@@ -2,12 +2,12 @@ import json
  from contextvars import ContextVar
  from typing import TYPE_CHECKING, Any, NewType, Self
 
+ from aikernel import LLMAssistantMessage, LLMSystemMessage, LLMUserMessage
  from pydantic import BaseModel, ConfigDict
 
  from goose._internal.agent import Agent, IAgentLogger
  from goose._internal.conversation import Conversation
  from goose._internal.result import Result
- from goose._internal.types.agent import SystemMessage, UserMessage
  from goose.errors import Honk
 
  if TYPE_CHECKING:
@@ -20,55 +20,55 @@ class FlowArguments(BaseModel):
      model_config = ConfigDict(frozen=True)
 
 
- class NodeState[ResultT: Result](BaseModel):
+ class NodeState(BaseModel):
      task_name: str
      index: int
-     conversation: Conversation[ResultT]
+     conversation: Conversation
      last_hash: int
 
      @property
-     def result(self) -> ResultT:
+     def raw_result(self) -> str:
          for message in reversed(self.conversation.assistant_messages):
-             if isinstance(message, Result):
-                 return message
+             if self.__message_is_result(message):
+                 return message.parts[0].content
 
          raise Honk("Node awaiting response, has no result")
 
-     def set_context(self, *, context: SystemMessage) -> Self:
+     def set_context(self, *, context: LLMSystemMessage) -> Self:
          self.conversation.context = context
          return self
 
      def add_result(
          self,
          *,
-         result: ResultT,
+         result: str,
          new_hash: int | None = None,
          overwrite: bool = False,
      ) -> Self:
          if overwrite and len(self.conversation.assistant_messages) > 0:
-             self.conversation.assistant_messages[-1] = result
+             self.conversation.assistant_messages[-1] = LLMAssistantMessage.from_text(result)
          else:
-             self.conversation.assistant_messages.append(result)
+             self.conversation.assistant_messages.append(LLMAssistantMessage.from_text(result))
          if new_hash is not None:
              self.last_hash = new_hash
          return self
 
      def add_answer(self, *, answer: str) -> Self:
-         self.conversation.assistant_messages.append(answer)
+         self.conversation.assistant_messages.append(LLMAssistantMessage.from_text(answer))
          return self
 
-     def add_user_message(self, *, message: UserMessage) -> Self:
+     def add_user_message(self, *, message: LLMUserMessage) -> Self:
          self.conversation.user_messages.append(message)
          return self
 
-     def edit_last_result(self, *, result: ResultT) -> Self:
+     def edit_last_result(self, *, result: str) -> Self:
          if len(self.conversation.assistant_messages) == 0:
              raise Honk("Node awaiting response, has no result")
 
          for message_index, message in enumerate(reversed(self.conversation.assistant_messages)):
-             if isinstance(message, Result):
+             if self.__message_is_result(message):
                  index = len(self.conversation.assistant_messages) - message_index - 1
-                 self.conversation.assistant_messages[index] = result
+                 self.conversation.assistant_messages[index] = LLMAssistantMessage.from_text(result)
                  return self
 
          raise Honk("Node awaiting response, has no result")
@@ -77,6 +77,13 @@ class NodeState[ResultT: Result](BaseModel):
          self.conversation.undo()
          return self
 
+     def __message_is_result(self, message: LLMAssistantMessage, /) -> bool:
+         try:
+             _ = json.loads(message.parts[0].content)
+             return True
+         except json.JSONDecodeError:
+             return False
+
 
  class FlowRun[FlowArgumentsT: FlowArguments]:
      def __init__(self, *, flow_arguments_model: type[FlowArgumentsT]) -> None:
@@ -109,38 +116,47 @@ class FlowRun[FlowArgumentsT: FlowArguments]:
 
          return self._flow_arguments
 
-     def get_all[R: Result](self, *, task: "Task[Any, R]") -> list[NodeState[R]]:
-         matching_nodes: list[NodeState[R]] = []
-         for key, node_state in self._node_states.items():
-             if key[0] == task.name:
-                 matching_nodes.append(NodeState[task.result_type].model_validate_json(node_state))
-         return sorted(matching_nodes, key=lambda node: node.index)
-
-     def get[R: Result](self, *, task: "Task[Any, R]", index: int = 0) -> NodeState[R]:
+     def get_state(self, *, task: "Task[Any, Any]", index: int = 0) -> NodeState:
          if (existing_node_state := self._node_states.get((task.name, index))) is not None:
-             return NodeState[task.result_type].model_validate_json(existing_node_state)
+             return NodeState.model_validate_json(existing_node_state)
          else:
-             return NodeState[task.result_type](
+             return NodeState(
                  task_name=task.name,
                  index=index,
-                 conversation=Conversation[task.result_type](user_messages=[], assistant_messages=[]),
+                 conversation=Conversation(user_messages=[], assistant_messages=[]),
                  last_hash=0,
              )
 
-     def set_flow_arguments(self, flow_arguments: FlowArgumentsT, /) -> None:
-         self._flow_arguments = flow_arguments
-
-     def upsert_node_state(self, node_state: NodeState[Any], /) -> None:
-         key = (node_state.task_name, node_state.index)
-         self._node_states[key] = node_state.model_dump_json()
-
-     def get_next[R: Result](self, *, task: "Task[Any, R]") -> NodeState[R]:
+     def get_next_state(self, *, task: "Task[Any, Any]", index: int = 0) -> NodeState:
          if task.name not in self._last_requested_indices:
              self._last_requested_indices[task.name] = 0
          else:
              self._last_requested_indices[task.name] += 1
 
-         return self.get(task=task, index=self._last_requested_indices[task.name])
+         return self.get_state(task=task, index=self._last_requested_indices[task.name])
+
+     def get_all_results[R: Result](self, *, task: "Task[Any, R]") -> list[R]:
+         matching_nodes: list[NodeState] = []
+         for key, node_state in self._node_states.items():
+             if key[0] == task.name:
+                 matching_nodes.append(NodeState.model_validate_json(node_state))
+
+         sorted_nodes = sorted(matching_nodes, key=lambda node: node.index)
+         return [task.result_type.model_validate_json(node.raw_result) for node in sorted_nodes]
+
+     def get_result[R: Result](self, *, task: "Task[Any, R]", index: int = 0) -> R:
+         if (existing_node_state := self._node_states.get((task.name, index))) is not None:
+             parsed_node_state = NodeState.model_validate_json(existing_node_state)
+             return task.result_type.model_validate_json(parsed_node_state.raw_result)
+         else:
+             raise Honk(f"No result found for task {task.name} at index {index}")
+
+     def set_flow_arguments(self, flow_arguments: FlowArgumentsT, /) -> None:
+         self._flow_arguments = flow_arguments
+
+     def upsert_node_state(self, node_state: NodeState, /) -> None:
+         key = (node_state.task_name, node_state.index)
+         self._node_states[key] = node_state.model_dump_json()
 
      def start(
          self,
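
FlowRun's accessors are renamed and split: get/get_next become get_state/get_next_state, and two new helpers re-parse stored JSON into typed results via task.result_type. A sketch of the new call sites, assuming an existing flow_run and my_task:

    node = flow_run.get_state(task=my_task, index=0)     # was flow_run.get(...)
    next_node = flow_run.get_next_state(task=my_task)    # was flow_run.get_next(...)
    latest = flow_run.get_result(task=my_task, index=0)  # parsed result; raises Honk if absent
    history = flow_run.get_all_results(task=my_task)     # every parsed result, ordered by node index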
goose/_internal/store.py CHANGED
@@ -2,7 +2,7 @@ from __future__ import annotations
 
  from typing import Protocol
 
- from goose._internal.state import SerializedFlowRun
+ from .state import SerializedFlowRun
 
 
  class IFlowRunStore(Protocol):
goose/_internal/task.py CHANGED
@@ -2,12 +2,12 @@ import hashlib
  from collections.abc import Awaitable, Callable
  from typing import Any, overload
 
+ from aikernel import LLMModel, LLMSystemMessage, LLMUserMessage
  from pydantic import BaseModel
 
- from goose._internal.agent import Agent, AIModel
+ from goose._internal.agent import Agent
  from goose._internal.result import Result
  from goose._internal.state import FlowRun, NodeState, get_current_flow_run
- from goose._internal.types.agent import SystemMessage, UserMessage
  from goose.errors import Honk
 
 
@@ -18,7 +18,7 @@ class Task[**P, R: Result]:
          /,
          *,
          retries: int = 0,
-         refinement_model: AIModel = AIModel.GEMINI_FLASH,
+         refinement_model: LLMModel = LLMModel.GEMINI_2_0_FLASH,
      ) -> None:
          self._generator = generator
          self._retries = retries
@@ -35,28 +35,32 @@ class Task[**P, R: Result]:
      def name(self) -> str:
          return self._generator.__name__
 
-     async def generate(self, state: NodeState[R], *args: P.args, **kwargs: P.kwargs) -> R:
+     async def generate(self, state: NodeState, *args: P.args, **kwargs: P.kwargs) -> R:
          state_hash = self.__hash_task_call(*args, **kwargs)
          if state_hash != state.last_hash:
              result = await self._generator(*args, **kwargs)
-             state.add_result(result=result, new_hash=state_hash, overwrite=True)
+             state.add_result(result=result.model_dump_json(), new_hash=state_hash, overwrite=True)
              return result
          else:
-             return state.result
+             return self.result_type.model_validate_json(state.raw_result)
 
-     async def ask(self, *, user_message: UserMessage, context: SystemMessage | None = None, index: int = 0) -> str:
+     async def ask(
+         self, *, user_message: LLMUserMessage, context: LLMSystemMessage | None = None, index: int = 0
+     ) -> str:
          flow_run = self.__get_current_flow_run()
-         node_state = flow_run.get(task=self, index=index)
+         node_state = flow_run.get_state(task=self, index=index)
 
          if len(node_state.conversation.assistant_messages) == 0:
              raise Honk("Cannot ask about a task that has not been initially generated")
 
+         if context is not None:
+             node_state.set_context(context=context)
          node_state.add_user_message(message=user_message)
+
          answer = await flow_run.agent(
-             messages=node_state.conversation.get_messages(),
+             messages=node_state.conversation.render(),
              model=self._refinement_model,
              task_name=f"ask--{self.name}",
-             system=context,
              mode="ask",
          )
          node_state.add_answer(answer=answer)
@@ -67,12 +71,12 @@ class Task[**P, R: Result]:
      async def refine(
          self,
          *,
-         user_message: UserMessage,
-         context: SystemMessage | None = None,
+         user_message: LLMUserMessage,
+         context: LLMSystemMessage | None = None,
          index: int = 0,
      ) -> R:
          flow_run = self.__get_current_flow_run()
-         node_state = flow_run.get(task=self, index=index)
+         node_state = flow_run.get_state(task=self, index=index)
 
          if len(node_state.conversation.assistant_messages) == 0:
              raise Honk("Cannot refine a task that has not been initially generated")
@@ -82,33 +86,32 @@ class Task[**P, R: Result]:
          node_state.add_user_message(message=user_message)
 
          result = await flow_run.agent(
-             messages=node_state.conversation.get_messages(),
+             messages=node_state.conversation.render(),
              model=self._refinement_model,
              task_name=f"refine--{self.name}",
-             system=context,
              response_model=self.result_type,
              mode="refine",
          )
-         node_state.add_result(result=result)
+         node_state.add_result(result=result.model_dump_json())
          flow_run.upsert_node_state(node_state)
 
          return result
 
      def edit(self, *, result: R, index: int = 0) -> None:
          flow_run = self.__get_current_flow_run()
-         node_state = flow_run.get(task=self, index=index)
-         node_state.edit_last_result(result=result)
+         node_state = flow_run.get_state(task=self, index=index)
+         node_state.edit_last_result(result=result.model_dump_json())
          flow_run.upsert_node_state(node_state)
 
      def undo(self, *, index: int = 0) -> None:
          flow_run = self.__get_current_flow_run()
-         node_state = flow_run.get(task=self, index=index)
+         node_state = flow_run.get_state(task=self, index=index)
          node_state.undo()
          flow_run.upsert_node_state(node_state)
 
      async def __call__(self, *args: P.args, **kwargs: P.kwargs) -> R:
          flow_run = self.__get_current_flow_run()
-         node_state = flow_run.get_next(task=self)
+         node_state = flow_run.get_next_state(task=self)
          result = await self.generate(node_state, *args, **kwargs)
          flow_run.upsert_node_state(node_state)
          return result
@@ -151,14 +154,14 @@ class Task[**P, R: Result]:
  def task[**P, R: Result](generator: Callable[P, Awaitable[R]], /) -> Task[P, R]: ...
  @overload
  def task[**P, R: Result](
-     *, retries: int = 0, refinement_model: AIModel = AIModel.GEMINI_FLASH
+     *, retries: int = 0, refinement_model: LLMModel = LLMModel.GEMINI_2_0_FLASH
  ) -> Callable[[Callable[P, Awaitable[R]]], Task[P, R]]: ...
  def task[**P, R: Result](
      generator: Callable[P, Awaitable[R]] | None = None,
      /,
      *,
      retries: int = 0,
-     refinement_model: AIModel = AIModel.GEMINI_FLASH,
+     refinement_model: LLMModel = LLMModel.GEMINI_2_0_FLASH,
  ) -> Task[P, R] | Callable[[Callable[P, Awaitable[R]]], Task[P, R]]:
      if generator is None:
 
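
At task-definition sites the only visible change is the spelling of the refinement-model default. A sketch; summarize is illustrative, not from the package:

    from aikernel import LLMModel
    from goose import TextResult, task

    @task(retries=1, refinement_model=LLMModel.GEMINI_2_0_FLASH)  # was AIModel.GEMINI_FLASH
    async def summarize(*, text: str) -> TextResult:
        ...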
goose/_internal/types/telemetry.py CHANGED
@@ -2,10 +2,9 @@ import json
  from datetime import datetime
  from typing import ClassVar, TypedDict
 
+ from aikernel import LiteLLMMessage, LLMModel
  from pydantic import BaseModel, computed_field
 
- from ..types.agent import AIModel, LLMMessage
-
 
  class AgentResponseDump(TypedDict):
      run_id: str
@@ -26,30 +25,30 @@ class AgentResponseDump(TypedDict):
 
 
  class AgentResponse[R: BaseModel | str](BaseModel):
-     INPUT_DOLLARS_PER_MILLION_TOKENS: ClassVar[dict[AIModel, float]] = {
-         AIModel.VERTEX_FLASH_8B: 0.30,
-         AIModel.VERTEX_FLASH: 0.15,
-         AIModel.VERTEX_PRO: 5.00,
-         AIModel.GEMINI_FLASH_8B: 0.30,
-         AIModel.GEMINI_FLASH: 0.15,
-         AIModel.GEMINI_PRO: 5.00,
+     INPUT_DOLLARS_PER_MILLION_TOKENS: ClassVar[dict[LLMModel, float]] = {
+         LLMModel.VERTEX_GEMINI_2_0_FLASH: 0.30,
+         LLMModel.VERTEX_GEMINI_2_0_FLASH_LITE: 0.15,
+         LLMModel.VERTEX_GEMINI_2_0_PRO_EXP_02_05: 5.00,
+         LLMModel.GEMINI_2_0_FLASH: 0.30,
+         LLMModel.GEMINI_2_0_FLASH_LITE: 0.15,
+         LLMModel.GEMINI_2_0_PRO_EXP_02_05: 5.00,
      }
-     OUTPUT_DOLLARS_PER_MILLION_TOKENS: ClassVar[dict[AIModel, float]] = {
-         AIModel.VERTEX_FLASH_8B: 0.30,
-         AIModel.VERTEX_FLASH: 0.15,
-         AIModel.VERTEX_PRO: 5.00,
-         AIModel.GEMINI_FLASH_8B: 0.30,
-         AIModel.GEMINI_FLASH: 0.15,
-         AIModel.GEMINI_PRO: 5.00,
+     OUTPUT_DOLLARS_PER_MILLION_TOKENS: ClassVar[dict[LLMModel, float]] = {
+         LLMModel.VERTEX_GEMINI_2_0_FLASH: 0.30,
+         LLMModel.VERTEX_GEMINI_2_0_FLASH_LITE: 0.15,
+         LLMModel.VERTEX_GEMINI_2_0_PRO_EXP_02_05: 5.00,
+         LLMModel.GEMINI_2_0_FLASH: 0.30,
+         LLMModel.GEMINI_2_0_FLASH_LITE: 0.15,
+         LLMModel.GEMINI_2_0_PRO_EXP_02_05: 5.00,
      }
 
      response: R
      run_id: str
      flow_name: str
      task_name: str
-     model: AIModel
-     system: LLMMessage | None = None
-     input_messages: list[LLMMessage]
+     model: LLMModel
+     system: LiteLLMMessage | None = None
+     input_messages: list[LiteLLMMessage]
      input_tokens: int
      output_tokens: int
      start_time: datetime
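
The tables above are priced per million tokens; the computed_field that turns them into dollar costs sits outside this hunk, but the arithmetic it implies is simple. A sketch using the constants shown:

    # e.g. 200_000 input + 50_000 output tokens on LLMModel.GEMINI_2_0_FLASH:
    input_cost = (200_000 / 1_000_000) * 0.30   # $0.06, from INPUT_DOLLARS_PER_MILLION_TOKENS
    output_cost = (50_000 / 1_000_000) * 0.30   # $0.015, from OUTPUT_DOLLARS_PER_MILLION_TOKENS
    total_cost = input_cost + output_cost       # $0.075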
goose_py-0.10.2.dist-info/METADATA → goose_py-0.11.1.dist-info/METADATA RENAMED
@@ -1,11 +1,11 @@
  Metadata-Version: 2.4
  Name: goose-py
- Version: 0.10.2
+ Version: 0.11.1
  Summary: A tool for AI workflows based on human-computer collaboration and structured output.
  Author-email: Nash Taylor <nash@chelle.ai>, Joshua Cook <joshua@chelle.ai>, Michael Sankur <michael@chelle.ai>
  Requires-Python: >=3.12
+ Requires-Dist: aikernel>=0.1.8
  Requires-Dist: jsonpath-ng>=1.7.0
- Requires-Dist: litellm>=1.56.5
  Requires-Dist: pydantic>=2.8.2
  Description-Content-Type: text/markdown
 
goose_py-0.11.1.dist-info/RECORD ADDED
@@ -0,0 +1,18 @@
+ goose/__init__.py,sha256=Muw7HCImZHk3kLCTWhV9Lg-Sfmhnwf_Tae-zCj7woyY,338
+ goose/errors.py,sha256=-0OyZQJWYTRw5YgnCB2_uorVaUsL6Z0QYQO2FqzCiyg,32
+ goose/flow.py,sha256=YsZLBa5I1W27_P6LYGWbtFX8ZYx9vJG3KtENYChHm5E,111
+ goose/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ goose/runs.py,sha256=ub-r_gzbUbaIzWXX-jc-dncNxEh6zTfzIkmnDfCSbRI,160
+ goose/task.py,sha256=95rspdxETJoY12IHBl3KjnVIdqQnf1jDKlnGWNWOTvQ,53
+ goose/_internal/agent.py,sha256=rAE7P0DFl-r-omjrxR2EzkJUKQWbaPM0qM3sxTr70ew,8366
+ goose/_internal/conversation.py,sha256=vhJwe1pHk2lV60DaB9Tz9KbpzQo7_thRYInPjbIoUTE,1437
+ goose/_internal/flow.py,sha256=8MJxlhHYSAzUHZefpF_sRJc37o532OF0X7l3KRopDmc,4115
+ goose/_internal/result.py,sha256=vtJMfBxb9skfl8st2tn4hBmEq6qmXiJTme_B5QTgu2M,538
+ goose/_internal/state.py,sha256=jGKKZKeDcKaCY-uqgXLOnMNwyzR5qEH5m9afOtixsLk,7923
+ goose/_internal/store.py,sha256=tWmKfa1-yq1jU6lT3l6kSOmVt2m3H7I1xLMTrxnUDI8,889
+ goose/_internal/task.py,sha256=X_eRZxZlf6SwyvF1nIyjoneyqD_TISXqESyxluk63mE,6416
+ goose/_internal/types/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ goose/_internal/types/telemetry.py,sha256=wDQz1C60KxlftC0aQCXIjswrSYGI1KNaM2wtnlP1Q7k,3823
+ goose_py-0.11.1.dist-info/METADATA,sha256=mAoNCBZ4vauZDCSqhCRS4zVSZMb4sXGS08epirIUiMY,442
+ goose_py-0.11.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ goose_py-0.11.1.dist-info/RECORD,,
goose/_internal/types/agent.py DELETED
@@ -1,101 +0,0 @@
- import base64
- from enum import StrEnum
- from typing import Literal, NotRequired, TypedDict
-
- from pydantic import BaseModel
-
-
- class AIModel(StrEnum):
-     # vertex (production Google, requires GCP environment)
-     VERTEX_PRO = "vertex_ai/gemini-1.5-pro"
-     VERTEX_FLASH = "vertex_ai/gemini-1.5-flash"
-     VERTEX_FLASH_8B = "vertex_ai/gemini-1.5-flash-8b"
-     VERTEX_FLASH_2_0 = "vertex_ai/gemini-2.0-flash"
-
-     # gemini (publicly available, no GCP environment required)
-     GEMINI_PRO = "gemini/gemini-1.5-pro"
-     GEMINI_FLASH = "gemini/gemini-1.5-flash"
-     GEMINI_FLASH_8B = "gemini/gemini-1.5-flash-8b"
-     GEMINI_FLASH_2_0 = "gemini/gemini-2.0-flash"
-
-
- class ContentType(StrEnum):
-     # text
-     TEXT = "text/plain"
-
-     # images
-     JPEG = "image/jpeg"
-     PNG = "image/png"
-     WEBP = "image/webp"
-
-     # audio
-     MP3 = "audio/mp3"
-     WAV = "audio/wav"
-
-     # files
-     PDF = "application/pdf"
-
-
- class LLMTextMessagePart(TypedDict):
-     type: Literal["text"]
-     text: str
-
-
- class LLMMediaMessagePart(TypedDict):
-     type: Literal["image_url"]
-     image_url: str
-
-
- class CacheControl(TypedDict):
-     type: Literal["ephemeral"]
-
-
- class LLMMessage(TypedDict):
-     role: Literal["user", "assistant", "system"]
-     content: list[LLMTextMessagePart | LLMMediaMessagePart]
-     cache_control: NotRequired[CacheControl]
-
-
- class MessagePart(BaseModel):
-     content: str
-     content_type: ContentType = ContentType.TEXT
-
-     @classmethod
-     def from_media(cls, *, content: bytes, content_type: ContentType) -> "MessagePart":
-         return cls(content=base64.b64encode(content).decode(), content_type=content_type)
-
-     def render(self) -> LLMTextMessagePart | LLMMediaMessagePart:
-         if self.content_type == ContentType.TEXT:
-             return {"type": "text", "text": self.content}
-         else:
-             return {"type": "image_url", "image_url": f"data:{self.content_type};base64,{self.content}"}
-
-
- class UserMessage(BaseModel):
-     parts: list[MessagePart]
-
-     def render(self) -> LLMMessage:
-         content: LLMMessage = {
-             "role": "user",
-             "content": [part.render() for part in self.parts],
-         }
-         if any(part.content_type != ContentType.TEXT for part in self.parts):
-             content["cache_control"] = {"type": "ephemeral"}
-         return content
-
-
- class AssistantMessage(BaseModel):
-     text: str
-
-     def render(self) -> LLMMessage:
-         return {"role": "assistant", "content": [{"type": "text", "text": self.text}]}
-
-
- class SystemMessage(BaseModel):
-     parts: list[MessagePart]
-
-     def render(self) -> LLMMessage:
-         return {
-             "role": "system",
-             "content": [part.render() for part in self.parts],
-         }
goose/agent.py DELETED
@@ -1,26 +0,0 @@
- from ._internal.agent import AgentResponse, IAgentLogger
- from ._internal.types.agent import (
-     AIModel,
-     AssistantMessage,
-     ContentType,
-     LLMMediaMessagePart,
-     LLMMessage,
-     LLMTextMessagePart,
-     MessagePart,
-     SystemMessage,
-     UserMessage,
- )
-
- __all__ = [
-     "AgentResponse",
-     "AIModel",
-     "IAgentLogger",
-     "AssistantMessage",
-     "LLMMediaMessagePart",
-     "LLMMessage",
-     "LLMTextMessagePart",
-     "SystemMessage",
-     "MessagePart",
-     "ContentType",
-     "UserMessage",
- ]
goose_py-0.10.2.dist-info/RECORD DELETED
@@ -1,20 +0,0 @@
- goose/__init__.py,sha256=wjGDgWzKcD6S8loVr0n-rLCpRwg-ZKAixcUaw1wobMc,243
- goose/agent.py,sha256=u6daAnn4fPgP4Jk9cHANyCEku3RmUqKLdqtyGSr8ljI,510
- goose/errors.py,sha256=-0OyZQJWYTRw5YgnCB2_uorVaUsL6Z0QYQO2FqzCiyg,32
- goose/flow.py,sha256=YsZLBa5I1W27_P6LYGWbtFX8ZYx9vJG3KtENYChHm5E,111
- goose/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- goose/runs.py,sha256=ub-r_gzbUbaIzWXX-jc-dncNxEh6zTfzIkmnDfCSbRI,160
- goose/task.py,sha256=95rspdxETJoY12IHBl3KjnVIdqQnf1jDKlnGWNWOTvQ,53
- goose/_internal/agent.py,sha256=yNkX0lZueKiGxYsSwbZRNn88HfDBYbVpJLDfyr6Var0,8893
- goose/_internal/conversation.py,sha256=I0Ru5D7piOOQlmFhUpwEeQUQxOVq59yyHEDrmYhwPMI,1695
- goose/_internal/flow.py,sha256=05U2f5i8ofQWDjghhomwuuEPMk-ftzXn7BVl_s7pIf8,4203
- goose/_internal/result.py,sha256=vtJMfBxb9skfl8st2tn4hBmEq6qmXiJTme_B5QTgu2M,538
- goose/_internal/state.py,sha256=U4gM0K4MAlRFTpqenCYHX9TYGuhWVKIfa4yBeZ9Qc9s,7090
- goose/_internal/store.py,sha256=GMW0wBpxESmRBLfL_lFKEi9x2P6Wd6-gZ7AWjWBTUmA,904
- goose/_internal/task.py,sha256=MXBVLepFSphkSbGgzh7U7QMRXDvz_4MNet_EsrlAKTQ,6244
- goose/_internal/types/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- goose/_internal/types/agent.py,sha256=g0KD-aPWZlUGBx72AwQd3LeniFxHATeflZ7191QjFZA,2696
- goose/_internal/types/telemetry.py,sha256=7zeqyDDxf95puirNM6Gr9VFuxoDshXcV1__V0tiMswE,3663
- goose_py-0.10.2.dist-info/METADATA,sha256=VoLeEDnKj2PfjzVyJCt8eSQQygbFyWFihpADvJ4DTLw,442
- goose_py-0.10.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- goose_py-0.10.2.dist-info/RECORD,,