goose-py 0.10.1__py3-none-any.whl → 0.11.0__py3-none-any.whl

This diff compares the contents of two publicly released versions of this package, as they appear in their public registry. It is provided for informational purposes only.
goose/__init__.py CHANGED
@@ -1,6 +1,7 @@
- from ._internal.agent import Agent
- from ._internal.flow import FlowArguments, flow
- from ._internal.result import Result, TextResult
- from ._internal.task import task
+ from goose._internal.agent import Agent
+ from goose._internal.flow import FlowArguments, flow
+ from goose._internal.result import Result, TextResult
+ from goose._internal.task import task
+ from goose._internal.types.telemetry import AgentResponse

- __all__ = ["Agent", "flow", "FlowArguments", "Result", "TextResult", "task"]
+ __all__ = ["Agent", "flow", "FlowArguments", "Result", "TextResult", "task", "AgentResponse"]
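
The top-level package now re-exports AgentResponse. A minimal sketch of the new import surface (illustrative, not part of the diff):

    from goose import Agent, AgentResponse, FlowArguments, Result, TextResult, flow, task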
goose/_internal/agent.py CHANGED
@@ -2,15 +2,21 @@ import logging
  from datetime import datetime
  from typing import Any, Literal, Protocol, overload

- from litellm import acompletion
+ from aikernel import (
+     LLMAssistantMessage,
+     LLMModel,
+     LLMSystemMessage,
+     LLMUserMessage,
+     llm_structured,
+     llm_unstructured,
+     render_message,
+ )
  from pydantic import ValidationError

+ from goose._internal.result import FindReplaceResponse, Result, TextResult
  from goose._internal.types.telemetry import AgentResponse
  from goose.errors import Honk

- from .result import FindReplaceResponse, Result, TextResult
- from .types.agent import AIModel, LLMMessage
-

  class IAgentLogger(Protocol):
      async def __call__(self, *, response: AgentResponse[Any]) -> None: ...
@@ -31,28 +37,29 @@ class Agent:
      async def generate[R: Result](
          self,
          *,
-         messages: list[LLMMessage],
-         model: AIModel,
+         messages: list[LLMUserMessage | LLMAssistantMessage | LLMSystemMessage],
+         model: LLMModel,
          task_name: str,
          response_model: type[R] = TextResult,
-         system: LLMMessage | None = None,
      ) -> R:
          start_time = datetime.now()
-         if system is not None:
-             messages.insert(0, system)

          if response_model is TextResult:
-             response = await acompletion(model=model.value, messages=messages)
-             parsed_response = response_model.model_validate({"text": response.choices[0].message.content})
+             response = await llm_unstructured(model=model, messages=messages)
+             parsed_response = response_model.model_validate({"text": response.text})
          else:
-             response = await acompletion(
-                 model=model.value,
-                 messages=messages,
-                 response_format=response_model,
-             )
-             parsed_response = response_model.model_validate_json(response.choices[0].message.content)
+             response = await llm_structured(model=model, messages=messages, response_model=response_model)
+             parsed_response = response.structured_response

          end_time = datetime.now()
+
+         if isinstance(messages[0], LLMSystemMessage):
+             system = render_message(messages[0])
+             input_messages = [render_message(message) for message in messages[1:]]
+         else:
+             system = None
+             input_messages = [render_message(message) for message in messages]
+
          agent_response = AgentResponse(
              response=parsed_response,
              run_id=self.run_id,
@@ -60,9 +67,9 @@ class Agent:
              task_name=task_name,
              model=model,
              system=system,
-             input_messages=messages,
-             input_tokens=response.usage.prompt_tokens,
-             output_tokens=response.usage.completion_tokens,
+             input_messages=input_messages,
+             input_tokens=response.usage.input_tokens,
+             output_tokens=response.usage.output_tokens,
              start_time=start_time,
              end_time=end_time,
          )
@@ -75,25 +82,33 @@
          return parsed_response

      async def ask(
-         self, *, messages: list[LLMMessage], model: AIModel, task_name: str, system: LLMMessage | None = None
+         self,
+         *,
+         messages: list[LLMUserMessage | LLMAssistantMessage | LLMSystemMessage],
+         model: LLMModel,
+         task_name: str,
      ) -> str:
          start_time = datetime.now()
+         response = await llm_unstructured(model=model, messages=messages)
+         end_time = datetime.now()

-         if system is not None:
-             messages.insert(0, system)
-         response = await acompletion(model=model.value, messages=messages)
+         if isinstance(messages[0], LLMSystemMessage):
+             system = render_message(messages[0])
+             input_messages = [render_message(message) for message in messages[1:]]
+         else:
+             system = None
+             input_messages = [render_message(message) for message in messages]

-         end_time = datetime.now()
          agent_response = AgentResponse(
-             response=response.choices[0].message.content,
+             response=response.text,
              run_id=self.run_id,
              flow_name=self.flow_name,
              task_name=task_name,
              model=model,
              system=system,
-             input_messages=messages,
-             input_tokens=response.usage.prompt_tokens,
-             output_tokens=response.usage.completion_tokens,
+             input_messages=input_messages,
+             input_tokens=response.usage.input_tokens,
+             output_tokens=response.usage.output_tokens,
              start_time=start_time,
              end_time=end_time,
          )
@@ -103,30 +118,28 @@
          else:
              logging.info(agent_response.model_dump())

-         return response.choices[0].message.content
+         return response.text

      async def refine[R: Result](
          self,
          *,
-         messages: list[LLMMessage],
-         model: AIModel,
+         messages: list[LLMUserMessage | LLMAssistantMessage | LLMSystemMessage],
+         model: LLMModel,
          task_name: str,
          response_model: type[R],
-         system: LLMMessage | None = None,
      ) -> R:
          start_time = datetime.now()
+         find_replace_response = await llm_structured(model=model, messages=messages, response_model=FindReplaceResponse)
+         parsed_find_replace_response = find_replace_response.structured_response
+         end_time = datetime.now()

-         if system is not None:
-             messages.insert(0, system)
-
-         find_replace_response = await acompletion(
-             model=model.value, messages=messages, response_format=FindReplaceResponse
-         )
-         parsed_find_replace_response = FindReplaceResponse.model_validate_json(
-             find_replace_response.choices[0].message.content
-         )
+         if isinstance(messages[0], LLMSystemMessage):
+             system = render_message(messages[0])
+             input_messages = [render_message(message) for message in messages[1:]]
+         else:
+             system = None
+             input_messages = [render_message(message) for message in messages]

-         end_time = datetime.now()
          agent_response = AgentResponse(
              response=parsed_find_replace_response,
              run_id=self.run_id,
@@ -134,9 +147,9 @@ class Agent:
              task_name=task_name,
              model=model,
              system=system,
-             input_messages=messages,
-             input_tokens=find_replace_response.usage.prompt_tokens,
-             output_tokens=find_replace_response.usage.completion_tokens,
+             input_messages=input_messages,
+             input_tokens=find_replace_response.usage.input_tokens,
+             output_tokens=find_replace_response.usage.output_tokens,
              start_time=start_time,
              end_time=end_time,
          )
@@ -158,69 +171,64 @@
      async def __call__[R: Result](
          self,
          *,
-         messages: list[LLMMessage],
-         model: AIModel,
+         messages: list[LLMUserMessage | LLMAssistantMessage | LLMSystemMessage],
+         model: LLMModel,
          task_name: str,
          mode: Literal["generate"],
          response_model: type[R],
-         system: LLMMessage | None = None,
      ) -> R: ...

      @overload
      async def __call__[R: Result](
          self,
          *,
-         messages: list[LLMMessage],
-         model: AIModel,
+         messages: list[LLMUserMessage | LLMAssistantMessage | LLMSystemMessage],
+         model: LLMModel,
          task_name: str,
          mode: Literal["ask"],
          response_model: type[R] = TextResult,
-         system: LLMMessage | None = None,
      ) -> str: ...

      @overload
      async def __call__[R: Result](
          self,
          *,
-         messages: list[LLMMessage],
-         model: AIModel,
+         messages: list[LLMUserMessage | LLMAssistantMessage | LLMSystemMessage],
+         model: LLMModel,
          task_name: str,
          response_model: type[R],
          mode: Literal["refine"],
-         system: LLMMessage | None = None,
      ) -> R: ...

      @overload
      async def __call__[R: Result](
          self,
          *,
-         messages: list[LLMMessage],
-         model: AIModel,
+         messages: list[LLMUserMessage | LLMAssistantMessage | LLMSystemMessage],
+         model: LLMModel,
          task_name: str,
          response_model: type[R],
-         system: LLMMessage | None = None,
      ) -> R: ...

      async def __call__[R: Result](
          self,
          *,
-         messages: list[LLMMessage],
-         model: AIModel,
+         messages: list[LLMUserMessage | LLMAssistantMessage | LLMSystemMessage],
+         model: LLMModel,
          task_name: str,
          response_model: type[R] = TextResult,
          mode: Literal["generate", "ask", "refine"] = "generate",
-         system: LLMMessage | None = None,
      ) -> R | str:
          match mode:
              case "generate":
                  return await self.generate(
-                     messages=messages, model=model, task_name=task_name, response_model=response_model, system=system
+                     messages=messages, model=model, task_name=task_name, response_model=response_model
                  )
              case "ask":
-                 return await self.ask(messages=messages, model=model, task_name=task_name, system=system)
+                 return await self.ask(messages=messages, model=model, task_name=task_name)
              case "refine":
                  return await self.refine(
-                     messages=messages, model=model, task_name=task_name, response_model=response_model, system=system
+                     messages=messages, model=model, task_name=task_name, response_model=response_model
                  )

      def __apply_find_replace[R: Result](
@@ -232,13 +240,13 @@

          return response_model.model_validate_json(dumped_result)

-     def __find_last_result[R: Result](self, *, messages: list[LLMMessage], response_model: type[R]) -> R:
+     def __find_last_result[R: Result](
+         self, *, messages: list[LLMUserMessage | LLMAssistantMessage | LLMSystemMessage], response_model: type[R]
+     ) -> R:
          for message in reversed(messages):
-             if message["role"] == "assistant":
+             if isinstance(message, LLMAssistantMessage):
                  try:
-                     only_part = message["content"][0]
-                     if only_part["type"] == "text":
-                         return response_model.model_validate_json(only_part["text"])
+                     return response_model.model_validate_json(message.parts[0].content)
                  except ValidationError:
                      continue

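The practical upshot for callers: the `system` keyword is gone, a system prompt now travels as the first entry of `messages`, and models come from aikernel's LLMModel enum instead of the old AIModel. A hedged sketch of a migrated call site, assuming an existing Agent instance and assuming LLMSystemMessage/LLMUserMessage expose a from_text constructor (only LLMAssistantMessage.from_text is visible in this diff):

    from aikernel import LLMModel, LLMSystemMessage, LLMUserMessage

    async def summarize_change(agent) -> str:  # `agent` is a goose Agent bound to a flow run
        # 0.10.x passed system=... separately; 0.11.0 expects it to lead the message list.
        return await agent(
            messages=[
                LLMSystemMessage.from_text("You are terse."),        # assumed constructor
                LLMUserMessage.from_text("Summarize this change."),  # assumed constructor
            ],
            model=LLMModel.GEMINI_2_0_FLASH,
            task_name="summarize",
            mode="ask",
        )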
goose/_internal/conversation.py CHANGED
@@ -1,42 +1,33 @@
  from typing import Self

+ from aikernel import LLMAssistantMessage, LLMSystemMessage, LLMUserMessage
  from pydantic import BaseModel

  from goose.errors import Honk

- from .result import Result
- from .types.agent import AssistantMessage, LLMMessage, SystemMessage, UserMessage

-
- class Conversation[R: Result](BaseModel):
-     user_messages: list[UserMessage]
-     assistant_messages: list[R | str]
-     context: SystemMessage | None = None
+ class Conversation(BaseModel):
+     user_messages: list[LLMUserMessage]
+     assistant_messages: list[LLMAssistantMessage]
+     context: LLMSystemMessage | None = None

      @property
      def awaiting_response(self) -> bool:
          return len(self.user_messages) == len(self.assistant_messages)

-     def render(self) -> list[LLMMessage]:
-         messages: list[LLMMessage] = []
+     def render(self) -> list[LLMSystemMessage | LLMUserMessage | LLMAssistantMessage]:
+         messages: list[LLMSystemMessage | LLMUserMessage | LLMAssistantMessage] = []
          if self.context is not None:
-             messages.append(self.context.render())
+             messages.append(self.context)

          for message_index in range(len(self.user_messages)):
              message = self.assistant_messages[message_index]
-             if isinstance(message, str):
-                 messages.append(AssistantMessage(text=message).render())
-             else:
-                 messages.append(AssistantMessage(text=message.model_dump_json()).render())
-
-             messages.append(self.user_messages[message_index].render())
+             messages.append(message)
+             messages.append(self.user_messages[message_index])

          if len(self.assistant_messages) > len(self.user_messages):
              message = self.assistant_messages[-1]
-             if isinstance(message, str):
-                 messages.append(AssistantMessage(text=message).render())
-             else:
-                 messages.append(AssistantMessage(text=message.model_dump_json()).render())
+             messages.append(message)

          return messages

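Conversation drops its result-type parameter: assistant turns are stored as LLMAssistantMessage objects, and render() passes the aikernel messages through unchanged instead of converting them to role/content dicts. A small sketch (LLMUserMessage.from_text is an assumption by analogy with LLMAssistantMessage.from_text used elsewhere in this diff):

    from aikernel import LLMAssistantMessage, LLMUserMessage
    from goose._internal.conversation import Conversation

    conversation = Conversation(
        user_messages=[LLMUserMessage.from_text("Hi")],  # assumed constructor
        assistant_messages=[LLMAssistantMessage.from_text('{"text": "Hello"}')],
    )
    messages = conversation.render()  # aikernel message objects, ready for llm_unstructured/llm_structured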
goose/_internal/flow.py CHANGED
@@ -20,7 +20,7 @@ class IGenerator[FlowArgumentsT: FlowArguments](Protocol):
  class IAdapter[ResultT: Result](Protocol):
      __code__: CodeType

-     async def __call__(self, *, conversation: Conversation[ResultT], agent: Agent) -> ResultT: ...
+     async def __call__(self, *, conversation: Conversation, agent: Agent) -> ResultT: ...


  class Flow[FlowArgumentsT: FlowArguments]:
goose/_internal/state.py CHANGED
@@ -2,12 +2,12 @@ import json
  from contextvars import ContextVar
  from typing import TYPE_CHECKING, Any, NewType, Self

+ from aikernel import LLMAssistantMessage, LLMSystemMessage, LLMUserMessage
  from pydantic import BaseModel, ConfigDict

  from goose._internal.agent import Agent, IAgentLogger
  from goose._internal.conversation import Conversation
  from goose._internal.result import Result
- from goose._internal.types.agent import SystemMessage, UserMessage
  from goose.errors import Honk

  if TYPE_CHECKING:
@@ -20,55 +20,55 @@
      model_config = ConfigDict(frozen=True)


- class NodeState[ResultT: Result](BaseModel):
+ class NodeState(BaseModel):
      task_name: str
      index: int
-     conversation: Conversation[ResultT]
+     conversation: Conversation
      last_hash: int

      @property
-     def result(self) -> ResultT:
+     def raw_result(self) -> str:
          for message in reversed(self.conversation.assistant_messages):
-             if isinstance(message, Result):
-                 return message
+             if self.__message_is_result(message):
+                 return message.parts[0].content

          raise Honk("Node awaiting response, has no result")

-     def set_context(self, *, context: SystemMessage) -> Self:
+     def set_context(self, *, context: LLMSystemMessage) -> Self:
          self.conversation.context = context
          return self

      def add_result(
          self,
          *,
-         result: ResultT,
+         result: str,
          new_hash: int | None = None,
          overwrite: bool = False,
      ) -> Self:
          if overwrite and len(self.conversation.assistant_messages) > 0:
-             self.conversation.assistant_messages[-1] = result
+             self.conversation.assistant_messages[-1] = LLMAssistantMessage.from_text(result)
          else:
-             self.conversation.assistant_messages.append(result)
+             self.conversation.assistant_messages.append(LLMAssistantMessage.from_text(result))
          if new_hash is not None:
              self.last_hash = new_hash
          return self

      def add_answer(self, *, answer: str) -> Self:
-         self.conversation.assistant_messages.append(answer)
+         self.conversation.assistant_messages.append(LLMAssistantMessage.from_text(answer))
          return self

-     def add_user_message(self, *, message: UserMessage) -> Self:
+     def add_user_message(self, *, message: LLMUserMessage) -> Self:
          self.conversation.user_messages.append(message)
          return self

-     def edit_last_result(self, *, result: ResultT) -> Self:
+     def edit_last_result(self, *, result: str) -> Self:
          if len(self.conversation.assistant_messages) == 0:
              raise Honk("Node awaiting response, has no result")

          for message_index, message in enumerate(reversed(self.conversation.assistant_messages)):
-             if isinstance(message, Result):
+             if self.__message_is_result(message):
                  index = len(self.conversation.assistant_messages) - message_index - 1
-                 self.conversation.assistant_messages[index] = result
+                 self.conversation.assistant_messages[index] = LLMAssistantMessage.from_text(result)
                  return self

          raise Honk("Node awaiting response, has no result")
@@ -77,6 +77,13 @@ class NodeState[ResultT: Result](BaseModel):
          self.conversation.undo()
          return self

+     def __message_is_result(self, message: LLMAssistantMessage, /) -> bool:
+         try:
+             _ = json.loads(message.parts[0].content)
+             return True
+         except json.JSONDecodeError:
+             return False
+

  class FlowRun[FlowArgumentsT: FlowArguments]:
      def __init__(self, *, flow_arguments_model: type[FlowArgumentsT]) -> None:
@@ -109,38 +116,47 @@

          return self._flow_arguments

-     def get_all[R: Result](self, *, task: "Task[Any, R]") -> list[NodeState[R]]:
-         matching_nodes: list[NodeState[R]] = []
-         for key, node_state in self._node_states.items():
-             if key[0] == task.name:
-                 matching_nodes.append(NodeState[task.result_type].model_validate_json(node_state))
-         return sorted(matching_nodes, key=lambda node: node.index)
-
-     def get[R: Result](self, *, task: "Task[Any, R]", index: int = 0) -> NodeState[R]:
+     def get_state(self, *, task: "Task[Any, Any]", index: int = 0) -> NodeState:
          if (existing_node_state := self._node_states.get((task.name, index))) is not None:
-             return NodeState[task.result_type].model_validate_json(existing_node_state)
+             return NodeState.model_validate_json(existing_node_state)
          else:
-             return NodeState[task.result_type](
+             return NodeState(
                  task_name=task.name,
                  index=index,
-                 conversation=Conversation[task.result_type](user_messages=[], assistant_messages=[]),
+                 conversation=Conversation(user_messages=[], assistant_messages=[]),
                  last_hash=0,
              )

-     def set_flow_arguments(self, flow_arguments: FlowArgumentsT, /) -> None:
-         self._flow_arguments = flow_arguments
-
-     def upsert_node_state(self, node_state: NodeState[Any], /) -> None:
-         key = (node_state.task_name, node_state.index)
-         self._node_states[key] = node_state.model_dump_json()
-
-     def get_next[R: Result](self, *, task: "Task[Any, R]") -> NodeState[R]:
+     def get_next_state(self, *, task: "Task[Any, Any]", index: int = 0) -> NodeState:
          if task.name not in self._last_requested_indices:
              self._last_requested_indices[task.name] = 0
          else:
              self._last_requested_indices[task.name] += 1

-         return self.get(task=task, index=self._last_requested_indices[task.name])
+         return self.get_state(task=task, index=self._last_requested_indices[task.name])
+
+     def get_all_results[R: Result](self, *, task: "Task[Any, R]") -> list[R]:
+         matching_nodes: list[NodeState] = []
+         for key, node_state in self._node_states.items():
+             if key[0] == task.name:
+                 matching_nodes.append(NodeState.model_validate_json(node_state))
+
+         sorted_nodes = sorted(matching_nodes, key=lambda node: node.index)
+         return [task.result_type.model_validate_json(node.raw_result) for node in sorted_nodes]
+
+     def get_result[R: Result](self, *, task: "Task[Any, R]", index: int = 0) -> R:
+         if (existing_node_state := self._node_states.get((task.name, index))) is not None:
+             parsed_node_state = NodeState.model_validate_json(existing_node_state)
+             return task.result_type.model_validate_json(parsed_node_state.raw_result)
+         else:
+             raise Honk(f"No result found for task {task.name} at index {index}")
+
+     def set_flow_arguments(self, flow_arguments: FlowArgumentsT, /) -> None:
+         self._flow_arguments = flow_arguments
+
+     def upsert_node_state(self, node_state: NodeState, /) -> None:
+         key = (node_state.task_name, node_state.index)
+         self._node_states[key] = node_state.model_dump_json()

      def start(
          self,
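
NodeState now persists results as JSON strings wrapped in LLMAssistantMessage turns (raw_result returns the JSON text; __message_is_result just checks that the text parses), and FlowRun's accessors are renamed and split between states and parsed results. A sketch of the new surface, assuming an active FlowRun and a @task-decorated task:

    def read_results(flow_run, my_task):  # hypothetical FlowRun and Task instances
        state = flow_run.get_state(task=my_task, index=0)    # formerly flow_run.get(...)
        result = flow_run.get_result(task=my_task, index=0)  # parses NodeState.raw_result JSON
        results = flow_run.get_all_results(task=my_task)     # every index, sorted by index
        return state, result, results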
goose/_internal/task.py CHANGED
@@ -2,13 +2,13 @@ import hashlib
  from collections.abc import Awaitable, Callable
  from typing import Any, overload

+ from aikernel import LLMModel, LLMSystemMessage, LLMUserMessage
  from pydantic import BaseModel

- from ..errors import Honk
- from .agent import Agent, AIModel
- from .result import Result
- from .state import FlowRun, NodeState, get_current_flow_run
- from .types.agent import SystemMessage, UserMessage
+ from goose._internal.agent import Agent
+ from goose._internal.result import Result
+ from goose._internal.state import FlowRun, NodeState, get_current_flow_run
+ from goose.errors import Honk


  class Task[**P, R: Result]:
@@ -18,7 +18,7 @@ class Task[**P, R: Result]:
          /,
          *,
          retries: int = 0,
-         refinement_model: AIModel = AIModel.GEMINI_FLASH,
+         refinement_model: LLMModel = LLMModel.GEMINI_2_0_FLASH,
      ) -> None:
          self._generator = generator
          self._retries = retries
@@ -35,28 +35,32 @@ class Task[**P, R: Result]:
      def name(self) -> str:
          return self._generator.__name__

-     async def generate(self, state: NodeState[R], *args: P.args, **kwargs: P.kwargs) -> R:
+     async def generate(self, state: NodeState, *args: P.args, **kwargs: P.kwargs) -> R:
          state_hash = self.__hash_task_call(*args, **kwargs)
          if state_hash != state.last_hash:
              result = await self._generator(*args, **kwargs)
-             state.add_result(result=result, new_hash=state_hash, overwrite=True)
+             state.add_result(result=result.model_dump_json(), new_hash=state_hash, overwrite=True)
              return result
          else:
-             return state.result
+             return self.result_type.model_validate_json(state.raw_result)

-     async def ask(self, *, user_message: UserMessage, context: SystemMessage | None = None, index: int = 0) -> str:
+     async def ask(
+         self, *, user_message: LLMUserMessage, context: LLMSystemMessage | None = None, index: int = 0
+     ) -> str:
          flow_run = self.__get_current_flow_run()
-         node_state = flow_run.get(task=self, index=index)
+         node_state = flow_run.get_state(task=self, index=index)

          if len(node_state.conversation.assistant_messages) == 0:
              raise Honk("Cannot ask about a task that has not been initially generated")

+         if context is not None:
+             node_state.set_context(context=context)
          node_state.add_user_message(message=user_message)
+
          answer = await flow_run.agent(
              messages=node_state.conversation.render(),
              model=self._refinement_model,
              task_name=f"ask--{self.name}",
-             system=context.render() if context is not None else None,
              mode="ask",
          )
          node_state.add_answer(answer=answer)
@@ -67,12 +71,12 @@
      async def refine(
          self,
          *,
-         user_message: UserMessage,
-         context: SystemMessage | None = None,
+         user_message: LLMUserMessage,
+         context: LLMSystemMessage | None = None,
          index: int = 0,
      ) -> R:
          flow_run = self.__get_current_flow_run()
-         node_state = flow_run.get(task=self, index=index)
+         node_state = flow_run.get_state(task=self, index=index)

          if len(node_state.conversation.assistant_messages) == 0:
              raise Honk("Cannot refine a task that has not been initially generated")
@@ -85,30 +89,29 @@
              messages=node_state.conversation.render(),
              model=self._refinement_model,
              task_name=f"refine--{self.name}",
-             system=context.render() if context is not None else None,
              response_model=self.result_type,
              mode="refine",
          )
-         node_state.add_result(result=result)
+         node_state.add_result(result=result.model_dump_json())
          flow_run.upsert_node_state(node_state)

          return result

      def edit(self, *, result: R, index: int = 0) -> None:
          flow_run = self.__get_current_flow_run()
-         node_state = flow_run.get(task=self, index=index)
-         node_state.edit_last_result(result=result)
+         node_state = flow_run.get_state(task=self, index=index)
+         node_state.edit_last_result(result=result.model_dump_json())
          flow_run.upsert_node_state(node_state)

      def undo(self, *, index: int = 0) -> None:
          flow_run = self.__get_current_flow_run()
-         node_state = flow_run.get(task=self, index=index)
+         node_state = flow_run.get_state(task=self, index=index)
          node_state.undo()
          flow_run.upsert_node_state(node_state)

      async def __call__(self, *args: P.args, **kwargs: P.kwargs) -> R:
          flow_run = self.__get_current_flow_run()
-         node_state = flow_run.get_next(task=self)
+         node_state = flow_run.get_next_state(task=self)
          result = await self.generate(node_state, *args, **kwargs)
          flow_run.upsert_node_state(node_state)
          return result
@@ -151,14 +154,14 @@
  def task[**P, R: Result](generator: Callable[P, Awaitable[R]], /) -> Task[P, R]: ...
  @overload
  def task[**P, R: Result](
-     *, retries: int = 0, refinement_model: AIModel = AIModel.GEMINI_FLASH
+     *, retries: int = 0, refinement_model: LLMModel = LLMModel.GEMINI_2_0_FLASH
  ) -> Callable[[Callable[P, Awaitable[R]]], Task[P, R]]: ...
  def task[**P, R: Result](
      generator: Callable[P, Awaitable[R]] | None = None,
      /,
      *,
      retries: int = 0,
-     refinement_model: AIModel = AIModel.GEMINI_FLASH,
+     refinement_model: LLMModel = LLMModel.GEMINI_2_0_FLASH,
  ) -> Task[P, R] | Callable[[Callable[P, Awaitable[R]]], Task[P, R]]:
      if generator is None:

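Task definitions only change at the decorator boundary: refinement_model is now an aikernel LLMModel, defaulting to GEMINI_2_0_FLASH. A hedged sketch (Summary is a hypothetical Result subclass; the generator body is elided):

    from aikernel import LLMModel
    from goose import Result, task

    class Summary(Result):
        text: str  # hypothetical field

    @task(retries=1, refinement_model=LLMModel.GEMINI_2_0_FLASH)
    async def summarize(document: str) -> Summary:
        ...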
goose/_internal/types/telemetry.py CHANGED
@@ -2,10 +2,9 @@ import json
  from datetime import datetime
  from typing import ClassVar, TypedDict

+ from aikernel import LiteLLMMessage, LLMModel
  from pydantic import BaseModel, computed_field

- from ..types.agent import AIModel, LLMMessage
-

  class AgentResponseDump(TypedDict):
      run_id: str
@@ -26,30 +25,30 @@


  class AgentResponse[R: BaseModel | str](BaseModel):
-     INPUT_DOLLARS_PER_MILLION_TOKENS: ClassVar[dict[AIModel, float]] = {
-         AIModel.VERTEX_FLASH_8B: 0.30,
-         AIModel.VERTEX_FLASH: 0.15,
-         AIModel.VERTEX_PRO: 5.00,
-         AIModel.GEMINI_FLASH_8B: 0.30,
-         AIModel.GEMINI_FLASH: 0.15,
-         AIModel.GEMINI_PRO: 5.00,
+     INPUT_DOLLARS_PER_MILLION_TOKENS: ClassVar[dict[LLMModel, float]] = {
+         LLMModel.VERTEX_GEMINI_2_0_FLASH: 0.30,
+         LLMModel.VERTEX_GEMINI_2_0_FLASH_LITE: 0.15,
+         LLMModel.VERTEX_GEMINI_2_0_PRO_EXP_02_05: 5.00,
+         LLMModel.GEMINI_2_0_FLASH: 0.30,
+         LLMModel.GEMINI_2_0_FLASH_LITE: 0.15,
+         LLMModel.GEMINI_2_0_PRO_EXP_02_05: 5.00,
      }
-     OUTPUT_DOLLARS_PER_MILLION_TOKENS: ClassVar[dict[AIModel, float]] = {
-         AIModel.VERTEX_FLASH_8B: 0.30,
-         AIModel.VERTEX_FLASH: 0.15,
-         AIModel.VERTEX_PRO: 5.00,
-         AIModel.GEMINI_FLASH_8B: 0.30,
-         AIModel.GEMINI_FLASH: 0.15,
-         AIModel.GEMINI_PRO: 5.00,
+     OUTPUT_DOLLARS_PER_MILLION_TOKENS: ClassVar[dict[LLMModel, float]] = {
+         LLMModel.VERTEX_GEMINI_2_0_FLASH: 0.30,
+         LLMModel.VERTEX_GEMINI_2_0_FLASH_LITE: 0.15,
+         LLMModel.VERTEX_GEMINI_2_0_PRO_EXP_02_05: 5.00,
+         LLMModel.GEMINI_2_0_FLASH: 0.30,
+         LLMModel.GEMINI_2_0_FLASH_LITE: 0.15,
+         LLMModel.GEMINI_2_0_PRO_EXP_02_05: 5.00,
      }

      response: R
      run_id: str
      flow_name: str
      task_name: str
-     model: AIModel
-     system: LLMMessage | None = None
-     input_messages: list[LLMMessage]
+     model: LLMModel
+     system: LiteLLMMessage | None = None
+     input_messages: list[LiteLLMMessage]
      input_tokens: int
      output_tokens: int
      start_time: datetime
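
The pricing tables are re-keyed by LLMModel, and the token fields follow aikernel's input/output naming. The cost arithmetic these tables imply: 10,000 input tokens on GEMINI_2_0_FLASH cost 10_000 * 0.30 / 1_000_000 = $0.003. A sketch of that calculation (the computed cost field itself is outside this hunk):

    from goose import AgentResponse

    def dollar_cost(response) -> float:  # response: AgentResponse, per the tables above
        rate_in = AgentResponse.INPUT_DOLLARS_PER_MILLION_TOKENS[response.model]
        rate_out = AgentResponse.OUTPUT_DOLLARS_PER_MILLION_TOKENS[response.model]
        return (response.input_tokens * rate_in + response.output_tokens * rate_out) / 1_000_000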
goose_py-0.10.1.dist-info/METADATA → goose_py-0.11.0.dist-info/METADATA RENAMED
@@ -1,11 +1,11 @@
  Metadata-Version: 2.4
  Name: goose-py
- Version: 0.10.1
+ Version: 0.11.0
  Summary: A tool for AI workflows based on human-computer collaboration and structured output.
  Author-email: Nash Taylor <nash@chelle.ai>, Joshua Cook <joshua@chelle.ai>, Michael Sankur <michael@chelle.ai>
  Requires-Python: >=3.12
+ Requires-Dist: aikernel>=0.1.5
  Requires-Dist: jsonpath-ng>=1.7.0
- Requires-Dist: litellm>=1.56.5
  Requires-Dist: pydantic>=2.8.2
  Description-Content-Type: text/markdown

goose_py-0.11.0.dist-info/RECORD ADDED
@@ -0,0 +1,18 @@
+ goose/__init__.py,sha256=Muw7HCImZHk3kLCTWhV9Lg-Sfmhnwf_Tae-zCj7woyY,338
+ goose/errors.py,sha256=-0OyZQJWYTRw5YgnCB2_uorVaUsL6Z0QYQO2FqzCiyg,32
+ goose/flow.py,sha256=YsZLBa5I1W27_P6LYGWbtFX8ZYx9vJG3KtENYChHm5E,111
+ goose/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ goose/runs.py,sha256=ub-r_gzbUbaIzWXX-jc-dncNxEh6zTfzIkmnDfCSbRI,160
+ goose/task.py,sha256=95rspdxETJoY12IHBl3KjnVIdqQnf1jDKlnGWNWOTvQ,53
+ goose/_internal/agent.py,sha256=hpg8xXo7lhFDE3ezr8C0-BC61xWoXl1KChxu2n9o8Aw,8449
+ goose/_internal/conversation.py,sha256=vhJwe1pHk2lV60DaB9Tz9KbpzQo7_thRYInPjbIoUTE,1437
+ goose/_internal/flow.py,sha256=8MJxlhHYSAzUHZefpF_sRJc37o532OF0X7l3KRopDmc,4115
+ goose/_internal/result.py,sha256=vtJMfBxb9skfl8st2tn4hBmEq6qmXiJTme_B5QTgu2M,538
+ goose/_internal/state.py,sha256=jGKKZKeDcKaCY-uqgXLOnMNwyzR5qEH5m9afOtixsLk,7923
+ goose/_internal/store.py,sha256=tWmKfa1-yq1jU6lT3l6kSOmVt2m3H7I1xLMTrxnUDI8,889
+ goose/_internal/task.py,sha256=X_eRZxZlf6SwyvF1nIyjoneyqD_TISXqESyxluk63mE,6416
+ goose/_internal/types/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ goose/_internal/types/telemetry.py,sha256=wDQz1C60KxlftC0aQCXIjswrSYGI1KNaM2wtnlP1Q7k,3823
+ goose_py-0.11.0.dist-info/METADATA,sha256=LZvLxXhKOj-edk54qSemq8dxnplsUmSzPRPIrmAxO-w,442
+ goose_py-0.11.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ goose_py-0.11.0.dist-info/RECORD,,
goose/_internal/types/agent.py DELETED
@@ -1,101 +0,0 @@
- import base64
- from enum import StrEnum
- from typing import Literal, NotRequired, TypedDict
-
- from pydantic import BaseModel
-
-
- class AIModel(StrEnum):
-     # vertex (production Google, requires GCP environment)
-     VERTEX_PRO = "vertex_ai/gemini-1.5-pro"
-     VERTEX_FLASH = "vertex_ai/gemini-1.5-flash"
-     VERTEX_FLASH_8B = "vertex_ai/gemini-1.5-flash-8b"
-     VERTEX_FLASH_2_0 = "vertex_ai/gemini-2.0-flash"
-
-     # gemini (publicly available, no GCP environment required)
-     GEMINI_PRO = "gemini/gemini-1.5-pro"
-     GEMINI_FLASH = "gemini/gemini-1.5-flash"
-     GEMINI_FLASH_8B = "gemini/gemini-1.5-flash-8b"
-     GEMINI_FLASH_2_0 = "gemini/gemini-2.0-flash"
-
-
- class ContentType(StrEnum):
-     # text
-     TEXT = "text/plain"
-
-     # images
-     JPEG = "image/jpeg"
-     PNG = "image/png"
-     WEBP = "image/webp"
-
-     # audio
-     MP3 = "audio/mp3"
-     WAV = "audio/wav"
-
-     # files
-     PDF = "application/pdf"
-
-
- class LLMTextMessagePart(TypedDict):
-     type: Literal["text"]
-     text: str
-
-
- class LLMMediaMessagePart(TypedDict):
-     type: Literal["image_url"]
-     image_url: str
-
-
- class CacheControl(TypedDict):
-     type: Literal["ephemeral"]
-
-
- class LLMMessage(TypedDict):
-     role: Literal["user", "assistant", "system"]
-     content: list[LLMTextMessagePart | LLMMediaMessagePart]
-     cache_control: NotRequired[CacheControl]
-
-
- class MessagePart(BaseModel):
-     content: str
-     content_type: ContentType = ContentType.TEXT
-
-     @classmethod
-     def from_media(cls, *, content: bytes, content_type: ContentType) -> "MessagePart":
-         return cls(content=base64.b64encode(content).decode(), content_type=content_type)
-
-     def render(self) -> LLMTextMessagePart | LLMMediaMessagePart:
-         if self.content_type == ContentType.TEXT:
-             return {"type": "text", "text": self.content}
-         else:
-             return {"type": "image_url", "image_url": f"data:{self.content_type};base64,{self.content}"}
-
-
- class UserMessage(BaseModel):
-     parts: list[MessagePart]
-
-     def render(self) -> LLMMessage:
-         content: LLMMessage = {
-             "role": "user",
-             "content": [part.render() for part in self.parts],
-         }
-         if any(part.content_type != ContentType.TEXT for part in self.parts):
-             content["cache_control"] = {"type": "ephemeral"}
-         return content
-
-
- class AssistantMessage(BaseModel):
-     text: str
-
-     def render(self) -> LLMMessage:
-         return {"role": "assistant", "content": [{"type": "text", "text": self.text}]}
-
-
- class SystemMessage(BaseModel):
-     parts: list[MessagePart]
-
-     def render(self) -> LLMMessage:
-         return {
-             "role": "system",
-             "content": [part.render() for part in self.parts],
-         }
goose/agent.py DELETED
@@ -1,26 +0,0 @@
- from ._internal.agent import AgentResponse, IAgentLogger
- from ._internal.types.agent import (
-     AIModel,
-     AssistantMessage,
-     ContentType,
-     LLMMediaMessagePart,
-     LLMMessage,
-     LLMTextMessagePart,
-     MessagePart,
-     SystemMessage,
-     UserMessage,
- )
-
- __all__ = [
-     "AgentResponse",
-     "AIModel",
-     "IAgentLogger",
-     "AssistantMessage",
-     "LLMMediaMessagePart",
-     "LLMMessage",
-     "LLMTextMessagePart",
-     "SystemMessage",
-     "MessagePart",
-     "ContentType",
-     "UserMessage",
- ]
goose_py-0.10.1.dist-info/RECORD DELETED
@@ -1,20 +0,0 @@
- goose/__init__.py,sha256=wjGDgWzKcD6S8loVr0n-rLCpRwg-ZKAixcUaw1wobMc,243
- goose/agent.py,sha256=u6daAnn4fPgP4Jk9cHANyCEku3RmUqKLdqtyGSr8ljI,510
- goose/errors.py,sha256=-0OyZQJWYTRw5YgnCB2_uorVaUsL6Z0QYQO2FqzCiyg,32
- goose/flow.py,sha256=YsZLBa5I1W27_P6LYGWbtFX8ZYx9vJG3KtENYChHm5E,111
- goose/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- goose/runs.py,sha256=ub-r_gzbUbaIzWXX-jc-dncNxEh6zTfzIkmnDfCSbRI,160
- goose/task.py,sha256=95rspdxETJoY12IHBl3KjnVIdqQnf1jDKlnGWNWOTvQ,53
- goose/_internal/agent.py,sha256=v_nzQLM_vyKWOc-g1Ke8TwlThkThjraE4HFHNywHMv0,7915
- goose/_internal/conversation.py,sha256=zvKqLxJSCIIuhD7gjcSFhleYsLabu-ALl9woWFy3mQU,1766
- goose/_internal/flow.py,sha256=RShMsxgt49g1fZJ3rlwDHtI1j39lZzewx8hZ7DGN5kg,4124
- goose/_internal/result.py,sha256=vtJMfBxb9skfl8st2tn4hBmEq6qmXiJTme_B5QTgu2M,538
- goose/_internal/state.py,sha256=U4gM0K4MAlRFTpqenCYHX9TYGuhWVKIfa4yBeZ9Qc9s,7090
- goose/_internal/store.py,sha256=tWmKfa1-yq1jU6lT3l6kSOmVt2m3H7I1xLMTrxnUDI8,889
- goose/_internal/task.py,sha256=mhcmKDTBl993P3HP3PlNvQtl4gMYy4FMYeQ9xrg5aPk,6252
- goose/_internal/types/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- goose/_internal/types/agent.py,sha256=g0KD-aPWZlUGBx72AwQd3LeniFxHATeflZ7191QjFZA,2696
- goose/_internal/types/telemetry.py,sha256=7zeqyDDxf95puirNM6Gr9VFuxoDshXcV1__V0tiMswE,3663
- goose_py-0.10.1.dist-info/METADATA,sha256=L2yzL8ZW09_75wmrK5YSeEZ2H0RkrODL0zWm1nWW-uA,442
- goose_py-0.10.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- goose_py-0.10.1.dist-info/RECORD,,