goose-py 0.11.17__py3-none-any.whl → 0.11.19__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
goose/_internal/agent.py CHANGED
@@ -4,10 +4,11 @@ from typing import Any, Literal, Protocol, overload
 
 from aikernel import (
     LLMAssistantMessage,
-    LLMModel,
+    LLMModelAlias,
     LLMSystemMessage,
     LLMToolMessage,
     LLMUserMessage,
+    Router,
    llm_structured,
     llm_unstructured,
 )
@@ -40,18 +41,21 @@ class Agent:
         self,
         *,
         messages: list[LLMUserMessage | LLMAssistantMessage | LLMSystemMessage],
-        model: LLMModel,
+        model: LLMModelAlias,
         task_name: str,
+        router: Router,
         response_model: type[R] = TextResult,
     ) -> R:
         start_time = datetime.now()
         typed_messages: list[ExpectedMessage] = [*messages]
 
         if response_model is TextResult:
-            response = await llm_unstructured(model=model, messages=typed_messages)
+            response = await llm_unstructured(model=model, messages=typed_messages, router=router)
             parsed_response = response_model.model_validate({"text": response.text})
         else:
-            response = await llm_structured(model=model, messages=typed_messages, response_model=response_model)
+            response = await llm_structured(
+                model=model, messages=typed_messages, response_model=response_model, router=router
+            )
             parsed_response = response.structured_response
 
         end_time = datetime.now()
@@ -88,12 +92,13 @@ class Agent:
         self,
         *,
         messages: list[LLMUserMessage | LLMAssistantMessage | LLMSystemMessage],
-        model: LLMModel,
+        model: LLMModelAlias,
         task_name: str,
+        router: Router,
     ) -> str:
         start_time = datetime.now()
         typed_messages: list[ExpectedMessage] = [*messages]
-        response = await llm_unstructured(model=model, messages=typed_messages)
+        response = await llm_unstructured(model=model, messages=typed_messages, router=router)
         end_time = datetime.now()
 
         if isinstance(messages[0], LLMSystemMessage):
@@ -128,14 +133,15 @@ class Agent:
         self,
         *,
         messages: list[LLMUserMessage | LLMAssistantMessage | LLMSystemMessage],
-        model: LLMModel,
+        model: LLMModelAlias,
+        router: Router,
         task_name: str,
         response_model: type[R],
     ) -> R:
         start_time = datetime.now()
         typed_messages: list[ExpectedMessage] = [*messages]
         find_replace_response = await llm_structured(
-            model=model, messages=typed_messages, response_model=FindReplaceResponse
+            model=model, messages=typed_messages, response_model=FindReplaceResponse, router=router
         )
         parsed_find_replace_response = find_replace_response.structured_response
         end_time = datetime.now()
@@ -179,7 +185,8 @@ class Agent:
         self,
         *,
         messages: list[LLMUserMessage | LLMAssistantMessage | LLMSystemMessage],
-        model: LLMModel,
+        model: LLMModelAlias,
+        router: Router,
         task_name: str,
         mode: Literal["generate"],
         response_model: type[R],
@@ -190,7 +197,8 @@
         self,
         *,
         messages: list[LLMUserMessage | LLMAssistantMessage | LLMSystemMessage],
-        model: LLMModel,
+        model: LLMModelAlias,
+        router: Router,
         task_name: str,
         mode: Literal["ask"],
         response_model: type[R] = TextResult,
@@ -201,7 +209,8 @@
         self,
         *,
         messages: list[LLMUserMessage | LLMAssistantMessage | LLMSystemMessage],
-        model: LLMModel,
+        model: LLMModelAlias,
+        router: Router,
         task_name: str,
         response_model: type[R],
         mode: Literal["refine"],
@@ -212,7 +221,8 @@
         self,
         *,
         messages: list[LLMUserMessage | LLMAssistantMessage | LLMSystemMessage],
-        model: LLMModel,
+        model: LLMModelAlias,
+        router: Router,
         task_name: str,
         response_model: type[R],
     ) -> R: ...
@@ -221,7 +231,8 @@
         self,
         *,
         messages: list[LLMUserMessage | LLMAssistantMessage | LLMSystemMessage],
-        model: LLMModel,
+        model: LLMModelAlias,
+        router: Router,
         task_name: str,
         response_model: type[R] = TextResult,
         mode: Literal["generate", "ask", "refine"] = "generate",
@@ -229,13 +240,13 @@
         match mode:
             case "generate":
                 return await self.generate(
-                    messages=messages, model=model, task_name=task_name, response_model=response_model
+                    messages=messages, model=model, task_name=task_name, router=router, response_model=response_model
                 )
             case "ask":
-                return await self.ask(messages=messages, model=model, task_name=task_name)
+                return await self.ask(messages=messages, model=model, task_name=task_name, router=router)
             case "refine":
                 return await self.refine(
-                    messages=messages, model=model, task_name=task_name, response_model=response_model
+                    messages=messages, model=model, task_name=task_name, router=router, response_model=response_model
                 )
 
     def __apply_find_replace[R: Result](
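In practice, upgrading means threading a Router through every Agent call and switching model identifiers from LLMModel enum members to plain string aliases. A minimal sketch of an updated call site, assuming an already-constructed aikernel Router; import paths follow the internal module layout shown in this diff, and the location of TextResult is an assumption:

from aikernel import LLMUserMessage, Router

from goose._internal.agent import Agent
from goose._internal.result import TextResult  # assumed location of TextResult

async def summarize(agent: Agent, router: Router, messages: list[LLMUserMessage]) -> str:
    # model is now a string alias (LLMModelAlias), not an LLMModel enum member,
    # and router is a new required keyword argument on every agent entry point.
    result = await agent.generate(
        messages=messages,
        model="gemini-2.0-flash",
        task_name="summarize",
        router=router,
        response_model=TextResult,
    )
    return result.text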
goose/_internal/task.py CHANGED
@@ -2,7 +2,7 @@ import hashlib
 from collections.abc import Awaitable, Callable
 from typing import Any, overload
 
-from aikernel import LLMModel, LLMSystemMessage, LLMUserMessage
+from aikernel import LLMModelAlias, LLMSystemMessage, LLMUserMessage, Router
 from pydantic import BaseModel
 
 from goose._internal.agent import Agent
@@ -18,11 +18,11 @@ class Task[**P, R: Result]:
         /,
         *,
         retries: int = 0,
-        refinement_model: LLMModel = LLMModel.GEMINI_2_0_FLASH,
+        refinement_model: LLMModelAlias = "gemini-2.0-flash-lite",
     ) -> None:
         self._generator = generator
         self._retries = retries
-        self._refinement_model = refinement_model
+        self._refinement_model: LLMModelAlias = refinement_model
 
     @property
     def result_type(self) -> type[R]:
@@ -45,7 +45,7 @@ class Task[**P, R: Result]:
         return self.result_type.model_validate_json(state.raw_result)
 
     async def ask(
-        self, *, user_message: LLMUserMessage, context: LLMSystemMessage | None = None, index: int = 0
+        self, *, user_message: LLMUserMessage, router: Router, context: LLMSystemMessage | None = None, index: int = 0
     ) -> str:
         flow_run = self.__get_current_flow_run()
         node_state = flow_run.get_state(task=self, index=index)
@@ -62,6 +62,7 @@
             model=self._refinement_model,
             task_name=f"ask--{self.name}",
             mode="ask",
+            router=router,
         )
         node_state.add_answer(answer=answer)
         flow_run.upsert_node_state(node_state)
@@ -72,6 +73,7 @@
         self,
         *,
         user_message: LLMUserMessage,
+        router: Router,
         context: LLMSystemMessage | None = None,
         index: int = 0,
     ) -> R:
@@ -91,6 +93,7 @@
             task_name=f"refine--{self.name}",
             response_model=self.result_type,
             mode="refine",
+            router=router,
         )
         node_state.add_result(result=result.model_dump_json())
         flow_run.upsert_node_state(node_state)
@@ -154,14 +157,14 @@ class Task[**P, R: Result]:
 def task[**P, R: Result](generator: Callable[P, Awaitable[R]], /) -> Task[P, R]: ...
 @overload
 def task[**P, R: Result](
-    *, retries: int = 0, refinement_model: LLMModel = LLMModel.GEMINI_2_0_FLASH
+    *, retries: int = 0, refinement_model: LLMModelAlias = "gemini-2.0-flash-lite"
 ) -> Callable[[Callable[P, Awaitable[R]]], Task[P, R]]: ...
 def task[**P, R: Result](
     generator: Callable[P, Awaitable[R]] | None = None,
     /,
     *,
     retries: int = 0,
-    refinement_model: LLMModel = LLMModel.GEMINI_2_0_FLASH,
+    refinement_model: LLMModelAlias = "gemini-2.0-flash-lite",
 ) -> Task[P, R] | Callable[[Callable[P, Awaitable[R]]], Task[P, R]]:
     if generator is None:
 
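Note that the task decorator's refinement_model default changes in kind as well as value: it is now the string alias "gemini-2.0-flash-lite" rather than LLMModel.GEMINI_2_0_FLASH. Callers of Task.ask and Task.refine must likewise pass a router. A short sketch under those assumptions (the TextResult import path is assumed, and ask must run inside an active flow run):

from aikernel import LLMUserMessage, Router

from goose._internal.result import TextResult  # assumed location
from goose._internal.task import task

@task(retries=1, refinement_model="gemini-2.0-flash-lite")  # string alias, not an enum
async def draft(topic: str) -> TextResult:
    ...  # generator body elided in this sketch

async def follow_up(user_message: LLMUserMessage, router: Router) -> str:
    # ask() and refine() now take router as a required keyword argument;
    # both still need to be called while a flow run is active.
    return await draft.ask(user_message=user_message, router=router)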
goose/_internal/types/telemetry.py CHANGED
@@ -2,7 +2,7 @@ import json
 from datetime import datetime
 from typing import ClassVar, TypedDict
 
-from aikernel import LiteLLMMessage, LLMModel
+from aikernel import LiteLLMMessage, LLMModelAlias
 from pydantic import BaseModel, computed_field
 
 from goose.errors import Honk
@@ -27,28 +27,24 @@ class AgentResponseDump(TypedDict):
 
 
 class AgentResponse[R: BaseModel | str](BaseModel):
-    INPUT_DOLLARS_PER_MILLION_TOKENS: ClassVar[dict[LLMModel, float]] = {
-        LLMModel.VERTEX_GEMINI_2_0_FLASH: 0.30,
-        LLMModel.VERTEX_GEMINI_2_0_FLASH_LITE: 0.15,
-        LLMModel.VERTEX_GEMINI_2_0_PRO_EXP_02_05: 5.00,
-        LLMModel.GEMINI_2_0_FLASH: 0.30,
-        LLMModel.GEMINI_2_0_FLASH_LITE: 0.15,
-        LLMModel.GEMINI_2_0_PRO_EXP_02_05: 5.00,
+    INPUT_DOLLARS_PER_MILLION_TOKENS: ClassVar[dict[LLMModelAlias, float]] = {
+        "gemini-2.0-flash": 0.30,
+        "gemini-2.0-flash-lite": 0.15,
+        "claude-3.5-sonnet": 3.00,
+        "claude-3.7-sonnet": 3.00,
     }
-    OUTPUT_DOLLARS_PER_MILLION_TOKENS: ClassVar[dict[LLMModel, float]] = {
-        LLMModel.VERTEX_GEMINI_2_0_FLASH: 0.30,
-        LLMModel.VERTEX_GEMINI_2_0_FLASH_LITE: 0.15,
-        LLMModel.VERTEX_GEMINI_2_0_PRO_EXP_02_05: 5.00,
-        LLMModel.GEMINI_2_0_FLASH: 0.30,
-        LLMModel.GEMINI_2_0_FLASH_LITE: 0.15,
-        LLMModel.GEMINI_2_0_PRO_EXP_02_05: 5.00,
+    OUTPUT_DOLLARS_PER_MILLION_TOKENS: ClassVar[dict[LLMModelAlias, float]] = {
+        "gemini-2.0-flash": 0.30,
+        "gemini-2.0-flash-lite": 0.15,
+        "claude-3.5-sonnet": 15.00,
+        "claude-3.7-sonnet": 15.00,
     }
 
     response: R
     run_id: str
     flow_name: str
     task_name: str
-    model: LLMModel
+    model: LLMModelAlias
     system: LiteLLMMessage | None = None
     input_messages: list[LiteLLMMessage]
     input_tokens: int
@@ -102,7 +98,7 @@ class AgentResponse[R: BaseModel | str](BaseModel):
             "run_id": self.run_id,
             "flow_name": self.flow_name,
             "task_name": self.task_name,
-            "model": self.model.value,
+            "model": self.model,
             "system_message": minimized_system_message,
             "input_messages": minimized_input_messages,
             "output_message": output_message,
goose_py-0.11.17.dist-info/METADATA → goose_py-0.11.19.dist-info/METADATA CHANGED
@@ -1,10 +1,10 @@
 Metadata-Version: 2.4
 Name: goose-py
-Version: 0.11.17
+Version: 0.11.19
 Summary: A tool for AI workflows based on human-computer collaboration and structured output.
 Author-email: Nash Taylor <nash@chelle.ai>, Joshua Cook <joshua@chelle.ai>, Michael Sankur <michael@chelle.ai>
 Requires-Python: >=3.12
-Requires-Dist: aikernel==0.1.37
+Requires-Dist: aikernel==0.1.39
 Requires-Dist: jsonpath-ng>=1.7.0
 Requires-Dist: pydantic>=2.8.2
 Description-Content-Type: text/markdown
goose_py-0.11.17.dist-info/RECORD → goose_py-0.11.19.dist-info/RECORD CHANGED
@@ -4,15 +4,15 @@ goose/flow.py,sha256=YsZLBa5I1W27_P6LYGWbtFX8ZYx9vJG3KtENYChHm5E,111
 goose/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 goose/runs.py,sha256=ub-r_gzbUbaIzWXX-jc-dncNxEh6zTfzIkmnDfCSbRI,160
 goose/task.py,sha256=95rspdxETJoY12IHBl3KjnVIdqQnf1jDKlnGWNWOTvQ,53
-goose/_internal/agent.py,sha256=qDPPsOuvTkUpaKXV5aVTIn_yWUs_XZyHDPJaIq49YAI,8704
+goose/_internal/agent.py,sha256=CpVvuyDkNrFLsVIHaf0Tbvp48vgTxyjQdlETsFdbvJ8,9088
 goose/_internal/conversation.py,sha256=vhJwe1pHk2lV60DaB9Tz9KbpzQo7_thRYInPjbIoUTE,1437
 goose/_internal/flow.py,sha256=8MJxlhHYSAzUHZefpF_sRJc37o532OF0X7l3KRopDmc,4115
 goose/_internal/result.py,sha256=vtJMfBxb9skfl8st2tn4hBmEq6qmXiJTme_B5QTgu2M,538
 goose/_internal/state.py,sha256=kA116MpsetsQz6nYodsXOqE3uYz37OTgjC9Vcy_3Qvg,8065
 goose/_internal/store.py,sha256=tWmKfa1-yq1jU6lT3l6kSOmVt2m3H7I1xLMTrxnUDI8,889
-goose/_internal/task.py,sha256=X_eRZxZlf6SwyvF1nIyjoneyqD_TISXqESyxluk63mE,6416
+goose/_internal/task.py,sha256=Qj7Z5CowDAwjcdNJTgfIDEsu7bYr5wUchgJxF4mDp04,6547
 goose/_internal/types/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-goose/_internal/types/telemetry.py,sha256=xTJCJo-TW9Pd5QK983HMw9CqJ7vw-BLUkYkbantIP30,4075
-goose_py-0.11.17.dist-info/METADATA,sha256=mid84snXvLt_V2dGQ9Y23BfrTTMuahZaPz3D6T-xsYM,444
-goose_py-0.11.17.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-goose_py-0.11.17.dist-info/RECORD,,
+goose/_internal/types/telemetry.py,sha256=xpfkhx7zCdZjjKU8rPuUJ2UHgqmZ084ZupzbTU36gSM,3791
+goose_py-0.11.19.dist-info/METADATA,sha256=RDrIiV9eVeTK3CEDSBwsQxoubkEbuYN9R03GCqdXK4E,444
+goose_py-0.11.19.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+goose_py-0.11.19.dist-info/RECORD,,
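As an aside, the sha256 values in RECORD are urlsafe-base64-encoded digests with the trailing "=" padding stripped, per the wheel RECORD convention, so a changed hash reliably indicates changed file contents. A small sketch for recomputing an entry when verifying an unpacked wheel:

import base64
import hashlib
from pathlib import Path

def record_entry(path: Path) -> str:
    """Build a RECORD line: path,sha256=<urlsafe-b64 digest, padding stripped>,size."""
    data = path.read_bytes()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=")
    return f"{path},sha256={digest.decode()},{len(data)}"

# e.g. record_entry(Path("goose/_internal/agent.py")) should reproduce the
# agent.py line above when run against the unpacked 0.11.19 wheel.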