goose-py 0.3.11__tar.gz → 0.4.0__tar.gz

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: goose-py
-Version: 0.3.11
+Version: 0.4.0
 Summary: A tool for AI workflows based on human-computer collaboration and structured output.
 Home-page: https://github.com/chelle-ai/goose
 Keywords: ai,yaml,configuration,llm
@@ -10,10 +10,9 @@ from pydantic import BaseModel, computed_field
 
 
 class GeminiModel(StrEnum):
-    EXP = "gemini/gemini-exp-1121"
-    PRO = "gemini/gemini-1.5-pro"
-    FLASH = "gemini/gemini-1.5-flash"
-    FLASH_8B = "gemini/gemini-1.5-flash-8b"
+    PRO = "vertex_ai/gemini-1.5-pro"
+    FLASH = "vertex_ai/gemini-1.5-flash"
+    FLASH_8B = "vertex_ai/gemini-1.5-flash-8b"
 
 
 class UserMediaContentType(StrEnum):
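
The model identifiers switch from the "gemini/" prefix to "vertex_ai/", which in litellm-style routing points at the Vertex AI provider rather than Google AI Studio. The sketch below is a hedged illustration of how such a model string could be used; the ask() helper and the assumption that these values are passed straight to litellm are not goose-py's actual adapter code.

# Hedged sketch: assumes the GeminiModel values are litellm-style model
# strings (the "vertex_ai/" prefix suggests the Vertex AI provider).
# The ask() helper is illustrative only and is not part of goose-py.
from enum import StrEnum

import litellm


class GeminiModel(StrEnum):
    PRO = "vertex_ai/gemini-1.5-pro"
    FLASH = "vertex_ai/gemini-1.5-flash"
    FLASH_8B = "vertex_ai/gemini-1.5-flash-8b"


async def ask(prompt: str, *, model: GeminiModel = GeminiModel.FLASH) -> str:
    # litellm.acompletion returns an OpenAI-compatible response object
    response = await litellm.acompletion(
        model=model.value,
        messages=[{"role": "user", "content": prompt}],
    )
    return response.choices[0].message.content
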
@@ -121,13 +120,11 @@ class AgentResponse[R: BaseModel](BaseModel):
         GeminiModel.FLASH_8B: 30,
         GeminiModel.FLASH: 15,
         GeminiModel.PRO: 500,
-        GeminiModel.EXP: 0,
     }
     OUTPUT_CENTS_PER_MILLION_TOKENS: ClassVar[dict[GeminiModel, float]] = {
         GeminiModel.FLASH_8B: 30,
         GeminiModel.FLASH: 15,
         GeminiModel.PRO: 500,
-        GeminiModel.EXP: 0,
     }
 
     response: R
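
With the experimental model removed, the pricing tables cover only the three Vertex AI models. Assuming the constant names mean what they say (cents per one million tokens), the conversion to a cost figure works as in this small check; the token count is made up and the arithmetic is not taken from goose-py's code.

# Hedged arithmetic check: rates are cents per million tokens, so
# cost_cents = tokens / 1_000_000 * rate. The token count is made up.
INPUT_CENTS_PER_MILLION_TOKENS = {"FLASH_8B": 30, "FLASH": 15, "PRO": 500}

input_tokens = 250_000
rate = INPUT_CENTS_PER_MILLION_TOKENS["PRO"]  # 500 cents per 1M tokens
cost_cents = input_tokens / 1_000_000 * rate  # 0.25 * 500 = 125.0
assert cost_cents == 125.0
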
@@ -115,6 +115,8 @@ class FlowRun:
         self._flow_name = ""
         self._id = ""
         self._agent: Agent | None = None
+        self._flow_args: tuple[Any, ...] | None = None
+        self._flow_kwargs: dict[str, Any] | None = None
 
     @property
     def flow_name(self) -> str:
@@ -130,17 +132,12 @@ class FlowRun:
             raise Honk("Agent is only accessible once a run is started")
         return self._agent
 
-    def add(self, node_state: NodeState[Any], /) -> None:
-        key = (node_state.task_name, node_state.index)
-        self._node_states[key] = node_state.model_dump_json()
+    @property
+    def flow_inputs(self) -> tuple[tuple[Any, ...], dict[str, Any]]:
+        if self._flow_args is None or self._flow_kwargs is None:
+            raise Honk("This Flow run has not been executed before")
 
-    def get_next[R: Result](self, *, task: "Task[Any, R]") -> NodeState[R]:
-        if task.name not in self._last_requested_indices:
-            self._last_requested_indices[task.name] = 0
-        else:
-            self._last_requested_indices[task.name] += 1
-
-        return self.get(task=task, index=self._last_requested_indices[task.name])
+        return self._flow_args, self._flow_kwargs
 
     def get_all[R: Result](self, *, task: "Task[Any, R]") -> list[NodeState[R]]:
         matching_nodes: list[NodeState[R]] = []
@@ -159,13 +156,29 @@ class FlowRun:
         else:
             return NodeState[task.result_type](
                 task_name=task.name,
-                index=index or 0,
+                index=index,
                 conversation=Conversation[task.result_type](
                     user_messages=[], result_messages=[]
                 ),
                 last_hash=0,
             )
 
+    def set_flow_inputs(self, *args: Any, **kwargs: Any) -> None:
+        self._flow_args = args
+        self._flow_kwargs = kwargs
+
+    def add_node_state(self, node_state: NodeState[Any], /) -> None:
+        key = (node_state.task_name, node_state.index)
+        self._node_states[key] = node_state.model_dump_json()
+
+    def get_next[R: Result](self, *, task: "Task[Any, R]") -> NodeState[R]:
+        if task.name not in self._last_requested_indices:
+            self._last_requested_indices[task.name] = 0
+        else:
+            self._last_requested_indices[task.name] += 1
+
+        return self.get(task=task, index=self._last_requested_indices[task.name])
+
     def start(
         self,
         *,
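
The helpers gathered here (set_flow_inputs is new; add_node_state and get_next move above start() with add renamed but otherwise unchanged) keep the same per-task indexing behaviour: the first get_next call for a task in a run gets index 0, each later call gets the next index. A standalone illustration of that counter, with made-up task names:

# Standalone illustration of the per-task index counter behind get_next();
# not goose-py code, task names are made up.
last_requested_indices: dict[str, int] = {}


def next_index(task_name: str) -> int:
    if task_name not in last_requested_indices:
        last_requested_indices[task_name] = 0
    else:
        last_requested_indices[task_name] += 1
    return last_requested_indices[task_name]


assert [next_index("summarize") for _ in range(3)] == [0, 1, 2]
assert next_index("translate") == 0  # each task keeps its own counter
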
@@ -186,26 +199,41 @@ class FlowRun:
         self._id = ""
         self._agent = None
 
+    def clear_node(self, *, task: "Task[Any, Result]", index: int) -> None:
+        key = (task.name, index)
+        if key in self._node_states:
+            del self._node_states[key]
+
     def dump(self) -> SerializedFlowRun:
+        flow_args, flow_kwargs = self.flow_inputs
+
         return SerializedFlowRun(
             json.dumps(
                 {
-                    ":".join([task_name, str(index)]): value
-                    for (task_name, index), value in self._node_states.items()
+                    "node_states": {
+                        ":".join([task_name, str(index)]): value
+                        for (task_name, index), value in self._node_states.items()
+                    },
+                    "flow_args": list(flow_args),
+                    "flow_kwargs": flow_kwargs,
                 }
             )
         )
 
     @classmethod
-    def load(cls, run: SerializedFlowRun, /) -> Self:
+    def load(cls, serialized_flow_run: SerializedFlowRun, /) -> Self:
         flow_run = cls()
-        raw_node_states = json.loads(run)
+        run = json.loads(serialized_flow_run)
+
         new_node_states: dict[tuple[str, int], str] = {}
-        for key, node_state in raw_node_states.items():
+        for key, node_state in run["node_states"].items():
             task_name, index = tuple(key.split(":"))
             new_node_states[(task_name, int(index))] = node_state
-
         flow_run._node_states = new_node_states
+
+        flow_run._flow_args = tuple(run["flow_args"])
+        flow_run._flow_kwargs = run["flow_kwargs"]
+
        return flow_run
 
 
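dump() now nests the node states under a "node_states" key and persists the recorded flow inputs alongside them; load() restores all three, converting flow_args back to a tuple. A hedged sketch of the resulting JSON payload, with a made-up task name, inputs, and node-state placeholder:

# Hedged sketch of the payload shape implied by dump()/load(); the concrete
# values below are made up.
import json

payload = {
    "node_states": {
        # "<task_name>:<index>" -> NodeState serialized as JSON
        "summarize:0": "{\"task_name\": \"summarize\", \"index\": 0}",
    },
    "flow_args": ["raw article text"],
    "flow_kwargs": {"tone": "neutral"},
}

serialized = json.dumps(payload)
restored = json.loads(serialized)
assert restored["flow_args"] == ["raw article text"]
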
@@ -259,8 +287,21 @@ class Flow[**P]:
         _current_flow_run.set(old_run)
 
     async def generate(self, *args: P.args, **kwargs: P.kwargs) -> None:
+        flow_run = _current_flow_run.get()
+        if flow_run is None:
+            raise Honk("No current flow run")
+
+        flow_run.set_flow_inputs(*args, **kwargs)
         await self._fn(*args, **kwargs)
 
+    async def regenerate(self) -> None:
+        flow_run = _current_flow_run.get()
+        if flow_run is None:
+            raise Honk("No current flow run")
+
+        flow_args, flow_kwargs = flow_run.flow_inputs
+        await self._fn(*flow_args, **flow_kwargs)
+
 
 class Task[**P, R: Result]:
     def __init__(
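
generate() now records its inputs on the current run before executing the flow, and the new regenerate() replays the stored inputs. The following is a self-contained sketch of that record-and-replay pattern, deliberately not using goose-py's Flow class or contextvar plumbing:

# Self-contained sketch of the record-and-replay pattern behind generate()
# and regenerate(); an illustration, not goose-py's Flow class.
import asyncio
from typing import Any, Awaitable, Callable


class ReplayableFlow:
    def __init__(self, fn: Callable[..., Awaitable[None]]) -> None:
        self._fn = fn
        self._inputs: tuple[tuple[Any, ...], dict[str, Any]] | None = None

    async def generate(self, *args: Any, **kwargs: Any) -> None:
        self._inputs = (args, kwargs)  # record inputs for later replay
        await self._fn(*args, **kwargs)

    async def regenerate(self) -> None:
        if self._inputs is None:
            raise RuntimeError("This flow has not been executed before")
        args, kwargs = self._inputs
        await self._fn(*args, **kwargs)  # replay with the recorded inputs


async def main() -> None:
    async def pipeline(topic: str) -> None:
        print(f"running pipeline for {topic}")

    flow = ReplayableFlow(pipeline)
    await flow.generate("geese")  # runs and records ("geese",)
    await flow.regenerate()       # runs again with the recorded inputs


asyncio.run(main())
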
@@ -318,7 +359,7 @@ class Task[**P, R: Result]:
 
         result = await self._adapter(conversation=node_state.conversation)
         node_state.add_result(result=result)
-        flow_run.add(node_state)
+        flow_run.add_node_state(node_state)
 
         return result
 
@@ -326,7 +367,7 @@ class Task[**P, R: Result]:
         flow_run = self.__get_current_flow_run()
         node_state = flow_run.get_next(task=self)
         result = await self.generate(node_state, *args, **kwargs)
-        flow_run.add(node_state)
+        flow_run.add_node_state(node_state)
         return result
 
     def __hash_task_call(self, *args: P.args, **kwargs: P.kwargs) -> int:
@@ -10,6 +10,7 @@ class IFlowRunStore(Protocol):
     def __init__(self, *, flow_name: str) -> None: ...
     async def get(self, *, run_id: str) -> FlowRun | None: ...
     async def save(self, *, run: FlowRun) -> None: ...
+    async def delete(self, *, run_id: str) -> None: ...
 
 
 class InMemoryFlowRunStore(IFlowRunStore):
@@ -22,3 +23,6 @@ class InMemoryFlowRunStore(IFlowRunStore):
 
     async def save(self, *, run: FlowRun) -> None:
         self._runs[run.id] = run
+
+    async def delete(self, *, run_id: str) -> None:
+        self._runs.pop(run_id, None)
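
IFlowRunStore grows a delete() hook, and the in-memory store implements it with a tolerant pop(). For persistent stores this hook is what makes run cleanup possible. Below is a hedged sketch of a file-backed store; the goose import path is an assumption based on this diff, not a documented API.

# Hedged sketch: a file-backed store satisfying the extended protocol,
# including the new delete() hook. Adjust the import to your install;
# the path is an assumption, not a documented goose-py API.
from pathlib import Path

from goose.flow import FlowRun  # assumed import path


class DirectoryFlowRunStore:
    """Persists each run as <flow_name>/<run_id>.json on disk."""

    def __init__(self, *, flow_name: str) -> None:
        self._dir = Path(flow_name)
        self._dir.mkdir(parents=True, exist_ok=True)

    async def get(self, *, run_id: str) -> FlowRun | None:
        path = self._dir / f"{run_id}.json"
        if not path.exists():
            return None
        return FlowRun.load(path.read_text())  # load() consumes the serialized string

    async def save(self, *, run: FlowRun) -> None:
        # dump() returns the serialized run as a JSON string
        (self._dir / f"{run.id}.json").write_text(run.dump())

    async def delete(self, *, run_id: str) -> None:
        (self._dir / f"{run_id}.json").unlink(missing_ok=True)
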
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "goose-py"
-version = "0.3.11"
+version = "0.4.0"
 description = "A tool for AI workflows based on human-computer collaboration and structured output."
 authors = [
     "Nash Taylor <nash@chelle.ai>",