goose-py 0.9.7__tar.gz → 0.9.9__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35):
  1. {goose_py-0.9.7 → goose_py-0.9.9}/.stubs/litellm/__init__.pyi +3 -0
  2. {goose_py-0.9.7 → goose_py-0.9.9}/PKG-INFO +1 -1
  3. {goose_py-0.9.7 → goose_py-0.9.9}/goose/_internal/agent.py +17 -11
  4. {goose_py-0.9.7 → goose_py-0.9.9}/goose/_internal/task.py +4 -4
  5. {goose_py-0.9.7 → goose_py-0.9.9}/goose/_internal/types/agent.py +10 -4
  6. {goose_py-0.9.7 → goose_py-0.9.9}/goose/agent.py +2 -2
  7. goose_py-0.9.9/goose/task.py +3 -0
  8. {goose_py-0.9.7 → goose_py-0.9.9}/pyproject.toml +1 -1
  9. {goose_py-0.9.7 → goose_py-0.9.9}/tests/test_agent.py +2 -2
  10. {goose_py-0.9.7 → goose_py-0.9.9}/uv.lock +1 -1
  11. {goose_py-0.9.7 → goose_py-0.9.9}/.envrc +0 -0
  12. {goose_py-0.9.7 → goose_py-0.9.9}/.github/workflows/publish.yml +0 -0
  13. {goose_py-0.9.7 → goose_py-0.9.9}/.gitignore +0 -0
  14. {goose_py-0.9.7 → goose_py-0.9.9}/.python-version +0 -0
  15. {goose_py-0.9.7 → goose_py-0.9.9}/.stubs/jsonpath_ng/__init__.pyi +0 -0
  16. {goose_py-0.9.7 → goose_py-0.9.9}/Makefile +0 -0
  17. {goose_py-0.9.7 → goose_py-0.9.9}/README.md +0 -0
  18. {goose_py-0.9.7 → goose_py-0.9.9}/goose/__init__.py +0 -0
  19. {goose_py-0.9.7 → goose_py-0.9.9}/goose/_internal/conversation.py +0 -0
  20. {goose_py-0.9.7 → goose_py-0.9.9}/goose/_internal/flow.py +0 -0
  21. {goose_py-0.9.7 → goose_py-0.9.9}/goose/_internal/result.py +0 -0
  22. {goose_py-0.9.7 → goose_py-0.9.9}/goose/_internal/state.py +0 -0
  23. {goose_py-0.9.7 → goose_py-0.9.9}/goose/_internal/store.py +0 -0
  24. {goose_py-0.9.7 → goose_py-0.9.9}/goose/_internal/types/__init__.py +0 -0
  25. {goose_py-0.9.7 → goose_py-0.9.9}/goose/errors.py +0 -0
  26. {goose_py-0.9.7 → goose_py-0.9.9}/goose/flow.py +0 -0
  27. {goose_py-0.9.7 → goose_py-0.9.9}/goose/py.typed +0 -0
  28. {goose_py-0.9.7 → goose_py-0.9.9}/goose/runs.py +0 -0
  29. {goose_py-0.9.7 → goose_py-0.9.9}/tests/__init__.py +0 -0
  30. {goose_py-0.9.7 → goose_py-0.9.9}/tests/test_downstream_task.py +0 -0
  31. {goose_py-0.9.7 → goose_py-0.9.9}/tests/test_hashing.py +0 -0
  32. {goose_py-0.9.7 → goose_py-0.9.9}/tests/test_looping.py +0 -0
  33. {goose_py-0.9.7 → goose_py-0.9.9}/tests/test_refining.py +0 -0
  34. {goose_py-0.9.7 → goose_py-0.9.9}/tests/test_regenerate.py +0 -0
  35. {goose_py-0.9.7 → goose_py-0.9.9}/tests/test_state.py +0 -0
--- goose_py-0.9.7/.stubs/litellm/__init__.pyi
+++ goose_py-0.9.9/.stubs/litellm/__init__.pyi
@@ -4,6 +4,9 @@ _LiteLLMGeminiModel = Literal[
     "vertex_ai/gemini-1.5-flash",
     "vertex_ai/gemini-1.5-pro",
     "vertex_ai/gemini-1.5-flash-8b",
+    "gemini/gemini-1.5-flash",
+    "gemini/gemini-1.5-pro",
+    "gemini/gemini-1.5-flash-8b",
 ]
 _MessageRole = Literal["system", "user", "assistant"]
 
--- goose_py-0.9.7/PKG-INFO
+++ goose_py-0.9.9/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: goose-py
-Version: 0.9.7
+Version: 0.9.9
 Summary: A tool for AI workflows based on human-computer collaboration and structured output.
 Author-email: Nash Taylor <nash@chelle.ai>, Joshua Cook <joshua@chelle.ai>, Michael Sankur <michael@chelle.ai>
 Requires-Python: >=3.12
--- goose_py-0.9.7/goose/_internal/agent.py
+++ goose_py-0.9.9/goose/_internal/agent.py
@@ -7,7 +7,7 @@ from litellm import acompletion
 from pydantic import BaseModel, computed_field
 
 from .result import Result, TextResult
-from .types.agent import AssistantMessage, GeminiModel, SystemMessage, UserMessage
+from .types.agent import AIModel, AssistantMessage, SystemMessage, UserMessage
 
 
 class AgentResponseDump(TypedDict):
@@ -29,22 +29,28 @@ class AgentResponseDump(TypedDict):
 
 
 class AgentResponse[R: BaseModel | str](BaseModel):
-    INPUT_CENTS_PER_MILLION_TOKENS: ClassVar[dict[GeminiModel, float]] = {
-        GeminiModel.FLASH_8B: 30,
-        GeminiModel.FLASH: 15,
-        GeminiModel.PRO: 500,
+    INPUT_CENTS_PER_MILLION_TOKENS: ClassVar[dict[AIModel, float]] = {
+        AIModel.VERTEX_FLASH_8B: 30,
+        AIModel.VERTEX_FLASH: 15,
+        AIModel.VERTEX_PRO: 500,
+        AIModel.GEMINI_FLASH_8B: 30,
+        AIModel.GEMINI_FLASH: 15,
+        AIModel.GEMINI_PRO: 500,
     }
-    OUTPUT_CENTS_PER_MILLION_TOKENS: ClassVar[dict[GeminiModel, float]] = {
-        GeminiModel.FLASH_8B: 30,
-        GeminiModel.FLASH: 15,
-        GeminiModel.PRO: 500,
+    OUTPUT_CENTS_PER_MILLION_TOKENS: ClassVar[dict[AIModel, float]] = {
+        AIModel.VERTEX_FLASH_8B: 30,
+        AIModel.VERTEX_FLASH: 15,
+        AIModel.VERTEX_PRO: 500,
+        AIModel.GEMINI_FLASH_8B: 30,
+        AIModel.GEMINI_FLASH: 15,
+        AIModel.GEMINI_PRO: 500,
     }
 
     response: R
     run_id: str
     flow_name: str
     task_name: str
-    model: GeminiModel
+    model: AIModel
     system: SystemMessage | None = None
     input_messages: list[UserMessage | AssistantMessage]
     input_tokens: int
@@ -130,7 +136,7 @@ class Agent:
         self,
         *,
         messages: list[UserMessage | AssistantMessage],
-        model: GeminiModel,
+        model: AIModel,
         task_name: str,
         response_model: type[R] = TextResult,
         system: SystemMessage | None = None,
--- goose_py-0.9.7/goose/_internal/task.py
+++ goose_py-0.9.9/goose/_internal/task.py
@@ -5,7 +5,7 @@ from typing import Any, overload
 from pydantic import BaseModel
 
 from ..errors import Honk
-from .agent import Agent, GeminiModel, SystemMessage, UserMessage
+from .agent import Agent, AIModel, SystemMessage, UserMessage
 from .conversation import Conversation
 from .result import Result, TextResult
 from .state import FlowRun, NodeState, get_current_flow_run
@@ -19,7 +19,7 @@ class Task[**P, R: Result]:
         /,
         *,
         retries: int = 0,
-        adapter_model: GeminiModel = GeminiModel.FLASH,
+        adapter_model: AIModel = AIModel.GEMINI_FLASH,
     ) -> None:
         self._generator = generator
         self._retries = retries
@@ -145,14 +145,14 @@ class Task[**P, R: Result]:
 def task[**P, R: Result](generator: Callable[P, Awaitable[R]], /) -> Task[P, R]: ...
 @overload
 def task[**P, R: Result](
-    *, retries: int = 0, adapter_model: GeminiModel = GeminiModel.FLASH
+    *, retries: int = 0, adapter_model: AIModel = AIModel.GEMINI_FLASH
 ) -> Callable[[Callable[P, Awaitable[R]]], Task[P, R]]: ...
 def task[**P, R: Result](
     generator: Callable[P, Awaitable[R]] | None = None,
     /,
     *,
     retries: int = 0,
-    adapter_model: GeminiModel = GeminiModel.FLASH,
+    adapter_model: AIModel = AIModel.GEMINI_FLASH,
 ) -> Task[P, R] | Callable[[Callable[P, Awaitable[R]]], Task[P, R]]:
     if generator is None:
--- goose_py-0.9.7/goose/_internal/types/agent.py
+++ goose_py-0.9.9/goose/_internal/types/agent.py
@@ -4,10 +4,16 @@ from typing import Literal, NotRequired, TypedDict
 from pydantic import BaseModel
 
 
-class GeminiModel(StrEnum):
-    PRO = "vertex_ai/gemini-1.5-pro"
-    FLASH = "vertex_ai/gemini-1.5-flash"
-    FLASH_8B = "vertex_ai/gemini-1.5-flash-8b"
+class AIModel(StrEnum):
+    # vertex (production Google, requires GCP environment)
+    VERTEX_PRO = "vertex_ai/gemini-1.5-pro"
+    VERTEX_FLASH = "vertex_ai/gemini-1.5-flash"
+    VERTEX_FLASH_8B = "vertex_ai/gemini-1.5-flash-8b"
+
+    # gemini (publicly available, no GCP environment required)
+    GEMINI_PRO = "gemini/gemini-1.5-pro"
+    GEMINI_FLASH = "gemini/gemini-1.5-flash"
+    GEMINI_FLASH_8B = "gemini/gemini-1.5-flash-8b"
 
 
 class UserMediaContentType(StrEnum):
--- goose_py-0.9.7/goose/agent.py
+++ goose_py-0.9.9/goose/agent.py
@@ -1,7 +1,7 @@
 from ._internal.agent import AgentResponse, IAgentLogger
 from ._internal.types.agent import (
+    AIModel,
     AssistantMessage,
-    GeminiModel,
     LLMMediaMessagePart,
     LLMMessage,
     LLMTextMessagePart,
@@ -14,9 +14,9 @@ from ._internal.types.agent import (
 
 __all__ = [
     "AgentResponse",
+    "AIModel",
     "IAgentLogger",
     "AssistantMessage",
-    "GeminiModel",
     "LLMMediaMessagePart",
     "LLMMessage",
     "LLMTextMessagePart",
--- /dev/null
+++ goose_py-0.9.9/goose/task.py
@@ -0,0 +1,3 @@
+from ._internal.task import Task
+
+__all__ = ["Task"]
--- goose_py-0.9.7/pyproject.toml
+++ goose_py-0.9.9/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "goose-py"
-version = "0.9.7"
+version = "0.9.9"
 description = "A tool for AI workflows based on human-computer collaboration and structured output."
 readme = "README.md"
 authors = [
--- goose_py-0.9.7/tests/test_agent.py
+++ goose_py-0.9.9/tests/test_agent.py
@@ -4,7 +4,7 @@ import pytest
 from pytest_mock import MockerFixture
 
 from goose import Agent, FlowArguments, TextResult, flow, task
-from goose.agent import AgentResponse, GeminiModel, IAgentLogger, TextMessagePart, UserMessage
+from goose.agent import AgentResponse, AIModel, IAgentLogger, TextMessagePart, UserMessage
 
 
 class TestFlowArguments(FlowArguments):
@@ -29,7 +29,7 @@ def mock_litellm(mocker: MockerFixture) -> Mock:
 async def use_agent(*, agent: Agent) -> TextResult:
     return await agent(
         messages=[UserMessage(parts=[TextMessagePart(text="Hello")])],
-        model=GeminiModel.FLASH_8B,
+        model=AIModel.GEMINI_FLASH_8B,
         task_name="greet",
     )
 
--- goose_py-0.9.7/uv.lock
+++ goose_py-0.9.9/uv.lock
@@ -234,7 +234,7 @@ wheels = [
 
 [[package]]
 name = "goose-py"
-version = "0.9.7"
+version = "0.9.9"
 source = { editable = "." }
 dependencies = [
     { name = "jsonpath-ng" },
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes