goose-py 0.9.7__py3-none-any.whl → 0.9.8__py3-none-any.whl

This diff compares publicly released versions of the package as they appear in their public registry and is provided for informational purposes only.
goose/_internal/agent.py CHANGED
@@ -7,7 +7,7 @@ from litellm import acompletion
  from pydantic import BaseModel, computed_field

  from .result import Result, TextResult
- from .types.agent import AssistantMessage, GeminiModel, SystemMessage, UserMessage
+ from .types.agent import AIModel, AssistantMessage, SystemMessage, UserMessage


  class AgentResponseDump(TypedDict):
@@ -29,22 +29,28 @@ class AgentResponseDump(TypedDict):


  class AgentResponse[R: BaseModel | str](BaseModel):
-     INPUT_CENTS_PER_MILLION_TOKENS: ClassVar[dict[GeminiModel, float]] = {
-         GeminiModel.FLASH_8B: 30,
-         GeminiModel.FLASH: 15,
-         GeminiModel.PRO: 500,
+     INPUT_CENTS_PER_MILLION_TOKENS: ClassVar[dict[AIModel, float]] = {
+         AIModel.VERTEX_FLASH_8B: 30,
+         AIModel.VERTEX_FLASH: 15,
+         AIModel.VERTEX_PRO: 500,
+         AIModel.GEMINI_FLASH_8B: 30,
+         AIModel.GEMINI_FLASH: 15,
+         AIModel.GEMINI_PRO: 500,
      }
-     OUTPUT_CENTS_PER_MILLION_TOKENS: ClassVar[dict[GeminiModel, float]] = {
-         GeminiModel.FLASH_8B: 30,
-         GeminiModel.FLASH: 15,
-         GeminiModel.PRO: 500,
+     OUTPUT_CENTS_PER_MILLION_TOKENS: ClassVar[dict[AIModel, float]] = {
+         AIModel.VERTEX_FLASH_8B: 30,
+         AIModel.VERTEX_FLASH: 15,
+         AIModel.VERTEX_PRO: 500,
+         AIModel.GEMINI_FLASH_8B: 30,
+         AIModel.GEMINI_FLASH: 15,
+         AIModel.GEMINI_PRO: 500,
      }

      response: R
      run_id: str
      flow_name: str
      task_name: str
-     model: GeminiModel
+     model: AIModel
      system: SystemMessage | None = None
      input_messages: list[UserMessage | AssistantMessage]
      input_tokens: int
@@ -130,7 +136,7 @@ class Agent:
          self,
          *,
          messages: list[UserMessage | AssistantMessage],
-         model: GeminiModel,
+         model: AIModel,
          task_name: str,
          response_model: type[R] = TextResult,
          system: SystemMessage | None = None,
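The pricing tables above now carry entries for both the Vertex and the public Gemini model strings, at identical rates. As a rough sketch of what they are for (the cost field goose-py itself computes on AgentResponse is not part of this diff), a per-call cost in cents can be derived from the recorded token counts and these class-level rates:

from goose.agent import AgentResponse, AIModel

# Illustrative only: goose-py's own cost computation is not shown in this diff.
# This simply applies the ClassVar rate tables above to recorded token counts.
def estimate_cost_cents(model: AIModel, input_tokens: int, output_tokens: int) -> float:
    input_rate = AgentResponse.INPUT_CENTS_PER_MILLION_TOKENS[model]
    output_rate = AgentResponse.OUTPUT_CENTS_PER_MILLION_TOKENS[model]
    return input_tokens / 1_000_000 * input_rate + output_tokens / 1_000_000 * output_rate

# e.g. 2,000 input tokens and 500 output tokens on gemini/gemini-1.5-flash
estimate_cost_cents(AIModel.GEMINI_FLASH, input_tokens=2_000, output_tokens=500)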
goose/_internal/task.py CHANGED
@@ -5,7 +5,7 @@ from typing import Any, overload
  from pydantic import BaseModel

  from ..errors import Honk
- from .agent import Agent, GeminiModel, SystemMessage, UserMessage
+ from .agent import Agent, AIModel, SystemMessage, UserMessage
  from .conversation import Conversation
  from .result import Result, TextResult
  from .state import FlowRun, NodeState, get_current_flow_run
@@ -19,7 +19,7 @@ class Task[**P, R: Result]:
          /,
          *,
          retries: int = 0,
-         adapter_model: GeminiModel = GeminiModel.FLASH,
+         adapter_model: AIModel = AIModel.GEMINI_FLASH,
      ) -> None:
          self._generator = generator
          self._retries = retries
@@ -145,14 +145,14 @@ class Task[**P, R: Result]:
  def task[**P, R: Result](generator: Callable[P, Awaitable[R]], /) -> Task[P, R]: ...
  @overload
  def task[**P, R: Result](
-     *, retries: int = 0, adapter_model: GeminiModel = GeminiModel.FLASH
+     *, retries: int = 0, adapter_model: AIModel = AIModel.GEMINI_FLASH
  ) -> Callable[[Callable[P, Awaitable[R]]], Task[P, R]]: ...
  def task[**P, R: Result](
      generator: Callable[P, Awaitable[R]] | None = None,
      /,
      *,
      retries: int = 0,
-     adapter_model: GeminiModel = GeminiModel.FLASH,
+     adapter_model: AIModel = AIModel.GEMINI_FLASH,
  ) -> Task[P, R] | Callable[[Callable[P, Awaitable[R]]], Task[P, R]]:
      if generator is None:
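Note that the default adapter model changes provider here: GeminiModel.FLASH resolved to "vertex_ai/gemini-1.5-flash", while the new default AIModel.GEMINI_FLASH resolves to "gemini/gemini-1.5-flash", the public Gemini API. A minimal usage sketch, importing through the internal modules shown in this diff because the public re-export path for task is not visible here:

from goose._internal.result import TextResult
from goose._internal.task import task
from goose.agent import AIModel

# Sketch only: import paths assume the internal modules shown above; the
# generator body is elided and must return a Result subclass.
@task(retries=1, adapter_model=AIModel.GEMINI_FLASH)
async def summarize(text: str) -> TextResult: ...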
goose/_internal/types/agent.py CHANGED
@@ -4,10 +4,16 @@ from typing import Literal, NotRequired, TypedDict
  from pydantic import BaseModel


- class GeminiModel(StrEnum):
-     PRO = "vertex_ai/gemini-1.5-pro"
-     FLASH = "vertex_ai/gemini-1.5-flash"
-     FLASH_8B = "vertex_ai/gemini-1.5-flash-8b"
+ class AIModel(StrEnum):
+     # vertex (production Google, requires GCP environment)
+     VERTEX_PRO = "vertex_ai/gemini-1.5-pro"
+     VERTEX_FLASH = "vertex_ai/gemini-1.5-flash"
+     VERTEX_FLASH_8B = "vertex_ai/gemini-1.5-flash-8b"
+
+     # gemini (publicly available, no GCP environment required)
+     GEMINI_PRO = "gemini/gemini-1.5-pro"
+     GEMINI_FLASH = "gemini/gemini-1.5-flash"
+     GEMINI_FLASH_8B = "gemini/gemini-1.5-flash-8b"


  class UserMediaContentType(StrEnum):
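Because AIModel is a StrEnum, each member compares equal to (and serializes as) the litellm model string it wraps, so for existing Vertex users the split is a naming change plus new members rather than a behavioural one. A quick check, assuming the goose.agent re-export shown in the next section:

from goose.agent import AIModel

# Each member is the raw litellm model string; the provider prefix selects the
# backend, per the comments in the enum above.
assert AIModel.VERTEX_FLASH == "vertex_ai/gemini-1.5-flash"  # requires a GCP environment
assert AIModel.GEMINI_FLASH == "gemini/gemini-1.5-flash"     # public Gemini API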
goose/agent.py CHANGED
@@ -1,7 +1,7 @@
  from ._internal.agent import AgentResponse, IAgentLogger
  from ._internal.types.agent import (
+     AIModel,
      AssistantMessage,
-     GeminiModel,
      LLMMediaMessagePart,
      LLMMessage,
      LLMTextMessagePart,
@@ -14,9 +14,9 @@ from ._internal.types.agent import (

  __all__ = [
      "AgentResponse",
+     "AIModel",
      "IAgentLogger",
      "AssistantMessage",
-     "GeminiModel",
      "LLMMediaMessagePart",
      "LLMMessage",
      "LLMTextMessagePart",
goose_py-0.9.7.dist-info/METADATA → goose_py-0.9.8.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: goose-py
- Version: 0.9.7
+ Version: 0.9.8
  Summary: A tool for AI workflows based on human-computer collaboration and structured output.
  Author-email: Nash Taylor <nash@chelle.ai>, Joshua Cook <joshua@chelle.ai>, Michael Sankur <michael@chelle.ai>
  Requires-Python: >=3.12
goose_py-0.9.7.dist-info/RECORD → goose_py-0.9.8.dist-info/RECORD RENAMED
@@ -1,18 +1,18 @@
  goose/__init__.py,sha256=wjGDgWzKcD6S8loVr0n-rLCpRwg-ZKAixcUaw1wobMc,243
- goose/agent.py,sha256=xA5mYqjS9iwiXooQzP0l1FsoDe6R7t1lyhekqroMu7c,590
+ goose/agent.py,sha256=2iFjsZtXXNLXu1grCp3S_eJotGJnfP2190GYKEFQdTg,582
  goose/errors.py,sha256=-0OyZQJWYTRw5YgnCB2_uorVaUsL6Z0QYQO2FqzCiyg,32
  goose/flow.py,sha256=YsZLBa5I1W27_P6LYGWbtFX8ZYx9vJG3KtENYChHm5E,111
  goose/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  goose/runs.py,sha256=ub-r_gzbUbaIzWXX-jc-dncNxEh6zTfzIkmnDfCSbRI,160
- goose/_internal/agent.py,sha256=DmkQGXoNEYo7CYUMUu81gJ0ZguCy7SiKQbamqYnvLpk,5824
+ goose/_internal/agent.py,sha256=VRDEhBDTpKZS09B6xnWsOqLsxP7D5NHKVGlxwayPdoo,6030
  goose/_internal/conversation.py,sha256=zbMvP4oxhKAzATVEXZfGVKXWfEjh472MYKhmyJzSLgI,1172
  goose/_internal/flow.py,sha256=RShMsxgt49g1fZJ3rlwDHtI1j39lZzewx8hZ7DGN5kg,4124
  goose/_internal/result.py,sha256=-eZJn-2sPo7rHZ38Sz6IAHXqiJ-Ss39esEoFGimJEBI,155
  goose/_internal/state.py,sha256=pI-C37Ybazo7EJPbZklxbiCYFy3u4I031NKBr8Jm_CI,6534
  goose/_internal/store.py,sha256=tWmKfa1-yq1jU6lT3l6kSOmVt2m3H7I1xLMTrxnUDI8,889
- goose/_internal/task.py,sha256=wK0LMlQKzlRQO-F2Xyni0hkg91cSKkW65aHv0w9ANoc,6035
+ goose/_internal/task.py,sha256=Ka3tm5Ymu8sE6_GCbkj3JDHK683ZET5HlkcgrQgnzEo,6028
  goose/_internal/types/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- goose/_internal/types/agent.py,sha256=rNVt2gEr_m4_8tGFgcdichpPp8xhOS5GY0kN2C4tiE8,2153
- goose_py-0.9.7.dist-info/METADATA,sha256=dtdYEIAxpQERMBb3GwU3-ukAy_-ecRZuchae4Dpz2Po,441
- goose_py-0.9.7.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- goose_py-0.9.7.dist-info/RECORD,,
+ goose/_internal/types/agent.py,sha256=QRYux_vbOe_wRtYBMqFpt4y6v9fKN70mwcsqaT0e1wE,2430
+ goose_py-0.9.8.dist-info/METADATA,sha256=UpVDqVUdVPwynJJtQwQUbyva7qThIBdAPwaTOXpjUPs,441
+ goose_py-0.9.8.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ goose_py-0.9.8.dist-info/RECORD,,