goose-py 0.9.12__tar.gz → 0.9.14__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {goose_py-0.9.12 → goose_py-0.9.14}/.stubs/litellm/__init__.pyi +2 -0
- {goose_py-0.9.12 → goose_py-0.9.14}/PKG-INFO +1 -1
- {goose_py-0.9.12 → goose_py-0.9.14}/goose/_internal/types/agent.py +22 -31
- {goose_py-0.9.12 → goose_py-0.9.14}/goose/agent.py +4 -8
- {goose_py-0.9.12 → goose_py-0.9.14}/pyproject.toml +1 -1
- {goose_py-0.9.12 → goose_py-0.9.14}/tests/test_agent.py +3 -2
- {goose_py-0.9.12 → goose_py-0.9.14}/tests/test_refining.py +6 -5
- {goose_py-0.9.12 → goose_py-0.9.14}/tests/test_state.py +3 -3
- {goose_py-0.9.12 → goose_py-0.9.14}/uv.lock +1 -1
- {goose_py-0.9.12 → goose_py-0.9.14}/.envrc +0 -0
- {goose_py-0.9.12 → goose_py-0.9.14}/.github/workflows/publish.yml +0 -0
- {goose_py-0.9.12 → goose_py-0.9.14}/.gitignore +0 -0
- {goose_py-0.9.12 → goose_py-0.9.14}/.python-version +0 -0
- {goose_py-0.9.12 → goose_py-0.9.14}/.stubs/jsonpath_ng/__init__.pyi +0 -0
- {goose_py-0.9.12 → goose_py-0.9.14}/Makefile +0 -0
- {goose_py-0.9.12 → goose_py-0.9.14}/README.md +0 -0
- {goose_py-0.9.12 → goose_py-0.9.14}/goose/__init__.py +0 -0
- {goose_py-0.9.12 → goose_py-0.9.14}/goose/_internal/agent.py +0 -0
- {goose_py-0.9.12 → goose_py-0.9.14}/goose/_internal/conversation.py +0 -0
- {goose_py-0.9.12 → goose_py-0.9.14}/goose/_internal/flow.py +0 -0
- {goose_py-0.9.12 → goose_py-0.9.14}/goose/_internal/result.py +0 -0
- {goose_py-0.9.12 → goose_py-0.9.14}/goose/_internal/state.py +0 -0
- {goose_py-0.9.12 → goose_py-0.9.14}/goose/_internal/store.py +0 -0
- {goose_py-0.9.12 → goose_py-0.9.14}/goose/_internal/task.py +0 -0
- {goose_py-0.9.12 → goose_py-0.9.14}/goose/_internal/types/__init__.py +0 -0
- {goose_py-0.9.12 → goose_py-0.9.14}/goose/errors.py +0 -0
- {goose_py-0.9.12 → goose_py-0.9.14}/goose/flow.py +0 -0
- {goose_py-0.9.12 → goose_py-0.9.14}/goose/py.typed +0 -0
- {goose_py-0.9.12 → goose_py-0.9.14}/goose/runs.py +0 -0
- {goose_py-0.9.12 → goose_py-0.9.14}/goose/task.py +0 -0
- {goose_py-0.9.12 → goose_py-0.9.14}/tests/__init__.py +0 -0
- {goose_py-0.9.12 → goose_py-0.9.14}/tests/test_downstream_task.py +0 -0
- {goose_py-0.9.12 → goose_py-0.9.14}/tests/test_hashing.py +0 -0
- {goose_py-0.9.12 → goose_py-0.9.14}/tests/test_looping.py +0 -0
- {goose_py-0.9.12 → goose_py-0.9.14}/tests/test_regenerate.py +0 -0
.stubs/litellm/__init__.pyi
@@ -4,9 +4,11 @@ _LiteLLMGeminiModel = Literal[
     "vertex_ai/gemini-1.5-flash",
     "vertex_ai/gemini-1.5-pro",
     "vertex_ai/gemini-1.5-flash-8b",
+    "vertex_ai/gemini-2.0-flash",
     "gemini/gemini-1.5-flash",
     "gemini/gemini-1.5-pro",
     "gemini/gemini-1.5-flash-8b",
+    "gemini/gemini-2.0-flash",
 ]
 _MessageRole = Literal["system", "user", "assistant"]

PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: goose-py
-Version: 0.9.12
+Version: 0.9.14
 Summary: A tool for AI workflows based on human-computer collaboration and structured output.
 Author-email: Nash Taylor <nash@chelle.ai>, Joshua Cook <joshua@chelle.ai>, Michael Sankur <michael@chelle.ai>
 Requires-Python: >=3.12
goose/_internal/types/agent.py
@@ -1,19 +1,8 @@
 import base64
 from enum import StrEnum
-from typing import Any, Literal, NotRequired, TypedDict
+from typing import Literal, NotRequired, TypedDict

-from pydantic import BaseModel, GetCoreSchemaHandler
-from pydantic_core import CoreSchema, core_schema
-
-
-class Base64MediaContent(str):
-    @classmethod
-    def __get_pydantic_core_schema__(cls, source_type: Any, handler: GetCoreSchemaHandler) -> CoreSchema:
-        return core_schema.no_info_after_validator_function(cls, handler(str))
-
-    @classmethod
-    def from_bytes(cls, content: bytes, /) -> "Base64MediaContent":
-        return cls(base64.b64encode(content).decode())
+from pydantic import BaseModel


 class AIModel(StrEnum):
@@ -21,14 +10,19 @@ class AIModel(StrEnum):
     VERTEX_PRO = "vertex_ai/gemini-1.5-pro"
     VERTEX_FLASH = "vertex_ai/gemini-1.5-flash"
     VERTEX_FLASH_8B = "vertex_ai/gemini-1.5-flash-8b"
+    VERTEX_FLASH_2_0 = "vertex_ai/gemini-2.0-flash"

     # gemini (publicly available, no GCP environment required)
     GEMINI_PRO = "gemini/gemini-1.5-pro"
     GEMINI_FLASH = "gemini/gemini-1.5-flash"
     GEMINI_FLASH_8B = "gemini/gemini-1.5-flash-8b"
+    GEMINI_FLASH_2_0 = "gemini/gemini-2.0-flash"
+

+class ContentType(StrEnum):
+    # text
+    TEXT = "text/plain"

-class UserMediaContentType(StrEnum):
     # images
     JPEG = "image/jpeg"
     PNG = "image/png"
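Since AIModel and ContentType are StrEnum subclasses, the members added above compare directly against their provider strings. A minimal sketch, illustrative only and not part of the package; the public import path assumes the goose/agent.py export changes shown later in this diff:

```python
from goose.agent import AIModel, ContentType

# StrEnum members behave as plain strings, so the new 0.9.14 values can be checked directly.
assert AIModel.VERTEX_FLASH_2_0 == "vertex_ai/gemini-2.0-flash"
assert AIModel.GEMINI_FLASH_2_0 == "gemini/gemini-2.0-flash"
assert ContentType.TEXT == "text/plain"
assert ContentType.JPEG == "image/jpeg"
```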
@@ -62,33 +56,30 @@ class LLMMessage(TypedDict):
     cache_control: NotRequired[CacheControl]


-class TextMessagePart(BaseModel):
-    text: str
-
-    def render(self) -> LLMTextMessagePart:
-        return {"type": "text", "text": self.text}
-
+class MessagePart(BaseModel):
+    content: str
+    content_type: ContentType = ContentType.TEXT

-class MediaMessagePart(BaseModel):
-    content_type: UserMediaContentType
-    content: Base64MediaContent
+    @classmethod
+    def from_media(cls, *, content: bytes, content_type: ContentType) -> "MessagePart":
+        return cls(content=base64.b64encode(content).decode(), content_type=content_type)

-    def render(self) -> LLMMediaMessagePart:
-        return {
-            "type": "image_url",
-            "image_url": f"data:{self.content_type};base64,{self.content}",
-        }
+    def render(self) -> LLMTextMessagePart | LLMMediaMessagePart:
+        if self.content_type == ContentType.TEXT:
+            return {"type": "text", "text": self.content}
+        else:
+            return {"type": "image_url", "image_url": f"data:{self.content_type};base64,{self.content}"}


 class UserMessage(BaseModel):
-    parts: list[
+    parts: list[MessagePart]

     def render(self) -> LLMMessage:
         content: LLMMessage = {
             "role": "user",
             "content": [part.render() for part in self.parts],
         }
-        if any(
+        if any(part.content_type != ContentType.TEXT for part in self.parts):
             content["cache_control"] = {"type": "ephemeral"}
         return content

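The hunk above collapses the old TextMessagePart/MediaMessagePart pair into a single MessagePart model keyed by content_type. A rough usage sketch based only on the code shown in this diff; the byte string is a placeholder:

```python
from goose._internal.types.agent import ContentType, MessagePart, UserMessage

# Text parts default to ContentType.TEXT; media parts are built from raw bytes via from_media.
text_part = MessagePart(content="Describe this image")
image_part = MessagePart.from_media(content=b"\x89PNG\r\n", content_type=ContentType.PNG)

llm_message = UserMessage(parts=[text_part, image_part]).render()
# llm_message["content"] holds one {"type": "text", ...} part and one {"type": "image_url", ...} part.
# Because a non-text part is present, llm_message["cache_control"] == {"type": "ephemeral"}.
```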
@@ -101,7 +92,7 @@ class AssistantMessage(BaseModel):


 class SystemMessage(BaseModel):
-    parts: list[
+    parts: list[MessagePart]

     def render(self) -> LLMMessage:
         return {
goose/agent.py
@@ -2,14 +2,12 @@ from ._internal.agent import AgentResponse, IAgentLogger
 from ._internal.types.agent import (
     AIModel,
     AssistantMessage,
-    Base64MediaContent,
+    ContentType,
     LLMMediaMessagePart,
     LLMMessage,
     LLMTextMessagePart,
-    MediaMessagePart,
+    MessagePart,
     SystemMessage,
-    TextMessagePart,
-    UserMediaContentType,
     UserMessage,
 )

@@ -18,13 +16,11 @@ __all__ = [
     "AIModel",
     "IAgentLogger",
     "AssistantMessage",
-    "Base64MediaContent",
     "LLMMediaMessagePart",
     "LLMMessage",
     "LLMTextMessagePart",
-    "MediaMessagePart",
     "SystemMessage",
-    "TextMessagePart",
-    "UserMediaContentType",
+    "MessagePart",
+    "ContentType",
     "UserMessage",
 ]
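For downstream code, the export changes above mean the media-specific names from 0.9.12 are gone and the consolidated ones are importable from goose.agent. A hedged sketch of the resulting import surface:

```python
# Exports available in 0.9.14 according to the updated __all__ above.
from goose.agent import AIModel, ContentType, MessagePart, SystemMessage, UserMessage

# Removed in 0.9.14; these 0.9.12-era imports should now fail with ImportError:
# from goose.agent import Base64MediaContent, MediaMessagePart, TextMessagePart, UserMediaContentType
```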
tests/test_agent.py
@@ -4,7 +4,8 @@ import pytest
 from pytest_mock import MockerFixture

 from goose import Agent, FlowArguments, TextResult, flow, task
-from goose.agent import
+from goose._internal.types.agent import MessagePart
+from goose.agent import AgentResponse, AIModel, IAgentLogger, UserMessage


 class TestFlowArguments(FlowArguments):
@@ -28,7 +29,7 @@ def mock_litellm(mocker: MockerFixture) -> Mock:
 @task
 async def use_agent(*, agent: Agent) -> TextResult:
     return await agent(
-        messages=[UserMessage(parts=[
+        messages=[UserMessage(parts=[MessagePart(content="Hello")])],
         model=AIModel.GEMINI_FLASH_8B,
         task_name="greet",
     )
tests/test_refining.py
@@ -6,7 +6,8 @@ import pytest
 from pytest_mock import MockerFixture

 from goose import Agent, FlowArguments, Result, flow, task
-from goose.agent import
+from goose._internal.types.agent import MessagePart
+from goose.agent import SystemMessage, UserMessage
 from goose.errors import Honk

@@ -59,8 +60,8 @@ async def test_refining() -> None:
     # imagine this is a new process
     async with sentence.start_run(run_id="1") as second_run:
         await generate_random_word.refine(
-            user_message=UserMessage(parts=[
-            context=SystemMessage(parts=[
+            user_message=UserMessage(parts=[MessagePart(content="Change it")]),
+            context=SystemMessage(parts=[MessagePart(content="Extra info")]),
         )

     random_words = second_run.get_all(task=generate_random_word)
@@ -76,6 +77,6 @@ async def test_refining_before_generate_fails() -> None:
     with pytest.raises(Honk):
         async with sentence.start_run(run_id="2"):
             await generate_random_word.refine(
-                user_message=UserMessage(parts=[
-                context=SystemMessage(parts=[
+                user_message=UserMessage(parts=[MessagePart(content="Change it")]),
+                context=SystemMessage(parts=[MessagePart(content="Extra info")]),
             )
tests/test_state.py
@@ -6,7 +6,7 @@ import pytest
 from pytest_mock import MockerFixture

 from goose import Agent, FlowArguments, Result, flow, task
-from goose._internal.types.agent import
+from goose._internal.types.agent import MessagePart, SystemMessage, UserMessage
 from goose.errors import Honk

@@ -74,8 +74,8 @@ async def test_state_undo() -> None:
     async with with_state.start_run(run_id="2"):
         await generate_random_word.refine(
             index=0,
-            user_message=UserMessage(parts=[
-            context=SystemMessage(parts=[
+            user_message=UserMessage(parts=[MessagePart(content="Change it")]),
+            context=SystemMessage(parts=[MessagePart(content="Extra info")]),
         )

     async with with_state.start_run(run_id="2") as run: