goose-py 0.6.0__py3-none-any.whl → 0.7.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- goose/__init__.py +4 -5
- goose/{_agent.py → _internal/agent.py} +14 -27
- goose/{_conversation.py → _internal/conversation.py} +9 -12
- goose/{_flow.py → _internal/flow.py} +8 -9
- goose/{_state.py → _internal/state.py} +9 -19
- goose/{_store.py → _internal/store.py} +2 -2
- goose/{_task.py → _internal/task.py} +11 -18
- goose/agent.py +28 -0
- goose/flow.py +3 -0
- goose/runs.py +4 -0
- goose_py-0.7.1.dist-info/METADATA +14 -0
- goose_py-0.7.1.dist-info/RECORD +18 -0
- {goose_py-0.6.0.dist-info → goose_py-0.7.1.dist-info}/WHEEL +1 -1
- goose_py-0.6.0.dist-info/METADATA +0 -31
- goose_py-0.6.0.dist-info/RECORD +0 -15
- /goose/{_result.py → _internal/result.py} +0 -0
- /goose/{types → _internal/types}/__init__.py +0 -0
- /goose/{types → _internal/types}/agent.py +0 -0
goose/__init__.py
CHANGED
@@ -1,5 +1,4 @@
-from goose.
-from goose.
-from goose.
-from goose.
-from goose._task import task
+from goose._internal.agent import Agent
+from goose._internal.flow import flow
+from goose._internal.result import Result, TextResult
+from goose._internal.task import task
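The rewritten __init__ simply re-exports the relocated internals, so consumers keep importing from the package root. A minimal, illustrative check (assuming goose-py 0.7.1 is installed; names taken from the added lines above):

    # illustrative only: these names are re-exported from goose._internal.* in 0.7.1
    from goose import Agent, Result, TextResult, flow, task

    print(Agent, Result, TextResult, flow, task)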
goose/{_agent.py → _internal/agent.py}
RENAMED
@@ -6,8 +6,13 @@ from typing import Any, ClassVar, Protocol, TypedDict
 from litellm import acompletion
 from pydantic import BaseModel, computed_field

-from goose.
-from goose.types.agent import
+from goose._internal.result import Result, TextResult
+from goose._internal.types.agent import (
+    AssistantMessage,
+    GeminiModel,
+    SystemMessage,
+    UserMessage,
+)


 class AgentResponseDump(TypedDict):
@@ -60,20 +65,12 @@ class AgentResponse[R: BaseModel | str](BaseModel):
     @computed_field
     @property
     def input_cost(self) -> float:
-        return (
-            self.INPUT_CENTS_PER_MILLION_TOKENS[self.model]
-            * self.input_tokens
-            / 1_000_000
-        )
+        return self.INPUT_CENTS_PER_MILLION_TOKENS[self.model] * self.input_tokens / 1_000_000

     @computed_field
     @property
     def output_cost(self) -> float:
-        return (
-            self.OUTPUT_CENTS_PER_MILLION_TOKENS[self.model]
-            * self.output_tokens
-            / 1_000_000
-        )
+        return self.OUTPUT_CENTS_PER_MILLION_TOKENS[self.model] * self.output_tokens / 1_000_000

     @computed_field
     @property
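Both cost properties are a flat rate calculation: cents per million tokens times the token count. A quick worked example with made-up numbers, since the real rate tables are class constants that do not appear in this hunk:

    # hypothetical rate and token count, for illustration only
    input_cents_per_million_tokens = 7.5
    input_tokens = 120_000
    input_cost = input_cents_per_million_tokens * input_tokens / 1_000_000
    print(input_cost)  # 0.9 cents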
@@ -95,15 +92,9 @@ class AgentResponse[R: BaseModel | str](BaseModel):
             for part in message["content"]:
                 if part["type"] == "image_url":
                     part["image_url"] = "__MEDIA__"
-        minimized_input_messages = [
-
-
-
-        output_message = (
-            self.response.model_dump_json()
-            if isinstance(self.response, BaseModel)
-            else self.response
-        )
+        minimized_input_messages = [json.dumps(message) for message in minimized_input_messages]
+
+        output_message = self.response.model_dump_json() if isinstance(self.response, BaseModel) else self.response

         return {
             "run_id": self.run_id,
@@ -156,9 +147,7 @@ class Agent:

         if response_model is TextResult:
             response = await acompletion(model=model.value, messages=rendered_messages)
-            parsed_response = response_model.model_validate(
-                {"text": response.choices[0].message.content}
-            )
+            parsed_response = response_model.model_validate({"text": response.choices[0].message.content})
         else:
             response = await acompletion(
                 model=model.value,
@@ -169,9 +158,7 @@ class Agent:
                     "enforce_validation": True,
                 },
             )
-            parsed_response = response_model.model_validate_json(
-                response.choices[0].message.content
-            )
+            parsed_response = response_model.model_validate_json(response.choices[0].message.content)

         end_time = datetime.now()
         agent_response = AgentResponse(
goose/{_conversation.py → _internal/conversation.py}
RENAMED
@@ -1,7 +1,12 @@
 from pydantic import BaseModel

-from goose.
-from goose.types.agent import
+from goose._internal.result import Result
+from goose._internal.types.agent import (
+    AssistantMessage,
+    LLMMessage,
+    SystemMessage,
+    UserMessage,
+)


 class Conversation[R: Result](BaseModel):
@@ -19,18 +24,10 @@ class Conversation[R: Result](BaseModel):
         messages.append(self.context.render())

         for message_index in range(len(self.user_messages)):
-            messages.append(
-                AssistantMessage(
-                    text=self.result_messages[message_index].model_dump_json()
-                ).render()
-            )
+            messages.append(AssistantMessage(text=self.result_messages[message_index].model_dump_json()).render())
             messages.append(self.user_messages[message_index].render())

         if len(self.result_messages) > len(self.user_messages):
-            messages.append(
-                AssistantMessage(
-                    text=self.result_messages[-1].model_dump_json()
-                ).render()
-            )
+            messages.append(AssistantMessage(text=self.result_messages[-1].model_dump_json()).render())

         return messages
goose/{_flow.py → _internal/flow.py}
RENAMED
@@ -1,21 +1,20 @@
+from collections.abc import AsyncIterator, Awaitable, Callable
 from contextlib import asynccontextmanager
 from types import CodeType
-from typing import
+from typing import Protocol, overload

-from goose.
-from goose.
-from goose.
-from goose.
-from goose.
+from goose._internal.agent import Agent, IAgentLogger
+from goose._internal.conversation import Conversation
+from goose._internal.result import Result
+from goose._internal.state import FlowRun, get_current_flow_run, set_current_flow_run
+from goose._internal.store import IFlowRunStore, InMemoryFlowRunStore
 from goose.errors import Honk


 class IAdapter[ResultT: Result](Protocol):
     __code__: CodeType

-    async def __call__(
-        self, *, conversation: Conversation[ResultT], agent: Agent
-    ) -> ResultT: ...
+    async def __call__(self, *, conversation: Conversation[ResultT], agent: Agent) -> ResultT: ...


 class Flow[**P]:
goose/{_state.py → _internal/state.py}
RENAMED
@@ -4,18 +4,18 @@ from typing import TYPE_CHECKING, Any, Self

 from pydantic import BaseModel

-from goose.
+from goose._internal.agent import (
     Agent,
     IAgentLogger,
     SystemMessage,
     UserMessage,
 )
-from goose.
-from goose.
+from goose._internal.conversation import Conversation
+from goose._internal.result import Result
 from goose.errors import Honk

 if TYPE_CHECKING:
-    from goose.
+    from goose._internal.task import Task


 @dataclass
@@ -97,23 +97,17 @@ class FlowRun:
         matching_nodes: list[NodeState[R]] = []
         for key, node_state in self._node_states.items():
             if key[0] == task.name:
-                matching_nodes.append(
-                    NodeState[task.result_type].model_validate_json(node_state)
-                )
+                matching_nodes.append(NodeState[task.result_type].model_validate_json(node_state))
         return sorted(matching_nodes, key=lambda node: node.index)

     def get[R: Result](self, *, task: "Task[Any, R]", index: int = 0) -> NodeState[R]:
-        if (
-            existing_node_state := self._node_states.get((task.name, index))
-        ) is not None:
+        if (existing_node_state := self._node_states.get((task.name, index))) is not None:
             return NodeState[task.result_type].model_validate_json(existing_node_state)
         else:
             return NodeState[task.result_type](
                 task_name=task.name,
                 index=index,
-                conversation=Conversation[task.result_type](
-                    user_messages=[], result_messages=[]
-                ),
+                conversation=Conversation[task.result_type](user_messages=[], result_messages=[]),
                 last_hash=0,
             )

@@ -143,9 +137,7 @@ class FlowRun:
         self._last_requested_indices = {}
         self._flow_name = flow_name
         self._id = run_id
-        self._agent = Agent(
-            flow_name=self.flow_name, run_id=self.id, logger=agent_logger
-        )
+        self._agent = Agent(flow_name=self.flow_name, run_id=self.id, logger=agent_logger)

     def end(self) -> None:
         self._last_requested_indices = {}
@@ -177,9 +169,7 @@ class FlowRun:
         return flow_run


-_current_flow_run: ContextVar[FlowRun | None] = ContextVar(
-    "current_flow_run", default=None
-)
+_current_flow_run: ContextVar[FlowRun | None] = ContextVar("current_flow_run", default=None)


 def get_current_flow_run() -> FlowRun | None:
goose/{_store.py → _internal/store.py}
RENAMED
@@ -2,8 +2,8 @@ from __future__ import annotations

 from typing import Protocol

-from goose.
-from goose.
+from goose._internal.flow import FlowRun
+from goose._internal.state import FlowRunState


 class IFlowRunStore(Protocol):
goose/{_task.py → _internal/task.py}
RENAMED
@@ -1,11 +1,12 @@
-from
-
-
-from goose.
-from goose.
-from goose.
+from collections.abc import Awaitable, Callable
+from typing import overload
+
+from goose._internal.agent import Agent, GeminiModel, SystemMessage, UserMessage
+from goose._internal.conversation import Conversation
+from goose._internal.result import Result, TextResult
+from goose._internal.state import FlowRun, NodeState, get_current_flow_run
+from goose._internal.types.agent import AssistantMessage
 from goose.errors import Honk
-from goose.types.agent import AssistantMessage


 class Task[**P, R: Result]:
@@ -33,9 +34,7 @@ class Task[**P, R: Result]:
     def name(self) -> str:
         return self._generator.__name__

-    async def generate(
-        self, state: NodeState[R], *args: P.args, **kwargs: P.kwargs
-    ) -> R:
+    async def generate(self, state: NodeState[R], *args: P.args, **kwargs: P.kwargs) -> R:
         state_hash = self.__hash_task_call(*args, **kwargs)
         if state_hash != state.last_hash:
             result = await self._generator(*args, **kwargs)
@@ -58,9 +57,7 @@ class Task[**P, R: Result]:
         node_state.set_context(context=context)
         node_state.add_user_message(message=user_message)

-        result = await self.__adapt(
-            conversation=node_state.conversation, agent=flow_run.agent
-        )
+        result = await self.__adapt(conversation=node_state.conversation, agent=flow_run.agent)
         node_state.add_result(result=result)
         flow_run.add_node_state(node_state)

@@ -97,11 +94,7 @@ class Task[**P, R: Result]:

     def __hash_task_call(self, *args: P.args, **kwargs: P.kwargs) -> int:
         try:
-            to_hash = str(
-                tuple(args)
-                + tuple(kwargs.values())
-                + (self._generator.__code__, self._adapter_model)
-            )
+            to_hash = str(tuple(args) + tuple(kwargs.values()) + (self._generator.__code__, self._adapter_model))
             return hash(to_hash)
         except TypeError:
             raise Honk(f"Unhashable argument to task {self.name}: {args} {kwargs}")
goose/agent.py
ADDED
@@ -0,0 +1,28 @@
+from goose._internal.agent import AgentResponse, IAgentLogger
+from goose._internal.types.agent import (
+    AssistantMessage,
+    GeminiModel,
+    LLMMediaMessagePart,
+    LLMMessage,
+    LLMTextMessagePart,
+    MediaMessagePart,
+    SystemMessage,
+    TextMessagePart,
+    UserMediaContentType,
+    UserMessage,
+)
+
+__all__ = [
+    "AgentResponse",
+    "IAgentLogger",
+    "AssistantMessage",
+    "GeminiModel",
+    "LLMMediaMessagePart",
+    "LLMMessage",
+    "LLMTextMessagePart",
+    "MediaMessagePart",
+    "SystemMessage",
+    "TextMessagePart",
+    "UserMediaContentType",
+    "UserMessage",
+]
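The new goose/agent.py is a thin public facade over the relocated goose._internal modules. A hedged sanity check (assuming goose-py 0.7.1 is installed) that the re-exported names resolve from the public path:

    # illustrative only: import from the public module rather than goose._internal.types.agent
    from goose.agent import AssistantMessage, GeminiModel, SystemMessage, UserMessage

    print([cls.__name__ for cls in (AssistantMessage, GeminiModel, SystemMessage, UserMessage)])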
goose/flow.py
ADDED
goose/runs.py
ADDED
goose_py-0.7.1.dist-info/METADATA
ADDED
@@ -0,0 +1,14 @@
+Metadata-Version: 2.4
+Name: goose-py
+Version: 0.7.1
+Summary: A tool for AI workflows based on human-computer collaboration and structured output.
+Author-email: Nash Taylor <nash@chelle.ai>, Joshua Cook <joshua@chelle.ai>, Michael Sankur <michael@chelle.ai>
+Requires-Python: >=3.12
+Requires-Dist: jsonpath-ng>=1.7.0
+Requires-Dist: litellm>=1.56.5
+Requires-Dist: pydantic>=2.8.2
+Description-Content-Type: text/markdown
+
+# Goose
+
+Docs to come.
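The Requires-Python: >=3.12 floor matches the PEP 695 generic syntax used throughout the diff (class Conversation[R: Result], class Flow[**P], class Task[**P, R: Result]). A standalone illustration of that syntax, not taken from goose itself:

    # runs on Python 3.12+ only; older interpreters reject the bracketed type parameters
    class Box[T]:
        def __init__(self, item: T) -> None:
            self.item = item

    def first[T](items: list[T]) -> T:
        return items[0]

    print(Box("goose").item, first([1, 2, 3]))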
goose_py-0.7.1.dist-info/RECORD
ADDED
@@ -0,0 +1,18 @@
+goose/__init__.py,sha256=mppYCowcZw9ke_4y1d1ayHwI3502LBaY959jdOVBPp0,170
+goose/agent.py,sha256=g2tPFqEhqBABEjmpNJ2ShfjHDGzmeUXIgOZCKDZ2-40,600
+goose/errors.py,sha256=-0OyZQJWYTRw5YgnCB2_uorVaUsL6Z0QYQO2FqzCiyg,32
+goose/flow.py,sha256=A1bzNIjnoVXRFm6LGhQglxVnKMP0vEVfvTubTol7Kww,58
+goose/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+goose/runs.py,sha256=HHcky_IbmY9HWBjpXJgOcH2Ko0N39qADsGIPR8QYpek,160
+goose/_internal/agent.py,sha256=l0pKfShovrs238sKAr-zubtcacYm82TGwQHcBWVJm2g,5875
+goose/_internal/conversation.py,sha256=BiWxWN2W-0fMMLgVRaVYld5_5-AttA3M5KF8JGznVwg,1081
+goose/_internal/flow.py,sha256=KGT6NpkMY8q_N1yKwWrxfTbhwcu5AwdHtgCPqCdL3F8,3266
+goose/_internal/result.py,sha256=-eZJn-2sPo7rHZ38Sz6IAHXqiJ-Ss39esEoFGimJEBI,155
+goose/_internal/state.py,sha256=dKXSNMVzeM_2h0YuyMatU1I57REAzttuYBJmoUMy3ag,5703
+goose/_internal/store.py,sha256=vIxPIpechF_lEQlQ8JT1NDySDfHe3-eMHEWeTqVbscg,946
+goose/_internal/task.py,sha256=cHzS7TTDXruPwXDK8P03zs3HUiJqAXECuPuAM3Oo5Tw,4685
+goose/_internal/types/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+goose/_internal/types/agent.py,sha256=rNVt2gEr_m4_8tGFgcdichpPp8xhOS5GY0kN2C4tiE8,2153
+goose_py-0.7.1.dist-info/METADATA,sha256=b_FEa3StNuozZAxU6W_qXdEVzW98W6nLpibqcqNhh9s,441
+goose_py-0.7.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+goose_py-0.7.1.dist-info/RECORD,,
goose_py-0.6.0.dist-info/METADATA
DELETED
@@ -1,31 +0,0 @@
-Metadata-Version: 2.1
-Name: goose-py
-Version: 0.6.0
-Summary: A tool for AI workflows based on human-computer collaboration and structured output.
-Home-page: https://github.com/chelle-ai/goose
-Keywords: ai,yaml,configuration,llm
-Author: Nash Taylor
-Author-email: nash@chelle.ai
-Requires-Python: >=3.12,<4.0
-Classifier: Development Status :: 4 - Beta
-Classifier: Intended Audience :: Developers
-Classifier: Operating System :: OS Independent
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.12
-Classifier: Programming Language :: Python :: 3.13
-Provides-Extra: test
-Requires-Dist: ipykernel ; extra == "test"
-Requires-Dist: jsonpath-ng (>=1.7.0,<2.0.0)
-Requires-Dist: litellm (>=1.56.5,<2.0.0)
-Requires-Dist: pydantic (>=2.8.2,<3.0.0)
-Requires-Dist: pytest (<8) ; extra == "test"
-Requires-Dist: pytest-asyncio ; extra == "test"
-Requires-Dist: pytest-mock ; extra == "test"
-Project-URL: Documentation, https://github.com/chelle-ai/goose
-Project-URL: Repository, https://github.com/chelle-ai/goose
-Description-Content-Type: text/markdown
-
-# Goose
-
-Docs to come.
-
goose_py-0.6.0.dist-info/RECORD
DELETED
@@ -1,15 +0,0 @@
-goose/__init__.py,sha256=vDVznQm4FIZmqjmCEJdOozSWLse5c-adbesRLdmNyiE,202
-goose/_agent.py,sha256=aeHJrCP3257fNDwpt4ls-zqL76Vs3p4wNJvQFHdk_xM,6061
-goose/_conversation.py,sha256=8ZgoBHnwWZ4zYnf5wQXEo0mVs50ixMIQqc2qDOTorPE,1177
-goose/_flow.py,sha256=lx8L6ZizovtgLG1GGn0zf_zuRAez5TN6gD8Q182s0Ks,3208
-goose/_result.py,sha256=-eZJn-2sPo7rHZ38Sz6IAHXqiJ-Ss39esEoFGimJEBI,155
-goose/_state.py,sha256=eG5SyO-YVj9A9a4HSoIF-YInVF3cwJkZ906B1GHohr4,5793
-goose/_store.py,sha256=wB-VWaWXvW1PlyKm05Ti21XM6PFqsAPEGvAYyFokApk,928
-goose/_task.py,sha256=lE7k79Jmb7Lkz4vJj2rEt53TG7XnVqDFltON3iONamk,4710
-goose/errors.py,sha256=-0OyZQJWYTRw5YgnCB2_uorVaUsL6Z0QYQO2FqzCiyg,32
-goose/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-goose/types/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-goose/types/agent.py,sha256=rNVt2gEr_m4_8tGFgcdichpPp8xhOS5GY0kN2C4tiE8,2153
-goose_py-0.6.0.dist-info/METADATA,sha256=VR7a7oYrrSN8a1-aF5xC05ZwUp45gwh8txPv1VMgYxE,1106
-goose_py-0.6.0.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
-goose_py-0.6.0.dist-info/RECORD,,
goose/{_result.py → _internal/result.py}
File without changes
goose/{types → _internal/types}/__init__.py
File without changes
goose/{types → _internal/types}/agent.py
File without changes