goose-py 0.11.25.tar.gz → 0.12.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. {goose_py-0.11.25 → goose_py-0.12.0}/.gitignore +1 -0
  2. goose_py-0.12.0/PKG-INFO +248 -0
  3. goose_py-0.12.0/README.md +237 -0
  4. goose_py-0.12.0/examples/01_structured_responses.py +105 -0
  5. goose_py-0.12.0/examples/02_task_orchestration.py +205 -0
  6. goose_py-0.12.0/examples/03_stateful_conversations.py +123 -0
  7. goose_py-0.12.0/examples/04_result_caching.py +130 -0
  8. goose_py-0.12.0/examples/05_iterative_refinement.py +167 -0
  9. goose_py-0.12.0/examples/06_result_validation.py +164 -0
  10. goose_py-0.12.0/examples/07_run_persistence.py +191 -0
  11. goose_py-0.12.0/examples/08_custom_logging.py +216 -0
  12. goose_py-0.12.0/examples/README.md +56 -0
  13. goose_py-0.12.0/goose/__init__.py +22 -0
  14. {goose_py-0.11.25 → goose_py-0.12.0}/goose/_internal/agent.py +114 -4
  15. {goose_py-0.11.25 → goose_py-0.12.0}/goose/_internal/flow.py +126 -0
  16. goose_py-0.12.0/goose/_internal/result.py +56 -0
  17. {goose_py-0.11.25 → goose_py-0.12.0}/goose/_internal/task.py +161 -0
  18. goose_py-0.12.0/goose/errors.py +16 -0
  19. {goose_py-0.11.25 → goose_py-0.12.0}/pyproject.toml +2 -2
  20. {goose_py-0.11.25 → goose_py-0.12.0}/uv.lock +164 -9
  21. goose_py-0.11.25/PKG-INFO +0 -14
  22. goose_py-0.11.25/README.md +0 -3
  23. goose_py-0.11.25/goose/__init__.py +0 -7
  24. goose_py-0.11.25/goose/_internal/result.py +0 -20
  25. goose_py-0.11.25/goose/errors.py +0 -2
  26. {goose_py-0.11.25 → goose_py-0.12.0}/.envrc +0 -0
  27. {goose_py-0.11.25 → goose_py-0.12.0}/.github/workflows/publish.yml +0 -0
  28. {goose_py-0.11.25 → goose_py-0.12.0}/.python-version +0 -0
  29. {goose_py-0.11.25 → goose_py-0.12.0}/.stubs/jsonpath_ng/__init__.pyi +0 -0
  30. {goose_py-0.11.25 → goose_py-0.12.0}/Makefile +0 -0
  31. {goose_py-0.11.25 → goose_py-0.12.0}/goose/_internal/conversation.py +0 -0
  32. {goose_py-0.11.25 → goose_py-0.12.0}/goose/_internal/state.py +0 -0
  33. {goose_py-0.11.25 → goose_py-0.12.0}/goose/_internal/store.py +0 -0
  34. {goose_py-0.11.25 → goose_py-0.12.0}/goose/_internal/types/__init__.py +0 -0
  35. {goose_py-0.11.25 → goose_py-0.12.0}/goose/_internal/types/telemetry.py +0 -0
  36. {goose_py-0.11.25 → goose_py-0.12.0}/goose/flow.py +0 -0
  37. {goose_py-0.11.25 → goose_py-0.12.0}/goose/py.typed +0 -0
  38. {goose_py-0.11.25 → goose_py-0.12.0}/goose/runs.py +0 -0
  39. {goose_py-0.11.25 → goose_py-0.12.0}/goose/task.py +0 -0
  40. {goose_py-0.11.25 → goose_py-0.12.0}/tests/__init__.py +0 -0
  41. {goose_py-0.11.25 → goose_py-0.12.0}/tests/test_agent.py +0 -0
  42. {goose_py-0.11.25 → goose_py-0.12.0}/tests/test_ask.py +0 -0
  43. {goose_py-0.11.25 → goose_py-0.12.0}/tests/test_downstream_task.py +0 -0
  44. {goose_py-0.11.25 → goose_py-0.12.0}/tests/test_hashing.py +0 -0
  45. {goose_py-0.11.25 → goose_py-0.12.0}/tests/test_looping.py +0 -0
  46. {goose_py-0.11.25 → goose_py-0.12.0}/tests/test_refining.py +0 -0
  47. {goose_py-0.11.25 → goose_py-0.12.0}/tests/test_regenerate.py +0 -0
  48. {goose_py-0.11.25 → goose_py-0.12.0}/tests/test_state.py +0 -0
{goose_py-0.11.25 → goose_py-0.12.0}/.gitignore
@@ -3,3 +3,4 @@ __pycache__
  poetry.lock
  notebooks
  dist
+ .aider*
goose_py-0.12.0/PKG-INFO
@@ -0,0 +1,248 @@
+ Metadata-Version: 2.4
+ Name: goose-py
+ Version: 0.12.0
+ Summary: A tool for AI workflows based on human-computer collaboration and structured output.
+ Author-email: Nash Taylor <nash@chelle.ai>, Joshua Cook <joshua@chelle.ai>, Michael Sankur <michael@chelle.ai>
+ Requires-Python: >=3.12
+ Requires-Dist: aikernel==0.1.43
+ Requires-Dist: jsonpath-ng>=1.7.0
+ Requires-Dist: pydantic>=2.8.2
+ Description-Content-Type: text/markdown
+
+ # Goose
+
+ Goose is a framework for building LLM-based agents and workflows with strong typing and state management. Here's what's fundamentally possible:
+
+ 1. Structured LLM interactions - Organize model calls with typed inputs/outputs
+ 2. Task orchestration - Create reusable tasks that can be composed into flows
+ 3. Stateful conversations - Maintain conversation history and model outputs
+ 4. Result caching - Avoid redundant computation based on input hashing
+ 5. Iterative refinement - Enhance results through progressive feedback loops
+ 6. Result validation - Ensure model outputs conform to expected schemas
+ 7. Run persistence - Save and reload workflow executions
+ 8. Custom logging - Track telemetry and performance metrics
+
+ It enables building reliable, maintainable AI applications with proper error handling, state tracking, and flow control while ensuring type safety throughout.
+
+ ## Key Features
+
+ ### Structured LLM Interactions
+
+ Organize model calls with typed inputs and outputs using Pydantic models. This ensures that responses from language models conform to expected structures.
+
+ ```mermaid
+ graph LR
+ A[User Input] --> B[Agent]
+ B --> C[LLM Model]
+ C --> D[Structured Response]
+ D --> E[Validated Result]
+ E --> F[Application Logic]
+
+ classDef user fill:#f9f,stroke:#333,stroke-width:2px
+ classDef llm fill:#bbf,stroke:#333,stroke-width:2px
+ classDef validation fill:#bfb,stroke:#333,stroke-width:2px
+
+ class A user
+ class C llm
+ class D,E validation
+ ```
+
+ ### Task Orchestration
+
+ Create reusable tasks that can be composed into flows. Tasks are decorated functions that handle specific operations, while flows coordinate multiple tasks.
+
+ ```mermaid
+ graph TD
+ A[Flow] --> B[Task 1]
+ A --> C[Task 2]
+ A --> D[Task 3]
+ B --> E[Result 1]
+ C --> F[Result 2]
+ D --> G[Result 3]
+ E --> H[Flow Output]
+ F --> H
+ G --> H
+
+ classDef flow fill:#f9f,stroke:#333,stroke-width:2px
+ classDef task fill:#bbf,stroke:#333,stroke-width:2px
+ classDef result fill:#bfb,stroke:#333,stroke-width:2px
+
+ class A flow
+ class B,C,D task
+ class E,F,G,H result
+ ```
+
+ ### Stateful Conversations
+
+ Maintain conversation history and model outputs across multiple interactions. The framework tracks the state of each task in a flow.
+
+ ```mermaid
+ sequenceDiagram
+ participant User
+ participant Flow
+ participant Task
+ participant Agent
+ participant LLM
+
+ User->>Flow: Start Conversation
+ Flow->>Task: Execute
+ Task->>Agent: Generate Response
+ Agent->>LLM: Send Messages
+ LLM-->>Agent: Generate Response
+ Agent-->>Task: Store Result
+ Task-->>Flow: Update State
+ Flow-->>User: Return Result
+
+ User->>Flow: Follow-up Question
+ Flow->>Task: Get State
+ Task->>Agent: Send Previous Context + New Question
+ Agent->>LLM: Send Updated Messages
+ LLM-->>Agent: Generate Response
+ Agent-->>Task: Update Conversation
+ Task-->>Flow: Update State
+ Flow-->>User: Return Result
+ ```
+
+ ### Result Caching
+
+ Avoid redundant computation by caching results based on input hashing. The framework automatically detects when inputs change and only regenerates results when necessary.
+
+ ```mermaid
+ flowchart TD
+ A[Task Call] --> B{Inputs Changed?}
+ B -- Yes --> C[Execute Task]
+ B -- No --> D[Return Cached Result]
+ C --> E[Cache Result]
+ E --> F[Return Result]
+ D --> F
+
+ classDef decision fill:#f9f,stroke:#333,stroke-width:2px
+ classDef action fill:#bbf,stroke:#333,stroke-width:2px
+ classDef cache fill:#bfb,stroke:#333,stroke-width:2px
+
+ class B decision
+ class A,C,F action
+ class D,E cache
+ ```
+
+ ### Iterative Refinement
+
+ Enhance results through progressive feedback loops. The framework supports asking follow-up questions about results and refining them based on feedback.
+
+ ```mermaid
+ sequenceDiagram
+ participant User
+ participant Task
+ participant Agent
+ participant LLM
+
+ User->>Task: Generate Initial Result
+ Task->>Agent: Send Request
+ Agent->>LLM: Generate Structured Output
+ LLM-->>Agent: Return Output
+ Agent-->>Task: Store Result
+ Task-->>User: Return Result
+
+ User->>Task: Request Refinement
+ Task->>Agent: Send Feedback + Original Result
+ Agent->>LLM: Generate Find/Replace Operations
+ LLM-->>Agent: Return Changes
+ Agent-->>Task: Apply Changes to Result
+ Task-->>User: Return Refined Result
+ ```
+
+ ### Result Validation
+
+ Ensure model outputs conform to expected schemas using Pydantic validation. All results must conform to predefined models.
+
+ ```mermaid
+ flowchart LR
+ A[LLM Response] --> B[Parse JSON]
+ B --> C{Valid Schema?}
+ C -- Yes --> D[Return Validated Result]
+ C -- No --> E[Raise Error]
+
+ classDef input fill:#bbf,stroke:#333,stroke-width:2px
+ classDef validation fill:#f9f,stroke:#333,stroke-width:2px
+ classDef output fill:#bfb,stroke:#333,stroke-width:2px
+ classDef error fill:#fbb,stroke:#333,stroke-width:2px
+
+ class A input
+ class B,C validation
+ class D output
+ class E error
+ ```
+
+ ### Run Persistence
+
+ Save and reload workflow executions. The framework provides interfaces for storing flow runs, allowing for resuming work or reviewing past executions.
+
+ ```mermaid
+ graph TD
+ A[Start Flow] --> B[Create Flow Run]
+ B --> C[Execute Tasks]
+ C --> D[Save Run State]
+ D --> E[End Flow]
+
+ F[Later Time] --> G[Load Saved Run]
+ G --> H[Resume Execution]
+ H --> D
+
+ classDef flow fill:#f9f,stroke:#333,stroke-width:2px
+ classDef execution fill:#bbf,stroke:#333,stroke-width:2px
+ classDef storage fill:#bfb,stroke:#333,stroke-width:2px
+
+ class A,E,F flow
+ class B,C,H execution
+ class D,G storage
+ ```
+
+ ### Custom Logging
+
+ Track telemetry and performance metrics. The framework supports custom loggers to record model usage, token counts, and execution time.
+
+ ```mermaid
+ flowchart TD
+ A[Agent Call] --> B[Execute LLM Request]
+ B --> C[Record Metrics]
+ C --> D{Custom Logger?}
+ D -- Yes --> E[Send to Custom Logger]
+ D -- No --> F[Log to Default Logger]
+ E --> G[Return Result]
+ F --> G
+
+ classDef action fill:#bbf,stroke:#333,stroke-width:2px
+ classDef logging fill:#bfb,stroke:#333,stroke-width:2px
+ classDef decision fill:#f9f,stroke:#333,stroke-width:2px
+
+ class A,B,G action
+ class C,E,F logging
+ class D decision
+ ```
+
+ ## Building with Goose
+
+ Goose enables building reliable, maintainable AI applications with proper error handling, state tracking, and flow control while ensuring type safety throughout. This approach reduces common issues in LLM applications like:
+
+ - Type inconsistencies in model responses
+ - Loss of context between interactions
+ - Redundant LLM calls for identical inputs
+ - Difficulty in resuming interrupted workflows
+ - Lack of structured error handling
+
+ Start building more robust LLM applications with Goose's typed, stateful approach to agent development.
+
+ ## Installation and Package Management
+
+ Goose uses `uv` for package management. Never use pip with this project.
+
+ ```bash
+ # Install dependencies
+ uv add <package-name>
+
+ # Update dependencies file
+ uv sync
+
+ # Run commands
+ uv run <command>
+ ```
1
+ # Goose
2
+
3
+ Goose is a framework for building LLM-based agents and workflows with strong typing and state management. Here's what's fundamentally possible:
4
+
5
+ 1. Structured LLM interactions - Organize model calls with typed inputs/outputs
6
+ 2. Task orchestration - Create reusable tasks that can be composed into flows
7
+ 3. Stateful conversations - Maintain conversation history and model outputs
8
+ 4. Result caching - Avoid redundant computation based on input hashing
9
+ 5. Iterative refinement - Enhance results through progressive feedback loops
10
+ 6. Result validation - Ensure model outputs conform to expected schemas
11
+ 7. Run persistence - Save and reload workflow executions
12
+ 8. Custom logging - Track telemetry and performance metrics
13
+
14
+ It enables building reliable, maintainable AI applications with proper error handling, state tracking, and flow control while ensuring type safety throughout.
15
+
16
+ ## Key Features
17
+
18
+ ### Structured LLM Interactions
19
+
20
+ Organize model calls with typed inputs and outputs using Pydantic models. This ensures that responses from language models conform to expected structures.
21
+
22
+ ```mermaid
23
+ graph LR
24
+ A[User Input] --> B[Agent]
25
+ B --> C[LLM Model]
26
+ C --> D[Structured Response]
27
+ D --> E[Validated Result]
28
+ E --> F[Application Logic]
29
+
30
+ classDef user fill:#f9f,stroke:#333,stroke-width:2px
31
+ classDef llm fill:#bbf,stroke:#333,stroke-width:2px
32
+ classDef validation fill:#bfb,stroke:#333,stroke-width:2px
33
+
34
+ class A user
35
+ class C llm
36
+ class D,E validation
37
+ ```
38
+
39
+ ### Task Orchestration
40
+
41
+ Create reusable tasks that can be composed into flows. Tasks are decorated functions that handle specific operations, while flows coordinate multiple tasks.
42
+
43
+ ```mermaid
44
+ graph TD
45
+ A[Flow] --> B[Task 1]
46
+ A --> C[Task 2]
47
+ A --> D[Task 3]
48
+ B --> E[Result 1]
49
+ C --> F[Result 2]
50
+ D --> G[Result 3]
51
+ E --> H[Flow Output]
52
+ F --> H
53
+ G --> H
54
+
55
+ classDef flow fill:#f9f,stroke:#333,stroke-width:2px
56
+ classDef task fill:#bbf,stroke:#333,stroke-width:2px
57
+ classDef result fill:#bfb,stroke:#333,stroke-width:2px
58
+
59
+ class A flow
60
+ class B,C,D task
61
+ class E,F,G,H result
62
+ ```
63
+
64
+ ### Stateful Conversations
65
+
66
+ Maintain conversation history and model outputs across multiple interactions. The framework tracks the state of each task in a flow.
67
+
68
+ ```mermaid
69
+ sequenceDiagram
70
+ participant User
71
+ participant Flow
72
+ participant Task
73
+ participant Agent
74
+ participant LLM
75
+
76
+ User->>Flow: Start Conversation
77
+ Flow->>Task: Execute
78
+ Task->>Agent: Generate Response
79
+ Agent->>LLM: Send Messages
80
+ LLM-->>Agent: Generate Response
81
+ Agent-->>Task: Store Result
82
+ Task-->>Flow: Update State
83
+ Flow-->>User: Return Result
84
+
85
+ User->>Flow: Follow-up Question
86
+ Flow->>Task: Get State
87
+ Task->>Agent: Send Previous Context + New Question
88
+ Agent->>LLM: Send Updated Messages
89
+ LLM-->>Agent: Generate Response
90
+ Agent-->>Task: Update Conversation
91
+ Task-->>Flow: Update State
92
+ Flow-->>User: Return Result
93
+ ```
94
+
95
+ ### Result Caching
96
+
97
+ Avoid redundant computation by caching results based on input hashing. The framework automatically detects when inputs change and only regenerates results when necessary.
98
+
99
+ ```mermaid
100
+ flowchart TD
101
+ A[Task Call] --> B{Inputs Changed?}
102
+ B -- Yes --> C[Execute Task]
103
+ B -- No --> D[Return Cached Result]
104
+ C --> E[Cache Result]
105
+ E --> F[Return Result]
106
+ D --> F
107
+
108
+ classDef decision fill:#f9f,stroke:#333,stroke-width:2px
109
+ classDef action fill:#bbf,stroke:#333,stroke-width:2px
110
+ classDef cache fill:#bfb,stroke:#333,stroke-width:2px
111
+
112
+ class B decision
113
+ class A,C,F action
114
+ class D,E cache
115
+ ```
116
+
117
+ ### Iterative Refinement
118
+
119
+ Enhance results through progressive feedback loops. The framework supports asking follow-up questions about results and refining them based on feedback.
120
+
121
+ ```mermaid
122
+ sequenceDiagram
123
+ participant User
124
+ participant Task
125
+ participant Agent
126
+ participant LLM
127
+
128
+ User->>Task: Generate Initial Result
129
+ Task->>Agent: Send Request
130
+ Agent->>LLM: Generate Structured Output
131
+ LLM-->>Agent: Return Output
132
+ Agent-->>Task: Store Result
133
+ Task-->>User: Return Result
134
+
135
+ User->>Task: Request Refinement
136
+ Task->>Agent: Send Feedback + Original Result
137
+ Agent->>LLM: Generate Find/Replace Operations
138
+ LLM-->>Agent: Return Changes
139
+ Agent-->>Task: Apply Changes to Result
140
+ Task-->>User: Return Refined Result
141
+ ```
142
+
143
+ ### Result Validation
144
+
145
+ Ensure model outputs conform to expected schemas using Pydantic validation. All results must conform to predefined models.
146
+
147
+ ```mermaid
148
+ flowchart LR
149
+ A[LLM Response] --> B[Parse JSON]
150
+ B --> C{Valid Schema?}
151
+ C -- Yes --> D[Return Validated Result]
152
+ C -- No --> E[Raise Error]
153
+
154
+ classDef input fill:#bbf,stroke:#333,stroke-width:2px
155
+ classDef validation fill:#f9f,stroke:#333,stroke-width:2px
156
+ classDef output fill:#bfb,stroke:#333,stroke-width:2px
157
+ classDef error fill:#fbb,stroke:#333,stroke-width:2px
158
+
159
+ class A input
160
+ class B,C validation
161
+ class D output
162
+ class E error
163
+ ```
164
+
165
+ ### Run Persistence
166
+
167
+ Save and reload workflow executions. The framework provides interfaces for storing flow runs, allowing for resuming work or reviewing past executions.
168
+
169
+ ```mermaid
170
+ graph TD
171
+ A[Start Flow] --> B[Create Flow Run]
172
+ B --> C[Execute Tasks]
173
+ C --> D[Save Run State]
174
+ D --> E[End Flow]
175
+
176
+ F[Later Time] --> G[Load Saved Run]
177
+ G --> H[Resume Execution]
178
+ H --> D
179
+
180
+ classDef flow fill:#f9f,stroke:#333,stroke-width:2px
181
+ classDef execution fill:#bbf,stroke:#333,stroke-width:2px
182
+ classDef storage fill:#bfb,stroke:#333,stroke-width:2px
183
+
184
+ class A,E,F flow
185
+ class B,C,H execution
186
+ class D,G storage
187
+ ```
188
+
189
+ ### Custom Logging
190
+
191
+ Track telemetry and performance metrics. The framework supports custom loggers to record model usage, token counts, and execution time.
192
+
193
+ ```mermaid
194
+ flowchart TD
195
+ A[Agent Call] --> B[Execute LLM Request]
196
+ B --> C[Record Metrics]
197
+ C --> D{Custom Logger?}
198
+ D -- Yes --> E[Send to Custom Logger]
199
+ D -- No --> F[Log to Default Logger]
200
+ E --> G[Return Result]
201
+ F --> G
202
+
203
+ classDef action fill:#bbf,stroke:#333,stroke-width:2px
204
+ classDef logging fill:#bfb,stroke:#333,stroke-width:2px
205
+ classDef decision fill:#f9f,stroke:#333,stroke-width:2px
206
+
207
+ class A,B,G action
208
+ class C,E,F logging
209
+ class D decision
210
+ ```
211
+
212
+ ## Building with Goose
213
+
214
+ Goose enables building reliable, maintainable AI applications with proper error handling, state tracking, and flow control while ensuring type safety throughout. This approach reduces common issues in LLM applications like:
215
+
216
+ - Type inconsistencies in model responses
217
+ - Loss of context between interactions
218
+ - Redundant LLM calls for identical inputs
219
+ - Difficulty in resuming interrupted workflows
220
+ - Lack of structured error handling
221
+
222
+ Start building more robust LLM applications with Goose's typed, stateful approach to agent development.
223
+
224
+ ## Installation and Package Management
225
+
226
+ Goose uses `uv` for package management. Never use pip with this project.
227
+
228
+ ```bash
229
+ # Install dependencies
230
+ uv add <package-name>
231
+
232
+ # Update dependencies file
233
+ uv sync
234
+
235
+ # Run commands
236
+ uv run <command>
237
+ ```
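The README's task-orchestration section, together with the example that follows, suggests how several tasks might be composed inside one flow. The sketch below chains two tasks using the same decorators and `agent(...)` call signature as `examples/01_structured_responses.py`; the result types, the `topic` argument, the `research_flow` name, and the assumption that an awaited `@task` function hands back its `Result` directly are all illustrative rather than confirmed API.

```python
# Rough sketch of composing two tasks in one flow, modeled on the call
# signature used in examples/01_structured_responses.py below. Names such as
# OutlineResult, SummaryResult, and research_flow are hypothetical, and the
# assumption that awaiting a @task returns its Result is not verified here.
from aikernel import LLMMessagePart, LLMUserMessage, get_router
from pydantic import Field

from goose import Agent, FlowArguments, Result, flow, task


class OutlineResult(Result):
    bullet_points: list[str] = Field(description="Outline of the topic")


class SummaryResult(Result):
    summary: str = Field(description="One-paragraph summary of the outline")


class ResearchFlowArguments(FlowArguments):
    topic: str


@task
async def outline_topic(*, agent: Agent, topic: str) -> OutlineResult:
    router = get_router(models=("gemini-2.0-flash",))
    return await agent(
        messages=[LLMUserMessage(parts=[LLMMessagePart(content=f"Outline the topic: {topic}")])],
        model="gemini-2.0-flash",
        task_name="outline_topic",
        response_model=OutlineResult,
        router=router,
    )


@task
async def summarize_outline(*, agent: Agent, outline: OutlineResult) -> SummaryResult:
    router = get_router(models=("gemini-2.0-flash",))
    points = "; ".join(outline.bullet_points)
    return await agent(
        messages=[LLMUserMessage(parts=[LLMMessagePart(content=f"Summarize these points: {points}")])],
        model="gemini-2.0-flash",
        task_name="summarize_outline",
        response_model=SummaryResult,
        router=router,
    )


@flow
async def research_flow(*, flow_arguments: ResearchFlowArguments, agent: Agent) -> None:
    # Per the README's caching description, unchanged inputs should be served
    # from cached results on a re-run; that behavior is described above, not
    # demonstrated here.
    outline = await outline_topic(agent=agent, topic=flow_arguments.topic)
    await summarize_outline(agent=agent, outline=outline)
```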
goose_py-0.12.0/examples/01_structured_responses.py
@@ -0,0 +1,105 @@
+ """
+ Example demonstrating structured LLM responses.
+
+ This example shows how to create a structured result type and use it
+ with a flow to ensure the LLM output conforms to expected schema.
+ """
+
+ import asyncio
+ import os
+
+ from aikernel import LLMMessagePart, LLMSystemMessage, LLMUserMessage, get_router
+ from pydantic import Field
+
+ from goose import Agent, FlowArguments, Result, flow, task
+
+
+ # Define a structured result type
+ class RecipeResult(Result):
+     """Recipe with structured attributes."""
+     title: str = Field(description="The title of the recipe")
+     ingredients: list[str] = Field(description="List of ingredients needed")
+     steps: list[str] = Field(description="Step-by-step cooking instructions")
+     prep_time_minutes: int = Field(description="Preparation time in minutes")
+     cooking_time_minutes: int = Field(description="Cooking time in minutes")
+
+
+ class RecipeFlowArguments(FlowArguments):
+     """Arguments for the recipe flow."""
+     ingredient: str
+
+
+ @task
+ async def generate_recipe(*, agent: Agent, ingredient: str) -> RecipeResult:
+     """Generate a recipe that uses the specified ingredient."""
+     print(f"Generating recipe for {ingredient}...")
+
+     # Create a router for Gemini 2.0 Flash
+     router = get_router(models=("gemini-2.0-flash",))
+
+     # System message with instructions
+     system_message = LLMSystemMessage(
+         parts=[LLMMessagePart(content=f"You are a creative chef. Create a recipe using {ingredient} as a main ingredient.")]
+     )
+
+     # User request message
+     user_message = LLMUserMessage(
+         parts=[LLMMessagePart(content=f"Please create a recipe that features {ingredient} as a main ingredient.")]
+     )
+
+     # Make the actual LLM call
+     return await agent(
+         messages=[system_message, user_message],
+         model="gemini-2.0-flash",
+         task_name="generate_recipe",
+         response_model=RecipeResult,
+         router=router
+     )
+
+
+ @flow
+ async def recipe_flow(*, flow_arguments: RecipeFlowArguments, agent: Agent) -> None:
+     """Flow for generating a recipe with structured output."""
+     await generate_recipe(agent=agent, ingredient=flow_arguments.ingredient)
+
+
+ async def main():
+     """Run the recipe flow and display the results."""
+     # Create a unique run ID
+     run_id = f"recipe-{os.getpid()}"
+
+     print("=== Structured LLM Responses Example ===")
+     print("This example demonstrates how to create structured result types")
+     print("and ensure LLM outputs conform to expected schemas.\n")
+
+     # Run the recipe flow
+     async with recipe_flow.start_run(run_id=run_id) as run:
+         await recipe_flow.generate(RecipeFlowArguments(ingredient="avocado"))
+
+         # Get the recipe from the result
+         recipe = run.get_result(task=generate_recipe)
+
+         # Display the recipe information
+         print("\n--- Generated Recipe ---")
+         print(f"Recipe: {recipe.title}")
+         print("\nIngredients:")
+         for item in recipe.ingredients:
+             print(f"- {item}")
+
+         print("\nInstructions:")
+         for i, step in enumerate(recipe.steps, 1):
+             print(f"{i}. {step}")
+
+         print(f"\nPrep time: {recipe.prep_time_minutes} minutes")
+         print(f"Cooking time: {recipe.cooking_time_minutes} minutes")
+
+         # Access fields directly with type safety
+         total_time = recipe.prep_time_minutes + recipe.cooking_time_minutes
+         print(f"Total time: {total_time} minutes")
+
+         print("\nNote: This demonstrates how structured outputs provide type safety and")
+         print("predictable fields that can be accessed in your application code.")
+
+
+ if __name__ == "__main__":
+     asyncio.run(main())