fleet-python 0.2.7-py3-none-any.whl → 0.2.9-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


examples/example_sync.py CHANGED
@@ -11,6 +11,9 @@ def main():
     environments = flt.env.list_envs()
     print("Environments:", len(environments))

+    instances = flt.env.list_instances()
+    print("Instances:", len(instances))
+
     # Create a new instance
     env = flt.env.make("hubspot:v1.2.7")
     print("New Instance:", env.instance_id)
examples/gemini_example.py ADDED
@@ -0,0 +1,432 @@
+import os
+import json
+import argparse
+from typing import List, Dict, Any, Optional, Tuple, TypedDict
+from pathlib import Path
+from google import genai
+from google.genai import types
+import fleet as flt
+from dotenv import load_dotenv
+import base64
+import re
+import time
+from concurrent.futures import ThreadPoolExecutor, as_completed
+
+load_dotenv()
+
+# Initialize Gemini client
+client = genai.Client(api_key=os.getenv("GEMINI_API_KEY"))
+MODEL = "gemini-2.5-pro"
+
+
+class Problem(TypedDict):
+    id: str
+    problem: str
+    category: str
+    difficulty: str
+    verifier_func: str
+
+
+class GeminiAgent:
+    def __init__(
+        self,
+        browser: flt.FleetPlaywrightWrapper,
+        model: str = MODEL,
+        print_steps: bool = True,
+        debug: bool = False,
+    ):
+        self.browser = browser
+        self.model = model
+        self.print_steps = print_steps
+        self.debug = debug
+        self.conversation_history = []
+        self.last_action = None  # Track the last action performed
+
+    @property
+    def page(self):
+        """Access the underlying Playwright page object."""
+        return self.browser._page if hasattr(self.browser, "_page") else None
+
+    def debug_print(self, *args):
+        if self.debug:
+            print("[DEBUG]", *args)
+
+    def take_screenshot(self) -> str:
+        return self.browser.screenshot()
+
+    def execute_action(self, action: Dict[str, Any]) -> Dict[str, Any]:
+        action_type = action.get("type")
+        params = action.get("parameters", {})
+
+        if self.print_steps:
+            print(f"Action: {action_type}({params})")
+
+        try:
+            if action_type == "click":
+                self.browser.click(
+                    x=params.get("x", params.get("coordinate", [0, 0])[0]),
+                    y=params.get("y", params.get("coordinate", [0, 0])[1]),
+                )
+                # Small delay to ensure click is registered and element is focused
+                time.sleep(0.2)
+                self.last_action = {"type": "click", "target": params}
+            elif action_type == "type":
+                self.browser.type(text=params.get("text", ""))
+                self.last_action = {"type": "type", "text": params.get("text", "")}
+            elif action_type == "key":
+                # FleetPlaywrightWrapper expects keypress with a list of keys
+                key = params.get("key", "")
+                self.browser.keypress([key])
+                self.last_action = {"type": "key", "key": key}
+            elif action_type == "scroll":
+                # FleetPlaywrightWrapper expects scroll(x, y, scroll_x, scroll_y)
+                x = params.get("x", params.get("coordinate", [0, 0])[0])
+                y = params.get("y", params.get("coordinate", [0, 0])[1])
+                direction = params.get("direction", "down")
+                amount = params.get("amount", 5)
+
+                # Convert direction and amount to scroll_x and scroll_y
+                scroll_x = 0
+                scroll_y = 0
+                if direction == "down":
+                    scroll_y = amount * 100
+                elif direction == "up":
+                    scroll_y = -amount * 100
+                elif direction == "right":
+                    scroll_x = amount * 100
+                elif direction == "left":
+                    scroll_x = -amount * 100
+
+                self.browser.scroll(x=x, y=y, scroll_x=scroll_x, scroll_y=scroll_y)
+                self.last_action = {"type": "scroll"}
+            elif action_type == "wait":
+                time.sleep(params.get("seconds", 1))
+                self.last_action = {"type": "wait"}
+            elif action_type == "navigate":
+                # Use the browser's goto method
+                url = params.get("url", "")
+                if url:
+                    self.browser.goto(url)
+                self.last_action = {"type": "navigate", "url": url}
+            else:
+                return {
+                    "success": False,
+                    "error": f"Unknown action type: {action_type}",
+                }
+
+            return {"success": True}
+        except Exception as e:
+            return {"success": False, "error": str(e)}
+
+    def create_prompt_with_screenshot(
+        self, task: str, screenshot_b64: str
+    ) -> List[Any]:
+        # Add context about last action
+        last_action_context = ""
+        if self.last_action:
+            if self.last_action["type"] == "click":
+                last_action_context = f"\n\nIMPORTANT: You just clicked at coordinates {self.last_action['target']}. If you clicked on a text input field, search bar, or any editable element, you MUST now use the 'type' action to enter text. Do not click the same element again."
+            elif self.last_action["type"] == "type":
+                last_action_context = f"\n\nYou just typed: '{self.last_action['text']}'. You may now need to press Enter or click a button to submit."
+
+        prompt_text = (
+            "You are an AI agent that can interact with web browsers. "
+            f"Your task is to: {task}\n\n"
+            "You can see the current state of the browser in the screenshot provided."
+            f"{last_action_context}\n\n"
+            "You can perform the following actions:\n"
+            '- click: Click at specific coordinates {"type": "click", "parameters": {"x": x, "y": y}}\n'
+            '- type: Type text into the currently focused element {"type": "type", "parameters": {"text": "text to type"}}\n'
+            '- key: Press a special key {"type": "key", "parameters": {"key": "Enter"}} (e.g., "Enter", "Tab", "Escape")\n'
+            '- scroll: Scroll the page {"type": "scroll", "parameters": {"x": x, "y": y, "direction": "down", "amount": 5}} (direction: up/down/left/right)\n'
+            '- wait: Wait for a number of seconds {"type": "wait", "parameters": {"seconds": 1}}\n\n'
+            "CRITICAL RULES:\n"
+            "1. After clicking on ANY text input, search bar, or form field, you MUST type in the next step\n"
+            "2. Never click the same element twice in a row\n"
+            "3. If you mention searching for something in your reasoning, you must actually type the search query\n"
+            "4. Common workflow: click search bar → type query → press Enter\n\n"
+            "Analyze the screenshot and decide what action to take next. Respond with a JSON object containing:\n"
+            '- "reasoning": Your analysis of the current state and what needs to be done\n'
+            '- "action": The action to perform (as described above)\n'
+            '- "completed": true if the task is complete, false otherwise\n\n'
+            "Example responses:\n"
+            "{\n"
+            '  "reasoning": "I can see a search bar at the top. I need to click on it first to focus it.",\n'
+            '  "action": {"type": "click", "parameters": {"x": 450, "y": 30}},\n'
+            '  "completed": false\n'
+            "}\n\n"
+            "{\n"
+            '  "reasoning": "I just clicked on the search bar and it should now be focused. I need to type my search query for PHI encryption ticket.",\n'
+            '  "action": {"type": "type", "parameters": {"text": "PHI encryption"}},\n'
+            '  "completed": false\n'
+            "}\n\n"
+            "{\n"
+            '  "reasoning": "I typed the search query. Now I need to press Enter to execute the search.",\n'
+            '  "action": {"type": "key", "parameters": {"key": "Enter"}},\n'
+            '  "completed": false\n'
+            "}"
+        )
+
+        return [
+            prompt_text,
+            types.Part.from_bytes(
+                data=base64.b64decode(screenshot_b64), mime_type="image/png"
+            ),
+        ]
+
+    def solve_task(self, task: str, max_steps: int = 30) -> Tuple[bool, str]:
+        steps = 0
+
+        try:
+            while steps < max_steps:
+                steps += 1
+
+                # Take screenshot
+                screenshot = self.take_screenshot()
+
+                # Create prompt with current state
+                prompt_parts = self.create_prompt_with_screenshot(task, screenshot)
+
+                # Get Gemini's response
+                response = client.models.generate_content(
+                    model=self.model,
+                    contents=prompt_parts,
+                    config=types.GenerateContentConfig(
+                        response_mime_type="application/json",
+                        temperature=0.1,  # Lower temperature for more deterministic behavior
+                    ),
+                )
+
+                # Parse response
+                try:
+                    result = json.loads(response.text)
+                    self.debug_print(f"Step {steps}: {result}")
+
+                    if self.print_steps:
+                        print(
+                            f"Step {steps}: {result.get('reasoning', 'No reasoning provided')}"
+                        )
+
+                    # Debug: Print the full action if in debug mode
+                    if self.debug and "action" in result:
+                        print(f"[DEBUG] Full action: {result['action']}")
+
+                    # Check if task is completed
+                    if result.get("completed", False):
+                        return True, "Task completed successfully"
+
+                    # Execute the action
+                    if "action" in result:
+                        action_result = self.execute_action(result["action"])
+                        if not action_result["success"]:
+                            self.debug_print(
+                                f"Action failed: {action_result.get('error')}"
+                            )
+                    else:
+                        print(f"[WARNING] No action in response: {result}")
+
+                    # Small delay to let the page update
+                    time.sleep(0.5)
+
+                except json.JSONDecodeError as e:
+                    self.debug_print(f"Failed to parse Gemini response: {e}")
+                    self.debug_print(f"Response text: {response.text}")
+                    # Try to extract any useful information from the response
+                    print(
+                        f"[ERROR] Invalid JSON response from Gemini: {response.text[:200]}..."
+                    )
+                    continue
+
+            return False, f"Max steps ({max_steps}) reached without completing the task"
+
+        except Exception as e:
+            return False, f"Error during task execution: {str(e)}"
+
+
+def extract_function_name(function_str: str) -> str:
+    match = re.search(r"(?:async\s+)?def\s+(\w+)\s*\(", function_str)
+    if match:
+        return match.group(1)
+    raise ValueError(f"No function name found in {function_str}")
+
+
+def evaluate_problem(
+    problem: Problem,
+    problem_idx: int,
+    total_problems: int,
+    env_key: str,
+    max_steps: int = 30,
+) -> Tuple[str, bool, Optional[str]]:
+    env = None
+    browser = None
+
+    try:
+        # Create environment
+        env = flt.env.make(env_key)
+        print(
+            f"[Problem {problem_idx + 1}/{total_problems}] Created environment for {problem['id']}: {env.urls.app}"
+        )
+
+        # Create browser wrapper
+        browser = flt.FleetPlaywrightWrapper(env)
+        browser.start()
+
+        # Create agent
+        agent = GeminiAgent(browser, print_steps=True, debug=False)
+
+        # Solve the problem
+        print(
+            f"[Problem {problem_idx + 1}/{total_problems}] Solving {problem['id']}..."
+        )
+        success, message = agent.solve_task(problem["problem"], max_steps=max_steps)
+
+        if not success:
+            print(
+                f"[Problem {problem_idx + 1}/{total_problems}] Failed to solve: {message}"
+            )
+            # return problem["id"], False, message
+
+        # Verify the solution
+        function_name = extract_function_name(problem["verifier_func"])
+        print(
+            f"[Problem {problem_idx + 1}/{total_problems}] Verifying {function_name} ({problem['id']})..."
+        )
+        response = env.verify_raw(problem["verifier_func"], function_name)
+
+        print(
+            f"[Problem {problem_idx + 1}/{total_problems}] Result for {problem['id']}: {'✓' if response.success else '✗'}"
+        )
+
+        return problem["id"], response.success, None
+
+    except Exception as e:
+        print(
+            f"[Problem {problem_idx + 1}/{total_problems}] Fatal error processing {problem['id']}: {e}"
+        )
+        return problem["id"], False, str(e)
+    finally:
+        # Clean up
+        if browser:
+            browser.close()
+        if env:
+            env.close()
+
+
+def interactive_mode():
+    # Create a Fleet environment instance
+    instance = flt.env.make("hubspot")
+
+    # Create the browser wrapper
+    browser = flt.FleetPlaywrightWrapper(instance)
+    browser.start()
+
+    try:
+        agent = GeminiAgent(browser, print_steps=True, debug=False)
+
+        print("Gemini Agent Interactive Mode")
+        print("Type your task or 'quit' to exit")
+        print("-" * 60)
+
+        while True:
+            try:
+                user_input = input("\n> ")
+                if user_input.lower() in ["quit", "exit", "q"]:
+                    break
+
+                success, message = agent.solve_task(user_input)
+                print(f"\nResult: {'Success' if success else 'Failed'} - {message}")
+
+            except KeyboardInterrupt:
+                print("\nShutting down...")
+                break
+            except Exception as e:
+                print(f"Error: {e}")
+
+    finally:
+        browser.close()
+        instance.close()
+
+
+def evaluate_from_json(json_file: str, max_concurrent: int = 3, max_steps: int = 30):
+    file_path = Path(json_file)
+    if not file_path.exists():
+        raise FileNotFoundError(f"Error: File '{json_file}' not found")
+
+    with open(json_file, "r") as f:
+        data = json.load(f)
+        problems: List[Problem] = data["problems"]
+
+    print(f"Loaded {len(problems)} problems from '{json_file}'")
+    print(f"Running with max {max_concurrent} concurrent tasks")
+    print("-" * 60)
+
+    # Process problems with thread pool for concurrency
+    results = []
+    with ThreadPoolExecutor(max_workers=max_concurrent) as executor:
+        # Submit all tasks
+        future_to_problem = {
+            executor.submit(
+                evaluate_problem, problem, idx, len(problems), "fira:v1.3.1", max_steps
+            ): (problem, idx)
+            for idx, problem in enumerate(problems)
+        }

+        # Collect results as they complete
+        for future in as_completed(future_to_problem):
+            result = future.result()
+            results.append(result)
+
+    # Display results
+    print("\n" + "=" * 60)
+    print("EVALUATION RESULTS")
+    print("=" * 60)
+
+    successes = 0
+    for problem_id, success, error in results:
+        status = "✓ PASS" if success else "✗ FAIL"
+        print(f"{status} | {problem_id}")
+        if error and not success:
+            print(f"  └─ Error: {error}")
+        if success:
+            successes += 1
+
+    print("-" * 60)
+    print(f"Total problems: {len(problems)}")
+    print(f"Successes: {successes}")
+    print(f"Failures: {len(problems) - successes}")
+    print(f"Success rate: {successes / len(problems):.2%}")
+
+
+def main():
+    parser = argparse.ArgumentParser(description="Gemini Agent for Fleet SDK")
+    parser.add_argument(
+        "--eval", type=str, help="Path to JSON file with problems to evaluate"
+    )
+    parser.add_argument(
+        "--max-concurrent",
+        type=int,
+        default=3,
+        help="Maximum number of concurrent evaluations (default: 3)",
+    )
+    parser.add_argument(
+        "--max-steps",
+        type=int,
+        default=30,
+        help="Maximum steps per problem (default: 30)",
+    )
+    parser.add_argument(
+        "--interactive", action="store_true", help="Run in interactive mode"
+    )
+
+    args = parser.parse_args()
+
+    if args.eval:
+        evaluate_from_json(args.eval, args.max_concurrent, args.max_steps)
+    elif args.interactive:
+        interactive_mode()
+    else:
+        raise ValueError("No arguments provided")
+
+
+if __name__ == "__main__":
+    main()
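
Note: the new example above is driven entirely by the argparse flags defined in its main(): --eval for batch evaluation, --interactive for a prompt loop, plus --max-concurrent and --max-steps. A minimal sketch of reusing the batch entry point directly (the module path and the problems file name are illustrative assumptions, not part of the package):

    # Sketch only: assumes the examples/ directory is on sys.path and GEMINI_API_KEY is set.
    from gemini_example import evaluate_from_json

    # Evaluate a hypothetical problems.json with the same defaults the CLI uses.
    evaluate_from_json("problems.json", max_concurrent=3, max_steps=30)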
examples/json_tasks_example.py CHANGED
@@ -2,7 +2,7 @@ import re
 import asyncio
 import argparse
 import json
-from typing import TypedDict, List
+from typing import TypedDict, List, Optional, Tuple
 from pathlib import Path
 import fleet as flt
 from nova_act import NovaAct, ActResult
@@ -12,6 +12,7 @@ load_dotenv()


 MAX_STEPS = 30
+MAX_CONCURRENT_TASKS = 5  # Limit concurrent tasks to avoid overwhelming the system


 class Problem(TypedDict):
@@ -29,6 +30,67 @@ def extract_function_name(function_str: str) -> str | None:
     raise ValueError(f"No function name found in {function_str}")


+async def process_problem(
+    problem: Problem, problem_idx: int, total_problems: int, env_key: str
+) -> Tuple[str, bool, Optional[str]]:
+    env = None
+    try:
+        # Create a new environment instance for this problem
+        env = await flt.env.make_async(env_key)
+        print(
+            f"[Problem {problem_idx + 1}/{total_problems}] Created environment for {problem['id']}: {env.urls.app}"
+        )
+
+        # Run NovaAct in a thread (since it's synchronous)
+        def run_nova() -> ActResult:
+            with NovaAct(starting_page=env.urls.app, headless=True) as nova:
+                return nova.act(problem["problem"], max_steps=MAX_STEPS)
+
+        try:
+            print(
+                f"[Problem {problem_idx + 1}/{total_problems}] Solving {problem['id']}..."
+            )
+            await asyncio.to_thread(run_nova)
+        except Exception as e:
+            print(
+                f"[Problem {problem_idx + 1}/{total_problems}] Error during solving {problem['id']}: {e}"
+            )
+            error_msg = str(e)
+        else:
+            error_msg = None
+
+        # Verify the solution
+        function_name = extract_function_name(problem["verifier_func"])
+        print(
+            f"[Problem {problem_idx + 1}/{total_problems}] Verifying {function_name} ({problem['id']})..."
+        )
+        response = await env.verify_raw(problem["verifier_func"], function_name)
+
+        print(
+            f"[Problem {problem_idx + 1}/{total_problems}] Result for {problem['id']}: {'✓' if response.success else '✗'}"
+        )
+
+        return problem["id"], response.success, error_msg
+
+    except Exception as e:
+        print(
+            f"[Problem {problem_idx + 1}/{total_problems}] Fatal error processing {problem['id']}: {e}"
+        )
+        return problem["id"], False, str(e)
+    finally:
+        # Clean up the environment
+        if env:
+            try:
+                await env.close()
+                print(
+                    f"[Problem {problem_idx + 1}/{total_problems}] Closed environment for {problem['id']}"
+                )
+            except Exception as e:
+                print(
+                    f"[Problem {problem_idx + 1}/{total_problems}] Error closing environment for {problem['id']}: {e}"
+                )
+
+
 async def main():
     parser = argparse.ArgumentParser(
         description="Load and display Jira problems from JSON file"
@@ -36,49 +98,66 @@ async def main():
     parser.add_argument(
         "json_file", type=str, help="Path to the JSON file containing problems"
     )
+    parser.add_argument(
+        "--max-concurrent",
+        type=int,
+        default=MAX_CONCURRENT_TASKS,
+        help=f"Maximum number of concurrent tasks (default: {MAX_CONCURRENT_TASKS})",
+    )
     args = parser.parse_args()

     file_path = Path(args.json_file)
     if not file_path.exists():
         raise FileNotFoundError(f"Error: File '{args.json_file}' not found")

-    env = await flt.env.make_async("fira:v1.2.7")
-    print(f"New Instance: {env.urls.app}")
-
-    successes = 0
-
     try:
         with open(args.json_file, "r") as f:
             data = json.load(f)
             problems: List[Problem] = data["problems"]

         print(f"Loaded {len(problems)} problems from '{args.json_file}'")
-
-        for i, problem in enumerate(problems):
-            print(f"Solving problem {i + 1} of {len(problems)}: {problem['id']}")
-            await env.reset()
-
-            def run_nova() -> ActResult:
-                with NovaAct(starting_page=env.urls.app, headless=True) as nova:
-                    return nova.act(problem["problem"], max_steps=MAX_STEPS)
-
-            try:
-                await asyncio.to_thread(run_nova)
-            except Exception as e:
-                print(f"Error: {e}")
-
-            function_name = extract_function_name(problem["verifier_func"])
-            print(f"Verifying {function_name} ({problem['id']})...")
-            response = await env.verify_raw(problem["verifier_func"], function_name)
-            print(response)
-            if response.success:
+        print(f"Running with max {args.max_concurrent} concurrent tasks")
+        print("-" * 60)
+
+        # Create a semaphore to limit concurrent tasks
+        semaphore = asyncio.Semaphore(args.max_concurrent)
+
+        async def process_with_semaphore(
+            problem: Problem, idx: int
+        ) -> Tuple[str, bool, Optional[str]]:
+            async with semaphore:
+                return await process_problem(problem, idx, len(problems), "fira:v1.2.7")
+
+        # Process all problems concurrently (with semaphore limiting)
+        tasks = [
+            process_with_semaphore(problem, i) for i, problem in enumerate(problems)
+        ]
+
+        results = await asyncio.gather(*tasks)
+
+        # Count successes and display summary
+        print("\n" + "=" * 60)
+        print("EVALUATION RESULTS")
+        print("=" * 60)
+
+        successes = 0
+        for problem_id, success, error in results:
+            status = "✓ PASS" if success else "✗ FAIL"
+            print(f"{status} | {problem_id}")
+            if error and not success:
+                print(f"  └─ Error: {error}")
+            if success:
                 successes += 1

+        print("-" * 60)
+        print(f"Total problems: {len(problems)}")
         print(f"Successes: {successes}")
-        print(f"Total: {len(problems)}")
-        print(f"Success rate: {successes / len(problems)}")
-    finally:
-        await env.close()
+        print(f"Failures: {len(problems) - successes}")
+        print(f"Success rate: {successes / len(problems):.2%}")
+
+    except Exception as e:
+        print(f"Fatal error: {e}")
+        raise


 if __name__ == "__main__":
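
Note: the rewrite above replaces the single shared environment (created once and reset per problem) with one environment per problem, fanned out with asyncio.gather and capped by an asyncio.Semaphore. A stripped-down sketch of that concurrency pattern, independent of the Fleet specifics:

    import asyncio

    async def run_bounded(items, worker, limit=5):
        # Cap the number of workers running at once, as the example does per problem.
        semaphore = asyncio.Semaphore(limit)

        async def guarded(item):
            async with semaphore:
                return await worker(item)

        return await asyncio.gather(*(guarded(item) for item in items))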
fleet/_async/client.py CHANGED
@@ -99,7 +99,9 @@ class AsyncFleet:
         response = await self.client.request("GET", f"/v1/env/{env_key}")
         return EnvironmentModel(**response.json())

-    async def make(self, env_key: str) -> AsyncEnvironment:
+    async def make(
+        self, env_key: str, region: Optional[str] = None
+    ) -> AsyncEnvironment:
         if ":" in env_key:
             env_key_part, version = env_key.split(":", 1)
             if not version.startswith("v"):
@@ -108,7 +110,7 @@ class AsyncFleet:
             env_key_part = env_key
             version = None

-        request = InstanceRequest(env_key=env_key_part, version=version)
+        request = InstanceRequest(env_key=env_key_part, version=version, region=region)
         response = await self.client.request(
             "POST", "/v1/env/instances", json=request.model_dump()
         )
@@ -116,10 +118,14 @@ class AsyncFleet:
         await instance.instance.load()
         return instance

-    async def instances(self, status: Optional[str] = None) -> List[AsyncEnvironment]:
+    async def instances(
+        self, status: Optional[str] = None, region: Optional[str] = None
+    ) -> List[AsyncEnvironment]:
         params = {}
         if status:
             params["status"] = status
+        if region:
+            params["region"] = region

         response = await self.client.request("GET", "/v1/env/instances", params=params)
         return [AsyncEnvironment(**instance_data) for instance_data in response.json()]
fleet/_async/env/client.py CHANGED
@@ -3,16 +3,18 @@ from ..models import Environment as EnvironmentModel
 from typing import List, Optional


-async def make_async(env_key: str) -> AsyncEnvironment:
-    return await AsyncFleet().make(env_key)
+async def make_async(env_key: str, region: Optional[str] = None) -> AsyncEnvironment:
+    return await AsyncFleet().make(env_key, region=region)


 async def list_envs_async() -> List[EnvironmentModel]:
     return await AsyncFleet().list_envs()


-async def list_instances_async(status: Optional[str] = None) -> List[AsyncEnvironment]:
-    return await AsyncFleet().instances(status=status)
+async def list_instances_async(
+    status: Optional[str] = None, region: Optional[str] = None
+) -> List[AsyncEnvironment]:
+    return await AsyncFleet().instances(status=status, region=region)


 async def get_async(instance_id: str) -> AsyncEnvironment:
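
Note: both async convenience wrappers now thread an optional region through to AsyncFleet. A minimal usage sketch (the env key matches the examples above; the region string and status value are illustrative assumptions, since valid values are not documented in this diff):

    import asyncio
    import fleet as flt

    async def demo():
        # Passing region=None (the default) preserves the 0.2.7 behaviour.
        env = await flt.env.make_async("fira:v1.2.7", region="us-west-2")  # region value is assumed
        instances = await flt.env.list_instances_async(status="running", region="us-west-2")  # filter values assumed
        print(env.urls.app, len(instances))
        await env.close()

    asyncio.run(demo())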
fleet/client.py CHANGED
@@ -99,7 +99,9 @@ class Fleet:
         response = self.client.request("GET", f"/v1/env/{env_key}")
         return EnvironmentModel(**response.json())

-    def make(self, env_key: str) -> Environment:
+    def make(
+        self, env_key: str, region: Optional[str] = None
+    ) -> Environment:
         if ":" in env_key:
             env_key_part, version = env_key.split(":", 1)
             if not version.startswith("v"):
@@ -108,7 +110,7 @@ class Fleet:
             env_key_part = env_key
             version = None

-        request = InstanceRequest(env_key=env_key_part, version=version)
+        request = InstanceRequest(env_key=env_key_part, version=version, region=region)
         response = self.client.request(
             "POST", "/v1/env/instances", json=request.model_dump()
         )
@@ -116,10 +118,14 @@ class Fleet:
         instance.instance.load()
         return instance

-    def instances(self, status: Optional[str] = None) -> List[Environment]:
+    def instances(
+        self, status: Optional[str] = None, region: Optional[str] = None
+    ) -> List[Environment]:
         params = {}
         if status:
             params["status"] = status
+        if region:
+            params["region"] = region

         response = self.client.request("GET", "/v1/env/instances", params=params)
         return [Environment(**instance_data) for instance_data in response.json()]
fleet/env/__init__.py CHANGED
@@ -1,15 +1,17 @@
 """Fleet env module - convenience functions for environment management."""

-from .client import make, list_envs, get
+from .client import make, list_envs, get, list_instances

 # Import async versions from _async
-from .._async.env.client import make_async, list_envs_async, get_async
+from .._async.env.client import make_async, list_envs_async, get_async, list_instances_async

 __all__ = [
     "make",
     "list_envs",
+    "list_instances",
     "get",
     "make_async",
     "list_envs_async",
+    "list_instances_async",
     "get_async",
 ]
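
Note: list_instances and list_instances_async are now re-exported from fleet.env alongside the existing helpers. A small sketch of the import paths this enables (both filters remain optional, as in examples/example_sync.py above):

    import fleet as flt
    from fleet.env import list_instances, list_instances_async

    # The attribute access used in the examples and the direct import refer to the same function.
    assert flt.env.list_instances is list_instances
    print("Instances:", len(list_instances()))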
fleet/env/client.py CHANGED
@@ -3,16 +3,18 @@ from ..models import Environment as EnvironmentModel
 from typing import List, Optional


-def make(env_key: str) -> Environment:
-    return Fleet().make(env_key)
+def make(env_key: str, region: Optional[str] = None) -> Environment:
+    return Fleet().make(env_key, region=region)


 def list_envs() -> List[EnvironmentModel]:
     return Fleet().list_envs()


-def list_instances(status: Optional[str] = None) -> List[Environment]:
-    return Fleet().instances(status=status)
+def list_instances(
+    status: Optional[str] = None, region: Optional[str] = None
+) -> List[Environment]:
+    return Fleet().instances(status=status, region=region)


 def get(instance_id: str) -> Environment:
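
Note: the synchronous wrappers gain the same optional region keyword, so existing calls like flt.env.make("hubspot:v1.2.7") keep working unchanged. A minimal sync sketch (the region string is an illustrative assumption):

    import fleet as flt

    env = flt.env.make("hubspot:v1.2.7", region="us-east-1")  # region value is assumed
    print("New Instance:", env.instance_id)

    in_region = flt.env.list_instances(region="us-east-1")  # same assumed region
    print("Instances in region:", len(in_region))

    env.close()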
fleet_python-0.2.9.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: fleet-python
-Version: 0.2.7
+Version: 0.2.9
 Summary: Python SDK for Fleet environments
 Author-email: Fleet AI <nic@fleet.so>
 License: Apache-2.0
fleet_python-0.2.9.dist-info/RECORD CHANGED
@@ -1,26 +1,27 @@
 examples/dsl_example.py,sha256=3Eu5924a8x61nuSGXqGz8XjPLNKKH8Ye7lSYHSvixtk,5361
 examples/example.py,sha256=FFPfM5Oso7IP9Q8aELpof1J41zslELdHHJhAAck9vLk,1008
 examples/example_client.py,sha256=70HKEhz_Gb79YcvKQauCPdS08AAwjo9unt2dh1jN_Oo,1030
-examples/example_sync.py,sha256=sW8pMU1WsEecGlc_NeQ8BAS1YxOud4iG6MafWGnFeTg,885
-examples/json_tasks_example.py,sha256=EhsJKVWoJFcrqhIIKF5lVrnIqpZDtizQqYblJaOZtmk,2400
+examples/example_sync.py,sha256=Kj_Mj2G88ADdQ2Pw_JQqJNbnpZrfMGSNR9paRQhSHLY,967
+examples/gemini_example.py,sha256=8mDXGGCaodyK6uXgpWhxi-DQ5OA-GFW12Gfwh0b3EDY,16177
+examples/json_tasks_example.py,sha256=3ub2LLiC6hXpVEH1175QxCmfCD3Blfo3yoG85uV5CS8,5334
 examples/nova_act_example.py,sha256=hZLpObVsiXKQzqGwMZVMf4A2j_z4TYE-YO9pgNmaKPk,836
 examples/openai_example.py,sha256=I2vk_SJN9BkSRQCYRJfbtGJ-HJ2xzQj-lOjwqmLos5M,8234
 examples/openai_simple_example.py,sha256=I42ytIwv0INgDO39pp1MOQSqsJz2YYH8GeNNBaUtq3A,1748
 examples/quickstart.py,sha256=1VT39IRRhemsJgxi0O0gprdpcw7HB4pYO97GAYagIcg,3788
 fleet/__init__.py,sha256=p-0zZMxKoD6dlQ9IGQ753QQWuwcw-RPl4E6HezhKfV4,2111
 fleet/base.py,sha256=t4xkgazl8kEP05JFjNByyf39RvvASRP0GsvxuoqKPY0,1395
-fleet/client.py,sha256=xPLlHS14Q2F8jn8e5lSLMUFE6o6ro7oM0XclOjVrxE4,4759
+fleet/client.py,sha256=Qn2DEXqO21POxhT1Gg5hZhTEeR3EVe6huocdw_o9MZo,4919
 fleet/exceptions.py,sha256=yG3QWprCw1OnF-vdFBFJWE4m3ftBLBng31Dr__VbjI4,2249
 fleet/models.py,sha256=Jf6Zmk689TPXhTSnVENK_VCw0VsujWzEWsN3T29MQ0k,3713
 fleet/playwright.py,sha256=BmRvez5DUa0ttAQB084hPAyt9_8WxdzCGBGF-GZbTuQ,8593
 fleet/_async/__init__.py,sha256=AJWCnuo7XKja4yBb8fK2wX7ntciLXQrpzdRHwjTRP6M,62
 fleet/_async/base.py,sha256=hUch1I5oUPgaCXR3IpJ8f_PjigifAZg2-LR7BJdZSo8,1413
-fleet/_async/client.py,sha256=T15EwovKCCshr4sF_HeHNdnbIUFoyZqjrN6_tU5o4aY,5010
+fleet/_async/client.py,sha256=Do5HPlhGw7iYpqlJ1eVmdwF_RATv9yAFAvENO_XwUq0,5170
 fleet/_async/exceptions.py,sha256=yG3QWprCw1OnF-vdFBFJWE4m3ftBLBng31Dr__VbjI4,2249
 fleet/_async/models.py,sha256=Jf6Zmk689TPXhTSnVENK_VCw0VsujWzEWsN3T29MQ0k,3713
 fleet/_async/playwright.py,sha256=2r4ywuv2ZqT0Qu3-k8A7V4YijeAOHnN8HiqJreLEYGI,8924
 fleet/_async/env/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-fleet/_async/env/client.py,sha256=CO2KuD6FFkP1SdOJ2ehwBKrCdLEFsz5tj3jPR5tVu4U,596
+fleet/_async/env/client.py,sha256=JPFTWRjyFkUVYFodQ4bRlafH5GRu34LckeFSmb8YdZo,692
 fleet/_async/instance/__init__.py,sha256=jIt-7EEJ0WM_ipheT_s0lniCbLei6yUdN0qQv1bMJ3E,524
 fleet/_async/instance/base.py,sha256=QgcCTHdcqhi5VQi6_a1uuR-uO2_2Z19-RwVPp1k266A,947
 fleet/_async/instance/client.py,sha256=qmX5g6lPrq0b3BQ6LvTApeyquTtCse98Cu_Kwc72y6A,5653
@@ -29,8 +30,8 @@ fleet/_async/resources/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3
 fleet/_async/resources/base.py,sha256=203gD54NP1IvjuSqFo-f7FvrkhtjChggtzrxJK7xf2E,667
 fleet/_async/resources/browser.py,sha256=x11y4aKHogIEv83FByHtExerjV-cDWI3U62349Guq_Q,1368
 fleet/_async/resources/sqlite.py,sha256=sRiII_qJ8X6-FSemlBsXThz4ZPjkNy9wDT8g5UAz2XM,1501
-fleet/env/__init__.py,sha256=_lvYBqieXWmvU_dyPi2seSpLO3AZh5kdprdqFeefkzk,338
-fleet/env/client.py,sha256=LVHXnC19bl0zEW20qykC63wJ0nO9gETF6z0_74dCdCo,479
+fleet/env/__init__.py,sha256=dlh41oWCKwg1-wUsy1c4lCA34hRKlBjfGpgEXO96TyY,426
+fleet/env/client.py,sha256=kS6Qx466i37CA6gU-LR18p7c16OzL7rb80Gsa7ggcrM,575
 fleet/instance/__init__.py,sha256=Hr8xPPoqzKOViXZXWmaL6dQ7NOBn-GooTGzoIvGmiE4,514
 fleet/instance/base.py,sha256=U-qW1EQVBo6yvMpP1JeKiPRhCjZ3y3aTsYFhLPNOTtQ,929
 fleet/instance/client.py,sha256=2EcuBpq21kUHCFommYdS9Ya-unLn-e8mrdAZBIZea3Q,5467
@@ -43,10 +44,10 @@ fleet/verifiers/__init__.py,sha256=mRMN8x0gDWFJ1MRLqdBtQw0gn_q8kDV3lMLyoiEf1yY,2
 fleet/verifiers/code.py,sha256=NJ4OLZnpqLkI1lXY7-5m2GuZklLxMzHUCnRMVyN2_OI,25
 fleet/verifiers/db.py,sha256=tssmvJjDHuBIy8qlL_P5-UdmEFUw2DZcqLsWZ8ot3Xw,27766
 fleet/verifiers/sql_differ.py,sha256=dmiGCFXVMEMbAX519OjhVqgA8ZvhnvdmC1BVpL7QCF0,6490
-fleet_python-0.2.7.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+fleet_python-0.2.9.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
 scripts/fix_sync_imports.py,sha256=b7tRvShgOFqyildqs1qI-Io0gaHappykBI-PSWWqUwE,2941
 scripts/unasync.py,sha256=--Fmaae47o-dZ1HYgX1c3Nvi-rMjcFymTRlJcWWnmpw,725
-fleet_python-0.2.7.dist-info/METADATA,sha256=hWvnoVCE-orcwesjDjMAm01Cjs5XWYpYXLfN9d7Hg44,4321
-fleet_python-0.2.7.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-fleet_python-0.2.7.dist-info/top_level.txt,sha256=_3DSmTohvSDf3AIP_BYfGzhwO1ECFwuzg83X-wHCx3Y,23
-fleet_python-0.2.7.dist-info/RECORD,,
+fleet_python-0.2.9.dist-info/METADATA,sha256=UEzKyrhsKU7cZtJFwwvBt2za7-inTrtEB2M6utTmq3Q,4321
+fleet_python-0.2.9.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+fleet_python-0.2.9.dist-info/top_level.txt,sha256=_3DSmTohvSDf3AIP_BYfGzhwO1ECFwuzg83X-wHCx3Y,23
+fleet_python-0.2.9.dist-info/RECORD,,