vision-agent 0.2.18__py3-none-any.whl → 0.2.20__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
vision_agent/agent/vision_agent_v2.py CHANGED
@@ -1,8 +1,9 @@
 import json
 import logging
 from pathlib import Path
-from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+from typing import Any, Callable, Dict, List, Mapping, Optional, Tuple, Union
 
+import pandas as pd
 from rich.console import Console
 from rich.syntax import Syntax
 from tabulate import tabulate
@@ -20,6 +21,7 @@ from vision_agent.agent.vision_agent_v2_prompt import (
     TEST,
     USER_REQ_CONTEXT,
     USER_REQ_SUBTASK_CONTEXT,
+    USER_REQ_SUBTASK_WM_CONTEXT,
 )
 from vision_agent.llm import LLM, OpenAILLM
 from vision_agent.tools.tools_v2 import TOOL_DESCRIPTIONS, TOOLS_DF
@@ -31,28 +33,53 @@ _MAX_TABULATE_COL_WIDTH = 80
 _CONSOLE = Console()
 
 
+def build_working_memory(working_memory: Mapping[str, List[str]]) -> Sim:
+    data: Mapping[str, List[str]] = {"desc": [], "doc": []}
+    for key, value in working_memory.items():
+        data["desc"].append(key)
+        data["doc"].append("\n".join(value))
+    df = pd.DataFrame(data) # type: ignore
+    return Sim(df, sim_key="desc")
+
+
 def extract_code(code: str) -> str:
     if "```python" in code:
         code = code[code.find("```python") + len("```python") :]
         code = code[: code.find("```")]
+    if code.startswith("python\n"):
+        code = code[len("python\n") :]
     return code
 
 
 def write_plan(
-    user_requirements: str, tool_desc: str, model: LLM
-) -> List[Dict[str, Any]]:
+    chat: List[Dict[str, str]],
+    plan: Optional[List[Dict[str, Any]]],
+    tool_desc: str,
+    model: LLM,
+) -> Tuple[str, List[Dict[str, Any]]]:
+    # Get last user request
+    if chat[-1]["role"] != "user":
+        raise ValueError("Last chat message must be from the user.")
+    user_requirements = chat[-1]["content"]
+
     context = USER_REQ_CONTEXT.format(user_requirement=user_requirements)
-    prompt = PLAN.format(context=context, plan="", tool_desc=tool_desc)
-    plan = json.loads(model(prompt).replace("```", "").strip())
-    return plan["plan"] # type: ignore
+    prompt = PLAN.format(context=context, plan=str(plan), tool_desc=tool_desc)
+    chat[-1]["content"] = prompt
+    plan = json.loads(model.chat(chat).replace("```", "").strip())
+    return plan["user_req"], plan["plan"] # type: ignore
 
 
 def write_code(
-    user_req: str, subtask: str, tool_info: str, code: str, model: LLM
+    user_req: str,
+    subtask: str,
+    working_memory: str,
+    tool_info: str,
+    code: str,
+    model: LLM,
 ) -> str:
     prompt = CODE.format(
-        context=USER_REQ_SUBTASK_CONTEXT.format(
-            user_requirement=user_req, subtask=subtask
+        context=USER_REQ_SUBTASK_WM_CONTEXT.format(
+            user_requirement=user_req, working_memory=working_memory, subtask=subtask
         ),
         tool_info=tool_info,
         code=code,
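
The new `build_working_memory` helper above turns the per-subtask trial-and-error strings collected during a run into a searchable `Sim` index. A minimal sketch of how it could be exercised on its own, assuming an OpenAI API key is configured (constructing a `Sim` calls the embeddings API); the memory contents below are invented for illustration:

```python
from vision_agent.agent.vision_agent_v2 import build_working_memory

# Hypothetical working memory: subtask description -> list of prior attempts.
memory = {
    "detect dogs in image": ["[previous impl]\n...", "[new output]\n2 boxes"],
    "count people in image": ["[previous impl]\n...", "[new output]\n5 people"],
}

sim = build_working_memory(memory)      # embeds each entry under the "desc" key
print(sim.top_k("find the dogs", k=1))  # retrieves the most relevant memory entry
```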
@@ -66,7 +93,7 @@ def write_code(
 
 
 def write_test(
-    user_req: str, subtask: str, tool_info: str, code: str, model: LLM
+    user_req: str, subtask: str, tool_info: str, _: str, code: str, model: LLM
 ) -> str:
     prompt = TEST.format(
         context=USER_REQ_SUBTASK_CONTEXT.format(
@@ -83,14 +110,24 @@ def write_test(
     return extract_code(code)
 
 
-def debug_code(sub_task: str, working_memory: List[str], model: LLM) -> Tuple[str, str]:
+def debug_code(
+    user_req: str,
+    subtask: str,
+    retrieved_ltm: str,
+    working_memory: str,
+    model: LLM,
+) -> Tuple[str, str]:
     # Make debug model output JSON
     if hasattr(model, "kwargs"):
         model.kwargs["response_format"] = {"type": "json_object"}
     prompt = DEBUG.format(
         debug_example=DEBUG_EXAMPLE,
-        context=USER_REQ_CONTEXT.format(user_requirement=sub_task),
-        previous_impl="\n".join(working_memory),
+        context=USER_REQ_SUBTASK_WM_CONTEXT.format(
+            user_requirement=user_req,
+            subtask=subtask,
+            working_memory=retrieved_ltm,
+        ),
+        previous_impl=working_memory,
     )
     messages = [
         {"role": "system", "content": DEBUG_SYS_MSG},
@@ -106,10 +143,11 @@ def write_and_exec_code(
     user_req: str,
     subtask: str,
     orig_code: str,
-    code_writer_call: Callable,
+    code_writer_call: Callable[..., str],
     model: LLM,
     tool_info: str,
     exec: Execute,
+    retrieved_ltm: str,
     max_retry: int = 3,
     verbose: bool = False,
 ) -> Tuple[bool, str, str, Dict[str, List[str]]]:
@@ -117,8 +155,9 @@ def write_and_exec_code(
     counter = 0
     reflection = ""
 
-    # TODO: add working memory to code_writer_call and debug_code
-    code = code_writer_call(user_req, subtask, tool_info, orig_code, model)
+    code = code_writer_call(
+        user_req, subtask, retrieved_ltm, tool_info, orig_code, model
+    )
     success, result = exec.run_isolation(code)
     working_memory: Dict[str, List[str]] = {}
     while not success and counter < max_retry:
@@ -136,7 +175,9 @@ def write_and_exec_code(
             PREV_CODE_CONTEXT.format(code=code, result=result)
         )
 
-        code, reflection = debug_code(subtask, working_memory[subtask], model)
+        code, reflection = debug_code(
+            user_req, subtask, retrieved_ltm, "\n".join(working_memory[subtask]), model
+        )
         success, result = exec.run_isolation(code)
         counter += 1
         if verbose:
@@ -148,7 +189,7 @@ def write_and_exec_code(
     if success:
         working_memory[subtask].append(
             PREV_CODE_CONTEXT_WITH_REFLECTION.format(
-                code=code, result=result, reflection=reflection
+                reflection=reflection, code=code, result=result
             )
         )
 
@@ -162,12 +203,15 @@ def run_plan(
     exec: Execute,
     code: str,
     tool_recommender: Sim,
+    long_term_memory: Optional[Sim] = None,
     verbose: bool = False,
 ) -> Tuple[str, str, List[Dict[str, Any]], Dict[str, List[str]]]:
     active_plan = [e for e in plan if "success" not in e or not e["success"]]
-    working_memory: Dict[str, List[str]] = {}
     current_code = code
     current_test = ""
+    retrieved_ltm = ""
+    working_memory: Dict[str, List[str]] = {}
+
     for task in active_plan:
         _LOGGER.info(
             f"""
@@ -176,7 +220,13 @@ def run_plan(
         tool_info = "\n".join(
             [e["doc"] for e in tool_recommender.top_k(task["instruction"])]
         )
-        success, code, result, task_memory = write_and_exec_code(
+
+        if long_term_memory is not None:
+            retrieved_ltm = "\n".join(
+                [e["doc"] for e in long_term_memory.top_k(task["instruction"], 1)]
+            )
+
+        success, code, result, working_memory_i = write_and_exec_code(
             user_req,
             task["instruction"],
             current_code,
@@ -184,14 +234,15 @@ def run_plan(
             coder,
             tool_info,
             exec,
-            verbose,
+            retrieved_ltm,
+            verbose=verbose,
         )
         if task["type"] == "code":
            current_code = code
        else:
            current_test = code
 
-        working_memory.update(task_memory)
+        working_memory.update(working_memory_i)
 
         if verbose:
             _CONSOLE.print(
@@ -231,6 +282,7 @@ class VisionAgentV2(Agent):
         self,
         timeout: int = 600,
         tool_recommender: Optional[Sim] = None,
+        long_term_memory: Optional[Sim] = None,
         verbose: bool = False,
     ) -> None:
         self.planner = OpenAILLM(temperature=0.1, json_mode=True)
@@ -241,6 +293,12 @@ class VisionAgentV2(Agent):
         else:
             self.tool_recommender = tool_recommender
         self.verbose = verbose
+        self._working_memory: Dict[str, List[str]] = {}
+        if long_term_memory is not None:
+            if "doc" not in long_term_memory.df.columns:
+                raise ValueError("Long term memory must have a 'doc' column.")
+        self.long_term_memory = long_term_memory
+        self.max_retries = 3
         if self.verbose:
             _LOGGER.setLevel(logging.INFO)
 
@@ -248,36 +306,47 @@ class VisionAgentV2(Agent):
         self,
         input: Union[List[Dict[str, str]], str],
         image: Optional[Union[str, Path]] = None,
+        plan: Optional[List[Dict[str, Any]]] = None,
     ) -> str:
         if isinstance(input, str):
             input = [{"role": "user", "content": input}]
-        code, _ = self.chat_with_tests(input, image)
-        return code
+        results = self.chat_with_workflow(input, image, plan)
+        return results["code"] # type: ignore
 
-    def chat_with_tests(
+    def chat_with_workflow(
         self,
         chat: List[Dict[str, str]],
         image: Optional[Union[str, Path]] = None,
-    ) -> Tuple[str, str]:
+        plan: Optional[List[Dict[str, Any]]] = None,
+    ) -> Dict[str, Any]:
         if len(chat) == 0:
             raise ValueError("Input cannot be empty.")
 
-        user_req = chat[0]["content"]
         if image is not None:
-            user_req += f" Image name {image}"
+            # append file names to all user messages
+            for chat_i in chat:
+                if chat_i["role"] == "user":
+                    chat_i["content"] += f" Image name {image}"
+
+        working_code = ""
+        if plan is not None:
+            # grab the latest working code from a previous plan
+            for task in plan:
+                if "success" in task and "code" in task and task["success"]:
+                    working_code = task["code"]
 
-        plan = write_plan(user_req, TOOL_DESCRIPTIONS, self.planner)
+        user_req, plan = write_plan(chat, plan, TOOL_DESCRIPTIONS, self.planner)
         _LOGGER.info(
             f"""Plan:
{tabulate(tabular_data=plan, headers="keys", tablefmt="mixed_grid", maxcolwidths=_MAX_TABULATE_COL_WIDTH)}"""
         )
-        working_memory: Dict[str, List[str]] = {}
 
-        working_code = ""
         working_test = ""
+        working_memory: Dict[str, List[str]] = {}
         success = False
+        retries = 0
 
-        while not success:
+        while not success and retries < self.max_retries:
            working_code, working_test, plan, working_memory_i = run_plan(
                user_req,
                plan,
@@ -285,16 +354,25 @@ class VisionAgentV2(Agent):
                self.exec,
                working_code,
                self.tool_recommender,
+                self.long_term_memory,
                self.verbose,
            )
            success = all(task["success"] for task in plan)
            working_memory.update(working_memory_i)
 
            if not success:
-                # TODO: ask for feedback and replan
+                # return to user and request feedback
                break
 
-        return working_code, working_test
+            retries += 1
+
+        return {
+            "code": working_code,
+            "test": working_test,
+            "success": success,
+            "working_memory": build_working_memory(working_memory),
+            "plan": plan,
+        }
 
     def log_progress(self, description: str) -> None:
         pass
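
Taken together, the agent changes above replace `chat_with_tests` with `chat_with_workflow`, which returns a result dict and accepts a previous plan so a failed run can be resumed. A rough usage sketch under those assumptions (the prompt and image name are hypothetical, and an OpenAI API key is required):

```python
from vision_agent.agent.vision_agent_v2 import VisionAgentV2

agent = VisionAgentV2(verbose=True)
chat = [{"role": "user", "content": "Count the dogs in the image"}]
results = agent.chat_with_workflow(chat, image="dogs.jpg")
print(results["success"])
print(results["code"])

# If the run did not fully succeed, the returned plan (with per-task "success"
# flags) can be fed back in; write_plan then rewrites only the failed subtasks.
if not results["success"]:
    results = agent.chat_with_workflow(chat, image="dogs.jpg", plan=results["plan"])
```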
vision_agent/agent/vision_agent_v2_prompt.py CHANGED
@@ -1,3 +1,8 @@
+USER_REQ_CONTEXT = """
+## User Requirement
+{user_requirement}
+"""
+
 USER_REQ_SUBTASK_CONTEXT = """
 ## User Requirement
 {user_requirement}
@@ -6,11 +11,16 @@ USER_REQ_SUBTASK_CONTEXT = """
 {subtask}
 """
 
-USER_REQ_CONTEXT = """
+USER_REQ_SUBTASK_WM_CONTEXT = """
 ## User Requirement
 {user_requirement}
-"""
 
+## Current Subtask
+{subtask}
+
+## Previous Task
+{working_memory}
+"""
 
 PLAN = """
 # Context
@@ -27,11 +37,13 @@ Based on the context and the tools you have available, write a plan of subtasks
 - For each subtask, you should provide a short instruction on what to do. Ensure the subtasks are large enough to be meaningful, encompassing multiple lines of code.
 - You do not need to have the agent rewrite any tool functionality you already have, you should instead instruct it to utilize one or more of those tools in each subtask.
 - You can have agents either write coding tasks, to code some functionality or testing tasks to test previous functionality.
+- If a current plan exists, examine each item in the plan to determine if it was successful. If there was an item that failed, i.e. 'success': False, then you should rewrite that item and all subsequent items to ensure that the rewritten plan is successful.
 
 Output a list of jsons in the following format:
 
 ```json
 {{
+    "user_req": str, # "a summarized version of the user requirement"
     "plan":
         [
             {{
@@ -61,8 +73,9 @@ CODE = """
 {code}
 
 # Constraints
-- Write a function that accomplishes the User Requirement. You are supplied code from a previous task, feel free to copy over that code into your own implementation if you need it.
-- Always prioritize using pre-defined tools or code for the same functionality. You have access to all these tools through the `from vision_agent.tools.tools_v2 import *` import.
+- Write a function that accomplishes the 'User Requirement'. You are supplied code from a previous task under 'Previous Code', feel free to copy over that code into your own implementation if you need it.
+- Always prioritize using pre-defined tools or code for the same functionality from 'Tool Info for Current Subtask'. You have access to all these tools through the `from vision_agent.tools.tools_v2 import *` import.
+- You may recieve previous trials and errors under 'Previous Task', this is code, output and reflections from previous tasks. You can use these to avoid running in to the same issues when writing your code.
 - Write clean, readable, and well-documented code.
 
 # Output
@@ -102,6 +115,7 @@ def add(a: int, b: int) -> int:
 
 
 PREV_CODE_CONTEXT = """
+[previous impl]
 ```python
 {code}
 ```
@@ -112,18 +126,20 @@ PREV_CODE_CONTEXT = """
 
 
 PREV_CODE_CONTEXT_WITH_REFLECTION = """
+[reflection on previous impl]
+{reflection}
+
+[new impl]
 ```python
 {code}
 ```
 
-[previous output]
+[new output]
 {result}
 
-[reflection on previous impl]
-{reflection}
 """
 
-
+# don't need [previous impl] because it will come from PREV_CODE_CONTEXT or PREV_CODE_CONTEXT_WITH_REFLECTION
 DEBUG = """
 [example]
 Here is an example of debugging with reflection.
@@ -133,7 +149,6 @@ Here is an example of debugging with reflection.
 [context]
 {context}
 
-[previous impl]
 {previous_impl}
 
 [instruction]
@@ -158,7 +173,7 @@ TEST = """
 {code}
 
 # Constraints
-- Write code to test the functionality of the provided code according to the Current Subtask. If you cannot test the code, then write code to visualize the result by calling the code.
+- Write code to test the functionality of the provided code according to the 'Current Subtask'. If you cannot test the code, then write code to visualize the result by calling the code.
 - Always prioritize using pre-defined tools for the same functionality.
 - Write clean, readable, and well-documented code.
 
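
For reference, a small sketch of how the new `USER_REQ_SUBTASK_WM_CONTEXT` template is filled in by `write_code` and `debug_code` above; the values are invented for illustration:

```python
from vision_agent.agent.vision_agent_v2_prompt import USER_REQ_SUBTASK_WM_CONTEXT

# Hypothetical values, mirroring the keyword arguments used in write_code above.
context = USER_REQ_SUBTASK_WM_CONTEXT.format(
    user_requirement="Count the dogs in the image",
    subtask="Load the image and run an object detector",
    working_memory="[previous impl]\n...\n[previous output]\n...",
)
print(context)
```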
vision_agent/llm/llm.py CHANGED
@@ -34,7 +34,7 @@ class OpenAILLM(LLM):
 
     def __init__(
         self,
-        model_name: str = "gpt-4-turbo",
+        model_name: str = "gpt-4o",
         api_key: Optional[str] = None,
         json_mode: bool = False,
         system_prompt: Optional[str] = None,
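
The only change here is the default chat model. A caller that wants the previous behaviour can still pin it explicitly (a sketch, assuming an OpenAI API key is configured):

```python
from vision_agent.llm import OpenAILLM

llm = OpenAILLM()                                 # now defaults to "gpt-4o"
llm_pinned = OpenAILLM(model_name="gpt-4-turbo")  # keep the 0.2.18 default
```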
vision_agent/utils/__init__.py CHANGED
@@ -1,3 +1,3 @@
 from .execute import Execute
-from .sim import Sim
+from .sim import Sim, load_sim, merge_sim
 from .video import extract_frames_from_video
vision_agent/utils/sim.py CHANGED
@@ -1,6 +1,7 @@
 from pathlib import Path
 from typing import Dict, List, Optional, Sequence, Union
 
+import numpy as np
 import pandas as pd
 from openai import Client
 from scipy.spatial.distance import cosine # type: ignore
@@ -46,7 +47,14 @@
         )
 
     def save(self, sim_file: Union[str, Path]) -> None:
-        self.df.to_csv(sim_file, index=False)
+        sim_file = Path(sim_file)
+        sim_file.mkdir(parents=True, exist_ok=True)
+
+        df = self.df.copy()
+        embs = np.array(df.embs.tolist())
+        np.save(sim_file / "embs.npy", embs)
+        df = df.drop("embs", axis=1)
+        df.to_csv(sim_file / "df.csv", index=False)
 
     def top_k(self, query: str, k: int = 5) -> Sequence[Dict]:
         """Returns the top k most similar items to the query.
@@ -65,6 +73,13 @@
         return res[[c for c in res.columns if c != "embs"]].to_dict(orient="records")
 
 
+def merge_sim(sim1: Sim, sim2: Sim) -> Sim:
+    return Sim(pd.concat([sim1.df, sim2.df], ignore_index=True))
+
+
 def load_sim(sim_file: Union[str, Path]) -> Sim:
-    df = pd.read_csv(sim_file)
+    sim_file = Path(sim_file)
+    df = pd.read_csv(sim_file / "df.csv")
+    embs = np.load(sim_file / "embs.npy")
+    df["embs"] = list(embs)
     return Sim(df)
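
The `Sim` changes above switch `save`/`load_sim` from a single CSV to a directory containing `df.csv` plus an `embs.npy` embeddings file, and add `merge_sim`. A rough sketch under the assumption that an OpenAI API key is configured (building a `Sim` calls the embeddings API); the descriptions are made up:

```python
import pandas as pd

from vision_agent.utils import Sim, load_sim, merge_sim

# Hypothetical tool descriptions to index.
df = pd.DataFrame({
    "desc": ["detect objects in an image", "classify an image"],
    "doc": ["grounding_dino(...)", "clip(...)"],
})
sim = Sim(df, sim_key="desc")

# save() now writes a directory (df.csv + embs.npy) instead of one CSV,
# so load_sim() can restore the embeddings without re-calling the API.
sim.save("tool_index")
restored = load_sim("tool_index")

combined = merge_sim(restored, sim)  # concatenates the two underlying DataFrames
print(combined.top_k("find the objects", k=1))
```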
vision_agent-0.2.18.dist-info/METADATA → vision_agent-0.2.20.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: vision-agent
-Version: 0.2.18
+Version: 0.2.20
 Summary: Toolset for Vision Agent
 Author: Landing AI
 Author-email: dev@landing.ai
vision_agent-0.2.18.dist-info/RECORD → vision_agent-0.2.20.dist-info/RECORD
@@ -9,12 +9,12 @@ vision_agent/agent/reflexion.py,sha256=4gz30BuFMeGxSsTzoDV4p91yE0R8LISXp28IaOI6w
 vision_agent/agent/reflexion_prompts.py,sha256=G7UAeNz_g2qCb2yN6OaIC7bQVUkda4m3z42EG8wAyfE,9342
 vision_agent/agent/vision_agent.py,sha256=ywOowbuwNSapVwl02ePZP_EzW1FlZULoCV59LR5nFww,27028
 vision_agent/agent/vision_agent_prompts.py,sha256=MZSIwovYgB-f-kdJ6btaNDVXptJn47bfOL3-Zn6NiC0,8573
-vision_agent/agent/vision_agent_v2.py,sha256=CDgGBSoa2LoMS0b4JhyDkoS3PJJNmCCPfxIGUc4RfQg,9658
-vision_agent/agent/vision_agent_v2_prompt.py,sha256=-90Hlbtqb5Fp7OVjGabpTdgr-yCr8AYKIfiMRfoL4SY,5141
+vision_agent/agent/vision_agent_v2.py,sha256=K-zJ0utlvgWpR0TlP9M3yO0O9t9L37FcBs31OfOC4C0,12185
+vision_agent/agent/vision_agent_v2_prompt.py,sha256=dd9m9Vqp91r4dpsKMDwXr54jG_GTBdJNDzpgR115S8Q,5997
 vision_agent/fonts/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 vision_agent/fonts/default_font_ch_en.ttf,sha256=1YM0Z3XqLDjSNbF7ihQFSAIUdjF9m1rtHiNC_6QosTE,1594400
 vision_agent/llm/__init__.py,sha256=BoUm_zSAKnLlE8s-gKTSQugXDqVZKPqYlWwlTLdhcz4,48
-vision_agent/llm/llm.py,sha256=qWDBpJolGLWNwDjpEXu1NrjlJbo7Fj9efJYkSfVn6oE,5784
+vision_agent/llm/llm.py,sha256=A-gN0vMb79fSxhSK1qBs6PTu1fba9Gvy6pitOyjW2gM,5779
 vision_agent/lmm/__init__.py,sha256=nnNeKD1k7q_4vLb1x51O_EUTYaBgGfeiCx5F433gr3M,67
 vision_agent/lmm/lmm.py,sha256=gK90vMxh0OcGSuIZQikBkDXm4pfkdFk1R2y7rtWDl84,10539
 vision_agent/tools/__init__.py,sha256=p5SM0YhThSVO_jRF9O-OjH2fYDPv-iMjexDX9xPPb7M,452
@@ -22,13 +22,13 @@ vision_agent/tools/prompts.py,sha256=V1z4YJLXZuUl_iZ5rY0M5hHc_2tmMEUKr0WocXKGt4E
 vision_agent/tools/tool_utils.py,sha256=mK6QfbYr6oo9ci979-_6R1DrxU2i8HGhwosADyvciI0,865
 vision_agent/tools/tools.py,sha256=sVxN7SpDkz_XTc_SKwkoRF4EwaMTuHvTsCHwtR942Fc,47373
 vision_agent/tools/tools_v2.py,sha256=1Y_ZbYJyuo2eZZkq7jY3YfuKWC82C-GFCZMLYH-I5ew,13800
-vision_agent/utils/__init__.py,sha256=AKXf1QVOpO6MnqU8RSaFLQ_4us4DcKf8ibgEbhuHjvI,95
+vision_agent/utils/__init__.py,sha256=xsHFyJSDbLdonB9Dh74cwZnVTiT__2OQF3Brd3Nmglc,116
 vision_agent/utils/execute.py,sha256=RC_jKrm2kOWwzNe9xKuA2xJcbsNcD0Hb95_o3_Le0_E,3820
 vision_agent/utils/image_utils.py,sha256=1dggPBhW8_hUXDItCRLa23h-hdBwS50cjL4v1hsoUbg,7586
-vision_agent/utils/sim.py,sha256=FaD16kKL1-JR2aSCmznF9KkJux9u3_Nr9tF4smBeoK0,2327
+vision_agent/utils/sim.py,sha256=SO4-pj2Fjs3yr-KT8S0nuUd66lf7m7XvMAp7_ecvKuQ,2813
 vision_agent/utils/type_defs.py,sha256=4LTnTL4HNsfYqCrDn9Ppjg9bSG2ZGcoKSSd9YeQf4Bw,1792
 vision_agent/utils/video.py,sha256=xTElFSFp1Jw4ulOMnk81Vxsh-9dTxcWUO6P9fzEi3AM,7653
-vision_agent-0.2.18.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-vision_agent-0.2.18.dist-info/METADATA,sha256=jGDZiQGkBTBepGOkLgwu0ac4SpYNz4WQDZrPgAj6bJI,9121
-vision_agent-0.2.18.dist-info/WHEEL,sha256=7Z8_27uaHI_UZAc4Uox4PpBhQ9Y5_modZXWMxtUi4NU,88
-vision_agent-0.2.18.dist-info/RECORD,,
+vision_agent-0.2.20.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+vision_agent-0.2.20.dist-info/METADATA,sha256=z40HQBzaHYcJqndMnEDAJMlsvGBv0SdW0VCaVA2O6ec,9121
+vision_agent-0.2.20.dist-info/WHEEL,sha256=7Z8_27uaHI_UZAc4Uox4PpBhQ9Y5_modZXWMxtUi4NU,88
+vision_agent-0.2.20.dist-info/RECORD,,