vision-agent 0.0.30__py3-none-any.whl → 0.0.32__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
vision_agent/__init__.py CHANGED
@@ -1,5 +1,5 @@
+ from .agent import Agent
  from .data import DataStore, build_data_store
  from .emb import Embedder, OpenAIEmb, SentenceTransformerEmb, get_embedder
  from .llm import LLM, OpenAILLM
  from .lmm import LMM, LLaVALMM, OpenAILMM, get_lmm
- from .agent import Agent
vision_agent/agent/__init__.py CHANGED
@@ -1,3 +1,3 @@
  from .agent import Agent
- from .reflexion import Reflexion
  from .easytool import EasyTool
+ from .reflexion import Reflexion
vision_agent/agent/easytool.py CHANGED
@@ -4,7 +4,8 @@ import sys
  from pathlib import Path
  from typing import Any, Callable, Dict, List, Optional, Tuple, Union

- from vision_agent import LLM, LMM, OpenAILLM
+ from vision_agent.llm import LLM, OpenAILLM
+ from vision_agent.lmm import LMM
  from vision_agent.tools import TOOLS

  from .agent import Agent
@@ -42,10 +43,37 @@ def change_name(name: str) -> str:

  def format_tools(tools: Dict[int, Any]) -> str:
      # Format this way so it's clear what the ID's are
-     tool_list = []
+     tool_str = ""
      for key in tools:
-         tool_list.append(f"ID: {key}, {tools[key]}\\n")
-     return str(tool_list)
+         tool_str += f"ID: {key}, {tools[key]}\n"
+     return tool_str
+
+
+ def topological_sort(tasks: List[Dict]) -> List[Dict]:
+     in_degree = {task["id"]: 0 for task in tasks}
+     for task in tasks:
+         for dep in task["dep"]:
+             if dep in in_degree:
+                 in_degree[task["id"]] += 1
+
+     queue = [task for task in tasks if in_degree[task["id"]] == 0]
+     sorted_order = []
+
+     while queue:
+         current = queue.pop(0)
+         sorted_order.append(current)
+
+         for task in tasks:
+             if current["id"] in task["dep"]:
+                 in_degree[task["id"]] -= 1
+                 if in_degree[task["id"]] == 0:
+                     queue.append(task)
+
+     if len(sorted_order) != len(tasks):
+         completed_ids = set([task["id"] for task in sorted_order])
+         remaining_tasks = [task for task in tasks if task["id"] not in completed_ids]
+         sorted_order.extend(remaining_tasks)
+     return sorted_order


  def task_decompose(
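The new topological_sort is a Kahn's-algorithm ordering over each task's "dep" list; any tasks still unprocessed when the queue drains (i.e. tasks caught in a dependency cycle) are appended at the end rather than dropped, so no task is ever lost. A minimal sketch of the behavior on hand-written task dicts (the ids and task texts are illustrative, not from the package)::

    from vision_agent.agent.easytool import topological_sort

    tasks = [
        {"id": 2, "task": "count the cars", "dep": [1]},
        {"id": 1, "task": "detect the cars", "dep": [-1]},  # -1 matches no task id, so no incoming edge
        {"id": 3, "task": "summarize", "dep": [1, 2]},
    ]
    print([t["id"] for t in topological_sort(tasks)])  # [1, 2, 3]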
@@ -151,7 +179,11 @@ def answer_summarize(


  def function_call(tool: Callable, parameters: Dict[str, Any]) -> Any:
-     return tool()(**parameters)
+     try:
+         return tool()(**parameters)
+     except Exception as e:
+         _LOGGER.error(f"Failed function_call on: {e}")
+         return None


  def retrieval(
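With this change a tool that raises no longer aborts the retrieval loop: the exception is logged and None is returned, which the "if e is not None" filter added below then drops from the results string. A small sketch; note the "input" parameter name is taken from the Divide tool's usage spec, since the arithmetic tools' __call__ signatures are not shown in this diff::

    from vision_agent.agent.easytool import function_call
    from vision_agent.tools import TOOLS

    divide = next(v["class"] for v in TOOLS.values() if v["name"] == "divide_")
    print(function_call(divide, {"input": [8, 2]}))  # 4.0 (normalized to 2 decimal places)
    print(function_call(divide, {"input": [8, 0]}))  # ZeroDivisionError is logged, prints None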
@@ -160,7 +192,6 @@ def retrieval(
      tools: Dict[int, Any],
      previous_log: str,
  ) -> Tuple[List[Dict], str]:
-     # TODO: remove tools_used?
      tool_id = choose_tool(
          model, question, {k: v["description"] for k, v in tools.items()}
      )
@@ -200,7 +231,7 @@ def retrieval(
              call_results.extend(parse_tool_results(result))
          tool_results[i]["call_results"] = call_results

-     call_results_str = "\n\n".join([str(e) for e in call_results])
+     call_results_str = "\n\n".join([str(e) for e in call_results if e is not None])
      _LOGGER.info(f"\tCall Results: {call_results_str}")
      return tool_results, call_results_str
@@ -261,6 +292,10 @@ class EasyTool(Agent):
          if tasks is not None:
              task_list = [{"task": task, "id": i + 1} for i, task in enumerate(tasks)]
              task_list = task_topology(self.task_model, question, task_list)
+             try:
+                 task_list = topological_sort(task_list)
+             except Exception:
+                 _LOGGER.error(f"Failed topological_sort on: {task_list}")
          else:
              task_list = []
@@ -270,7 +305,6 @@ class EasyTool(Agent):
          answers = []
          for task in task_list:
              task_depend[task["id"]] = {"task": task["task"], "answer": ""} # type: ignore
-         # TODO topological sort task_list
          all_tool_results = []
          for task in task_list:
              task_str = task["task"]
vision_agent/agent/reflexion.py CHANGED
@@ -4,7 +4,8 @@ import sys
  from pathlib import Path
  from typing import Dict, List, Optional, Tuple, Union

- from vision_agent import LLM, LMM, OpenAILLM
+ from vision_agent.llm import LLM, OpenAILLM
+ from vision_agent.lmm import LMM

  from .agent import Agent
  from .reflexion_prompts import (
@@ -114,7 +115,7 @@ class Reflexion(Agent):
          self.reflect_prompt = reflect_prompt
          self.finsh_prompt = finsh_prompt
          self.cot_examples = cot_examples
-         self.refelct_examples = reflect_examples
+         self.reflect_examples = reflect_examples
          self.reflections: List[str] = []
          if verbose:
              _LOGGER.setLevel(logging.INFO)
@@ -273,7 +274,7 @@ class Reflexion(Agent):
          self, question: str, context: str = "", scratchpad: str = ""
      ) -> str:
          return self.reflect_prompt.format(
-             examples=self.refelct_examples,
+             examples=self.reflect_examples,
              context=context,
              question=question,
              scratchpad=scratchpad,
vision_agent/tools/__init__.py CHANGED
@@ -1,2 +1,2 @@
  from .prompts import CHOOSE_PARAMS, SYSTEM_PROMPT
- from .tools import CLIP, TOOLS, GroundingDINO, GroundingSAM, Tool
+ from .tools import CLIP, TOOLS, Counter, Crop, GroundingDINO, GroundingSAM, Tool
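Counter and Crop are now importable directly from vision_agent.tools; a quick smoke test that touches no network endpoints::

    from vision_agent.tools import TOOLS, Counter, Crop

    print(Counter.name, Crop.name)  # counter_ crop_
    print(sorted(v["name"] for v in TOOLS.values()))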
vision_agent/tools/tools.py CHANGED
@@ -1,10 +1,13 @@
  import logging
+ import tempfile
  from abc import ABC
+ from collections import Counter as CounterClass
  from pathlib import Path
  from typing import Any, Dict, List, Tuple, Union, cast

  import numpy as np
  import requests
+ from PIL import Image
  from PIL.Image import Image as ImageType

  from vision_agent.image_utils import convert_to_b64, get_image_size
@@ -52,19 +55,16 @@ class CLIP(Tool):
      or tags.

      Examples::
-         >>> from vision_agent.tools import tools
-         >>> t = tools.CLIP(["red line", "yellow dot", "none"])
-         >>> t("examples/img/ct_scan1.jpg"))
-         >>> [[0.02567436918616295, 0.9534115791320801, 0.020914122462272644]]
+         >>> import vision_agent as va
+         >>> clip = va.tools.CLIP()
+         >>> clip(["red line", "yellow dot"], "ct_scan1.jpg")
+         >>> [{"labels": ["red line", "yellow dot"], "scores": [0.98, 0.02]}]
      """

      _ENDPOINT = "https://rb4ii6dfacmwqfxivi4aedyyfm0endsv.lambda-url.us-east-2.on.aws"

      name = "clip_"
-     description = (
-         "'clip_' is a tool that can classify or tag any image given a set if input classes or tags."
-         "Here are some exmaples of how to use the tool, the examples are in the format of User Question: which will have the user's question in quotes followed by the parameters in JSON format, which is the parameters you need to output to call the API to solve the user's question.\n"
-     )
+     description = "'clip_' is a tool that can classify or tag any image given a set of input classes or tags."
      usage = {
          "required_parameters": [
              {"name": "prompt", "type": "List[str]"},
@@ -106,22 +106,30 @@ class CLIP(Tool):
          ) or "statusCode" not in resp_json:
              _LOGGER.error(f"Request failed: {resp_json}")
              raise ValueError(f"Request failed: {resp_json}")
-         return cast(List[Dict], resp_json["data"])
+
+         rets = []
+         for elt in resp_json["data"]:
+             rets.append({"labels": prompt, "scores": [round(prob, 2) for prob in elt]})
+         return cast(List[Dict], rets)


  class GroundingDINO(Tool):
+     r"""Grounding DINO is a tool that can detect arbitrary objects with inputs such as
+     category names or referring expressions.
+
+     Examples::
+         >>> import vision_agent as va
+         >>> t = va.tools.GroundingDINO()
+         >>> t("red line. yellow dot", "ct_scan1.jpg")
+         >>> [{'labels': ['red line', 'yellow dot'],
+         >>> 'bboxes': [[0.38, 0.15, 0.59, 0.7], [0.48, 0.25, 0.69, 0.71]],
+         >>> 'scores': [0.98, 0.02]}]
+     """
+
      _ENDPOINT = "https://chnicr4kes5ku77niv2zoytggq0qyqlp.lambda-url.us-east-2.on.aws"

      name = "grounding_dino_"
-     description = (
-         "'grounding_dino_' is a tool that can detect arbitrary objects with inputs such as category names or referring expressions."
-         "Here are some exmaples of how to use the tool, the examples are in the format of User Question: which will have the user's question in quotes followed by the parameters in JSON format, which is the parameters you need to output to call the API to solve the user's question.\n"
-         "The tool returns a list of dictionaries, each containing the following keys:\n"
-         ' - "label": The label of the detected object.\n'
-         ' - "score": The confidence score of the detection.\n'
-         ' - "bbox": The bounding box of the detected object. The box coordinates are normalize to [0, 1]\n'
-         'An example output would be: [{"label": ["car"], "score": [0.99], "bbox": [[0.1, 0.2, 0.3, 0.4]]}]\n'
-     )
+     description = "'grounding_dino_' is a tool that can detect arbitrary objects with inputs such as category names or referring expressions."
      usage = {
          "required_parameters": [
              {"name": "prompt", "type": "str"},
@@ -180,27 +188,27 @@ class GroundingSAM(Tool):
      inputs such as category names or referring expressions.

      Examples::
-         >>> from vision_agent.tools import tools
-         >>> t = tools.GroundingSAM(["red line", "yellow dot", "none"])
-         >>> t("examples/img/ct_scan1.jpg")
-         >>> [{'label': 'none', 'mask': array([[0, 0, 0, ..., 0, 0, 0],
+         >>> import vision_agent as va
+         >>> t = va.tools.GroundingSAM()
+         >>> t(["red line", "yellow dot"], "ct_scan1.jpg")
+         >>> [{'labels': ['yellow dot', 'red line'],
+         >>> 'bboxes': [[0.38, 0.15, 0.59, 0.7], [0.48, 0.25, 0.69, 0.71]],
+         >>> 'masks': [array([[0, 0, 0, ..., 0, 0, 0],
          >>> [0, 0, 0, ..., 0, 0, 0],
          >>> ...,
          >>> [0, 0, 0, ..., 0, 0, 0],
-         >>> [0, 0, 0, ..., 0, 0, 0]], dtype=uint8)}, {'label': 'red line', 'mask': array([[0, 0, 0, ..., 0, 0, 0],
+         >>> [0, 0, 0, ..., 0, 0, 0]], dtype=uint8)},
+         >>> array([[0, 0, 0, ..., 0, 0, 0],
          >>> [0, 0, 0, ..., 0, 0, 0],
          >>> ...,
          >>> [1, 1, 1, ..., 1, 1, 1],
-         >>> [1, 1, 1, ..., 1, 1, 1]], dtype=uint8)}]
+         >>> [1, 1, 1, ..., 1, 1, 1]], dtype=uint8)]}]
      """

      _ENDPOINT = "https://cou5lfmus33jbddl6hoqdfbw7e0qidrw.lambda-url.us-east-2.on.aws"

      name = "grounding_sam_"
-     description = (
-         "'grounding_sam_' is a tool that can detect and segment arbitrary objects with inputs such as category names or referring expressions."
-         "Here are some exmaples of how to use the tool, the examples are in the format of User Question: which will have the user's question in quotes followed by the parameters in JSON format, which is the parameters you need to output to call the API to solve the user's question.\n"
-     )
+     description = "'grounding_sam_' is a tool that can detect and segment arbitrary objects with inputs such as category names or referring expressions."
      usage = {
          "required_parameters": [
              {"name": "prompt", "type": "List[str]"},
@@ -226,6 +234,7 @@ class GroundingSAM(Tool):
      }

      def __call__(self, prompt: List[str], image: Union[str, ImageType]) -> List[Dict]:
+         image_size = get_image_size(image)
          image_b64 = convert_to_b64(image)
          data = {
              "classes": prompt,
@@ -243,24 +252,100 @@ class GroundingSAM(Tool):
              _LOGGER.error(f"Request failed: {resp_json}")
              raise ValueError(f"Request failed: {resp_json}")
          resp_data = resp_json["data"]
-         preds = []
+         ret_pred: Dict[str, List] = {"labels": [], "bboxes": [], "masks": []}
          for pred in resp_data["preds"]:
              encoded_mask = pred["encoded_mask"]
              mask = rle_decode(mask_rle=encoded_mask, shape=pred["mask_shape"])
-             preds.append(
-                 {
-                     "label": pred["label_name"],
-                     "mask": mask,
-                 }
-             )
-         return preds
+             ret_pred["labels"].append(pred["label_name"])
+             ret_pred["bboxes"].append(normalize_bbox(pred["bbox"], image_size))
+             ret_pred["masks"].append(mask)
+         ret_preds = [ret_pred]
+         return ret_preds
+
+
+ class AgentGroundingSAM(GroundingSAM):
+     r"""AgentGroundingSAM is the same as GroundingSAM but it saves the masks as files
+     and returns the file names. This makes it easier for agents to use.
+     """
+
+     def __call__(self, prompt: List[str], image: Union[str, ImageType]) -> List[Dict]:
+         rets = super().__call__(prompt, image)
+         for ret in rets:
+             mask_files = []
+             for mask in ret["masks"]:
+                 with tempfile.NamedTemporaryFile(suffix=".jpg", delete=False) as tmp:
+                     Image.fromarray(mask * 255).save(tmp)
+                     mask_files.append(tmp.name)
+             ret["masks"] = mask_files
+         return rets
+
+
+ class Counter(Tool):
+     name = "counter_"
+     description = "'counter_' detects and counts the number of objects in an image given an input such as a category name or referring expression."
+     usage = {
+         "required_parameters": [
+             {"name": "prompt", "type": "str"},
+             {"name": "image", "type": "str"},
+         ],
+         "examples": [
+             {
+                 "scenario": "Can you count the number of cars in this image? Image name image.jpg",
+                 "parameters": {"prompt": "car", "image": "image.jpg"},
+             },
+             {
+                 "scenario": "Can you count the number of people? Image name: people.png",
+                 "parameters": {"prompt": "person", "image": "people.png"},
+             },
+         ],
+     }
+
+     def __call__(self, prompt: str, image: Union[str, ImageType]) -> Dict:
+         resp = GroundingDINO()(prompt, image)
+         return dict(CounterClass(resp[0]["labels"]))
+
+
+ class Crop(Tool):
+     name = "crop_"
+     description = "'crop_' crops an image given a bounding box and returns a file name of the cropped image."
+     usage = {
+         "required_parameters": [
+             {"name": "bbox", "type": "List[float]"},
+             {"name": "image", "type": "str"},
+         ],
+         "examples": [
+             {
+                 "scenario": "Can you crop the image to the bounding box [0.1, 0.1, 0.9, 0.9]? Image name: image.jpg",
+                 "parameters": {"bbox": [0.1, 0.1, 0.9, 0.9], "image": "image.jpg"},
+             },
+             {
+                 "scenario": "Cut out the image to the bounding box [0.2, 0.2, 0.8, 0.8]. Image name: car.jpg",
+                 "parameters": {"bbox": [0.2, 0.2, 0.8, 0.8], "image": "car.jpg"},
+             },
+         ],
+     }
+
+     def __call__(self, bbox: List[float], image: Union[str, Path]) -> str:
+         pil_image = Image.open(image)
+         width, height = pil_image.size
+         bbox = [
+             int(bbox[0] * width),
+             int(bbox[1] * height),
+             int(bbox[2] * width),
+             int(bbox[3] * height),
+         ]
+         cropped_image = pil_image.crop(bbox) # type: ignore
+         with tempfile.NamedTemporaryFile(suffix=".jpg", delete=False) as tmp:
+             cropped_image.save(tmp.name)
+
+         return tmp.name


  class Add(Tool):
      name = "add_"
      description = "'add_' returns the sum of all the arguments passed to it, normalized to 2 decimal places."
      usage = {
-         "required_parameters": {"name": "input", "type": "List[int]"},
+         "required_parameters": [{"name": "input", "type": "List[int]"}],
          "examples": [
              {
                  "scenario": "If you want to calculate 2 + 4",
@@ -277,7 +362,7 @@ class Subtract(Tool):
      name = "subtract_"
      description = "'subtract_' returns the difference of all the arguments passed to it, normalized to 2 decimal places."
      usage = {
-         "required_parameters": {"name": "input", "type": "List[int]"},
+         "required_parameters": [{"name": "input", "type": "List[int]"}],
          "examples": [
              {
                  "scenario": "If you want to calculate 4 - 2",
@@ -294,7 +379,7 @@ class Multiply(Tool):
      name = "multiply_"
      description = "'multiply_' returns the product of all the arguments passed to it, normalized to 2 decimal places."
      usage = {
-         "required_parameters": {"name": "input", "type": "List[int]"},
+         "required_parameters": [{"name": "input", "type": "List[int]"}],
          "examples": [
              {
                  "scenario": "If you want to calculate 2 * 4",
@@ -311,7 +396,7 @@ class Divide(Tool):
      name = "divide_"
      description = "'divide_' returns the division of all the arguments passed to it, normalized to 2 decimal places."
      usage = {
-         "required_parameters": {"name": "input", "type": "List[int]"},
+         "required_parameters": [{"name": "input", "type": "List[int]"}],
          "examples": [
              {
                  "scenario": "If you want to calculate 4 / 2",
@@ -327,7 +412,17 @@
  TOOLS = {
      i: {"name": c.name, "description": c.description, "usage": c.usage, "class": c}
      for i, c in enumerate(
-         [CLIP, GroundingDINO, GroundingSAM, Add, Subtract, Multiply, Divide]
+         [
+             CLIP,
+             GroundingDINO,
+             AgentGroundingSAM,
+             Counter,
+             Crop,
+             Add,
+             Subtract,
+             Multiply,
+             Divide,
+         ]
      )
      if (hasattr(c, "name") and hasattr(c, "description") and hasattr(c, "usage"))
  }
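The integer-keyed registry is what the agent prompts are built from, with AgentGroundingSAM standing in for GroundingSAM so agents receive mask file names instead of raw arrays. A sketch of feeding the registry through format_tools (defined earlier in this diff) with the same ID-to-description mapping that retrieval builds::

    from vision_agent.agent.easytool import format_tools
    from vision_agent.tools import TOOLS

    descriptions = {k: v["description"] for k, v in TOOLS.items()}
    print(format_tools(descriptions))
    # ID: 0, 'clip_' is a tool that can classify or tag any image ...
    # ID: 1, 'grounding_dino_' is a tool that can detect arbitrary objects ...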
{vision_agent-0.0.30.dist-info → vision_agent-0.0.32.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: vision-agent
- Version: 0.0.30
+ Version: 0.0.32
  Summary: Toolset for Vision Agent
  Author: Landing AI
  Author-email: dev@landing.ai
{vision_agent-0.0.30.dist-info → vision_agent-0.0.32.dist-info}/RECORD CHANGED
@@ -1,9 +1,9 @@
- vision_agent/__init__.py,sha256=utvPXq_buLtsFRTYjfefkrGOpKG92h21wwmcdVop9O8,229
- vision_agent/agent/__init__.py,sha256=Ee-L9w7UMH8Vkkbe8OPl05qXdRhRMoX3cuxid4n3zTc,89
+ vision_agent/__init__.py,sha256=wD1cssVTAJ55uTViNfBGooqJUV0p9fmVAuTMHHrmUBU,229
+ vision_agent/agent/__init__.py,sha256=WZrGKMglRlwy0xL77Kr7S0Lrk696gOYI5CeaiJNRmlw,89
  vision_agent/agent/agent.py,sha256=PRLItaPfMc94H6mAIPj_gBvJ8RezDEPanB6Cmu81A0M,306
- vision_agent/agent/easytool.py,sha256=4zGZ20m2DdT96oLPPrwPc4mLrlGbFVvNvaF28svFThU,10042
+ vision_agent/agent/easytool.py,sha256=LhilXcYnhcayALE_EVPegzNWGNMnTKH0-9QFtc7xtVg,11175
  vision_agent/agent/easytool_prompts.py,sha256=tTuWwthYVELTVH3iLaZUbtX0MJsYG5TWC7Pim844cJ4,4505
- vision_agent/agent/reflexion.py,sha256=waCiZRH7Ph6cSdMaHIdq4MGBZyx-zeByoz5PcpYRjJ4,9742
+ vision_agent/agent/reflexion.py,sha256=DXxFenwlZSPUKsA4vkoZ-xcdKF4YVReEZWcMKgFdfWg,9774
  vision_agent/agent/reflexion_prompts.py,sha256=UPGkt_qgHBMUY0VPVoF-BqhR0d_6WPjjrhbYLBYOtnQ,9342
  vision_agent/data/__init__.py,sha256=YU-5g3LbEQ6a4drz0RLGTagXMVU2Z4Xr3RlfWE-R0jU,46
  vision_agent/data/data.py,sha256=pgtSGZdAnbQ8oGsuapLtFTMPajnCGDGekEXTnFuBwsY,5122
@@ -14,10 +14,10 @@ vision_agent/llm/__init__.py,sha256=fBKsIjL4z08eA0QYx6wvhRe4Nkp2pJ4VrZK0-uUL5Ec,
  vision_agent/llm/llm.py,sha256=d8A7jmLVGx5HzoiYJ75mTMU7dbD5-bOYeXYlHaay6WA,3957
  vision_agent/lmm/__init__.py,sha256=I8mbeNUajTfWVNqLsuFQVOaNBDlkIhYp9DFU8H4kB7g,51
  vision_agent/lmm/lmm.py,sha256=ARcbgkcyP83TbVVoXI9B-gtG0gJuTaG_MjcUGbams4U,8052
- vision_agent/tools/__init__.py,sha256=vI27yWOxMzW-l9C4Sa6ZESBdivXJj6ettMwjlzeXjk0,116
+ vision_agent/tools/__init__.py,sha256=aX0pU3pXU1V0Cj9FzYCvdsX76TAglFMHx59kNhXHbPs,131
  vision_agent/tools/prompts.py,sha256=9RBbyqlNlExsGKlJ89Jkph83DAEJ8PCVGaHoNbyN7TM,1416
- vision_agent/tools/tools.py,sha256=FlRA6d-PGwaIVI-7kG-S2QCK7nV_FE2QQBsrCYvfOJw,12504
- vision_agent-0.0.30.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
- vision_agent-0.0.30.dist-info/METADATA,sha256=8RWYVhncLEp-zTcslILJX4Xwf_VIee8TaF8cQ-gvNbY,4384
- vision_agent-0.0.30.dist-info/WHEEL,sha256=7Z8_27uaHI_UZAc4Uox4PpBhQ9Y5_modZXWMxtUi4NU,88
- vision_agent-0.0.30.dist-info/RECORD,,
+ vision_agent/tools/tools.py,sha256=ZCak2hERXBR3SAy5zLe165PIoQQX_TykvO9ve8ZXvYY,15140
+ vision_agent-0.0.32.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ vision_agent-0.0.32.dist-info/METADATA,sha256=oQdX4D0FzapLeEyRQF_-YmSxgDYrdITkVTI89KSlJu0,4384
+ vision_agent-0.0.32.dist-info/WHEEL,sha256=7Z8_27uaHI_UZAc4Uox4PpBhQ9Y5_modZXWMxtUi4NU,88
+ vision_agent-0.0.32.dist-info/RECORD,,