vision-agent 0.2.94__py3-none-any.whl → 0.2.96__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
vision_agent/tools/tools.py CHANGED
@@ -9,7 +9,6 @@ from typing import Any, Dict, List, Optional, Tuple, Union, cast
 import cv2
 import numpy as np
 import requests
-from moviepy.editor import ImageSequenceClip
 from PIL import Image, ImageDraw, ImageFont
 from pillow_heif import register_heif_opener  # type: ignore
 from pytube import YouTube  # type: ignore
@@ -107,6 +106,7 @@ def grounding_dino(
             "visual_grounding" if model_size == "large" else "visual_grounding_tiny"
         ),
         "kwargs": {"box_threshold": box_threshold, "iou_threshold": iou_threshold},
+        "function_name": "grounding_dino",
     }
     data: Dict[str, Any] = send_inference_request(request_data, "tools")
     return_data = []
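
The change repeated throughout this file adds a "function_name" field to each tool's request payload, so the inference endpoint can attribute a request to the public tool that issued it. A minimal sketch of the resulting payload shape, with a hypothetical base64 helper standing in for the library's real image-encoding utility:

    import base64

    import cv2
    import numpy as np

    def to_b64(image: np.ndarray) -> str:
        # Hypothetical helper: JPEG-encode the frame, then base64 it.
        ok, buf = cv2.imencode(".jpg", image)
        return base64.b64encode(buf.tobytes()).decode("utf-8")

    image = np.zeros((32, 32, 3), dtype=np.uint8)
    request_data = {
        "prompt": "person",
        "image": to_b64(image),
        "tool": "visual_grounding",
        "kwargs": {"box_threshold": 0.2, "iou_threshold": 0.2},
        "function_name": "grounding_dino",  # new in this release
    }

The same one-line addition appears in every tool below, each passing its own name.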
@@ -162,6 +162,7 @@ def owl_v2(
         "image": image_b64,
         "tool": "open_vocab_detection",
         "kwargs": {"box_threshold": box_threshold, "iou_threshold": iou_threshold},
+        "function_name": "owl_v2",
     }
     data: Dict[str, Any] = send_inference_request(request_data, "tools")
     return_data = []
@@ -226,6 +227,7 @@ def grounding_sam(
         "image": image_b64,
         "tool": "visual_grounding_segment",
         "kwargs": {"box_threshold": box_threshold, "iou_threshold": iou_threshold},
+        "function_name": "grounding_sam",
     }
     data: Dict[str, Any] = send_inference_request(request_data, "tools")
     return_data = []
@@ -365,6 +367,7 @@ def loca_zero_shot_counting(image: np.ndarray) -> Dict[str, Any]:
     data = {
         "image": image_b64,
         "tool": "zero_shot_counting",
+        "function_name": "loca_zero_shot_counting",
     }
     resp_data = send_inference_request(data, "tools")
     resp_data["heat_map"] = np.array(b64_to_pil(resp_data["heat_map"][0]))
@@ -400,6 +403,7 @@ def loca_visual_prompt_counting(
         "image": image_b64,
         "prompt": bbox_str,
         "tool": "few_shot_counting",
+        "function_name": "loca_visual_prompt_counting",
     }
     resp_data = send_inference_request(data, "tools")
     resp_data["heat_map"] = np.array(b64_to_pil(resp_data["heat_map"][0]))
@@ -429,6 +433,7 @@ def florencev2_roberta_vqa(prompt: str, image: np.ndarray) -> str:
         "image": image_b64,
         "prompt": prompt,
         "tool": "image_question_answering_with_context",
+        "function_name": "florencev2_roberta_vqa",
     }

     answer = send_inference_request(data, "tools")
@@ -458,6 +463,7 @@ def git_vqa_v2(prompt: str, image: np.ndarray) -> str:
         "image": image_b64,
         "prompt": prompt,
         "tool": "image_question_answering",
+        "function_name": "git_vqa_v2",
     }

     answer = send_inference_request(data, "tools")
@@ -488,6 +494,7 @@ def clip(image: np.ndarray, classes: List[str]) -> Dict[str, Any]:
         "prompt": ",".join(classes),
         "image": image_b64,
         "tool": "closed_set_image_classification",
+        "function_name": "clip",
     }
     resp_data = send_inference_request(data, "tools")
     resp_data["scores"] = [round(prob, 4) for prob in resp_data["scores"]]
@@ -515,6 +522,7 @@ def vit_image_classification(image: np.ndarray) -> Dict[str, Any]:
     data = {
         "image": image_b64,
         "tool": "image_classification",
+        "function_name": "vit_image_classification",
     }
     resp_data = send_inference_request(data, "tools")
     resp_data["scores"] = [round(prob, 4) for prob in resp_data["scores"]]
@@ -542,6 +550,7 @@ def vit_nsfw_classification(image: np.ndarray) -> Dict[str, Any]:
     data = {
         "image": image_b64,
         "tool": "nsfw_image_classification",
+        "function_name": "vit_nsfw_classification",
     }
     resp_data = send_inference_request(data, "tools")
     resp_data["scores"] = round(resp_data["scores"], 4)
@@ -568,6 +577,7 @@ def blip_image_caption(image: np.ndarray) -> str:
     data = {
         "image": image_b64,
         "tool": "image_captioning",
+        "function_name": "blip_image_caption",
     }

     answer = send_inference_request(data, "tools")
@@ -596,6 +606,7 @@ def florencev2_image_caption(image: np.ndarray, detail_caption: bool = True) ->
         "image": image_b64,
         "tool": "florence2_image_captioning",
         "detail_caption": detail_caption,
+        "function_name": "florencev2_image_caption",
     }

     answer = send_inference_request(data, "tools")
@@ -631,6 +642,7 @@ def florencev2_object_detection(image: np.ndarray) -> List[Dict[str, Any]]:
     data = {
         "image": image_b64,
         "tool": "object_detection",
+        "function_name": "florencev2_object_detection",
     }

     answer = send_inference_request(data, "tools")
@@ -687,6 +699,7 @@ def detr_segmentation(image: np.ndarray) -> List[Dict[str, Any]]:
     data = {
         "image": image_b64,
         "tool": "panoptic_segmentation",
+        "function_name": "detr_segmentation",
     }

     answer = send_inference_request(data, "tools")
@@ -729,6 +742,7 @@ def depth_anything_v2(image: np.ndarray) -> np.ndarray:
     data = {
         "image": image_b64,
         "tool": "generate_depth",
+        "function_name": "depth_anything_v2",
     }

     answer = send_inference_request(data, "tools")
@@ -760,6 +774,7 @@ def generate_soft_edge_image(image: np.ndarray) -> np.ndarray:
     data = {
         "image": image_b64,
         "tool": "generate_hed",
+        "function_name": "generate_soft_edge_image",
     }

     answer = send_inference_request(data, "tools")
@@ -792,6 +807,7 @@ def dpt_hybrid_midas(image: np.ndarray) -> np.ndarray:
     data = {
         "image": image_b64,
         "tool": "generate_normal",
+        "function_name": "dpt_hybrid_midas",
     }

     answer = send_inference_request(data, "tools")
@@ -823,6 +839,7 @@ def generate_pose_image(image: np.ndarray) -> np.ndarray:
     data = {
         "image": image_b64,
         "tool": "generate_pose",
+        "function_name": "generate_pose_image",
     }

     answer = send_inference_request(data, "tools")
@@ -863,6 +880,7 @@ def template_match(
         "image": image_b64,
         "template": template_image_b64,
         "tool": "template_match",
+        "function_name": "template_match",
     }

     answer = send_inference_request(data, "tools")
@@ -1044,15 +1062,21 @@ def save_video(
     if fps <= 0:
         _LOGGER.warning(f"Invalid fps value: {fps}. Setting fps to 4 (default value).")
         fps = 4
-    with ImageSequenceClip(frames, fps=fps) as video:
-        if output_video_path:
-            f = open(output_video_path, "wb")
-        else:
-            f = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False)  # type: ignore
-        video.write_videofile(f.name, codec="libx264")
-        f.close()
-        _save_video_to_result(f.name)
-        return f.name
+
+    if not output_video_path:
+        output_video_path = tempfile.NamedTemporaryFile(
+            suffix=".mp4", delete=False
+        ).name
+
+    height, width, layers = frames[0].shape if frames else (0, 0, 0)
+    fourcc = cv2.VideoWriter_fourcc(*"mp4v")  # type: ignore
+    video = cv2.VideoWriter(output_video_path, fourcc, fps, (width, height))
+    for frame in frames:
+        video.write(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
+    video.release()
+
+    _save_video_to_result(output_video_path)
+    return output_video_path


 def _save_video_to_result(video_uri: str) -> None:
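
save_video now writes frames directly with OpenCV's VideoWriter instead of routing them through moviepy's ImageSequenceClip. A standalone sketch of the same technique (frame size, count, and contents are illustrative):

    import tempfile

    import cv2
    import numpy as np

    frames = [np.zeros((240, 320, 3), dtype=np.uint8) for _ in range(8)]
    fps = 4

    out_path = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False).name
    height, width, _ = frames[0].shape
    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    writer = cv2.VideoWriter(out_path, fourcc, fps, (width, height))
    for frame in frames:
        # OpenCV expects BGR channel order; the frames here are RGB.
        writer.write(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
    writer.release()

One trade-off: the "mp4v" fourcc yields MPEG-4 Part 2 video, whereas moviepy's codec="libx264" produced H.264, which browsers play more reliably.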
vision_agent/utils/execute.py CHANGED
@@ -1,6 +1,5 @@
 import abc
 import base64
-import copy
 import logging
 import os
 import platform
@@ -17,9 +16,14 @@ from typing import Any, Dict, Iterable, List, Optional, Union
 import nbformat
 import tenacity
 from dotenv import load_dotenv
+from e2b.exceptions import SandboxException
 from e2b_code_interpreter import CodeInterpreter as E2BCodeInterpreterImpl
 from e2b_code_interpreter import Execution as E2BExecution
 from e2b_code_interpreter import Result as E2BResult
+from h11._util import LocalProtocolError
+from httpx import ConnectError
+from httpx import RemoteProtocolError as HttpcoreRemoteProtocolError
+from httpx import RemoteProtocolError as HttpxRemoteProtocolError
 from nbclient import NotebookClient
 from nbclient import __version__ as nbclient_version
 from nbclient.exceptions import CellTimeoutError, DeadKernelError
@@ -29,7 +33,6 @@ from pydantic import BaseModel, field_serializer
 from typing_extensions import Self

 from vision_agent.utils.exceptions import (
-    RemoteSandboxClosedError,
     RemoteSandboxCreationError,
     RemoteSandboxExecutionError,
 )
@@ -106,13 +109,8 @@ class Result:
     is_main_result: bool
     "Whether this data is the result of the cell. Data can be produced by display calls of which can be multiple in a cell."

-    raw: Dict[str, str]
-    "Dictionary that maps MIME types to their corresponding string representations of the data."
-
     def __init__(self, is_main_result: bool, data: Dict[str, Any]):
         self.is_main_result = is_main_result
-        self.raw = copy.deepcopy(data)
-
         self.text = data.pop(MimeType.TEXT_PLAIN, None)
         if self.text and (self.text.startswith("'") and self.text.endswith("'")):
             # This is a workaround for the issue that str result is wrapped with single quotes by notebook.
@@ -136,13 +134,13 @@

     # Allows to iterate over formats()
     def __getitem__(self, key: Any) -> Any:
-        return self.raw[key] if key in self.raw else getattr(self, key)
+        return getattr(self, key)

     def __str__(self) -> str:
         return repr(self)

     def __repr__(self) -> str:
-        return str(self.raw)
+        return str(self.text)

     def _repr_html_(self) -> Optional[str]:
         """Returns the HTML representation of the data."""
@@ -215,9 +213,16 @@ class Result:
         """
         Creates a Result object from an E2BResult object.
        """
+        data = {
+            MimeType.TEXT_PLAIN.value: result.text,
+            MimeType.IMAGE_PNG.value: result.png,
+            MimeType.APPLICATION_JSON.value: result.json,
+        }
+        for k, v in result.extra.items():
+            data[k] = v
         return Result(
             is_main_result=result.is_main_result,
-            data=result.raw,
+            data=data,
         )

@@ -367,7 +372,7 @@ class Execution(BaseModel):
                     value=_remove_escape_and_color_codes(exec.error.value),
                     traceback_raw=[
                         _remove_escape_and_color_codes(line)
-                        for line in exec.error.traceback_raw
+                        for line in exec.error.traceback.split("\n")
                     ],
                 )
                 if exec.error
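
traceback_raw is now rebuilt by splitting the error's traceback string on newlines and scrubbing each line of terminal escape codes. The scrubber itself sits outside this hunk; a plausible sketch of such a helper (the regex is an assumption, not necessarily the library's implementation):

    import re

    # Assumed pattern: match ANSI color/control sequences like "\x1b[0;31m".
    _ANSI_RE = re.compile(r"\x1b\[[0-9;]*[A-Za-z]")

    def _remove_escape_and_color_codes(line: str) -> str:
        return _ANSI_RE.sub("", line)

    tb = "\x1b[0;31mZeroDivisionError\x1b[0m: division by zero"
    print([_remove_escape_and_color_codes(line) for line in tb.split("\n")])
    # ['ZeroDivisionError: division by zero']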
@@ -436,11 +441,12 @@ va_version = importlib.metadata.version("vision-agent")
 print(f"Vision Agent version: {va_version}")"""
         )
         sys_versions = "\n".join(result.logs.stdout)
-        _LOGGER.info(f"E2BCodeInterpreter initialized:\n{sys_versions}")
+        _LOGGER.info(
+            f"E2BCodeInterpreter (sandbox id: {self.interpreter.sandbox_id}) initialized:\n{sys_versions}"
+        )

     def close(self, *args: Any, **kwargs: Any) -> None:
         try:
-            self.interpreter.notebook.close()
             self.interpreter.kill(request_timeout=2)
             _LOGGER.info(
                 f"The sandbox {self.interpreter.sandbox_id} is closed successfully."
@@ -451,28 +457,67 @@ print(f"Vision Agent version: {va_version}")"""
        )

     def restart_kernel(self) -> None:
-        self._check_sandbox_liveness()
         self.interpreter.notebook.restart_kernel()

     @tenacity.retry(
         wait=tenacity.wait_exponential_jitter(),
-        stop=tenacity.stop_after_attempt(2),
-        # TODO: change TimeoutError to a more specific exception when e2b team provides more granular retryable exceptions
-        retry=tenacity.retry_if_exception_type(TimeoutError),
+        stop=tenacity.stop_after_attempt(3),
+        retry=tenacity.retry_if_exception_type(
+            (
+                LocalProtocolError,
+                HttpxRemoteProtocolError,
+                HttpcoreRemoteProtocolError,
+                ConnectError,
+                SandboxException,
+            )
+        ),
+        before_sleep=tenacity.before_sleep_log(_LOGGER, logging.INFO),
+        after=tenacity.after_log(_LOGGER, logging.INFO),
     )
     def exec_cell(self, code: str) -> Execution:
-        self._check_sandbox_liveness()
         self.interpreter.set_timeout(_SESSION_TIMEOUT)  # Extend the life of the sandbox
         try:
-            execution = self.interpreter.notebook.exec_cell(code, timeout=self.timeout)
+            _LOGGER.info(
+                f"Start code execution in remote sandbox {self.interpreter.sandbox_id}. Timeout: {_SESSION_TIMEOUT}. Code hash: {hash(code)}"
+            )
+            execution = self.interpreter.notebook.exec_cell(
+                code=code,
+                on_stdout=lambda msg: _LOGGER.info(msg),
+                on_stderr=lambda msg: _LOGGER.info(msg),
+            )
+            _LOGGER.info(
+                f"Finished code execution in remote sandbox {self.interpreter.sandbox_id}. Code hash: {hash(code)}"
+            )
             return Execution.from_e2b_execution(execution)
+        except (
+            LocalProtocolError,
+            HttpxRemoteProtocolError,
+            HttpcoreRemoteProtocolError,
+            ConnectError,
+            SandboxException,
+        ) as e:
+            raise e
         except Exception as e:
             raise RemoteSandboxExecutionError(
-                f"Failed executing code in remote sandbox due to {e}: {code}"
+                f"Failed executing code in remote sandbox ({self.interpreter.sandbox_id}) due to error '{type(e).__name__} {str(e)}', code: {code}"
             ) from e

+    @tenacity.retry(
+        wait=tenacity.wait_exponential_jitter(),
+        stop=tenacity.stop_after_attempt(3),
+        retry=tenacity.retry_if_exception_type(
+            (
+                LocalProtocolError,
+                HttpxRemoteProtocolError,
+                HttpcoreRemoteProtocolError,
+                ConnectError,
+                SandboxException,
+            )
+        ),
+        before_sleep=tenacity.before_sleep_log(_LOGGER, logging.INFO),
+        after=tenacity.after_log(_LOGGER, logging.INFO),
+    )
     def upload_file(self, file: Union[str, Path]) -> str:
-        self._check_sandbox_liveness()
         file_name = Path(file).name
         remote_path = f"/home/user/{file_name}"
         with open(file, "rb") as f:
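
The retry policy above replaces the old blanket TimeoutError with a tuple of transport-level exceptions, raises the attempt count from two to three, and logs around each backoff. A self-contained sketch of the same tenacity configuration, with an illustrative always-failing function standing in for the sandbox call:

    import logging

    import tenacity
    from httpx import ConnectError

    logging.basicConfig(level=logging.INFO)
    _LOGGER = logging.getLogger(__name__)

    @tenacity.retry(
        wait=tenacity.wait_exponential_jitter(),
        stop=tenacity.stop_after_attempt(3),
        # Retry only transient transport failures; anything else surfaces
        # immediately, mirroring the except/re-raise split in exec_cell.
        retry=tenacity.retry_if_exception_type((ConnectError,)),
        before_sleep=tenacity.before_sleep_log(_LOGGER, logging.INFO),
        after=tenacity.after_log(_LOGGER, logging.INFO),
    )
    def flaky_call() -> str:
        raise ConnectError("connection refused")  # always fails, for demo

    # flaky_call() exhausts three attempts (logging before each sleep) and
    # then raises tenacity.RetryError wrapping the last ConnectError.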
@@ -481,28 +526,18 @@ print(f"Vision Agent version: {va_version}")"""
         return remote_path

     def download_file(self, file_path: str) -> Path:
-        self._check_sandbox_liveness()
         with tempfile.NamedTemporaryFile(mode="w+b", delete=False) as file:
             file.write(self.interpreter.files.read(path=file_path, format="bytes"))
             _LOGGER.info(f"File ({file_path}) is downloaded to: {file.name}")
             return Path(file.name)

-    def _check_sandbox_liveness(self) -> None:
-        try:
-            alive = self.interpreter.is_running(request_timeout=2)
-        except Exception as e:
-            _LOGGER.error(
-                f"Failed to check the health of the remote sandbox ({self.interpreter.sandbox_id}) due to {e}. Consider the sandbox as dead."
-            )
-            alive = False
-        if not alive:
-            raise RemoteSandboxClosedError(
-                "Remote sandbox is closed unexpectedly. Please start a new VisionAgent instance."
-            )
-
     @staticmethod
     def _new_e2b_interpreter_impl(*args, **kwargs) -> E2BCodeInterpreterImpl:  # type: ignore
-        return E2BCodeInterpreterImpl(template="va-sandbox", *args, **kwargs)
+        template_name = os.environ.get("E2B_TEMPLATE_NAME", "nx3fagq7sgdliww9cvm3")
+        _LOGGER.info(
+            f"Creating a new E2BCodeInterpreter using template: {template_name}"
+        )
+        return E2BCodeInterpreterImpl(template=template_name, *args, **kwargs)


 class LocalCodeInterpreter(CodeInterpreter):
vision_agent/utils/sim.py CHANGED
@@ -9,6 +9,7 @@ from openai import AzureOpenAI, Client, OpenAI
 from scipy.spatial.distance import cosine  # type: ignore


+@lru_cache(maxsize=512)
 def get_embedding(
     client: Client, text: str, model: str = "text-embedding-3-small"
 ) -> List[float]:
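
Because lru_cache keys on the full argument tuple (client, text, model), repeated lookups for the same text now skip the embedding API call, but only when the same Client instance is passed, since clients hash by identity. A minimal sketch of the behavior with a stub client (the stub is illustrative, not the OpenAI API):

    from functools import lru_cache
    from typing import List

    class StubClient:
        # Stand-in for openai.Client; hashable by identity like the real one.
        calls = 0

        def embed(self, text: str, model: str) -> List[float]:
            StubClient.calls += 1
            return [float(len(text))]  # dummy embedding

    @lru_cache(maxsize=512)
    def get_embedding(
        client: StubClient, text: str, model: str = "text-embedding-3-small"
    ) -> List[float]:
        return client.embed(text, model)

    c = StubClient()
    get_embedding(c, "hello")
    get_embedding(c, "hello")
    assert StubClient.calls == 1  # the second call was served from the cache

A side effect worth knowing: the cache holds strong references to client instances, and the returned list object is shared across cache hits.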
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: vision-agent
-Version: 0.2.94
+Version: 0.2.96
 Summary: Toolset for Vision Agent
 Author: Landing AI
 Author-email: dev@landing.ai
@@ -11,7 +11,7 @@ Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Requires-Dist: anthropic (>=0.31.0,<0.32.0)
 Requires-Dist: e2b (>=0.17.1,<0.18.0)
-Requires-Dist: e2b-code-interpreter (==0.0.11a2)
+Requires-Dist: e2b-code-interpreter (==0.0.11a17)
 Requires-Dist: ipykernel (>=6.29.4,<7.0.0)
 Requires-Dist: langsmith (>=0.1.58,<0.2.0)
 Requires-Dist: moviepy (>=1.0.0,<2.0.0)
@@ -23,6 +23,7 @@ Requires-Dist: opencv-python (>=4.0.0,<5.0.0)
 Requires-Dist: pandas (>=2.0.0,<3.0.0)
 Requires-Dist: pillow (>=10.0.0,<11.0.0)
 Requires-Dist: pillow-heif (>=0.16.0,<0.17.0)
+Requires-Dist: pydantic (==2.7.4)
 Requires-Dist: pydantic-settings (>=2.2.1,<3.0.0)
 Requires-Dist: pytube (==15.0.0)
 Requires-Dist: requests (>=2.0.0,<3.0.0)
@@ -15,15 +15,15 @@ vision_agent/tools/__init__.py,sha256=UNiaJAOt1C709gaJ-a9h9BzKnY5JmoEUpgKftsOnyP
 vision_agent/tools/meta_tools.py,sha256=rmxgVzj-vJKeewHbue3qHru4sYsFLxlSZV-YH-eyH5w,13366
 vision_agent/tools/prompts.py,sha256=V1z4YJLXZuUl_iZ5rY0M5hHc_2tmMEUKr0WocXKGt4E,1430
 vision_agent/tools/tool_utils.py,sha256=XoB-iae8hHrBQgJd3fV6-UjZAkClysobUaOM17IcHuE,4597
-vision_agent/tools/tools.py,sha256=HT8stRTUmhwm2VbpB3QQNXnL1KIxka-BDCkU2tLaFN4,42326
+vision_agent/tools/tools.py,sha256=wiMLnhH2pSelWv-XtIzI8DL1MtVk8ISGMk_f17GmQi0,43339
 vision_agent/utils/__init__.py,sha256=CW84HnhqI6XQVuxf2KifkLnSuO7EOhmuL09-gAymAak,219
 vision_agent/utils/exceptions.py,sha256=isVH-SVL4vHj3q5kK4z7cy5_aOapAqHXWkpibfSNbUs,1659
-vision_agent/utils/execute.py,sha256=JPjiXyAHsMSPYFDZ9bmiiX2lgiGroxqDhXhsD8BTv5E,23848
+vision_agent/utils/execute.py,sha256=s43aUtuq7ZNjil2mxrddiz8EvvqlJwttkYlIiZouXqM,25125
 vision_agent/utils/image_utils.py,sha256=y69wtNla0xHZ1h1x0-vv7nOyKUq69jtjSJBiDCn6EM0,7703
-vision_agent/utils/sim.py,sha256=1HTaiVaBiKeyXIy21IYGXlPw0TipOyw9FPOJDfyLI94,4409
+vision_agent/utils/sim.py,sha256=7JvtWGN0Ik5ife3qQYWs7Fm3T8AnAXGFd5HnvDC15mQ,4433
 vision_agent/utils/type_defs.py,sha256=oVFJcicB-s_09lqvn61u0A5ncZsTqZArZledXWbrrg0,1384
 vision_agent/utils/video.py,sha256=rNmU9KEIkZB5-EztZNlUiKYN0mm_55A_2VGUM0QpqLA,8779
-vision_agent-0.2.94.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-vision_agent-0.2.94.dist-info/METADATA,sha256=Ef1XAiAKdtYv9NNwCK9HUO2QwOk-oUZPflLh2wOo9r4,10693
-vision_agent-0.2.94.dist-info/WHEEL,sha256=7Z8_27uaHI_UZAc4Uox4PpBhQ9Y5_modZXWMxtUi4NU,88
-vision_agent-0.2.94.dist-info/RECORD,,
+vision_agent-0.2.96.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+vision_agent-0.2.96.dist-info/METADATA,sha256=CHUkz8xYiyCJGAlk043s1d0WS86PzLoLyaHK19iBZm0,10728
+vision_agent-0.2.96.dist-info/WHEEL,sha256=7Z8_27uaHI_UZAc4Uox4PpBhQ9Y5_modZXWMxtUi4NU,88
+vision_agent-0.2.96.dist-info/RECORD,,