vision-agent 0.2.7__py3-none-any.whl → 0.2.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- vision_agent/agent/easytool_prompts.py +1 -1
- vision_agent/agent/vision_agent.py +9 -3
- vision_agent/agent/vision_agent_prompts.py +21 -13
- vision_agent/image_utils.py +1 -1
- vision_agent/tools/tools.py +30 -18
- {vision_agent-0.2.7.dist-info → vision_agent-0.2.9.dist-info}/METADATA +1 -1
- {vision_agent-0.2.7.dist-info → vision_agent-0.2.9.dist-info}/RECORD +9 -9
- {vision_agent-0.2.7.dist-info → vision_agent-0.2.9.dist-info}/LICENSE +0 -0
- {vision_agent-0.2.7.dist-info → vision_agent-0.2.9.dist-info}/WHEEL +0 -0
vision_agent/agent/easytool_prompts.py
CHANGED
@@ -44,7 +44,7 @@ Output: """
 
 CHOOSE_PARAMETER = """Given a user's question and an API tool documentation, you need to output parameters according to the API tool documentation to successfully call the API to solve the user's question.
 Please note that:
-1. The Example in the API tool documentation can help you better understand the use of the API.
+1. The Example in the API tool documentation can help you better understand the use of the API. Pay attention to the examples which show how to parse the question and extract tool parameters such as prompts and visual inputs.
 2. Ensure the parameters you output are correct. The output must contain the required parameters, and can contain the optional parameters based on the question. If there are no paremters in the required parameters and optional parameters, just leave it as {{"Parameters":{{}}}}
 3. If the user's question mentions other APIs, you should ONLY consider the API tool documentation I give and do not consider other APIs.
 4. The question may have dependencies on answers of other questions, so we will provide logs of previous questions and answers for your reference.
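Note on the doubled braces in point 2: these prompt templates are evidently filled with Python's str.format (hence the {question} and {tools} placeholders), so literal braces must be escaped by doubling. A minimal sketch of that mechanic (template text abbreviated, not the package's actual call site):

    # Doubled braces survive str.format() as literal braces, so the JSON
    # example {"Parameters":{}} reaches the model intact.
    template = 'If nothing applies, output {{"Parameters":{{}}}} for: {question}'
    print(template.format(question="How many cars are in image.jpg?"))
    # If nothing applies, output {"Parameters":{}} for: How many cars are in image.jpg?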
vision_agent/agent/vision_agent.py
CHANGED
@@ -308,6 +308,9 @@ def _handle_extract_frames(
     # handle extract_frames_ case, useful if it extracts frames but doesn't do
     # any following processing
     for video_file_output in tool_result["call_results"]:
+        # When the video tool is run with wrong parameters, exit the loop
+        if len(video_file_output) < 2:
+            break
         for frame, _ in video_file_output:
             image = frame
             if image not in image_to_data:
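Note: the new guard above stops frame handling when the video tool returned malformed output. A hedged, self-contained sketch of the behavior (simplified names, not the package's code):

    def frames_from_results(call_results):
        frames = []
        for video_file_output in call_results:
            # Output from a failed extract-frames call is too short to hold
            # (frame, timestamp) pairs, so bail out instead of unpacking it.
            if len(video_file_output) < 2:
                break
            for frame, _ in video_file_output:
                frames.append(frame)
        return frames

    print(frames_from_results([[("f0", 0.0), ("f1", 0.5)]]))  # ['f0', 'f1']
    print(frames_from_results([[("only_one", 0.0)]]))         # [] (guard trips)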
@@ -346,7 +349,9 @@ def _handle_viz_tools(
     # 2. return a dictionary but not have the necessary keys
 
     if not isinstance(call_result, dict) or (
-        "bboxes" not in call_result
+        "bboxes" not in call_result
+        and "mask" not in call_result
+        and "heat_map" not in call_result
     ):
         return image_to_data
 
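Note: after this change a call result counts as visual output if it carries any of the three keys, not "bboxes" alone. An equivalent standalone check, by De Morgan's law (illustrative only):

    def is_viz_result(call_result):
        return isinstance(call_result, dict) and (
            "bboxes" in call_result
            or "mask" in call_result
            or "heat_map" in call_result
        )

    print(is_viz_result({"heat_map": [0.1, 0.9]}))  # True (this key alone did not qualify before)
    print(is_viz_result({"text": "hello"}))         # False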
@@ -366,7 +371,8 @@ def _handle_viz_tools(
     image_to_data[image]["scores"].extend(call_result.get("scores", []))
     image_to_data[image]["masks"].extend(call_result.get("masks", []))
     # only single heatmap is returned
-
+    if "heat_map" in call_result:
+        image_to_data[image]["heat_map"].append(call_result["heat_map"])
     if "mask_shape" in call_result:
         image_to_data[image]["mask_shape"] = call_result["mask_shape"]
 
@@ -444,7 +450,7 @@ class VisionAgent(Agent):
         task_model: Optional[Union[LLM, LMM]] = None,
         answer_model: Optional[Union[LLM, LMM]] = None,
         reflect_model: Optional[Union[LLM, LMM]] = None,
-        max_retries: int =
+        max_retries: int = 2,
         verbose: bool = False,
         report_progress_callback: Optional[Callable[[str], None]] = None,
     ):
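Note: max_retries now has an explicit default of 2. A hedged usage sketch (the import path is assumed from the package layout in the RECORD below; the other parameters are as shown in this hunk):

    from vision_agent.agent import VisionAgent

    # Override the retry budget per instance if the default of 2 is too tight.
    agent = VisionAgent(max_retries=3, verbose=True)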
vision_agent/agent/vision_agent_prompts.py
CHANGED
@@ -26,22 +26,24 @@ Final answer:
 
 Reflection: """
 
-TASK_DECOMPOSE = """You need to decompose a user's complex question into
+TASK_DECOMPOSE = """You need to decompose a user's complex question into one or more simple subtasks and let the model execute it step by step.
 This is the user's question: {question}
 This is the tool list:
 {tools}
 
 Please note that:
-1.
-2. If
+1. If the given task is simple and the answer can be provided by executing one tool, you should only use that tool to provide the answer.
+2. If the given task is complex, You should decompose this user's complex question into simple subtasks which can only be executed easily by using one single tool in the tool list.
+3. You should try to decompose the complex question into least number of subtasks.
+4. If one subtask needs the results from another subtask, you should write clearly. For example:
 {{"Tasks": ["Convert 23 km/h to X km/min by 'divide_'", "Multiply X km/min by 45 min to get Y by 'multiply_'"]}}
-
+5. You must ONLY output in a parsible JSON format. An example output looks like:
 
 {{"Tasks": ["Task 1", "Task 2", ...]}}
 
 Output: """
 
-TASK_DECOMPOSE_DEPENDS = """You need to decompose a user's complex question into
+TASK_DECOMPOSE_DEPENDS = """You need to decompose a user's complex question into one or more simple subtasks and let the model execute it step by step.
 This is the user's question: {question}
 
 This is the tool list:
@@ -51,10 +53,12 @@ This is a reflection from a previous failed attempt:
 {reflections}
 
 Please note that:
-1.
-2. If
+1. If the given task is simple and the answer can be provided by executing one tool, you should only use that tool to provide the answer.
+2. If the given task is complex, You should decompose this user's complex question into simple subtasks which can only be executed easily by using one single tool in the tool list.
+3. You should try to decompose the complex question into least number of subtasks.
+4. If one subtask needs the results from another subtask, you should write clearly. For example:
 {{"Tasks": ["Convert 23 km/h to X km/min by 'divide_'", "Multiply X km/min by 45 min to get Y by 'multiply_'"]}}
-
+5. You must ONLY output in a parsible JSON format. An example output looks like:
 
 {{"Tasks": ["Task 1", "Task 2", ...]}}
 
@@ -65,8 +69,10 @@ These are the tools you can select to solve the question:
 {tools}
 
 Please note that:
-1. You should only choose one tool from the Tool List to solve this question.
-2. You
+1. You should only choose one tool from the Tool List to solve this question and it should have maximum chance of solving the question.
+2. You should only choose the tool whose parameters are most relevant to the user's question and are availale as part of the question.
+3. You should choose the tool whose return type is most relevant to the answer of the user's question.
+4. You must ONLY output the ID of the tool you chose in a parsible JSON format. Two example outputs look like:
 
 Example 1: {{"ID": 1}}
 Example 2: {{"ID": 2}}
@@ -81,8 +87,10 @@ This is a reflection from a previous failed attempt:
 {reflections}
 
 Please note that:
-1. You should only choose one tool from the Tool List to solve this question.
-2. You
+1. You should only choose one tool from the Tool List to solve this question and it should have maximum chance of solving the question.
+2. You should only choose the tool whose parameters are most relevant to the user's question and are availale as part of the question.
+3. You should choose the tool whose return type is most relevant to the answer of the user's question.
+4. You must ONLY output the ID of the tool you chose in a parsible JSON format. Two example outputs look like:
 
 Example 1: {{"ID": 1}}
 Example 2: {{"ID": 2}}
@@ -91,7 +99,7 @@ Output: """
 
 CHOOSE_PARAMETER_DEPENDS = """Given a user's question and an API tool documentation, you need to output parameters according to the API tool documentation to successfully call the API to solve the user's question.
 Please note that:
-1. The Example in the API tool documentation can help you better understand the use of the API.
+1. The Example in the API tool documentation can help you better understand the use of the API. Pay attention to the examples which show how to parse the question and extract tool parameters such as prompts and visual inputs.
 2. Ensure the parameters you output are correct. The output must contain the required parameters, and can contain the optional parameters based on the question. If there are no paremters in the required parameters and optional parameters, just leave it as {{"Parameters":{{}}}}
 3. If the user's question mentions other APIs, you should ONLY consider the API tool documentation I give and do not consider other APIs.
 4. The question may have dependencies on answers of other questions, so we will provide logs of previous questions and answers for your reference.
vision_agent/image_utils.py
CHANGED
@@ -238,7 +238,7 @@ def overlay_heat_map(
     elif isinstance(image, np.ndarray):
         image = Image.fromarray(image)
 
-    if "heat_map" not in heat_map:
+    if "heat_map" not in heat_map or len(heat_map["heat_map"]) == 0:
         return image.convert("RGB")
 
     image = image.convert("L")
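Note: the strengthened guard also catches an empty "heat_map" list, not just a missing key. A runnable sketch of the same check (simplified, not the package's full function):

    from PIL import Image
    import numpy as np

    def safe_overlay(image, heat_map):
        if isinstance(image, np.ndarray):
            image = Image.fromarray(image)
        # Nothing to overlay: hand the image back unchanged, as plain RGB.
        if "heat_map" not in heat_map or len(heat_map["heat_map"]) == 0:
            return image.convert("RGB")
        ...  # blend the heat map (elided)

    print(safe_overlay(np.zeros((4, 4), dtype=np.uint8), {"heat_map": []}).mode)  # RGB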
vision_agent/tools/tools.py
CHANGED
@@ -175,7 +175,7 @@ class GroundingDINO(Tool):
     """
 
     name = "grounding_dino_"
-    description = "'grounding_dino_' is a tool that can detect
+    description = "'grounding_dino_' is a tool that can detect and count objects given a text prompt such as category names or referring expressions. It returns a list and count of bounding boxes, label names and associated probability scores."
     usage = {
         "required_parameters": [
             {"name": "prompt", "type": "str"},
@@ -186,6 +186,13 @@ class GroundingDINO(Tool):
             {"name": "iou_threshold", "type": "float"},
         ],
         "examples": [
+            {
+                "scenario": "Can you detect and count the giraffes and zebras in this image? Image name: animal.jpg",
+                "parameters": {
+                    "prompt": "giraffe. zebra",
+                    "image": "person.jpg",
+                },
+            },
             {
                 "scenario": "Can you build me a car detector?",
                 "parameters": {"prompt": "car", "image": ""},
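Note: the new example packs two categories into a single prompt separated by periods ("giraffe. zebra"), the usual convention for Grounding DINO text prompts. A hedged sketch of splitting such a prompt back into labels:

    prompt = "giraffe. zebra"
    labels = [p.strip() for p in prompt.split(".") if p.strip()]
    print(labels)  # ['giraffe', 'zebra']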
@@ -198,7 +205,7 @@ class GroundingDINO(Tool):
                 },
             },
             {
-                "scenario": "Detect the red shirts and green
+                "scenario": "Detect the red shirts and green shirt. Image name: shirts.jpg",
                 "parameters": {
                     "prompt": "red shirt. green shirt",
                     "image": "shirts.jpg",
@@ -271,7 +278,7 @@ class GroundingSAM(Tool):
     """
 
     name = "grounding_sam_"
-    description = "'grounding_sam_' is a tool that can detect
+    description = "'grounding_sam_' is a tool that can detect and segment objects given a text prompt such as category names or referring expressions. It returns a list of bounding boxes, label names and masks file names and associated probability scores."
     usage = {
         "required_parameters": [
             {"name": "prompt", "type": "str"},
@@ -282,6 +289,13 @@ class GroundingSAM(Tool):
             {"name": "iou_threshold", "type": "float"},
         ],
         "examples": [
+            {
+                "scenario": "Can you segment the apples and grapes in this image? Image name: fruits.jpg",
+                "parameters": {
+                    "prompt": "apple. grape",
+                    "image": "fruits.jpg",
+                },
+            },
             {
                 "scenario": "Can you build me a car segmentor?",
                 "parameters": {"prompt": "car", "image": ""},
@@ -478,7 +492,7 @@ class ZeroShotCounting(Tool):
     """
 
     name = "zero_shot_counting_"
-    description = "'zero_shot_counting_' is a tool that counts
+    description = "'zero_shot_counting_' is a tool that counts foreground items given only an image and no other information. It returns only the count of the objects in the image"
 
     usage = {
         "required_parameters": [
@@ -486,7 +500,7 @@ class ZeroShotCounting(Tool):
         ],
         "examples": [
             {
-                "scenario": "Can you count the
+                "scenario": "Can you count the items in the image? Image name: lids.jpg",
                 "parameters": {"image": "lids.jpg"},
             },
             {
@@ -535,7 +549,7 @@ class VisualPromptCounting(Tool):
     """
 
     name = "visual_prompt_counting_"
-    description = "'visual_prompt_counting_' is a tool that
+    description = "'visual_prompt_counting_' is a tool that counts foreground items in an image given a visual prompt which is a bounding box describing the object. It returns only the count of the objects in the image."
 
     usage = {
         "required_parameters": [
@@ -544,7 +558,7 @@ class VisualPromptCounting(Tool):
         ],
         "examples": [
            {
-                "scenario": "Here is an example of a lid '0.1, 0.1, 0.14, 0.2', Can you count the
+                "scenario": "Here is an example of a lid '0.1, 0.1, 0.14, 0.2', Can you count the items in the image ? Image name: lids.jpg",
                 "parameters": {"image": "lids.jpg", "prompt": "0.1, 0.1, 0.14, 0.2"},
             },
             {
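Note: the visual prompt in these examples is a comma-separated, normalized [x1, y1, x2, y2] box. A hedged parse of that format:

    prompt = "0.1, 0.1, 0.14, 0.2"
    x1, y1, x2, y2 = (float(v) for v in prompt.split(","))
    print([x1, y1, x2, y2])  # [0.1, 0.1, 0.14, 0.2]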
@@ -552,7 +566,7 @@ class VisualPromptCounting(Tool):
                 "parameters": {"image": "tray.jpg", "prompt": "0.1, 0.1, 0.2, 0.25"},
             },
             {
-                "scenario": "Can you
+                "scenario": "Can you count this item based on an example, reference_data: '0.1, 0.15, 0.2, 0.2' ? Image name: shirts.jpg",
                 "parameters": {
                     "image": "shirts.jpg",
                     "prompt": "0.1, 0.15, 0.2, 0.2",
@@ -605,7 +619,7 @@ class VisualQuestionAnswering(Tool):
     """
 
     name = "visual_question_answering_"
-    description = "'visual_question_answering_' is a tool that can
+    description = "'visual_question_answering_' is a tool that can answer basic questions about the image given a question and an image. It returns a text describing the image and the answer to the question"
 
     usage = {
         "required_parameters": [
@@ -672,7 +686,7 @@ class ImageQuestionAnswering(Tool):
     """
 
     name = "image_question_answering_"
-    description = "'image_question_answering_' is a tool that can
+    description = "'image_question_answering_' is a tool that can answer basic questions about the image given a question and an image. It returns a text describing the image and the answer to the question"
 
     usage = {
         "required_parameters": [
@@ -773,7 +787,7 @@ class BboxArea(Tool):
     r"""BboxArea returns the area of the bounding box in pixels normalized to 2 decimal places."""
 
     name = "bbox_area_"
-    description = "'bbox_area_' returns the area of the bounding box in pixels normalized to 2 decimal places."
+    description = "'bbox_area_' returns the area of the given bounding box in pixels normalized to 2 decimal places."
     usage = {
         "required_parameters": [{"name": "bboxes", "type": "List[int]"}],
         "examples": [
@@ -803,7 +817,7 @@ class SegArea(Tool):
     r"""SegArea returns the area of the segmentation mask in pixels normalized to 2 decimal places."""
 
     name = "seg_area_"
-    description = "'seg_area_' returns the area of the segmentation mask in pixels normalized to 2 decimal places."
+    description = "'seg_area_' returns the area of the given segmentation mask in pixels normalized to 2 decimal places."
     usage = {
         "required_parameters": [{"name": "masks", "type": "str"}],
         "examples": [
@@ -883,7 +897,7 @@ class SegIoU(Tool):
 
 class BboxContains(Tool):
     name = "bbox_contains_"
-    description = "Given two bounding boxes, a target bounding box and a region bounding box, 'bbox_contains_' returns the intersection of the two bounding boxes
+    description = "Given two bounding boxes, a target bounding box and a region bounding box, 'bbox_contains_' returns the intersection of the two bounding boxes which is the percentage area of the target bounding box overlaps with the region bounding box. This is a good tool for determining if the region object contains the target object."
     usage = {
         "required_parameters": [
             {"name": "target", "type": "List[int]"},
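Note: the expanded description defines the return value as the fraction of the target box's area that falls inside the region box. A worked sketch of that metric (boxes as [x1, y1, x2, y2]; not the package's code):

    def target_overlap(target, region):
        ix1, iy1 = max(target[0], region[0]), max(target[1], region[1])
        ix2, iy2 = min(target[2], region[2]), min(target[3], region[3])
        inter = max(0, ix2 - ix1) * max(0, iy2 - iy1)
        area = (target[2] - target[0]) * (target[3] - target[1])
        return inter / area if area else 0.0

    print(target_overlap([2, 2, 4, 4], [0, 0, 10, 10]))  # 1.0: region contains target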
@@ -935,9 +949,7 @@ class BboxContains(Tool):
 
 class BoxDistance(Tool):
     name = "box_distance_"
-    description = (
-        "'box_distance_' returns the minimum distance between two bounding boxes."
-    )
+    description = "'box_distance_' calculates distance between two bounding boxes. It returns the minumum distance between the given bounding boxes"
     usage = {
         "required_parameters": [
             {"name": "bbox1", "type": "List[int]"},
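Note: a minimum box-to-box distance consistent with the new description returns 0.0 whenever the boxes overlap. A hedged sketch, since the exact formula is not shown in this diff:

    import math

    def min_box_distance(b1, b2):
        dx = max(b2[0] - b1[2], b1[0] - b2[2], 0)
        dy = max(b2[1] - b1[3], b1[1] - b2[3], 0)
        return math.hypot(dx, dy)

    # The example boxes below overlap, so the distance is 0.0.
    print(min_box_distance([0.2, 0.21, 0.34, 0.42], [0.3, 0.31, 0.44, 0.52]))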
@@ -945,7 +957,7 @@ class BoxDistance(Tool):
         ],
         "examples": [
             {
-                "scenario": "
+                "scenario": "Calculate the distance between the bounding boxes [0.2, 0.21, 0.34, 0.42] and [0.3, 0.31, 0.44, 0.52]",
                 "parameters": {
                     "bbox1": [0.2, 0.21, 0.34, 0.42],
                     "bbox2": [0.3, 0.31, 0.44, 0.52],
@@ -1008,7 +1020,7 @@ class ExtractFrames(Tool):
 
 class OCR(Tool):
     name = "ocr_"
-    description = "'ocr_' extracts text from an image."
+    description = "'ocr_' extracts text from an image. It returns a list of detected text, bounding boxes, and confidence scores."
    usage = {
         "required_parameters": [
             {"name": "image", "type": "str"},
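Note: the expanded description says ocr_ returns detected text with bounding boxes and confidence scores. A shape sketch of consuming such a result (the field names here are assumptions, not the package's schema):

    result = [{"text": "STOP", "bbox": [0.1, 0.1, 0.3, 0.2], "score": 0.98}]
    confident = [r["text"] for r in result if r["score"] > 0.9]
    print(confident)  # ['STOP']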
{vision_agent-0.2.7.dist-info → vision_agent-0.2.9.dist-info}/RECORD
CHANGED
@@ -2,24 +2,24 @@ vision_agent/__init__.py,sha256=GVLHCeK_R-zgldpbcPmOzJat-BkadvkuRCMxDvTIcXs,108
 vision_agent/agent/__init__.py,sha256=B4JVrbY4IRVCJfjmrgvcp7h1mTUEk8MZvL0Zmej4Ka0,127
 vision_agent/agent/agent.py,sha256=X7kON-g9ePUKumCDaYfQNBX_MEFE-ax5PnRp7-Cc5Wo,529
 vision_agent/agent/easytool.py,sha256=oMHnBg7YBtIPgqQUNcZgq7uMgpPThs99_UnO7ERkMVg,11511
-vision_agent/agent/easytool_prompts.py,sha256=
+vision_agent/agent/easytool_prompts.py,sha256=Bikw-PPLkm78dwywTlnv32Y1Tw6JMeC-R7oCnXWLcTk,4656
 vision_agent/agent/reflexion.py,sha256=4gz30BuFMeGxSsTzoDV4p91yE0R8LISXp28IaOI6wdM,10506
 vision_agent/agent/reflexion_prompts.py,sha256=G7UAeNz_g2qCb2yN6OaIC7bQVUkda4m3z42EG8wAyfE,9342
-vision_agent/agent/vision_agent.py,sha256=
-vision_agent/agent/vision_agent_prompts.py,sha256=
+vision_agent/agent/vision_agent.py,sha256=PyAtzDl5h1Uasd-Fjzdl-NK9gdZ2ARxoF9y3tvap7PU,26243
+vision_agent/agent/vision_agent_prompts.py,sha256=moihXFhEzFw8xnf2sUSgd_k9eoxQam3T6XUkB0fyp5o,8570
 vision_agent/fonts/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 vision_agent/fonts/default_font_ch_en.ttf,sha256=1YM0Z3XqLDjSNbF7ihQFSAIUdjF9m1rtHiNC_6QosTE,1594400
-vision_agent/image_utils.py,sha256=
+vision_agent/image_utils.py,sha256=1dggPBhW8_hUXDItCRLa23h-hdBwS50cjL4v1hsoUbg,7586
 vision_agent/llm/__init__.py,sha256=BoUm_zSAKnLlE8s-gKTSQugXDqVZKPqYlWwlTLdhcz4,48
 vision_agent/llm/llm.py,sha256=1BkrSVBWEClyqLc0Rmyw4heLhi_ZVm6JO7-i1wd1ziw,5383
 vision_agent/lmm/__init__.py,sha256=nnNeKD1k7q_4vLb1x51O_EUTYaBgGfeiCx5F433gr3M,67
 vision_agent/lmm/lmm.py,sha256=gK90vMxh0OcGSuIZQikBkDXm4pfkdFk1R2y7rtWDl84,10539
 vision_agent/tools/__init__.py,sha256=HfUr0JQUwk0Kyieen93df9lMbbdpVf9Q6CcVFmKv_q4,413
 vision_agent/tools/prompts.py,sha256=V1z4YJLXZuUl_iZ5rY0M5hHc_2tmMEUKr0WocXKGt4E,1430
-vision_agent/tools/tools.py,sha256=
+vision_agent/tools/tools.py,sha256=EvNDLUxe-Ed8-meHInTIiX3aySLUXFBsAWwL0Is5S1o,43823
 vision_agent/tools/video.py,sha256=xTElFSFp1Jw4ulOMnk81Vxsh-9dTxcWUO6P9fzEi3AM,7653
 vision_agent/type_defs.py,sha256=4LTnTL4HNsfYqCrDn9Ppjg9bSG2ZGcoKSSd9YeQf4Bw,1792
-vision_agent-0.2.
-vision_agent-0.2.
-vision_agent-0.2.
-vision_agent-0.2.
+vision_agent-0.2.9.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+vision_agent-0.2.9.dist-info/METADATA,sha256=jyfAwSfDnObeILoLyfB8ijuLLpZUWd-Fvg-xncEMCYc,7697
+vision_agent-0.2.9.dist-info/WHEEL,sha256=7Z8_27uaHI_UZAc4Uox4PpBhQ9Y5_modZXWMxtUi4NU,88
+vision_agent-0.2.9.dist-info/RECORD,,
{vision_agent-0.2.7.dist-info → vision_agent-0.2.9.dist-info}/LICENSE
File without changes
{vision_agent-0.2.7.dist-info → vision_agent-0.2.9.dist-info}/WHEEL
File without changes