vision-agent 0.0.44__tar.gz → 0.0.46__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {vision_agent-0.0.44 → vision_agent-0.0.46}/PKG-INFO +19 -18
- {vision_agent-0.0.44 → vision_agent-0.0.46}/README.md +18 -17
- {vision_agent-0.0.44 → vision_agent-0.0.46}/pyproject.toml +1 -1
- {vision_agent-0.0.44 → vision_agent-0.0.46}/LICENSE +0 -0
- {vision_agent-0.0.44 → vision_agent-0.0.46}/vision_agent/__init__.py +0 -0
- {vision_agent-0.0.44 → vision_agent-0.0.46}/vision_agent/agent/__init__.py +0 -0
- {vision_agent-0.0.44 → vision_agent-0.0.46}/vision_agent/agent/agent.py +0 -0
- {vision_agent-0.0.44 → vision_agent-0.0.46}/vision_agent/agent/easytool.py +0 -0
- {vision_agent-0.0.44 → vision_agent-0.0.46}/vision_agent/agent/easytool_prompts.py +0 -0
- {vision_agent-0.0.44 → vision_agent-0.0.46}/vision_agent/agent/reflexion.py +0 -0
- {vision_agent-0.0.44 → vision_agent-0.0.46}/vision_agent/agent/reflexion_prompts.py +0 -0
- {vision_agent-0.0.44 → vision_agent-0.0.46}/vision_agent/agent/vision_agent.py +0 -0
- {vision_agent-0.0.44 → vision_agent-0.0.46}/vision_agent/agent/vision_agent_prompts.py +0 -0
- {vision_agent-0.0.44 → vision_agent-0.0.46}/vision_agent/data/__init__.py +0 -0
- {vision_agent-0.0.44 → vision_agent-0.0.46}/vision_agent/data/data.py +0 -0
- {vision_agent-0.0.44 → vision_agent-0.0.46}/vision_agent/emb/__init__.py +0 -0
- {vision_agent-0.0.44 → vision_agent-0.0.46}/vision_agent/emb/emb.py +0 -0
- {vision_agent-0.0.44 → vision_agent-0.0.46}/vision_agent/image_utils.py +0 -0
- {vision_agent-0.0.44 → vision_agent-0.0.46}/vision_agent/llm/__init__.py +0 -0
- {vision_agent-0.0.44 → vision_agent-0.0.46}/vision_agent/llm/llm.py +0 -0
- {vision_agent-0.0.44 → vision_agent-0.0.46}/vision_agent/lmm/__init__.py +0 -0
- {vision_agent-0.0.44 → vision_agent-0.0.46}/vision_agent/lmm/lmm.py +0 -0
- {vision_agent-0.0.44 → vision_agent-0.0.46}/vision_agent/tools/__init__.py +0 -0
- {vision_agent-0.0.44 → vision_agent-0.0.46}/vision_agent/tools/prompts.py +0 -0
- {vision_agent-0.0.44 → vision_agent-0.0.46}/vision_agent/tools/tools.py +0 -0
- {vision_agent-0.0.44 → vision_agent-0.0.46}/vision_agent/tools/video.py +0 -0
{vision_agent-0.0.44 → vision_agent-0.0.46}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: vision-agent
-Version: 0.0.44
+Version: 0.0.46
 Summary: Toolset for Vision Agent
 Author: Landing AI
 Author-email: dev@landing.ai
@@ -71,8 +71,8 @@ You can interact with the agents as you would with any LLM or LMM model:
 ```python
 >>> import vision_agent as va
 >>> agent = VisionAgent()
->>> agent("
-"
+>>> agent("What percentage of the area of this jar is filled with coffee beans?", image="jar.jpg")
+"The percentage of area of the jar filled with coffee beans is 25%."
 ```
 
 To better understand how the model came up with it's answer, you can also run it in
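A runnable version of the new example above; this is a sketch, assuming `VisionAgent` is importable from `vision_agent.agent` (the package ships `vision_agent/agent/vision_agent.py`, per the file list) and that the model API credentials the agent relies on are configured.

```python
# Sketch of the new README example, not verbatim package documentation.
# Assumes VisionAgent lives in vision_agent.agent (see the file list above)
# and that model API credentials are available in the environment.
from vision_agent.agent import VisionAgent

agent = VisionAgent()

# The agent accepts a natural-language question and an image path and
# returns a plain-text answer, as in the diff above.
answer = agent(
    "What percentage of the area of this jar is filled with coffee beans?",
    image="jar.jpg",
)
print(answer)
```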
@@ -86,22 +86,22 @@ You can also have it return the workflow it used to complete the task along with
 the individual steps and tools to get the answer:
 
 ```python
->>> resp, workflow = agent.chat_with_workflow([{"role": "user", "content": "
+>>> resp, workflow = agent.chat_with_workflow([{"role": "user", "content": "What percentage of the area of this jar is filled with coffee beans?"}], image="jar.jpg")
 >>> print(workflow)
-[{"task": "
-"tool": "
-"parameters": {"prompt": "
+[{"task": "Segment the jar using 'grounding_sam_'.",
+"tool": "grounding_sam_",
+"parameters": {"prompt": "jar", "image": "jar.jpg"},
 "call_results": [[
 {
-"labels": ["
-"scores": [0.99
+"labels": ["jar"],
+"scores": [0.99],
 "bboxes": [
 [0.58, 0.2, 0.72, 0.45],
-
-
+],
+"masks": "mask.png"
 }
 ]],
-"answer": "
+"answer": "The jar is located at [0.58, 0.2, 0.72, 0.45].",
 }]
 ```
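Per the example above, `chat_with_workflow` returns the final response plus a list of step dictionaries with `task`, `tool`, `parameters`, `call_results`, and `answer` keys. A sketch of walking that structure to audit each step, assuming the same return shape and the import path inferred from the file list:

```python
# Sketch: auditing the workflow returned by chat_with_workflow. Assumes the
# (response, workflow) return shape and per-step keys shown in the diff;
# the import path is an assumption based on the package's file list.
from vision_agent.agent import VisionAgent

agent = VisionAgent()
resp, workflow = agent.chat_with_workflow(
    [
        {
            "role": "user",
            "content": "What percentage of the area of this jar is filled with coffee beans?",
        }
    ],
    image="jar.jpg",
)

for i, step in enumerate(workflow, start=1):
    # Each step records the sub-task, the tool chosen, its arguments,
    # and the intermediate answer derived from the tool's results.
    print(f"step {i}: {step['task']}")
    print(f"  tool:       {step['tool']}")
    print(f"  parameters: {step['parameters']}")
    print(f"  answer:     {step['answer']}")
```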
@@ -113,13 +113,12 @@ you. For example:
 ```python
 >>> import vision_agent as va
 >>> llm = va.llm.OpenAILLM()
->>> detector = llm.generate_detector("Can you build
->>> detector("
-[{"labels": ["
-"scores": [0.99
+>>> detector = llm.generate_detector("Can you build a jar detector for me?")
+>>> detector("jar.jpg")
+[{"labels": ["jar",],
+"scores": [0.99],
 "bboxes": [
 [0.58, 0.2, 0.72, 0.45],
-[0.94, 0.57, 0.98, 0.66],
 ]
 }]
 ```
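The generated detector returns a list of dictionaries with parallel `labels`, `scores`, and `bboxes` arrays. A sketch of thresholding that output by confidence; the `filter_detections` helper and the 0.5 cutoff are illustrative, not part of the package:

```python
# Hypothetical helper for post-filtering detector output by confidence.
# Assumes the [{"labels": [...], "scores": [...], "bboxes": [...]}] shape
# shown in the diff; neither the helper nor the threshold is package code.
def filter_detections(results, min_score=0.5):
    kept = []
    for result in results:
        for label, score, bbox in zip(
            result["labels"], result["scores"], result["bboxes"]
        ):
            if score >= min_score:
                kept.append({"label": label, "score": score, "bbox": bbox})
    return kept

# Using the sample output from the README example:
results = [{"labels": ["jar"], "scores": [0.99], "bboxes": [[0.58, 0.2, 0.72, 0.45]]}]
print(filter_detections(results))
# [{'label': 'jar', 'score': 0.99, 'bbox': [0.58, 0.2, 0.72, 0.45]}]
```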
@@ -133,7 +132,9 @@ you. For example:
 | Crop | Crop crops an image given a bounding box and returns a file name of the cropped image. |
 | BboxArea | BboxArea returns the area of the bounding box in pixels normalized to 2 decimal places. |
 | SegArea | SegArea returns the area of the segmentation mask in pixels normalized to 2 decimal places. |
-|
+| BboxIoU | BboxIoU returns the intersection over union of two bounding boxes normalized to 2 decimal places. |
+| SegIoU | SegIoU returns the intersection over union of two segmentation masks normalized to 2 decimal places. |
+| ExtractFrames | ExtractFrames extracts frames with motion from a video. |
 
 
 It also has a basic set of calculate tools such as add, subtract, multiply and divide.
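Among the tools added in this release, `BboxIoU` is described as returning the intersection over union of two bounding boxes rounded to 2 decimal places. A plain-Python illustration of that computation (a sketch of the standard IoU formula, not the package's implementation), with `[x1, y1, x2, y2]` boxes in normalized coordinates as in the README examples:

```python
# Standard IoU over two [x1, y1, x2, y2] boxes in normalized coordinates.
# Illustrates what BboxIoU is documented to return; this is a standalone
# sketch, not vision_agent's code.
def bbox_iou(a, b):
    # Intersection rectangle; its sides clamp to zero when boxes are disjoint.
    ix1, iy1 = max(a[0], b[0]), max(a[1], b[1])
    ix2, iy2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0.0, ix2 - ix1) * max(0.0, iy2 - iy1)

    area_a = (a[2] - a[0]) * (a[3] - a[1])
    area_b = (b[2] - b[0]) * (b[3] - b[1])
    union = area_a + area_b - inter
    return round(inter / union, 2) if union > 0 else 0.0

# The jar box from the README against a slightly shifted box:
print(bbox_iou([0.58, 0.2, 0.72, 0.45], [0.6, 0.25, 0.75, 0.5]))  # 0.49
```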
{vision_agent-0.0.44 → vision_agent-0.0.46}/README.md

@@ -42,8 +42,8 @@ You can interact with the agents as you would with any LLM or LMM model:
 ```python
 >>> import vision_agent as va
 >>> agent = VisionAgent()
->>> agent("
-"
+>>> agent("What percentage of the area of this jar is filled with coffee beans?", image="jar.jpg")
+"The percentage of area of the jar filled with coffee beans is 25%."
 ```
 
 To better understand how the model came up with it's answer, you can also run it in
@@ -57,22 +57,22 @@ You can also have it return the workflow it used to complete the task along with
 the individual steps and tools to get the answer:
 
 ```python
->>> resp, workflow = agent.chat_with_workflow([{"role": "user", "content": "
+>>> resp, workflow = agent.chat_with_workflow([{"role": "user", "content": "What percentage of the area of this jar is filled with coffee beans?"}], image="jar.jpg")
 >>> print(workflow)
-[{"task": "
-"tool": "
-"parameters": {"prompt": "
+[{"task": "Segment the jar using 'grounding_sam_'.",
+"tool": "grounding_sam_",
+"parameters": {"prompt": "jar", "image": "jar.jpg"},
 "call_results": [[
 {
-"labels": ["
-"scores": [0.99
+"labels": ["jar"],
+"scores": [0.99],
 "bboxes": [
 [0.58, 0.2, 0.72, 0.45],
-
-
+],
+"masks": "mask.png"
 }
 ]],
-"answer": "
+"answer": "The jar is located at [0.58, 0.2, 0.72, 0.45].",
 }]
 ```
@@ -84,13 +84,12 @@ you. For example:
 ```python
 >>> import vision_agent as va
 >>> llm = va.llm.OpenAILLM()
->>> detector = llm.generate_detector("Can you build
->>> detector("
-[{"labels": ["
-"scores": [0.99
+>>> detector = llm.generate_detector("Can you build a jar detector for me?")
+>>> detector("jar.jpg")
+[{"labels": ["jar",],
+"scores": [0.99],
 "bboxes": [
 [0.58, 0.2, 0.72, 0.45],
-[0.94, 0.57, 0.98, 0.66],
 ]
 }]
 ```
@@ -104,7 +103,9 @@ you. For example:
 | Crop | Crop crops an image given a bounding box and returns a file name of the cropped image. |
 | BboxArea | BboxArea returns the area of the bounding box in pixels normalized to 2 decimal places. |
 | SegArea | SegArea returns the area of the segmentation mask in pixels normalized to 2 decimal places. |
-|
+| BboxIoU | BboxIoU returns the intersection over union of two bounding boxes normalized to 2 decimal places. |
+| SegIoU | SegIoU returns the intersection over union of two segmentation masks normalized to 2 decimal places. |
+| ExtractFrames | ExtractFrames extracts frames with motion from a video. |
 
 
 It also has a basic set of calculate tools such as add, subtract, multiply and divide.