vision-agent 0.0.44__tar.gz → 0.0.45__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (26)
  1. {vision_agent-0.0.44 → vision_agent-0.0.45}/PKG-INFO +16 -17
  2. {vision_agent-0.0.44 → vision_agent-0.0.45}/README.md +15 -16
  3. {vision_agent-0.0.44 → vision_agent-0.0.45}/pyproject.toml +1 -1
  4. {vision_agent-0.0.44 → vision_agent-0.0.45}/LICENSE +0 -0
  5. {vision_agent-0.0.44 → vision_agent-0.0.45}/vision_agent/__init__.py +0 -0
  6. {vision_agent-0.0.44 → vision_agent-0.0.45}/vision_agent/agent/__init__.py +0 -0
  7. {vision_agent-0.0.44 → vision_agent-0.0.45}/vision_agent/agent/agent.py +0 -0
  8. {vision_agent-0.0.44 → vision_agent-0.0.45}/vision_agent/agent/easytool.py +0 -0
  9. {vision_agent-0.0.44 → vision_agent-0.0.45}/vision_agent/agent/easytool_prompts.py +0 -0
  10. {vision_agent-0.0.44 → vision_agent-0.0.45}/vision_agent/agent/reflexion.py +0 -0
  11. {vision_agent-0.0.44 → vision_agent-0.0.45}/vision_agent/agent/reflexion_prompts.py +0 -0
  12. {vision_agent-0.0.44 → vision_agent-0.0.45}/vision_agent/agent/vision_agent.py +0 -0
  13. {vision_agent-0.0.44 → vision_agent-0.0.45}/vision_agent/agent/vision_agent_prompts.py +0 -0
  14. {vision_agent-0.0.44 → vision_agent-0.0.45}/vision_agent/data/__init__.py +0 -0
  15. {vision_agent-0.0.44 → vision_agent-0.0.45}/vision_agent/data/data.py +0 -0
  16. {vision_agent-0.0.44 → vision_agent-0.0.45}/vision_agent/emb/__init__.py +0 -0
  17. {vision_agent-0.0.44 → vision_agent-0.0.45}/vision_agent/emb/emb.py +0 -0
  18. {vision_agent-0.0.44 → vision_agent-0.0.45}/vision_agent/image_utils.py +0 -0
  19. {vision_agent-0.0.44 → vision_agent-0.0.45}/vision_agent/llm/__init__.py +0 -0
  20. {vision_agent-0.0.44 → vision_agent-0.0.45}/vision_agent/llm/llm.py +0 -0
  21. {vision_agent-0.0.44 → vision_agent-0.0.45}/vision_agent/lmm/__init__.py +0 -0
  22. {vision_agent-0.0.44 → vision_agent-0.0.45}/vision_agent/lmm/lmm.py +0 -0
  23. {vision_agent-0.0.44 → vision_agent-0.0.45}/vision_agent/tools/__init__.py +0 -0
  24. {vision_agent-0.0.44 → vision_agent-0.0.45}/vision_agent/tools/prompts.py +0 -0
  25. {vision_agent-0.0.44 → vision_agent-0.0.45}/vision_agent/tools/tools.py +0 -0
  26. {vision_agent-0.0.44 → vision_agent-0.0.45}/vision_agent/tools/video.py +0 -0

{vision_agent-0.0.44 → vision_agent-0.0.45}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: vision-agent
-Version: 0.0.44
+Version: 0.0.45
 Summary: Toolset for Vision Agent
 Author: Landing AI
 Author-email: dev@landing.ai
@@ -71,8 +71,8 @@ You can interact with the agents as you would with any LLM or LMM model:
 ```python
 >>> import vision_agent as va
 >>> agent = VisionAgent()
->>> agent("How many apples are in this image?", image="apples.jpg")
-"There are 2 apples in the image."
+>>> agent("What percentage of the area of this jar is filled with coffee beans?", image="jar.jpg")
+"The percentage of area of the jar filled with coffee beans is 25%."
 ```
 
 To better understand how the model came up with it's answer, you can also run it in
@@ -86,22 +86,22 @@ You can also have it return the workflow it used to complete the task along with
 the individual steps and tools to get the answer:
 
 ```python
->>> resp, workflow = agent.chat_with_workflow([{"role": "user", "content": "How many apples are in this image?"}], image="apples.jpg")
+>>> resp, workflow = agent.chat_with_workflow([{"role": "user", "content": "What percentage of the area of this jar is filled with coffee beans?"}], image="jar.jpg")
 >>> print(workflow)
-[{"task": "Count the number of apples using 'grounding_dino_'.",
-"tool": "grounding_dino_",
-"parameters": {"prompt": "apple", "image": "apples.jpg"},
+[{"task": "Segment the jar using 'grounding_sam_'.",
+"tool": "grounding_sam_",
+"parameters": {"prompt": "jar", "image": "jar.jpg"},
 "call_results": [[
 {
-"labels": ["apple", "apple"],
-"scores": [0.99, 0.95],
+"labels": ["jar"],
+"scores": [0.99],
 "bboxes": [
 [0.58, 0.2, 0.72, 0.45],
-[0.94, 0.57, 0.98, 0.66],
-]
+],
+"masks": "mask.png"
 }
 ]],
-"answer": "There are 2 apples in the image.",
+"answer": "The jar is located at [0.58, 0.2, 0.72, 0.45].",
 }]
 ```
 
@@ -113,13 +113,12 @@ you. For example:
 ```python
 >>> import vision_agent as va
 >>> llm = va.llm.OpenAILLM()
->>> detector = llm.generate_detector("Can you build an apple detector for me?")
->>> detector("apples.jpg")
-[{"labels": ["apple", "apple"],
-"scores": [0.99, 0.95],
+>>> detector = llm.generate_detector("Can you build a jar detector for me?")
+>>> detector("jar.jpg")
+[{"labels": ["jar",],
+"scores": [0.99],
 "bboxes": [
 [0.58, 0.2, 0.72, 0.45],
-[0.94, 0.57, 0.98, 0.66],
 ]
 }]
 ```

{vision_agent-0.0.44 → vision_agent-0.0.45}/README.md
@@ -42,8 +42,8 @@ You can interact with the agents as you would with any LLM or LMM model:
 ```python
 >>> import vision_agent as va
 >>> agent = VisionAgent()
->>> agent("How many apples are in this image?", image="apples.jpg")
-"There are 2 apples in the image."
+>>> agent("What percentage of the area of this jar is filled with coffee beans?", image="jar.jpg")
+"The percentage of area of the jar filled with coffee beans is 25%."
 ```
 
 To better understand how the model came up with it's answer, you can also run it in
@@ -57,22 +57,22 @@ You can also have it return the workflow it used to complete the task along with
 the individual steps and tools to get the answer:
 
 ```python
->>> resp, workflow = agent.chat_with_workflow([{"role": "user", "content": "How many apples are in this image?"}], image="apples.jpg")
+>>> resp, workflow = agent.chat_with_workflow([{"role": "user", "content": "What percentage of the area of this jar is filled with coffee beans?"}], image="jar.jpg")
 >>> print(workflow)
-[{"task": "Count the number of apples using 'grounding_dino_'.",
-"tool": "grounding_dino_",
-"parameters": {"prompt": "apple", "image": "apples.jpg"},
+[{"task": "Segment the jar using 'grounding_sam_'.",
+"tool": "grounding_sam_",
+"parameters": {"prompt": "jar", "image": "jar.jpg"},
 "call_results": [[
 {
-"labels": ["apple", "apple"],
-"scores": [0.99, 0.95],
+"labels": ["jar"],
+"scores": [0.99],
 "bboxes": [
 [0.58, 0.2, 0.72, 0.45],
-[0.94, 0.57, 0.98, 0.66],
-]
+],
+"masks": "mask.png"
 }
 ]],
-"answer": "There are 2 apples in the image.",
+"answer": "The jar is located at [0.58, 0.2, 0.72, 0.45].",
 }]
 ```
 
@@ -84,13 +84,12 @@ you. For example:
 ```python
 >>> import vision_agent as va
 >>> llm = va.llm.OpenAILLM()
->>> detector = llm.generate_detector("Can you build an apple detector for me?")
->>> detector("apples.jpg")
-[{"labels": ["apple", "apple"],
-"scores": [0.99, 0.95],
+>>> detector = llm.generate_detector("Can you build a jar detector for me?")
+>>> detector("jar.jpg")
+[{"labels": ["jar",],
+"scores": [0.99],
 "bboxes": [
 [0.58, 0.2, 0.72, 0.45],
-[0.94, 0.57, 0.98, 0.66],
 ]
 }]
 ```

{vision_agent-0.0.44 → vision_agent-0.0.45}/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
 
 [tool.poetry]
 name = "vision-agent"
-version = "0.0.44"
+version = "0.0.45"
 description = "Toolset for Vision Agent"
 authors = ["Landing AI <dev@landing.ai>"]
 readme = "README.md"