vision-agent 0.2.83__tar.gz → 0.2.84__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {vision_agent-0.2.83 → vision_agent-0.2.84}/PKG-INFO +2 -1
- {vision_agent-0.2.83 → vision_agent-0.2.84}/pyproject.toml +2 -1
- vision_agent-0.2.84/vision_agent/lmm/__init__.py +1 -0
- {vision_agent-0.2.83 → vision_agent-0.2.84}/vision_agent/lmm/lmm.py +91 -0
- vision_agent-0.2.83/vision_agent/lmm/__init__.py +0 -1
- {vision_agent-0.2.83 → vision_agent-0.2.84}/LICENSE +0 -0
- {vision_agent-0.2.83 → vision_agent-0.2.84}/README.md +0 -0
- {vision_agent-0.2.83 → vision_agent-0.2.84}/vision_agent/__init__.py +0 -0
- {vision_agent-0.2.83 → vision_agent-0.2.84}/vision_agent/agent/__init__.py +0 -0
- {vision_agent-0.2.83 → vision_agent-0.2.84}/vision_agent/agent/agent.py +0 -0
- {vision_agent-0.2.83 → vision_agent-0.2.84}/vision_agent/agent/vision_agent.py +0 -0
- {vision_agent-0.2.83 → vision_agent-0.2.84}/vision_agent/agent/vision_agent_prompts.py +0 -0
- {vision_agent-0.2.83 → vision_agent-0.2.84}/vision_agent/fonts/__init__.py +0 -0
- {vision_agent-0.2.83 → vision_agent-0.2.84}/vision_agent/fonts/default_font_ch_en.ttf +0 -0
- {vision_agent-0.2.83 → vision_agent-0.2.84}/vision_agent/tools/__init__.py +0 -0
- {vision_agent-0.2.83 → vision_agent-0.2.84}/vision_agent/tools/prompts.py +0 -0
- {vision_agent-0.2.83 → vision_agent-0.2.84}/vision_agent/tools/tool_utils.py +0 -0
- {vision_agent-0.2.83 → vision_agent-0.2.84}/vision_agent/tools/tools.py +0 -0
- {vision_agent-0.2.83 → vision_agent-0.2.84}/vision_agent/utils/__init__.py +0 -0
- {vision_agent-0.2.83 → vision_agent-0.2.84}/vision_agent/utils/exceptions.py +0 -0
- {vision_agent-0.2.83 → vision_agent-0.2.84}/vision_agent/utils/execute.py +0 -0
- {vision_agent-0.2.83 → vision_agent-0.2.84}/vision_agent/utils/image_utils.py +0 -0
- {vision_agent-0.2.83 → vision_agent-0.2.84}/vision_agent/utils/sim.py +0 -0
- {vision_agent-0.2.83 → vision_agent-0.2.84}/vision_agent/utils/type_defs.py +0 -0
- {vision_agent-0.2.83 → vision_agent-0.2.84}/vision_agent/utils/video.py +0 -0
{vision_agent-0.2.83 → vision_agent-0.2.84}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: vision-agent
-Version: 0.2.83
+Version: 0.2.84
 Summary: Toolset for Vision Agent
 Author: Landing AI
 Author-email: dev@landing.ai
@@ -9,6 +9,7 @@ Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
+Requires-Dist: anthropic (>=0.31.0,<0.32.0)
 Requires-Dist: e2b (>=0.17.1,<0.18.0)
 Requires-Dist: e2b-code-interpreter (==0.0.11a2)
 Requires-Dist: ipykernel (>=6.29.4,<7.0.0)
{vision_agent-0.2.83 → vision_agent-0.2.84}/pyproject.toml

@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
 
 [tool.poetry]
 name = "vision-agent"
-version = "0.2.83"
+version = "0.2.84"
 description = "Toolset for Vision Agent"
 authors = ["Landing AI <dev@landing.ai>"]
 readme = "README.md"
@@ -39,6 +39,7 @@ e2b-code-interpreter = "0.0.11a2"
 tenacity = "^8.3.0"
 pillow-heif = "^0.16.0"
 pytube = "15.0.0"
+anthropic = "^0.31.0"
 
 [tool.poetry.group.dev.dependencies]
 autoflake = "1.*"
vision_agent-0.2.84/vision_agent/lmm/__init__.py

@@ -0,0 +1 @@
+from .lmm import LMM, AzureOpenAILMM, ClaudeSonnetLMM, Message, OllamaLMM, OpenAILMM
{vision_agent-0.2.83 → vision_agent-0.2.84}/vision_agent/lmm/lmm.py

@@ -7,7 +7,9 @@ from abc import ABC, abstractmethod
 from pathlib import Path
 from typing import Any, Callable, Dict, List, Optional, Union, cast
 
+import anthropic
 import requests
+from anthropic.types import ImageBlockParam, MessageParam, TextBlockParam
 from openai import AzureOpenAI, OpenAI
 from PIL import Image
 
@@ -375,3 +377,92 @@ class OllamaLMM(LMM):
 
         response = response.json()
         return response["response"]  # type: ignore
+
+
+class ClaudeSonnetLMM(LMM):
+    r"""An LMM class for Anthropic's Claude Sonnet model."""
+
+    def __init__(
+        self,
+        api_key: Optional[str] = None,
+        model_name: str = "claude-3-sonnet-20240229",
+        max_tokens: int = 4096,
+        temperature: float = 0.7,
+        **kwargs: Any,
+    ):
+        self.client = anthropic.Anthropic(api_key=api_key)
+        self.model_name = model_name
+        self.max_tokens = max_tokens
+        self.temperature = temperature
+        self.kwargs = kwargs
+
+    def __call__(
+        self,
+        input: Union[str, List[Dict[str, Any]]],
+    ) -> str:
+        if isinstance(input, str):
+            return self.generate(input)
+        return self.chat(input)
+
+    def chat(
+        self,
+        chat: List[Dict[str, Any]],
+    ) -> str:
+        messages: List[MessageParam] = []
+        for msg in chat:
+            content: List[Union[TextBlockParam, ImageBlockParam]] = [
+                TextBlockParam(type="text", text=msg["content"])
+            ]
+            if "media" in msg:
+                for media_path in msg["media"]:
+                    encoded_media = encode_media(media_path)
+                    content.append(
+                        ImageBlockParam(
+                            type="image",
+                            source={
+                                "type": "base64",
+                                "media_type": "image/png",
+                                "data": encoded_media,
+                            },
+                        )
+                    )
+            messages.append({"role": msg["role"], "content": content})
+
+        response = self.client.messages.create(
+            model=self.model_name,
+            max_tokens=self.max_tokens,
+            temperature=self.temperature,
+            messages=messages,
+            **self.kwargs,
+        )
+        return cast(str, response.content[0].text)
+
+    def generate(
+        self,
+        prompt: str,
+        media: Optional[List[Union[str, Path]]] = None,
+    ) -> str:
+        content: List[Union[TextBlockParam, ImageBlockParam]] = [
+            TextBlockParam(type="text", text=prompt)
+        ]
+        if media:
+            for m in media:
+                encoded_media = encode_media(m)
+                content.append(
+                    ImageBlockParam(
+                        type="image",
+                        source={
+                            "type": "base64",
+                            "media_type": "image/png",
+                            "data": encoded_media,
+                        },
+                    )
+                )
+        response = self.client.messages.create(
+            model=self.model_name,
+            max_tokens=self.max_tokens,
+            temperature=self.temperature,
+            messages=[{"role": "user", "content": content}],
+            **self.kwargs,
+        )
+        return cast(str, response.content[0].text)
vision_agent-0.2.83/vision_agent/lmm/__init__.py

@@ -1 +0,0 @@
-from .lmm import LMM, AzureOpenAILMM, Message, OllamaLMM, OpenAILMM
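Read together with the re-exported symbols in vision_agent/lmm/__init__.py, the new ClaudeSonnetLMM class can be exercised roughly as follows. This is a minimal sketch based only on the signatures visible in the diff above, not on the package's documentation; the prompt text, the local image path chart.png, and reliance on the ANTHROPIC_API_KEY environment variable (the fallback anthropic.Anthropic uses when api_key is None) are assumptions.

# Minimal usage sketch for the new ClaudeSonnetLMM (see assumptions above).
from vision_agent.lmm import ClaudeSonnetLMM

# With api_key left as None, the anthropic client falls back to the
# ANTHROPIC_API_KEY environment variable; model_name, max_tokens and
# temperature keep the defaults declared in __init__ unless overridden.
lmm = ClaudeSonnetLMM(temperature=0.0)

# A plain string goes through __call__ -> generate() as text-only input.
print(lmm("Describe what a vision agent does in one sentence."))

# generate() also accepts media paths; "chart.png" is a hypothetical image.
print(lmm.generate("What does this chart show?", media=["chart.png"]))

# A list of message dicts goes through __call__ -> chat(); each message may
# carry an optional "media" list, mirroring the structure chat() consumes.
print(lmm([{"role": "user", "content": "Summarize the image.", "media": ["chart.png"]}]))

As the diff shows, both chat() and generate() label attached media with "media_type": "image/png" when building the Anthropic request.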