vision-agent 0.2.245__py3-none-any.whl → 1.0.1__py3-none-any.whl

This diff compares two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
@@ -78,7 +78,7 @@ def write_code(
         plan=plan,
     )
     response = cast(str, coder([{"role": "user", "content": prompt}], stream=False))
-    maybe_code = extract_tag(response, "code")
+    maybe_code = extract_tag(response, "code", extract_markdown="python")

     # if the response wasn't properly formatted with the code tags just retrun the response
     if maybe_code is None:
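
Every call-site change in these hunks threads a new `extract_markdown` argument through `extract_tag`; the implementation appears later in this diff. As a rough, self-contained stand-in (a regex instead of the library's string scanning, with invented names), this is the caller-visible effect: a reply that skips the `<code>` tags but wraps the code in a fenced python block is now extracted instead of falling through to the raw response.

```python
import re
from typing import Optional


def extract_code_like(response: str) -> Optional[str]:
    # Rough stand-in for extract_tag(response, "code", extract_markdown="python"):
    # prefer <code>...</code>, otherwise fall back to a fenced python block.
    m = re.search(r"<code>(.*?)</code>", response, re.DOTALL)
    if m is None:
        m = re.search(r"```python(.*?)```", response, re.DOTALL)
    return m.group(1) if m else None


reply = "Here you go:\n```python\nprint('hello')\n```"
print(extract_code_like(reply))  # previously None -> raw reply returned; now the fenced code
```
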
@@ -105,7 +105,7 @@ def write_test(
         media=media_list,
     )
     response = cast(str, tester([{"role": "user", "content": prompt}], stream=False))
-    maybe_code = extract_tag(response, "code")
+    maybe_code = extract_tag(response, "code", extract_markdown="python")

     # if the response wasn't properly formatted with the code tags just retrun the response
     if maybe_code is None:
@@ -151,8 +151,12 @@ def debug_code(
     fixed_code_and_test_str = cast(str, fixed_code_and_test_str)
     thoughts_tag = extract_tag(fixed_code_and_test_str, "thoughts")
     thoughts = thoughts_tag if thoughts_tag is not None else ""
-    fixed_code = extract_tag(fixed_code_and_test_str, "code")
-    fixed_test = extract_tag(fixed_code_and_test_str, "test")
+    fixed_code = extract_tag(
+        fixed_code_and_test_str, "code", extract_markdown="python"
+    )
+    fixed_test = extract_tag(
+        fixed_code_and_test_str, "test", extract_markdown="python"
+    )

     success = not (fixed_code is None and fixed_test is None)

@@ -410,7 +410,7 @@ class VisionAgentPlanner(Agent):
         plans = write_plans(
             chat,
             get_tool_descriptions_by_names(
-                custom_tool_names, T.FUNCTION_TOOLS, T.UTIL_TOOLS  # type: ignore
+                custom_tool_names, T.tools.FUNCTION_TOOLS, T.tools.UTIL_TOOLS  # type: ignore
             ),
             format_feedback(working_memory),
             self.planner,
@@ -154,7 +154,7 @@ def run_multi_trial_planning(
        response3=responses[2],
    )
    response = cast(str, model.chat([{"role": "user", "content": prompt}]))
-    json_str = extract_tag(response, "json")
+    json_str = extract_tag(response, "json", extract_markdown="json")
    if json_str:
        json_data = extract_json(json_str)
        best = np.argmax([int(json_data[f"response{k}"]) for k in [1, 2, 3]])
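
The same fallback applies to JSON here: a judge reply that uses a fenced json block rather than `<json>` tags can still be parsed and scored. A minimal sketch of that selection step, with an invented reply string and a simple split standing in for the library's extraction helpers:

```python
import json

import numpy as np

# Hypothetical judge reply that uses a fenced json block instead of <json> tags.
response = '```json\n{"response1": 2, "response2": 5, "response3": 3}\n```'

# With extract_markdown="json", the fenced block is still captured; here the same
# fallback is inlined with a simple split for illustration.
json_str = response.split("```json", 1)[1].split("```", 1)[0]
json_data = json.loads(json_str)
best = int(np.argmax([int(json_data[f"response{k}"]) for k in [1, 2, 3]]))
print(best)  # 1  (zero-based index of the highest-scored response)
```
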
@@ -230,7 +230,7 @@ def execute_code_action(
     while not execution.success and count <= 3:
         prompt = FIX_BUG.format(chat_history=get_planning(chat), code=code, error=obs)
         response = cast(str, model.chat([{"role": "user", "content": prompt}]))
-        new_code = extract_tag(response, "code")
+        new_code = extract_tag(response, "code", extract_markdown="python")
         if not new_code:
             continue
         else:
@@ -343,7 +343,7 @@ def create_finalize_plan(
    plan_str = cast(str, response)
    return_chat = [AgentMessage(role="planner", content=plan_str, media=None)]

-    plan_json = extract_tag(plan_str, "json")
+    plan_json = extract_tag(plan_str, "json", extract_markdown="json")

    # sometimes the planner model will refuse to answer a question becuase of some
    # safety concern, we then wont be able to parse the response so we have to send
@@ -357,7 +357,7 @@ def create_finalize_plan(
    except json.JSONDecodeError:
        return return_chat, ErrorContext(error=plan_str)

-    code_snippets = extract_tag(plan_str, "code")
+    code_snippets = extract_tag(plan_str, "code", extract_markdown="python")
    plan["code"] = code_snippets if code_snippets is not None else ""
    if verbose:
        _CONSOLE.print(
@@ -544,7 +544,9 @@ class VisionAgentPlannerV2(AgentPlanner):

            response = response_safeguards(response)
            thinking = extract_tag(response, "thinking")
-            code = extract_tag(response, "execute_python")
+            code = extract_tag(
+                response, "execute_python", extract_markdown="python"
+            )
            finalize_plan = extract_tag(response, "finalize_plan")
            finished = finalize_plan is not None
            self.update_callback({"role": "planner_update", "content": response})
@@ -54,12 +54,22 @@ AGENT: <response>I am VisionAgent, an agent built by LandingAI, to help users wr
 - Understanding documents
 - Pose estimation
 - Visual question answering for both images and videos
-- Action recognition in videos
+- Activity recognition in videos
 - Image inpainting

 How can I help you?</response>
 --- END EXAMPLE2 ---

+**Helpful Context**:
+- You are helping the user write vision code that utilizes the `vision-agent` library to solve different vision tasks.
+- The goal is to write code the user can use to solve a visual task and then deploy the code so they can run it repeatedly on that task.
+- If the user wants to run the code, they can `pip install vision-agent` to install the library, copy the code, and run it locally themselves.
+- They can find the vision-agent source code at https://github.com/landing-ai/vision-agent. They can find the documentation for vision-agent at https://landing-ai.github.io/vision-agent/.
+- The code utilizes LandingAI's API to run the different tools, the API is free to use and does not require an API key.
+- The user can run VisionAgent (you) from two places, either on our website at https://va.landing.ai or locally.
+- If the user is running VisionAgent on the website, they can only upload images or videos, nothing else at the moment. They can view files created by VisionAgent by clicking the "Files" button in the upper right part of the screen.
+- If the user is running VisionAgent locally they can ask it to do more things like run over files in a folder. They can view files created by VisionAgent by checking their local file system.
+
 **Conversation**:
 Here is the current conversation so far:
 --- START CONVERSATION ---
vision_agent/sim/sim.py CHANGED
@@ -14,7 +14,7 @@ from scipy.spatial.distance import cosine  # type: ignore

 from vision_agent.tools.tools import get_tools_df
 from vision_agent.utils.tools import (
-    _LND_API_KEY,
+    get_vision_agent_api_key,
     _create_requests_session,
     _LND_API_URL_v2,
 )
@@ -57,7 +57,8 @@ def stella_embeddings(prompts: List[str]) -> List[np.ndarray]:
        "model": "stella1.5b",
    }
    url = f"{_LND_API_URL_v2}/embeddings"
-    headers = {"apikey": _LND_API_KEY}
+    vision_agent_api_key = get_vision_agent_api_key()
+    headers = {"Authorization": f"Basic {vision_agent_api_key}"}
    session = _create_requests_session(
        url=url,
        num_retry=3,
@@ -130,7 +130,7 @@ def run_multi_judge(

    def run_judge() -> Tuple[Optional[Callable], str, str]:
        response = tool_chooser.generate(prompt, media=image_paths, temperature=1.0)
-        tool_choice_context = extract_tag(response, "json")  # type: ignore
+        tool_choice_context = extract_tag(response, "json", extract_markdown="json")  # type: ignore
        tool_choice_context_dict = extract_json(tool_choice_context)  # type: ignore
        tool, tool_thoughts, tool_docstring, _ = extract_tool_info(
            tool_choice_context_dict
@@ -275,7 +275,7 @@ def run_tool_testing(
    )

    response = lmm.generate(prompt, media=image_paths)
-    code = extract_tag(response, "code")  # type: ignore
+    code = extract_tag(response, "code", extract_markdown="python")  # type: ignore
    if code is None:
        raise ValueError(f"Could not extract code from response: {response}")

@@ -304,7 +304,7 @@ def run_tool_testing(
            media=str(image_paths),
        )
        response = cast(str, lmm.generate(prompt, media=image_paths))
-        code = extract_tag(response, "code")
+        code = extract_tag(response, "code", extract_markdown="python")
        if code is None:
            code = response

@@ -103,24 +103,37 @@ def extract_code(code: str) -> str:
     return code


-def extract_tag(
-    content: str,
-    tag: str,
-) -> Optional[str]:
+def _extract_arbitrary(content: str, start: str, stop: str) -> Optional[str]:
     inner_content = None
     remaning = content
     all_inner_content = []

-    while f"<{tag}>" in remaning:
-        inner_content_i = remaning[remaning.find(f"<{tag}>") + len(f"<{tag}>") :]
-        if f"</{tag}>" not in inner_content_i:
+    while start in remaning:
+        inner_content_i = remaning[remaning.find(start) + len(start) :]
+        if stop not in inner_content_i:
             break
-        inner_content_i = inner_content_i[: inner_content_i.find(f"</{tag}>")]
-        remaning = remaning[remaning.find(f"</{tag}>") + len(f"</{tag}>") :]
+        inner_content_i = inner_content_i[: inner_content_i.find(stop)]
+        remaning = remaning[remaning.find(stop) + len(stop) :]
         all_inner_content.append(inner_content_i)

     if len(all_inner_content) > 0:
         inner_content = "\n".join(all_inner_content)
+
+    return inner_content
+
+
+def extract_markdown(content: str, tag: str) -> Optional[str]:
+    return _extract_arbitrary(content, f"```{tag}", "```")
+
+
+def extract_tag(
+    content: str,
+    tag: str,
+    extract_markdown: Optional[str] = None,
+) -> Optional[str]:
+    inner_content = _extract_arbitrary(content, f"<{tag}>", f"</{tag}>")
+    if inner_content is None and extract_markdown is not None:
+        inner_content = _extract_arbitrary(content, f"```{extract_markdown}", "```")
     return inner_content

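
Taken together, the refactor splits the old tag scanner into a generic `_extract_arbitrary` helper and layers a markdown fallback on top. The following recap copies the post-change functions from the hunk above into a runnable snippet and exercises both paths (the demo strings are invented for illustration):

```python
from typing import Optional


def _extract_arbitrary(content: str, start: str, stop: str) -> Optional[str]:
    # Collect every start/stop-delimited span and join them with newlines.
    inner_content = None
    remaning = content
    all_inner_content = []

    while start in remaning:
        inner_content_i = remaning[remaning.find(start) + len(start):]
        if stop not in inner_content_i:
            break
        inner_content_i = inner_content_i[: inner_content_i.find(stop)]
        remaning = remaning[remaning.find(stop) + len(stop):]
        all_inner_content.append(inner_content_i)

    if len(all_inner_content) > 0:
        inner_content = "\n".join(all_inner_content)

    return inner_content


def extract_tag(content: str, tag: str, extract_markdown: Optional[str] = None) -> Optional[str]:
    # Prefer <tag>...</tag>; optionally fall back to a fenced code block.
    inner_content = _extract_arbitrary(content, f"<{tag}>", f"</{tag}>")
    if inner_content is None and extract_markdown is not None:
        inner_content = _extract_arbitrary(content, f"```{extract_markdown}", "```")
    return inner_content


print(extract_tag("<code>x = 1</code>", "code", extract_markdown="python"))     # x = 1
print(extract_tag("```python\nx = 2\n```", "code", extract_markdown="python"))  # \nx = 2\n
print(extract_tag("no code here", "code", extract_markdown="python"))           # None
```

Note that when `extract_markdown` is given, a fenced block is only consulted if no matching tag pair was found at all.
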
 
@@ -1,6 +1,7 @@
 import logging
 import os
 from base64 import b64encode
+from functools import cache
 from typing import Any, Dict, List, MutableMapping, Optional, Tuple

 import numpy as np
@@ -13,15 +14,22 @@ from urllib3.util.retry import Retry
 from vision_agent.utils.exceptions import RemoteToolCallFailed
 from vision_agent.utils.execute import Error, MimeType
 from vision_agent.utils.image_utils import normalize_bbox
-from vision_agent.utils.type_defs import LandingaiAPIKey

 _LOGGER = logging.getLogger(__name__)
-_LND_API_KEY = os.environ.get("LANDINGAI_API_KEY", LandingaiAPIKey().api_key)
-_LND_BASE_URL = os.environ.get("LANDINGAI_URL", "https://api.landing.ai")
+_LND_BASE_URL = os.environ.get("LANDINGAI_URL", "https://api.va.landing.ai")
 _LND_API_URL = f"{_LND_BASE_URL}/v1/agent/model"
 _LND_API_URL_v2 = f"{_LND_BASE_URL}/v1/tools"


+@cache
+def get_vision_agent_api_key() -> str:
+    vision_agent_api_key = os.environ.get("VISION_AGENT_API_KEY")
+    if vision_agent_api_key:
+        return vision_agent_api_key
+    else:
+        raise ValueError("VISION_AGENT_API_KEY not found in environment variables.")
+
+
 def should_report_tool_traces() -> bool:
     return bool(os.environ.get("REPORT_TOOL_TRACES", False))
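
The new helper centralizes key lookup: hosted tool calls are now authenticated with a VISION_AGENT_API_KEY environment variable sent as a Basic Authorization header, replacing the old apikey header backed by LANDINGAI_API_KEY. A small standalone sketch of that flow (the "demo-key" value is a placeholder, and your real key is used if the variable is already set):

```python
import os
from functools import cache


@cache
def get_vision_agent_api_key() -> str:
    # Mirrors the helper added above: the key must come from the environment.
    vision_agent_api_key = os.environ.get("VISION_AGENT_API_KEY")
    if vision_agent_api_key:
        return vision_agent_api_key
    raise ValueError("VISION_AGENT_API_KEY not found in environment variables.")


os.environ.setdefault("VISION_AGENT_API_KEY", "demo-key")  # placeholder for the sketch
headers = {"Authorization": f"Basic {get_vision_agent_api_key()}"}
print(headers)  # {'Authorization': 'Basic demo-key'}
```

Because of @cache, the key is read once per process; changing the environment variable afterwards will not be picked up unless the cache is cleared.
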
 
@@ -47,12 +55,13 @@ def send_inference_request(
    if "TOOL_ENDPOINT_URL" in os.environ:
        url = os.environ["TOOL_ENDPOINT_URL"]

-    headers = {"apikey": _LND_API_KEY}
+    vision_agent_api_key = get_vision_agent_api_key()
+    headers = {"Authorization": f"Basic {vision_agent_api_key}"}
    if "TOOL_ENDPOINT_AUTH" in os.environ:
        headers["Authorization"] = os.environ["TOOL_ENDPOINT_AUTH"]
        headers.pop("apikey")

-    if runtime_tag := os.environ.get("RUNTIME_TAG", ""):
+    if runtime_tag := os.environ.get("RUNTIME_TAG", "vision-agent"):
        headers["runtime_tag"] = runtime_tag

    session = _create_requests_session(
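
In send_inference_request the headers are now assembled from three pieces of environment state: the VisionAgent key, an optional TOOL_ENDPOINT_AUTH override, and a RUNTIME_TAG that defaults to "vision-agent" instead of an empty string. A condensed sketch of that flow with a hypothetical build_headers helper; unlike the in-tree code, it pops the legacy apikey header tolerantly so the sketch runs whether or not that header exists:

```python
import os
from typing import Dict


def build_headers(api_key: str) -> Dict[str, str]:
    # Default: Basic auth with the VisionAgent API key.
    headers = {"Authorization": f"Basic {api_key}"}

    # A custom tool endpoint can override the Authorization header entirely.
    if "TOOL_ENDPOINT_AUTH" in os.environ:
        headers["Authorization"] = os.environ["TOOL_ENDPOINT_AUTH"]
        headers.pop("apikey", None)  # legacy header; tolerant pop in this sketch

    # RUNTIME_TAG now defaults to "vision-agent" instead of an empty string.
    if runtime_tag := os.environ.get("RUNTIME_TAG", "vision-agent"):
        headers["runtime_tag"] = runtime_tag

    return headers


print(build_headers("demo-key"))
# with no TOOL_ENDPOINT_AUTH/RUNTIME_TAG set:
# {'Authorization': 'Basic demo-key', 'runtime_tag': 'vision-agent'}
```
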
@@ -80,7 +89,8 @@ def send_task_inference_request(
    is_form: bool = False,
) -> Any:
    url = f"{_LND_API_URL_v2}/{task_name}"
-    headers = {"apikey": _LND_API_KEY}
+    vision_agent_api_key = get_vision_agent_api_key()
+    headers = {"Authorization": f"Basic {vision_agent_api_key}"}
    session = _create_requests_session(
        url=url,
        num_retry=3,
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: vision-agent
-Version: 0.2.245
+Version: 1.0.1
 Summary: Toolset for Vision Agent
 Author: Landing AI
 Author-email: dev@landing.ai
@@ -11,6 +11,7 @@ Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Requires-Dist: anthropic (>=0.31.0,<0.32.0)
 Requires-Dist: av (>=11.0.0,<12.0.0)
+Requires-Dist: dotenv (>=0.9.9,<0.10.0)
 Requires-Dist: e2b (>=0.17.2a50,<0.18.0)
 Requires-Dist: e2b-code-interpreter (==0.0.11a37)
 Requires-Dist: flake8 (>=7.0.0,<8.0.0)
@@ -27,7 +28,6 @@ Requires-Dist: pandas (>=2.0.0,<3.0.0)
 Requires-Dist: pillow (>=10.0.0,<11.0.0)
 Requires-Dist: pillow-heif (>=0.16.0,<0.17.0)
 Requires-Dist: pydantic (==2.7.4)
-Requires-Dist: pydantic-settings (>=2.2.1,<3.0.0)
 Requires-Dist: pytube (==15.0.0)
 Requires-Dist: requests (>=2.0.0,<3.0.0)
 Requires-Dist: rich (>=13.7.1,<14.0.0)
@@ -72,6 +72,12 @@ export OPENAI_API_KEY="your-api-key"

 > **_NOTE:_** We found using both Anthropic Claude-3.5 and OpenAI o1 to be provide the best performance for VisionAgent. If you want to use a different LLM provider or only one, see 'Using Other LLM Providers' below.

+You will also need to set your VisionAgent API key to be able to authenticate when using the hosted vision tools that we provide through our APIs. Currently, the APIs are free to use so you will only need to get it from [here](https://va.landing.ai/account/api-key).
+
+```bash
+export VISION_AGENT_API_KEY="your-api-key"
+```
+
 ## Documentation

 [VisionAgent Library Docs](https://landing-ai.github.io/vision-agent/)
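
If exporting the variable from a shell is inconvenient (for example in a notebook), the same key can be supplied from Python before any tool call reads it; a tiny sketch with a placeholder value:

```python
import os

# Must be set before any vision-agent tool call reads it; the value below is a placeholder.
os.environ["VISION_AGENT_API_KEY"] = "your-api-key"
```
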
@@ -8,13 +8,13 @@ vision_agent/agent/vision_agent.py,sha256=4LqvwPTSsiuJEDwBbMx9Dg9ALJwNR6x1c63TZv
 vision_agent/agent/vision_agent_coder.py,sha256=Ry6AiyAj3hsSeYPu_5guMcTzf2E4SoebPzpHyJtSPbQ,27360
 vision_agent/agent/vision_agent_coder_prompts.py,sha256=D4RJxTWoxpl-WtYRvHNxaLSdWVHsdYb0jJIQ2ZCGU0A,12277
 vision_agent/agent/vision_agent_coder_prompts_v2.py,sha256=53b_DhQtffX5wxLuCbNQ83AJhB0P_3wEnuKr-v5bx-o,4866
-vision_agent/agent/vision_agent_coder_v2.py,sha256=I4gWrneFIqhX6W-MxiaNyPKGk5tRKgC8xryV-YdeSZU,17289
-vision_agent/agent/vision_agent_planner.py,sha256=rp_atRMDg35WFXNKOTkjUpGPrpSCsiMhcfZtqK-DIV4,18668
+vision_agent/agent/vision_agent_coder_v2.py,sha256=VXLYtBG65gTc1X7QFcicNmUmaj8V6ZIu6yHcIW33TvM,17457
+vision_agent/agent/vision_agent_planner.py,sha256=8LeUsxUlGuQMqCdJ6jHXOe6RpKpzLtMW1AaJlfuIfzw,18680
 vision_agent/agent/vision_agent_planner_prompts.py,sha256=rYRdJthc-sQN57VgCBKrF09Sd73BSxcBdjNe6C4WNZ8,6837
 vision_agent/agent/vision_agent_planner_prompts_v2.py,sha256=TiiF5BGnFVraFlQnDaeRU67927LvszvpcMUOgVgo0ps,35843
-vision_agent/agent/vision_agent_planner_v2.py,sha256=3Rt0kAIpIiufdEuO5-0IpUitou-QlhWXArNXTgoRzxw,21848
+vision_agent/agent/vision_agent_planner_v2.py,sha256=Ot6ugP8pGbD2iNoOwRHKV6-nyJ0LotdEy0uEO-k_jAI,22017
 vision_agent/agent/vision_agent_prompts.py,sha256=KaJwYPUP7_GvQsCPPs6Fdawmi3AQWmWajBUuzj7gTG4,13812
-vision_agent/agent/vision_agent_prompts_v2.py,sha256=jTfu_heNTBaHj1UNI0XIyyFDgDOjPTPP83vrS-g3A1U,2961
+vision_agent/agent/vision_agent_prompts_v2.py,sha256=OnHmmteAj8__2spEXNcckApQvEpkK_JIZd_SWzEyg9c,4230
 vision_agent/agent/vision_agent_v2.py,sha256=QPAyDjnRRHUCD4Pw4TQYffWkucbn4WkEjYn8dBIWll4,11682
 vision_agent/clients/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 vision_agent/clients/http.py,sha256=k883i6M_4nl7zwwHSI-yP5sAgQZIDPM1nrKD6YFJ3Xs,2009
@@ -32,23 +32,22 @@ vision_agent/models/agent_types.py,sha256=vBZ9-ns5lHDdFMO7ulCGGeZ6OwRo3gK4O3vN08
 vision_agent/models/lmm_types.py,sha256=v04h-NjbczHOIN8UWa1vvO5-1BDuZ4JQhD2mge1cXmw,305
 vision_agent/models/tools_types.py,sha256=8hYf2OZhI58gvf65KGaeGkt4EQ56nwLFqIQDPHioOBc,2339
 vision_agent/sim/__init__.py,sha256=Aouz6HEPPTYcLxR5_0fTYCL1OvPKAH1RMWAF90QXAlA,135
-vision_agent/sim/sim.py,sha256=VSU_1rYd4ifvF45xKWBEYugxdeeEQVpj0QL6rjx49i4,9801
+vision_agent/sim/sim.py,sha256=xkioTFNTPl7wAnNgIFfNzswKK_9dRQ6G8OL8-K4CqsI,9893
 vision_agent/tools/__init__.py,sha256=H7FWx0OXGVIjrSOTpNH-YwE4LBuOfThZTG-SHFpo_Z8,2576
 vision_agent/tools/meta_tools.py,sha256=DNRXHX9nZ1GBeqeLiq87sBshoe0aiZeYasETbG-9neI,24053
-vision_agent/tools/planner_tools.py,sha256=orBTdJQz2NKoLuX9WE6XixaYuG305xz0UBYvZOiuquQ,19474
+vision_agent/tools/planner_tools.py,sha256=mlpUODpsN9sg-OKdsBm-6maK3eA97FUgAWm1etfgx4E,19553
 vision_agent/tools/prompts.py,sha256=V1z4YJLXZuUl_iZ5rY0M5hHc_2tmMEUKr0WocXKGt4E,1430
 vision_agent/tools/tools.py,sha256=-jBrykNYPinRpDXnBsnzlSgJ_hbZClzCp3pkzWjTUxs,122098
 vision_agent/utils/__init__.py,sha256=mANUs_84VL-3gpZbXryvV2mWU623eWnRlJCSUHtMjuw,122
-vision_agent/utils/agent.py,sha256=8z4Ei0q397lVWUga8v9nQKuenGAsh2wfkAKQOB8CwpI,14701
+vision_agent/utils/agent.py,sha256=2ifTP5QElItnr4YHOJR6L5P1PUzV0GhChTTqVxuVyQg,15153
 vision_agent/utils/exceptions.py,sha256=zis8smCbdEylBVZBTVfEUfAh7Rb7cWV3MSPambu6FsQ,1837
 vision_agent/utils/execute.py,sha256=vOEP5Ys7S2lc0_7pOJbgk7OaWi85hrCNu9_8Bo3zk6I,29356
 vision_agent/utils/image_utils.py,sha256=bJM2mEvB6E__M9pxi74yQYzAiZ7mu3KE2ptyVrp5vzQ,12533
-vision_agent/utils/tools.py,sha256=USZL0MKsiJgqA8RFiYRTcj_Kn2FVYKLHK4wIk0gP1Ow,7694
+vision_agent/utils/tools.py,sha256=lGELSZ3xmrUa1cHR4ey_f9gyDvWsIA8Q2R7G9LNGKiE,8036
 vision_agent/utils/tools_doc.py,sha256=yFue6KSXoa_Z1ngCdBEc4SdPZOWF1rVLeaHu02I8Wis,2523
-vision_agent/utils/type_defs.py,sha256=BE12s3JNQy36QvauXHjwyeffVh5enfcvd4vTzSwvEZI,1384
 vision_agent/utils/video.py,sha256=rjsQ1sKKisaQ6AVjJz0zd_G4g-ovRweS_rs4JEhenoI,5340
 vision_agent/utils/video_tracking.py,sha256=DZLFpNCuzuPJQzbQoVNcp-m4dKxgiKdCNM5QTh_zURE,12245
-vision_agent-0.2.245.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-vision_agent-0.2.245.dist-info/METADATA,sha256=gvPRMzRk4svQwN9n9bVvn9H-J8Nj-0aKvQ134P75hH0,5712
-vision_agent-0.2.245.dist-info/WHEEL,sha256=7Z8_27uaHI_UZAc4Uox4PpBhQ9Y5_modZXWMxtUi4NU,88
-vision_agent-0.2.245.dist-info/RECORD,,
+vision_agent-1.0.1.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+vision_agent-1.0.1.dist-info/METADATA,sha256=-BKv2FRgq0nAPEgwvlsPNUALsySfnpN6OUBL_1So8Xo,6024
+vision_agent-1.0.1.dist-info/WHEEL,sha256=7Z8_27uaHI_UZAc4Uox4PpBhQ9Y5_modZXWMxtUi4NU,88
+vision_agent-1.0.1.dist-info/RECORD,,
@@ -1,38 +0,0 @@
-from pydantic import Field, field_validator
-from pydantic_settings import BaseSettings
-
-from vision_agent.utils.exceptions import InvalidApiKeyError
-
-
-class LandingaiAPIKey(BaseSettings):
-    """The API key of a user in a particular organization in LandingLens.
-    It supports loading from environment variables or .env files.
-    The supported name of the environment variables are (case-insensitive):
-    - LANDINGAI_API_KEY
-
-    Environment variables will always take priority over values loaded from a dotenv file.
-    """
-
-    api_key: str = Field(
-        default="land_sk_zKvyPcPV2bVoq7q87KwduoerAxuQpx33DnqP8M1BliOCiZOSoI",
-        alias="LANDINGAI_API_KEY",
-        description="The API key of LandingAI.",
-    )
-
-    @field_validator("api_key")
-    @classmethod
-    def is_api_key_valid(cls, key: str) -> str:
-        """Check if the API key is a v2 key."""
-        if not key:
-            raise InvalidApiKeyError(f"LandingAI API key is required, but it's {key}")
-        if not key.startswith("land_sk_"):
-            raise InvalidApiKeyError(
-                f"LandingAI API key (v2) must start with 'land_sk_' prefix, but it's {key}. See https://support.landing.ai/docs/api-key for more information."
-            )
-        return key
-
-    class Config:
-        env_file = ".env"
-        env_prefix = "landingai_"
-        case_sensitive = False
-        extra = "ignore"