skyvern-llamaindex 0.0.3__py3-none-any.whl → 0.0.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,108 +1,132 @@
- from typing import Any, Dict, List, Literal, Tuple
+ from typing import List, Literal, Optional

+ from llama_index.core.tools import FunctionTool
  from llama_index.core.tools.tool_spec.base import SPEC_FUNCTION_TYPE, BaseToolSpec
- from llama_index.core.tools.types import ToolMetadata
- from skyvern_llamaindex.schema import GetTaskInput, TaskV1Request, TaskV2Request
+ from skyvern_llamaindex.settings import settings

  from skyvern.agent import Agent
- from skyvern.forge.sdk.schemas.observers import ObserverTask
- from skyvern.forge.sdk.schemas.tasks import CreateTaskResponse, TaskResponse
+ from skyvern.forge import app
+ from skyvern.forge.prompts import prompt_engine
+ from skyvern.forge.sdk.schemas.observers import ObserverTask, ObserverTaskRequest
+ from skyvern.forge.sdk.schemas.task_generations import TaskGenerationBase
+ from skyvern.forge.sdk.schemas.tasks import CreateTaskResponse, TaskRequest, TaskResponse
+
+ default_agent = Agent()
+
+
+ class SkyvernTool:
+     def __init__(self, agent: Optional[Agent] = None):
+         if agent is None:
+             agent = default_agent
+         self.agent = agent
+
+     def run_task(self) -> FunctionTool:
+         task_tool_spec = SkyvernTaskToolSpec(agent=self.agent)
+         return task_tool_spec.to_tool_list(["run_task"])[0]
+
+     def dispatch_task(self) -> FunctionTool:
+         task_tool_spec = SkyvernTaskToolSpec(agent=self.agent)
+         return task_tool_spec.to_tool_list(["dispatch_task"])[0]
+
+     def get_task(self) -> FunctionTool:
+         task_tool_spec = SkyvernTaskToolSpec(agent=self.agent)
+         return task_tool_spec.to_tool_list(["get_task"])[0]


  class SkyvernTaskToolSpec(BaseToolSpec):
      spec_functions: List[SPEC_FUNCTION_TYPE] = [
-         "run",
-         "dispatch",
-         "get",
+         "run_task",
+         "dispatch_task",
+         "get_task",
      ]
-     spec_metadata: Dict[str, Dict[str, ToolMetadata]] = {
-         "TaskV1": {
-             "run": ToolMetadata(
-                 name="run-skyvern-agent-task",
-                 description="Use Skyvern agent to run a task. This function won't return until the task is finished.",
-                 fn_schema=TaskV1Request,
-             ),
-             "dispatch": ToolMetadata(
-                 name="dispatch-skyvern-agent-task",
-                 description="Use Skyvern agent to dispatch a task. This function will return immediately and the task will be running in the background.",
-                 fn_schema=TaskV1Request,
-             ),
-             "get": ToolMetadata(
-                 name="get-skyvern-agent-task",
-                 description="Use Skyvern agent to get a task.",
-                 fn_schema=GetTaskInput,
-             ),
-         },
-         "TaskV2": {
-             "run": ToolMetadata(
-                 name="run-skyvern-agent-task",
-                 description="Use Skyvern agent to run a task. This function won't return until the task is finished.",
-                 fn_schema=TaskV2Request,
-             ),
-             "dispatch": ToolMetadata(
-                 name="dispatch-skyvern-agent-task",
-                 description="Use Skyvern agent to dispatch a task. This function will return immediately and the task will be running in the background.",
-                 fn_schema=TaskV2Request,
-             ),
-             "get": ToolMetadata(
-                 name="get-skyvern-agent-task",
-                 description="Use Skyvern agent to get a task.",
-                 fn_schema=GetTaskInput,
-             ),
-         },
-     }
-
-     def __init__(self, *, engine: Literal["TaskV1", "TaskV2"] = "TaskV2") -> None:
-         self.agent = Agent()
+
+     def __init__(
+         self,
+         *,
+         agent: Optional[Agent] = None,
+         engine: Literal["TaskV1", "TaskV2"] = settings.engine,
+         run_task_timeout_seconds: int = settings.run_task_timeout_seconds,
+     ) -> None:
+         if agent is None:
+             agent = Agent()
+         self.agent = agent
          self.engine = engine
+         self.run_task_timeout_seconds = run_task_timeout_seconds

-     def get_metadata_from_fn_name(
-         self, fn_name: str, spec_functions: List[str | Tuple[str, str]] | None = None
-     ) -> ToolMetadata | None:
-         try:
-             getattr(self, fn_name)
-         except AttributeError:
-             return None
+     # TODO: agent haven't exposed the task v1 generate function, we can migrate to use agent interface when it's available
+     async def _generate_v1_task_request(self, user_prompt: str) -> TaskGenerationBase:
+         llm_prompt = prompt_engine.load_prompt("generate-task", user_prompt=user_prompt)
+         llm_response = await app.LLM_API_HANDLER(prompt=llm_prompt, prompt_name="generate-task")
+         return TaskGenerationBase.model_validate(llm_response)

-         return self.spec_metadata.get(self.engine, {}).get(fn_name)
+     async def run_task(self, user_prompt: str, url: Optional[str] = None) -> TaskResponse | ObserverTask:
+         """
+         Use Skyvern agent to run a task. This function won't return until the task is finished.
+
+         Args:
+             user_prompt[str]: The user's prompt describing the task.
+             url (Optional[str]): The URL of the target website for the task.
+         """

-     async def run(self, **kwargs: Dict[str, Any]) -> TaskResponse | ObserverTask:
          if self.engine == "TaskV1":
-             return await self.run_task_v1(**kwargs)
+             return await self.run_task_v1(user_prompt=user_prompt, url=url)
          else:
-             return await self.run_task_v2(**kwargs)
+             return await self.run_task_v2(user_prompt=user_prompt, url=url)
+
+     async def dispatch_task(self, user_prompt: str, url: Optional[str] = None) -> CreateTaskResponse | ObserverTask:
+         """
+         Use Skyvern agent to dispatch a task. This function will return immediately and the task will be running in the background.
+
+         Args:
+             user_prompt[str]: The user's prompt describing the task.
+             url (Optional[str]): The URL of the target website for the task.
+         """

-     async def dispatch(self, **kwargs: Dict[str, Any]) -> CreateTaskResponse | ObserverTask:
          if self.engine == "TaskV1":
-             return await self.dispatch_task_v1(**kwargs)
+             return await self.dispatch_task_v1(user_prompt=user_prompt, url=url)
          else:
-             return await self.dispatch_task_v2(**kwargs)
+             return await self.dispatch_task_v2(user_prompt=user_prompt, url=url)
+
+     async def get_task(self, task_id: str) -> TaskResponse | ObserverTask | None:
+         """
+         Use Skyvern agent to get a task.
+
+         Args:
+             task_id[str]: The id of the task.
+         """

-     async def get(self, task_id: str) -> TaskResponse | ObserverTask | None:
          if self.engine == "TaskV1":
              return await self.get_task_v1(task_id)
          else:
              return await self.get_task_v2(task_id)

-     async def run_task_v1(self, **kwargs: Dict[str, Any]) -> TaskResponse:
-         task_request = TaskV1Request(**kwargs)
-         return await self.agent.run_task(task_request=task_request, timeout_seconds=task_request.timeout_seconds)
+     async def run_task_v1(self, user_prompt: str, url: Optional[str] = None) -> TaskResponse:
+         task_generation = await self._generate_v1_task_request(user_prompt=user_prompt)
+         task_request = TaskRequest.model_validate(task_generation, from_attributes=True)
+         if url is not None:
+             task_request.url = url
+
+         return await self.agent.run_task(task_request=task_request, timeout_seconds=self.run_task_timeout_seconds)
+
+     async def dispatch_task_v1(self, user_prompt: str, url: Optional[str] = None) -> CreateTaskResponse:
+         task_generation = await self._generate_v1_task_request(user_prompt=user_prompt)
+         task_request = TaskRequest.model_validate(task_generation, from_attributes=True)
+         if url is not None:
+             task_request.url = url

-     async def dispatch_task_v1(self, **kwargs: Dict[str, Any]) -> CreateTaskResponse:
-         task_request = TaskV1Request(**kwargs)
          return await self.agent.create_task(task_request=task_request)

      async def get_task_v1(self, task_id: str) -> TaskResponse | None:
          return await self.agent.get_task(task_id=task_id)

-     async def run_task_v2(self, **kwargs: Dict[str, Any]) -> ObserverTask:
-         task_request = TaskV2Request(**kwargs)
+     async def run_task_v2(self, user_prompt: str, url: Optional[str] = None) -> ObserverTask:
+         task_request = ObserverTaskRequest(user_prompt=user_prompt, url=url)
          return await self.agent.run_observer_task_v_2(
-             task_request=task_request, timeout_seconds=task_request.timeout_seconds
+             task_request=task_request, timeout_seconds=self.run_task_timeout_seconds
          )

-     async def dispatch_task_v2(self, **kwargs: Dict[str, Any]) -> ObserverTask:
-         task_request = TaskV2Request(**kwargs)
+     async def dispatch_task_v2(self, user_prompt: str, url: Optional[str] = None) -> ObserverTask:
+         task_request = ObserverTaskRequest(user_prompt=user_prompt, url=url)
          return await self.agent.observer_task_v_2(task_request=task_request)

      async def get_task_v2(self, task_id: str) -> ObserverTask | None:
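For orientation: this hunk is the local-agent module (presumably `skyvern_llamaindex/agent.py`, given the imports and the RECORD entries below). The new `SkyvernTool` wrapper hands out `SkyvernTaskToolSpec` methods as LlamaIndex `FunctionTool`s, and the tool spec now takes plain `user_prompt`/`url` arguments with defaults drawn from `skyvern_llamaindex.settings`. A minimal sketch of wiring the 0.0.4 surface into a LlamaIndex agent, assuming a locally initialized Skyvern (`skyvern init`, per the shipped README); the prompt text and timeout value are illustrative:

```python
from llama_index.agent.openai import OpenAIAgent
from llama_index.llms.openai import OpenAI

from skyvern_llamaindex.agent import SkyvernTaskToolSpec

# Build the tool spec against the local Skyvern agent, pinning the engine
# and the synchronous-run timeout instead of relying on the settings defaults.
task_spec = SkyvernTaskToolSpec(engine="TaskV2", run_task_timeout_seconds=600)
tools = task_spec.to_tool_list(["dispatch_task", "get_task"])

agent = OpenAIAgent.from_tools(
    tools=tools,
    llm=OpenAI(model="gpt-4o"),
    verbose=True,
)
response = agent.chat("Run a task with Skyvern: get the top 3 Hacker News posts.")
print(response)
```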
@@ -1,171 +1,172 @@
- from typing import Any, Dict, List, Literal, Tuple
+ from typing import Any, Dict, List, Literal, Optional

  from httpx import AsyncClient
+ from llama_index.core.tools import FunctionTool
  from llama_index.core.tools.tool_spec.base import SPEC_FUNCTION_TYPE, BaseToolSpec
- from llama_index.core.tools.types import ToolMetadata
- from skyvern_llamaindex.schema import GetTaskInput, TaskV1Request, TaskV2Request
+ from pydantic import BaseModel
+ from skyvern_llamaindex.settings import settings

  from skyvern.client import AsyncSkyvern
- from skyvern.forge.sdk.schemas.tasks import CreateTaskResponse, TaskResponse
+ from skyvern.forge.sdk.schemas.observers import ObserverTaskRequest
+ from skyvern.forge.sdk.schemas.tasks import CreateTaskResponse, TaskRequest, TaskResponse
+
+
+ class SkyvernTool(BaseModel):
+     api_key: str = settings.api_key
+     base_url: str = settings.base_url
+
+     def run_task(self) -> FunctionTool:
+         task_tool_spec = SkyvernTaskToolSpec(
+             api_key=self.api_key,
+             base_url=self.base_url,
+         )
+
+         return task_tool_spec.to_tool_list(["run_task"])[0]
+
+     def dispatch_task(self) -> FunctionTool:
+         task_tool_spec = SkyvernTaskToolSpec(
+             api_key=self.api_key,
+             base_url=self.base_url,
+         )
+
+         return task_tool_spec.to_tool_list(["dispatch_task"])[0]
+
+     def get_task(self) -> FunctionTool:
+         task_tool_spec = SkyvernTaskToolSpec(
+             api_key=self.api_key,
+             base_url=self.base_url,
+         )
+
+         return task_tool_spec.to_tool_list(["get_task"])[0]


  class SkyvernTaskToolSpec(BaseToolSpec):
      spec_functions: List[SPEC_FUNCTION_TYPE] = [
-         "run",
-         "dispatch",
-         "get",
+         "run_task",
+         "dispatch_task",
+         "get_task",
      ]

-     spec_metadata: Dict[str, Dict[str, ToolMetadata]] = {
-         "TaskV1": {
-             "run": ToolMetadata(
-                 name="run-skyvern-client-task",
-                 description="Use Skyvern client to run a task. This function won't return until the task is finished.",
-                 fn_schema=TaskV1Request,
-             ),
-             "dispatch": ToolMetadata(
-                 name="dispatch-skyvern-client-task",
-                 description="Use Skyvern client to dispatch a task. This function will return immediately and the task will be running in the background.",
-                 fn_schema=TaskV1Request,
-             ),
-             "get": ToolMetadata(
-                 name="get-skyvern-client-task",
-                 description="Use Skyvern client to get a task.",
-                 fn_schema=GetTaskInput,
-             ),
-         },
-         "TaskV2": {
-             "run": ToolMetadata(
-                 name="run-skyvern-client-task",
-                 description="Use Skyvern client to run a task. This function won't return until the task is finished.",
-                 fn_schema=TaskV2Request,
-             ),
-             "dispatch": ToolMetadata(
-                 name="dispatch-skyvern-client-task",
-                 description="Use Skyvern client to dispatch a task. This function will return immediately and the task will be running in the background.",
-                 fn_schema=TaskV2Request,
-             ),
-             "get": ToolMetadata(
-                 name="get-skyvern-client-task",
-                 description="Use Skyvern client to get a task.",
-                 fn_schema=GetTaskInput,
-             ),
-         },
-     }
-
      def __init__(
          self,
-         credential: str,
          *,
-         base_url: str = "https://api.skyvern.com",
-         engine: Literal["TaskV1", "TaskV2"] = "TaskV2",
+         api_key: str = settings.api_key,
+         base_url: str = settings.base_url,
+         engine: Literal["TaskV1", "TaskV2"] = settings.engine,
+         run_task_timeout_seconds: int = settings.run_task_timeout_seconds,
      ):
          httpx_client = AsyncClient(
              headers={
                  "Content-Type": "application/json",
-                 "x-api-key": credential,
+                 "x-api-key": api_key,
              },
          )
          self.engine = engine
+         self.run_task_timeout_seconds = run_task_timeout_seconds
          self.client = AsyncSkyvern(base_url=base_url, httpx_client=httpx_client)

-     def get_metadata_from_fn_name(
-         self, fn_name: str, spec_functions: List[str | Tuple[str, str]] | None = None
-     ) -> ToolMetadata | None:
-         try:
-             getattr(self, fn_name)
-         except AttributeError:
-             return None
+     async def run_task(self, user_prompt: str, url: Optional[str] = None) -> TaskResponse | Dict[str, Any | None]:
+         """
+         Use Skyvern client to run a task. This function won't return until the task is finished.

-         return self.spec_metadata.get(self.engine, {}).get(fn_name)
+         Args:
+             user_prompt[str]: The user's prompt describing the task.
+             url (Optional[str]): The URL of the target website for the task.
+         """

-     async def run(self, **kwargs: Dict[str, Any]) -> TaskResponse | Dict[str, Any | None]:
          if self.engine == "TaskV1":
-             return await self.run_task_v1(**kwargs)
+             return await self.run_task_v1(user_prompt=user_prompt, url=url)
          else:
-             return await self.run_task_v2(**kwargs)
+             return await self.run_task_v2(user_prompt=user_prompt, url=url)
+
+     async def dispatch_task(
+         self, user_prompt: str, url: Optional[str] = None
+     ) -> CreateTaskResponse | Dict[str, Any | None]:
+         """
+         Use Skyvern client to dispatch a task. This function will return immediately and the task will be running in the background.
+
+         Args:
+             user_prompt[str]: The user's prompt describing the task.
+             url (Optional[str]): The URL of the target website for the task.
+         """

-     async def dispatch(self, **kwargs: Dict[str, Any]) -> CreateTaskResponse | Dict[str, Any | None]:
          if self.engine == "TaskV1":
-             return await self.dispatch_task_v1(**kwargs)
+             return await self.dispatch_task_v1(user_prompt=user_prompt, url=url)
          else:
-             return await self.dispatch_task_v2(**kwargs)
+             return await self.dispatch_task_v2(user_prompt=user_prompt, url=url)
+
+     async def get_task(self, task_id: str) -> TaskResponse | Dict[str, Any | None]:
+         """
+         Use Skyvern client to get a task.
+
+         Args:
+             task_id[str]: The id of the task.
+         """

-     async def get(self, task_id: str) -> TaskResponse | Dict[str, Any | None]:
          if self.engine == "TaskV1":
              return await self.get_task_v1(task_id)
          else:
              return await self.get_task_v2(task_id)

-     async def run_task_v1(self, **kwargs: Dict[str, Any]) -> TaskResponse:
-         task_request = TaskV1Request(**kwargs)
+     async def run_task_v1(self, user_prompt: str, url: Optional[str] = None) -> TaskResponse:
+         task_generation = await self.client.agent.generate_task(
+             prompt=user_prompt,
+         )
+         task_request = TaskRequest.model_validate(task_generation, from_attributes=True)
+         if url is not None:
+             task_request.url = url
+
          return await self.client.agent.run_task(
-             max_steps_override=task_request.max_steps,
-             timeout_seconds=task_request.timeout_seconds,
+             timeout_seconds=self.run_task_timeout_seconds,
              url=task_request.url,
              title=task_request.title,
-             webhook_callback_url=task_request.webhook_callback_url,
-             totp_verification_url=task_request.totp_verification_url,
-             totp_identifier=task_request.totp_identifier,
              navigation_goal=task_request.navigation_goal,
              data_extraction_goal=task_request.data_extraction_goal,
              navigation_payload=task_request.navigation_goal,
              error_code_mapping=task_request.error_code_mapping,
-             proxy_location=task_request.proxy_location,
              extracted_information_schema=task_request.extracted_information_schema,
              complete_criterion=task_request.complete_criterion,
              terminate_criterion=task_request.terminate_criterion,
-             browser_session_id=task_request.browser_session_id,
          )

-     async def dispatch_task_v1(self, **kwargs: Dict[str, Any]) -> CreateTaskResponse:
-         task_request = TaskV1Request(**kwargs)
+     async def dispatch_task_v1(self, user_prompt: str, url: Optional[str] = None) -> CreateTaskResponse:
+         task_generation = await self.client.agent.generate_task(
+             prompt=user_prompt,
+         )
+         task_request = TaskRequest.model_validate(task_generation, from_attributes=True)
+         if url is not None:
+             task_request.url = url
+
          return await self.client.agent.create_task(
-             max_steps_override=task_request.max_steps,
              url=task_request.url,
              title=task_request.title,
-             webhook_callback_url=task_request.webhook_callback_url,
-             totp_verification_url=task_request.totp_verification_url,
-             totp_identifier=task_request.totp_identifier,
              navigation_goal=task_request.navigation_goal,
              data_extraction_goal=task_request.data_extraction_goal,
              navigation_payload=task_request.navigation_goal,
              error_code_mapping=task_request.error_code_mapping,
-             proxy_location=task_request.proxy_location,
              extracted_information_schema=task_request.extracted_information_schema,
              complete_criterion=task_request.complete_criterion,
              terminate_criterion=task_request.terminate_criterion,
-             browser_session_id=task_request.browser_session_id,
          )

      async def get_task_v1(self, task_id: str) -> TaskResponse:
          return await self.client.agent.get_task(task_id=task_id)

-     async def run_task_v2(self, **kwargs: Dict[str, Any]) -> Dict[str, Any | None]:
-         task_request = TaskV2Request(**kwargs)
+     async def run_task_v2(self, user_prompt: str, url: Optional[str] = None) -> Dict[str, Any | None]:
+         task_request = ObserverTaskRequest(url=url, user_prompt=user_prompt)
          return await self.client.agent.run_observer_task_v_2(
-             max_iterations_override=task_request.max_iterations,
-             timeout_seconds=task_request.timeout_seconds,
+             timeout_seconds=self.run_task_timeout_seconds,
              user_prompt=task_request.user_prompt,
              url=task_request.url,
              browser_session_id=task_request.browser_session_id,
-             webhook_callback_url=task_request.webhook_callback_url,
-             totp_verification_url=task_request.totp_verification_url,
-             totp_identifier=task_request.totp_identifier,
-             proxy_location=task_request.proxy_location,
          )

-     async def dispatch_task_v2(self, **kwargs: Dict[str, Any]) -> Dict[str, Any | None]:
-         task_request = TaskV2Request(**kwargs)
+     async def dispatch_task_v2(self, user_prompt: str, url: Optional[str] = None) -> Dict[str, Any | None]:
+         task_request = ObserverTaskRequest(url=url, user_prompt=user_prompt)
          return await self.client.agent.observer_task_v_2(
-             max_iterations_override=task_request.max_iterations,
              user_prompt=task_request.user_prompt,
              url=task_request.url,
              browser_session_id=task_request.browser_session_id,
-             webhook_callback_url=task_request.webhook_callback_url,
-             totp_verification_url=task_request.totp_verification_url,
-             totp_identifier=task_request.totp_identifier,
-             proxy_location=task_request.proxy_location,
          )

      async def get_task_v2(self, task_id: str) -> Dict[str, Any | None]:
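This hunk is the hosted-API module (presumably `skyvern_llamaindex/client.py`, per the RECORD below), and it carries a breaking change: keyword-only `api_key`/`base_url` parameters (defaulting to the new settings module) replace the positional `credential` argument, and the tool names gain a `_task` suffix. A hedged before/after sketch; the API key placeholder is illustrative:

```python
from skyvern_llamaindex.client import SkyvernTaskToolSpec

# 0.0.3 style (removed): positional credential, tools named "run"/"dispatch"/"get"
# spec = SkyvernTaskToolSpec(credential="<your_organization_api_key>")
# tools = spec.to_tool_list(["dispatch", "get"])

# 0.0.4 style: keyword-only configuration, tools named "*_task"
spec = SkyvernTaskToolSpec(
    api_key="<your_organization_api_key>",
    base_url="https://api.skyvern.com",
    engine="TaskV2",
    run_task_timeout_seconds=60 * 60,
)
tools = spec.to_tool_list(["dispatch_task", "get_task"])
```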
@@ -0,0 +1,18 @@
+ from typing import Literal
+
+ from dotenv import load_dotenv
+ from pydantic_settings import BaseSettings
+
+
+ class Settings(BaseSettings):
+     api_key: str = ""
+     base_url: str = "https://api.skyvern.com"
+     engine: Literal["TaskV1", "TaskV2"] = "TaskV2"
+     run_task_timeout_seconds: int = 60 * 60
+
+     class Config:
+         env_prefix = "SKYVERN_"
+
+
+ load_dotenv()
+ settings = Settings()
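The new settings module (evidently `skyvern_llamaindex/settings.py` from the RECORD) loads a local `.env` and reads `SKYVERN_`-prefixed environment variables, so `api_key`, `base_url`, `engine`, and `run_task_timeout_seconds` can be configured without touching code. A sketch of overriding them, assuming pydantic-settings' default prefix-plus-field-name mapping; the values are placeholders:

```python
import os

# Override the defaults before the settings module is imported; a .env file
# picked up by load_dotenv() works the same way. Values here are illustrative.
os.environ["SKYVERN_API_KEY"] = "<your_organization_api_key>"
os.environ["SKYVERN_ENGINE"] = "TaskV1"
os.environ["SKYVERN_RUN_TASK_TIMEOUT_SECONDS"] = "600"

from skyvern_llamaindex.settings import settings  # Settings() is built at import time

print(settings.engine, settings.run_task_timeout_seconds)
```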
@@ -0,0 +1,308 @@
+ Metadata-Version: 2.3
+ Name: skyvern-llamaindex
+ Version: 0.0.4
+ Summary: Skyvern integration for LlamaIndex
+ Author: lawyzheng
+ Author-email: lawy@skyvern.com
+ Requires-Python: >=3.11,<3.12
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.11
+ Requires-Dist: llama-index (>=0.12.19,<0.13.0)
+ Requires-Dist: skyvern (>=0.1.56,<0.2.0)
+ Description-Content-Type: text/markdown
+
+ <!-- START doctoc generated TOC please keep comment here to allow auto update -->
+ <!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
+ **Table of Contents** *generated with [DocToc](https://github.com/thlorenz/doctoc)*
+
+ - [Skyvern LlamaIndex](#skyvern-llamaindex)
+   - [Installation](#installation)
+   - [Basic Usage](#basic-usage)
+     - [Run a task(sync) locally in your local environment](#run-a-tasksync-locally-in-your-local-environment)
+     - [Run a task(async) locally in your local environment](#run-a-taskasync-locally-in-your-local-environment)
+     - [Get a task locally in your local environment](#get-a-task-locally-in-your-local-environment)
+     - [Run a task(sync) by calling skyvern APIs](#run-a-tasksync-by-calling-skyvern-apis)
+     - [Run a task(async) by calling skyvern APIs](#run-a-taskasync-by-calling-skyvern-apis)
+     - [Get a task by calling skyvern APIs](#get-a-task-by-calling-skyvern-apis)
+   - [Advanced Usage](#advanced-usage)
+     - [Dispatch a task(async) locally in your local environment and wait until the task is finished](#dispatch-a-taskasync-locally-in-your-local-environment-and-wait-until-the-task-is-finished)
+     - [Dispatch a task(async) by calling skyvern APIs and wait until the task is finished](#dispatch-a-taskasync-by-calling-skyvern-apis-and-wait-until-the-task-is-finished)
+
+ <!-- END doctoc generated TOC please keep comment here to allow auto update -->
+
+ # Skyvern LlamaIndex
+
+ This is a LlamaIndex integration for Skyvern.
+
+ ## Installation
+
+ ```bash
+ pip install skyvern-llamaindex
+ ```
+
+ ## Basic Usage
+
+ ### Run a task(sync) locally in your local environment
+ > sync task won't return until the task is finished.
+
+ :warning: :warning: if you want to run this code block, you need to run `skyvern init --openai-api-key <your_openai_api_key>` command in your terminal to set up skyvern first.
+
+
+ ```python
+ from dotenv import load_dotenv
+ from llama_index.agent.openai import OpenAIAgent
+ from llama_index.llms.openai import OpenAI
+ from skyvern_llamaindex.agent import SkyvernTool
+
+ # load OpenAI API key from .env
+ load_dotenv()
+
+ skyvern_tool = SkyvernTool()
+
+ agent = OpenAIAgent.from_tools(
+     tools=[skyvern_tool.run_task()],
+     llm=OpenAI(model="gpt-4o"),
+     verbose=True,
+ )
+
+ response = agent.chat("Run a task with Skyvern. The task is about 'Navigate to the Hacker News homepage and get the top 3 posts.'")
+ print(response)
+ ```
+
+ ### Run a task(async) locally in your local environment
+ > async task will return immediately and the task will be running in the background.
+
+ :warning: :warning: if you want to run the task in the background, you need to keep the agent running until the task is finished, otherwise the task will be killed when the agent finished the chat.
+
+ :warning: :warning: if you want to run this code block, you need to run `skyvern init --openai-api-key <your_openai_api_key>` command in your terminal to set up skyvern first.
+
+ ```python
+ import asyncio
+ from dotenv import load_dotenv
+ from llama_index.agent.openai import OpenAIAgent
+ from llama_index.llms.openai import OpenAI
+ from skyvern_llamaindex.agent import SkyvernTool
+ from llama_index.core.tools import FunctionTool
+
+ # load OpenAI API key from .env
+ load_dotenv()
+
+ async def sleep(seconds: int) -> str:
+     await asyncio.sleep(seconds)
+     return f"Slept for {seconds} seconds"
+
+ # define a sleep tool to keep the agent running until the task is finished
+ sleep_tool = FunctionTool.from_defaults(
+     async_fn=sleep,
+     description="Sleep for a given number of seconds",
+     name="sleep",
+ )
+
+ skyvern_tool = SkyvernTool()
+
+ agent = OpenAIAgent.from_tools(
+     tools=[skyvern_tool.dispatch_task(), sleep_tool],
+     llm=OpenAI(model="gpt-4o"),
+     verbose=True,
+ )
+
+ response = agent.chat("Run a task with Skyvern. The task is about 'Navigate to the Hacker News homepage and get the top 3 posts.' Then, sleep for 10 minutes.")
+ print(response)
+ ```
+
+ ### Get a task locally in your local environment
+
+ :warning: :warning: if you want to run this code block, you need to run `skyvern init --openai-api-key <your_openai_api_key>` command in your terminal to set up skyvern first.
+
+ ```python
+ from dotenv import load_dotenv
+ from llama_index.agent.openai import OpenAIAgent
+ from llama_index.llms.openai import OpenAI
+ from skyvern_llamaindex.agent import SkyvernTool
+
+ # load OpenAI API key from .env
+ load_dotenv()
+
+ skyvern_tool = SkyvernTool()
+
+ agent = OpenAIAgent.from_tools(
+     tools=[skyvern_tool.get_task()],
+     llm=OpenAI(model="gpt-4o"),
+     verbose=True,
+ )
+
+ response = agent.chat("Get the task information with Skyvern. The task id is '<task_id>'.")
+ print(response)
+ ```
+
+ ### Run a task(sync) by calling skyvern APIs
+ > sync task won't return until the task is finished.
+
+ no need to run `skyvern init` command in your terminal to set up skyvern before using this integration.
+
+ ```python
+ from dotenv import load_dotenv
+ from llama_index.agent.openai import OpenAIAgent
+ from llama_index.llms.openai import OpenAI
+ from skyvern_llamaindex.client import SkyvernTool
+
+ # load OpenAI API key from .env
+ load_dotenv()
+
+ skyvern_tool = SkyvernTool(api_key="<your_organization_api_key>")
+ # or you can load the api_key from SKYVERN_API_KEY in .env
+ # skyvern_tool = SkyvernTool()
+
+ agent = OpenAIAgent.from_tools(
+     tools=[skyvern_tool.run_task()],
+     llm=OpenAI(model="gpt-4o"),
+     verbose=True,
+ )
+
+ response = agent.chat("Run a task with Skyvern. The task is about 'Navigate to the Hacker News homepage and get the top 3 posts.'")
+ print(response)
+ ```
+
+ ### Run a task(async) by calling skyvern APIs
+ > async task will return immediately and the task will be running in the background.
+
+ no need to run `skyvern init` command in your terminal to set up skyvern before using this integration.
+
+ the task is actually running in the skyvern cloud service, so you don't need to keep your agent running until the task is finished.
+
+ ```python
+ from dotenv import load_dotenv
+ from llama_index.agent.openai import OpenAIAgent
+ from llama_index.llms.openai import OpenAI
+ from skyvern_llamaindex.client import SkyvernTool
+
+ # load OpenAI API key from .env
+ load_dotenv()
+
+ skyvern_tool = SkyvernTool(api_key="<your_organization_api_key>")
+ # or you can load the api_key from SKYVERN_API_KEY in .env
+ # skyvern_tool = SkyvernTool()
+
+ agent = OpenAIAgent.from_tools(
+     tools=[skyvern_tool.dispatch_task()],
+     llm=OpenAI(model="gpt-4o"),
+     verbose=True,
+ )
+
+ response = agent.chat("Run a task with Skyvern. The task is about 'Navigate to the Hacker News homepage and get the top 3 posts.'")
+ print(response)
+ ```
+
+
+ ### Get a task by calling skyvern APIs
+
+ no need to run `skyvern init` command in your terminal to set up skyvern before using this integration.
+
+ ```python
+ from dotenv import load_dotenv
+ from llama_index.agent.openai import OpenAIAgent
+ from llama_index.llms.openai import OpenAI
+ from skyvern_llamaindex.client import SkyvernTool
+
+ # load OpenAI API key from .env
+ load_dotenv()
+
+ skyvern_tool = SkyvernTool(api_key="<your_organization_api_key>")
+ # or you can load the api_key from SKYVERN_API_KEY in .env
+ # skyvern_tool = SkyvernTool()
+
+ agent = OpenAIAgent.from_tools(
+     tools=[skyvern_tool.get_task()],
+     llm=OpenAI(model="gpt-4o"),
+     verbose=True,
+ )
+
+ response = agent.chat("Get the task information with Skyvern. The task id is '<task_id>'.")
+ print(response)
+ ```
+
+ ## Advanced Usage
+
+ To provide some examples of how to integrate Skyvern with other llama-index tools in the agent.
+
+ ### Dispatch a task(async) locally in your local environment and wait until the task is finished
+ > dispatch task will return immediately and the task will be running in the background. You can use `get_task` tool to poll the task information until the task is finished.
+
+ :warning: :warning: if you want to run this code block, you need to run `skyvern init --openai-api-key <your_openai_api_key>` command in your terminal to set up skyvern first.
+
+ ```python
+ import asyncio
+ from dotenv import load_dotenv
+ from llama_index.agent.openai import OpenAIAgent
+ from llama_index.llms.openai import OpenAI
+ from llama_index.core.tools import FunctionTool
+ from skyvern_llamaindex.agent import SkyvernTool
+
+ # load OpenAI API key from .env
+ load_dotenv()
+
+ async def sleep(seconds: int) -> str:
+     await asyncio.sleep(seconds)
+     return f"Slept for {seconds} seconds"
+
+ sleep_tool = FunctionTool.from_defaults(
+     async_fn=sleep,
+     description="Sleep for a given number of seconds",
+     name="sleep",
+ )
+
+ skyvern_tool = SkyvernTool()
+
+ agent = OpenAIAgent.from_tools(
+     tools=[skyvern_tool.dispatch_task(), skyvern_tool.get_task(), sleep_tool],
+     llm=OpenAI(model="gpt-4o"),
+     verbose=True,
+     max_function_calls=10,
+ )
+
+ response = agent.chat("Run a task with Skyvern. The task is about 'Navigate to the Hacker News homepage and get the top 3 posts.' Then, get this task information until it's completed. The task information re-get interval should be 60s.")
+ print(response)
+
+ ```
+
+ ### Dispatch a task(async) by calling skyvern APIs and wait until the task is finished
+ > dispatch task will return immediately and the task will be running in the background. You can use `get_task` tool to poll the task information until the task is finished.
+
+ no need to run `skyvern init` command in your terminal to set up skyvern before using this integration.
+
+ ```python
+ import asyncio
+ from dotenv import load_dotenv
+ from llama_index.agent.openai import OpenAIAgent
+ from llama_index.llms.openai import OpenAI
+ from llama_index.core.tools import FunctionTool
+ from skyvern_llamaindex.client import SkyvernTool
+
+ # load OpenAI API key from .env
+ load_dotenv()
+
+ async def sleep(seconds: int) -> str:
+     await asyncio.sleep(seconds)
+     return f"Slept for {seconds} seconds"
+
+ sleep_tool = FunctionTool.from_defaults(
+     async_fn=sleep,
+     description="Sleep for a given number of seconds",
+     name="sleep",
+ )
+
+ skyvern_tool = SkyvernTool(api_key="<your_organization_api_key>")
+ # or you can load the api_key from SKYVERN_API_KEY in .env
+ # skyvern_tool = SkyvernTool()
+
+ agent = OpenAIAgent.from_tools(
+     tools=[skyvern_tool.dispatch_task(), skyvern_tool.get_task(), sleep_tool],
+     llm=OpenAI(model="gpt-4o"),
+     verbose=True,
+     max_function_calls=10,
+ )
+
+ response = agent.chat("Run a task with Skyvern. The task is about 'Navigate to the Hacker News homepage and get the top 3 posts.' Then, get this task information until it's completed. The task information re-get interval should be 60s.")
+ print(response)
+
+ ```
@@ -0,0 +1,8 @@
+ skyvern_llamaindex/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ skyvern_llamaindex/agent.py,sha256=BrA9JxqyETTnJcVv_Hj18oFeDesOINpPpXpszh0F-pc,5622
+ skyvern_llamaindex/client.py,sha256=JZZXghqLctZ_yV2XwNNOHhHcPYN7Q38jevOAuzUh4JU,6834
+ skyvern_llamaindex/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ skyvern_llamaindex/settings.py,sha256=DsW7xaol9azsJ89b9qvJfoB2BWqSR3C5BkiL1aYx8Ws,386
+ skyvern_llamaindex-0.0.4.dist-info/METADATA,sha256=xdaJbvcWjUyqbAslNuFAyDixt1JAKTqem67fWMDjYxI,10787
+ skyvern_llamaindex-0.0.4.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
+ skyvern_llamaindex-0.0.4.dist-info/RECORD,,
@@ -1,4 +1,4 @@
  Wheel-Version: 1.0
- Generator: poetry-core 1.9.1
+ Generator: poetry-core 2.1.1
  Root-Is-Purelib: true
  Tag: py3-none-any
@@ -1,18 +0,0 @@
- from pydantic import BaseModel
-
- from skyvern.forge.sdk.schemas.observers import ObserverTaskRequest
- from skyvern.forge.sdk.schemas.tasks import TaskRequest
-
-
- class TaskV1Request(TaskRequest):
-     max_steps: int = 10
-     timeout_seconds: int = 60 * 60
-
-
- class TaskV2Request(ObserverTaskRequest):
-     max_iterations: int = 10
-     timeout_seconds: int = 60 * 60
-
-
- class GetTaskInput(BaseModel):
-     task_id: str
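With `skyvern_llamaindex/schema.py` removed, callers no longer build `TaskV1Request`/`TaskV2Request` payloads; the 0.0.4 tools take a plain `user_prompt` (and optional `url`) and generate the task request internally. A hedged sketch of calling the agent-side coroutine directly, assuming a locally initialized Skyvern; the prompt and URL are illustrative:

```python
import asyncio

from skyvern_llamaindex.agent import SkyvernTaskToolSpec


async def main() -> None:
    spec = SkyvernTaskToolSpec()
    # 0.0.3 expected request-model kwargs (e.g. spec.run(**TaskV1Request fields));
    # 0.0.4 generates the task request from the prompt itself.
    task = await spec.dispatch_task(
        user_prompt="Navigate to the Hacker News homepage and get the top 3 posts.",
        url="https://news.ycombinator.com",
    )
    print(task)


asyncio.run(main())
```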
@@ -1,197 +0,0 @@
- Metadata-Version: 2.1
- Name: skyvern-llamaindex
- Version: 0.0.3
- Summary: Skyvern integration for LlamaIndex
- Author: lawyzheng
- Author-email: lawy@skyvern.com
- Requires-Python: >=3.11,<3.12
- Classifier: Programming Language :: Python :: 3
- Classifier: Programming Language :: Python :: 3.11
- Requires-Dist: llama-index (>=0.12.19,<0.13.0)
- Requires-Dist: skyvern (>=0.1.56,<0.2.0)
- Description-Content-Type: text/markdown
-
- <!-- START doctoc generated TOC please keep comment here to allow auto update -->
- <!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
- **Table of Contents** *generated with [DocToc](https://github.com/thlorenz/doctoc)*
-
- - [Skyvern LlamaIndex](#skyvern-llamaindex)
-   - [Installation](#installation)
-   - [Usage](#usage)
-     - [Run a task(sync) with skyvern agent (calling skyvern agent function directly in the tool)](#run-a-tasksync-with-skyvern-agent-calling-skyvern-agent-function-directly-in-the-tool)
-     - [Dispatch a task(async) with skyvern agent (calling skyvern agent function directly in the tool)](#dispatch-a-taskasync-with-skyvern-agent-calling-skyvern-agent-function-directly-in-the-tool)
-     - [Run a task(sync) with skyvern client (calling skyvern OpenAPI in the tool)](#run-a-tasksync-with-skyvern-client-calling-skyvern-openapi-in-the-tool)
-     - [Dispatch a task(async) with skyvern client (calling skyvern OpenAPI in the tool)](#dispatch-a-taskasync-with-skyvern-client-calling-skyvern-openapi-in-the-tool)
-
- <!-- END doctoc generated TOC please keep comment here to allow auto update -->
-
- # Skyvern LlamaIndex
-
- This is a LlamaIndex integration for Skyvern.
-
- ## Installation
-
- ```bash
- pip install skyvern-llamaindex
- ```
-
- ## Usage
-
- ### Run a task(sync) with skyvern agent (calling skyvern agent function directly in the tool)
- > sync task won't return until the task is finished.
-
- :warning: :warning: if you want to run this code block, you need to run `skyvern init --openai-api-key <your_openai_api_key>` command in your terminal to set up skyvern first.
-
-
- ```python
- import asyncio
- from dotenv import load_dotenv
- from llama_index.agent.openai import OpenAIAgent
- from llama_index.llms.openai import OpenAI
- from skyvern_llamaindex.agent import SkyvernTaskToolSpec
-
- # load OpenAI API key from .env
- load_dotenv()
-
- skyvern_tool = SkyvernTaskToolSpec()
-
- tools = skyvern_tool.to_tool_list(["run"])
-
- agent = OpenAIAgent.from_tools(
-     tools=tools,
-     llm=OpenAI(model="gpt-4o"),
-     verbose=True,
-     max_function_calls=10,
- )
-
- # to run skyvern agent locally, must run `skyvern init` first
- response = agent.chat("Run the task with skyvern. The task is about 'Navigate to the Hacker News homepage and get the top 3 posts.'")
- print(response)
- ```
-
- ### Dispatch a task(async) with skyvern agent (calling skyvern agent function directly in the tool)
- > dispatch task will return immediately and the task will be running in the background. You can use `get` tool to poll the task information until the task is finished.
-
- :warning: :warning: if you want to run this code block, you need to run `skyvern init --openai-api-key <your_openai_api_key>` command in your terminal to set up skyvern first.
-
- ```python
- import asyncio
- from dotenv import load_dotenv
- from llama_index.agent.openai import OpenAIAgent
- from llama_index.llms.openai import OpenAI
- from llama_index.core.tools import FunctionTool
- from skyvern_llamaindex.agent import SkyvernTaskToolSpec
-
- async def sleep(seconds: int) -> str:
-     await asyncio.sleep(seconds)
-     return f"Slept for {seconds} seconds"
-
- # load OpenAI API key from .env
- load_dotenv()
-
- skyvern_tool = SkyvernTaskToolSpec()
-
- sleep_tool = FunctionTool.from_defaults(
-     async_fn=sleep,
-     description="Sleep for a given number of seconds",
-     name="sleep",
- )
-
- tools = skyvern_tool.to_tool_list(["dispatch", "get"])
- tools.append(sleep_tool)
-
- agent = OpenAIAgent.from_tools(
-     tools=tools,
-     llm=OpenAI(model="gpt-4o"),
-     verbose=True,
-     max_function_calls=10,
- )
-
- response = agent.chat("Run a task with Skyvern. The task is about 'Navigate to the Hacker News homepage and get the top 3 posts.' Then, get this task information until it's completed. The task information re-get interval should be 60s.")
- print(response)
-
- ```
-
- ### Run a task(sync) with skyvern client (calling skyvern OpenAPI in the tool)
- > sync task won't return until the task is finished.
-
- no need to run `skyvern init` command in your terminal to set up skyvern before using this integration.
-
- ```python
- import asyncio
- from dotenv import load_dotenv
- from llama_index.agent.openai import OpenAIAgent
- from llama_index.llms.openai import OpenAI
- from skyvern_llamaindex.client import SkyvernTaskToolSpec
-
-
- async def sleep(seconds: int) -> str:
-     await asyncio.sleep(seconds)
-     return f"Slept for {seconds} seconds"
-
- # load OpenAI API key from .env
- load_dotenv()
-
- skyvern_client_tool = SkyvernTaskToolSpec(
-     credential="<your_organization_api_key>",
- )
-
- tools = skyvern_client_tool.to_tool_list(["run"])
-
- agent = OpenAIAgent.from_tools(
-     tools=tools,
-     llm=OpenAI(model="gpt-4o"),
-     verbose=True,
-     max_function_calls=10,
- )
-
- response = agent.chat("Run the task with skyvern. The task is about 'Navigate to the Hacker News homepage and get the top 3 posts.'")
- print(response)
-
- ```
-
- ### Dispatch a task(async) with skyvern client (calling skyvern OpenAPI in the tool)
- > dispatch task will return immediately and the task will be running in the background. You can use `get` tool to poll the task information until the task is finished.
-
- no need to run `skyvern init` command in your terminal to set up skyvern before using this integration.
-
- ```python
- import asyncio
- from dotenv import load_dotenv
- from llama_index.agent.openai import OpenAIAgent
- from llama_index.llms.openai import OpenAI
- from llama_index.core.tools import FunctionTool
- from skyvern_llamaindex.client import SkyvernTaskToolSpec
-
-
- async def sleep(seconds: int) -> str:
-     await asyncio.sleep(seconds)
-     return f"Slept for {seconds} seconds"
-
- # load OpenAI API key from .env
- load_dotenv()
-
- skyvern_client_tool = SkyvernTaskToolSpec(
-     credential="<your_organization_api_key>",
- )
-
- sleep_tool = FunctionTool.from_defaults(
-     async_fn=sleep,
-     description="Sleep for a given number of seconds",
-     name="sleep",
- )
-
- tools = skyvern_client_tool.to_tool_list(["dispatch", "get"])
- tools.append(sleep_tool)
-
- agent = OpenAIAgent.from_tools(
-     tools=tools,
-     llm=OpenAI(model="gpt-4o"),
-     verbose=True,
-     max_function_calls=10,
- )
-
- response = agent.chat("Run a task with Skyvern. The task is about 'Navigate to the Hacker News homepage and get the top 3 posts.' Then, get this task information until it's completed. The task information re-get interval should be 60s.")
- print(response)
-
- ```
@@ -1,8 +0,0 @@
- skyvern_llamaindex/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- skyvern_llamaindex/agent.py,sha256=LAW5IAXkMZL0PR8E2A0bl8KvdGIEnkZCyT3lWZoWVGY,4606
- skyvern_llamaindex/client.py,sha256=o_5fmTFGHpLx-viTegJGiYdDIf11NOP5uXYlc9XXd-w,7682
- skyvern_llamaindex/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- skyvern_llamaindex/schema.py,sha256=tTvnSC-ms_tW8bnzIn6FXPOCngom7l62B-IyhIwvRxQ,409
- skyvern_llamaindex-0.0.3.dist-info/METADATA,sha256=mewOXnIZlYnvG9c1q-ETn5h5Oqi9gQixeWlHvkdWihc,6704
- skyvern_llamaindex-0.0.3.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
- skyvern_llamaindex-0.0.3.dist-info/RECORD,,