skyvern-llamaindex 0.0.2__tar.gz → 0.0.3__tar.gz

@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: skyvern-llamaindex
- Version: 0.0.2
+ Version: 0.0.3
  Summary: Skyvern integration for LlamaIndex
  Author: lawyzheng
  Author-email: lawy@skyvern.com
@@ -48,14 +48,14 @@ import asyncio
  from dotenv import load_dotenv
  from llama_index.agent.openai import OpenAIAgent
  from llama_index.llms.openai import OpenAI
- from skyvern_llamaindex.agent import SkyvernToolSpec
+ from skyvern_llamaindex.agent import SkyvernTaskToolSpec

  # load OpenAI API key from .env
  load_dotenv()

- skyvern_tool = SkyvernToolSpec()
+ skyvern_tool = SkyvernTaskToolSpec()

- tools = skyvern_tool.to_tool_list(["run_task"])
+ tools = skyvern_tool.to_tool_list(["run"])

  agent = OpenAIAgent.from_tools(
      tools=tools,
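The hunks above bump the version in the package metadata and update the first README example: `SkyvernToolSpec` becomes `SkyvernTaskToolSpec`, and the tool names passed to `to_tool_list` are shortened. A minimal sketch, grounded only in what this diff shows, that lists the renamed tool surface (it assumes `skyvern init` has already been run, per the README warning in the next hunk); the LLM-facing metadata names such as `run-skyvern-agent-task` are unchanged further down in this diff, only the Python-level names change:

```python
from skyvern_llamaindex.agent import SkyvernTaskToolSpec  # was SkyvernToolSpec in 0.0.2

# 0.0.2: to_tool_list(["run_task", "dispatch_task", "get_task"])
# 0.0.3: the spec functions are shortened to run / dispatch / get
skyvern_tool = SkyvernTaskToolSpec()
tools = skyvern_tool.to_tool_list(["run", "dispatch", "get"])

for tool in tools:
    # each entry is a llama_index FunctionTool; its LLM-facing name comes from
    # spec_metadata (e.g. "run-skyvern-agent-task"), which this release keeps as-is
    print(tool.metadata.name, "->", tool.metadata.description)
```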
@@ -70,7 +70,7 @@ print(response)
  ```

  ### Dispatch a task(async) with skyvern agent (calling skyvern agent function directly in the tool)
- > dispatch task will return immediately and the task will be running in the background. You can use `get_task` tool to poll the task information until the task is finished.
+ > dispatch task will return immediately and the task will be running in the background. You can use `get` tool to poll the task information until the task is finished.

  :warning: :warning: if you want to run this code block, you need to run `skyvern init --openai-api-key <your_openai_api_key>` command in your terminal to set up skyvern first.

@@ -80,7 +80,7 @@ from dotenv import load_dotenv
  from llama_index.agent.openai import OpenAIAgent
  from llama_index.llms.openai import OpenAI
  from llama_index.core.tools import FunctionTool
- from skyvern_llamaindex.agent import SkyvernToolSpec
+ from skyvern_llamaindex.agent import SkyvernTaskToolSpec

  async def sleep(seconds: int) -> str:
      await asyncio.sleep(seconds)
@@ -89,7 +89,7 @@ async def sleep(seconds: int) -> str:
  # load OpenAI API key from .env
  load_dotenv()

- skyvern_tool = SkyvernToolSpec()
+ skyvern_tool = SkyvernTaskToolSpec()

  sleep_tool = FunctionTool.from_defaults(
      async_fn=sleep,
@@ -97,7 +97,7 @@ sleep_tool = FunctionTool.from_defaults(
      name="sleep",
  )

- tools = skyvern_tool.to_tool_list(["dispatch_task", "get_task"])
+ tools = skyvern_tool.to_tool_list(["dispatch", "get"])
  tools.append(sleep_tool)

  agent = OpenAIAgent.from_tools(
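The hunks above update the README's agent-backed dispatch example, where the LLM decides when to call the `dispatch`, `sleep`, and `get` tools. For completeness, a minimal sketch of the same dispatch-then-poll flow calling the renamed methods directly, without an LLM in between. It assumes the spec is configured for the TaskV1 engine; the request fields, response attribute, and terminal-status strings are illustrative assumptions (see `TaskV1Request` and skyvern's task status enum for the authoritative schema), and the agent-backed spec still requires `skyvern init` as the warning above says:

```python
import asyncio

from skyvern_llamaindex.agent import SkyvernTaskToolSpec


async def main() -> None:
    spec = SkyvernTaskToolSpec()

    # dispatch returns immediately with a creation response; the task keeps
    # running in the background (per the tool description in this diff)
    created = await spec.dispatch(
        url="https://news.ycombinator.com",    # assumed TaskV1Request field
        navigation_goal="Open the top story",  # assumed TaskV1Request field
    )

    # poll with the renamed `get` until the task settles (bounded, ~5 minutes)
    for _ in range(30):
        task = await spec.get(task_id=created.task_id)  # task_id assumed on the response
        status = getattr(task, "status", None)
        print("status:", status)
        if status in ("completed", "failed", "terminated", "canceled"):  # assumed terminal states
            break
        await asyncio.sleep(10)


asyncio.run(main())
```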
@@ -122,7 +122,7 @@ import asyncio
  from dotenv import load_dotenv
  from llama_index.agent.openai import OpenAIAgent
  from llama_index.llms.openai import OpenAI
- from skyvern_llamaindex.client import SkyvernToolSpec
+ from skyvern_llamaindex.client import SkyvernTaskToolSpec


  async def sleep(seconds: int) -> str:
@@ -132,11 +132,11 @@ async def sleep(seconds: int) -> str:
  # load OpenAI API key from .env
  load_dotenv()

- skyvern_client_tool = SkyvernToolSpec(
+ skyvern_client_tool = SkyvernTaskToolSpec(
      credential="<your_organization_api_key>",
  )

- tools = skyvern_client_tool.to_tool_list(["run_task"])
+ tools = skyvern_client_tool.to_tool_list(["run"])

  agent = OpenAIAgent.from_tools(
      tools=tools,
@@ -151,7 +151,7 @@ print(response)
  ```

  ### Dispatch a task(async) with skyvern client (calling skyvern OpenAPI in the tool)
- > dispatch task will return immediately and the task will be running in the background. You can use `get_task` tool to poll the task information until the task is finished.
+ > dispatch task will return immediately and the task will be running in the background. You can use `get` tool to poll the task information until the task is finished.

  no need to run `skyvern init` command in your terminal to set up skyvern before using this integration.

@@ -161,7 +161,7 @@ from dotenv import load_dotenv
  from llama_index.agent.openai import OpenAIAgent
  from llama_index.llms.openai import OpenAI
  from llama_index.core.tools import FunctionTool
- from skyvern_llamaindex.client import SkyvernToolSpec
+ from skyvern_llamaindex.client import SkyvernTaskToolSpec


  async def sleep(seconds: int) -> str:
@@ -171,7 +171,7 @@ async def sleep(seconds: int) -> str:
  # load OpenAI API key from .env
  load_dotenv()

- skyvern_client_tool = SkyvernToolSpec(
+ skyvern_client_tool = SkyvernTaskToolSpec(
      credential="<your_organization_api_key>",
  )

@@ -181,7 +181,7 @@ sleep_tool = FunctionTool.from_defaults(
      name="sleep",
  )

- tools = skyvern_client_tool.to_tool_list(["dispatch_task", "get_task"])
+ tools = skyvern_client_tool.to_tool_list(["dispatch", "get"])
  tools.append(sleep_tool)

  agent = OpenAIAgent.from_tools(
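The client-backed variant shown above needs only the organization API key (no `skyvern init`), since its tools call Skyvern's hosted API. A minimal sketch of invoking the renamed `run` directly with that spec, assuming the TaskV1 engine; it blocks until the task finishes, and the request fields are illustrative assumptions rather than the full `TaskV1Request` schema:

```python
import asyncio

from skyvern_llamaindex.client import SkyvernTaskToolSpec  # was SkyvernToolSpec in 0.0.2


async def main() -> None:
    spec = SkyvernTaskToolSpec(credential="<your_organization_api_key>")

    # `run` does not return until the task is finished (per the tool description below)
    result = await spec.run(
        url="https://www.skyvern.com",                    # assumed TaskV1Request field
        navigation_goal="Describe what the page offers",  # assumed TaskV1Request field
    )
    print(result)


asyncio.run(main())
```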
@@ -35,14 +35,14 @@ import asyncio
  from dotenv import load_dotenv
  from llama_index.agent.openai import OpenAIAgent
  from llama_index.llms.openai import OpenAI
- from skyvern_llamaindex.agent import SkyvernToolSpec
+ from skyvern_llamaindex.agent import SkyvernTaskToolSpec

  # load OpenAI API key from .env
  load_dotenv()

- skyvern_tool = SkyvernToolSpec()
+ skyvern_tool = SkyvernTaskToolSpec()

- tools = skyvern_tool.to_tool_list(["run_task"])
+ tools = skyvern_tool.to_tool_list(["run"])

  agent = OpenAIAgent.from_tools(
      tools=tools,
@@ -57,7 +57,7 @@ print(response)
  ```

  ### Dispatch a task(async) with skyvern agent (calling skyvern agent function directly in the tool)
- > dispatch task will return immediately and the task will be running in the background. You can use `get_task` tool to poll the task information until the task is finished.
+ > dispatch task will return immediately and the task will be running in the background. You can use `get` tool to poll the task information until the task is finished.

  :warning: :warning: if you want to run this code block, you need to run `skyvern init --openai-api-key <your_openai_api_key>` command in your terminal to set up skyvern first.

@@ -67,7 +67,7 @@ from dotenv import load_dotenv
  from llama_index.agent.openai import OpenAIAgent
  from llama_index.llms.openai import OpenAI
  from llama_index.core.tools import FunctionTool
- from skyvern_llamaindex.agent import SkyvernToolSpec
+ from skyvern_llamaindex.agent import SkyvernTaskToolSpec

  async def sleep(seconds: int) -> str:
      await asyncio.sleep(seconds)
@@ -76,7 +76,7 @@ async def sleep(seconds: int) -> str:
  # load OpenAI API key from .env
  load_dotenv()

- skyvern_tool = SkyvernToolSpec()
+ skyvern_tool = SkyvernTaskToolSpec()

  sleep_tool = FunctionTool.from_defaults(
      async_fn=sleep,
@@ -84,7 +84,7 @@ sleep_tool = FunctionTool.from_defaults(
      name="sleep",
  )

- tools = skyvern_tool.to_tool_list(["dispatch_task", "get_task"])
+ tools = skyvern_tool.to_tool_list(["dispatch", "get"])
  tools.append(sleep_tool)

  agent = OpenAIAgent.from_tools(
@@ -109,7 +109,7 @@ import asyncio
  from dotenv import load_dotenv
  from llama_index.agent.openai import OpenAIAgent
  from llama_index.llms.openai import OpenAI
- from skyvern_llamaindex.client import SkyvernToolSpec
+ from skyvern_llamaindex.client import SkyvernTaskToolSpec


  async def sleep(seconds: int) -> str:
@@ -119,11 +119,11 @@ async def sleep(seconds: int) -> str:
  # load OpenAI API key from .env
  load_dotenv()

- skyvern_client_tool = SkyvernToolSpec(
+ skyvern_client_tool = SkyvernTaskToolSpec(
      credential="<your_organization_api_key>",
  )

- tools = skyvern_client_tool.to_tool_list(["run_task"])
+ tools = skyvern_client_tool.to_tool_list(["run"])

  agent = OpenAIAgent.from_tools(
      tools=tools,
@@ -138,7 +138,7 @@ print(response)
  ```

  ### Dispatch a task(async) with skyvern client (calling skyvern OpenAPI in the tool)
- > dispatch task will return immediately and the task will be running in the background. You can use `get_task` tool to poll the task information until the task is finished.
+ > dispatch task will return immediately and the task will be running in the background. You can use `get` tool to poll the task information until the task is finished.

  no need to run `skyvern init` command in your terminal to set up skyvern before using this integration.

@@ -148,7 +148,7 @@ from dotenv import load_dotenv
  from llama_index.agent.openai import OpenAIAgent
  from llama_index.llms.openai import OpenAI
  from llama_index.core.tools import FunctionTool
- from skyvern_llamaindex.client import SkyvernToolSpec
+ from skyvern_llamaindex.client import SkyvernTaskToolSpec


  async def sleep(seconds: int) -> str:
@@ -158,7 +158,7 @@ async def sleep(seconds: int) -> str:
  # load OpenAI API key from .env
  load_dotenv()

- skyvern_client_tool = SkyvernToolSpec(
+ skyvern_client_tool = SkyvernTaskToolSpec(
      credential="<your_organization_api_key>",
  )

@@ -168,7 +168,7 @@ sleep_tool = FunctionTool.from_defaults(
      name="sleep",
  )

- tools = skyvern_client_tool.to_tool_list(["dispatch_task", "get_task"])
+ tools = skyvern_client_tool.to_tool_list(["dispatch", "get"])
  tools.append(sleep_tool)

  agent = OpenAIAgent.from_tools(
@@ -1,6 +1,6 @@
  [tool.poetry]
  name = "skyvern-llamaindex"
- version = "0.0.2"
+ version = "0.0.3"
  description = "Skyvern integration for LlamaIndex"
  authors = ["lawyzheng <lawy@skyvern.com>"]
  packages = [{ include = "skyvern_llamaindex" }]
@@ -9,42 +9,42 @@ from skyvern.forge.sdk.schemas.observers import ObserverTask
  from skyvern.forge.sdk.schemas.tasks import CreateTaskResponse, TaskResponse


- class SkyvernToolSpec(BaseToolSpec):
+ class SkyvernTaskToolSpec(BaseToolSpec):
      spec_functions: List[SPEC_FUNCTION_TYPE] = [
-         "run_task",
-         "dispatch_task",
-         "get_task",
+         "run",
+         "dispatch",
+         "get",
      ]
      spec_metadata: Dict[str, Dict[str, ToolMetadata]] = {
          "TaskV1": {
-             "run_task": ToolMetadata(
+             "run": ToolMetadata(
                  name="run-skyvern-agent-task",
                  description="Use Skyvern agent to run a task. This function won't return until the task is finished.",
                  fn_schema=TaskV1Request,
              ),
-             "dispatch_task": ToolMetadata(
+             "dispatch": ToolMetadata(
                  name="dispatch-skyvern-agent-task",
                  description="Use Skyvern agent to dispatch a task. This function will return immediately and the task will be running in the background.",
                  fn_schema=TaskV1Request,
              ),
-             "get_task": ToolMetadata(
+             "get": ToolMetadata(
                  name="get-skyvern-agent-task",
                  description="Use Skyvern agent to get a task.",
                  fn_schema=GetTaskInput,
              ),
          },
          "TaskV2": {
-             "run_task": ToolMetadata(
+             "run": ToolMetadata(
                  name="run-skyvern-agent-task",
                  description="Use Skyvern agent to run a task. This function won't return until the task is finished.",
                  fn_schema=TaskV2Request,
              ),
-             "dispatch_task": ToolMetadata(
+             "dispatch": ToolMetadata(
                  name="dispatch-skyvern-agent-task",
                  description="Use Skyvern agent to dispatch a task. This function will return immediately and the task will be running in the background.",
                  fn_schema=TaskV2Request,
              ),
-             "get_task": ToolMetadata(
+             "get": ToolMetadata(
                  name="get-skyvern-agent-task",
                  description="Use Skyvern agent to get a task.",
                  fn_schema=GetTaskInput,
@@ -66,19 +66,19 @@ class SkyvernToolSpec(BaseToolSpec):

          return self.spec_metadata.get(self.engine, {}).get(fn_name)

-     async def run_task(self, **kwargs: Dict[str, Any]) -> TaskResponse | ObserverTask:
+     async def run(self, **kwargs: Dict[str, Any]) -> TaskResponse | ObserverTask:
          if self.engine == "TaskV1":
              return await self.run_task_v1(**kwargs)
          else:
              return await self.run_task_v2(**kwargs)

-     async def dispatch_task(self, **kwargs: Dict[str, Any]) -> CreateTaskResponse | ObserverTask:
+     async def dispatch(self, **kwargs: Dict[str, Any]) -> CreateTaskResponse | ObserverTask:
          if self.engine == "TaskV1":
              return await self.dispatch_task_v1(**kwargs)
          else:
              return await self.dispatch_task_v2(**kwargs)

-     async def get_task(self, task_id: str) -> TaskResponse | ObserverTask | None:
+     async def get(self, task_id: str) -> TaskResponse | ObserverTask | None:
          if self.engine == "TaskV1":
              return await self.get_task_v1(task_id)
          else:
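The two hunks above apparently come from the agent-backed tool spec module (the README imports it as `skyvern_llamaindex.agent`). Only the public wrappers listed in `spec_functions` are renamed; the engine-specific helpers (`run_task_v1`, `run_task_v2`, `dispatch_task_v1`, ...) keep their old names, and `get_metadata_from_fn_name` is now keyed by the new names. A sketch of how that plays out when building tools, assuming the standard `BaseToolSpec.to_tool_list` behavior from llama_index:

```python
from skyvern_llamaindex.agent import SkyvernTaskToolSpec

spec = SkyvernTaskToolSpec()

# to_tool_list resolves each requested name against spec_functions, wraps the
# matching async method in a FunctionTool, and attaches the ToolMetadata that
# get_metadata_from_fn_name returns for the currently selected engine.
tools = spec.to_tool_list(["dispatch", "get"])
print([t.metadata.name for t in tools])  # e.g. ["dispatch-skyvern-agent-task", "get-skyvern-agent-task"]

# the 0.0.2 names are gone from the class, so requesting them no longer resolves:
# spec.to_tool_list(["dispatch_task", "get_task"])  # fails on 0.0.3
```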
@@ -9,43 +9,43 @@ from skyvern.client import AsyncSkyvern
  from skyvern.forge.sdk.schemas.tasks import CreateTaskResponse, TaskResponse


- class SkyvernToolSpec(BaseToolSpec):
+ class SkyvernTaskToolSpec(BaseToolSpec):
      spec_functions: List[SPEC_FUNCTION_TYPE] = [
-         "run_task",
-         "dispatch_task",
-         "get_task",
+         "run",
+         "dispatch",
+         "get",
      ]

      spec_metadata: Dict[str, Dict[str, ToolMetadata]] = {
          "TaskV1": {
-             "run_task": ToolMetadata(
+             "run": ToolMetadata(
                  name="run-skyvern-client-task",
                  description="Use Skyvern client to run a task. This function won't return until the task is finished.",
                  fn_schema=TaskV1Request,
              ),
-             "dispatch_task": ToolMetadata(
+             "dispatch": ToolMetadata(
                  name="dispatch-skyvern-client-task",
                  description="Use Skyvern client to dispatch a task. This function will return immediately and the task will be running in the background.",
                  fn_schema=TaskV1Request,
              ),
-             "get_task": ToolMetadata(
+             "get": ToolMetadata(
                  name="get-skyvern-client-task",
                  description="Use Skyvern client to get a task.",
                  fn_schema=GetTaskInput,
              ),
          },
          "TaskV2": {
-             "run_task": ToolMetadata(
+             "run": ToolMetadata(
                  name="run-skyvern-client-task",
                  description="Use Skyvern client to run a task. This function won't return until the task is finished.",
                  fn_schema=TaskV2Request,
              ),
-             "dispatch_task": ToolMetadata(
+             "dispatch": ToolMetadata(
                  name="dispatch-skyvern-client-task",
                  description="Use Skyvern client to dispatch a task. This function will return immediately and the task will be running in the background.",
                  fn_schema=TaskV2Request,
              ),
-             "get_task": ToolMetadata(
+             "get": ToolMetadata(
                  name="get-skyvern-client-task",
                  description="Use Skyvern client to get a task.",
                  fn_schema=GetTaskInput,
@@ -79,19 +79,19 @@ class SkyvernToolSpec(BaseToolSpec):

          return self.spec_metadata.get(self.engine, {}).get(fn_name)

-     async def run_task(self, **kwargs: Dict[str, Any]) -> TaskResponse | Dict[str, Any | None]:
+     async def run(self, **kwargs: Dict[str, Any]) -> TaskResponse | Dict[str, Any | None]:
          if self.engine == "TaskV1":
              return await self.run_task_v1(**kwargs)
          else:
              return await self.run_task_v2(**kwargs)

-     async def dispatch_task(self, **kwargs: Dict[str, Any]) -> CreateTaskResponse | Dict[str, Any | None]:
+     async def dispatch(self, **kwargs: Dict[str, Any]) -> CreateTaskResponse | Dict[str, Any | None]:
          if self.engine == "TaskV1":
              return await self.dispatch_task_v1(**kwargs)
          else:
              return await self.dispatch_task_v2(**kwargs)

-     async def get_task(self, task_id: str) -> TaskResponse | Dict[str, Any | None]:
+     async def get(self, task_id: str) -> TaskResponse | Dict[str, Any | None]:
          if self.engine == "TaskV1":
              return await self.get_task_v1(task_id)
          else:
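Finally, the client-backed module receives the identical rename. Taken together, the change is mechanical; the call-site migration implied by this diff, summarized as comments:

```python
# 0.0.2 -> 0.0.3 migration, for both skyvern_llamaindex.agent and skyvern_llamaindex.client:
#
#   from skyvern_llamaindex.agent import SkyvernToolSpec    ->  SkyvernTaskToolSpec
#   from skyvern_llamaindex.client import SkyvernToolSpec   ->  SkyvernTaskToolSpec
#
#   to_tool_list(["run_task"])                    ->  to_tool_list(["run"])
#   to_tool_list(["dispatch_task", "get_task"])   ->  to_tool_list(["dispatch", "get"])
#
#   await spec.run_task(...)       ->  await spec.run(...)
#   await spec.dispatch_task(...)  ->  await spec.dispatch(...)
#   await spec.get_task(task_id)   ->  await spec.get(task_id)
#
# The LLM-facing tool names ("run-skyvern-agent-task", "run-skyvern-client-task", ...)
# and the request schemas (TaskV1Request, TaskV2Request, GetTaskInput) are unchanged.
```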