hyperpocket-llamaindex 0.1.10__tar.gz → 0.2.0__tar.gz

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: hyperpocket-llamaindex
-Version: 0.1.10
+Version: 0.2.0
 Author-email: Hyperpocket Team <hyperpocket@vessl.ai>
 Requires-Python: >=3.10
 Requires-Dist: hyperpocket>=0.0.3
@@ -3,11 +3,9 @@ from typing import List, Optional
 from pydantic import BaseModel
 
 try:
-    from llama_index.core.tools import FunctionTool, BaseTool, ToolMetadata
+    from llama_index.core.tools import BaseTool, FunctionTool, ToolMetadata
 except ImportError:
-    raise ImportError(
-        "You need to install llama-index to use pocket llamaindex"
-    )
+    raise ImportError("You need to install llama-index to use pocket llamaindex")
 
 from hyperpocket import Pocket
 from hyperpocket.tool import Tool
@@ -35,11 +33,16 @@ class PocketLlamaindex(Pocket):
             if isinstance(body, BaseModel):
                 body = body.model_dump()
 
-            result, interrupted = self.invoke_with_state(pocket_tool.name, body=body, thread_id=thread_id,
-                                                         profile=profile, **kwargs)
+            result, interrupted = self.invoke_with_state(
+                pocket_tool.name,
+                body=body,
+                thread_id=thread_id,
+                profile=profile,
+                **kwargs,
+            )
             say = result
             if interrupted:
-                say = f'{say}\n\nThe tool execution interrupted. Please talk to me to resume.'
+                say = f"{say}\n\nThe tool execution interrupted. Please talk to me to resume."
             return say
 
         async def _ainvoke(**kwargs) -> str:
@@ -55,11 +58,16 @@ class PocketLlamaindex(Pocket):
             if isinstance(body, BaseModel):
                 body = body.model_dump()
 
-            result, interrupted = await self.ainvoke_with_state(pocket_tool.name, body=body,
-                                                                thread_id=thread_id, profile=profile, **kwargs)
+            result, interrupted = await self.ainvoke_with_state(
+                pocket_tool.name,
+                body=body,
+                thread_id=thread_id,
+                profile=profile,
+                **kwargs,
+            )
             say = result
             if interrupted:
-                say = f'{say}\n\nThe tool execution interrupted. Please talk to me to resume.'
+                say = f"{say}\n\nThe tool execution interrupted. Please talk to me to resume."
             return say
 
         return FunctionTool.from_defaults(
@@ -69,5 +77,5 @@ class PocketLlamaindex(Pocket):
                 name=pocket_tool.name,
                 description=pocket_tool.get_description(use_profile=self.use_profile),
                 fn_schema=pocket_tool.schema_model(use_profile=self.use_profile),
-            )
+            ),
         )
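
Note: the FunctionTool objects assembled above are meant to be handed to a LlamaIndex agent. A minimal usage sketch, assuming the same tool source and model that appear in the test code later in this diff (the API key is a placeholder, not part of the change):

    # Sketch only: wiring PocketLlamaindex tools into a LlamaIndex OpenAI agent.
    from hyperpocket.tool import from_git
    from llama_index.agent.openai import OpenAIAgent
    from llama_index.llms.openai import OpenAI

    from hyperpocket_llamaindex import PocketLlamaindex

    pocket = PocketLlamaindex(
        tools=[
            from_git(
                "https://github.com/vessl-ai/hyperawesometools",
                "main",
                "managed-tools/none/simple-echo-tool",
            ),
        ],
        use_profile=False,
    )

    # get_tools() returns the FunctionTool instances built by get_tool() above.
    agent = OpenAIAgent.from_tools(
        tools=pocket.get_tools(),
        llm=OpenAI(model="gpt-4o", api_key="sk-..."),  # placeholder key
        verbose=True,
    )
    print(agent.chat("echo 'hello world'"))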
@@ -1,7 +1,7 @@
 
 [project]
 name = "hyperpocket-llamaindex"
-version = "0.1.10"
+version = "0.2.0"
 description = ""
 authors = [{ name = "Hyperpocket Team", email = "hyperpocket@vessl.ai" }]
 requires-python = ">=3.10"
@@ -18,19 +18,5 @@ dev = ["pytest>=8.3.4", "ruff>=0.8.6"]
 requires = ["hatchling"]
 build-backend = "hatchling.build"
 
-[tool.ruff.lint]
-select = [
-    "E", # pycodestyle errors,
-    "F", # pyflakes errors,
-    "I", # isort errors,
-]
-ignore = [
-    "E501", # line too long, handled by formatting
-]
-
 [tool.ruff]
-line-length = 88
-target-version = "py310"
-
-[tool.ruff.lint.per-file-ignores]
-"__init__.py" = ["F401"]
+extend = "../../../.ruff.toml"
@@ -2,17 +2,16 @@ import ast
 import json
 from unittest.async_case import IsolatedAsyncioTestCase
 
+from hyperpocket.config import config, secret
+from hyperpocket.tool import from_git
 from llama_index.agent.openai import OpenAIAgent
 from llama_index.llms.openai import OpenAI
 from pydantic import BaseModel
 
-from hyperpocket.config import config, secret
-from hyperpocket.tool import from_git
 from hyperpocket_llamaindex import PocketLlamaindex
 
 
 class TestPocketLlamaindexNoProfile(IsolatedAsyncioTestCase):
-
     async def asyncSetUp(self):
         config.public_server_port = "https"
         config.public_hostname = "localhost"
@@ -22,12 +21,15 @@ class TestPocketLlamaindexNoProfile(IsolatedAsyncioTestCase):
 
         self.pocket = PocketLlamaindex(
            tools=[
-                from_git("https://github.com/vessl-ai/hyperawesometools", "main",
-                         "managed-tools/none/simple-echo-tool"),
+                from_git(
+                    "https://github.com/vessl-ai/hyperawesometools",
+                    "main",
+                    "managed-tools/none/simple-echo-tool",
+                ),
                 self.add,
-                self.sub_pydantic_args
+                self.sub_pydantic_args,
             ],
-            use_profile=False
+            use_profile=False,
         )
 
         self.llm = OpenAI(model="gpt-4o", api_key=secret["OPENAI_API_KEY"])
@@ -40,7 +42,7 @@ class TestPocketLlamaindexNoProfile(IsolatedAsyncioTestCase):
         agent = OpenAIAgent.from_tools(
             tools=self.pocket.get_tools(),
             llm=OpenAI(model="gpt-4o", api_key=secret["OPENAI_API_KEY"]),
-            verbose=True
+            verbose=True,
         )
 
         # when
@@ -50,7 +52,9 @@ class TestPocketLlamaindexNoProfile(IsolatedAsyncioTestCase):
 
     async def test_function_tool_no_profile(self):
         # when
-        response = self.llm.chat_with_tools(user_msg="add 1, 2", tools=self.pocket.get_tools(), verbose=True)
+        response = self.llm.chat_with_tools(
+            user_msg="add 1, 2", tools=self.pocket.get_tools(), verbose=True
+        )
         message = response.message
         tool_calls = message.additional_kwargs["tool_calls"]
 
@@ -62,15 +66,20 @@ class TestPocketLlamaindexNoProfile(IsolatedAsyncioTestCase):
 
         # then
         self.assertEqual(tool_name, "add")
-        self.assertEqual(args, {
-            "a": 1,
-            "b": 2,
-        })
-        self.assertEqual(result, '3')
+        self.assertEqual(
+            args,
+            {
+                "a": 1,
+                "b": 2,
+            },
+        )
+        self.assertEqual(result, "3")
 
     async def test_pydantic_function_tool_no_profile(self):
         # when
-        response = self.llm.chat_with_tools(user_msg="sub 1, 2", tools=self.pocket.get_tools(), verbose=True)
+        response = self.llm.chat_with_tools(
+            user_msg="sub 1, 2", tools=self.pocket.get_tools(), verbose=True
+        )
         tool_calls = response.message.additional_kwargs["tool_calls"]
 
         tool_name = tool_calls[0].function.name
@@ -81,15 +90,20 @@ class TestPocketLlamaindexNoProfile(IsolatedAsyncioTestCase):
 
         # then
         self.assertEqual(tool_name, "sub_pydantic_args")
-        self.assertEqual(args, {
-            "a": {"first": 1},
-            "b": {"second": 2},
-        })
-        self.assertEqual(result, '-1')
+        self.assertEqual(
+            args,
+            {
+                "a": {"first": 1},
+                "b": {"second": 2},
+            },
+        )
+        self.assertEqual(result, "-1")
 
     async def test_wasm_tool_no_profile(self):
         # when
-        response = self.llm.chat_with_tools(user_msg="echo 'hello world'", tools=self.pocket.get_tools(), verbose=True)
+        response = self.llm.chat_with_tools(
+            user_msg="echo 'hello world'", tools=self.pocket.get_tools(), verbose=True
+        )
         tool_calls = response.message.additional_kwargs["tool_calls"]
 
         tool_name = tool_calls[0].function.name
@@ -101,9 +115,7 @@ class TestPocketLlamaindexNoProfile(IsolatedAsyncioTestCase):
 
         # then
         self.assertEqual(tool_name, "simple_echo_text")
-        self.assertEqual(args, {
-            "text": "hello world"
-        })
+        self.assertEqual(args, {"text": "hello world"})
         self.assertTrue(output["stdout"].startswith("echo message : hello world"))
 
     @staticmethod
@@ -2,17 +2,16 @@ import ast
 import json
 from unittest.async_case import IsolatedAsyncioTestCase
 
+from hyperpocket.config import config, secret
+from hyperpocket.tool import from_git
 from llama_index.agent.openai import OpenAIAgent
 from llama_index.llms.openai import OpenAI
 from pydantic import BaseModel
 
-from hyperpocket.config import config, secret
-from hyperpocket.tool import from_git
 from hyperpocket_llamaindex import PocketLlamaindex
 
 
 class TestPocketLlamaindexUseProfile(IsolatedAsyncioTestCase):
-
     async def asyncSetUp(self):
         config.public_server_port = "https"
         config.public_hostname = "localhost"
@@ -22,12 +21,15 @@ class TestPocketLlamaindexUseProfile(IsolatedAsyncioTestCase):
 
         self.pocket = PocketLlamaindex(
            tools=[
-                from_git("https://github.com/vessl-ai/hyperawesometools", "main",
-                         "managed-tools/none/simple-echo-tool"),
+                from_git(
+                    "https://github.com/vessl-ai/hyperawesometools",
+                    "main",
+                    "managed-tools/none/simple-echo-tool",
+                ),
                 self.add,
-                self.sub_pydantic_args
+                self.sub_pydantic_args,
             ],
-            use_profile=True
+            use_profile=True,
         )
 
         self.llm = OpenAI(model="gpt-4o", api_key=secret["OPENAI_API_KEY"])
@@ -40,7 +42,7 @@ class TestPocketLlamaindexUseProfile(IsolatedAsyncioTestCase):
         agent = OpenAIAgent.from_tools(
             tools=self.pocket.get_tools(),
             llm=OpenAI(model="gpt-4o", api_key=secret["OPENAI_API_KEY"]),
-            verbose=True
+            verbose=True,
        )
 
         # when
@@ -50,7 +52,9 @@ class TestPocketLlamaindexUseProfile(IsolatedAsyncioTestCase):
 
     async def test_function_tool(self):
         # when
-        response = self.llm.chat_with_tools(user_msg="add 1, 2", tools=self.pocket.get_tools(), verbose=True)
+        response = self.llm.chat_with_tools(
+            user_msg="add 1, 2", tools=self.pocket.get_tools(), verbose=True
+        )
         message = response.message
         tool_calls = message.additional_kwargs["tool_calls"]
 
@@ -62,15 +66,20 @@ class TestPocketLlamaindexUseProfile(IsolatedAsyncioTestCase):
 
         # then
         self.assertEqual(tool_name, "add")
-        self.assertEqual(args["body"], {
-            "a": 1,
-            "b": 2,
-        })
-        self.assertEqual(result, '3')
+        self.assertEqual(
+            args["body"],
+            {
+                "a": 1,
+                "b": 2,
+            },
+        )
+        self.assertEqual(result, "3")
 
     async def test_pydantic_function_tool(self):
         # when
-        response = self.llm.chat_with_tools(user_msg="sub 1, 2", tools=self.pocket.get_tools(), verbose=True)
+        response = self.llm.chat_with_tools(
+            user_msg="sub 1, 2", tools=self.pocket.get_tools(), verbose=True
+        )
         tool_calls = response.message.additional_kwargs["tool_calls"]
 
         tool_name = tool_calls[0].function.name
@@ -81,15 +90,20 @@ class TestPocketLlamaindexUseProfile(IsolatedAsyncioTestCase):
 
         # then
         self.assertEqual(tool_name, "sub_pydantic_args")
-        self.assertEqual(args["body"], {
-            "a": {"first": 1},
-            "b": {"second": 2},
-        })
-        self.assertEqual(result, '-1')
+        self.assertEqual(
+            args["body"],
+            {
+                "a": {"first": 1},
+                "b": {"second": 2},
+            },
+        )
+        self.assertEqual(result, "-1")
 
     async def test_wasm_tool(self):
         # when
-        response = self.llm.chat_with_tools(user_msg="echo 'hello world'", tools=self.pocket.get_tools(), verbose=True)
+        response = self.llm.chat_with_tools(
+            user_msg="echo 'hello world'", tools=self.pocket.get_tools(), verbose=True
+        )
         tool_calls = response.message.additional_kwargs["tool_calls"]
 
         tool_name = tool_calls[0].function.name
@@ -101,9 +115,7 @@ class TestPocketLlamaindexUseProfile(IsolatedAsyncioTestCase):
 
         # then
         self.assertEqual(tool_name, "simple_echo_text")
-        self.assertEqual(args["body"], {
-            "text": "hello world"
-        })
+        self.assertEqual(args["body"], {"text": "hello world"})
         self.assertTrue(output["stdout"].startswith("echo message : hello world"))
 
     @staticmethod
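
Both test classes register two helper tools (self.add and self.sub_pydantic_args) whose definitions fall outside the hunks shown; the diff cuts off at the @staticmethod decorator. A purely hypothetical sketch of what they might look like, inferred only from the assertions above (the names, the argument shapes {"first": 1} / {"second": 2}, and the "3" / "-1" results are the grounded parts; everything else is a guess):

    # Hypothetical reconstruction of the helper tools; the real bodies are not
    # part of this diff.
    from unittest.async_case import IsolatedAsyncioTestCase

    from pydantic import BaseModel


    class FirstArg(BaseModel):   # hypothetical model name; the test passes {"first": 1}
        first: int


    class SecondArg(BaseModel):  # hypothetical model name; the test passes {"second": 2}
        second: int


    class TestPocketLlamaindexUseProfile(IsolatedAsyncioTestCase):
        # ... asyncSetUp and the test methods shown in the diff above ...

        @staticmethod
        def add(a: int, b: int) -> int:
            """Plain function tool; the test expects add(1, 2) to come back as "3"."""
            return a + b

        @staticmethod
        def sub_pydantic_args(a: FirstArg, b: SecondArg) -> int:
            """Pydantic-typed tool; the test expects 1 - 2 to come back as "-1"."""
            return a.first - b.second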