opengradient 0.4.7__tar.gz → 0.5.2__tar.gz

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.
Files changed (41)
  1. {opengradient-0.4.7/src/opengradient.egg-info → opengradient-0.5.2}/PKG-INFO +6 -26
  2. {opengradient-0.4.7 → opengradient-0.5.2}/pyproject.toml +6 -6
  3. {opengradient-0.4.7 → opengradient-0.5.2}/src/opengradient/__init__.py +20 -12
  4. opengradient-0.5.2/src/opengradient/abi/InferencePrecompile.abi +1 -0
  5. opengradient-0.5.2/src/opengradient/alphasense/run_model_tool.py +152 -0
  6. {opengradient-0.4.7 → opengradient-0.5.2}/src/opengradient/cli.py +160 -41
  7. {opengradient-0.4.7 → opengradient-0.5.2}/src/opengradient/client.py +635 -201
  8. {opengradient-0.4.7 → opengradient-0.5.2}/src/opengradient/defaults.py +4 -1
  9. {opengradient-0.4.7 → opengradient-0.5.2}/src/opengradient/llm/og_langchain.py +7 -5
  10. {opengradient-0.4.7 → opengradient-0.5.2}/src/opengradient/types.py +103 -7
  11. {opengradient-0.4.7 → opengradient-0.5.2}/src/opengradient/utils.py +5 -14
  12. opengradient-0.5.2/src/opengradient/workflow_models/__init__.py +28 -0
  13. opengradient-0.5.2/src/opengradient/workflow_models/constants.py +13 -0
  14. opengradient-0.5.2/src/opengradient/workflow_models/types.py +16 -0
  15. opengradient-0.5.2/src/opengradient/workflow_models/utils.py +39 -0
  16. opengradient-0.5.2/src/opengradient/workflow_models/workflow_models.py +97 -0
  17. {opengradient-0.4.7 → opengradient-0.5.2/src/opengradient.egg-info}/PKG-INFO +6 -26
  18. {opengradient-0.4.7 → opengradient-0.5.2}/src/opengradient.egg-info/SOURCES.txt +7 -1
  19. {opengradient-0.4.7 → opengradient-0.5.2}/src/opengradient.egg-info/requires.txt +1 -0
  20. opengradient-0.4.7/src/opengradient/alphasense/run_model_tool.py +0 -114
  21. {opengradient-0.4.7 → opengradient-0.5.2}/LICENSE +0 -0
  22. {opengradient-0.4.7 → opengradient-0.5.2}/README.md +0 -0
  23. {opengradient-0.4.7 → opengradient-0.5.2}/setup.cfg +0 -0
  24. {opengradient-0.4.7 → opengradient-0.5.2}/src/opengradient/abi/PriceHistoryInference.abi +0 -0
  25. {opengradient-0.4.7 → opengradient-0.5.2}/src/opengradient/abi/WorkflowScheduler.abi +0 -0
  26. {opengradient-0.4.7 → opengradient-0.5.2}/src/opengradient/abi/inference.abi +0 -0
  27. {opengradient-0.4.7 → opengradient-0.5.2}/src/opengradient/account.py +0 -0
  28. {opengradient-0.4.7 → opengradient-0.5.2}/src/opengradient/alphasense/__init__.py +0 -0
  29. {opengradient-0.4.7 → opengradient-0.5.2}/src/opengradient/alphasense/read_workflow_tool.py +0 -0
  30. {opengradient-0.4.7 → opengradient-0.5.2}/src/opengradient/alphasense/types.py +0 -0
  31. {opengradient-0.4.7 → opengradient-0.5.2}/src/opengradient/bin/PriceHistoryInference.bin +0 -0
  32. {opengradient-0.4.7 → opengradient-0.5.2}/src/opengradient/exceptions.py +0 -0
  33. {opengradient-0.4.7 → opengradient-0.5.2}/src/opengradient/llm/__init__.py +0 -0
  34. {opengradient-0.4.7 → opengradient-0.5.2}/src/opengradient/llm/og_openai.py +0 -0
  35. {opengradient-0.4.7 → opengradient-0.5.2}/src/opengradient/proto/__init__.py +0 -0
  36. {opengradient-0.4.7 → opengradient-0.5.2}/src/opengradient/proto/infer.proto +0 -0
  37. {opengradient-0.4.7 → opengradient-0.5.2}/src/opengradient/proto/infer_pb2.py +0 -0
  38. {opengradient-0.4.7 → opengradient-0.5.2}/src/opengradient/proto/infer_pb2_grpc.py +0 -0
  39. {opengradient-0.4.7 → opengradient-0.5.2}/src/opengradient.egg-info/dependency_links.txt +0 -0
  40. {opengradient-0.4.7 → opengradient-0.5.2}/src/opengradient.egg-info/entry_points.txt +0 -0
  41. {opengradient-0.4.7 → opengradient-0.5.2}/src/opengradient.egg-info/top_level.txt +0 -0
--- opengradient-0.4.7/src/opengradient.egg-info/PKG-INFO
+++ opengradient-0.5.2/PKG-INFO
@@ -1,34 +1,12 @@
-Metadata-Version: 2.2
+Metadata-Version: 2.4
 Name: opengradient
-Version: 0.4.7
+Version: 0.5.2
 Summary: Python SDK for OpenGradient decentralized model management & inference services
-Author-email: OpenGradient <oliver@opengradient.ai>
-License: MIT License
-
-        Copyright (c) 2024 OpenGradient
-
-        Permission is hereby granted, free of charge, to any person obtaining a copy
-        of this software and associated documentation files (the "Software"), to deal
-        in the Software without restriction, including without limitation the rights
-        to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-        copies of the Software, and to permit persons to whom the Software is
-        furnished to do so, subject to the following conditions:
-
-        The above copyright notice and this permission notice shall be included in all
-        copies or substantial portions of the Software.
-
-        THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-        IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-        FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-        AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-        LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-        OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-        SOFTWARE.
-
+Author-email: OpenGradient <kyle@vannalabs.ai>
+License-Expression: MIT
 Project-URL: Homepage, https://opengradient.ai
 Classifier: Development Status :: 3 - Alpha
 Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: MIT License
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
@@ -45,6 +23,8 @@ Requires-Dist: requests>=2.32.3
 Requires-Dist: langchain>=0.3.7
 Requires-Dist: openai>=1.58.1
 Requires-Dist: pydantic>=2.9.2
+Requires-Dist: og-test-x402==0.0.1
+Dynamic: license-file
 
 # OpenGradient Python SDK
 
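The metadata changes are mostly mechanical (Metadata-Version 2.4, an SPDX license expression in place of the embedded MIT text, a new maintainer contact), but note the new runtime dependency pin og-test-x402==0.0.1. A post-upgrade sanity check, as a sketch using only the standard library:

    from importlib.metadata import requires, version

    # Confirm the installed version and the new dependency pin from the metadata above.
    print(version("opengradient"))  # expected: 0.5.2
    print([req for req in requires("opengradient") if req.startswith("og-test-x402")])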
--- opengradient-0.4.7/pyproject.toml
+++ opengradient-0.5.2/pyproject.toml
@@ -1,19 +1,18 @@
 [build-system]
-requires = ["setuptools>=61.0"]
+requires = ["setuptools>=77.0.0"]
 build-backend = "setuptools.build_meta"
 
 [project]
 name = "opengradient"
-version = "0.4.7"
+version = "0.5.2"
 description = "Python SDK for OpenGradient decentralized model management & inference services"
-authors = [{name = "OpenGradient", email = "oliver@opengradient.ai"}]
-license = {file = "LICENSE"}
+authors = [{name = "OpenGradient", email = "kyle@vannalabs.ai"}]
 readme = "README.md"
 requires-python = ">=3.10"
+license = "MIT"
 classifiers = [
     "Development Status :: 3 - Alpha",
     "Intended Audience :: Developers",
-    "License :: OSI Approved :: MIT License",
     "Programming Language :: Python :: 3.10",
     "Programming Language :: Python :: 3.11",
     "Programming Language :: Python :: 3.12",
@@ -30,6 +29,7 @@ dependencies = [
     "langchain>=0.3.7",
     "openai>=1.58.1",
     "pydantic>=2.9.2",
+    "og-test-x402==0.0.1",
 ]
 
 [project.scripts]
@@ -47,7 +47,7 @@ include-package-data = true
 
 [tool.setuptools.packages.find]
 where = ["src"]
-include = ["opengradient*"]  # Explicitly include all opengradient packages
+include = ["opengradient*"]
 exclude = ["tests*", "stresstest*"]
 
 [tool.setuptools.package-data]
--- opengradient-0.4.7/src/opengradient/__init__.py
+++ opengradient-0.5.2/src/opengradient/__init__.py
@@ -5,7 +5,7 @@ OpenGradient Python SDK for interacting with AI models and infrastructure.
 from typing import Any, Dict, List, Optional, Tuple, Union
 
 from .client import Client
-from .defaults import DEFAULT_INFERENCE_CONTRACT_ADDRESS, DEFAULT_RPC_URL
+from .defaults import DEFAULT_INFERENCE_CONTRACT_ADDRESS, DEFAULT_RPC_URL, DEFAULT_API_URL
 from .types import (
     LLM,
     TEE_LLM,
@@ -14,9 +14,12 @@ from .types import (
     CandleType,
     CandleOrder,
     InferenceMode,
+    InferenceResult,
     LlmInferenceMode,
     TextGenerationOutput,
     ModelOutput,
+    ModelRepository,
+    FileUploadResult,
 )
 
 from . import llm, alphasense
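The three additions above (InferenceResult, ModelRepository, FileUploadResult) become part of the package's root namespace, so typed call sites can import them directly; for example:

    # New in 0.5.x per the diff above: typed results instead of plain dicts.
    from opengradient import FileUploadResult, InferenceResult, ModelRepository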
@@ -29,7 +32,9 @@ def new_client(
     password: Optional[str],
     private_key: str,
     rpc_url=DEFAULT_RPC_URL,
+    api_url=DEFAULT_API_URL,
     contract_address=DEFAULT_INFERENCE_CONTRACT_ADDRESS,
+    **kwargs,
 ) -> Client:
     """
     Creates a unique OpenGradient client instance with the given authentication and network settings.
@@ -42,10 +47,10 @@
         contract_address: Optional inference contract address
     """
 
-    return Client(email=email, password=password, private_key=private_key, rpc_url=rpc_url, contract_address=contract_address)
+    return Client(email=email, password=password, private_key=private_key, rpc_url=rpc_url, api_url=api_url, contract_address=contract_address, **kwargs)
 
 
-def init(email: str, password: str, private_key: str, rpc_url=DEFAULT_RPC_URL, contract_address=DEFAULT_INFERENCE_CONTRACT_ADDRESS):
+def init(email: str, password: str, private_key: str, rpc_url=DEFAULT_RPC_URL, api_url=DEFAULT_API_URL, contract_address=DEFAULT_INFERENCE_CONTRACT_ADDRESS):
     """Initialize the OpenGradient SDK with authentication and network settings.
 
     Args:
@@ -53,15 +58,16 @@ def init(email: str, password: str, private_key: str, rpc_url=DEFAULT_RPC_URL, c
         password: User's password for authentication
         private_key: Ethereum private key for blockchain transactions
         rpc_url: Optional RPC URL for the blockchain network, defaults to mainnet
+        api_url: Optional API URL for the OpenGradient API, defaults to mainnet
         contract_address: Optional inference contract address
     """
     global _client
-
-    _client = Client(private_key=private_key, rpc_url=rpc_url, email=email, password=password, contract_address=contract_address)
+
+    _client = Client(private_key=private_key, rpc_url=rpc_url, api_url=api_url, email=email, password=password, contract_address=contract_address)
     return _client
 
 
-def upload(model_path, model_name, version):
+def upload(model_path, model_name, version) -> FileUploadResult:
     """Upload a model file to OpenGradient.
 
     Args:
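Both entry points now thread an api_url through to Client. A minimal sketch of the updated initialization, with placeholder credentials (DEFAULT_API_URL is the default exported from opengradient.defaults, per the diff above):

    import opengradient as og
    from opengradient.defaults import DEFAULT_API_URL

    client = og.init(
        email="user@example.com",      # placeholder credentials
        password="example-password",   # placeholder
        private_key="0x" + "11" * 32,  # placeholder 32-byte Ethereum key
        api_url=DEFAULT_API_URL,       # new in 0.5.x; omit to use the default
    )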
@@ -70,7 +76,7 @@ def upload(model_path, model_name, version):
         version: Version string for this model upload
 
     Returns:
-        dict: Upload response containing file metadata
+        FileUploadResult: Upload response containing file metadata
 
     Raises:
         RuntimeError: If SDK is not initialized
@@ -80,7 +86,7 @@
     return _client.upload(model_path, model_name, version)
 
 
-def create_model(model_name: str, model_desc: str, model_path: Optional[str] = None):
+def create_model(model_name: str, model_desc: str, model_path: Optional[str] = None) -> ModelRepository:
     """Create a new model repository.
 
     Args:
@@ -89,7 +95,7 @@ def create_model(model_name: str, model_desc: str, model_path: Optional[str] = N
         model_path: Optional path to model file to upload immediately
 
     Returns:
-        dict: Creation response with model metadata and optional upload results
+        ModelRepository: Creation response with model metadata and optional upload results
 
     Raises:
         RuntimeError: If SDK is not initialized
@@ -126,7 +132,7 @@ def create_version(model_name, notes=None, is_major=False):
     return _client.create_version(model_name, notes, is_major)
 
 
-def infer(model_cid, inference_mode, model_input, max_retries: Optional[int] = None):
+def infer(model_cid, inference_mode, model_input, max_retries: Optional[int] = None) -> InferenceResult:
     """Run inference on a model.
 
     Args:
@@ -136,7 +142,9 @@ def infer(model_cid, inference_mode, model_input, max_retries: Optional[int] = N
         max_retries: Maximum number of retries for failed transactions
 
     Returns:
-        InferenceResult: Transaction hash and model output
+        InferenceResult (InferenceResult): A dataclass object containing the transaction hash and model output.
+            * transaction_hash (str): Blockchain hash for the transaction
+            * model_output (Dict[str, np.ndarray]): Output of the ONNX model
 
     Raises:
         RuntimeError: If SDK is not initialized
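With the return type now annotated, call sites can rely on the two documented attributes. A sketch with made-up input (the CID is reused from the alphasense docstring example further below):

    import opengradient as og

    result = og.infer(
        model_cid="QmZdSfHWGJyzBiB2K98egzu3MypPcv4R1ASypUxwZ1MFUG",  # example CID
        inference_mode=og.InferenceMode.VANILLA,
        model_input={"price_series": [1.00, 1.01, 0.99]},  # input shape depends on the model
    )
    print(result.transaction_hash)  # str: blockchain transaction hash
    print(result.model_output)      # Dict[str, np.ndarray]: ONNX model outputs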
@@ -319,7 +327,7 @@ def run_workflow(contract_address: str) -> ModelOutput:
     return _client.run_workflow(contract_address)
 
 
-def read_workflow_history(contract_address: str, num_results: int) -> List[Dict]:
+def read_workflow_history(contract_address: str, num_results: int) -> List[ModelOutput]:
     """
     Gets historical inference results from a workflow contract.
 
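read_workflow_history now advertises List[ModelOutput] instead of List[Dict]; iteration is unchanged, only the element type is more precise. A sketch with a placeholder workflow address:

    import opengradient as og

    history = og.read_workflow_history(
        contract_address="0x0000000000000000000000000000000000000000",  # placeholder workflow contract
        num_results=5,
    )
    for output in history:  # each element is a ModelOutput, not a plain dict
        print(output)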
--- /dev/null
+++ opengradient-0.5.2/src/opengradient/abi/InferencePrecompile.abi
@@ -0,0 +1 @@
+[{"anonymous":false,"inputs":[{"indexed":false,"internalType":"string","name":"inferenceID","type":"string"},{"components":[{"internalType":"enum LLMInferenceMode","name":"mode","type":"uint8"},{"internalType":"string","name":"modelCID","type":"string"},{"components":[{"internalType":"string","name":"role","type":"string"},{"internalType":"string","name":"content","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"tool_call_id","type":"string"},{"components":[{"internalType":"string","name":"id","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"arguments","type":"string"}],"internalType":"struct ToolCall[]","name":"tool_calls","type":"tuple[]"}],"internalType":"struct ChatMessage[]","name":"messages","type":"tuple[]"},{"components":[{"internalType":"string","name":"description","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"parameters","type":"string"}],"internalType":"struct ToolDefinition[]","name":"tools","type":"tuple[]"},{"internalType":"string","name":"tool_choice","type":"string"},{"internalType":"uint32","name":"max_tokens","type":"uint32"},{"internalType":"string[]","name":"stop_sequence","type":"string[]"},{"internalType":"uint32","name":"temperature","type":"uint32"}],"indexed":false,"internalType":"struct LLMChatRequest","name":"request","type":"tuple"},{"components":[{"internalType":"string","name":"finish_reason","type":"string"},{"components":[{"internalType":"string","name":"role","type":"string"},{"internalType":"string","name":"content","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"tool_call_id","type":"string"},{"components":[{"internalType":"string","name":"id","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"arguments","type":"string"}],"internalType":"struct ToolCall[]","name":"tool_calls","type":"tuple[]"}],"internalType":"struct ChatMessage","name":"message","type":"tuple"}],"indexed":false,"internalType":"struct LLMChatResponse","name":"response","type":"tuple"}],"name":"LLMChat","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"string","name":"inferenceID","type":"string"},{"components":[{"internalType":"enum LLMInferenceMode","name":"mode","type":"uint8"},{"internalType":"string","name":"modelCID","type":"string"},{"internalType":"string","name":"prompt","type":"string"},{"internalType":"uint32","name":"max_tokens","type":"uint32"},{"internalType":"string[]","name":"stop_sequence","type":"string[]"},{"internalType":"uint32","name":"temperature","type":"uint32"}],"indexed":false,"internalType":"struct LLMCompletionRequest","name":"request","type":"tuple"},{"components":[{"internalType":"string","name":"answer","type":"string"}],"indexed":false,"internalType":"struct LLMCompletionResponse","name":"response","type":"tuple"}],"name":"LLMCompletionEvent","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"string","name":"inferenceID","type":"string"},{"components":[{"internalType":"enum ModelInferenceMode","name":"mode","type":"uint8"},{"internalType":"string","name":"modelCID","type":"string"},{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"}],"internalType":"struct ModelInput","name":"input","type":"tuple"}],"indexed":false,"internalType":"struct ModelInferenceRequest","name":"request","type":"tuple"},{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"value","type":"string"}],"internalType":"struct TensorLib.JsonScalar[]","name":"jsons","type":"tuple[]"},{"internalType":"bool","name":"is_simulation_result","type":"bool"}],"indexed":false,"internalType":"struct ModelOutput","name":"response","type":"tuple"}],"name":"ModelInferenceEvent","type":"event"},{"inputs":[{"components":[{"internalType":"enum LLMInferenceMode","name":"mode","type":"uint8"},{"internalType":"string","name":"modelCID","type":"string"},{"components":[{"internalType":"string","name":"role","type":"string"},{"internalType":"string","name":"content","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"tool_call_id","type":"string"},{"components":[{"internalType":"string","name":"id","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"arguments","type":"string"}],"internalType":"struct ToolCall[]","name":"tool_calls","type":"tuple[]"}],"internalType":"struct ChatMessage[]","name":"messages","type":"tuple[]"},{"components":[{"internalType":"string","name":"description","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"parameters","type":"string"}],"internalType":"struct ToolDefinition[]","name":"tools","type":"tuple[]"},{"internalType":"string","name":"tool_choice","type":"string"},{"internalType":"uint32","name":"max_tokens","type":"uint32"},{"internalType":"string[]","name":"stop_sequence","type":"string[]"},{"internalType":"uint32","name":"temperature","type":"uint32"}],"internalType":"struct LLMChatRequest","name":"request","type":"tuple"}],"name":"runLLMChat","outputs":[{"components":[{"internalType":"string","name":"finish_reason","type":"string"},{"components":[{"internalType":"string","name":"role","type":"string"},{"internalType":"string","name":"content","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"tool_call_id","type":"string"},{"components":[{"internalType":"string","name":"id","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"arguments","type":"string"}],"internalType":"struct ToolCall[]","name":"tool_calls","type":"tuple[]"}],"internalType":"struct ChatMessage","name":"message","type":"tuple"}],"internalType":"struct LLMChatResponse","name":"","type":"tuple"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"components":[{"internalType":"enum LLMInferenceMode","name":"mode","type":"uint8"},{"internalType":"string","name":"modelCID","type":"string"},{"internalType":"string","name":"prompt","type":"string"},{"internalType":"uint32","name":"max_tokens","type":"uint32"},{"internalType":"string[]","name":"stop_sequence","type":"string[]"},{"internalType":"uint32","name":"temperature","type":"uint32"}],"internalType":"struct LLMCompletionRequest","name":"request","type":"tuple"}],"name":"runLLMCompletion","outputs":[{"components":[{"internalType":"string","name":"answer","type":"string"}],"internalType":"struct LLMCompletionResponse","name":"","type":"tuple"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"components":[{"internalType":"enum ModelInferenceMode","name":"mode","type":"uint8"},{"internalType":"string","name":"modelCID","type":"string"},{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"}],"internalType":"struct ModelInput","name":"input","type":"tuple"}],"internalType":"struct ModelInferenceRequest","name":"request","type":"tuple"}],"name":"runModelInference","outputs":[{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"value","type":"string"}],"internalType":"struct TensorLib.JsonScalar[]","name":"jsons","type":"tuple[]"},{"internalType":"bool","name":"is_simulation_result","type":"bool"}],"internalType":"struct ModelOutput","name":"","type":"tuple"}],"stateMutability":"nonpayable","type":"function"}]
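The new InferencePrecompile.abi is a single-line JSON ABI exposing three functions (runLLMChat, runLLMCompletion, runModelInference) plus their corresponding events. One way to inspect and bind it with web3.py, as a sketch (the RPC endpoint and precompile address are placeholders, and reading via importlib.resources assumes the ABI ships as package data, as the [tool.setuptools.package-data] section above suggests):

    import json
    from importlib.resources import files

    from web3 import Web3

    abi = json.loads((files("opengradient") / "abi" / "InferencePrecompile.abi").read_text())
    print([entry["name"] for entry in abi if entry["type"] == "function"])
    # ['runLLMChat', 'runLLMCompletion', 'runModelInference']

    w3 = Web3(Web3.HTTPProvider("https://rpc.example.org"))  # placeholder RPC endpoint
    precompile = w3.eth.contract(
        address="0x0000000000000000000000000000000000000000",  # placeholder precompile address
        abi=abi,
    )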
--- /dev/null
+++ opengradient-0.5.2/src/opengradient/alphasense/run_model_tool.py
@@ -0,0 +1,152 @@
+from enum import Enum
+from typing import Any, Callable, List, Dict, Type, Optional, Union
+
+from langchain_core.tools import BaseTool, StructuredTool
+from pydantic import BaseModel
+
+import opengradient as og
+from .types import ToolType
+from opengradient import InferenceResult
+import numpy as np
+
+
+def create_run_model_tool(
+    tool_type: ToolType,
+    model_cid: str,
+    tool_name: str,
+    model_input_provider: Callable[..., Dict[str, Union[str, int, float, List, np.ndarray]]],
+    model_output_formatter: Callable[[InferenceResult], str],
+    tool_input_schema: Optional[Type[BaseModel]] = None,
+    tool_description: str = "Executes the given ML model",
+    inference_mode: og.InferenceMode = og.InferenceMode.VANILLA,
+) -> BaseTool | Callable:
+    """
+    Creates a tool that wraps an OpenGradient model for inference.
+
+    This function generates a tool that can be integrated into either a LangChain pipeline
+    or a Swarm system, allowing the model to be executed as part of a chain of operations.
+    The tool uses the provided input_getter function to obtain the necessary input data and
+    runs inference using the specified OpenGradient model.
+
+    Args:
+        tool_type (ToolType): Specifies the framework to create the tool for. Use
+            ToolType.LANGCHAIN for LangChain integration or ToolType.SWARM for Swarm
+            integration.
+        model_cid (str): The CID of the OpenGradient model to be executed.
+        tool_name (str): The name to assign to the created tool. This will be used to identify
+            and invoke the tool within the agent.
+        model_input_provider (Callable): A function that takes in the tool_input_schema with arguments
+            filled by the agent and returns input data required by the model.
+
+            The function should return data in a format compatible with the model's expectations.
+        model_output_formatter (Callable[..., str]): A function that takes the output of
+            the OpenGradient infer method (with type InferenceResult) and formats it into a string.
+
+            This is required to ensure the output is compatible with the tool framework.
+
+            Default returns the InferenceResult object.
+
+            InferenceResult has attributes:
+                * transaction_hash (str): Blockchain hash for the transaction
+                * model_output (Dict[str, np.ndarray]): Output of the ONNX model
+        tool_input_schema (Type[BaseModel], optional): A Pydantic BaseModel class defining the
+            input schema.
+
+            For LangChain tools the schema will be used directly. The defined schema will be used as
+            input keyword arguments for the `model_input_provider` function. If no arguments are required
+            for the `model_input_provider` function then this schema can be unspecified.
+
+            For Swarm tools the schema will be converted to appropriate annotations.
+
+            Default is None -- an empty schema will be provided for LangChain.
+        tool_description (str, optional): A description of what the tool does. Defaults to
+            "Executes the given ML model".
+        inference_mode (og.InferenceMode, optional): The inference mode to use when running
+            the model. Defaults to VANILLA.
+
+    Returns:
+        BaseTool: For ToolType.LANGCHAIN, returns a LangChain StructuredTool.
+        Callable: For ToolType.SWARM, returns a decorated function with appropriate metadata.
+
+    Raises:
+        ValueError: If an invalid tool_type is provided.
+
+    Examples:
+        >>> from pydantic import BaseModel, Field
+        >>> from enum import Enum
+        >>> from opengradient.alphasense import create_run_model_tool
+        >>> class Token(str, Enum):
+        ...     ETH = "ethereum"
+        ...     BTC = "bitcoin"
+        ...
+        >>> class InputSchema(BaseModel):
+        ...     token: Token = Field(default=Token.ETH, description="Token name specified by user.")
+        ...
+        >>> eth_model_input = {"price_series": [2010.1, 2012.3, 2020.1, 2019.2]}  # Example data
+        >>> btc_model_input = {"price_series": [100001.1, 100013.2, 100149.2, 99998.1]}  # Example data
+        >>> def model_input_provider(**llm_input):
+        ...     token = llm_input.get("token")
+        ...     if token == Token.BTC:
+        ...         return btc_model_input
+        ...     elif token == Token.ETH:
+        ...         return eth_model_input
+        ...     else:
+        ...         raise ValueError("Unexpected token found")
+        ...
+        >>> def output_formatter(inference_result):
+        ...     return format(float(inference_result.model_output["std"].item()), ".3%")
+        ...
+        >>> run_model_tool = create_run_model_tool(
+        ...     tool_type=ToolType.LANGCHAIN,
+        ...     model_cid="QmZdSfHWGJyzBiB2K98egzu3MypPcv4R1ASypUxwZ1MFUG",
+        ...     tool_name="Return_volatility_tool",
+        ...     model_input_provider=model_input_provider,
+        ...     model_output_formatter=output_formatter,
+        ...     tool_input_schema=InputSchema,
+        ...     tool_description="This tool takes a token and measures the return volatility (standard deviation of returns).",
+        ...     inference_mode=og.InferenceMode.VANILLA,
+        ... )
+    """
+
+    def model_executor(**llm_input):
+        # Pass LLM input arguments (formatted based on tool_input_schema) as parameters into model_input_provider
+        model_input = model_input_provider(**llm_input)
+
+        inference_result = og.infer(model_cid=model_cid, inference_mode=inference_mode, model_input=model_input)
+
+        return model_output_formatter(inference_result)
+
+    if tool_type == ToolType.LANGCHAIN:
+        if not tool_input_schema:
+            tool_input_schema = type("EmptyInputSchema", (BaseModel,), {})
+
+        return StructuredTool.from_function(
+            func=model_executor, name=tool_name, description=tool_description, args_schema=tool_input_schema
+        )
+    elif tool_type == ToolType.SWARM:
+        model_executor.__name__ = tool_name
+        model_executor.__doc__ = tool_description
+        # Convert Pydantic model to Swarm annotations if provided
+        if tool_input_schema:
+            model_executor.__annotations__ = _convert_pydantic_to_annotations(tool_input_schema)
+        return model_executor
+    else:
+        raise ValueError(f"Invalid tooltype: {tool_type}")
+
+
+def _convert_pydantic_to_annotations(model: Type[BaseModel]) -> Dict[str, Any]:
+    """
+    Convert a Pydantic model to function annotations format used by Swarm.
+
+    Args:
+        model: A Pydantic BaseModel class
+
+    Returns:
+        Dict mapping field names to (type, description) tuples
+    """
+    annotations = {}
+    for field_name, field in model.model_fields.items():
+        field_type = field.annotation
+        description = field.description or ""
+        annotations[field_name] = (field_type, description)
+    return annotations
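The StructuredTool returned for ToolType.LANGCHAIN is a standard LangChain Runnable, so it can be exercised directly before wiring it into an agent. A hypothetical invocation, reusing run_model_tool from the docstring example above:

    # Keys must match the tool_input_schema fields ("token" in the example).
    volatility = run_model_tool.invoke({"token": "bitcoin"})
    print(volatility)  # e.g. "0.313%" -- whatever string output_formatter returns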