opengradient 0.4.6__tar.gz → 0.4.7__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. opengradient-0.4.7/PKG-INFO +159 -0
  2. opengradient-0.4.7/README.md +111 -0
  3. {opengradient-0.4.6 → opengradient-0.4.7}/pyproject.toml +2 -1
  4. {opengradient-0.4.6 → opengradient-0.4.7}/src/opengradient/__init__.py +59 -67
  5. opengradient-0.4.7/src/opengradient/abi/PriceHistoryInference.abi +1 -0
  6. opengradient-0.4.7/src/opengradient/abi/WorkflowScheduler.abi +13 -0
  7. {opengradient-0.4.6 → opengradient-0.4.7}/src/opengradient/alphasense/read_workflow_tool.py +1 -1
  8. {opengradient-0.4.6 → opengradient-0.4.7}/src/opengradient/alphasense/run_model_tool.py +3 -3
  9. opengradient-0.4.7/src/opengradient/bin/PriceHistoryInference.bin +1 -0
  10. {opengradient-0.4.6 → opengradient-0.4.7}/src/opengradient/cli.py +8 -4
  11. {opengradient-0.4.6 → opengradient-0.4.7}/src/opengradient/client.py +282 -217
  12. {opengradient-0.4.6 → opengradient-0.4.7}/src/opengradient/defaults.py +1 -0
  13. {opengradient-0.4.6 → opengradient-0.4.7}/src/opengradient/llm/__init__.py +1 -1
  14. {opengradient-0.4.6 → opengradient-0.4.7}/src/opengradient/llm/og_langchain.py +36 -22
  15. {opengradient-0.4.6 → opengradient-0.4.7}/src/opengradient/llm/og_openai.py +1 -1
  16. {opengradient-0.4.6 → opengradient-0.4.7}/src/opengradient/types.py +22 -20
  17. {opengradient-0.4.6 → opengradient-0.4.7}/src/opengradient/utils.py +2 -0
  18. opengradient-0.4.7/src/opengradient.egg-info/PKG-INFO +159 -0
  19. {opengradient-0.4.6 → opengradient-0.4.7}/src/opengradient.egg-info/SOURCES.txt +3 -2
  20. opengradient-0.4.6/MANIFEST.in +0 -3
  21. opengradient-0.4.6/PKG-INFO +0 -189
  22. opengradient-0.4.6/README.md +0 -141
  23. opengradient-0.4.6/src/opengradient/abi/ModelExecutorHistorical.abi +0 -1
  24. opengradient-0.4.6/src/opengradient.egg-info/PKG-INFO +0 -189
  25. {opengradient-0.4.6 → opengradient-0.4.7}/LICENSE +0 -0
  26. {opengradient-0.4.6 → opengradient-0.4.7}/setup.cfg +0 -0
  27. {opengradient-0.4.6 → opengradient-0.4.7}/src/opengradient/abi/inference.abi +0 -0
  28. {opengradient-0.4.6 → opengradient-0.4.7}/src/opengradient/account.py +0 -0
  29. {opengradient-0.4.6 → opengradient-0.4.7}/src/opengradient/alphasense/__init__.py +0 -0
  30. {opengradient-0.4.6 → opengradient-0.4.7}/src/opengradient/alphasense/types.py +0 -0
  31. {opengradient-0.4.6 → opengradient-0.4.7}/src/opengradient/exceptions.py +0 -0
  32. {opengradient-0.4.6 → opengradient-0.4.7}/src/opengradient/proto/__init__.py +0 -0
  33. {opengradient-0.4.6 → opengradient-0.4.7}/src/opengradient/proto/infer.proto +0 -0
  34. {opengradient-0.4.6 → opengradient-0.4.7}/src/opengradient/proto/infer_pb2.py +0 -0
  35. {opengradient-0.4.6 → opengradient-0.4.7}/src/opengradient/proto/infer_pb2_grpc.py +0 -0
  36. {opengradient-0.4.6 → opengradient-0.4.7}/src/opengradient.egg-info/dependency_links.txt +0 -0
  37. {opengradient-0.4.6 → opengradient-0.4.7}/src/opengradient.egg-info/entry_points.txt +0 -0
  38. {opengradient-0.4.6 → opengradient-0.4.7}/src/opengradient.egg-info/requires.txt +0 -0
  39. {opengradient-0.4.6 → opengradient-0.4.7}/src/opengradient.egg-info/top_level.txt +0 -0
@@ -0,0 +1,159 @@
+ Metadata-Version: 2.2
+ Name: opengradient
+ Version: 0.4.7
+ Summary: Python SDK for OpenGradient decentralized model management & inference services
+ Author-email: OpenGradient <oliver@opengradient.ai>
+ License: MIT License
+
+ Copyright (c) 2024 OpenGradient
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
+
+ Project-URL: Homepage, https://opengradient.ai
+ Classifier: Development Status :: 3 - Alpha
+ Classifier: Intended Audience :: Developers
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Requires-Python: >=3.10
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Requires-Dist: eth-account>=0.13.4
+ Requires-Dist: web3>=7.3.0
+ Requires-Dist: click>=8.1.7
+ Requires-Dist: firebase-rest-api>=1.11.0
+ Requires-Dist: grpcio>=1.66.2
+ Requires-Dist: numpy>=1.26.4
+ Requires-Dist: requests>=2.32.3
+ Requires-Dist: langchain>=0.3.7
+ Requires-Dist: openai>=1.58.1
+ Requires-Dist: pydantic>=2.9.2
+
+ # OpenGradient Python SDK
+
+ A Python SDK for decentralized model management and inference services on the OpenGradient platform. The SDK enables programmatic access to our model repository and decentralized AI infrastructure.
+
+ ## Key Features
+
+ - Model management and versioning
+ - Decentralized model inference
+ - Support for LLM inference with various models
+ - End-to-end verified AI execution
+ - Command-line interface (CLI) for direct access
+
+ ## Model Hub
+
+ Browse and discover AI models on our [Model Hub](https://hub.opengradient.ai/). The Hub provides:
+ - Registry of models and LLMs
+ - Easy model discovery and deployment
+ - Direct integration with the SDK
+
+ ## Installation
+
+ ```bash
+ pip install opengradient
+ ```
+
+ Note: Windows users should temporarily enable WSL when installing `opengradient` (fix in progress).
+
+ ## Getting Started
+
+ ### 1. Account Setup
+
+ You'll need two accounts to use the SDK:
+ - **Model Hub account**: Create one at [Hub Sign Up](https://hub.opengradient.ai/signup)
+ - **OpenGradient account**: Use an existing Ethereum-compatible wallet or create a new one via the SDK
+
+ The easiest way to set up your accounts is through our configuration wizard:
+
+ ```bash
+ opengradient config init
+ ```
+
+ This wizard will:
+ - Guide you through account creation
+ - Help you set up credentials
+ - Direct you to our Test Faucet for devnet tokens
+
+ ### 2. Initialize the SDK
+
+ ```python
+ import opengradient as og
+ og.init(private_key="<private_key>", email="<email>", password="<password>")
+ ```
+
+ ### 3. Basic Usage
+
+ Browse available models on our [Model Hub](https://hub.opengradient.ai/) or create and upload your own:
+
+
+ ```python
+ # Create and upload a model
+ og.create_model(
+     model_name="my-model",
+     model_desc="Model description",
+     model_path="/path/to/model"
+ )
+
+ # Run inference
+ inference_mode = og.InferenceMode.VANILLA
+ result = og.infer(
+     model_cid="your-model-cid",
+     model_input={"input": "value"},
+     inference_mode=inference_mode
+ )
+ ```
+
+ ### 4. Examples
+
+ See code examples under [examples](./examples).
+
+ ## CLI Usage
+
+ The SDK includes a command-line interface for quick operations. First, verify your configuration:
+
+ ```bash
+ opengradient config show
+ ```
+
+ Run a test inference:
+
+ ```bash
+ opengradient infer -m QmbUqS93oc4JTLMHwpVxsE39mhNxy6hpf6Py3r9oANr8aZ \
+   --input '{"num_input1":[1.0, 2.0, 3.0], "num_input2":10}'
+ ```
+
+ ## Use Cases
+
+ 1. **Off-chain Applications**: Use OpenGradient as a decentralized alternative to centralized AI providers like HuggingFace and OpenAI.
+
+ 2. **Model Development**: Manage models on the Model Hub and integrate directly into your development workflow.
+
+ ## Documentation
+
+ For comprehensive documentation, API reference, and examples, visit:
+ - [OpenGradient Documentation](https://docs.opengradient.ai/)
+ - [API Reference](https://docs.opengradient.ai/api_reference/python_sdk/)
+
+ ## Support
+
+ - Run `opengradient --help` for CLI command reference
+ - Visit our [documentation](https://docs.opengradient.ai/) for detailed guides
+ - Join our [community](https://.opengradient.ai/) for support
@@ -0,0 +1,111 @@
+ # OpenGradient Python SDK
+
+ A Python SDK for decentralized model management and inference services on the OpenGradient platform. The SDK enables programmatic access to our model repository and decentralized AI infrastructure.
+
+ ## Key Features
+
+ - Model management and versioning
+ - Decentralized model inference
+ - Support for LLM inference with various models
+ - End-to-end verified AI execution
+ - Command-line interface (CLI) for direct access
+
+ ## Model Hub
+
+ Browse and discover AI models on our [Model Hub](https://hub.opengradient.ai/). The Hub provides:
+ - Registry of models and LLMs
+ - Easy model discovery and deployment
+ - Direct integration with the SDK
+
+ ## Installation
+
+ ```bash
+ pip install opengradient
+ ```
+
+ Note: Windows users should temporarily enable WSL when installing `opengradient` (fix in progress).
+
+ ## Getting Started
+
+ ### 1. Account Setup
+
+ You'll need two accounts to use the SDK:
+ - **Model Hub account**: Create one at [Hub Sign Up](https://hub.opengradient.ai/signup)
+ - **OpenGradient account**: Use an existing Ethereum-compatible wallet or create a new one via the SDK
+
+ The easiest way to set up your accounts is through our configuration wizard:
+
+ ```bash
+ opengradient config init
+ ```
+
+ This wizard will:
+ - Guide you through account creation
+ - Help you set up credentials
+ - Direct you to our Test Faucet for devnet tokens
+
+ ### 2. Initialize the SDK
+
+ ```python
+ import opengradient as og
+ og.init(private_key="<private_key>", email="<email>", password="<password>")
+ ```
+
+ ### 3. Basic Usage
+
+ Browse available models on our [Model Hub](https://hub.opengradient.ai/) or create and upload your own:
+
+
+ ```python
+ # Create and upload a model
+ og.create_model(
+     model_name="my-model",
+     model_desc="Model description",
+     model_path="/path/to/model"
+ )
+
+ # Run inference
+ inference_mode = og.InferenceMode.VANILLA
+ result = og.infer(
+     model_cid="your-model-cid",
+     model_input={"input": "value"},
+     inference_mode=inference_mode
+ )
+ ```
+
+ ### 4. Examples
+
+ See code examples under [examples](./examples).
+
+ ## CLI Usage
+
+ The SDK includes a command-line interface for quick operations. First, verify your configuration:
+
+ ```bash
+ opengradient config show
+ ```
+
+ Run a test inference:
+
+ ```bash
+ opengradient infer -m QmbUqS93oc4JTLMHwpVxsE39mhNxy6hpf6Py3r9oANr8aZ \
+   --input '{"num_input1":[1.0, 2.0, 3.0], "num_input2":10}'
+ ```
+
+ ## Use Cases
+
+ 1. **Off-chain Applications**: Use OpenGradient as a decentralized alternative to centralized AI providers like HuggingFace and OpenAI.
+
+ 2. **Model Development**: Manage models on the Model Hub and integrate directly into your development workflow.
+
+ ## Documentation
+
+ For comprehensive documentation, API reference, and examples, visit:
+ - [OpenGradient Documentation](https://docs.opengradient.ai/)
+ - [API Reference](https://docs.opengradient.ai/api_reference/python_sdk/)
+
+ ## Support
+
+ - Run `opengradient --help` for CLI command reference
+ - Visit our [documentation](https://docs.opengradient.ai/) for detailed guides
+ - Join our [community](https://.opengradient.ai/) for support
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
  [project]
  name = "opengradient"
- version = "0.4.6"
+ version = "0.4.7"
  description = "Python SDK for OpenGradient decentralized model management & inference services"
  authors = [{name = "OpenGradient", email = "oliver@opengradient.ai"}]
  license = {file = "LICENSE"}
@@ -53,6 +53,7 @@ exclude = ["tests*", "stresstest*"]
  [tool.setuptools.package-data]
  "opengradient" = [
      "abi/*.abi",
+     "bin/*.bin",
      "proto/*.proto",
      "**/*.py"
  ]
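This release adds contract bytecode (`PriceHistoryInference.bin`, file 9 in the list above) to the sdist, and the `bin/*.bin` package-data entry makes setuptools ship it inside the installed package. A minimal sketch of reading such bundled resources with the standard library; how client.py actually loads these files is not shown in this diff:

```python
# Hedged sketch: access data files declared in [tool.setuptools.package-data].
from importlib.resources import files

pkg = files("opengradient")
# Contract bytecode shipped under opengradient/bin/ as of 0.4.7
bytecode = (pkg / "bin" / "PriceHistoryInference.bin").read_text().strip()
# The matching ABI ships under opengradient/abi/
abi_json = (pkg / "abi" / "PriceHistoryInference.abi").read_text()
```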
@@ -6,7 +6,18 @@ from typing import Any, Dict, List, Optional, Tuple, Union
 
  from .client import Client
  from .defaults import DEFAULT_INFERENCE_CONTRACT_ADDRESS, DEFAULT_RPC_URL
- from .types import LLM, TEE_LLM, HistoricalInputQuery, InferenceMode, LlmInferenceMode, SchedulerParams
+ from .types import (
+     LLM,
+     TEE_LLM,
+     HistoricalInputQuery,
+     SchedulerParams,
+     CandleType,
+     CandleOrder,
+     InferenceMode,
+     LlmInferenceMode,
+     TextGenerationOutput,
+     ModelOutput,
+ )
 
  from . import llm, alphasense
 
@@ -14,15 +25,19 @@ _client = None
 
 
  def new_client(
-     email: str, password: str, private_key: str, rpc_url=DEFAULT_RPC_URL, contract_address=DEFAULT_INFERENCE_CONTRACT_ADDRESS
+     email: Optional[str],
+     password: Optional[str],
+     private_key: str,
+     rpc_url=DEFAULT_RPC_URL,
+     contract_address=DEFAULT_INFERENCE_CONTRACT_ADDRESS,
  ) -> Client:
      """
      Creates a unique OpenGradient client instance with the given authentication and network settings.
 
      Args:
-         email: User's email address for authentication
-         password: User's password for authentication
-         private_key: Ethereum private key for blockchain transactions
+         email: User's email address for authentication with Model Hub
+         password: User's password for authentication with Model Hub
+         private_key: Private key for OpenGradient transactions
          rpc_url: Optional RPC URL for the blockchain network, defaults to mainnet
          contract_address: Optional inference contract address
      """
@@ -65,7 +80,7 @@ def upload(model_path, model_name, version):
      return _client.upload(model_path, model_name, version)
 
 
- def create_model(model_name: str, model_desc: str, model_path: str = None):
+ def create_model(model_name: str, model_desc: str, model_path: Optional[str] = None):
      """Create a new model repository.
 
      Args:
@@ -121,7 +136,7 @@ def infer(model_cid, inference_mode, model_input, max_retries: Optional[int] = N
          max_retries: Maximum number of retries for failed transactions
 
      Returns:
-         Tuple[str, Any]: Transaction hash and model output
+         InferenceResult: Transaction hash and model output
 
      Raises:
          RuntimeError: If SDK is not initialized
@@ -134,12 +149,12 @@
  def llm_completion(
      model_cid: LLM,
      prompt: str,
-     inference_mode: str = LlmInferenceMode.VANILLA,
+     inference_mode: LlmInferenceMode = LlmInferenceMode.VANILLA,
      max_tokens: int = 100,
      stop_sequence: Optional[List[str]] = None,
      temperature: float = 0.0,
      max_retries: Optional[int] = None,
- ) -> Tuple[str, str]:
+ ) -> TextGenerationOutput:
      """Generate text completion using an LLM.
 
      Args:
@@ -152,7 +167,7 @@ def llm_completion(
          max_retries: Maximum number of retries for failed transactions
 
      Returns:
-         Tuple[str, str]: Transaction hash and generated text
+         TextGenerationOutput: Transaction hash and generated text
 
      Raises:
          RuntimeError: If SDK is not initialized
@@ -173,14 +188,14 @@ def llm_completion(
  def llm_chat(
      model_cid: LLM,
      messages: List[Dict],
-     inference_mode: str = LlmInferenceMode.VANILLA,
+     inference_mode: LlmInferenceMode = LlmInferenceMode.VANILLA,
      max_tokens: int = 100,
      stop_sequence: Optional[List[str]] = None,
      temperature: float = 0.0,
      tools: Optional[List[Dict]] = None,
      tool_choice: Optional[str] = None,
      max_retries: Optional[int] = None,
- ) -> Tuple[str, str, Dict]:
+ ) -> TextGenerationOutput:
      """Have a chat conversation with an LLM.
 
      Args:
@@ -195,7 +210,7 @@ def llm_chat(
          max_retries: Maximum number of retries for failed transactions
 
      Returns:
-         Tuple[str, str, Dict]: Transaction hash, model response, and metadata
+         TextGenerationOutput: Transaction hash, model response, and metadata
 
      Raises:
          RuntimeError: If SDK is not initialized
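Both LLM entry points now return a structured `TextGenerationOutput` instead of bare tuples, and `inference_mode` is typed as the `LlmInferenceMode` enum rather than `str`. A minimal sketch of the new call shape; the `LLM` enum member name below is hypothetical, and the fields of `TextGenerationOutput` are not shown in this diff:

```python
import opengradient as og

og.init(private_key="<private_key>", email="<email>", password="<password>")

result = og.llm_chat(
    model_cid=og.LLM.SOME_MODEL,                 # hypothetical enum member
    messages=[{"role": "user", "content": "Hello!"}],
    inference_mode=og.LlmInferenceMode.VANILLA,  # enum, no longer a plain str
    max_tokens=100,
)
# `result` is a TextGenerationOutput bundling what used to be the
# (tx_hash, response, metadata) tuple returned by 0.4.6.
```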
@@ -215,24 +230,6 @@ def llm_chat(
      )
 
 
- def login(email: str, password: str):
-     """Login to OpenGradient.
-
-     Args:
-         email: User's email address
-         password: User's password
-
-     Returns:
-         dict: Login response with authentication tokens
-
-     Raises:
-         RuntimeError: If SDK is not initialized
-     """
-     if _client is None:
-         raise RuntimeError("OpenGradient client not initialized. Call og.init() first.")
-     return _client.login(email, password)
-
-
  def list_files(model_name: str, version: str) -> List[Dict]:
      """List files in a model repository version.
 
@@ -251,32 +248,11 @@
      return _client.list_files(model_name, version)
 
 
- def generate_image(model: str, prompt: str, height: Optional[int] = None, width: Optional[int] = None) -> bytes:
-     """Generate an image from a text prompt.
-
-     Args:
-         model: Model identifier (e.g. "stabilityai/stable-diffusion-xl-base-1.0")
-         prompt: Text description of the desired image
-         height: Optional height of the generated image in pixels
-         width: Optional width of the generated image in pixels
-
-     Returns:
-         bytes: Raw image data as bytes
-
-     Raises:
-         RuntimeError: If SDK is not initialized
-         OpenGradientError: If image generation fails
-     """
-     if _client is None:
-         raise RuntimeError("OpenGradient client not initialized. Call og.init() first.")
-     return _client.generate_image(model, prompt, height=height, width=width)
-
-
  def new_workflow(
      model_cid: str,
-     input_query: Union[Dict[str, Any], HistoricalInputQuery],
+     input_query: HistoricalInputQuery,
      input_tensor_name: str,
-     scheduler_params: Optional[Union[Dict[str, int], SchedulerParams]] = None,
+     scheduler_params: Optional[SchedulerParams] = None,
  ) -> str:
      """
      Deploy a new workflow contract with the specified parameters.
@@ -287,13 +263,9 @@
 
      Args:
          model_cid: IPFS CID of the model
-         input_query: Dictionary or HistoricalInputQuery containing query parameters
+         input_query: HistoricalInputQuery containing query parameters
          input_tensor_name: Name of the input tensor
-         scheduler_params: Optional scheduler configuration:
-             - Can be a dictionary with:
-                 - frequency: Execution frequency in seconds (default: 600)
-                 - duration_hours: How long to run in hours (default: 2)
-             - Or a SchedulerParams instance
+         scheduler_params: Optional scheduler configuration as a SchedulerParams instance.
              If not provided, the workflow will be deployed without scheduling.
 
      Returns:
@@ -303,15 +275,12 @@
      if _client is None:
          raise RuntimeError("OpenGradient client not initialized. Call og.init(...) first.")
 
-     # Convert scheduler_params if it's a dict, otherwise use as is
-     scheduler = SchedulerParams.from_dict(scheduler_params) if isinstance(scheduler_params, dict) else scheduler_params
-
      return _client.new_workflow(
-         model_cid=model_cid, input_query=input_query, input_tensor_name=input_tensor_name, scheduler_params=scheduler
+         model_cid=model_cid, input_query=input_query, input_tensor_name=input_tensor_name, scheduler_params=scheduler_params
      )
 
 
- def read_workflow_result(contract_address: str) -> Dict[str, Union[str, Dict]]:
+ def read_workflow_result(contract_address: str) -> ModelOutput:
      """
      Reads the latest inference result from a deployed workflow contract.
 
@@ -335,7 +304,7 @@
      return _client.read_workflow_result(contract_address)
 
 
- def run_workflow(contract_address: str) -> Dict[str, Union[str, Dict]]:
+ def run_workflow(contract_address: str) -> ModelOutput:
      """
      Executes the workflow by calling run() on the contract to pull latest data and perform inference.
 
@@ -350,8 +319,23 @@
      return _client.run_workflow(contract_address)
 
 
+ def read_workflow_history(contract_address: str, num_results: int) -> List[Dict]:
+     """
+     Gets historical inference results from a workflow contract.
+
+     Args:
+         contract_address (str): Address of the deployed workflow contract
+         num_results (int): Number of historical results to retrieve
+
+     Returns:
+         List[Dict]: List of historical inference results
+     """
+     if _client is None:
+         raise RuntimeError("OpenGradient client not initialized. Call og.init() first.")
+     return _client.read_workflow_history(contract_address, num_results)
+
+
  __all__ = [
-     "generate_image",
      "list_files",
      "login",
      "llm_chat",
@@ -366,6 +350,14 @@ __all__ = [
      "new_workflow",
      "read_workflow_result",
      "run_workflow",
+     "read_workflow_history",
+     "InferenceMode",
+     "LlmInferenceMode",
+     "HistoricalInputQuery",
+     "SchedulerParams",
+     "CandleType",
+     "CandleOrder",
+     "InferenceMode",
      "llm",
      "alphasense",
  ]
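The workflow API is now strictly typed: `new_workflow` takes a `HistoricalInputQuery` and an optional `SchedulerParams` (dict inputs are no longer converted), results come back as `ModelOutput`, and `read_workflow_history` is new. A hedged sketch of the updated surface; the `HistoricalInputQuery` field names are assumptions inferred from the struct in the PriceHistoryInference ABI below, the enum members are hypothetical, and the `SchedulerParams` fields mirror the removed dict defaults:

```python
import opengradient as og

og.init(private_key="<private_key>", email="<email>", password="<password>")

# Field names assumed to mirror the HistoricalInputQuery tuple in the ABI
# below (base, quote, total_candles, candle_duration_in_mins, order,
# candle_types); check og.types for the authoritative signature.
query = og.HistoricalInputQuery(
    base="ETH",
    quote="USD",
    total_candles=10,
    candle_duration_in_mins=30,
    order=og.CandleOrder.ASCENDING,      # hypothetical enum member
    candle_types=[og.CandleType.CLOSE],  # hypothetical enum member
)

# Dicts are no longer accepted here; frequency/duration_hours mirror the
# old dict keys and defaults, which is an assumption about SchedulerParams.
schedule = og.SchedulerParams(frequency=600, duration_hours=2)

address = og.new_workflow(
    model_cid="<model-cid>",
    input_query=query,
    input_tensor_name="price_history",   # illustrative tensor name
    scheduler_params=schedule,
)

og.run_workflow(address)                   # pull latest data and infer
latest = og.read_workflow_result(address)  # ModelOutput
history = og.read_workflow_history(address, num_results=5)
```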
@@ -0,0 +1 @@
+ [{"inputs":[{"internalType":"string","name":"_modelId","type":"string"},{"internalType":"string","name":"_inputName","type":"string"},{"components":[{"internalType":"string","name":"base","type":"string"},{"internalType":"string","name":"quote","type":"string"},{"internalType":"uint32","name":"total_candles","type":"uint32"},{"internalType":"uint32","name":"candle_duration_in_mins","type":"uint32"},{"internalType":"enum CandleOrder","name":"order","type":"uint8"},{"internalType":"enum CandleType[]","name":"candle_types","type":"uint8[]"}],"internalType":"struct HistoricalInputQuery","name":"_query","type":"tuple"}],"stateMutability":"nonpayable","type":"constructor"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"caller","type":"address"},{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"value","type":"string"}],"internalType":"struct TensorLib.JsonScalar[]","name":"jsons","type":"tuple[]"},{"internalType":"bool","name":"is_simulation_result","type":"bool"}],"indexed":false,"internalType":"struct ModelOutput","name":"result","type":"tuple"}],"name":"InferenceResultEmitted","type":"event"},{"inputs":[],"name":"getInferenceResult","outputs":[{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"value","type":"string"}],"internalType":"struct TensorLib.JsonScalar[]","name":"jsons","type":"tuple[]"},{"internalType":"bool","name":"is_simulation_result","type":"bool"}],"internalType":"struct ModelOutput","name":"","type":"tuple"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"num","type":"uint256"}],"name":"getLastInferenceResults","outputs":[{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"value","type":"string"}],"internalType":"struct TensorLib.JsonScalar[]","name":"jsons","type":"tuple[]"},{"internalType":"bool","name":"is_simulation_result","type":"bool"}],"internalType":"struct ModelOutput[]","name":"","type":"tuple[]"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"historicalContract","outputs":[{"internalType":"contract OGHistorical","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"inputName","outputs":[{"internalType":"string","name":"","type":"string"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"inputQuery","outputs":[{"internalType":"string","name":"base","type":"string"},{"internalType":"string","name":"quote","type":"string"},{"internalType":"uint32","name":"total_candles","type":"uint32"},{"internalType":"uint32","name":"candle_duration_in_mins","type":"uint32"},{"internalType":"enum CandleOrder","name":"order","type":"uint8"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"modelId","outputs":[{"internalType":"string","name":"","type":"string"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"run","outputs":[],"stateMutability":"nonpayable","type":"function"}]
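This contract exposes `getInferenceResult` and `getLastInferenceResults(num)` views plus a nonpayable `run()`, which presumably is what `read_workflow_result`, `read_workflow_history`, and `run_workflow` call under the hood (the SDK plumbing lives in client.py, not shown here). A minimal web3.py sketch against this ABI; the RPC URL and contract address are placeholders:

```python
import json
from web3 import Web3

w3 = Web3(Web3.HTTPProvider("<rpc-url>"))  # placeholder endpoint
with open("PriceHistoryInference.abi") as f:
    abi = json.load(f)

workflow = w3.eth.contract(address="<workflow-contract-address>", abi=abi)

# Latest ModelOutput tuple: (numbers, strings, jsons, is_simulation_result)
latest = workflow.functions.getInferenceResult().call()

# Last five ModelOutput tuples, mirroring read_workflow_history(..., 5)
history = workflow.functions.getLastInferenceResults(5).call()
```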
@@ -0,0 +1,13 @@
+ [
+     {
+         "inputs": [
+             {"internalType": "address", "name": "contractAddress", "type": "address"},
+             {"internalType": "uint256", "name": "endTime", "type": "uint256"},
+             {"internalType": "uint256", "name": "frequency", "type": "uint256"}
+         ],
+         "name": "registerTask",
+         "outputs": [],
+         "stateMutability": "nonpayable",
+         "type": "function"
+     }
+ ]
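The scheduler contract has a single entry point, `registerTask(contractAddress, endTime, frequency)`, which plausibly backs `SchedulerParams`: `frequency` in seconds, with `duration_hours` converted to an `endTime` timestamp. That mapping is an assumption, as is every address below; a hedged web3.py sketch:

```python
import json
import time
from web3 import Web3

w3 = Web3(Web3.HTTPProvider("<rpc-url>"))
with open("WorkflowScheduler.abi") as f:
    scheduler = w3.eth.contract(address="<scheduler-address>", abi=json.load(f))

# Re-run the workflow every 600 seconds for roughly 2 hours, mirroring the
# removed SchedulerParams dict defaults (frequency=600, duration_hours=2).
end_time = int(time.time()) + 2 * 3600
tx_hash = scheduler.functions.registerTask(
    "<workflow-contract-address>", end_time, 600
).transact({"from": "<sender-address>"})
```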
@@ -12,7 +12,7 @@ def create_read_workflow_tool(
      tool_name: str,
      tool_description: str,
      output_formatter: Callable[..., str] = lambda x: x,
- ) -> BaseTool:
+ ) -> BaseTool | Callable:
      """
      Creates a tool that reads results from a workflow contract on OpenGradient.
 
@@ -1,5 +1,5 @@
  from enum import Enum
- from typing import Any, Callable, Dict, Type
+ from typing import Any, Callable, Dict, Type, Optional
 
  from langchain_core.tools import BaseTool, StructuredTool
  from pydantic import BaseModel
@@ -14,10 +14,10 @@ def create_run_model_tool(
      tool_name: str,
      input_getter: Callable,
      output_formatter: Callable[..., str] = lambda x: x,
-     input_schema: Type[BaseModel] = None,
+     input_schema: Optional[Type[BaseModel]] = None,
      tool_description: str = "Executes the given ML model",
      inference_mode: og.InferenceMode = og.InferenceMode.VANILLA,
- ) -> BaseTool:
+ ) -> BaseTool | Callable:
      """
      Creates a tool that wraps an OpenGradient model for inference.