opengradient 0.4.6__tar.gz → 0.4.8__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- opengradient-0.4.8/PKG-INFO +159 -0
- opengradient-0.4.8/README.md +111 -0
- {opengradient-0.4.6 → opengradient-0.4.8}/pyproject.toml +2 -1
- {opengradient-0.4.6 → opengradient-0.4.8}/src/opengradient/__init__.py +64 -70
- opengradient-0.4.8/src/opengradient/abi/PriceHistoryInference.abi +1 -0
- opengradient-0.4.8/src/opengradient/abi/WorkflowScheduler.abi +13 -0
- {opengradient-0.4.6 → opengradient-0.4.8}/src/opengradient/alphasense/read_workflow_tool.py +1 -1
- {opengradient-0.4.6 → opengradient-0.4.8}/src/opengradient/alphasense/run_model_tool.py +3 -3
- opengradient-0.4.8/src/opengradient/bin/PriceHistoryInference.bin +1 -0
- {opengradient-0.4.6 → opengradient-0.4.8}/src/opengradient/cli.py +8 -4
- {opengradient-0.4.6 → opengradient-0.4.8}/src/opengradient/client.py +303 -259
- {opengradient-0.4.6 → opengradient-0.4.8}/src/opengradient/defaults.py +1 -0
- {opengradient-0.4.6 → opengradient-0.4.8}/src/opengradient/llm/__init__.py +1 -1
- {opengradient-0.4.6 → opengradient-0.4.8}/src/opengradient/llm/og_langchain.py +36 -22
- {opengradient-0.4.6 → opengradient-0.4.8}/src/opengradient/llm/og_openai.py +1 -1
- {opengradient-0.4.6 → opengradient-0.4.8}/src/opengradient/types.py +34 -20
- {opengradient-0.4.6 → opengradient-0.4.8}/src/opengradient/utils.py +2 -0
- opengradient-0.4.8/src/opengradient.egg-info/PKG-INFO +159 -0
- {opengradient-0.4.6 → opengradient-0.4.8}/src/opengradient.egg-info/SOURCES.txt +3 -2
- opengradient-0.4.6/MANIFEST.in +0 -3
- opengradient-0.4.6/PKG-INFO +0 -189
- opengradient-0.4.6/README.md +0 -141
- opengradient-0.4.6/src/opengradient/abi/ModelExecutorHistorical.abi +0 -1
- opengradient-0.4.6/src/opengradient.egg-info/PKG-INFO +0 -189
- {opengradient-0.4.6 → opengradient-0.4.8}/LICENSE +0 -0
- {opengradient-0.4.6 → opengradient-0.4.8}/setup.cfg +0 -0
- {opengradient-0.4.6 → opengradient-0.4.8}/src/opengradient/abi/inference.abi +0 -0
- {opengradient-0.4.6 → opengradient-0.4.8}/src/opengradient/account.py +0 -0
- {opengradient-0.4.6 → opengradient-0.4.8}/src/opengradient/alphasense/__init__.py +0 -0
- {opengradient-0.4.6 → opengradient-0.4.8}/src/opengradient/alphasense/types.py +0 -0
- {opengradient-0.4.6 → opengradient-0.4.8}/src/opengradient/exceptions.py +0 -0
- {opengradient-0.4.6 → opengradient-0.4.8}/src/opengradient/proto/__init__.py +0 -0
- {opengradient-0.4.6 → opengradient-0.4.8}/src/opengradient/proto/infer.proto +0 -0
- {opengradient-0.4.6 → opengradient-0.4.8}/src/opengradient/proto/infer_pb2.py +0 -0
- {opengradient-0.4.6 → opengradient-0.4.8}/src/opengradient/proto/infer_pb2_grpc.py +0 -0
- {opengradient-0.4.6 → opengradient-0.4.8}/src/opengradient.egg-info/dependency_links.txt +0 -0
- {opengradient-0.4.6 → opengradient-0.4.8}/src/opengradient.egg-info/entry_points.txt +0 -0
- {opengradient-0.4.6 → opengradient-0.4.8}/src/opengradient.egg-info/requires.txt +0 -0
- {opengradient-0.4.6 → opengradient-0.4.8}/src/opengradient.egg-info/top_level.txt +0 -0
--- /dev/null
+++ opengradient-0.4.8/PKG-INFO
@@ -0,0 +1,159 @@
+Metadata-Version: 2.2
+Name: opengradient
+Version: 0.4.8
+Summary: Python SDK for OpenGradient decentralized model management & inference services
+Author-email: OpenGradient <oliver@opengradient.ai>
+License: MIT License
+
+Copyright (c) 2024 OpenGradient
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+Project-URL: Homepage, https://opengradient.ai
+Classifier: Development Status :: 3 - Alpha
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Requires-Python: >=3.10
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Requires-Dist: eth-account>=0.13.4
+Requires-Dist: web3>=7.3.0
+Requires-Dist: click>=8.1.7
+Requires-Dist: firebase-rest-api>=1.11.0
+Requires-Dist: grpcio>=1.66.2
+Requires-Dist: numpy>=1.26.4
+Requires-Dist: requests>=2.32.3
+Requires-Dist: langchain>=0.3.7
+Requires-Dist: openai>=1.58.1
+Requires-Dist: pydantic>=2.9.2
+
+# OpenGradient Python SDK
+
+A Python SDK for decentralized model management and inference services on the OpenGradient platform. The SDK enables programmatic access to our model repository and decentralized AI infrastructure.
+
+## Key Features
+
+- Model management and versioning
+- Decentralized model inference
+- Support for LLM inference with various models
+- End-to-end verified AI execution
+- Command-line interface (CLI) for direct access
+
+## Model Hub
+
+Browse and discover AI models on our [Model Hub](https://hub.opengradient.ai/). The Hub provides:
+- Registry of models and LLMs
+- Easy model discovery and deployment
+- Direct integration with the SDK
+
+## Installation
+
+```bash
+pip install opengradient
+```
+
+Note: Windows users should temporarily enable WSL when installing `opengradient` (fix in progress).
+
+## Getting Started
+
+### 1. Account Setup
+
+You'll need two accounts to use the SDK:
+- **Model Hub account**: Create one at [Hub Sign Up](https://hub.opengradient.ai/signup)
+- **OpenGradient account**: Use an existing Ethereum-compatible wallet or create a new one via SDK
+
+The easiest way to set up your accounts is through our configuration wizard:
+
+```bash
+opengradient config init
+```
+
+This wizard will:
+- Guide you through account creation
+- Help you set up credentials
+- Direct you to our Test Faucet for devnet tokens
+
+### 2. Initialize the SDK
+
+```python
+import opengradient as og
+og.init(private_key="<private_key>", email="<email>", password="<password>")
+```
+
+### 3. Basic Usage
+
+Browse available models on our [Model Hub](https://hub.opengradient.ai/) or create and upload your own:
+
+
+```python
+# Create and upload a model
+og.create_model(
+    model_name="my-model",
+    model_desc="Model description",
+    model_path="/path/to/model"
+)
+
+# Run inference
+inference_mode = og.InferenceMode.VANILLA
+result = og.infer(
+    model_cid="your-model-cid",
+    model_inputs={"input": "value"},
+    inference_mode=inference_mode
+)
+```
+
+### 4. Examples
+
+See code examples under [examples](./examples).
+
+## CLI Usage
+
+The SDK includes a command-line interface for quick operations. First, verify your configuration:
+
+```bash
+opengradient config show
+```
+
+Run a test inference:
+
+```bash
+opengradient infer -m QmbUqS93oc4JTLMHwpVxsE39mhNxy6hpf6Py3r9oANr8aZ \
+    --input '{"num_input1":[1.0, 2.0, 3.0], "num_input2":10}'
+```
+
+## Use Cases
+
+1. **Off-chain Applications**: Use OpenGradient as a decentralized alternative to centralized AI providers like HuggingFace and OpenAI.
+
+2. **Model Development**: Manage models on the Model Hub and integrate directly into your development workflow.
+
+## Documentation
+
+For comprehensive documentation, API reference, and examples, visit:
+- [OpenGradient Documentation](https://docs.opengradient.ai/)
+- [API Reference](https://docs.opengradient.ai/api_reference/python_sdk/)
+
+## Support
+
+- Run `opengradient --help` for CLI command reference
+- Visit our [documentation](https://docs.opengradient.ai/) for detailed guides
+- Join our [community](https://.opengradient.ai/) for support
--- /dev/null
+++ opengradient-0.4.8/README.md
@@ -0,0 +1,111 @@
+# OpenGradient Python SDK
+
+A Python SDK for decentralized model management and inference services on the OpenGradient platform. The SDK enables programmatic access to our model repository and decentralized AI infrastructure.
+
+## Key Features
+
+- Model management and versioning
+- Decentralized model inference
+- Support for LLM inference with various models
+- End-to-end verified AI execution
+- Command-line interface (CLI) for direct access
+
+## Model Hub
+
+Browse and discover AI models on our [Model Hub](https://hub.opengradient.ai/). The Hub provides:
+- Registry of models and LLMs
+- Easy model discovery and deployment
+- Direct integration with the SDK
+
+## Installation
+
+```bash
+pip install opengradient
+```
+
+Note: Windows users should temporarily enable WSL when installing `opengradient` (fix in progress).
+
+## Getting Started
+
+### 1. Account Setup
+
+You'll need two accounts to use the SDK:
+- **Model Hub account**: Create one at [Hub Sign Up](https://hub.opengradient.ai/signup)
+- **OpenGradient account**: Use an existing Ethereum-compatible wallet or create a new one via SDK
+
+The easiest way to set up your accounts is through our configuration wizard:
+
+```bash
+opengradient config init
+```
+
+This wizard will:
+- Guide you through account creation
+- Help you set up credentials
+- Direct you to our Test Faucet for devnet tokens
+
+### 2. Initialize the SDK
+
+```python
+import opengradient as og
+og.init(private_key="<private_key>", email="<email>", password="<password>")
+```
+
+### 3. Basic Usage
+
+Browse available models on our [Model Hub](https://hub.opengradient.ai/) or create and upload your own:
+
+
+```python
+# Create and upload a model
+og.create_model(
+    model_name="my-model",
+    model_desc="Model description",
+    model_path="/path/to/model"
+)
+
+# Run inference
+inference_mode = og.InferenceMode.VANILLA
+result = og.infer(
+    model_cid="your-model-cid",
+    model_inputs={"input": "value"},
+    inference_mode=inference_mode
+)
+```
+
+### 4. Examples
+
+See code examples under [examples](./examples).
+
+## CLI Usage
+
+The SDK includes a command-line interface for quick operations. First, verify your configuration:
+
+```bash
+opengradient config show
+```
+
+Run a test inference:
+
+```bash
+opengradient infer -m QmbUqS93oc4JTLMHwpVxsE39mhNxy6hpf6Py3r9oANr8aZ \
+    --input '{"num_input1":[1.0, 2.0, 3.0], "num_input2":10}'
+```
+
+## Use Cases
+
+1. **Off-chain Applications**: Use OpenGradient as a decentralized alternative to centralized AI providers like HuggingFace and OpenAI.
+
+2. **Model Development**: Manage models on the Model Hub and integrate directly into your development workflow.
+
+## Documentation
+
+For comprehensive documentation, API reference, and examples, visit:
+- [OpenGradient Documentation](https://docs.opengradient.ai/)
+- [API Reference](https://docs.opengradient.ai/api_reference/python_sdk/)
+
+## Support
+
+- Run `opengradient --help` for CLI command reference
+- Visit our [documentation](https://docs.opengradient.ai/) for detailed guides
+- Join our [community](https://.opengradient.ai/) for support
--- opengradient-0.4.6/pyproject.toml
+++ opengradient-0.4.8/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "opengradient"
-version = "0.4.6"
+version = "0.4.8"
 description = "Python SDK for OpenGradient decentralized model management & inference services"
 authors = [{name = "OpenGradient", email = "oliver@opengradient.ai"}]
 license = {file = "LICENSE"}
@@ -53,6 +53,7 @@ exclude = ["tests*", "stresstest*"]
 [tool.setuptools.package-data]
 "opengradient" = [
     "abi/*.abi",
+    "bin/*.bin",
     "proto/*.proto",
     "**/*.py"
 ]
--- opengradient-0.4.6/src/opengradient/__init__.py
+++ opengradient-0.4.8/src/opengradient/__init__.py
@@ -6,7 +6,20 @@ from typing import Any, Dict, List, Optional, Tuple, Union
 
 from .client import Client
 from .defaults import DEFAULT_INFERENCE_CONTRACT_ADDRESS, DEFAULT_RPC_URL
-from .types import
+from .types import (
+    LLM,
+    TEE_LLM,
+    HistoricalInputQuery,
+    SchedulerParams,
+    CandleType,
+    CandleOrder,
+    InferenceMode,
+    LlmInferenceMode,
+    TextGenerationOutput,
+    ModelOutput,
+    ModelRepository,
+    FileUploadResult,
+)
 
 from . import llm, alphasense
 
@@ -14,15 +27,19 @@ _client = None
 
 
 def new_client(
-    email: str,
+    email: Optional[str],
+    password: Optional[str],
+    private_key: str,
+    rpc_url=DEFAULT_RPC_URL,
+    contract_address=DEFAULT_INFERENCE_CONTRACT_ADDRESS,
 ) -> Client:
     """
     Creates a unique OpenGradient client instance with the given authentication and network settings.
 
     Args:
-        email: User's email address for authentication
-        password: User's password for authentication
-        private_key:
+        email: User's email address for authentication with Model Hub
+        password: User's password for authentication with Model Hub
+        private_key: Private key for OpenGradient transactions
         rpc_url: Optional RPC URL for the blockchain network, defaults to mainnet
         contract_address: Optional inference contract address
     """
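In 0.4.8, `new_client` makes the Model Hub credentials optional while keeping `private_key` required, so wallet-only usage no longer needs a Hub login. A minimal sketch of the two call styles, based solely on the signature in the hunk above; the key and credentials are placeholders:

```python
import opengradient as og

# Wallet-only client: email and password may now be passed as None.
client = og.new_client(email=None, password=None, private_key="<private_key>")

# Full client, additionally authenticated against the Model Hub.
client = og.new_client(
    email="you@example.com",
    password="<password>",
    private_key="<private_key>",
    # rpc_url and contract_address fall back to DEFAULT_RPC_URL and
    # DEFAULT_INFERENCE_CONTRACT_ADDRESS per the signature above.
)
```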
@@ -46,7 +63,7 @@ def init(email: str, password: str, private_key: str, rpc_url=DEFAULT_RPC_URL, c
     return _client
 
 
-def upload(model_path, model_name, version):
+def upload(model_path, model_name, version) -> FileUploadResult:
     """Upload a model file to OpenGradient.
 
     Args:
@@ -55,7 +72,7 @@ def upload(model_path, model_name, version):
         version: Version string for this model upload
 
     Returns:
-
+        FileUploadResult: Upload response containing file metadata
 
     Raises:
         RuntimeError: If SDK is not initialized
@@ -65,7 +82,7 @@ def upload(model_path, model_name, version):
     return _client.upload(model_path, model_name, version)
 
 
-def create_model(model_name: str, model_desc: str, model_path: str = None):
+def create_model(model_name: str, model_desc: str, model_path: Optional[str] = None) -> ModelRepository:
     """Create a new model repository.
 
     Args:
@@ -74,7 +91,7 @@ def create_model(model_name: str, model_desc: str, model_path: str = None):
         model_path: Optional path to model file to upload immediately
 
     Returns:
-
+        ModelRepository: Creation response with model metadata and optional upload results
 
     Raises:
         RuntimeError: If SDK is not initialized
@@ -121,7 +138,7 @@ def infer(model_cid, inference_mode, model_input, max_retries: Optional[int] = N
         max_retries: Maximum number of retries for failed transactions
 
     Returns:
-
+        InferenceResult: Transaction hash and model output
 
     Raises:
         RuntimeError: If SDK is not initialized
@@ -134,12 +151,12 @@ def infer(model_cid, inference_mode, model_input, max_retries: Optional[int] = N
 def llm_completion(
     model_cid: LLM,
     prompt: str,
-    inference_mode:
+    inference_mode: LlmInferenceMode = LlmInferenceMode.VANILLA,
     max_tokens: int = 100,
     stop_sequence: Optional[List[str]] = None,
     temperature: float = 0.0,
     max_retries: Optional[int] = None,
-) ->
+) -> TextGenerationOutput:
     """Generate text completion using an LLM.
 
     Args:
@@ -152,7 +169,7 @@ def llm_completion(
         max_retries: Maximum number of retries for failed transactions
 
     Returns:
-
+        TextGenerationOutput: Transaction hash and generated text
 
     Raises:
         RuntimeError: If SDK is not initialized
@@ -173,14 +190,14 @@ def llm_completion(
 def llm_chat(
     model_cid: LLM,
     messages: List[Dict],
-    inference_mode:
+    inference_mode: LlmInferenceMode = LlmInferenceMode.VANILLA,
     max_tokens: int = 100,
     stop_sequence: Optional[List[str]] = None,
     temperature: float = 0.0,
     tools: Optional[List[Dict]] = None,
     tool_choice: Optional[str] = None,
     max_retries: Optional[int] = None,
-) ->
+) -> TextGenerationOutput:
     """Have a chat conversation with an LLM.
 
     Args:
@@ -195,7 +212,7 @@ def llm_chat(
         max_retries: Maximum number of retries for failed transactions
 
     Returns:
-
+        TextGenerationOutput
 
     Raises:
         RuntimeError: If SDK is not initialized
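Both LLM helpers now default `inference_mode` to `LlmInferenceMode.VANILLA` and declare `TextGenerationOutput` as their return type. A hedged usage sketch built only from the signatures above; the model identifier is a placeholder for the `og.LLM` value the signature expects:

```python
import opengradient as og

og.init(private_key="<private_key>", email="<email>", password="<password>")

model = "<llm-model-cid>"  # placeholder; the signature expects an og.LLM value

# inference_mode can now be omitted (defaults to LlmInferenceMode.VANILLA).
completion = og.llm_completion(
    model_cid=model,
    prompt="Summarize verified inference in one sentence.",
    max_tokens=50,
)

chat = og.llm_chat(
    model_cid=model,
    messages=[{"role": "user", "content": "Hello!"}],
    temperature=0.0,
)
# Both calls return TextGenerationOutput: transaction hash plus generated text.
```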
@@ -215,24 +232,6 @@ def llm_chat(
     )
 
 
-def login(email: str, password: str):
-    """Login to OpenGradient.
-
-    Args:
-        email: User's email address
-        password: User's password
-
-    Returns:
-        dict: Login response with authentication tokens
-
-    Raises:
-        RuntimeError: If SDK is not initialized
-    """
-    if _client is None:
-        raise RuntimeError("OpenGradient client not initialized. Call og.init() first.")
-    return _client.login(email, password)
-
-
 def list_files(model_name: str, version: str) -> List[Dict]:
     """List files in a model repository version.
 
@@ -251,32 +250,11 @@ def list_files(model_name: str, version: str) -> List[Dict]:
     return _client.list_files(model_name, version)
 
 
-def generate_image(model: str, prompt: str, height: Optional[int] = None, width: Optional[int] = None) -> bytes:
-    """Generate an image from a text prompt.
-
-    Args:
-        model: Model identifier (e.g. "stabilityai/stable-diffusion-xl-base-1.0")
-        prompt: Text description of the desired image
-        height: Optional height of the generated image in pixels
-        width: Optional width of the generated image in pixels
-
-    Returns:
-        bytes: Raw image data as bytes
-
-    Raises:
-        RuntimeError: If SDK is not initialized
-        OpenGradientError: If image generation fails
-    """
-    if _client is None:
-        raise RuntimeError("OpenGradient client not initialized. Call og.init() first.")
-    return _client.generate_image(model, prompt, height=height, width=width)
-
-
 def new_workflow(
     model_cid: str,
-    input_query:
+    input_query: HistoricalInputQuery,
     input_tensor_name: str,
-    scheduler_params: Optional[
+    scheduler_params: Optional[SchedulerParams] = None,
 ) -> str:
     """
     Deploy a new workflow contract with the specified parameters.
@@ -287,13 +265,9 @@ def new_workflow(
 
     Args:
         model_cid: IPFS CID of the model
-        input_query:
+        input_query: HistoricalInputQuery containing query parameters
         input_tensor_name: Name of the input tensor
-        scheduler_params: Optional scheduler configuration
-            - Can be a dictionary with:
-                - frequency: Execution frequency in seconds (default: 600)
-                - duration_hours: How long to run in hours (default: 2)
-            - Or a SchedulerParams instance
+        scheduler_params: Optional scheduler configuration as SchedulerParams instance
             If not provided, the workflow will be deployed without scheduling.
 
     Returns:
@@ -303,15 +277,12 @@ def new_workflow(
     if _client is None:
         raise RuntimeError("OpenGradient client not initialized. Call og.init(...) first.")
 
-    # Convert scheduler_params if it's a dict, otherwise use as is
-    scheduler = SchedulerParams.from_dict(scheduler_params) if isinstance(scheduler_params, dict) else scheduler_params
-
     return _client.new_workflow(
-        model_cid=model_cid, input_query=input_query, input_tensor_name=input_tensor_name, scheduler_params=
+        model_cid=model_cid, input_query=input_query, input_tensor_name=input_tensor_name, scheduler_params=scheduler_params
    )
 
 
-def read_workflow_result(contract_address: str) -> Dict[str, Union[str, Dict]]:
+def read_workflow_result(contract_address: str) -> ModelOutput:
     """
     Reads the latest inference result from a deployed workflow contract.
 
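The deleted block above is a behavioral change, not just cleanup: `new_workflow` no longer converts dicts via `SchedulerParams.from_dict`, so callers must now pass a `SchedulerParams` instance. A sketch under that reading; the `frequency` and `duration_hours` fields come from the deleted docstring, the `HistoricalInputQuery` fields mirror the struct in the PriceHistoryInference ABI further down, and the constructor shapes and enum member names are assumptions:

```python
import opengradient as og

og.init(private_key="<private_key>", email="<email>", password="<password>")

# Field names mirror the HistoricalInputQuery struct in the ABI below;
# the constructor shape and enum member names are assumptions.
query = og.HistoricalInputQuery(
    base="ETH",
    quote="USD",
    total_candles=10,
    candle_duration_in_mins=60,
    order=og.CandleOrder.ASCENDING,      # assumed member name
    candle_types=[og.CandleType.CLOSE],  # assumed member name
)

# Must be an instance now; dicts are no longer accepted.
scheduler = og.SchedulerParams(frequency=600, duration_hours=2)

address = og.new_workflow(
    model_cid="<model-cid>",
    input_query=query,
    input_tensor_name="price_history",  # hypothetical tensor name
    scheduler_params=scheduler,
)
```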
@@ -335,7 +306,7 @@ def read_workflow_result(contract_address: str) -> Dict[str, Union[str, Dict]]:
     return _client.read_workflow_result(contract_address)
 
 
-def run_workflow(contract_address: str) -> Dict[str, Union[str, Dict]]:
+def run_workflow(contract_address: str) -> ModelOutput:
     """
     Executes the workflow by calling run() on the contract to pull latest data and perform inference.
 
@@ -350,8 +321,23 @@ def run_workflow(contract_address: str) -> Dict[str, Union[str, Dict]]:
     return _client.run_workflow(contract_address)
 
 
+def read_workflow_history(contract_address: str, num_results: int) -> List[ModelOutput]:
+    """
+    Gets historical inference results from a workflow contract.
+
+    Args:
+        contract_address (str): Address of the deployed workflow contract
+        num_results (int): Number of historical results to retrieve
+
+    Returns:
+        List[Dict]: List of historical inference results
+    """
+    if _client is None:
+        raise RuntimeError("OpenGradient client not initialized. Call og.init() first.")
+    return _client.read_workflow_history(contract_address, num_results)
+
+
 __all__ = [
-    "generate_image",
     "list_files",
     "login",
     "llm_chat",
@@ -366,6 +352,14 @@ __all__ = [
     "new_workflow",
     "read_workflow_result",
     "run_workflow",
+    "read_workflow_history",
+    "InferenceMode",
+    "LlmInferenceMode",
+    "HistoricalInputQuery",
+    "SchedulerParams",
+    "CandleType",
+    "CandleOrder",
+    "InferenceMode",
     "llm",
     "alphasense",
 ]
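Taken together, the workflow changes in `__init__.py` enable a run-then-read loop, including the new `read_workflow_history`. A minimal sketch assuming an already deployed workflow contract; the address is a placeholder:

```python
import opengradient as og

og.init(private_key="<private_key>", email="<email>", password="<password>")

address = "<workflow-contract-address>"

og.run_workflow(address)                   # pull latest data and run inference
latest = og.read_workflow_result(address)  # ModelOutput

# New in 0.4.8: fetch the last N results.
for result in og.read_workflow_history(address, num_results=5):
    print(result)
```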
--- /dev/null
+++ opengradient-0.4.8/src/opengradient/abi/PriceHistoryInference.abi
@@ -0,0 +1 @@
+[{"inputs":[{"internalType":"string","name":"_modelId","type":"string"},{"internalType":"string","name":"_inputName","type":"string"},{"components":[{"internalType":"string","name":"base","type":"string"},{"internalType":"string","name":"quote","type":"string"},{"internalType":"uint32","name":"total_candles","type":"uint32"},{"internalType":"uint32","name":"candle_duration_in_mins","type":"uint32"},{"internalType":"enum CandleOrder","name":"order","type":"uint8"},{"internalType":"enum CandleType[]","name":"candle_types","type":"uint8[]"}],"internalType":"struct HistoricalInputQuery","name":"_query","type":"tuple"}],"stateMutability":"nonpayable","type":"constructor"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"caller","type":"address"},{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"value","type":"string"}],"internalType":"struct TensorLib.JsonScalar[]","name":"jsons","type":"tuple[]"},{"internalType":"bool","name":"is_simulation_result","type":"bool"}],"indexed":false,"internalType":"struct ModelOutput","name":"result","type":"tuple"}],"name":"InferenceResultEmitted","type":"event"},{"inputs":[],"name":"getInferenceResult","outputs":[{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"value","type":"string"}],"internalType":"struct TensorLib.JsonScalar[]","name":"jsons","type":"tuple[]"},{"internalType":"bool","name":"is_simulation_result","type":"bool"}],"internalType":"struct ModelOutput","name":"","type":"tuple"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"num","type":"uint256"}],"name":"getLastInferenceResults","outputs":[{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"value","type":"string"}],"internalType":"struct TensorLib.JsonScalar[]","name":"jsons","type":"tuple[]"},{"internalType":"bool","name":"is_simulation_result","type":"bool"}],"internalType":"struct ModelOutput[]","name":"","type":"tuple[]"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"historicalContract","outputs":[{"internalType":"contract OGHistorical","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"inputName","outputs":[{"internalType":"string","name":"","type":"string"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"inputQuery","outputs":[{"internalType":"string","name":"base","type":"string"},{"internalType":"string","name":"quote","type":"string"},{"internalType":"uint32","name":"total_candles","type":"uint32"},{"internalType":"uint32","name":"candle_duration_in_mins","type":"uint32"},{"internalType":"enum CandleOrder","name":"order","type":"uint8"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"modelId","outputs":[{"internalType":"string","name":"","type":"string"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"run","outputs":[],"stateMutability":"nonpayable","type":"function"}]
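This ABI is what `read_workflow_result` and `read_workflow_history` decode against: `getInferenceResult()` returns a single `ModelOutput` tuple and `getLastInferenceResults(uint256)` returns an array of them. A hedged web3.py sketch of reading the contract directly; the RPC URL and contract address are placeholders, and `web3>=7.3.0` is already a declared dependency:

```python
import json
from pathlib import Path

from web3 import Web3

w3 = Web3(Web3.HTTPProvider("<rpc-url>"))
abi = json.loads(Path("PriceHistoryInference.abi").read_text())
workflow = w3.eth.contract(address="<workflow-contract-address>", abi=abi)

# ModelOutput tuple per the ABI: (numbers, strings, jsons, is_simulation_result).
latest = workflow.functions.getInferenceResult().call()
history = workflow.functions.getLastInferenceResults(5).call()
```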
--- /dev/null
+++ opengradient-0.4.8/src/opengradient/abi/WorkflowScheduler.abi
@@ -0,0 +1,13 @@
+[
+  {
+    "inputs": [
+      {"internalType": "address", "name": "contractAddress", "type": "address"},
+      {"internalType": "uint256", "name": "endTime", "type": "uint256"},
+      {"internalType": "uint256", "name": "frequency", "type": "uint256"}
+    ],
+    "name": "registerTask",
+    "outputs": [],
+    "stateMutability": "nonpayable",
+    "type": "function"
+  }
+]
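`registerTask(contractAddress, endTime, frequency)` is the on-chain counterpart of `SchedulerParams`: `frequency` in seconds and an absolute `endTime`, which the SDK presumably derives from `duration_hours`. A hedged web3.py sketch of registering a deployed workflow; every address, the RPC URL, and the key are placeholders:

```python
import json
import time
from pathlib import Path

from web3 import Web3

w3 = Web3(Web3.HTTPProvider("<rpc-url>"))
abi = json.loads(Path("WorkflowScheduler.abi").read_text())
scheduler = w3.eth.contract(address="<scheduler-contract-address>", abi=abi)

account = w3.eth.account.from_key("<private_key>")

# Run every 600 s for two hours, echoing the old SchedulerParams
# defaults mentioned in the deleted __init__.py docstring.
tx = scheduler.functions.registerTask(
    "<workflow-contract-address>",
    int(time.time()) + 2 * 3600,  # endTime (unix seconds)
    600,                          # frequency (seconds)
).build_transaction(
    {
        "from": account.address,
        "nonce": w3.eth.get_transaction_count(account.address),
    }
)
signed = account.sign_transaction(tx)
tx_hash = w3.eth.send_raw_transaction(signed.raw_transaction)
```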
--- opengradient-0.4.6/src/opengradient/alphasense/read_workflow_tool.py
+++ opengradient-0.4.8/src/opengradient/alphasense/read_workflow_tool.py
@@ -12,7 +12,7 @@ def create_read_workflow_tool(
     tool_name: str,
     tool_description: str,
     output_formatter: Callable[..., str] = lambda x: x,
-) -> BaseTool:
+) -> BaseTool | Callable:
     """
     Creates a tool that reads results from a workflow contract on OpenGradient.
 
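The widened return type `BaseTool | Callable` suggests the factory can target more than one agent framework. A sketch using only the parameters visible in this hunk; the hunk starts at line 12, so any required leading parameters (such as the workflow contract address) are not shown and would need to be supplied in real usage:

```python
from opengradient.alphasense import create_read_workflow_tool

# Only the parameters visible in the hunk are used below; real calls
# likely also need the leading parameters cut off above line 12.
tool = create_read_workflow_tool(
    tool_name="read_eth_forecast",
    tool_description="Reads the latest result from an OpenGradient workflow",
    output_formatter=lambda output: str(output),
)
```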
--- opengradient-0.4.6/src/opengradient/alphasense/run_model_tool.py
+++ opengradient-0.4.8/src/opengradient/alphasense/run_model_tool.py
@@ -1,5 +1,5 @@
 from enum import Enum
-from typing import Any, Callable, Dict, Type
+from typing import Any, Callable, Dict, Type, Optional
 
 from langchain_core.tools import BaseTool, StructuredTool
 from pydantic import BaseModel
@@ -14,10 +14,10 @@ def create_run_model_tool(
     tool_name: str,
     input_getter: Callable,
     output_formatter: Callable[..., str] = lambda x: x,
-    input_schema: Type[BaseModel] = None,
+    input_schema: Optional[Type[BaseModel]] = None,
     tool_description: str = "Executes the given ML model",
     inference_mode: og.InferenceMode = og.InferenceMode.VANILLA,
-) -> BaseTool:
+) -> BaseTool | Callable:
     """
     Creates a tool that wraps an OpenGradient model for inference.
 
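`create_run_model_tool` gets the same `BaseTool | Callable` widening plus a properly `Optional` input schema. A sketch using the parameters visible in the hunk (the model-identifying parameters sit above line 14 and are omitted here); the schema and input values echo the CLI example in the README and are otherwise hypothetical:

```python
import opengradient as og
from opengradient.alphasense import create_run_model_tool
from pydantic import BaseModel


class ModelInput(BaseModel):
    """Hypothetical schema echoing the README's CLI example."""

    num_input1: list[float]
    num_input2: int


tool = create_run_model_tool(
    tool_name="run_price_model",
    input_getter=lambda: {"num_input1": [1.0, 2.0, 3.0], "num_input2": 10},
    output_formatter=lambda output: str(output),
    input_schema=ModelInput,
    tool_description="Runs an OpenGradient model on fresh inputs",
    inference_mode=og.InferenceMode.VANILLA,
)
```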