opengradient 0.2.7__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
opengradient/__init__.py CHANGED
@@ -1,16 +1,16 @@
  from .client import Client
- from .exceptions import OpenGradientError, FileNotFoundError, UploadError, InferenceError, ResultRetrievalError
- from .types import ModelInput, InferenceMode, Number, NumberTensor, StringTensor, ModelOutput
-
- __version__ = "0.2.7"
+ from .defaults import *
+ from .types import InferenceMode
+ from typing import List, Dict
+ __version__ = "0.3.0"

  _client = None

- def init(private_key="cd09980ef6e280afc3900d2d6801f9e9c5d858a5deaeeab74a65643f5ff1a4c1",
- rpc_url="http://18.218.115.248:8545",
- contract_address="0x350E0A430b2B1563481833a99523Cfd17a530e4e",
- email="test@test.com",
- password="Test-123"):
+ def init(email: str,
+ password: str,
+ private_key=DEFAULT_PRIVATE_KEY,
+ rpc_url=DEFAULT_RPC_URL,
+ contract_address=DEFAULT_INFERENCE_CONTRACT_ADDRESS):
  global _client
  _client = Client(private_key=private_key, rpc_url=rpc_url, contract_address=contract_address, email=email, password=password)

@@ -19,10 +19,18 @@ def upload(model_path, model_name, version):
  raise RuntimeError("OpenGradient client not initialized. Call og.init() first.")
  return _client.upload(model_path, model_name, version)

- def create_model(model_name, model_desc):
+ def create_model(model_name: str, model_desc: str, model_path: str = None):
  if _client is None:
  raise RuntimeError("OpenGradient client not initialized. Call og.init() first.")
- return _client.create_model(model_name, model_desc)
+
+ result = _client.create_model(model_name, model_desc)
+
+ if model_path:
+ version = "0.01"
+ upload_result = _client.upload(model_path, model_name, version)
+ result["upload"] = upload_result
+
+ return result

  def create_version(model_name, notes=None, is_major=False):
  if _client is None:
@@ -37,4 +45,9 @@ def infer(model_cid, inference_mode, model_input):
  def login(email: str, password: str):
  if _client is None:
  raise RuntimeError("OpenGradient client not initialized. Call og.init() first.")
- return _client.login(email, password)
+ return _client.login(email, password)
+
+ def list_files(model_name: str, version: str) -> List[Dict]:
+ if _client is None:
+ raise RuntimeError("OpenGradient client not initialized. Call og.init() first.")
+ return _client.list_files(model_name, version)
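The new top-level API above changes `init` to take Hub credentials first, lets `create_model` optionally upload a file, and adds `list_files`. A minimal sketch of the resulting workflow, using placeholder credentials, names, and paths rather than values from this release:

```python
# Illustrative sketch only: credentials, model name, and file path are placeholders.
import opengradient as og

og.init(email="you@example.com", password="<hub-password>")

# When model_path is given, create_model() also uploads the file under the
# automatically created version "0.01" and attaches the upload result.
result = og.create_model(
    model_name="my-model",
    model_desc="example repository",
    model_path="model.onnx",
)

# New in 0.3.0: list the files stored for a model version (returns List[Dict]).
files = og.list_files(model_name="my-model", version="0.01")
```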
opengradient/abi/inference.abi CHANGED
@@ -1 +1 @@
- [{"anonymous":false,"inputs":[{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct Number[]","name":"values","type":"tuple[]"}],"internalType":"struct NumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct StringTensor[]","name":"strings","type":"tuple[]"},{"internalType":"bool","name":"is_simulation_result","type":"bool"}],"indexed":false,"internalType":"struct ModelOutput","name":"output","type":"tuple"}],"name":"InferenceResult","type":"event"},{"inputs":[{"internalType":"string","name":"modelId","type":"string"},{"internalType":"enum IInference.ModelInferenceMode","name":"inferenceMode","type":"uint8"},{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct Number[]","name":"values","type":"tuple[]"}],"internalType":"struct NumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct StringTensor[]","name":"strings","type":"tuple[]"}],"internalType":"struct ModelInput","name":"modelInput","type":"tuple"}],"name":"run","outputs":[{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct Number[]","name":"values","type":"tuple[]"}],"internalType":"struct NumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct StringTensor[]","name":"strings","type":"tuple[]"},{"internalType":"bool","name":"is_simulation_result","type":"bool"}],"internalType":"struct ModelOutput","name":"","type":"tuple"}],"stateMutability":"nonpayable","type":"function"}]
+ [{"anonymous":false,"inputs":[{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"},{"internalType":"bool","name":"is_simulation_result","type":"bool"}],"indexed":false,"internalType":"struct ModelOutput","name":"output","type":"tuple"}],"name":"InferenceResult","type":"event"},{"inputs":[{"internalType":"string","name":"modelId","type":"string"},{"internalType":"enum ModelInferenceMode","name":"inferenceMode","type":"uint8"},{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"}],"internalType":"struct ModelInput","name":"modelInput","type":"tuple"}],"name":"run","outputs":[{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"},{"internalType":"bool","name":"is_simulation_result","type":"bool"}],"internalType":"struct ModelOutput","name":"","type":"tuple"}],"stateMutability":"nonpayable","type":"function"}]
opengradient/abi/llm.abi ADDED
@@ -0,0 +1 @@
+ [{"anonymous":false,"inputs":[{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"},{"internalType":"bool","name":"is_simulation_result","type":"bool"}],"indexed":false,"internalType":"struct ModelOutput","name":"output","type":"tuple"}],"name":"InferenceResult","type":"event"},{"anonymous":false,"inputs":[{"components":[{"internalType":"string","name":"answer","type":"string"}],"indexed":false,"internalType":"struct LlmResponse","name":"response","type":"tuple"}],"name":"LLMResult","type":"event"},{"inputs":[{"internalType":"string","name":"modelId","type":"string"},{"internalType":"enum ModelInferenceMode","name":"inferenceMode","type":"uint8"},{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"}],"internalType":"struct ModelInput","name":"modelInput","type":"tuple"}],"name":"run","outputs":[{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"},{"internalType":"bool","name":"is_simulation_result","type":"bool"}],"internalType":"struct ModelOutput","name":"","type":"tuple"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"components":[{"internalType":"enum LlmInferenceMode","name":"mode","type":"uint8"},{"internalType":"string","name":"modelCID","type":"string"},{"internalType":"string","name":"prompt","type":"string"},{"internalType":"uint32","name":"max_tokens","type":"uint32"},{"internalType":"string[]","name":"stop_sequence","type":"string[]"},{"internalType":"uint32","name":"temperature","type":"uint32"}],"internalType":"struct LlmInferenceRequest","name":"request","type":"tuple"}],"name":"runLLM","outputs":[{"components":[{"internalType":"string","name":"answer","type":"string"}],"internalType":"struct 
LlmResponse","name":"","type":"tuple"}],"stateMutability":"nonpayable","type":"function"}]
opengradient/account.py ADDED
@@ -0,0 +1,37 @@
+ from eth_account import Account
+ import secrets
+ from collections import namedtuple
+ import os
+ import hashlib
+
+ EthAccount = namedtuple('EthAccount', ['address', 'private_key'])
+
+
+ def generate_eth_account() -> EthAccount:
+ user_seed = _get_user_random_seed()
+ private_key = _generate_secure_private_key(user_seed)
+
+ # derive account
+ account = Account.from_key(private_key)
+
+ # get the public key (address)
+ public_key = account.address
+
+ return EthAccount(address=public_key, private_key=private_key)
+
+ def _get_user_random_seed():
+ print("Please type a random string of characters (the longer and more random, the better):")
+ print("> ", end="") # Add a '>' prompt on a new line
+ return input().encode()
+
+ def _generate_secure_private_key(user_input):
+ # Combine multiple sources of entropy
+ system_random = secrets.token_bytes(32)
+ os_urandom = os.urandom(32)
+ timestamp = str(secrets.randbits(256)).encode()
+
+ # Add user input to the entropy sources
+ combined = system_random + os_urandom + timestamp + user_input
+
+ # Hash the combined entropy
+ return hashlib.sha256(combined).hexdigest()
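`account.py` is also new; the CLI uses `generate_eth_account()` in its account-creation flow. Used directly it looks roughly like this:

```python
# Sketch of using the new opengradient.account helpers directly.
from opengradient.account import generate_eth_account

# Prompts on stdin for a user-supplied random string, mixes it with
# secrets.token_bytes()/os.urandom() entropy, and hashes the result into a key.
account = generate_eth_account()

print(account.address)      # checksummed address derived via eth_account
print(account.private_key)  # hex digest from hashlib.sha256(...)
```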
opengradient/cli.py CHANGED
@@ -1,18 +1,30 @@
  import click
- import os
  import opengradient
  import json
  import ast
  from pathlib import Path
+ import logging
+ from pprint import pformat
+ import webbrowser
+ import sys
+
  from .client import Client
- from opengradient.types import InferenceMode, ModelInput
+ from .defaults import *
+ from .types import InferenceMode
+ from .account import EthAccount, generate_eth_account
+
+ OG_CONFIG_FILE = Path.home() / '.opengradient_config.json'
+

- # Environment variable names
- API_KEY_ENV = 'OPENGRADIENT_API_KEY'
- RPC_URL_ENV = 'OPENGRADIENT_RPC_URL'
- CONTRACT_ADDRESS_ENV = 'OPENGRADIENT_CONTRACT_ADDRESS'
- EMAIL_ENV = 'OPENGRADIENT_EMAIL'
- PASSWORD_ENV = 'OPENGRADIENT_PASSWORD'
+ def load_og_config():
+ if OG_CONFIG_FILE.exists():
+ with OG_CONFIG_FILE.open('r') as f:
+ return json.load(f)
+ return {}
+
+ def save_og_config(ctx):
+ with OG_CONFIG_FILE.open('w') as f:
+ json.dump(ctx.obj, f)

  # Convert string to dictionary click parameter typing
  class DictParamType(click.ParamType):
@@ -39,122 +51,231 @@ Dict = DictParamType()

  # Support inference modes
  InferenceModes = {
- "VANILLA": opengradient.InferenceMode.VANILLA,
- "ZKML": opengradient.InferenceMode.ZKML,
- "TEE": opengradient.InferenceMode.TEE,
+ "VANILLA": InferenceMode.VANILLA,
+ "ZKML": InferenceMode.ZKML,
+ "TEE": InferenceMode.TEE,
  }

- # TODO (Kyle): Once we're farther into development, we should remove the defaults for these options
+
+ def initialize_config(ctx):
+ """Interactively initialize OpenGradient config"""
+ if ctx.obj: # Check if config data already exists
+ click.echo("A config already exists. Please run 'opengradient config clear' first if you want to reinitialize.")
+ click.echo("You can view your current config with 'opengradient config show'.")
+
+ click.echo("Initializing OpenGradient config...")
+ click.secho(f"Config will be stored in: {OG_CONFIG_FILE}", fg='cyan')
+
+ # Check if user has an existing account
+ has_account = click.confirm("Do you already have an OpenGradient account?", default=True)
+
+ if not has_account:
+ eth_account = create_account_impl()
+ if eth_account is None:
+ click.echo("Account creation cancelled. Config initialization aborted.")
+ return
+ ctx.obj['private_key'] = eth_account.private_key
+ else:
+ ctx.obj['private_key'] = click.prompt("Enter your OpenGradient private key", type=str)
+
+ # Make email and password optional
+ email = click.prompt("Enter your OpenGradient Hub email address (optional, press Enter to skip)",
+ type=str, default='', show_default=False)
+ ctx.obj['email'] = email if email else None
+ password = click.prompt("Enter your OpenGradient Hub password (optional, press Enter to skip)",
+ type=str, hide_input=True, default='', show_default=False)
+ ctx.obj['password'] = password if password else None
+
+ ctx.obj['rpc_url'] = DEFAULT_RPC_URL
+ ctx.obj['contract_address'] = DEFAULT_INFERENCE_CONTRACT_ADDRESS
+
+ save_og_config(ctx)
+ click.echo("Config has been saved.")
+ click.secho("You can run 'opengradient config show' to see configs.", fg='green')
+
+
  @click.group()
- @click.option('--api_key',
- envvar=API_KEY_ENV,
- help='Your OpenGradient private key',
- default="cd09980ef6e280afc3900d2d6801f9e9c5d858a5deaeeab74a65643f5ff1a4c1")
- @click.option('--rpc_url',
- envvar=RPC_URL_ENV,
- help='OpenGradient RPC URL address',
- default="http://18.218.115.248:8545")
- @click.option('--contract_address',
- envvar=CONTRACT_ADDRESS_ENV,
- help='OpenGradient inference contract address',
- default="0x350E0A430b2B1563481833a99523Cfd17a530e4e")
- @click.option('--email',
- envvar=EMAIL_ENV,
- help='Your OpenGradient Hub email address -- not required for inference',
- default="test@test.com")
- @click.option('--password',
- envvar=PASSWORD_ENV,
- help='Your OpenGradient Hub password -- not required for inference',
- default="Test-123")
  @click.pass_context
- def cli(ctx, api_key, rpc_url, contract_address, email, password):
- """CLI for OpenGradient SDK"""
- if not api_key:
- click.echo("Please provide an API key via flag or setting environment variable OPENGRADIENT_API_KEY")
- if not rpc_url:
- click.echo("Please provide a RPC URL via flag or setting environment variable OPENGRADIENT_RPC_URL")
- if not contract_address:
- click.echo("Please provide a contract address via flag or setting environment variable OPENGRADIENT_CONTRACT_ADDRESS")
- if not api_key or not rpc_url or not contract_address:
+ def cli(ctx):
+ """CLI for OpenGradient SDK. Visit https://docs.opengradient.ai/developers/python_sdk/ for more documentation."""
+ # Load existing config
+ ctx.obj = load_og_config()
+
+ no_client_commands = ['config', 'create-account', 'version']
+
+ # Only create client if this is not a config management command
+ if ctx.invoked_subcommand in no_client_commands:
+ return
+
+ if all(key in ctx.obj for key in ['private_key', 'rpc_url', 'contract_address']):
+ try:
+ ctx.obj['client'] = Client(private_key=ctx.obj['private_key'],
+ rpc_url=ctx.obj['rpc_url'],
+ contract_address=ctx.obj['contract_address'],
+ email=ctx.obj.get('email'),
+ password=ctx.obj.get('password'))
+ except Exception as e:
+ click.echo(f"Failed to create OpenGradient client: {str(e)}")
+ ctx.exit(1)
+ else:
+ click.echo("Insufficient information to create client. Some commands may not be available.")
+ click.echo("Please run 'opengradient config clear' and/or 'opengradient config init' and to reinitialize your configs.")
  ctx.exit(1)
+
+
+ @cli.group()
+ def config():
+ """Manage your OpenGradient configuration (credentials etc)"""
+ pass
+
+
+ @config.command()
+ @click.pass_context
+ def init(ctx):
+ """Initialize or reinitialize the OpenGradient config"""
+ initialize_config(ctx)
+
+
+ @config.command()
+ @click.pass_context
+ def show(ctx):
+ """Display current config information"""
+ click.secho(f"Config file location: {OG_CONFIG_FILE}", fg='cyan')
+
+ if not ctx.obj:
+ click.echo("Config is empty. Run 'opengradient config init' to initialize it.")
  return

- try:
- ctx.obj = Client(private_key=api_key,
- rpc_url=rpc_url,
- contract_address=contract_address,
- email=email,
- password=password)
- except Exception as e:
- click.echo(f"Failed to create OpenGradient client: {str(e)}")
+ click.echo("Current config:")
+ for key, value in ctx.obj.items():
+ if key != 'client': # Don't display the client object
+ if key == 'password' and value is not None:
+ click.echo(f"{key}: {'*' * len(value)}") # Mask the password
+ elif value is None:
+ click.echo(f"{key}: Not set")
+ else:
+ click.echo(f"{key}: {value}")

- @cli.command()
+
+ @config.command()
  @click.pass_context
- def client_settings(ctx):
- """Display OpenGradient client settings"""
- client = ctx.obj
- if not client:
- click.echo("Client not initialized")
- ctx.exit(1)
-
- click.echo("Settings for OpenGradient client:")
- click.echo(f"\tAPI key ({API_KEY_ENV}): {client.private_key}")
- click.echo(f"\tRPC URL ({RPC_URL_ENV}): {client.rpc_url}")
- click.echo(f"\tContract address ({CONTRACT_ADDRESS_ENV}): {client.contract_address}")
- if client.user:
- click.echo(f"\tEmail ({EMAIL_ENV}): {client.user["email"]}")
+ def clear(ctx):
+ """Clear all saved configs"""
+ if not ctx.obj:
+ click.echo("No configs to clear.")
+ return
+
+ if click.confirm("Are you sure you want to clear all configs? This action cannot be undone.", abort=True):
+ ctx.obj.clear()
+ save_og_config(ctx)
+ click.echo("Configs cleared.")
  else:
- click.echo(f"\tEmail: not set")
+ click.echo("Config clear cancelled.")
+

  @cli.command()
- @click.argument('model_path', type=Path)
- @click.argument('model_id', type=str)
- @click.argument('version_id', type=str)
+ @click.option('--repo', '-r', '--name', 'repo_name', required=True, help='Name of the new model repository')
+ @click.option('--description', '-d', required=True, help='Description of the model')
  @click.pass_obj
- def upload(client, model_path, model_id, version_id):
- """Upload a model"""
+ def create_model_repo(obj, repo_name: str, description: str):
+ """
+ Create a new model repository.
+
+ This command creates a new model repository with the specified name and description.
+ The repository name should be unique within your account.
+
+ Example usage:
+
+ \b
+ opengradient create-model-repo --name "my_new_model" --description "A new model for XYZ task"
+ opengradient create-model-repo -n "my_new_model" -d "A new model for XYZ task"
+ """
+ client: Client = obj['client']
+
  try:
- result = client.upload(model_path, model_id, version_id)
- click.echo(f"Model uploaded successfully: {result}")
+ result = client.create_model(repo_name, description)
+ click.echo(f"Model repository created successfully: {result}")
  except Exception as e:
- click.echo(f"Error uploading model: {str(e)}")
+ click.echo(f"Error creating model: {str(e)}")
+

  @cli.command()
- @click.argument('model_name', type=str)
- @click.argument('model_desc', type=str)
+ @click.option('--repo', '-r', 'repo_name', required=True, help='Name of the existing model repository')
+ @click.option('--notes', '-n', help='Version notes (optional)')
+ @click.option('--major', '-m', is_flag=True, default=False, help='Flag to indicate a major version update')
  @click.pass_obj
- def create_model(client, model_name, model_desc):
- """Create a new model"""
+ def create_version(obj, repo_name: str, notes: str, major: bool):
+ """Create a new version in an existing model repository.
+
+ This command creates a new version for the specified model repository.
+ You can optionally provide version notes and indicate if it's a major version update.
+
+ Example usage:
+
+ \b
+ opengradient create-version --repo my_model_repo --notes "Added new feature X" --major
+ opengradient create-version -r my_model_repo -n "Bug fixes"
+ """
+ client: Client = obj['client']
+
  try:
- result = client.create_model(model_name, model_desc)
- click.echo(f"Model created successfully: {result}")
+ result = client.create_version(repo_name, notes, major)
+ click.echo(f"New version created successfully: {result}")
  except Exception as e:
- click.echo(f"Error creating model: {str(e)}")
+ click.echo(f"Error creating version: {str(e)}")
+

  @cli.command()
- @click.argument('model_id', type=str)
- @click.option('--notes', type=str, default=None, help='Version notes')
- @click.option('--is-major', default=False, is_flag=True, help='Is this a major version')
+ @click.argument('file_path', type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True, path_type=Path),
+ metavar='FILE_PATH')
+ @click.option('--repo', '-r', 'repo_name', required=True, help='Name of the model repository')
+ @click.option('--version', '-v', required=True, help='Version of the model (e.g., "0.01")')
  @click.pass_obj
- def create_version(client, model_id, notes, is_major):
- """Create a new version of a model"""
+ def upload_file(obj, file_path: Path, repo_name: str, version: str):
+ """
+ Upload a file to an existing model repository and version.
+
+ FILE_PATH: Path to the file you want to upload (e.g., model.onnx)
+
+ Example usage:
+
+ \b
+ opengradient upload-file path/to/model.onnx --repo my_model_repo --version 0.01
+ opengradient upload-file path/to/model.onnx -r my_model_repo -v 0.01
+ """
+ client: Client = obj['client']
+
  try:
- result = client.create_version(model_id, notes, is_major)
- click.echo(f"Version created successfully: {result}")
+ result = client.upload(file_path, repo_name, version)
+ click.echo(f"File uploaded successfully: {result}")
  except Exception as e:
- click.echo(f"Error creating version: {str(e)}")
+ click.echo(f"Error uploading model: {str(e)}")
+

  @cli.command()
- @click.argument('model_cid', type=str)
- @click.argument('inference_mode', type=click.Choice(InferenceModes.keys()), default="VANILLA")
- @click.argument('input_data', type=Dict, required=False)
- @click.option('--input_file',
+ @click.option('--model', '-m', 'model_cid', required=True, help='CID of the model to run inference on')
+ @click.option('--mode', 'inference_mode', type=click.Choice(InferenceModes.keys()), default="VANILLA",
+ help='Inference mode (default: VANILLA)')
+ @click.option('--input', '-d', 'input_data', type=Dict, help='Input data for inference as a JSON string')
+ @click.option('--input-file', '-f',
  type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True, path_type=Path),
- help="Optional file input for model inference -- must be JSON")
+ help="JSON file containing input data for inference")
  @click.pass_context
- def infer(ctx, model_cid, inference_mode, input_data, input_file):
- """Run inference on a model"""
- client = ctx.obj
+ def infer(ctx, model_cid: str, inference_mode: str, input_data, input_file: Path):
+ """
+ Run inference on a model.
+
+ This command runs inference on the specified model using the provided input data.
+ You must provide either --input or --input-file, but not both.
+
+ Example usage:
+
+ \b
+ opengradient infer --model Qm... --mode VANILLA --input '{"key": "value"}'
+ opengradient infer -m Qm... -i ZKML -f input_data.json
+ """
+ client: Client = ctx.obj['client']
+
  try:
  if not input_data and not input_file:
  click.echo("Must specify either input_data or input_file")
@@ -174,11 +295,12 @@ def infer(ctx, model_cid, inference_mode, input_data, input_file):
  model_input = json.load(file)

  # Parse input data from string to dict
- click.echo(f"Running {inference_mode} inference for {model_cid}...")
+ click.echo(f"Running {inference_mode} inference for model \"{model_cid}\"\n")
  tx_hash, model_output = client.infer(model_cid=model_cid, inference_mode=InferenceModes[inference_mode], model_input=model_input)
+
  click.secho("Success!", fg="green")
- click.echo(f"\nTransaction Hash: \n{tx_hash}")
- click.echo(f"\nInference result: \n{model_output}")
+ click.echo(f"Transaction hash: {tx_hash}")
+ click.echo(f"Inference result:\n{pformat(model_output, indent=2, width=120)}")
  except json.JSONDecodeError as e:
  click.echo(f"Error decoding JSON: {e}", err=True)
  click.echo(f"Error occurred on line {e.lineno}, column {e.colno}", err=True)
@@ -186,9 +308,83 @@ def infer(ctx, model_cid, inference_mode, input_data, input_file):
  click.echo(f"Error running inference: {str(e)}")


+ @cli.command()
+ def create_account():
+ """Create a new test account for OpenGradient inference and model management"""
+ create_account_impl()
+
+
+ def create_account_impl() -> EthAccount:
+ click.echo("\n" + "=" * 50)
+ click.echo("OpenGradient Account Creation Wizard".center(50))
+ click.echo("=" * 50 + "\n")
+
+ click.echo("\n" + "-" * 50)
+ click.echo("Step 1: Create Account on OpenGradient Hub")
+ click.echo("-" * 50)
+
+ click.echo(f"Please create an account on the OpenGradient Hub")
+ webbrowser.open(DEFAULT_HUB_SIGNUP_URL, new=2)
+ click.confirm("Have you successfully created your account on the OpenGradient Hub?", abort=True)
+
+ click.echo("\n" + "-" * 50)
+ click.echo("Step 2: Generate Ethereum Account")
+ click.echo("-" * 50)
+ eth_account = generate_eth_account()
+ click.echo(f"Generated OpenGradient chain account with address: {eth_account.address}")
+
+ click.echo("\n" + "-" * 50)
+ click.echo("Step 3: Fund Your Account")
+ click.echo("-" * 50)
+ click.echo(f"Please fund your account clicking 'Request' on the Faucet website")
+ webbrowser.open(DEFAULT_OG_FAUCET_URL + eth_account.address, new=2)
+ click.confirm("Have you successfully funded your account using the Faucet?", abort=True)
+
+ click.echo("\n" + "=" * 50)
+ click.echo("Account Creation Complete!".center(50))
+ click.echo("=" * 50)
+ click.echo("\nYour OpenGradient account has been successfully created and funded.")
+ click.secho(f"Address: {eth_account.address}", fg='green')
+ click.secho(f"Private Key: {eth_account.private_key}", fg='green')
+ click.secho("\nPlease save this information for your records.\n", fg='cyan')
+
+ return eth_account
+
+
  @cli.command()
  def version():
+ """Return version of OpenGradient CLI"""
  click.echo(f"OpenGradient CLI version: {opengradient.__version__}")

+
+ @cli.command()
+ @click.option('--repo', '-r', 'repo_name', required=True, help='Name of the model repository')
+ @click.option('--version', '-v', required=True, help='Version of the model (e.g., "0.01")')
+ @click.pass_obj
+ def list_files(client: Client, repo_name: str, version: str):
+ """
+ List files for a specific version of a model repository.
+
+ This command lists all files associated with the specified model repository and version.
+
+ Example usage:
+
+ \b
+ opengradient list-files --repo my_model_repo --version 0.01
+ opengradient list-files -r my_model_repo -v 0.01
+ """
+ try:
+ files = client.list_files(repo_name, version)
+ if files:
+ click.echo(f"Files for {repo_name} version {version}:")
+ for file in files:
+ click.echo(f" - {file['name']} (Size: {file['size']} bytes)")
+ else:
+ click.echo(f"No files found for {repo_name} version {version}")
+ except Exception as e:
+ click.echo(f"Error listing files: {str(e)}")
+
+
  if __name__ == '__main__':
+ logging.getLogger().setLevel(logging.WARN)
  cli()
opengradient/client.py CHANGED
@@ -11,8 +11,6 @@ from typing import Dict, Tuple, Union, List
  from web3.exceptions import ContractLogicError
  import firebase

- logging.basicConfig(level=logging.INFO)
-
  class Client:
  FIREBASE_CONFIG = {
  "apiKey": "AIzaSyDUVckVtfl-hiteBzPopy1pDD8Uvfncs7w",
@@ -23,7 +21,7 @@ class Client:
  "databaseURL": ""
  }

- def __init__(self, private_key: str, rpc_url: str, contract_address: str, email: str = "test@test.com", password: str = "Test-123"):
+ def __init__(self, private_key: str, rpc_url: str, contract_address: str, email: str, password: str):
  """
  Initialize the Client with private key, RPC URL, and contract address.

@@ -43,13 +41,22 @@ class Client:
  self.firebase_app = firebase.initialize_app(self.FIREBASE_CONFIG)
  self.auth = self.firebase_app.auth()
  self.user = None
+ self.email = email
+ self.password = password
+
+ logging.debug("Initialized client with parameters:\n"
+ "private key: %s\n"
+ "RPC URL: %s\n"
+ "Contract Address: %s\n",
+ private_key, rpc_url, contract_address)

  abi_path = os.path.join(os.path.dirname(__file__), 'abi', 'inference.abi')
  with open(abi_path, 'r') as abi_file:
  inference_abi = json.load(abi_file)
  self.abi = inference_abi

- self.login(email, password)
+ if email is not None:
+ self.login(email, password)

  def _initialize_web3(self):
  """
@@ -273,7 +280,6 @@ class Client:
  logging.error(f"Request exception during upload: {str(e)}")
  if hasattr(e, 'response') and e.response is not None:
  logging.error(f"Response status code: {e.response.status_code}")
- logging.error(f"Response headers: {e.response.headers}")
  logging.error(f"Response content: {e.response.text[:1000]}...") # Log first 1000 characters
  raise OpenGradientError(f"Upload failed due to request exception: {str(e)}",
  status_code=e.response.status_code if hasattr(e, 'response') else None)
@@ -402,4 +408,49 @@ class Client:
  return self.user
  except Exception as e:
  logging.error(f"Authentication failed: {str(e)}")
- raise
+ raise
+
+ def list_files(self, model_name: str, version: str) -> List[Dict]:
+ """
+ List files for a specific version of a model.
+
+ Args:
+ model_name (str): The unique identifier for the model.
+ version (str): The version identifier for the model.
+
+ Returns:
+ List[Dict]: A list of dictionaries containing file information.
+
+ Raises:
+ OpenGradientError: If the file listing fails.
+ """
+ if not self.user:
+ raise ValueError("User not authenticated")
+
+ url = f"https://api.opengradient.ai/api/v0/models/{model_name}/versions/{version}/files"
+ headers = {
+ 'Authorization': f'Bearer {self.user["idToken"]}'
+ }
+
+ logging.debug(f"List Files URL: {url}")
+ logging.debug(f"Headers: {headers}")
+
+ try:
+ response = requests.get(url, headers=headers)
+ response.raise_for_status()
+
+ json_response = response.json()
+ logging.info(f"File listing successful. Number of files: {len(json_response)}")
+
+ return json_response
+
+ except requests.RequestException as e:
+ logging.error(f"File listing failed: {str(e)}")
+ if hasattr(e, 'response') and e.response is not None:
+ logging.error(f"Response status code: {e.response.status_code}")
+ logging.error(f"Response content: {e.response.text[:1000]}...") # Log first 1000 characters
+ raise OpenGradientError(f"File listing failed: {str(e)}",
+ status_code=e.response.status_code if hasattr(e, 'response') else None)
+ except Exception as e:
+ logging.error(f"Unexpected error during file listing: {str(e)}", exc_info=True)
+ raise OpenGradientError(f"Unexpected error during file listing: {str(e)}")
opengradient/defaults.py ADDED
@@ -0,0 +1,7 @@
+
+ # Default variables
+ DEFAULT_PRIVATE_KEY="cd09980ef6e280afc3900d2d6801f9e9c5d858a5deaeeab74a65643f5ff1a4c1"
+ DEFAULT_RPC_URL="http://18.218.115.248:8545"
+ DEFAULT_OG_FAUCET_URL="http://18.218.115.248:8080/?address="
+ DEFAULT_HUB_SIGNUP_URL="https://hub.opengradient.ai/signup"
+ DEFAULT_INFERENCE_CONTRACT_ADDRESS="0x75D0266DAb643417e9FFD828A1A31C1E039a966c"
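These module-level constants back both `og.init()` and the CLI config. A sketch of constructing `Client` directly from them; the email and password are placeholders (passing `email=None` skips the Hub login, per the `__init__` change above):

```python
# Sketch: building a Client from the packaged defaults; credentials are placeholders.
from opengradient.client import Client
from opengradient.defaults import (
    DEFAULT_PRIVATE_KEY,
    DEFAULT_RPC_URL,
    DEFAULT_INFERENCE_CONTRACT_ADDRESS,
)

client = Client(
    private_key=DEFAULT_PRIVATE_KEY,
    rpc_url=DEFAULT_RPC_URL,
    contract_address=DEFAULT_INFERENCE_CONTRACT_ADDRESS,
    email="you@example.com",     # or None to skip Hub authentication
    password="<hub-password>",
)
```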
opengradient/utils.py CHANGED
@@ -40,11 +40,10 @@ def convert_to_model_input(inputs: Dict[str, np.ndarray]) -> Tuple[List[Tuple[st
  """
  Expect SDK input to be a dict with the format
  key: tensor name
- value: np.array
+ value: np.array / list

- Note: np.array types must be float or string. Ints currently not supported.
-
- Return a tuple of (number tensors, string tensors) depending on the input type
+ Return a tuple of (number tensors, string tensors) depending on the input type. Each number and string tensor converted
+ to a numpy array and flattened and the shape saved.
  """
  logging.debug("Converting the following input dictionary to ModelInput: %s", inputs)
  number_tensors = []
@@ -59,19 +58,37 @@ def convert_to_model_input(inputs: Dict[str, np.ndarray]) -> Tuple[List[Tuple[st
  logging.debug(f"\tConverting single entry {tensor_data} to a list")
  tensor_data = np.array([tensor_data])

+ # Check if type is np array
+ if not isinstance(tensor_data, np.ndarray):
+ raise TypeError("Inference input must be list, numpy array, or type (str, int, float): %s" % type(tensor_data))
+
+ # Flatten list and retain shape
+ shape = tensor_data.shape
+ flat_data = tensor_data.flatten()
+ logging.debug("Shape and flattened data: %s, %s", shape, flat_data)
+
  # Parse into number and string tensors
  if issubclass(tensor_data.dtype.type, np.floating):
- input = (tensor_name, [convert_to_fixed_point(i) for i in tensor_data])
+ # Convert to fixed-point tuples
+ data_type = np.dtype([('value', int), ('decimal', int)])
+ converted_tensor_data = np.array([convert_to_fixed_point(i) for i in flat_data], dtype=data_type)
+
+ input = (tensor_name, converted_tensor_data.tolist(), shape)
  logging.debug("\tFloating tensor input: %s", input)

  number_tensors.append(input)
  elif issubclass(tensor_data.dtype.type, np.integer):
- input = (tensor_name, [convert_to_fixed_point(int(i)) for i in tensor_data])
+ # Convert to fixed-point tuples
+ data_type = np.dtype([('value', int), ('decimal', int)])
+ converted_tensor_data = np.array([convert_to_fixed_point(int(i)) for i in flat_data], dtype=data_type)
+
+ input = (tensor_name, converted_tensor_data.tolist(), shape)
  logging.debug("\tInteger tensor input: %s", input)

  number_tensors.append(input)
  elif issubclass(tensor_data.dtype.type, np.str_):
- input = (tensor_name, [s for s in tensor_data])
+ # TODO (Kyle): Add shape into here as well
+ input = (tensor_name, [s for s in flat_data])
  logging.debug("\tString tensor input: %s", input)

  string_tensors.append(input)
@@ -85,6 +102,15 @@ def convert_to_model_input(inputs: Dict[str, np.ndarray]) -> Tuple[List[Tuple[st
  return number_tensors, string_tensors

  def convert_to_model_output(event_data: AttributeDict) -> Dict[str, np.ndarray]:
+ """
+ Converts inference output into a user-readable output.
+ Expects the inference node to return a dict with the format:
+ key: output_name (str)
+ value: (output_array (list), shape (list)) (tuple)
+
+ We need to reshape each output array using the shape parameter in order to get the array
+ back into its original shape.
+ """
  logging.debug(f"Parsing event data: {event_data}")

  output_dict = {}
@@ -98,6 +124,7 @@ def convert_to_model_output(event_data: AttributeDict) -> Dict[str, np.ndarray]:
  logging.debug(f"Processing number tensor: {tensor}")
  if isinstance(tensor, AttributeDict):
  name = tensor.get('name')
+ shape = tensor.get('shape')
  values = []
  # Convert from fixed point back into np.float32
  for v in tensor.get('values', []):
@@ -105,7 +132,7 @@ def convert_to_model_output(event_data: AttributeDict) -> Dict[str, np.ndarray]:
  values.append(convert_to_float32(value=int(v.get('value')), decimals=int(v.get('decimals'))))
  else:
  logging.warning(f"Unexpected number type: {type(v)}")
- output_dict[name] = np.array(values)
+ output_dict[name] = np.array(values).reshape(shape)
  else:
  logging.warning(f"Unexpected tensor type: {type(tensor)}")

@@ -114,8 +141,9 @@ def convert_to_model_output(event_data: AttributeDict) -> Dict[str, np.ndarray]:
  logging.debug(f"Processing string tensor: {tensor}")
  if isinstance(tensor, AttributeDict):
  name = tensor.get('name')
+ shape = tensor.get('shape')
  values = tensor.get('values', [])
- output_dict[name] = values
+ output_dict[name] = np.array(values).reshape(shape)
  else:
  logging.warning(f"Unexpected tensor type: {type(tensor)}")
  else:
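The utils changes above flatten every numeric input, record its original shape alongside the fixed-point values, and reshape decoded outputs with that saved shape. A self-contained sketch of that round trip, using a simple two-decimal encoding as a stand-in for the package's `convert_to_fixed_point`/`convert_to_float32`:

```python
# Stand-alone sketch of the flatten -> fixed-point -> reshape round trip.
# A fixed two-decimal encoding stands in for opengradient.utils' converters.
import numpy as np

def to_fixed_point(x: float) -> tuple:
    return int(round(x * 100)), 2            # (value, decimals), assumed encoding

def to_float32(value: int, decimals: int) -> np.float32:
    return np.float32(value / 10 ** decimals)

tensor = np.array([[1.5, 2.25], [3.0, 4.75]])
shape = tensor.shape                          # retained, as in convert_to_model_input
flat = tensor.flatten()
encoded = [to_fixed_point(v) for v in flat]   # [(150, 2), (225, 2), ...]

decoded = np.array([to_float32(v, d) for v, d in encoded]).reshape(shape)
assert decoded.shape == (2, 2) and decoded[0, 1] == np.float32(2.25)
```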
{opengradient-0.2.7.dist-info → opengradient-0.3.0.dist-info}/METADATA RENAMED
@@ -1,7 +1,7 @@
  Metadata-Version: 2.1
  Name: opengradient
- Version: 0.2.7
- Summary: A Python SDK for OpenGradient inference services
+ Version: 0.3.0
+ Summary: Python SDK for OpenGradient decentralized model management & inference services
  Author-email: OpenGradient <oliver@opengradient.ai>
  License: MIT License

@@ -135,47 +135,86 @@ Requires-Dist: websockets==13.1
  Requires-Dist: xattr==1.1.0
  Requires-Dist: yarl==1.13.1

- # OpenGradient Python SDK
+ # OpenGradient SDK

- Python SDK for OpenGradient inference services.
+ Python SDK for OpenGradient decentralized model management & inference services.

  ## Installation
- ```
+ ```python
  pip install opengradient
  ```

  ## Quick Start
- ```
+ ```python
  import opengradient as og
- og.init(private_key="x", rpc_url="y", contract_address="z")
- ```
-
- ### Sign in with Email
- ```
- og.login(email="you@opengradient.ai", password="xyz")
+ og.init(email="<email>", password="<password>")
  ```

  ### Create a Model
+ ```python
+ og.create_model(model_name="<model_name>", model_desc="<model_description>")
  ```
- og.create_model(model_name="test-network-model", model_desc="testing upload to sdk")
+
+ ### Create a Model (with file upload)
+ ```python
+ og.create_model(model_name="<model_name>", model_desc="<model_description>", model_path="<model_path>")
  ```

  ### Create a Version of a Model
- ```
- og.create_version(model_name="test-network-model", notes="test notes")
+ ```python
+ og.create_version(model_name="<model_name>", notes="<model_notes>")
  ```

  ### Upload Files to a Model
+ ```python
+ og.upload(model_path="<model_path>", model_name="<model_name>", version="<version>")
  ```
- og.upload(model_path="local_path_to_your_model.onnx", model_name="test-network-model", version="0.01")
+
+ ### List Files of a Model Version
+ ```python
+ og.list_files(model_name="<model_name>", version="<version>")
  ```

  ### Run Inference
- ```
+ ```python
  inference_mode = og.InferenceMode.VANILLA
- inference_cid = og.infer(model_cid, model_inputs, inference_mode)
+ og.infer(model_cid, model_inputs, inference_mode)
  ```

+ ## Using the CLI
+
+ ```bash
+ export OPENGRADIENT_EMAIL="<email>"
+ export OPENGRADIENT_PASSWORD="<password>"
  ```
- og.infer(model_id, inference_mode, model_input)
+
+ #### Creating a Model
+ ```bash
+ opengradient create_model "<model_name>" "<description>"
+ ```
+ - creating a model automatically initializes version `v0.01`
+
+ #### Creating a Version
+ ```bash
+ opengradient create_model "<model_name>" "<description>"
+ ```
+
+ #### Upload a File
+ ```bash
+ opengradient upload "<model_path>" "<model_name>" "<version>"
+ ```
+
+ #### List Files of a Model Version
+ ```bash
+ opengradient list_files "<model_name>" "<version>"
+ ```
+
+ #### CLI infer using string
+ ```bash
+ opengradient infer QmbUqS93oc4JTLMHwpVxsE39mhNxy6hpf6Py3r9oANr8aZ VANILLA '{"num_input1":[1.0, 2.0, 3.0], "num_input2":10, "str_input1":["hello", "ONNX"], "str_input2":" world"}'
+ ```
+
+ #### CLI infer using file path input
+ ```bash
+ opengradient infer QmbUqS93oc4JTLMHwpVxsE39mhNxy6hpf6Py3r9oANr8aZ VANILLA --input_file input.json
  ```
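For the Python-side counterpart of the CLI infer examples, the same sample CID and inputs can be passed through `og.infer(model_cid, inference_mode, model_input)`; this is a sketch built from the README samples, not an output shown in this diff:

```python
# Sketch mirroring the CLI infer sample via the Python API; CID and inputs are
# the sample values from the README above, credentials are placeholders.
import opengradient as og

og.init(email="<email>", password="<password>")

result = og.infer(
    model_cid="QmbUqS93oc4JTLMHwpVxsE39mhNxy6hpf6Py3r9oANr8aZ",
    inference_mode=og.InferenceMode.VANILLA,
    model_input={
        "num_input1": [1.0, 2.0, 3.0],
        "num_input2": 10,
        "str_input1": ["hello", "ONNX"],
        "str_input2": " world",
    },
)
```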
opengradient-0.3.0.dist-info/RECORD ADDED
@@ -0,0 +1,16 @@
+ opengradient/__init__.py,sha256=JtRa1lL5qn5slM2egGm76gVuft-qEHc9gtU6tbOPRG8,2028
+ opengradient/account.py,sha256=s1C4hAtc8vcHObWjwxwlYJA041S6DTbr7-rK6qiWPsQ,1149
+ opengradient/cli.py,sha256=YKctHMZhT_Y1fANWDnGo68QpIVLXqz5ifH5kQXIxD8A,14412
+ opengradient/client.py,sha256=__KtU-i6EQqZNj8xX-yKoqb_V8Cdzgr9zHBY0m02Xfw,20141
+ opengradient/defaults.py,sha256=5JNcTNfOmoP3DymEvBqZPQHUYCIslvxvLRtJLon0MkM,354
+ opengradient/exceptions.py,sha256=v4VmUGTvvtjhCZAhR24Ga42z3q-DzR1Y5zSqP_yn2Xk,3366
+ opengradient/types.py,sha256=EoJN-DkQrJ2WTUv8OenlrlWJWFY2jPGTl-T8C_OVjp8,1849
+ opengradient/utils.py,sha256=F1Nj-GMNFQFxCtbGgWQq1RP4TSurbpQxJV3yKeEo1b0,6482
+ opengradient/abi/inference.abi,sha256=u8FsW0s1YeRjUb9eLS1k_qh_5f_cwOdr0bii-tAdxh0,2683
+ opengradient/abi/llm.abi,sha256=zhiPFyBT09EI3QU5DVoKHo7e8T9PFcfIQ3RHDYetm4M,3609
+ opengradient-0.3.0.dist-info/LICENSE,sha256=xEcvQ3AxZOtDkrqkys2Mm6Y9diEnaSeQRKvxi-JGnNA,1069
+ opengradient-0.3.0.dist-info/METADATA,sha256=A6qo4CPFnnCw1N_PK5yfxKS9hCxFUbpvKEiBWma8byw,7168
+ opengradient-0.3.0.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
+ opengradient-0.3.0.dist-info/entry_points.txt,sha256=yUKTaJx8RXnybkob0J62wVBiCp_1agVbgw9uzsmaeJc,54
+ opengradient-0.3.0.dist-info/top_level.txt,sha256=oC1zimVLa2Yi1LQz8c7x-0IQm92milb5ax8gHBHwDqU,13
+ opengradient-0.3.0.dist-info/RECORD,,
opengradient-0.2.7.dist-info/RECORD DELETED
@@ -1,13 +0,0 @@
- opengradient/__init__.py,sha256=BRxrOg8w_gPDiYpBbDAeEy-XFidmGNU-GvWozFw3vbo,1783
- opengradient/cli.py,sha256=EVb_JNvfaXWEalKKtfRknyw7tXko5qqKkHPwTR-W3cQ,7468
- opengradient/client.py,sha256=XJ5AGB518Mwmfdk1xeHyR51B2KvGvFHEoRjWLsr1x7M,18052
- opengradient/exceptions.py,sha256=v4VmUGTvvtjhCZAhR24Ga42z3q-DzR1Y5zSqP_yn2Xk,3366
- opengradient/types.py,sha256=EoJN-DkQrJ2WTUv8OenlrlWJWFY2jPGTl-T8C_OVjp8,1849
- opengradient/utils.py,sha256=95i5RVn-32MRsn00M21io8QHLtmEAoRbgueMhDh0TVk,5079
- opengradient/abi/inference.abi,sha256=HH2SmCJ_D4O0I-CFsln0vFHd2PU-A-fxgCnUtHg0ZQg,2373
- opengradient-0.2.7.dist-info/LICENSE,sha256=xEcvQ3AxZOtDkrqkys2Mm6Y9diEnaSeQRKvxi-JGnNA,1069
- opengradient-0.2.7.dist-info/METADATA,sha256=AXjww8-Sq9V1hil5VFOYGdOV9A7Yd60ytTGOtu2u1QQ,6085
- opengradient-0.2.7.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
- opengradient-0.2.7.dist-info/entry_points.txt,sha256=yUKTaJx8RXnybkob0J62wVBiCp_1agVbgw9uzsmaeJc,54
- opengradient-0.2.7.dist-info/top_level.txt,sha256=oC1zimVLa2Yi1LQz8c7x-0IQm92milb5ax8gHBHwDqU,13
- opengradient-0.2.7.dist-info/RECORD,,