opengradient 0.3.24.tar.gz → 0.3.25.tar.gz

This diff compares the contents of two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
Files changed (37)
  1. {opengradient-0.3.24/src/opengradient.egg-info → opengradient-0.3.25}/PKG-INFO +1 -1
  2. {opengradient-0.3.24 → opengradient-0.3.25}/pyproject.toml +1 -1
  3. {opengradient-0.3.24 → opengradient-0.3.25}/src/opengradient/__init__.py +125 -98
  4. {opengradient-0.3.24 → opengradient-0.3.25}/src/opengradient/account.py +6 -4
  5. {opengradient-0.3.24 → opengradient-0.3.25}/src/opengradient/cli.py +151 -154
  6. {opengradient-0.3.24 → opengradient-0.3.25}/src/opengradient/client.py +300 -362
  7. opengradient-0.3.25/src/opengradient/defaults.py +8 -0
  8. {opengradient-0.3.24 → opengradient-0.3.25}/src/opengradient/exceptions.py +25 -0
  9. {opengradient-0.3.24 → opengradient-0.3.25}/src/opengradient/llm/__init__.py +7 -10
  10. {opengradient-0.3.24 → opengradient-0.3.25}/src/opengradient/llm/og_langchain.py +34 -51
  11. opengradient-0.3.25/src/opengradient/llm/og_openai.py +113 -0
  12. opengradient-0.3.25/src/opengradient/mltools/__init__.py +9 -0
  13. {opengradient-0.3.24 → opengradient-0.3.25}/src/opengradient/mltools/model_tool.py +20 -26
  14. opengradient-0.3.25/src/opengradient/proto/infer_pb2.py +45 -0
  15. opengradient-0.3.25/src/opengradient/proto/infer_pb2_grpc.py +195 -0
  16. {opengradient-0.3.24 → opengradient-0.3.25}/src/opengradient/types.py +39 -35
  17. {opengradient-0.3.24 → opengradient-0.3.25}/src/opengradient/utils.py +30 -31
  18. {opengradient-0.3.24 → opengradient-0.3.25/src/opengradient.egg-info}/PKG-INFO +1 -1
  19. opengradient-0.3.24/src/opengradient/defaults.py +0 -8
  20. opengradient-0.3.24/src/opengradient/llm/og_openai.py +0 -120
  21. opengradient-0.3.24/src/opengradient/mltools/__init__.py +0 -14
  22. opengradient-0.3.24/src/opengradient/proto/infer_pb2.py +0 -50
  23. opengradient-0.3.24/src/opengradient/proto/infer_pb2_grpc.py +0 -186
  24. {opengradient-0.3.24 → opengradient-0.3.25}/LICENSE +0 -0
  25. {opengradient-0.3.24 → opengradient-0.3.25}/MANIFEST.in +0 -0
  26. {opengradient-0.3.24 → opengradient-0.3.25}/README.md +0 -0
  27. {opengradient-0.3.24 → opengradient-0.3.25}/setup.cfg +0 -0
  28. {opengradient-0.3.24 → opengradient-0.3.25}/src/opengradient/abi/ModelExecutorHistorical.abi +0 -0
  29. {opengradient-0.3.24 → opengradient-0.3.25}/src/opengradient/abi/inference.abi +0 -0
  30. {opengradient-0.3.24 → opengradient-0.3.25}/src/opengradient/contracts/templates/ModelExecutorHistorical.bin +0 -0
  31. {opengradient-0.3.24 → opengradient-0.3.25}/src/opengradient/proto/__init__.py +0 -0
  32. {opengradient-0.3.24 → opengradient-0.3.25}/src/opengradient/proto/infer.proto +0 -0
  33. {opengradient-0.3.24 → opengradient-0.3.25}/src/opengradient.egg-info/SOURCES.txt +0 -0
  34. {opengradient-0.3.24 → opengradient-0.3.25}/src/opengradient.egg-info/dependency_links.txt +0 -0
  35. {opengradient-0.3.24 → opengradient-0.3.25}/src/opengradient.egg-info/entry_points.txt +0 -0
  36. {opengradient-0.3.24 → opengradient-0.3.25}/src/opengradient.egg-info/requires.txt +0 -0
  37. {opengradient-0.3.24 → opengradient-0.3.25}/src/opengradient.egg-info/top_level.txt +0 -0
{opengradient-0.3.24/src/opengradient.egg-info → opengradient-0.3.25}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: opengradient
-Version: 0.3.24
+Version: 0.3.25
 Summary: Python SDK for OpenGradient decentralized model management & inference services
 Author-email: OpenGradient <oliver@opengradient.ai>
 License: MIT License
{opengradient-0.3.24 → opengradient-0.3.25}/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "opengradient"
-version = "0.3.24"
+version = "0.3.25"
 description = "Python SDK for OpenGradient decentralized model management & inference services"
 authors = [{name = "OpenGradient", email = "oliver@opengradient.ai"}]
 license = {file = "LICENSE"}
{opengradient-0.3.24 → opengradient-0.3.25}/src/opengradient/__init__.py
@@ -1,21 +1,36 @@
 """
 OpenGradient Python SDK for interacting with AI models and infrastructure.
 """
-from typing import Dict, List, Optional, Tuple, Any, Union
-from pathlib import Path
+
+from typing import Any, Dict, List, Optional, Tuple, Union
+
 from .client import Client
 from .defaults import DEFAULT_INFERENCE_CONTRACT_ADDRESS, DEFAULT_RPC_URL
-from .types import HistoricalInputQuery, InferenceMode, LlmInferenceMode, LLM, TEE_LLM, SchedulerParams
-from . import llm
-from . import mltools
+from .types import LLM, TEE_LLM, HistoricalInputQuery, InferenceMode, LlmInferenceMode, SchedulerParams
+
+from . import llm, mltools
 
 _client = None
 
-def init(email: str,
-         password: str,
-         private_key: str,
-         rpc_url=DEFAULT_RPC_URL,
-         contract_address=DEFAULT_INFERENCE_CONTRACT_ADDRESS):
+
+def new_client(
+    email: str, password: str, private_key: str, rpc_url=DEFAULT_RPC_URL, contract_address=DEFAULT_INFERENCE_CONTRACT_ADDRESS
+) -> Client:
+    """
+    Creates a unique OpenGradient client instance with the given authentication and network settings.
+
+    Args:
+        email: User's email address for authentication
+        password: User's password for authentication
+        private_key: Ethereum private key for blockchain transactions
+        rpc_url: Optional RPC URL for the blockchain network, defaults to mainnet
+        contract_address: Optional inference contract address
+    """
+
+    return Client(email=email, password=password, private_key=private_key, rpc_url=rpc_url, contract_address=contract_address)
+
+
+def init(email: str, password: str, private_key: str, rpc_url=DEFAULT_RPC_URL, contract_address=DEFAULT_INFERENCE_CONTRACT_ADDRESS):
     """Initialize the OpenGradient SDK with authentication and network settings.
 
     Args:
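The notable change in this hunk is the new `new_client` factory: unlike `init`, which populates the module-level `_client` singleton, it returns an independent `Client` instance, so multiple credential sets can coexist in one process. A minimal usage sketch based only on the signatures above (all credential values are placeholders):

    import opengradient as og

    # Singleton style: init() stores the client module-wide, after which
    # og.infer(), og.llm_chat(), etc. can be called directly.
    og.init(email="user@example.com", password="...", private_key="0x...")

    # Instance style (new in 0.3.25): each call builds its own Client.
    client_a = og.new_client(email="a@example.com", password="...", private_key="0x...")
    client_b = og.new_client(email="b@example.com", password="...", private_key="0x...")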
@@ -26,16 +41,11 @@ def init(email: str,
         contract_address: Optional inference contract address
     """
     global _client
-
-    _client = Client(
-        private_key=private_key,
-        rpc_url=rpc_url,
-        email=email,
-        password=password,
-        contract_address=contract_address
-    )
+
+    _client = Client(private_key=private_key, rpc_url=rpc_url, email=email, password=password, contract_address=contract_address)
     return _client
 
+
 def upload(model_path, model_name, version):
     """Upload a model file to OpenGradient.
 
@@ -54,6 +64,7 @@ def upload(model_path, model_name, version):
         raise RuntimeError("OpenGradient client not initialized. Call og.init() first.")
     return _client.upload(model_path, model_name, version)
 
+
 def create_model(model_name: str, model_desc: str, model_path: str = None):
     """Create a new model repository.
 
@@ -70,16 +81,17 @@ def create_model(model_name: str, model_desc: str, model_path: str = None):
     """
     if _client is None:
         raise RuntimeError("OpenGradient client not initialized. Call og.init() first.")
-
+
     result = _client.create_model(model_name, model_desc)
-
+
     if model_path:
         version = "0.01"
         upload_result = _client.upload(model_path, model_name, version)
         result["upload"] = upload_result
-
+
     return result
 
+
 def create_version(model_name, notes=None, is_major=False):
     """Create a new version for an existing model.
 
@@ -98,6 +110,7 @@ def create_version(model_name, notes=None, is_major=False):
         raise RuntimeError("OpenGradient client not initialized. Call og.init() first.")
     return _client.create_version(model_name, notes, is_major)
 
+
 def infer(model_cid, inference_mode, model_input, max_retries: Optional[int] = None):
     """Run inference on a model.
 
@@ -117,13 +130,16 @@ def infer(model_cid, inference_mode, model_input, max_retries: Optional[int] = N
         raise RuntimeError("OpenGradient client not initialized. Call og.init() first.")
     return _client.infer(model_cid, inference_mode, model_input, max_retries=max_retries)
 
-def llm_completion(model_cid: LLM,
-                   prompt: str,
-                   inference_mode: str = LlmInferenceMode.VANILLA,
-                   max_tokens: int = 100,
-                   stop_sequence: Optional[List[str]] = None,
-                   temperature: float = 0.0,
-                   max_retries: Optional[int] = None) -> Tuple[str, str]:
+
+def llm_completion(
+    model_cid: LLM,
+    prompt: str,
+    inference_mode: str = LlmInferenceMode.VANILLA,
+    max_tokens: int = 100,
+    stop_sequence: Optional[List[str]] = None,
+    temperature: float = 0.0,
+    max_retries: Optional[int] = None,
+) -> Tuple[str, str]:
     """Generate text completion using an LLM.
 
     Args:
@@ -143,23 +159,28 @@ def llm_completion(model_cid: LLM,
     """
    if _client is None:
         raise RuntimeError("OpenGradient client not initialized. Call og.init() first.")
-    return _client.llm_completion(model_cid=model_cid,
-                                  inference_mode=inference_mode,
-                                  prompt=prompt,
-                                  max_tokens=max_tokens,
-                                  stop_sequence=stop_sequence,
-                                  temperature=temperature,
-                                  max_retries=max_retries)
-
-def llm_chat(model_cid: LLM,
-             messages: List[Dict],
-             inference_mode: str = LlmInferenceMode.VANILLA,
-             max_tokens: int = 100,
-             stop_sequence: Optional[List[str]] = None,
-             temperature: float = 0.0,
-             tools: Optional[List[Dict]] = None,
-             tool_choice: Optional[str] = None,
-             max_retries: Optional[int] = None) -> Tuple[str, str, Dict]:
+    return _client.llm_completion(
+        model_cid=model_cid,
+        inference_mode=inference_mode,
+        prompt=prompt,
+        max_tokens=max_tokens,
+        stop_sequence=stop_sequence,
+        temperature=temperature,
+        max_retries=max_retries,
+    )
+
+
+def llm_chat(
+    model_cid: LLM,
+    messages: List[Dict],
+    inference_mode: str = LlmInferenceMode.VANILLA,
+    max_tokens: int = 100,
+    stop_sequence: Optional[List[str]] = None,
+    temperature: float = 0.0,
+    tools: Optional[List[Dict]] = None,
+    tool_choice: Optional[str] = None,
+    max_retries: Optional[int] = None,
+) -> Tuple[str, str, Dict]:
     """Have a chat conversation with an LLM.
 
     Args:
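The reflowed signature also surfaces the tool-calling parameters. A hedged sketch of a call, assuming an initialized client; the message and tool dictionary shapes follow the common OpenAI-style chat schema, which is an assumption here rather than something this diff confirms:

    import opengradient as og

    messages = [
        {"role": "system", "content": "You are a concise assistant."},
        {"role": "user", "content": "What is 2 + 2?"},
    ]
    tools = [
        {
            "type": "function",
            "function": {  # hypothetical tool definition
                "name": "add",
                "description": "Add two integers",
                "parameters": {
                    "type": "object",
                    "properties": {"a": {"type": "integer"}, "b": {"type": "integer"}},
                    "required": ["a", "b"],
                },
            },
        }
    ]

    # Returns a 3-tuple, per the Tuple[str, str, Dict] annotation above.
    chat_result = og.llm_chat(
        model_cid="<LLM id or og.LLM member>",  # placeholder
        messages=messages,
        tools=tools,
        tool_choice="auto",  # assumption: a string, per the Optional[str] annotation
        max_tokens=200,
    )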
@@ -181,15 +202,18 @@ def llm_chat(model_cid: LLM,
     """
     if _client is None:
         raise RuntimeError("OpenGradient client not initialized. Call og.init() first.")
-    return _client.llm_chat(model_cid=model_cid,
-                            inference_mode=inference_mode,
-                            messages=messages,
-                            max_tokens=max_tokens,
-                            stop_sequence=stop_sequence,
-                            temperature=temperature,
-                            tools=tools,
-                            tool_choice=tool_choice,
-                            max_retries=max_retries)
+    return _client.llm_chat(
+        model_cid=model_cid,
+        inference_mode=inference_mode,
+        messages=messages,
+        max_tokens=max_tokens,
+        stop_sequence=stop_sequence,
+        temperature=temperature,
+        tools=tools,
+        tool_choice=tool_choice,
+        max_retries=max_retries,
+    )
+
 
 def login(email: str, password: str):
     """Login to OpenGradient.
@@ -208,6 +232,7 @@ def login(email: str, password: str):
         raise RuntimeError("OpenGradient client not initialized. Call og.init() first.")
     return _client.login(email, password)
 
+
 def list_files(model_name: str, version: str) -> List[Dict]:
     """List files in a model repository version.
 
@@ -225,6 +250,7 @@ def list_files(model_name: str, version: str) -> List[Dict]:
         raise RuntimeError("OpenGradient client not initialized. Call og.init() first.")
     return _client.list_files(model_name, version)
 
+
 def generate_image(model: str, prompt: str, height: Optional[int] = None, width: Optional[int] = None) -> bytes:
     """Generate an image from a text prompt.
 
@@ -245,19 +271,20 @@ def generate_image(model: str, prompt: str, height: Optional[int] = None, width:
         raise RuntimeError("OpenGradient client not initialized. Call og.init() first.")
     return _client.generate_image(model, prompt, height=height, width=width)
 
+
 def new_workflow(
     model_cid: str,
     input_query: Union[Dict[str, Any], HistoricalInputQuery],
     input_tensor_name: str,
-    scheduler_params: Optional[Union[Dict[str, int], SchedulerParams]] = None
+    scheduler_params: Optional[Union[Dict[str, int], SchedulerParams]] = None,
 ) -> str:
     """
     Deploy a new workflow contract with the specified parameters.
-
-    This function deploys a new workflow contract and optionally registers it with
-    the scheduler for automated execution. If scheduler_params is not provided,
+
+    This function deploys a new workflow contract and optionally registers it with
+    the scheduler for automated execution. If scheduler_params is not provided,
     the workflow will be deployed without automated execution scheduling.
-
+
     Args:
         model_cid: IPFS CID of the model
         input_query: Dictionary or HistoricalInputQuery containing query parameters
@@ -268,40 +295,38 @@ def new_workflow(
             - duration_hours: How long to run in hours (default: 2)
             - Or a SchedulerParams instance
             If not provided, the workflow will be deployed without scheduling.
-
+
     Returns:
         str: Deployed contract address. If scheduler_params was provided, the workflow
             will be automatically executed according to the specified schedule.
     """
     if _client is None:
         raise RuntimeError("OpenGradient client not initialized. Call og.init(...) first.")
-
+
     # Convert scheduler_params if it's a dict, otherwise use as is
     scheduler = SchedulerParams.from_dict(scheduler_params) if isinstance(scheduler_params, dict) else scheduler_params
-
+
     return _client.new_workflow(
-        model_cid=model_cid,
-        input_query=input_query,
-        input_tensor_name=input_tensor_name,
-        scheduler_params=scheduler
+        model_cid=model_cid, input_query=input_query, input_tensor_name=input_tensor_name, scheduler_params=scheduler
     )
 
+
 def read_workflow_result(contract_address: str) -> Dict[str, Union[str, Dict]]:
     """
     Reads the latest inference result from a deployed workflow contract.
-
+
     This function retrieves the most recent output from a deployed model executor contract.
     It includes built-in retry logic to handle blockchain state delays.
-
+
     Args:
         contract_address (str): Address of the deployed workflow contract
-
+
     Returns:
         Dict[str, Union[str, Dict]]: A dictionary containing:
             - status: "success" or "error"
            - result: The model output data if successful
            - error: Error message if status is "error"
-
+
     Raises:
         RuntimeError: If OpenGradient client is not initialized
     """
@@ -309,13 +334,14 @@ def read_workflow_result(contract_address: str) -> Dict[str, Union[str, Dict]]:
         raise RuntimeError("OpenGradient client not initialized. Call og.init() first.")
     return _client.read_workflow_result(contract_address)
 
+
 def run_workflow(contract_address: str) -> Dict[str, Union[str, Dict]]:
     """
     Executes the workflow by calling run() on the contract to pull latest data and perform inference.
-
+
     Args:
         contract_address (str): Address of the deployed workflow contract
-
+
     Returns:
         Dict[str, Union[str, Dict]]: Status of the run operation
     """
@@ -323,33 +349,34 @@ def run_workflow(contract_address: str) -> Dict[str, Union[str, Dict]]:
         raise RuntimeError("OpenGradient client not initialized. Call og.init() first.")
     return _client.run_workflow(contract_address)
 
+
 __all__ = [
-    'generate_image',
-    'list_files',
-    'login',
-    'llm_chat',
-    'llm_completion',
-    'infer',
-    'create_version',
-    'create_model',
-    'upload',
-    'init',
-    'LLM',
-    'TEE_LLM',
-    'new_workflow',
-    'read_workflow_result',
-    'run_workflow'
+    "generate_image",
+    "list_files",
+    "login",
+    "llm_chat",
+    "llm_completion",
+    "infer",
+    "create_version",
+    "create_model",
+    "upload",
+    "init",
+    "LLM",
+    "TEE_LLM",
+    "new_workflow",
+    "read_workflow_result",
+    "run_workflow",
 ]
 
 __pdoc__ = {
-    'account': False,
-    'cli': False,
-    'client': False,
-    'defaults': False,
-    'exceptions': False,
-    'llm': True,
-    'mltools': True,
-    'proto': False,
-    'types': True,
-    'utils': False
-}
+    "account": False,
+    "cli": False,
+    "client": False,
+    "defaults": False,
+    "exceptions": False,
+    "llm": True,
+    "mltools": True,
+    "proto": False,
+    "types": False,
+    "utils": False,
+}
{opengradient-0.3.24 → opengradient-0.3.25}/src/opengradient/account.py
@@ -5,7 +5,7 @@ from collections import namedtuple
 
 from eth_account import Account
 
-EthAccount = namedtuple('EthAccount', ['address', 'private_key'])
+EthAccount = namedtuple("EthAccount", ["address", "private_key"])
 
 
 def generate_eth_account() -> EthAccount:
@@ -20,19 +20,21 @@ def generate_eth_account() -> EthAccount:
 
     return EthAccount(address=public_key, private_key=private_key)
 
+
 def _get_user_random_seed():
     print("Please type a random string of characters (the longer and more random, the better):")
     print("> ", end="")  # Add a '>' prompt on a new line
     return input().encode()
 
+
 def _generate_secure_private_key(user_input):
     # Combine multiple sources of entropy
     system_random = secrets.token_bytes(32)
     os_urandom = os.urandom(32)
     timestamp = str(secrets.randbits(256)).encode()
-
+
     # Add user input to the entropy sources
     combined = system_random + os_urandom + timestamp + user_input
-
+
     # Hash the combined entropy
-    return hashlib.sha256(combined).hexdigest()
+    return hashlib.sha256(combined).hexdigest()
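The public entry point in this file is `generate_eth_account`, which returns the `EthAccount` namedtuple shown above. A short usage sketch (whether account generation prompts for the user-entropy seed collected by `_get_user_random_seed` is not visible in these hunks):

    from opengradient.account import generate_eth_account

    account = generate_eth_account()
    print(account.address)            # fields are accessible by name...
    address, private_key = account    # ...or by tuple unpacking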