opengradient 0.3.5.tar.gz → 0.3.7.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (22)
  1. {opengradient-0.3.5/src/opengradient.egg-info → opengradient-0.3.7}/PKG-INFO +2 -6
  2. {opengradient-0.3.5 → opengradient-0.3.7}/pyproject.toml +11 -6
  3. {opengradient-0.3.5 → opengradient-0.3.7}/src/opengradient/__init__.py +22 -22
  4. opengradient-0.3.7/src/opengradient/abi/inference.abi +1 -0
  5. {opengradient-0.3.5 → opengradient-0.3.7}/src/opengradient/account.py +4 -3
  6. {opengradient-0.3.5 → opengradient-0.3.7}/src/opengradient/cli.py +33 -21
  7. {opengradient-0.3.5 → opengradient-0.3.7}/src/opengradient/client.py +198 -50
  8. {opengradient-0.3.5 → opengradient-0.3.7}/src/opengradient/types.py +12 -6
  9. {opengradient-0.3.5 → opengradient-0.3.7}/src/opengradient/utils.py +11 -5
  10. {opengradient-0.3.5 → opengradient-0.3.7/src/opengradient.egg-info}/PKG-INFO +2 -6
  11. {opengradient-0.3.5 → opengradient-0.3.7}/src/opengradient.egg-info/SOURCES.txt +1 -2
  12. opengradient-0.3.5/src/opengradient/abi/inference.abi +0 -1
  13. opengradient-0.3.5/src/opengradient/abi/llm.abi +0 -1
  14. {opengradient-0.3.5 → opengradient-0.3.7}/LICENSE +0 -0
  15. {opengradient-0.3.5 → opengradient-0.3.7}/README.md +0 -0
  16. {opengradient-0.3.5 → opengradient-0.3.7}/setup.cfg +0 -0
  17. {opengradient-0.3.5 → opengradient-0.3.7}/src/opengradient/defaults.py +0 -0
  18. {opengradient-0.3.5 → opengradient-0.3.7}/src/opengradient/exceptions.py +0 -0
  19. {opengradient-0.3.5 → opengradient-0.3.7}/src/opengradient.egg-info/dependency_links.txt +0 -0
  20. {opengradient-0.3.5 → opengradient-0.3.7}/src/opengradient.egg-info/entry_points.txt +0 -0
  21. {opengradient-0.3.5 → opengradient-0.3.7}/src/opengradient.egg-info/requires.txt +0 -0
  22. {opengradient-0.3.5 → opengradient-0.3.7}/src/opengradient.egg-info/top_level.txt +0 -0

{opengradient-0.3.5/src/opengradient.egg-info → opengradient-0.3.7}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: opengradient
- Version: 0.3.5
+ Version: 0.3.7
  Summary: Python SDK for OpenGradient decentralized model management & inference services
  Author-email: OpenGradient <oliver@opengradient.ai>
  License: MIT License
@@ -29,14 +29,10 @@ Project-URL: Homepage, https://opengradient.ai
  Classifier: Development Status :: 3 - Alpha
  Classifier: Intended Audience :: Developers
  Classifier: License :: OSI Approved :: MIT License
- Classifier: Programming Language :: Python :: 3
- Classifier: Programming Language :: Python :: 3.7
- Classifier: Programming Language :: Python :: 3.8
- Classifier: Programming Language :: Python :: 3.9
  Classifier: Programming Language :: Python :: 3.10
  Classifier: Programming Language :: Python :: 3.11
  Classifier: Programming Language :: Python :: 3.12
- Requires-Python: >=3.7
+ Requires-Python: >=3.10
  Description-Content-Type: text/markdown
  License-File: LICENSE
  Requires-Dist: aiohappyeyeballs==2.4.3

{opengradient-0.3.5 → opengradient-0.3.7}/pyproject.toml
@@ -4,20 +4,16 @@ build-backend = "setuptools.build_meta"
 
  [project]
  name = "opengradient"
- version = "0.3.5"
+ version = "0.3.7"
  description = "Python SDK for OpenGradient decentralized model management & inference services"
  authors = [{name = "OpenGradient", email = "oliver@opengradient.ai"}]
  license = {file = "LICENSE"}
  readme = "README.md"
- requires-python = ">=3.7"
+ requires-python = ">=3.10"
  classifiers = [
      "Development Status :: 3 - Alpha",
      "Intended Audience :: Developers",
      "License :: OSI Approved :: MIT License",
-     "Programming Language :: Python :: 3",
-     "Programming Language :: Python :: 3.7",
-     "Programming Language :: Python :: 3.8",
-     "Programming Language :: Python :: 3.9",
      "Programming Language :: Python :: 3.10",
      "Programming Language :: Python :: 3.11",
      "Programming Language :: Python :: 3.12",
@@ -140,3 +136,12 @@ exclude = ["tests*", "stresstest*"]
 
  [tool.setuptools.exclude-package-data]
  "*" = ["*.ipynb", "*.pyc", "*.pyo", ".gitignore", "requirements.txt", "conftest.py"]
+
+ [tool.ruff]
+ line-length = 140
+ target-version = "py310" # Specify your Python version
+ select = ["E", "F", "I", "N"]
+ ignore = []
+
+ [tool.ruff.mccabe]
+ max-complexity = 10

{opengradient-0.3.5 → opengradient-0.3.7}/src/opengradient/__init__.py
@@ -1,11 +1,10 @@
+ from typing import Dict, List, Optional, Tuple
+
  from .client import Client
- from .defaults import *
- from .types import InferenceMode
- from typing import List, Dict, Optional, Tuple
- import os
+ from .defaults import DEFAULT_INFERENCE_CONTRACT_ADDRESS, DEFAULT_RPC_URL
+ from .types import InferenceMode, LLM
 
  __version__ = "0.3.5"
- __all__ = ['init', 'upload', 'create_model', 'create_version', 'infer', 'infer_llm', 'login', 'list_files', 'InferenceMode']
 
  _client = None
 
@@ -45,31 +44,32 @@ def infer(model_cid, inference_mode, model_input):
          raise RuntimeError("OpenGradient client not initialized. Call og.init() first.")
      return _client.infer(model_cid, inference_mode, model_input)
 
- def infer_llm(model_cid: str,
-               prompt: str,
-               max_tokens: int = 100,
-               stop_sequence: Optional[List[str]] = None,
-               temperature: float = 0.0) -> Tuple[str, str]:
+ def llm_completion(model_cid: LLM,
+                    prompt: str,
+                    max_tokens: int = 100,
+                    stop_sequence: Optional[List[str]] = None,
+                    temperature: float = 0.0) -> Tuple[str, str]:
      if _client is None:
          raise RuntimeError("OpenGradient client not initialized. Call og.init() first.")
-     return _client.infer_llm(model_cid, prompt, max_tokens, stop_sequence, temperature)
+     return _client.llm_completion(model_cid, prompt, max_tokens, stop_sequence, temperature)
 
- def login(email: str, password: str):
+ def llm_chat(model_cid: LLM,
+              messages: List[Dict],
+              max_tokens: int = 100,
+              stop_sequence: Optional[List[str]] = None,
+              temperature: float = 0.0,
+              tools: Optional[List[Dict]] = None,
+              tool_choice: Optional[str] = None):
      if _client is None:
          raise RuntimeError("OpenGradient client not initialized. Call og.init() first.")
-     return _client.login(email, password)
+     return _client.llm_chat(model_cid, messages, max_tokens, stop_sequence, temperature, tools, tool_choice)
 
- def list_files(model_name: str, version: str) -> List[Dict]:
+ def login(email: str, password: str):
      if _client is None:
          raise RuntimeError("OpenGradient client not initialized. Call og.init() first.")
-     return _client.list_files(model_name, version)
+     return _client.login(email, password)
 
- def create_model_from_huggingface(repo_id: str, model_name: str, model_desc: str):
+ def list_files(model_name: str, version: str) -> List[Dict]:
      if _client is None:
          raise RuntimeError("OpenGradient client not initialized. Call og.init() first.")
-
-     with tempfile.TemporaryDirectory() as temp_dir:
-         snapshot_download(repo_id, local_dir=temp_dir)
-         result = create_model(model_name, model_desc, temp_dir)
-
-     return result
+     return _client.list_files(model_name, version)
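
The renamed module-level helpers mirror the new Client methods one-for-one. A minimal usage sketch follows; og.init itself exists (the guard messages reference it), but its parameters are not shown in this diff, so the keyword below is an assumption:

    import opengradient as og
    from opengradient.types import LLM

    og.init(private_key="0x...")  # assumed parameter name; init()'s signature is not part of this diff

    # Completion-style inference (formerly infer_llm)
    tx_hash, answer = og.llm_completion(
        model_cid=LLM.META_LLAMA3_8B_INSTRUCT,
        prompt="Translate to French: Hello world",
        max_tokens=50,
    )

    # Chat-style inference, new in this release
    tx_hash, chat_result = og.llm_chat(
        model_cid=LLM.META_LLAMA3_8B_INSTRUCT,
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "Hello!"},
        ],
    )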

opengradient-0.3.7/src/opengradient/abi/inference.abi (new file)
@@ -0,0 +1 @@
+ [{"anonymous":false,"inputs":[{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"},{"internalType":"bool","name":"is_simulation_result","type":"bool"}],"indexed":false,"internalType":"struct ModelOutput","name":"output","type":"tuple"}],"name":"InferenceResult","type":"event"},{"anonymous":false,"inputs":[{"components":[{"internalType":"string","name":"finish_reason","type":"string"},{"components":[{"internalType":"string","name":"role","type":"string"},{"internalType":"string","name":"content","type":"string"},{"components":[{"internalType":"string","name":"id","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"arguments","type":"string"}],"internalType":"struct ToolCall[]","name":"tool_calls","type":"tuple[]"}],"internalType":"struct ChatMessage","name":"message","type":"tuple"}],"indexed":false,"internalType":"struct LLMChatResponse","name":"response","type":"tuple"}],"name":"LLMChatResult","type":"event"},{"anonymous":false,"inputs":[{"components":[{"internalType":"string","name":"answer","type":"string"}],"indexed":false,"internalType":"struct LLMCompletionResponse","name":"response","type":"tuple"}],"name":"LLMCompletionResult","type":"event"},{"inputs":[{"internalType":"string","name":"modelId","type":"string"},{"internalType":"enum ModelInferenceMode","name":"inferenceMode","type":"uint8"},{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"}],"internalType":"struct ModelInput","name":"modelInput","type":"tuple"}],"name":"run","outputs":[{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"},{"internalType":"bool","name":"is_simulation_result","type":"bool"}],"internalType":"struct ModelOutput","name":"","type":"tuple"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"components":[{"internalType":"enum LLMInferenceMode","name":"mode","type":"uint8"},{"internalType":"string","name":"modelCID","type":"string"},{"components":[{"internalType":"string","name":"role","type":"string"},{"internalType":"string","name":"content","type":"string"},{"components":[{"internalType":"string","name":"id","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"arguments","type":"string"}],"internalType":"struct ToolCall[]","name":"tool_calls","type":"tuple[]"}],"internalType":"struct ChatMessage[]","name":"messages","type":"tuple[]"},{"components":[{"internalType":"string","name":"description","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"parameters","type":"string"}],"internalType":"struct ToolDefinition[]","name":"tools","type":"tuple[]"},{"internalType":"string","name":"tool_choice","type":"string"},{"internalType":"uint32","name":"max_tokens","type":"uint32"},{"internalType":"string[]","name":"stop_sequence","type":"string[]"},{"internalType":"uint32","name":"temperature","type":"uint32"}],"internalType":"struct LLMChatRequest","name":"request","type":"tuple"}],"name":"runLLMChat","outputs":[{"components":[{"internalType":"string","name":"finish_reason","type":"string"},{"components":[{"internalType":"string","name":"role","type":"string"},{"internalType":"string","name":"content","type":"string"},{"components":[{"internalType":"string","name":"id","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"arguments","type":"string"}],"internalType":"struct ToolCall[]","name":"tool_calls","type":"tuple[]"}],"internalType":"struct ChatMessage","name":"message","type":"tuple"}],"internalType":"struct LLMChatResponse","name":"","type":"tuple"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"components":[{"internalType":"enum LLMInferenceMode","name":"mode","type":"uint8"},{"internalType":"string","name":"modelCID","type":"string"},{"internalType":"string","name":"prompt","type":"string"},{"internalType":"uint32","name":"max_tokens","type":"uint32"},{"internalType":"string[]","name":"stop_sequence","type":"string[]"},{"internalType":"uint32","name":"temperature","type":"uint32"}],"internalType":"struct LLMCompletionRequest","name":"request","type":"tuple"}],"name":"runLLMCompletion","outputs":[{"components":[{"internalType":"string","name":"answer","type":"string"}],"internalType":"struct LLMCompletionResponse","name":"","type":"tuple"}],"stateMutability":"nonpayable","type":"function"}]
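
The new consolidated ABI replaces the separate inference.abi and llm.abi files: one contract interface now exposes run, runLLMCompletion, and runLLMChat, plus their InferenceResult, LLMCompletionResult, and LLMChatResult events. A quick sketch to confirm the entry points from a 0.3.7 source checkout (the relative path is the only assumption):

    import json

    # Load the consolidated ABI shipped with the package
    with open("src/opengradient/abi/inference.abi", "r") as abi_file:
        inference_abi = json.load(abi_file)

    print(sorted(entry["name"] for entry in inference_abi))
    # ['InferenceResult', 'LLMChatResult', 'LLMCompletionResult',
    #  'run', 'runLLMChat', 'runLLMCompletion']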

{opengradient-0.3.5 → opengradient-0.3.7}/src/opengradient/account.py
@@ -1,8 +1,9 @@
- from eth_account import Account
+ import hashlib
+ import os
  import secrets
  from collections import namedtuple
- import os
- import hashlib
+
+ from eth_account import Account
 
  EthAccount = namedtuple('EthAccount', ['address', 'private_key'])
 

{opengradient-0.3.5 → opengradient-0.3.7}/src/opengradient/cli.py
@@ -1,18 +1,24 @@
- import click
- import opengradient
- import json
  import ast
- from pathlib import Path
+ import json
  import logging
- from pprint import pformat
- from typing import List
  import webbrowser
- import sys
+ from pathlib import Path
+ from typing import List
 
+ import click
+
+ import opengradient
+
+ from .account import EthAccount, generate_eth_account
  from .client import Client
- from .defaults import *
+ from .defaults import (
+     DEFAULT_BLOCKCHAIN_EXPLORER,
+     DEFAULT_HUB_SIGNUP_URL,
+     DEFAULT_INFERENCE_CONTRACT_ADDRESS,
+     DEFAULT_OG_FAUCET_URL,
+     DEFAULT_RPC_URL,
+ )
  from .types import InferenceMode
- from .account import EthAccount, generate_eth_account
 
  OG_CONFIG_FILE = Path.home() / '.opengradient_config.json'
 
@@ -50,13 +56,19 @@ class DictParamType(click.ParamType):
 
  Dict = DictParamType()
 
- # Support inference modes
+ # Supported inference modes
  InferenceModes = {
      "VANILLA": InferenceMode.VANILLA,
      "ZKML": InferenceMode.ZKML,
      "TEE": InferenceMode.TEE,
  }
 
+ # Supported LLMs
+ LlmModels = {
+     "meta-llama/Meta-Llama-3-8B-Instruct",
+     "meta-llama/Llama-3.2-3B-Instruct",
+     "mistralai/Mistral-7B-Instruct-v0.3"
+ }
 
  def initialize_config(ctx):
      """Interactively initialize OpenGradient config"""
@@ -307,11 +319,11 @@ def infer(ctx, model_cid: str, inference_mode: str, input_data, input_file: Path
      click.echo() # Add a newline for better spacing
      click.secho("✅ Transaction successful", fg="green", bold=True)
      click.echo("──────────────────────────────────────")
-     click.echo(f"Transaction hash: ", nl=False)
+     click.echo("Transaction hash: ", nl=False)
      click.secho(tx_hash, fg="cyan", bold=True)
 
      block_explorer_link = f"{DEFAULT_BLOCKCHAIN_EXPLORER}0x{tx_hash}"
-     click.echo(f"Block explorer link: ", nl=False)
+     click.echo("Block explorer link: ", nl=False)
      click.secho(block_explorer_link, fg="blue", underline=True)
      click.echo()
 
@@ -325,7 +337,7 @@ def infer(ctx, model_cid: str, inference_mode: str, input_data, input_file: Path
      click.echo(f"Error running inference: {str(e)}")
 
  @cli.command()
- @click.option('--model', '-m', 'model_cid', required=True, help='CID of the LLM model to run inference on')
+ @click.option('--model', '-m', 'model_cid', type=click.Choice(LlmModels), required=True, help='CID of the LLM model to run inference on')
  @click.option('--prompt', '-p', required=True, help='Input prompt for the LLM')
  @click.option('--max-tokens', type=int, default=100, help='Maximum number of tokens for LLM output')
  @click.option('--stop-sequence', multiple=True, help='Stop sequences for LLM')
@@ -340,8 +352,8 @@ def llm(ctx, model_cid: str, prompt: str, max_tokens: int, stop_sequence: List[s
      Example usage:
 
      \b
-     opengradient llm --model Qm... --prompt "Hello, how are you?" --max-tokens 50 --temperature 0.7
-     opengradient llm -m Qm... -p "Translate to French: Hello world" --stop-sequence "." --stop-sequence "\n"
+     opengradient llm --model meta-llama/Meta-Llama-3-8B-Instruct --prompt "Hello, how are you?" --max-tokens 50 --temperature 0.7
+     opengradient llm -m meta-llama/Meta-Llama-3-8B-Instruct -p "Translate to French: Hello world" --stop-sequence "." --stop-sequence "\n"
      """
      client: Client = ctx.obj['client']
      try:
@@ -359,14 +371,14 @@ def llm(ctx, model_cid: str, prompt: str, max_tokens: int, stop_sequence: List[s
      click.echo(f"Error running LLM inference: {str(e)}")
 
  def print_llm_inference_result(model_cid, tx_hash, llm_output):
-     click.secho(f"✅ LLM Inference Successful", fg="green", bold=True)
+     click.secho("✅ LLM Inference Successful", fg="green", bold=True)
      click.echo("──────────────────────────────────────")
-     click.echo(f"Model CID: ", nl=False)
+     click.echo("Model CID: ", nl=False)
      click.secho(model_cid, fg="cyan", bold=True)
-     click.echo(f"Transaction hash: ", nl=False)
+     click.echo("Transaction hash: ", nl=False)
      click.secho(tx_hash, fg="cyan", bold=True)
      block_explorer_link = f"{DEFAULT_BLOCKCHAIN_EXPLORER}0x{tx_hash}"
-     click.echo(f"Block explorer link: ", nl=False)
+     click.echo("Block explorer link: ", nl=False)
      click.secho(block_explorer_link, fg="blue", underline=True)
      click.echo("──────────────────────────────────────")
      click.secho("LLM Output:", fg="yellow", bold=True)
@@ -389,7 +401,7 @@ def create_account_impl() -> EthAccount:
      click.echo("Step 1: Create Account on OpenGradient Hub")
      click.echo("-" * 50)
 
-     click.echo(f"Please create an account on the OpenGradient Hub")
+     click.echo("Please create an account on the OpenGradient Hub")
      webbrowser.open(DEFAULT_HUB_SIGNUP_URL, new=2)
      click.confirm("Have you successfully created your account on the OpenGradient Hub?", abort=True)
 
@@ -402,7 +414,7 @@ def create_account_impl() -> EthAccount:
      click.echo("\n" + "-" * 50)
      click.echo("Step 3: Fund Your Account")
      click.echo("-" * 50)
-     click.echo(f"Please fund your account clicking 'Request' on the Faucet website")
+     click.echo("Please fund your account clicking 'Request' on the Faucet website")
      webbrowser.open(DEFAULT_OG_FAUCET_URL + eth_account.address, new=2)
      click.confirm("Have you successfully funded your account using the Faucet?", abort=True)
 

{opengradient-0.3.5 → opengradient-0.3.7}/src/opengradient/client.py
@@ -1,15 +1,19 @@
- import requests
- import os
  import json
- from web3 import Web3
- from opengradient.exceptions import OpenGradientError
- from opengradient.types import InferenceMode
- from opengradient import utils
- import numpy as np
  import logging
- from typing import Dict, Optional, Tuple, Union, List
- from web3.exceptions import ContractLogicError
+ import os
+ from typing import Dict, List, Optional, Tuple, Union
+
  import firebase
+ import numpy as np
+ import requests
+ from web3 import Web3
+ from web3.exceptions import ContractLogicError
+ from web3.logs import DISCARD
+
+ from opengradient import utils
+ from opengradient.exceptions import OpenGradientError
+ from opengradient.types import InferenceMode, LLM
+
 
  class Client:
      FIREBASE_CONFIG = {
@@ -100,7 +104,7 @@ class Client:
          if not self.user:
              raise ValueError("User not authenticated")
 
-         url = f"https://api.opengradient.ai/api/v0/models/"
+         url = "https://api.opengradient.ai/api/v0/models/"
          headers = {
              'Authorization': f'Bearer {self.user["idToken"]}',
              'Content-Type': 'application/json'
@@ -186,15 +190,15 @@ class Client:
          logging.debug(f"Full server response: {json_response}")
 
          if isinstance(json_response, list) and not json_response:
-             logging.info(f"Server returned an empty list. Assuming version was created successfully.")
+             logging.info("Server returned an empty list. Assuming version was created successfully.")
              return {"versionString": "Unknown", "note": "Created based on empty response"}
          elif isinstance(json_response, dict):
-             versionString = json_response.get('versionString')
-             if not versionString:
+             version_string = json_response.get('versionString')
+             if not version_string:
                  logging.warning(f"'versionString' not found in response. Response: {json_response}")
                  return {"versionString": "Unknown", "note": "Version ID not provided in response"}
-             logging.info(f"Version creation successful. Version ID: {versionString}")
-             return {"versionString": versionString}
+             logging.info(f"Version creation successful. Version ID: {version_string}")
+             return {"versionString": version_string}
          else:
              logging.error(f"Unexpected response type: {type(json_response)}. Content: {json_response}")
              raise Exception(f"Unexpected response type: {type(json_response)}")
@@ -295,7 +299,12 @@ class Client:
              logging.error(f"Unexpected error during upload: {str(e)}", exc_info=True)
              raise OpenGradientError(f"Unexpected error during upload: {str(e)}")
 
-     def infer(self, model_cid: str, inference_mode: InferenceMode, model_input: Dict[str, Union[str, int, float, List, np.ndarray]]) -> Tuple[str, Dict[str, np.ndarray]]:
+     def infer(
+         self,
+         model_cid: str,
+         inference_mode: InferenceMode,
+         model_input: Dict[str, Union[str, int, float, List, np.ndarray]]
+     ) -> Tuple[str, Dict[str, np.ndarray]]:
          """
          Perform inference on a model.
 
@@ -376,19 +385,11 @@ class Client:
                  raise ContractLogicError(f"Transaction failed. Receipt: {tx_receipt}")
 
              # Process the InferenceResult event
-             inference_result = None
-             for log in tx_receipt['logs']:
-                 try:
-                     decoded_log = contract.events.InferenceResult().process_log(log)
-                     inference_result = decoded_log
-                     break
-                 except:
-                     continue
-
-             if inference_result is None:
-                 logging.error("InferenceResult event not found in transaction logs")
-                 logging.debug(f"Transaction receipt logs: {tx_receipt['logs']}")
+             parsed_logs = contract.events.InferenceResult().process_receipt(tx_receipt, errors=DISCARD)
+
+             if len(parsed_logs) < 1:
                  raise OpenGradientError("InferenceResult event not found in transaction logs")
+             inference_result = parsed_logs[0]
 
              # Extract the ModelOutput from the event
              event_data = inference_result['args']
@@ -410,24 +411,24 @@ class Client:
              logging.error(f"Error in infer method: {str(e)}", exc_info=True)
              raise OpenGradientError(f"Inference failed: {str(e)}")
 
-     def infer_llm(self,
-                   model_cid: str,
-                   prompt: str,
-                   max_tokens: int = 100,
-                   stop_sequence: Optional[List[str]] = None,
-                   temperature: float = 0.0) -> Tuple[str, str]:
+     def llm_completion(self,
+                        model_cid: LLM,
+                        prompt: str,
+                        max_tokens: int = 100,
+                        stop_sequence: Optional[List[str]] = None,
+                        temperature: float = 0.0) -> Tuple[str, str]:
          """
          Perform inference on an LLM model using completions.
 
          Args:
-             model_cid (str): The unique content identifier for the model.
+             model_cid (LLM): The unique content identifier for the model.
              prompt (str): The input prompt for the LLM.
              max_tokens (int): Maximum number of tokens for LLM output. Default is 100.
              stop_sequence (List[str], optional): List of stop sequences for LLM. Default is None.
              temperature (float): Temperature for LLM inference, between 0 and 1. Default is 0.0.
 
          Returns:
-             Tuple[str, str]: The transaction hash and the LLM output.
+             Tuple[str, str]: The transaction hash and the LLM completion output.
 
          Raises:
              OpenGradientError: If the inference fails.
@@ -435,7 +436,7 @@ class Client:
          try:
              self._initialize_web3()
 
-             abi_path = os.path.join(os.path.dirname(__file__), 'abi', 'llm.abi')
+             abi_path = os.path.join(os.path.dirname(__file__), 'abi', 'inference.abi')
              with open(abi_path, 'r') as abi_file:
                  llm_abi = json.load(abi_file)
              contract = self._w3.eth.contract(address=self.contract_address, abi=llm_abi)
@@ -452,7 +453,7 @@ class Client:
              logging.debug(f"Prepared LLM request: {llm_request}")
 
              # Prepare run function
-             run_function = contract.functions.runLLM(llm_request)
+             run_function = contract.functions.runLLMCompletion(llm_request)
 
              # Build transaction
              nonce = self._w3.eth.get_transaction_count(self.wallet_address)
@@ -478,19 +479,166 @@ class Client:
                  raise ContractLogicError(f"Transaction failed. Receipt: {tx_receipt}")
 
              # Process the LLMResult event
-             llm_result = None
-             for log in tx_receipt['logs']:
-                 try:
-                     decoded_log = contract.events.LLMResult().process_log(log)
-                     llm_result = decoded_log['args']['response']['answer']
-                     break
-                 except:
-                     continue
+             parsed_logs = contract.events.LLMCompletionResult().process_receipt(tx_receipt, errors=DISCARD)
+
+             if len(parsed_logs) < 1:
+                 raise OpenGradientError("LLM completion result event not found in transaction logs")
+             llm_result = parsed_logs[0]
 
-             if llm_result is None:
-                 raise OpenGradientError("LLMResult event not found in transaction logs")
+             llm_answer = llm_result['args']['response']['answer']
+             return tx_hash.hex(), llm_answer
+
+         except ContractLogicError as e:
+             logging.error(f"Contract logic error: {str(e)}", exc_info=True)
+             raise OpenGradientError(f"LLM inference failed due to contract logic error: {str(e)}")
+         except Exception as e:
+             logging.error(f"Error in infer completion method: {str(e)}", exc_info=True)
+             raise OpenGradientError(f"LLM inference failed: {str(e)}")
+
+     def llm_chat(self,
+                  model_cid: str,
+                  messages: List[Dict],
+                  max_tokens: int = 100,
+                  stop_sequence: Optional[List[str]] = None,
+                  temperature: float = 0.0,
+                  tools: Optional[List[Dict]] = [],
+                  tool_choice: Optional[str] = None) -> Tuple[str, str]:
+         """
+         Perform inference on an LLM model using chat.
+
+         Args:
+             model_cid (LLM): The unique content identifier for the model.
+             messages (dict): The messages that will be passed into the chat.
+                 This should be in OpenAI API format (https://platform.openai.com/docs/api-reference/chat/create)
+                 Example:
+                     [
+                         {
+                             "role": "system",
+                             "content": "You are a helpful assistant."
+                         },
+                         {
+                             "role": "user",
+                             "content": "Hello!"
+                         }
+                     ]
+             max_tokens (int): Maximum number of tokens for LLM output. Default is 100.
+             stop_sequence (List[str], optional): List of stop sequences for LLM. Default is None.
+             temperature (float): Temperature for LLM inference, between 0 and 1. Default is 0.0.
+             tools (List[dict], optional): Set of tools
+                 This should be in OpenAI API format (https://platform.openai.com/docs/api-reference/chat/create#chat-create-tools)
+                 Example:
+                     [
+                         {
+                             "type": "function",
+                             "function": {
+                                 "name": "get_current_weather",
+                                 "description": "Get the current weather in a given location",
+                                 "parameters": {
+                                     "type": "object",
+                                     "properties": {
+                                         "location": {
+                                             "type": "string",
+                                             "description": "The city and state, e.g. San Francisco, CA"
+                                         },
+                                         "unit": {
+                                             "type": "string",
+                                             "enum": ["celsius", "fahrenheit"]
+                                         }
+                                     },
+                                     "required": ["location"]
+                                 }
+                             }
+                         }
+                     ]
+             tool_choice (str, optional): Sets a specific tool to choose. Default value is "auto".
+
+         Returns:
+             Tuple[str, str]: The transaction hash and the LLM chat output.
+
+         Raises:
+             OpenGradientError: If the inference fails.
+         """
+         try:
+             self._initialize_web3()
+
+             abi_path = os.path.join(os.path.dirname(__file__), 'abi', 'inference.abi')
+             with open(abi_path, 'r') as abi_file:
+                 llm_abi = json.load(abi_file)
+             contract = self._w3.eth.contract(address=self.contract_address, abi=llm_abi)
+
+             # For incoming chat messages, tool_calls can be empty. Add an empty array so that it will fit the ABI.
+             for message in messages:
+                 if 'tool_calls' not in message:
+                     message['tool_calls'] = []
+
+             # Create simplified tool structure for smart contract
+             #
+             # struct ToolDefinition {
+             #     string description;
+             #     string name;
+             #     string parameters; // This must be a JSON
+             # }
+             converted_tools = []
+             if tools is not None:
+                 for tool in tools:
+                     function = tool['function']
+
+                     converted_tool = {}
+                     converted_tool['name'] = function['name']
+                     converted_tool['description'] = function['description']
+                     if (parameters := function.get('parameters')) is not None:
+                         try:
+                             converted_tool['parameters'] = json.dumps(parameters)
+                         except Exception as e:
+                             raise OpenGradientError("Chat LLM failed to convert parameters into JSON: %s", e)
+
+                     converted_tools.append(converted_tool)
+
+             # Prepare LLM input
+             llm_request = {
+                 "mode": InferenceMode.VANILLA,
+                 "modelCID": model_cid,
+                 "messages": messages,
+                 "max_tokens": max_tokens,
+                 "stop_sequence": stop_sequence or [],
+                 "temperature": int(temperature * 100), # Scale to 0-100 range
+                 "tools": converted_tools or [],
+                 "tool_choice": tool_choice if tool_choice else ("" if tools is None else "auto")
+             }
+             logging.debug(f"Prepared LLM request: {llm_request}")
+
+             # Prepare run function
+             run_function = contract.functions.runLLMChat(llm_request)
+
+             # Build transaction
+             nonce = self._w3.eth.get_transaction_count(self.wallet_address)
+             estimated_gas = run_function.estimate_gas({'from': self.wallet_address})
+             gas_limit = int(estimated_gas * 1.2)
+
+             transaction = run_function.build_transaction({
+                 'from': self.wallet_address,
+                 'nonce': nonce,
+                 'gas': gas_limit,
+                 'gasPrice': self._w3.eth.gas_price,
+             })
+
+             # Sign and send transaction
+             signed_tx = self._w3.eth.account.sign_transaction(transaction, self.private_key)
+             tx_hash = self._w3.eth.send_raw_transaction(signed_tx.raw_transaction)
+             logging.debug(f"Transaction sent. Hash: {tx_hash.hex()}")
+
+             # Wait for transaction receipt
+             tx_receipt = self._w3.eth.wait_for_transaction_receipt(tx_hash)
+
+             if tx_receipt['status'] == 0:
+                 raise ContractLogicError(f"Transaction failed. Receipt: {tx_receipt}")
+
+             # Process the LLMResult event
+             parsed_logs = contract.events.LLMChatResult().process_receipt(tx_receipt, errors=DISCARD)
 
-             logging.debug(f"LLM output: {llm_result}")
+             if len(parsed_logs) < 1:
+                 raise OpenGradientError("LLM chat result event not found in transaction logs")
+             llm_result = parsed_logs[0]
 
              return tx_hash.hex(), llm_result
 
@@ -498,7 +646,7 @@ class Client:
              logging.error(f"Contract logic error: {str(e)}", exc_info=True)
              raise OpenGradientError(f"LLM inference failed due to contract logic error: {str(e)}")
          except Exception as e:
-             logging.error(f"Error in infer_llm method: {str(e)}", exc_info=True)
+             logging.error(f"Error in infer chat method: {str(e)}", exc_info=True)
              raise OpenGradientError(f"LLM inference failed: {str(e)}")
 
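
Both rewritten event handlers follow the same pattern: web3's process_receipt with errors=DISCARD decodes every log that matches the event ABI and silently drops the rest, replacing the old hand-rolled loop with its bare except. A generic sketch of the pattern (the helper name and the RuntimeError are illustrative, not part of the SDK):

    from web3.logs import DISCARD

    def first_event(contract, event_name, tx_receipt):
        # Decode the first matching event from a receipt, mirroring how
        # client.py now handles InferenceResult, LLMCompletionResult,
        # and LLMChatResult.
        event = getattr(contract.events, event_name)
        parsed_logs = event().process_receipt(tx_receipt, errors=DISCARD)
        if len(parsed_logs) < 1:
            raise RuntimeError(f"{event_name} event not found in transaction logs")
        return parsed_logs[0]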

{opengradient-0.3.5 → opengradient-0.3.7}/src/opengradient/types.py
@@ -1,5 +1,6 @@
- from typing import List, Tuple, Union
  from dataclasses import dataclass
+ from typing import List, Tuple, Union
+
 
  @dataclass
  class Number:
@@ -9,7 +10,7 @@ class Number:
  @dataclass
  class NumberTensor:
      name: str
-     values: List[Tuple[int, int]] # (int128, int128)[]
+     values: List[Tuple[int, int]]
 
  @dataclass
  class StringTensor:
@@ -37,7 +38,7 @@ class AbiFunction:
      name: str
      inputs: List[Union[str, 'AbiFunction']]
      outputs: List[Union[str, 'AbiFunction']]
-     stateMutability: str
+     state_mutability: str
 
  @dataclass
  class Abi:
@@ -54,7 +55,7 @@ class Abi:
                  name=item['name'],
                  inputs=inputs,
                  outputs=outputs,
-                 stateMutability=item['stateMutability']
+                 state_mutability=item['stateMutability']
              ))
          return cls(functions=functions)
 
@@ -67,8 +68,13 @@ class Abi:
                  name=item['name'],
                  inputs=Abi._parse_inputs_outputs(item['components']),
                  outputs=[],
-                 stateMutability=''
+                 state_mutability=''
              ))
          else:
              result.append(f"{item['name']}:{item['type']}")
-     return result
+     return result
+
+ class LLM:
+     META_LLAMA3_8B_INSTRUCT = "meta-llama/Meta-Llama-3-8B-Instruct"
+     LLAMA_3_2_3B_INSTRUCT = "meta-llama/Llama-3.2-3B-Instruct"
+     MISTRAL_7B_INSTRUCT_V3 = "mistralai/Mistral-7B-Instruct-v0.3"
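
LLM is a plain constants class, so its attributes are ordinary strings, interchangeable with the string literals in the CLI's LlmModels set:

    from opengradient.types import LLM

    # The constants are plain strings and can be passed anywhere a model CID is expected
    assert LLM.MISTRAL_7B_INSTRUCT_V3 == "mistralai/Mistral-7B-Instruct-v0.3"
    assert isinstance(LLM.META_LLAMA3_8B_INSTRUCT, str)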

{opengradient-0.3.5 → opengradient-0.3.7}/src/opengradient/utils.py
@@ -1,9 +1,11 @@
- import numpy as np
  import logging
  from decimal import Decimal
  from typing import Dict, List, Tuple
+
+ import numpy as np
  from web3.datastructures import AttributeDict
 
+
  def convert_to_fixed_point(number: float) -> Tuple[int, int]:
      """
      Converts input number to the Number tensor used by the sequencer.
@@ -36,14 +38,16 @@ def convert_to_float32(value: int, decimals: int) -> np.float32:
      """
      return np.float32(Decimal(value) / (10 ** Decimal(decimals)))
 
- def convert_to_model_input(inputs: Dict[str, np.ndarray]) -> Tuple[List[Tuple[str, List[Tuple[int, int]]]], List[Tuple[str, List[str]]]]:
+ def convert_to_model_input(
+     inputs: Dict[str, np.ndarray]
+ ) -> Tuple[List[Tuple[str, List[Tuple[int, int]]]], List[Tuple[str, List[str]]]]:
      """
      Expect SDK input to be a dict with the format
          key: tensor name
          value: np.array / list
 
-     Return a tuple of (number tensors, string tensors) depending on the input type. Each number and string tensor converted
-     to a numpy array and flattened and the shape saved.
+     Return a tuple of (number tensors, string tensors) depending on the input type.
+     Each number and string tensor converted to a numpy array and flattened and the shape saved.
      """
      logging.debug("Converting the following input dictionary to ModelInput: %s", inputs)
      number_tensors = []
@@ -60,7 +64,9 @@ def convert_to_model_input(inputs: Dict[str, np.ndarray]) -> Tuple[List[Tuple[st
 
      # Check if type is np array
      if not isinstance(tensor_data, np.ndarray):
-         raise TypeError("Inference input must be list, numpy array, or type (str, int, float): %s" % type(tensor_data))
+         raise TypeError(
+             "Inference input must be list, numpy array, or type (str, int, float): %s"
+             % type(tensor_data))
 
      # Flatten list and retain shape
      shape = tensor_data.shape
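
convert_to_float32 documents the fixed-point encoding the sequencer uses: a (value, decimals) pair decoded as value / 10**decimals. A round-trip sketch follows; convert_to_fixed_point's body is not shown in this diff, so the encoder below is an assumption built to match the decoder:

    from decimal import Decimal

    import numpy as np

    def to_fixed_point(number: float, decimals: int = 6):
        # Hypothetical encoder: scale by 10**decimals, producing the
        # (value, decimals) pairs that convert_to_float32 decodes
        return int(Decimal(str(number)) * (10 ** decimals)), decimals

    def to_float32(value: int, decimals: int) -> np.float32:
        # Same expression as utils.convert_to_float32 in this diff
        return np.float32(Decimal(value) / (10 ** Decimal(decimals)))

    value, decimals = to_fixed_point(3.14159)
    assert to_float32(value, decimals) == np.float32(3.14159)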

{opengradient-0.3.5 → opengradient-0.3.7/src/opengradient.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: opengradient
- Version: 0.3.5
+ Version: 0.3.7
  Summary: Python SDK for OpenGradient decentralized model management & inference services
  Author-email: OpenGradient <oliver@opengradient.ai>
  License: MIT License
@@ -29,14 +29,10 @@ Project-URL: Homepage, https://opengradient.ai
  Classifier: Development Status :: 3 - Alpha
  Classifier: Intended Audience :: Developers
  Classifier: License :: OSI Approved :: MIT License
- Classifier: Programming Language :: Python :: 3
- Classifier: Programming Language :: Python :: 3.7
- Classifier: Programming Language :: Python :: 3.8
- Classifier: Programming Language :: Python :: 3.9
  Classifier: Programming Language :: Python :: 3.10
  Classifier: Programming Language :: Python :: 3.11
  Classifier: Programming Language :: Python :: 3.12
- Requires-Python: >=3.7
+ Requires-Python: >=3.10
  Description-Content-Type: text/markdown
  License-File: LICENSE
  Requires-Dist: aiohappyeyeballs==2.4.3

{opengradient-0.3.5 → opengradient-0.3.7}/src/opengradient.egg-info/SOURCES.txt
@@ -15,5 +15,4 @@ src/opengradient.egg-info/dependency_links.txt
  src/opengradient.egg-info/entry_points.txt
  src/opengradient.egg-info/requires.txt
  src/opengradient.egg-info/top_level.txt
- src/opengradient/abi/inference.abi
- src/opengradient/abi/llm.abi
+ src/opengradient/abi/inference.abi

opengradient-0.3.5/src/opengradient/abi/inference.abi (deleted)
@@ -1 +0,0 @@
- [{"anonymous":false,"inputs":[{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"},{"internalType":"bool","name":"is_simulation_result","type":"bool"}],"indexed":false,"internalType":"struct ModelOutput","name":"output","type":"tuple"}],"name":"InferenceResult","type":"event"},{"inputs":[{"internalType":"string","name":"modelId","type":"string"},{"internalType":"enum ModelInferenceMode","name":"inferenceMode","type":"uint8"},{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"}],"internalType":"struct ModelInput","name":"modelInput","type":"tuple"}],"name":"run","outputs":[{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"},{"internalType":"bool","name":"is_simulation_result","type":"bool"}],"internalType":"struct ModelOutput","name":"","type":"tuple"}],"stateMutability":"nonpayable","type":"function"}]

opengradient-0.3.5/src/opengradient/abi/llm.abi (deleted)
@@ -1 +0,0 @@
- [{"anonymous":false,"inputs":[{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"},{"internalType":"bool","name":"is_simulation_result","type":"bool"}],"indexed":false,"internalType":"struct ModelOutput","name":"output","type":"tuple"}],"name":"InferenceResult","type":"event"},{"anonymous":false,"inputs":[{"components":[{"internalType":"string","name":"answer","type":"string"}],"indexed":false,"internalType":"struct LlmResponse","name":"response","type":"tuple"}],"name":"LLMResult","type":"event"},{"inputs":[{"internalType":"string","name":"modelId","type":"string"},{"internalType":"enum ModelInferenceMode","name":"inferenceMode","type":"uint8"},{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"}],"internalType":"struct ModelInput","name":"modelInput","type":"tuple"}],"name":"run","outputs":[{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"},{"internalType":"bool","name":"is_simulation_result","type":"bool"}],"internalType":"struct ModelOutput","name":"","type":"tuple"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"components":[{"internalType":"enum LlmInferenceMode","name":"mode","type":"uint8"},{"internalType":"string","name":"modelCID","type":"string"},{"internalType":"string","name":"prompt","type":"string"},{"internalType":"uint32","name":"max_tokens","type":"uint32"},{"internalType":"string[]","name":"stop_sequence","type":"string[]"},{"internalType":"uint32","name":"temperature","type":"uint32"}],"internalType":"struct LlmInferenceRequest","name":"request","type":"tuple"}],"name":"runLLM","outputs":[{"components":[{"internalType":"string","name":"answer","type":"string"}],"internalType":"struct LlmResponse","name":"","type":"tuple"}],"stateMutability":"nonpayable","type":"function"}]