opengradient 0.4.11__tar.gz → 0.4.12__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. {opengradient-0.4.11/src/opengradient.egg-info → opengradient-0.4.12}/PKG-INFO +1 -1
  2. {opengradient-0.4.11 → opengradient-0.4.12}/pyproject.toml +1 -1
  3. {opengradient-0.4.11 → opengradient-0.4.12}/src/opengradient/__init__.py +7 -5
  4. opengradient-0.4.12/src/opengradient/abi/InferencePrecompile.abi +1 -0
  5. {opengradient-0.4.11 → opengradient-0.4.12}/src/opengradient/cli.py +2 -0
  6. {opengradient-0.4.11 → opengradient-0.4.12}/src/opengradient/client.py +106 -64
  7. {opengradient-0.4.11 → opengradient-0.4.12}/src/opengradient/defaults.py +2 -1
  8. {opengradient-0.4.11 → opengradient-0.4.12}/src/opengradient/llm/og_langchain.py +2 -2
  9. {opengradient-0.4.11 → opengradient-0.4.12/src/opengradient.egg-info}/PKG-INFO +1 -1
  10. {opengradient-0.4.11 → opengradient-0.4.12}/src/opengradient.egg-info/SOURCES.txt +1 -0
  11. {opengradient-0.4.11 → opengradient-0.4.12}/LICENSE +0 -0
  12. {opengradient-0.4.11 → opengradient-0.4.12}/README.md +0 -0
  13. {opengradient-0.4.11 → opengradient-0.4.12}/setup.cfg +0 -0
  14. {opengradient-0.4.11 → opengradient-0.4.12}/src/opengradient/abi/PriceHistoryInference.abi +0 -0
  15. {opengradient-0.4.11 → opengradient-0.4.12}/src/opengradient/abi/WorkflowScheduler.abi +0 -0
  16. {opengradient-0.4.11 → opengradient-0.4.12}/src/opengradient/abi/inference.abi +0 -0
  17. {opengradient-0.4.11 → opengradient-0.4.12}/src/opengradient/account.py +0 -0
  18. {opengradient-0.4.11 → opengradient-0.4.12}/src/opengradient/alphasense/__init__.py +0 -0
  19. {opengradient-0.4.11 → opengradient-0.4.12}/src/opengradient/alphasense/read_workflow_tool.py +0 -0
  20. {opengradient-0.4.11 → opengradient-0.4.12}/src/opengradient/alphasense/run_model_tool.py +0 -0
  21. {opengradient-0.4.11 → opengradient-0.4.12}/src/opengradient/alphasense/types.py +0 -0
  22. {opengradient-0.4.11 → opengradient-0.4.12}/src/opengradient/bin/PriceHistoryInference.bin +0 -0
  23. {opengradient-0.4.11 → opengradient-0.4.12}/src/opengradient/exceptions.py +0 -0
  24. {opengradient-0.4.11 → opengradient-0.4.12}/src/opengradient/llm/__init__.py +0 -0
  25. {opengradient-0.4.11 → opengradient-0.4.12}/src/opengradient/llm/og_openai.py +0 -0
  26. {opengradient-0.4.11 → opengradient-0.4.12}/src/opengradient/proto/__init__.py +0 -0
  27. {opengradient-0.4.11 → opengradient-0.4.12}/src/opengradient/proto/infer.proto +0 -0
  28. {opengradient-0.4.11 → opengradient-0.4.12}/src/opengradient/proto/infer_pb2.py +0 -0
  29. {opengradient-0.4.11 → opengradient-0.4.12}/src/opengradient/proto/infer_pb2_grpc.py +0 -0
  30. {opengradient-0.4.11 → opengradient-0.4.12}/src/opengradient/types.py +0 -0
  31. {opengradient-0.4.11 → opengradient-0.4.12}/src/opengradient/utils.py +0 -0
  32. {opengradient-0.4.11 → opengradient-0.4.12}/src/opengradient/workflow_models/__init__.py +0 -0
  33. {opengradient-0.4.11 → opengradient-0.4.12}/src/opengradient/workflow_models/constants.py +0 -0
  34. {opengradient-0.4.11 → opengradient-0.4.12}/src/opengradient/workflow_models/types.py +0 -0
  35. {opengradient-0.4.11 → opengradient-0.4.12}/src/opengradient/workflow_models/utils.py +0 -0
  36. {opengradient-0.4.11 → opengradient-0.4.12}/src/opengradient/workflow_models/workflow_models.py +0 -0
  37. {opengradient-0.4.11 → opengradient-0.4.12}/src/opengradient.egg-info/dependency_links.txt +0 -0
  38. {opengradient-0.4.11 → opengradient-0.4.12}/src/opengradient.egg-info/entry_points.txt +0 -0
  39. {opengradient-0.4.11 → opengradient-0.4.12}/src/opengradient.egg-info/requires.txt +0 -0
  40. {opengradient-0.4.11 → opengradient-0.4.12}/src/opengradient.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: opengradient
- Version: 0.4.11
+ Version: 0.4.12
  Summary: Python SDK for OpenGradient decentralized model management & inference services
  Author-email: OpenGradient <oliver@opengradient.ai>
  License: MIT License
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

  [project]
  name = "opengradient"
- version = "0.4.11"
+ version = "0.4.12"
  description = "Python SDK for OpenGradient decentralized model management & inference services"
  authors = [{name = "OpenGradient", email = "oliver@opengradient.ai"}]
  license = {file = "LICENSE"}
@@ -5,7 +5,7 @@ OpenGradient Python SDK for interacting with AI models and infrastructure.
  from typing import Any, Dict, List, Optional, Tuple, Union

  from .client import Client
- from .defaults import DEFAULT_INFERENCE_CONTRACT_ADDRESS, DEFAULT_RPC_URL
+ from .defaults import DEFAULT_INFERENCE_CONTRACT_ADDRESS, DEFAULT_RPC_URL, DEFAULT_API_URL
  from .types import (
  LLM,
  TEE_LLM,
@@ -32,6 +32,7 @@ def new_client(
  password: Optional[str],
  private_key: str,
  rpc_url=DEFAULT_RPC_URL,
+ api_url=DEFAULT_API_URL,
  contract_address=DEFAULT_INFERENCE_CONTRACT_ADDRESS,
  ) -> Client:
  """
@@ -45,10 +46,10 @@ def new_client(
  contract_address: Optional inference contract address
  """

- return Client(email=email, password=password, private_key=private_key, rpc_url=rpc_url, contract_address=contract_address)
+ return Client(email=email, password=password, private_key=private_key, rpc_url=rpc_url, api_url=api_url, contract_address=contract_address)


- def init(email: str, password: str, private_key: str, rpc_url=DEFAULT_RPC_URL, contract_address=DEFAULT_INFERENCE_CONTRACT_ADDRESS):
+ def init(email: str, password: str, private_key: str, rpc_url=DEFAULT_RPC_URL, api_url=DEFAULT_API_URL, contract_address=DEFAULT_INFERENCE_CONTRACT_ADDRESS):
  """Initialize the OpenGradient SDK with authentication and network settings.

  Args:
@@ -56,11 +57,12 @@ def init(email: str, password: str, private_key: str, rpc_url=DEFAULT_RPC_URL, c
  password: User's password for authentication
  private_key: Ethereum private key for blockchain transactions
  rpc_url: Optional RPC URL for the blockchain network, defaults to mainnet
+ api_url: Optional API URL for the OpenGradient API, defaults to mainnet
  contract_address: Optional inference contract address
  """
  global _client
-
- _client = Client(private_key=private_key, rpc_url=rpc_url, email=email, password=password, contract_address=contract_address)
+
+ _client = Client(private_key=private_key, rpc_url=rpc_url, api_url=api_url, email=email, password=password, contract_address=contract_address)
  return _client


@@ -0,0 +1 @@
+ [{"anonymous":false,"inputs":[{"indexed":false,"internalType":"string","name":"inferenceID","type":"string"},{"components":[{"internalType":"enum LLMInferenceMode","name":"mode","type":"uint8"},{"internalType":"string","name":"modelCID","type":"string"},{"components":[{"internalType":"string","name":"role","type":"string"},{"internalType":"string","name":"content","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"tool_call_id","type":"string"},{"components":[{"internalType":"string","name":"id","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"arguments","type":"string"}],"internalType":"struct ToolCall[]","name":"tool_calls","type":"tuple[]"}],"internalType":"struct ChatMessage[]","name":"messages","type":"tuple[]"},{"components":[{"internalType":"string","name":"description","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"parameters","type":"string"}],"internalType":"struct ToolDefinition[]","name":"tools","type":"tuple[]"},{"internalType":"string","name":"tool_choice","type":"string"},{"internalType":"uint32","name":"max_tokens","type":"uint32"},{"internalType":"string[]","name":"stop_sequence","type":"string[]"},{"internalType":"uint32","name":"temperature","type":"uint32"}],"indexed":false,"internalType":"struct LLMChatRequest","name":"request","type":"tuple"},{"components":[{"internalType":"string","name":"finish_reason","type":"string"},{"components":[{"internalType":"string","name":"role","type":"string"},{"internalType":"string","name":"content","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"tool_call_id","type":"string"},{"components":[{"internalType":"string","name":"id","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"arguments","type":"string"}],"internalType":"struct ToolCall[]","name":"tool_calls","type":"tuple[]"}],"internalType":"struct ChatMessage","name":"message","type":"tuple"}],"indexed":false,"internalType":"struct LLMChatResponse","name":"response","type":"tuple"}],"name":"LLMChat","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"string","name":"inferenceID","type":"string"},{"components":[{"internalType":"enum LLMInferenceMode","name":"mode","type":"uint8"},{"internalType":"string","name":"modelCID","type":"string"},{"internalType":"string","name":"prompt","type":"string"},{"internalType":"uint32","name":"max_tokens","type":"uint32"},{"internalType":"string[]","name":"stop_sequence","type":"string[]"},{"internalType":"uint32","name":"temperature","type":"uint32"}],"indexed":false,"internalType":"struct LLMCompletionRequest","name":"request","type":"tuple"},{"components":[{"internalType":"string","name":"answer","type":"string"}],"indexed":false,"internalType":"struct LLMCompletionResponse","name":"response","type":"tuple"}],"name":"LLMCompletionEvent","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"string","name":"inferenceID","type":"string"},{"components":[{"internalType":"enum ModelInferenceMode","name":"mode","type":"uint8"},{"internalType":"string","name":"modelCID","type":"string"},{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"}],"internalType":"struct ModelInput","name":"input","type":"tuple"}],"indexed":false,"internalType":"struct ModelInferenceRequest","name":"request","type":"tuple"},{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"value","type":"string"}],"internalType":"struct TensorLib.JsonScalar[]","name":"jsons","type":"tuple[]"},{"internalType":"bool","name":"is_simulation_result","type":"bool"}],"indexed":false,"internalType":"struct ModelOutput","name":"response","type":"tuple"}],"name":"ModelInferenceEvent","type":"event"},{"inputs":[{"components":[{"internalType":"enum LLMInferenceMode","name":"mode","type":"uint8"},{"internalType":"string","name":"modelCID","type":"string"},{"components":[{"internalType":"string","name":"role","type":"string"},{"internalType":"string","name":"content","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"tool_call_id","type":"string"},{"components":[{"internalType":"string","name":"id","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"arguments","type":"string"}],"internalType":"struct ToolCall[]","name":"tool_calls","type":"tuple[]"}],"internalType":"struct ChatMessage[]","name":"messages","type":"tuple[]"},{"components":[{"internalType":"string","name":"description","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"parameters","type":"string"}],"internalType":"struct ToolDefinition[]","name":"tools","type":"tuple[]"},{"internalType":"string","name":"tool_choice","type":"string"},{"internalType":"uint32","name":"max_tokens","type":"uint32"},{"internalType":"string[]","name":"stop_sequence","type":"string[]"},{"internalType":"uint32","name":"temperature","type":"uint32"}],"internalType":"struct LLMChatRequest","name":"request","type":"tuple"}],"name":"runLLMChat","outputs":[{"components":[{"internalType":"string","name":"finish_reason","type":"string"},{"components":[{"internalType":"string","name":"role","type":"string"},{"internalType":"string","name":"content","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"tool_call_id","type":"string"},{"components":[{"internalType":"string","name":"id","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"arguments","type":"string"}],"internalType":"struct ToolCall[]","name":"tool_calls","type":"tuple[]"}],"internalType":"struct ChatMessage","name":"message","type":"tuple"}],"internalType":"struct LLMChatResponse","name":"","type":"tuple"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"components":[{"internalType":"enum LLMInferenceMode","name":"mode","type":"uint8"},{"internalType":"string","name":"modelCID","type":"string"},{"internalType":"string","name":"prompt","type":"string"},{"internalType":"uint32","name":"max_tokens","type":"uint32"},{"internalType":"string[]","name":"stop_sequence","type":"string[]"},{"internalType":"uint32","name":"temperature","type":"uint32"}],"internalType":"struct LLMCompletionRequest","name":"request","type":"tuple"}],"name":"runLLMCompletion","outputs":[{"components":[{"internalType":"string","name":"answer","type":"string"}],"internalType":"struct LLMCompletionResponse","name":"","type":"tuple"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"components":[{"internalType":"enum ModelInferenceMode","name":"mode","type":"uint8"},{"internalType":"string","name":"modelCID","type":"string"},{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"}],"internalType":"struct ModelInput","name":"input","type":"tuple"}],"internalType":"struct ModelInferenceRequest","name":"request","type":"tuple"}],"name":"runModelInference","outputs":[{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"value","type":"string"}],"internalType":"struct TensorLib.JsonScalar[]","name":"jsons","type":"tuple[]"},{"internalType":"bool","name":"is_simulation_result","type":"bool"}],"internalType":"struct ModelOutput","name":"","type":"tuple"}],"stateMutability":"nonpayable","type":"function"}]
@@ -17,6 +17,7 @@ from .defaults import (
  DEFAULT_INFERENCE_CONTRACT_ADDRESS,
  DEFAULT_OG_FAUCET_URL,
  DEFAULT_RPC_URL,
+ DEFAULT_API_URL,
  )
  from .types import InferenceMode, LlmInferenceMode, LLM, TEE_LLM

@@ -132,6 +133,7 @@ def cli(ctx):
  ctx.obj["client"] = Client(
  private_key=ctx.obj["private_key"],
  rpc_url=DEFAULT_RPC_URL,
+ api_url=DEFAULT_API_URL,
  contract_address=DEFAULT_INFERENCE_CONTRACT_ADDRESS,
  email=ctx.obj.get("email"),
  password=ctx.obj.get("password"),
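Both the SDK entry points (new_client/init in __init__.py) and the CLI now thread api_url through to Client alongside rpc_url. A minimal usage sketch of the new parameter (the credentials and key below are placeholders, and api_url can normally be omitted since it defaults to DEFAULT_API_URL):

    import opengradient as og

    # Placeholder credentials and key; substitute real values.
    client = og.new_client(
        email="user@example.com",
        password="example-password",
        private_key="0x" + "11" * 32,
        # Optional override; omitting it uses DEFAULT_API_URL ("https://sdk-devnet.opengradient.ai").
        api_url="https://sdk-devnet.opengradient.ai",
    )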
@@ -12,6 +12,7 @@ from eth_account.account import LocalAccount
  from web3 import Web3
  from web3.exceptions import ContractLogicError
  from web3.logs import DISCARD
+ import urllib.parse

  from .exceptions import OpenGradientError
  from .proto import infer_pb2, infer_pb2_grpc
@@ -49,6 +50,7 @@ REGULAR_TX_TIMEOUT = 30
  DEFAULT_MAX_RETRY = 5
  DEFAULT_RETRY_DELAY_SEC = 1

+ PRECOMPILE_CONTRACT_ADDRESS = "0x00000000000000000000000000000000000000F4"

  class Client:
  _inference_hub_contract_address: str
@@ -56,9 +58,10 @@ class Client:
  _wallet_account: LocalAccount

  _hub_user: Optional[Dict]
+ _api_url: str
  _inference_abi: Dict
-
- def __init__(self, private_key: str, rpc_url: str, contract_address: str, email: Optional[str], password: Optional[str]):
+ _precompile_abi: Dict
+ def __init__(self, private_key: str, rpc_url: str, api_url: str, contract_address: str, email: Optional[str], password: Optional[str]):
  """
  Initialize the Client with private key, RPC URL, and contract address.

@@ -71,12 +74,17 @@
  """
  self._inference_hub_contract_address = contract_address
  self._blockchain = Web3(Web3.HTTPProvider(rpc_url))
+ self._api_url = api_url
  self._wallet_account = self._blockchain.eth.account.from_key(private_key)

  abi_path = Path(__file__).parent / "abi" / "inference.abi"
  with open(abi_path, "r") as abi_file:
  self._inference_abi = json.load(abi_file)

+ abi_path = Path(__file__).parent / "abi" / "InferencePrecompile.abi"
+ with open(abi_path, "r") as abi_file:
+ self._precompile_abi = json.load(abi_file)
+
  if email is not None:
  self._hub_user = self._login_to_hub(email, password)
  else:
@@ -292,38 +300,26 @@ class Client:

  def execute_transaction():
  contract = self._blockchain.eth.contract(address=self._inference_hub_contract_address, abi=self._inference_abi)
+ precompile_contract = self._blockchain.eth.contract(address=PRECOMPILE_CONTRACT_ADDRESS, abi=self._precompile_abi)

  inference_mode_uint8 = inference_mode.value
  converted_model_input = convert_to_model_input(model_input)

  run_function = contract.functions.run(model_cid, inference_mode_uint8, converted_model_input)

- nonce = self._blockchain.eth.get_transaction_count(self._wallet_account.address, "pending")
- estimated_gas = run_function.estimate_gas({"from": self._wallet_account.address})
- gas_limit = int(estimated_gas * 3)
-
- transaction = run_function.build_transaction(
- {
- "from": self._wallet_account.address,
- "nonce": nonce,
- "gas": gas_limit,
- "gasPrice": self._blockchain.eth.gas_price,
- }
- )
-
- signed_tx = self._wallet_account.sign_transaction(transaction)
- tx_hash = self._blockchain.eth.send_raw_transaction(signed_tx.raw_transaction)
- tx_receipt = self._blockchain.eth.wait_for_transaction_receipt(tx_hash, timeout=INFERENCE_TX_TIMEOUT)
-
- if tx_receipt["status"] == 0:
- raise ContractLogicError(f"Transaction failed. Receipt: {tx_receipt}")
-
+ tx_hash, tx_receipt = self._send_tx_with_revert_handling(run_function)
  parsed_logs = contract.events.InferenceResult().process_receipt(tx_receipt, errors=DISCARD)
  if len(parsed_logs) < 1:
  raise OpenGradientError("InferenceResult event not found in transaction logs")

  # TODO: This should return a ModelOutput class object
  model_output = convert_to_model_output(parsed_logs[0]["args"])
+ if len(model_output) == 0:
+ # check inference directly from node
+ parsed_logs = precompile_contract.events.ModelInferenceEvent().process_receipt(tx_receipt, errors=DISCARD)
+ inference_id = parsed_logs[0]["args"]["inferenceID"]
+ inference_result = self.get_inference_result_from_node(inference_id)
+ model_output = inference_result

  return InferenceResult(tx_hash.hex(), model_output)

@@ -382,27 +378,7 @@ class Client:

  run_function = contract.functions.runLLMCompletion(llm_request)

- nonce = self._blockchain.eth.get_transaction_count(self._wallet_account.address, "pending")
- estimated_gas = run_function.estimate_gas({"from": self._wallet_account.address})
- # Artificially increase required gas for safety
- gas_limit = int(estimated_gas * 1.5)
-
- transaction = run_function.build_transaction(
- {
- "from": self._wallet_account.address,
- "nonce": nonce,
- "gas": gas_limit,
- "gasPrice": self._blockchain.eth.gas_price,
- }
- )
-
- signed_tx = self._wallet_account.sign_transaction(transaction)
- tx_hash = self._blockchain.eth.send_raw_transaction(signed_tx.raw_transaction)
- tx_receipt = self._blockchain.eth.wait_for_transaction_receipt(tx_hash, timeout=LLM_TX_TIMEOUT)
-
- if tx_receipt["status"] == 0:
- raise ContractLogicError(f"Transaction failed. Receipt: {tx_receipt}")
-
+ tx_hash, tx_receipt = self._send_tx_with_revert_handling(run_function)
  parsed_logs = contract.events.LLMCompletionResult().process_receipt(tx_receipt, errors=DISCARD)
  if len(parsed_logs) < 1:
  raise OpenGradientError("LLM completion result event not found in transaction logs")
@@ -534,27 +510,7 @@ class Client:

  run_function = contract.functions.runLLMChat(llm_request)

- nonce = self._blockchain.eth.get_transaction_count(self._wallet_account.address, "pending")
- estimated_gas = run_function.estimate_gas({"from": self._wallet_account.address})
- # Artificially increase required gas for safety
- gas_limit = int(estimated_gas * 1.5)
-
- transaction = run_function.build_transaction(
- {
- "from": self._wallet_account.address,
- "nonce": nonce,
- "gas": gas_limit,
- "gasPrice": self._blockchain.eth.gas_price,
- }
- )
-
- signed_tx = self._wallet_account.sign_transaction(transaction)
- tx_hash = self._blockchain.eth.send_raw_transaction(signed_tx.raw_transaction)
- tx_receipt = self._blockchain.eth.wait_for_transaction_receipt(tx_hash, timeout=LLM_TX_TIMEOUT)
-
- if tx_receipt["status"] == 0:
- raise ContractLogicError(f"Transaction failed. Receipt: {tx_receipt}")
-
+ tx_hash, tx_receipt = self._send_tx_with_revert_handling(run_function)
  parsed_logs = contract.events.LLMChatResult().process_receipt(tx_receipt, errors=DISCARD)
  if len(parsed_logs) < 1:
  raise OpenGradientError("LLM chat result event not found in transaction logs")
@@ -752,6 +708,58 @@ class Client:
  bytecode = "0x" + bytecode
  return bytecode

+ def _send_tx_with_revert_handling(self, run_function):
+ """
+ Execute a blockchain transaction with revert error.
+
+ Args:
+ run_function: Function that executes the transaction
+
+ Returns:
+ tx_hash: Transaction hash
+ tx_receipt: Transaction receipt
+
+ Raises:
+ Exception: If transaction fails or gas estimation fails
+ """
+ nonce = self._blockchain.eth.get_transaction_count(self._wallet_account.address, "pending")
+ try:
+ estimated_gas = run_function.estimate_gas({"from": self._wallet_account.address})
+ except ContractLogicError as e:
+ try:
+ run_function.call({"from": self._wallet_account.address})
+
+ except ContractLogicError as call_err:
+ raise ContractLogicError(f"simulation failed with revert reason: {call_err.args[0]}")
+
+ raise ContractLogicError(f"simulation failed with no revert reason. Reason: {e}")
+
+ gas_limit = int(estimated_gas * 3)
+
+ transaction = run_function.build_transaction(
+ {
+ "from": self._wallet_account.address,
+ "nonce": nonce,
+ "gas": gas_limit,
+ "gasPrice": self._blockchain.eth.gas_price,
+ }
+ )
+
+ signed_tx = self._wallet_account.sign_transaction(transaction)
+ tx_hash = self._blockchain.eth.send_raw_transaction(signed_tx.raw_transaction)
+ tx_receipt = self._blockchain.eth.wait_for_transaction_receipt(tx_hash, timeout=INFERENCE_TX_TIMEOUT)
+
+ if tx_receipt["status"] == 0:
+ try:
+ run_function.call({"from": self._wallet_account.address})
+
+ except ContractLogicError as call_err:
+ raise ContractLogicError(f"Transaction failed with revert reason: {call_err.args[0]}")
+
+ raise ContractLogicError(f"Transaction failed with no revert reason. Receipt: {tx_receipt}")
+
+ return tx_hash, tx_receipt
+
  def new_workflow(
  self,
  model_cid: str,
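The helper added above centralizes a common web3.py pattern: if estimate_gas raises, or the mined receipt comes back with status 0, the same call is replayed with eth_call so the revert reason can be surfaced in the exception. A stripped-down sketch of that pattern on its own, outside the SDK (the contract function and sender address are assumed; this is an illustration, not part of the package):

    from web3.exceptions import ContractLogicError

    def revert_reason_or_none(run_function, sender):
        """Replay a contract call via eth_call and return its revert reason, or None if it succeeds."""
        try:
            # ContractFunction.call() executes the call without mining a transaction,
            # so a revert surfaces here as ContractLogicError carrying the reason string.
            run_function.call({"from": sender})
            return None
        except ContractLogicError as err:
            return err.args[0] if err.args else str(err)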
@@ -963,6 +971,40 @@ class Client:
  return [convert_array_to_model_output(result) for result in results]


+ def get_inference_result_from_node(self, inference_id: str) -> str:
+ """
+ Get the inference result from node.
+
+ Args:
+ inference_id (str): Inference id for a inference request
+
+ Returns:
+ str: The inference result as returned by the node
+
+ Raises:
+ OpenGradientError: If the request fails or returns an error
+ """
+ try:
+ encoded_id = urllib.parse.quote(inference_id, safe='')
+ url = f"{self._api_url}/artela-network/artela-rollkit/inference/tx/{encoded_id}"
+
+ response = requests.get(url)
+ if response.status_code == 200:
+ return response.json()
+ else:
+ error_message = f"Failed to get inference result: HTTP {response.status_code}"
+ if response.text:
+ error_message += f" - {response.text}"
+ logging.error(error_message)
+ raise OpenGradientError(error_message)
+
+ except requests.RequestException as e:
+ logging.error(f"Request exception when getting inference result: {str(e)}")
+ raise OpenGradientError(f"Failed to get inference result: {str(e)}")
+ except Exception as e:
+ logging.error(f"Unexpected error when getting inference result: {str(e)}", exc_info=True)
+ raise OpenGradientError(f"Failed to get inference result: {str(e)}")
+
  def run_with_retry(txn_function: Callable, max_retries=DEFAULT_MAX_RETRY, retry_delay=DEFAULT_RETRY_DELAY_SEC):
  """
  Execute a blockchain transaction with retry logic.
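For illustration, the request issued by get_inference_result_from_node is a plain GET against the configured api_url plus a fixed path; with the devnet default from defaults.py and a made-up inference ID, the URL comes out as follows (sketch only):

    import urllib.parse

    api_url = "https://sdk-devnet.opengradient.ai"   # DEFAULT_API_URL from defaults.py
    inference_id = "example-inference-id"            # placeholder, not a real ID
    encoded_id = urllib.parse.quote(inference_id, safe="")
    url = f"{api_url}/artela-network/artela-rollkit/inference/tx/{encoded_id}"
    # -> https://sdk-devnet.opengradient.ai/artela-network/artela-rollkit/inference/tx/example-inference-id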
@@ -1,9 +1,10 @@
  # Default variables
  DEFAULT_RPC_URL = "https://eth-devnet.opengradient.ai"
+ DEFAULT_API_URL = "https://sdk-devnet.opengradient.ai"
  DEFAULT_OG_FAUCET_URL = "https://faucet.opengradient.ai/?address="
  DEFAULT_HUB_SIGNUP_URL = "https://hub.opengradient.ai/signup"
  DEFAULT_INFERENCE_CONTRACT_ADDRESS = "0x8383C9bD7462F12Eb996DD02F78234C0421A6FaE"
  DEFAULT_SCHEDULER_ADDRESS = "0x7179724De4e7FF9271FA40C0337c7f90C0508eF6"
  DEFAULT_BLOCKCHAIN_EXPLORER = "https://explorer.opengradient.ai/tx/"
  DEFAULT_IMAGE_GEN_HOST = "18.217.25.69"
- DEFAULT_IMAGE_GEN_PORT = 5125
+ DEFAULT_IMAGE_GEN_PORT = 5125
@@ -19,7 +19,7 @@ from langchain_core.runnables import Runnable
  from langchain_core.language_models.base import LanguageModelInput

  from opengradient import Client, LlmInferenceMode, LLM
- from opengradient.defaults import DEFAULT_INFERENCE_CONTRACT_ADDRESS, DEFAULT_RPC_URL
+ from opengradient.defaults import DEFAULT_INFERENCE_CONTRACT_ADDRESS, DEFAULT_RPC_URL, DEFAULT_API_URL


  class OpenGradientChatModel(BaseChatModel):
@@ -34,7 +34,7 @@ class OpenGradientChatModel(BaseChatModel):
  super().__init__()

  self._client = Client(
- private_key=private_key, rpc_url=DEFAULT_RPC_URL, contract_address=DEFAULT_INFERENCE_CONTRACT_ADDRESS, email=None, password=None
+ private_key=private_key, rpc_url=DEFAULT_RPC_URL, api_url=DEFAULT_API_URL, contract_address=DEFAULT_INFERENCE_CONTRACT_ADDRESS, email=None, password=None
  )
  self._model_cid = model_cid
  self._max_tokens = max_tokens
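Because the LangChain wrapper builds its internal Client with api_url=DEFAULT_API_URL, callers of the wrapper should not need any changes. A hypothetical construction for context only: the module path comes from this package's layout, but the exact constructor signature beyond private_key and model_cid is assumed from the assignments shown above (additional parameters such as max_tokens may be required), and both values below are placeholders:

    from opengradient.llm.og_langchain import OpenGradientChatModel  # module path from this package layout

    chat_model = OpenGradientChatModel(
        private_key="0x" + "11" * 32,       # placeholder key
        model_cid="example-llm-model-cid",  # placeholder model identifier
    )
    reply = chat_model.invoke("Say hello")  # standard LangChain BaseChatModel entry point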
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: opengradient
- Version: 0.4.11
+ Version: 0.4.12
  Summary: Python SDK for OpenGradient decentralized model management & inference services
  Author-email: OpenGradient <oliver@opengradient.ai>
  License: MIT License
@@ -15,6 +15,7 @@ src/opengradient.egg-info/dependency_links.txt
  src/opengradient.egg-info/entry_points.txt
  src/opengradient.egg-info/requires.txt
  src/opengradient.egg-info/top_level.txt
+ src/opengradient/abi/InferencePrecompile.abi
  src/opengradient/abi/PriceHistoryInference.abi
  src/opengradient/abi/WorkflowScheduler.abi
  src/opengradient/abi/inference.abi