opengradient 0.4.12b1__tar.gz → 0.5.4__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. {opengradient-0.4.12b1/src/opengradient.egg-info → opengradient-0.5.4}/PKG-INFO +6 -29
  2. {opengradient-0.4.12b1 → opengradient-0.5.4}/pyproject.toml +8 -10
  3. {opengradient-0.4.12b1 → opengradient-0.5.4}/src/opengradient/__init__.py +8 -5
  4. opengradient-0.5.4/src/opengradient/abi/InferencePrecompile.abi +1 -0
  5. {opengradient-0.4.12b1 → opengradient-0.5.4}/src/opengradient/cli.py +172 -42
  6. {opengradient-0.4.12b1 → opengradient-0.5.4}/src/opengradient/client.py +548 -78
  7. {opengradient-0.4.12b1 → opengradient-0.5.4}/src/opengradient/defaults.py +3 -0
  8. {opengradient-0.4.12b1 → opengradient-0.5.4}/src/opengradient/llm/og_langchain.py +7 -5
  9. {opengradient-0.4.12b1 → opengradient-0.5.4}/src/opengradient/types.py +61 -4
  10. {opengradient-0.4.12b1 → opengradient-0.5.4}/src/opengradient/utils.py +5 -14
  11. {opengradient-0.4.12b1 → opengradient-0.5.4/src/opengradient.egg-info}/PKG-INFO +6 -29
  12. {opengradient-0.4.12b1 → opengradient-0.5.4}/src/opengradient.egg-info/SOURCES.txt +1 -0
  13. {opengradient-0.4.12b1 → opengradient-0.5.4}/src/opengradient.egg-info/requires.txt +3 -4
  14. {opengradient-0.4.12b1 → opengradient-0.5.4}/LICENSE +0 -0
  15. {opengradient-0.4.12b1 → opengradient-0.5.4}/README.md +0 -0
  16. {opengradient-0.4.12b1 → opengradient-0.5.4}/setup.cfg +0 -0
  17. {opengradient-0.4.12b1 → opengradient-0.5.4}/src/opengradient/abi/PriceHistoryInference.abi +0 -0
  18. {opengradient-0.4.12b1 → opengradient-0.5.4}/src/opengradient/abi/WorkflowScheduler.abi +0 -0
  19. {opengradient-0.4.12b1 → opengradient-0.5.4}/src/opengradient/abi/inference.abi +0 -0
  20. {opengradient-0.4.12b1 → opengradient-0.5.4}/src/opengradient/account.py +0 -0
  21. {opengradient-0.4.12b1 → opengradient-0.5.4}/src/opengradient/alphasense/__init__.py +0 -0
  22. {opengradient-0.4.12b1 → opengradient-0.5.4}/src/opengradient/alphasense/read_workflow_tool.py +0 -0
  23. {opengradient-0.4.12b1 → opengradient-0.5.4}/src/opengradient/alphasense/run_model_tool.py +0 -0
  24. {opengradient-0.4.12b1 → opengradient-0.5.4}/src/opengradient/alphasense/types.py +0 -0
  25. {opengradient-0.4.12b1 → opengradient-0.5.4}/src/opengradient/bin/PriceHistoryInference.bin +0 -0
  26. {opengradient-0.4.12b1 → opengradient-0.5.4}/src/opengradient/exceptions.py +0 -0
  27. {opengradient-0.4.12b1 → opengradient-0.5.4}/src/opengradient/llm/__init__.py +0 -0
  28. {opengradient-0.4.12b1 → opengradient-0.5.4}/src/opengradient/llm/og_openai.py +0 -0
  29. {opengradient-0.4.12b1 → opengradient-0.5.4}/src/opengradient/proto/__init__.py +0 -0
  30. {opengradient-0.4.12b1 → opengradient-0.5.4}/src/opengradient/proto/infer.proto +0 -0
  31. {opengradient-0.4.12b1 → opengradient-0.5.4}/src/opengradient/proto/infer_pb2.py +0 -0
  32. {opengradient-0.4.12b1 → opengradient-0.5.4}/src/opengradient/proto/infer_pb2_grpc.py +0 -0
  33. {opengradient-0.4.12b1 → opengradient-0.5.4}/src/opengradient/workflow_models/__init__.py +0 -0
  34. {opengradient-0.4.12b1 → opengradient-0.5.4}/src/opengradient/workflow_models/constants.py +0 -0
  35. {opengradient-0.4.12b1 → opengradient-0.5.4}/src/opengradient/workflow_models/types.py +0 -0
  36. {opengradient-0.4.12b1 → opengradient-0.5.4}/src/opengradient/workflow_models/utils.py +0 -0
  37. {opengradient-0.4.12b1 → opengradient-0.5.4}/src/opengradient/workflow_models/workflow_models.py +0 -0
  38. {opengradient-0.4.12b1 → opengradient-0.5.4}/src/opengradient.egg-info/dependency_links.txt +0 -0
  39. {opengradient-0.4.12b1 → opengradient-0.5.4}/src/opengradient.egg-info/entry_points.txt +0 -0
  40. {opengradient-0.4.12b1 → opengradient-0.5.4}/src/opengradient.egg-info/top_level.txt +0 -0
@@ -1,44 +1,20 @@
  Metadata-Version: 2.4
  Name: opengradient
- Version: 0.4.12b1
+ Version: 0.5.4
  Summary: Python SDK for OpenGradient decentralized model management & inference services
- Author-email: OpenGradient <oliver@opengradient.ai>
- License: MIT License
-
- Copyright (c) 2024 OpenGradient
-
- Permission is hereby granted, free of charge, to any person obtaining a copy
- of this software and associated documentation files (the "Software"), to deal
- in the Software without restriction, including without limitation the rights
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- copies of the Software, and to permit persons to whom the Software is
- furnished to do so, subject to the following conditions:
-
- The above copyright notice and this permission notice shall be included in all
- copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- SOFTWARE.
-
+ Author-email: OpenGradient <kyle@vannalabs.ai>
+ License-Expression: MIT
  Project-URL: Homepage, https://opengradient.ai
  Classifier: Development Status :: 3 - Alpha
  Classifier: Intended Audience :: Developers
- Classifier: License :: OSI Approved :: MIT License
  Classifier: Programming Language :: Python :: 3.10
  Classifier: Programming Language :: Python :: 3.11
  Classifier: Programming Language :: Python :: 3.12
  Requires-Python: >=3.10
  Description-Content-Type: text/markdown
  License-File: LICENSE
- Requires-Dist: eth-utils==2.2.2
- Requires-Dist: eth-account>=0.13.0
- Requires-Dist: web3>=6.11
- Requires-Dist: websockets>=14.1
+ Requires-Dist: eth-account>=0.13.4
+ Requires-Dist: web3>=7.3.0
  Requires-Dist: click>=8.1.7
  Requires-Dist: firebase-rest-api>=1.11.0
  Requires-Dist: grpcio>=1.66.2
@@ -47,6 +23,7 @@ Requires-Dist: requests>=2.32.3
  Requires-Dist: langchain>=0.3.7
  Requires-Dist: openai>=1.58.1
  Requires-Dist: pydantic>=2.9.2
+ Requires-Dist: og-test-x402==0.0.1
  Dynamic: license-file

  # OpenGradient Python SDK
@@ -1,29 +1,26 @@
  [build-system]
- requires = ["setuptools>=61.0"]
+ requires = ["setuptools>=77.0.0"]
  build-backend = "setuptools.build_meta"

  [project]
  name = "opengradient"
- version = "0.4.12.beta1"
+ version = "0.5.4"
  description = "Python SDK for OpenGradient decentralized model management & inference services"
- authors = [{name = "OpenGradient", email = "oliver@opengradient.ai"}]
- license = {file = "LICENSE"}
+ authors = [{name = "OpenGradient", email = "kyle@vannalabs.ai"}]
  readme = "README.md"
  requires-python = ">=3.10"
+ license = "MIT"
  classifiers = [
  "Development Status :: 3 - Alpha",
  "Intended Audience :: Developers",
- "License :: OSI Approved :: MIT License",
  "Programming Language :: Python :: 3.10",
  "Programming Language :: Python :: 3.11",
  "Programming Language :: Python :: 3.12",
  ]

  dependencies = [
- "eth-utils==2.2.2",
- "eth-account>=0.13.0",
- "web3>=6.11",
- "websockets>=14.1",
+ "eth-account>=0.13.4",
+ "web3>=7.3.0",
  "click>=8.1.7",
  "firebase-rest-api>=1.11.0",
  "grpcio>=1.66.2",
@@ -32,6 +29,7 @@ dependencies = [
  "langchain>=0.3.7",
  "openai>=1.58.1",
  "pydantic>=2.9.2",
+ "og-test-x402==0.0.1",
  ]

  [project.scripts]
@@ -49,7 +47,7 @@ include-package-data = true

  [tool.setuptools.packages.find]
  where = ["src"]
- include = ["opengradient*"] # Explicitly include all opengradient packages
+ include = ["opengradient*"]
  exclude = ["tests*", "stresstest*"]

  [tool.setuptools.package-data]
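
The SPDX `license = "MIT"` key and the `License-Expression` field now emitted in PKG-INFO come from PEP 639, which is presumably why the build requirement jumps to `setuptools>=77.0.0` (the first series with stable license-expression support). For consumers, the dependency edits are the breaking part: `web3` moves from the 6.x line to `>=7.3.0`, and the `eth-utils==2.2.2` and `websockets>=14.1` pins disappear. A quick post-upgrade sanity check, as a sketch (assumes the packages are installed in the current environment):

    # Sanity-check resolved versions after upgrading to opengradient 0.5.4.
    from importlib.metadata import version

    assert version("opengradient") == "0.5.4"
    print("web3:", version("web3"))                # should satisfy >=7.3.0
    print("eth-account:", version("eth-account"))  # should satisfy >=0.13.4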
@@ -5,7 +5,7 @@ OpenGradient Python SDK for interacting with AI models and infrastructure.
  from typing import Any, Dict, List, Optional, Tuple, Union

  from .client import Client
- from .defaults import DEFAULT_INFERENCE_CONTRACT_ADDRESS, DEFAULT_RPC_URL
+ from .defaults import DEFAULT_INFERENCE_CONTRACT_ADDRESS, DEFAULT_RPC_URL, DEFAULT_API_URL
  from .types import (
  LLM,
  TEE_LLM,
@@ -32,7 +32,9 @@ def new_client(
  password: Optional[str],
  private_key: str,
  rpc_url=DEFAULT_RPC_URL,
+ api_url=DEFAULT_API_URL,
  contract_address=DEFAULT_INFERENCE_CONTRACT_ADDRESS,
+ **kwargs,
  ) -> Client:
  """
  Creates a unique OpenGradient client instance with the given authentication and network settings.
@@ -45,10 +47,10 @@ def new_client(
  contract_address: Optional inference contract address
  """

- return Client(email=email, password=password, private_key=private_key, rpc_url=rpc_url, contract_address=contract_address)
+ return Client(email=email, password=password, private_key=private_key, rpc_url=rpc_url, api_url=api_url, contract_address=contract_address, **kwargs)


- def init(email: str, password: str, private_key: str, rpc_url=DEFAULT_RPC_URL, contract_address=DEFAULT_INFERENCE_CONTRACT_ADDRESS):
+ def init(email: str, password: str, private_key: str, rpc_url=DEFAULT_RPC_URL, api_url=DEFAULT_API_URL, contract_address=DEFAULT_INFERENCE_CONTRACT_ADDRESS):
  """Initialize the OpenGradient SDK with authentication and network settings.

  Args:
@@ -56,11 +58,12 @@ def init(email: str, password: str, private_key: str, rpc_url=DEFAULT_RPC_URL, c
  password: User's password for authentication
  private_key: Ethereum private key for blockchain transactions
  rpc_url: Optional RPC URL for the blockchain network, defaults to mainnet
+ api_url: Optional API URL for the OpenGradient API, defaults to mainnet
  contract_address: Optional inference contract address
  """
  global _client
-
- _client = Client(private_key=private_key, rpc_url=rpc_url, email=email, password=password, contract_address=contract_address)
+
+ _client = Client(private_key=private_key, rpc_url=rpc_url, api_url=api_url, email=email, password=password, contract_address=contract_address)
  return _client


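The `new_client`/`init` changes are additive: `api_url` defaults to `DEFAULT_API_URL` (imported at package top level above), and the new `**kwargs` on `new_client` pass straight through to `Client`. A minimal sketch of calling the 0.5.4 signature (credential values are placeholders):

    import opengradient as og

    # api_url is new in this release; omit it to use the mainnet default.
    client = og.new_client(
        email="user@example.com",
        password="...",
        private_key="0x...",
        api_url=og.DEFAULT_API_URL,
    )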
@@ -0,0 +1 @@
+ [{"anonymous":false,"inputs":[{"indexed":false,"internalType":"string","name":"inferenceID","type":"string"},{"components":[{"internalType":"enum LLMInferenceMode","name":"mode","type":"uint8"},{"internalType":"string","name":"modelCID","type":"string"},{"components":[{"internalType":"string","name":"role","type":"string"},{"internalType":"string","name":"content","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"tool_call_id","type":"string"},{"components":[{"internalType":"string","name":"id","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"arguments","type":"string"}],"internalType":"struct ToolCall[]","name":"tool_calls","type":"tuple[]"}],"internalType":"struct ChatMessage[]","name":"messages","type":"tuple[]"},{"components":[{"internalType":"string","name":"description","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"parameters","type":"string"}],"internalType":"struct ToolDefinition[]","name":"tools","type":"tuple[]"},{"internalType":"string","name":"tool_choice","type":"string"},{"internalType":"uint32","name":"max_tokens","type":"uint32"},{"internalType":"string[]","name":"stop_sequence","type":"string[]"},{"internalType":"uint32","name":"temperature","type":"uint32"}],"indexed":false,"internalType":"struct LLMChatRequest","name":"request","type":"tuple"},{"components":[{"internalType":"string","name":"finish_reason","type":"string"},{"components":[{"internalType":"string","name":"role","type":"string"},{"internalType":"string","name":"content","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"tool_call_id","type":"string"},{"components":[{"internalType":"string","name":"id","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"arguments","type":"string"}],"internalType":"struct ToolCall[]","name":"tool_calls","type":"tuple[]"}],"internalType":"struct ChatMessage","name":"message","type":"tuple"}],"indexed":false,"internalType":"struct LLMChatResponse","name":"response","type":"tuple"}],"name":"LLMChat","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"string","name":"inferenceID","type":"string"},{"components":[{"internalType":"enum LLMInferenceMode","name":"mode","type":"uint8"},{"internalType":"string","name":"modelCID","type":"string"},{"internalType":"string","name":"prompt","type":"string"},{"internalType":"uint32","name":"max_tokens","type":"uint32"},{"internalType":"string[]","name":"stop_sequence","type":"string[]"},{"internalType":"uint32","name":"temperature","type":"uint32"}],"indexed":false,"internalType":"struct LLMCompletionRequest","name":"request","type":"tuple"},{"components":[{"internalType":"string","name":"answer","type":"string"}],"indexed":false,"internalType":"struct LLMCompletionResponse","name":"response","type":"tuple"}],"name":"LLMCompletionEvent","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"string","name":"inferenceID","type":"string"},{"components":[{"internalType":"enum ModelInferenceMode","name":"mode","type":"uint8"},{"internalType":"string","name":"modelCID","type":"string"},{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"}],"internalType":"struct ModelInput","name":"input","type":"tuple"}],"indexed":false,"internalType":"struct ModelInferenceRequest","name":"request","type":"tuple"},{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"value","type":"string"}],"internalType":"struct TensorLib.JsonScalar[]","name":"jsons","type":"tuple[]"},{"internalType":"bool","name":"is_simulation_result","type":"bool"}],"indexed":false,"internalType":"struct ModelOutput","name":"response","type":"tuple"}],"name":"ModelInferenceEvent","type":"event"},{"inputs":[{"components":[{"internalType":"enum LLMInferenceMode","name":"mode","type":"uint8"},{"internalType":"string","name":"modelCID","type":"string"},{"components":[{"internalType":"string","name":"role","type":"string"},{"internalType":"string","name":"content","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"tool_call_id","type":"string"},{"components":[{"internalType":"string","name":"id","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"arguments","type":"string"}],"internalType":"struct ToolCall[]","name":"tool_calls","type":"tuple[]"}],"internalType":"struct ChatMessage[]","name":"messages","type":"tuple[]"},{"components":[{"internalType":"string","name":"description","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"parameters","type":"string"}],"internalType":"struct ToolDefinition[]","name":"tools","type":"tuple[]"},{"internalType":"string","name":"tool_choice","type":"string"},{"internalType":"uint32","name":"max_tokens","type":"uint32"},{"internalType":"string[]","name":"stop_sequence","type":"string[]"},{"internalType":"uint32","name":"temperature","type":"uint32"}],"internalType":"struct LLMChatRequest","name":"request","type":"tuple"}],"name":"runLLMChat","outputs":[{"components":[{"internalType":"string","name":"finish_reason","type":"string"},{"components":[{"internalType":"string","name":"role","type":"string"},{"internalType":"string","name":"content","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"tool_call_id","type":"string"},{"components":[{"internalType":"string","name":"id","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"arguments","type":"string"}],"internalType":"struct ToolCall[]","name":"tool_calls","type":"tuple[]"}],"internalType":"struct ChatMessage","name":"message","type":"tuple"}],"internalType":"struct LLMChatResponse","name":"","type":"tuple"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"components":[{"internalType":"enum LLMInferenceMode","name":"mode","type":"uint8"},{"internalType":"string","name":"modelCID","type":"string"},{"internalType":"string","name":"prompt","type":"string"},{"internalType":"uint32","name":"max_tokens","type":"uint32"},{"internalType":"string[]","name":"stop_sequence","type":"string[]"},{"internalType":"uint32","name":"temperature","type":"uint32"}],"internalType":"struct LLMCompletionRequest","name":"request","type":"tuple"}],"name":"runLLMCompletion","outputs":[{"components":[{"internalType":"string","name":"answer","type":"string"}],"internalType":"struct LLMCompletionResponse","name":"","type":"tuple"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"components":[{"internalType":"enum ModelInferenceMode","name":"mode","type":"uint8"},{"internalType":"string","name":"modelCID","type":"string"},{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"}],"internalType":"struct ModelInput","name":"input","type":"tuple"}],"internalType":"struct ModelInferenceRequest","name":"request","type":"tuple"}],"name":"runModelInference","outputs":[{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"value","type":"string"}],"internalType":"struct TensorLib.JsonScalar[]","name":"jsons","type":"tuple[]"},{"internalType":"bool","name":"is_simulation_result","type":"bool"}],"internalType":"struct ModelOutput","name":"","type":"tuple"}],"stateMutability":"nonpayable","type":"function"}]
@@ -17,8 +17,10 @@ from .defaults import (
  DEFAULT_INFERENCE_CONTRACT_ADDRESS,
  DEFAULT_OG_FAUCET_URL,
  DEFAULT_RPC_URL,
+ DEFAULT_API_URL,
+ DEFAULT_LLM_SERVER_URL,
  )
- from .types import InferenceMode, LlmInferenceMode, LLM, TEE_LLM
+ from .types import InferenceMode, LlmInferenceMode, LLM, TEE_LLM, x402SettlementMode

  OG_CONFIG_FILE = Path.home() / ".opengradient_config.json"

@@ -72,6 +74,12 @@ LlmInferenceModes = {
  }


+ x402SettlementModes = {
+ "settle-batch": x402SettlementMode.SETTLE_BATCH,
+ "settle": x402SettlementMode.SETTLE,
+ "settle-metadata": x402SettlementMode.SETTLE_METADATA,
+ }
+
  def initialize_config(ctx):
  """Interactively initialize OpenGradient config"""
  if ctx.obj: # Check if config data already exists
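
`x402SettlementModes` mirrors the existing `LlmInferenceModes` pattern: it maps CLI flag strings to the `x402SettlementMode` enum members imported above, and the new `--x402-settlement-mode` option further down resolves through it. A sketch of the lookup (same mapping as the diff; `x402SettlementMode` is importable from `opengradient.types` per the import change above):

    from opengradient.types import x402SettlementMode

    # "settle-batch" is the CLI default for --x402-settlement-mode.
    x402SettlementModes = {
        "settle-batch": x402SettlementMode.SETTLE_BATCH,
        "settle": x402SettlementMode.SETTLE,
        "settle-metadata": x402SettlementMode.SETTLE_METADATA,
    }
    assert x402SettlementModes["settle"] is x402SettlementMode.SETTLE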
@@ -118,23 +126,32 @@ def cli(ctx):

  Visit https://docs.opengradient.ai/developers/python_sdk/ for more documentation.
  """
- # Load existing config
  ctx.obj = load_og_config()

  no_client_commands = ["config", "create-account", "version"]

- # Only create client if this is not a config management command
  if ctx.invoked_subcommand in no_client_commands:
  return

  if all(key in ctx.obj for key in ["private_key"]):
  try:
+ # Extract API keys from config
+ llm_server_url = ctx.obj.get("llm_server_url", DEFAULT_LLM_SERVER_URL)
+ openai_api_key = ctx.obj.get("openai_api_key")
+ anthropic_api_key = ctx.obj.get("anthropic_api_key")
+ google_api_key = ctx.obj.get("google_api_key")
+
  ctx.obj["client"] = Client(
  private_key=ctx.obj["private_key"],
  rpc_url=DEFAULT_RPC_URL,
+ api_url=DEFAULT_API_URL,
  contract_address=DEFAULT_INFERENCE_CONTRACT_ADDRESS,
  email=ctx.obj.get("email"),
  password=ctx.obj.get("password"),
+ llm_server_url=llm_server_url,
+ openai_api_key=openai_api_key,
+ anthropic_api_key=anthropic_api_key,
+ google_api_key=google_api_key,
  )
  except Exception as e:
  click.echo(f"Failed to create OpenGradient client: {str(e)}")
@@ -195,6 +212,50 @@ def clear(ctx):
  click.echo("Config clear cancelled.")


+ @config.command()
+ @click.option("--provider", type=click.Choice(["openai", "anthropic", "google"]), required=True)
+ @click.option("--key", required=True, help="API key for the provider")
+ @click.pass_context
+ def set_api_key(ctx, provider: str, key: str):
+ """
+ Set API key for external LLM providers.
+
+ Example usage:
+
+ \b
+ opengradient config set-api-key --provider openai --key ..
+ opengradient config set-api-key --provider anthropic --key ...
+ opengradient config set-api-key --provider google --key ...
+ """
+ config_key = f"{provider}_api_key"
+ ctx.obj[config_key] = key
+ save_og_config(ctx)
+
+ click.secho(f"✅ API key for {provider} has been set", fg="green")
+ click.echo("You can now use models from this provider in completion and chat commands.")
+
+
+ @config.command()
+ @click.option("--provider", type=click.Choice(["openai", "anthropic", "google"]), required=True)
+ @click.pass_context
+ def remove_api_key(ctx, provider: str):
+ """
+ Remove API key for an external LLM provider.
+
+ Example usage:
+
+ \b
+ opengradient config remove-api-key --provider openai
+ """
+ config_key = f"{provider}_api_key"
+ if config_key in ctx.obj:
+ del ctx.obj[config_key]
+ save_og_config(ctx)
+ click.secho(f"✅ API key for {provider} has been removed", fg="green")
+ else:
+ click.echo(f"No API key found for {provider}")
+
+
  @cli.command()
  @click.option("--repo", "-r", "--name", "repo_name", required=True, help="Name of the new model repository")
  @click.option("--description", "-d", required=True, help="Description of the model")
@@ -352,33 +413,56 @@ def infer(ctx, model_cid: str, inference_mode: str, input_data, input_file: Path
  "--model",
  "-m",
  "model_cid",
- type=click.Choice([e.value for e in LLM]),
  required=True,
- help="CID of the LLM model to run inference on",
+ help="Model identifier (local model from LLM enum or external model like 'gpt-4o', 'gemini-2.5-flash-lite', etc.)",
  )
  @click.option(
- "--mode", "inference_mode", type=click.Choice(LlmInferenceModes.keys()), default="VANILLA", help="Inference mode (default: VANILLA)"
+ "--mode",
+ "inference_mode",
+ type=click.Choice(LlmInferenceModes.keys()),
+ default="VANILLA",
+ help="Inference mode (only applies to local models, default: VANILLA)"
  )
  @click.option("--prompt", "-p", required=True, help="Input prompt for the LLM completion")
  @click.option("--max-tokens", type=int, default=100, help="Maximum number of tokens for LLM completion output")
  @click.option("--stop-sequence", multiple=True, help="Stop sequences for LLM")
  @click.option("--temperature", type=float, default=0.0, help="Temperature for LLM inference (0.0 to 1.0)")
+ @click.option("--local", is_flag=True, help="Force use of local model even if not in LLM enum")
+ @click.option("--x402-settlement-mode", "x402_settlement_mode", type=click.Choice(x402SettlementModes.keys()), default="settle-batch", help="Settlement mode for x402 payload")
  @click.pass_context
- def completion(ctx, model_cid: str, inference_mode: str, prompt: str, max_tokens: int, stop_sequence: List[str], temperature: float):
+ def completion(ctx, model_cid: str, inference_mode: str, x402_settlement_mode: str, prompt: str, max_tokens: int, stop_sequence: List[str], temperature: float, local: bool):
  """
- Run completion inference on an LLM model.
+ Run completion inference on an LLM model (local or external).

- This command runs a completion inference on the specified LLM model using the provided prompt and parameters.
+ This command supports both local OpenGradient models and external providers
+ (OpenAI, Anthropic, Google, etc.). For external models, make sure to set
+ the appropriate API key using 'opengradient config set-api-key'.

  Example usage:

  \b
- opengradient completion --model meta-llama/Meta-Llama-3-8B-Instruct --prompt "Hello, how are you?" --max-tokens 50 --temperature 0.7
- opengradient completion -m meta-llama/Meta-Llama-3-8B-Instruct -p "Translate to French: Hello world" --stop-sequence "." --stop-sequence "\\n"
+ # Local model
+ opengradient completion --model meta-llama/Meta-Llama-3-8B-Instruct --prompt "Hello, how are you?" --max-tokens 50
+
+ # External OpenAI model
+ opengradient completion --model gpt-4o --prompt "Translate to French: Hello world" --max-tokens 50
+
+ # External Anthropic model
+ opengradient completion --model claude-haiku-4-5-20251001 --prompt "Write a haiku about coding" --max-tokens 100
+
+ # External Google model
+ opengradient completion --model gemini-2.5-flash-lite --prompt "Explain quantum computing" --max-tokens 200
  """
  client: Client = ctx.obj["client"]
+
  try:
- click.echo(f'Running LLM completion inference for model "{model_cid}"\n')
+ is_local = local or model_cid in [llm.value for llm in LLM]
+
+ if is_local:
+ click.echo(f'Running LLM completion inference for local model "{model_cid}"\n')
+ else:
+ click.echo(f'Running LLM completion inference for external model "{model_cid}"\n')
+
  completion_output = client.llm_completion(
  model_cid=model_cid,
  inference_mode=LlmInferenceModes[inference_mode],
@@ -386,23 +470,32 @@ def completion(ctx, model_cid: str, inference_mode: str, prompt: str, max_tokens
  max_tokens=max_tokens,
  stop_sequence=list(stop_sequence),
  temperature=temperature,
+ local_model=local,
+ x402_settlement_mode=x402_settlement_mode,
  )

- print_llm_completion_result(model_cid, completion_output.transaction_hash, completion_output.completion_output)
+ print_llm_completion_result(model_cid, completion_output.transaction_hash, completion_output.completion_output, is_local)
+
  except Exception as e:
  click.echo(f"Error running LLM completion: {str(e)}")


- def print_llm_completion_result(model_cid, tx_hash, llm_output):
+ def print_llm_completion_result(model_cid, tx_hash, llm_output, is_local=True):
  click.secho("✅ LLM completion Successful", fg="green", bold=True)
  click.echo("──────────────────────────────────────")
- click.echo("Model CID: ", nl=False)
+ click.echo("Model: ", nl=False)
  click.secho(model_cid, fg="cyan", bold=True)
- click.echo("Transaction hash: ", nl=False)
- click.secho(tx_hash, fg="cyan", bold=True)
- block_explorer_link = f"{DEFAULT_BLOCKCHAIN_EXPLORER}0x{tx_hash}"
- click.echo("Block explorer link: ", nl=False)
- click.secho(block_explorer_link, fg="blue", underline=True)
+
+ if is_local and tx_hash != "external":
+ click.echo("Transaction hash: ", nl=False)
+ click.secho(tx_hash, fg="cyan", bold=True)
+ block_explorer_link = f"{DEFAULT_BLOCKCHAIN_EXPLORER}0x{tx_hash}"
+ click.echo("Block explorer link: ", nl=False)
+ click.secho(block_explorer_link, fg="blue", underline=True)
+ else:
+ click.echo("Source: ", nl=False)
+ click.secho("External Provider", fg="cyan", bold=True)
+
  click.echo("──────────────────────────────────────")
  click.secho("LLM Output:", fg="yellow", bold=True)
  click.echo()
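
`completion` above and `chat` below now share one routing rule: a model is treated as local when `--local` is passed or the identifier appears in the `LLM` enum; anything else is dispatched to an external provider, which has no transaction hash or block-explorer link to print. The rule as a standalone predicate (a sketch; the CLI inlines it):

    from opengradient.types import LLM

    def is_local_model(model_cid: str, force_local: bool = False) -> bool:
        # Mirrors `is_local = local or model_cid in [llm.value for llm in LLM]`
        # from the completion/chat commands in this diff.
        return force_local or model_cid in [llm.value for llm in LLM]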
@@ -415,12 +508,15 @@ def print_llm_completion_result(model_cid, tx_hash, llm_output):
  "--model",
  "-m",
  "model_cid",
- type=click.Choice([e.value for e in LLM]),
  required=True,
- help="CID of the LLM model to run inference on",
+ help="Model identifier (local model from LLM enum or external model like 'gpt-4o', 'gemini-2.5-flash-lite', etc.)",
  )
  @click.option(
- "--mode", "inference_mode", type=click.Choice(LlmInferenceModes.keys()), default="VANILLA", help="Inference mode (default: VANILLA)"
+ "--mode",
+ "inference_mode",
+ type=click.Choice(LlmInferenceModes.keys()),
+ default="VANILLA",
+ help="Inference mode (only applies to local models, default: VANILLA)"
  )
  @click.option("--messages", type=str, required=False, help="Input messages for the chat inference in JSON format")
  @click.option(
@@ -434,9 +530,14 @@ def print_llm_completion_result(model_cid, tx_hash, llm_output):
  @click.option("--temperature", type=float, default=0.0, help="Temperature for LLM inference (0.0 to 1.0)")
  @click.option("--tools", type=str, default=None, help="Tool configurations in JSON format")
  @click.option(
- "--tools-file", type=click.Path(exists=True, path_type=Path), required=False, help="Path to JSON file containing tool configurations"
+ "--tools-file",
+ type=click.Path(exists=True, path_type=Path),
+ required=False,
+ help="Path to JSON file containing tool configurations"
  )
  @click.option("--tool-choice", type=str, default="", help="Specific tool choice for the LLM")
+ @click.option("--local", is_flag=True, help="Force use of local model even if not in LLM enum")
+ @click.option("--x402-settlement-mode", type=click.Choice(x402SettlementModes.keys()), default="settle-batch", help="Settlement mode for x402 payload")
  @click.pass_context
  def chat(
  ctx,
@@ -450,23 +551,38 @@ def chat(
  tools: Optional[str],
  tools_file: Optional[Path],
  tool_choice: Optional[str],
+ x402_settlement_mode: Optional[str],
+ local: bool,
  ):
  """
- Run chat inference on an LLM model.
+ Run chat inference on an LLM model (local or external).

- This command runs a chat inference on the specified LLM model using the provided messages and parameters.
-
- Tool call formatting is based on OpenAI documentation tool calls (see here: https://platform.openai.com/docs/guides/function-calling).
+ This command supports both local OpenGradient models and external providers.
+ Tool calling is supported for compatible models.

  Example usage:

  \b
- opengradient chat --model meta-llama/Meta-Llama-3-8B-Instruct --messages '[{"role":"user","content":"hello"}]' --max-tokens 50 --temperature 0.7
- opengradient chat --model mistralai/Mistral-7B-Instruct-v0.3 --messages-file messages.json --tools-file tools.json --max-tokens 200 --stop-sequence "." --stop-sequence "\\n"
+ # Local model
+ opengradient chat --model meta-llama/Meta-Llama-3-8B-Instruct --messages '[{"role":"user","content":"hello"}]' --max-tokens 50
+
+ # External OpenAI model with tools
+ opengradient chat --model gpt-4o --messages-file messages.json --tools-file tools.json --max-tokens 200
+
+ # External Anthropic model
+ opengradient chat --model claude-haiku-4-5-20251001 --messages '[{"role":"user","content":"Write a poem"}]' --max-tokens 100
  """
  client: Client = ctx.obj["client"]
+
  try:
- click.echo(f'Running LLM chat inference for model "{model_cid}"\n')
+ is_local = local or model_cid in [llm.value for llm in LLM]
+
+ if is_local:
+ click.echo(f'Running LLM chat inference for local model "{model_cid}"\n')
+ else:
+ click.echo(f'Running LLM chat inference for external model "{model_cid}"\n')
+
+ # Parse messages
  if not messages and not messages_file:
  click.echo("Must specify either messages or messages-file")
  ctx.exit(1)
@@ -486,10 +602,10 @@ def chat(
  with messages_file.open("r") as file:
  messages = json.load(file)

- # Parse tools if provided
+ # Parse tools
  if (tools and tools != "[]") and tools_file:
  click.echo("Cannot have both tools and tools-file")
- click.exit(1)
+ ctx.exit(1)
  return

  parsed_tools = []
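
`--messages-file` expects the same JSON shape as the inline `--messages` examples in the docstring above. A sketch that writes a compatible messages.json (contents illustrative):

    import json

    # Same [{"role": ..., "content": ...}] shape as the inline examples.
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "hello"},
    ]
    with open("messages.json", "w") as f:
        json.dump(messages, f, indent=2)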
@@ -530,23 +646,38 @@ def chat(
  temperature=temperature,
  tools=parsed_tools,
  tool_choice=tool_choice,
+ local_model=local,
+ x402_settlement_mode=x402_settlement_mode,
  )

- print_llm_chat_result(model_cid, completion_output.transaction_hash, completion_output.finish_reason, completion_output.chat_output)
+ print_llm_chat_result(
+ model_cid,
+ completion_output.transaction_hash,
+ completion_output.finish_reason,
+ completion_output.chat_output,
+ is_local
+ )
+
  except Exception as e:
  click.echo(f"Error running LLM chat inference: {str(e)}")


- def print_llm_chat_result(model_cid, tx_hash, finish_reason, chat_output):
+ def print_llm_chat_result(model_cid, tx_hash, finish_reason, chat_output, is_local=True):
  click.secho("✅ LLM Chat Successful", fg="green", bold=True)
  click.echo("──────────────────────────────────────")
- click.echo("Model CID: ", nl=False)
+ click.echo("Model: ", nl=False)
  click.secho(model_cid, fg="cyan", bold=True)
- click.echo("Transaction hash: ", nl=False)
- click.secho(tx_hash, fg="cyan", bold=True)
- block_explorer_link = f"{DEFAULT_BLOCKCHAIN_EXPLORER}0x{tx_hash}"
- click.echo("Block explorer link: ", nl=False)
- click.secho(block_explorer_link, fg="blue", underline=True)
+
+ if is_local and tx_hash != "external":
+ click.echo("Transaction hash: ", nl=False)
+ click.secho(tx_hash, fg="cyan", bold=True)
+ block_explorer_link = f"{DEFAULT_BLOCKCHAIN_EXPLORER}0x{tx_hash}"
+ click.echo("Block explorer link: ", nl=False)
+ click.secho(block_explorer_link, fg="blue", underline=True)
+ else:
+ click.echo("Source: ", nl=False)
+ click.secho("External Provider", fg="cyan", bold=True)
+
  click.echo("──────────────────────────────────────")
  click.secho("Finish Reason: ", fg="yellow", bold=True)
  click.echo()
@@ -555,7 +686,6 @@ def print_llm_chat_result(model_cid, tx_hash, finish_reason, chat_output):
  click.secho("Chat Output:", fg="yellow", bold=True)
  click.echo()
  for key, value in chat_output.items():
- # If the value doesn't give any information, don't print it
  if value != None and value != "" and value != "[]" and value != []:
  click.echo(f"{key}: {value}")
  click.echo()