opengradient 0.4.12b1__tar.gz → 0.5.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. {opengradient-0.4.12b1/src/opengradient.egg-info → opengradient-0.5.2}/PKG-INFO +6 -29
  2. {opengradient-0.4.12b1 → opengradient-0.5.2}/pyproject.toml +8 -10
  3. {opengradient-0.4.12b1 → opengradient-0.5.2}/src/opengradient/__init__.py +8 -5
  4. opengradient-0.5.2/src/opengradient/abi/InferencePrecompile.abi +1 -0
  5. {opengradient-0.4.12b1 → opengradient-0.5.2}/src/opengradient/cli.py +160 -41
  6. {opengradient-0.4.12b1 → opengradient-0.5.2}/src/opengradient/client.py +539 -78
  7. {opengradient-0.4.12b1 → opengradient-0.5.2}/src/opengradient/defaults.py +3 -0
  8. {opengradient-0.4.12b1 → opengradient-0.5.2}/src/opengradient/llm/og_langchain.py +7 -5
  9. {opengradient-0.4.12b1 → opengradient-0.5.2}/src/opengradient/types.py +53 -3
  10. {opengradient-0.4.12b1 → opengradient-0.5.2}/src/opengradient/utils.py +5 -14
  11. {opengradient-0.4.12b1 → opengradient-0.5.2/src/opengradient.egg-info}/PKG-INFO +6 -29
  12. {opengradient-0.4.12b1 → opengradient-0.5.2}/src/opengradient.egg-info/SOURCES.txt +1 -0
  13. {opengradient-0.4.12b1 → opengradient-0.5.2}/src/opengradient.egg-info/requires.txt +3 -4
  14. {opengradient-0.4.12b1 → opengradient-0.5.2}/LICENSE +0 -0
  15. {opengradient-0.4.12b1 → opengradient-0.5.2}/README.md +0 -0
  16. {opengradient-0.4.12b1 → opengradient-0.5.2}/setup.cfg +0 -0
  17. {opengradient-0.4.12b1 → opengradient-0.5.2}/src/opengradient/abi/PriceHistoryInference.abi +0 -0
  18. {opengradient-0.4.12b1 → opengradient-0.5.2}/src/opengradient/abi/WorkflowScheduler.abi +0 -0
  19. {opengradient-0.4.12b1 → opengradient-0.5.2}/src/opengradient/abi/inference.abi +0 -0
  20. {opengradient-0.4.12b1 → opengradient-0.5.2}/src/opengradient/account.py +0 -0
  21. {opengradient-0.4.12b1 → opengradient-0.5.2}/src/opengradient/alphasense/__init__.py +0 -0
  22. {opengradient-0.4.12b1 → opengradient-0.5.2}/src/opengradient/alphasense/read_workflow_tool.py +0 -0
  23. {opengradient-0.4.12b1 → opengradient-0.5.2}/src/opengradient/alphasense/run_model_tool.py +0 -0
  24. {opengradient-0.4.12b1 → opengradient-0.5.2}/src/opengradient/alphasense/types.py +0 -0
  25. {opengradient-0.4.12b1 → opengradient-0.5.2}/src/opengradient/bin/PriceHistoryInference.bin +0 -0
  26. {opengradient-0.4.12b1 → opengradient-0.5.2}/src/opengradient/exceptions.py +0 -0
  27. {opengradient-0.4.12b1 → opengradient-0.5.2}/src/opengradient/llm/__init__.py +0 -0
  28. {opengradient-0.4.12b1 → opengradient-0.5.2}/src/opengradient/llm/og_openai.py +0 -0
  29. {opengradient-0.4.12b1 → opengradient-0.5.2}/src/opengradient/proto/__init__.py +0 -0
  30. {opengradient-0.4.12b1 → opengradient-0.5.2}/src/opengradient/proto/infer.proto +0 -0
  31. {opengradient-0.4.12b1 → opengradient-0.5.2}/src/opengradient/proto/infer_pb2.py +0 -0
  32. {opengradient-0.4.12b1 → opengradient-0.5.2}/src/opengradient/proto/infer_pb2_grpc.py +0 -0
  33. {opengradient-0.4.12b1 → opengradient-0.5.2}/src/opengradient/workflow_models/__init__.py +0 -0
  34. {opengradient-0.4.12b1 → opengradient-0.5.2}/src/opengradient/workflow_models/constants.py +0 -0
  35. {opengradient-0.4.12b1 → opengradient-0.5.2}/src/opengradient/workflow_models/types.py +0 -0
  36. {opengradient-0.4.12b1 → opengradient-0.5.2}/src/opengradient/workflow_models/utils.py +0 -0
  37. {opengradient-0.4.12b1 → opengradient-0.5.2}/src/opengradient/workflow_models/workflow_models.py +0 -0
  38. {opengradient-0.4.12b1 → opengradient-0.5.2}/src/opengradient.egg-info/dependency_links.txt +0 -0
  39. {opengradient-0.4.12b1 → opengradient-0.5.2}/src/opengradient.egg-info/entry_points.txt +0 -0
  40. {opengradient-0.4.12b1 → opengradient-0.5.2}/src/opengradient.egg-info/top_level.txt +0 -0
@@ -1,44 +1,20 @@
  Metadata-Version: 2.4
  Name: opengradient
- Version: 0.4.12b1
+ Version: 0.5.2
  Summary: Python SDK for OpenGradient decentralized model management & inference services
- Author-email: OpenGradient <oliver@opengradient.ai>
- License: MIT License
-
- Copyright (c) 2024 OpenGradient
-
- Permission is hereby granted, free of charge, to any person obtaining a copy
- of this software and associated documentation files (the "Software"), to deal
- in the Software without restriction, including without limitation the rights
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- copies of the Software, and to permit persons to whom the Software is
- furnished to do so, subject to the following conditions:
-
- The above copyright notice and this permission notice shall be included in all
- copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- SOFTWARE.
-
+ Author-email: OpenGradient <kyle@vannalabs.ai>
+ License-Expression: MIT
  Project-URL: Homepage, https://opengradient.ai
  Classifier: Development Status :: 3 - Alpha
  Classifier: Intended Audience :: Developers
- Classifier: License :: OSI Approved :: MIT License
  Classifier: Programming Language :: Python :: 3.10
  Classifier: Programming Language :: Python :: 3.11
  Classifier: Programming Language :: Python :: 3.12
  Requires-Python: >=3.10
  Description-Content-Type: text/markdown
  License-File: LICENSE
- Requires-Dist: eth-utils==2.2.2
- Requires-Dist: eth-account>=0.13.0
- Requires-Dist: web3>=6.11
- Requires-Dist: websockets>=14.1
+ Requires-Dist: eth-account>=0.13.4
+ Requires-Dist: web3>=7.3.0
  Requires-Dist: click>=8.1.7
  Requires-Dist: firebase-rest-api>=1.11.0
  Requires-Dist: grpcio>=1.66.2
@@ -47,6 +23,7 @@ Requires-Dist: requests>=2.32.3
  Requires-Dist: langchain>=0.3.7
  Requires-Dist: openai>=1.58.1
  Requires-Dist: pydantic>=2.9.2
+ Requires-Dist: og-test-x402==0.0.1
  Dynamic: license-file

  # OpenGradient Python SDK
@@ -1,29 +1,26 @@
  [build-system]
- requires = ["setuptools>=61.0"]
+ requires = ["setuptools>=77.0.0"]
  build-backend = "setuptools.build_meta"

  [project]
  name = "opengradient"
- version = "0.4.12.beta1"
+ version = "0.5.2"
  description = "Python SDK for OpenGradient decentralized model management & inference services"
- authors = [{name = "OpenGradient", email = "oliver@opengradient.ai"}]
- license = {file = "LICENSE"}
+ authors = [{name = "OpenGradient", email = "kyle@vannalabs.ai"}]
  readme = "README.md"
  requires-python = ">=3.10"
+ license = "MIT"
  classifiers = [
      "Development Status :: 3 - Alpha",
      "Intended Audience :: Developers",
-     "License :: OSI Approved :: MIT License",
      "Programming Language :: Python :: 3.10",
      "Programming Language :: Python :: 3.11",
      "Programming Language :: Python :: 3.12",
  ]

  dependencies = [
-     "eth-utils==2.2.2",
-     "eth-account>=0.13.0",
-     "web3>=6.11",
-     "websockets>=14.1",
+     "eth-account>=0.13.4",
+     "web3>=7.3.0",
      "click>=8.1.7",
      "firebase-rest-api>=1.11.0",
      "grpcio>=1.66.2",
@@ -32,6 +29,7 @@ dependencies = [
      "langchain>=0.3.7",
      "openai>=1.58.1",
      "pydantic>=2.9.2",
+     "og-test-x402==0.0.1",
  ]

  [project.scripts]
@@ -49,7 +47,7 @@ include-package-data = true

  [tool.setuptools.packages.find]
  where = ["src"]
- include = ["opengradient*"] # Explicitly include all opengradient packages
+ include = ["opengradient*"]
  exclude = ["tests*", "stresstest*"]

  [tool.setuptools.package-data]
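Note on the metadata changes above: replacing `license = {file = "LICENSE"}` and the MIT classifier with the SPDX expression `license = "MIT"` follows PEP 639, which is presumably why the build requirement jumps from setuptools>=61.0 to setuptools>=77.0.0 (the release that accepts SPDX license expressions) and why the built PKG-INFO now carries `License-Expression: MIT` at Metadata-Version 2.4 instead of the full embedded license text.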
@@ -5,7 +5,7 @@ OpenGradient Python SDK for interacting with AI models and infrastructure.
  from typing import Any, Dict, List, Optional, Tuple, Union

  from .client import Client
- from .defaults import DEFAULT_INFERENCE_CONTRACT_ADDRESS, DEFAULT_RPC_URL
+ from .defaults import DEFAULT_INFERENCE_CONTRACT_ADDRESS, DEFAULT_RPC_URL, DEFAULT_API_URL
  from .types import (
      LLM,
      TEE_LLM,
@@ -32,7 +32,9 @@ def new_client(
      password: Optional[str],
      private_key: str,
      rpc_url=DEFAULT_RPC_URL,
+     api_url=DEFAULT_API_URL,
      contract_address=DEFAULT_INFERENCE_CONTRACT_ADDRESS,
+     **kwargs,
  ) -> Client:
      """
      Creates a unique OpenGradient client instance with the given authentication and network settings.
@@ -45,10 +47,10 @@ def new_client(
          contract_address: Optional inference contract address
      """

-     return Client(email=email, password=password, private_key=private_key, rpc_url=rpc_url, contract_address=contract_address)
+     return Client(email=email, password=password, private_key=private_key, rpc_url=rpc_url, api_url=api_url, contract_address=contract_address, **kwargs)


- def init(email: str, password: str, private_key: str, rpc_url=DEFAULT_RPC_URL, contract_address=DEFAULT_INFERENCE_CONTRACT_ADDRESS):
+ def init(email: str, password: str, private_key: str, rpc_url=DEFAULT_RPC_URL, api_url=DEFAULT_API_URL, contract_address=DEFAULT_INFERENCE_CONTRACT_ADDRESS):
      """Initialize the OpenGradient SDK with authentication and network settings.

      Args:
@@ -56,11 +58,12 @@ def init(email: str, password: str, private_key: str, rpc_url=DEFAULT_RPC_URL, c
          password: User's password for authentication
          private_key: Ethereum private key for blockchain transactions
          rpc_url: Optional RPC URL for the blockchain network, defaults to mainnet
+         api_url: Optional API URL for the OpenGradient API, defaults to mainnet
          contract_address: Optional inference contract address
      """
      global _client
-
-     _client = Client(private_key=private_key, rpc_url=rpc_url, email=email, password=password, contract_address=contract_address)
+
+     _client = Client(private_key=private_key, rpc_url=rpc_url, api_url=api_url, email=email, password=password, contract_address=contract_address)
      return _client

@@ -0,0 +1 @@
+ [{"anonymous":false,"inputs":[{"indexed":false,"internalType":"string","name":"inferenceID","type":"string"},{"components":[{"internalType":"enum LLMInferenceMode","name":"mode","type":"uint8"},{"internalType":"string","name":"modelCID","type":"string"},{"components":[{"internalType":"string","name":"role","type":"string"},{"internalType":"string","name":"content","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"tool_call_id","type":"string"},{"components":[{"internalType":"string","name":"id","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"arguments","type":"string"}],"internalType":"struct ToolCall[]","name":"tool_calls","type":"tuple[]"}],"internalType":"struct ChatMessage[]","name":"messages","type":"tuple[]"},{"components":[{"internalType":"string","name":"description","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"parameters","type":"string"}],"internalType":"struct ToolDefinition[]","name":"tools","type":"tuple[]"},{"internalType":"string","name":"tool_choice","type":"string"},{"internalType":"uint32","name":"max_tokens","type":"uint32"},{"internalType":"string[]","name":"stop_sequence","type":"string[]"},{"internalType":"uint32","name":"temperature","type":"uint32"}],"indexed":false,"internalType":"struct LLMChatRequest","name":"request","type":"tuple"},{"components":[{"internalType":"string","name":"finish_reason","type":"string"},{"components":[{"internalType":"string","name":"role","type":"string"},{"internalType":"string","name":"content","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"tool_call_id","type":"string"},{"components":[{"internalType":"string","name":"id","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"arguments","type":"string"}],"internalType":"struct ToolCall[]","name":"tool_calls","type":"tuple[]"}],"internalType":"struct ChatMessage","name":"message","type":"tuple"}],"indexed":false,"internalType":"struct LLMChatResponse","name":"response","type":"tuple"}],"name":"LLMChat","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"string","name":"inferenceID","type":"string"},{"components":[{"internalType":"enum LLMInferenceMode","name":"mode","type":"uint8"},{"internalType":"string","name":"modelCID","type":"string"},{"internalType":"string","name":"prompt","type":"string"},{"internalType":"uint32","name":"max_tokens","type":"uint32"},{"internalType":"string[]","name":"stop_sequence","type":"string[]"},{"internalType":"uint32","name":"temperature","type":"uint32"}],"indexed":false,"internalType":"struct LLMCompletionRequest","name":"request","type":"tuple"},{"components":[{"internalType":"string","name":"answer","type":"string"}],"indexed":false,"internalType":"struct LLMCompletionResponse","name":"response","type":"tuple"}],"name":"LLMCompletionEvent","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"string","name":"inferenceID","type":"string"},{"components":[{"internalType":"enum ModelInferenceMode","name":"mode","type":"uint8"},{"internalType":"string","name":"modelCID","type":"string"},{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"}],"internalType":"struct ModelInput","name":"input","type":"tuple"}],"indexed":false,"internalType":"struct ModelInferenceRequest","name":"request","type":"tuple"},{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"value","type":"string"}],"internalType":"struct TensorLib.JsonScalar[]","name":"jsons","type":"tuple[]"},{"internalType":"bool","name":"is_simulation_result","type":"bool"}],"indexed":false,"internalType":"struct ModelOutput","name":"response","type":"tuple"}],"name":"ModelInferenceEvent","type":"event"},{"inputs":[{"components":[{"internalType":"enum LLMInferenceMode","name":"mode","type":"uint8"},{"internalType":"string","name":"modelCID","type":"string"},{"components":[{"internalType":"string","name":"role","type":"string"},{"internalType":"string","name":"content","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"tool_call_id","type":"string"},{"components":[{"internalType":"string","name":"id","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"arguments","type":"string"}],"internalType":"struct ToolCall[]","name":"tool_calls","type":"tuple[]"}],"internalType":"struct ChatMessage[]","name":"messages","type":"tuple[]"},{"components":[{"internalType":"string","name":"description","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"parameters","type":"string"}],"internalType":"struct ToolDefinition[]","name":"tools","type":"tuple[]"},{"internalType":"string","name":"tool_choice","type":"string"},{"internalType":"uint32","name":"max_tokens","type":"uint32"},{"internalType":"string[]","name":"stop_sequence","type":"string[]"},{"internalType":"uint32","name":"temperature","type":"uint32"}],"internalType":"struct LLMChatRequest","name":"request","type":"tuple"}],"name":"runLLMChat","outputs":[{"components":[{"internalType":"string","name":"finish_reason","type":"string"},{"components":[{"internalType":"string","name":"role","type":"string"},{"internalType":"string","name":"content","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"tool_call_id","type":"string"},{"components":[{"internalType":"string","name":"id","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"arguments","type":"string"}],"internalType":"struct ToolCall[]","name":"tool_calls","type":"tuple[]"}],"internalType":"struct ChatMessage","name":"message","type":"tuple"}],"internalType":"struct LLMChatResponse","name":"","type":"tuple"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"components":[{"internalType":"enum LLMInferenceMode","name":"mode","type":"uint8"},{"internalType":"string","name":"modelCID","type":"string"},{"internalType":"string","name":"prompt","type":"string"},{"internalType":"uint32","name":"max_tokens","type":"uint32"},{"internalType":"string[]","name":"stop_sequence","type":"string[]"},{"internalType":"uint32","name":"temperature","type":"uint32"}],"internalType":"struct LLMCompletionRequest","name":"request","type":"tuple"}],"name":"runLLMCompletion","outputs":[{"components":[{"internalType":"string","name":"answer","type":"string"}],"internalType":"struct LLMCompletionResponse","name":"","type":"tuple"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"components":[{"internalType":"enum ModelInferenceMode","name":"mode","type":"uint8"},{"internalType":"string","name":"modelCID","type":"string"},{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"}],"internalType":"struct ModelInput","name":"input","type":"tuple"}],"internalType":"struct ModelInferenceRequest","name":"request","type":"tuple"}],"name":"runModelInference","outputs":[{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"value","type":"string"}],"internalType":"struct TensorLib.JsonScalar[]","name":"jsons","type":"tuple[]"},{"internalType":"bool","name":"is_simulation_result","type":"bool"}],"internalType":"struct ModelOutput","name":"","type":"tuple"}],"stateMutability":"nonpayable","type":"function"}]
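For orientation, a sketch of inspecting the new ABI with web3.py; the RPC endpoint and contract address are placeholders, and the function and event names come straight from the JSON above:

    import json
    from importlib.resources import files

    from web3 import Web3

    # Load the ABI shipped with the package (assumes the abi directory is
    # importable as packaged).
    abi = json.loads(files("opengradient.abi").joinpath("InferencePrecompile.abi").read_text())

    w3 = Web3(Web3.HTTPProvider("https://rpc.example"))  # placeholder endpoint
    precompile = w3.eth.contract(
        address="0x0000000000000000000000000000000000000000",  # placeholder address
        abi=abi,
    )

    # The ABI defines runLLMChat, runLLMCompletion and runModelInference,
    # plus LLMChat, LLMCompletionEvent and ModelInferenceEvent events.
    print([fn.fn_name for fn in precompile.all_functions()])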
@@ -17,6 +17,8 @@ from .defaults import (
      DEFAULT_INFERENCE_CONTRACT_ADDRESS,
      DEFAULT_OG_FAUCET_URL,
      DEFAULT_RPC_URL,
+     DEFAULT_API_URL,
+     DEFAULT_LLM_SERVER_URL,
  )
  from .types import InferenceMode, LlmInferenceMode, LLM, TEE_LLM

@@ -118,23 +120,32 @@ def cli(ctx):

      Visit https://docs.opengradient.ai/developers/python_sdk/ for more documentation.
      """
-     # Load existing config
      ctx.obj = load_og_config()

      no_client_commands = ["config", "create-account", "version"]

-     # Only create client if this is not a config management command
      if ctx.invoked_subcommand in no_client_commands:
          return

      if all(key in ctx.obj for key in ["private_key"]):
          try:
+             # Extract API keys from config
+             llm_server_url = ctx.obj.get("llm_server_url", DEFAULT_LLM_SERVER_URL)
+             openai_api_key = ctx.obj.get("openai_api_key")
+             anthropic_api_key = ctx.obj.get("anthropic_api_key")
+             google_api_key = ctx.obj.get("google_api_key")
+
              ctx.obj["client"] = Client(
                  private_key=ctx.obj["private_key"],
                  rpc_url=DEFAULT_RPC_URL,
+                 api_url=DEFAULT_API_URL,
                  contract_address=DEFAULT_INFERENCE_CONTRACT_ADDRESS,
                  email=ctx.obj.get("email"),
                  password=ctx.obj.get("password"),
+                 llm_server_url=llm_server_url,
+                 openai_api_key=openai_api_key,
+                 anthropic_api_key=anthropic_api_key,
+                 google_api_key=google_api_key,
              )
          except Exception as e:
              click.echo(f"Failed to create OpenGradient client: {str(e)}")
@@ -195,6 +206,50 @@ def clear(ctx):
          click.echo("Config clear cancelled.")


+ @config.command()
+ @click.option("--provider", type=click.Choice(["openai", "anthropic", "google"]), required=True)
+ @click.option("--key", required=True, help="API key for the provider")
+ @click.pass_context
+ def set_api_key(ctx, provider: str, key: str):
+     """
+     Set API key for external LLM providers.
+
+     Example usage:
+
+     \b
+     opengradient config set-api-key --provider openai --key ..
+     opengradient config set-api-key --provider anthropic --key ...
+     opengradient config set-api-key --provider google --key ...
+     """
+     config_key = f"{provider}_api_key"
+     ctx.obj[config_key] = key
+     save_og_config(ctx)
+
+     click.secho(f"✅ API key for {provider} has been set", fg="green")
+     click.echo("You can now use models from this provider in completion and chat commands.")
+
+
+ @config.command()
+ @click.option("--provider", type=click.Choice(["openai", "anthropic", "google"]), required=True)
+ @click.pass_context
+ def remove_api_key(ctx, provider: str):
+     """
+     Remove API key for an external LLM provider.
+
+     Example usage:
+
+     \b
+     opengradient config remove-api-key --provider openai
+     """
+     config_key = f"{provider}_api_key"
+     if config_key in ctx.obj:
+         del ctx.obj[config_key]
+         save_og_config(ctx)
+         click.secho(f"✅ API key for {provider} has been removed", fg="green")
+     else:
+         click.echo(f"No API key found for {provider}")
+
+
  @cli.command()
  @click.option("--repo", "-r", "--name", "repo_name", required=True, help="Name of the new model repository")
  @click.option("--description", "-d", required=True, help="Description of the model")
@@ -352,33 +407,55 @@ def infer(ctx, model_cid: str, inference_mode: str, input_data, input_file: Path
      "--model",
      "-m",
      "model_cid",
-     type=click.Choice([e.value for e in LLM]),
      required=True,
-     help="CID of the LLM model to run inference on",
+     help="Model identifier (local model from LLM enum or external model like 'gpt-4o', 'gemini-2.5-flash-lite', etc.)",
  )
  @click.option(
-     "--mode", "inference_mode", type=click.Choice(LlmInferenceModes.keys()), default="VANILLA", help="Inference mode (default: VANILLA)"
+     "--mode",
+     "inference_mode",
+     type=click.Choice(LlmInferenceModes.keys()),
+     default="VANILLA",
+     help="Inference mode (only applies to local models, default: VANILLA)"
  )
  @click.option("--prompt", "-p", required=True, help="Input prompt for the LLM completion")
  @click.option("--max-tokens", type=int, default=100, help="Maximum number of tokens for LLM completion output")
  @click.option("--stop-sequence", multiple=True, help="Stop sequences for LLM")
  @click.option("--temperature", type=float, default=0.0, help="Temperature for LLM inference (0.0 to 1.0)")
+ @click.option("--local", is_flag=True, help="Force use of local model even if not in LLM enum")
  @click.pass_context
- def completion(ctx, model_cid: str, inference_mode: str, prompt: str, max_tokens: int, stop_sequence: List[str], temperature: float):
+ def completion(ctx, model_cid: str, inference_mode: str, prompt: str, max_tokens: int, stop_sequence: List[str], temperature: float, local: bool):
      """
-     Run completion inference on an LLM model.
+     Run completion inference on an LLM model (local or external).

-     This command runs a completion inference on the specified LLM model using the provided prompt and parameters.
+     This command supports both local OpenGradient models and external providers
+     (OpenAI, Anthropic, Google, etc.). For external models, make sure to set
+     the appropriate API key using 'opengradient config set-api-key'.

      Example usage:

      \b
-     opengradient completion --model meta-llama/Meta-Llama-3-8B-Instruct --prompt "Hello, how are you?" --max-tokens 50 --temperature 0.7
-     opengradient completion -m meta-llama/Meta-Llama-3-8B-Instruct -p "Translate to French: Hello world" --stop-sequence "." --stop-sequence "\\n"
+     # Local model
+     opengradient completion --model meta-llama/Meta-Llama-3-8B-Instruct --prompt "Hello, how are you?" --max-tokens 50
+
+     # External OpenAI model
+     opengradient completion --model gpt-4o --prompt "Translate to French: Hello world" --max-tokens 50
+
+     # External Anthropic model
+     opengradient completion --model claude-haiku-4-5-20251001 --prompt "Write a haiku about coding" --max-tokens 100
+
+     # External Google model
+     opengradient completion --model gemini-2.5-flash-lite --prompt "Explain quantum computing" --max-tokens 200
      """
      client: Client = ctx.obj["client"]
+
      try:
-         click.echo(f'Running LLM completion inference for model "{model_cid}"\n')
+         is_local = local or model_cid in [llm.value for llm in LLM]
+
+         if is_local:
+             click.echo(f'Running LLM completion inference for local model "{model_cid}"\n')
+         else:
+             click.echo(f'Running LLM completion inference for external model "{model_cid}"\n')
+
          completion_output = client.llm_completion(
              model_cid=model_cid,
              inference_mode=LlmInferenceModes[inference_mode],
@@ -386,23 +463,31 @@ def completion(ctx, model_cid: str, inference_mode: str, prompt: str, max_tokens
              max_tokens=max_tokens,
              stop_sequence=list(stop_sequence),
              temperature=temperature,
+             local_model=local,
          )

-         print_llm_completion_result(model_cid, completion_output.transaction_hash, completion_output.completion_output)
+         print_llm_completion_result(model_cid, completion_output.transaction_hash, completion_output.completion_output, is_local)
+
      except Exception as e:
          click.echo(f"Error running LLM completion: {str(e)}")


- def print_llm_completion_result(model_cid, tx_hash, llm_output):
+ def print_llm_completion_result(model_cid, tx_hash, llm_output, is_local=True):
      click.secho("✅ LLM completion Successful", fg="green", bold=True)
      click.echo("──────────────────────────────────────")
-     click.echo("Model CID: ", nl=False)
+     click.echo("Model: ", nl=False)
      click.secho(model_cid, fg="cyan", bold=True)
-     click.echo("Transaction hash: ", nl=False)
-     click.secho(tx_hash, fg="cyan", bold=True)
-     block_explorer_link = f"{DEFAULT_BLOCKCHAIN_EXPLORER}0x{tx_hash}"
-     click.echo("Block explorer link: ", nl=False)
-     click.secho(block_explorer_link, fg="blue", underline=True)
+
+     if is_local and tx_hash != "external":
+         click.echo("Transaction hash: ", nl=False)
+         click.secho(tx_hash, fg="cyan", bold=True)
+         block_explorer_link = f"{DEFAULT_BLOCKCHAIN_EXPLORER}0x{tx_hash}"
+         click.echo("Block explorer link: ", nl=False)
+         click.secho(block_explorer_link, fg="blue", underline=True)
+     else:
+         click.echo("Source: ", nl=False)
+         click.secho("External Provider", fg="cyan", bold=True)
+
      click.echo("──────────────────────────────────────")
      click.secho("LLM Output:", fg="yellow", bold=True)
      click.echo()
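For comparison, a sketch of the same external-model completion made directly through the SDK, assuming the llm_completion arguments used by the command above and LlmInferenceMode.VANILLA as the CLI's default mode:

    from opengradient.types import LlmInferenceMode

    # client as constructed earlier; external models skip the chain, so the
    # printing logic above treats transaction_hash == "external" as off-chain.
    result = client.llm_completion(
        model_cid="gpt-4o",                       # external model identifier
        inference_mode=LlmInferenceMode.VANILLA,  # only applies to local models
        prompt="Translate to French: Hello world",
        max_tokens=50,
        stop_sequence=[],
        temperature=0.0,
        local_model=False,
    )
    print(result.completion_output)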
@@ -415,12 +500,15 @@ def print_llm_completion_result(model_cid, tx_hash, llm_output):
      "--model",
      "-m",
      "model_cid",
-     type=click.Choice([e.value for e in LLM]),
      required=True,
-     help="CID of the LLM model to run inference on",
+     help="Model identifier (local model from LLM enum or external model like 'gpt-4o', 'gemini-2.5-flash-lite', etc.)",
  )
  @click.option(
-     "--mode", "inference_mode", type=click.Choice(LlmInferenceModes.keys()), default="VANILLA", help="Inference mode (default: VANILLA)"
+     "--mode",
+     "inference_mode",
+     type=click.Choice(LlmInferenceModes.keys()),
+     default="VANILLA",
+     help="Inference mode (only applies to local models, default: VANILLA)"
  )
  @click.option("--messages", type=str, required=False, help="Input messages for the chat inference in JSON format")
  @click.option(
@@ -434,9 +522,13 @@ def print_llm_completion_result(model_cid, tx_hash, llm_output):
  @click.option("--temperature", type=float, default=0.0, help="Temperature for LLM inference (0.0 to 1.0)")
  @click.option("--tools", type=str, default=None, help="Tool configurations in JSON format")
  @click.option(
-     "--tools-file", type=click.Path(exists=True, path_type=Path), required=False, help="Path to JSON file containing tool configurations"
+     "--tools-file",
+     type=click.Path(exists=True, path_type=Path),
+     required=False,
+     help="Path to JSON file containing tool configurations"
  )
  @click.option("--tool-choice", type=str, default="", help="Specific tool choice for the LLM")
+ @click.option("--local", is_flag=True, help="Force use of local model even if not in LLM enum")
  @click.pass_context
  def chat(
      ctx,
@@ -450,23 +542,37 @@ def chat(
      tools: Optional[str],
      tools_file: Optional[Path],
      tool_choice: Optional[str],
+     local: bool,
  ):
      """
-     Run chat inference on an LLM model.
+     Run chat inference on an LLM model (local or external).

-     This command runs a chat inference on the specified LLM model using the provided messages and parameters.
-
-     Tool call formatting is based on OpenAI documentation tool calls (see here: https://platform.openai.com/docs/guides/function-calling).
+     This command supports both local OpenGradient models and external providers.
+     Tool calling is supported for compatible models.

      Example usage:

      \b
-     opengradient chat --model meta-llama/Meta-Llama-3-8B-Instruct --messages '[{"role":"user","content":"hello"}]' --max-tokens 50 --temperature 0.7
-     opengradient chat --model mistralai/Mistral-7B-Instruct-v0.3 --messages-file messages.json --tools-file tools.json --max-tokens 200 --stop-sequence "." --stop-sequence "\\n"
+     # Local model
+     opengradient chat --model meta-llama/Meta-Llama-3-8B-Instruct --messages '[{"role":"user","content":"hello"}]' --max-tokens 50
+
+     # External OpenAI model with tools
+     opengradient chat --model gpt-4o --messages-file messages.json --tools-file tools.json --max-tokens 200
+
+     # External Anthropic model
+     opengradient chat --model claude-haiku-4-5-20251001 --messages '[{"role":"user","content":"Write a poem"}]' --max-tokens 100
      """
      client: Client = ctx.obj["client"]
+
      try:
-         click.echo(f'Running LLM chat inference for model "{model_cid}"\n')
+         is_local = local or model_cid in [llm.value for llm in LLM]
+
+         if is_local:
+             click.echo(f'Running LLM chat inference for local model "{model_cid}"\n')
+         else:
+             click.echo(f'Running LLM chat inference for external model "{model_cid}"\n')
+
+         # Parse messages
          if not messages and not messages_file:
              click.echo("Must specify either messages or messages-file")
              ctx.exit(1)
@@ -486,10 +592,10 @@ def chat(
          with messages_file.open("r") as file:
              messages = json.load(file)

-         # Parse tools if provided
+         # Parse tools
          if (tools and tools != "[]") and tools_file:
              click.echo("Cannot have both tools and tools-file")
-             click.exit(1)
+             ctx.exit(1)
              return

          parsed_tools = []
@@ -530,23 +636,37 @@ def chat(
              temperature=temperature,
              tools=parsed_tools,
              tool_choice=tool_choice,
+             local_model=local,
          )

-         print_llm_chat_result(model_cid, completion_output.transaction_hash, completion_output.finish_reason, completion_output.chat_output)
+         print_llm_chat_result(
+             model_cid,
+             completion_output.transaction_hash,
+             completion_output.finish_reason,
+             completion_output.chat_output,
+             is_local
+         )
+
      except Exception as e:
          click.echo(f"Error running LLM chat inference: {str(e)}")


- def print_llm_chat_result(model_cid, tx_hash, finish_reason, chat_output):
+ def print_llm_chat_result(model_cid, tx_hash, finish_reason, chat_output, is_local=True):
      click.secho("✅ LLM Chat Successful", fg="green", bold=True)
      click.echo("──────────────────────────────────────")
-     click.echo("Model CID: ", nl=False)
+     click.echo("Model: ", nl=False)
      click.secho(model_cid, fg="cyan", bold=True)
-     click.echo("Transaction hash: ", nl=False)
-     click.secho(tx_hash, fg="cyan", bold=True)
-     block_explorer_link = f"{DEFAULT_BLOCKCHAIN_EXPLORER}0x{tx_hash}"
-     click.echo("Block explorer link: ", nl=False)
-     click.secho(block_explorer_link, fg="blue", underline=True)
+
+     if is_local and tx_hash != "external":
+         click.echo("Transaction hash: ", nl=False)
+         click.secho(tx_hash, fg="cyan", bold=True)
+         block_explorer_link = f"{DEFAULT_BLOCKCHAIN_EXPLORER}0x{tx_hash}"
+         click.echo("Block explorer link: ", nl=False)
+         click.secho(block_explorer_link, fg="blue", underline=True)
+     else:
+         click.echo("Source: ", nl=False)
+         click.secho("External Provider", fg="cyan", bold=True)
+
      click.echo("──────────────────────────────────────")
      click.secho("Finish Reason: ", fg="yellow", bold=True)
      click.echo()
@@ -555,7 +675,6 @@ def print_llm_chat_result(model_cid, tx_hash, finish_reason, chat_output):
      click.secho("Chat Output:", fg="yellow", bold=True)
      click.echo()
      for key, value in chat_output.items():
-         # If the value doesn't give any information, don't print it
          if value != None and value != "" and value != "[]" and value != []:
              click.echo(f"{key}: {value}")
      click.echo()
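Finally, the chat counterpart as a sketch, assuming the llm_chat keyword arguments passed by the command above; the message and tool payloads are hypothetical and follow the OpenAI function-calling shape the original docstring pointed to:

    from opengradient.types import LlmInferenceMode

    messages = [{"role": "user", "content": "What's the weather in Paris?"}]
    tools = [{
        "type": "function",
        "function": {
            "name": "get_weather",  # hypothetical tool
            "description": "Look up current weather for a city",
            "parameters": {"type": "object", "properties": {"city": {"type": "string"}}},
        },
    }]

    result = client.llm_chat(
        model_cid="gpt-4o",
        inference_mode=LlmInferenceMode.VANILLA,  # only applies to local models
        messages=messages,
        max_tokens=200,
        stop_sequence=[],
        temperature=0.0,
        tools=tools,
        tool_choice="",
        local_model=False,
    )
    print(result.finish_reason, result.chat_output)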