opengradient 0.3.13__py3-none-any.whl → 0.3.15__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
opengradient/__init__.py CHANGED
@@ -5,7 +5,7 @@ from .defaults import DEFAULT_INFERENCE_CONTRACT_ADDRESS, DEFAULT_RPC_URL
5
5
  from .types import InferenceMode, LLM
6
6
  from . import llm
7
7
 
8
- __version__ = "0.3.13"
8
+ __version__ = "0.3.15"
9
9
 
10
10
  _client = None
11
11
 
opengradient/llm/chat.py CHANGED
@@ -10,6 +10,9 @@ from langchain.schema import (
10
10
  ChatResult,
11
11
  ChatGeneration,
12
12
  )
13
+ from langchain_core.messages.tool import (
14
+ ToolMessage
15
+ )
13
16
  from langchain_core.callbacks.manager import CallbackManagerForLLMRun
14
17
  from langchain_core.tools import BaseTool
15
18
  from langchain_core.messages import ToolCall
@@ -70,24 +73,36 @@ class OpenGradientChatModel(BaseChatModel):
70
73
  run_manager: Optional[CallbackManagerForLLMRun] = None,
71
74
  **kwargs: Any,
72
75
  ) -> ChatResult:
73
- chat_messages = []
76
+
77
+ sdk_messages = []
74
78
  for message in messages:
75
79
  if isinstance(message, SystemMessage):
76
- chat_messages.append({"role": "system", "content": message.content})
80
+ sdk_messages.append({"role": "system", "content": message.content})
77
81
  elif isinstance(message, HumanMessage):
78
- chat_messages.append({"role": "user", "content": message.content})
82
+ sdk_messages.append({"role": "user", "content": message.content})
79
83
  elif isinstance(message, AIMessage):
80
- chat_messages.append({"role": "assistant", "content": message.content})
84
+ sdk_messages.append({
85
+ "role": "assistant",
86
+ "content": message.content,
87
+ "tool_calls": [{
88
+ "id": call["id"],
89
+ "name": call["name"],
90
+ "arguments": json.dumps(call["args"])
91
+ } for call in message.tool_calls]})
92
+ elif isinstance(message, ToolMessage):
93
+ sdk_messages.append({"role": "tool", "content": message.content, "tool_call_id": message.tool_call_id})
94
+ else:
95
+ raise ValueError(f"Unexpected message type: {message}")
81
96
 
82
- tx_hash, finish_reason, chat_response = self.client.llm_chat(
97
+ _, finish_reason, chat_response = self.client.llm_chat(
83
98
  model_cid=self.model_cid,
84
- messages=chat_messages,
99
+ messages=sdk_messages,
85
100
  stop_sequence=stop,
86
101
  max_tokens=self.max_tokens,
87
102
  tools=self.tools
88
103
  )
89
104
 
90
- if "tool_calls" in chat_response:
105
+ if "tool_calls" in chat_response and chat_response["tool_calls"]:
91
106
  tool_calls = []
92
107
  for tool_call in chat_response["tool_calls"]:
93
108
  tool_calls.append(
@@ -104,7 +119,7 @@ class OpenGradientChatModel(BaseChatModel):
104
119
  )
105
120
  else:
106
121
  message = AIMessage(content=chat_response["content"])
107
-
122
+
108
123
  return ChatResult(
109
124
  generations=[ChatGeneration(
110
125
  message=message,
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: opengradient
3
- Version: 0.3.13
3
+ Version: 0.3.15
4
4
  Summary: Python SDK for OpenGradient decentralized model management & inference services
5
5
  Project-URL: Homepage, https://opengradient.ai
6
6
  Author-email: OpenGradient <oliver@opengradient.ai>
@@ -183,6 +183,39 @@ og.infer(model_cid, model_inputs, inference_mode)
183
183
  ```
184
184
  - inference mode can be `VANILLA`, `ZKML`, or `TEE`
185
185
 
186
+ ### LLM Inference
187
+ #### LLM Completion
188
+ ```python
189
+ tx_hash, response = og.llm_completion(
190
+ model_cid='meta-llama/Meta-Llama-3-8B-Instruct',
191
+ prompt="Translate the following English text to French: 'Hello, how are you?'",
192
+ max_tokens=50,
193
+ temperature=0.0
194
+ )
195
+ ```
196
+
197
+ #### LLM Chat
198
+ ```python
199
+ # create messages history
200
+ messages = [
201
+ {
202
+ "role": "system",
203
+ "content": "You are a helpful AI assistant.",
204
+ "name": "HAL"
205
+ },
206
+ {
207
+ "role": "user",
208
+ "content": "Hello! How are you doing? Can you repeat my name?",
209
+ }]
210
+
211
+ # run LLM inference
212
+ tx_hash, finish_reason, message = og.llm_chat(
213
+ model_cid=og.LLM.MISTRAL_7B_INSTRUCT_V3,
214
+ messages=messages
215
+ )
216
+ ```
217
+
218
+
186
219
 
187
220
  ## Using the CLI
188
221
 
@@ -223,8 +256,18 @@ opengradient infer QmbUqS93oc4JTLMHwpVxsE39mhNxy6hpf6Py3r9oANr8aZ VANILLA --inpu
223
256
  ```
224
257
 
225
258
  #### Run LLM Inference
259
+ We also have explicit support for using LLMs through the completion and chat commands in the CLI.
260
+
261
+ For example, you can run a completion inference with Llama-3 using the following command:
262
+
263
+ ``` bash
264
+ opengradient completion --model "meta-llama/Meta-Llama-3-8B-Instruct" --prompt "hello who are you?" --max-tokens 50
265
+ ```
266
+
267
+ Or you can use files instead of text input in order to simplify your command:
268
+
226
269
  ```bash
227
- opengradient llm --model "meta-llama/Meta-Llama-3-8B-Instruct" --prompt "Translate to French: Hello, how are you?" --max-tokens 50 --temperature 0.7
270
+ opengradient chat --model "mistralai/Mistral-7B-Instruct-v0.3" --messages-file messages.json --tools-file tools.json --max-tokens 200
228
271
  ```
229
272
 
230
273
  For more information read the OpenGradient [documentation](https://docs.opengradient.ai/).
@@ -1,4 +1,4 @@
1
- opengradient/__init__.py,sha256=5aeZn5QVFepwTyjcO7GBHGlYUVwJCQa8QWYSHUaHqdM,4015
1
+ opengradient/__init__.py,sha256=ATwimOCIZli1u5WKTMfhtH3VDeACqgK29BFAihyquvM,4015
2
2
  opengradient/account.py,sha256=2B7rtCXQDX-yF4U69h8B9-OUreJU4IqoGXG_1Hn9nWs,1150
3
3
  opengradient/cli.py,sha256=kdYR_AFKHV99HtO_son7vHpM5jWVZe8FO0iMWxJ7pJE,24444
4
4
  opengradient/client.py,sha256=RdlTz60NJKVJihYY6oVYLfNOg6RGnJbfG-2UIxUk-ws,37069
@@ -8,13 +8,13 @@ opengradient/types.py,sha256=QTEsygwT5AnIf8Dg9mexvVUe49nCo9N0pgZOOIp3trc,2214
8
8
  opengradient/utils.py,sha256=lUDPmyPqLwpZI-owyN6Rm3QvUjOn5pLN5G1QyriVm-E,6994
9
9
  opengradient/abi/inference.abi,sha256=MR5u9npZ-Yx2EqRW17_M-UnGgFF3mMEMepOwaZ-Bkgc,7040
10
10
  opengradient/llm/__init__.py,sha256=n_11WFPoU8YtGc6wg9cK6gEy9zBISf1183Loip3dAbI,62
11
- opengradient/llm/chat.py,sha256=Ia-K0og-DkZ1_IFgbgscn_gpLMXZX54F3fvfSrUq5M8,3849
11
+ opengradient/llm/chat.py,sha256=F32yN1o8EvRbzZSJkUwI0-FmSVAWMuMTI9ho7wgW5hk,4470
12
12
  opengradient/proto/__init__.py,sha256=AhaSmrqV0TXGzCKaoPV8-XUvqs2fGAJBM2aOmDpkNbE,55
13
13
  opengradient/proto/infer.proto,sha256=13eaEMcppxkBF8yChptsX9HooWFwJKze7oLZNl-LEb8,1217
14
14
  opengradient/proto/infer_pb2.py,sha256=wg2vjLQCNv6HRhYuIqgj9xivi3nO4IPz6E5wh2dhDqY,3446
15
15
  opengradient/proto/infer_pb2_grpc.py,sha256=y5GYwD1EdNs892xx58jdfyA0fO5QC7k3uZOtImTHMiE,6891
16
- opengradient-0.3.13.dist-info/METADATA,sha256=XiQ5o2HcbeKfAX27tiHRQ9oQyJNYmkQSnmLPtl_Elz8,7612
17
- opengradient-0.3.13.dist-info/WHEEL,sha256=C2FUgwZgiLbznR-k0b_5k3Ai_1aASOXDss3lzCUsUug,87
18
- opengradient-0.3.13.dist-info/entry_points.txt,sha256=yUKTaJx8RXnybkob0J62wVBiCp_1agVbgw9uzsmaeJc,54
19
- opengradient-0.3.13.dist-info/licenses/LICENSE,sha256=xEcvQ3AxZOtDkrqkys2Mm6Y9diEnaSeQRKvxi-JGnNA,1069
20
- opengradient-0.3.13.dist-info/RECORD,,
16
+ opengradient-0.3.15.dist-info/METADATA,sha256=j1LRy_faAg69fDQNEJhByO7kiPtd6jO-DQbNhDk99h0,8692
17
+ opengradient-0.3.15.dist-info/WHEEL,sha256=C2FUgwZgiLbznR-k0b_5k3Ai_1aASOXDss3lzCUsUug,87
18
+ opengradient-0.3.15.dist-info/entry_points.txt,sha256=yUKTaJx8RXnybkob0J62wVBiCp_1agVbgw9uzsmaeJc,54
19
+ opengradient-0.3.15.dist-info/licenses/LICENSE,sha256=xEcvQ3AxZOtDkrqkys2Mm6Y9diEnaSeQRKvxi-JGnNA,1069
20
+ opengradient-0.3.15.dist-info/RECORD,,