opengradient 0.3.14__tar.gz → 0.3.15__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (20) hide show
  1. {opengradient-0.3.14 → opengradient-0.3.15}/PKG-INFO +45 -2
  2. {opengradient-0.3.14 → opengradient-0.3.15}/README.md +44 -1
  3. {opengradient-0.3.14 → opengradient-0.3.15}/pyproject.toml +1 -1
  4. {opengradient-0.3.14 → opengradient-0.3.15}/src/opengradient/__init__.py +1 -1
  5. {opengradient-0.3.14 → opengradient-0.3.15}/.gitignore +0 -0
  6. {opengradient-0.3.14 → opengradient-0.3.15}/LICENSE +0 -0
  7. {opengradient-0.3.14 → opengradient-0.3.15}/src/opengradient/abi/inference.abi +0 -0
  8. {opengradient-0.3.14 → opengradient-0.3.15}/src/opengradient/account.py +0 -0
  9. {opengradient-0.3.14 → opengradient-0.3.15}/src/opengradient/cli.py +0 -0
  10. {opengradient-0.3.14 → opengradient-0.3.15}/src/opengradient/client.py +0 -0
  11. {opengradient-0.3.14 → opengradient-0.3.15}/src/opengradient/defaults.py +0 -0
  12. {opengradient-0.3.14 → opengradient-0.3.15}/src/opengradient/exceptions.py +0 -0
  13. {opengradient-0.3.14 → opengradient-0.3.15}/src/opengradient/llm/__init__.py +0 -0
  14. {opengradient-0.3.14 → opengradient-0.3.15}/src/opengradient/llm/chat.py +0 -0
  15. {opengradient-0.3.14 → opengradient-0.3.15}/src/opengradient/proto/__init__.py +0 -0
  16. {opengradient-0.3.14 → opengradient-0.3.15}/src/opengradient/proto/infer.proto +0 -0
  17. {opengradient-0.3.14 → opengradient-0.3.15}/src/opengradient/proto/infer_pb2.py +0 -0
  18. {opengradient-0.3.14 → opengradient-0.3.15}/src/opengradient/proto/infer_pb2_grpc.py +0 -0
  19. {opengradient-0.3.14 → opengradient-0.3.15}/src/opengradient/types.py +0 -0
  20. {opengradient-0.3.14 → opengradient-0.3.15}/src/opengradient/utils.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: opengradient
3
- Version: 0.3.14
3
+ Version: 0.3.15
4
4
  Summary: Python SDK for OpenGradient decentralized model management & inference services
5
5
  Project-URL: Homepage, https://opengradient.ai
6
6
  Author-email: OpenGradient <oliver@opengradient.ai>
@@ -183,6 +183,39 @@ og.infer(model_cid, model_inputs, inference_mode)
183
183
  ```
184
184
  - inference mode can be `VANILLA`, `ZKML`, or `TEE`
185
185
 
186
+ ### LLM Inference
187
+ #### LLM Completion
188
+ ```python
189
+ tx_hash, response = og.llm_completion(
190
+ model_cid='meta-llama/Meta-Llama-3-8B-Instruct',
191
+ prompt="Translate the following English text to French: 'Hello, how are you?'",
192
+ max_tokens=50,
193
+ temperature=0.0
194
+ )
195
+ ```
196
+
197
+ #### LLM Chat
198
+ ```python
199
+ # create messages history
200
+ messages = [
201
+ {
202
+ "role": "system",
203
+ "content": "You are a helpful AI assistant.",
204
+ "name": "HAL"
205
+ },
206
+ {
207
+ "role": "user",
208
+ "content": "Hello! How are you doing? Can you repeat my name?",
209
+ }]
210
+
211
+ # run LLM inference
212
+ tx_hash, finish_reason, message = og.llm_chat(
213
+ model_cid=og.LLM.MISTRAL_7B_INSTRUCT_V3,
214
+ messages=messages
215
+ )
216
+ ```
217
+
218
+
186
219
 
187
220
  ## Using the CLI
188
221
 
@@ -223,8 +256,18 @@ opengradient infer QmbUqS93oc4JTLMHwpVxsE39mhNxy6hpf6Py3r9oANr8aZ VANILLA --inpu
223
256
  ```
224
257
 
225
258
  #### Run LLM Inference
259
+ We also have explicit support for using LLMs through the completion and chat commands in the CLI.
260
+
261
+ For example, you can run a completion inference with Llama-3 using the following command:
262
+
263
+ ```bash
264
+ opengradient completion --model "meta-llama/Meta-Llama-3-8B-Instruct" --prompt "hello who are you?" --max-tokens 50
265
+ ```
266
+
267
+ Or you can use files instead of text input in order to simplify your command:
268
+
226
269
  ```bash
227
- opengradient llm --model "meta-llama/Meta-Llama-3-8B-Instruct" --prompt "Translate to French: Hello, how are you?" --max-tokens 50 --temperature 0.7
270
+ opengradient chat --model "mistralai/Mistral-7B-Instruct-v0.3" --messages-file messages.json --tools-file tools.json --max-tokens 200
228
271
  ```
229
272
 
230
273
  For more information read the OpenGradient [documentation](https://docs.opengradient.ai/).
@@ -51,6 +51,39 @@ og.infer(model_cid, model_inputs, inference_mode)
51
51
  ```
52
52
  - inference mode can be `VANILLA`, `ZKML`, or `TEE`
53
53
 
54
+ ### LLM Inference
55
+ #### LLM Completion
56
+ ```python
57
+ tx_hash, response = og.llm_completion(
58
+ model_cid='meta-llama/Meta-Llama-3-8B-Instruct',
59
+ prompt="Translate the following English text to French: 'Hello, how are you?'",
60
+ max_tokens=50,
61
+ temperature=0.0
62
+ )
63
+ ```
64
+
65
+ #### LLM Chat
66
+ ```python
67
+ # create messages history
68
+ messages = [
69
+ {
70
+ "role": "system",
71
+ "content": "You are a helpful AI assistant.",
72
+ "name": "HAL"
73
+ },
74
+ {
75
+ "role": "user",
76
+ "content": "Hello! How are you doing? Can you repeat my name?",
77
+ }]
78
+
79
+ # run LLM inference
80
+ tx_hash, finish_reason, message = og.llm_chat(
81
+ model_cid=og.LLM.MISTRAL_7B_INSTRUCT_V3,
82
+ messages=messages
83
+ )
84
+ ```
85
+
86
+
54
87
 
55
88
  ## Using the CLI
56
89
 
@@ -91,8 +124,18 @@ opengradient infer QmbUqS93oc4JTLMHwpVxsE39mhNxy6hpf6Py3r9oANr8aZ VANILLA --inpu
91
124
  ```
92
125
 
93
126
  #### Run LLM Inference
127
+ We also have explicit support for using LLMs through the completion and chat commands in the CLI.
128
+
129
+ For example, you can run a completion inference with Llama-3 using the following command:
130
+
131
+ ```bash
132
+ opengradient completion --model "meta-llama/Meta-Llama-3-8B-Instruct" --prompt "hello who are you?" --max-tokens 50
133
+ ```
134
+
135
+ Or you can use files instead of text input in order to simplify your command:
136
+
94
137
  ```bash
95
- opengradient llm --model "meta-llama/Meta-Llama-3-8B-Instruct" --prompt "Translate to French: Hello, how are you?" --max-tokens 50 --temperature 0.7
138
+ opengradient chat --model "mistralai/Mistral-7B-Instruct-v0.3" --messages-file messages.json --tools-file tools.json --max-tokens 200
96
139
  ```
97
140
 
98
141
  For more information read the OpenGradient [documentation](https://docs.opengradient.ai/).
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
4
4
 
5
5
  [project]
6
6
  name = "opengradient"
7
- version = "0.3.14"
7
+ version = "0.3.15"
8
8
  description = "Python SDK for OpenGradient decentralized model management & inference services"
9
9
  authors = [{name = "OpenGradient", email = "oliver@opengradient.ai"}]
10
10
  license = {file = "LICENSE"}
@@ -5,7 +5,7 @@ from .defaults import DEFAULT_INFERENCE_CONTRACT_ADDRESS, DEFAULT_RPC_URL
5
5
  from .types import InferenceMode, LLM
6
6
  from . import llm
7
7
 
8
- __version__ = "0.3.14"
8
+ __version__ = "0.3.15"
9
9
 
10
10
  _client = None
11
11
 
File without changes
File without changes