opengradient 0.3.5__tar.gz → 0.3.8__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (22)
  1. {opengradient-0.3.5/src/opengradient.egg-info → opengradient-0.3.8}/PKG-INFO +2 -6
  2. {opengradient-0.3.5 → opengradient-0.3.8}/pyproject.toml +11 -6
  3. {opengradient-0.3.5 → opengradient-0.3.8}/src/opengradient/__init__.py +23 -23
  4. opengradient-0.3.8/src/opengradient/abi/inference.abi +1 -0
  5. {opengradient-0.3.5 → opengradient-0.3.8}/src/opengradient/account.py +4 -3
  6. {opengradient-0.3.5 → opengradient-0.3.8}/src/opengradient/cli.py +185 -31
  7. {opengradient-0.3.5 → opengradient-0.3.8}/src/opengradient/client.py +211 -51
  8. {opengradient-0.3.5 → opengradient-0.3.8}/src/opengradient/defaults.py +1 -1
  9. {opengradient-0.3.5 → opengradient-0.3.8}/src/opengradient/types.py +12 -6
  10. {opengradient-0.3.5 → opengradient-0.3.8}/src/opengradient/utils.py +11 -5
  11. {opengradient-0.3.5 → opengradient-0.3.8/src/opengradient.egg-info}/PKG-INFO +2 -6
  12. {opengradient-0.3.5 → opengradient-0.3.8}/src/opengradient.egg-info/SOURCES.txt +1 -2
  13. opengradient-0.3.5/src/opengradient/abi/inference.abi +0 -1
  14. opengradient-0.3.5/src/opengradient/abi/llm.abi +0 -1
  15. {opengradient-0.3.5 → opengradient-0.3.8}/LICENSE +0 -0
  16. {opengradient-0.3.5 → opengradient-0.3.8}/README.md +0 -0
  17. {opengradient-0.3.5 → opengradient-0.3.8}/setup.cfg +0 -0
  18. {opengradient-0.3.5 → opengradient-0.3.8}/src/opengradient/exceptions.py +0 -0
  19. {opengradient-0.3.5 → opengradient-0.3.8}/src/opengradient.egg-info/dependency_links.txt +0 -0
  20. {opengradient-0.3.5 → opengradient-0.3.8}/src/opengradient.egg-info/entry_points.txt +0 -0
  21. {opengradient-0.3.5 → opengradient-0.3.8}/src/opengradient.egg-info/requires.txt +0 -0
  22. {opengradient-0.3.5 → opengradient-0.3.8}/src/opengradient.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: opengradient
- Version: 0.3.5
+ Version: 0.3.8
  Summary: Python SDK for OpenGradient decentralized model management & inference services
  Author-email: OpenGradient <oliver@opengradient.ai>
  License: MIT License
@@ -29,14 +29,10 @@ Project-URL: Homepage, https://opengradient.ai
  Classifier: Development Status :: 3 - Alpha
  Classifier: Intended Audience :: Developers
  Classifier: License :: OSI Approved :: MIT License
- Classifier: Programming Language :: Python :: 3
- Classifier: Programming Language :: Python :: 3.7
- Classifier: Programming Language :: Python :: 3.8
- Classifier: Programming Language :: Python :: 3.9
  Classifier: Programming Language :: Python :: 3.10
  Classifier: Programming Language :: Python :: 3.11
  Classifier: Programming Language :: Python :: 3.12
- Requires-Python: >=3.7
+ Requires-Python: >=3.10
  Description-Content-Type: text/markdown
  License-File: LICENSE
  Requires-Dist: aiohappyeyeballs==2.4.3
@@ -4,20 +4,16 @@ build-backend = "setuptools.build_meta"

  [project]
  name = "opengradient"
- version = "0.3.5"
+ version = "0.3.8"
  description = "Python SDK for OpenGradient decentralized model management & inference services"
  authors = [{name = "OpenGradient", email = "oliver@opengradient.ai"}]
  license = {file = "LICENSE"}
  readme = "README.md"
- requires-python = ">=3.7"
+ requires-python = ">=3.10"
  classifiers = [
      "Development Status :: 3 - Alpha",
      "Intended Audience :: Developers",
      "License :: OSI Approved :: MIT License",
-     "Programming Language :: Python :: 3",
-     "Programming Language :: Python :: 3.7",
-     "Programming Language :: Python :: 3.8",
-     "Programming Language :: Python :: 3.9",
      "Programming Language :: Python :: 3.10",
      "Programming Language :: Python :: 3.11",
      "Programming Language :: Python :: 3.12",
@@ -140,3 +136,12 @@ exclude = ["tests*", "stresstest*"]

  [tool.setuptools.exclude-package-data]
  "*" = ["*.ipynb", "*.pyc", "*.pyo", ".gitignore", "requirements.txt", "conftest.py"]
+
+ [tool.ruff]
+ line-length = 140
+ target-version = "py310" # Specify your Python version
+ select = ["E", "F", "I", "N"]
+ ignore = []
+
+ [tool.ruff.mccabe]
+ max-complexity = 10
@@ -1,11 +1,10 @@
+ from typing import Dict, List, Optional, Tuple
+
  from .client import Client
- from .defaults import *
- from .types import InferenceMode
- from typing import List, Dict, Optional, Tuple
- import os
+ from .defaults import DEFAULT_INFERENCE_CONTRACT_ADDRESS, DEFAULT_RPC_URL
+ from .types import InferenceMode, LLM

- __version__ = "0.3.5"
- __all__ = ['init', 'upload', 'create_model', 'create_version', 'infer', 'infer_llm', 'login', 'list_files', 'InferenceMode']
+ __version__ = "0.3.8"

  _client = None

@@ -45,31 +44,32 @@ def infer(model_cid, inference_mode, model_input):
          raise RuntimeError("OpenGradient client not initialized. Call og.init() first.")
      return _client.infer(model_cid, inference_mode, model_input)

- def infer_llm(model_cid: str,
-               prompt: str,
-               max_tokens: int = 100,
-               stop_sequence: Optional[List[str]] = None,
-               temperature: float = 0.0) -> Tuple[str, str]:
+ def llm_completion(model_cid: LLM,
+                    prompt: str,
+                    max_tokens: int = 100,
+                    stop_sequence: Optional[List[str]] = None,
+                    temperature: float = 0.0) -> Tuple[str, str]:
      if _client is None:
          raise RuntimeError("OpenGradient client not initialized. Call og.init() first.")
-     return _client.infer_llm(model_cid, prompt, max_tokens, stop_sequence, temperature)
+     return _client.llm_completion(model_cid, prompt, max_tokens, stop_sequence, temperature)

- def login(email: str, password: str):
+ def llm_chat(model_cid: LLM,
+              messages: List[Dict],
+              max_tokens: int = 100,
+              stop_sequence: Optional[List[str]] = None,
+              temperature: float = 0.0,
+              tools: Optional[List[Dict]] = None,
+              tool_choice: Optional[str] = None):
      if _client is None:
          raise RuntimeError("OpenGradient client not initialized. Call og.init() first.")
-     return _client.login(email, password)
+     return _client.llm_chat(model_cid, messages, max_tokens, stop_sequence, temperature, tools, tool_choice)

- def list_files(model_name: str, version: str) -> List[Dict]:
+ def login(email: str, password: str):
      if _client is None:
          raise RuntimeError("OpenGradient client not initialized. Call og.init() first.")
-     return _client.list_files(model_name, version)
+     return _client.login(email, password)

- def create_model_from_huggingface(repo_id: str, model_name: str, model_desc: str):
+ def list_files(model_name: str, version: str) -> List[Dict]:
      if _client is None:
          raise RuntimeError("OpenGradient client not initialized. Call og.init() first.")
-
-     with tempfile.TemporaryDirectory() as temp_dir:
-         snapshot_download(repo_id, local_dir=temp_dir)
-         result = create_model(model_name, model_desc, temp_dir)
-
-     return result
+     return _client.list_files(model_name, version)
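
To see the renamed module-level API in context, a minimal usage sketch follows. og.init() is referenced by the error messages above, but its signature is not part of this diff, so the keyword arguments below are illustrative assumptions rather than the documented interface:

    import opengradient as og

    # Hypothetical arguments; init()'s real signature is not shown in this diff.
    og.init(private_key="0x...", rpc_url=og.DEFAULT_RPC_URL)

    # Completion inference (replaces og.infer_llm from 0.3.5)
    tx_hash, answer = og.llm_completion(
        model_cid=og.LLM.META_LLAMA3_8B_INSTRUCT,
        prompt="Hello, how are you?",
        max_tokens=50,
    )

    # Chat inference, new in this release; the client returns a 3-tuple
    tx_hash, finish_reason, message = og.llm_chat(
        model_cid=og.LLM.META_LLAMA3_8B_INSTRUCT,
        messages=[{"role": "user", "content": "Hello!"}],
    )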
@@ -0,0 +1 @@
+ [{"anonymous":false,"inputs":[{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"},{"internalType":"bool","name":"is_simulation_result","type":"bool"}],"indexed":false,"internalType":"struct ModelOutput","name":"output","type":"tuple"}],"name":"InferenceResult","type":"event"},{"anonymous":false,"inputs":[{"components":[{"internalType":"string","name":"finish_reason","type":"string"},{"components":[{"internalType":"string","name":"role","type":"string"},{"internalType":"string","name":"content","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"tool_call_id","type":"string"},{"components":[{"internalType":"string","name":"id","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"arguments","type":"string"}],"internalType":"struct ToolCall[]","name":"tool_calls","type":"tuple[]"}],"internalType":"struct ChatMessage","name":"message","type":"tuple"}],"indexed":false,"internalType":"struct LLMChatResponse","name":"response","type":"tuple"}],"name":"LLMChatResult","type":"event"},{"anonymous":false,"inputs":[{"components":[{"internalType":"string","name":"answer","type":"string"}],"indexed":false,"internalType":"struct LLMCompletionResponse","name":"response","type":"tuple"}],"name":"LLMCompletionResult","type":"event"},{"inputs":[{"internalType":"string","name":"modelId","type":"string"},{"internalType":"enum ModelInferenceMode","name":"inferenceMode","type":"uint8"},{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"}],"internalType":"struct ModelInput","name":"modelInput","type":"tuple"}],"name":"run","outputs":[{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"},{"internalType":"bool","name":"is_simulation_result","type":"bool"}],"internalType":"struct ModelOutput","name":"","type":"tuple"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"components":[{"internalType":"enum LLMInferenceMode","name":"mode","type":"uint8"},{"internalType":"string","name":"modelCID","type":"string"},{"components":[{"internalType":"string","name":"role","type":"string"},{"internalType":"string","name":"content","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"tool_call_id","type":"string"},{"components":[{"internalType":"string","name":"id","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"arguments","type":"string"}],"internalType":"struct ToolCall[]","name":"tool_calls","type":"tuple[]"}],"internalType":"struct ChatMessage[]","name":"messages","type":"tuple[]"},{"components":[{"internalType":"string","name":"description","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"parameters","type":"string"}],"internalType":"struct ToolDefinition[]","name":"tools","type":"tuple[]"},{"internalType":"string","name":"tool_choice","type":"string"},{"internalType":"uint32","name":"max_tokens","type":"uint32"},{"internalType":"string[]","name":"stop_sequence","type":"string[]"},{"internalType":"uint32","name":"temperature","type":"uint32"}],"internalType":"struct LLMChatRequest","name":"request","type":"tuple"}],"name":"runLLMChat","outputs":[{"components":[{"internalType":"string","name":"finish_reason","type":"string"},{"components":[{"internalType":"string","name":"role","type":"string"},{"internalType":"string","name":"content","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"tool_call_id","type":"string"},{"components":[{"internalType":"string","name":"id","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"arguments","type":"string"}],"internalType":"struct ToolCall[]","name":"tool_calls","type":"tuple[]"}],"internalType":"struct ChatMessage","name":"message","type":"tuple"}],"internalType":"struct LLMChatResponse","name":"","type":"tuple"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"components":[{"internalType":"enum LLMInferenceMode","name":"mode","type":"uint8"},{"internalType":"string","name":"modelCID","type":"string"},{"internalType":"string","name":"prompt","type":"string"},{"internalType":"uint32","name":"max_tokens","type":"uint32"},{"internalType":"string[]","name":"stop_sequence","type":"string[]"},{"internalType":"uint32","name":"temperature","type":"uint32"}],"internalType":"struct LLMCompletionRequest","name":"request","type":"tuple"}],"name":"runLLMCompletion","outputs":[{"components":[{"internalType":"string","name":"answer","type":"string"}],"internalType":"struct LLMCompletionResponse","name":"","type":"tuple"}],"stateMutability":"nonpayable","type":"function"}]
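
The consolidated inference.abi now carries all three entry points (run, runLLMCompletion, runLLMChat) and their result events, replacing the separate llm.abi removed further down. A minimal sketch of loading it with web3.py against the defaults from defaults.py; the file path assumes a local checkout of the package source, and reachability of the testnet endpoint is an assumption:

    import json
    from web3 import Web3

    with open("src/opengradient/abi/inference.abi") as f:
        abi = json.load(f)

    w3 = Web3(Web3.HTTPProvider("http://18.218.115.248:8545"))
    contract = w3.eth.contract(
        address="0xF78F7d5a7e9f484f0924Cc21347029715bD3B8f4",
        abi=abi,
    )

    # Lists the run, runLLMCompletion, and runLLMChat contract functions
    print(contract.all_functions())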
@@ -1,8 +1,9 @@
- from eth_account import Account
+ import hashlib
+ import os
  import secrets
  from collections import namedtuple
- import os
- import hashlib
+
+ from eth_account import Account

  EthAccount = namedtuple('EthAccount', ['address', 'private_key'])

@@ -1,18 +1,26 @@
- import click
- import opengradient
- import json
  import ast
- from pathlib import Path
+ import json
  import logging
- from pprint import pformat
- from typing import List
  import webbrowser
- import sys
+ from pathlib import Path
+ from typing import List, Dict, Optional
+ from enum import Enum
+ from . import types

+ import click
+
+ import opengradient
+
+ from .account import EthAccount, generate_eth_account
  from .client import Client
- from .defaults import *
+ from .defaults import (
+     DEFAULT_BLOCKCHAIN_EXPLORER,
+     DEFAULT_HUB_SIGNUP_URL,
+     DEFAULT_INFERENCE_CONTRACT_ADDRESS,
+     DEFAULT_OG_FAUCET_URL,
+     DEFAULT_RPC_URL,
+ )
  from .types import InferenceMode
- from .account import EthAccount, generate_eth_account

  OG_CONFIG_FILE = Path.home() / '.opengradient_config.json'

@@ -50,13 +58,19 @@ class DictParamType(click.ParamType):

  Dict = DictParamType()

- # Support inference modes
+ # Supported inference modes
  InferenceModes = {
      "VANILLA": InferenceMode.VANILLA,
      "ZKML": InferenceMode.ZKML,
      "TEE": InferenceMode.TEE,
  }

+ # Supported LLMs
+ LlmModels = {
+     "meta-llama/Meta-Llama-3-8B-Instruct",
+     "meta-llama/Llama-3.2-3B-Instruct",
+     "mistralai/Mistral-7B-Instruct-v0.3"
+ }

  def initialize_config(ctx):
      """Interactively initialize OpenGradient config"""
@@ -307,11 +321,11 @@ def infer(ctx, model_cid: str, inference_mode: str, input_data, input_file: Path
      click.echo() # Add a newline for better spacing
      click.secho("✅ Transaction successful", fg="green", bold=True)
      click.echo("──────────────────────────────────────")
-     click.echo(f"Transaction hash: ", nl=False)
+     click.echo("Transaction hash: ", nl=False)
      click.secho(tx_hash, fg="cyan", bold=True)

      block_explorer_link = f"{DEFAULT_BLOCKCHAIN_EXPLORER}0x{tx_hash}"
-     click.echo(f"Block explorer link: ", nl=False)
+     click.echo("Block explorer link: ", nl=False)
      click.secho(block_explorer_link, fg="blue", underline=True)
      click.echo()

@@ -325,28 +339,28 @@ def infer(ctx, model_cid: str, inference_mode: str, input_data, input_file: Path
      click.echo(f"Error running inference: {str(e)}")

  @cli.command()
- @click.option('--model', '-m', 'model_cid', required=True, help='CID of the LLM model to run inference on')
- @click.option('--prompt', '-p', required=True, help='Input prompt for the LLM')
- @click.option('--max-tokens', type=int, default=100, help='Maximum number of tokens for LLM output')
+ @click.option('--model', '-m', 'model_cid', type=click.Choice(LlmModels), required=True, help='CID of the LLM model to run inference on')
+ @click.option('--prompt', '-p', required=True, help='Input prompt for the LLM completion')
+ @click.option('--max-tokens', type=int, default=100, help='Maximum number of tokens for LLM completion output')
  @click.option('--stop-sequence', multiple=True, help='Stop sequences for LLM')
  @click.option('--temperature', type=float, default=0.0, help='Temperature for LLM inference (0.0 to 1.0)')
  @click.pass_context
- def llm(ctx, model_cid: str, prompt: str, max_tokens: int, stop_sequence: List[str], temperature: float):
+ def completion(ctx, model_cid: str, prompt: str, max_tokens: int, stop_sequence: List[str], temperature: float):
      """
-     Run inference on an LLM model.
+     Run completion inference on an LLM model.

-     This command runs inference on the specified LLM model using the provided prompt and parameters.
+     This command runs a completion inference on the specified LLM model using the provided prompt and parameters.

      Example usage:

      \b
-     opengradient llm --model Qm... --prompt "Hello, how are you?" --max-tokens 50 --temperature 0.7
-     opengradient llm -m Qm... -p "Translate to French: Hello world" --stop-sequence "." --stop-sequence "\n"
+     opengradient completion --model meta-llama/Meta-Llama-3-8B-Instruct --prompt "Hello, how are you?" --max-tokens 50 --temperature 0.7
+     opengradient completion -m meta-llama/Meta-Llama-3-8B-Instruct -p "Translate to French: Hello world" --stop-sequence "." --stop-sequence "\n"
      """
      client: Client = ctx.obj['client']
      try:
-         click.echo(f"Running LLM inference for model \"{model_cid}\"\n")
-         tx_hash, llm_output = client.infer_llm(
+         click.echo(f"Running LLM completion inference for model \"{model_cid}\"\n")
+         tx_hash, llm_output = client.llm_completion(
              model_cid=model_cid,
              prompt=prompt,
              max_tokens=max_tokens,
@@ -354,19 +368,19 @@ def llm(ctx, model_cid: str, prompt: str, max_tokens: int, stop_sequence: List[s
              temperature=temperature
          )

-         print_llm_inference_result(model_cid, tx_hash, llm_output)
+         print_llm_completion_result(model_cid, tx_hash, llm_output)
      except Exception as e:
-         click.echo(f"Error running LLM inference: {str(e)}")
+         click.echo(f"Error running LLM completion: {str(e)}")

- def print_llm_inference_result(model_cid, tx_hash, llm_output):
-     click.secho(f"✅ LLM Inference Successful", fg="green", bold=True)
+ def print_llm_completion_result(model_cid, tx_hash, llm_output):
+     click.secho("✅ LLM completion Successful", fg="green", bold=True)
      click.echo("──────────────────────────────────────")
-     click.echo(f"Model CID: ", nl=False)
+     click.echo("Model CID: ", nl=False)
      click.secho(model_cid, fg="cyan", bold=True)
-     click.echo(f"Transaction hash: ", nl=False)
+     click.echo("Transaction hash: ", nl=False)
      click.secho(tx_hash, fg="cyan", bold=True)
      block_explorer_link = f"{DEFAULT_BLOCKCHAIN_EXPLORER}0x{tx_hash}"
-     click.echo(f"Block explorer link: ", nl=False)
+     click.echo("Block explorer link: ", nl=False)
      click.secho(block_explorer_link, fg="blue", underline=True)
      click.echo("──────────────────────────────────────")
      click.secho("LLM Output:", fg="yellow", bold=True)
@@ -374,6 +388,146 @@ def print_llm_inference_result(model_cid, tx_hash, llm_output):
      click.echo(llm_output)
      click.echo()

+
+ @cli.command()
+ @click.option('--model', '-m', 'model_cid',
+               type=click.Choice([e.value for e in types.LLM]),
+               required=True,
+               help='CID of the LLM model to run inference on')
+ @click.option('--messages',
+               type=str,
+               required=False,
+               help='Input messages for the chat inference in JSON format')
+ @click.option('--messages-file',
+               type=click.Path(exists=True, path_type=Path),
+               required=False,
+               help='Path to JSON file containing input messages for the chat inference')
+ @click.option('--max-tokens',
+               type=int,
+               default=100,
+               help='Maximum number of tokens for LLM output')
+ @click.option('--stop-sequence',
+               type=str,
+               default=None,
+               multiple=True,
+               help='Stop sequences for LLM')
+ @click.option('--temperature',
+               type=float,
+               default=0.0,
+               help='Temperature for LLM inference (0.0 to 1.0)')
+ @click.option('--tools',
+               type=str,
+               default="[]",
+               help='Tool configurations in JSON format')
+ @click.option('--tools-file',
+               type=click.Path(exists=True, path_type=Path),
+               required=False,
+               help='Path to JSON file containing tool configurations')
+ @click.option('--tool-choice',
+               type=str,
+               default='',
+               help='Specific tool choice for the LLM')
+ @click.pass_context
+ def chat(
+     ctx,
+     model_cid: str,
+     messages: Optional[str],
+     messages_file: Optional[Path],
+     max_tokens: int,
+     stop_sequence: List[str],
+     temperature: float,
+     tools: Optional[str],
+     tools_file: Optional[Path],
+     tool_choice: Optional[str]):
+     """
+     Run chat inference on an LLM model.
+
+     This command runs a chat inference on the specified LLM model using the provided messages and parameters.
+
+     Example usage:
+
+     \b
+     opengradient chat --model meta-llama/Meta-Llama-3-8B-Instruct --messages '[{"role":"user","content":"hello"}]' --max-tokens 50 --temperature 0.7
+     opengradient chat -m mistralai/Mistral-7B-Instruct-v0.3 --messages-file messages.json --stop-sequence "." --stop-sequence "\n"
+     """
+     # TODO (Kyle): ^^^^^^^ Edit description with more examples using tools
+     client: Client = ctx.obj['client']
+     try:
+         click.echo(f"Running LLM chat inference for model \"{model_cid}\"\n")
+         if not messages and not messages_file:
+             click.echo("Must specify either messages or messages-file")
+             ctx.exit(1)
+             return
+         if messages and messages_file:
+             click.echo("Cannot have both messages and messages_file")
+             ctx.exit(1)
+             return
+
+         if messages:
+             try:
+                 messages = json.loads(messages)
+             except Exception as e:
+                 click.echo(f"Failed to parse messages: {e}")
+                 ctx.exit(1)
+         else:
+             with messages_file.open('r') as file:
+                 messages = json.load(file)
+
+         # Parse tools if provided
+         if (tools or tools != "[]") and tools_file:
+             click.echo("Cannot have both tools and tools_file")
+             click.exit(1)
+             return
+
+         parsed_tools=[]
+         if tools:
+             try:
+                 parsed_tools = json.loads(tools)
+                 if not isinstance(parsed_tools, list):
+                     click.echo("Tools must be a JSON array")
+                     ctx.exit(1)
+                     return
+             except json.JSONDecodeError as e:
+                 click.echo(f"Failed to parse tools JSON: {e}")
+                 ctx.exit(1)
+                 return
+
+         if tools_file:
+             try:
+                 with tools_file.open('r') as file:
+                     parsed_tools = json.load(file)
+                 if not isinstance(parsed_tools, list):
+                     click.echo("Tools must be a JSON array")
+                     ctx.exit(1)
+                     return
+             except Exception as e:
+                 click.echo("Failed to load JSON from tools_file: %s" % e)
+                 ctx.exit(1)
+                 return
+
+         if not tools and not tools_file:
+             parsed_tools = None
+
+         tx_hash, finish_reason, llm_chat_output = client.llm_chat(
+             model_cid=model_cid,
+             messages=messages,
+             max_tokens=max_tokens,
+             stop_sequence=list(stop_sequence),
+             temperature=temperature,
+             tools=parsed_tools,
+             tool_choice=tool_choice,
+         )
+
+         # TODO (Kyle): Make this prettier
+         print("TX Hash: ", tx_hash)
+         print("Finish reason: ", finish_reason)
+         print("Chat output: ", llm_chat_output)
+     except Exception as e:
+         click.echo(f"Error running LLM chat inference: {str(e)}")
+
+ def print_llm_chat_result():
+     pass
+

  @cli.command()
  def create_account():
@@ -389,7 +543,7 @@ def create_account_impl() -> EthAccount:
      click.echo("Step 1: Create Account on OpenGradient Hub")
      click.echo("-" * 50)

-     click.echo(f"Please create an account on the OpenGradient Hub")
+     click.echo("Please create an account on the OpenGradient Hub")
      webbrowser.open(DEFAULT_HUB_SIGNUP_URL, new=2)
      click.confirm("Have you successfully created your account on the OpenGradient Hub?", abort=True)

@@ -402,7 +556,7 @@ def create_account_impl() -> EthAccount:
      click.echo("\n" + "-" * 50)
      click.echo("Step 3: Fund Your Account")
      click.echo("-" * 50)
-     click.echo(f"Please fund your account clicking 'Request' on the Faucet website")
+     click.echo("Please fund your account clicking 'Request' on the Faucet website")
      webbrowser.open(DEFAULT_OG_FAUCET_URL + eth_account.address, new=2)
      click.confirm("Have you successfully funded your account using the Faucet?", abort=True)

@@ -1,15 +1,19 @@
- import requests
- import os
  import json
- from web3 import Web3
- from opengradient.exceptions import OpenGradientError
- from opengradient.types import InferenceMode
- from opengradient import utils
- import numpy as np
  import logging
- from typing import Dict, Optional, Tuple, Union, List
- from web3.exceptions import ContractLogicError
+ import os
+ from typing import Dict, List, Optional, Tuple, Union
+
  import firebase
+ import numpy as np
+ import requests
+ from web3 import Web3
+ from web3.exceptions import ContractLogicError
+ from web3.logs import DISCARD
+
+ from opengradient import utils
+ from opengradient.exceptions import OpenGradientError
+ from opengradient.types import InferenceMode, LLM
+

  class Client:
      FIREBASE_CONFIG = {
@@ -100,7 +104,7 @@ class Client:
          if not self.user:
              raise ValueError("User not authenticated")

-         url = f"https://api.opengradient.ai/api/v0/models/"
+         url = "https://api.opengradient.ai/api/v0/models/"
          headers = {
              'Authorization': f'Bearer {self.user["idToken"]}',
              'Content-Type': 'application/json'
186
190
  logging.debug(f"Full server response: {json_response}")
187
191
 
188
192
  if isinstance(json_response, list) and not json_response:
189
- logging.info(f"Server returned an empty list. Assuming version was created successfully.")
193
+ logging.info("Server returned an empty list. Assuming version was created successfully.")
190
194
  return {"versionString": "Unknown", "note": "Created based on empty response"}
191
195
  elif isinstance(json_response, dict):
192
- versionString = json_response.get('versionString')
193
- if not versionString:
196
+ version_string = json_response.get('versionString')
197
+ if not version_string:
194
198
  logging.warning(f"'versionString' not found in response. Response: {json_response}")
195
199
  return {"versionString": "Unknown", "note": "Version ID not provided in response"}
196
- logging.info(f"Version creation successful. Version ID: {versionString}")
197
- return {"versionString": versionString}
200
+ logging.info(f"Version creation successful. Version ID: {version_string}")
201
+ return {"versionString": version_string}
198
202
  else:
199
203
  logging.error(f"Unexpected response type: {type(json_response)}. Content: {json_response}")
200
204
  raise Exception(f"Unexpected response type: {type(json_response)}")
@@ -295,7 +299,12 @@ class Client:
              logging.error(f"Unexpected error during upload: {str(e)}", exc_info=True)
              raise OpenGradientError(f"Unexpected error during upload: {str(e)}")

-     def infer(self, model_cid: str, inference_mode: InferenceMode, model_input: Dict[str, Union[str, int, float, List, np.ndarray]]) -> Tuple[str, Dict[str, np.ndarray]]:
+     def infer(
+             self,
+             model_cid: str,
+             inference_mode: InferenceMode,
+             model_input: Dict[str, Union[str, int, float, List, np.ndarray]]
+     ) -> Tuple[str, Dict[str, np.ndarray]]:
          """
          Perform inference on a model.

@@ -376,19 +385,11 @@ class Client:
              raise ContractLogicError(f"Transaction failed. Receipt: {tx_receipt}")

          # Process the InferenceResult event
-         inference_result = None
-         for log in tx_receipt['logs']:
-             try:
-                 decoded_log = contract.events.InferenceResult().process_log(log)
-                 inference_result = decoded_log
-                 break
-             except:
-                 continue
-
-         if inference_result is None:
-             logging.error("InferenceResult event not found in transaction logs")
-             logging.debug(f"Transaction receipt logs: {tx_receipt['logs']}")
+         parsed_logs = contract.events.InferenceResult().process_receipt(tx_receipt, errors=DISCARD)
+
+         if len(parsed_logs) < 1:
              raise OpenGradientError("InferenceResult event not found in transaction logs")
+         inference_result = parsed_logs[0]

          # Extract the ModelOutput from the event
          event_data = inference_result['args']
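
The rewritten event handling leans on web3.py's process_receipt(receipt, errors=DISCARD), which decodes every log that matches the event ABI and silently drops the rest, replacing the manual try/except loop over raw logs. The same pattern in isolation (the contract and tx_receipt objects are assumed to exist):

    from web3.logs import DISCARD

    def extract_first_event(contract, tx_receipt, event_name: str):
        """Decode the first matching event from a receipt, ignoring non-matching logs."""
        event = getattr(contract.events, event_name)()
        parsed = event.process_receipt(tx_receipt, errors=DISCARD)
        if not parsed:
            raise RuntimeError(f"{event_name} event not found in transaction logs")
        return parsed[0]["args"]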
@@ -410,24 +411,24 @@ class Client:
              logging.error(f"Error in infer method: {str(e)}", exc_info=True)
              raise OpenGradientError(f"Inference failed: {str(e)}")

-     def infer_llm(self,
-                   model_cid: str,
-                   prompt: str,
-                   max_tokens: int = 100,
-                   stop_sequence: Optional[List[str]] = None,
-                   temperature: float = 0.0) -> Tuple[str, str]:
+     def llm_completion(self,
+                        model_cid: LLM,
+                        prompt: str,
+                        max_tokens: int = 100,
+                        stop_sequence: Optional[List[str]] = None,
+                        temperature: float = 0.0) -> Tuple[str, str]:
          """
          Perform inference on an LLM model using completions.

          Args:
-             model_cid (str): The unique content identifier for the model.
+             model_cid (LLM): The unique content identifier for the model.
              prompt (str): The input prompt for the LLM.
              max_tokens (int): Maximum number of tokens for LLM output. Default is 100.
              stop_sequence (List[str], optional): List of stop sequences for LLM. Default is None.
              temperature (float): Temperature for LLM inference, between 0 and 1. Default is 0.0.

          Returns:
-             Tuple[str, str]: The transaction hash and the LLM output.
+             Tuple[str, str]: The transaction hash and the LLM completion output.

          Raises:
              OpenGradientError: If the inference fails.
@@ -435,7 +436,7 @@ class Client:
          try:
              self._initialize_web3()

-             abi_path = os.path.join(os.path.dirname(__file__), 'abi', 'llm.abi')
+             abi_path = os.path.join(os.path.dirname(__file__), 'abi', 'inference.abi')
              with open(abi_path, 'r') as abi_file:
                  llm_abi = json.load(abi_file)
              contract = self._w3.eth.contract(address=self.contract_address, abi=llm_abi)
@@ -452,7 +453,166 @@ class Client:
              logging.debug(f"Prepared LLM request: {llm_request}")

              # Prepare run function
-             run_function = contract.functions.runLLM(llm_request)
+             run_function = contract.functions.runLLMCompletion(llm_request)
+
+             # Build transaction
+             nonce = self._w3.eth.get_transaction_count(self.wallet_address)
+             estimated_gas = run_function.estimate_gas({'from': self.wallet_address})
+             gas_limit = int(estimated_gas * 1.2)
+
+             transaction = run_function.build_transaction({
+                 'from': self.wallet_address,
+                 'nonce': nonce,
+                 'gas': gas_limit,
+                 'gasPrice': self._w3.eth.gas_price,
+             })
+
+             # Sign and send transaction
+             signed_tx = self._w3.eth.account.sign_transaction(transaction, self.private_key)
+             tx_hash = self._w3.eth.send_raw_transaction(signed_tx.raw_transaction)
+             logging.debug(f"Transaction sent. Hash: {tx_hash.hex()}")
+
+             # Wait for transaction receipt
+             tx_receipt = self._w3.eth.wait_for_transaction_receipt(tx_hash)
+
+             if tx_receipt['status'] == 0:
+                 raise ContractLogicError(f"Transaction failed. Receipt: {tx_receipt}")
+
+             # Process the LLMResult event
+             parsed_logs = contract.events.LLMCompletionResult().process_receipt(tx_receipt, errors=DISCARD)
+
+             if len(parsed_logs) < 1:
+                 raise OpenGradientError("LLM completion result event not found in transaction logs")
+             llm_result = parsed_logs[0]
+
+             llm_answer = llm_result['args']['response']['answer']
+             return tx_hash.hex(), llm_answer
+
+         except ContractLogicError as e:
+             logging.error(f"Contract logic error: {str(e)}", exc_info=True)
+             raise OpenGradientError(f"LLM inference failed due to contract logic error: {str(e)}")
+         except Exception as e:
+             logging.error(f"Error in infer completion method: {str(e)}", exc_info=True)
+             raise OpenGradientError(f"LLM inference failed: {str(e)}")
+
+     def llm_chat(self,
+                  model_cid: str,
+                  messages: List[Dict],
+                  max_tokens: int = 100,
+                  stop_sequence: Optional[List[str]] = None,
+                  temperature: float = 0.0,
+                  tools: Optional[List[Dict]] = [],
+                  tool_choice: Optional[str] = None) -> Tuple[str, str]:
+         """
+         Perform inference on an LLM model using chat.
+
+         Args:
+             model_cid (LLM): The unique content identifier for the model.
+             messages (dict): The messages that will be passed into the chat.
+                 This should be in OpenAI API format (https://platform.openai.com/docs/api-reference/chat/create)
+                 Example:
+                     [
+                         {
+                             "role": "system",
+                             "content": "You are a helpful assistant."
+                         },
+                         {
+                             "role": "user",
+                             "content": "Hello!"
+                         }
+                     ]
+             max_tokens (int): Maximum number of tokens for LLM output. Default is 100.
+             stop_sequence (List[str], optional): List of stop sequences for LLM. Default is None.
+             temperature (float): Temperature for LLM inference, between 0 and 1. Default is 0.0.
+             tools (List[dict], optional): Set of tools
+                 This should be in OpenAI API format (https://platform.openai.com/docs/api-reference/chat/create#chat-create-tools)
+                 Example:
+                     [
+                         {
+                             "type": "function",
+                             "function": {
+                                 "name": "get_current_weather",
+                                 "description": "Get the current weather in a given location",
+                                 "parameters": {
+                                     "type": "object",
+                                     "properties": {
+                                         "location": {
+                                             "type": "string",
+                                             "description": "The city and state, e.g. San Francisco, CA"
+                                         },
+                                         "unit": {
+                                             "type": "string",
+                                             "enum": ["celsius", "fahrenheit"]
+                                         }
+                                     },
+                                     "required": ["location"]
+                                 }
+                             }
+                         }
+                     ]
+             tool_choice (str, optional): Sets a specific tool to choose. Default value is "auto".
+
+         Returns:
+             Tuple[str, str, dict]: The transaction hash, finish reason, and a dictionary struct of LLM chat messages.
+
+         Raises:
+             OpenGradientError: If the inference fails.
+         """
+         try:
+             self._initialize_web3()
+
+             abi_path = os.path.join(os.path.dirname(__file__), 'abi', 'inference.abi')
+             with open(abi_path, 'r') as abi_file:
+                 llm_abi = json.load(abi_file)
+             contract = self._w3.eth.contract(address=self.contract_address, abi=llm_abi)
+
+             # For incoming chat messages, tool_calls can be empty. Add an empty array so that it will fit the ABI.
+             for message in messages:
+                 if 'tool_calls' not in message:
+                     message['tool_calls'] = []
+                 if 'tool_call_id' not in message:
+                     message['tool_call_id'] = ""
+                 if 'name' not in message:
+                     message['name'] = ""
+
+             # Create simplified tool structure for smart contract
+             #
+             # struct ToolDefinition {
+             #     string description;
+             #     string name;
+             #     string parameters; // This must be a JSON
+             # }
+             converted_tools = []
+             if tools is not None:
+                 for tool in tools:
+                     function = tool['function']
+
+                     converted_tool = {}
+                     converted_tool['name'] = function['name']
+                     converted_tool['description'] = function['description']
+                     if (parameters := function.get('parameters')) is not None:
+                         try:
+                             converted_tool['parameters'] = json.dumps(parameters)
+                         except Exception as e:
+                             raise OpenGradientError("Chat LLM failed to convert parameters into JSON: %s", e)
+
+                     converted_tools.append(converted_tool)
+
+             # Prepare LLM input
+             llm_request = {
+                 "mode": InferenceMode.VANILLA,
+                 "modelCID": model_cid,
+                 "messages": messages,
+                 "max_tokens": max_tokens,
+                 "stop_sequence": stop_sequence or [],
+                 "temperature": int(temperature * 100),  # Scale to 0-100 range
+                 "tools": converted_tools or [],
+                 "tool_choice": tool_choice if tool_choice else ("" if tools is None else "auto")
+             }
+             logging.debug(f"Prepared LLM request: {llm_request}")
+
+             # Prepare run function
+             run_function = contract.functions.runLLMChat(llm_request)

              # Build transaction
              nonce = self._w3.eth.get_transaction_count(self.wallet_address)
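
To make the tool conversion above concrete, here is what the loop produces for the docstring's get_current_weather example; the nested parameters object is serialized to a JSON string because the contract's ToolDefinition.parameters field is a plain string:

    import json

    openai_tool = {
        "type": "function",
        "function": {
            "name": "get_current_weather",
            "description": "Get the current weather in a given location",
            "parameters": {
                "type": "object",
                "properties": {"location": {"type": "string"}},
                "required": ["location"],
            },
        },
    }

    # Equivalent of the conversion loop in llm_chat
    function = openai_tool["function"]
    converted_tool = {
        "name": function["name"],
        "description": function["description"],
        "parameters": json.dumps(function["parameters"]),  # stringified for the ABI
    }
    print(converted_tool)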
@@ -478,27 +638,27 @@ class Client:
                  raise ContractLogicError(f"Transaction failed. Receipt: {tx_receipt}")

              # Process the LLMResult event
-             llm_result = None
-             for log in tx_receipt['logs']:
-                 try:
-                     decoded_log = contract.events.LLMResult().process_log(log)
-                     llm_result = decoded_log['args']['response']['answer']
-                     break
-                 except:
-                     continue
+             parsed_logs = contract.events.LLMChatResult().process_receipt(tx_receipt, errors=DISCARD)

-             if llm_result is None:
-                 raise OpenGradientError("LLMResult event not found in transaction logs")
+             if len(parsed_logs) < 1:
+                 raise OpenGradientError("LLM chat result event not found in transaction logs")
+             llm_result = parsed_logs[0]['args']['response']

-             logging.debug(f"LLM output: {llm_result}")
+             # Turn tool calls into normal dicts
+             message = dict(llm_result['message'])
+             if (tool_calls := message.get('tool_calls')) != None:
+                 new_tool_calls = []
+                 for tool_call in tool_calls:
+                     new_tool_calls.append(dict(tool_call))
+                 message['tool_calls'] = new_tool_calls

-             return tx_hash.hex(), llm_result
+             return (tx_hash.hex(), llm_result['finish_reason'], message)

          except ContractLogicError as e:
              logging.error(f"Contract logic error: {str(e)}", exc_info=True)
              raise OpenGradientError(f"LLM inference failed due to contract logic error: {str(e)}")
          except Exception as e:
-             logging.error(f"Error in infer_llm method: {str(e)}", exc_info=True)
+             logging.error(f"Error in infer chat method: {str(e)}", exc_info=True)
              raise OpenGradientError(f"LLM inference failed: {str(e)}")

@@ -3,5 +3,5 @@
  DEFAULT_RPC_URL="http://18.218.115.248:8545"
  DEFAULT_OG_FAUCET_URL="http://18.218.115.248:8080/?address="
  DEFAULT_HUB_SIGNUP_URL="https://hub.opengradient.ai/signup"
- DEFAULT_INFERENCE_CONTRACT_ADDRESS="0x24Ec56879245C707220Af7234d2fF3F22cA9Aa63"
+ DEFAULT_INFERENCE_CONTRACT_ADDRESS="0xF78F7d5a7e9f484f0924Cc21347029715bD3B8f4"
  DEFAULT_BLOCKCHAIN_EXPLORER="http://3.145.62.2/tx/"
@@ -1,5 +1,6 @@
- from typing import List, Tuple, Union
  from dataclasses import dataclass
+ from typing import List, Tuple, Union
+ from enum import Enum

  @dataclass
  class Number:
@@ -9,7 +10,7 @@ class Number:
  @dataclass
  class NumberTensor:
      name: str
-     values: List[Tuple[int, int]] # (int128, int128)[]
+     values: List[Tuple[int, int]]

  @dataclass
  class StringTensor:
@@ -37,7 +38,7 @@ class AbiFunction:
      name: str
      inputs: List[Union[str, 'AbiFunction']]
      outputs: List[Union[str, 'AbiFunction']]
-     stateMutability: str
+     state_mutability: str

  @dataclass
  class Abi:
@@ -54,7 +55,7 @@ class Abi:
                  name=item['name'],
                  inputs=inputs,
                  outputs=outputs,
-                 stateMutability=item['stateMutability']
+                 state_mutability=item['stateMutability']
              ))
          return cls(functions=functions)

@@ -67,8 +68,13 @@ class Abi:
                      name=item['name'],
                      inputs=Abi._parse_inputs_outputs(item['components']),
                      outputs=[],
-                     stateMutability=''
+                     state_mutability=''
                  ))
              else:
                  result.append(f"{item['name']}:{item['type']}")
-         return result
+         return result
+
+ class LLM(str, Enum):
+     META_LLAMA3_8B_INSTRUCT = "meta-llama/Meta-Llama-3-8B-Instruct"
+     LLAMA_3_2_3B_INSTRUCT = "meta-llama/Llama-3.2-3B-Instruct"
+     MISTRAL_7B_INSTRUCT_V3 = "mistralai/Mistral-7B-Instruct-v0.3"
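
Because LLM subclasses str, each member compares and serializes as its plain model identifier, so it can be passed anywhere a model CID string is expected; the CLI exploits this with click.Choice([e.value for e in types.LLM]). A quick illustration:

    from opengradient.types import LLM

    # str subclassing: the member is usable as a plain string
    assert LLM.META_LLAMA3_8B_INSTRUCT == "meta-llama/Meta-Llama-3-8B-Instruct"

    # Enumerate the supported model identifiers
    print([model.value for model in LLM])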
@@ -1,9 +1,11 @@
- import numpy as np
  import logging
  from decimal import Decimal
  from typing import Dict, List, Tuple
+
+ import numpy as np
  from web3.datastructures import AttributeDict

+
  def convert_to_fixed_point(number: float) -> Tuple[int, int]:
      """
      Converts input number to the Number tensor used by the sequencer.
@@ -36,14 +38,16 @@ def convert_to_float32(value: int, decimals: int) -> np.float32:
      """
      return np.float32(Decimal(value) / (10 ** Decimal(decimals)))

- def convert_to_model_input(inputs: Dict[str, np.ndarray]) -> Tuple[List[Tuple[str, List[Tuple[int, int]]]], List[Tuple[str, List[str]]]]:
+ def convert_to_model_input(
+         inputs: Dict[str, np.ndarray]
+ ) -> Tuple[List[Tuple[str, List[Tuple[int, int]]]], List[Tuple[str, List[str]]]]:
      """
      Expect SDK input to be a dict with the format
          key: tensor name
          value: np.array / list

-     Return a tuple of (number tensors, string tensors) depending on the input type. Each number and string tensor converted
-     to a numpy array and flattened and the shape saved.
+     Return a tuple of (number tensors, string tensors) depending on the input type.
+     Each number and string tensor converted to a numpy array and flattened and the shape saved.
      """
      logging.debug("Converting the following input dictionary to ModelInput: %s", inputs)
      number_tensors = []
@@ -60,7 +64,9 @@ def convert_to_model_input(inputs: Dict[str, np.ndarray]) -> Tuple[List[Tuple[st

          # Check if type is np array
          if not isinstance(tensor_data, np.ndarray):
-             raise TypeError("Inference input must be list, numpy array, or type (str, int, float): %s" % type(tensor_data))
+             raise TypeError(
+                 "Inference input must be list, numpy array, or type (str, int, float): %s"
+                 % type(tensor_data))

          # Flatten list and retain shape
          shape = tensor_data.shape
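
The decode direction shown in convert_to_float32 is simply value / 10**decimals computed through Decimal for precision; for example, (125, 2) decodes to 1.25. A standalone check of the body shown in this diff (convert_to_fixed_point's exact rounding is not visible here, so only the decode side is exercised):

    from decimal import Decimal
    import numpy as np

    def convert_to_float32(value: int, decimals: int) -> np.float32:
        # Matches the function body shown in the diff above
        return np.float32(Decimal(value) / (10 ** Decimal(decimals)))

    print(convert_to_float32(125, 2))       # 1.25
    print(convert_to_float32(-3141592, 6))  # -3.141592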
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: opengradient
- Version: 0.3.5
+ Version: 0.3.8
  Summary: Python SDK for OpenGradient decentralized model management & inference services
  Author-email: OpenGradient <oliver@opengradient.ai>
  License: MIT License
@@ -29,14 +29,10 @@ Project-URL: Homepage, https://opengradient.ai
  Classifier: Development Status :: 3 - Alpha
  Classifier: Intended Audience :: Developers
  Classifier: License :: OSI Approved :: MIT License
- Classifier: Programming Language :: Python :: 3
- Classifier: Programming Language :: Python :: 3.7
- Classifier: Programming Language :: Python :: 3.8
- Classifier: Programming Language :: Python :: 3.9
  Classifier: Programming Language :: Python :: 3.10
  Classifier: Programming Language :: Python :: 3.11
  Classifier: Programming Language :: Python :: 3.12
- Requires-Python: >=3.7
+ Requires-Python: >=3.10
  Description-Content-Type: text/markdown
  License-File: LICENSE
  Requires-Dist: aiohappyeyeballs==2.4.3
@@ -15,5 +15,4 @@ src/opengradient.egg-info/dependency_links.txt
  src/opengradient.egg-info/entry_points.txt
  src/opengradient.egg-info/requires.txt
  src/opengradient.egg-info/top_level.txt
- src/opengradient/abi/inference.abi
- src/opengradient/abi/llm.abi
+ src/opengradient/abi/inference.abi
@@ -1 +0,0 @@
- [{"anonymous":false,"inputs":[{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"},{"internalType":"bool","name":"is_simulation_result","type":"bool"}],"indexed":false,"internalType":"struct ModelOutput","name":"output","type":"tuple"}],"name":"InferenceResult","type":"event"},{"inputs":[{"internalType":"string","name":"modelId","type":"string"},{"internalType":"enum ModelInferenceMode","name":"inferenceMode","type":"uint8"},{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"}],"internalType":"struct ModelInput","name":"modelInput","type":"tuple"}],"name":"run","outputs":[{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"},{"internalType":"bool","name":"is_simulation_result","type":"bool"}],"internalType":"struct ModelOutput","name":"","type":"tuple"}],"stateMutability":"nonpayable","type":"function"}]
@@ -1 +0,0 @@
- [{"anonymous":false,"inputs":[{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"},{"internalType":"bool","name":"is_simulation_result","type":"bool"}],"indexed":false,"internalType":"struct ModelOutput","name":"output","type":"tuple"}],"name":"InferenceResult","type":"event"},{"anonymous":false,"inputs":[{"components":[{"internalType":"string","name":"answer","type":"string"}],"indexed":false,"internalType":"struct LlmResponse","name":"response","type":"tuple"}],"name":"LLMResult","type":"event"},{"inputs":[{"internalType":"string","name":"modelId","type":"string"},{"internalType":"enum ModelInferenceMode","name":"inferenceMode","type":"uint8"},{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"}],"internalType":"struct ModelInput","name":"modelInput","type":"tuple"}],"name":"run","outputs":[{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"},{"internalType":"bool","name":"is_simulation_result","type":"bool"}],"internalType":"struct ModelOutput","name":"","type":"tuple"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"components":[{"internalType":"enum LlmInferenceMode","name":"mode","type":"uint8"},{"internalType":"string","name":"modelCID","type":"string"},{"internalType":"string","name":"prompt","type":"string"},{"internalType":"uint32","name":"max_tokens","type":"uint32"},{"internalType":"string[]","name":"stop_sequence","type":"string[]"},{"internalType":"uint32","name":"temperature","type":"uint32"}],"internalType":"struct LlmInferenceRequest","name":"request","type":"tuple"}],"name":"runLLM","outputs":[{"components":[{"internalType":"string","name":"answer","type":"string"}],"internalType":"struct LlmResponse","name":"","type":"tuple"}],"stateMutability":"nonpayable","type":"function"}]