opengradient 0.3.4__py3-none-any.whl → 0.3.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
opengradient/__init__.py CHANGED
@@ -1,8 +1,10 @@
+ from typing import Dict, List, Optional, Tuple
+
  from .client import Client
- from .defaults import *
- from .types import InferenceMode
- from typing import List, Dict, Optional, Tuple
- __version__ = "0.3.4"
+ from .defaults import DEFAULT_INFERENCE_CONTRACT_ADDRESS, DEFAULT_RPC_URL
+ from .types import InferenceMode, LLM
+
+ __version__ = "0.3.7"

  _client = None

@@ -42,14 +44,25 @@ def infer(model_cid, inference_mode, model_input):
          raise RuntimeError("OpenGradient client not initialized. Call og.init() first.")
      return _client.infer(model_cid, inference_mode, model_input)

- def infer_llm(model_cid: str,
-               prompt: str,
-               max_tokens: int = 100,
-               stop_sequence: Optional[List[str]] = None,
-               temperature: float = 0.0) -> Tuple[str, str]:
+ def llm_completion(model_cid: LLM,
+                    prompt: str,
+                    max_tokens: int = 100,
+                    stop_sequence: Optional[List[str]] = None,
+                    temperature: float = 0.0) -> Tuple[str, str]:
+     if _client is None:
+         raise RuntimeError("OpenGradient client not initialized. Call og.init() first.")
+     return _client.llm_completion(model_cid, prompt, max_tokens, stop_sequence, temperature)
+
+ def llm_chat(model_cid: LLM,
+              messages: List[Dict],
+              max_tokens: int = 100,
+              stop_sequence: Optional[List[str]] = None,
+              temperature: float = 0.0,
+              tools: Optional[List[Dict]] = None,
+              tool_choice: Optional[str] = None):
      if _client is None:
          raise RuntimeError("OpenGradient client not initialized. Call og.init() first.")
-     return _client.infer_llm(model_cid, prompt, max_tokens, stop_sequence, temperature)
+     return _client.llm_chat(model_cid, messages, max_tokens, stop_sequence, temperature, tools, tool_choice)

  def login(email: str, password: str):
      if _client is None:
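
To make the renamed surface area concrete, here is a minimal usage sketch of the new module-level entry points. It is not from the diff: the `og.init()` parameter names are assumptions modeled on the `Client` constructor used in cli.py below, so treat them as hypothetical.

```python
# Minimal sketch, assuming og.init() takes the same credential arguments the
# CLI passes to Client (hypothetical parameter names).
import opengradient as og

og.init(private_key="0x...", rpc_url="http://18.218.115.248:8545")

# llm_completion replaces the old infer_llm entry point.
tx_hash, answer = og.llm_completion(
    model_cid=og.LLM.META_LLAMA3_8B_INSTRUCT,
    prompt="Translate to French: Hello world",
    max_tokens=50,
)

# llm_chat is new in this release; Client.llm_chat returns a 3-tuple.
tx_hash, finish_reason, message = og.llm_chat(
    model_cid=og.LLM.META_LLAMA3_8B_INSTRUCT,
    messages=[{"role": "user", "content": "Hello!"}],
)
```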
opengradient/abi/inference.abi CHANGED
@@ -1 +1 @@
- [{"anonymous":false,"inputs":[{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"},{"internalType":"bool","name":"is_simulation_result","type":"bool"}],"indexed":false,"internalType":"struct ModelOutput","name":"output","type":"tuple"}],"name":"InferenceResult","type":"event"},{"inputs":[{"internalType":"string","name":"modelId","type":"string"},{"internalType":"enum ModelInferenceMode","name":"inferenceMode","type":"uint8"},{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"}],"internalType":"struct ModelInput","name":"modelInput","type":"tuple"}],"name":"run","outputs":[{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"},{"internalType":"bool","name":"is_simulation_result","type":"bool"}],"internalType":"struct ModelOutput","name":"","type":"tuple"}],"stateMutability":"nonpayable","type":"function"}]
+ [{"anonymous":false,"inputs":[{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"},{"internalType":"bool","name":"is_simulation_result","type":"bool"}],"indexed":false,"internalType":"struct ModelOutput","name":"output","type":"tuple"}],"name":"InferenceResult","type":"event"},{"anonymous":false,"inputs":[{"components":[{"internalType":"string","name":"finish_reason","type":"string"},{"components":[{"internalType":"string","name":"role","type":"string"},{"internalType":"string","name":"content","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"tool_call_id","type":"string"},{"components":[{"internalType":"string","name":"id","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"arguments","type":"string"}],"internalType":"struct ToolCall[]","name":"tool_calls","type":"tuple[]"}],"internalType":"struct ChatMessage","name":"message","type":"tuple"}],"indexed":false,"internalType":"struct LLMChatResponse","name":"response","type":"tuple"}],"name":"LLMChatResult","type":"event"},{"anonymous":false,"inputs":[{"components":[{"internalType":"string","name":"answer","type":"string"}],"indexed":false,"internalType":"struct LLMCompletionResponse","name":"response","type":"tuple"}],"name":"LLMCompletionResult","type":"event"},{"inputs":[{"internalType":"string","name":"modelId","type":"string"},{"internalType":"enum ModelInferenceMode","name":"inferenceMode","type":"uint8"},{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"}],"internalType":"struct ModelInput","name":"modelInput","type":"tuple"}],"name":"run","outputs":[{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct 
TensorLib.StringTensor[]","name":"strings","type":"tuple[]"},{"internalType":"bool","name":"is_simulation_result","type":"bool"}],"internalType":"struct ModelOutput","name":"","type":"tuple"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"components":[{"internalType":"enum LLMInferenceMode","name":"mode","type":"uint8"},{"internalType":"string","name":"modelCID","type":"string"},{"components":[{"internalType":"string","name":"role","type":"string"},{"internalType":"string","name":"content","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"tool_call_id","type":"string"},{"components":[{"internalType":"string","name":"id","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"arguments","type":"string"}],"internalType":"struct ToolCall[]","name":"tool_calls","type":"tuple[]"}],"internalType":"struct ChatMessage[]","name":"messages","type":"tuple[]"},{"components":[{"internalType":"string","name":"description","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"parameters","type":"string"}],"internalType":"struct ToolDefinition[]","name":"tools","type":"tuple[]"},{"internalType":"string","name":"tool_choice","type":"string"},{"internalType":"uint32","name":"max_tokens","type":"uint32"},{"internalType":"string[]","name":"stop_sequence","type":"string[]"},{"internalType":"uint32","name":"temperature","type":"uint32"}],"internalType":"struct LLMChatRequest","name":"request","type":"tuple"}],"name":"runLLMChat","outputs":[{"components":[{"internalType":"string","name":"finish_reason","type":"string"},{"components":[{"internalType":"string","name":"role","type":"string"},{"internalType":"string","name":"content","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"tool_call_id","type":"string"},{"components":[{"internalType":"string","name":"id","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"arguments","type":"string"}],"internalType":"struct ToolCall[]","name":"tool_calls","type":"tuple[]"}],"internalType":"struct ChatMessage","name":"message","type":"tuple"}],"internalType":"struct LLMChatResponse","name":"","type":"tuple"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"components":[{"internalType":"enum LLMInferenceMode","name":"mode","type":"uint8"},{"internalType":"string","name":"modelCID","type":"string"},{"internalType":"string","name":"prompt","type":"string"},{"internalType":"uint32","name":"max_tokens","type":"uint32"},{"internalType":"string[]","name":"stop_sequence","type":"string[]"},{"internalType":"uint32","name":"temperature","type":"uint32"}],"internalType":"struct LLMCompletionRequest","name":"request","type":"tuple"}],"name":"runLLMCompletion","outputs":[{"components":[{"internalType":"string","name":"answer","type":"string"}],"internalType":"struct LLMCompletionResponse","name":"","type":"tuple"}],"stateMutability":"nonpayable","type":"function"}]
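
For orientation: the old single-purpose llm.abi (runLLM / LLMResult, deleted below) is folded into inference.abi, which now also defines runLLMCompletion / runLLMChat and their LLMCompletionResult / LLMChatResult events. A hedged web3.py sketch of loading it; the path mirrors the package layout and the address/RPC come from opengradient/defaults.py, but verify against your deployment.

```python
# Sketch only: enumerate the entry points and events defined by the updated ABI.
import json
from web3 import Web3

w3 = Web3(Web3.HTTPProvider("http://18.218.115.248:8545"))  # DEFAULT_RPC_URL
with open("opengradient/abi/inference.abi") as f:
    abi = json.load(f)

contract = w3.eth.contract(
    address="0xF78F7d5a7e9f484f0924Cc21347029715bD3B8f4",  # DEFAULT_INFERENCE_CONTRACT_ADDRESS
    abi=abi,
)
print([fn.fn_name for fn in contract.all_functions()])   # run, runLLMChat, runLLMCompletion
print([e["name"] for e in abi if e["type"] == "event"])  # InferenceResult, LLMChatResult, LLMCompletionResult
```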
opengradient/account.py CHANGED
@@ -1,8 +1,9 @@
- from eth_account import Account
+ import hashlib
+ import os
  import secrets
  from collections import namedtuple
- import os
- import hashlib
+
+ from eth_account import Account

  EthAccount = namedtuple('EthAccount', ['address', 'private_key'])

opengradient/cli.py CHANGED
@@ -1,18 +1,26 @@
- import click
- import opengradient
- import json
  import ast
- from pathlib import Path
+ import json
  import logging
- from pprint import pformat
- from typing import List
  import webbrowser
- import sys
+ from pathlib import Path
+ from typing import List, Dict, Optional
+ from enum import Enum
+ from . import types

+ import click
+
+ import opengradient
+
+ from .account import EthAccount, generate_eth_account
  from .client import Client
- from .defaults import *
+ from .defaults import (
+     DEFAULT_BLOCKCHAIN_EXPLORER,
+     DEFAULT_HUB_SIGNUP_URL,
+     DEFAULT_INFERENCE_CONTRACT_ADDRESS,
+     DEFAULT_OG_FAUCET_URL,
+     DEFAULT_RPC_URL,
+ )
  from .types import InferenceMode
- from .account import EthAccount, generate_eth_account

  OG_CONFIG_FILE = Path.home() / '.opengradient_config.json'

@@ -50,13 +58,19 @@ class DictParamType(click.ParamType):

  Dict = DictParamType()

- # Support inference modes
+ # Supported inference modes
  InferenceModes = {
      "VANILLA": InferenceMode.VANILLA,
      "ZKML": InferenceMode.ZKML,
      "TEE": InferenceMode.TEE,
  }

+ # Supported LLMs
+ LlmModels = {
+     "meta-llama/Meta-Llama-3-8B-Instruct",
+     "meta-llama/Llama-3.2-3B-Instruct",
+     "mistralai/Mistral-7B-Instruct-v0.3"
+ }

  def initialize_config(ctx):
      """Interactively initialize OpenGradient config"""
@@ -118,7 +132,7 @@ def cli(ctx):
      try:
          ctx.obj['client'] = Client(private_key=ctx.obj['private_key'],
                                     rpc_url=ctx.obj['rpc_url'],
-                                    contract_address=ctx.obj['contract_address'],
+                                    contract_address=DEFAULT_INFERENCE_CONTRACT_ADDRESS,
                                     email=ctx.obj.get('email'),
                                     password=ctx.obj.get('password'))
      except Exception as e:
@@ -301,12 +315,23 @@ def infer(ctx, model_cid: str, inference_mode: str, input_data, input_file: Path
          with input_file.open('r') as file:
              model_input = json.load(file)

-         click.echo(f"Running {inference_mode} inference for model \"{model_cid}\"\n")
+         click.echo(f"Running {inference_mode} inference for model \"{model_cid}\"")
          tx_hash, model_output = client.infer(model_cid=model_cid, inference_mode=InferenceModes[inference_mode], model_input=model_input)

-         click.secho("Success!", fg="green")
-         click.echo(f"Transaction hash: {tx_hash}")
-         click.echo(f"Inference result:\n{pformat(model_output, indent=2, width=120)}")
+         click.echo()  # Add a newline for better spacing
+         click.secho("Transaction successful", fg="green", bold=True)
+         click.echo("──────────────────────────────────────")
+         click.echo("Transaction hash: ", nl=False)
+         click.secho(tx_hash, fg="cyan", bold=True)
+
+         block_explorer_link = f"{DEFAULT_BLOCKCHAIN_EXPLORER}0x{tx_hash}"
+         click.echo("Block explorer link: ", nl=False)
+         click.secho(block_explorer_link, fg="blue", underline=True)
+         click.echo()
+
+         click.secho("Inference result:", fg="green")
+         formatted_output = json.dumps(model_output, indent=2, default=lambda x: x.tolist() if hasattr(x, 'tolist') else str(x))
+         click.echo(formatted_output)
      except json.JSONDecodeError as e:
          click.echo(f"Error decoding JSON: {e}", err=True)
          click.echo(f"Error occurred on line {e.lineno}, column {e.colno}", err=True)
@@ -314,28 +339,28 @@ def infer(ctx, model_cid: str, inference_mode: str, input_data, input_file: Path
          click.echo(f"Error running inference: {str(e)}")

  @cli.command()
- @click.option('--model', '-m', 'model_cid', required=True, help='CID of the LLM model to run inference on')
- @click.option('--prompt', '-p', required=True, help='Input prompt for the LLM')
- @click.option('--max-tokens', type=int, default=100, help='Maximum number of tokens for LLM output')
+ @click.option('--model', '-m', 'model_cid', type=click.Choice(LlmModels), required=True, help='CID of the LLM model to run inference on')
+ @click.option('--prompt', '-p', required=True, help='Input prompt for the LLM completion')
+ @click.option('--max-tokens', type=int, default=100, help='Maximum number of tokens for LLM completion output')
  @click.option('--stop-sequence', multiple=True, help='Stop sequences for LLM')
  @click.option('--temperature', type=float, default=0.0, help='Temperature for LLM inference (0.0 to 1.0)')
  @click.pass_context
- def llm(ctx, model_cid: str, prompt: str, max_tokens: int, stop_sequence: List[str], temperature: float):
+ def completion(ctx, model_cid: str, prompt: str, max_tokens: int, stop_sequence: List[str], temperature: float):
      """
-     Run inference on an LLM model.
+     Run completion inference on an LLM model.

-     This command runs inference on the specified LLM model using the provided prompt and parameters.
+     This command runs a completion inference on the specified LLM model using the provided prompt and parameters.

      Example usage:

      \b
-     opengradient llm --model Qm... --prompt "Hello, how are you?" --max-tokens 50 --temperature 0.7
-     opengradient llm -m Qm... -p "Translate to French: Hello world" --stop-sequence "." --stop-sequence "\n"
+     opengradient completion --model meta-llama/Meta-Llama-3-8B-Instruct --prompt "Hello, how are you?" --max-tokens 50 --temperature 0.7
+     opengradient completion -m meta-llama/Meta-Llama-3-8B-Instruct -p "Translate to French: Hello world" --stop-sequence "." --stop-sequence "\n"
      """
      client: Client = ctx.obj['client']
      try:
-         click.echo(f"Running LLM inference for model \"{model_cid}\"\n")
-         tx_hash, llm_output = client.infer_llm(
+         click.echo(f"Running LLM completion inference for model \"{model_cid}\"\n")
+         tx_hash, llm_output = client.llm_completion(
              model_cid=model_cid,
              prompt=prompt,
              max_tokens=max_tokens,
@@ -343,11 +368,165 @@ def llm(ctx, model_cid: str, prompt: str, max_tokens: int, stop_sequence: List[s
              temperature=temperature
          )

-         click.secho("Success!", fg="green")
-         click.echo(f"Transaction hash: {tx_hash}")
-         click.echo(f"LLM output:\n{llm_output}")
+         print_llm_completion_result(model_cid, tx_hash, llm_output)
+     except Exception as e:
+         click.echo(f"Error running LLM completion: {str(e)}")
+
+ def print_llm_completion_result(model_cid, tx_hash, llm_output):
+     click.secho("✅ LLM completion Successful", fg="green", bold=True)
+     click.echo("──────────────────────────────────────")
+     click.echo("Model CID: ", nl=False)
+     click.secho(model_cid, fg="cyan", bold=True)
+     click.echo("Transaction hash: ", nl=False)
+     click.secho(tx_hash, fg="cyan", bold=True)
+     block_explorer_link = f"{DEFAULT_BLOCKCHAIN_EXPLORER}0x{tx_hash}"
+     click.echo("Block explorer link: ", nl=False)
+     click.secho(block_explorer_link, fg="blue", underline=True)
+     click.echo("──────────────────────────────────────")
+     click.secho("LLM Output:", fg="yellow", bold=True)
+     click.echo()
+     click.echo(llm_output)
+     click.echo()
+
+
+ @cli.command()
+ @click.option('--model', '-m', 'model_cid',
+               type=click.Choice([e.value for e in types.LLM]),
+               required=True,
+               help='CID of the LLM model to run inference on')
+ @click.option('--messages',
+               type=str,
+               required=False,
+               help='Input messages for the chat inference in JSON format')
+ @click.option('--messages-file',
+               type=click.Path(exists=True, path_type=Path),
+               required=False,
+               help='Path to JSON file containing input messages for the chat inference')
+ @click.option('--max-tokens',
+               type=int,
+               default=100,
+               help='Maximum number of tokens for LLM output')
+ @click.option('--stop-sequence',
+               type=str,
+               default=None,
+               multiple=True,
+               help='Stop sequences for LLM')
+ @click.option('--temperature',
+               type=float,
+               default=0.0,
+               help='Temperature for LLM inference (0.0 to 1.0)')
+ @click.option('--tools',
+               type=str,
+               default="[]",
+               help='Tool configurations in JSON format')
+ @click.option('--tools-file',
+               type=click.Path(exists=True, path_type=Path),
+               required=False,
+               help='Path to JSON file containing tool configurations')
+ @click.option('--tool-choice',
+               type=str,
+               default='',
+               help='Specific tool choice for the LLM')
+ @click.pass_context
+ def chat(
+     ctx,
+     model_cid: str,
+     messages: Optional[str],
+     messages_file: Optional[Path],
+     max_tokens: int,
+     stop_sequence: List[str],
+     temperature: float,
+     tools: Optional[str],
+     tools_file: Optional[Path],
+     tool_choice: Optional[str]):
+     """
+     Run chat inference on an LLM model.
+
+     This command runs a chat inference on the specified LLM model using the provided messages and parameters.
+
+     Example usage:
+
+     \b
+     opengradient chat --model meta-llama/Meta-Llama-3-8B-Instruct --messages '[{"role":"user","content":"hello"}]' --max-tokens 50 --temperature 0.7
+     opengradient chat -m mistralai/Mistral-7B-Instruct-v0.3 --messages-file messages.json --stop-sequence "." --stop-sequence "\n"
+     """
+     # TODO (Kyle): ^^^^^^^ Edit description with more examples using tools
+     client: Client = ctx.obj['client']
+     try:
+         click.echo(f"Running LLM chat inference for model \"{model_cid}\"\n")
+         if not messages and not messages_file:
+             click.echo("Must specify either messages or messages-file")
+             ctx.exit(1)
+             return
+         if messages and messages_file:
+             click.echo("Cannot have both messages and messages_file")
+             ctx.exit(1)
+             return
+
+         if messages:
+             try:
+                 messages = json.loads(messages)
+             except Exception as e:
+                 click.echo(f"Failed to parse messages: {e}")
+                 ctx.exit(1)
+         else:
+             with messages_file.open('r') as file:
+                 messages = json.load(file)
+
+         # Parse tools if provided
+         if (tools or tools != "[]") and tools_file:
+             click.echo("Cannot have both tools and tools_file")
+             click.exit(1)
+             return
+
+         parsed_tools = []
+         if tools:
+             try:
+                 parsed_tools = json.loads(tools)
+                 if not isinstance(parsed_tools, list):
+                     click.echo("Tools must be a JSON array")
+                     ctx.exit(1)
+                     return
+             except json.JSONDecodeError as e:
+                 click.echo(f"Failed to parse tools JSON: {e}")
+                 ctx.exit(1)
+                 return
+
+         if tools_file:
+             try:
+                 with tools_file.open('r') as file:
+                     parsed_tools = json.load(file)
+                 if not isinstance(parsed_tools, list):
+                     click.echo("Tools must be a JSON array")
+                     ctx.exit(1)
+                     return
+             except Exception as e:
+                 click.echo("Failed to load JSON from tools_file: %s" % e)
+                 ctx.exit(1)
+                 return
+
+         if not tools and not tools_file:
+             parsed_tools = None
+
+         tx_hash, finish_reason, llm_chat_output = client.llm_chat(
+             model_cid=model_cid,
+             messages=messages,
+             max_tokens=max_tokens,
+             stop_sequence=list(stop_sequence),
+             temperature=temperature,
+             tools=parsed_tools,
+             tool_choice=tool_choice,
+         )
+
+         # TODO (Kyle): Make this prettier
+         print("TX Hash: ", tx_hash)
+         print("Finish reason: ", finish_reason)
+         print("Chat output: ", llm_chat_output)
      except Exception as e:
-         click.echo(f"Error running LLM inference: {str(e)}")
+         click.echo(f"Error running LLM chat inference: {str(e)}")
+
+ def print_llm_chat_result():
+     pass

  @cli.command()
  def create_account():
@@ -364,7 +543,7 @@ def create_account_impl() -> EthAccount:
      click.echo("Step 1: Create Account on OpenGradient Hub")
      click.echo("-" * 50)

-     click.echo(f"Please create an account on the OpenGradient Hub")
+     click.echo("Please create an account on the OpenGradient Hub")
      webbrowser.open(DEFAULT_HUB_SIGNUP_URL, new=2)
      click.confirm("Have you successfully created your account on the OpenGradient Hub?", abort=True)

@@ -377,7 +556,7 @@ def create_account_impl() -> EthAccount:
      click.echo("\n" + "-" * 50)
      click.echo("Step 3: Fund Your Account")
      click.echo("-" * 50)
-     click.echo(f"Please fund your account clicking 'Request' on the Faucet website")
+     click.echo("Please fund your account clicking 'Request' on the Faucet website")
      webbrowser.open(DEFAULT_OG_FAUCET_URL + eth_account.address, new=2)
      click.confirm("Have you successfully funded your account using the Faucet?", abort=True)

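The new chat command's docstring still carries a TODO for tool-use examples. As a stopgap, here is a hedged sketch of an invocation with --tools-file, using the OpenAI-format tool definition that client.py converts into the on-chain ToolDefinition struct; the file name and the weather tool are illustrative, not from the package.

```json
[{"type": "function",
  "function": {"name": "get_current_weather",
               "description": "Get the current weather in a given location",
               "parameters": {"type": "object",
                              "properties": {"location": {"type": "string"}},
                              "required": ["location"]}}}]
```

With that saved as tools.json, an invocation in the docstring's example style could look like:

```
opengradient chat -m meta-llama/Meta-Llama-3-8B-Instruct \
    --messages '[{"role":"user","content":"What is the weather in San Francisco?"}]' \
    --tools-file tools.json --tool-choice auto
```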
opengradient/client.py CHANGED
@@ -1,15 +1,19 @@
- import requests
- import os
  import json
- from web3 import Web3
- from opengradient.exceptions import OpenGradientError
- from opengradient.types import InferenceMode
- from opengradient import utils
- import numpy as np
  import logging
- from typing import Dict, Optional, Tuple, Union, List
- from web3.exceptions import ContractLogicError
+ import os
+ from typing import Dict, List, Optional, Tuple, Union
+
  import firebase
+ import numpy as np
+ import requests
+ from web3 import Web3
+ from web3.exceptions import ContractLogicError
+ from web3.logs import DISCARD
+
+ from opengradient import utils
+ from opengradient.exceptions import OpenGradientError
+ from opengradient.types import InferenceMode, LLM
+

  class Client:
      FIREBASE_CONFIG = {
@@ -100,7 +104,7 @@ class Client:
          if not self.user:
              raise ValueError("User not authenticated")

-         url = f"https://api.opengradient.ai/api/v0/models/"
+         url = "https://api.opengradient.ai/api/v0/models/"
          headers = {
              'Authorization': f'Bearer {self.user["idToken"]}',
              'Content-Type': 'application/json'
@@ -186,15 +190,15 @@ class Client:
          logging.debug(f"Full server response: {json_response}")

          if isinstance(json_response, list) and not json_response:
-             logging.info(f"Server returned an empty list. Assuming version was created successfully.")
+             logging.info("Server returned an empty list. Assuming version was created successfully.")
              return {"versionString": "Unknown", "note": "Created based on empty response"}
          elif isinstance(json_response, dict):
-             versionString = json_response.get('versionString')
-             if not versionString:
+             version_string = json_response.get('versionString')
+             if not version_string:
                  logging.warning(f"'versionString' not found in response. Response: {json_response}")
                  return {"versionString": "Unknown", "note": "Version ID not provided in response"}
-             logging.info(f"Version creation successful. Version ID: {versionString}")
-             return {"versionString": versionString}
+             logging.info(f"Version creation successful. Version ID: {version_string}")
+             return {"versionString": version_string}
          else:
              logging.error(f"Unexpected response type: {type(json_response)}. Content: {json_response}")
              raise Exception(f"Unexpected response type: {type(json_response)}")
@@ -295,7 +299,12 @@ class Client:
              logging.error(f"Unexpected error during upload: {str(e)}", exc_info=True)
              raise OpenGradientError(f"Unexpected error during upload: {str(e)}")

-     def infer(self, model_cid: str, inference_mode: InferenceMode, model_input: Dict[str, Union[str, int, float, List, np.ndarray]]) -> Tuple[str, Dict[str, np.ndarray]]:
+     def infer(
+             self,
+             model_cid: str,
+             inference_mode: InferenceMode,
+             model_input: Dict[str, Union[str, int, float, List, np.ndarray]]
+     ) -> Tuple[str, Dict[str, np.ndarray]]:
          """
          Perform inference on a model.

@@ -376,19 +385,11 @@ class Client:
                  raise ContractLogicError(f"Transaction failed. Receipt: {tx_receipt}")

              # Process the InferenceResult event
-             inference_result = None
-             for log in tx_receipt['logs']:
-                 try:
-                     decoded_log = contract.events.InferenceResult().process_log(log)
-                     inference_result = decoded_log
-                     break
-                 except:
-                     continue
-
-             if inference_result is None:
-                 logging.error("InferenceResult event not found in transaction logs")
-                 logging.debug(f"Transaction receipt logs: {tx_receipt['logs']}")
+             parsed_logs = contract.events.InferenceResult().process_receipt(tx_receipt, errors=DISCARD)
+
+             if len(parsed_logs) < 1:
                  raise OpenGradientError("InferenceResult event not found in transaction logs")
+             inference_result = parsed_logs[0]

              # Extract the ModelOutput from the event
              event_data = inference_result['args']
@@ -410,24 +411,24 @@ class Client:
              logging.error(f"Error in infer method: {str(e)}", exc_info=True)
              raise OpenGradientError(f"Inference failed: {str(e)}")

-     def infer_llm(self,
-                   model_cid: str,
-                   prompt: str,
-                   max_tokens: int = 100,
-                   stop_sequence: Optional[List[str]] = None,
-                   temperature: float = 0.0) -> Tuple[str, str]:
+     def llm_completion(self,
+                        model_cid: LLM,
+                        prompt: str,
+                        max_tokens: int = 100,
+                        stop_sequence: Optional[List[str]] = None,
+                        temperature: float = 0.0) -> Tuple[str, str]:
          """
          Perform inference on an LLM model using completions.

          Args:
-             model_cid (str): The unique content identifier for the model.
+             model_cid (LLM): The unique content identifier for the model.
              prompt (str): The input prompt for the LLM.
              max_tokens (int): Maximum number of tokens for LLM output. Default is 100.
              stop_sequence (List[str], optional): List of stop sequences for LLM. Default is None.
              temperature (float): Temperature for LLM inference, between 0 and 1. Default is 0.0.

          Returns:
-             Tuple[str, str]: The transaction hash and the LLM output.
+             Tuple[str, str]: The transaction hash and the LLM completion output.

          Raises:
              OpenGradientError: If the inference fails.
@@ -435,7 +436,7 @@ class Client:
          try:
              self._initialize_web3()

-             abi_path = os.path.join(os.path.dirname(__file__), 'abi', 'llm.abi')
+             abi_path = os.path.join(os.path.dirname(__file__), 'abi', 'inference.abi')
              with open(abi_path, 'r') as abi_file:
                  llm_abi = json.load(abi_file)
              contract = self._w3.eth.contract(address=self.contract_address, abi=llm_abi)
@@ -452,7 +453,166 @@ class Client:
              logging.debug(f"Prepared LLM request: {llm_request}")

              # Prepare run function
-             run_function = contract.functions.runLLM(llm_request)
+             run_function = contract.functions.runLLMCompletion(llm_request)
+
+             # Build transaction
+             nonce = self._w3.eth.get_transaction_count(self.wallet_address)
+             estimated_gas = run_function.estimate_gas({'from': self.wallet_address})
+             gas_limit = int(estimated_gas * 1.2)
+
+             transaction = run_function.build_transaction({
+                 'from': self.wallet_address,
+                 'nonce': nonce,
+                 'gas': gas_limit,
+                 'gasPrice': self._w3.eth.gas_price,
+             })
+
+             # Sign and send transaction
+             signed_tx = self._w3.eth.account.sign_transaction(transaction, self.private_key)
+             tx_hash = self._w3.eth.send_raw_transaction(signed_tx.raw_transaction)
+             logging.debug(f"Transaction sent. Hash: {tx_hash.hex()}")
+
+             # Wait for transaction receipt
+             tx_receipt = self._w3.eth.wait_for_transaction_receipt(tx_hash)
+
+             if tx_receipt['status'] == 0:
+                 raise ContractLogicError(f"Transaction failed. Receipt: {tx_receipt}")
+
+             # Process the LLMResult event
+             parsed_logs = contract.events.LLMCompletionResult().process_receipt(tx_receipt, errors=DISCARD)
+
+             if len(parsed_logs) < 1:
+                 raise OpenGradientError("LLM completion result event not found in transaction logs")
+             llm_result = parsed_logs[0]
+
+             llm_answer = llm_result['args']['response']['answer']
+             return tx_hash.hex(), llm_answer
+
+         except ContractLogicError as e:
+             logging.error(f"Contract logic error: {str(e)}", exc_info=True)
+             raise OpenGradientError(f"LLM inference failed due to contract logic error: {str(e)}")
+         except Exception as e:
+             logging.error(f"Error in infer completion method: {str(e)}", exc_info=True)
+             raise OpenGradientError(f"LLM inference failed: {str(e)}")
+
+     def llm_chat(self,
+                  model_cid: str,
+                  messages: List[Dict],
+                  max_tokens: int = 100,
+                  stop_sequence: Optional[List[str]] = None,
+                  temperature: float = 0.0,
+                  tools: Optional[List[Dict]] = [],
+                  tool_choice: Optional[str] = None) -> Tuple[str, str]:
+         """
+         Perform inference on an LLM model using chat.
+
+         Args:
+             model_cid (LLM): The unique content identifier for the model.
+             messages (dict): The messages that will be passed into the chat.
+                 This should be in OpenAI API format (https://platform.openai.com/docs/api-reference/chat/create)
+                 Example:
+                     [
+                         {
+                             "role": "system",
+                             "content": "You are a helpful assistant."
+                         },
+                         {
+                             "role": "user",
+                             "content": "Hello!"
+                         }
+                     ]
+             max_tokens (int): Maximum number of tokens for LLM output. Default is 100.
+             stop_sequence (List[str], optional): List of stop sequences for LLM. Default is None.
+             temperature (float): Temperature for LLM inference, between 0 and 1. Default is 0.0.
+             tools (List[dict], optional): Set of tools
+                 This should be in OpenAI API format (https://platform.openai.com/docs/api-reference/chat/create#chat-create-tools)
+                 Example:
+                     [
+                         {
+                             "type": "function",
+                             "function": {
+                                 "name": "get_current_weather",
+                                 "description": "Get the current weather in a given location",
+                                 "parameters": {
+                                     "type": "object",
+                                     "properties": {
+                                         "location": {
+                                             "type": "string",
+                                             "description": "The city and state, e.g. San Francisco, CA"
+                                         },
+                                         "unit": {
+                                             "type": "string",
+                                             "enum": ["celsius", "fahrenheit"]
+                                         }
+                                     },
+                                     "required": ["location"]
+                                 }
+                             }
+                         }
+                     ]
+             tool_choice (str, optional): Sets a specific tool to choose. Default value is "auto".
+
+         Returns:
+             Tuple[str, str, dict]: The transaction hash, finish reason, and a dictionary struct of LLM chat messages.
+
+         Raises:
+             OpenGradientError: If the inference fails.
+         """
+         try:
+             self._initialize_web3()
+
+             abi_path = os.path.join(os.path.dirname(__file__), 'abi', 'inference.abi')
+             with open(abi_path, 'r') as abi_file:
+                 llm_abi = json.load(abi_file)
+             contract = self._w3.eth.contract(address=self.contract_address, abi=llm_abi)
+
+             # For incoming chat messages, tool_calls can be empty. Add an empty array so that it will fit the ABI.
+             for message in messages:
+                 if 'tool_calls' not in message:
+                     message['tool_calls'] = []
+                 if 'tool_call_id' not in message:
+                     message['tool_call_id'] = ""
+                 if 'name' not in message:
+                     message['name'] = ""
+
+             # Create simplified tool structure for smart contract
+             #
+             # struct ToolDefinition {
+             #     string description;
+             #     string name;
+             #     string parameters; // This must be a JSON
+             # }
+             converted_tools = []
+             if tools is not None:
+                 for tool in tools:
+                     function = tool['function']
+
+                     converted_tool = {}
+                     converted_tool['name'] = function['name']
+                     converted_tool['description'] = function['description']
+                     if (parameters := function.get('parameters')) is not None:
+                         try:
+                             converted_tool['parameters'] = json.dumps(parameters)
+                         except Exception as e:
+                             raise OpenGradientError("Chat LLM failed to convert parameters into JSON: %s", e)
+
+                     converted_tools.append(converted_tool)
+
+             # Prepare LLM input
+             llm_request = {
+                 "mode": InferenceMode.VANILLA,
+                 "modelCID": model_cid,
+                 "messages": messages,
+                 "max_tokens": max_tokens,
+                 "stop_sequence": stop_sequence or [],
+                 "temperature": int(temperature * 100),  # Scale to 0-100 range
+                 "tools": converted_tools or [],
+                 "tool_choice": tool_choice if tool_choice else ("" if tools is None else "auto")
+             }
+             logging.debug(f"Prepared LLM request: {llm_request}")
+
+             # Prepare run function
+             run_function = contract.functions.runLLMChat(llm_request)

              # Build transaction
              nonce = self._w3.eth.get_transaction_count(self.wallet_address)
@@ -478,27 +638,27 @@ class Client:
                  raise ContractLogicError(f"Transaction failed. Receipt: {tx_receipt}")

              # Process the LLMResult event
-             llm_result = None
-             for log in tx_receipt['logs']:
-                 try:
-                     decoded_log = contract.events.LLMResult().process_log(log)
-                     llm_result = decoded_log['args']['response']['answer']
-                     break
-                 except:
-                     continue
+             parsed_logs = contract.events.LLMChatResult().process_receipt(tx_receipt, errors=DISCARD)

-             if llm_result is None:
-                 raise OpenGradientError("LLMResult event not found in transaction logs")
+             if len(parsed_logs) < 1:
+                 raise OpenGradientError("LLM chat result event not found in transaction logs")
+             llm_result = parsed_logs[0]['args']['response']

-             logging.debug(f"LLM output: {llm_result}")
+             # Turn tool calls into normal dicts
+             message = dict(llm_result['message'])
+             if (tool_calls := message.get('tool_calls')) != None:
+                 new_tool_calls = []
+                 for tool_call in tool_calls:
+                     new_tool_calls.append(dict(tool_call))
+                 message['tool_calls'] = new_tool_calls

-             return tx_hash.hex(), llm_result
+             return (tx_hash.hex(), llm_result['finish_reason'], message)

          except ContractLogicError as e:
              logging.error(f"Contract logic error: {str(e)}", exc_info=True)
              raise OpenGradientError(f"LLM inference failed due to contract logic error: {str(e)}")
          except Exception as e:
-             logging.error(f"Error in infer_llm method: {str(e)}", exc_info=True)
+             logging.error(f"Error in infer chat method: {str(e)}", exc_info=True)
              raise OpenGradientError(f"LLM inference failed: {str(e)}")

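Tying the new client methods together, a hedged end-to-end sketch of Client.llm_chat with a tool, following the docstring above. The private key is a placeholder and the constructor arguments mirror how cli.py builds Client; this is a sketch, not the package's documented quickstart.

```python
# Sketch under assumptions: credentials are placeholders; the tool follows the
# OpenAI format documented in llm_chat's docstring.
from opengradient.client import Client
from opengradient.types import LLM

client = Client(private_key="0x...",  # placeholder
                rpc_url="http://18.218.115.248:8545",
                contract_address="0xF78F7d5a7e9f484f0924Cc21347029715bD3B8f4")

tx_hash, finish_reason, message = client.llm_chat(
    model_cid=LLM.META_LLAMA3_8B_INSTRUCT,
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "What's the weather in San Francisco?"},
    ],
    tools=[{"type": "function",
            "function": {"name": "get_current_weather",
                         "description": "Get the current weather in a given location",
                         "parameters": {"type": "object",
                                        "properties": {"location": {"type": "string"}},
                                        "required": ["location"]}}}],
    tool_choice="auto",
)

# When the model elects to call a tool, message["tool_calls"] carries the
# ABI's ToolCall fields {id, name, arguments}.
```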
opengradient/defaults.py CHANGED
@@ -3,4 +3,5 @@
  DEFAULT_RPC_URL="http://18.218.115.248:8545"
  DEFAULT_OG_FAUCET_URL="http://18.218.115.248:8080/?address="
  DEFAULT_HUB_SIGNUP_URL="https://hub.opengradient.ai/signup"
- DEFAULT_INFERENCE_CONTRACT_ADDRESS="0x350E0A430b2B1563481833a99523Cfd17a530e4e"
+ DEFAULT_INFERENCE_CONTRACT_ADDRESS="0xF78F7d5a7e9f484f0924Cc21347029715bD3B8f4"
+ DEFAULT_BLOCKCHAIN_EXPLORER="http://3.145.62.2/tx/"
opengradient/types.py CHANGED
@@ -1,5 +1,6 @@
- from typing import List, Tuple, Union
  from dataclasses import dataclass
+ from typing import List, Tuple, Union
+ from enum import Enum

  @dataclass
  class Number:
@@ -9,7 +10,7 @@ class Number:
  @dataclass
  class NumberTensor:
      name: str
-     values: List[Tuple[int, int]] # (int128, int128)[]
+     values: List[Tuple[int, int]]

  @dataclass
  class StringTensor:
@@ -37,7 +38,7 @@ class AbiFunction:
      name: str
      inputs: List[Union[str, 'AbiFunction']]
      outputs: List[Union[str, 'AbiFunction']]
-     stateMutability: str
+     state_mutability: str

  @dataclass
  class Abi:
@@ -54,7 +55,7 @@ class Abi:
                  name=item['name'],
                  inputs=inputs,
                  outputs=outputs,
-                 stateMutability=item['stateMutability']
+                 state_mutability=item['stateMutability']
              ))
          return cls(functions=functions)

@@ -67,8 +68,13 @@
                  name=item['name'],
                  inputs=Abi._parse_inputs_outputs(item['components']),
                  outputs=[],
-                 stateMutability=''
+                 state_mutability=''
              ))
          else:
              result.append(f"{item['name']}:{item['type']}")
-     return result
+     return result
+
+ class LLM(str, Enum):
+     META_LLAMA3_8B_INSTRUCT = "meta-llama/Meta-Llama-3-8B-Instruct"
+     LLAMA_3_2_3B_INSTRUCT = "meta-llama/Llama-3.2-3B-Instruct"
+     MISTRAL_7B_INSTRUCT_V3 = "mistralai/Mistral-7B-Instruct-v0.3"
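
Because the new LLM enum subclasses str, its members compare equal to their raw model-CID strings and can be passed anywhere a plain CID is expected. A quick illustration:

```python
from opengradient.types import LLM

model = LLM.MISTRAL_7B_INSTRUCT_V3
assert isinstance(model, str)                          # str subclass
assert model == "mistralai/Mistral-7B-Instruct-v0.3"   # compares as its CID
print(LLM.META_LLAMA3_8B_INSTRUCT.value)               # meta-llama/Meta-Llama-3-8B-Instruct
```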
opengradient/utils.py CHANGED
@@ -1,9 +1,11 @@
- import numpy as np
  import logging
  from decimal import Decimal
  from typing import Dict, List, Tuple
+
+ import numpy as np
  from web3.datastructures import AttributeDict

+
  def convert_to_fixed_point(number: float) -> Tuple[int, int]:
      """
      Converts input number to the Number tensor used by the sequencer.
@@ -36,14 +38,16 @@ def convert_to_float32(value: int, decimals: int) -> np.float32:
      """
      return np.float32(Decimal(value) / (10 ** Decimal(decimals)))

- def convert_to_model_input(inputs: Dict[str, np.ndarray]) -> Tuple[List[Tuple[str, List[Tuple[int, int]]]], List[Tuple[str, List[str]]]]:
+ def convert_to_model_input(
+         inputs: Dict[str, np.ndarray]
+ ) -> Tuple[List[Tuple[str, List[Tuple[int, int]]]], List[Tuple[str, List[str]]]]:
      """
      Expect SDK input to be a dict with the format
          key: tensor name
          value: np.array / list

-     Return a tuple of (number tensors, string tensors) depending on the input type. Each number and string tensor converted
-     to a numpy array and flattened and the shape saved.
+     Return a tuple of (number tensors, string tensors) depending on the input type.
+     Each number and string tensor converted to a numpy array and flattened and the shape saved.
      """
      logging.debug("Converting the following input dictionary to ModelInput: %s", inputs)
      number_tensors = []
@@ -60,7 +64,9 @@ def convert_to_model_input(inputs: Dict[str, np.ndarray]) -> Tuple[List[Tuple[st

      # Check if type is np array
      if not isinstance(tensor_data, np.ndarray):
-         raise TypeError("Inference input must be list, numpy array, or type (str, int, float): %s" % type(tensor_data))
+         raise TypeError(
+             "Inference input must be list, numpy array, or type (str, int, float): %s"
+             % type(tensor_data))

      # Flatten list and retain shape
      shape = tensor_data.shape
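
For the docstring above, a call-shape sketch of convert_to_model_input. The exact values in the result depend on convert_to_fixed_point, so only the input contract and the two-list return are shown; the tensor names are illustrative.

```python
# Call-shape sketch per the docstring: keys are tensor names, values are
# numpy arrays (or lists). Numeric data comes back in the first list,
# string data in the second; arrays are flattened with their shape recorded.
import numpy as np
from opengradient import utils

number_tensors, string_tensors = utils.convert_to_model_input({
    "embedding": np.array([[1.5, 2.0], [0.25, -1.0]]),
    "labels": np.array(["hello", "world"]),
})
```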
{opengradient-0.3.4.dist-info → opengradient-0.3.6.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: opengradient
- Version: 0.3.4
+ Version: 0.3.6
  Summary: Python SDK for OpenGradient decentralized model management & inference services
  Author-email: OpenGradient <oliver@opengradient.ai>
  License: MIT License
@@ -29,14 +29,10 @@ Project-URL: Homepage, https://opengradient.ai
  Classifier: Development Status :: 3 - Alpha
  Classifier: Intended Audience :: Developers
  Classifier: License :: OSI Approved :: MIT License
- Classifier: Programming Language :: Python :: 3
- Classifier: Programming Language :: Python :: 3.7
- Classifier: Programming Language :: Python :: 3.8
- Classifier: Programming Language :: Python :: 3.9
  Classifier: Programming Language :: Python :: 3.10
  Classifier: Programming Language :: Python :: 3.11
  Classifier: Programming Language :: Python :: 3.12
- Requires-Python: >=3.7
+ Requires-Python: >=3.10
  Description-Content-Type: text/markdown
  License-File: LICENSE
  Requires-Dist: aiohappyeyeballs==2.4.3
opengradient-0.3.6.dist-info/RECORD ADDED
@@ -0,0 +1,15 @@
+ opengradient/__init__.py,sha256=_n-w4S35NVarz8_d1l2dQ3750zWQ42woGm02XZoxwK8,3110
+ opengradient/account.py,sha256=2B7rtCXQDX-yF4U69h8B9-OUreJU4IqoGXG_1Hn9nWs,1150
+ opengradient/cli.py,sha256=Mv7iAyeBFUxaXL1hDIgHgn5x-GCMdi2BQgY6zrOkg_4,23003
+ opengradient/client.py,sha256=yaexPTr-CSPQXR5AOrhHDmygKCq37Q_FIVy8Mtq6THk,31253
+ opengradient/defaults.py,sha256=jweJ6QyzNY0oO22OUA8B-uJPA90cyedhMT9J38MmGjw,319
+ opengradient/exceptions.py,sha256=v4VmUGTvvtjhCZAhR24Ga42z3q-DzR1Y5zSqP_yn2Xk,3366
+ opengradient/types.py,sha256=Hkp0cXveMtNKxMpcwIHl1n4OGgYxx0HQM70qA05SZV8,2076
+ opengradient/utils.py,sha256=_dEIhepJXjJFfHLGTUwXloZJXnlQbvwqHSPu08548jI,6532
+ opengradient/abi/inference.abi,sha256=VMxv4pli9ESYL2hCpbU41Z_WweCBy_3EcTYkCWCb-rU,6623
+ opengradient-0.3.6.dist-info/LICENSE,sha256=xEcvQ3AxZOtDkrqkys2Mm6Y9diEnaSeQRKvxi-JGnNA,1069
+ opengradient-0.3.6.dist-info/METADATA,sha256=FWD86cWEvUXfyH6-gu3-oSAc5lOezUKFN7jejuPs3Ug,7608
+ opengradient-0.3.6.dist-info/WHEEL,sha256=P9jw-gEje8ByB7_hXoICnHtVCrEwMQh-630tKvQWehc,91
+ opengradient-0.3.6.dist-info/entry_points.txt,sha256=yUKTaJx8RXnybkob0J62wVBiCp_1agVbgw9uzsmaeJc,54
+ opengradient-0.3.6.dist-info/top_level.txt,sha256=oC1zimVLa2Yi1LQz8c7x-0IQm92milb5ax8gHBHwDqU,13
+ opengradient-0.3.6.dist-info/RECORD,,
{opengradient-0.3.4.dist-info → opengradient-0.3.6.dist-info}/WHEEL RENAMED
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (75.1.0)
+ Generator: setuptools (75.3.0)
  Root-Is-Purelib: true
  Tag: py3-none-any

opengradient/abi/llm.abi DELETED
@@ -1 +0,0 @@
- [{"anonymous":false,"inputs":[{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"},{"internalType":"bool","name":"is_simulation_result","type":"bool"}],"indexed":false,"internalType":"struct ModelOutput","name":"output","type":"tuple"}],"name":"InferenceResult","type":"event"},{"anonymous":false,"inputs":[{"components":[{"internalType":"string","name":"answer","type":"string"}],"indexed":false,"internalType":"struct LlmResponse","name":"response","type":"tuple"}],"name":"LLMResult","type":"event"},{"inputs":[{"internalType":"string","name":"modelId","type":"string"},{"internalType":"enum ModelInferenceMode","name":"inferenceMode","type":"uint8"},{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"}],"internalType":"struct ModelInput","name":"modelInput","type":"tuple"}],"name":"run","outputs":[{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"},{"internalType":"bool","name":"is_simulation_result","type":"bool"}],"internalType":"struct ModelOutput","name":"","type":"tuple"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"components":[{"internalType":"enum LlmInferenceMode","name":"mode","type":"uint8"},{"internalType":"string","name":"modelCID","type":"string"},{"internalType":"string","name":"prompt","type":"string"},{"internalType":"uint32","name":"max_tokens","type":"uint32"},{"internalType":"string[]","name":"stop_sequence","type":"string[]"},{"internalType":"uint32","name":"temperature","type":"uint32"}],"internalType":"struct LlmInferenceRequest","name":"request","type":"tuple"}],"name":"runLLM","outputs":[{"components":[{"internalType":"string","name":"answer","type":"string"}],"internalType":"struct 
LlmResponse","name":"","type":"tuple"}],"stateMutability":"nonpayable","type":"function"}]
opengradient-0.3.4.dist-info/RECORD DELETED
@@ -1,16 +0,0 @@
- opengradient/__init__.py,sha256=6V8HyFFkkqnGSwrbj36AuLijHs0pOxxnbBeHWy8LQ6w,2447
- opengradient/account.py,sha256=s1C4hAtc8vcHObWjwxwlYJA041S6DTbr7-rK6qiWPsQ,1149
- opengradient/cli.py,sha256=T59Z2S3AsMVS6TLgsSgxW9esssvYP5ZmVJrYE6p4oW4,16105
- opengradient/client.py,sha256=DCDp2EWPF62ZQnx2_cM0wPghRxgn213VnR65R8yZBVY,23964
- opengradient/defaults.py,sha256=pDfsmPoUzdLG55n-hwh0CMBFxKR2rdNcjqCcwTWc6iw,267
- opengradient/exceptions.py,sha256=v4VmUGTvvtjhCZAhR24Ga42z3q-DzR1Y5zSqP_yn2Xk,3366
- opengradient/types.py,sha256=EoJN-DkQrJ2WTUv8OenlrlWJWFY2jPGTl-T8C_OVjp8,1849
- opengradient/utils.py,sha256=F1Nj-GMNFQFxCtbGgWQq1RP4TSurbpQxJV3yKeEo1b0,6482
- opengradient/abi/inference.abi,sha256=u8FsW0s1YeRjUb9eLS1k_qh_5f_cwOdr0bii-tAdxh0,2683
- opengradient/abi/llm.abi,sha256=zhiPFyBT09EI3QU5DVoKHo7e8T9PFcfIQ3RHDYetm4M,3609
- opengradient-0.3.4.dist-info/LICENSE,sha256=xEcvQ3AxZOtDkrqkys2Mm6Y9diEnaSeQRKvxi-JGnNA,1069
- opengradient-0.3.4.dist-info/METADATA,sha256=7mWZNlCuVtuDsSK34t1zHQ4UYUL6nXTwzIhCfwZgDoE,7805
- opengradient-0.3.4.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
- opengradient-0.3.4.dist-info/entry_points.txt,sha256=yUKTaJx8RXnybkob0J62wVBiCp_1agVbgw9uzsmaeJc,54
- opengradient-0.3.4.dist-info/top_level.txt,sha256=oC1zimVLa2Yi1LQz8c7x-0IQm92milb5ax8gHBHwDqU,13
- opengradient-0.3.4.dist-info/RECORD,,