opengradient 0.3.7__py3-none-any.whl → 0.3.9__py3-none-any.whl

opengradient/__init__.py CHANGED
@@ -4,7 +4,7 @@ from .client import Client
  from .defaults import DEFAULT_INFERENCE_CONTRACT_ADDRESS, DEFAULT_RPC_URL
  from .types import InferenceMode, LLM

- __version__ = "0.3.5"
+ __version__ = "0.3.9"

  _client = None

opengradient/abi/inference.abi CHANGED
@@ -1 +1 @@
- [{"anonymous":false,"inputs":[{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"},{"internalType":"bool","name":"is_simulation_result","type":"bool"}],"indexed":false,"internalType":"struct ModelOutput","name":"output","type":"tuple"}],"name":"InferenceResult","type":"event"},{"anonymous":false,"inputs":[{"components":[{"internalType":"string","name":"finish_reason","type":"string"},{"components":[{"internalType":"string","name":"role","type":"string"},{"internalType":"string","name":"content","type":"string"},{"components":[{"internalType":"string","name":"id","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"arguments","type":"string"}],"internalType":"struct ToolCall[]","name":"tool_calls","type":"tuple[]"}],"internalType":"struct ChatMessage","name":"message","type":"tuple"}],"indexed":false,"internalType":"struct LLMChatResponse","name":"response","type":"tuple"}],"name":"LLMChatResult","type":"event"},{"anonymous":false,"inputs":[{"components":[{"internalType":"string","name":"answer","type":"string"}],"indexed":false,"internalType":"struct LLMCompletionResponse","name":"response","type":"tuple"}],"name":"LLMCompletionResult","type":"event"},{"inputs":[{"internalType":"string","name":"modelId","type":"string"},{"internalType":"enum ModelInferenceMode","name":"inferenceMode","type":"uint8"},{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"}],"internalType":"struct ModelInput","name":"modelInput","type":"tuple"}],"name":"run","outputs":[{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"},{"internalType":"bool","name":"is_simulation_result","type":"bool"}],"internalType":"struct ModelOutput","name":"","type":"tuple"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"components":[{"internalType":"enum LLMInferenceMode","name":"mode","type":"uint8"},{"internalType":"string","name":"modelCID","type":"string"},{"components":[{"internalType":"string","name":"role","type":"string"},{"internalType":"string","name":"content","type":"string"},{"components":[{"internalType":"string","name":"id","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"arguments","type":"string"}],"internalType":"struct ToolCall[]","name":"tool_calls","type":"tuple[]"}],"internalType":"struct ChatMessage[]","name":"messages","type":"tuple[]"},{"components":[{"internalType":"string","name":"description","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"parameters","type":"string"}],"internalType":"struct ToolDefinition[]","name":"tools","type":"tuple[]"},{"internalType":"string","name":"tool_choice","type":"string"},{"internalType":"uint32","name":"max_tokens","type":"uint32"},{"internalType":"string[]","name":"stop_sequence","type":"string[]"},{"internalType":"uint32","name":"temperature","type":"uint32"}],"internalType":"struct LLMChatRequest","name":"request","type":"tuple"}],"name":"runLLMChat","outputs":[{"components":[{"internalType":"string","name":"finish_reason","type":"string"},{"components":[{"internalType":"string","name":"role","type":"string"},{"internalType":"string","name":"content","type":"string"},{"components":[{"internalType":"string","name":"id","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"arguments","type":"string"}],"internalType":"struct ToolCall[]","name":"tool_calls","type":"tuple[]"}],"internalType":"struct ChatMessage","name":"message","type":"tuple"}],"internalType":"struct LLMChatResponse","name":"","type":"tuple"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"components":[{"internalType":"enum LLMInferenceMode","name":"mode","type":"uint8"},{"internalType":"string","name":"modelCID","type":"string"},{"internalType":"string","name":"prompt","type":"string"},{"internalType":"uint32","name":"max_tokens","type":"uint32"},{"internalType":"string[]","name":"stop_sequence","type":"string[]"},{"internalType":"uint32","name":"temperature","type":"uint32"}],"internalType":"struct LLMCompletionRequest","name":"request","type":"tuple"}],"name":"runLLMCompletion","outputs":[{"components":[{"internalType":"string","name":"answer","type":"string"}],"internalType":"struct LLMCompletionResponse","name":"","type":"tuple"}],"stateMutability":"nonpayable","type":"function"}]
+ [{"anonymous":false,"inputs":[{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"},{"internalType":"bool","name":"is_simulation_result","type":"bool"}],"indexed":false,"internalType":"struct ModelOutput","name":"output","type":"tuple"}],"name":"InferenceResult","type":"event"},{"anonymous":false,"inputs":[{"components":[{"internalType":"string","name":"finish_reason","type":"string"},{"components":[{"internalType":"string","name":"role","type":"string"},{"internalType":"string","name":"content","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"tool_call_id","type":"string"},{"components":[{"internalType":"string","name":"id","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"arguments","type":"string"}],"internalType":"struct ToolCall[]","name":"tool_calls","type":"tuple[]"}],"internalType":"struct ChatMessage","name":"message","type":"tuple"}],"indexed":false,"internalType":"struct LLMChatResponse","name":"response","type":"tuple"}],"name":"LLMChatResult","type":"event"},{"anonymous":false,"inputs":[{"components":[{"internalType":"string","name":"answer","type":"string"}],"indexed":false,"internalType":"struct LLMCompletionResponse","name":"response","type":"tuple"}],"name":"LLMCompletionResult","type":"event"},{"inputs":[{"internalType":"string","name":"modelId","type":"string"},{"internalType":"enum ModelInferenceMode","name":"inferenceMode","type":"uint8"},{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"}],"internalType":"struct ModelInput","name":"modelInput","type":"tuple"}],"name":"run","outputs":[{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"},{"internalType":"bool","name":"is_simulation_result","type":"bool"}],"internalType":"struct ModelOutput","name":"","type":"tuple"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"components":[{"internalType":"enum LLMInferenceMode","name":"mode","type":"uint8"},{"internalType":"string","name":"modelCID","type":"string"},{"components":[{"internalType":"string","name":"role","type":"string"},{"internalType":"string","name":"content","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"tool_call_id","type":"string"},{"components":[{"internalType":"string","name":"id","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"arguments","type":"string"}],"internalType":"struct ToolCall[]","name":"tool_calls","type":"tuple[]"}],"internalType":"struct ChatMessage[]","name":"messages","type":"tuple[]"},{"components":[{"internalType":"string","name":"description","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"parameters","type":"string"}],"internalType":"struct ToolDefinition[]","name":"tools","type":"tuple[]"},{"internalType":"string","name":"tool_choice","type":"string"},{"internalType":"uint32","name":"max_tokens","type":"uint32"},{"internalType":"string[]","name":"stop_sequence","type":"string[]"},{"internalType":"uint32","name":"temperature","type":"uint32"}],"internalType":"struct LLMChatRequest","name":"request","type":"tuple"}],"name":"runLLMChat","outputs":[{"components":[{"internalType":"string","name":"finish_reason","type":"string"},{"components":[{"internalType":"string","name":"role","type":"string"},{"internalType":"string","name":"content","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"tool_call_id","type":"string"},{"components":[{"internalType":"string","name":"id","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"arguments","type":"string"}],"internalType":"struct ToolCall[]","name":"tool_calls","type":"tuple[]"}],"internalType":"struct ChatMessage","name":"message","type":"tuple"}],"internalType":"struct LLMChatResponse","name":"","type":"tuple"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"components":[{"internalType":"enum LLMInferenceMode","name":"mode","type":"uint8"},{"internalType":"string","name":"modelCID","type":"string"},{"internalType":"string","name":"prompt","type":"string"},{"internalType":"uint32","name":"max_tokens","type":"uint32"},{"internalType":"string[]","name":"stop_sequence","type":"string[]"},{"internalType":"uint32","name":"temperature","type":"uint32"}],"internalType":"struct LLMCompletionRequest","name":"request","type":"tuple"}],"name":"runLLMCompletion","outputs":[{"components":[{"internalType":"string","name":"answer","type":"string"}],"internalType":"struct LLMCompletionResponse","name":"","type":"tuple"}],"stateMutability":"nonpayable","type":"function"}]
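
The substantive ABI change is to the ChatMessage struct, which gains two string fields, name and tool_call_id, on both the request side (LLMChatRequest.messages) and the response side (LLMChatResponse.message and the LLMChatResult event). A minimal sketch of a message dict shaped to match the new struct; the values are illustrative, not taken from the package:

    # Shape required by the 0.3.9 ChatMessage ABI struct (field names from the ABI above)
    message = {
        "role": "assistant",
        "content": "",
        "name": "",           # new in 0.3.9: participant name, empty when unused
        "tool_call_id": "",   # new in 0.3.9: links a tool-role message to its originating call
        "tool_calls": [
            {"id": "call_0", "name": "get_weather", "arguments": "{\"city\": \"Paris\"}"},
        ],
    }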
opengradient/cli.py CHANGED
@@ -3,7 +3,9 @@ import json
  import logging
  import webbrowser
  from pathlib import Path
- from typing import List
+ from typing import List, Dict, Optional
+ from enum import Enum
+ from . import types

  import click

@@ -338,27 +340,27 @@ def infer(ctx, model_cid: str, inference_mode: str, input_data, input_file: Path

  @cli.command()
  @click.option('--model', '-m', 'model_cid', type=click.Choice(LlmModels), required=True, help='CID of the LLM model to run inference on')
- @click.option('--prompt', '-p', required=True, help='Input prompt for the LLM')
- @click.option('--max-tokens', type=int, default=100, help='Maximum number of tokens for LLM output')
+ @click.option('--prompt', '-p', required=True, help='Input prompt for the LLM completion')
+ @click.option('--max-tokens', type=int, default=100, help='Maximum number of tokens for LLM completion output')
  @click.option('--stop-sequence', multiple=True, help='Stop sequences for LLM')
  @click.option('--temperature', type=float, default=0.0, help='Temperature for LLM inference (0.0 to 1.0)')
  @click.pass_context
- def llm(ctx, model_cid: str, prompt: str, max_tokens: int, stop_sequence: List[str], temperature: float):
+ def completion(ctx, model_cid: str, prompt: str, max_tokens: int, stop_sequence: List[str], temperature: float):
      """
-     Run inference on an LLM model.
+     Run completion inference on an LLM model.

-     This command runs inference on the specified LLM model using the provided prompt and parameters.
+     This command runs a completion inference on the specified LLM model using the provided prompt and parameters.

      Example usage:

      \b
-     opengradient llm --model meta-llama/Meta-Llama-3-8B-Instruct --prompt "Hello, how are you?" --max-tokens 50 --temperature 0.7
-     opengradient llm -m meta-llama/Meta-Llama-3-8B-Instruct -p "Translate to French: Hello world" --stop-sequence "." --stop-sequence "\n"
+     opengradient completion --model meta-llama/Meta-Llama-3-8B-Instruct --prompt "Hello, how are you?" --max-tokens 50 --temperature 0.7
+     opengradient completion -m meta-llama/Meta-Llama-3-8B-Instruct -p "Translate to French: Hello world" --stop-sequence "." --stop-sequence "\n"
      """
      client: Client = ctx.obj['client']
      try:
-         click.echo(f"Running LLM inference for model \"{model_cid}\"\n")
-         tx_hash, llm_output = client.infer_llm(
+         click.echo(f"Running LLM completion inference for model \"{model_cid}\"\n")
+         tx_hash, llm_output = client.llm_completion(
              model_cid=model_cid,
              prompt=prompt,
              max_tokens=max_tokens,
@@ -366,12 +368,12 @@ def llm(ctx, model_cid: str, prompt: str, max_tokens: int, stop_sequence: List[s
              temperature=temperature
          )

-         print_llm_inference_result(model_cid, tx_hash, llm_output)
+         print_llm_completion_result(model_cid, tx_hash, llm_output)
      except Exception as e:
-         click.echo(f"Error running LLM inference: {str(e)}")
+         click.echo(f"Error running LLM completion: {str(e)}")

- def print_llm_inference_result(model_cid, tx_hash, llm_output):
-     click.secho("✅ LLM Inference Successful", fg="green", bold=True)
+ def print_llm_completion_result(model_cid, tx_hash, llm_output):
+     click.secho("✅ LLM completion Successful", fg="green", bold=True)
      click.echo("──────────────────────────────────────")
      click.echo("Model CID: ", nl=False)
      click.secho(model_cid, fg="cyan", bold=True)
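
For existing users the rename above is a breaking change: the CLI subcommand llm becomes completion, and the client method infer_llm becomes llm_completion, with the parameter list shown in this hunk unchanged. A minimal before/after sketch, assuming client is an opengradient.Client instance:

    # 0.3.7
    tx_hash, output = client.infer_llm(model_cid="meta-llama/Meta-Llama-3-8B-Instruct", prompt="hello")
    # 0.3.9
    tx_hash, output = client.llm_completion(model_cid="meta-llama/Meta-Llama-3-8B-Instruct", prompt="hello")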
@@ -386,6 +388,145 @@ def print_llm_inference_result(model_cid, tx_hash, llm_output):
      click.echo(llm_output)
      click.echo()

+
+ @cli.command()
+ @click.option('--model', '-m', 'model_cid',
+               type=click.Choice([e.value for e in types.LLM]),
+               required=True,
+               help='CID of the LLM model to run inference on')
+ @click.option('--messages',
+               type=str,
+               required=False,
+               help='Input messages for the chat inference in JSON format')
+ @click.option('--messages-file',
+               type=click.Path(exists=True, path_type=Path),
+               required=False,
+               help='Path to JSON file containing input messages for the chat inference')
+ @click.option('--max-tokens',
+               type=int,
+               default=100,
+               help='Maximum number of tokens for LLM output')
+ @click.option('--stop-sequence',
+               type=str,
+               default=None,
+               multiple=True,
+               help='Stop sequences for LLM')
+ @click.option('--temperature',
+               type=float,
+               default=0.0,
+               help='Temperature for LLM inference (0.0 to 1.0)')
+ @click.option('--tools',
+               type=str,
+               default=None,
+               help='Tool configurations in JSON format')
+ @click.option('--tools-file',
+               type=click.Path(exists=True, path_type=Path),
+               required=False,
+               help='Path to JSON file containing tool configurations')
+ @click.option('--tool-choice',
+               type=str,
+               default='',
+               help='Specific tool choice for the LLM')
+ @click.pass_context
+ def chat(
+     ctx,
+     model_cid: str,
+     messages: Optional[str],
+     messages_file: Optional[Path],
+     max_tokens: int,
+     stop_sequence: List[str],
+     temperature: float,
+     tools: Optional[str],
+     tools_file: Optional[Path],
+     tool_choice: Optional[str]):
+     """
+     Run chat inference on an LLM model.
+
+     This command runs a chat inference on the specified LLM model using the provided messages and parameters.
+
+     Example usage:
+
+     \b
+     opengradient chat --model meta-llama/Meta-Llama-3-8B-Instruct --messages '[{"role":"user","content":"hello"}]' --max-tokens 50 --temperature 0.7
+     opengradient chat -m mistralai/Mistral-7B-Instruct-v0.3 --messages-file messages.json --stop-sequence "." --stop-sequence "\n"
+     """
+     client: Client = ctx.obj['client']
+     try:
+         click.echo(f"Running LLM chat inference for model \"{model_cid}\"\n")
+         if not messages and not messages_file:
+             click.echo("Must specify either messages or messages-file")
+             ctx.exit(1)
+             return
+         if messages and messages_file:
+             click.echo("Cannot have both messages and messages_file")
+             ctx.exit(1)
+             return
+
+         if messages:
+             try:
+                 messages = json.loads(messages)
+             except Exception as e:
+                 click.echo(f"Failed to parse messages: {e}")
+                 ctx.exit(1)
+         else:
+             with messages_file.open('r') as file:
+                 messages = json.load(file)
+
+         # Parse tools if provided
+         if tools is not None and tools != "[]" and tools_file:
+             click.echo("Cannot have both tools and tools_file")
+             ctx.exit(1)
+             return
+
+         parsed_tools=[]
+         if tools:
+             try:
+                 parsed_tools = json.loads(tools)
+                 if not isinstance(parsed_tools, list):
+                     click.echo("Tools must be a JSON array")
+                     ctx.exit(1)
+                     return
+             except json.JSONDecodeError as e:
+                 click.echo(f"Failed to parse tools JSON: {e}")
+                 ctx.exit(1)
+                 return
+
+         if tools_file:
+             try:
+                 with tools_file.open('r') as file:
+                     parsed_tools = json.load(file)
+                 if not isinstance(parsed_tools, list):
+                     click.echo("Tools must be a JSON array")
+                     ctx.exit(1)
+                     return
+             except Exception as e:
+                 click.echo("Failed to load JSON from tools_file: %s" % e)
+                 ctx.exit(1)
+                 return
+
+         if not tools and not tools_file:
+             parsed_tools = None
+
+         tx_hash, finish_reason, llm_chat_output = client.llm_chat(
+             model_cid=model_cid,
+             messages=messages,
+             max_tokens=max_tokens,
+             stop_sequence=list(stop_sequence),
+             temperature=temperature,
+             tools=parsed_tools,
+             tool_choice=tool_choice,
+         )
+
+         # TODO (Kyle): Make this prettier
+         print("TX Hash: ", tx_hash)
+         print("Finish reason: ", finish_reason)
+         print("Chat output: ", llm_chat_output)
+     except Exception as e:
+         click.echo(f"Error running LLM chat inference: {str(e)}")
+
+ def print_llm_chat_result():
+     pass
+
  @cli.command()
  def create_account():
      """Create a new test account for OpenGradient inference and model management"""
opengradient/client.py CHANGED
@@ -553,7 +553,7 @@ class Client:
              tool_choice (str, optional): Sets a specific tool to choose. Default value is "auto".

          Returns:
-             Tuple[str, str]: The transaction hash and the LLM chat output.
+             Tuple[str, str, dict]: The transaction hash, finish reason, and a dictionary struct of LLM chat messages.

          Raises:
              OpenGradientError: If the inference fails.
@@ -570,6 +570,10 @@ class Client:
          for message in messages:
              if 'tool_calls' not in message:
                  message['tool_calls'] = []
+             if 'tool_call_id' not in message:
+                 message['tool_call_id'] = ""
+             if 'name' not in message:
+                 message['name'] = ""

          # Create simplified tool structure for smart contract
          #
@@ -638,9 +642,17 @@ class Client:

              if len(parsed_logs) < 1:
                  raise OpenGradientError("LLM chat result event not found in transaction logs")
-             llm_result = parsed_logs[0]
+             llm_result = parsed_logs[0]['args']['response']
+
+             # Turn tool calls into normal dicts
+             message = dict(llm_result['message'])
+             if (tool_calls := message.get('tool_calls')) != None:
+                 new_tool_calls = []
+                 for tool_call in tool_calls:
+                     new_tool_calls.append(dict(tool_call))
+                 message['tool_calls'] = new_tool_calls

-             return tx_hash.hex(), llm_result
+             return (tx_hash.hex(), llm_result['finish_reason'], message)

          except ContractLogicError as e:
              logging.error(f"Contract logic error: {str(e)}", exc_info=True)
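
The net effect for SDK callers is that Client.llm_chat now returns a 3-tuple (tx_hash, finish_reason, message) instead of the previous 2-tuple, with the message and its tool calls already converted to plain dicts. A sketch of caller code updated for the new signature, assuming client is an existing opengradient.Client instance and using an illustrative model CID:

    tx_hash, finish_reason, message = client.llm_chat(
        model_cid="meta-llama/Meta-Llama-3-8B-Instruct",
        messages=[{"role": "user", "content": "hello"}],
        max_tokens=50,
    )
    print(finish_reason)                 # e.g. "stop", or a tool-call finish reason
    print(message["content"])            # assistant text; may be empty when tools are called
    for call in message["tool_calls"]:   # plain dicts after the conversion above
        print(call["name"], call["arguments"])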
opengradient/defaults.py CHANGED
@@ -3,5 +3,5 @@
  DEFAULT_RPC_URL="http://18.218.115.248:8545"
  DEFAULT_OG_FAUCET_URL="http://18.218.115.248:8080/?address="
  DEFAULT_HUB_SIGNUP_URL="https://hub.opengradient.ai/signup"
- DEFAULT_INFERENCE_CONTRACT_ADDRESS="0x24Ec56879245C707220Af7234d2fF3F22cA9Aa63"
+ DEFAULT_INFERENCE_CONTRACT_ADDRESS="0xF78F7d5a7e9f484f0924Cc21347029715bD3B8f4"
  DEFAULT_BLOCKCHAIN_EXPLORER="http://3.145.62.2/tx/"
opengradient/types.py CHANGED
@@ -1,6 +1,6 @@
  from dataclasses import dataclass
  from typing import List, Tuple, Union
-
+ from enum import Enum

  @dataclass
  class Number:
@@ -74,7 +74,8 @@ class Abi:
              result.append(f"{item['name']}:{item['type']}")
          return result

- class LLM:
+ class LLM(str, Enum):
      META_LLAMA3_8B_INSTRUCT = "meta-llama/Meta-Llama-3-8B-Instruct"
      LLAMA_3_2_3B_INSTRUCT = "meta-llama/Llama-3.2-3B-Instruct"
-     MISTRAL_7B_INSTRUCT_V3 = "mistralai/Mistral-7B-Instruct-v0.3"
+     MISTRAL_7B_INSTRUCT_V3 = "mistralai/Mistral-7B-Instruct-v0.3"
+     HERMES_3_LLAMA_3_1_70B = "NousResearch/Hermes-3-Llama-3.1-70B"
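
Because LLM is now a str subclass as well as an Enum, its members compare equal to their model-CID strings and serialize cleanly, which is what lets cli.py build click.Choice([e.value for e in types.LLM]) above. A small sketch of the resulting behavior:

    from opengradient.types import LLM

    assert LLM.META_LLAMA3_8B_INSTRUCT == "meta-llama/Meta-Llama-3-8B-Instruct"       # str equality
    assert LLM.HERMES_3_LLAMA_3_1_70B.value == "NousResearch/Hermes-3-Llama-3.1-70B"  # new in 0.3.9
    print([model.value for model in LLM])  # every CID accepted by the CLI --model option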
opengradient-0.3.9.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: opengradient
- Version: 0.3.7
+ Version: 0.3.9
  Summary: Python SDK for OpenGradient decentralized model management & inference services
  Author-email: OpenGradient <oliver@opengradient.ai>
  License: MIT License
opengradient-0.3.9.dist-info/RECORD ADDED
@@ -0,0 +1,15 @@
+ opengradient/__init__.py,sha256=eExTI4XJMp1rPcUy5jH2xi_P4qKph0P98Qf4xJ3yQ4I,3110
+ opengradient/account.py,sha256=2B7rtCXQDX-yF4U69h8B9-OUreJU4IqoGXG_1Hn9nWs,1150
+ opengradient/cli.py,sha256=0SRt9iQcCHxR1QmsF54-KhdpqHx9_va0UyckoPQcYwg,22937
+ opengradient/client.py,sha256=yaexPTr-CSPQXR5AOrhHDmygKCq37Q_FIVy8Mtq6THk,31253
+ opengradient/defaults.py,sha256=jweJ6QyzNY0oO22OUA8B-uJPA90cyedhMT9J38MmGjw,319
+ opengradient/exceptions.py,sha256=v4VmUGTvvtjhCZAhR24Ga42z3q-DzR1Y5zSqP_yn2Xk,3366
+ opengradient/types.py,sha256=kRCkxJ2xD6Y5eLmrrS61t66qm7jzNF0Qbnntl1_FMKk,2143
+ opengradient/utils.py,sha256=_dEIhepJXjJFfHLGTUwXloZJXnlQbvwqHSPu08548jI,6532
+ opengradient/abi/inference.abi,sha256=VMxv4pli9ESYL2hCpbU41Z_WweCBy_3EcTYkCWCb-rU,6623
+ opengradient-0.3.9.dist-info/LICENSE,sha256=xEcvQ3AxZOtDkrqkys2Mm6Y9diEnaSeQRKvxi-JGnNA,1069
+ opengradient-0.3.9.dist-info/METADATA,sha256=ZAayPv7Ugn-NZqpjDEXTm56djR8iePhbHWr5Ajx-Ma8,7608
+ opengradient-0.3.9.dist-info/WHEEL,sha256=R06PA3UVYHThwHvxuRWMqaGcr-PuniXahwjmQRFMEkY,91
+ opengradient-0.3.9.dist-info/entry_points.txt,sha256=yUKTaJx8RXnybkob0J62wVBiCp_1agVbgw9uzsmaeJc,54
+ opengradient-0.3.9.dist-info/top_level.txt,sha256=oC1zimVLa2Yi1LQz8c7x-0IQm92milb5ax8gHBHwDqU,13
+ opengradient-0.3.9.dist-info/RECORD,,
opengradient-0.3.9.dist-info/WHEEL CHANGED
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (75.3.0)
+ Generator: setuptools (75.5.0)
  Root-Is-Purelib: true
  Tag: py3-none-any

opengradient-0.3.7.dist-info/RECORD DELETED
@@ -1,15 +0,0 @@
- opengradient/__init__.py,sha256=3Td4Ne2WDJ7yKCIOwmKA3fokftE8uUxhNdC7Uezbacc,3110
- opengradient/account.py,sha256=2B7rtCXQDX-yF4U69h8B9-OUreJU4IqoGXG_1Hn9nWs,1150
- opengradient/cli.py,sha256=fqM6OrUoblEJeJgqfyneUVV-DSH4Cip8AhzpadZRG_k,17904
- opengradient/client.py,sha256=ptEgua0NaKbR1SoRMxqE1_0q2SMnHM-hpYHEHxYzBmc,30620
- opengradient/defaults.py,sha256=YI84_wWTvWxPMQIuKiSair2wffATnhitE3Ll2P1jHMU,319
- opengradient/exceptions.py,sha256=v4VmUGTvvtjhCZAhR24Ga42z3q-DzR1Y5zSqP_yn2Xk,3366
- opengradient/types.py,sha256=jKZDo2R39Z99x5POMgQgkb6neSUvGUdBZjgqS_tBQuA,2044
- opengradient/utils.py,sha256=_dEIhepJXjJFfHLGTUwXloZJXnlQbvwqHSPu08548jI,6532
- opengradient/abi/inference.abi,sha256=d3UDkyVTnA4coExFdFI0NtEudTRX2wVsbuQ84uO_2so,6262
- opengradient-0.3.7.dist-info/LICENSE,sha256=xEcvQ3AxZOtDkrqkys2Mm6Y9diEnaSeQRKvxi-JGnNA,1069
- opengradient-0.3.7.dist-info/METADATA,sha256=Q4uavsx1jnlZPmgxWf4ypABXblhzgUZ2_q04EvvDTVI,7608
- opengradient-0.3.7.dist-info/WHEEL,sha256=P9jw-gEje8ByB7_hXoICnHtVCrEwMQh-630tKvQWehc,91
- opengradient-0.3.7.dist-info/entry_points.txt,sha256=yUKTaJx8RXnybkob0J62wVBiCp_1agVbgw9uzsmaeJc,54
- opengradient-0.3.7.dist-info/top_level.txt,sha256=oC1zimVLa2Yi1LQz8c7x-0IQm92milb5ax8gHBHwDqU,13
- opengradient-0.3.7.dist-info/RECORD,,