opengradient 0.3.4__py3-none-any.whl → 0.3.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
opengradient/__init__.py CHANGED
@@ -1,8 +1,10 @@
+ from typing import Dict, List, Optional, Tuple
+
  from .client import Client
- from .defaults import *
- from .types import InferenceMode
- from typing import List, Dict, Optional, Tuple
- __version__ = "0.3.4"
+ from .defaults import DEFAULT_INFERENCE_CONTRACT_ADDRESS, DEFAULT_RPC_URL
+ from .types import InferenceMode, LLM
+
+ __version__ = "0.3.5"

  _client = None

@@ -42,14 +44,25 @@ def infer(model_cid, inference_mode, model_input):
          raise RuntimeError("OpenGradient client not initialized. Call og.init() first.")
      return _client.infer(model_cid, inference_mode, model_input)

- def infer_llm(model_cid: str,
-               prompt: str,
-               max_tokens: int = 100,
-               stop_sequence: Optional[List[str]] = None,
-               temperature: float = 0.0) -> Tuple[str, str]:
+ def llm_completion(model_cid: LLM,
+                    prompt: str,
+                    max_tokens: int = 100,
+                    stop_sequence: Optional[List[str]] = None,
+                    temperature: float = 0.0) -> Tuple[str, str]:
+     if _client is None:
+         raise RuntimeError("OpenGradient client not initialized. Call og.init() first.")
+     return _client.llm_completion(model_cid, prompt, max_tokens, stop_sequence, temperature)
+
+ def llm_chat(model_cid: LLM,
+              messages: List[Dict],
+              max_tokens: int = 100,
+              stop_sequence: Optional[List[str]] = None,
+              temperature: float = 0.0,
+              tools: Optional[List[Dict]] = None,
+              tool_choice: Optional[str] = None):
      if _client is None:
          raise RuntimeError("OpenGradient client not initialized. Call og.init() first.")
-     return _client.infer_llm(model_cid, prompt, max_tokens, stop_sequence, temperature)
+     return _client.llm_chat(model_cid, messages, max_tokens, stop_sequence, temperature, tools, tool_choice)

  def login(email: str, password: str):
      if _client is None:
opengradient/abi/inference.abi CHANGED
@@ -1 +1 @@
- [{"anonymous":false,"inputs":[{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"},{"internalType":"bool","name":"is_simulation_result","type":"bool"}],"indexed":false,"internalType":"struct ModelOutput","name":"output","type":"tuple"}],"name":"InferenceResult","type":"event"},{"inputs":[{"internalType":"string","name":"modelId","type":"string"},{"internalType":"enum ModelInferenceMode","name":"inferenceMode","type":"uint8"},{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"}],"internalType":"struct ModelInput","name":"modelInput","type":"tuple"}],"name":"run","outputs":[{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"},{"internalType":"bool","name":"is_simulation_result","type":"bool"}],"internalType":"struct ModelOutput","name":"","type":"tuple"}],"stateMutability":"nonpayable","type":"function"}]
+ [{"anonymous":false,"inputs":[{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"},{"internalType":"bool","name":"is_simulation_result","type":"bool"}],"indexed":false,"internalType":"struct ModelOutput","name":"output","type":"tuple"}],"name":"InferenceResult","type":"event"},{"anonymous":false,"inputs":[{"components":[{"internalType":"string","name":"finish_reason","type":"string"},{"components":[{"internalType":"string","name":"role","type":"string"},{"internalType":"string","name":"content","type":"string"},{"components":[{"internalType":"string","name":"id","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"arguments","type":"string"}],"internalType":"struct ToolCall[]","name":"tool_calls","type":"tuple[]"}],"internalType":"struct ChatMessage","name":"message","type":"tuple"}],"indexed":false,"internalType":"struct LLMChatResponse","name":"response","type":"tuple"}],"name":"LLMChatResult","type":"event"},{"anonymous":false,"inputs":[{"components":[{"internalType":"string","name":"answer","type":"string"}],"indexed":false,"internalType":"struct LLMCompletionResponse","name":"response","type":"tuple"}],"name":"LLMCompletionResult","type":"event"},{"inputs":[{"internalType":"string","name":"modelId","type":"string"},{"internalType":"enum ModelInferenceMode","name":"inferenceMode","type":"uint8"},{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"}],"internalType":"struct ModelInput","name":"modelInput","type":"tuple"}],"name":"run","outputs":[{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"},{"internalType":"bool","name":"is_simulation_result","type":"bool"}],"internalType":"struct ModelOutput","name":"","type":"tuple"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"components":[{"internalType":"enum LLMInferenceMode","name":"mode","type":"uint8"},{"internalType":"string","name":"modelCID","type":"string"},{"components":[{"internalType":"string","name":"role","type":"string"},{"internalType":"string","name":"content","type":"string"},{"components":[{"internalType":"string","name":"id","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"arguments","type":"string"}],"internalType":"struct ToolCall[]","name":"tool_calls","type":"tuple[]"}],"internalType":"struct ChatMessage[]","name":"messages","type":"tuple[]"},{"components":[{"internalType":"string","name":"description","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"parameters","type":"string"}],"internalType":"struct ToolDefinition[]","name":"tools","type":"tuple[]"},{"internalType":"string","name":"tool_choice","type":"string"},{"internalType":"uint32","name":"max_tokens","type":"uint32"},{"internalType":"string[]","name":"stop_sequence","type":"string[]"},{"internalType":"uint32","name":"temperature","type":"uint32"}],"internalType":"struct LLMChatRequest","name":"request","type":"tuple"}],"name":"runLLMChat","outputs":[{"components":[{"internalType":"string","name":"finish_reason","type":"string"},{"components":[{"internalType":"string","name":"role","type":"string"},{"internalType":"string","name":"content","type":"string"},{"components":[{"internalType":"string","name":"id","type":"string"},{"internalType":"string","name":"name","type":"string"},{"internalType":"string","name":"arguments","type":"string"}],"internalType":"struct ToolCall[]","name":"tool_calls","type":"tuple[]"}],"internalType":"struct ChatMessage","name":"message","type":"tuple"}],"internalType":"struct LLMChatResponse","name":"","type":"tuple"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"components":[{"internalType":"enum LLMInferenceMode","name":"mode","type":"uint8"},{"internalType":"string","name":"modelCID","type":"string"},{"internalType":"string","name":"prompt","type":"string"},{"internalType":"uint32","name":"max_tokens","type":"uint32"},{"internalType":"string[]","name":"stop_sequence","type":"string[]"},{"internalType":"uint32","name":"temperature","type":"uint32"}],"internalType":"struct LLMCompletionRequest","name":"request","type":"tuple"}],"name":"runLLMCompletion","outputs":[{"components":[{"internalType":"string","name":"answer","type":"string"}],"internalType":"struct LLMCompletionResponse","name":"","type":"tuple"}],"stateMutability":"nonpayable","type":"function"}]
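
Taken together, these changes split the old `infer_llm` entry point into `llm_completion` and `llm_chat`, and fold the LLM request/response types into the shared inference ABI. A minimal usage sketch of the new module-level API follows; the private key is a placeholder, and the `og.init` signature is assumed from the `og.init()` references in the error messages above rather than confirmed by this diff:

    import opengradient as og
    from opengradient.types import LLM

    # Assumed signature; the diff only confirms that og.init() must be called first.
    og.init(private_key="0x<placeholder>")

    # New in 0.3.5+: completion-style LLM inference returns (tx_hash, answer).
    tx_hash, answer = og.llm_completion(
        model_cid=LLM.META_LLAMA3_8B_INSTRUCT,
        prompt="Translate to French: Hello world",
        max_tokens=50,
    )
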
opengradient/account.py CHANGED
@@ -1,8 +1,9 @@
- from eth_account import Account
+ import hashlib
+ import os
  import secrets
  from collections import namedtuple
- import os
- import hashlib
+
+ from eth_account import Account

  EthAccount = namedtuple('EthAccount', ['address', 'private_key'])

opengradient/cli.py CHANGED
@@ -1,18 +1,24 @@
- import click
- import opengradient
- import json
  import ast
- from pathlib import Path
+ import json
  import logging
- from pprint import pformat
- from typing import List
  import webbrowser
- import sys
+ from pathlib import Path
+ from typing import List
+
+ import click
+
+ import opengradient

+ from .account import EthAccount, generate_eth_account
  from .client import Client
- from .defaults import *
+ from .defaults import (
+     DEFAULT_BLOCKCHAIN_EXPLORER,
+     DEFAULT_HUB_SIGNUP_URL,
+     DEFAULT_INFERENCE_CONTRACT_ADDRESS,
+     DEFAULT_OG_FAUCET_URL,
+     DEFAULT_RPC_URL,
+ )
  from .types import InferenceMode
- from .account import EthAccount, generate_eth_account

  OG_CONFIG_FILE = Path.home() / '.opengradient_config.json'

@@ -50,13 +56,19 @@ class DictParamType(click.ParamType):

  Dict = DictParamType()

- # Support inference modes
+ # Supported inference modes
  InferenceModes = {
      "VANILLA": InferenceMode.VANILLA,
      "ZKML": InferenceMode.ZKML,
      "TEE": InferenceMode.TEE,
  }

+ # Supported LLMs
+ LlmModels = {
+     "meta-llama/Meta-Llama-3-8B-Instruct",
+     "meta-llama/Llama-3.2-3B-Instruct",
+     "mistralai/Mistral-7B-Instruct-v0.3"
+ }

  def initialize_config(ctx):
      """Interactively initialize OpenGradient config"""
@@ -118,7 +130,7 @@ def cli(ctx):
      try:
          ctx.obj['client'] = Client(private_key=ctx.obj['private_key'],
                                     rpc_url=ctx.obj['rpc_url'],
-                                    contract_address=ctx.obj['contract_address'],
+                                    contract_address=DEFAULT_INFERENCE_CONTRACT_ADDRESS,
                                     email=ctx.obj.get('email'),
                                     password=ctx.obj.get('password'))
      except Exception as e:
@@ -301,12 +313,23 @@ def infer(ctx, model_cid: str, inference_mode: str, input_data, input_file: Path
          with input_file.open('r') as file:
              model_input = json.load(file)

-         click.echo(f"Running {inference_mode} inference for model \"{model_cid}\"\n")
+         click.echo(f"Running {inference_mode} inference for model \"{model_cid}\"")
          tx_hash, model_output = client.infer(model_cid=model_cid, inference_mode=InferenceModes[inference_mode], model_input=model_input)

-         click.secho("Success!", fg="green")
-         click.echo(f"Transaction hash: {tx_hash}")
-         click.echo(f"Inference result:\n{pformat(model_output, indent=2, width=120)}")
+         click.echo() # Add a newline for better spacing
+         click.secho("Transaction successful", fg="green", bold=True)
+         click.echo("──────────────────────────────────────")
+         click.echo("Transaction hash: ", nl=False)
+         click.secho(tx_hash, fg="cyan", bold=True)
+
+         block_explorer_link = f"{DEFAULT_BLOCKCHAIN_EXPLORER}0x{tx_hash}"
+         click.echo("Block explorer link: ", nl=False)
+         click.secho(block_explorer_link, fg="blue", underline=True)
+         click.echo()
+
+         click.secho("Inference result:", fg="green")
+         formatted_output = json.dumps(model_output, indent=2, default=lambda x: x.tolist() if hasattr(x, 'tolist') else str(x))
+         click.echo(formatted_output)
      except json.JSONDecodeError as e:
          click.echo(f"Error decoding JSON: {e}", err=True)
          click.echo(f"Error occurred on line {e.lineno}, column {e.colno}", err=True)
@@ -314,7 +337,7 @@ def infer(ctx, model_cid: str, inference_mode: str, input_data, input_file: Path
          click.echo(f"Error running inference: {str(e)}")

  @cli.command()
- @click.option('--model', '-m', 'model_cid', required=True, help='CID of the LLM model to run inference on')
+ @click.option('--model', '-m', 'model_cid', type=click.Choice(LlmModels), required=True, help='CID of the LLM model to run inference on')
  @click.option('--prompt', '-p', required=True, help='Input prompt for the LLM')
  @click.option('--max-tokens', type=int, default=100, help='Maximum number of tokens for LLM output')
  @click.option('--stop-sequence', multiple=True, help='Stop sequences for LLM')
@@ -329,8 +352,8 @@ def llm(ctx, model_cid: str, prompt: str, max_tokens: int, stop_sequence: List[s
      Example usage:

      \b
-     opengradient llm --model Qm... --prompt "Hello, how are you?" --max-tokens 50 --temperature 0.7
-     opengradient llm -m Qm... -p "Translate to French: Hello world" --stop-sequence "." --stop-sequence "\n"
+     opengradient llm --model meta-llama/Meta-Llama-3-8B-Instruct --prompt "Hello, how are you?" --max-tokens 50 --temperature 0.7
+     opengradient llm -m meta-llama/Meta-Llama-3-8B-Instruct -p "Translate to French: Hello world" --stop-sequence "." --stop-sequence "\n"
      """
      client: Client = ctx.obj['client']
      try:
@@ -343,12 +366,26 @@ def llm(ctx, model_cid: str, prompt: str, max_tokens: int, stop_sequence: List[s
              temperature=temperature
          )

-         click.secho("Success!", fg="green")
-         click.echo(f"Transaction hash: {tx_hash}")
-         click.echo(f"LLM output:\n{llm_output}")
+         print_llm_inference_result(model_cid, tx_hash, llm_output)
      except Exception as e:
          click.echo(f"Error running LLM inference: {str(e)}")

+ def print_llm_inference_result(model_cid, tx_hash, llm_output):
+     click.secho("✅ LLM Inference Successful", fg="green", bold=True)
+     click.echo("──────────────────────────────────────")
+     click.echo("Model CID: ", nl=False)
+     click.secho(model_cid, fg="cyan", bold=True)
+     click.echo("Transaction hash: ", nl=False)
+     click.secho(tx_hash, fg="cyan", bold=True)
+     block_explorer_link = f"{DEFAULT_BLOCKCHAIN_EXPLORER}0x{tx_hash}"
+     click.echo("Block explorer link: ", nl=False)
+     click.secho(block_explorer_link, fg="blue", underline=True)
+     click.echo("──────────────────────────────────────")
+     click.secho("LLM Output:", fg="yellow", bold=True)
+     click.echo()
+     click.echo(llm_output)
+     click.echo()
+
  @cli.command()
  def create_account():
      """Create a new test account for OpenGradient inference and model management"""
@@ -364,7 +401,7 @@ def create_account_impl() -> EthAccount:
      click.echo("Step 1: Create Account on OpenGradient Hub")
      click.echo("-" * 50)

-     click.echo(f"Please create an account on the OpenGradient Hub")
+     click.echo("Please create an account on the OpenGradient Hub")
      webbrowser.open(DEFAULT_HUB_SIGNUP_URL, new=2)
      click.confirm("Have you successfully created your account on the OpenGradient Hub?", abort=True)

@@ -377,7 +414,7 @@ def create_account_impl() -> EthAccount:
      click.echo("\n" + "-" * 50)
      click.echo("Step 3: Fund Your Account")
      click.echo("-" * 50)
-     click.echo(f"Please fund your account clicking 'Request' on the Faucet website")
+     click.echo("Please fund your account clicking 'Request' on the Faucet website")
      webbrowser.open(DEFAULT_OG_FAUCET_URL + eth_account.address, new=2)
      click.confirm("Have you successfully funded your account using the Faucet?", abort=True)

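With `type=click.Choice(LlmModels)` on the `--model` option, click now rejects any model name outside the three-entry set before the command body runs. The docstring examples above remain valid invocations, e.g.:

    opengradient llm --model meta-llama/Meta-Llama-3-8B-Instruct --prompt "Hello, how are you?" --max-tokens 50 --temperature 0.7
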
opengradient/client.py CHANGED
@@ -1,15 +1,19 @@
- import requests
- import os
  import json
- from web3 import Web3
- from opengradient.exceptions import OpenGradientError
- from opengradient.types import InferenceMode
- from opengradient import utils
- import numpy as np
  import logging
- from typing import Dict, Optional, Tuple, Union, List
- from web3.exceptions import ContractLogicError
+ import os
+ from typing import Dict, List, Optional, Tuple, Union
+
  import firebase
+ import numpy as np
+ import requests
+ from web3 import Web3
+ from web3.exceptions import ContractLogicError
+ from web3.logs import DISCARD
+
+ from opengradient import utils
+ from opengradient.exceptions import OpenGradientError
+ from opengradient.types import InferenceMode, LLM
+

  class Client:
      FIREBASE_CONFIG = {
@@ -100,7 +104,7 @@ class Client:
          if not self.user:
              raise ValueError("User not authenticated")

-         url = f"https://api.opengradient.ai/api/v0/models/"
+         url = "https://api.opengradient.ai/api/v0/models/"
          headers = {
              'Authorization': f'Bearer {self.user["idToken"]}',
              'Content-Type': 'application/json'
@@ -186,15 +190,15 @@ class Client:
          logging.debug(f"Full server response: {json_response}")

          if isinstance(json_response, list) and not json_response:
-             logging.info(f"Server returned an empty list. Assuming version was created successfully.")
+             logging.info("Server returned an empty list. Assuming version was created successfully.")
              return {"versionString": "Unknown", "note": "Created based on empty response"}
          elif isinstance(json_response, dict):
-             versionString = json_response.get('versionString')
-             if not versionString:
+             version_string = json_response.get('versionString')
+             if not version_string:
                  logging.warning(f"'versionString' not found in response. Response: {json_response}")
                  return {"versionString": "Unknown", "note": "Version ID not provided in response"}
-             logging.info(f"Version creation successful. Version ID: {versionString}")
-             return {"versionString": versionString}
+             logging.info(f"Version creation successful. Version ID: {version_string}")
+             return {"versionString": version_string}
          else:
              logging.error(f"Unexpected response type: {type(json_response)}. Content: {json_response}")
              raise Exception(f"Unexpected response type: {type(json_response)}")
@@ -295,7 +299,12 @@ class Client:
              logging.error(f"Unexpected error during upload: {str(e)}", exc_info=True)
              raise OpenGradientError(f"Unexpected error during upload: {str(e)}")

-     def infer(self, model_cid: str, inference_mode: InferenceMode, model_input: Dict[str, Union[str, int, float, List, np.ndarray]]) -> Tuple[str, Dict[str, np.ndarray]]:
+     def infer(
+         self,
+         model_cid: str,
+         inference_mode: InferenceMode,
+         model_input: Dict[str, Union[str, int, float, List, np.ndarray]]
+     ) -> Tuple[str, Dict[str, np.ndarray]]:
          """
          Perform inference on a model.

@@ -376,19 +385,11 @@ class Client:
                  raise ContractLogicError(f"Transaction failed. Receipt: {tx_receipt}")

              # Process the InferenceResult event
-             inference_result = None
-             for log in tx_receipt['logs']:
-                 try:
-                     decoded_log = contract.events.InferenceResult().process_log(log)
-                     inference_result = decoded_log
-                     break
-                 except:
-                     continue
-
-             if inference_result is None:
-                 logging.error("InferenceResult event not found in transaction logs")
-                 logging.debug(f"Transaction receipt logs: {tx_receipt['logs']}")
+             parsed_logs = contract.events.InferenceResult().process_receipt(tx_receipt, errors=DISCARD)
+
+             if len(parsed_logs) < 1:
                  raise OpenGradientError("InferenceResult event not found in transaction logs")
+             inference_result = parsed_logs[0]

              # Extract the ModelOutput from the event
              event_data = inference_result['args']
@@ -410,24 +411,24 @@ class Client:
              logging.error(f"Error in infer method: {str(e)}", exc_info=True)
              raise OpenGradientError(f"Inference failed: {str(e)}")

-     def infer_llm(self,
-                   model_cid: str,
-                   prompt: str,
-                   max_tokens: int = 100,
-                   stop_sequence: Optional[List[str]] = None,
-                   temperature: float = 0.0) -> Tuple[str, str]:
+     def llm_completion(self,
+                        model_cid: LLM,
+                        prompt: str,
+                        max_tokens: int = 100,
+                        stop_sequence: Optional[List[str]] = None,
+                        temperature: float = 0.0) -> Tuple[str, str]:
          """
          Perform inference on an LLM model using completions.

          Args:
-             model_cid (str): The unique content identifier for the model.
+             model_cid (LLM): The unique content identifier for the model.
              prompt (str): The input prompt for the LLM.
              max_tokens (int): Maximum number of tokens for LLM output. Default is 100.
              stop_sequence (List[str], optional): List of stop sequences for LLM. Default is None.
              temperature (float): Temperature for LLM inference, between 0 and 1. Default is 0.0.

          Returns:
-             Tuple[str, str]: The transaction hash and the LLM output.
+             Tuple[str, str]: The transaction hash and the LLM completion output.

          Raises:
              OpenGradientError: If the inference fails.
@@ -435,7 +436,7 @@ class Client:
          try:
              self._initialize_web3()

-             abi_path = os.path.join(os.path.dirname(__file__), 'abi', 'llm.abi')
+             abi_path = os.path.join(os.path.dirname(__file__), 'abi', 'inference.abi')
              with open(abi_path, 'r') as abi_file:
                  llm_abi = json.load(abi_file)
              contract = self._w3.eth.contract(address=self.contract_address, abi=llm_abi)
@@ -452,7 +453,7 @@ class Client:
              logging.debug(f"Prepared LLM request: {llm_request}")

              # Prepare run function
-             run_function = contract.functions.runLLM(llm_request)
+             run_function = contract.functions.runLLMCompletion(llm_request)

              # Build transaction
              nonce = self._w3.eth.get_transaction_count(self.wallet_address)
@@ -478,19 +479,166 @@ class Client:
                  raise ContractLogicError(f"Transaction failed. Receipt: {tx_receipt}")

              # Process the LLMResult event
-             llm_result = None
-             for log in tx_receipt['logs']:
-                 try:
-                     decoded_log = contract.events.LLMResult().process_log(log)
-                     llm_result = decoded_log['args']['response']['answer']
-                     break
-                 except:
-                     continue
+             parsed_logs = contract.events.LLMCompletionResult().process_receipt(tx_receipt, errors=DISCARD)
+
+             if len(parsed_logs) < 1:
+                 raise OpenGradientError("LLM completion result event not found in transaction logs")
+             llm_result = parsed_logs[0]

-             if llm_result is None:
-                 raise OpenGradientError("LLMResult event not found in transaction logs")
+             llm_answer = llm_result['args']['response']['answer']
+             return tx_hash.hex(), llm_answer
+
+         except ContractLogicError as e:
+             logging.error(f"Contract logic error: {str(e)}", exc_info=True)
+             raise OpenGradientError(f"LLM inference failed due to contract logic error: {str(e)}")
+         except Exception as e:
+             logging.error(f"Error in infer completion method: {str(e)}", exc_info=True)
+             raise OpenGradientError(f"LLM inference failed: {str(e)}")
+
+     def llm_chat(self,
+                  model_cid: str,
+                  messages: List[Dict],
+                  max_tokens: int = 100,
+                  stop_sequence: Optional[List[str]] = None,
+                  temperature: float = 0.0,
+                  tools: Optional[List[Dict]] = [],
+                  tool_choice: Optional[str] = None) -> Tuple[str, str]:
+         """
+         Perform inference on an LLM model using chat.
+
+         Args:
+             model_cid (LLM): The unique content identifier for the model.
+             messages (dict): The messages that will be passed into the chat.
+                 This should be in OpenAI API format (https://platform.openai.com/docs/api-reference/chat/create)
+                 Example:
+                     [
+                         {
+                             "role": "system",
+                             "content": "You are a helpful assistant."
+                         },
+                         {
+                             "role": "user",
+                             "content": "Hello!"
+                         }
+                     ]
+             max_tokens (int): Maximum number of tokens for LLM output. Default is 100.
+             stop_sequence (List[str], optional): List of stop sequences for LLM. Default is None.
+             temperature (float): Temperature for LLM inference, between 0 and 1. Default is 0.0.
+             tools (List[dict], optional): Set of tools
+                 This should be in OpenAI API format (https://platform.openai.com/docs/api-reference/chat/create#chat-create-tools)
+                 Example:
+                     [
+                         {
+                             "type": "function",
+                             "function": {
+                                 "name": "get_current_weather",
+                                 "description": "Get the current weather in a given location",
+                                 "parameters": {
+                                     "type": "object",
+                                     "properties": {
+                                         "location": {
+                                             "type": "string",
+                                             "description": "The city and state, e.g. San Francisco, CA"
+                                         },
+                                         "unit": {
+                                             "type": "string",
+                                             "enum": ["celsius", "fahrenheit"]
+                                         }
+                                     },
+                                     "required": ["location"]
+                                 }
+                             }
+                         }
+                     ]
+             tool_choice (str, optional): Sets a specific tool to choose. Default value is "auto".
+
+         Returns:
+             Tuple[str, str]: The transaction hash and the LLM chat output.
+
+         Raises:
+             OpenGradientError: If the inference fails.
+         """
+         try:
+             self._initialize_web3()
+
+             abi_path = os.path.join(os.path.dirname(__file__), 'abi', 'inference.abi')
+             with open(abi_path, 'r') as abi_file:
+                 llm_abi = json.load(abi_file)
+             contract = self._w3.eth.contract(address=self.contract_address, abi=llm_abi)
+
+             # For incoming chat messages, tool_calls can be empty. Add an empty array so that it will fit the ABI.
+             for message in messages:
+                 if 'tool_calls' not in message:
+                     message['tool_calls'] = []
+
+             # Create simplified tool structure for smart contract
+             #
+             # struct ToolDefinition {
+             #     string description;
+             #     string name;
+             #     string parameters; // This must be a JSON
+             # }
+             converted_tools = []
+             if tools is not None:
+                 for tool in tools:
+                     function = tool['function']
+
+                     converted_tool = {}
+                     converted_tool['name'] = function['name']
+                     converted_tool['description'] = function['description']
+                     if (parameters := function.get('parameters')) is not None:
+                         try:
+                             converted_tool['parameters'] = json.dumps(parameters)
+                         except Exception as e:
+                             raise OpenGradientError("Chat LLM failed to convert parameters into JSON: %s", e)
+
+                     converted_tools.append(converted_tool)
+
+             # Prepare LLM input
+             llm_request = {
+                 "mode": InferenceMode.VANILLA,
+                 "modelCID": model_cid,
+                 "messages": messages,
+                 "max_tokens": max_tokens,
+                 "stop_sequence": stop_sequence or [],
+                 "temperature": int(temperature * 100), # Scale to 0-100 range
+                 "tools": converted_tools or [],
+                 "tool_choice": tool_choice if tool_choice else ("" if tools is None else "auto")
+             }
+             logging.debug(f"Prepared LLM request: {llm_request}")
+
+             # Prepare run function
+             run_function = contract.functions.runLLMChat(llm_request)
+
+             # Build transaction
+             nonce = self._w3.eth.get_transaction_count(self.wallet_address)
+             estimated_gas = run_function.estimate_gas({'from': self.wallet_address})
+             gas_limit = int(estimated_gas * 1.2)
+
+             transaction = run_function.build_transaction({
+                 'from': self.wallet_address,
+                 'nonce': nonce,
+                 'gas': gas_limit,
+                 'gasPrice': self._w3.eth.gas_price,
+             })
+
+             # Sign and send transaction
+             signed_tx = self._w3.eth.account.sign_transaction(transaction, self.private_key)
+             tx_hash = self._w3.eth.send_raw_transaction(signed_tx.raw_transaction)
+             logging.debug(f"Transaction sent. Hash: {tx_hash.hex()}")
+
+             # Wait for transaction receipt
+             tx_receipt = self._w3.eth.wait_for_transaction_receipt(tx_hash)
+
+             if tx_receipt['status'] == 0:
+                 raise ContractLogicError(f"Transaction failed. Receipt: {tx_receipt}")
+
+             # Process the LLMResult event
+             parsed_logs = contract.events.LLMChatResult().process_receipt(tx_receipt, errors=DISCARD)

-             logging.debug(f"LLM output: {llm_result}")
+             if len(parsed_logs) < 1:
+                 raise OpenGradientError("LLM chat result event not found in transaction logs")
+             llm_result = parsed_logs[0]

              return tx_hash.hex(), llm_result

@@ -498,7 +646,7 @@ class Client:
              logging.error(f"Contract logic error: {str(e)}", exc_info=True)
              raise OpenGradientError(f"LLM inference failed due to contract logic error: {str(e)}")
          except Exception as e:
-             logging.error(f"Error in infer_llm method: {str(e)}", exc_info=True)
+             logging.error(f"Error in infer chat method: {str(e)}", exc_info=True)
              raise OpenGradientError(f"LLM inference failed: {str(e)}")

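Both rewritten methods replace the manual per-log `process_log` loop with web3.py's receipt-level decoder. A condensed sketch of the shared pattern, assuming a `contract` and `tx_receipt` built exactly as in the methods above (the helper name is illustrative, not part of the package):

    from web3.logs import DISCARD

    def decode_first_event(contract, event_name: str, tx_receipt):
        # process_receipt decodes every log that matches the event ABI;
        # errors=DISCARD drops non-matching logs instead of raising or warning.
        parsed_logs = getattr(contract.events, event_name)().process_receipt(tx_receipt, errors=DISCARD)
        if len(parsed_logs) < 1:
            raise RuntimeError(f"{event_name} event not found in transaction logs")
        return parsed_logs[0]['args']
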
opengradient/defaults.py CHANGED
@@ -3,4 +3,5 @@
  DEFAULT_RPC_URL="http://18.218.115.248:8545"
  DEFAULT_OG_FAUCET_URL="http://18.218.115.248:8080/?address="
  DEFAULT_HUB_SIGNUP_URL="https://hub.opengradient.ai/signup"
- DEFAULT_INFERENCE_CONTRACT_ADDRESS="0x350E0A430b2B1563481833a99523Cfd17a530e4e"
+ DEFAULT_INFERENCE_CONTRACT_ADDRESS="0x24Ec56879245C707220Af7234d2fF3F22cA9Aa63"
+ DEFAULT_BLOCKCHAIN_EXPLORER="http://3.145.62.2/tx/"
opengradient/types.py CHANGED
@@ -1,5 +1,6 @@
- from typing import List, Tuple, Union
  from dataclasses import dataclass
+ from typing import List, Tuple, Union
+

  @dataclass
  class Number:
@@ -9,7 +10,7 @@ class Number:
  @dataclass
  class NumberTensor:
      name: str
-     values: List[Tuple[int, int]] # (int128, int128)[]
+     values: List[Tuple[int, int]]

  @dataclass
  class StringTensor:
@@ -37,7 +38,7 @@ class AbiFunction:
      name: str
      inputs: List[Union[str, 'AbiFunction']]
      outputs: List[Union[str, 'AbiFunction']]
-     stateMutability: str
+     state_mutability: str

  @dataclass
  class Abi:
@@ -54,7 +55,7 @@ class Abi:
                  name=item['name'],
                  inputs=inputs,
                  outputs=outputs,
-                 stateMutability=item['stateMutability']
+                 state_mutability=item['stateMutability']
              ))
          return cls(functions=functions)

@@ -67,8 +68,13 @@ class Abi:
                      name=item['name'],
                      inputs=Abi._parse_inputs_outputs(item['components']),
                      outputs=[],
-                     stateMutability=''
+                     state_mutability=''
                  ))
              else:
                  result.append(f"{item['name']}:{item['type']}")
-         return result
+         return result
+
+ class LLM:
+     META_LLAMA3_8B_INSTRUCT = "meta-llama/Meta-Llama-3-8B-Instruct"
+     LLAMA_3_2_3B_INSTRUCT = "meta-llama/Llama-3.2-3B-Instruct"
+     MISTRAL_7B_INSTRUCT_V3 = "mistralai/Mistral-7B-Instruct-v0.3"
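
`LLM` is a plain namespace of string constants, so its attributes drop in anywhere a model CID string is expected. A hedged example pairing it with the chat API added above (the message list comes from the `llm_chat` docstring; an already-initialized client is assumed):

    import opengradient as og
    from opengradient.types import LLM

    tx_hash, chat_result = og.llm_chat(
        model_cid=LLM.LLAMA_3_2_3B_INSTRUCT,  # the plain string "meta-llama/Llama-3.2-3B-Instruct"
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "Hello!"},
        ],
    )
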
opengradient/utils.py CHANGED
@@ -1,9 +1,11 @@
- import numpy as np
  import logging
  from decimal import Decimal
  from typing import Dict, List, Tuple
+
+ import numpy as np
  from web3.datastructures import AttributeDict

+
  def convert_to_fixed_point(number: float) -> Tuple[int, int]:
      """
      Converts input number to the Number tensor used by the sequencer.
@@ -36,14 +38,16 @@ def convert_to_float32(value: int, decimals: int) -> np.float32:
      """
      return np.float32(Decimal(value) / (10 ** Decimal(decimals)))

- def convert_to_model_input(inputs: Dict[str, np.ndarray]) -> Tuple[List[Tuple[str, List[Tuple[int, int]]]], List[Tuple[str, List[str]]]]:
+ def convert_to_model_input(
+         inputs: Dict[str, np.ndarray]
+ ) -> Tuple[List[Tuple[str, List[Tuple[int, int]]]], List[Tuple[str, List[str]]]]:
      """
      Expect SDK input to be a dict with the format
          key: tensor name
          value: np.array / list

-     Return a tuple of (number tensors, string tensors) depending on the input type. Each number and string tensor converted
-     to a numpy array and flattened and the shape saved.
+     Return a tuple of (number tensors, string tensors) depending on the input type.
+     Each number and string tensor converted to a numpy array and flattened and the shape saved.
      """
      logging.debug("Converting the following input dictionary to ModelInput: %s", inputs)
      number_tensors = []
@@ -60,7 +64,9 @@ def convert_to_model_input(inputs: Dict[str, np.ndarray]) -> Tuple[List[Tuple[st

          # Check if type is np array
          if not isinstance(tensor_data, np.ndarray):
-             raise TypeError("Inference input must be list, numpy array, or type (str, int, float): %s" % type(tensor_data))
+             raise TypeError(
+                 "Inference input must be list, numpy array, or type (str, int, float): %s"
+                 % type(tensor_data))

          # Flatten list and retain shape
          shape = tensor_data.shape
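
For context, `convert_to_float32` above is the decoding half of the SDK's fixed-point scheme: a `(value, decimals)` pair decodes to `value / 10**decimals`. A round-trip sketch follows; since this diff never shows the body of `convert_to_fixed_point`, the encoder below is a hypothetical stand-in that merely satisfies the documented signature:

    from decimal import Decimal
    from typing import Tuple

    import numpy as np

    def convert_to_fixed_point(number: float) -> Tuple[int, int]:
        # Hypothetical encoder: keep all decimal digits of the float's shortest repr.
        d = Decimal(str(number)).normalize()
        decimals = max(0, -d.as_tuple().exponent)
        return int(d.scaleb(decimals)), decimals

    def convert_to_float32(value: int, decimals: int) -> np.float32:
        # Matches the implementation shown in the diff above.
        return np.float32(Decimal(value) / (10 ** Decimal(decimals)))

    assert convert_to_float32(*convert_to_fixed_point(1.25)) == np.float32(1.25)
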
opengradient-0.3.4.dist-info/METADATA → opengradient-0.3.7.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: opengradient
- Version: 0.3.4
+ Version: 0.3.7
  Summary: Python SDK for OpenGradient decentralized model management & inference services
  Author-email: OpenGradient <oliver@opengradient.ai>
  License: MIT License
@@ -29,14 +29,10 @@ Project-URL: Homepage, https://opengradient.ai
  Classifier: Development Status :: 3 - Alpha
  Classifier: Intended Audience :: Developers
  Classifier: License :: OSI Approved :: MIT License
- Classifier: Programming Language :: Python :: 3
- Classifier: Programming Language :: Python :: 3.7
- Classifier: Programming Language :: Python :: 3.8
- Classifier: Programming Language :: Python :: 3.9
  Classifier: Programming Language :: Python :: 3.10
  Classifier: Programming Language :: Python :: 3.11
  Classifier: Programming Language :: Python :: 3.12
- Requires-Python: >=3.7
+ Requires-Python: >=3.10
  Description-Content-Type: text/markdown
  License-File: LICENSE
  Requires-Dist: aiohappyeyeballs==2.4.3
opengradient-0.3.7.dist-info/RECORD ADDED
@@ -0,0 +1,15 @@
+ opengradient/__init__.py,sha256=3Td4Ne2WDJ7yKCIOwmKA3fokftE8uUxhNdC7Uezbacc,3110
+ opengradient/account.py,sha256=2B7rtCXQDX-yF4U69h8B9-OUreJU4IqoGXG_1Hn9nWs,1150
+ opengradient/cli.py,sha256=fqM6OrUoblEJeJgqfyneUVV-DSH4Cip8AhzpadZRG_k,17904
+ opengradient/client.py,sha256=ptEgua0NaKbR1SoRMxqE1_0q2SMnHM-hpYHEHxYzBmc,30620
+ opengradient/defaults.py,sha256=YI84_wWTvWxPMQIuKiSair2wffATnhitE3Ll2P1jHMU,319
+ opengradient/exceptions.py,sha256=v4VmUGTvvtjhCZAhR24Ga42z3q-DzR1Y5zSqP_yn2Xk,3366
+ opengradient/types.py,sha256=jKZDo2R39Z99x5POMgQgkb6neSUvGUdBZjgqS_tBQuA,2044
+ opengradient/utils.py,sha256=_dEIhepJXjJFfHLGTUwXloZJXnlQbvwqHSPu08548jI,6532
+ opengradient/abi/inference.abi,sha256=d3UDkyVTnA4coExFdFI0NtEudTRX2wVsbuQ84uO_2so,6262
+ opengradient-0.3.7.dist-info/LICENSE,sha256=xEcvQ3AxZOtDkrqkys2Mm6Y9diEnaSeQRKvxi-JGnNA,1069
+ opengradient-0.3.7.dist-info/METADATA,sha256=Q4uavsx1jnlZPmgxWf4ypABXblhzgUZ2_q04EvvDTVI,7608
+ opengradient-0.3.7.dist-info/WHEEL,sha256=P9jw-gEje8ByB7_hXoICnHtVCrEwMQh-630tKvQWehc,91
+ opengradient-0.3.7.dist-info/entry_points.txt,sha256=yUKTaJx8RXnybkob0J62wVBiCp_1agVbgw9uzsmaeJc,54
+ opengradient-0.3.7.dist-info/top_level.txt,sha256=oC1zimVLa2Yi1LQz8c7x-0IQm92milb5ax8gHBHwDqU,13
+ opengradient-0.3.7.dist-info/RECORD,,
opengradient-0.3.4.dist-info/WHEEL → opengradient-0.3.7.dist-info/WHEEL RENAMED
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (75.1.0)
+ Generator: setuptools (75.3.0)
  Root-Is-Purelib: true
  Tag: py3-none-any

opengradient/abi/llm.abi DELETED
@@ -1 +0,0 @@
- [{"anonymous":false,"inputs":[{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"},{"internalType":"bool","name":"is_simulation_result","type":"bool"}],"indexed":false,"internalType":"struct ModelOutput","name":"output","type":"tuple"}],"name":"InferenceResult","type":"event"},{"anonymous":false,"inputs":[{"components":[{"internalType":"string","name":"answer","type":"string"}],"indexed":false,"internalType":"struct LlmResponse","name":"response","type":"tuple"}],"name":"LLMResult","type":"event"},{"inputs":[{"internalType":"string","name":"modelId","type":"string"},{"internalType":"enum ModelInferenceMode","name":"inferenceMode","type":"uint8"},{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"}],"internalType":"struct ModelInput","name":"modelInput","type":"tuple"}],"name":"run","outputs":[{"components":[{"components":[{"internalType":"string","name":"name","type":"string"},{"components":[{"internalType":"int128","name":"value","type":"int128"},{"internalType":"int128","name":"decimals","type":"int128"}],"internalType":"struct TensorLib.Number[]","name":"values","type":"tuple[]"},{"internalType":"uint32[]","name":"shape","type":"uint32[]"}],"internalType":"struct TensorLib.MultiDimensionalNumberTensor[]","name":"numbers","type":"tuple[]"},{"components":[{"internalType":"string","name":"name","type":"string"},{"internalType":"string[]","name":"values","type":"string[]"}],"internalType":"struct TensorLib.StringTensor[]","name":"strings","type":"tuple[]"},{"internalType":"bool","name":"is_simulation_result","type":"bool"}],"internalType":"struct ModelOutput","name":"","type":"tuple"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"components":[{"internalType":"enum LlmInferenceMode","name":"mode","type":"uint8"},{"internalType":"string","name":"modelCID","type":"string"},{"internalType":"string","name":"prompt","type":"string"},{"internalType":"uint32","name":"max_tokens","type":"uint32"},{"internalType":"string[]","name":"stop_sequence","type":"string[]"},{"internalType":"uint32","name":"temperature","type":"uint32"}],"internalType":"struct LlmInferenceRequest","name":"request","type":"tuple"}],"name":"runLLM","outputs":[{"components":[{"internalType":"string","name":"answer","type":"string"}],"internalType":"struct LlmResponse","name":"","type":"tuple"}],"stateMutability":"nonpayable","type":"function"}]
opengradient-0.3.4.dist-info/RECORD DELETED
@@ -1,16 +0,0 @@
- opengradient/__init__.py,sha256=6V8HyFFkkqnGSwrbj36AuLijHs0pOxxnbBeHWy8LQ6w,2447
- opengradient/account.py,sha256=s1C4hAtc8vcHObWjwxwlYJA041S6DTbr7-rK6qiWPsQ,1149
- opengradient/cli.py,sha256=T59Z2S3AsMVS6TLgsSgxW9esssvYP5ZmVJrYE6p4oW4,16105
- opengradient/client.py,sha256=DCDp2EWPF62ZQnx2_cM0wPghRxgn213VnR65R8yZBVY,23964
- opengradient/defaults.py,sha256=pDfsmPoUzdLG55n-hwh0CMBFxKR2rdNcjqCcwTWc6iw,267
- opengradient/exceptions.py,sha256=v4VmUGTvvtjhCZAhR24Ga42z3q-DzR1Y5zSqP_yn2Xk,3366
- opengradient/types.py,sha256=EoJN-DkQrJ2WTUv8OenlrlWJWFY2jPGTl-T8C_OVjp8,1849
- opengradient/utils.py,sha256=F1Nj-GMNFQFxCtbGgWQq1RP4TSurbpQxJV3yKeEo1b0,6482
- opengradient/abi/inference.abi,sha256=u8FsW0s1YeRjUb9eLS1k_qh_5f_cwOdr0bii-tAdxh0,2683
- opengradient/abi/llm.abi,sha256=zhiPFyBT09EI3QU5DVoKHo7e8T9PFcfIQ3RHDYetm4M,3609
- opengradient-0.3.4.dist-info/LICENSE,sha256=xEcvQ3AxZOtDkrqkys2Mm6Y9diEnaSeQRKvxi-JGnNA,1069
- opengradient-0.3.4.dist-info/METADATA,sha256=7mWZNlCuVtuDsSK34t1zHQ4UYUL6nXTwzIhCfwZgDoE,7805
- opengradient-0.3.4.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
- opengradient-0.3.4.dist-info/entry_points.txt,sha256=yUKTaJx8RXnybkob0J62wVBiCp_1agVbgw9uzsmaeJc,54
- opengradient-0.3.4.dist-info/top_level.txt,sha256=oC1zimVLa2Yi1LQz8c7x-0IQm92milb5ax8gHBHwDqU,13
- opengradient-0.3.4.dist-info/RECORD,,