opengradient 0.4.3__py3-none-any.whl → 0.4.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
opengradient/__init__.py CHANGED
@@ -8,7 +8,7 @@ from .client import Client
8
8
  from .defaults import DEFAULT_INFERENCE_CONTRACT_ADDRESS, DEFAULT_RPC_URL
9
9
  from .types import LLM, TEE_LLM, HistoricalInputQuery, InferenceMode, LlmInferenceMode, SchedulerParams
10
10
 
11
- from . import llm, mltools
11
+ from . import llm, alphasense
12
12
 
13
13
  _client = None
14
14
 
@@ -367,7 +367,7 @@ __all__ = [
367
367
  "read_workflow_result",
368
368
  "run_workflow",
369
369
  "llm",
370
- "mltools"
370
+ "alphasense",
371
371
  ]
372
372
 
373
373
  __pdoc__ = {
@@ -377,7 +377,7 @@ __pdoc__ = {
377
377
  "defaults": False,
378
378
  "exceptions": False,
379
379
  "llm": True,
380
- "mltools": True,
380
+ "alphasense": True,
381
381
  "proto": False,
382
382
  "types": False,
383
383
  "utils": False,
@@ -0,0 +1,11 @@
1
+ """
2
+ OpenGradient AlphaSense Tools
3
+ """
4
+
5
+ from .run_model_tool import *
6
+ from .read_workflow_tool import *
7
+ from .types import ToolType
8
+
9
+ __all__ = ["create_run_model_tool", "create_read_workflow_tool", "ToolType"]
10
+
11
+ __pdoc__ = {"run_model_tool": False, "read_workflow_tool": False, "types": False}
@@ -0,0 +1,77 @@
1
+ from typing import Callable
2
+
3
+ from langchain_core.tools import BaseTool, StructuredTool
4
+
5
+ import opengradient as og
6
+ from .types import ToolType
7
+
8
+
9
+ def create_read_workflow_tool(
10
+ tool_type: ToolType,
11
+ workflow_contract_address: str,
12
+ tool_name: str,
13
+ tool_description: str,
14
+ output_formatter: Callable[..., str] = lambda x: x,
15
+ ) -> BaseTool:
16
+ """
17
+ Creates a tool that reads results from a workflow contract on OpenGradient.
18
+
19
+ This function generates a tool that can be integrated into either a LangChain pipeline
20
+ or a Swarm system, allowing the workflow results to be retrieved and formatted as part
21
+ of a chain of operations.
22
+
23
+ Args:
24
+ tool_type (ToolType): Specifies the framework to create the tool for. Use
25
+ ToolType.LANGCHAIN for LangChain integration or ToolType.SWARM for Swarm
26
+ integration.
27
+ workflow_contract_address (str): The address of the workflow contract from which
28
+ to read results.
29
+ tool_name (str): The name to assign to the created tool. This will be used to
30
+ identify and invoke the tool within the agent.
31
+ tool_description (str): A description of what the tool does and how it processes
32
+ the workflow results.
33
+ output_formatter (Callable[..., str], optional): A function that takes the workflow output
34
+ and formats it into a string. This ensures the output is compatible with
35
+ the tool framework. Default returns the output unchanged.
36
+
37
+ Returns:
38
+ BaseTool: For ToolType.LANGCHAIN, returns a LangChain StructuredTool.
39
+ Callable: For ToolType.SWARM, returns a decorated function with appropriate metadata.
40
+
41
+ Raises:
42
+ ValueError: If an invalid tool_type is provided.
43
+
44
+ Examples:
45
+ >>> def format_output(output):
46
+ ... return f"Workflow status: {output.get('status', 'Unknown')}"
47
+ >>> # Create a LangChain tool
48
+ >>> langchain_tool = create_read_workflow_tool(
49
+ ... tool_type=ToolType.LANGCHAIN,
50
+ ... workflow_contract_address="0x123...",
51
+ ... tool_name="workflow_reader",
52
+ ... output_formatter=format_output,
53
+ ... tool_description="Reads and formats workflow execution results"
54
+ ... )
55
+ >>> # Create a Swarm tool
56
+ >>> swarm_tool = create_read_workflow_tool(
57
+ ... tool_type=ToolType.SWARM,
58
+ ... workflow_contract_address="0x123...",
59
+ ... tool_name="workflow_reader",
60
+ ... output_formatter=format_output,
61
+ ... tool_description="Reads and formats workflow execution results"
62
+ ... )
63
+ """
64
+
65
+ # define runnable
66
+ def read_workflow():
67
+ output = og.read_workflow_result(contract_address=workflow_contract_address)
68
+ return output_formatter(output)
69
+
70
+ if tool_type == ToolType.LANGCHAIN:
71
+ return StructuredTool.from_function(func=read_workflow, name=tool_name, description=tool_description, args_schema=None)
72
+ elif tool_type == ToolType.SWARM:
73
+ read_workflow.__name__ = tool_name
74
+ read_workflow.__doc__ = tool_description
75
+ return read_workflow
76
+ else:
77
+ raise ValueError(f"Invalid tooltype: {tool_type}")
@@ -5,24 +5,15 @@ from langchain_core.tools import BaseTool, StructuredTool
5
5
  from pydantic import BaseModel
6
6
 
7
7
  import opengradient as og
8
+ from .types import ToolType
8
9
 
9
10
 
10
- class ToolType(str, Enum):
11
- """Indicates the framework the tool is compatible with."""
12
-
13
- LANGCHAIN = "langchain"
14
- SWARM = "swarm"
15
-
16
- def __str__(self) -> str:
17
- return self.value
18
-
19
-
20
- def create_og_model_tool(
11
+ def create_run_model_tool(
21
12
  tool_type: ToolType,
22
13
  model_cid: str,
23
14
  tool_name: str,
24
15
  input_getter: Callable,
25
- output_formatter: Callable[..., str],
16
+ output_formatter: Callable[..., str] = lambda x: x,
26
17
  input_schema: Type[BaseModel] = None,
27
18
  tool_description: str = "Executes the given ML model",
28
19
  inference_mode: og.InferenceMode = og.InferenceMode.VANILLA,
@@ -44,9 +35,9 @@ def create_og_model_tool(
44
35
  and invoke the tool within the agent.
45
36
  input_getter (Callable): A function that returns the input data required by the model.
46
37
  The function should return data in a format compatible with the model's expectations.
47
- output_formatter (Callable[..., str]): A function that takes the model output and
38
+ output_formatter (Callable[..., str], optional): A function that takes the model output and
48
39
  formats it into a string. This is required to ensure the output is compatible
49
- with the tool framework.
40
+ with the tool framework. Default returns the output unchanged.
50
41
  input_schema (Type[BaseModel], optional): A Pydantic BaseModel class defining the
51
42
  input schema. This will be used directly for LangChain tools and converted
52
43
  to appropriate annotations for Swarm tools. Default is None.
@@ -0,0 +1,11 @@
1
+ from enum import Enum
2
+
3
+
4
+ class ToolType(str, Enum):
5
+ """Indicates the framework the tool is compatible with."""
6
+
7
+ LANGCHAIN = "langchain"
8
+ SWARM = "swarm"
9
+
10
+ def __str__(self) -> str:
11
+ return self.value
opengradient/client.py CHANGED
@@ -99,7 +99,7 @@ class Client:
99
99
  raise ValueError("User not authenticated")
100
100
 
101
101
  url = "https://api.opengradient.ai/api/v0/models/"
102
- headers = {"Authorization": f'Bearer {self._hub_user["idToken"]}', "Content-Type": "application/json"}
102
+ headers = {"Authorization": f"Bearer {self._hub_user['idToken']}", "Content-Type": "application/json"}
103
103
  payload = {"name": model_name, "description": model_desc}
104
104
 
105
105
  try:
@@ -156,7 +156,7 @@ class Client:
156
156
  raise ValueError("User not authenticated")
157
157
 
158
158
  url = f"https://api.opengradient.ai/api/v0/models/{model_name}/versions"
159
- headers = {"Authorization": f'Bearer {self._hub_user["idToken"]}', "Content-Type": "application/json"}
159
+ headers = {"Authorization": f"Bearer {self._hub_user['idToken']}", "Content-Type": "application/json"}
160
160
  payload = {"notes": notes, "is_major": is_major}
161
161
 
162
162
  try:
@@ -220,7 +220,7 @@ class Client:
220
220
  raise FileNotFoundError(f"Model file not found: {model_path}")
221
221
 
222
222
  url = f"https://api.opengradient.ai/api/v0/models/{model_name}/versions/{version}/files"
223
- headers = {"Authorization": f'Bearer {self._hub_user["idToken"]}'}
223
+ headers = {"Authorization": f"Bearer {self._hub_user['idToken']}"}
224
224
 
225
225
  logging.info(f"Starting upload for file: {model_path}")
226
226
  logging.info(f"File size: {os.path.getsize(model_path)} bytes")
@@ -335,6 +335,7 @@ class Client:
335
335
  if len(parsed_logs) < 1:
336
336
  raise OpenGradientError("InferenceResult event not found in transaction logs")
337
337
 
338
+ # TODO: This should return a ModelOutput class object
338
339
  model_output = utils.convert_to_model_output(parsed_logs[0]["args"])
339
340
  return tx_hash.hex(), model_output
340
341
 
@@ -373,7 +374,7 @@ class Client:
373
374
  if inference_mode != LlmInferenceMode.VANILLA and inference_mode != LlmInferenceMode.TEE:
374
375
  raise OpenGradientError("Invalid inference mode %s: Inference mode must be VANILLA or TEE" % inference_mode)
375
376
 
376
- if inference_mode == LlmInferenceMode.TEE and model_cid not in TEE_LLM:
377
+ if inference_mode == LlmInferenceMode.TEE and model_cid not in [llm.value for llm in TEE_LLM]:
377
378
  raise OpenGradientError("That model CID is not yet supported for TEE inference")
378
379
 
379
380
  contract = self._blockchain.eth.contract(address=self._inference_hub_contract_address, abi=self._inference_abi)
@@ -591,7 +592,7 @@ class Client:
591
592
  raise ValueError("User not authenticated")
592
593
 
593
594
  url = f"https://api.opengradient.ai/api/v0/models/{model_name}/versions/{version}/files"
594
- headers = {"Authorization": f'Bearer {self._hub_user["idToken"]}'}
595
+ headers = {"Authorization": f"Bearer {self._hub_user['idToken']}"}
595
596
 
596
597
  logging.debug(f"List Files URL: {url}")
597
598
  logging.debug(f"Headers: {headers}")
@@ -765,18 +766,19 @@ class Client:
765
766
 
766
767
  print("📦 Deploying workflow contract...")
767
768
 
768
-
769
769
  # Create contract instance
770
770
  contract = self._blockchain.eth.contract(abi=abi, bytecode=bytecode)
771
771
 
772
772
  # Deploy contract with constructor arguments
773
- transaction = contract.constructor().build_transaction({
774
- "from": self._wallet_account.address,
775
- "nonce": self._blockchain.eth.get_transaction_count(self._wallet_account.address, "pending"),
776
- "gas": 15000000,
777
- "gasPrice": self._blockchain.eth.gas_price,
778
- "chainId": self._blockchain.eth.chain_id,
779
- })
773
+ transaction = contract.constructor().build_transaction(
774
+ {
775
+ "from": self._wallet_account.address,
776
+ "nonce": self._blockchain.eth.get_transaction_count(self._wallet_account.address, "pending"),
777
+ "gas": 15000000,
778
+ "gasPrice": self._blockchain.eth.gas_price,
779
+ "chainId": self._blockchain.eth.chain_id,
780
+ }
781
+ )
780
782
 
781
783
  signed_txn = self._wallet_account.sign_transaction(transaction)
782
784
  tx_hash = self._blockchain.eth.send_raw_transaction(signed_txn.raw_transaction)
@@ -837,7 +839,7 @@ class Client:
837
839
 
838
840
  return contract_address
839
841
 
840
- def read_workflow_result(self, contract_address: str) -> Any:
842
+ def read_workflow_result(self, contract_address: str) -> ModelOutput:
841
843
  """
842
844
  Reads the latest inference result from a deployed workflow contract.
843
845
 
@@ -845,7 +847,7 @@ class Client:
845
847
  contract_address (str): Address of the deployed workflow contract
846
848
 
847
849
  Returns:
848
- Any: The inference result from the contract
850
+ ModelOutput: The inference result from the contract
849
851
 
850
852
  Raises:
851
853
  ContractLogicError: If the transaction fails
@@ -856,7 +858,8 @@ class Client:
856
858
 
857
859
  # Get the result
858
860
  result = contract.functions.getInferenceResult().call()
859
- return result
861
+
862
+ return utils.convert_array_to_model_output(result)
860
863
 
861
864
  def run_workflow(self, contract_address: str) -> ModelOutput:
862
865
  """
@@ -898,7 +901,8 @@ class Client:
898
901
 
899
902
  # Get the inference result from the contract
900
903
  result = contract.functions.getInferenceResult().call()
901
- return result
904
+
905
+ return utils.convert_array_to_model_output(result)
902
906
 
903
907
 
904
908
  def run_with_retry(txn_function, max_retries=DEFAULT_MAX_RETRY, retry_delay=DEFAULT_RETRY_DELAY_SEC):
@@ -910,21 +914,23 @@ def run_with_retry(txn_function, max_retries=DEFAULT_MAX_RETRY, retry_delay=DEFA
910
914
  max_retries (int): Maximum number of retry attempts
911
915
  retry_delay (float): Delay in seconds between retries for nonce issues
912
916
  """
913
- NONCE_TOO_LOW = 'nonce too low'
914
- NONCE_TOO_HIGH = 'nonce too high'
917
+ NONCE_TOO_LOW = "nonce too low"
918
+ NONCE_TOO_HIGH = "nonce too high"
919
+ INVALID_NONCE = "invalid nonce"
915
920
 
916
921
  effective_retries = max_retries if max_retries is not None else DEFAULT_MAX_RETRY
917
-
922
+
918
923
  for attempt in range(effective_retries):
919
924
  try:
920
925
  return txn_function()
921
926
  except Exception as e:
922
927
  error_msg = str(e).lower()
923
-
924
- if NONCE_TOO_LOW in error_msg or NONCE_TOO_HIGH in error_msg:
925
- if attempt == max_retries - 1:
928
+
929
+ nonce_errors = [INVALID_NONCE, NONCE_TOO_LOW, NONCE_TOO_HIGH]
930
+ if any(error in error_msg for error in nonce_errors):
931
+ if attempt == effective_retries - 1:
926
932
  raise OpenGradientError(f"Transaction failed after {effective_retries} attempts: {e}")
927
933
  time.sleep(retry_delay)
928
934
  continue
929
-
935
+
930
936
  raise
opengradient/defaults.py CHANGED
@@ -1,8 +1,8 @@
1
1
  # Default variables
2
- DEFAULT_RPC_URL="http://18.188.176.119:8545"
3
- DEFAULT_OG_FAUCET_URL="https://faucet.opengradient.ai/?address="
4
- DEFAULT_HUB_SIGNUP_URL="https://hub.opengradient.ai/signup"
5
- DEFAULT_INFERENCE_CONTRACT_ADDRESS="0x8383C9bD7462F12Eb996DD02F78234C0421A6FaE"
6
- DEFAULT_BLOCKCHAIN_EXPLORER="https://explorer.opengradient.ai/tx/"
7
- DEFAULT_IMAGE_GEN_HOST="18.217.25.69"
8
- DEFAULT_IMAGE_GEN_PORT=5125
2
+ DEFAULT_RPC_URL = "http://18.188.176.119:8545"
3
+ DEFAULT_OG_FAUCET_URL = "https://faucet.opengradient.ai/?address="
4
+ DEFAULT_HUB_SIGNUP_URL = "https://hub.opengradient.ai/signup"
5
+ DEFAULT_INFERENCE_CONTRACT_ADDRESS = "0x8383C9bD7462F12Eb996DD02F78234C0421A6FaE"
6
+ DEFAULT_BLOCKCHAIN_EXPLORER = "https://explorer.opengradient.ai/tx/"
7
+ DEFAULT_IMAGE_GEN_HOST = "18.217.25.69"
8
+ DEFAULT_IMAGE_GEN_PORT = 5125
opengradient/types.py CHANGED
@@ -2,6 +2,7 @@ import time
2
2
  from dataclasses import dataclass
3
3
  from enum import Enum, IntEnum
4
4
  from typing import Dict, List, Optional, Tuple, Union
5
+ import numpy as np
5
6
 
6
7
 
7
8
  class CandleOrder(IntEnum):
@@ -88,8 +89,13 @@ class LlmInferenceMode:
88
89
 
89
90
  @dataclass
90
91
  class ModelOutput:
91
- numbers: List[NumberTensor]
92
- strings: List[StringTensor]
92
+ """
93
+ Model output struct based on translations from smart contract.
94
+ """
95
+
96
+ numbers: Dict[str, np.ndarray]
97
+ strings: Dict[str, np.ndarray]
98
+ jsons: Dict[str, np.ndarray] # Converts to JSON dictionary
93
99
  is_simulation_result: bool
94
100
 
95
101
 
@@ -130,9 +136,9 @@ class Abi:
130
136
 
131
137
  class LLM(str, Enum):
132
138
  """Enum for available LLM models"""
139
+
133
140
  META_LLAMA_3_8B_INSTRUCT = "meta-llama/Meta-Llama-3-8B-Instruct"
134
141
  LLAMA_3_2_3B_INSTRUCT = "meta-llama/Llama-3.2-3B-Instruct"
135
- MISTRAL_7B_INSTRUCT_V3 = "mistralai/Mistral-7B-Instruct-v0.3"
136
142
  QWEN_2_5_72B_INSTRUCT = "Qwen/Qwen2.5-72B-Instruct"
137
143
  META_LLAMA_3_1_70B_INSTRUCT = "meta-llama/Llama-3.1-70B-Instruct"
138
144
  DOBBY_UNHINGED_3_1_8B = "SentientAGI/Dobby-Mini-Unhinged-Llama-3.1-8B"
@@ -141,6 +147,7 @@ class LLM(str, Enum):
141
147
 
142
148
  class TEE_LLM(str, Enum):
143
149
  """Enum for LLM models available for TEE execution"""
150
+
144
151
  META_LLAMA_3_1_70B_INSTRUCT = "meta-llama/Llama-3.1-70B-Instruct"
145
152
 
146
153
 
opengradient/utils.py CHANGED
@@ -6,6 +6,8 @@ from typing import Dict, List, Tuple
6
6
  import numpy as np
7
7
  from web3.datastructures import AttributeDict
8
8
 
9
+ from .types import ModelOutput
10
+
9
11
 
10
12
  def convert_to_fixed_point(number: float) -> Tuple[int, int]:
11
13
  """
@@ -170,3 +172,49 @@ def convert_to_model_output(event_data: AttributeDict) -> Dict[str, np.ndarray]:
170
172
  logging.debug(f"Parsed output: {output_dict}")
171
173
 
172
174
  return output_dict
175
+
176
+
177
+ def convert_array_to_model_output(array_data: List) -> ModelOutput:
178
+ """
179
+ Converts inference output (in array form) into a user-readable ModelOutput class.
180
+ This expects data from the smart contract returned as 4 element array:
181
+ array_data[0] = NumberTensor
182
+ array_data[1] = StringTensor
183
+ array_data[2] = JsonTensor
184
+ array_data[3] = Bool
185
+ """
186
+ # Parse number tensors
187
+ number_data = {}
188
+ for tensor in array_data[0]:
189
+ name = tensor[0]
190
+ values = tensor[1]
191
+ shape = tensor[2]
192
+
193
+ # Convert from fixed point into np.float32
194
+ converted_values = []
195
+ for value in values:
196
+ converted_values.append(convert_to_float32(value=value[0], decimals=value[1]))
197
+
198
+ number_data[name] = np.array(converted_values).reshape(shape)
199
+
200
+ # Parse string tensors
201
+ string_data = {}
202
+ for tensor in array_data[1]:
203
+ name = tensor[0]
204
+ values = tensor[1]
205
+ shape = tensor[2]
206
+ string_data[name] = np.array(values).reshape(shape)
207
+
208
+ # Parse JSON tensors
209
+ json_data = {}
210
+ for tensor in array_data[2]:
211
+ name = tensor[0]
212
+ value = tensor[1]
213
+ json_data[name] = np.array(json.loads(value))
214
+
215
+ return ModelOutput(
216
+ numbers=number_data,
217
+ strings=string_data,
218
+ jsons=json_data,
219
+ is_simulation_result=array_data[3],
220
+ )
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.2
2
2
  Name: opengradient
3
- Version: 0.4.3
3
+ Version: 0.4.5
4
4
  Summary: Python SDK for OpenGradient decentralized model management & inference services
5
5
  Author-email: OpenGradient <oliver@opengradient.ai>
6
6
  License: MIT License
@@ -0,0 +1,27 @@
1
+ opengradient/__init__.py,sha256=RMjHgZ_Ef_l2cjN5a-81XYYx0LtWGKNZCg90-1zl3k0,12893
2
+ opengradient/account.py,sha256=5wrYpws_1lozjOFjLCTHtxgoxK-LmObDAaVy9eDcJY4,1145
3
+ opengradient/cli.py,sha256=5PNwcVwmkJb8A8Apj_JjkG9vDDyNEq0j51omzzQWHIo,25237
4
+ opengradient/client.py,sha256=tPqeRT5e5zwB2xF-57z3ZcsdMYEGHvlhdBQTKL4QPP4,41811
5
+ opengradient/defaults.py,sha256=_WzWawpJ5j2V7sbTYGA3L_gqdftslLLqlRPQ2LBsCHY,417
6
+ opengradient/exceptions.py,sha256=88tfegboGtlehQcwhxsl6ZzhLJWZWlkf_bkHTiCtXpo,3391
7
+ opengradient/types.py,sha256=OeQsS95p3FdSF6O04xX2pEvnyemHonyNs_QxevDAdS8,4268
8
+ opengradient/utils.py,sha256=PIECAkk0lvj8TfBItugv01Hi_cJai09jGW34zhzsJ7E,8321
9
+ opengradient/abi/ModelExecutorHistorical.abi,sha256=AEceI9y-VyeaAvHna6AaCH8XOgF8nzSLGEHWCr89Ddw,2625
10
+ opengradient/abi/inference.abi,sha256=MR5u9npZ-Yx2EqRW17_M-UnGgFF3mMEMepOwaZ-Bkgc,7040
11
+ opengradient/alphasense/__init__.py,sha256=Ah6IpoPTb6UkY7ImOWLJs3tjlxDJx6vZVR7p5IwP_Ks,292
12
+ opengradient/alphasense/read_workflow_tool.py,sha256=p7lMFVgu4wZvvkjqlajBbLOhaUmmb7spra-wJhBm7B0,3205
13
+ opengradient/alphasense/run_model_tool.py,sha256=UamcotxZcKfSGhFFmRTz6VRMvQdrL96yI3dCfKYsgo4,4971
14
+ opengradient/alphasense/types.py,sha256=uxk4JQKbaS2cM3ZiKpdHQb234OJ5ylprNR5vi01QFzA,220
15
+ opengradient/llm/__init__.py,sha256=b_msjZstmTRD20LOaZbBxxigtnL7vxh7CziiyVlqpAo,1104
16
+ opengradient/llm/og_langchain.py,sha256=F9gdrwqMFPjLUakUfSjc4Obx8qIvf7HIphe6eM3ObZo,4322
17
+ opengradient/llm/og_openai.py,sha256=9MA3HDotjcJbbcGJ7DYZUA990QHQLdWpxAYhJSsTiz0,3708
18
+ opengradient/proto/__init__.py,sha256=AhaSmrqV0TXGzCKaoPV8-XUvqs2fGAJBM2aOmDpkNbE,55
19
+ opengradient/proto/infer.proto,sha256=13eaEMcppxkBF8yChptsX9HooWFwJKze7oLZNl-LEb8,1217
20
+ opengradient/proto/infer_pb2.py,sha256=sGWDDVumYhXoCJTG9rLyvKu4XyaEjPE_b038kbNlj7w,3484
21
+ opengradient/proto/infer_pb2_grpc.py,sha256=q42_eZ7OZCMTXdWocYA4Ka3B0c3B74dOhfqdaIOO5AU,6700
22
+ opengradient-0.4.5.dist-info/LICENSE,sha256=xEcvQ3AxZOtDkrqkys2Mm6Y9diEnaSeQRKvxi-JGnNA,1069
23
+ opengradient-0.4.5.dist-info/METADATA,sha256=yc7mqP1VaPx6IeBVJ4cSoGrepN_0cJz1XBhvF02UAmA,5832
24
+ opengradient-0.4.5.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
25
+ opengradient-0.4.5.dist-info/entry_points.txt,sha256=yUKTaJx8RXnybkob0J62wVBiCp_1agVbgw9uzsmaeJc,54
26
+ opengradient-0.4.5.dist-info/top_level.txt,sha256=oC1zimVLa2Yi1LQz8c7x-0IQm92milb5ax8gHBHwDqU,13
27
+ opengradient-0.4.5.dist-info/RECORD,,
@@ -1,9 +0,0 @@
1
- """
2
- OpenGradient AlphaSense Tools
3
- """
4
-
5
- from .model_tool import *
6
-
7
- __all__ = ["create_og_model_tool", "ToolType"]
8
-
9
- __pdoc__ = {"model_tool": False}
@@ -1,25 +0,0 @@
1
- opengradient/__init__.py,sha256=81JA72atkYcZqE1o8ZUT77bHTlUGyZS8-xQUh2-uH5E,12883
2
- opengradient/account.py,sha256=5wrYpws_1lozjOFjLCTHtxgoxK-LmObDAaVy9eDcJY4,1145
3
- opengradient/cli.py,sha256=5PNwcVwmkJb8A8Apj_JjkG9vDDyNEq0j51omzzQWHIo,25237
4
- opengradient/client.py,sha256=clDMNOjqC8x61bq5P3NXJp5__I5zAfSTTnXpIXXTpAg,41505
5
- opengradient/defaults.py,sha256=XQ9q-zC-QYaa1ONhGhniK1ZCO_ryiTjAWbYO3QCvXZ8,403
6
- opengradient/exceptions.py,sha256=88tfegboGtlehQcwhxsl6ZzhLJWZWlkf_bkHTiCtXpo,3391
7
- opengradient/types.py,sha256=oLJlC5hE84n63VN7M4YnN6Q1I-RlHt6VIvk-coPkMpM,4159
8
- opengradient/utils.py,sha256=hf1dQvOHdCFthrAr_Wif_PNn6-C3zZaa3QCZX1HMWoA,6911
9
- opengradient/abi/ModelExecutorHistorical.abi,sha256=AEceI9y-VyeaAvHna6AaCH8XOgF8nzSLGEHWCr89Ddw,2625
10
- opengradient/abi/inference.abi,sha256=MR5u9npZ-Yx2EqRW17_M-UnGgFF3mMEMepOwaZ-Bkgc,7040
11
- opengradient/llm/__init__.py,sha256=b_msjZstmTRD20LOaZbBxxigtnL7vxh7CziiyVlqpAo,1104
12
- opengradient/llm/og_langchain.py,sha256=F9gdrwqMFPjLUakUfSjc4Obx8qIvf7HIphe6eM3ObZo,4322
13
- opengradient/llm/og_openai.py,sha256=9MA3HDotjcJbbcGJ7DYZUA990QHQLdWpxAYhJSsTiz0,3708
14
- opengradient/mltools/__init__.py,sha256=JU1VDzYb-rpko_hn9IlwjOvy_dfW1hvxBCBC2xrnvUM,147
15
- opengradient/mltools/model_tool.py,sha256=dNoznwAiauff961EzM_9raYGdwcPLIqxIsPE1yCuWPU,5086
16
- opengradient/proto/__init__.py,sha256=AhaSmrqV0TXGzCKaoPV8-XUvqs2fGAJBM2aOmDpkNbE,55
17
- opengradient/proto/infer.proto,sha256=13eaEMcppxkBF8yChptsX9HooWFwJKze7oLZNl-LEb8,1217
18
- opengradient/proto/infer_pb2.py,sha256=sGWDDVumYhXoCJTG9rLyvKu4XyaEjPE_b038kbNlj7w,3484
19
- opengradient/proto/infer_pb2_grpc.py,sha256=q42_eZ7OZCMTXdWocYA4Ka3B0c3B74dOhfqdaIOO5AU,6700
20
- opengradient-0.4.3.dist-info/LICENSE,sha256=xEcvQ3AxZOtDkrqkys2Mm6Y9diEnaSeQRKvxi-JGnNA,1069
21
- opengradient-0.4.3.dist-info/METADATA,sha256=Lp8Bujqlo5R1uYhaQQlwNzkNcCar8VCCE5SQElfWMnw,5832
22
- opengradient-0.4.3.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
23
- opengradient-0.4.3.dist-info/entry_points.txt,sha256=yUKTaJx8RXnybkob0J62wVBiCp_1agVbgw9uzsmaeJc,54
24
- opengradient-0.4.3.dist-info/top_level.txt,sha256=oC1zimVLa2Yi1LQz8c7x-0IQm92milb5ax8gHBHwDqU,13
25
- opengradient-0.4.3.dist-info/RECORD,,