opengradient 0.3.8__py3-none-any.whl → 0.3.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- opengradient/__init__.py +2 -1
- opengradient/cli.py +3 -4
- opengradient/llm/__init__.py +5 -0
- opengradient/llm/chat.py +118 -0
- opengradient/types.py +2 -1
- {opengradient-0.3.8.dist-info → opengradient-0.3.10.dist-info}/METADATA +1 -2
- opengradient-0.3.10.dist-info/RECORD +17 -0
- {opengradient-0.3.8.dist-info → opengradient-0.3.10.dist-info}/WHEEL +1 -1
- opengradient-0.3.8.dist-info/RECORD +0 -15
- {opengradient-0.3.8.dist-info → opengradient-0.3.10.dist-info}/LICENSE +0 -0
- {opengradient-0.3.8.dist-info → opengradient-0.3.10.dist-info}/entry_points.txt +0 -0
- {opengradient-0.3.8.dist-info → opengradient-0.3.10.dist-info}/top_level.txt +0 -0
opengradient/__init__.py
CHANGED
@@ -3,8 +3,9 @@ from typing import Dict, List, Optional, Tuple
 from .client import Client
 from .defaults import DEFAULT_INFERENCE_CONTRACT_ADDRESS, DEFAULT_RPC_URL
 from .types import InferenceMode, LLM
+from . import llm
 
-__version__ = "0.3.8"
+__version__ = "0.3.10"
 
 _client = None
 
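The hunk above makes the new llm subpackage importable from the top-level package and bumps the version string. A minimal sketch of what that looks like from user code, not part of the diff:

    import opengradient
    from opengradient import llm  # newly available via "from . import llm"

    print(opengradient.__version__)  # "0.3.10"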
opengradient/cli.py
CHANGED
@@ -417,7 +417,7 @@ def print_llm_completion_result(model_cid, tx_hash, llm_output):
               help='Temperature for LLM inference (0.0 to 1.0)')
 @click.option('--tools',
               type=str,
-              default=
+              default=None,
               help='Tool configurations in JSON format')
 @click.option('--tools-file',
               type=click.Path(exists=True, path_type=Path),
@@ -450,7 +450,6 @@ def chat(
         opengradient chat --model meta-llama/Meta-Llama-3-8B-Instruct --messages '[{"role":"user","content":"hello"}]' --max-tokens 50 --temperature 0.7
         opengradient chat -m mistralai/Mistral-7B-Instruct-v0.3 --messages-file messages.json --stop-sequence "." --stop-sequence "\n"
     """
-    # TODO (Kyle): ^^^^^^^ Edit description with more examples using tools
     client: Client = ctx.obj['client']
     try:
         click.echo(f"Running LLM chat inference for model \"{model_cid}\"\n")
@@ -474,9 +473,9 @@ def chat(
             messages = json.load(file)
 
         # Parse tools if provided
-        if
+        if tools is not None and tools != "[]" and tools_file:
             click.echo("Cannot have both tools and tools_file")
-
+            ctx.exit(1)
             return
 
         parsed_tools=[]
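The --tools option now defaults to None, and the new guard exits with a non-zero status when both --tools and --tools-file are supplied. A hedged invocation sketch in the same style as the docstring examples above; the JSON schema shown mirrors the OpenAI-style function dicts built by bind_tools in opengradient/llm/chat.py below and is an assumption, since the format the CLI parser expects is not visible in this hunk:

    opengradient chat \
        -m meta-llama/Meta-Llama-3-8B-Instruct \
        --messages '[{"role":"user","content":"What is the weather in Paris?"}]' \
        --tools '[{"type":"function","function":{"name":"get_weather","description":"Look up current weather","parameters":{}}}]'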
opengradient/llm/chat.py
ADDED
@@ -0,0 +1,118 @@
+from typing import List, Dict, Optional, Any, Sequence, Union
+import json
+
+from langchain.chat_models.base import BaseChatModel
+from langchain.schema import (
+    AIMessage,
+    HumanMessage,
+    SystemMessage,
+    BaseMessage,
+    ChatResult,
+    ChatGeneration,
+)
+from langchain_core.callbacks.manager import CallbackManagerForLLMRun
+from langchain_core.tools import BaseTool
+from langchain_core.messages import ToolCall
+
+from opengradient import Client
+from opengradient.defaults import DEFAULT_RPC_URL, DEFAULT_INFERENCE_CONTRACT_ADDRESS
+
+
+class OpenGradientChatModel(BaseChatModel):
+    """OpenGradient adapter class for LangChain chat model"""
+
+    client: Client = None
+    model_cid: str = None
+    max_tokens: int = None
+    tools: List[Dict] = []
+
+    def __init__(self, private_key: str, model_cid: str, max_tokens: int = 300):
+        super().__init__()
+        self.client = Client(
+            private_key=private_key,
+            rpc_url=DEFAULT_RPC_URL,
+            contract_address=DEFAULT_INFERENCE_CONTRACT_ADDRESS,
+            email=None,
+            password=None)
+        self.model_cid = model_cid
+
+    @property
+    def _llm_type(self) -> str:
+        return "opengradient"
+
+    def bind_tools(
+        self,
+        tools: Sequence[Union[BaseTool, Dict]],
+    ) -> "OpenGradientChatModel":
+        """Bind tools to the model."""
+        tool_dicts = []
+        for tool in tools:
+            if isinstance(tool, BaseTool):
+                tool_dicts.append({
+                    "type": "function",
+                    "function": {
+                        "name": tool.name,
+                        "description": tool.description,
+                        "parameters": tool.args_schema.schema() if hasattr(tool, "args_schema") else {}
+                    }
+                })
+            else:
+                tool_dicts.append(tool)
+
+        self.tools = tool_dicts
+        return self
+
+    def _generate(
+        self,
+        messages: List[BaseMessage],
+        stop: Optional[List[str]] = None,
+        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        **kwargs: Any,
+    ) -> ChatResult:
+        chat_messages = []
+        for message in messages:
+            if isinstance(message, SystemMessage):
+                chat_messages.append({"role": "system", "content": message.content})
+            elif isinstance(message, HumanMessage):
+                chat_messages.append({"role": "user", "content": message.content})
+            elif isinstance(message, AIMessage):
+                chat_messages.append({"role": "assistant", "content": message.content})
+
+        tx_hash, finish_reason, chat_response = self.client.llm_chat(
+            model_cid=self.model_cid,
+            messages=chat_messages,
+            stop_sequence=stop,
+            max_tokens=self.max_tokens,
+            tools=self.tools
+        )
+
+        if "tool_calls" in chat_response:
+            tool_calls = []
+            for tool_call in chat_response["tool_calls"]:
+                tool_calls.append(
+                    ToolCall(
+                        id=tool_call.get("id", ""),
+                        name=tool_call["name"],
+                        args=json.loads(tool_call["arguments"])
+                    )
+                )
+
+            message = AIMessage(
+                content='',
+                tool_calls=tool_calls
+            )
+        else:
+            message = AIMessage(content=chat_response["content"])
+
+        return ChatResult(
+            generations=[ChatGeneration(
+                message=message,
+                generation_info={"finish_reason": finish_reason}
+            )]
+        )
+
+    @property
+    def _identifying_params(self) -> Dict[str, Any]:
+        return {
+            "model_name": self.model_cid,
+        }
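A minimal usage sketch for the new LangChain adapter, not part of the diff. It assumes opengradient/llm/__init__.py re-exports OpenGradientChatModel (otherwise import it from opengradient.llm.chat) and that the placeholder private key is replaced with a real one:

    from langchain_core.tools import tool
    from opengradient.llm import OpenGradientChatModel  # assumed re-export

    @tool
    def get_eth_price() -> str:
        """Return the latest ETH price as a string."""
        return "2500.00"  # placeholder body for illustration

    chat_model = OpenGradientChatModel(
        private_key="0x...",  # hypothetical key
        model_cid="meta-llama/Meta-Llama-3-8B-Instruct",
    ).bind_tools([get_eth_price])

    result = chat_model.invoke("What is the current ETH price?")
    print(result.tool_calls or result.content)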
opengradient/types.py
CHANGED
@@ -77,4 +77,5 @@ class Abi:
 class LLM(str, Enum):
     META_LLAMA3_8B_INSTRUCT = "meta-llama/Meta-Llama-3-8B-Instruct"
     LLAMA_3_2_3B_INSTRUCT = "meta-llama/Llama-3.2-3B-Instruct"
-    MISTRAL_7B_INSTRUCT_V3 = "mistralai/Mistral-7B-Instruct-v0.3"
+    MISTRAL_7B_INSTRUCT_V3 = "mistralai/Mistral-7B-Instruct-v0.3"
+    HERMES_3_LLAMA_3_1_70B = "NousResearch/Hermes-3-Llama-3.1-70B"
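Because LLM subclasses both str and Enum, the new member can be passed anywhere a model CID string is expected, for example:

    import opengradient as og

    model = og.LLM.HERMES_3_LLAMA_3_1_70B
    assert model == "NousResearch/Hermes-3-Llama-3.1-70B"  # str-valued enum member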
{opengradient-0.3.8.dist-info → opengradient-0.3.10.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: opengradient
-Version: 0.3.8
+Version: 0.3.10
 Summary: Python SDK for OpenGradient decentralized model management & inference services
 Author-email: OpenGradient <oliver@opengradient.ai>
 License: MIT License
@@ -87,7 +87,6 @@ Requires-Dist: keyring==24.3.1
 Requires-Dist: more-itertools==10.5.0
 Requires-Dist: msgpack==1.1.0
 Requires-Dist: multidict==6.1.0
-Requires-Dist: numpy==2.1.1
 Requires-Dist: packaging==24.1
 Requires-Dist: pandas==2.2.3
 Requires-Dist: parsimonious==0.10.0
opengradient-0.3.10.dist-info/RECORD
ADDED
@@ -0,0 +1,17 @@
+opengradient/__init__.py,sha256=SdOj_l9rvKPGKzLbrnwBlOzLi23Qjpnji5mHPIMmyvQ,3129
+opengradient/account.py,sha256=2B7rtCXQDX-yF4U69h8B9-OUreJU4IqoGXG_1Hn9nWs,1150
+opengradient/cli.py,sha256=0SRt9iQcCHxR1QmsF54-KhdpqHx9_va0UyckoPQcYwg,22937
+opengradient/client.py,sha256=yaexPTr-CSPQXR5AOrhHDmygKCq37Q_FIVy8Mtq6THk,31253
+opengradient/defaults.py,sha256=jweJ6QyzNY0oO22OUA8B-uJPA90cyedhMT9J38MmGjw,319
+opengradient/exceptions.py,sha256=v4VmUGTvvtjhCZAhR24Ga42z3q-DzR1Y5zSqP_yn2Xk,3366
+opengradient/types.py,sha256=kRCkxJ2xD6Y5eLmrrS61t66qm7jzNF0Qbnntl1_FMKk,2143
+opengradient/utils.py,sha256=_dEIhepJXjJFfHLGTUwXloZJXnlQbvwqHSPu08548jI,6532
+opengradient/abi/inference.abi,sha256=VMxv4pli9ESYL2hCpbU41Z_WweCBy_3EcTYkCWCb-rU,6623
+opengradient/llm/__init__.py,sha256=n_11WFPoU8YtGc6wg9cK6gEy9zBISf1183Loip3dAbI,62
+opengradient/llm/chat.py,sha256=B7HOaj8Bk9aE1Wdhi4pXrxFLsmFx3O4ezQ_x7owK99Y,3812
+opengradient-0.3.10.dist-info/LICENSE,sha256=xEcvQ3AxZOtDkrqkys2Mm6Y9diEnaSeQRKvxi-JGnNA,1069
+opengradient-0.3.10.dist-info/METADATA,sha256=Q6l-xAAG0WbMVi8ipXJTTXx0Xk2Ptqitq3AlsiJaKXw,7581
+opengradient-0.3.10.dist-info/WHEEL,sha256=R06PA3UVYHThwHvxuRWMqaGcr-PuniXahwjmQRFMEkY,91
+opengradient-0.3.10.dist-info/entry_points.txt,sha256=yUKTaJx8RXnybkob0J62wVBiCp_1agVbgw9uzsmaeJc,54
+opengradient-0.3.10.dist-info/top_level.txt,sha256=oC1zimVLa2Yi1LQz8c7x-0IQm92milb5ax8gHBHwDqU,13
+opengradient-0.3.10.dist-info/RECORD,,
opengradient-0.3.8.dist-info/RECORD
REMOVED
@@ -1,15 +0,0 @@
-opengradient/__init__.py,sha256=OYFubsCX5iHd8Yc_T17nB_CDkOdgsiDvgBFx1UaOk0s,3110
-opengradient/account.py,sha256=2B7rtCXQDX-yF4U69h8B9-OUreJU4IqoGXG_1Hn9nWs,1150
-opengradient/cli.py,sha256=Mv7iAyeBFUxaXL1hDIgHgn5x-GCMdi2BQgY6zrOkg_4,23003
-opengradient/client.py,sha256=yaexPTr-CSPQXR5AOrhHDmygKCq37Q_FIVy8Mtq6THk,31253
-opengradient/defaults.py,sha256=jweJ6QyzNY0oO22OUA8B-uJPA90cyedhMT9J38MmGjw,319
-opengradient/exceptions.py,sha256=v4VmUGTvvtjhCZAhR24Ga42z3q-DzR1Y5zSqP_yn2Xk,3366
-opengradient/types.py,sha256=Hkp0cXveMtNKxMpcwIHl1n4OGgYxx0HQM70qA05SZV8,2076
-opengradient/utils.py,sha256=_dEIhepJXjJFfHLGTUwXloZJXnlQbvwqHSPu08548jI,6532
-opengradient/abi/inference.abi,sha256=VMxv4pli9ESYL2hCpbU41Z_WweCBy_3EcTYkCWCb-rU,6623
-opengradient-0.3.8.dist-info/LICENSE,sha256=xEcvQ3AxZOtDkrqkys2Mm6Y9diEnaSeQRKvxi-JGnNA,1069
-opengradient-0.3.8.dist-info/METADATA,sha256=CF-pugA2hzgyhEFxFOo53LL_no6eZ8GiaJIw8qEocr8,7608
-opengradient-0.3.8.dist-info/WHEEL,sha256=P9jw-gEje8ByB7_hXoICnHtVCrEwMQh-630tKvQWehc,91
-opengradient-0.3.8.dist-info/entry_points.txt,sha256=yUKTaJx8RXnybkob0J62wVBiCp_1agVbgw9uzsmaeJc,54
-opengradient-0.3.8.dist-info/top_level.txt,sha256=oC1zimVLa2Yi1LQz8c7x-0IQm92milb5ax8gHBHwDqU,13
-opengradient-0.3.8.dist-info/RECORD,,
{opengradient-0.3.8.dist-info → opengradient-0.3.10.dist-info}/LICENSE
File without changes

{opengradient-0.3.8.dist-info → opengradient-0.3.10.dist-info}/entry_points.txt
File without changes

{opengradient-0.3.8.dist-info → opengradient-0.3.10.dist-info}/top_level.txt
File without changes