opengradient 0.4.5__tar.gz → 0.4.6__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. {opengradient-0.4.5/src/opengradient.egg-info → opengradient-0.4.6}/PKG-INFO +1 -1
  2. {opengradient-0.4.5 → opengradient-0.4.6}/pyproject.toml +1 -1
  3. {opengradient-0.4.5 → opengradient-0.4.6}/src/opengradient/cli.py +4 -4
  4. {opengradient-0.4.5 → opengradient-0.4.6}/src/opengradient/client.py +20 -7
  5. {opengradient-0.4.5 → opengradient-0.4.6}/src/opengradient/llm/og_langchain.py +3 -1
  6. {opengradient-0.4.5 → opengradient-0.4.6}/src/opengradient/llm/og_openai.py +3 -1
  7. {opengradient-0.4.5 → opengradient-0.4.6}/src/opengradient/types.py +18 -1
  8. {opengradient-0.4.5 → opengradient-0.4.6/src/opengradient.egg-info}/PKG-INFO +1 -1
  9. {opengradient-0.4.5 → opengradient-0.4.6}/LICENSE +0 -0
  10. {opengradient-0.4.5 → opengradient-0.4.6}/MANIFEST.in +0 -0
  11. {opengradient-0.4.5 → opengradient-0.4.6}/README.md +0 -0
  12. {opengradient-0.4.5 → opengradient-0.4.6}/setup.cfg +0 -0
  13. {opengradient-0.4.5 → opengradient-0.4.6}/src/opengradient/__init__.py +0 -0
  14. {opengradient-0.4.5 → opengradient-0.4.6}/src/opengradient/abi/ModelExecutorHistorical.abi +0 -0
  15. {opengradient-0.4.5 → opengradient-0.4.6}/src/opengradient/abi/inference.abi +0 -0
  16. {opengradient-0.4.5 → opengradient-0.4.6}/src/opengradient/account.py +0 -0
  17. {opengradient-0.4.5 → opengradient-0.4.6}/src/opengradient/alphasense/__init__.py +0 -0
  18. {opengradient-0.4.5 → opengradient-0.4.6}/src/opengradient/alphasense/read_workflow_tool.py +0 -0
  19. {opengradient-0.4.5 → opengradient-0.4.6}/src/opengradient/alphasense/run_model_tool.py +0 -0
  20. {opengradient-0.4.5 → opengradient-0.4.6}/src/opengradient/alphasense/types.py +0 -0
  21. {opengradient-0.4.5 → opengradient-0.4.6}/src/opengradient/defaults.py +0 -0
  22. {opengradient-0.4.5 → opengradient-0.4.6}/src/opengradient/exceptions.py +0 -0
  23. {opengradient-0.4.5 → opengradient-0.4.6}/src/opengradient/llm/__init__.py +0 -0
  24. {opengradient-0.4.5 → opengradient-0.4.6}/src/opengradient/proto/__init__.py +0 -0
  25. {opengradient-0.4.5 → opengradient-0.4.6}/src/opengradient/proto/infer.proto +0 -0
  26. {opengradient-0.4.5 → opengradient-0.4.6}/src/opengradient/proto/infer_pb2.py +0 -0
  27. {opengradient-0.4.5 → opengradient-0.4.6}/src/opengradient/proto/infer_pb2_grpc.py +0 -0
  28. {opengradient-0.4.5 → opengradient-0.4.6}/src/opengradient/utils.py +0 -0
  29. {opengradient-0.4.5 → opengradient-0.4.6}/src/opengradient.egg-info/SOURCES.txt +0 -0
  30. {opengradient-0.4.5 → opengradient-0.4.6}/src/opengradient.egg-info/dependency_links.txt +0 -0
  31. {opengradient-0.4.5 → opengradient-0.4.6}/src/opengradient.egg-info/entry_points.txt +0 -0
  32. {opengradient-0.4.5 → opengradient-0.4.6}/src/opengradient.egg-info/requires.txt +0 -0
  33. {opengradient-0.4.5 → opengradient-0.4.6}/src/opengradient.egg-info/top_level.txt +0 -0
{opengradient-0.4.5/src/opengradient.egg-info → opengradient-0.4.6}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: opengradient
-Version: 0.4.5
+Version: 0.4.6
 Summary: Python SDK for OpenGradient decentralized model management & inference services
 Author-email: OpenGradient <oliver@opengradient.ai>
 License: MIT License
{opengradient-0.4.5 → opengradient-0.4.6}/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "opengradient"
-version = "0.4.5"
+version = "0.4.6"
 description = "Python SDK for OpenGradient decentralized model management & inference services"
 authors = [{name = "OpenGradient", email = "oliver@opengradient.ai"}]
 license = {file = "LICENSE"}
{opengradient-0.4.5 → opengradient-0.4.6}/src/opengradient/cli.py
@@ -375,7 +375,7 @@ def completion(ctx, model_cid: str, inference_mode: str, prompt: str, max_tokens
     client: Client = ctx.obj["client"]
     try:
         click.echo(f'Running LLM completion inference for model "{model_cid}"\n')
-        tx_hash, llm_output = client.llm_completion(
+        completion_output = client.llm_completion(
             model_cid=model_cid,
             inference_mode=LlmInferenceModes[inference_mode],
             prompt=prompt,
@@ -384,7 +384,7 @@ def completion(ctx, model_cid: str, inference_mode: str, prompt: str, max_tokens
             temperature=temperature,
         )
 
-        print_llm_completion_result(model_cid, tx_hash, llm_output)
+        print_llm_completion_result(model_cid, completion_output.transaction_hash, completion_output.completion_output)
     except Exception as e:
         click.echo(f"Error running LLM completion: {str(e)}")
 
@@ -517,7 +517,7 @@ def chat(
         if not tools and not tools_file:
             parsed_tools = None
 
-        tx_hash, finish_reason, llm_chat_output = client.llm_chat(
+        completion_output = client.llm_chat(
             model_cid=model_cid,
             inference_mode=LlmInferenceModes[inference_mode],
             messages=messages,
@@ -528,7 +528,7 @@ def chat(
             tool_choice=tool_choice,
         )
 
-        print_llm_chat_result(model_cid, tx_hash, finish_reason, llm_chat_output)
+        print_llm_chat_result(model_cid, completion_output.transaction_hash, completion_output.finish_reason, completion_output.chat_output)
     except Exception as e:
         click.echo(f"Error running LLM chat inference: {str(e)}")
 
{opengradient-0.4.5 → opengradient-0.4.6}/src/opengradient/client.py
@@ -18,7 +18,7 @@ from web3.logs import DISCARD
 from . import utils
 from .exceptions import OpenGradientError
 from .proto import infer_pb2, infer_pb2_grpc
-from .types import LLM, TEE_LLM, HistoricalInputQuery, InferenceMode, LlmInferenceMode, ModelOutput, SchedulerParams
+from .types import LLM, TEE_LLM, HistoricalInputQuery, InferenceMode, LlmInferenceMode, ModelOutput, TextGenerationOutput, SchedulerParams
 from .defaults import DEFAULT_IMAGE_GEN_HOST, DEFAULT_IMAGE_GEN_PORT
 
 _FIREBASE_CONFIG = {
@@ -350,7 +350,7 @@ class Client:
         stop_sequence: Optional[List[str]] = None,
         temperature: float = 0.0,
         max_retries: Optional[int] = None,
-    ) -> Tuple[str, str]:
+    ) -> TextGenerationOutput:
         """
         Perform inference on an LLM model using completions.
 
@@ -363,7 +363,9 @@
             temperature (float): Temperature for LLM inference, between 0 and 1. Default is 0.0.
 
         Returns:
-            Tuple[str, str]: The transaction hash and the LLM completion output.
+            TextGenerationOutput: Generated text results including:
+                - Transaction hash
+                - String of completion output
 
         Raises:
             OpenGradientError: If the inference fails.
@@ -418,7 +420,11 @@
                 raise OpenGradientError("LLM completion result event not found in transaction logs")
 
             llm_answer = parsed_logs[0]["args"]["response"]["answer"]
-            return tx_hash.hex(), llm_answer
+
+            return TextGenerationOutput(
+                transaction_hash=tx_hash.hex(),
+                completion_output=llm_answer
+            )
 
         return run_with_retry(execute_transaction, max_retries)
 
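For SDK users, the upshot of the llm_completion hunks above is that the method no longer returns a (tx_hash, output) tuple in 0.4.6. A minimal migration sketch, assuming an already-constructed Client (construction elided; the model CID and prompt are placeholders, not taken from this diff):

    import opengradient as og
    from opengradient.client import Client

    def migrated_completion(client: Client) -> None:
        # 0.4.5 callers unpacked a tuple:
        #     tx_hash, llm_output = client.llm_completion(...)
        # 0.4.6 returns a single TextGenerationOutput object.
        result = client.llm_completion(
            model_cid="meta-llama/Meta-Llama-3-8B-Instruct",  # placeholder CID
            inference_mode=og.LlmInferenceMode.VANILLA,
            prompt="Say hello",
            max_tokens=100,
        )
        print(result.transaction_hash)   # was the first tuple element
        print(result.completion_output)  # was the second tuple element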
@@ -433,7 +439,7 @@
         tools: Optional[List[Dict]] = [],
         tool_choice: Optional[str] = None,
         max_retries: Optional[int] = None,
-    ) -> Tuple[str, str]:
+    ) -> TextGenerationOutput:
         """
         Perform inference on an LLM model using chat.
 
@@ -485,7 +491,10 @@
             tool_choice (str, optional): Sets a specific tool to choose. Default value is "auto".
 
         Returns:
-            Tuple[str, str, dict]: The transaction hash, finish reason, and a dictionary struct of LLM chat messages.
+            TextGenerationOutput: Generated text results including:
+                - Transaction hash
+                - Finish reason (tool_call, stop, etc.)
+                - Dictionary of chat message output (role, content, tool_call, etc.)
 
         Raises:
             OpenGradientError: If the inference fails.
@@ -570,7 +579,11 @@
             if (tool_calls := message.get("tool_calls")) is not None:
                 message["tool_calls"] = [dict(tool_call) for tool_call in tool_calls]
 
-            return tx_hash.hex(), llm_result["finish_reason"], message
+            return TextGenerationOutput(
+                transaction_hash=tx_hash.hex(),
+                finish_reason=llm_result["finish_reason"],
+                chat_output=message,
+            )
 
         return run_with_retry(execute_transaction, max_retries)
 
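The same migration applies to llm_chat, which previously returned a three-tuple. A sketch under the same assumptions and imports as the previous one (message content is illustrative):

    def migrated_chat(client: Client) -> None:
        # 0.4.5: tx_hash, finish_reason, message = client.llm_chat(...)
        result = client.llm_chat(
            model_cid="meta-llama/Meta-Llama-3-8B-Instruct",  # placeholder CID
            inference_mode=og.LlmInferenceMode.VANILLA,
            messages=[{"role": "user", "content": "What is 2 + 2?"}],
        )
        print(result.transaction_hash)
        print(result.finish_reason)  # e.g. "stop" or "tool_call"
        print(result.chat_output)    # dict with role, content, tool_calls, ...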
{opengradient-0.4.5 → opengradient-0.4.6}/src/opengradient/llm/og_langchain.py
@@ -91,7 +91,7 @@ class OpenGradientChatModel(BaseChatModel):
             else:
                 raise ValueError(f"Unexpected message type: {message}")
 
-        _, finish_reason, chat_response = self.client.llm_chat(
+        chat_output = self.client.llm_chat(
             model_cid=self.model_cid,
             messages=sdk_messages,
             stop_sequence=stop,
@@ -99,6 +99,8 @@
             tools=self.tools,
             inference_mode=LlmInferenceMode.VANILLA,
         )
+        finish_reason = chat_output.finish_reason
+        chat_response = chat_output.chat_output
 
         if "tool_calls" in chat_response and chat_response["tool_calls"]:
             tool_calls = []
{opengradient-0.4.5 → opengradient-0.4.6}/src/opengradient/llm/og_openai.py
@@ -26,7 +26,7 @@ class OGCompletions(object):
         # convert OpenAI message format so it's compatible with the SDK
         sdk_messages = OGCompletions.convert_to_abi_compatible(messages)
 
-        _, finish_reason, chat_completion = self.client.llm_chat(
+        chat_output = self.client.llm_chat(
             model_cid=model,
             messages=sdk_messages,
             max_tokens=200,
@@ -35,6 +35,8 @@
             temperature=0.25,
             inference_mode=og.LlmInferenceMode.VANILLA,
         )
+        finish_reason = chat_output.finish_reason
+        chat_completion = chat_output.chat_output
 
         choice = {
             "index": 0,  # Add missing index field
{opengradient-0.4.5 → opengradient-0.4.6}/src/opengradient/types.py
@@ -1,7 +1,7 @@
 import time
 from dataclasses import dataclass
 from enum import Enum, IntEnum
-from typing import Dict, List, Optional, Tuple, Union
+from typing import Dict, List, Optional, Tuple, Union, DefaultDict
 import numpy as np
 
 
@@ -98,6 +98,23 @@ class ModelOutput:
     jsons: Dict[str, np.ndarray]  # Converts to JSON dictionary
     is_simulation_result: bool
 
+@dataclass
+class TextGenerationOutput:
+    """
+    Output structure for text generation requests.
+    """
+    transaction_hash: str
+    """Blockchain hash for the transaction."""
+
+    finish_reason: str | None = ""
+    """Reason for completion (e.g., 'tool_call', 'stop', 'error'). Empty string if not applicable."""
+
+    chat_output: Dict | None = DefaultDict
+    """Dictionary of chat response containing role, message content, tool call parameters, etc. Empty dict if not applicable."""
+
+    completion_output: str | None = ""
+    """Raw text output from completion-style generation. Empty string if not applicable."""
+
 
 @dataclass
 class AbiFunction:
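As the new dataclass's field docstrings indicate, TextGenerationOutput is shared by both code paths: llm_completion fills completion_output, while llm_chat fills finish_reason and chat_output, leaving the unused fields at their defaults. A standalone sketch of the shape (values are illustrative, not from this diff):

    from opengradient.types import TextGenerationOutput

    completion = TextGenerationOutput(
        transaction_hash="0xabc...",  # illustrative hash
        completion_output="Hello from the model",
    )
    chat = TextGenerationOutput(
        transaction_hash="0xdef...",  # illustrative hash
        finish_reason="stop",
        chat_output={"role": "assistant", "content": "4"},
    )
    assert completion.finish_reason == ""  # unset fields keep their defaults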
{opengradient-0.4.5 → opengradient-0.4.6/src/opengradient.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: opengradient
-Version: 0.4.5
+Version: 0.4.6
 Summary: Python SDK for OpenGradient decentralized model management & inference services
 Author-email: OpenGradient <oliver@opengradient.ai>
 License: MIT License