opengradient 0.5.0a3__tar.gz → 0.5.1__tar.gz

This diff compares the contents of two publicly released versions of the package, as they appear in their public registry, and is provided for informational purposes only.
Files changed (40)
  1. {opengradient-0.5.0a3/src/opengradient.egg-info → opengradient-0.5.1}/PKG-INFO +1 -1
  2. {opengradient-0.5.0a3 → opengradient-0.5.1}/pyproject.toml +1 -1
  3. {opengradient-0.5.0a3 → opengradient-0.5.1}/src/opengradient/client.py +36 -29
  4. {opengradient-0.5.0a3 → opengradient-0.5.1}/src/opengradient/types.py +50 -3
  5. {opengradient-0.5.0a3 → opengradient-0.5.1/src/opengradient.egg-info}/PKG-INFO +1 -1
  6. {opengradient-0.5.0a3 → opengradient-0.5.1}/LICENSE +0 -0
  7. {opengradient-0.5.0a3 → opengradient-0.5.1}/README.md +0 -0
  8. {opengradient-0.5.0a3 → opengradient-0.5.1}/setup.cfg +0 -0
  9. {opengradient-0.5.0a3 → opengradient-0.5.1}/src/opengradient/__init__.py +0 -0
  10. {opengradient-0.5.0a3 → opengradient-0.5.1}/src/opengradient/abi/InferencePrecompile.abi +0 -0
  11. {opengradient-0.5.0a3 → opengradient-0.5.1}/src/opengradient/abi/PriceHistoryInference.abi +0 -0
  12. {opengradient-0.5.0a3 → opengradient-0.5.1}/src/opengradient/abi/WorkflowScheduler.abi +0 -0
  13. {opengradient-0.5.0a3 → opengradient-0.5.1}/src/opengradient/abi/inference.abi +0 -0
  14. {opengradient-0.5.0a3 → opengradient-0.5.1}/src/opengradient/account.py +0 -0
  15. {opengradient-0.5.0a3 → opengradient-0.5.1}/src/opengradient/alphasense/__init__.py +0 -0
  16. {opengradient-0.5.0a3 → opengradient-0.5.1}/src/opengradient/alphasense/read_workflow_tool.py +0 -0
  17. {opengradient-0.5.0a3 → opengradient-0.5.1}/src/opengradient/alphasense/run_model_tool.py +0 -0
  18. {opengradient-0.5.0a3 → opengradient-0.5.1}/src/opengradient/alphasense/types.py +0 -0
  19. {opengradient-0.5.0a3 → opengradient-0.5.1}/src/opengradient/bin/PriceHistoryInference.bin +0 -0
  20. {opengradient-0.5.0a3 → opengradient-0.5.1}/src/opengradient/cli.py +0 -0
  21. {opengradient-0.5.0a3 → opengradient-0.5.1}/src/opengradient/defaults.py +0 -0
  22. {opengradient-0.5.0a3 → opengradient-0.5.1}/src/opengradient/exceptions.py +0 -0
  23. {opengradient-0.5.0a3 → opengradient-0.5.1}/src/opengradient/llm/__init__.py +0 -0
  24. {opengradient-0.5.0a3 → opengradient-0.5.1}/src/opengradient/llm/og_langchain.py +0 -0
  25. {opengradient-0.5.0a3 → opengradient-0.5.1}/src/opengradient/llm/og_openai.py +0 -0
  26. {opengradient-0.5.0a3 → opengradient-0.5.1}/src/opengradient/proto/__init__.py +0 -0
  27. {opengradient-0.5.0a3 → opengradient-0.5.1}/src/opengradient/proto/infer.proto +0 -0
  28. {opengradient-0.5.0a3 → opengradient-0.5.1}/src/opengradient/proto/infer_pb2.py +0 -0
  29. {opengradient-0.5.0a3 → opengradient-0.5.1}/src/opengradient/proto/infer_pb2_grpc.py +0 -0
  30. {opengradient-0.5.0a3 → opengradient-0.5.1}/src/opengradient/utils.py +0 -0
  31. {opengradient-0.5.0a3 → opengradient-0.5.1}/src/opengradient/workflow_models/__init__.py +0 -0
  32. {opengradient-0.5.0a3 → opengradient-0.5.1}/src/opengradient/workflow_models/constants.py +0 -0
  33. {opengradient-0.5.0a3 → opengradient-0.5.1}/src/opengradient/workflow_models/types.py +0 -0
  34. {opengradient-0.5.0a3 → opengradient-0.5.1}/src/opengradient/workflow_models/utils.py +0 -0
  35. {opengradient-0.5.0a3 → opengradient-0.5.1}/src/opengradient/workflow_models/workflow_models.py +0 -0
  36. {opengradient-0.5.0a3 → opengradient-0.5.1}/src/opengradient.egg-info/SOURCES.txt +0 -0
  37. {opengradient-0.5.0a3 → opengradient-0.5.1}/src/opengradient.egg-info/dependency_links.txt +0 -0
  38. {opengradient-0.5.0a3 → opengradient-0.5.1}/src/opengradient.egg-info/entry_points.txt +0 -0
  39. {opengradient-0.5.0a3 → opengradient-0.5.1}/src/opengradient.egg-info/requires.txt +0 -0
  40. {opengradient-0.5.0a3 → opengradient-0.5.1}/src/opengradient.egg-info/top_level.txt +0 -0
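
Only the first five entries carry content changes (the version bump in both PKG-INFO copies and pyproject.toml, plus the client.py and types.py changes below); the remaining 35 files differ only by the renamed release directory (+0 -0).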
PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: opengradient
- Version: 0.5.0a3
+ Version: 0.5.1
  Summary: Python SDK for OpenGradient decentralized model management & inference services
  Author-email: OpenGradient <kyle@vannalabs.ai>
  License-Expression: MIT
pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
  
  [project]
  name = "opengradient"
- version = "0.5.0a3"
+ version = "0.5.1"
  description = "Python SDK for OpenGradient decentralized model management & inference services"
  authors = [{name = "OpenGradient", email = "kyle@vannalabs.ai"}]
  readme = "README.md"
src/opengradient/client.py
@@ -61,6 +61,9 @@ DEFAULT_RETRY_DELAY_SEC = 1
  
  PRECOMPILE_CONTRACT_ADDRESS = "0x00000000000000000000000000000000000000F4"
  
+ X402_PROCESSING_HASH_HEADER = "x-processing-hash"
+ X402_PLACEHOLDER_API_KEY = "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef"
+
  class Client:
      _inference_hub_contract_address: str
      _blockchain: Web3
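
The two new module-level constants name values that were previously inlined further down in this file: the response header carrying the x402 processing/payment hash, and the placeholder bearer token sent on x402-paid requests. Both usages appear in the hunks below.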
@@ -456,7 +459,11 @@ class Client:
              OpenGradientError: If the inference fails.
          """
          # Check if this is a local model or external
-         if not local_model and not self._is_local_model(model_cid):
+         # TODO (Kyle): separate TEE and Vanilla completion requests
+         if inference_mode == LlmInferenceMode.TEE:
+             if model_cid not in TEE_LLM:
+                 return OpenGradientError("That model CID is not supported yet for TEE inference")
+
              return self._external_llm_completion(
                  model=model_cid,
                  prompt=prompt,
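
Note that the new guard returns the OpenGradientError rather than raising it, so a TEE call with an unsupported CID resolves to an exception instance instead of throwing. A defensive caller sketch under that behavior — keyword arguments are abbreviated from the hunk above, and the import paths are assumed from the package layout in the file list:

    from opengradient.exceptions import OpenGradientError
    from opengradient.types import LlmInferenceMode

    # `client` is an already-constructed opengradient Client; arguments illustrative.
    result = client.llm_completion(
        model_cid="gpt-4o",
        inference_mode=LlmInferenceMode.TEE,
        prompt="Summarize the 0.5.1 changes in one line.",
    )
    if isinstance(result, OpenGradientError):
        raise result  # surfaced here because the guard returns instead of raising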
@@ -467,11 +474,11 @@
  
          # Original local model logic
          def execute_transaction():
-             if inference_mode != LlmInferenceMode.VANILLA and inference_mode != LlmInferenceMode.TEE:
+             if inference_mode != LlmInferenceMode.VANILLA:
                  raise OpenGradientError("Invalid inference mode %s: Inference mode must be VANILLA or TEE" % inference_mode)
  
-             if inference_mode == LlmInferenceMode.TEE and model_cid not in [llm.value for llm in TEE_LLM]:
-                 raise OpenGradientError("That model CID is not supported yet supported for TEE inference")
+             if model_cid not in [llm.value for llm in LLM]:
+                 raise OpenGradientError("That model CID is not yet supported for inference")
  
              contract = self._blockchain.eth.contract(address=self._inference_hub_contract_address, abi=self._inference_abi)
  
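This hunk and the TEE guard above use two different membership idioms, and the difference matters by Python version. (The retained error message still says "VANILLA or TEE" even though TEE requests now return earlier, so only VANILLA reaches this check.) A minimal sketch of the distinction:

    from enum import Enum

    class LLM(str, Enum):
        GPT_4O = "gpt-4o"

    # Comparing against the value list works on every supported Python version:
    assert "gpt-4o" in [llm.value for llm in LLM]

    # Bare `in` against the Enum class (the `model_cid not in TEE_LLM` guard
    # above) matches member *values* only on Python 3.12+; on 3.8-3.11 a
    # containment test with a plain string raises TypeError instead.
    assert "gpt-4o" in LLM  # True on 3.12+ only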
@@ -525,7 +532,7 @@
          api_key = self._get_api_key_for_model(model)
  
          if api_key:
-             print("External LLM completion using API key")
+             logging.debug("External LLM completions using API key")
              url = f"{self._llm_server_url}/v1/completions"
  
              headers = {
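
Switching from print() to logging.debug() means these messages are silent by default; applications that want them back opt in through the standard logging setup:

    import logging

    # Enable the SDK's debug messages (assumes the default root logger).
    logging.basicConfig(level=logging.DEBUG)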
@@ -573,8 +580,7 @@ class Client:
          ) as client:
              headers = {
                  "Content-Type": "application/json",
-                 # "Authorization": "Bearer special-key"
-                 "Authorization": "Bearer 0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef"
+                 "Authorization": f"Bearer {X402_PLACEHOLDER_API_KEY}"
              }
  
              payload = {
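
The literal replaced here is the same 64-hex-character sentinel now named X402_PLACEHOLDER_API_KEY in the constants hunk above; the chat path below previously sent "Bearer special-key", so both paths now share one token. The value is a fixed pattern, not a secret:

    # The constant equals the 16-character pattern repeated four times:
    assert "0x" + "1234567890abcdef" * 4 == (
        "0x1234567890abcdef1234567890abcdef"
        "1234567890abcdef1234567890abcdef"
    )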
@@ -595,15 +601,11 @@ class Client:
              result = json.loads(content.decode())
              payment_hash = ""
  
-             print("Payment response headers: ", response.headers)
-             print("Payment response content: ", result)
-
-             if "X-Payment-Response" in response.headers:
-                 payment_response = decode_x_payment_response(response.headers["X-Payment-Response"])
-                 payment_hash = payment_response["transaction"]
+             if X402_PROCESSING_HASH_HEADER in response.headers:
+                 payment_hash = response.headers[X402_PROCESSING_HASH_HEADER]
  
              return TextGenerationOutput(
-                 transaction_hash="external",  # No blockchain transaction for external
+                 transaction_hash="external",
                  completion_output=result.get("completion"),
                  payment_hash=payment_hash
              )
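
0.5.1 drops the x402 X-Payment-Response decoding in favor of reading a plain response header. A minimal client-side sketch of the new flow, assuming a server that returns the settlement hash this way (the URL is a placeholder; httpx header lookups are case-insensitive, so the lowercase constant also matches X-Processing-Hash):

    import httpx

    X402_PROCESSING_HASH_HEADER = "x-processing-hash"

    response = httpx.post(
        "https://example.invalid/v1/completions",  # placeholder endpoint
        json={"model": "gpt-4o", "prompt": "hi"},
    )
    # Empty string when the server did not settle a payment.
    payment_hash = response.headers.get(X402_PROCESSING_HASH_HEADER, "")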
@@ -629,7 +631,7 @@
  
      def llm_chat(
          self,
-         model_cid: str,  # Changed from LLM to str
+         model_cid: str,
          messages: List[Dict],
          inference_mode: LlmInferenceMode = LlmInferenceMode.VANILLA,
          max_tokens: int = 100,
@@ -662,7 +664,11 @@ class Client:
              OpenGradientError: If the inference fails.
          """
          # Check if this is a local model or external
-         if not local_model and not self._is_local_model(model_cid):
+         # TODO (Kyle): separate TEE and Vanilla completion requests
+         if inference_mode == LlmInferenceMode.TEE:
+             if model_cid not in TEE_LLM:
+                 return OpenGradientError("That model CID is not supported yet for TEE inference")
+
              return self._external_llm_chat(
                  model=model_cid,
                  messages=messages,
@@ -675,11 +681,11 @@ class Client:
  
          # Original local model logic
          def execute_transaction():
-             if inference_mode != LlmInferenceMode.VANILLA and inference_mode != LlmInferenceMode.TEE:
+             if inference_mode != LlmInferenceMode.VANILLA:
                  raise OpenGradientError("Invalid inference mode %s: Inference mode must be VANILLA or TEE" % inference_mode)
-
-             if inference_mode == LlmInferenceMode.TEE and model_cid not in TEE_LLM:
-                 raise OpenGradientError("That model CID is not supported yet supported for TEE inference")
+
+             if model_cid not in [llm.value for llm in LLM]:
+                 raise OpenGradientError("That model CID is not yet supported for inference")
  
              contract = self._blockchain.eth.contract(address=self._inference_hub_contract_address, abi=self._inference_abi)
  
@@ -768,7 +774,7 @@
          api_key = self._get_api_key_for_model(model)
  
          if api_key:
-             print("External LLM completion using API key")
+             logging.debug("External LLM completion using API key")
              url = f"{self._llm_server_url}/v1/chat/completions"
  
              headers = {
@@ -821,7 +827,7 @@
          ) as client:
              headers = {
                  "Content-Type": "application/json",
-                 "Authorization": "Bearer special-key"
+                 "Authorization": f"Bearer {X402_PLACEHOLDER_API_KEY}"
              }
  
              payload = {
@@ -846,16 +852,17 @@ class Client:
              result = json.loads(content.decode())
  
              payment_hash = ""
-             print("Payment response headers: ", response.headers)
-             print("Payment response content: ", result)
-             if "X-Payment-Response" in response.headers:
-                 payment_response = decode_x_payment_response(response.headers["X-Payment-Response"])
-                 payment_hash = payment_response["transaction"]
+             if X402_PROCESSING_HASH_HEADER in response.headers:
+                 payment_hash = response.headers[X402_PROCESSING_HASH_HEADER]
+
+             choices = result.get("choices")
+             if not choices:
+                 raise OpenGradientError(f"Invalid response: 'choices' missing or empty in {result}")
  
              return TextGenerationOutput(
                  transaction_hash="external",
-                 finish_reason=result["choices"][0].get("finish_reason"),
-                 chat_output=result["choices"][0].get("message"),
+                 finish_reason=choices[0].get("finish_reason"),
+                 chat_output=choices[0].get("message"),
                  payment_hash=payment_hash
              )
  
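The new guard turns a malformed upstream reply (no "choices" array) into a descriptive OpenGradientError instead of the bare KeyError/IndexError the old result["choices"][0] indexing produced. The pattern in isolation — a standalone sketch with a hypothetical malformed reply; the SDK raises OpenGradientError here:

    result = {"error": "upstream timeout"}  # hypothetical malformed reply

    choices = result.get("choices")
    if not choices:
        raise ValueError(f"Invalid response: 'choices' missing or empty in {result}")

    finish_reason = choices[0].get("finish_reason")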
src/opengradient/types.py
@@ -193,18 +193,65 @@ class Abi:
  class LLM(str, Enum):
      """Enum for available LLM models"""
  
+     # Existing open-source OG hosted models
      META_LLAMA_3_8B_INSTRUCT = "meta-llama/Meta-Llama-3-8B-Instruct"
      LLAMA_3_2_3B_INSTRUCT = "meta-llama/Llama-3.2-3B-Instruct"
      QWEN_2_5_72B_INSTRUCT = "Qwen/Qwen2.5-72B-Instruct"
      META_LLAMA_3_1_70B_INSTRUCT = "meta-llama/Llama-3.1-70B-Instruct"
      DOBBY_UNHINGED_3_1_8B = "SentientAGI/Dobby-Mini-Unhinged-Llama-3.1-8B"
      DOBBY_LEASHED_3_1_8B = "SentientAGI/Dobby-Mini-Leashed-Llama-3.1-8B"
-
+
+     # OpenAI models via TEE
+     GPT_4_1_2025_04_14 = "gpt-4.1-2025-04-14"
+     GPT_4O = "gpt-4o"
+     O4_MINI = "o4-mini"
+
+     # Anthropic models via TEE
+     CLAUDE_3_7_SONNET = "claude-3.7-sonnet"
+     CLAUDE_3_5_HAIKU = "claude-3.5-haiku"
+     CLAUDE_4_0_SONNET = "claude-4.0-sonnet"
+
+     # Google models via TEE
+     GEMINI_2_5_FLASH = "gemini-2.5-flash"
+     GEMINI_2_5_PRO = "gemini-2.5-pro"
+     GEMINI_2_0_FLASH = "gemini-2.0-flash"
+
+     # xAI Grok models via TEE
+     GROK_3_MINI_BETA = "grok-3-mini-beta"
+     GROK_3_BETA = "grok-3-beta"
+     GROK_2_1212 = "grok-2-1212"
+     GROK_2_VISION_LATEST = "grok-2-vision-latest"
+     GROK_4_1_FAST = "grok-4.1-fast"
+     GROK_4_1_FAST_NON_REASONING = "grok-4-1-fast-non-reasoning"
  
  class TEE_LLM(str, Enum):
      """Enum for LLM models available for TEE execution"""
-
-     META_LLAMA_3_1_70B_INSTRUCT = "meta-llama/Llama-3.1-70B-Instruct"
+
+     # Existing (Currently turned off)
+     # META_LLAMA_3_1_70B_INSTRUCT = "meta-llama/Llama-3.1-70B-Instruct"
+
+     # OpenAI models via TEE
+     GPT_4_1_2025_04_14 = "gpt-4.1-2025-04-14"
+     GPT_4O = "gpt-4o"
+     O4_MINI = "o4-mini"
+
+     # Anthropic models via TEE
+     CLAUDE_3_7_SONNET = "claude-3.7-sonnet"
+     CLAUDE_3_5_HAIKU = "claude-3.5-haiku"
+     CLAUDE_4_0_SONNET = "claude-4.0-sonnet"
+
+     # Google models via TEE
+     GEMINI_2_5_FLASH = "gemini-2.5-flash"
+     GEMINI_2_5_PRO = "gemini-2.5-pro"
+     GEMINI_2_0_FLASH = "gemini-2.0-flash"
+
+     # xAI Grok models via TEE
+     GROK_3_MINI_BETA = "grok-3-mini-beta"
+     GROK_3_BETA = "grok-3-beta"
+     GROK_2_1212 = "grok-2-1212"
+     GROK_2_VISION_LATEST = "grok-2-vision-latest"
+     GROK_4_1_FAST = "grok-4.1-fast"
+     GROK_4_1_FAST_NON_REASONING = "grok-4-1-fast-non-reasoning"
  
  
  @dataclass
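
Every identifier in TEE_LLM is duplicated by hand in LLM, so the TEE list stays a subset of the general list. A quick consistency check under that assumption (import path taken from this file's location):

    from opengradient.types import LLM, TEE_LLM

    # Holds for 0.5.1 as long as the two enums are kept in sync manually.
    assert {m.value for m in TEE_LLM} <= {m.value for m in LLM}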
src/opengradient.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: opengradient
- Version: 0.5.0a3
+ Version: 0.5.1
  Summary: Python SDK for OpenGradient decentralized model management & inference services
  Author-email: OpenGradient <kyle@vannalabs.ai>
  License-Expression: MIT