opengradient 0.5.2__tar.gz → 0.5.7__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. {opengradient-0.5.2/src/opengradient.egg-info → opengradient-0.5.7}/PKG-INFO +2 -1
  2. {opengradient-0.5.2 → opengradient-0.5.7}/pyproject.toml +2 -1
  3. {opengradient-0.5.2 → opengradient-0.5.7}/src/opengradient/cli.py +13 -2
  4. {opengradient-0.5.2 → opengradient-0.5.7}/src/opengradient/client.py +13 -2
  5. {opengradient-0.5.2 → opengradient-0.5.7}/src/opengradient/types.py +45 -38
  6. {opengradient-0.5.2 → opengradient-0.5.7/src/opengradient.egg-info}/PKG-INFO +2 -1
  7. {opengradient-0.5.2 → opengradient-0.5.7}/src/opengradient.egg-info/requires.txt +1 -0
  8. {opengradient-0.5.2 → opengradient-0.5.7}/LICENSE +0 -0
  9. {opengradient-0.5.2 → opengradient-0.5.7}/README.md +0 -0
  10. {opengradient-0.5.2 → opengradient-0.5.7}/setup.cfg +0 -0
  11. {opengradient-0.5.2 → opengradient-0.5.7}/src/opengradient/__init__.py +0 -0
  12. {opengradient-0.5.2 → opengradient-0.5.7}/src/opengradient/abi/InferencePrecompile.abi +0 -0
  13. {opengradient-0.5.2 → opengradient-0.5.7}/src/opengradient/abi/PriceHistoryInference.abi +0 -0
  14. {opengradient-0.5.2 → opengradient-0.5.7}/src/opengradient/abi/WorkflowScheduler.abi +0 -0
  15. {opengradient-0.5.2 → opengradient-0.5.7}/src/opengradient/abi/inference.abi +0 -0
  16. {opengradient-0.5.2 → opengradient-0.5.7}/src/opengradient/account.py +0 -0
  17. {opengradient-0.5.2 → opengradient-0.5.7}/src/opengradient/alphasense/__init__.py +0 -0
  18. {opengradient-0.5.2 → opengradient-0.5.7}/src/opengradient/alphasense/read_workflow_tool.py +0 -0
  19. {opengradient-0.5.2 → opengradient-0.5.7}/src/opengradient/alphasense/run_model_tool.py +0 -0
  20. {opengradient-0.5.2 → opengradient-0.5.7}/src/opengradient/alphasense/types.py +0 -0
  21. {opengradient-0.5.2 → opengradient-0.5.7}/src/opengradient/bin/PriceHistoryInference.bin +0 -0
  22. {opengradient-0.5.2 → opengradient-0.5.7}/src/opengradient/defaults.py +0 -0
  23. {opengradient-0.5.2 → opengradient-0.5.7}/src/opengradient/exceptions.py +0 -0
  24. {opengradient-0.5.2 → opengradient-0.5.7}/src/opengradient/llm/__init__.py +0 -0
  25. {opengradient-0.5.2 → opengradient-0.5.7}/src/opengradient/llm/og_langchain.py +0 -0
  26. {opengradient-0.5.2 → opengradient-0.5.7}/src/opengradient/llm/og_openai.py +0 -0
  27. {opengradient-0.5.2 → opengradient-0.5.7}/src/opengradient/proto/__init__.py +0 -0
  28. {opengradient-0.5.2 → opengradient-0.5.7}/src/opengradient/proto/infer.proto +0 -0
  29. {opengradient-0.5.2 → opengradient-0.5.7}/src/opengradient/proto/infer_pb2.py +0 -0
  30. {opengradient-0.5.2 → opengradient-0.5.7}/src/opengradient/proto/infer_pb2_grpc.py +0 -0
  31. {opengradient-0.5.2 → opengradient-0.5.7}/src/opengradient/utils.py +0 -0
  32. {opengradient-0.5.2 → opengradient-0.5.7}/src/opengradient/workflow_models/__init__.py +0 -0
  33. {opengradient-0.5.2 → opengradient-0.5.7}/src/opengradient/workflow_models/constants.py +0 -0
  34. {opengradient-0.5.2 → opengradient-0.5.7}/src/opengradient/workflow_models/types.py +0 -0
  35. {opengradient-0.5.2 → opengradient-0.5.7}/src/opengradient/workflow_models/utils.py +0 -0
  36. {opengradient-0.5.2 → opengradient-0.5.7}/src/opengradient/workflow_models/workflow_models.py +0 -0
  37. {opengradient-0.5.2 → opengradient-0.5.7}/src/opengradient.egg-info/SOURCES.txt +0 -0
  38. {opengradient-0.5.2 → opengradient-0.5.7}/src/opengradient.egg-info/dependency_links.txt +0 -0
  39. {opengradient-0.5.2 → opengradient-0.5.7}/src/opengradient.egg-info/entry_points.txt +0 -0
  40. {opengradient-0.5.2 → opengradient-0.5.7}/src/opengradient.egg-info/top_level.txt +0 -0
{opengradient-0.5.2/src/opengradient.egg-info → opengradient-0.5.7}/PKG-INFO

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: opengradient
-Version: 0.5.2
+Version: 0.5.7
 Summary: Python SDK for OpenGradient decentralized model management & inference services
 Author-email: OpenGradient <kyle@vannalabs.ai>
 License-Expression: MIT
@@ -24,6 +24,7 @@ Requires-Dist: langchain>=0.3.7
 Requires-Dist: openai>=1.58.1
 Requires-Dist: pydantic>=2.9.2
 Requires-Dist: og-test-x402==0.0.1
+Requires-Dist: x402==0.2.1
 Dynamic: license-file
 
 # OpenGradient Python SDK
```
{opengradient-0.5.2 → opengradient-0.5.7}/pyproject.toml

```diff
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "opengradient"
-version = "0.5.2"
+version = "0.5.7"
 description = "Python SDK for OpenGradient decentralized model management & inference services"
 authors = [{name = "OpenGradient", email = "kyle@vannalabs.ai"}]
 readme = "README.md"
@@ -30,6 +30,7 @@ dependencies = [
     "openai>=1.58.1",
     "pydantic>=2.9.2",
     "og-test-x402==0.0.1",
+    "x402==0.2.1"
 ]
 
 [project.scripts]
```
{opengradient-0.5.2 → opengradient-0.5.7}/src/opengradient/cli.py

```diff
@@ -20,7 +20,7 @@ from .defaults import (
     DEFAULT_API_URL,
     DEFAULT_LLM_SERVER_URL,
 )
-from .types import InferenceMode, LlmInferenceMode, LLM, TEE_LLM
+from .types import InferenceMode, LlmInferenceMode, LLM, TEE_LLM, x402SettlementMode
 
 OG_CONFIG_FILE = Path.home() / ".opengradient_config.json"
 
@@ -74,6 +74,12 @@ LlmInferenceModes = {
 }
 
 
+x402SettlementModes = {
+    "settle-batch": x402SettlementMode.SETTLE_BATCH,
+    "settle": x402SettlementMode.SETTLE,
+    "settle-metadata": x402SettlementMode.SETTLE_METADATA,
+}
+
 def initialize_config(ctx):
     """Interactively initialize OpenGradient config"""
     if ctx.obj:  # Check if config data already exists
@@ -422,8 +428,9 @@ def infer(ctx, model_cid: str, inference_mode: str, input_data, input_file: Path
 @click.option("--stop-sequence", multiple=True, help="Stop sequences for LLM")
 @click.option("--temperature", type=float, default=0.0, help="Temperature for LLM inference (0.0 to 1.0)")
 @click.option("--local", is_flag=True, help="Force use of local model even if not in LLM enum")
+@click.option("--x402-settlement-mode", "x402_settlement_mode", type=click.Choice(x402SettlementModes.keys()), default="settle-batch", help="Settlement mode for x402 payload")
 @click.pass_context
-def completion(ctx, model_cid: str, inference_mode: str, prompt: str, max_tokens: int, stop_sequence: List[str], temperature: float, local: bool):
+def completion(ctx, model_cid: str, inference_mode: str, x402_settlement_mode: str, prompt: str, max_tokens: int, stop_sequence: List[str], temperature: float, local: bool):
     """
     Run completion inference on an LLM model (local or external).
 
@@ -464,6 +471,7 @@ def completion(ctx, model_cid: str, inference_mode: str, prompt: str, max_tokens
         stop_sequence=list(stop_sequence),
         temperature=temperature,
         local_model=local,
+        x402_settlement_mode=x402_settlement_mode,
     )
 
     print_llm_completion_result(model_cid, completion_output.transaction_hash, completion_output.completion_output, is_local)
@@ -529,6 +537,7 @@ def print_llm_completion_result(model_cid, tx_hash, llm_output, is_local=True):
 )
 @click.option("--tool-choice", type=str, default="", help="Specific tool choice for the LLM")
 @click.option("--local", is_flag=True, help="Force use of local model even if not in LLM enum")
+@click.option("--x402-settlement-mode", type=click.Choice(x402SettlementModes.keys()), default="settle-batch", help="Settlement mode for x402 payload")
 @click.pass_context
 def chat(
     ctx,
@@ -542,6 +551,7 @@ def chat(
     tools: Optional[str],
     tools_file: Optional[Path],
     tool_choice: Optional[str],
+    x402_settlement_mode: Optional[str],
     local: bool,
 ):
     """
@@ -637,6 +647,7 @@ def chat(
         tools=parsed_tools,
         tool_choice=tool_choice,
         local_model=local,
+        x402_settlement_mode=x402_settlement_mode,
     )
 
     print_llm_chat_result(
```
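Taken together, the cli.py hunks wire a new --x402-settlement-mode option into both the completion and chat commands: click.Choice validates the flag against the keys of the x402SettlementModes mapping, and the value defaults to "settle-batch". Below is a minimal standalone sketch of the same pattern; the enum and mapping mirror the diff, but the `demo` command is hypothetical, not the SDK's actual CLI entry point.

```python
# Sketch of the option pattern added in 0.5.7 (hypothetical `demo` command).
import click
from enum import StrEnum  # requires Python 3.11+


class x402SettlementMode(StrEnum):
    SETTLE = "settle"
    SETTLE_METADATA = "settle-metadata"
    SETTLE_BATCH = "settle-batch"


x402SettlementModes = {
    "settle-batch": x402SettlementMode.SETTLE_BATCH,
    "settle": x402SettlementMode.SETTLE,
    "settle-metadata": x402SettlementMode.SETTLE_METADATA,
}


@click.command()
@click.option(
    "--x402-settlement-mode",
    "x402_settlement_mode",
    type=click.Choice(list(x402SettlementModes.keys())),
    default="settle-batch",
    help="Settlement mode for x402 payload",
)
def demo(x402_settlement_mode: str) -> None:
    # The CLI forwards the validated string; resolving it to the enum
    # member is a plain dict lookup.
    mode = x402SettlementModes[x402_settlement_mode]
    click.echo(f"settlement mode: {mode.value}")


if __name__ == "__main__":
    demo()
```

Note that the CLI forwards the validated string as-is (e.g. `og completion --x402-settlement-mode settle ...`), while the Client signatures below annotate the parameter as x402SettlementMode; because the enum is a StrEnum (see the types.py hunk further down), the two are interchangeable where the client drops the value into request headers.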
{opengradient-0.5.2 → opengradient-0.5.7}/src/opengradient/client.py

```diff
@@ -23,6 +23,7 @@ from .proto import infer_pb2, infer_pb2_grpc
 from .types import (
     LLM,
     TEE_LLM,
+    x402SettlementMode,
     HistoricalInputQuery,
     InferenceMode,
     LlmInferenceMode,
@@ -436,6 +437,7 @@ class Client:
         inference_mode: LlmInferenceMode = LlmInferenceMode.VANILLA,
         max_retries: Optional[int] = None,
         local_model: Optional[bool] = False,
+        x402_settlement_mode: Optional[x402SettlementMode] = x402SettlementMode.SETTLE_BATCH,
     ) -> TextGenerationOutput:
         """
         Perform inference on an LLM model using completions.
@@ -470,6 +472,7 @@ class Client:
                 max_tokens=max_tokens,
                 stop_sequence=stop_sequence,
                 temperature=temperature,
+                x402_settlement_mode=x402_settlement_mode,
             )
 
         # Original local model logic
@@ -516,6 +519,7 @@ class Client:
         max_tokens: int = 100,
         stop_sequence: Optional[List[str]] = None,
         temperature: float = 0.0,
+        x402_settlement_mode: Optional[x402SettlementMode] = x402SettlementMode.SETTLE_BATCH,
     ) -> TextGenerationOutput:
         """
         Route completion request to external LLM server with x402 payments.
@@ -584,7 +588,8 @@ class Client:
         ) as client:
             headers = {
                 "Content-Type": "application/json",
-                "Authorization": f"Bearer {X402_PLACEHOLDER_API_KEY}"
+                "Authorization": f"Bearer {X402_PLACEHOLDER_API_KEY}",
+                "X-SETTLEMENT-TYPE": x402_settlement_mode,
             }
 
             payload = {
@@ -645,6 +650,7 @@ class Client:
         tool_choice: Optional[str] = None,
         max_retries: Optional[int] = None,
         local_model: Optional[bool] = False,
+        x402_settlement_mode: Optional[x402SettlementMode] = x402SettlementMode.SETTLE_BATCH,
     ) -> TextGenerationOutput:
         """
         Perform inference on an LLM model using chat.
@@ -681,6 +687,7 @@ class Client:
                 temperature=temperature,
                 tools=tools,
                 tool_choice=tool_choice,
+                x402_settlement_mode=x402_settlement_mode,
             )
 
         # Original local model logic
@@ -760,6 +767,7 @@ class Client:
         temperature: float = 0.0,
         tools: Optional[List[Dict]] = None,
         tool_choice: Optional[str] = None,
+        x402_settlement_mode: x402SettlementMode = x402SettlementMode.SETTLE_BATCH,
     ) -> TextGenerationOutput:
         """
         Route chat request to external LLM server with x402 payments.
@@ -835,7 +843,8 @@ class Client:
         ) as client:
             headers = {
                 "Content-Type": "application/json",
-                "Authorization": f"Bearer {X402_PLACEHOLDER_API_KEY}"
+                "Authorization": f"Bearer {X402_PLACEHOLDER_API_KEY}",
+                "X-SETTLEMENT-TYPE": x402_settlement_mode
             }
 
             payload = {
@@ -858,6 +867,8 @@ class Client:
             # Read the response content
             content = await response.aread()
             result = json.loads(content.decode())
+            # print(f"Response: {response}")
+            # print(f"Response Headers: {response.headers}")
 
             payment_hash = ""
             if X402_PROCESSING_HASH_HEADER in response.headers:
```
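The client-side effect of the new parameter is a single extra request header, X-SETTLEMENT-TYPE, on both external routes. Because x402SettlementMode is a StrEnum, each member is itself a str, so it is a valid header value whether the caller passed the enum member or the raw string. A sketch of just the header construction, assuming opengradient 0.5.7 is installed; the placeholder constant and the surrounding request code are stand-ins for what the diff elides:

```python
# Header construction as added in 0.5.7 (sketch only).
# Assumes `pip install opengradient==0.5.7`.
from opengradient.types import x402SettlementMode

X402_PLACEHOLDER_API_KEY = "placeholder"  # stand-in for the SDK's constant

mode = x402SettlementMode.SETTLE_BATCH
headers = {
    "Content-Type": "application/json",
    "Authorization": f"Bearer {X402_PLACEHOLDER_API_KEY}",
    "X-SETTLEMENT-TYPE": mode,  # a StrEnum member is already a str
}
assert headers["X-SETTLEMENT-TYPE"] == "settle-batch"
```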
{opengradient-0.5.2 → opengradient-0.5.7}/src/opengradient/types.py

```diff
@@ -1,10 +1,15 @@
 import time
 from dataclasses import dataclass
-from enum import Enum, IntEnum
+from enum import Enum, IntEnum, StrEnum
 from typing import Dict, List, Optional, Tuple, Union, DefaultDict
 import numpy as np
 
 
+class x402SettlementMode(StrEnum):
+    SETTLE = "settle"
+    SETTLE_METADATA = "settle-metadata"
+    SETTLE_BATCH = "settle-batch"
+
 class CandleOrder(IntEnum):
     ASCENDING = 0
     DESCENDING = 1
```
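One portability note: StrEnum was added to the standard library in Python 3.11, so this import raises ImportError on 3.10 and older; the diff does not show a requires-python bump, so whether 0.5.7 still claims older interpreters is not visible here. The payoff is that each member is a genuine str, which is the behavior the new header code relies on:

```python
from enum import StrEnum  # Python 3.11+


class x402SettlementMode(StrEnum):  # copied from the hunk above
    SETTLE = "settle"
    SETTLE_METADATA = "settle-metadata"
    SETTLE_BATCH = "settle-batch"


# Members are strings, compare equal to their values, and round-trip by value.
assert isinstance(x402SettlementMode.SETTLE, str)
assert x402SettlementMode.SETTLE_BATCH == "settle-batch"
assert x402SettlementMode("settle-metadata") is x402SettlementMode.SETTLE_METADATA
```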
```diff
@@ -193,36 +198,37 @@ class Abi:
 class LLM(str, Enum):
     """Enum for available LLM models"""
 
-    # Existing open-source OG hosted models
-    META_LLAMA_3_8B_INSTRUCT = "meta-llama/Meta-Llama-3-8B-Instruct"
-    LLAMA_3_2_3B_INSTRUCT = "meta-llama/Llama-3.2-3B-Instruct"
-    QWEN_2_5_72B_INSTRUCT = "Qwen/Qwen2.5-72B-Instruct"
-    META_LLAMA_3_1_70B_INSTRUCT = "meta-llama/Llama-3.1-70B-Instruct"
-    DOBBY_UNHINGED_3_1_8B = "SentientAGI/Dobby-Mini-Unhinged-Llama-3.1-8B"
-    DOBBY_LEASHED_3_1_8B = "SentientAGI/Dobby-Mini-Leashed-Llama-3.1-8B"
+    # # Existing open-source OG hosted models
+    # META_LLAMA_3_8B_INSTRUCT = "meta-llama/Meta-Llama-3-8B-Instruct"
+    # LLAMA_3_2_3B_INSTRUCT = "meta-llama/Llama-3.2-3B-Instruct"
+    # QWEN_2_5_72B_INSTRUCT = "Qwen/Qwen2.5-72B-Instruct"
+    # META_LLAMA_3_1_70B_INSTRUCT = "meta-llama/Llama-3.1-70B-Instruct"
+    # DOBBY_UNHINGED_3_1_8B = "SentientAGI/Dobby-Mini-Unhinged-Llama-3.1-8B"
+    # DOBBY_LEASHED_3_1_8B = "SentientAGI/Dobby-Mini-Leashed-Llama-3.1-8B"
 
     # OpenAI models via TEE
-    GPT_4_1_2025_04_14 = "OpenAI/gpt-4.1-2025-04-14"
-    GPT_4O = "OpenAI/gpt-4o"
-    O4_MINI = "OpenAI/o4-mini"
+    GPT_4_1_2025_04_14 = "openai/gpt-4.1-2025-04-14"
+    GPT_4O = "openai/gpt-4o"
+    O4_MINI = "openai/o4-mini"
 
     # Anthropic models via TEE
-    CLAUDE_3_7_SONNET = "Anthropic/claude-3.7-sonnet"
-    CLAUDE_3_5_HAIKU = "Anthropic/claude-3.5-haiku"
-    CLAUDE_4_0_SONNET = "Anthropic/claude-4.0-sonnet"
+    CLAUDE_3_7_SONNET = "anthropic/claude-3.7-sonnet"
+    CLAUDE_3_5_HAIKU = "anthropic/claude-3.5-haiku"
+    CLAUDE_4_0_SONNET = "anthropic/claude-4.0-sonnet"
 
     # Google models via TEE
-    GEMINI_2_5_FLASH = "Google/gemini-2.5-flash"
-    GEMINI_2_5_PRO = "Google/gemini-2.5-pro"
-    GEMINI_2_0_FLASH = "Google/gemini-2.0-flash"
+    GEMINI_2_5_FLASH = "google/gemini-2.5-flash"
+    GEMINI_2_5_PRO = "google/gemini-2.5-pro"
+    GEMINI_2_0_FLASH = "google/gemini-2.0-flash"
+    GEMINI_2_5_FLASH_LITE = "google/gemini-2.5-flash-lite"
 
     # xAI Grok models via TEE
-    GROK_3_MINI_BETA = "xAI/grok-3-mini-beta"
-    GROK_3_BETA = "xAI/grok-3-beta"
-    GROK_2_1212 = "grok-2-1212"
-    GROK_2_VISION_LATEST = "xAI/grok-2-vision-latest"
-    GROK_4_1_FAST = "xAI/grok-4.1-fast"
-    GROK_4_1_FAST_NON_REASONING = "xAI/grok-4-1-fast-non-reasoning"
+    GROK_3_MINI_BETA = "x-ai/grok-3-mini-beta"
+    GROK_3_BETA = "x-ai/grok-3-beta"
+    GROK_2_1212 = "x-ai/grok-2-1212"
+    GROK_2_VISION_LATEST = "x-ai/grok-2-vision-latest"
+    GROK_4_1_FAST = "x-ai/grok-4.1-fast"
+    GROK_4_1_FAST_NON_REASONING = "x-ai/grok-4-1-fast-non-reasoning"
 
 class TEE_LLM(str, Enum):
     """Enum for LLM models available for TEE execution"""
```
```diff
@@ -231,27 +237,28 @@ class TEE_LLM(str, Enum):
     # META_LLAMA_3_1_70B_INSTRUCT = "meta-llama/Llama-3.1-70B-Instruct"
 
     # OpenAI models via TEE
-    GPT_4_1_2025_04_14 = "OpenAI/gpt-4.1-2025-04-14"
-    GPT_4O = "OpenAI/gpt-4o"
-    O4_MINI = "OpenAI/o4-mini"
+    GPT_4_1_2025_04_14 = "openai/gpt-4.1-2025-04-14"
+    GPT_4O = "openai/gpt-4o"
+    O4_MINI = "openai/o4-mini"
 
     # Anthropic models via TEE
-    CLAUDE_3_7_SONNET = "Anthropic/claude-3.7-sonnet"
-    CLAUDE_3_5_HAIKU = "Anthropic/claude-3.5-haiku"
-    CLAUDE_4_0_SONNET = "Anthropic/claude-4.0-sonnet"
+    CLAUDE_3_7_SONNET = "anthropic/claude-3.7-sonnet"
+    CLAUDE_3_5_HAIKU = "anthropic/claude-3.5-haiku"
+    CLAUDE_4_0_SONNET = "anthropic/claude-4.0-sonnet"
 
     # Google models via TEE
-    GEMINI_2_5_FLASH = "Google/gemini-2.5-flash"
-    GEMINI_2_5_PRO = "Google/gemini-2.5-pro"
-    GEMINI_2_0_FLASH = "Google/gemini-2.0-flash"
+    GEMINI_2_5_FLASH = "google/gemini-2.5-flash"
+    GEMINI_2_5_PRO = "google/gemini-2.5-pro"
+    GEMINI_2_0_FLASH = "google/gemini-2.0-flash"
+    GEMINI_2_5_FLASH_LITE = "google/gemini-2.5-flash-lite"
 
     # xAI Grok models via TEE
-    GROK_3_MINI_BETA = "xAI/grok-3-mini-beta"
-    GROK_3_BETA = "xAI/grok-3-beta"
-    GROK_2_1212 = "xAI/grok-2-1212"
-    GROK_2_VISION_LATEST = "xAI/grok-2-vision-latest"
-    GROK_4_1_FAST = "xAI/grok-4.1-fast"
-    GROK_4_1_FAST_NON_REASONING = "xAI/grok-4-1-fast-non-reasoning"
+    GROK_3_MINI_BETA = "x-ai/grok-3-mini-beta"
+    GROK_3_BETA = "x-ai/grok-3-beta"
+    GROK_2_1212 = "x-ai/grok-2-1212"
+    GROK_2_VISION_LATEST = "x-ai/grok-2-vision-latest"
+    GROK_4_1_FAST = "x-ai/grok-4.1-fast"
+    GROK_4_1_FAST_NON_REASONING = "x-ai/grok-4-1-fast-non-reasoning"
 
 
 @dataclass
```
{opengradient-0.5.2 → opengradient-0.5.7/src/opengradient.egg-info}/PKG-INFO

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: opengradient
-Version: 0.5.2
+Version: 0.5.7
 Summary: Python SDK for OpenGradient decentralized model management & inference services
 Author-email: OpenGradient <kyle@vannalabs.ai>
 License-Expression: MIT
@@ -24,6 +24,7 @@ Requires-Dist: langchain>=0.3.7
 Requires-Dist: openai>=1.58.1
 Requires-Dist: pydantic>=2.9.2
 Requires-Dist: og-test-x402==0.0.1
+Requires-Dist: x402==0.2.1
 Dynamic: license-file
 
 # OpenGradient Python SDK
```
{opengradient-0.5.2 → opengradient-0.5.7}/src/opengradient.egg-info/requires.txt

```diff
@@ -9,3 +9,4 @@ langchain>=0.3.7
 openai>=1.58.1
 pydantic>=2.9.2
 og-test-x402==0.0.1
+x402==0.2.1
```
The remaining 33 files listed above are unchanged between 0.5.2 and 0.5.7.