opengradient 0.5.1__py3-none-any.whl → 0.5.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- opengradient/cli.py +13 -2
- opengradient/client.py +23 -6
- opengradient/types.py +36 -31
- {opengradient-0.5.1.dist-info → opengradient-0.5.3.dist-info}/METADATA +1 -1
- {opengradient-0.5.1.dist-info → opengradient-0.5.3.dist-info}/RECORD +9 -9
- {opengradient-0.5.1.dist-info → opengradient-0.5.3.dist-info}/WHEEL +0 -0
- {opengradient-0.5.1.dist-info → opengradient-0.5.3.dist-info}/entry_points.txt +0 -0
- {opengradient-0.5.1.dist-info → opengradient-0.5.3.dist-info}/licenses/LICENSE +0 -0
- {opengradient-0.5.1.dist-info → opengradient-0.5.3.dist-info}/top_level.txt +0 -0
opengradient/cli.py
CHANGED
@@ -20,7 +20,7 @@ from .defaults import (
     DEFAULT_API_URL,
     DEFAULT_LLM_SERVER_URL,
 )
-from .types import InferenceMode, LlmInferenceMode, LLM, TEE_LLM
+from .types import InferenceMode, LlmInferenceMode, LLM, TEE_LLM, x402SettlementMode

 OG_CONFIG_FILE = Path.home() / ".opengradient_config.json"

@@ -74,6 +74,12 @@ LlmInferenceModes = {
 }


+x402SettlementModes = {
+    "settle-batch": x402SettlementMode.SETTLE_BATCH,
+    "settle": x402SettlementMode.SETTLE,
+    "settle-metadata": x402SettlementMode.SETTLE_METADATA,
+}
+
 def initialize_config(ctx):
     """Interactively initialize OpenGradient config"""
     if ctx.obj:  # Check if config data already exists
@@ -422,8 +428,9 @@ def infer(ctx, model_cid: str, inference_mode: str, input_data, input_file: Path
 @click.option("--stop-sequence", multiple=True, help="Stop sequences for LLM")
 @click.option("--temperature", type=float, default=0.0, help="Temperature for LLM inference (0.0 to 1.0)")
 @click.option("--local", is_flag=True, help="Force use of local model even if not in LLM enum")
+@click.option("--x402-settlement-mode", "x402_settlement_mode", type=click.Choice(x402SettlementModes.keys()), default="settle-batch", help="Settlement mode for x402 payload")
 @click.pass_context
-def completion(ctx, model_cid: str, inference_mode: str, prompt: str, max_tokens: int, stop_sequence: List[str], temperature: float, local: bool):
+def completion(ctx, model_cid: str, inference_mode: str, x402_settlement_mode: str, prompt: str, max_tokens: int, stop_sequence: List[str], temperature: float, local: bool):
     """
     Run completion inference on an LLM model (local or external).

@@ -464,6 +471,7 @@ def completion(ctx, model_cid: str, inference_mode: str, prompt: str, max_tokens
         stop_sequence=list(stop_sequence),
         temperature=temperature,
         local_model=local,
+        x402_settlement_mode=x402_settlement_mode,
     )

     print_llm_completion_result(model_cid, completion_output.transaction_hash, completion_output.completion_output, is_local)
@@ -529,6 +537,7 @@ def print_llm_completion_result(model_cid, tx_hash, llm_output, is_local=True):
 )
 @click.option("--tool-choice", type=str, default="", help="Specific tool choice for the LLM")
 @click.option("--local", is_flag=True, help="Force use of local model even if not in LLM enum")
+@click.option("--x402-settlement-mode", type=click.Choice(x402SettlementModes.keys()), default="settle-batch", help="Settlement mode for x402 payload")
 @click.pass_context
 def chat(
     ctx,
@@ -542,6 +551,7 @@ def chat(
     tools: Optional[str],
     tools_file: Optional[Path],
     tool_choice: Optional[str],
+    x402_settlement_mode: Optional[str],
     local: bool,
 ):
     """
@@ -637,6 +647,7 @@ def chat(
         tools=parsed_tools,
         tool_choice=tool_choice,
        local_model=local,
+        x402_settlement_mode=x402_settlement_mode,
     )

     print_llm_chat_result(
opengradient/client.py
CHANGED
@@ -23,6 +23,7 @@ from .proto import infer_pb2, infer_pb2_grpc
 from .types import (
     LLM,
     TEE_LLM,
+    x402SettlementMode,
     HistoricalInputQuery,
     InferenceMode,
     LlmInferenceMode,
@@ -436,6 +437,7 @@ class Client:
         inference_mode: LlmInferenceMode = LlmInferenceMode.VANILLA,
         max_retries: Optional[int] = None,
         local_model: Optional[bool] = False,
+        x402_settlement_mode: Optional[x402SettlementMode] = x402SettlementMode.SETTLE_BATCH,
     ) -> TextGenerationOutput:
         """
         Perform inference on an LLM model using completions.
@@ -465,11 +467,12 @@ class Client:
             return OpenGradientError("That model CID is not supported yet for TEE inference")

         return self._external_llm_completion(
-            model=model_cid,
+            model=model_cid.split('/')[1],
             prompt=prompt,
             max_tokens=max_tokens,
             stop_sequence=stop_sequence,
             temperature=temperature,
+            x402_settlement_mode=x402_settlement_mode,
         )

         # Original local model logic
@@ -479,12 +482,16 @@ class Client:

         if model_cid not in [llm.value for llm in LLM]:
             raise OpenGradientError("That model CID is not yet supported for inference")
+
+        model_name = model_cid
+        if model_cid in [llm.value for llm in TEE_LLM]:
+            model_name = model_cid.split('/')[1]

         contract = self._blockchain.eth.contract(address=self._inference_hub_contract_address, abi=self._inference_abi)

         llm_request = {
             "mode": inference_mode.value,
-            "modelCID": model_cid,
+            "modelCID": model_name,
             "prompt": prompt,
             "max_tokens": max_tokens,
             "stop_sequence": stop_sequence or [],
@@ -512,6 +519,7 @@ class Client:
         max_tokens: int = 100,
         stop_sequence: Optional[List[str]] = None,
         temperature: float = 0.0,
+        x402_settlement_mode: Optional[x402SettlementMode] = x402SettlementMode.SETTLE_BATCH,
     ) -> TextGenerationOutput:
         """
         Route completion request to external LLM server with x402 payments.
@@ -580,7 +588,8 @@ class Client:
         ) as client:
             headers = {
                 "Content-Type": "application/json",
-                "Authorization": f"Bearer {X402_PLACEHOLDER_API_KEY}"
+                "Authorization": f"Bearer {X402_PLACEHOLDER_API_KEY}",
+                "X-SETTLEMENT-TYPE": x402_settlement_mode,
             }

             payload = {
@@ -641,6 +650,7 @@ class Client:
         tool_choice: Optional[str] = None,
         max_retries: Optional[int] = None,
         local_model: Optional[bool] = False,
+        x402_settlement_mode: Optional[x402SettlementMode] = x402SettlementMode.SETTLE_BATCH,
     ) -> TextGenerationOutput:
         """
         Perform inference on an LLM model using chat.
@@ -670,13 +680,14 @@ class Client:
             return OpenGradientError("That model CID is not supported yet for TEE inference")

         return self._external_llm_chat(
-            model=model_cid,
+            model=model_cid.split('/')[1],
             messages=messages,
             max_tokens=max_tokens,
             stop_sequence=stop_sequence,
             temperature=temperature,
             tools=tools,
             tool_choice=tool_choice,
+            x402_settlement_mode=x402_settlement_mode,
         )

         # Original local model logic
@@ -686,6 +697,10 @@ class Client:

         if model_cid not in [llm.value for llm in LLM]:
             raise OpenGradientError("That model CID is not yet supported for inference")
+
+        model_name = model_cid
+        if model_cid in [llm.value for llm in TEE_LLM]:
+            model_name = model_cid.split('/')[1]

         contract = self._blockchain.eth.contract(address=self._inference_hub_contract_address, abi=self._inference_abi)

@@ -713,7 +728,7 @@ class Client:

         llm_request = {
             "mode": inference_mode.value,
-            "modelCID": model_cid,
+            "modelCID": model_name,
             "messages": messages,
             "max_tokens": max_tokens,
             "stop_sequence": stop_sequence or [],
@@ -752,6 +767,7 @@ class Client:
         temperature: float = 0.0,
         tools: Optional[List[Dict]] = None,
         tool_choice: Optional[str] = None,
+        x402_settlement_mode: x402SettlementMode = x402SettlementMode.SETTLE_BATCH,
     ) -> TextGenerationOutput:
         """
         Route chat request to external LLM server with x402 payments.
@@ -827,7 +843,8 @@ class Client:
         ) as client:
             headers = {
                 "Content-Type": "application/json",
-                "Authorization": f"Bearer {X402_PLACEHOLDER_API_KEY}"
+                "Authorization": f"Bearer {X402_PLACEHOLDER_API_KEY}",
+                "X-SETTLEMENT-TYPE": x402_settlement_mode
             }

             payload = {
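
Two changes recur across both the completion and chat paths in client.py: TEE model CIDs, now provider-prefixed (see the types.py diff below), are stripped to the bare model name before being sent to the external LLM server, and the settlement mode is attached as an X-SETTLEMENT-TYPE request header. A minimal sketch of both behaviors; resolve_model_name is a hypothetical helper that mirrors the inline logic from the diff:

from opengradient.types import TEE_LLM, x402SettlementMode

def resolve_model_name(model_cid: str) -> str:
    # Hypothetical helper: TEE model CIDs like "OpenAI/gpt-4o" keep only
    # the part after the provider prefix; anything else passes through.
    if model_cid in [llm.value for llm in TEE_LLM]:
        return model_cid.split("/")[1]
    return model_cid

assert resolve_model_name("OpenAI/gpt-4o") == "gpt-4o"
assert resolve_model_name("some/other-model") == "some/other-model"

# The settlement mode travels as a plain header value; a StrEnum member
# is already a str, so no .value call is needed before serialization.
headers = {
    "Content-Type": "application/json",
    "X-SETTLEMENT-TYPE": x402SettlementMode.SETTLE_BATCH,  # == "settle-batch"
}
assert headers["X-SETTLEMENT-TYPE"] == "settle-batch"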
opengradient/types.py
CHANGED
@@ -1,10 +1,15 @@
 import time
 from dataclasses import dataclass
-from enum import Enum, IntEnum
+from enum import Enum, IntEnum, StrEnum
 from typing import Dict, List, Optional, Tuple, Union, DefaultDict
 import numpy as np


+class x402SettlementMode(StrEnum):
+    SETTLE = "settle"
+    SETTLE_METADATA = "settle-metadata"
+    SETTLE_BATCH = "settle-batch"
+
 class CandleOrder(IntEnum):
     ASCENDING = 0
     DESCENDING = 1
@@ -202,27 +207,27 @@ class LLM(str, Enum):
     DOBBY_LEASHED_3_1_8B = "SentientAGI/Dobby-Mini-Leashed-Llama-3.1-8B"

     # OpenAI models via TEE
-    GPT_4_1_2025_04_14 = "gpt-4.1-2025-04-14"
-    GPT_4O = "gpt-4o"
-    O4_MINI = "o4-mini"
+    GPT_4_1_2025_04_14 = "OpenAI/gpt-4.1-2025-04-14"
+    GPT_4O = "OpenAI/gpt-4o"
+    O4_MINI = "OpenAI/o4-mini"

     # Anthropic models via TEE
-    CLAUDE_3_7_SONNET = "claude-3.7-sonnet"
-    CLAUDE_3_5_HAIKU = "claude-3.5-haiku"
-    CLAUDE_4_0_SONNET = "claude-4.0-sonnet"
+    CLAUDE_3_7_SONNET = "Anthropic/claude-3.7-sonnet"
+    CLAUDE_3_5_HAIKU = "Anthropic/claude-3.5-haiku"
+    CLAUDE_4_0_SONNET = "Anthropic/claude-4.0-sonnet"

     # Google models via TEE
-    GEMINI_2_5_FLASH = "gemini-2.5-flash"
-    GEMINI_2_5_PRO = "gemini-2.5-pro"
-    GEMINI_2_0_FLASH = "gemini-2.0-flash"
+    GEMINI_2_5_FLASH = "Google/gemini-2.5-flash"
+    GEMINI_2_5_PRO = "Google/gemini-2.5-pro"
+    GEMINI_2_0_FLASH = "Google/gemini-2.0-flash"

     # xAI Grok models via TEE
-    GROK_3_MINI_BETA = "grok-3-mini-beta"
-    GROK_3_BETA = "grok-3-beta"
-    GROK_2_1212 = "grok-2-1212"
-    GROK_2_VISION_LATEST = "grok-2-vision-latest"
-    GROK_4_1_FAST = "grok-4.1-fast"
-    GROK_4_1_FAST_NON_REASONING = "grok-4-1-fast-non-reasoning"
+    GROK_3_MINI_BETA = "xAI/grok-3-mini-beta"
+    GROK_3_BETA = "xAI/grok-3-beta"
+    GROK_2_1212 = "xAI/grok-2-1212"
+    GROK_2_VISION_LATEST = "xAI/grok-2-vision-latest"
+    GROK_4_1_FAST = "xAI/grok-4.1-fast"
+    GROK_4_1_FAST_NON_REASONING = "xAI/grok-4-1-fast-non-reasoning"

 class TEE_LLM(str, Enum):
     """Enum for LLM models available for TEE execution"""
@@ -231,27 +236,27 @@ class TEE_LLM(str, Enum):
     # META_LLAMA_3_1_70B_INSTRUCT = "meta-llama/Llama-3.1-70B-Instruct"

     # OpenAI models via TEE
-    GPT_4_1_2025_04_14 = "gpt-4.1-2025-04-14"
-    GPT_4O = "gpt-4o"
-    O4_MINI = "o4-mini"
+    GPT_4_1_2025_04_14 = "OpenAI/gpt-4.1-2025-04-14"
+    GPT_4O = "OpenAI/gpt-4o"
+    O4_MINI = "OpenAI/o4-mini"

     # Anthropic models via TEE
-    CLAUDE_3_7_SONNET = "claude-3.7-sonnet"
-    CLAUDE_3_5_HAIKU = "claude-3.5-haiku"
-    CLAUDE_4_0_SONNET = "claude-4.0-sonnet"
+    CLAUDE_3_7_SONNET = "Anthropic/claude-3.7-sonnet"
+    CLAUDE_3_5_HAIKU = "Anthropic/claude-3.5-haiku"
+    CLAUDE_4_0_SONNET = "Anthropic/claude-4.0-sonnet"

     # Google models via TEE
-    GEMINI_2_5_FLASH = "gemini-2.5-flash"
-    GEMINI_2_5_PRO = "gemini-2.5-pro"
-    GEMINI_2_0_FLASH = "gemini-2.0-flash"
+    GEMINI_2_5_FLASH = "Google/gemini-2.5-flash"
+    GEMINI_2_5_PRO = "Google/gemini-2.5-pro"
+    GEMINI_2_0_FLASH = "Google/gemini-2.0-flash"

     # xAI Grok models via TEE
-    GROK_3_MINI_BETA = "grok-3-mini-beta"
-    GROK_3_BETA = "grok-3-beta"
-    GROK_2_1212 = "grok-2-1212"
-    GROK_2_VISION_LATEST = "grok-2-vision-latest"
-    GROK_4_1_FAST = "grok-4.1-fast"
-    GROK_4_1_FAST_NON_REASONING = "grok-4-1-fast-non-reasoning"
+    GROK_3_MINI_BETA = "xAI/grok-3-mini-beta"
+    GROK_3_BETA = "xAI/grok-3-beta"
+    GROK_2_1212 = "xAI/grok-2-1212"
+    GROK_2_VISION_LATEST = "xAI/grok-2-vision-latest"
+    GROK_4_1_FAST = "xAI/grok-4.1-fast"
+    GROK_4_1_FAST_NON_REASONING = "xAI/grok-4-1-fast-non-reasoning"


 @dataclass
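
The settlement modes are defined on enum.StrEnum, which exists only on Python 3.11+, so this change quietly assumes a 3.11 floor unless the import is guarded elsewhere. StrEnum members are genuine str instances, which is what lets the CLI pass raw strings and the client drop members straight into HTTP headers without calling .value. A minimal sketch of that behavior, independent of the package:

from enum import StrEnum  # Python 3.11+

class x402SettlementMode(StrEnum):
    SETTLE = "settle"
    SETTLE_METADATA = "settle-metadata"
    SETTLE_BATCH = "settle-batch"

mode = x402SettlementMode.SETTLE_BATCH
assert isinstance(mode, str)        # members are real strings
assert mode == "settle-batch"       # equality against the raw value
assert f"{mode}" == "settle-batch"  # str()/f-strings yield the value, not the member name
assert x402SettlementMode("settle") is x402SettlementMode.SETTLE  # lookup by value

The provider-prefixed model values ("OpenAI/gpt-4o" and so on) are what the client splits on '/' to recover the bare model name for the external server.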
{opengradient-0.5.1.dist-info → opengradient-0.5.3.dist-info}/RECORD
CHANGED
@@ -1,10 +1,10 @@
 opengradient/__init__.py,sha256=wVg0KTFNBl7RnZF9huR5-m_q1E7tO-YyQwY7AD9JFoc,12635
 opengradient/account.py,sha256=5wrYpws_1lozjOFjLCTHtxgoxK-LmObDAaVy9eDcJY4,1145
-opengradient/cli.py,sha256=…
-opengradient/client.py,sha256=…
+opengradient/cli.py,sha256=RksBEGVcZgUg6ng53Fgz-Ncv1erBwdADgblB2HmKkwk,29868
+opengradient/client.py,sha256=4_Rr5oQ-wrj-LTA54uWF_FplkpkRWT1K2ectD8SSEoA,62991
 opengradient/defaults.py,sha256=w8-dr5ciF2TGnqbm_ib0Yz4U0YL5ikpNqkcPVpmXzP8,673
 opengradient/exceptions.py,sha256=88tfegboGtlehQcwhxsl6ZzhLJWZWlkf_bkHTiCtXpo,3391
-opengradient/types.py,sha256=…
+opengradient/types.py,sha256=0JqnLiUXmqpunpnK3V0vrQz6ymFO1xG9TgAXjyMyxUE,7693
 opengradient/utils.py,sha256=ZUq4OBIml2vsC0tRqus4Zwb_e3g4woo00apByrafuVw,8058
 opengradient/abi/InferencePrecompile.abi,sha256=reepTHg6Q01UrFP0Gexc-JayplsvOLPfG7jrEZ-cV28,10197
 opengradient/abi/PriceHistoryInference.abi,sha256=ZB3fZdx1kaFlp2wt1vTbTZZG1k8HPvmNtkG5Q8Bnajw,5098
@@ -27,9 +27,9 @@ opengradient/workflow_models/constants.py,sha256=viIkb_LGcfVprqQNaA80gBTj6cfYam0
 opengradient/workflow_models/types.py,sha256=Z22hF6c8Y4D2GlzVEIBODGwsqSjSrQvUcpZ7R-mIJdI,409
 opengradient/workflow_models/utils.py,sha256=ySfpuiOBqLTlfto6ZxZf2vc7K6RGIja0l4eaVm5AOzY,1503
 opengradient/workflow_models/workflow_models.py,sha256=d4C_gs39DAfy4cdY9Ee6GMXpPfzwvKFpmxzK1A7LNgU,3900
-opengradient-0.5.1.dist-info/licenses/LICENSE,sha256=…
-opengradient-0.5.1.dist-info/METADATA,sha256=…
-opengradient-0.5.1.dist-info/WHEEL,sha256=…
-opengradient-0.5.1.dist-info/entry_points.txt,sha256=…
-opengradient-0.5.1.dist-info/top_level.txt,sha256=…
-opengradient-0.5.1.dist-info/RECORD,,
+opengradient-0.5.3.dist-info/licenses/LICENSE,sha256=xEcvQ3AxZOtDkrqkys2Mm6Y9diEnaSeQRKvxi-JGnNA,1069
+opengradient-0.5.3.dist-info/METADATA,sha256=cIUG2yYqEVmN27jL0gHUaRYmMwg89ZfNiP0gtPg32kg,3992
+opengradient-0.5.3.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+opengradient-0.5.3.dist-info/entry_points.txt,sha256=yUKTaJx8RXnybkob0J62wVBiCp_1agVbgw9uzsmaeJc,54
+opengradient-0.5.3.dist-info/top_level.txt,sha256=oC1zimVLa2Yi1LQz8c7x-0IQm92milb5ax8gHBHwDqU,13
+opengradient-0.5.3.dist-info/RECORD,,
{opengradient-0.5.1.dist-info → opengradient-0.5.3.dist-info}/WHEEL
File without changes

{opengradient-0.5.1.dist-info → opengradient-0.5.3.dist-info}/entry_points.txt
File without changes

{opengradient-0.5.1.dist-info → opengradient-0.5.3.dist-info}/licenses/LICENSE
File without changes

{opengradient-0.5.1.dist-info → opengradient-0.5.3.dist-info}/top_level.txt
File without changes