together 0.2.3__py3-none-any.whl → 0.2.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- together/__init__.py +5 -16
- together/cli/cli.py +8 -9
- together/commands/chat.py +3 -6
- together/commands/complete.py +36 -41
- together/commands/files.py +8 -22
- together/commands/finetune.py +16 -15
- together/commands/image.py +7 -8
- together/commands/models.py +25 -35
- together/complete.py +31 -86
- together/error.py +12 -2
- together/files.py +24 -86
- together/finetune.py +79 -181
- together/image.py +8 -42
- together/models.py +62 -110
- together/utils.py +175 -0
- together/version.py +1 -1
- {together-0.2.3.dist-info → together-0.2.5.dist-info}/METADATA +114 -24
- together-0.2.5.dist-info/RECORD +25 -0
- together/config.py +0 -389
- together/utils/utils.py +0 -75
- together-0.2.3.dist-info/RECORD +0 -26
- /together/{utils → tools}/__init__.py +0 -0
- /together/{utils → tools}/conversation.py +0 -0
- {together-0.2.3.dist-info → together-0.2.5.dist-info}/WHEEL +0 -0
- {together-0.2.3.dist-info → together-0.2.5.dist-info}/entry_points.txt +0 -0
- {together-0.2.3.dist-info → together-0.2.5.dist-info}/licenses/LICENSE +0 -0
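Judging by the renames above and the import changes in the file diffs below, the shared helpers now live in a together.utils module (get_logger, extract_time, parse_timestamp), while the conversation helpers moved from together/utils/ to together/tools/. A minimal sketch of the new import paths (treating these helpers as importable from user code is an assumption):

```python
# Import paths as they appear in the 0.2.5 CLI modules shown below.
# Whether these helpers are considered public API is an assumption.
import together.tools.conversation as convo  # was together.utils.conversation in 0.2.3
from together.utils import extract_time, get_logger, parse_timestamp

logger = get_logger(str(__name__))  # the one-argument form used throughout 0.2.5
```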
together/__init__.py
CHANGED
@@ -1,17 +1,12 @@
 import os
+import sys
 import urllib.parse

-from .config import (
-    finetune_model_names,
-    jokes_list,
-    min_samples,
-    model_info_dict,
-)
 from .version import VERSION


 version = VERSION
-
+
 user_agent = f"TogetherPythonOfficial/{version}"

 api_key = os.environ.get("TOGETHER_API_KEY", None)
@@ -26,13 +21,14 @@ default_text_model = "togethercomputer/RedPajama-INCITE-7B-Chat"
 default_image_model = "runwayml/stable-diffusion-v1-5"
 log_level = "WARNING"

-
-
+min_samples = 100
+
 from .complete import Complete
 from .error import *
 from .files import Files
 from .finetune import Finetune
 from .image import Image
+from .models import Models


 __all__ = [
@@ -44,17 +40,10 @@ __all__ = [
     "api_base_instances",
     "default_text_model",
     "default_image_model",
-    "get_logger",
-    "verify_api_key",
-    "extract_time",
-    "parse_timestamp",
     "Models",
     "Complete",
     "Files",
     "Finetune",
     "Image",
-    "model_info_dict",
-    "finetune_model_names",
     "min_samples",
-    "jokes_list",
 ]
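The package root now re-exports the client classes directly and keeps min_samples as a plain constant instead of pulling model metadata from the removed config module. A rough sketch of top-level usage (overriding the key after import, rather than via the environment, is an assumption):

```python
import os

import together

# api_key is read from TOGETHER_API_KEY at import time; overriding it here
# afterwards is shown as an assumption.
together.api_key = os.environ.get("TOGETHER_API_KEY", "")

print(together.version)      # VERSION string from together/version.py
print(together.min_samples)  # now a plain constant (100), no longer a config import

# Classes exported via __all__: Models, Complete, Files, Finetune, Image
models = together.Models.list()  # see the Models section at the end of this diff
```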
together/cli/cli.py
CHANGED
@@ -2,8 +2,11 @@
 import argparse

 import together
-from together import get_logger
 from together.commands import chat, complete, files, finetune, image, models
+from together.utils import get_logger
+
+
+logger = get_logger(str(__name__))


 def main() -> None:
@@ -29,9 +32,10 @@ def main() -> None:
     )

     parser.add_argument(
-        "--log",
+        "--verbose",
+        "-v",
         default=together.log_level,
-        choices=["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG"],
+        choices=["CRITICAL", "ERROR", "WARNING", "SUCCESS", "INFO", "DEBUG", "TRACE"],
         type=str,
         help="Set logging level. Defaults to WARNING. DEBUG will show all logs.",
         required=False,
@@ -49,12 +53,7 @@ def main() -> None:
     args = parser.parse_args()

     # Setup logging
-    try:
-        get_logger(__name__, log_level=args.log)
-    except Exception:
-        get_logger(__name__, log_level=together.log_level)
-
-    together.log_level = args.log
+    together.log_level = args.verbose

     try:
         args.func(args)
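The verbosity flag is now --verbose/-v (with SUCCESS and TRACE added to the accepted levels) and its value is simply stored on together.log_level; the old per-command get_logger(..., log_level=...) plumbing is gone. A sketch of the equivalent programmatic setup (whether get_logger picks up together.log_level at call time is an assumption):

```python
import together
from together.utils import get_logger

# Mirror of what cli.py now does with args.verbose:
together.log_level = "DEBUG"

# Module-level logger, created with the new one-argument call.
logger = get_logger(str(__name__))
logger.warning(f"together CLI verbosity set to {together.log_level}")
```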
together/commands/chat.py
CHANGED
@@ -4,13 +4,11 @@ import argparse
 import cmd

 import together
-import together.utils.conversation as convo
-from together import Complete
+import together.tools.conversation as convo
+from together import Complete


-def add_parser(
-    subparsers: argparse._SubParsersAction[argparse.ArgumentParser],
-) -> None:
+def add_parser(subparsers: argparse._SubParsersAction[argparse.ArgumentParser]) -> None:
     COMMAND_NAME = "chat"
     subparser = subparsers.add_parser(COMMAND_NAME)

@@ -127,7 +125,6 @@ class OpenChatKitShell(cmd.Cmd):


 def _run_complete(args: argparse.Namespace) -> None:
-    get_logger(__name__, log_level=args.log)
     if args.prompt_id not in args.stop:
         args.stop.append(args.prompt_id)

together/commands/complete.py
CHANGED
@@ -2,18 +2,18 @@ from __future__ import annotations

 import argparse
 import json
-import logging
 import re
-import sys
 from typing import Any, Dict, List

 import together
-from together import Complete
+from together import Complete
+from together.utils import get_logger


-def add_parser(
-    subparsers: argparse._SubParsersAction[argparse.ArgumentParser],
-) -> None:
+logger = get_logger(str(__name__))
+
+
+def add_parser(subparsers: argparse._SubParsersAction[argparse.ArgumentParser]) -> None:
     COMMAND_NAME = "complete"
     subparser = subparsers.add_parser(COMMAND_NAME)

@@ -98,46 +98,36 @@ def _enforce_stop_tokens(text: str, stop: List[str]) -> str:
     return re.split("|".join(stop), text)[0]


-def no_streamer(
-    args: argparse.Namespace, response: Dict[str, Any], logger: logging.Logger
-) -> None:
+def no_streamer(args: argparse.Namespace, response: Dict[str, Any]) -> None:
     if args.raw:
         print(json.dumps(response, indent=4))
-        sys.exit()
-
-    if "output" in response.keys():
-        try:
-            text = str(response["output"]["choices"][0]["text"])
-        except Exception:
-            try:
-                logger.critical(f"Error raised: {response['output']['error']}")
-                raise together.ResponseError(response["output"]["error"])
-            except Exception as e:
-                logger.critical(f"Error raised: {e}")
-                raise together.ResponseError(e)
-
-        # if args.stop is not None:
-        # text = _enforce_stop_tokens(text, args.stop)
-
-    elif "error" in response.keys():
-        if response["error"] == "Returned error: no instance":
-            logger.critical(
-                f"No running instances for {args.model}. You can start an instance by navigating to the Together Playground at api.together.xyz"
-            )
-            raise together.InstanceError(model=args.model)
-        else:
-            logger.critical(f"Error raised: {response['error']}")

     else:
-
-
+        if "output" in response.keys():
+            if "choices" in dict(response["output"]).keys():
+                text = str(response["output"]["choices"][0]["text"])
+                print(text.strip())
+            elif "error" in dict(response["output"]).keys():
+                raise together.ResponseError(response["output"]["error"])
+            else:
+                raise together.ResponseError(
+                    f"Unknown error occured. Received unhandled response: {response}"
+                )
+
+        elif "error" in response.keys():
+            if response["error"] == "Returned error: no instance":
+                message = f"No running instances for {args.model}. You can start an instance by navigating to the Together Playground at api.together.xyz"
+                raise together.InstanceError(model=args.model, message=message)
+            else:
+                raise together.ResponseError(
+                    message=f"Error raised: {response['error']}"
+                )

-
+        else:
+            raise together.ResponseError("Unknown response received. Please try again.")


 def _run_complete(args: argparse.Namespace) -> None:
-    logger = get_logger(__name__, log_level=args.log)
-
     complete = Complete()

     if args.no_stream:
@@ -152,7 +142,7 @@ def _run_complete(args: argparse.Namespace) -> None:
             repetition_penalty=args.repetition_penalty,
             logprobs=args.logprobs,
         )
-        no_streamer(args, response, logger)
+        no_streamer(args, response)
     else:
         for text in complete.create_streaming(
             prompt=args.prompt,
@@ -163,6 +153,11 @@ def _run_complete(args: argparse.Namespace) -> None:
             top_p=args.top_p,
             top_k=args.top_k,
             repetition_penalty=args.repetition_penalty,
+            raw=args.raw,
         ):
-
-
+            if not args.raw:
+                print(text, end="", flush=True)
+            else:
+                print(text)
+        if not args.raw:
+            print("\n")
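In 0.2.5 the completion command surfaces API problems by raising together.InstanceError / together.ResponseError instead of logging them, and the streaming path gained a raw pass-through. A minimal sketch of the underlying calls; only keyword arguments visible in this diff are shown, and relying on defaults for the rest (model, temperature, and so on) is an assumption:

```python
import together

complete = together.Complete()
prompt = "The capital of France is"  # placeholder prompt

try:
    # Non-streaming, as in _run_complete above; the successful payload carries
    # output -> choices -> [0] -> text.
    response = complete.create(prompt=prompt)
    print(str(response["output"]["choices"][0]["text"]).strip())

    # Streaming; with raw=True the unprocessed chunks are yielded instead.
    for text in complete.create_streaming(prompt=prompt, raw=False):
        print(text, end="", flush=True)
    print()
except together.InstanceError:
    print("No running instance for the requested model; start one in the Together Playground.")
except together.ResponseError as err:
    print(f"Together API error: {err}")
```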
together/commands/files.py
CHANGED
@@ -3,7 +3,8 @@ from __future__ import annotations
 import argparse
 import json

-from together import Files
+from together import Files
+from together.utils import extract_time


 def add_parser(subparsers: argparse._SubParsersAction[argparse.ArgumentParser]) -> None:
@@ -33,15 +34,6 @@ def _add_check(parser: argparse._SubParsersAction[argparse.ArgumentParser]) -> N
         help="Local file to upload",
         type=str,
     )
-    subparser.add_argument(
-        "--model",
-        "-m",
-        default=None,
-        metavar="MODELNAME",
-        help="check data for this model's special tokens",
-        type=str,
-        required=False,
-    )
     subparser.set_defaults(func=_run_check)


@@ -117,37 +109,31 @@ def _add_retrieve_content(


 def _run_list(args: argparse.Namespace) -> None:
-
-    response = files.list()
+    response = Files.list()
     response["data"].sort(key=extract_time)
     print(json.dumps(response, indent=4))


 def _run_check(args: argparse.Namespace) -> None:
-
-    response = files.check(args.file, args.model)
+    response = Files.check(args.file)
     print(json.dumps(response, indent=4))


 def _run_upload(args: argparse.Namespace) -> None:
-
-    response = files.upload(file=args.file, check=not args.no_check, model=args.model)
+    response = Files.upload(file=args.file, check=not args.no_check, model=args.model)
     print(json.dumps(response, indent=4))


 def _run_delete(args: argparse.Namespace) -> None:
-
-    response = files.delete(args.file_id)
+    response = Files.delete(args.file_id)
     print(json.dumps(response, indent=4))


 def _run_retrieve(args: argparse.Namespace) -> None:
-
-    response = files.retrieve(args.file_id)
+    response = Files.retrieve(args.file_id)
     print(json.dumps(response, indent=4))


 def _run_retrieve_content(args: argparse.Namespace) -> None:
-
-    output = files.retrieve_content(args.file_id, args.output)
+    output = Files.retrieve_content(args.file_id, args.output)
     print(output)
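The file helpers are now invoked at class level rather than through an instance. A sketch of the same calls outside the CLI (the file name is a placeholder, and the shape of the upload response is an assumption):

```python
from together import Files

# Validate a local JSONL file; in 0.2.5 the check no longer takes a model argument.
print(Files.check("data.jsonl"))

# Upload with validation enabled, as the CLI does when --no-check is not passed.
uploaded = Files.upload(file="data.jsonl", check=True, model=None)
file_id = uploaded["id"]  # assumption: the response carries an "id" field

# Listing, metadata, content download and deletion, mirroring the _run_* handlers.
print(Files.list())
print(Files.retrieve(file_id))
print(Files.retrieve_content(file_id, "data.downloaded.jsonl"))
Files.delete(file_id)
```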
together/commands/finetune.py
CHANGED
@@ -4,7 +4,8 @@ import argparse
 import json
 import os

-from together import Finetune
+from together import Finetune
+from together.utils import parse_timestamp


 def add_parser(subparsers: argparse._SubParsersAction[argparse.ArgumentParser]) -> None:
@@ -152,6 +153,12 @@ def _add_create(parser: argparse._SubParsersAction[argparse.ArgumentParser]) ->
         help="Do not report metrics to wandb.ai.",
         action="store_true",
     )
+    subparser.add_argument(
+        "--quiet",
+        default=False,
+        action="store_true",
+        help="Indicates whether to disable checking",
+    )

     subparser.set_defaults(func=_run_create)

@@ -308,14 +315,14 @@ def _run_create(args: argparse.Namespace) -> None:
         suffix=args.suffix,
         estimate_price=args.estimate_price,
         wandb_api_key=args.wandb_api_key if not args.no_wandb_api_key else None,
+        confirm_inputs=not args.quiet,
     )

     print(json.dumps(response, indent=4))


 def _run_list(args: argparse.Namespace) -> None:
-
-    response = finetune.list()
+    response = Finetune.list()
     data_list = response["data"]
     sorted_data = sorted(data_list, key=lambda x: parse_timestamp(x["created_at"]))
     response["data"] = sorted_data
@@ -323,38 +330,32 @@ def _run_list(args: argparse.Namespace) -> None:


 def _run_retrieve(args: argparse.Namespace) -> None:
-
-    response = finetune.retrieve(args.fine_tune_id)
+    response = Finetune.retrieve(args.fine_tune_id)
     print(json.dumps(response, indent=4))


 def _run_cancel(args: argparse.Namespace) -> None:
-
-    response = finetune.cancel(args.fine_tune_id)
+    response = Finetune.cancel(args.fine_tune_id)
     print(json.dumps(response, indent=4))


 def _run_list_events(args: argparse.Namespace) -> None:
-
-    response = finetune.list_events(args.fine_tune_id)
+    response = Finetune.list_events(args.fine_tune_id)
     print(json.dumps(response, indent=4))


 def _run_download(args: argparse.Namespace) -> None:
-
-    response = finetune.download(args.fine_tune_id, args.output, args.checkpoint_step)
+    response = Finetune.download(args.fine_tune_id, args.output, args.checkpoint_step)
     print(response)


 def _run_status(args: argparse.Namespace) -> None:
-
-    response = finetune.get_job_status(args.fine_tune_id)
+    response = Finetune.get_job_status(args.fine_tune_id)
     print(response)


 def _run_checkpoint(args: argparse.Namespace) -> None:
-
-    checkpoints = finetune.get_checkpoints(args.fine_tune_id)
+    checkpoints = Finetune.get_checkpoints(args.fine_tune_id)
     print(json.dumps(checkpoints, indent=4))
     print(f"\n{len(checkpoints)} checkpoints found")

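Fine-tune job management follows the same class-level pattern, and create() gained a confirm_inputs flag wired to the new --quiet option. A sketch of the monitoring calls shown above (the job ID, output path and checkpoint step are placeholders; the full argument list of create() is not visible in this diff):

```python
from together import Finetune

jobs = Finetune.list()
fine_tune_id = "ft-placeholder-id"  # placeholder job ID

print(Finetune.get_job_status(fine_tune_id))
print(Finetune.retrieve(fine_tune_id))
print(Finetune.list_events(fine_tune_id))

# Checkpoints and weight download, as in _run_checkpoint / _run_download.
checkpoints = Finetune.get_checkpoints(fine_tune_id)
print(f"{len(checkpoints)} checkpoints found")
print(Finetune.download(fine_tune_id, "finetune-output.tar.zst", 0))  # output path / checkpoint step are placeholders

# Creation: --quiet now maps to confirm_inputs=False; the remaining arguments
# (training file, model, ...) are omitted here because they are not shown in this diff.
# Finetune.create(..., wandb_api_key=None, confirm_inputs=False)
```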
together/commands/image.py
CHANGED
@@ -3,12 +3,15 @@ from __future__ import annotations
 import argparse
 import base64
 import json
-import logging
 import sys
 from typing import Any, Dict

 import together
-from together import Image
+from together import Image
+from together.utils import get_logger
+
+
+logger = get_logger(str(__name__))


 def add_parser(subparsers: argparse._SubParsersAction[argparse.ArgumentParser]) -> None:
@@ -88,9 +91,7 @@ def add_parser(subparsers: argparse._SubParsersAction[argparse.ArgumentParser])
     subparser.set_defaults(func=_run_complete)


-def _save_image(
-    args: argparse.Namespace, response: Dict[str, Any], logger: logging.Logger
-) -> None:
+def _save_image(args: argparse.Namespace, response: Dict[str, Any]) -> None:
     if args.raw:
         print(json.dumps(response, indent=4))
         sys.exit()
@@ -122,8 +123,6 @@ def _save_image(


 def _run_complete(args: argparse.Namespace) -> None:
-    logger = get_logger(__name__, log_level=args.log)
-
     complete = Image()

     response = complete.create(
@@ -137,4 +136,4 @@ def _run_complete(args: argparse.Namespace) -> None:
         negative_prompt=args.negative_prompt,
     )

-    _save_image(args, response, logger)
+    _save_image(args, response)
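Image generation keeps the instance-based call but drops the logger plumbing from _save_image. A sketch of the underlying request (the prompt keyword and the response layout are assumptions; only negative_prompt appears in this diff):

```python
import together

image_client = together.Image()

# The CLI forwards several generation options; only negative_prompt is visible
# in this diff, and the prompt keyword itself is assumed here.
response = image_client.create(
    prompt="a watercolor painting of a lighthouse",
    negative_prompt="blurry, low quality",
)

# _save_image above base64-decodes image data from the response before writing
# files to disk; the exact field names are not shown in this diff, so the raw
# response is just dumped here.
print(response)
```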
together/commands/models.py
CHANGED
@@ -3,7 +3,7 @@ from __future__ import annotations
 import argparse
 import json

-
+import together


 def add_parser(
@@ -107,8 +107,7 @@ def _add_ready(


 def _run_list(args: argparse.Namespace) -> None:
-
-    response = models.list()
+    response = together.Models.list()
     if args.raw:
         print(json.dumps(response, indent=4))
     else:
@@ -119,35 +118,29 @@ def _run_list(args: argparse.Namespace) -> None:


 def _run_info(args: argparse.Namespace) -> None:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            print(json.dumps(i, indent=4))
-        else:
-            model_info = {key: i[key] for key in visible_keys if key in i}
-            print(json.dumps(model_info, indent=4))
-            break
+    if not args.raw:
+        hidden_keys = [
+            "_id",
+            "modelInstanceConfig",
+            "created_at",
+            "update_at",
+            "pricing",
+            "show_in_playground",
+            "access",
+            "pricing_tier",
+            "hardware_label",
+            "depth",
+        ]
+    else:
+        hidden_keys = []
+
+    model_info = together.Models.info(args.model, hidden_keys=hidden_keys)
+
+    print(json.dumps(model_info, indent=4))


 def _run_instances(args: argparse.Namespace) -> None:
-
-    response = models.instances()
+    response = together.Models.instances()
     if args.raw:
         print(json.dumps(response, indent=4))
     else:
@@ -156,18 +149,15 @@ def _run_instances(args: argparse.Namespace) -> None:


 def _run_start(args: argparse.Namespace) -> None:
-
-    response = models.start(args.model)
+    response = together.Models.start(args.model)
     print(json.dumps(response, indent=4))


 def _run_stop(args: argparse.Namespace) -> None:
-
-    response = models.stop(args.model)
+    response = together.Models.stop(args.model)
     print(json.dumps(response, indent=4))


 def _run_ready(args: argparse.Namespace) -> None:
-
-    response = models.ready(args.model)
+    response = together.Models.ready(args.model)
     print(json.dumps(response, indent=4))
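Model management now goes through class-level together.Models calls, and info() takes a hidden_keys filter that the CLI uses to trim noisy metadata (passing an empty list, as --raw does, keeps every field). A sketch of the calls used above (the model name is a placeholder, taken from the package's default_text_model):

```python
import together

model = "togethercomputer/RedPajama-INCITE-7B-Chat"  # placeholder; the package default text model

all_models = together.Models.list()
running = together.Models.instances()

# hidden_keys mirrors what the CLI strips when --raw is not given.
info = together.Models.info(model, hidden_keys=["pricing", "hardware_label", "depth"])
print(info)

# Start an instance, poll readiness, then stop it again.
together.Models.start(model)
print(together.Models.ready(model))
together.Models.stop(model)
```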