together 0.2.4__py3-none-any.whl → 0.2.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
together/__init__.py CHANGED
@@ -1,17 +1,12 @@
  import os
+ import sys
  import urllib.parse

- from .config import (
-     finetune_model_names,
-     jokes_list,
-     min_samples,
-     model_info_dict,
- )
  from .version import VERSION


  version = VERSION
- # print(version)
+
  user_agent = f"TogetherPythonOfficial/{version}"

  api_key = os.environ.get("TOGETHER_API_KEY", None)
@@ -26,6 +21,8 @@ default_text_model = "togethercomputer/RedPajama-INCITE-7B-Chat"
  default_image_model = "runwayml/stable-diffusion-v1-5"
  log_level = "WARNING"

+ min_samples = 100
+
  from .complete import Complete
  from .error import *
  from .files import Files
@@ -48,8 +45,5 @@ __all__ = [
      "Files",
      "Finetune",
      "Image",
-     "model_info_dict",
-     "finetune_model_names",
      "min_samples",
-     "jokes_list",
  ]
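Net effect of the __init__.py change: the config re-exports (model_info_dict, finetune_model_names, jokes_list) are dropped and min_samples becomes a plain module-level constant set to 100. A minimal sketch of what downstream code sees after upgrading (illustrative only, not taken from the package):

    import together

    # min_samples is now defined directly in together/__init__.py (100 per the hunk above)
    print(together.min_samples)

    # the dropped re-exports are no longer importable in 0.2.5:
    # from together import model_info_dict  -> ImportError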
together/cli/cli.py CHANGED
@@ -3,7 +3,10 @@ import argparse

  import together
  from together.commands import chat, complete, files, finetune, image, models
- from together.utils.utils import get_logger
+ from together.utils import get_logger
+
+
+ logger = get_logger(str(__name__))


  def main() -> None:
@@ -29,9 +32,10 @@ def main() -> None:
      )

      parser.add_argument(
-         "--log",
+         "--verbose",
+         "-v",
          default=together.log_level,
-         choices=["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG"],
+         choices=["CRITICAL", "ERROR", "WARNING", "SUCCESS", "INFO", "DEBUG", "TRACE"],
          type=str,
          help="Set logging level. Defaults to WARNING. DEBUG will show all logs.",
          required=False,
@@ -49,12 +53,7 @@ def main() -> None:
      args = parser.parse_args()

      # Setup logging
-     try:
-         get_logger(__name__, log_level=args.log)
-     except Exception:
-         get_logger(__name__, log_level=together.log_level)
-
-     together.log_level = args.log
+     together.log_level = args.verbose

      try:
          args.func(args)
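The logging flag is renamed from --log to --verbose/-v, gains SUCCESS and TRACE as accepted levels, and the chosen value is now simply stored in together.log_level instead of configuring a logger on the spot. A standalone sketch of the flag's behavior (argparse only, nothing Together-specific):

    import argparse

    parser = argparse.ArgumentParser(prog="together")
    parser.add_argument(
        "--verbose",
        "-v",
        default="WARNING",  # together.log_level defaults to WARNING per __init__.py
        choices=["CRITICAL", "ERROR", "WARNING", "SUCCESS", "INFO", "DEBUG", "TRACE"],
        type=str,
    )
    args = parser.parse_args(["-v", "DEBUG"])
    print(args.verbose)  # "DEBUG"; main() assigns this value to together.log_level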
together/commands/chat.py CHANGED
@@ -4,14 +4,11 @@ import argparse
  import cmd

  import together
- import together.utils.conversation as convo
+ import together.tools.conversation as convo
  from together import Complete
- from together.utils.utils import get_logger


- def add_parser(
-     subparsers: argparse._SubParsersAction[argparse.ArgumentParser],
- ) -> None:
+ def add_parser(subparsers: argparse._SubParsersAction[argparse.ArgumentParser]) -> None:
      COMMAND_NAME = "chat"
      subparser = subparsers.add_parser(COMMAND_NAME)

@@ -128,7 +125,6 @@ class OpenChatKitShell(cmd.Cmd):


  def _run_complete(args: argparse.Namespace) -> None:
-     get_logger(__name__, log_level=args.log)
      if args.prompt_id not in args.stop:
          args.stop.append(args.prompt_id)

together/commands/complete.py CHANGED
@@ -2,19 +2,18 @@ from __future__ import annotations

  import argparse
  import json
- import logging
  import re
- import sys
  from typing import Any, Dict, List

  import together
  from together import Complete
- from together.utils.utils import get_logger
+ from together.utils import get_logger


- def add_parser(
-     subparsers: argparse._SubParsersAction[argparse.ArgumentParser],
- ) -> None:
+ logger = get_logger(str(__name__))
+
+
+ def add_parser(subparsers: argparse._SubParsersAction[argparse.ArgumentParser]) -> None:
      COMMAND_NAME = "complete"
      subparser = subparsers.add_parser(COMMAND_NAME)

@@ -99,46 +98,36 @@ def _enforce_stop_tokens(text: str, stop: List[str]) -> str:
      return re.split("|".join(stop), text)[0]


- def no_streamer(
-     args: argparse.Namespace, response: Dict[str, Any], logger: logging.Logger
- ) -> None:
+ def no_streamer(args: argparse.Namespace, response: Dict[str, Any]) -> None:
      if args.raw:
          print(json.dumps(response, indent=4))
-         sys.exit()
-
-     if "output" in response.keys():
-         try:
-             text = str(response["output"]["choices"][0]["text"])
-         except Exception:
-             try:
-                 logger.critical(f"Error raised: {response['output']['error']}")
-                 raise together.ResponseError(response["output"]["error"])
-             except Exception as e:
-                 logger.critical(f"Error raised: {e}")
-                 raise together.ResponseError(e)
-
-         # if args.stop is not None:
-         # text = _enforce_stop_tokens(text, args.stop)
-
-     elif "error" in response.keys():
-         if response["error"] == "Returned error: no instance":
-             logger.critical(
-                 f"No running instances for {args.model}. You can start an instance by navigating to the Together Playground at api.together.xyz"
-             )
-             raise together.InstanceError(model=args.model)
-         else:
-             logger.critical(f"Error raised: {response['error']}")

      else:
-         logger.critical("Unknown response received")
-         raise together.ResponseError("Unknown response received. Please try again.")
+         if "output" in response.keys():
+             if "choices" in dict(response["output"]).keys():
+                 text = str(response["output"]["choices"][0]["text"])
+                 print(text.strip())
+             elif "error" in dict(response["output"]).keys():
+                 raise together.ResponseError(response["output"]["error"])
+             else:
+                 raise together.ResponseError(
+                     f"Unknown error occured. Received unhandled response: {response}"
+                 )
+
+         elif "error" in response.keys():
+             if response["error"] == "Returned error: no instance":
+                 message = f"No running instances for {args.model}. You can start an instance by navigating to the Together Playground at api.together.xyz"
+                 raise together.InstanceError(model=args.model, message=message)
+             else:
+                 raise together.ResponseError(
+                     message=f"Error raised: {response['error']}"
+                 )

-     print(text.strip())
+         else:
+             raise together.ResponseError("Unknown response received. Please try again.")


  def _run_complete(args: argparse.Namespace) -> None:
-     logger = get_logger(__name__, log_level=args.log)
-
      complete = Complete()

      if args.no_stream:
@@ -153,7 +142,7 @@ def _run_complete(args: argparse.Namespace) -> None:
              repetition_penalty=args.repetition_penalty,
              logprobs=args.logprobs,
          )
-         no_streamer(args, response, logger)
+         no_streamer(args, response)
      else:
          for text in complete.create_streaming(
              prompt=args.prompt,
@@ -164,6 +153,11 @@ def _run_complete(args: argparse.Namespace) -> None:
              top_p=args.top_p,
              top_k=args.top_k,
              repetition_penalty=args.repetition_penalty,
+             raw=args.raw,
          ):
-             print(text, end="", flush=True)
-         print("\n")
+             if not args.raw:
+                 print(text, end="", flush=True)
+             else:
+                 print(text)
+         if not args.raw:
+             print("\n")
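For orientation, the rewritten no_streamer dispatches purely on the shape of the response dict instead of logging before raising. A sketch of the three shapes it now distinguishes (field names come from the hunk above; the values are illustrative):

    # handled by the "choices" branch and printed
    ok = {"output": {"choices": [{"text": "generated text"}]}}

    # re-raised as together.ResponseError
    api_error = {"output": {"error": "some error message"}}

    # raised as together.InstanceError because no instance is running for the model
    no_instance = {"error": "Returned error: no instance"}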
together/commands/files.py CHANGED
@@ -4,7 +4,7 @@ import argparse
  import json

  from together import Files
- from together.utils.utils import extract_time
+ from together.utils import extract_time


  def add_parser(subparsers: argparse._SubParsersAction[argparse.ArgumentParser]) -> None:
@@ -34,15 +34,6 @@ def _add_check(parser: argparse._SubParsersAction[argparse.ArgumentParser]) -> N
          help="Local file to upload",
          type=str,
      )
-     subparser.add_argument(
-         "--model",
-         "-m",
-         default=None,
-         metavar="MODELNAME",
-         help="check data for this model's special tokens",
-         type=str,
-         required=False,
-     )

      subparser.set_defaults(func=_run_check)

@@ -118,37 +109,31 @@ def _add_retrieve_content(


  def _run_list(args: argparse.Namespace) -> None:
-     files = Files()
-     response = files.list()
+     response = Files.list()
      response["data"].sort(key=extract_time)
      print(json.dumps(response, indent=4))


  def _run_check(args: argparse.Namespace) -> None:
-     files = Files()
-     response = files.check(args.file, args.model)
+     response = Files.check(args.file)
      print(json.dumps(response, indent=4))


  def _run_upload(args: argparse.Namespace) -> None:
-     files = Files()
-     response = files.upload(file=args.file, check=not args.no_check, model=args.model)
+     response = Files.upload(file=args.file, check=not args.no_check, model=args.model)
      print(json.dumps(response, indent=4))


  def _run_delete(args: argparse.Namespace) -> None:
-     files = Files()
-     response = files.delete(args.file_id)
+     response = Files.delete(args.file_id)
      print(json.dumps(response, indent=4))


  def _run_retrieve(args: argparse.Namespace) -> None:
-     files = Files()
-     response = files.retrieve(args.file_id)
+     response = Files.retrieve(args.file_id)
      print(json.dumps(response, indent=4))


  def _run_retrieve_content(args: argparse.Namespace) -> None:
-     files = Files()
-     output = files.retrieve_content(args.file_id, args.output)
+     output = Files.retrieve_content(args.file_id, args.output)
      print(output)
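The file subcommands no longer instantiate Files; every endpoint is reached through a classmethod, and the per-model check option is gone from the check path. A minimal sketch of the equivalent library calls after this change (file names and IDs are placeholders):

    from together import Files

    Files.list()                                 # was Files().list()
    Files.check("data.jsonl")                    # model-specific check argument removed
    Files.upload(file="data.jsonl", check=True)
    Files.retrieve_content("file-abc123", "out.jsonl")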
together/commands/finetune.py CHANGED
@@ -5,7 +5,7 @@ import json
  import os

  from together import Finetune
- from together.utils.utils import parse_timestamp
+ from together.utils import parse_timestamp


  def add_parser(subparsers: argparse._SubParsersAction[argparse.ArgumentParser]) -> None:
@@ -153,6 +153,12 @@ def _add_create(parser: argparse._SubParsersAction[argparse.ArgumentParser]) ->
          help="Do not report metrics to wandb.ai.",
          action="store_true",
      )
+     subparser.add_argument(
+         "--quiet",
+         default=False,
+         action="store_true",
+         help="Indicates whether to disable checking",
+     )

      subparser.set_defaults(func=_run_create)

@@ -309,14 +315,14 @@ def _run_create(args: argparse.Namespace) -> None:
          suffix=args.suffix,
          estimate_price=args.estimate_price,
          wandb_api_key=args.wandb_api_key if not args.no_wandb_api_key else None,
+         confirm_inputs=not args.quiet,
      )

      print(json.dumps(response, indent=4))


  def _run_list(args: argparse.Namespace) -> None:
-     finetune = Finetune()
-     response = finetune.list()
+     response = Finetune.list()
      data_list = response["data"]
      sorted_data = sorted(data_list, key=lambda x: parse_timestamp(x["created_at"]))
      response["data"] = sorted_data
@@ -324,38 +330,32 @@ def _run_list(args: argparse.Namespace) -> None:


  def _run_retrieve(args: argparse.Namespace) -> None:
-     finetune = Finetune()
-     response = finetune.retrieve(args.fine_tune_id)
+     response = Finetune.retrieve(args.fine_tune_id)
      print(json.dumps(response, indent=4))


  def _run_cancel(args: argparse.Namespace) -> None:
-     finetune = Finetune()
-     response = finetune.cancel(args.fine_tune_id)
+     response = Finetune.cancel(args.fine_tune_id)
      print(json.dumps(response, indent=4))


  def _run_list_events(args: argparse.Namespace) -> None:
-     finetune = Finetune()
-     response = finetune.list_events(args.fine_tune_id)
+     response = Finetune.list_events(args.fine_tune_id)
      print(json.dumps(response, indent=4))


  def _run_download(args: argparse.Namespace) -> None:
-     finetune = Finetune()
-     response = finetune.download(args.fine_tune_id, args.output, args.checkpoint_step)
+     response = Finetune.download(args.fine_tune_id, args.output, args.checkpoint_step)
      print(response)


  def _run_status(args: argparse.Namespace) -> None:
-     finetune = Finetune()
-     response = finetune.get_job_status(args.fine_tune_id)
+     response = Finetune.get_job_status(args.fine_tune_id)
      print(response)


  def _run_checkpoint(args: argparse.Namespace) -> None:
-     finetune = Finetune()
-     checkpoints = finetune.get_checkpoints(args.fine_tune_id)
+     checkpoints = Finetune.get_checkpoints(args.fine_tune_id)
      print(json.dumps(checkpoints, indent=4))
      print(f"\n{len(checkpoints)} checkpoints found")

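Besides the new --quiet flag, which is forwarded to the create call as confirm_inputs=not args.quiet so quiet mode skips the pre-submission confirmation, every Finetune helper here is now called on the class rather than an instance. A small sketch of the class-level calls (the job ID is a placeholder):

    from together import Finetune

    jobs = Finetune.list()                          # was Finetune().list()
    status = Finetune.get_job_status("ft-abc123")   # placeholder fine-tune job ID
    checkpoints = Finetune.get_checkpoints("ft-abc123")
    print(f"{len(checkpoints)} checkpoints found")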
 
together/commands/image.py CHANGED
@@ -3,13 +3,15 @@ from __future__ import annotations
  import argparse
  import base64
  import json
- import logging
  import sys
  from typing import Any, Dict

  import together
  from together import Image
- from together.utils.utils import get_logger
+ from together.utils import get_logger
+
+
+ logger = get_logger(str(__name__))


  def add_parser(subparsers: argparse._SubParsersAction[argparse.ArgumentParser]) -> None:
@@ -89,9 +91,7 @@ def add_parser(subparsers: argparse._SubParsersAction[argparse.ArgumentParser])
      subparser.set_defaults(func=_run_complete)


- def _save_image(
-     args: argparse.Namespace, response: Dict[str, Any], logger: logging.Logger
- ) -> None:
+ def _save_image(args: argparse.Namespace, response: Dict[str, Any]) -> None:
      if args.raw:
          print(json.dumps(response, indent=4))
          sys.exit()
@@ -123,8 +123,6 @@ def _save_image(


  def _run_complete(args: argparse.Namespace) -> None:
-     logger = get_logger(__name__, log_level=args.log)
-
      complete = Image()

      response = complete.create(
@@ -138,4 +136,4 @@ def _run_complete(args: argparse.Namespace) -> None:
          negative_prompt=args.negative_prompt,
      )

-     _save_image(args, response, logger)
+     _save_image(args, response)
together/commands/models.py CHANGED
@@ -3,7 +3,7 @@ from __future__ import annotations
  import argparse
  import json

- from together import Models
+ import together


  def add_parser(
@@ -107,8 +107,7 @@ def _add_ready(


  def _run_list(args: argparse.Namespace) -> None:
-     models = Models()
-     response = models.list()
+     response = together.Models.list()
      if args.raw:
          print(json.dumps(response, indent=4))
      else:
@@ -119,35 +118,29 @@ def _run_list(args: argparse.Namespace) -> None:


  def _run_info(args: argparse.Namespace) -> None:
-     models = Models()
-     response = models.list()
-
-     # list of keys to display by default from models info dict
-     visible_keys = [
-         "name",
-         "display_name",
-         "display_type",
-         "description",
-         "creator_organization",
-         "hardware_label",
-         "pricing_tier",
-         "config",
-         "base",
-     ]
-
-     for i in response:
-         if i["name"] == args.model:
-             if args.raw:
-                 print(json.dumps(i, indent=4))
-             else:
-                 model_info = {key: i[key] for key in visible_keys if key in i}
-                 print(json.dumps(model_info, indent=4))
-             break
+     if not args.raw:
+         hidden_keys = [
+             "_id",
+             "modelInstanceConfig",
+             "created_at",
+             "update_at",
+             "pricing",
+             "show_in_playground",
+             "access",
+             "pricing_tier",
+             "hardware_label",
+             "depth",
+         ]
+     else:
+         hidden_keys = []
+
+     model_info = together.Models.info(args.model, hidden_keys=hidden_keys)
+
+     print(json.dumps(model_info, indent=4))


  def _run_instances(args: argparse.Namespace) -> None:
-     models = Models()
-     response = models.instances()
+     response = together.Models.instances()
      if args.raw:
          print(json.dumps(response, indent=4))
      else:
@@ -156,18 +149,15 @@ def _run_instances(args: argparse.Namespace) -> None:


  def _run_start(args: argparse.Namespace) -> None:
-     models = Models()
-     response = models.start(args.model)
+     response = together.Models.start(args.model)
      print(json.dumps(response, indent=4))


  def _run_stop(args: argparse.Namespace) -> None:
-     models = Models()
-     response = models.stop(args.model)
+     response = together.Models.stop(args.model)
      print(json.dumps(response, indent=4))


  def _run_ready(args: argparse.Namespace) -> None:
-     models = Models()
-     response = models.ready(args.model)
+     response = together.Models.ready(args.model)
      print(json.dumps(response, indent=4))
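The models info path now delegates field filtering to the library: instead of scanning Models.list() against a visible_keys allow-list, it calls together.Models.info with a hidden_keys deny-list (empty in raw mode). A minimal sketch of the new call; the model name is the default text model defined earlier in this diff:

    import together

    info = together.Models.info(
        "togethercomputer/RedPajama-INCITE-7B-Chat",
        hidden_keys=["_id", "pricing", "hardware_label"],  # subset of the CLI defaults above
    )
    print(info)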
together/complete.py CHANGED
@@ -1,22 +1,14 @@
  import json
  from typing import Any, Dict, Iterator, List, Optional

- import requests
- import sseclient  # type: ignore
-
  import together
- from together.utils.utils import get_logger, verify_api_key
+ from together.utils import create_post_request, get_logger, sse_client


- logger = get_logger(str(__name__), log_level=together.log_level)
+ logger = get_logger(str(__name__))


  class Complete:
-     def __init__(
-         self,
-     ) -> None:
-         verify_api_key(logger)
-
      @classmethod
      def create(
          self,
@@ -45,46 +37,16 @@ class Complete:
              "logprobs": logprobs,
          }

-         # HTTP headers for authorization
-         headers = {
-             "Authorization": f"Bearer {together.api_key}",
-             "Content-Type": "application/json",
-             "User-Agent": together.user_agent,
-         }
-
          # send request
-         try:
-             response = requests.post(
-                 together.api_base_complete,
-                 headers=headers,
-                 json=parameter_payload,
-             )
-         except requests.exceptions.RequestException as e:
-             logger.critical(f"Response error raised: {e}")
-             raise together.ResponseError(e)
-
-         if response.status_code == 429:
-             logger.critical(
-                 f"""No running instances for {model}.
-                 You can start an instance with one of the following methods:
-                 1. navigating to the Together Playground at api.together.ai
-                 2. starting one in python using together.Models.start(model_name)
-                 3. `$ together models start <MODEL_NAME>` at the command line.
-                 See `together.Models.list()` in python or `$ together models list` in command line
-                 to get an updated list of valid model names.
-                 """
-             )
-             raise together.InstanceError(model=model)
-
-         response.raise_for_status()
-
+         response = create_post_request(
+             url=together.api_base_complete, json=parameter_payload
+         )
+         if not response:
+             return {}
          try:
              response_json = dict(response.json())

          except Exception as e:
-             logger.critical(
-                 f"Error raised: {e}\nResponse status code = {response.status_code}"
-             )
              raise together.JSONError(e, http_status=response.status_code)
          return response_json

@@ -99,6 +61,7 @@ class Complete:
          top_p: Optional[float] = 0.7,
          top_k: Optional[int] = 50,
          repetition_penalty: Optional[float] = None,
+         raw: Optional[bool] = False,
      ) -> Iterator[str]:
          """
          Prints streaming responses and returns the completed text.
@@ -118,48 +81,30 @@ class Complete:
              "repetition_penalty": repetition_penalty,
              "stream_tokens": True,
          }
-         # HTTP headers for authorization
-         headers = {
-             "Authorization": f"Bearer {together.api_key}",
-             "Content-Type": "application/json",
-             "User-Agent": together.user_agent,
-         }

          # send request
-         try:
-             response = requests.post(
-                 together.api_base_complete,
-                 headers=headers,
-                 json=parameter_payload,
-                 stream=True,
-             )
-         except requests.exceptions.RequestException as e:
-             logger.critical(f"Response error raised: {e}")
-             raise together.ResponseError(e)
-
-         if response.status_code == 200:
-             output = ""
-             client = sseclient.SSEClient(response)
-             for event in client.events():
-                 if event.data != "[DONE]":
-                     text = json.loads(event.data)["choices"][0]["text"]
+         response = create_post_request(
+             url=together.api_base_complete, json=parameter_payload, stream=True
+         )
+         if not response:
+             return {}
+         output = ""
+         client = sse_client(response)
+         for event in client.events():
+             if raw:
+                 yield str(event.data)
+             elif event.data != "[DONE]":
+                 json_response = dict(json.loads(event.data))
+                 if "error" in json_response.keys():
+                     raise together.ResponseError(
+                         json_response["error"]["error"],
+                         request_id=json_response["error"]["request_id"],
+                     )
+                 elif "choices" in json_response.keys():
+                     text = json_response["choices"][0]["text"]
                      output += text
                      yield text
-         elif response.status_code == 429:
-             logger.critical(
-                 f"""No running instances for {model}.
-                 You can start an instance with one of the following methods:
-                 1. navigating to the Together Playground at api.together.ai
-                 2. starting one in python using together.Models.start(model_name)
-                 3. `$ together models start <MODEL_NAME>` at the command line.
-                 See `together.Models.list()` in python or `$ together models list` in command line
-                 to get an updated list of valid model names.
-                 """
-             )
-             raise together.InstanceError(model=model)
-         else:
-             logger.critical(
-                 f"Unknown error raised.\nResponse status code = {response.status_code}"
-             )
-             response.raise_for_status()
-             raise together.ResponseError(http_status=response.status_code)
+             else:
+                 raise together.ResponseError(
+                     f"Unknown error occured. Received unhandled response: {event.data}"
+                 )
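Both request paths now go through the create_post_request and sse_client helpers in together.utils, and create_streaming gains a raw flag: when set it yields the unparsed SSE event payloads, otherwise it yields each chunk's text and raises ResponseError on an error event. A hedged usage sketch; prompt and raw appear in the hunks above, while the model keyword and the prompt text are assumptions for illustration:

    import together

    together.api_key = "..."  # or rely on the TOGETHER_API_KEY environment variable

    complete = together.Complete()
    for text in complete.create_streaming(
        prompt="Alan Turing was",            # placeholder prompt
        model=together.default_text_model,   # assumed keyword; not shown in this diff
        raw=False,                           # True would yield raw SSE event data instead
    ):
        print(text, end="", flush=True)
    print()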