seekrai 0.0.1__py3-none-any.whl → 0.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45) hide show
  1. seekrai/__init__.py +0 -15
  2. seekrai/abstract/api_requestor.py +121 -297
  3. seekrai/client.py +10 -11
  4. seekrai/constants.py +36 -16
  5. seekrai/error.py +1 -8
  6. seekrai/filemanager.py +40 -79
  7. seekrai/resources/chat/completions.py +13 -13
  8. seekrai/resources/completions.py +4 -4
  9. seekrai/resources/embeddings.py +4 -2
  10. seekrai/resources/files.py +17 -9
  11. seekrai/resources/finetune.py +57 -82
  12. seekrai/resources/images.py +2 -2
  13. seekrai/resources/models.py +115 -15
  14. seekrai/types/__init__.py +5 -4
  15. seekrai/types/common.py +1 -2
  16. seekrai/types/files.py +23 -19
  17. seekrai/types/finetune.py +20 -26
  18. seekrai/types/models.py +26 -20
  19. seekrai/utils/_log.py +3 -3
  20. seekrai/utils/api_helpers.py +2 -2
  21. seekrai/utils/tools.py +1 -1
  22. seekrai-0.1.1.dist-info/METADATA +165 -0
  23. seekrai-0.1.1.dist-info/RECORD +39 -0
  24. seekrai/cli/__init__.py +0 -0
  25. seekrai/cli/api/__init__.py +0 -0
  26. seekrai/cli/api/chat.py +0 -245
  27. seekrai/cli/api/completions.py +0 -107
  28. seekrai/cli/api/files.py +0 -125
  29. seekrai/cli/api/finetune.py +0 -175
  30. seekrai/cli/api/images.py +0 -82
  31. seekrai/cli/api/models.py +0 -42
  32. seekrai/cli/cli.py +0 -77
  33. seekrai/legacy/__init__.py +0 -0
  34. seekrai/legacy/base.py +0 -27
  35. seekrai/legacy/complete.py +0 -91
  36. seekrai/legacy/embeddings.py +0 -25
  37. seekrai/legacy/files.py +0 -140
  38. seekrai/legacy/finetune.py +0 -173
  39. seekrai/legacy/images.py +0 -25
  40. seekrai/legacy/models.py +0 -44
  41. seekrai-0.0.1.dist-info/METADATA +0 -401
  42. seekrai-0.0.1.dist-info/RECORD +0 -56
  43. {seekrai-0.0.1.dist-info → seekrai-0.1.1.dist-info}/LICENSE +0 -0
  44. {seekrai-0.0.1.dist-info → seekrai-0.1.1.dist-info}/WHEEL +0 -0
  45. {seekrai-0.0.1.dist-info → seekrai-0.1.1.dist-info}/entry_points.txt +0 -0
seekrai/types/models.py CHANGED
@@ -1,7 +1,7 @@
1
1
  from __future__ import annotations
2
2
 
3
3
  from enum import Enum
4
- from typing import Literal
4
+ from typing import List, Literal
5
5
 
6
6
  from seekrai.types.abstract import BaseModel
7
7
  from seekrai.types.common import ObjectType
@@ -10,21 +10,20 @@ from seekrai.types.common import ObjectType
10
10
  class ModelType(str, Enum):
11
11
  CHAT = "chat"
12
12
  LANGUAGE = "language"
13
- CODE = "code"
14
- IMAGE = "image"
15
13
  EMBEDDING = "embedding"
16
- MODERATION = "moderation"
14
+ OBJECT_DETECTION = "object_detection"
15
+ IMAGE_CLASSIFICATION = "image_classification"
17
16
 
18
17
 
19
- class PricingObject(BaseModel):
20
- input: float | None = None
21
- output: float | None = None
22
- hourly: float | None = None
23
- base: float | None = None
24
- finetune: float | None = None
18
+ # class PricingObject(BaseModel):
19
+ # input: float | None = None
20
+ # output: float | None = None
21
+ # hourly: float | None = None
22
+ # base: float | None = None
23
+ # finetune: float | None = None
25
24
 
26
25
 
27
- class ModelObject(BaseModel):
26
+ class ModelResponse(BaseModel):
28
27
  # model id
29
28
  id: str
30
29
  # object type
@@ -32,12 +31,19 @@ class ModelObject(BaseModel):
32
31
  created: int | None = None
33
32
  # model type
34
33
  type: ModelType | None = None
35
- # pretty name
36
- display_name: str | None = None
37
- # model creator organization
38
- organization: str | None = None
39
- # link to model resource
40
- link: str | None = None
41
- license: str | None = None
42
- context_length: int | None = None
43
- pricing: PricingObject
34
+ name: str | None = None
35
+ bytes: int | None = None
36
+ # # model creator organization
37
+ # organization: str | None = None
38
+ # # link to model resource
39
+ # link: str | None = None
40
+ # license: str | None = None
41
+ # context_length: int | None = None
42
+ # pricing: PricingObject
43
+
44
+
45
+ class ModelList(BaseModel):
46
+ # object type
47
+ object: Literal["list"] | None = None
48
+ # list of fine-tune job objects
49
+ data: List[ModelResponse] | None = None
seekrai/utils/_log.py CHANGED
@@ -11,14 +11,14 @@ import seekrai
11
11
 
12
12
  logger = logging.getLogger("seekrai")
13
13
 
14
- SEEKRFLOW_LOG = os.environ.get("SEEKRFLOW_LOG")
14
+ SEEKR_LOG = os.environ.get("SEEKR_LOG")
15
15
 
16
16
 
17
17
  def _console_log_level() -> str | None:
18
18
  if seekrai.log in ["debug", "info"]:
19
19
  return seekrai.log
20
- elif SEEKRFLOW_LOG in ["debug", "info"]:
21
- return SEEKRFLOW_LOG
20
+ elif SEEKR_LOG in ["debug", "info"]:
21
+ return SEEKR_LOG
22
22
  else:
23
23
  return None
24
24
 
@@ -78,7 +78,7 @@ def default_api_key(api_key: str | None = None) -> str | None:
78
78
  """
79
79
  if api_key:
80
80
  return api_key
81
- if os.environ.get("SEEKRFLOW_API_KEY"):
82
- return os.environ.get("SEEKRFLOW_API_KEY")
81
+ if os.environ.get("SEEKR_API_KEY"):
82
+ return os.environ.get("SEEKR_API_KEY")
83
83
 
84
84
  raise error.AuthenticationError(seekrai.constants.MISSING_API_KEY_MESSAGE)
seekrai/utils/tools.py CHANGED
@@ -7,7 +7,7 @@ from datetime import datetime
7
7
 
8
8
  logger = logging.getLogger("seekrai")
9
9
 
10
- SEEKRFLOW_LOG = os.environ.get("SEEKRFLOW_LOG")
10
+ SEEKR_LOG = os.environ.get("SEEKR_LOG")
11
11
 
12
12
  NANODOLLAR = 1_000_000_000
13
13
 
@@ -0,0 +1,165 @@
1
+ Metadata-Version: 2.1
2
+ Name: seekrai
3
+ Version: 0.1.1
4
+ Summary: Python client for SeekrAI
5
+ Home-page: https://gitlab.cb.ntent.com/ml/seekr-py
6
+ License: Apache-2.0
7
+ Author: SeekrFlow
8
+ Author-email: support@seekr.com
9
+ Requires-Python: >=3.9,<4.0
10
+ Classifier: License :: OSI Approved :: Apache Software License
11
+ Classifier: Operating System :: POSIX :: Linux
12
+ Classifier: Programming Language :: Python :: 3
13
+ Classifier: Programming Language :: Python :: 3.9
14
+ Classifier: Programming Language :: Python :: 3.10
15
+ Classifier: Programming Language :: Python :: 3.11
16
+ Classifier: Programming Language :: Python :: 3.12
17
+ Requires-Dist: click (>=8.1.7,<9.0.0)
18
+ Requires-Dist: eval-type-backport (>=0.1.3,<0.3.0)
19
+ Requires-Dist: filelock (>=3.13.1,<4.0.0)
20
+ Requires-Dist: httpx (>=0.27.0,<0.28.0)
21
+ Requires-Dist: numpy (>=1.23.5) ; python_version < "3.12"
22
+ Requires-Dist: numpy (>=1.26.0) ; python_version >= "3.12"
23
+ Requires-Dist: pillow (>=10.3.0,<11.0.0)
24
+ Requires-Dist: pyarrow (>=10.0.1)
25
+ Requires-Dist: pydantic (>=2.7,<3.0)
26
+ Requires-Dist: requests (>=2.31.0,<3.0.0)
27
+ Requires-Dist: tabulate (>=0.9.0,<0.10.0)
28
+ Requires-Dist: tqdm (>=4.66.2,<5.0.0)
29
+ Requires-Dist: typer (>=0.9,<0.13)
30
+ Project-URL: Homepage, https://www.seekr.com/
31
+ Project-URL: Repository, https://gitlab.cb.ntent.com/ml/seekr-py
32
+ Description-Content-Type: text/markdown
33
+
34
+ The Seekr Python Library is the official Python client for SeekrFlow's API platform. It provides a convenient way to interact with the REST APIs and enables easy integration with Python 3.9+ applications through easy-to-use synchronous and asynchronous clients.
35
+
36
+ # Installation
37
+
38
+ To install the Seekr Python Library from PyPI, simply run:
39
+
40
+ ```shell Shell
41
+ pip install --upgrade seekrai
42
+ ```
43
+
44
+ ## Setting up API Key
45
+
46
+ > 🚧 You will need to create an account with [Seekr.com](https://seekr.com/) to obtain a SeekrFlow API Key.
47
+
48
+ ### Setting environment variable
49
+
50
+ ```shell
51
+ export SEEKR_API_KEY=xxxxx
52
+ ```
53
+
54
+ ### Using the client
55
+
56
+ ```python
57
+ from seekrai import SeekrFlow
58
+
59
+ client = SeekrFlow(api_key="xxxxx")
60
+ ```
61
+
62
+ # Usage – Python Client
63
+
64
+ ## Chat Completions
65
+
66
+ ```python
67
+ import os
68
+ from seekrai import SeekrFlow
69
+
70
+ client = SeekrFlow(api_key=os.environ.get("SEEKR_API_KEY"))
71
+
72
+ response = client.chat.completions.create(
73
+ model="meta-llama-3-8b-instruct",
74
+ messages=[{"role": "user", "content": "tell me about new york"}],
75
+ )
76
+ print(response.choices[0].message.content)
77
+ ```
78
+
79
+ ### Streaming
80
+
81
+ ```python
82
+ import os
83
+ from seekrai import SeekrFlow
84
+
85
+ client = SeekrFlow(api_key=os.environ.get("SEEKR_API_KEY"))
86
+ stream = client.chat.completions.create(
87
+ model="meta-llama-3-8b-instruct",
88
+ messages=[{"role": "user", "content": "tell me about new york"}],
89
+ stream=True,
90
+ )
91
+
92
+ for chunk in stream:
93
+ print(chunk.choices[0].delta.content or "", end="", flush=True)
94
+ ```
95
+
96
+ ### Async usage
97
+
98
+ ```python
99
+ import os, asyncio
100
+ from seekrai import AsyncSeekrFlow
101
+
102
+ async_client = AsyncSeekrFlow(api_key=os.environ.get("SEEKR_API_KEY"))
103
+ messages = [
104
+ "What are the top things to do in San Francisco?",
105
+ "What country is Paris in?",
106
+ ]
107
+
108
+
109
+ async def async_chat_completion(messages):
110
+ async_client = AsyncSeekrFlow(api_key=os.environ.get("SEEKR_API_KEY"))
111
+ tasks = [
112
+ async_client.chat.completions.create(
113
+ model="meta-llama-3-8b-instruct",
114
+ messages=[{"role": "user", "content": message}],
115
+ )
116
+ for message in messages
117
+ ]
118
+ responses = await asyncio.gather(*tasks)
119
+
120
+ for response in responses:
121
+ print(response.choices[0].message.content)
122
+
123
+
124
+ asyncio.run(async_chat_completion(messages))
125
+ ```
126
+
127
+ ## Files
128
+
129
+ The files API is used for fine-tuning and allows developers to upload data to fine-tune on. It also has several methods to list all files, retrieve files, and delete files.
130
+
131
+ ```python
132
+ import os
133
+ from seekrai import SeekrFlow
134
+
135
+ client = SeekrFlow(api_key=os.environ.get("SEEKR_API_KEY"))
136
+
137
+ client.files.upload(file="somedata.parquet") # uploads a file
138
+ client.files.list() # lists all uploaded files
139
+ client.files.retrieve(id="file-d0d318cb-b7d9-493a-bd70-1cfe089d3815") # retrieves a specific file
140
+ client.files.delete(id="file-d0d318cb-b7d9-493a-bd70-1cfe089d3815") # deletes a file
141
+ ```
142
+
143
+ ## Fine-tunes
144
+
145
+ The finetune API is used for fine-tuning and allows developers to create fine-tuning jobs. It also has several methods to list all jobs, retrieve statuses, and get checkpoints.
146
+
147
+ ```python
148
+ import os
149
+ from seekrai import SeekrFlow
150
+
151
+ client = SeekrFlow(api_key=os.environ.get("SEEKR_API_KEY"))
152
+
153
+ client.fine_tuning.create(
154
+ training_file='file-d0d318cb-b7d9-493a-bd70-1cfe089d3815',
155
+ model='meta-llama-3-8b-instruct',
156
+ n_epochs=3,
157
+ n_checkpoints=1,
158
+ batch_size=4,
159
+ learning_rate=1e-5,
160
+ suffix='my-demo-finetune',
161
+ )
162
+ client.fine_tuning.list() # lists all fine-tuned jobs
163
+ client.fine_tuning.retrieve(id="ft-c66a5c18-1d6d-43c9-94bd-32d756425b4b") # retrieves information on finetune event
164
+ ```
165
+
@@ -0,0 +1,39 @@
1
+ seekrai/__init__.py,sha256=HC6iy-IdwqecabH-6a80Lsy9qO2PBToAI0WqEErV41c,935
2
+ seekrai/abstract/__init__.py,sha256=wNiOTW9TJpUgfCJCG-wAbhhWWH2PtoVpAuL3nxvQGps,56
3
+ seekrai/abstract/api_requestor.py,sha256=-HExlMVUD52pXWhnJTrlVbqWj3Cpi5ut-lz-YrzkUV8,18027
4
+ seekrai/client.py,sha256=PpI8-lRePkzhHLuiPD7ikuxImw0DAFljlmU6osL_Asg,4780
5
+ seekrai/constants.py,sha256=Z5VWSveTfNV3igjjhxZ1S6KOB8gwWA5rKbdfyW8lWuM,1855
6
+ seekrai/error.py,sha256=rAYL8qEd8INwYMMKvhS-HKeC3QkWL4Wq-zfazFU-zBg,4861
7
+ seekrai/filemanager.py,sha256=bFmkJRtw7uavjfnoCV4Bykph-zle4AoJargsgHe-ybE,9917
8
+ seekrai/resources/__init__.py,sha256=6m1J5kXqJFHqDzQkWy5Us-q3Ug1YZWWP1oQtJ_IGsRM,694
9
+ seekrai/resources/chat/__init__.py,sha256=KmtPupgECtEN80NyvcnSmieTAFXhwmVxhMHP0qhspA4,618
10
+ seekrai/resources/chat/completions.py,sha256=v0Nwr7lJk6d5fZ-s6ut2BLDr9Vay8qGL9ZQLRgyOve0,11047
11
+ seekrai/resources/completions.py,sha256=w3La3zPMlN00y-b-tJwLgvZVH-xK_dKC6ktI5Ggn1us,8564
12
+ seekrai/resources/embeddings.py,sha256=3lohUrkdFqzSg8FgS7p4r87jwjE0NXU1PilWv278quk,2705
13
+ seekrai/resources/files.py,sha256=cl3qjiMLZiTQuNypydISq6JczSOiPfFRlNEAdB3CU6o,4921
14
+ seekrai/resources/finetune.py,sha256=fXLkVCTnfoeq4TvjhEWQsdIwO247tcpfWCVTlWZczDw,11271
15
+ seekrai/resources/images.py,sha256=E48lAe7YsZ2WXBHR_qz4SF7P4Y-U7t61m_bWNS91pM0,4802
16
+ seekrai/resources/models.py,sha256=4swgCu95Y-v3TF0yixHGW9djLYlNsQEFO64zcor-PTs,4863
17
+ seekrai/seekrflow_response.py,sha256=5RFEQzamDy7sTSDkxSsZQThZ3biNmeCPeHWdrFId5Go,1320
18
+ seekrai/types/__init__.py,sha256=YFbBhp3f0HJGdKYFAy1TVMxQp8pe4ggLVXjsnMVAGVc,1555
19
+ seekrai/types/abstract.py,sha256=TqWFQV_6bPblywfCH-r8FCkXWvPkc9KlJ4QVgyrnaMc,642
20
+ seekrai/types/chat_completions.py,sha256=GgqEcfdjkWecPVZEHRENctQ1SQ6KB4CculxyKhkvnHo,3571
21
+ seekrai/types/common.py,sha256=79fvG60otq-H4QL0OULgmxUAAOKKas4YSYycLDLl7To,1430
22
+ seekrai/types/completions.py,sha256=lm9AFdZR3Xg5AHPkV-qETHikkwMJmkHrLGr5GG-YR-M,2171
23
+ seekrai/types/embeddings.py,sha256=OANoLNOs0aceS8NppVvvcNYQbF7-pAOAmcr30pw64OU,749
24
+ seekrai/types/error.py,sha256=uTKISs9aRC4_6zwirtNkanxepN8KY-SqCq0kNbfZylQ,370
25
+ seekrai/types/files.py,sha256=A2B8OPasUQVSkNdxkzpv2y9I2B7vuNCq64KGqwRanKo,2025
26
+ seekrai/types/finetune.py,sha256=mMS_3vJnJG2bBGWHCo3t98iieglwe_kR9sOrxfnO3xY,5825
27
+ seekrai/types/images.py,sha256=Fusj8OhVYFsT8kz636lRGGivLbPXo_ZNgakKwmzJi3U,914
28
+ seekrai/types/models.py,sha256=1ZfW9WwayApkISRizDntjkWhYNv-wkbrRVIfHn2QuC4,1242
29
+ seekrai/utils/__init__.py,sha256=dfbiYEc47EBVRkq6C4O9y6tTGuPuV3LbV3__v01Mbds,658
30
+ seekrai/utils/_log.py,sha256=Cayw5B394H2WGVTXPXS2AN8znQdxsgrLqADXgqmokvU,1649
31
+ seekrai/utils/api_helpers.py,sha256=0Y8BblNIr9h_R12zdmhkxgTlxgoRkbq84QNi4nNWGu8,2385
32
+ seekrai/utils/files.py,sha256=B61Pwra49MVVWjPtdkx4hBtAuUe9UI63hdNus87Uq0o,7164
33
+ seekrai/utils/tools.py,sha256=jgJTL-dOIouDbEJLdQpQfpXhqaz_poQYS52adyUtBjo,1781
34
+ seekrai/version.py,sha256=q6iGQVFor8zXiPP5F-3vy9TndOxKv5JXbaNJ2kdOQws,125
35
+ seekrai-0.1.1.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
36
+ seekrai-0.1.1.dist-info/METADATA,sha256=mhkNjlZLyOPwkwd8PJQ5hCQO0jslB9sDov-UJBSfGZk,4840
37
+ seekrai-0.1.1.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
38
+ seekrai-0.1.1.dist-info/entry_points.txt,sha256=N49yOEGi1sK7Xr13F_rkkcOxQ88suyiMoOmRhUHTZ_U,48
39
+ seekrai-0.1.1.dist-info/RECORD,,
seekrai/cli/__init__.py DELETED
File without changes
File without changes
seekrai/cli/api/chat.py DELETED
@@ -1,245 +0,0 @@
1
- from __future__ import annotations
2
-
3
- import cmd
4
- import json
5
- from typing import List, Tuple
6
-
7
- import click
8
-
9
- from seekrai import seekrflow
10
- from seekrai.types.chat_completions import (
11
- ChatCompletionChoicesChunk,
12
- ChatCompletionChunk,
13
- ChatCompletionResponse,
14
- )
15
-
16
-
17
- class ChatShell(cmd.Cmd):
18
- intro = "Type /exit to exit, /help, or /? to list commands.\n"
19
- prompt = ">>> "
20
-
21
- def __init__(
22
- self,
23
- client: SeekrFlow,
24
- model: str,
25
- max_tokens: int | None = None,
26
- stop: List[str] | None = None,
27
- temperature: float | None = None,
28
- top_p: float | None = None,
29
- top_k: int | None = None,
30
- repetition_penalty: float | None = None,
31
- safety_model: str | None = None,
32
- system_message: str | None = None,
33
- ) -> None:
34
- super().__init__()
35
- self.client = client
36
- self.model = model
37
- self.max_tokens = max_tokens
38
- self.stop = stop
39
- self.temperature = temperature
40
- self.top_p = top_p
41
- self.top_k = top_k
42
- self.repetition_penalty = repetition_penalty
43
- self.safety_model = safety_model
44
- self.system_message = system_message
45
-
46
- self.messages = (
47
- [{"role": "system", "content": self.system_message}]
48
- if self.system_message
49
- else []
50
- )
51
-
52
- def precmd(self, line: str) -> str:
53
- if line.startswith("/"):
54
- return line[1:]
55
- else:
56
- return "say " + line
57
-
58
- def do_say(self, arg: str) -> None:
59
- self.messages.append({"role": "user", "content": arg})
60
-
61
- output = ""
62
-
63
- for chunk in self.client.chat.completions.create(
64
- messages=self.messages,
65
- model=self.model,
66
- max_tokens=self.max_tokens,
67
- stop=self.stop,
68
- temperature=self.temperature,
69
- top_p=self.top_p,
70
- top_k=self.top_k,
71
- repetition_penalty=self.repetition_penalty,
72
- safety_model=self.safety_model,
73
- stream=True,
74
- ):
75
- # assertions for type checking
76
- assert isinstance(chunk, ChatCompletionChunk)
77
- assert chunk.choices
78
- assert chunk.choices[0].delta
79
- assert chunk.choices[0].delta.content
80
-
81
- token = chunk.choices[0].delta.content
82
-
83
- click.echo(token, nl=False)
84
-
85
- output += token
86
-
87
- click.echo("\n")
88
-
89
- self.messages.append({"role": "assistant", "content": output})
90
-
91
- def do_reset(self, arg: str) -> None:
92
- self.messages = (
93
- [{"role": "system", "content": self.system_message}]
94
- if self.system_message
95
- else []
96
- )
97
-
98
- def do_exit(self, arg: str) -> bool:
99
- return True
100
-
101
-
102
- @click.command(name="chat.interactive")
103
- @click.pass_context
104
- @click.option("--model", type=str, required=True, help="Model name")
105
- @click.option("--max-tokens", type=int, help="Max tokens to generate")
106
- @click.option(
107
- "--stop", type=str, multiple=True, help="List of strings to stop generation"
108
- )
109
- @click.option("--temperature", type=float, help="Sampling temperature")
110
- @click.option("--top-p", type=int, help="Top p sampling")
111
- @click.option("--top-k", type=float, help="Top k sampling")
112
- @click.option("--safety-model", type=str, help="Moderation model")
113
- @click.option("--system-message", type=str, help="System message to use for the chat")
114
- def interactive(
115
- ctx: click.Context,
116
- model: str,
117
- max_tokens: int | None = None,
118
- stop: List[str] | None = None,
119
- temperature: float | None = None,
120
- top_p: float | None = None,
121
- top_k: int | None = None,
122
- repetition_penalty: float | None = None,
123
- safety_model: str | None = None,
124
- system_message: str | None = None,
125
- ) -> None:
126
- """Interactive chat shell"""
127
- client: SeekrFlow = ctx.obj
128
-
129
- ChatShell(
130
- client=client,
131
- model=model,
132
- max_tokens=max_tokens,
133
- stop=stop,
134
- temperature=temperature,
135
- top_p=top_p,
136
- top_k=top_k,
137
- repetition_penalty=repetition_penalty,
138
- safety_model=safety_model,
139
- system_message=system_message,
140
- ).cmdloop()
141
-
142
-
143
- @click.command(name="chat.completions")
144
- @click.pass_context
145
- @click.option(
146
- "--message",
147
- type=(str, str),
148
- multiple=True,
149
- required=True,
150
- help="Message to generate chat completions from",
151
- )
152
- @click.option("--model", type=str, required=True, help="Model name")
153
- @click.option("--max-tokens", type=int, help="Max tokens to generate")
154
- @click.option(
155
- "--stop", type=str, multiple=True, help="List of strings to stop generation"
156
- )
157
- @click.option("--temperature", type=float, help="Sampling temperature")
158
- @click.option("--top-p", type=int, help="Top p sampling")
159
- @click.option("--top-k", type=float, help="Top k sampling")
160
- @click.option("--repetition-penalty", type=float, help="Repetition penalty")
161
- @click.option("--no-stream", is_flag=True, help="Disable streaming")
162
- @click.option("--logprobs", type=int, help="Return logprobs. Only works with --raw.")
163
- @click.option("--echo", is_flag=True, help="Echo prompt. Only works with --raw.")
164
- @click.option("--n", type=int, help="Number of output generations")
165
- @click.option("--safety-model", type=str, help="Moderation model")
166
- @click.option("--raw", is_flag=True, help="Output raw JSON")
167
- def chat(
168
- ctx: click.Context,
169
- message: List[Tuple[str, str]],
170
- model: str,
171
- max_tokens: int | None = None,
172
- stop: List[str] | None = None,
173
- temperature: float | None = None,
174
- top_p: float | None = None,
175
- top_k: int | None = None,
176
- repetition_penalty: float | None = None,
177
- no_stream: bool = False,
178
- logprobs: int | None = None,
179
- echo: bool | None = None,
180
- n: int | None = None,
181
- safety_model: str | None = None,
182
- raw: bool = False,
183
- ) -> None:
184
- """Generate chat completions from messages"""
185
- client: SeekrFlow = ctx.obj
186
-
187
- messages = [{"role": msg[0], "content": msg[1]} for msg in message]
188
-
189
- response = client.chat.completions.create(
190
- model=model,
191
- messages=messages,
192
- top_p=top_p,
193
- top_k=top_k,
194
- temperature=temperature,
195
- max_tokens=max_tokens,
196
- stop=stop,
197
- repetition_penalty=repetition_penalty,
198
- stream=not no_stream,
199
- logprobs=logprobs,
200
- echo=echo,
201
- n=n,
202
- safety_model=safety_model,
203
- )
204
-
205
- if not no_stream:
206
- for chunk in response:
207
- # assertions for type checking
208
- assert isinstance(chunk, ChatCompletionChunk)
209
- assert chunk.choices
210
-
211
- if raw:
212
- click.echo(f"{json.dumps(chunk.model_dump())}")
213
- continue
214
-
215
- should_print_header = len(chunk.choices) > 1
216
- for stream_choice in sorted(chunk.choices, key=lambda c: c.index): # type: ignore
217
- assert isinstance(stream_choice, ChatCompletionChoicesChunk)
218
- assert stream_choice.delta
219
-
220
- if should_print_header:
221
- click.echo(f"\n===== Completion {stream_choice.index} =====\n")
222
- click.echo(f"{stream_choice.delta.content}", nl=False)
223
-
224
- if should_print_header:
225
- click.echo("\n")
226
-
227
- # new line after stream ends
228
- click.echo("\n")
229
- else:
230
- # assertions for type checking
231
- assert isinstance(response, ChatCompletionResponse)
232
- assert isinstance(response.choices, list)
233
-
234
- if raw:
235
- click.echo(f"{json.dumps(response.model_dump(), indent=4)}")
236
- return
237
-
238
- should_print_header = len(response.choices) > 1
239
- for i, choice in enumerate(response.choices):
240
- if should_print_header:
241
- click.echo(f"===== Completion {i} =====")
242
- click.echo(choice.message.content) # type: ignore
243
-
244
- if should_print_header:
245
- click.echo("\n")
@@ -1,107 +0,0 @@
1
- from __future__ import annotations
2
-
3
- import json
4
- from typing import List
5
-
6
- import click
7
-
8
- from seekrai import seekrflow
9
- from seekrai.types import CompletionChunk
10
- from seekrai.types.completions import CompletionChoicesChunk, CompletionResponse
11
-
12
-
13
- @click.command()
14
- @click.pass_context
15
- @click.argument("prompt", type=str, required=True)
16
- @click.option("--model", type=str, required=True, help="Model name")
17
- @click.option("--no-stream", is_flag=True, help="Disable streaming")
18
- @click.option("--max-tokens", type=int, help="Max tokens to generate")
19
- @click.option(
20
- "--stop", type=str, multiple=True, help="List of strings to stop generation"
21
- )
22
- @click.option("--temperature", type=float, help="Sampling temperature")
23
- @click.option("--top-p", type=int, help="Top p sampling")
24
- @click.option("--top-k", type=float, help="Top k sampling")
25
- @click.option("--logprobs", type=int, help="Return logprobs. Only works with --raw.")
26
- @click.option("--echo", is_flag=True, help="Echo prompt. Only works with --raw.")
27
- @click.option("--n", type=int, help="Number of output generations")
28
- @click.option("--safety-model", type=str, help="Moderation model")
29
- @click.option("--raw", is_flag=True, help="Return raw JSON response")
30
- def completions(
31
- ctx: click.Context,
32
- prompt: str,
33
- model: str,
34
- max_tokens: int | None = 512,
35
- stop: List[str] | None = None,
36
- temperature: float | None = None,
37
- top_p: float | None = None,
38
- top_k: int | None = None,
39
- repetition_penalty: float | None = None,
40
- no_stream: bool = False,
41
- logprobs: int | None = None,
42
- echo: bool | None = None,
43
- n: int | None = None,
44
- safety_model: str | None = None,
45
- raw: bool = False,
46
- ) -> None:
47
- """Generate text completions"""
48
- client: SeekrFlow = ctx.obj
49
-
50
- response = client.completions.create(
51
- model=model,
52
- prompt=prompt,
53
- top_p=top_p,
54
- top_k=top_k,
55
- temperature=temperature,
56
- max_tokens=max_tokens,
57
- stop=stop,
58
- repetition_penalty=repetition_penalty,
59
- stream=not no_stream,
60
- logprobs=logprobs,
61
- echo=echo,
62
- n=n,
63
- safety_model=safety_model,
64
- )
65
-
66
- if not no_stream:
67
- for chunk in response:
68
- # assertions for type checking
69
- assert isinstance(chunk, CompletionChunk)
70
- assert chunk.choices
71
-
72
- if raw:
73
- click.echo(f"{json.dumps(chunk.model_dump())}")
74
- continue
75
-
76
- should_print_header = len(chunk.choices) > 1
77
- for stream_choice in sorted(chunk.choices, key=lambda c: c.index): # type: ignore
78
- # assertions for type checking
79
- assert isinstance(stream_choice, CompletionChoicesChunk)
80
- assert stream_choice.delta
81
-
82
- if should_print_header:
83
- click.echo(f"\n===== Completion {stream_choice.index} =====\n")
84
- click.echo(f"{stream_choice.delta.content}", nl=False)
85
-
86
- if should_print_header:
87
- click.echo("\n")
88
-
89
- # new line after stream ends
90
- click.echo("\n")
91
- else:
92
- # assertions for type checking
93
- assert isinstance(response, CompletionResponse)
94
- assert isinstance(response.choices, list)
95
-
96
- if raw:
97
- click.echo(f"{json.dumps(response.model_dump(), indent=4)}")
98
- return
99
-
100
- should_print_header = len(response.choices) > 1
101
- for i, choice in enumerate(response.choices):
102
- if should_print_header:
103
- click.echo(f"===== Completion {i} =====")
104
- click.echo(choice.text)
105
-
106
- if should_print_header or not choice.text.endswith("\n"):
107
- click.echo("\n")