ommlds 0.0.0.dev475__py3-none-any.whl → 0.0.0.dev476__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (71)
  1. ommlds/.omlish-manifests.json +85 -1
  2. ommlds/__about__.py +1 -1
  3. ommlds/backends/groq/_marshal.py +23 -0
  4. ommlds/backends/groq/protocol.py +184 -0
  5. ommlds/cli/{sessions/chat/backends → backends}/catalog.py +35 -3
  6. ommlds/cli/backends/configs.py +9 -0
  7. ommlds/cli/backends/inject.py +31 -36
  8. ommlds/cli/{sessions/chat/backends → backends}/injection.py +1 -1
  9. ommlds/cli/{sessions/chat/backends → backends}/types.py +11 -1
  10. ommlds/cli/{sessions/chat/content → content}/messages.py +1 -1
  11. ommlds/cli/{sessions/chat/content → content}/strings.py +1 -1
  12. ommlds/cli/inject.py +0 -6
  13. ommlds/cli/inputs/asyncs.py +32 -0
  14. ommlds/cli/{sessions/chat/chat/user/inputs.py → inputs/sync.py} +0 -30
  15. ommlds/cli/main.py +267 -113
  16. ommlds/cli/rendering/__init__.py +0 -0
  17. ommlds/cli/rendering/configs.py +9 -0
  18. ommlds/cli/{sessions/chat/rendering → rendering}/inject.py +4 -5
  19. ommlds/cli/{sessions/chat/rendering → rendering}/markdown.py +1 -1
  20. ommlds/cli/{sessions/chat/rendering → rendering}/raw.py +1 -1
  21. ommlds/cli/{sessions/chat/rendering → rendering}/types.py +1 -1
  22. ommlds/cli/secrets.py +21 -0
  23. ommlds/cli/sessions/base.py +1 -1
  24. ommlds/cli/sessions/chat/chat/ai/configs.py +11 -0
  25. ommlds/cli/sessions/chat/chat/ai/inject.py +7 -11
  26. ommlds/cli/sessions/chat/chat/ai/rendering.py +4 -4
  27. ommlds/cli/sessions/chat/chat/ai/services.py +2 -2
  28. ommlds/cli/sessions/chat/chat/state/configs.py +11 -0
  29. ommlds/cli/sessions/chat/chat/state/inject.py +6 -10
  30. ommlds/cli/sessions/chat/chat/state/inmemory.py +1 -2
  31. ommlds/cli/sessions/chat/chat/state/storage.py +1 -2
  32. ommlds/cli/sessions/chat/chat/state/types.py +1 -1
  33. ommlds/cli/sessions/chat/chat/user/configs.py +17 -0
  34. ommlds/cli/sessions/chat/chat/user/inject.py +13 -19
  35. ommlds/cli/sessions/chat/chat/user/interactive.py +3 -3
  36. ommlds/cli/sessions/chat/configs.py +15 -26
  37. ommlds/cli/sessions/chat/inject.py +18 -35
  38. ommlds/cli/sessions/chat/session.py +1 -1
  39. ommlds/cli/sessions/chat/tools/configs.py +13 -0
  40. ommlds/cli/sessions/chat/tools/inject.py +6 -10
  41. ommlds/cli/sessions/chat/tools/injection.py +1 -0
  42. ommlds/cli/sessions/chat/tools/rendering.py +1 -1
  43. ommlds/cli/sessions/completion/configs.py +2 -2
  44. ommlds/cli/sessions/completion/inject.py +14 -0
  45. ommlds/cli/sessions/completion/session.py +7 -11
  46. ommlds/cli/sessions/embedding/configs.py +2 -2
  47. ommlds/cli/sessions/embedding/inject.py +14 -0
  48. ommlds/cli/sessions/embedding/session.py +7 -11
  49. ommlds/cli/state/storage.py +1 -1
  50. ommlds/minichain/backends/catalogs/strings.py +1 -1
  51. ommlds/minichain/backends/impls/groq/__init__.py +0 -0
  52. ommlds/minichain/backends/impls/groq/chat.py +69 -0
  53. ommlds/minichain/backends/impls/groq/names.py +35 -0
  54. ommlds/minichain/backends/impls/groq/protocol.py +46 -0
  55. ommlds/minichain/backends/impls/groq/stream.py +121 -0
  56. ommlds/minichain/backends/impls/openai/chat.py +3 -3
  57. ommlds/minichain/backends/impls/openai/names.py +27 -3
  58. ommlds/minichain/backends/impls/openai/stream.py +2 -2
  59. ommlds/wiki/utils/xml.py +5 -5
  60. {ommlds-0.0.0.dev475.dist-info → ommlds-0.0.0.dev476.dist-info}/METADATA +5 -5
  61. {ommlds-0.0.0.dev475.dist-info → ommlds-0.0.0.dev476.dist-info}/RECORD +68 -55
  62. ommlds/cli/backends/standard.py +0 -20
  63. ommlds/cli/main2.py +0 -220
  64. ommlds/cli/sessions/chat/backends/inject.py +0 -53
  65. /ommlds/{cli/sessions/chat/backends → backends/groq}/__init__.py +0 -0
  66. /ommlds/cli/{sessions/chat/content → content}/__init__.py +0 -0
  67. /ommlds/cli/{sessions/chat/rendering → inputs}/__init__.py +0 -0
  68. {ommlds-0.0.0.dev475.dist-info → ommlds-0.0.0.dev476.dist-info}/WHEEL +0 -0
  69. {ommlds-0.0.0.dev475.dist-info → ommlds-0.0.0.dev476.dist-info}/entry_points.txt +0 -0
  70. {ommlds-0.0.0.dev475.dist-info → ommlds-0.0.0.dev476.dist-info}/licenses/LICENSE +0 -0
  71. {ommlds-0.0.0.dev475.dist-info → ommlds-0.0.0.dev476.dist-info}/top_level.txt +0 -0
ommlds/cli/main.py CHANGED
@@ -1,176 +1,331 @@
 """
-See:
- - https://github.com/simonw/llm
- - https://github.com/TheR1D/shell_gpt
- - https://github.com/paul-gauthier/aider
+TODO:
+ - bootstrap lol
 """
+import abc
 import functools
-import os.path
 import typing as ta
 
 import anyio
 
-from omdev.home.secrets import load_secrets
 from omlish import check
+from omlish import dataclasses as dc
 from omlish import inject as inj
 from omlish import lang
 from omlish.argparse import all as ap
 from omlish.logs import all as logs
-from omlish.subprocesses.editor import edit_text_with_user_editor
-from omlish.subprocesses.sync import subprocesses
 
-from .. import minichain as mc
 from .inject import bind_main
+from .secrets import install_secrets
 from .sessions.base import Session
 from .sessions.chat.configs import ChatConfig
 from .sessions.completion.configs import CompletionConfig
 from .sessions.embedding.configs import EmbeddingConfig
 
 
-if ta.TYPE_CHECKING:
-    import PIL.Image as pimg  # noqa
-else:
-    pimg = lang.proxy_import('PIL.Image')
+##
 
 
-##
+MAIN_EXTRA_ARGS: ta.Sequence[ap.Arg] = [
+    ap.arg('-v', '--verbose', action='store_true'),
+]
 
 
-async def _a_main(args: ta.Any = None) -> None:
-    parser = ap.ArgumentParser()
-    parser.add_argument('prompt', nargs='*')
+def _process_main_extra_args(args: ap.Namespace) -> None:
+    if args.verbose:
+        logs.configure_standard_logging('DEBUG')
+    else:
+        logs.configure_standard_logging('INFO')
+        logs.silence_noisy_loggers()
 
-    parser.add_argument('-b', '--backend', default='openai')
 
-    parser.add_argument('-m', '--model-name')
+##
 
-    parser.add_argument('-C', '--completion', action='store_true')
 
-    parser.add_argument('-n', '--new', action='store_true')
-    parser.add_argument('--ephemeral', action='store_true')
+class Profile(lang.Abstract):
+    @abc.abstractmethod
+    def run(self, argv: ta.Sequence[str]) -> ta.Awaitable[None]:
+        raise NotImplementedError
 
-    parser.add_argument('-e', '--editor', action='store_true')
-    parser.add_argument('-i', '--interactive', action='store_true')
-    parser.add_argument('-c', '--code', action='store_true')
-    parser.add_argument('-s', '--stream', action='store_true')
-    parser.add_argument('-M', '--markdown', action='store_true')
 
-    parser.add_argument('-E', '--embed', action='store_true')
-    parser.add_argument('-j', '--image', action='store_true')
+##
 
-    parser.add_argument('-v', '--verbose', action='store_true')
 
-    parser.add_argument('--enable-fs-tools', action='store_true')
-    parser.add_argument('--enable-todo-tools', action='store_true')
-    parser.add_argument('--enable-unsafe-tools-do-not-use-lol', action='store_true')
-    parser.add_argument('--enable-test-weather-tool', action='store_true')
-    parser.add_argument('--dangerous-no-tool-confirmation', action='store_true')
+# class ChatAspect(lang.Abstract):
+#     def get_parser_args(self) -> ta.Sequence[ap.Arg]: ...
+#     def set_args(self, args: ap.Namespace) -> None: ...
+#     def configure(self, cfg: ChatConfig) -> ChatConfig: ...
 
-    args = parser.parse_args(args)
+
+class ChatProfile(Profile):
+    _args: ap.Namespace
 
     #
 
-    if args.verbose:
-        logs.configure_standard_logging('DEBUG')
-    else:
-        logs.configure_standard_logging('INFO')
-        logs.silence_noisy_loggers()
+    BACKEND_ARGS: ta.ClassVar[ta.Sequence[ap.Arg]] = [
+        ap.arg('-b', '--backend', group='backend'),
+    ]
+
+    def configure_backend(self, cfg: ChatConfig) -> ChatConfig:
+        return dc.replace(
+            cfg,
+            backend=dc.replace(
+                cfg.backend,
+                backend=self._args.backend,
+            ),
+        )
 
     #
 
-    content: mc.Content | None
+    INPUT_ARGS: ta.ClassVar[ta.Sequence[ap.Arg]] = [
+        ap.arg('message', nargs='*', group='input'),
+        ap.arg('-i', '--interactive', action='store_true', group='input'),
+        ap.arg('-e', '--editor', action='store_true', group='input'),
+    ]
+
+    def configure_input(self, cfg: ChatConfig) -> ChatConfig:
+        if self._args.editor:
+            check.arg(not self._args.interactive)
+            check.arg(not self._args.message)
+            raise NotImplementedError
+
+        elif self._args.interactive:
+            check.arg(not self._args.message)
+            return dc.replace(
+                cfg,
+                user=dc.replace(
+                    cfg.user,
+                    interactive=True,
+                ),
+            )
+
+        elif self._args.message:
+            # TODO: '-' -> stdin
+            return dc.replace(
+                cfg,
+                user=dc.replace(
+                    cfg.user,
+                    initial_user_content=' '.join(self._args.message),
+                ),
+            )
 
-    if args.image:
-        content = mc.ImageContent(pimg.open(check.non_empty_str(check.single(args.prompt))))
+        else:
+            raise ValueError('Must specify input')
 
-    elif args.editor:
-        check.arg(not args.prompt)
-        if (ec := edit_text_with_user_editor('', subprocesses)) is None:
-            return
-        content = ec
+    #
 
-    elif args.interactive:
-        if args.prompt:
-            raise ValueError('Must not provide prompt')
-        content = None
+    STATE_ARGS: ta.ClassVar[ta.Sequence[ap.Arg]] = [
+        ap.arg('-n', '--new', action='store_true', group='state'),
+        ap.arg('--ephemeral', action='store_true', group='state'),
+    ]
+
+    def configure_state(self, cfg: ChatConfig) -> ChatConfig:
+        return dc.replace(
+            cfg,
+            state=dc.replace(
+                cfg.state,
+                state='ephemeral' if self._args.ephemeral else 'new' if self._args.new else 'continue',
+            ),
+        )
 
-    elif args.code:
-        if args.prompt:
-            content = ' '.join(args.prompt)
-        else:
-            content = None
+    #
 
-    elif not args.prompt:
-        raise ValueError('Must provide prompt')
+    OUTPUT_ARGS: ta.ClassVar[ta.Sequence[ap.Arg]] = [
+        ap.arg('-s', '--stream', action='store_true', group='output'),
+        ap.arg('-M', '--markdown', action='store_true', group='output'),
+    ]
+
+    def configure_output(self, cfg: ChatConfig) -> ChatConfig:
+        return dc.replace(
+            cfg,
+            ai=dc.replace(
+                cfg.ai,
+                stream=bool(self._args.stream),
+            ),
+            rendering=dc.replace(
+                cfg.rendering,
+                markdown=bool(self._args.markdown),
+            ),
+        )
 
     #
 
-    # FIXME: lol garbage
-    for key in [
-        'OPENAI_API_KEY',
-        'HUGGINGFACE_TOKEN',
-        'TAVILY_API_KEY',
-        'ANTHROPIC_API_KEY',
-        'MISTRAL_API_KEY',
-        'GEMINI_API_KEY',
-    ]:
-        if (sec := load_secrets().try_get(key.lower())) is not None:
-            os.environ[key] = sec.reveal()
+    TOOLS_ARGS: ta.ClassVar[ta.Sequence[ap.Arg]] = [
+        ap.arg('--enable-fs-tools', action='store_true', group='tools'),
+        ap.arg('--enable-todo-tools', action='store_true', group='tools'),
+        # ap.arg('--enable-unsafe-tools-do-not-use-lol', action='store_true', group='tools'),
+        ap.arg('--enable-test-weather-tool', action='store_true', group='tools'),
+    ]
+
+    def configure_tools(self, cfg: ChatConfig) -> ChatConfig:
+        return dc.replace(
+            cfg,
+            ai=dc.replace(
+                cfg.ai,
+                enable_tools=(
+                    self._args.enable_fs_tools or
+                    self._args.enable_todo_tools or
+                    # self._args.enable_unsafe_tools_do_not_use_lol or
+                    self._args.enable_test_weather_tool or
+                    self._args.code
+                ),
+            ),
+            tools=dc.replace(
+                cfg.tools,
+                enabled_tools={  # noqa
+                    *(cfg.tools.enabled_tools or []),
+                    *(['fs'] if self._args.enable_fs_tools else []),
+                    *(['todo'] if self._args.enable_todo_tools else []),
+                    *(['weather'] if self._args.enable_test_weather_tool else []),
+                },
+            ),
+        )
 
     #
 
-    session_cfg: ta.Any
+    CODE_CONFIG: ta.ClassVar[ta.Sequence[ap.Arg]] = [
+        ap.arg('-c', '--code', action='store_true', group='code'),
+    ]
 
-    if args.embed:
-        session_cfg = EmbeddingConfig(
-            check.not_none(content),  # noqa
-            backend=args.backend,
-        )
+    def configure_code(self, cfg: ChatConfig) -> ChatConfig:
+        if not self._args.code:
+            return cfg
 
-    elif args.completion:
-        session_cfg = CompletionConfig(
-            check.not_none(content),  # noqa
-            backend=args.backend,
+        cfg = dc.replace(
+            cfg,
+            ai=dc.replace(
+                cfg.ai,
+                enable_tools=True,
+            ),
         )
 
-    else:
-        system_content: mc.Content | None = None
-        if (args.new or args.ephemeral) and args.code:
+        if self._args.new or self._args.ephemeral:
             from ..minichain.lib.code.prompts import CODE_AGENT_SYSTEM_PROMPT
             system_content = CODE_AGENT_SYSTEM_PROMPT
 
-        session_cfg = ChatConfig(
+            cfg = dc.replace(
+                cfg,
+                user=dc.replace(
+                    cfg.user,
+                    initial_system_content=system_content,
+                ),
+            )
+
+        return cfg
+
+    #
+
+    async def run(self, argv: ta.Sequence[str]) -> None:
+        parser = ap.ArgumentParser()
+
+        for grp_name, grp_args in [
+            ('backend', self.BACKEND_ARGS),
+            ('input', self.INPUT_ARGS),
+            ('state', self.STATE_ARGS),
+            ('output', self.OUTPUT_ARGS),
+            ('tools', self.TOOLS_ARGS),
+            ('code', self.CODE_CONFIG),
+        ]:
+            grp = parser.add_argument_group(grp_name)
+            for a in grp_args:
+                grp.add_argument(*a.args, **a.kwargs)
+
+        self._args = parser.parse_args(argv)
+
+        cfg = ChatConfig()
+        cfg = self.configure_backend(cfg)
+        cfg = self.configure_input(cfg)
+        cfg = self.configure_state(cfg)
+        cfg = self.configure_output(cfg)
+        cfg = self.configure_tools(cfg)
+        cfg = self.configure_code(cfg)
+
+        with inj.create_managed_injector(bind_main(
+            session_cfg=cfg,
+        )) as injector:
+            await injector[Session].run()
+
+
+##
+
+
+class CompletionProfile(Profile):
+    async def run(self, argv: ta.Sequence[str]) -> None:
+        parser = ap.ArgumentParser()
+        parser.add_argument('prompt', nargs='*')
+        parser.add_argument('-b', '--backend', default='openai')
+        args = parser.parse_args(argv)
+
+        content = ' '.join(args.prompt)
+
+        cfg = CompletionConfig(
+            check.non_empty_str(content),
             backend=args.backend,
-            model_name=args.model_name,
-            state='ephemeral' if args.ephemeral else 'new' if args.new else 'continue',
-            initial_system_content=system_content,
-            initial_user_content=content,  # noqa
-            interactive=bool(args.interactive),
-            markdown=bool(args.markdown),
-            stream=bool(args.stream),
-            enable_tools=(
-                args.enable_fs_tools or
-                args.enable_todo_tools or
-                args.enable_unsafe_tools_do_not_use_lol or
-                args.enable_test_weather_tool or
-                args.code
-            ),
-            enabled_tools={  # noqa
-                *(['fs'] if args.enable_fs_tools else []),
-                *(['todo'] if args.enable_todo_tools else []),
-                *(['weather'] if args.enable_test_weather_tool else []),
-                # FIXME: enable_unsafe_tools_do_not_use_lol
-            },
-            dangerous_no_tool_confirmation=bool(args.dangerous_no_tool_confirmation),
         )
 
-    #
+        with inj.create_managed_injector(bind_main(
+            session_cfg=cfg,
+        )) as injector:
+            await injector[Session].run()
+
+
+##
+
+
+class EmbedProfile(Profile):
+    async def run(self, argv: ta.Sequence[str]) -> None:
+        parser = ap.ArgumentParser()
+        parser.add_argument('prompt', nargs='*')
+        parser.add_argument('-b', '--backend', default='openai')
+        args = parser.parse_args(argv)
+
+        content = ' '.join(args.prompt)
+
+        cfg = EmbeddingConfig(
+            check.non_empty_str(content),
+            backend=args.backend,
+        )
+
+        with inj.create_managed_injector(bind_main(
+            session_cfg=cfg,
+        )) as injector:
+            await injector[Session].run()
+
+
+##
+
+
+PROFILE_TYPES: ta.Mapping[str, type[Profile]] = {
+    'chat': ChatProfile,
+    'complete': CompletionProfile,
+    'embed': EmbedProfile,
+}
+
+
+##
+
+
+MAIN_PROFILE_ARGS: ta.Sequence[ap.Arg] = [
+    ap.arg('-p', '--profile', default='chat'),
+    ap.arg('args', nargs=ap.REMAINDER),
+]
+
+
+async def _a_main(argv: ta.Any = None) -> None:
+    parser = ap.ArgumentParser()
+
+    for a in [*MAIN_PROFILE_ARGS, *MAIN_EXTRA_ARGS]:
+        parser.add_argument(*a.args, **a.kwargs)
+
+    args, unk_args = parser.parse_known_args(argv)
+
+    _process_main_extra_args(args)
+
+    install_secrets()
 
-    with inj.create_managed_injector(bind_main(
-        session_cfg=session_cfg,
-        enable_backend_strings=isinstance(session_cfg, ChatConfig),
-    )) as injector:
-        await injector[Session].run()
+    profile_cls = PROFILE_TYPES[args.profile]
+    profile = profile_cls()
+    await profile.run([*unk_args, *args.args])
 
 
 def _main(args: ta.Any = None) -> None:
@@ -179,7 +334,6 @@ def _main(args: ta.Any = None) -> None:
             _a_main,
             args,
         ),
-        # backend='trio',
     )  # noqa
 
 
ommlds/cli/rendering/__init__.py ADDED (file without changes)
ommlds/cli/rendering/configs.py ADDED
@@ -0,0 +1,9 @@
+from omlish import dataclasses as dc
+
+
+##
+
+
+@dc.dataclass(frozen=True, kw_only=True)
+class RenderingConfig:
+    markdown: bool = False
ommlds/cli/{sessions/chat/rendering → rendering}/inject.py RENAMED
@@ -1,6 +1,8 @@
 from omlish import inject as inj
 from omlish import lang
 
+from .configs import RenderingConfig
+
 
 with lang.auto_proxy_import(globals()):
     from . import markdown as _markdown
@@ -11,13 +13,10 @@ with lang.auto_proxy_import(globals()):
 ##
 
 
-def bind_rendering(
-        *,
-        markdown: bool = False,
-) -> inj.Elements:
+def bind_rendering(cfg: RenderingConfig = RenderingConfig()) -> inj.Elements:
     els: list[inj.Elemental] = []
 
-    if markdown:
+    if cfg.markdown:
         els.extend([
             inj.bind(_types.ContentRendering, to_ctor=_markdown.MarkdownContentRendering, singleton=True),
             inj.bind(_types.StreamContentRendering, to_ctor=_markdown.MarkdownStreamContentRendering, singleton=True),
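
The renamed rendering modules also switch bind_rendering from keyword arguments to a frozen RenderingConfig dataclass. Illustrative call shape (a sketch of the new signature, not code from this diff):

    from ommlds.cli.rendering.configs import RenderingConfig
    from ommlds.cli.rendering.inject import bind_rendering

    els = bind_rendering(RenderingConfig(markdown=True))  # previously: bind_rendering(markdown=True)
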
ommlds/cli/{sessions/chat/rendering → rendering}/markdown.py RENAMED
@@ -3,7 +3,7 @@ import typing as ta
 from omdev.tui import rich
 from omlish import lang
 
-from ..... import minichain as mc
+from ... import minichain as mc
 from ..content.strings import ContentStringifier
 from ..content.strings import HasContentStringifier
 from .types import ContentRendering
ommlds/cli/{sessions/chat/rendering → rendering}/raw.py RENAMED
@@ -2,7 +2,7 @@ import typing as ta
 
 from omlish import lang
 
-from ..... import minichain as mc
+from ... import minichain as mc
 from ..content.strings import ContentStringifier
 from ..content.strings import HasContentStringifier
 from .types import ContentRendering
ommlds/cli/{sessions/chat/rendering → rendering}/types.py RENAMED
@@ -3,7 +3,7 @@ import typing as ta
 
 from omlish import lang
 
-from ..... import minichain as mc
+from ... import minichain as mc
 
 
 ##
ommlds/cli/secrets.py ADDED
@@ -0,0 +1,21 @@
+import os
+
+from omdev.home.secrets import load_secrets
+
+
+##
+
+
+def install_secrets() -> None:
+    # FIXME: lol garbage
+    for key in [
+        'ANTHROPIC_API_KEY',
+        'GEMINI_API_KEY',
+        'GROQ_API_KEY',
+        'HUGGINGFACE_TOKEN',
+        'MISTRAL_API_KEY',
+        'OPENAI_API_KEY',
+        'TAVILY_API_KEY',
+    ]:
+        if (sec := load_secrets().try_get(key.lower())) is not None:
+            os.environ[key] = sec.reveal()
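
This helper replaces the secret-loading loop that previously lived inline in _a_main; the new _a_main calls it once before building a session. A hedged usage sketch (assumes an omdev home secrets file is configured locally):

    import os

    from ommlds.cli.secrets import install_secrets

    install_secrets()  # copies configured secrets (e.g. openai_api_key) into os.environ
    print('OPENAI_API_KEY' in os.environ)  # True only if that secret exists in the secrets file
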
ommlds/cli/sessions/base.py CHANGED
@@ -1,7 +1,7 @@
 import abc
-import dataclasses as dc
 import typing as ta
 
+from omlish import dataclasses as dc
 from omlish import lang
 from omlish.configs import all as cfgs
 
ommlds/cli/sessions/chat/chat/ai/configs.py ADDED
@@ -0,0 +1,11 @@
+from omlish import dataclasses as dc
+
+
+##
+
+
+@dc.dataclass(frozen=True, kw_only=True)
+class AiConfig:
+    stream: bool = False
+    silent: bool = False
+    enable_tools: bool = False
ommlds/cli/sessions/chat/chat/ai/inject.py CHANGED
@@ -2,6 +2,7 @@ from omlish import inject as inj
 from omlish import lang
 
 from ...... import minichain as mc
+from .configs import AiConfig
 from .injection import chat_options_providers
 
 
@@ -15,12 +16,7 @@ with lang.auto_proxy_import(globals()):
 ##
 
 
-def bind_ai(
-        *,
-        stream: bool = False,
-        silent: bool = False,
-        enable_tools: bool = False,
-) -> inj.Elements:
+def bind_ai(cfg: AiConfig = AiConfig()) -> inj.Elements:
     els: list[inj.Elemental] = []
 
     #
@@ -38,12 +34,12 @@ def bind_ai(
 
     ai_stack = inj.wrapper_binder_helper(_types.AiChatGenerator)
 
-    if stream:
+    if cfg.stream:
         stream_ai_stack = inj.wrapper_binder_helper(_types.StreamAiChatGenerator)
 
         els.append(stream_ai_stack.push_bind(to_ctor=_services.ChatChoicesStreamServiceStreamAiChatGenerator, singleton=True))  # noqa
 
-        if not silent:
+        if not cfg.silent:
            els.append(stream_ai_stack.push_bind(to_ctor=_rendering.RenderingStreamAiChatGenerator, singleton=True))
 
         els.extend([
@@ -54,17 +50,17 @@
     else:
         els.append(ai_stack.push_bind(to_ctor=_services.ChatChoicesServiceAiChatGenerator, singleton=True))
 
-        if not silent:
+        if not cfg.silent:
            els.append(ai_stack.push_bind(to_ctor=_rendering.RenderingAiChatGenerator, singleton=True))
 
-    if enable_tools:
+    if cfg.enable_tools:
        els.append(ai_stack.push_bind(to_ctor=_tools.ToolExecutingAiChatGenerator, singleton=True))
 
     els.append(inj.bind(_types.AiChatGenerator, to_key=ai_stack.top))
 
     #
 
-    if enable_tools:
+    if cfg.enable_tools:
        def _provide_tools_chat_choices_options_provider(tc: mc.ToolCatalog) -> _services.ChatChoicesServiceOptionsProvider:  # noqa
            return _services.ChatChoicesServiceOptionsProvider(lambda: [
                mc.Tool(tce.spec)
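
As with rendering, bind_ai now takes an AiConfig instead of stream=/silent=/enable_tools= keywords. Illustrative call shape (sketch only, not code from this diff):

    from ommlds.cli.sessions.chat.chat.ai.configs import AiConfig
    from ommlds.cli.sessions.chat.chat.ai.inject import bind_ai

    els = bind_ai(AiConfig(stream=True, enable_tools=True))  # previously: bind_ai(stream=True, enable_tools=True)
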
ommlds/cli/sessions/chat/chat/ai/rendering.py CHANGED
@@ -1,10 +1,10 @@
 import typing as ta
 
 from ...... import minichain as mc
-from ...content.messages import MessageContentExtractor
-from ...content.messages import MessageContentExtractorImpl
-from ...rendering.types import ContentRendering
-from ...rendering.types import StreamContentRendering
+from .....content.messages import MessageContentExtractor
+from .....content.messages import MessageContentExtractorImpl
+from .....rendering.types import ContentRendering
+from .....rendering.types import StreamContentRendering
 from .types import AiChatGenerator
 from .types import StreamAiChatGenerator
 
ommlds/cli/sessions/chat/chat/ai/services.py CHANGED
@@ -4,8 +4,8 @@ from omlish import check
 from omlish import lang
 
 from ...... import minichain as mc
-from ...backends.types import ChatChoicesServiceBackendProvider
-from ...backends.types import ChatChoicesStreamServiceBackendProvider
+from .....backends.types import ChatChoicesServiceBackendProvider
+from .....backends.types import ChatChoicesStreamServiceBackendProvider
 from .types import AiChatGenerator
 from .types import StreamAiChatGenerator
 
ommlds/cli/sessions/chat/chat/state/configs.py ADDED
@@ -0,0 +1,11 @@
+import typing as ta
+
+from omlish import dataclasses as dc
+
+
+##
+
+
+@dc.dataclass(frozen=True, kw_only=True)
+class StateConfig:
+    state: ta.Literal['new', 'continue', 'ephemeral'] = 'continue'
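
ChatProfile.configure_state in the new main.py maps --new/--ephemeral onto this field with dc.replace; a minimal sketch of that mapping (illustrative, not code from this diff):

    from omlish import dataclasses as dc

    from ommlds.cli.sessions.chat.chat.state.configs import StateConfig

    cfg = StateConfig()                       # defaults to state='continue'
    cfg = dc.replace(cfg, state='ephemeral')  # what --ephemeral selects; --new selects 'new'
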