ommlds 0.0.0.dev441__py3-none-any.whl → 0.0.0.dev442__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. ommlds/.omlish-manifests.json +3 -3
  2. ommlds/__about__.py +1 -1
  3. ommlds/backends/google/__init__.py +0 -0
  4. ommlds/backends/google/protocol/__init__.py +0 -0
  5. ommlds/backends/google/protocol/types.py +75 -0
  6. ommlds/backends/openai/protocol/__init__.py +9 -28
  7. ommlds/backends/openai/protocol/_marshal.py +26 -0
  8. ommlds/backends/openai/protocol/chatcompletion/chunk.py +54 -31
  9. ommlds/backends/openai/protocol/chatcompletion/contentpart.py +41 -44
  10. ommlds/backends/openai/protocol/chatcompletion/message.py +45 -43
  11. ommlds/backends/openai/protocol/chatcompletion/request.py +99 -69
  12. ommlds/backends/openai/protocol/chatcompletion/response.py +61 -45
  13. ommlds/backends/openai/protocol/chatcompletion/responseformat.py +21 -20
  14. ommlds/backends/openai/protocol/chatcompletion/tokenlogprob.py +12 -7
  15. ommlds/backends/openai/protocol/completionusage.py +19 -15
  16. ommlds/cli/sessions/chat/interactive.py +1 -1
  17. ommlds/cli/sessions/chat/prompt.py +4 -4
  18. ommlds/cli/sessions/completion/completion.py +1 -1
  19. ommlds/cli/sessions/embedding/embedding.py +1 -1
  20. ommlds/minichain/backends/impls/anthropic/chat.py +1 -1
  21. ommlds/minichain/backends/impls/anthropic/stream.py +17 -15
  22. ommlds/minichain/backends/impls/duckduckgo/search.py +1 -1
  23. ommlds/minichain/backends/impls/google/chat.py +19 -14
  24. ommlds/minichain/backends/impls/google/search.py +1 -1
  25. ommlds/minichain/backends/impls/llamacpp/chat.py +1 -1
  26. ommlds/minichain/backends/impls/llamacpp/completion.py +1 -1
  27. ommlds/minichain/backends/impls/llamacpp/stream.py +1 -1
  28. ommlds/minichain/backends/impls/mistral.py +1 -1
  29. ommlds/minichain/backends/impls/mlx/chat.py +1 -1
  30. ommlds/minichain/backends/impls/openai/chat.py +1 -1
  31. ommlds/minichain/backends/impls/openai/completion.py +1 -1
  32. ommlds/minichain/backends/impls/openai/embedding.py +1 -1
  33. ommlds/minichain/backends/impls/openai/stream.py +9 -1
  34. ommlds/minichain/backends/impls/tinygrad/chat.py +2 -2
  35. ommlds/minichain/backends/impls/transformers/sentence.py +1 -1
  36. ommlds/minichain/backends/impls/transformers/transformers.py +2 -2
  37. ommlds/minichain/chat/choices/adapters.py +2 -2
  38. ommlds/minichain/chat/choices/services.py +1 -1
  39. ommlds/minichain/chat/history.py +2 -2
  40. ommlds/minichain/chat/services.py +1 -1
  41. ommlds/minichain/chat/stream/adapters.py +2 -2
  42. ommlds/minichain/chat/stream/services.py +1 -1
  43. ommlds/minichain/chat/transforms/services.py +4 -4
  44. ommlds/minichain/services/facades.py +3 -3
  45. ommlds/minichain/services/services.py +1 -1
  46. ommlds/minichain/stream/wrap.py +2 -2
  47. ommlds/server/server.py +2 -2
  48. ommlds/tools/git.py +4 -4
  49. {ommlds-0.0.0.dev441.dist-info → ommlds-0.0.0.dev442.dist-info}/METADATA +5 -5
  50. {ommlds-0.0.0.dev441.dist-info → ommlds-0.0.0.dev442.dist-info}/RECORD +54 -50
  51. {ommlds-0.0.0.dev441.dist-info → ommlds-0.0.0.dev442.dist-info}/WHEEL +0 -0
  52. {ommlds-0.0.0.dev441.dist-info → ommlds-0.0.0.dev442.dist-info}/entry_points.txt +0 -0
  53. {ommlds-0.0.0.dev441.dist-info → ommlds-0.0.0.dev442.dist-info}/licenses/LICENSE +0 -0
  54. {ommlds-0.0.0.dev441.dist-info → ommlds-0.0.0.dev442.dist-info}/top_level.txt +0 -0
ommlds/minichain/backends/impls/google/chat.py CHANGED
@@ -4,10 +4,13 @@ https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models
  import typing as ta

  from omlish import check
+ from omlish import marshal as msh
  from omlish import typedvalues as tv
  from omlish.formats import json
  from omlish.http import all as http

+ from .....backends.google.protocol.types import GenerateContentRequest
+ from .....backends.google.protocol.types import GenerateContentResponse
  from ....chat.choices.services import ChatChoicesRequest
  from ....chat.choices.services import ChatChoicesResponse
  from ....chat.choices.services import static_check_is_chat_choices_service
@@ -57,25 +60,25 @@ class GoogleChatChoicesService:
  AiMessage: 'assistant',
  }

- def invoke(
+ async def invoke(
  self,
  request: ChatChoicesRequest,
  ) -> ChatChoicesResponse:
  key = check.not_none(self._api_key).reveal()

- req_dct = {
- 'contents': [
- {
- 'role': self.ROLES_MAP[type(m)],
- 'parts': [
- {
- 'text': self._get_msg_content(m),
- },
- ],
- }
+ g_req = GenerateContentRequest(
+ contents=[
+ GenerateContentRequest.Content(
+ parts=[GenerateContentRequest.Content.Part(
+ text=check.not_none(self._get_msg_content(m)),
+ )],
+ role=self.ROLES_MAP[type(m)], # type: ignore[arg-type]
+ )
  for m in request.v
  ],
- }
+ )
+
+ req_dct = msh.marshal(g_req)

  model_name = MODEL_NAMES.resolve(self._model_name.v)

@@ -88,7 +91,9 @@ class GoogleChatChoicesService:

  resp_dct = json.loads(check.not_none(resp.data).decode('utf-8'))

+ g_resp = msh.unmarshal(resp_dct, GenerateContentResponse)
+
  return ChatChoicesResponse([
- AiChoice(AiMessage(c['content']['parts'][0]['text']))
- for c in resp_dct['candidates']
+ AiChoice(AiMessage(c.content.parts[0].text))
+ for c in g_resp.candidates
  ])
ommlds/minichain/backends/impls/google/search.py CHANGED
@@ -88,7 +88,7 @@ class CseSearchService:
  self._cse_id = cse_id
  self._cse_api_key = cse_api_key

- def invoke(
+ async def invoke(
  self,
  request: SearchRequest,
  ) -> SearchResponse:
ommlds/minichain/backends/impls/llamacpp/chat.py CHANGED
@@ -54,7 +54,7 @@ class LlamacppChatChoicesService:
  temperatur=Temperature,
  )

- def invoke(self, request: ChatChoicesRequest) -> ChatChoicesResponse:
+ async def invoke(self, request: ChatChoicesRequest) -> ChatChoicesResponse:
  kwargs: dict = dict(
  # temperature=0,
  max_tokens=1024,
ommlds/minichain/backends/impls/llamacpp/completion.py CHANGED
@@ -51,7 +51,7 @@ class LlamacppCompletionService:
  temperatur=Temperature,
  )

- def invoke(self, request: CompletionRequest) -> CompletionResponse:
+ async def invoke(self, request: CompletionRequest) -> CompletionResponse:
  kwargs: dict = dict(
  # temperature=0,
  max_tokens=1024,
ommlds/minichain/backends/impls/llamacpp/stream.py CHANGED
@@ -49,7 +49,7 @@ class LlamacppChatChoicesStreamService(lang.ExitStacked):
  verbose=False,
  )))

- def invoke(self, request: ChatChoicesStreamRequest) -> ChatChoicesStreamResponse:
+ async def invoke(self, request: ChatChoicesStreamRequest) -> ChatChoicesStreamResponse:
  lcu.install_logging_hook()

  with UseResources.or_new(request.options) as rs:
ommlds/minichain/backends/impls/mistral.py CHANGED
@@ -50,7 +50,7 @@ class MistralChatChoicesService:
  else:
  raise TypeError(m)

- def invoke(
+ async def invoke(
  self,
  request: ChatChoicesRequest,
  ) -> ChatChoicesResponse:
ommlds/minichain/backends/impls/mlx/chat.py CHANGED
@@ -96,7 +96,7 @@ class MlxChatChoicesService(lang.ExitStacked):
  max_tokens=MaxTokens,
  )

- def invoke(self, request: ChatChoicesRequest) -> ChatChoicesResponse:
+ async def invoke(self, request: ChatChoicesRequest) -> ChatChoicesResponse:
  loaded_model = self._load_model()

  tokenizer = loaded_model.tokenization.tokenizer
ommlds/minichain/backends/impls/openai/chat.py CHANGED
@@ -47,7 +47,7 @@ class OpenaiChatChoicesService:
  self._api_key = ApiKey.pop_secret(cc, env='OPENAI_API_KEY')
  self._default_options: tv.TypedValues = DefaultOptions.pop(cc)

- def invoke(self, request: ChatChoicesRequest) -> ChatChoicesResponse:
+ async def invoke(self, request: ChatChoicesRequest) -> ChatChoicesResponse:
  # check.isinstance(request, ChatRequest)

  rh = OpenaiChatRequestHandler(
ommlds/minichain/backends/impls/openai/completion.py CHANGED
@@ -29,7 +29,7 @@ class OpenaiCompletionService:
  with tv.consume(*configs) as cc:
  self._api_key = ApiKey.pop_secret(cc, env='OPENAI_API_KEY')

- def invoke(self, t: CompletionRequest) -> CompletionResponse:
+ async def invoke(self, t: CompletionRequest) -> CompletionResponse:
  raw_request = dict(
  model=self.DEFAULT_MODEL_NAME,
  prompt=t.v,
ommlds/minichain/backends/impls/openai/embedding.py CHANGED
@@ -28,7 +28,7 @@ class OpenaiEmbeddingService:
  with tv.consume(*configs) as cc:
  self._api_key = ApiKey.pop_secret(cc, env='OPENAI_API_KEY')

- def invoke(self, request: EmbeddingRequest) -> EmbeddingResponse:
+ async def invoke(self, request: EmbeddingRequest) -> EmbeddingResponse:
  raw_request = dict(
  model=self.model,
  input=check.isinstance(request.v, str),
ommlds/minichain/backends/impls/openai/stream.py CHANGED
@@ -1,12 +1,17 @@
+ """
+ https://platform.openai.com/docs/api-reference/responses-streaming
+ """
  import typing as ta

  from omlish import check
+ from omlish import marshal as msh
  from omlish import typedvalues as tv
  from omlish.formats import json
  from omlish.http import all as http
  from omlish.http import sse
  from omlish.io.buffers import DelimitingBuffer

+ from .....backends.openai.protocol.chatcompletion.chunk import ChatCompletionChunk
  from ....chat.choices.services import ChatChoicesOutputs
  from ....chat.stream.services import ChatChoicesStreamRequest
  from ....chat.stream.services import ChatChoicesStreamResponse
@@ -43,7 +48,7 @@ class OpenaiChatChoicesStreamService:

  READ_CHUNK_SIZE = 64 * 1024

- def invoke(self, request: ChatChoicesStreamRequest) -> ChatChoicesStreamResponse:
+ async def invoke(self, request: ChatChoicesStreamRequest) -> ChatChoicesStreamResponse:
  # check.isinstance(request, ChatRequest)

  rh = OpenaiChatRequestHandler(
@@ -99,6 +104,9 @@ class OpenaiChatChoicesStreamService:

  check.state(sj['object'] == 'chat.completion.chunk')

+ ccc = msh.unmarshal(sj, ChatCompletionChunk) # noqa
+ # print(ccc)
+
  # FIXME: stop reason
  if not sj['choices']:
  continue
ommlds/minichain/backends/impls/tinygrad/chat.py CHANGED
@@ -116,7 +116,7 @@ class BaseTinygradLlama3ChatService(lang.ExitStacked, lang.Abstract):
  # )
  @static_check_is_chat_choices_service
  class TinygradLlama3ChatChoicesService(BaseTinygradLlama3ChatService):
- def invoke(self, request: ChatChoicesRequest) -> ChatChoicesResponse:
+ async def invoke(self, request: ChatChoicesRequest) -> ChatChoicesResponse:
  llm = self._load_model()
  toks = _prepare_toks(llm, request.v, request.options)

@@ -136,7 +136,7 @@ class TinygradLlama3ChatChoicesService(BaseTinygradLlama3ChatService):
  # )
  @static_check_is_chat_choices_stream_service
  class TinygradLlama3ChatChoicesStreamService(BaseTinygradLlama3ChatService):
- def invoke(self, request: ChatChoicesStreamRequest) -> ChatChoicesStreamResponse:
+ async def invoke(self, request: ChatChoicesStreamRequest) -> ChatChoicesStreamResponse:
  with UseResources.or_new(request.options) as rs:
  llm = self._load_model()
  toks = _prepare_toks(
ommlds/minichain/backends/impls/transformers/sentence.py CHANGED
@@ -33,7 +33,7 @@ class SentenceTransformersEmbeddingService:
  with tv.consume(*configs) as cc:
  self._model_path = cc.pop(ModelPath(self.DEFAULT_MODEL))

- def invoke(self, request: EmbeddingRequest) -> EmbeddingResponse:
+ async def invoke(self, request: EmbeddingRequest) -> EmbeddingResponse:
  mdl = stfm.SentenceTransformer(
  self._model_path.v,
  )
ommlds/minichain/backends/impls/transformers/transformers.py CHANGED
@@ -59,7 +59,7 @@ class TransformersCompletionService(lang.ExitStacked):
  self._pipeline_kwargs = cc.pop(TransformersPipelineKwargs, [])
  self._huggingface_hub_token = HuggingfaceHubToken.pop_secret(cc, env='HUGGINGFACE_HUB_TOKEN')

- def invoke(self, request: CompletionRequest) -> CompletionResponse:
+ async def invoke(self, request: CompletionRequest) -> CompletionResponse:
  pkw: dict[str, ta.Any] = dict(
  model=self._model_path.v,
  device='mps' if sys.platform == 'darwin' else 'cuda',
@@ -162,7 +162,7 @@ class TransformersChatChoicesService(lang.ExitStacked):
  **pkw,
  )

- def invoke(self, request: ChatChoicesRequest) -> ChatChoicesResponse:
+ async def invoke(self, request: ChatChoicesRequest) -> ChatChoicesResponse:
  check.empty(request.options)

  pipeline = self._load_pipeline()
ommlds/minichain/chat/choices/adapters.py CHANGED
@@ -17,6 +17,6 @@ from .types import ChatChoicesOutputs
  class ChatChoicesServiceChatService:
  service: ChatChoicesService

- def invoke(self, request: ChatRequest) -> Response[AiMessage, ChatChoicesOutputs]:
- resp = self.service.invoke(request)
+ async def invoke(self, request: ChatRequest) -> Response[AiMessage, ChatChoicesOutputs]:
+ resp = await self.service.invoke(request)
  return Response(check.single(resp.v).m, resp.outputs)
ommlds/minichain/chat/choices/services.py CHANGED
@@ -36,5 +36,5 @@ def static_check_is_chat_choices_service[T: ChatChoicesService](t: type[T]) -> t
  @static_check_is_chat_choices_service
  class AbstractChatChoicesService(lang.Abstract):
  @abc.abstractmethod
- def invoke(self, request: ChatChoicesRequest) -> ChatChoicesResponse:
+ def invoke(self, request: ChatChoicesRequest) -> ta.Awaitable[ChatChoicesResponse]:
  raise NotImplementedError
ommlds/minichain/chat/history.py CHANGED
@@ -63,9 +63,9 @@ class HistoryAddingChatService:
  self._inner = inner
  self._history = history

- def invoke(self, request: ChatRequest) -> ChatResponse:
+ async def invoke(self, request: ChatRequest) -> ChatResponse:
  new_req = dc.replace(request, v=[*self._history.get(), *request.v])
- response = self._inner.invoke(new_req)
+ response = await self._inner.invoke(new_req)
  self._history.add(
  *request.v,
  response.v,
ommlds/minichain/chat/services.py CHANGED
@@ -36,5 +36,5 @@ def static_check_is_chat_service[T: ChatService](t: type[T]) -> type[T]:
  @static_check_is_chat_service
  class AbstractChatService(lang.Abstract):
  @abc.abstractmethod
- def invoke(self, request: ChatRequest) -> ChatResponse:
+ def invoke(self, request: ChatRequest) -> ta.Awaitable[ChatResponse]:
  raise NotImplementedError
ommlds/minichain/chat/stream/adapters.py CHANGED
@@ -27,13 +27,13 @@ class ChatChoicesStreamServiceChatChoicesService:
  parts: list[str]
  trs: list[ToolExecRequest]

- def invoke(self, request: ChatChoicesRequest) -> Response[
+ async def invoke(self, request: ChatChoicesRequest) -> Response[
  AiChoices,
  ChatChoicesOutputs | ChatChoicesStreamOutputs,
  ]:
  lst: list[ChatChoicesStreamServiceChatChoicesService._Choice] = []

- resp = self.service.invoke(request)
+ resp = await self.service.invoke(request)
  with resp.v as resp_v:
  i = -1 # noqa
  for i, cs in enumerate(resp_v):
ommlds/minichain/chat/stream/services.py CHANGED
@@ -41,7 +41,7 @@ def static_check_is_chat_choices_stream_service[T: ChatChoicesStreamService](t:
  @static_check_is_chat_choices_stream_service
  class AbstractChatChoicesStreamService(lang.Abstract):
  @abc.abstractmethod
- def invoke(self, request: ChatChoicesStreamRequest) -> ChatChoicesStreamResponse:
+ def invoke(self, request: ChatChoicesStreamRequest) -> ta.Awaitable[ChatChoicesStreamResponse]:
  raise NotImplementedError


ommlds/minichain/chat/transforms/services.py CHANGED
@@ -19,10 +19,10 @@ class RequestChatTransformingChatService:
  ct: ChatTransform
  svc: ChatService

- def invoke(self, request: ChatRequest) -> ChatResponse:
+ async def invoke(self, request: ChatRequest) -> ChatResponse:
  new_chat = self.ct.transform_chat(request.v)
  new_req = dc.replace(request, v=new_chat)
- return self.svc.invoke(new_req)
+ return await self.svc.invoke(new_req)


  #
@@ -34,7 +34,7 @@ class ResponseMessageTransformingChatService:
  mt: MessageTransform[AiMessage]
  svc: ChatService

- def invoke(self, request: ChatRequest) -> ChatResponse:
- orig_resp = self.svc.invoke(request)
+ async def invoke(self, request: ChatRequest) -> ChatResponse:
+ orig_resp = await self.svc.invoke(request)
  new_msg = self.mt.transform_message(orig_resp.v)
  return dc.replace(orig_resp, v=check.isinstance(new_msg, AiMessage))
ommlds/minichain/services/facades.py CHANGED
@@ -44,15 +44,15 @@ class ServiceFacade(
  ],
  ]

- def invoke(self, request: Request[RequestV, OptionT]) -> Response[ResponseV, OutputT]:
+ def invoke(self, request: Request[RequestV, OptionT]) -> ta.Awaitable[Response[ResponseV, OutputT]]:
  return self.service.invoke(request)

  @ta.overload
- def __call__(self, request: Request[RequestV, OptionT]) -> Response[ResponseV, OutputT]:
+ def __call__(self, request: Request[RequestV, OptionT]) -> ta.Awaitable[Response[ResponseV, OutputT]]:
  ...

  @ta.overload
- def __call__(self, v: RequestV, *options: OptionT) -> Response[ResponseV, OutputT]:
+ def __call__(self, v: RequestV, *options: OptionT) -> ta.Awaitable[Response[ResponseV, OutputT]]:
  ...

  def __call__(self, o, *args):
ommlds/minichain/services/services.py CHANGED
@@ -11,4 +11,4 @@ from .responses import ResponseT_co

  @ta.runtime_checkable
  class Service(lang.ProtocolForbiddenAsBaseClass, ta.Protocol[RequestT_contra, ResponseT_co]):
- def invoke(self, request: RequestT_contra) -> ResponseT_co: ...
+ def invoke(self, request: RequestT_contra) -> ta.Awaitable[ResponseT_co]: ...
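The thread running through the hunks above is that the minichain Service protocol's invoke now returns ta.Awaitable, concrete backends switch to async def invoke, and wrapper services await their inner service. A minimal, self-contained sketch of that calling convention (EchoChatService, UppercasingChatService, and the str request/response types are illustrative stand-ins, not names from the package):

    import asyncio


    class EchoChatService:
        # Hypothetical backend: invoke is now a coroutine, satisfying the
        # Awaitable-returning Service protocol shown in the hunk above.
        async def invoke(self, request: str) -> str:
            return f'echo: {request}'


    class UppercasingChatService:
        # Hypothetical wrapper: like HistoryAddingChatService above, it must
        # now await the inner service instead of calling it synchronously.
        def __init__(self, inner: EchoChatService) -> None:
            self._inner = inner

        async def invoke(self, request: str) -> str:
            resp = await self._inner.invoke(request)
            return resp.upper()


    async def main() -> None:
        svc = UppercasingChatService(EchoChatService())
        print(await svc.invoke('hello'))  # async callers simply await invoke(...)


    asyncio.run(main())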
ommlds/minichain/stream/wrap.py CHANGED
@@ -43,9 +43,9 @@ class WrappedStreamService(ta.Generic[StreamRequestT, V, OutputT, StreamOutputT]

  #

- def invoke(self, request: StreamRequestT) -> StreamResponse[V, OutputT, StreamOutputT]:
+ async def invoke(self, request: StreamRequestT) -> StreamResponse[V, OutputT, StreamOutputT]:
  with Resources.new() as rs:
- in_response = self._inner.invoke(self._process_request(request))
+ in_response = await self._inner.invoke(self._process_request(request))
  in_vs: ResponseGenerator[V, OutputT] = rs.enter_context(in_response.v)
  out_vs = self._process_vs(in_vs)

ommlds/server/server.py CHANGED
@@ -47,10 +47,10 @@ class McServerHandler(HttpHandler_):

  log.info('Server got prompt: %s', prompt)

- resp = self.llm.invoke(mc.ChatChoicesRequest(
+ resp = lang.sync_await(self.llm.invoke(mc.ChatChoicesRequest(
  [mc.UserMessage(prompt)],
  # Temperature(.1),
- ))
+ )))
  resp_txt = check.isinstance(resp.v[0].m.c, str)

  log.info('Server got response: %s', resp_txt)
ommlds/tools/git.py CHANGED
@@ -64,10 +64,10 @@ class OpenaiGitAiBackend(GitAiBackend['OpenaiGitAiBackend.Config']):

  llm = OpenaiChatChoicesService()

- resp = llm.invoke(mc.ChatChoicesRequest(
+ resp = lang.sync_await(llm.invoke(mc.ChatChoicesRequest(
  [mc.UserMessage(prompt)],
  # FIXME: *((MaxTokens(self._config.max_tokens),) if self._config.max_tokens is not None else ()),
- ))
+ )))
  return check.not_empty(check.isinstance(resp.v[0].m.c, str))


@@ -93,10 +93,10 @@ class MlxGitAiBackend(GitAiBackend['MlxGitAiBackend.Config']):

  def _run_prompt(self, prompt: str) -> str:
  with mc_mlx_chat.MlxChatChoicesService(mc.ModelRepo.parse(self._config.model)) as llm:
- resp = llm.invoke(mc.ChatChoicesRequest(
+ resp = lang.sync_await(llm.invoke(mc.ChatChoicesRequest(
  [mc.UserMessage(prompt)],
  # FIXME: *((MaxTokens(self._config.max_tokens),) if self._config.max_tokens is not None else ()),
- ))
+ )))
  text = check.not_empty(check.isinstance(resp.v[0].m.c, str))

  text = _strip_markdown_code_block(text)
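Synchronous call sites such as the server handler and the git tool above bridge to the now-async invoke with omlish's lang.sync_await. A rough standard-library illustration of that bridging pattern (this sync_await and _invoke_chat are stand-ins written for this sketch, not omlish's implementation):

    import asyncio
    import typing as ta

    T = ta.TypeVar('T')


    async def _invoke_chat(prompt: str) -> str:
        # Stand-in for an async service invoke(...) call.
        return f'response to: {prompt}'


    def sync_await(awaitable: ta.Awaitable[T]) -> T:
        # Drive a single awaitable to completion from synchronous code.
        async def run() -> T:
            return await awaitable
        return asyncio.run(run())


    print(sync_await(_invoke_chat('write a commit message')))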
{ommlds-0.0.0.dev441.dist-info → ommlds-0.0.0.dev442.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: ommlds
- Version: 0.0.0.dev441
+ Version: 0.0.0.dev442
  Summary: ommlds
  Author: wrmsr
  License-Expression: BSD-3-Clause
@@ -14,8 +14,8 @@ Classifier: Programming Language :: Python :: 3.13
  Requires-Python: >=3.13
  Description-Content-Type: text/markdown
  License-File: LICENSE
- Requires-Dist: omdev==0.0.0.dev441
- Requires-Dist: omlish==0.0.0.dev441
+ Requires-Dist: omdev==0.0.0.dev442
+ Requires-Dist: omlish==0.0.0.dev442
  Provides-Extra: all
  Requires-Dist: llama-cpp-python~=0.3; extra == "all"
  Requires-Dist: mlx~=0.29; extra == "all"
@@ -32,7 +32,7 @@ Requires-Dist: numpy>=1.26; extra == "all"
  Requires-Dist: pytesseract~=0.3; extra == "all"
  Requires-Dist: rapidocr-onnxruntime~=1.4; extra == "all"
  Requires-Dist: pillow~=11.3; extra == "all"
- Requires-Dist: ddgs~=9.5; extra == "all"
+ Requires-Dist: ddgs~=9.6; extra == "all"
  Requires-Dist: mwparserfromhell~=0.7; extra == "all"
  Requires-Dist: wikitextparser~=0.56; extra == "all"
  Requires-Dist: lxml>=5.3; python_version < "3.13" and extra == "all"
@@ -57,7 +57,7 @@ Requires-Dist: rapidocr-onnxruntime~=1.4; extra == "ocr"
  Provides-Extra: pillow
  Requires-Dist: pillow~=11.3; extra == "pillow"
  Provides-Extra: search
- Requires-Dist: ddgs~=9.5; extra == "search"
+ Requires-Dist: ddgs~=9.6; extra == "search"
  Provides-Extra: wiki
  Requires-Dist: mwparserfromhell~=0.7; extra == "wiki"
  Requires-Dist: wikitextparser~=0.56; extra == "wiki"