ommlds-0.0.0.dev467-py3-none-any.whl → ommlds-0.0.0.dev468-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


ommlds/.omlish-manifests.json CHANGED
@@ -170,6 +170,21 @@
   "attr": null,
   "file": "ommlds/minichain/backends/impls/llamacpp/chat.py",
   "line": 33,
+  "value": {
+   "!.minichain.backends.strings.manifests.BackendStringsManifest": {
+    "service_cls_names": [
+     "ChatChoicesService"
+    ],
+    "backend_name": "llamacpp",
+    "model_names": null
+   }
+  }
+ },
+ {
+  "module": ".minichain.backends.impls.llamacpp.chat",
+  "attr": null,
+  "file": "ommlds/minichain/backends/impls/llamacpp/chat.py",
+  "line": 42,
   "value": {
    "!.minichain.registries.manifests.RegistryManifest": {
     "module": "ommlds.minichain.backends.impls.llamacpp.chat",
@@ -200,6 +215,21 @@
   "attr": null,
   "file": "ommlds/minichain/backends/impls/llamacpp/stream.py",
   "line": 32,
+  "value": {
+   "!.minichain.backends.strings.manifests.BackendStringsManifest": {
+    "service_cls_names": [
+     "ChatChoicesStreamService"
+    ],
+    "backend_name": "llamacpp",
+    "model_names": null
+   }
+  }
+ },
+ {
+  "module": ".minichain.backends.impls.llamacpp.stream",
+  "attr": null,
+  "file": "ommlds/minichain/backends/impls/llamacpp/stream.py",
+  "line": 41,
   "value": {
    "!.minichain.registries.manifests.RegistryManifest": {
     "module": "ommlds.minichain.backends.impls.llamacpp.stream",
@@ -271,6 +301,52 @@
    }
   }
  },
+ {
+  "module": ".minichain.backends.impls.ollama.chat",
+  "attr": null,
+  "file": "ommlds/minichain/backends/impls/ollama/chat.py",
+  "line": 38,
+  "value": {
+   "!.minichain.backends.strings.manifests.BackendStringsManifest": {
+    "service_cls_names": [
+     "ChatChoicesService",
+     "ChatChoicesStreamService"
+    ],
+    "backend_name": "ollama",
+    "model_names": null
+   }
+  }
+ },
+ {
+  "module": ".minichain.backends.impls.ollama.chat",
+  "attr": null,
+  "file": "ommlds/minichain/backends/impls/ollama/chat.py",
+  "line": 93,
+  "value": {
+   "!.minichain.registries.manifests.RegistryManifest": {
+    "module": "ommlds.minichain.backends.impls.ollama.chat",
+    "attr": "OllamaChatChoicesService",
+    "name": "ollama",
+    "aliases": null,
+    "type": "ChatChoicesService"
+   }
+  }
+ },
+ {
+  "module": ".minichain.backends.impls.ollama.chat",
+  "attr": null,
+  "file": "ommlds/minichain/backends/impls/ollama/chat.py",
+  "line": 139,
+  "value": {
+   "!.minichain.registries.manifests.RegistryManifest": {
+    "module": "ommlds.minichain.backends.impls.ollama.chat",
+    "attr": "OllamaChatChoicesStreamService",
+    "name": "ollama",
+    "aliases": null,
+    "type": "ChatChoicesStreamService"
+   }
+  }
+ },
  {
   "module": ".minichain.backends.impls.openai.chat",
   "attr": null,
@@ -450,7 +526,23 @@
   "module": ".minichain.backends.impls.transformers.transformers",
   "attr": null,
   "file": "ommlds/minichain/backends/impls/transformers/transformers.py",
-  "line": 43,
+  "line": 46,
+  "value": {
+   "!.minichain.backends.strings.manifests.BackendStringsManifest": {
+    "service_cls_names": [
+     "ChatChoicesService",
+     "ChatChoicesStreamService"
+    ],
+    "backend_name": "transformers",
+    "model_names": null
+   }
+  }
+ },
+ {
+  "module": ".minichain.backends.impls.transformers.transformers",
+  "attr": null,
+  "file": "ommlds/minichain/backends/impls/transformers/transformers.py",
+  "line": 62,
   "value": {
    "!.minichain.registries.manifests.RegistryManifest": {
     "module": "ommlds.minichain.backends.impls.transformers.transformers",
@@ -467,7 +559,7 @@
   "module": ".minichain.backends.impls.transformers.transformers",
   "attr": null,
   "file": "ommlds/minichain/backends/impls/transformers/transformers.py",
-  "line": 131,
+  "line": 189,
   "value": {
    "!.minichain.registries.manifests.RegistryManifest": {
     "module": "ommlds.minichain.backends.impls.transformers.transformers",
@@ -480,6 +572,21 @@
    }
   }
  },
+ {
+  "module": ".minichain.backends.impls.transformers.transformers",
+  "attr": null,
+  "file": "ommlds/minichain/backends/impls/transformers/transformers.py",
+  "line": 219,
+  "value": {
+   "!.minichain.registries.manifests.RegistryManifest": {
+    "module": "ommlds.minichain.backends.impls.transformers.transformers",
+    "attr": "TransformersChatChoicesStreamService",
+    "name": "transformers",
+    "aliases": null,
+    "type": "ChatChoicesStreamService"
+   }
+  }
+ },
  {
   "module": ".minichain.chat.choices.services",
   "attr": null,
ommlds/__about__.py CHANGED
@@ -37,8 +37,8 @@ class Project(ProjectBase):
         ],
 
         'huggingface': [
-            'huggingface-hub ~= 0.35',
-            'datasets ~= 4.2',
+            'huggingface-hub ~= 0.36',
+            'datasets ~= 4.3',
         ],
 
         'numpy': [
ommlds/backends/ollama/protocol.py ADDED
@@ -0,0 +1,170 @@
+"""
+https://docs.ollama.com/api
+"""
+import typing as ta
+
+from omlish import dataclasses as dc
+from omlish import lang
+
+
+##
+
+
+@dc.dataclass(frozen=True, kw_only=True)
+@dc.extra_class_params(default_repr_fn=dc.opt_repr)
+class Options:
+    # loading
+    numa: bool | None = None
+    num_ctx: int | None = None
+    num_batch: int | None = None
+    num_gpu: int | None = None
+    main_gpu: int | None = None
+    low_vram: bool | None = None
+    f16_kv: bool | None = None
+    logits_all: bool | None = None
+    vocab_only: bool | None = None
+    use_mmap: bool | None = None
+    use_mlock: bool | None = None
+    embedding_only: bool | None = None
+    num_thread: int | None = None
+
+    # querying
+    num_keep: int | None = None
+    seed: int | None = None
+    num_predict: int | None = None
+    top_k: int | None = None
+    top_p: float | None = None
+    tfs_z: float | None = None
+    typical_p: float | None = None
+    repeat_last_n: int | None = None
+    temperature: float | None = None
+    repeat_penalty: float | None = None
+    presence_penalty: float | None = None
+    frequency_penalty: float | None = None
+    mirostat: int | None = None
+    mirostat_tau: float | None = None
+    mirostat_eta: float | None = None
+    penalize_newline: bool | None = None
+    stop: ta.Sequence[str] | None = None
+
+
+##
+
+
+@dc.dataclass(frozen=True, kw_only=True)
+class BaseRequest(lang.Abstract):
+    model: str
+
+
+@dc.dataclass(frozen=True, kw_only=True)
+class BaseStreamableRequest(BaseRequest, lang.Abstract):
+    stream: bool | None = None
+
+
+##
+
+
+@dc.dataclass(frozen=True, kw_only=True)
+class BaseGenerateRequest(BaseStreamableRequest, lang.Abstract):
+    options: Options | None = None
+    format: ta.Literal['', 'json'] | None = None  # TODO: jsonschema
+    keep_alive: float | str | None = None
+
+
+@dc.dataclass(frozen=True, kw_only=True)
+@dc.extra_class_params(default_repr_fn=dc.opt_repr)
+class GenerateRequest(BaseGenerateRequest):
+    prompt: str | None = None
+    suffix: str | None = None
+    system: str | None = None
+    template: str | None = None
+    context: ta.Sequence[int] | None = None
+    raw: bool | None = None
+    images: ta.Sequence[bytes] | None = None
+    think: bool | ta.Literal['low', 'medium', 'high'] | None = None
+
+
+#
+
+
+@dc.dataclass(frozen=True, kw_only=True)
+class BaseGenerateResponse(lang.Abstract):
+    model: str | None = None
+    created_at: str | None = None
+    done: bool | None = None
+    done_reason: str | None = None
+    total_duration: int | None = None
+    load_duration: int | None = None
+    prompt_eval_count: int | None = None
+    prompt_eval_duration: int | None = None
+    eval_count: int | None = None
+    eval_duration: int | None = None
+
+
+@dc.dataclass(frozen=True, kw_only=True)
+@dc.extra_class_params(default_repr_fn=dc.opt_repr)
+class GenerateResponse(BaseGenerateResponse):
+    response: str
+    thinking: str | None = None
+    context: ta.Sequence[int] | None = None
+
+
+##
+
+
+Role: ta.TypeAlias = ta.Literal[
+    'system',
+    'user',
+    'assistant',
+    'tool',
+]
+
+
+@dc.dataclass(frozen=True, kw_only=True)
+@dc.extra_class_params(default_repr_fn=dc.opt_repr)
+class Message:
+    role: Role
+    content: str | None = None
+    thinking: str | None = None
+    images: ta.Sequence[bytes] | None = None
+    tool_name: str | None = None
+
+    @dc.dataclass(frozen=True, kw_only=True)
+    class ToolCall:
+        @dc.dataclass(frozen=True, kw_only=True)
+        class Function:
+            name: str
+            arguments: ta.Mapping[str, ta.Any]
+
+        function: Function
+
+    tool_calls: ta.Sequence[ToolCall] | None = None
+
+
+@dc.dataclass(frozen=True, kw_only=True)
+@dc.extra_class_params(default_repr_fn=dc.opt_repr)
+class Tool:
+    type: str | None = 'function'
+
+    @dc.dataclass(frozen=True, kw_only=True)
+    @dc.extra_class_params(default_repr_fn=dc.opt_repr)
+    class Function:
+        name: str | None = None
+        description: str | None = None
+        parameters: ta.Any | None = None
+
+    function: Function | None = None
+
+
+@dc.dataclass(frozen=True, kw_only=True)
+@dc.extra_class_params(default_repr_fn=dc.opt_repr)
+class ChatRequest(BaseGenerateRequest):
+    messages: ta.Sequence[Message] | None = None
+    tools: ta.Sequence[Tool] | None = None
+    think: bool | ta.Literal['low', 'medium', 'high'] | None = None
+
+
+@dc.dataclass(frozen=True, kw_only=True)
+@dc.extra_class_params(default_repr_fn=dc.opt_repr)
+class ChatResponse(BaseGenerateResponse):
+    message: Message
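
Because these are ordinary `omlish` dataclasses, building a request is plain construction, and the ollama backend further down marshals the result with `msh.marshal(...)` before POSTing it. A minimal sketch (the model name and prompt are illustrative):

from ommlds.backends.ollama import protocol as pt

req = pt.ChatRequest(
    model='llama3.2',
    messages=[
        pt.Message(role='system', content='Answer tersely.'),
        pt.Message(role='user', content='Name one prime number.'),
    ],
    options=pt.Options(temperature=0.0, num_predict=32),
    stream=False,
)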
ommlds/backends/transformers/streamers.py ADDED
@@ -0,0 +1,73 @@
+import functools
+import typing as ta
+
+import transformers as tfm
+
+
+T = ta.TypeVar('T')
+P = ta.ParamSpec('P')
+
+
+##
+
+
+class CancellableTextStreamer(tfm.TextStreamer):
+    class Callback(ta.Protocol):
+        def __call__(self, text: str, *, stream_end: bool) -> None: ...
+
+    def __init__(
+        self,
+        tokenizer: tfm.AutoTokenizer,
+        callback: Callback,
+        *,
+        skip_prompt: bool = False,
+        **decode_kwargs: ta.Any,
+    ) -> None:
+        super().__init__(
+            tokenizer,
+            skip_prompt=skip_prompt,
+            **decode_kwargs,
+        )
+
+        self.callback = callback
+
+    _cancelled: bool = False
+
+    #
+
+    @property
+    def cancelled(self) -> bool:
+        return self._cancelled
+
+    def cancel(self) -> None:
+        self._cancelled = True
+
+    class Cancelled(BaseException):  # noqa
+        pass
+
+    @staticmethod
+    def ignoring_cancelled(fn: ta.Callable[P, T]) -> ta.Callable[P, T | None]:
+        @functools.wraps(fn)
+        def inner(*args, **kwargs):
+            try:
+                return fn(*args, **kwargs)
+            except CancellableTextStreamer.Cancelled:
+                pass
+
+        return inner
+
+    def _maybe_raise_cancelled(self) -> None:
+        if self._cancelled:
+            raise CancellableTextStreamer.Cancelled
+
+    #
+
+    def put(self, value: ta.Any) -> None:
+        self._maybe_raise_cancelled()
+        super().put(value)
+        self._maybe_raise_cancelled()
+
+    def on_finalized_text(self, text: str, stream_end: bool = False) -> None:
+        self._maybe_raise_cancelled()
+        self.callback(text, stream_end=stream_end)
+        self._maybe_raise_cancelled()
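
A minimal usage sketch of the streamer (the model name is illustrative; `streamer=` is the standard `transformers` generation hook this class plugs into). Calling `cancel()` from inside the callback raises `Cancelled` out of `generate`, and `ignoring_cancelled` swallows it:

import transformers as tfm

from ommlds.backends.transformers.streamers import CancellableTextStreamer

tok = tfm.AutoTokenizer.from_pretrained('gpt2')  # illustrative model
model = tfm.AutoModelForCausalLM.from_pretrained('gpt2')

pieces: list[str] = []

def on_text(text: str, *, stream_end: bool) -> None:
    pieces.append(text)
    if sum(map(len, pieces)) > 200:  # arbitrary cutoff for the example
        streamer.cancel()

streamer = CancellableTextStreamer(tok, on_text, skip_prompt=True)

run = CancellableTextStreamer.ignoring_cancelled(model.generate)
run(**tok('Once upon a time', return_tensors='pt'), streamer=streamer, max_new_tokens=128)

print(''.join(pieces))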
ommlds/minichain/__init__.py CHANGED
@@ -558,6 +558,10 @@ with _lang.auto_proxy_init(
    )
 
    from .standard import (  # noqa
+        Device,
+
+        ApiUrl,
+
        ApiKey,
 
        DefaultOptions,
ommlds/minichain/backends/impls/llamacpp/chat.py CHANGED
@@ -30,6 +30,15 @@ from .format import get_msg_content
 ##
 
 
+# @omlish-manifest $.minichain.backends.strings.manifests.BackendStringsManifest(
+#     ['ChatChoicesService'],
+#     'llamacpp',
+# )
+
+
+##
+
+
 # @omlish-manifest $.minichain.registries.manifests.RegistryManifest(
 #     name='llamacpp',
 #     type='ChatChoicesService',
ommlds/minichain/backends/impls/llamacpp/stream.py CHANGED
@@ -29,6 +29,15 @@ from .format import get_msg_content
 ##
 
 
+# @omlish-manifest $.minichain.backends.strings.manifests.BackendStringsManifest(
+#     ['ChatChoicesStreamService'],
+#     'llamacpp',
+# )
+
+
+##
+
+
 # @omlish-manifest $.minichain.registries.manifests.RegistryManifest(
 #     name='llamacpp',
 #     type='ChatChoicesStreamService',
@@ -76,18 +85,25 @@ class LlamacppChatChoicesStreamService(lang.ExitStacked):
            rs.enter_context(lang.defer(close_output))
 
            async def inner(sink: StreamResponseSink[AiChoicesDeltas]) -> ta.Sequence[ChatChoicesOutputs] | None:
+                last_role: ta.Any = None
+
                for chunk in output:
                    check.state(chunk['object'] == 'chat.completion.chunk')
-                    l: list[AiChoiceDeltas] = []
-                    for choice in chunk['choices']:
-                        # FIXME: check role is assistant
-                        # FIXME: stop reason
-                        if not (delta := choice.get('delta', {})):
-                            continue
-                        if not (content := delta.get('content', '')):
-                            continue
-                        l.append(AiChoiceDeltas([ContentAiChoiceDelta(content)]))
-                    await sink.emit(AiChoicesDeltas(l))
+
+                    choice = check.single(chunk['choices'])
+
+                    if not (delta := choice.get('delta', {})):
+                        continue
+
+                    # FIXME: check role is assistant
+                    if (role := delta.get('role')) != last_role:
+                        last_role = role
+
+                    # FIXME: stop reason
+
+                    if (content := delta.get('content', '')):
+                        await sink.emit(AiChoicesDeltas([AiChoiceDeltas([ContentAiChoiceDelta(content)])]))
+
                return None
 
        return await new_stream_response(rs, inner)
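
The rewrite replaces the per-chunk choice loop with `omlish`'s `check.single`, which returns the sole element of a sequence and raises otherwise, encoding the assumption that llama.cpp emits exactly one choice per chunk. A tiny sketch of the behavior relied on (the sample chunk is illustrative):

from omlish import check

chunk = {'choices': [{'delta': {'role': 'assistant', 'content': 'Hi'}}]}

choice = check.single(chunk['choices'])  # raises if zero or multiple choices
assert choice['delta']['content'] == 'Hi'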
ommlds/minichain/backends/impls/ollama/chat.py ADDED
@@ -0,0 +1,196 @@
+import typing as ta
+
+from omlish import check
+from omlish import lang
+from omlish import marshal as msh
+from omlish import typedvalues as tv
+from omlish.formats import json
+from omlish.http import all as http
+from omlish.io.buffers import DelimitingBuffer
+
+from .....backends.ollama import protocol as pt
+from ....chat.choices.services import ChatChoicesOutputs
+from ....chat.choices.services import ChatChoicesRequest
+from ....chat.choices.services import ChatChoicesResponse
+from ....chat.choices.services import static_check_is_chat_choices_service
+from ....chat.choices.types import AiChoice
+from ....chat.messages import AiMessage
+from ....chat.messages import AnyAiMessage
+from ....chat.messages import Message
+from ....chat.messages import SystemMessage
+from ....chat.messages import UserMessage
+from ....chat.stream.services import ChatChoicesStreamRequest
+from ....chat.stream.services import ChatChoicesStreamResponse
+from ....chat.stream.services import static_check_is_chat_choices_stream_service
+from ....chat.stream.types import AiChoiceDeltas
+from ....chat.stream.types import AiChoicesDeltas
+from ....chat.stream.types import ContentAiChoiceDelta
+from ....models.configs import ModelName
+from ....resources import UseResources
+from ....standard import ApiUrl
+from ....stream.services import StreamResponseSink
+from ....stream.services import new_stream_response
+
+
+##
+
+
+# @omlish-manifest $.minichain.backends.strings.manifests.BackendStringsManifest(
+#     [
+#         'ChatChoicesService',
+#         'ChatChoicesStreamService',
+#     ],
+#     'ollama',
+# )
+
+
+##
+
+
+class BaseOllamaChatChoicesService(lang.Abstract):
+    DEFAULT_API_URL: ta.ClassVar[ApiUrl] = ApiUrl('http://localhost:11434/api')
+    DEFAULT_MODEL_NAME: ta.ClassVar[ModelName] = ModelName('llama3.2')
+
+    def __init__(
+        self,
+        *configs: ApiUrl | ModelName,
+    ) -> None:
+        super().__init__()
+
+        with tv.consume(*configs) as cc:
+            self._api_url = cc.pop(self.DEFAULT_API_URL)
+            self._model_name = cc.pop(self.DEFAULT_MODEL_NAME)
+
+    #
+
+    ROLE_MAP: ta.ClassVar[ta.Mapping[type[Message], pt.Role]] = {
+        SystemMessage: 'system',
+        UserMessage: 'user',
+        AiMessage: 'assistant',
+    }
+
+    @classmethod
+    def _get_message_content(cls, m: Message) -> str | None:
+        if isinstance(m, (AiMessage, UserMessage, SystemMessage)):
+            return check.isinstance(m.c, str)
+        else:
+            raise TypeError(m)
+
+    @classmethod
+    def _build_request_messages(cls, mc_msgs: ta.Iterable[Message]) -> ta.Sequence[pt.Message]:
+        messages: list[pt.Message] = []
+        for m in mc_msgs:
+            messages.append(pt.Message(
+                role=cls.ROLE_MAP[type(m)],
+                content=cls._get_message_content(m),
+            ))
+        return messages
+
+
+##
+
+
+# @omlish-manifest $.minichain.registries.manifests.RegistryManifest(
+#     name='ollama',
+#     type='ChatChoicesService',
+# )
+@static_check_is_chat_choices_service
+class OllamaChatChoicesService(BaseOllamaChatChoicesService):
+    async def invoke(
+        self,
+        request: ChatChoicesRequest,
+    ) -> ChatChoicesResponse:
+        messages = self._build_request_messages(request.v)
+
+        a_req = pt.ChatRequest(
+            model=self._model_name.v,
+            messages=messages,
+            # tools=tools or None,
+            stream=False,
+        )
+
+        raw_request = msh.marshal(a_req)
+
+        raw_response = http.request(
+            self._api_url.v.removesuffix('/') + '/chat',
+            data=json.dumps(raw_request).encode('utf-8'),
+        )
+
+        json_response = json.loads(check.not_none(raw_response.data).decode('utf-8'))
+
+        resp = msh.unmarshal(json_response, pt.ChatResponse)
+
+        out: list[AnyAiMessage] = []
+        if resp.message.role == 'assistant':
+            out.append(AiMessage(
+                check.not_none(resp.message.content),
+            ))
+        else:
+            raise TypeError(resp.message.role)
+
+        return ChatChoicesResponse([
+            AiChoice(out),
+        ])
+
+
+##
+
+
+# @omlish-manifest $.minichain.registries.manifests.RegistryManifest(
+#     name='ollama',
+#     type='ChatChoicesStreamService',
+# )
+@static_check_is_chat_choices_stream_service
+class OllamaChatChoicesStreamService(BaseOllamaChatChoicesService):
+    READ_CHUNK_SIZE = 64 * 1024
+
+    async def invoke(
+        self,
+        request: ChatChoicesStreamRequest,
+    ) -> ChatChoicesStreamResponse:
+        messages = self._build_request_messages(request.v)
+
+        a_req = pt.ChatRequest(
+            model=self._model_name.v,
+            messages=messages,
+            # tools=tools or None,
+            stream=True,
+        )
+
+        raw_request = msh.marshal(a_req)
+
+        http_request = http.HttpRequest(
+            self._api_url.v.removesuffix('/') + '/chat',
+            data=json.dumps(raw_request).encode('utf-8'),
+        )
+
+        async with UseResources.or_new(request.options) as rs:
+            http_client = rs.enter_context(http.client())
+            http_response = rs.enter_context(http_client.stream_request(http_request))
+
+            async def inner(sink: StreamResponseSink[AiChoicesDeltas]) -> ta.Sequence[ChatChoicesOutputs] | None:
+                db = DelimitingBuffer([b'\r', b'\n', b'\r\n'])
+                while True:
+                    # FIXME: read1 not on response stream protocol
+                    b = http_response.stream.read1(self.READ_CHUNK_SIZE)  # type: ignore[attr-defined]
+                    for l in db.feed(b):
+                        if isinstance(l, DelimitingBuffer.Incomplete):
+                            # FIXME: handle
+                            return []
+
+                        lj = json.loads(l.decode('utf-8'))
+                        lp: pt.ChatResponse = msh.unmarshal(lj, pt.ChatResponse)
+
+                        check.state(lp.message.role == 'assistant')
+                        check.none(lp.message.tool_name)
+                        check.state(not lp.message.tool_calls)
+
+                        if (c := lp.message.content):
+                            await sink.emit(AiChoicesDeltas([AiChoiceDeltas([ContentAiChoiceDelta(
+                                c,
+                            )])]))
+
+                    if not b:
+                        return []
+
+            return await new_stream_response(rs, inner)
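
The streaming service splits Ollama's newline-delimited JSON with `omlish.io.buffers.DelimitingBuffer`. A standalone sketch of how it re-chunks arbitrary byte reads into whole lines, matching the usage above (the sample bytes are illustrative):

from omlish.io.buffers import DelimitingBuffer

db = DelimitingBuffer([b'\r', b'\n', b'\r\n'])

# Feed bytes as they arrive, in whatever chunk sizes the socket yields:
for chunk in (b'{"a": 1}\n{"b"', b': 2}\n'):
    for line in db.feed(chunk):
        if isinstance(line, DelimitingBuffer.Incomplete):
            continue  # partial trailing data; more bytes needed
        print(line)  # b'{"a": 1}', then b'{"b": 2}'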
ommlds/minichain/backends/impls/transformers/transformers.py CHANGED
@@ -16,23 +16,42 @@ from ....chat.choices.services import ChatChoicesRequest
 from ....chat.choices.services import ChatChoicesResponse
 from ....chat.choices.services import static_check_is_chat_choices_service
 from ....chat.choices.types import AiChoice
+from ....chat.choices.types import ChatChoicesOutputs
 from ....chat.messages import AiMessage
 from ....chat.messages import Message
 from ....chat.messages import SystemMessage
 from ....chat.messages import ToolUseMessage
 from ....chat.messages import ToolUseResultMessage
 from ....chat.messages import UserMessage
+from ....chat.stream.services import ChatChoicesStreamRequest
+from ....chat.stream.services import ChatChoicesStreamResponse
+from ....chat.stream.services import static_check_is_chat_choices_stream_service
+from ....chat.stream.types import AiChoiceDeltas  # noqa
+from ....chat.stream.types import AiChoicesDeltas  # noqa
+from ....chat.stream.types import ContentAiChoiceDelta  # noqa
 from ....completion import CompletionRequest
 from ....completion import CompletionResponse
 from ....completion import static_check_is_completion_service
 from ....configs import Config
 from ....models.configs import ModelPath
+from ....resources import UseResources
+from ....stream.services import StreamResponseSink
+from ....stream.services import new_stream_response
 from ...impls.huggingface.configs import HuggingfaceHubToken
 
 
 ##
 
 
+# @omlish-manifest $.minichain.backends.strings.manifests.BackendStringsManifest(
+#     ['ChatChoicesService', 'ChatChoicesStreamService'],
+#     'transformers',
+# )
+
+
+##
+
+
 class TransformersPipelineKwargs(Config, tv.ScalarTypedValue[ta.Mapping[str, ta.Any]]):
     pass
 
@@ -128,13 +147,10 @@ def build_chat_message(m: Message) -> ta.Mapping[str, ta.Any]:
     raise TypeError(m)
 
 
-# @omlish-manifest $.minichain.registries.manifests.RegistryManifest(
-#     name='transformers',
-#     aliases=['tfm'],
-#     type='ChatChoicesService',
-# )
-@static_check_is_chat_choices_service
-class TransformersChatChoicesService(lang.ExitStacked):
+##
+
+
+class BaseTransformersChatChoicesService(lang.ExitStacked):
     DEFAULT_MODEL: ta.ClassVar[str] = (
         'meta-llama/Llama-3.2-1B-Instruct'
     )
@@ -166,16 +182,79 @@ class TransformersChatChoicesService(lang.ExitStacked):
            **pkw,
        )
 
+
+##
+
+
+# @omlish-manifest $.minichain.registries.manifests.RegistryManifest(
+#     name='transformers',
+#     aliases=['tfm'],
+#     type='ChatChoicesService',
+# )
+@static_check_is_chat_choices_service
+class TransformersChatChoicesService(BaseTransformersChatChoicesService):
    async def invoke(self, request: ChatChoicesRequest) -> ChatChoicesResponse:
        check.empty(request.options)
 
        pipeline = self._load_pipeline()
 
-        output = pipeline(
-            [
-                build_chat_message(m)
-                for m in request.v
-            ],
-        )
+        inputs = [
+            build_chat_message(m)
+            for m in request.v
+        ]
+
+        outputs = pipeline(inputs)
+
+        gts = check.single(outputs)['generated_text']
+        ugt, agt = gts
+        check.state(ugt['role'] == 'user')
+        check.state(agt['role'] == 'assistant')
+
+        return ChatChoicesResponse([AiChoice([AiMessage(agt['content'])])])
+
+
+##
+
+
+# @omlish-manifest $.minichain.registries.manifests.RegistryManifest(
+#     name='transformers',
+#     type='ChatChoicesStreamService',
+# )
+@static_check_is_chat_choices_stream_service
+class TransformersChatChoicesStreamService(BaseTransformersChatChoicesService):
+    async def invoke(self, request: ChatChoicesStreamRequest) -> ChatChoicesStreamResponse:
+        check.empty(request.options)
 
-        return ChatChoicesResponse([AiChoice([output])])
+        pipeline = self._load_pipeline()  # noqa
+
+        inputs = [  # noqa
+            build_chat_message(m)
+            for m in request.v
+        ]
+
+        async with UseResources.or_new(request.options) as rs:
+            async def inner(sink: StreamResponseSink[AiChoicesDeltas]) -> ta.Sequence[ChatChoicesOutputs] | None:
+                # last_role: ta.Any = None
+                #
+                # for chunk in output:
+                #     check.state(chunk['object'] == 'chat.completion.chunk')
+                #
+                #     choice = check.single(chunk['choices'])
+                #
+                #     if not (delta := choice.get('delta', {})):
+                #         continue
+                #
+                #     # FIXME: check role is assistant
+                #     if (role := delta.get('role')) != last_role:
+                #         last_role = role
+                #
+                #     # FIXME: stop reason
+                #
+                #     if (content := delta.get('content', '')):
+                #         await sink.emit(AiChoicesDeltas([AiChoiceDeltas([ContentAiChoiceDelta(content)])]))
+                #
+                # return None
+
+                raise NotImplementedError
+
+            return await new_stream_response(rs, inner)
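
The streaming body is still a stub (`raise NotImplementedError`), with intended delta-emission logic left commented out. One plausible direction, given the `CancellableTextStreamer` added in this release, is to run `generate` on a worker thread and bridge finalized text back through a queue; this is purely a hypothetical sketch of that wiring, not what the package ships:

import queue
import threading
import typing as ta

from ommlds.backends.transformers.streamers import CancellableTextStreamer

def iter_generated_text(model: ta.Any, tokenizer: ta.Any, inputs: ta.Any) -> ta.Iterator[str]:
    q: queue.Queue[str | None] = queue.Queue()

    def on_text(text: str, *, stream_end: bool) -> None:
        if text:
            q.put(text)
        if stream_end:
            q.put(None)  # sentinel: generation finished

    streamer = CancellableTextStreamer(tokenizer, on_text, skip_prompt=True)
    run = CancellableTextStreamer.ignoring_cancelled(model.generate)

    t = threading.Thread(target=lambda: run(**inputs, streamer=streamer))
    t.start()
    try:
        while (piece := q.get()) is not None:
            yield piece
    finally:
        streamer.cancel()  # abort generation if the consumer stops early
        t.join()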
ommlds/minichain/standard.py CHANGED
@@ -25,6 +25,13 @@ class Device(tv.UniqueScalarTypedValue[ta.Any], Config):
 ##
 
 
+class ApiUrl(tv.UniqueScalarTypedValue[str], Config):
+    pass
+
+
+##
+
+
 @dc.dataclass(frozen=True)
 class SecretConfig(Config, lang.Abstract):
     v: sec.SecretRefOrStr = dc.field() | sec.secret_field
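
`ApiUrl` joins `Device` as a unique scalar typed value, so services consume it by type from positional configs. A minimal sketch against the new Ollama backend above (both values match the class defaults, so either may be omitted):

from ommlds.minichain.backends.impls.ollama.chat import OllamaChatChoicesService
from ommlds.minichain.models.configs import ModelName
from ommlds.minichain.standard import ApiUrl

svc = OllamaChatChoicesService(
    ApiUrl('http://localhost:11434/api'),
    ModelName('llama3.2'),
)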
ommlds-0.0.0.dev468.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ommlds
-Version: 0.0.0.dev467
+Version: 0.0.0.dev468
 Summary: ommlds
 Author: wrmsr
 License-Expression: BSD-3-Clause
@@ -14,8 +14,8 @@ Classifier: Programming Language :: Python :: 3.13
 Requires-Python: >=3.13
 Description-Content-Type: text/markdown
 License-File: LICENSE
-Requires-Dist: omdev==0.0.0.dev467
-Requires-Dist: omlish==0.0.0.dev467
+Requires-Dist: omdev==0.0.0.dev468
+Requires-Dist: omlish==0.0.0.dev468
 Provides-Extra: all
 Requires-Dist: llama-cpp-python~=0.3; extra == "all"
 Requires-Dist: mlx~=0.29; extra == "all"
@@ -26,8 +26,8 @@ Requires-Dist: tokenizers~=0.22; extra == "all"
 Requires-Dist: torch~=2.9; extra == "all"
 Requires-Dist: transformers~=4.57; extra == "all"
 Requires-Dist: sentence-transformers~=5.1; extra == "all"
-Requires-Dist: huggingface-hub~=0.35; extra == "all"
-Requires-Dist: datasets~=4.2; extra == "all"
+Requires-Dist: huggingface-hub~=0.36; extra == "all"
+Requires-Dist: datasets~=4.3; extra == "all"
 Requires-Dist: numpy>=1.26; extra == "all"
 Requires-Dist: pytesseract~=0.3; extra == "all"
 Requires-Dist: rapidocr-onnxruntime~=1.4; extra == "all"
@@ -47,8 +47,8 @@ Requires-Dist: torch~=2.9; extra == "backends"
 Requires-Dist: transformers~=4.57; extra == "backends"
 Requires-Dist: sentence-transformers~=5.1; extra == "backends"
 Provides-Extra: huggingface
-Requires-Dist: huggingface-hub~=0.35; extra == "huggingface"
-Requires-Dist: datasets~=4.2; extra == "huggingface"
+Requires-Dist: huggingface-hub~=0.36; extra == "huggingface"
+Requires-Dist: datasets~=4.3; extra == "huggingface"
 Provides-Extra: numpy
 Requires-Dist: numpy>=1.26; extra == "numpy"
 Provides-Extra: ocr
ommlds-0.0.0.dev468.dist-info/RECORD CHANGED
@@ -1,5 +1,5 @@
-ommlds/.omlish-manifests.json,sha256=MyJQsh5T1CMMXcGdxwqI9abQ8-j-ZlRGRluiikbeKRY,18414
-ommlds/__about__.py,sha256=uAJgr2I_m_oZPlV5P8XLFeYpBlEM-DdzeyF6O5OK_qs,1759
+ommlds/.omlish-manifests.json,sha256=u1WF90X6xpzZW21a4h5zzPyP4a3T30V08RjQz5HGABM,21555
+ommlds/__about__.py,sha256=t2rQF0yXpWFcCb2dvgzGR3I35HKGvGSn-EfhaUWVl5s,1759
 ommlds/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ommlds/huggingface.py,sha256=JfEyfKOxU3-SY_ojtXBJFNeD-NIuKjvMe3GL3e93wNA,1175
 ommlds/_hacks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -37,6 +37,8 @@ ommlds/backends/mlx/tokenization/detokenization/base.py,sha256=Tezf8Anh-w7BxpNQs
 ommlds/backends/mlx/tokenization/detokenization/bpe.py,sha256=cIw6-r-cyXTfZdyfGRgohrElMIqeLKfMRb8R1H_56nY,3659
 ommlds/backends/mlx/tokenization/detokenization/naive.py,sha256=6L-SvphzP1z16cmVB4QC9VraF7khE8ZcvKqIwwFqN6U,1779
 ommlds/backends/mlx/tokenization/detokenization/spm.py,sha256=IYSnEm-C0z_o5TKLJE_Rj6P0nNd-prT6psVPKsERWAE,1751
+ommlds/backends/ollama/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ommlds/backends/ollama/protocol.py,sha256=1rBZOIb080MsWMfgU4d59wDQhW5EiyBYKgnFbBnLatg,4437
 ommlds/backends/openai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ommlds/backends/openai/protocol/__init__.py,sha256=gYPUQ94GfoIAgU5TGoaC0OVGkWuplrHg-s83ynT9f-4,1750
 ommlds/backends/openai/protocol/_common.py,sha256=r4EXmw1fBFHjU5vbWTDvlM_fsafdIVg3d3PNw4F9m-Q,313
@@ -75,6 +77,8 @@ ommlds/backends/torch/__init__.py,sha256=Id8dKbxMLlp3ux62ohu9JKoXPSrM0ZXUK0eCDTY
 ommlds/backends/torch/backends.py,sha256=Bo-ZdW1n9NswvptT8bL9CssEOKwusDuBMaXVjRS8zrA,3528
 ommlds/backends/torch/devices.py,sha256=KWkeyArPdUwVqckQTJPkN-4GQdv39cpOgCMv_XfkLkQ,776
 ommlds/backends/torch/purge.py,sha256=sp6XUxNLoVCepxIPKw3tevHn-cQqgorILvIQzixauiI,1834
+ommlds/backends/transformers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ommlds/backends/transformers/streamers.py,sha256=Hu_9lp_kUilKjOfs7Ixqr2NoA5FuRn2eRh8JdvaBDYc,1688
 ommlds/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ommlds/cli/__main__.py,sha256=1ffCb0fcUOJMzxROJmJRXQ8PSOVYv7KrcuBtT95cf0c,140
 ommlds/cli/inject.py,sha256=WhTDabJz9b1NRRHVH-UyVN5nj6UncvIeTvgkGrcE9vc,666
@@ -147,7 +151,7 @@ ommlds/cli/state/storage.py,sha256=tRPmgCANRrw7A5Qr700OaH58F6S96O37I8Ivrbo7_gI,3
 ommlds/datasets/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ommlds/datasets/lib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ommlds/datasets/lib/movies.py,sha256=LmdfoXsZU9XMM_r-sxCLv_s06BFzwWO4xUj6sc9XVcI,1961
-ommlds/minichain/__init__.py,sha256=EqNJpuMwqkkdlNmipjaoC30yAqH7c8oziszlkCcXBrQ,10982
+ommlds/minichain/__init__.py,sha256=5S2GfZW4qWF1fin2Ee8YHT4XuD_vEvtCVWTIXHOGrwo,11016
 ommlds/minichain/_marshal.py,sha256=n9PGWrHhvAmGIc7KDOYt3IF9Z6G0ncXskyICTp3Ji6k,1923
 ommlds/minichain/_typedvalues.py,sha256=Vl1Edt5khC0e5RPFBPmPCxn0IzrfVd0NHzAjAN2E6Kc,2183
 ommlds/minichain/completion.py,sha256=lQ0LfCIYZsvDqteHhhDIv16D2_gn_xMfEL0ouywE5Yo,1033
@@ -157,7 +161,7 @@ ommlds/minichain/json.py,sha256=0_5rV5Zi2qPOvXi2CLAc5DF7FN3jK3ABbjoKdjtTuVo,360
 ommlds/minichain/metadata.py,sha256=2jik8gEm_VMnknPuPwqRssTg0MClRFUrXz_IsyEgUt4,878
 ommlds/minichain/resources.py,sha256=HfcydnyFmXVRspYw-32-lvM_OfrZQdPEebAt3ivLev0,4436
 ommlds/minichain/search.py,sha256=azRzWcYhcm9IgSHquqLwtbwowtYCRAtPLSm7Gvt9iNo,1262
-ommlds/minichain/standard.py,sha256=uKXvdUNLxdUu7suCBsVOjJtnYVC2hjD_tmz3Ra7H6Jg,2510
+ommlds/minichain/standard.py,sha256=cGXaGtC5iM9Q2lCcbhLtvEcPGKhcJUIh3UWyNgOssRM,2580
 ommlds/minichain/types.py,sha256=K6RRjpUi17UEG0cqPrrvbVANU0iRVh3WLiH-y6oEWFI,414
 ommlds/minichain/utils.py,sha256=NTsBu_pSZnLdZc1R1Se70rb_9J-IoB6VRwjhwzh3PwY,490
 ommlds/minichain/backends/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -185,12 +189,14 @@ ommlds/minichain/backends/impls/huggingface/__init__.py,sha256=47DEQpj8HBSa-_TIm
 ommlds/minichain/backends/impls/huggingface/configs.py,sha256=6jsBtPNXOP57PcpxNTVLGWLc-18Iwn_lDbGouwCJTIQ,258
 ommlds/minichain/backends/impls/huggingface/repos.py,sha256=8BDxJmra9elSQL2vzp2nr2p4Hpq56A3zTk7hTTnfJU4,861
 ommlds/minichain/backends/impls/llamacpp/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-ommlds/minichain/backends/impls/llamacpp/chat.py,sha256=YeBzlA_3gcuF6KF0HIE7abUp28_o1Kil-SujyQNAHyE,5508
+ommlds/minichain/backends/impls/llamacpp/chat.py,sha256=J6Jslx9atAtWvLdrVtvRboQUBzRX7Z5aHlo0dK5X78A,5649
 ommlds/minichain/backends/impls/llamacpp/completion.py,sha256=oJ2I6wUoIPXYLm9Vc7dwOPgqbevatTjNBZ-jXeM24tQ,2372
 ommlds/minichain/backends/impls/llamacpp/format.py,sha256=fcLMwk7r7FbNrYCH39G3fDRInKvlPIqcoxyLj95CooA,778
-ommlds/minichain/backends/impls/llamacpp/stream.py,sha256=uGog3xPNqCjGgyZjXEjhlxKbIbakWbapjANAEsmW-U4,3378
+ommlds/minichain/backends/impls/llamacpp/stream.py,sha256=uzrXr2HhshgFe3Z0g8KTPc6Dr2kPsyxZabIy2d6IOBg,3547
 ommlds/minichain/backends/impls/mlx/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ommlds/minichain/backends/impls/mlx/chat.py,sha256=sMlhgiFZrxAC-kKkLSJ6c-2uJn0IHZXH4EiPET_-CKI,7458
+ommlds/minichain/backends/impls/ollama/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ommlds/minichain/backends/impls/ollama/chat.py,sha256=UK19riOph-ptIz9zW7PucGWvVEtWHOHvwp7hoKurDNw,6393
 ommlds/minichain/backends/impls/openai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ommlds/minichain/backends/impls/openai/chat.py,sha256=eMRjxPNrzrRjaw83LJuYzP9DGvwGyY2ObJSZub4Z9bY,2658
 ommlds/minichain/backends/impls/openai/completion.py,sha256=0XTC08mZzbW23Y2DNW2xfRR0eDX4nTyejF8CR1BdHZs,1756
@@ -207,7 +213,7 @@ ommlds/minichain/backends/impls/tokenizers/tokens.py,sha256=_8Q49k5YroG5wQI0cuK6
 ommlds/minichain/backends/impls/transformers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ommlds/minichain/backends/impls/transformers/sentence.py,sha256=1bFJ-ND3MOkj7mNsPuISrQCpqTs7npmmNmYcc2go-Fk,1393
 ommlds/minichain/backends/impls/transformers/tokens.py,sha256=uS3-IWOJRUMBfPDVRrp3SCaXdE1yzEdKHQcyv0JZQIw,2089
-ommlds/minichain/backends/impls/transformers/transformers.py,sha256=Bb1RnvDlo8bzu24ByhDacDC0sN7R7KYZnPZ9hjbViBg,5287
+ommlds/minichain/backends/impls/transformers/transformers.py,sha256=U4O-MiVH3dRXf-UNSoKZueZVM8XvAm2mMr30qQUHhFY,8000
 ommlds/minichain/backends/strings/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ommlds/minichain/backends/strings/manifests.py,sha256=kmlanVUAZqIh0P95Mm8H20e8ib3gEgYHHUlkCXDQGFk,413
 ommlds/minichain/backends/strings/parsing.py,sha256=2wChk9Z8fhqJTk8_91f8QFjKcSZygOQM_rVk-P4NnKw,1772
@@ -367,9 +373,9 @@ ommlds/wiki/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU
 ommlds/wiki/utils/io.py,sha256=UKgDJGtmpnWvIqVd2mJc2QNPOqlToEY1GEveNp6_pMo,7088
 ommlds/wiki/utils/progress.py,sha256=EhvKcMFYtsarCQhIahlO6f0SboyAKP3UwUyrnVnP-Vk,3222
 ommlds/wiki/utils/xml.py,sha256=vVV8Ctn13aaRM9eYfs9Wd6rHn5WOCEUzQ44fIhOvJdg,3754
-ommlds-0.0.0.dev467.dist-info/licenses/LICENSE,sha256=B_hVtavaA8zCYDW99DYdcpDLKz1n3BBRjZrcbv8uG8c,1451
-ommlds-0.0.0.dev467.dist-info/METADATA,sha256=NvYqf0PtfEdrj2en5RFFpTHB_cGjm41groVHgy54WZQ,3224
-ommlds-0.0.0.dev467.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-ommlds-0.0.0.dev467.dist-info/entry_points.txt,sha256=Z5YWtX7ClfiCKdW-dd_CSVvM0h4yQpJPi-2G3q6gNFo,35
-ommlds-0.0.0.dev467.dist-info/top_level.txt,sha256=Rbnk5d5wi58vnAXx13WFZqdQ4VX8hBCS2hEL3WeXOhY,7
-ommlds-0.0.0.dev467.dist-info/RECORD,,
+ommlds-0.0.0.dev468.dist-info/licenses/LICENSE,sha256=B_hVtavaA8zCYDW99DYdcpDLKz1n3BBRjZrcbv8uG8c,1451
+ommlds-0.0.0.dev468.dist-info/METADATA,sha256=k1H1yGwCqmZETx1eTEKBsNZ_whn2QWxWap9pdErGIkw,3224
+ommlds-0.0.0.dev468.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ommlds-0.0.0.dev468.dist-info/entry_points.txt,sha256=Z5YWtX7ClfiCKdW-dd_CSVvM0h4yQpJPi-2G3q6gNFo,35
+ommlds-0.0.0.dev468.dist-info/top_level.txt,sha256=Rbnk5d5wi58vnAXx13WFZqdQ4VX8hBCS2hEL3WeXOhY,7
+ommlds-0.0.0.dev468.dist-info/RECORD,,