ommlds-0.0.0.dev440-py3-none-any.whl → ommlds-0.0.0.dev480-py3-none-any.whl

Files changed (271)
  1. ommlds/.omlish-manifests.json +332 -35
  2. ommlds/__about__.py +15 -9
  3. ommlds/_hacks/__init__.py +4 -0
  4. ommlds/_hacks/funcs.py +110 -0
  5. ommlds/_hacks/names.py +158 -0
  6. ommlds/_hacks/params.py +73 -0
  7. ommlds/_hacks/patches.py +0 -3
  8. ommlds/backends/anthropic/protocol/_marshal.py +2 -2
  9. ommlds/backends/anthropic/protocol/sse/_marshal.py +1 -1
  10. ommlds/backends/anthropic/protocol/sse/assemble.py +23 -7
  11. ommlds/backends/anthropic/protocol/sse/events.py +13 -0
  12. ommlds/backends/anthropic/protocol/types.py +30 -9
  13. ommlds/backends/google/protocol/__init__.py +3 -0
  14. ommlds/backends/google/protocol/_marshal.py +16 -0
  15. ommlds/backends/google/protocol/types.py +626 -0
  16. ommlds/backends/groq/_marshal.py +23 -0
  17. ommlds/backends/groq/protocol.py +249 -0
  18. ommlds/backends/mlx/generation.py +1 -1
  19. ommlds/backends/mlx/loading.py +58 -1
  20. ommlds/backends/ollama/__init__.py +0 -0
  21. ommlds/backends/ollama/protocol.py +170 -0
  22. ommlds/backends/openai/protocol/__init__.py +9 -28
  23. ommlds/backends/openai/protocol/_common.py +18 -0
  24. ommlds/backends/openai/protocol/_marshal.py +27 -0
  25. ommlds/backends/openai/protocol/chatcompletion/chunk.py +58 -31
  26. ommlds/backends/openai/protocol/chatcompletion/contentpart.py +49 -44
  27. ommlds/backends/openai/protocol/chatcompletion/message.py +55 -43
  28. ommlds/backends/openai/protocol/chatcompletion/request.py +114 -66
  29. ommlds/backends/openai/protocol/chatcompletion/response.py +71 -45
  30. ommlds/backends/openai/protocol/chatcompletion/responseformat.py +27 -20
  31. ommlds/backends/openai/protocol/chatcompletion/tokenlogprob.py +16 -7
  32. ommlds/backends/openai/protocol/completionusage.py +24 -15
  33. ommlds/backends/tavily/__init__.py +0 -0
  34. ommlds/backends/tavily/protocol.py +301 -0
  35. ommlds/backends/tinygrad/models/llama3/__init__.py +22 -14
  36. ommlds/backends/transformers/__init__.py +0 -0
  37. ommlds/backends/transformers/filecache.py +109 -0
  38. ommlds/backends/transformers/streamers.py +73 -0
  39. ommlds/cli/asyncs.py +30 -0
  40. ommlds/cli/backends/catalog.py +93 -0
  41. ommlds/cli/backends/configs.py +9 -0
  42. ommlds/cli/backends/inject.py +31 -36
  43. ommlds/cli/backends/injection.py +16 -0
  44. ommlds/cli/backends/types.py +46 -0
  45. ommlds/cli/content/__init__.py +0 -0
  46. ommlds/cli/content/messages.py +34 -0
  47. ommlds/cli/content/strings.py +42 -0
  48. ommlds/cli/inject.py +15 -32
  49. ommlds/cli/inputs/__init__.py +0 -0
  50. ommlds/cli/inputs/asyncs.py +32 -0
  51. ommlds/cli/inputs/sync.py +75 -0
  52. ommlds/cli/main.py +270 -110
  53. ommlds/cli/rendering/__init__.py +0 -0
  54. ommlds/cli/rendering/configs.py +9 -0
  55. ommlds/cli/rendering/inject.py +31 -0
  56. ommlds/cli/rendering/markdown.py +52 -0
  57. ommlds/cli/rendering/raw.py +73 -0
  58. ommlds/cli/rendering/types.py +21 -0
  59. ommlds/cli/secrets.py +21 -0
  60. ommlds/cli/sessions/base.py +1 -1
  61. ommlds/cli/sessions/chat/chat/__init__.py +0 -0
  62. ommlds/cli/sessions/chat/chat/ai/__init__.py +0 -0
  63. ommlds/cli/sessions/chat/chat/ai/configs.py +11 -0
  64. ommlds/cli/sessions/chat/chat/ai/inject.py +74 -0
  65. ommlds/cli/sessions/chat/chat/ai/injection.py +14 -0
  66. ommlds/cli/sessions/chat/chat/ai/rendering.py +70 -0
  67. ommlds/cli/sessions/chat/chat/ai/services.py +79 -0
  68. ommlds/cli/sessions/chat/chat/ai/tools.py +44 -0
  69. ommlds/cli/sessions/chat/chat/ai/types.py +28 -0
  70. ommlds/cli/sessions/chat/chat/state/__init__.py +0 -0
  71. ommlds/cli/sessions/chat/chat/state/configs.py +11 -0
  72. ommlds/cli/sessions/chat/chat/state/inject.py +36 -0
  73. ommlds/cli/sessions/chat/chat/state/inmemory.py +33 -0
  74. ommlds/cli/sessions/chat/chat/state/storage.py +52 -0
  75. ommlds/cli/sessions/chat/chat/state/types.py +38 -0
  76. ommlds/cli/sessions/chat/chat/user/__init__.py +0 -0
  77. ommlds/cli/sessions/chat/chat/user/configs.py +17 -0
  78. ommlds/cli/sessions/chat/chat/user/inject.py +62 -0
  79. ommlds/cli/sessions/chat/chat/user/interactive.py +31 -0
  80. ommlds/cli/sessions/chat/chat/user/oneshot.py +25 -0
  81. ommlds/cli/sessions/chat/chat/user/types.py +15 -0
  82. ommlds/cli/sessions/chat/configs.py +27 -0
  83. ommlds/cli/sessions/chat/driver.py +43 -0
  84. ommlds/cli/sessions/chat/inject.py +33 -65
  85. ommlds/cli/sessions/chat/phases/__init__.py +0 -0
  86. ommlds/cli/sessions/chat/phases/inject.py +27 -0
  87. ommlds/cli/sessions/chat/phases/injection.py +14 -0
  88. ommlds/cli/sessions/chat/phases/manager.py +29 -0
  89. ommlds/cli/sessions/chat/phases/types.py +29 -0
  90. ommlds/cli/sessions/chat/session.py +27 -0
  91. ommlds/cli/sessions/chat/tools/__init__.py +0 -0
  92. ommlds/cli/sessions/chat/tools/configs.py +22 -0
  93. ommlds/cli/sessions/chat/tools/confirmation.py +46 -0
  94. ommlds/cli/sessions/chat/tools/execution.py +66 -0
  95. ommlds/cli/sessions/chat/tools/fs/__init__.py +0 -0
  96. ommlds/cli/sessions/chat/tools/fs/configs.py +12 -0
  97. ommlds/cli/sessions/chat/tools/fs/inject.py +35 -0
  98. ommlds/cli/sessions/chat/tools/inject.py +88 -0
  99. ommlds/cli/sessions/chat/tools/injection.py +44 -0
  100. ommlds/cli/sessions/chat/tools/rendering.py +58 -0
  101. ommlds/cli/sessions/chat/tools/todo/__init__.py +0 -0
  102. ommlds/cli/sessions/chat/tools/todo/configs.py +12 -0
  103. ommlds/cli/sessions/chat/tools/todo/inject.py +31 -0
  104. ommlds/cli/sessions/chat/tools/weather/__init__.py +0 -0
  105. ommlds/cli/sessions/chat/tools/weather/configs.py +12 -0
  106. ommlds/cli/sessions/chat/tools/weather/inject.py +22 -0
  107. ommlds/cli/{tools/weather.py → sessions/chat/tools/weather/tools.py} +1 -1
  108. ommlds/cli/sessions/completion/configs.py +21 -0
  109. ommlds/cli/sessions/completion/inject.py +42 -0
  110. ommlds/cli/sessions/completion/session.py +35 -0
  111. ommlds/cli/sessions/embedding/configs.py +21 -0
  112. ommlds/cli/sessions/embedding/inject.py +42 -0
  113. ommlds/cli/sessions/embedding/session.py +33 -0
  114. ommlds/cli/sessions/inject.py +28 -11
  115. ommlds/cli/state/__init__.py +0 -0
  116. ommlds/cli/state/inject.py +28 -0
  117. ommlds/cli/{state.py → state/storage.py} +41 -24
  118. ommlds/minichain/__init__.py +84 -24
  119. ommlds/minichain/_marshal.py +49 -9
  120. ommlds/minichain/_typedvalues.py +2 -4
  121. ommlds/minichain/backends/catalogs/base.py +20 -1
  122. ommlds/minichain/backends/catalogs/simple.py +2 -2
  123. ommlds/minichain/backends/catalogs/strings.py +10 -8
  124. ommlds/minichain/backends/impls/anthropic/chat.py +65 -27
  125. ommlds/minichain/backends/impls/anthropic/names.py +10 -8
  126. ommlds/minichain/backends/impls/anthropic/protocol.py +109 -0
  127. ommlds/minichain/backends/impls/anthropic/stream.py +111 -43
  128. ommlds/minichain/backends/impls/duckduckgo/search.py +1 -1
  129. ommlds/minichain/backends/impls/dummy/__init__.py +0 -0
  130. ommlds/minichain/backends/impls/dummy/chat.py +69 -0
  131. ommlds/minichain/backends/impls/google/chat.py +114 -22
  132. ommlds/minichain/backends/impls/google/search.py +7 -2
  133. ommlds/minichain/backends/impls/google/stream.py +219 -0
  134. ommlds/minichain/backends/impls/google/tools.py +149 -0
  135. ommlds/minichain/backends/impls/groq/__init__.py +0 -0
  136. ommlds/minichain/backends/impls/groq/chat.py +75 -0
  137. ommlds/minichain/backends/impls/groq/names.py +48 -0
  138. ommlds/minichain/backends/impls/groq/protocol.py +143 -0
  139. ommlds/minichain/backends/impls/groq/stream.py +125 -0
  140. ommlds/minichain/backends/impls/llamacpp/chat.py +33 -18
  141. ommlds/minichain/backends/impls/llamacpp/completion.py +1 -1
  142. ommlds/minichain/backends/impls/llamacpp/format.py +4 -2
  143. ommlds/minichain/backends/impls/llamacpp/stream.py +37 -20
  144. ommlds/minichain/backends/impls/mistral.py +20 -5
  145. ommlds/minichain/backends/impls/mlx/chat.py +96 -22
  146. ommlds/minichain/backends/impls/ollama/__init__.py +0 -0
  147. ommlds/minichain/backends/impls/ollama/chat.py +199 -0
  148. ommlds/minichain/backends/impls/openai/chat.py +18 -8
  149. ommlds/minichain/backends/impls/openai/completion.py +10 -3
  150. ommlds/minichain/backends/impls/openai/embedding.py +10 -3
  151. ommlds/minichain/backends/impls/openai/format.py +131 -106
  152. ommlds/minichain/backends/impls/openai/names.py +31 -5
  153. ommlds/minichain/backends/impls/openai/stream.py +43 -25
  154. ommlds/minichain/backends/impls/tavily.py +66 -0
  155. ommlds/minichain/backends/impls/tinygrad/chat.py +23 -16
  156. ommlds/minichain/backends/impls/transformers/sentence.py +1 -1
  157. ommlds/minichain/backends/impls/transformers/tokens.py +1 -1
  158. ommlds/minichain/backends/impls/transformers/transformers.py +155 -34
  159. ommlds/minichain/backends/strings/parsing.py +1 -1
  160. ommlds/minichain/backends/strings/resolving.py +4 -1
  161. ommlds/minichain/chat/_marshal.py +16 -9
  162. ommlds/minichain/chat/choices/adapters.py +4 -4
  163. ommlds/minichain/chat/choices/services.py +1 -1
  164. ommlds/minichain/chat/choices/stream/__init__.py +0 -0
  165. ommlds/minichain/chat/choices/stream/adapters.py +35 -0
  166. ommlds/minichain/chat/choices/stream/joining.py +31 -0
  167. ommlds/minichain/chat/choices/stream/services.py +45 -0
  168. ommlds/minichain/chat/choices/stream/types.py +43 -0
  169. ommlds/minichain/chat/choices/types.py +2 -2
  170. ommlds/minichain/chat/history.py +3 -3
  171. ommlds/minichain/chat/messages.py +55 -19
  172. ommlds/minichain/chat/services.py +3 -3
  173. ommlds/minichain/chat/stream/_marshal.py +16 -0
  174. ommlds/minichain/chat/stream/joining.py +85 -0
  175. ommlds/minichain/chat/stream/services.py +15 -21
  176. ommlds/minichain/chat/stream/types.py +32 -19
  177. ommlds/minichain/chat/tools/execution.py +8 -7
  178. ommlds/minichain/chat/tools/ids.py +9 -15
  179. ommlds/minichain/chat/tools/parsing.py +17 -26
  180. ommlds/minichain/chat/transforms/base.py +29 -38
  181. ommlds/minichain/chat/transforms/metadata.py +30 -4
  182. ommlds/minichain/chat/transforms/services.py +9 -11
  183. ommlds/minichain/content/_marshal.py +44 -20
  184. ommlds/minichain/content/json.py +13 -0
  185. ommlds/minichain/content/materialize.py +14 -21
  186. ommlds/minichain/content/prepare.py +4 -0
  187. ommlds/minichain/content/transforms/interleave.py +1 -1
  188. ommlds/minichain/content/transforms/squeeze.py +1 -1
  189. ommlds/minichain/content/transforms/stringify.py +1 -1
  190. ommlds/minichain/json.py +20 -0
  191. ommlds/minichain/lib/code/__init__.py +0 -0
  192. ommlds/minichain/lib/code/prompts.py +6 -0
  193. ommlds/minichain/lib/fs/binfiles.py +108 -0
  194. ommlds/minichain/lib/fs/context.py +126 -0
  195. ommlds/minichain/lib/fs/errors.py +101 -0
  196. ommlds/minichain/lib/fs/suggestions.py +36 -0
  197. ommlds/minichain/lib/fs/tools/__init__.py +0 -0
  198. ommlds/minichain/lib/fs/tools/edit.py +104 -0
  199. ommlds/minichain/lib/fs/tools/ls.py +38 -0
  200. ommlds/minichain/lib/fs/tools/read.py +115 -0
  201. ommlds/minichain/lib/fs/tools/recursivels/__init__.py +0 -0
  202. ommlds/minichain/lib/fs/tools/recursivels/execution.py +40 -0
  203. ommlds/minichain/lib/todo/__init__.py +0 -0
  204. ommlds/minichain/lib/todo/context.py +54 -0
  205. ommlds/minichain/lib/todo/tools/__init__.py +0 -0
  206. ommlds/minichain/lib/todo/tools/read.py +44 -0
  207. ommlds/minichain/lib/todo/tools/write.py +335 -0
  208. ommlds/minichain/lib/todo/types.py +60 -0
  209. ommlds/minichain/llms/_marshal.py +25 -17
  210. ommlds/minichain/llms/types.py +4 -0
  211. ommlds/minichain/registries/globals.py +18 -4
  212. ommlds/minichain/resources.py +66 -43
  213. ommlds/minichain/search.py +1 -1
  214. ommlds/minichain/services/_marshal.py +46 -39
  215. ommlds/minichain/services/facades.py +3 -3
  216. ommlds/minichain/services/services.py +1 -1
  217. ommlds/minichain/standard.py +8 -0
  218. ommlds/minichain/stream/services.py +152 -38
  219. ommlds/minichain/stream/wrap.py +22 -24
  220. ommlds/minichain/tools/_marshal.py +1 -1
  221. ommlds/minichain/tools/execution/catalog.py +2 -1
  222. ommlds/minichain/tools/execution/context.py +34 -14
  223. ommlds/minichain/tools/execution/errors.py +15 -0
  224. ommlds/minichain/tools/execution/executors.py +8 -3
  225. ommlds/minichain/tools/execution/reflect.py +40 -5
  226. ommlds/minichain/tools/fns.py +46 -9
  227. ommlds/minichain/tools/jsonschema.py +14 -5
  228. ommlds/minichain/tools/reflect.py +54 -18
  229. ommlds/minichain/tools/types.py +33 -1
  230. ommlds/minichain/utils.py +27 -0
  231. ommlds/minichain/vectors/_marshal.py +11 -10
  232. ommlds/nanochat/LICENSE +21 -0
  233. ommlds/nanochat/__init__.py +0 -0
  234. ommlds/nanochat/rustbpe/LICENSE +21 -0
  235. ommlds/nanochat/tokenizers.py +406 -0
  236. ommlds/server/server.py +3 -3
  237. ommlds/specs/__init__.py +0 -0
  238. ommlds/specs/mcp/__init__.py +0 -0
  239. ommlds/specs/mcp/_marshal.py +23 -0
  240. ommlds/specs/mcp/protocol.py +266 -0
  241. ommlds/tools/git.py +27 -10
  242. ommlds/tools/ocr.py +8 -9
  243. ommlds/wiki/analyze.py +2 -2
  244. ommlds/wiki/text/mfh.py +1 -5
  245. ommlds/wiki/text/wtp.py +1 -3
  246. ommlds/wiki/utils/xml.py +5 -5
  247. {ommlds-0.0.0.dev440.dist-info → ommlds-0.0.0.dev480.dist-info}/METADATA +24 -21
  248. ommlds-0.0.0.dev480.dist-info/RECORD +427 -0
  249. ommlds/cli/backends/standard.py +0 -20
  250. ommlds/cli/sessions/chat/base.py +0 -42
  251. ommlds/cli/sessions/chat/interactive.py +0 -73
  252. ommlds/cli/sessions/chat/printing.py +0 -96
  253. ommlds/cli/sessions/chat/prompt.py +0 -143
  254. ommlds/cli/sessions/chat/state.py +0 -109
  255. ommlds/cli/sessions/chat/tools.py +0 -91
  256. ommlds/cli/sessions/completion/completion.py +0 -44
  257. ommlds/cli/sessions/embedding/embedding.py +0 -42
  258. ommlds/cli/tools/config.py +0 -13
  259. ommlds/cli/tools/inject.py +0 -64
  260. ommlds/minichain/chat/stream/adapters.py +0 -69
  261. ommlds/minichain/lib/fs/ls/execution.py +0 -32
  262. ommlds-0.0.0.dev440.dist-info/RECORD +0 -303
  263. /ommlds/{cli/tools → backends/google}/__init__.py +0 -0
  264. /ommlds/{minichain/lib/fs/ls → backends/groq}/__init__.py +0 -0
  265. /ommlds/{huggingface.py → backends/huggingface.py} +0 -0
  266. /ommlds/minichain/lib/fs/{ls → tools/recursivels}/rendering.py +0 -0
  267. /ommlds/minichain/lib/fs/{ls → tools/recursivels}/running.py +0 -0
  268. {ommlds-0.0.0.dev440.dist-info → ommlds-0.0.0.dev480.dist-info}/WHEEL +0 -0
  269. {ommlds-0.0.0.dev440.dist-info → ommlds-0.0.0.dev480.dist-info}/entry_points.txt +0 -0
  270. {ommlds-0.0.0.dev440.dist-info → ommlds-0.0.0.dev480.dist-info}/licenses/LICENSE +0 -0
  271. {ommlds-0.0.0.dev440.dist-info → ommlds-0.0.0.dev480.dist-info}/top_level.txt +0 -0
ommlds/backends/google/protocol/types.py
@@ -0,0 +1,626 @@
+ """
+ https://ai.google.dev/api/generate-content
+ """
+ import typing as ta
+
+ from omlish import dataclasses as dc
+ from omlish import lang
+ from omlish import marshal as msh
+
+
+ ##
+
+
+ def _set_class_marshal_options(cls):
+     msh.update_object_metadata(
+         cls,
+         field_naming=msh.Naming.LOW_CAMEL,
+         field_defaults=msh.FieldMetadata(
+             options=msh.FieldOptions(
+                 omit_if=lang.is_none,
+             ),
+         ),
+     )
+
+     return cls
+
+
+ @dc.dataclass(frozen=True, kw_only=True)
+ @_set_class_marshal_options
+ @msh.update_fields_metadata(
+     ['data'],
+     marshaler=msh.Base64MarshalerUnmarshaler(bytes),
+     unmarshaler=msh.Base64MarshalerUnmarshaler(bytes),
+ )
+ class Blob(lang.Final):
+     mime_type: str
+     data: bytes
+
+
+ @dc.dataclass(frozen=True, kw_only=True)
+ @_set_class_marshal_options
+ class FunctionCall(lang.Final):
+     id: str | None = None
+     name: str
+     args: ta.Mapping[str, ta.Any] | None = None
+
+
+ Scheduling: ta.TypeAlias = ta.Literal[
+     # This value is unused.
+     'SCHEDULING_UNSPECIFIED',
+
+     # Only add the result to the conversation context, do not interrupt or trigger generation.
+     'SILENT',
+
+     # Add the result to the conversation context, and prompt to generate output without interrupting ongoing
+     # generation.
+     'WHEN_IDLE',
+
+     # Add the result to the conversation context, interrupt ongoing generation and prompt to generate output.
+     'INTERRUPT',
+ ]
+
+
+ @dc.dataclass(frozen=True, kw_only=True)
+ @_set_class_marshal_options
+ class FunctionResponse(lang.Final):
+     id: str | None = None
+     name: str
+     response: ta.Mapping[str, ta.Any] | None = None
+     will_continue: bool | None = None
+     scheduling: Scheduling | None = None
+
+
+ @dc.dataclass(frozen=True, kw_only=True)
+ @_set_class_marshal_options
+ class FileData(lang.Final):
+     mime_type: str
+     file_uri: str
+
+
+ Language: ta.TypeAlias = ta.Literal[
+     # Unspecified language. This value should not be used.
+     'LANGUAGE_UNSPECIFIED',
+
+     # Python >= 3.10, with numpy and simpy available.
+     'PYTHON',
+ ]
+
+
+ @dc.dataclass(frozen=True, kw_only=True)
+ @_set_class_marshal_options
+ class ExecutableCode(lang.Final):
+     language: Language
+     code: str
+
+
+ Outcome: ta.TypeAlias = ta.Literal[
+     # Unspecified status. This value should not be used.
+     'OUTCOME_UNSPECIFIED',
+
+     # Code execution completed successfully.
+     'OUTCOME_OK',
+
+     # Code execution finished but with a failure. stderr should contain the reason.
+     'OUTCOME_FAILED',
+
+     # Code execution ran for too long, and was cancelled. There may or may not be a partial output present.
+     'OUTCOME_DEADLINE_EXCEEDED',
+ ]
+
+
+ @dc.dataclass(frozen=True, kw_only=True)
+ @_set_class_marshal_options
+ class CodeExecutionResult(lang.Final):
+     outcome: Outcome
+     output: str
+
+
+ @dc.dataclass(frozen=True, kw_only=True)
+ @_set_class_marshal_options
+ class VideoMetadata(lang.Final):
+     start_offset: str  # Duration
+     end_offset: str  # Duration
+     fps: float
+
+
+ @dc.dataclass(frozen=True, kw_only=True)
+ @msh.update_fields_metadata(
+     ['thought_signature'],
+     marshaler=msh.OptionalMarshaler(msh.Base64MarshalerUnmarshaler(bytes)),
+     unmarshaler=msh.OptionalUnmarshaler(msh.Base64MarshalerUnmarshaler(bytes)),
+ )
+ @_set_class_marshal_options
+ class Part(lang.Final):
+     # TODO: data: msh.oneof ...
+     text: str | None = None
+     inline_data: Blob | None = None
+     function_call: FunctionCall | None = None
+     function_response: FunctionResponse | None = None
+     file_data: FileData | None = None
+     executable_code: ExecutableCode | None = None
+     code_execution_result: CodeExecutionResult | None = None
+
+     thought: bool | None = None
+     thought_signature: bytes | None = None
+
+     # TODO: metadata: msh.oneof ...
+     video_metadata: VideoMetadata | None = None
+
+
+ ContentRole: ta.TypeAlias = ta.Literal['user', 'model']
+
+
+ @dc.dataclass(frozen=True, kw_only=True)
+ @_set_class_marshal_options
+ class Content(lang.Final):
+     parts: ta.Sequence[Part] | None = None
+     role: ContentRole | None = None
+
+
+ ##
+
+
+ Type: ta.TypeAlias = ta.Literal[
+     # Not specified, should not be used.
+     'TYPE_UNSPECIFIED',
+
+     # String type.
+     'STRING',
+
+     # Number type.
+     'NUMBER',
+
+     # Integer type.
+     'INTEGER',
+
+     # Boolean type.
+     'BOOLEAN',
+
+     # Array type.
+     'ARRAY',
+
+     # Object type.
+     'OBJECT',
+
+     # Null type.
+     'NULL',
+ ]
+
+
+ Struct: ta.TypeAlias = ta.Mapping[str, 'Value']
+
+
+ @dc.dataclass(frozen=True)
+ class Value(lang.Abstract, lang.Sealed):
+     """https://protobuf.dev/reference/protobuf/google.protobuf/#value"""
+
+
+ @dc.dataclass(frozen=True)
+ @msh.update_object_metadata(field_naming=msh.Naming.LOW_CAMEL)
+ class NullValue(Value, lang.Final):
+     null_value: None = None
+
+
+ @dc.dataclass(frozen=True)
+ @_set_class_marshal_options
+ class NumberValue(Value, lang.Final):
+     number_value: float
+
+
+ @dc.dataclass(frozen=True)
+ @_set_class_marshal_options
+ class StringValue(Value, lang.Final):
+     string_value: str
+
+
+ @dc.dataclass(frozen=True)
+ @_set_class_marshal_options
+ class BoolValue(Value, lang.Final):
+     bool_value: bool
+
+
+ @dc.dataclass(frozen=True)
+ @_set_class_marshal_options
+ class StructValue(Value, lang.Final):
+     struct_value: Struct
+
+
+ @dc.dataclass(frozen=True)
+ @_set_class_marshal_options
+ class ListValue(Value, lang.Final):
+     list_value: ta.Sequence[Value]
+
+
+ @dc.dataclass(frozen=True, kw_only=True)
+ @_set_class_marshal_options
+ class Schema(lang.Final):
+     type: Type | None = None  # FIXME: required
+     format: str | None = None
+     title: str | None = None
+     description: str | None = None
+     nullable: bool | None = None
+     enum: ta.Sequence[str] | None = None
+     max_items: str | None = None  # int64
+     min_items: str | None = None  # int64
+     properties: ta.Mapping[str, 'Schema'] | None = None
+     required: ta.Sequence[str] | None = None
+     min_properties: str | None = None  # int64
+     max_properties: str | None = None  # int64
+     min_length: str | None = None  # int64
+     max_length: str | None = None  # int64
+     pattern: str | None = None
+     example: Value | None = None
+     any_of: ta.Sequence['Schema'] | None = None
+     property_ordering: ta.Sequence[str] | None = None
+     default: Value | None = None
+     items: ta.Optional['Schema'] = None
+     minimum: float | None = None
+     maximum: float | None = None
+
+
+ FunctionBehavior: ta.TypeAlias = ta.Literal[
+     # This value is unused.
+     'UNSPECIFIED',
+
+     # If set, the system will wait to receive the function response before continuing the conversation.
+     'BLOCKING',
+
+     # If set, the system will not wait to receive the function response. Instead, it will attempt to handle function
+     # responses as they become available while maintaining the conversation between the user and the model.
+     'NON_BLOCKING',
+ ]
+
+
+ @dc.dataclass(frozen=True, kw_only=True)
+ @_set_class_marshal_options
+ class FunctionDeclaration(lang.Final):
+     name: str
+     description: str
+
+     behavior: FunctionBehavior | None = None
+
+     parameters: Schema | None = None
+     parameters_json_schema: Value | None = None
+
+     response: Schema | None = None
+     response_json_schema: Value | None = None
+
+
+ DynamicRetrievalMode: ta.TypeAlias = ta.Literal[
+     # Always trigger retrieval.
+     'MODE_UNSPECIFIED',
+
+     # Run retrieval only when system decides it is necessary.
+     'MODE_DYNAMIC',
+ ]
+
+
+ @dc.dataclass(frozen=True, kw_only=True)
+ @_set_class_marshal_options
+ class DynamicRetrievalConfig(lang.Final):
+     mode: DynamicRetrievalMode | None = None
+
+     dynamic_threshold: int | float | None = None
+
+
+ @dc.dataclass(frozen=True, kw_only=True)
+ @_set_class_marshal_options
+ class GoogleSearchRetrieval(lang.Final):
+     dynamic_retrieval_config: DynamicRetrievalConfig
+
+
+ @dc.dataclass(frozen=True, kw_only=True)
+ @_set_class_marshal_options
+ class CodeExecution(lang.Final):
+     pass
+
+
+ @dc.dataclass(frozen=True, kw_only=True)
+ @_set_class_marshal_options
+ class Interval(lang.Final):
+     start_time: str  # Timestamp
+     end_time: str  # Timestamp
+
+
+ @dc.dataclass(frozen=True, kw_only=True)
+ @_set_class_marshal_options
+ class GoogleSearch(lang.Final):
+     time_range_filter: Interval | None = None
+
+
+ @dc.dataclass(frozen=True, kw_only=True)
+ @_set_class_marshal_options
+ class UrlContext(lang.Final):
+     pass
+
+
+ @dc.dataclass(frozen=True, kw_only=True)
+ @_set_class_marshal_options
+ class Tool(lang.Final):
+     function_declarations: ta.Sequence[FunctionDeclaration] | None = None
+     google_search_retrieval: GoogleSearchRetrieval | None = None
+     code_execution: CodeExecution | None = None
+     google_search: GoogleSearch | None = None
+     url_context: UrlContext | None = None
+
+
+ FunctionCallingMode: ta.TypeAlias = ta.Literal[
+     # Unspecified function calling mode. This value should not be used.
+     'MODE_UNSPECIFIED',
+
+     # Default model behavior, model decides to predict either a function call or a natural language response.
+     'AUTO',
+
+     # Model is constrained to always predicting a function call only. If "allowedFunctionNames" are set, the predicted
+     # function call will be limited to any one of "allowedFunctionNames", else the predicted function call will be any
+     # one of the provided "functionDeclarations".
+     'ANY',
+
+     # Model will not predict any function call. Model behavior is same as when not passing any function declarations.
+     'NONE',
+
+     # Model decides to predict either a function call or a natural language response, but will validate function calls
+     # with constrained decoding. If "allowedFunctionNames" are set, the predicted function call will be limited to any
+     # one of "allowedFunctionNames", else the predicted function call will be any one of the provided
+     # "functionDeclarations".
+     'VALIDATED',
+ ]
+
+
+ @dc.dataclass(frozen=True, kw_only=True)
+ @_set_class_marshal_options
+ class FunctionCallingConfig(lang.Final):
+     mode: FunctionCallingMode | None = None
+     allowed_function_names: ta.Sequence[str] | None = None
+
+
+ @dc.dataclass(frozen=True, kw_only=True)
+ @_set_class_marshal_options
+ class ToolConfig(lang.Final):
+     function_calling_config: FunctionCallingConfig | None = None
+
+
+ HarmCategory: ta.TypeAlias = ta.Literal[
+     # Category is unspecified.
+     'HARM_CATEGORY_UNSPECIFIED',
+
+     # PaLM - Negative or harmful comments targeting identity and/or protected attribute.
+     'HARM_CATEGORY_DEROGATORY',
+
+     # PaLM - Content that is rude, disrespectful, or profane.
+     'HARM_CATEGORY_TOXICITY',
+
+     # PaLM - Describes scenarios depicting violence against an individual or group, or general descriptions of gore.
+     'HARM_CATEGORY_VIOLENCE',
+
+     # PaLM - Contains references to sexual acts or other lewd content.
+     'HARM_CATEGORY_SEXUAL',
+
+     # PaLM - Promotes unchecked medical advice.
+     'HARM_CATEGORY_MEDICAL',
+
+     # PaLM - Dangerous content that promotes, facilitates, or encourages harmful acts.
+     'HARM_CATEGORY_DANGEROUS',
+
+     # Gemini - Harassment content.
+     'HARM_CATEGORY_HARASSMENT',
+
+     # Gemini - Hate speech and content.
+     'HARM_CATEGORY_HATE_SPEECH',
+
+     # Gemini - Sexually explicit content.
+     'HARM_CATEGORY_SEXUALLY_EXPLICIT',
+
+     # Gemini - Dangerous content.
+     'HARM_CATEGORY_DANGEROUS_CONTENT',
+
+     # Gemini - Content that may be used to harm civic integrity. DEPRECATED: use enableEnhancedCivicAnswers instead.
+     'HARM_CATEGORY_CIVIC_INTEGRITY',
+ ]
+
+
+ HarmBlockThreshold: ta.TypeAlias = ta.Literal[
+     # Threshold is unspecified.
+     'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
+
+     # Content with NEGLIGIBLE will be allowed.
+     'BLOCK_LOW_AND_ABOVE',
+
+     # Content with NEGLIGIBLE and LOW will be allowed.
+     'BLOCK_MEDIUM_AND_ABOVE',
+
+     # Content with NEGLIGIBLE, LOW, and MEDIUM will be allowed.
+     'BLOCK_ONLY_HIGH',
+
+     # All content will be allowed.
+     'BLOCK_NONE',
+
+     # Turn off the safety filter.
+     'OFF',
+ ]
+
+
+ @dc.dataclass(frozen=True, kw_only=True)
+ @_set_class_marshal_options
+ class SafetySetting(lang.Final):
+     category: HarmCategory
+     threshold: HarmBlockThreshold
+
+
+ @dc.dataclass(frozen=True, kw_only=True)
+ @_set_class_marshal_options
+ class ThinkingConfig(lang.Final):
+     include_thoughts: bool | None = None
+     thinking_budget: int | None = None
+
+
+ Modality: ta.TypeAlias = ta.Literal[
+     # Default value.
+     'MODALITY_UNSPECIFIED',
+
+     # Indicates the model should return text.
+     'TEXT',
+
+     # Indicates the model should return images.
+     'IMAGE',
+
+     # Indicates the model should return audio.
+     'AUDIO',
+ ]
+
+
+ MediaResolution: ta.TypeAlias = ta.Literal[
+     # Media resolution has not been set.
+     'MEDIA_RESOLUTION_UNSPECIFIED',
+
+     # Media resolution set to low (64 tokens).
+     'MEDIA_RESOLUTION_LOW',
+
+     # Media resolution set to medium (256 tokens).
+     'MEDIA_RESOLUTION_MEDIUM',
+
+     # Media resolution set to high (zoomed reframing with 256 tokens).
+     'MEDIA_RESOLUTION_HIGH',
+ ]
+
+
+ @dc.dataclass(frozen=True, kw_only=True)
+ @_set_class_marshal_options
+ class GenerationConfig(lang.Final):
+     stop_sequences: ta.Sequence[str] | None = None
+
+     response_mime_type: str | None = None
+     response_schema: Schema | None = None
+     response_json_schema: Value | None = None
+     response_modalities: ta.Sequence[Modality] | None = None
+
+     candidate_count: int | None = None
+     max_output_tokens: int | None = None
+     temperature: float | None = None
+     top_p: float | None = None
+     top_k: int | None = None
+     seed: int | None = None
+     presence_penalty: float | None = None
+     frequency_penalty: float | None = None
+
+     response_logprobs: bool | None = None
+     logprobs: int | None = None
+
+     enable_enhanced_civic_answers: bool | None = None
+
+     # speech_config: SpeechConfig | None = None
+
+     thinking_config: ThinkingConfig | None = None
+
+     media_resolution: MediaResolution | None = None
+
+
+ @dc.dataclass(frozen=True, kw_only=True)
+ @_set_class_marshal_options
+ class GenerateContentRequest(lang.Final):
+     """https://ai.google.dev/api/generate-content#request-body"""
+
+     contents: ta.Sequence[Content] | None = None
+     tools: ta.Sequence[Tool] | None = None
+     tool_config: ToolConfig | None = None
+     safety_settings: ta.Sequence[SafetySetting] | None = None
+     system_instruction: Content | None = None
+     generation_config: GenerationConfig | None = None
+     cached_content: str | None = None
+
+
+ FinishReason: ta.TypeAlias = ta.Literal[
+     # Default value. This value is unused.
+     'FINISH_REASON_UNSPECIFIED',
+
+     # Natural stop point of the model or provided stop sequence.
+     'STOP',
+
+     # The maximum number of tokens as specified in the request was reached.
+     'MAX_TOKENS',
+
+     # The response candidate content was flagged for safety reasons.
+     'SAFETY',
+
+     # The response candidate content was flagged for recitation reasons.
+     'RECITATION',
+
+     # The response candidate content was flagged for using an unsupported language.
+     'LANGUAGE',
+
+     # Unknown reason.
+     'OTHER',
+
+     # Token generation stopped because the content contains forbidden terms.
+     'BLOCKLIST',
+
+     # Token generation stopped for potentially containing prohibited content.
+     'PROHIBITED_CONTENT',
+
+     # Token generation stopped because the content potentially contains Sensitive Personally Identifiable Information
+     # (SPII).
+     'SPII',
+
+     # The function call generated by the model is invalid.
+     'MALFORMED_FUNCTION_CALL',
+
+     # Token generation stopped because generated images contain safety violations.
+     'IMAGE_SAFETY',
+
+     # Model generated a tool call but no tools were enabled in the request.
+     'UNEXPECTED_TOOL_CALL',
+
+     # Model called too many tools consecutively, thus the system exited execution.
+     'TOO_MANY_TOOL_CALLS',
+ ]
+
+
+ @dc.dataclass(frozen=True, kw_only=True)
+ @_set_class_marshal_options
+ class GenerateContentResponse(lang.Final):
+     """https://ai.google.dev/api/generate-content#v1beta.GenerateContentResponse"""
+
+     @dc.dataclass(frozen=True, kw_only=True)
+     @_set_class_marshal_options
+     class Candidate(lang.Final):
+         content: Content | None = None
+         finish_reason: FinishReason | None = None
+         finish_message: str | None = None
+         # safety_ratings: ta.Sequence[SafetyRating] | None = None
+         # citation_metadata: CitationMetadata | None = None
+         token_count: int | None = None
+         # grounding_attributions: ta.Sequence[GroundingAttribution] | None = None
+         # grounding_metadata: GroundingMetadata | None = None
+         avg_logprobs: float | None = None
+         # logprobs_result: LogprobsResult | None = None
+         # url_context_metadata: UrlContextMetadata | None = None
+         index: int | None = None
+
+     candidates: ta.Sequence[Candidate] | None = None
+
+     @dc.dataclass(frozen=True, kw_only=True)
+     @_set_class_marshal_options
+     class UsageMetadata(lang.Final):
+         prompt_token_count: int | None = None
+         cached_content_token_count: int | None = None
+         candidates_token_count: int | None = None
+         total_token_count: int | None = None
+         thoughts_token_count: int | None = None
+
+         @dc.dataclass(frozen=True, kw_only=True)
+         @_set_class_marshal_options
+         class ModalityTokenCount:
+             modality: str | None = None
+             token_count: int | None = None
+
+         prompt_tokens_details: ta.Sequence[ModalityTokenCount] | None = None
+         cache_tokens_details: ta.Sequence[ModalityTokenCount] | None = None
+         candidates_tokens_details: ta.Sequence[ModalityTokenCount] | None = None
+         tool_use_prompt_tokens_details: ta.Sequence[ModalityTokenCount] | None = None
+
+     usage_metadata: UsageMetadata | None = None
+
+     model_version: str | None = None
+
+     response_id: str | None = None
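
Editor's note: a minimal construction sketch (not part of the diff) showing how the dataclasses above compose into a request. The import path follows the file listing; the field values are illustrative, and the final call assumes omlish.marshal exposes a module-level marshal() helper that applies the LOW_CAMEL / omit-if-None metadata registered by _set_class_marshal_options.

    from omlish import marshal as msh

    from ommlds.backends.google.protocol.types import Content
    from ommlds.backends.google.protocol.types import GenerateContentRequest
    from ommlds.backends.google.protocol.types import GenerationConfig
    from ommlds.backends.google.protocol.types import Part

    # Build a single-turn request using only the types defined in the hunk above.
    req = GenerateContentRequest(
        contents=[
            Content(
                role='user',
                parts=[Part(text='Say hello.')],
            ),
        ],
        generation_config=GenerationConfig(
            temperature=0.2,
            max_output_tokens=128,
        ),
    )

    # Assumption: msh.marshal() serializes the dataclass through its registered
    # metadata, producing lowCamel keys and dropping None-valued fields, e.g.:
    #   {'contents': [{'role': 'user', 'parts': [{'text': 'Say hello.'}]}],
    #    'generationConfig': {'temperature': 0.2, 'maxOutputTokens': 128}}
    payload = msh.marshal(req)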
ommlds/backends/groq/_marshal.py
@@ -0,0 +1,23 @@
+ from omlish import lang
+ from omlish import marshal as msh
+
+ from .protocol import ChatCompletionRequest
+
+
+ ##
+
+
+ @lang.static_init
+ def _install_standard_marshaling() -> None:
+     for root_cls, tag_field in [
+         (ChatCompletionRequest.Message, 'role'),
+     ]:
+         msh.install_standard_factories(*msh.standard_polymorphism_factories(
+             msh.polymorphism_from_subclasses(
+                 root_cls,
+                 naming=msh.Naming.SNAKE,
+                 strip_suffix=msh.AutoStripSuffix,
+             ),
+             msh.FieldTypeTagging(tag_field),
+             unions='partial',
+         ))