ommlds 0.0.0.dev448__py3-none-any.whl → 0.0.0.dev450__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. ommlds/.omlish-manifests.json +1 -1
  2. ommlds/backends/google/protocol/__init__.py +3 -0
  3. ommlds/backends/google/protocol/_marshal.py +16 -0
  4. ommlds/backends/google/protocol/types.py +303 -76
  5. ommlds/backends/mlx/generation.py +1 -1
  6. ommlds/cli/main.py +27 -6
  7. ommlds/cli/sessions/chat/code.py +114 -0
  8. ommlds/cli/sessions/chat/interactive.py +2 -5
  9. ommlds/cli/sessions/chat/printing.py +1 -4
  10. ommlds/cli/sessions/chat/prompt.py +8 -1
  11. ommlds/cli/sessions/chat/state.py +1 -0
  12. ommlds/cli/sessions/chat/tools.py +17 -7
  13. ommlds/cli/tools/config.py +1 -0
  14. ommlds/cli/tools/inject.py +11 -3
  15. ommlds/minichain/__init__.py +4 -0
  16. ommlds/minichain/backends/impls/google/chat.py +66 -11
  17. ommlds/minichain/backends/impls/google/tools.py +149 -0
  18. ommlds/minichain/lib/code/prompts.py +6 -0
  19. ommlds/minichain/lib/fs/binfiles.py +108 -0
  20. ommlds/minichain/lib/fs/context.py +112 -0
  21. ommlds/minichain/lib/fs/errors.py +95 -0
  22. ommlds/minichain/lib/fs/suggestions.py +36 -0
  23. ommlds/minichain/lib/fs/tools/__init__.py +0 -0
  24. ommlds/minichain/lib/fs/tools/ls.py +38 -0
  25. ommlds/minichain/lib/fs/tools/read.py +115 -0
  26. ommlds/minichain/lib/fs/tools/recursivels/__init__.py +0 -0
  27. ommlds/minichain/lib/fs/tools/recursivels/execution.py +40 -0
  28. ommlds/minichain/lib/todo/__init__.py +0 -0
  29. ommlds/minichain/lib/todo/context.py +27 -0
  30. ommlds/minichain/lib/todo/tools/__init__.py +0 -0
  31. ommlds/minichain/lib/todo/tools/read.py +39 -0
  32. ommlds/minichain/lib/todo/tools/write.py +275 -0
  33. ommlds/minichain/lib/todo/types.py +55 -0
  34. ommlds/minichain/tools/execution/context.py +34 -14
  35. ommlds/minichain/tools/execution/errors.py +15 -0
  36. ommlds/minichain/tools/execution/reflect.py +0 -3
  37. ommlds/minichain/tools/jsonschema.py +11 -1
  38. ommlds/minichain/tools/reflect.py +47 -15
  39. ommlds/minichain/tools/types.py +9 -0
  40. ommlds/minichain/utils.py +27 -0
  41. {ommlds-0.0.0.dev448.dist-info → ommlds-0.0.0.dev450.dist-info}/METADATA +3 -3
  42. {ommlds-0.0.0.dev448.dist-info → ommlds-0.0.0.dev450.dist-info}/RECORD +49 -29
  43. ommlds/minichain/lib/fs/ls/execution.py +0 -32
  44. /ommlds/minichain/lib/{fs/ls → code}/__init__.py +0 -0
  45. /ommlds/minichain/lib/fs/{ls → tools/recursivels}/rendering.py +0 -0
  46. /ommlds/minichain/lib/fs/{ls → tools/recursivels}/running.py +0 -0
  47. {ommlds-0.0.0.dev448.dist-info → ommlds-0.0.0.dev450.dist-info}/WHEEL +0 -0
  48. {ommlds-0.0.0.dev448.dist-info → ommlds-0.0.0.dev450.dist-info}/entry_points.txt +0 -0
  49. {ommlds-0.0.0.dev448.dist-info → ommlds-0.0.0.dev450.dist-info}/licenses/LICENSE +0 -0
  50. {ommlds-0.0.0.dev448.dist-info → ommlds-0.0.0.dev450.dist-info}/top_level.txt +0 -0
ommlds/.omlish-manifests.json
@@ -96,7 +96,7 @@
  "module": ".minichain.backends.impls.google.chat",
  "attr": null,
  "file": "ommlds/minichain/backends/impls/google/chat.py",
- "line": 29,
+ "line": 33,
  "value": {
  "!.minichain.registries.manifests.RegistryManifest": {
  "module": "ommlds.minichain.backends.impls.google.chat",
ommlds/backends/google/protocol/__init__.py
@@ -0,0 +1,3 @@
+ from omlish import marshal as _msh  # noqa
+
+ _msh.register_global_module_import('._marshal', __package__)
ommlds/backends/google/protocol/_marshal.py
@@ -0,0 +1,16 @@
+ from omlish import lang
+ from omlish import marshal as msh
+
+ from .types import Value
+
+
+ ##
+
+
+ @lang.static_init
+ def _install_standard_marshalling() -> None:
+     msh.install_standard_factories(
+         *msh.standard_polymorphism_factories(
+             msh.polymorphism_from_subclasses(Value),
+         ),
+     )
ommlds/backends/google/protocol/types.py
@@ -1,3 +1,6 @@
+ """
+ https://ai.google.dev/api/generate-content
+ """
  import typing as ta

  from omlish import dataclasses as dc
@@ -8,19 +11,36 @@ from omlish import marshal as msh
  ##


+ def _set_class_marshal_options(cls):
+     msh.update_object_metadata(
+         cls,
+         field_naming=msh.Naming.LOW_CAMEL,
+         field_defaults=msh.FieldMetadata(
+             options=msh.FieldOptions(
+                 omit_if=lang.is_none,
+             ),
+         ),
+     )
+
+     return cls
+
+
  @dc.dataclass(frozen=True, kw_only=True)
- @msh.update_fields_metadata(omit_if=lang.is_none)
- @msh.update_object_metadata(field_naming=msh.Naming.LOW_CAMEL)
+ @_set_class_marshal_options
+ @msh.update_fields_metadata(
+     ['data'],
+     marshaler=msh.Base64MarshalerUnmarshaler(bytes),
+     unmarshaler=msh.Base64MarshalerUnmarshaler(bytes),
+ )
  class Blob(lang.Final):
      mine_type: str
      data: bytes


  @dc.dataclass(frozen=True, kw_only=True)
- @msh.update_fields_metadata(omit_if=lang.is_none)
- @msh.update_object_metadata(field_naming=msh.Naming.LOW_CAMEL)
+ @_set_class_marshal_options
  class FunctionCall(lang.Final):
-     id: str
+     id: str | None = None
      name: str
      args: ta.Mapping[str, ta.Any] | None = None

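The `_set_class_marshal_options` helper introduced above folds the previous pair of per-class decorators into one: every protocol dataclass now marshals with lowerCamelCase field names and omits `None`-valued fields. The snippet below is only an illustrative sketch of that configured behavior using the standard library; the `wire_dict` helper and the simplified stand-in class are hypothetical, not how the omlish marshal machinery is implemented.

```python
import dataclasses
import typing as ta


def _low_camel(name: str) -> str:
    first, *rest = name.split('_')
    return first + ''.join(p.title() for p in rest)


def wire_dict(obj: ta.Any) -> dict[str, ta.Any]:
    # Hypothetical helper: lowerCamelCase the field names and drop None values,
    # mirroring the options _set_class_marshal_options applies via omlish.marshal.
    return {
        _low_camel(f.name): getattr(obj, f.name)
        for f in dataclasses.fields(obj)
        if getattr(obj, f.name) is not None
    }


@dataclasses.dataclass(frozen=True, kw_only=True)
class FunctionResponseLike:
    # Simplified stand-in for types.FunctionResponse, for illustration only.
    id: str | None = None
    name: str
    response: ta.Mapping[str, ta.Any] | None = None
    will_continue: bool | None = None


print(wire_dict(FunctionResponseLike(name='get_weather', will_continue=True)))
# {'name': 'get_weather', 'willContinue': True}
```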
@@ -42,10 +62,9 @@ Scheduling: ta.TypeAlias = ta.Literal[


  @dc.dataclass(frozen=True, kw_only=True)
- @msh.update_fields_metadata(omit_if=lang.is_none)
- @msh.update_object_metadata(field_naming=msh.Naming.LOW_CAMEL)
+ @_set_class_marshal_options
  class FunctionResponse(lang.Final):
-     id: str
+     id: str | None = None
      name: str
      response: ta.Mapping[str, ta.Any] | None = None
      will_continue: bool | None = None
@@ -53,8 +72,7 @@ class FunctionResponse(lang.Final):


  @dc.dataclass(frozen=True, kw_only=True)
- @msh.update_fields_metadata(omit_if=lang.is_none)
- @msh.update_object_metadata(field_naming=msh.Naming.LOW_CAMEL)
+ @_set_class_marshal_options
  class FileData(lang.Final):
      mime_type: str
      file_uri: str
@@ -70,8 +88,7 @@ Language: ta.TypeAlias = ta.Literal[


  @dc.dataclass(frozen=True, kw_only=True)
- @msh.update_fields_metadata(omit_if=lang.is_none)
- @msh.update_object_metadata(field_naming=msh.Naming.LOW_CAMEL)
+ @_set_class_marshal_options
  class ExecutableCode(lang.Final):
      language: Language
      code: str
@@ -93,16 +110,14 @@ Outcome: ta.TypeAlias = ta.Literal[


  @dc.dataclass(frozen=True, kw_only=True)
- @msh.update_fields_metadata(omit_if=lang.is_none)
- @msh.update_object_metadata(field_naming=msh.Naming.LOW_CAMEL)
+ @_set_class_marshal_options
  class CodeExecutionResult(lang.Final):
      outcome: Outcome
      output: str


  @dc.dataclass(frozen=True, kw_only=True)
- @msh.update_fields_metadata(omit_if=lang.is_none)
- @msh.update_object_metadata(field_naming=msh.Naming.LOW_CAMEL)
+ @_set_class_marshal_options
  class VideoMetadata(lang.Final):
      start_offset: str  # Duration
      end_offset: str  # Duration
@@ -110,8 +125,12 @@ class VideoMetadata(lang.Final):


  @dc.dataclass(frozen=True, kw_only=True)
- @msh.update_fields_metadata(omit_if=lang.is_none)
- @msh.update_object_metadata(field_naming=msh.Naming.LOW_CAMEL)
+ @msh.update_fields_metadata(
+     ['thought_signature'],
+     marshaler=msh.OptionalMarshaler(msh.Base64MarshalerUnmarshaler(bytes)),
+     unmarshaler=msh.OptionalUnmarshaler(msh.Base64MarshalerUnmarshaler(bytes)),
+ )
+ @_set_class_marshal_options
  class Part(lang.Final):
      # TODO: data: msh.oneof ...
      text: str | None = None
@@ -130,8 +149,7 @@ class Part(lang.Final):


  @dc.dataclass(frozen=True, kw_only=True)
- @msh.update_fields_metadata(omit_if=lang.is_none)
- @msh.update_object_metadata(field_naming=msh.Naming.LOW_CAMEL)
+ @_set_class_marshal_options
  class Content(lang.Final):
      parts: ta.Sequence[Part] | None = None
      role: ta.Literal['user', 'model'] | None = None
@@ -170,60 +188,51 @@ Type: ta.TypeAlias = ta.Literal[
  Struct: ta.TypeAlias = ta.Mapping[str, 'Value']


- @dc.dataclass(frozen=True, kw_only=True)
- @msh.update_fields_metadata(omit_if=lang.is_none)
- @msh.update_object_metadata(field_naming=msh.Naming.LOW_CAMEL)
+ @dc.dataclass(frozen=True)
  class Value(lang.Abstract, lang.Sealed):
-     pass
+     """https://protobuf.dev/reference/protobuf/google.protobuf/#value"""


- @dc.dataclass(frozen=True, kw_only=True)
- @msh.update_fields_metadata(omit_if=lang.is_none)
+ @dc.dataclass(frozen=True)
  @msh.update_object_metadata(field_naming=msh.Naming.LOW_CAMEL)
  class NullValue(Value, lang.Final):
      null_value: None = None


- @dc.dataclass(frozen=True, kw_only=True)
- @msh.update_fields_metadata(omit_if=lang.is_none)
- @msh.update_object_metadata(field_naming=msh.Naming.LOW_CAMEL)
+ @dc.dataclass(frozen=True)
+ @_set_class_marshal_options
  class NumberValue(Value, lang.Final):
      number_value: float


- @dc.dataclass(frozen=True, kw_only=True)
- @msh.update_fields_metadata(omit_if=lang.is_none)
- @msh.update_object_metadata(field_naming=msh.Naming.LOW_CAMEL)
+ @dc.dataclass(frozen=True)
+ @_set_class_marshal_options
  class StringValue(Value, lang.Final):
      string_value: str


- @dc.dataclass(frozen=True, kw_only=True)
- @msh.update_fields_metadata(omit_if=lang.is_none)
- @msh.update_object_metadata(field_naming=msh.Naming.LOW_CAMEL)
+ @dc.dataclass(frozen=True)
+ @_set_class_marshal_options
  class BoolValue(Value, lang.Final):
      bool_value: bool


- @dc.dataclass(frozen=True, kw_only=True)
- @msh.update_fields_metadata(omit_if=lang.is_none)
- @msh.update_object_metadata(field_naming=msh.Naming.LOW_CAMEL)
+ @dc.dataclass(frozen=True)
+ @_set_class_marshal_options
  class StructValue(Value, lang.Final):
      struct_value: Struct


- @dc.dataclass(frozen=True, kw_only=True)
- @msh.update_fields_metadata(omit_if=lang.is_none)
- @msh.update_object_metadata(field_naming=msh.Naming.LOW_CAMEL)
+ @dc.dataclass(frozen=True)
+ @_set_class_marshal_options
  class ListValue(Value, lang.Final):
      list_value: ta.Sequence[Value]


  @dc.dataclass(frozen=True, kw_only=True)
- @msh.update_fields_metadata(omit_if=lang.is_none)
- @msh.update_object_metadata(field_naming=msh.Naming.LOW_CAMEL)
+ @_set_class_marshal_options
  class Schema(lang.Final):
-     type: Type | None = None
+     type: Type | None = None  # FIXME: required
      format: str | None = None
      title: str | None = None
      description: str | None = None
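The reworked `Value` hierarchy models protobuf's `google.protobuf.Value` oneof (see the docstring link above), with one subclass per variant, and `_marshal.py` registers subclass-based polymorphism for it. As a rough illustration of how plain Python data maps onto that union, here is a hypothetical converter that is not part of the package:

```python
import collections.abc as cabc
import typing as ta

from ommlds.backends.google.protocol.types import (
    BoolValue,
    ListValue,
    NullValue,
    NumberValue,
    StringValue,
    StructValue,
    Value,
)


def to_value(obj: ta.Any) -> Value:
    # Hypothetical converter, for illustration only: wrap plain Python data in
    # the Value subclasses defined in types.py.
    if obj is None:
        return NullValue()
    if isinstance(obj, bool):  # must precede the int/float check
        return BoolValue(obj)
    if isinstance(obj, (int, float)):
        return NumberValue(float(obj))
    if isinstance(obj, str):
        return StringValue(obj)
    if isinstance(obj, cabc.Mapping):
        return StructValue({k: to_value(v) for k, v in obj.items()})
    if isinstance(obj, cabc.Sequence):
        return ListValue([to_value(v) for v in obj])
    raise TypeError(obj)


to_value({'unit': 'celsius', 'days': 3})
# StructValue(struct_value={'unit': StringValue('celsius'), 'days': NumberValue(3.0)})
```

The polymorphism registration in `_marshal.py` is what lets these subclasses be marshalled and unmarshalled interchangeably wherever a `Value` is expected.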
@@ -261,19 +270,18 @@ FunctionBehavior: ta.TypeAlias = ta.Literal[


  @dc.dataclass(frozen=True, kw_only=True)
- @msh.update_fields_metadata(omit_if=lang.is_none)
- @msh.update_object_metadata(field_naming=msh.Naming.LOW_CAMEL)
+ @_set_class_marshal_options
  class FunctionDeclaration(lang.Final):
      name: str
      description: str

-     behavior: FunctionBehavior
+     behavior: FunctionBehavior | None = None

-     parameters: Schema
-     parameters_json_schema: Value
+     parameters: Schema | None = None
+     parameters_json_schema: Value | None = None

-     response: Schema
-     response_json_schema: Value
+     response: Schema | None = None
+     response_json_schema: Value | None = None


  DynamicRetrievalMode: ta.TypeAlias = ta.Literal[
@@ -286,8 +294,7 @@ DynamicRetrievalMode: ta.TypeAlias = ta.Literal[


  @dc.dataclass(frozen=True, kw_only=True)
- @msh.update_fields_metadata(omit_if=lang.is_none)
- @msh.update_object_metadata(field_naming=msh.Naming.LOW_CAMEL)
+ @_set_class_marshal_options
  class DynamicRetrievalConfig(lang.Final):
      mode: DynamicRetrievalMode | None = None

@@ -295,44 +302,38 @@ class DynamicRetrievalConfig(lang.Final):


  @dc.dataclass(frozen=True, kw_only=True)
- @msh.update_fields_metadata(omit_if=lang.is_none)
- @msh.update_object_metadata(field_naming=msh.Naming.LOW_CAMEL)
+ @_set_class_marshal_options
  class GoogleSearchRetrieval(lang.Final):
      dynamic_retrieval_config: DynamicRetrievalConfig


  @dc.dataclass(frozen=True, kw_only=True)
- @msh.update_fields_metadata(omit_if=lang.is_none)
- @msh.update_object_metadata(field_naming=msh.Naming.LOW_CAMEL)
+ @_set_class_marshal_options
  class CodeExecution(lang.Final):
      pass


  @dc.dataclass(frozen=True, kw_only=True)
- @msh.update_fields_metadata(omit_if=lang.is_none)
- @msh.update_object_metadata(field_naming=msh.Naming.LOW_CAMEL)
+ @_set_class_marshal_options
  class Interval(lang.Final):
      start_time: str  # Timestamp
      end_time: str  # Timestamp


  @dc.dataclass(frozen=True, kw_only=True)
- @msh.update_fields_metadata(omit_if=lang.is_none)
- @msh.update_object_metadata(field_naming=msh.Naming.LOW_CAMEL)
+ @_set_class_marshal_options
  class GoogleSearch(lang.Final):
      time_range_filter: Interval | None = None


  @dc.dataclass(frozen=True, kw_only=True)
- @msh.update_fields_metadata(omit_if=lang.is_none)
- @msh.update_object_metadata(field_naming=msh.Naming.LOW_CAMEL)
+ @_set_class_marshal_options
  class UrlContext(lang.Final):
      pass


  @dc.dataclass(frozen=True, kw_only=True)
- @msh.update_fields_metadata(omit_if=lang.is_none)
- @msh.update_object_metadata(field_naming=msh.Naming.LOW_CAMEL)
+ @_set_class_marshal_options
  class Tool(lang.Final):
      function_declarations: ta.Sequence[FunctionDeclaration] | None = None
      google_search_retrieval: GoogleSearchRetrieval | None = None
@@ -341,34 +342,261 @@ class Tool(lang.Final):
      url_context: UrlContext | None = None


+ FunctionCallingMode: ta.TypeAlias = ta.Literal[
+     # Unspecified function calling mode. This value should not be used.
+     'MODE_UNSPECIFIED',
+
+     # Default model behavior, model decides to predict either a function call or a natural language response.
+     'AUTO',
+
+     # Model is constrained to always predicting a function call only. If "allowedFunctionNames" are set, the predicted
+     # function call will be limited to any one of "allowedFunctionNames", else the predicted function call will be any
+     # one of the provided "functionDeclarations".
+     'ANY',
+
+     # Model will not predict any function call. Model behavior is same as when not passing any function declarations.
+     'NONE',
+
+     # Model decides to predict either a function call or a natural language response, but will validate function calls
+     # with constrained decoding. If "allowedFunctionNames" are set, the predicted function call will be limited to any
+     # one of "allowedFunctionNames", else the predicted function call will be any one of the provided
+     # "functionDeclarations".
+     'VALIDATED',
+ ]
+
+
  @dc.dataclass(frozen=True, kw_only=True)
- @msh.update_fields_metadata(omit_if=lang.is_none)
- @msh.update_object_metadata(field_naming=msh.Naming.LOW_CAMEL)
+ @_set_class_marshal_options
+ class FunctionCallingConfig(lang.Final):
+     mode: FunctionCallingMode | None = None
+     allowed_function_names: ta.Sequence[str] | None = None
+
+
+ @dc.dataclass(frozen=True, kw_only=True)
+ @_set_class_marshal_options
+ class ToolConfig(lang.Final):
+     function_calling_config: FunctionCallingConfig | None = None
+
+
+ HarmCategory: ta.TypeAlias = ta.Literal[
+     # Category is unspecified.
+     'HARM_CATEGORY_UNSPECIFIED',
+
+     # PaLM - Negative or harmful comments targeting identity and/or protected attribute.
+     'HARM_CATEGORY_DEROGATORY',
+
+     # PaLM - Content that is rude, disrespectful, or profane.
+     'HARM_CATEGORY_TOXICITY',
+
+     # PaLM - Describes scenarios depicting violence against an individual or group, or general descriptions of gore.
+     'HARM_CATEGORY_VIOLENCE',
+
+     # PaLM - Contains references to sexual acts or other lewd content.
+     'HARM_CATEGORY_SEXUAL',
+
+     # PaLM - Promotes unchecked medical advice.
+     'HARM_CATEGORY_MEDICAL',
+
+     # PaLM - Dangerous content that promotes, facilitates, or encourages harmful acts.
+     'HARM_CATEGORY_DANGEROUS',
+
+     # Gemini - Harassment content.
+     'HARM_CATEGORY_HARASSMENT',
+
+     # Gemini - Hate speech and content.
+     'HARM_CATEGORY_HATE_SPEECH',
+
+     # Gemini - Sexually explicit content.
+     'HARM_CATEGORY_SEXUALLY_EXPLICIT',
+
+     # Gemini - Dangerous content.
+     'HARM_CATEGORY_DANGEROUS_CONTENT',
+
+     # Gemini - Content that may be used to harm civic integrity. DEPRECATED: use enableEnhancedCivicAnswers instead.
+     'HARM_CATEGORY_CIVIC_INTEGRITY',
+ ]
+
+
+ HarmBlockThreshold: ta.TypeAlias = ta.Literal[
+     # Threshold is unspecified.
+     'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
+
+     # Content with NEGLIGIBLE will be allowed.
+     'BLOCK_LOW_AND_ABOVE',
+
+     # Content with NEGLIGIBLE and LOW will be allowed.
+     'BLOCK_MEDIUM_AND_ABOVE',
+
+     # Content with NEGLIGIBLE, LOW, and MEDIUM will be allowed.
+     'BLOCK_ONLY_HIGH',
+
+     # All content will be allowed.
+     'BLOCK_NONE',
+
+     # Turn off the safety filter.
+     'OFF',
+ ]
+
+
+ @dc.dataclass(frozen=True, kw_only=True)
+ @_set_class_marshal_options
+ class SafetySetting(lang.Final):
+     category: HarmCategory
+     threshold: HarmBlockThreshold
+
+
+ @dc.dataclass(frozen=True, kw_only=True)
+ @_set_class_marshal_options
+ class ThinkingConfig(lang.Final):
+     include_thoughts: bool | None = None
+     thinking_budget: int | None = None
+
+
+ Modality: ta.TypeAlias = ta.Literal[
+     # Default value.
+     'MODALITY_UNSPECIFIED',
+
+     # Indicates the model should return text.
+     'TEXT',
+
+     # Indicates the model should return images.
+     'IMAGE',
+
+     # Indicates the model should return audio.
+     'AUDIO',
+ ]
+
+
+ MediaResolution: ta.TypeAlias = ta.Literal[
+     # Media resolution has not been set.
+     'MEDIA_RESOLUTION_UNSPECIFIED',
+
+     # Media resolution set to low (64 tokens).
+     'MEDIA_RESOLUTION_LOW',
+
+     # Media resolution set to medium (256 tokens).
+     'MEDIA_RESOLUTION_MEDIUM',
+
+     # Media resolution set to high (zoomed reframing with 256 tokens).
+     'MEDIA_RESOLUTION_HIGH',
+ ]
+
+
+ @dc.dataclass(frozen=True, kw_only=True)
+ @_set_class_marshal_options
+ class GenerationConfig(lang.Final):
+     stop_sequences: ta.Sequence[str] | None = None
+
+     response_mime_type: str | None = None
+     response_schema: Schema | None = None
+     response_json_schema: Value | None = None
+     response_modalities: ta.Sequence[Modality] | None = None
+
+     candidate_count: int | None = None
+     max_output_tokens: int | None = None
+     temperature: float | None = None
+     top_p: float | None = None
+     top_k: int | None = None
+     seed: int | None = None
+     presence_penalty: float | None = None
+     frequency_penalty: float | None = None
+
+     response_logprobs: bool | None = None
+     logprobs: int | None = None
+
+     enable_enhanced_civic_answers: bool | None = None
+
+     # speech_config: SpeechConfig | None = None
+
+     thinking_config: ThinkingConfig | None = None
+
+     media_resolution: MediaResolution | None = None
+
+
+ @dc.dataclass(frozen=True, kw_only=True)
+ @_set_class_marshal_options
  class GenerateContentRequest(lang.Final):
      """https://ai.google.dev/api/generate-content#request-body"""

      contents: ta.Sequence[Content] | None = None
+     tools: ta.Sequence[Tool] | None = None
+     tool_config: ToolConfig | None = None
+     safety_settings: ta.Sequence[SafetySetting] | None = None
+     system_instruction: Content | None = None
+     generation_config: GenerationConfig | None = None
+     cached_content: str | None = None
+
+
+ FinishReason: ta.TypeAlias = ta.Literal[
+     # Default value. This value is unused.
+     'FINISH_REASON_UNSPECIFIED',
+
+     # Natural stop point of the model or provided stop sequence.
+     'STOP',
+
+     # The maximum number of tokens as specified in the request was reached.
+     'MAX_TOKENS',
+
+     # The response candidate content was flagged for safety reasons.
+     'SAFETY',
+
+     # The response candidate content was flagged for recitation reasons.
+     'RECITATION',
+
+     # The response candidate content was flagged for using an unsupported language.
+     'LANGUAGE',
+
+     # Unknown reason.
+     'OTHER',
+
+     # Token generation stopped because the content contains forbidden terms.
+     'BLOCKLIST',
+
+     # Token generation stopped for potentially containing prohibited content.
+     'PROHIBITED_CONTENT',
+
+     # Token generation stopped because the content potentially contains Sensitive Personally Identifiable Information
+     # (SPII).
+     'SPII',
+
+     # The function call generated by the model is invalid.
+     'MALFORMED_FUNCTION_CALL',
+
+     # Token generation stopped because generated images contain safety violations.
+     'IMAGE_SAFETY',
+
+     # Model generated a tool call but no tools were enabled in the request.
+     'UNEXPECTED_TOOL_CALL',
+
+     # Model called too many tools consecutively, thus the system exited execution.
+     'TOO_MANY_TOOL_CALLS',
+ ]


  @dc.dataclass(frozen=True, kw_only=True)
- @msh.update_fields_metadata(omit_if=lang.is_none)
- @msh.update_object_metadata(field_naming=msh.Naming.LOW_CAMEL)
+ @_set_class_marshal_options
  class GenerateContentResponse(lang.Final):
      """https://ai.google.dev/api/generate-content#v1beta.GenerateContentResponse"""

      @dc.dataclass(frozen=True, kw_only=True)
-     @msh.update_fields_metadata(omit_if=lang.is_none)
-     @msh.update_object_metadata(field_naming=msh.Naming.LOW_CAMEL)
+     @_set_class_marshal_options
      class Candidate(lang.Final):
          content: Content | None = None
-         finish_reason: ta.Literal['STOP'] | None = None
+         finish_reason: FinishReason | None = None
+         # safety_ratings: ta.Sequence[SafetyRating] | None = None
+         # citation_metadata: CitationMetadata | None = None
+         token_count: int | None = None
+         # grounding_attributions: ta.Sequence[GroundingAttribution] | None = None
+         # grounding_metadata: GroundingMetadata | None = None
+         avg_logprobs: float | None = None
+         # logprobs_result: LogprobsResult | None = None
+         # url_context_metadata: UrlContextMetadata | None = None
          index: int | None = None

      candidates: ta.Sequence[Candidate] | None = None

      @dc.dataclass(frozen=True, kw_only=True)
-     @msh.update_fields_metadata(omit_if=lang.is_none)
-     @msh.update_object_metadata(field_naming=msh.Naming.LOW_CAMEL)
+     @_set_class_marshal_options
      class UsageMetadata(lang.Final):
          prompt_token_count: int | None = None
          cached_content_token_count: int | None = None
@@ -377,8 +605,7 @@ class GenerateContentResponse(lang.Final):
          thoughts_token_count: int | None = None

      @dc.dataclass(frozen=True, kw_only=True)
-     @msh.update_fields_metadata(omit_if=lang.is_none)
-     @msh.update_object_metadata(field_naming=msh.Naming.LOW_CAMEL)
+     @_set_class_marshal_options
      class ModalityTokenCount:
          modality: str | None = None
          token_count: int | None = None
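Taken together, the new request-side types (tools, `ToolConfig`, `SafetySetting`, `GenerationConfig`, `ThinkingConfig`) let a tool-enabled Gemini request be expressed directly against this protocol module. Below is a minimal construction sketch using only the dataclasses shown above; the field values are invented for illustration, `'OBJECT'` is assumed to be among the `Type` literal values, and serialization to the wire format would go through the omlish marshal setup registered in `_marshal.py` (not shown here).

```python
from ommlds.backends.google.protocol.types import (
    Content,
    FunctionCallingConfig,
    FunctionDeclaration,
    GenerateContentRequest,
    GenerationConfig,
    Part,
    Schema,
    ThinkingConfig,
    Tool,
    ToolConfig,
)

# A tool-enabled request built from the newly added types; names and values
# here are illustrative, not taken from the package.
req = GenerateContentRequest(
    contents=[
        Content(role='user', parts=[Part(text='What is the weather in Boston?')]),
    ],
    tools=[
        Tool(function_declarations=[
            FunctionDeclaration(
                name='get_weather',
                description='Look up the current weather for a city.',
                parameters=Schema(
                    type='OBJECT',  # assumed member of the Type literal
                    description='An object with a single string field, "city".',
                ),
            ),
        ]),
    ],
    tool_config=ToolConfig(
        function_calling_config=FunctionCallingConfig(mode='AUTO'),
    ),
    generation_config=GenerationConfig(
        temperature=0.2,
        thinking_config=ThinkingConfig(include_thoughts=False),
    ),
)
```

On the response side, `Candidate.finish_reason` now carries the full `FinishReason` literal rather than just `'STOP'`, so callers can distinguish tool-call, safety, and other terminations.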
ommlds/backends/mlx/generation.py
@@ -254,7 +254,7 @@ def stream_generate(
          add_special_tokens=add_special_tokens,
      )

-     prompt = mx.array(prompt)  # type: ignore[arg-type]
+     prompt = mx.array(ta.cast(ta.Any, prompt))

      detokenizer = tokenization.detokenizer
      detokenizer.reset()