llm-gemini 0.19.1__py3-none-any.whl → 0.20a1__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
{llm_gemini-0.19.1.dist-info → llm_gemini-0.20a1.dist-info}/METADATA RENAMED
@@ -1,17 +1,16 @@
  Metadata-Version: 2.4
  Name: llm-gemini
- Version: 0.19.1
+ Version: 0.20a1
  Summary: LLM plugin to access Google's Gemini family of models
  Author: Simon Willison
- License: Apache-2.0
+ License-Expression: Apache-2.0
  Project-URL: Homepage, https://github.com/simonw/llm-gemini
  Project-URL: Changelog, https://github.com/simonw/llm-gemini/releases
  Project-URL: Issues, https://github.com/simonw/llm-gemini/issues
  Project-URL: CI, https://github.com/simonw/llm-gemini/actions
- Classifier: License :: OSI Approved :: Apache Software License
  Description-Content-Type: text/markdown
  License-File: LICENSE
- Requires-Dist: llm>=0.23
+ Requires-Dist: llm>=0.26a0
  Requires-Dist: httpx
  Requires-Dist: ijson
  Provides-Extra: test
llm_gemini-0.20a1.dist-info/RECORD ADDED
@@ -0,0 +1,7 @@
+ llm_gemini.py,sha256=sNxkRsDYpg7jwpcJn8tsSeBpvcyCCKW7-p_7lfggtk0,21322
+ llm_gemini-0.20a1.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ llm_gemini-0.20a1.dist-info/METADATA,sha256=LfqVHeYfLTArvNEVZ9TcMmWXsQWH9Z1k_RZIx8RdJXo,8069
+ llm_gemini-0.20a1.dist-info/WHEEL,sha256=Nw36Djuh_5VDukK0H78QzOX-_FQEo6V37m3nkm96gtU,91
+ llm_gemini-0.20a1.dist-info/entry_points.txt,sha256=n544bpgUPIBc5l_cnwsTxPc3gMGJHPtAyqBNp-CkMWk,26
+ llm_gemini-0.20a1.dist-info/top_level.txt,sha256=WUQmG6_2QKbT_8W4HH93qyKl_0SUteL4Ra6_PhyNGKU,11
+ llm_gemini-0.20a1.dist-info/RECORD,,
{llm_gemini-0.19.1.dist-info → llm_gemini-0.20a1.dist-info}/WHEEL RENAMED
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (80.3.1)
+ Generator: setuptools (80.7.1)
  Root-Is-Purelib: true
  Tag: py3-none-any

llm_gemini.py CHANGED
@@ -148,6 +148,7 @@ class _SharedGemini:
      key_env_var = "LLM_GEMINI_KEY"
      can_stream = True
      supports_schema = True
+     supports_tools = True

      attachment_types = (
          # Text
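
The new `supports_tools` flag advertises tool support to llm core, which is why the dependency above moves to `llm>=0.26a0` (an alpha that introduced tool calling). A minimal sketch of how calling code might gate on the flag; `gemini-2.0-flash` is just an example model ID:

```python
import llm

# Any Gemini model ID registered by this plugin works here;
# "gemini-2.0-flash" is an example, not the only option.
model = llm.get_model("gemini-2.0-flash")

# Plugins that predate tool support simply lack the attribute,
# so default to False rather than assuming it exists.
if getattr(model, "supports_tools", False):
    print(f"{model.model_id} accepts tools")
```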
@@ -273,14 +274,57 @@ class _SharedGemini:
                      )
                  if response.prompt.prompt:
                      parts.append({"text": response.prompt.prompt})
+                 if response.prompt.tool_results:
+                     parts.extend(
+                         [
+                             {
+                                 "function_response": {
+                                     "name": tool_result.name,
+                                     "response": {
+                                         "output": tool_result.output,
+                                     },
+                                 }
+                             }
+                             for tool_result in response.prompt.tool_results
+                         ]
+                     )
                  messages.append({"role": "user", "parts": parts})
-                 messages.append(
-                     {"role": "model", "parts": [{"text": response.text_or_raise()}]}
-                 )
+                 model_parts = []
+                 response_text = response.text_or_raise()
+                 if response_text:
+                     model_parts.append({"text": response_text})
+                 tool_calls = response.tool_calls_or_raise()
+                 if tool_calls:
+                     model_parts.extend(
+                         [
+                             {
+                                 "function_call": {
+                                     "name": tool_call.name,
+                                     "args": tool_call.arguments,
+                                 }
+                             }
+                             for tool_call in tool_calls
+                         ]
+                     )
+                 messages.append({"role": "model", "parts": model_parts})

          parts = []
          if prompt.prompt:
              parts.append({"text": prompt.prompt})
+         if prompt.tool_results:
+             parts.extend(
+                 [
+                     {
+                         "function_response": {
+                             "name": tool_result.name,
+                             "response": {
+                                 "output": tool_result.output,
+                             },
+                         }
+                     }
+                     for tool_result in prompt.tool_results
+                 ]
+             )
          for attachment in prompt.attachments:
              mime_type = resolve_type(attachment)
              parts.append(
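
Two behavioural details in the replay logic above: user turns now carry `function_response` parts for any tool results, and the model turn is rebuilt from individual parts, so a turn that was purely tool calls no longer produces an empty `{"text": ...}` part (the `if response_text:` guard). A hand-written sketch of the `contents` structure this now yields for one tool round-trip; the tool name and values are invented for illustration:

```python
messages = [
    {"role": "user", "parts": [{"text": "What's the weather in Paris?"}]},
    {
        "role": "model",
        "parts": [
            # From response.tool_calls_or_raise(); no "text" part, since
            # this model turn was purely a function call.
            {"function_call": {"name": "get_weather", "args": {"city": "Paris"}}}
        ],
    },
    {
        "role": "user",
        "parts": [
            # From response.prompt.tool_results on the follow-up turn.
            {
                "function_response": {
                    "name": "get_weather",
                    "response": {"output": "18C, partly cloudy"},
                }
            }
        ],
    },
]
```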
@@ -300,17 +344,34 @@ class _SharedGemini:
              "contents": self.build_messages(prompt, conversation),
              "safetySettings": SAFETY_SETTINGS,
          }
+         if prompt.system:
+             body["systemInstruction"] = {"parts": [{"text": prompt.system}]}
+
+         tools = []
          if prompt.options and prompt.options.code_execution:
-             body["tools"] = [{"codeExecution": {}}]
+             tools.append({"codeExecution": {}})
          if prompt.options and self.can_google_search and prompt.options.google_search:
              tool_name = (
                  "google_search_retrieval"
                  if self.model_id in GOOGLE_SEARCH_MODELS_USING_SEARCH_RETRIEVAL
                  else "google_search"
              )
-             body["tools"] = [{tool_name: {}}]
-         if prompt.system:
-             body["systemInstruction"] = {"parts": [{"text": prompt.system}]}
+             tools.append({tool_name: {}})
+         if prompt.tools:
+             tools.append(
+                 {
+                     "functionDeclarations": [
+                         {
+                             "name": tool.name,
+                             "description": tool.description,
+                             "parameters": tool.input_schema,
+                         }
+                         for tool in prompt.tools
+                     ]
+                 }
+             )
+         if tools:
+             body["tools"] = tools

          generation_config = {}

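Accumulating into a single `tools` list also fixes a latent clobbering bug visible in the removed lines: each old `body["tools"] = [...]` assignment replaced the previous one, so code execution and Google search could not be enabled together. A sketch of what the merged list can look like when all three sources are active; the function declaration is an invented example:

```python
body["tools"] = [
    {"codeExecution": {}},  # from the code_execution option
    {"google_search": {}},  # from the google_search option
    {
        # From prompt.tools: one declaration per tool the caller passed in.
        "functionDeclarations": [
            {
                "name": "get_weather",
                "description": "Look up the current weather for a city",
                "parameters": {
                    "type": "object",
                    "properties": {"city": {"type": "string"}},
                    "required": ["city"],
                },
            }
        ]
    },
]
```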
@@ -321,6 +382,7 @@ class _SharedGemini:
                      "response_schema": cleanup_schema(copy.deepcopy(prompt.schema)),
                  }
              )
+
          if self.can_thinking_budget and prompt.options.thinking_budget is not None:
              generation_config["thinking_config"] = {
                  "thinking_budget": prompt.options.thinking_budget
@@ -348,7 +410,14 @@ class _SharedGemini:

          return body

-     def process_part(self, part):
+     def process_part(self, part, response):
+         if "functionCall" in part:
+             response.add_tool_call(
+                 llm.ToolCall(
+                     name=part["functionCall"]["name"],
+                     arguments=part["functionCall"]["args"],
+                 )
+             )
          if "text" in part:
              return part["text"]
          elif "executableCode" in part:
@@ -357,10 +426,10 @@ class _SharedGemini:
              return f'```\n{part["codeExecutionResult"]["output"].strip()}\n```\n'
          return ""

-     def process_candidates(self, candidates):
+     def process_candidates(self, candidates, response):
          # We only use the first candidate
          for part in candidates[0]["content"]["parts"]:
-             yield self.process_part(part)
+             yield self.process_part(part, response)

      def set_usage(self, response):
          try:
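
`process_part` and `process_candidates` now thread the `response` object through so that `functionCall` parts can be recorded as they stream past; a pure function-call part falls through the text checks and yields `""`, keeping tool calls out of the visible output. A standalone sketch mirroring that dispatch (requires `llm>=0.26a0` for `llm.ToolCall`; the part contents are invented):

```python
import llm

def describe_part(part, tool_calls):
    """Mirror _SharedGemini.process_part's dispatch, collecting calls
    into a list instead of calling response.add_tool_call()."""
    if "functionCall" in part:
        tool_calls.append(
            llm.ToolCall(
                name=part["functionCall"]["name"],
                arguments=part["functionCall"]["args"],
            )
        )
    if "text" in part:
        return part["text"]
    return ""  # function-call parts contribute no visible text

calls = []
part = {"functionCall": {"name": "get_weather", "args": {"city": "Paris"}}}
print(repr(describe_part(part, calls)))  # ''
print(calls[0].name, calls[0].arguments)  # get_weather {'city': 'Paris'}
```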
@@ -404,7 +473,9 @@ class GeminiPro(_SharedGemini, llm.KeyModel):
                      if isinstance(event, dict) and "error" in event:
                          raise llm.ModelError(event["error"]["message"])
                      try:
-                         yield from self.process_candidates(event["candidates"])
+                         yield from self.process_candidates(
+                             event["candidates"], response
+                         )
                      except KeyError:
                          yield ""
                      gathered.append(event)
@@ -437,7 +508,7 @@ class AsyncGeminiPro(_SharedGemini, llm.AsyncKeyModel):
                              raise llm.ModelError(event["error"]["message"])
                          try:
                              for chunk in self.process_candidates(
-                                 event["candidates"]
+                                 event["candidates"], response
                              ):
                                  yield chunk
                          except KeyError:
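
With both the sync and async streaming paths passing `response` through, the plugin supports the full tool loop end to end: declare functions, let Gemini emit `functionCall` parts, execute them, and feed `function_response` parts back. A usage sketch against the tool API that llm 0.26 introduced (the `0.26a0` alpha pinned above may differ in detail; the model ID and `get_weather` stub are stand-ins):

```python
import llm

def get_weather(city: str) -> str:
    """Return the current weather for a city (stubbed for the example)."""
    return "18C, partly cloudy"

model = llm.get_model("gemini-2.0-flash")

# chain() drives the loop: Gemini's functionCall parts are executed
# against get_weather and the outputs are sent back to the model as
# function_response parts until it produces a final answer.
response = model.chain(
    "What's the weather in Paris right now?",
    tools=[get_weather],
)
print(response.text())
```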
llm_gemini-0.19.1.dist-info/RECORD DELETED
@@ -1,7 +0,0 @@
- llm_gemini.py,sha256=GJp1oDwcdLXy2QAYwp7jGN0KZsbmj_pJcqktYRC139Q,18700
- llm_gemini-0.19.1.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
- llm_gemini-0.19.1.dist-info/METADATA,sha256=PsXdnxt56uLdmbYnnCF9PZgQUi2EBx9uZEgEFW0l3IQ,8119
- llm_gemini-0.19.1.dist-info/WHEEL,sha256=0CuiUZ_p9E4cD6NyLD6UG80LBXYyiSYZOKDm5lp32xk,91
- llm_gemini-0.19.1.dist-info/entry_points.txt,sha256=n544bpgUPIBc5l_cnwsTxPc3gMGJHPtAyqBNp-CkMWk,26
- llm_gemini-0.19.1.dist-info/top_level.txt,sha256=WUQmG6_2QKbT_8W4HH93qyKl_0SUteL4Ra6_PhyNGKU,11
- llm_gemini-0.19.1.dist-info/RECORD,,