appkit-assistant 1.0.0-py3-none-any.whl → 1.0.2-py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry, and is provided for informational purposes only.
--- a/appkit_assistant/backend/processors/gemini_responses_processor.py
+++ b/appkit_assistant/backend/processors/gemini_responses_processor.py
@@ -21,10 +21,8 @@ from appkit_assistant.backend.database.models import (
     MCPServer,
 )
 from appkit_assistant.backend.processors.mcp_mixin import MCPCapabilities
-from appkit_assistant.backend.processors.processor_base import (
-    ProcessorBase,
-    mcp_oauth_redirect_uri,
-)
+from appkit_assistant.backend.processors.processor_base import mcp_oauth_redirect_uri
+from appkit_assistant.backend.processors.streaming_base import StreamingProcessorBase
 from appkit_assistant.backend.schemas import (
     AIModel,
     Chunk,
@@ -32,7 +30,6 @@ from appkit_assistant.backend.schemas import (
     Message,
     MessageType,
 )
-from appkit_assistant.backend.services.chunk_factory import ChunkFactory
 from appkit_assistant.backend.services.system_prompt_builder import SystemPromptBuilder
 
 logger = logging.getLogger(__name__)
@@ -98,7 +95,7 @@ class MCPSessionWrapper(NamedTuple):
     name: str
 
 
-class GeminiResponsesProcessor(ProcessorBase, MCPCapabilities):
+class GeminiResponsesProcessor(StreamingProcessorBase, MCPCapabilities):
     """Gemini processor using the GenAI API with native MCP support."""
 
     def __init__(
@@ -107,10 +104,9 @@ class GeminiResponsesProcessor(ProcessorBase, MCPCapabilities):
         api_key: str | None = None,
         oauth_redirect_uri: str = default_oauth_redirect_uri,
     ) -> None:
+        StreamingProcessorBase.__init__(self, models, "gemini_responses")
         MCPCapabilities.__init__(self, oauth_redirect_uri, "gemini_responses")
-        self.models = models
         self.client: genai.Client | None = None
-        self._chunk_factory = ChunkFactory(processor_name="gemini_responses")
         self._system_prompt_builder = SystemPromptBuilder()
 
         if api_key:
@@ -125,9 +121,9 @@ class GeminiResponsesProcessor(ProcessorBase, MCPCapabilities):
 
         logger.debug("Using redirect URI for MCP OAuth: %s", oauth_redirect_uri)
 
-    def get_supported_models(self) -> dict[str, AIModel]:
-        """Get supported models."""
-        return self.models
+    def _get_event_handlers(self) -> dict[str, Any]:
+        """Get event handlers (empty for Gemini - uses chunk-based handling)."""
+        return {}
 
     async def process(
         self,
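The constructor and accessor changes above push shared state into the new base class: the model registry and the chunk factory are no longer created per processor, and get_supported_models disappears from this file. A hedged sketch of what StreamingProcessorBase plausibly centralizes; only the names visible in this diff (models, chunk_factory, get_supported_models, _get_event_handlers) are taken from it, the rest is illustrative:

```python
# Sketch only -- the real base class lives in
# appkit_assistant.backend.processors.streaming_base and may differ in detail.
from typing import Any

from appkit_assistant.backend.schemas import AIModel
from appkit_assistant.backend.services.chunk_factory import ChunkFactory


class StreamingProcessorBase:
    """Shared plumbing for streaming processors: model registry and chunk factory."""

    def __init__(self, models: dict[str, AIModel], processor_name: str) -> None:
        self.models = models
        # Replaces the per-processor self._chunk_factory seen in 1.0.0.
        self.chunk_factory = ChunkFactory(processor_name=processor_name)

    def get_supported_models(self) -> dict[str, AIModel]:
        """Formerly re-implemented by each processor; now inherited."""
        return self.models

    def _get_event_handlers(self) -> dict[str, Any]:
        """Subclasses override; Gemini returns {} and handles chunks directly."""
        return {}
```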
@@ -194,7 +190,7 @@ class GeminiResponsesProcessor(ProcessorBase, MCPCapabilities):
 
         except Exception as e:
             logger.exception("Error in Gemini processor: %s", str(e))
-            yield self._chunk_factory.error(f"Error: {e!s}")
+            yield self.chunk_factory.error(f"Error: {e!s}")
 
     async def _create_mcp_sessions(
         self, servers: list[MCPServer], user_id: int | None
@@ -316,89 +312,99 @@ class GeminiResponsesProcessor(ProcessorBase, MCPCapabilities):
                 logger.info("Processing cancelled by user")
                 break
 
-            response = await self.client.aio.models.generate_content(
+            # Use streaming API
+            stream = await self.client.aio.models.generate_content_stream(
                 model=model_name, contents=current_contents, config=config
             )
 
-            if not response.candidates:
+            # Collect function calls and stream text as it arrives
+            collected_function_calls: list[Any] = []
+            collected_parts: list[types.Part] = []
+            streamed_text = ""
+
+            async for chunk in stream:
+                if cancellation_token and cancellation_token.is_set():
+                    logger.info("Processing cancelled by user")
+                    return
+
+                if not chunk.candidates or not chunk.candidates[0].content:
+                    continue
+
+                for part in chunk.candidates[0].content.parts:
+                    # Stream text immediately
+                    if part.text:
+                        streamed_text += part.text
+                        yield self.chunk_factory.text(part.text, delta=part.text)
+                    # Collect function calls
+                    if part.function_call is not None:
+                        collected_function_calls.append(part.function_call)
+                        collected_parts.append(part)
+
+            # If no function calls, we're done (text was already streamed)
+            if not collected_function_calls:
                 return
 
-            candidate = response.candidates[0]
-            content = candidate.content
-
-            # Check for function calls
-            function_calls = [
-                part.function_call
-                for part in content.parts
-                if part.function_call is not None
-            ]
-
-            if function_calls:
-                # Add model response with function calls to conversation
-                current_contents.append(content)
-
-                # Execute tool calls and collect results
-                function_responses = []
-                for fc in function_calls:
-                    # Parse unique tool name: server_name__tool_name
-                    server_name, original_tool_name = self._parse_unique_tool_name(
-                        fc.name
-                    )
-
-                    # Generate a unique tool call ID
-                    tool_call_id = f"mcp_{uuid.uuid4().hex[:32]}"
+            # Build the model's response content with function calls
+            model_response_parts = []
+            if streamed_text:
+                model_response_parts.append(types.Part(text=streamed_text))
+            model_response_parts.extend(collected_parts)
+            current_contents.append(
+                types.Content(role="model", parts=model_response_parts)
+            )
 
-                    # Yield TOOL_CALL chunk to show in UI (use original name)
-                    yield self._chunk_factory.tool_call(
-                        f"Benutze Werkzeug: {server_name}.{original_tool_name}",
-                        tool_name=original_tool_name,
-                        tool_id=tool_call_id,
-                        server_label=server_name,
-                        status="starting",
-                    )
+            # Execute tool calls and collect results
+            function_responses = []
+            for fc in collected_function_calls:
+                # Parse unique tool name: server_name__tool_name
+                server_name, original_tool_name = self._parse_unique_tool_name(fc.name)
+
+                # Generate a unique tool call ID
+                tool_call_id = f"mcp_{uuid.uuid4().hex[:32]}"
+
+                # Yield TOOL_CALL chunk to show in UI (use original name)
+                yield self.chunk_factory.tool_call(
+                    f"Benutze Werkzeug: {server_name}.{original_tool_name}",
+                    tool_name=original_tool_name,
+                    tool_id=tool_call_id,
+                    server_label=server_name,
+                    status="starting",
+                )
 
-                    result = await self._execute_mcp_tool(
-                        fc.name, fc.args, tool_contexts
-                    )
+                result = await self._execute_mcp_tool(fc.name, fc.args, tool_contexts)
 
-                    # Yield TOOL_RESULT chunk with preview
-                    preview = (
-                        result[:TOOL_RESULT_PREVIEW_LENGTH]
-                        if len(result) > TOOL_RESULT_PREVIEW_LENGTH
-                        else result
-                    )
-                    yield self._chunk_factory.tool_result(
-                        preview,
-                        tool_id=tool_call_id,
-                        status="completed",
-                    )
+                # Yield TOOL_RESULT chunk with preview
+                preview = (
+                    result[:TOOL_RESULT_PREVIEW_LENGTH]
+                    if len(result) > TOOL_RESULT_PREVIEW_LENGTH
+                    else result
+                )
+                yield self.chunk_factory.tool_result(
+                    preview,
+                    tool_id=tool_call_id,
+                    status="completed",
+                )
 
-                    function_responses.append(
-                        types.Part(
-                            function_response=types.FunctionResponse(
-                                name=fc.name,
-                                response={"result": result},
-                            )
+                function_responses.append(
+                    types.Part(
+                        function_response=types.FunctionResponse(
+                            name=fc.name,
+                            response={"result": result},
                         )
                     )
-                    logger.debug(
-                        "Tool %s executed, result length: %d",
-                        fc.name,
-                        len(str(result)),
-                    )
-
-                # Add function responses
-                current_contents.append(
-                    types.Content(role="user", parts=function_responses)
+                )
+                logger.debug(
+                    "Tool %s executed, result length: %d",
+                    fc.name,
+                    len(str(result)),
                 )
 
-                # Continue to next round
-                continue
+            # Add function responses
+            current_contents.append(
+                types.Content(role="user", parts=function_responses)
+            )
 
-            # No function calls - yield text response and finish
-            if text := self._extract_text_from_parts(content.parts):
-                yield self._chunk_factory.text(text, delta=text)
-            return
+            # Continue to next round
 
         logger.warning("Max tool rounds (%d) exceeded", max_tool_rounds)
 
@@ -668,5 +674,5 @@ class GeminiResponsesProcessor(ProcessorBase, MCPCapabilities):
             return None
 
         if text := self._extract_text_from_parts(chunk.candidates[0].content.parts):
-            return self._chunk_factory.text(text, delta=text)
+            return self.chunk_factory.text(text, delta=text)
         return None
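The streaming rewrite above replaces the single blocking generate_content call with generate_content_stream: text deltas are yielded to the UI as they arrive, function calls are collected on the side, and only after the stream ends are the tools executed and their results appended to current_contents for the next round. A condensed, standalone sketch of that pattern against the google-genai SDK follows; run_with_tools, execute_tool, and MAX_TOOL_ROUNDS are illustrative names, not identifiers from this package.

```python
# Condensed sketch of the stream-then-resolve-tools loop introduced in 1.0.2.
# execute_tool stands in for whatever maps a function call to a string result
# (in this package that role is played by _execute_mcp_tool).
from collections.abc import AsyncIterator, Awaitable, Callable

from google import genai
from google.genai import types

MAX_TOOL_ROUNDS = 5  # assumption; the processor uses its own max_tool_rounds


async def run_with_tools(
    client: genai.Client,
    model: str,
    contents: list[types.Content],
    config: types.GenerateContentConfig,
    execute_tool: Callable[[str, dict], Awaitable[str]],
) -> AsyncIterator[str]:
    """Stream text deltas; resolve collected tool calls between rounds."""
    for _ in range(MAX_TOOL_ROUNDS):
        stream = await client.aio.models.generate_content_stream(
            model=model, contents=contents, config=config
        )

        function_calls: list[types.FunctionCall] = []
        call_parts: list[types.Part] = []
        streamed_text = ""

        async for chunk in stream:
            if not chunk.candidates or not chunk.candidates[0].content:
                continue
            for part in chunk.candidates[0].content.parts:
                if part.text:
                    streamed_text += part.text
                    yield part.text  # forward the delta immediately
                if part.function_call is not None:
                    function_calls.append(part.function_call)
                    call_parts.append(part)

        if not function_calls:
            return  # plain answer; everything was already streamed

        # Echo the model turn (streamed text plus tool calls) into the history.
        model_parts: list[types.Part] = []
        if streamed_text:
            model_parts.append(types.Part(text=streamed_text))
        model_parts.extend(call_parts)
        contents.append(types.Content(role="model", parts=model_parts))

        # Resolve each call and feed results back as a user turn for the next round.
        responses: list[types.Part] = []
        for fc in function_calls:
            result = await execute_tool(fc.name, dict(fc.args or {}))
            responses.append(
                types.Part(
                    function_response=types.FunctionResponse(
                        name=fc.name, response={"result": result}
                    )
                )
            )
        contents.append(types.Content(role="user", parts=responses))
```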
--- a/appkit_assistant/state/thread_list_state.py
+++ b/appkit_assistant/state/thread_list_state.py
@@ -69,7 +69,7 @@ class ThreadListState(rx.State):
     async def initialize(self) -> AsyncGenerator[Any, Any]:
         """Initialize thread list - load summaries from database."""
         async with self:
-            if self._initialized:
+            if self._initialized or self.loading:
                 return
             self.loading = True
             yield
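In thread_list_state.py the guard in initialize is widened from only _initialized to also checking loading, so a second call arriving while the first load is still in flight returns early instead of querying the thread summaries twice. A minimal sketch of the guard pattern; the handler body mirrors the diff, while the class name, the summaries field, the _fetch_summaries helper, and how the handler is registered (e.g. as a background task) are illustrative:

```python
# Re-entrancy guard sketch; _fetch_summaries is a placeholder for the real
# database query, and handler registration details are omitted.
from collections.abc import AsyncGenerator
from typing import Any

import reflex as rx


class GuardedListState(rx.State):
    summaries: list[str] = []
    loading: bool = False
    _initialized: bool = False

    async def initialize(self) -> AsyncGenerator[Any, Any]:
        async with self:
            if self._initialized or self.loading:
                # A second call that arrives while the first is still loading
                # returns here instead of hitting the database again.
                return
            self.loading = True
            yield

            self.summaries = await self._fetch_summaries()
            self.loading = False
            self._initialized = True

    async def _fetch_summaries(self) -> list[str]:
        # Placeholder for the real summary query.
        return []
```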
--- a/appkit_assistant-1.0.0.dist-info/METADATA
+++ b/appkit_assistant-1.0.2.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: appkit-assistant
-Version: 1.0.0
+Version: 1.0.2
 Summary: Add your description here
 Project-URL: Homepage, https://github.com/jenreh/appkit
 Project-URL: Documentation, https://github.com/jenreh/appkit/tree/main/docs
--- a/appkit_assistant-1.0.0.dist-info/RECORD
+++ b/appkit_assistant-1.0.2.dist-info/RECORD
@@ -12,7 +12,7 @@ appkit_assistant/backend/models/openai.py,sha256=VZ9bxOZFM4VVALdx1yfV9kIwD9N1fAA
 appkit_assistant/backend/models/perplexity.py,sha256=16-pYghTIJdXAghHozVfpv_hHn2T8mVIC-2E3bhC0bQ,1237
 appkit_assistant/backend/processors/__init__.py,sha256=DqN8cgPNUNCkCzwqeuYIlwj_S9Nh-t4K4sm3KkyJx0M,1053
 appkit_assistant/backend/processors/claude_responses_processor.py,sha256=PfA9KRVcMxOpDd8AXYHWU_YZYSByymB1-r-SAYTPEHY,27430
-appkit_assistant/backend/processors/gemini_responses_processor.py,sha256=IfBH3n8VzSXT4biKsj4hR2TfItyIPhJXwUGBGmCf3Ag,24654
+appkit_assistant/backend/processors/gemini_responses_processor.py,sha256=wuhQNomj8_qnQ7_ZRMk_nAsNCkAx21mkDIziyzKuCuE,25210
 appkit_assistant/backend/processors/lorem_ipsum_processor.py,sha256=iZLVCuYPb_lBG-p3Ug2QvuL28kEhtwhWL2Yy_WiYbrU,5201
 appkit_assistant/backend/processors/mcp_mixin.py,sha256=Uj60p21GXAeNSmcfwMhUOuaWwGfr2ssAKSDjGwFMwls,9824
 appkit_assistant/backend/processors/openai_base.py,sha256=hLg1uIlrcfQjsewQhBKg8_1kjnk1-Pc9Y1KVYUe8_BA,2348
@@ -51,8 +51,8 @@ appkit_assistant/state/file_manager_state.py,sha256=G0ZkgzFIqa6H2ctc5uID2aSxt-e7
 appkit_assistant/state/mcp_oauth_state.py,sha256=vWiCWolRY-sSUJGPEGHS-rUwlpomGKfpejZck9EImas,7703
 appkit_assistant/state/mcp_server_state.py,sha256=TQOhnXEfuA5bWFh-5f5R5LfTZErXFXNZa4t0_vP1bGM,13174
 appkit_assistant/state/system_prompt_state.py,sha256=E2jbBIGfgifvJRZFmEmeooWv5xihUfPbhFe8MzZAS0E,7714
-appkit_assistant/state/thread_list_state.py,sha256=u26A5FOliTu7sBZVW3vQbO4uC2V0ss1D2DGKqrF8JDg,14066
+appkit_assistant/state/thread_list_state.py,sha256=LQs7sjBTszORhr_Czm4mZxWVjb2vO7Z5UutMOX6ansM,14082
 appkit_assistant/state/thread_state.py,sha256=udIJWCORMuASIOZMnZidoyC6Te-qyDBkZfi7MOIHmKs,42937
-appkit_assistant-1.0.0.dist-info/METADATA,sha256=uhumVvQjPltU8QMKA0Dcf0in0iR0auGv1NduhnrQpwY,9574
-appkit_assistant-1.0.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
-appkit_assistant-1.0.0.dist-info/RECORD,,
+appkit_assistant-1.0.2.dist-info/METADATA,sha256=ZuRH9ilUPky1H7-SHFyzCUTxa2_coqaAfkiCQz4iYMo,9574
+appkit_assistant-1.0.2.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+appkit_assistant-1.0.2.dist-info/RECORD,,