appkit-assistant 0.7.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,123 @@
+ """
+ Lorem Ipsum processor for generating random text responses.
+ """
+
+ import asyncio
+ import logging
+ import random
+ from collections.abc import AsyncGenerator
+
+ from appkit_assistant.backend.models import (
+     AIModel,
+     Chunk,
+     ChunkType,
+     MCPServer,
+     Message,
+ )
+ from appkit_assistant.backend.processor import Processor
+
+ logger = logging.getLogger(__name__)
+
+ # List of Lorem Ipsum paragraphs for random selection
+ LOREM_PARAGRAPHS = [
+     "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", # noqa: E501
+     "Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.", # noqa: E501
+     "Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur.", # noqa: E501
+     "Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.", # noqa: E501
+     "Integer posuere erat a ante venenatis dapibus posuere velit aliquet.",
+     "Cras mattis consectetur purus sit amet fermentum. Nullam quis risus eget urna mollis ornare vel eu leo.", # noqa: E501
+     "Donec sed odio dui. Maecenas faucibus mollis interdum. Cras justo odio, dapibus ac facilisis in, egestas eget quam.", # noqa: E501
+     "Vestibulum id ligula porta felis euismod semper. Lorem ipsum dolor sit amet, consectetur adipiscing elit.", # noqa: E501
+ ]
+
+ # Models supported by this processor as a dictionary with model ID as the key
+ LOREM_MODELS = {
+     "lorem-short": AIModel(
+         id="lorem-short",
+         text="Lorem Ipsum",
+         icon="codesandbox",
+         model="lorem-short",
+         stream=True,
+     )
+ }
+
+
+ class LoremIpsumProcessor(Processor):
+     """Processor that generates Lorem Ipsum text responses."""
+
+     def __init__(self, models: dict[str, AIModel] = LOREM_MODELS) -> None:
+         """Initialize the Lorem Ipsum processor."""
+         self.models = models
+         logger.debug("Lorem Ipsum processor initialized")
+
+     async def process(
+         self,
+         messages: list[Message], # noqa: ARG002
+         model_id: str,
+         files: list[str] | None = None, # noqa: ARG002
+         mcp_servers: list[MCPServer] | None = None, # noqa: ARG002
+     ) -> AsyncGenerator[Chunk, None]:
+         """
+         Generate a Lorem Ipsum response of varying lengths based on the model_id.
+
+         Args:
+             messages: List of messages (ignored for this processor).
+             model_id: The model ID (determines response length).
+             files: Optional list of files (ignored for this processor).
+             mcp_servers: Optional list of MCP servers (ignored for this processor).
+
+         Returns:
+             An async generator that yields Chunk objects with text content.
+         """
+         if model_id not in self.models:
+             raise ValueError(f"Model {model_id} not supported by Lorem Ipsum processor")
+
+         # Simulate thinking process
+         yield Chunk(
+             type=ChunkType.THINKING,
78
+ text="I think i need to generate Lorem Ipsum content...",
79
+ chunk_metadata={"source": "lorem_ipsum", "model": model_id},
80
+ )
81
+ await asyncio.sleep(0.5)
82
+
83
+ num_paragraphs = random.randint(4, 8) # noqa: S311
84
+ for i in range(num_paragraphs):
85
+ paragraph = random.choice(LOREM_PARAGRAPHS) # noqa: S311
86
+ words = paragraph.split()
87
+ for word in words:
88
+ content = word + " "
89
+ await asyncio.sleep(0.01)
90
+ yield Chunk(
91
+ type=ChunkType.TEXT,
92
+ text=content,
93
+ chunk_metadata={
94
+ "source": "lorem_ipsum",
95
+ "paragraph": str(i + 1),
96
+ "total_paragraphs": str(num_paragraphs),
97
+ },
98
+ )
99
+
100
+ if i < num_paragraphs - 1:
101
+ yield Chunk(
102
+ type=ChunkType.TEXT,
103
+ text="\n\n",
104
+ chunk_metadata={
105
+ "source": "lorem_ipsum",
106
+ "type": "paragraph_separator",
107
+ },
108
+ )
109
+
110
+ yield Chunk(
111
+ type=ChunkType.THINKING,
+             text="So, I've generated enough Lorem Ipsum for you!",
+             chunk_metadata={"source": "lorem_ipsum", "model": model_id},
+         )
+
+     def get_supported_models(self) -> dict[str, AIModel]:
+         """
+         Get dictionary of supported models.
+
+         Returns:
+             Dictionary mapping model IDs to AIModel objects.
+         """
+         return self.models
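A minimal, hedged sketch of driving the LoremIpsumProcessor above. The module path in the import and the Message keyword arguments are assumptions (the diff does not show file names, and only msg.type / msg.text are read elsewhere in the package):

import asyncio

from appkit_assistant.backend.models import ChunkType, Message, MessageType
from appkit_assistant.backend.processors.lorem_ipsum import LoremIpsumProcessor  # path assumed


async def demo() -> None:
    processor = LoremIpsumProcessor()
    # The processor ignores the message history, but the signature still expects a list.
    history = [Message(type=MessageType.HUMAN, text="Say something")]
    async for chunk in processor.process(history, model_id="lorem-short"):
        if chunk.type == ChunkType.TEXT:
            print(chunk.text, end="", flush=True)


asyncio.run(demo())

The ChunkType check skips the two THINKING chunks, so only the streamed Lorem Ipsum words are printed.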
@@ -0,0 +1,73 @@
+ """
+ OpenAI processor for generating AI responses using OpenAI's API.
+ """
+
+ import logging
+ from abc import ABC, abstractmethod
+ from collections.abc import AsyncGenerator
+ from typing import Any
+
+ from openai import AsyncAzureOpenAI, AsyncOpenAI
+
+ from appkit_assistant.backend.models import (
+     AIModel,
+     Chunk,
+     MCPServer,
+     Message,
+ )
+ from appkit_assistant.backend.processor import Processor
+
+ logger = logging.getLogger(__name__)
+
+
+ class BaseOpenAIProcessor(Processor, ABC):
+     """Base class for OpenAI processors with common initialization and utilities."""
+
+     def __init__(
+         self,
+         models: dict[str, AIModel],
+         api_key: str | None = None,
+         base_url: str | None = None,
+         is_azure: bool = False,
+     ) -> None:
+         """Initialize the base OpenAI processor.
+
+         Args:
+             models: Dictionary of supported AI models
+             api_key: API key for OpenAI/Azure OpenAI
+             base_url: Base URL for the API
+             is_azure: Whether to use Azure OpenAI client
+         """
+         self.api_key = api_key
+         self.base_url = base_url
+         self.models = models
+         self.is_azure = is_azure
+         self.client = None
+
+         if self.api_key and self.base_url and is_azure:
+             self.client = AsyncAzureOpenAI(
+                 api_key=self.api_key,
+                 azure_endpoint=self.base_url,
+                 api_version="2025-04-01-preview",
+             )
+         elif self.api_key and self.base_url:
+             self.client = AsyncOpenAI(api_key=self.api_key, base_url=self.base_url)
+         elif self.api_key:
+             self.client = AsyncOpenAI(api_key=self.api_key)
+         else:
+             logger.warning("No API key found. Processor will not work.")
+
+     @abstractmethod
+     async def process(
+         self,
+         messages: list[Message],
+         model_id: str,
+         files: list[str] | None = None,
+         mcp_servers: list[MCPServer] | None = None,
+         payload: dict[str, Any] | None = None,
+     ) -> AsyncGenerator[Chunk, None]:
+         """Process messages and generate AI response chunks."""
+
+     def get_supported_models(self) -> dict[str, AIModel]:
+         """Return supported models if API key is available."""
+         return self.models if self.api_key else {}
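The constructor above selects the client from the arguments it receives: api_key plus base_url with is_azure=True builds an AsyncAzureOpenAI client, api_key plus base_url builds an AsyncOpenAI client against a custom endpoint, and api_key alone falls back to the default OpenAI endpoint. A hedged sketch of wiring a concrete subclass (the OpenAIChatCompletionsProcessor from the next file) through those paths; the import path, model definition, and environment variable names are illustrative assumptions, not part of the package:

import os

from appkit_assistant.backend.models import AIModel
from appkit_assistant.backend.processors.openai_chat_completions import (  # path assumed
    OpenAIChatCompletionsProcessor,
)

# Field names mirror LOREM_MODELS above; the values themselves are made up.
GPT_MODELS = {
    "gpt-4o": AIModel(
        id="gpt-4o",
        text="GPT-4o",
        icon="openai",
        model="gpt-4o",
        stream=True,
        temperature=0.2,
    )
}

# Plain OpenAI: an API key is enough.
openai_processor = OpenAIChatCompletionsProcessor(
    models=GPT_MODELS,
    api_key=os.environ.get("OPENAI_API_KEY"),
)

# Azure OpenAI: api_key + base_url + is_azure=True selects AsyncAzureOpenAI.
azure_processor = OpenAIChatCompletionsProcessor(
    models=GPT_MODELS,
    api_key=os.environ.get("AZURE_OPENAI_API_KEY"),
    base_url=os.environ.get("AZURE_OPENAI_ENDPOINT"),
    is_azure=True,
)

Without an api_key, the base class only logs a warning: get_supported_models() then returns an empty dict, and process() in the subclass raises because self.client stays None.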
@@ -0,0 +1,117 @@
+ import logging
+ from collections.abc import AsyncGenerator
+ from typing import Any
+
+ from openai import AsyncStream
+ from openai.types.chat import ChatCompletionMessageParam
+
+ from appkit_assistant.backend.models import (
+     Chunk,
+     ChunkType,
+     MCPServer,
+     Message,
+     MessageType,
+ )
+ from appkit_assistant.backend.processors.openai_base import BaseOpenAIProcessor
+
+ logger = logging.getLogger(__name__)
+
+
+ class OpenAIChatCompletionsProcessor(BaseOpenAIProcessor):
+     """Processor that generates responses using OpenAI's Chat Completions API."""
+
+     async def process(
+         self,
+         messages: list[Message],
+         model_id: str,
+         files: list[str] | None = None, # noqa: ARG002
+         mcp_servers: list[MCPServer] | None = None,
+         payload: dict[str, Any] | None = None,
+     ) -> AsyncGenerator[Chunk, None]:
+         """Process messages using the Chat Completions API.
+
+         Args:
+             messages: List of messages to process
+             model_id: ID of the model to use
+             files: File attachments (not used in chat completions)
+             mcp_servers: MCP servers (will log warning if provided)
+             payload: Additional payload parameters
+         """
+         if not self.client:
+             raise ValueError("OpenAI Client not initialized.")
+
+         if model_id not in self.models:
+             raise ValueError(f"Model {model_id} not supported by OpenAI processor")
+
+         if mcp_servers:
+             logger.warning(
+                 "MCP servers provided to ChatCompletionsProcessor but not supported. "
+                 "Use OpenAIResponsesProcessor for MCP functionality."
+             )
+
+         model = self.models[model_id]
+
+         try:
+             chat_messages = self._convert_messages_to_openai_format(messages)
+             session = await self.client.chat.completions.create(
+                 model=model.model,
+                 messages=chat_messages[:-1],
+                 stream=model.stream,
+                 temperature=model.temperature,
+                 extra_body=payload,
+             )
+
+             if isinstance(session, AsyncStream):
+                 async for event in session:
+                     if event.choices and event.choices[0].delta:
+                         content = event.choices[0].delta.content
+                         if content:
+                             yield self._create_chunk(content, model.model, stream=True)
+             else:
+                 content = session.choices[0].message.content
+                 if content:
+                     yield self._create_chunk(content, model.model)
+         except Exception:
+             logger.exception("Chat Completions request failed")
+             raise
+
+     def _create_chunk(self, content: str, model: str, stream: bool = False) -> Chunk:
+         return Chunk(
+             type=ChunkType.TEXT,
+             text=content,
+             chunk_metadata={
+                 "source": "chat_completions",
+                 "streaming": str(stream),
+                 "model": model,
+             },
+         )
+
+     def _convert_messages_to_openai_format(
+         self, messages: list[Message]
+     ) -> list[ChatCompletionMessageParam]:
+         """Convert internal messages to OpenAI chat completion format.
+
+         Notes:
+             - OpenAI Chat Completions requires that after any system messages,
+               user/tool messages must alternate with assistant messages. To
+               ensure this, merge consecutive user (human) or assistant messages
+               into a single message by concatenating their text with a blank
+               line separator.
+         """
+         formatted: list[ChatCompletionMessageParam] = []
+         role_map = {
+             MessageType.HUMAN: "user",
+             MessageType.SYSTEM: "system",
+             MessageType.ASSISTANT: "assistant",
+         }
+
+         for msg in messages or []:
+             if msg.type not in role_map:
+                 continue
+             role = role_map[msg.type]
+             if formatted and role != "system" and formatted[-1]["role"] == role:
+                 # Merge consecutive user/assistant messages
+                 formatted[-1]["content"] = formatted[-1]["content"] + "\n\n" + msg.text
+             else:
+                 formatted.append({"role": role, "content": msg.text})
+
+         return formatted
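Finally, a hedged end-to-end sketch of calling the chat-completions processor. Two assumptions inferred from the code above, not stated in the package: Message takes type/text keyword arguments, and because process() sends chat_messages[:-1], the history here ends with an empty assistant entry acting as a placeholder for the reply. The example also illustrates the merge rule from _convert_messages_to_openai_format: two consecutive HUMAN messages go out as a single user turn joined by a blank line.

import asyncio
import os

from appkit_assistant.backend.models import Message, MessageType


async def chat() -> None:
    # GPT_MODELS and OpenAIChatCompletionsProcessor as in the previous sketch.
    processor = OpenAIChatCompletionsProcessor(
        models=GPT_MODELS,
        api_key=os.environ.get("OPENAI_API_KEY"),
    )
    history = [
        Message(type=MessageType.SYSTEM, text="You are a concise assistant."),
        # Merged into one user message:
        # {"role": "user", "content": "What does this package do?\n\nAnswer in one sentence."}
        Message(type=MessageType.HUMAN, text="What does this package do?"),
        Message(type=MessageType.HUMAN, text="Answer in one sentence."),
        # Dropped by chat_messages[:-1]; stands in for the streamed reply.
        Message(type=MessageType.ASSISTANT, text=""),
    ]
    async for chunk in processor.process(history, model_id="gpt-4o"):
        print(chunk.text, end="", flush=True)


asyncio.run(chat())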