dandy 0.0.3__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64) hide show
  1. dandy-0.0.3/CHANGELOG.md +30 -0
  2. dandy-0.0.3/CONTRIBUTORS.md +0 -0
  3. dandy-0.0.3/LICENSE.md +22 -0
  4. dandy-0.0.3/MANIFEST.in +9 -0
  5. dandy-0.0.3/PKG-INFO +21 -0
  6. dandy-0.0.3/README.md +11 -0
  7. dandy-0.0.3/dandy/__init__.py +1 -0
  8. dandy-0.0.3/dandy/__pycache__/__init__.cpython-311.pyc +0 -0
  9. dandy-0.0.3/dandy/bot/__init__.py +2 -0
  10. dandy-0.0.3/dandy/bot/bot.py +8 -0
  11. dandy-0.0.3/dandy/bot/exceptions.py +2 -0
  12. dandy-0.0.3/dandy/bot/llm_bot.py +30 -0
  13. dandy-0.0.3/dandy/contrib/__init__.py +0 -0
  14. dandy-0.0.3/dandy/contrib/bots/__init__.py +1 -0
  15. dandy-0.0.3/dandy/contrib/bots/choice_llm_bot.py +148 -0
  16. dandy-0.0.3/dandy/core/__init__.py +0 -0
  17. dandy-0.0.3/dandy/core/exceptions.py +2 -0
  18. dandy-0.0.3/dandy/core/singleton.py +7 -0
  19. dandy-0.0.3/dandy/core/type_vars.py +6 -0
  20. dandy-0.0.3/dandy/core/url.py +38 -0
  21. dandy-0.0.3/dandy/handler/__init__.py +0 -0
  22. dandy-0.0.3/dandy/handler/handler.py +9 -0
  23. dandy-0.0.3/dandy/llm/__init__.py +0 -0
  24. dandy-0.0.3/dandy/llm/config.py +95 -0
  25. dandy-0.0.3/dandy/llm/exceptions.py +12 -0
  26. dandy-0.0.3/dandy/llm/prompt/__init__.py +2 -0
  27. dandy-0.0.3/dandy/llm/prompt/prompt.py +208 -0
  28. dandy-0.0.3/dandy/llm/prompt/snippet.py +125 -0
  29. dandy-0.0.3/dandy/llm/prompt/tests/__init__.py +0 -0
  30. dandy-0.0.3/dandy/llm/prompt/tests/test_prompt.py +18 -0
  31. dandy-0.0.3/dandy/llm/service/__init__.py +1 -0
  32. dandy-0.0.3/dandy/llm/service/messages.py +6 -0
  33. dandy-0.0.3/dandy/llm/service/prompts.py +48 -0
  34. dandy-0.0.3/dandy/llm/service/request.py +18 -0
  35. dandy-0.0.3/dandy/llm/service/service.py +123 -0
  36. dandy-0.0.3/dandy/llm/service/tests/__init__.py +0 -0
  37. dandy-0.0.3/dandy/llm/service/tests/test_service.py +26 -0
  38. dandy-0.0.3/dandy/llm/tests/__init__.py +0 -0
  39. dandy-0.0.3/dandy/llm/tests/configs.py +18 -0
  40. dandy-0.0.3/dandy/llm/tests/models.py +18 -0
  41. dandy-0.0.3/dandy/llm/tests/prompts.py +8 -0
  42. dandy-0.0.3/dandy/llm/utils.py +42 -0
  43. dandy-0.0.3/dandy/workflow/__init__.py +0 -0
  44. dandy-0.0.3/dandy/workflow/exceptions.py +5 -0
  45. dandy-0.0.3/dandy/workflow/workflow.py +9 -0
  46. dandy-0.0.3/dandy.egg-info/PKG-INFO +21 -0
  47. dandy-0.0.3/dandy.egg-info/SOURCES.txt +63 -0
  48. dandy-0.0.3/dandy.egg-info/dependency_links.txt +1 -0
  49. dandy-0.0.3/dandy.egg-info/not-zip-safe +1 -0
  50. dandy-0.0.3/dandy.egg-info/requires.txt +1 -0
  51. dandy-0.0.3/dandy.egg-info/top_level.txt +2 -0
  52. dandy-0.0.3/pyproject.toml +6 -0
  53. dandy-0.0.3/setup.cfg +7 -0
  54. dandy-0.0.3/setup.py +32 -0
  55. dandy-0.0.3/tests/__init__.py +0 -0
  56. dandy-0.0.3/tests/bots/__init__.py +0 -0
  57. dandy-0.0.3/tests/bots/existing_work_orders_bot.py +10 -0
  58. dandy-0.0.3/tests/bots/work_order_comparison_bot.py +33 -0
  59. dandy-0.0.3/tests/factories.py +41 -0
  60. dandy-0.0.3/tests/models/__init__.py +0 -0
  61. dandy-0.0.3/tests/models/work_order_models.py +13 -0
  62. dandy-0.0.3/tests/test_dandy.py +12 -0
  63. dandy-0.0.3/tests/workflows/__init__.py +0 -0
  64. dandy-0.0.3/tests/workflows/find_similar_work_orders_workflows.py +6 -0
@@ -0,0 +1,30 @@
1
+ # Changelog
2
+
3
+ # v0.0.2
4
+
5
+ ### Features
6
+ - LLM Service Retries
7
+ - LLM Prompt Validation Retries
8
+ - Single and Multiple Choice LLM Bots added into contributions
9
+ - Custom Exceptions that are based on the DandyException
10
+ - Much improved testing on base LLM service
11
+
12
+ # v0.0.1
13
+
14
+ ### Features
15
+ - LLM Service
16
+ - LLM Prompts
17
+ - Bot
18
+ - LlmBot
19
+ - Workflow
20
+
21
+ # v0.0.0
22
+
23
+ ### Features
24
+ - Initial Release
25
+
26
+ ### Changes
27
+ - Initial Release
28
+
29
+ ### Fixes
30
+ - Initial Release
File without changes
dandy-0.0.3/LICENSE.md ADDED
@@ -0,0 +1,22 @@
1
+ Copyright (c) 2024 Stratus Advanced Technologies and Contributors.
2
+
3
+ Permission is hereby granted, free of charge, to any person
4
+ obtaining a copy of this software and associated documentation
5
+ files (the "Software"), to deal in the Software without
6
+ restriction, including without limitation the rights to use,
7
+ copy, modify, merge, publish, distribute, sublicense, and/or sell
8
+ copies of the Software, and to permit persons to whom the
9
+ Software is furnished to do so, subject to the following
10
+ conditions:
11
+
12
+ The above copyright notice and this permission notice shall be
13
+ included in all copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
17
+ OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
19
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
20
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22
+ OTHER DEALINGS IN THE SOFTWARE.
@@ -0,0 +1,9 @@
1
+ include README.md
2
+ include LICENSE.md
3
+ include CHANGELOG.md
4
+ include CONTRIBUTORS.md
5
+ recursive-include dandy *
6
+ exclude *.db
7
+ recursive-exclude tests *
8
+ recursive-exclude docs *
9
+ recursive-exclude .github *
dandy-0.0.3/PKG-INFO ADDED
@@ -0,0 +1,21 @@
1
+ Metadata-Version: 2.1
2
+ Name: dandy
3
+ Version: 0.0.3
4
+ Summary: Intelligence Bot Framework
5
+ Home-page: https://github.com/stratusadv/dandy
6
+ Author: Nathan Johnson
7
+ Author-email: info@stratusadv.com
8
+ License: MIT
9
+ Keywords: dandy,ai,llm,agent,prompt,gpt
10
+ Classifier: Development Status :: 5 - Production/Stable
11
+ Classifier: License :: OSI Approved :: MIT License
12
+ Classifier: Operating System :: OS Independent
13
+ Classifier: Programming Language :: Python :: 3
14
+ Classifier: Programming Language :: Python :: 3.10
15
+ Classifier: Programming Language :: Python :: 3.11
16
+ Classifier: Programming Language :: Python :: 3.12
17
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
18
+ Requires-Python: >=3.11
19
+ License-File: LICENSE.md
20
+
21
+ Dandy is a framework for developing programmatic intelligent bots and workflows. It's opinionated, magical, and designed to be incredibly pythonic.
dandy-0.0.3/README.md ADDED
@@ -0,0 +1,11 @@
1
+ <p align="center">
2
+ <img src="./docs/images/dandy_logo_512.png" alt="Dandy AI Framework">
3
+ </p>
4
+
5
+ ## What
6
+
7
+ Dandy is a framework for developing programmatic intelligent bots and workflows. It's opinionated, magical, and designed to be incredibly pythonic.
8
+
9
+ ## Why
10
+
11
+ In the pursuit of delivering incredible outcomes to our clients, we felt we needed a framework that could handle the demands of the future when it comes to artificial intelligence.
@@ -0,0 +1 @@
1
+ __version__ = "0.0.3"
@@ -0,0 +1,2 @@
1
+ from .bot import Bot
2
+ from .llm_bot import LlmBot
@@ -0,0 +1,8 @@
1
+ from abc import ABC, abstractmethod
2
+ from typing import Any
3
+
4
+ from dandy.handler.handler import Handler
5
+
6
+
7
class Bot(Handler, ABC):
    # Marker base class for all bots; inherits Handler's abstract
    # `process` classmethod contract without adding behavior of its own.
    ...
@@ -0,0 +1,2 @@
1
class BotException(Exception):
    # NOTE(review): LlmException derives from DandyException while this class
    # derives from Exception directly — consider rebasing on DandyException
    # for consistency with the changelog's "based on the DandyException" note.
    pass
@@ -0,0 +1,30 @@
1
+ from abc import ABC
2
+ from typing import Type
3
+
4
+ from dandy.bot.bot import Bot
5
+ from dandy.core.type_vars import ModelType
6
+ from dandy.llm.config import LlmConfig
7
+ from dandy.llm.prompt import Prompt
8
+
9
+
10
class LlmBot(Bot, ABC):
    """Base class for bots that resolve prompts through an LLM service.

    Subclasses supply a role prompt, an instructions prompt, and an LlmConfig;
    the two prompts are combined into the system prompt for every call.
    """

    role_prompt: Prompt
    instructions_prompt: Prompt
    llm_config: LlmConfig

    @classmethod
    def process_prompt_to_model_object(
            cls,
            prompt: Prompt,
            model: Type[ModelType],
    ) -> ModelType:
        """Send `prompt` to the configured LLM service and parse the reply into `model`."""
        system_prompt = (
            Prompt()
            .prompt(cls.role_prompt)
            .prompt(cls.instructions_prompt)
        )

        return cls.llm_config.service.process_prompt_to_model_object(
            prompt=prompt,
            model=model,
            prefix_system_prompt=system_prompt,
        )
File without changes
@@ -0,0 +1 @@
1
+ from dandy.contrib.bots.choice_llm_bot import SingleChoiceLlmBot, MultipleChoiceLlmBot
@@ -0,0 +1,148 @@
1
+ from abc import ABC
2
+ from enum import Enum
3
+ from typing import Tuple, List, Union, overload, Type
4
+
5
+ from pydantic import BaseModel
6
+
7
+ from dandy.bot import LlmBot
8
+ from dandy.bot.exceptions import BotException
9
+ from dandy.llm.prompt import Prompt
10
+
11
+
12
# Sentinel value the LLM is instructed to return when no choice matches.
NO_CHOICE_FOUND_RESPONSE = 'no-choice-match-found'


class SingleChoiceResponse(BaseModel):
    # Structured LLM response carrying exactly one selected choice value.
    selected_choice: str


class MultipleChoiceResponse(BaseModel):
    # Structured LLM response carrying the selected choice values.
    selected_choices: List[str]
21
+
22
+
23
class _ChoiceLlmBot(LlmBot, ABC):
    """Shared base for the single- and multiple-choice bots.

    Builds a prompt from the user's input plus the candidate choices and asks
    the configured LLM service for a structured choice response.
    """

    role_prompt = (
        Prompt()
        .text('You are a choice bot.')  # fixed grammar: was "an choice bot"
    )

    @classmethod
    def process(
            cls,
            user_input: str,
            choices: Union[Type[Enum], List[str], Tuple[str]],
            choice_response_model: Union[Type[SingleChoiceResponse], Type[MultipleChoiceResponse]]
    ) -> Union[SingleChoiceResponse, MultipleChoiceResponse]:
        """Ask the LLM which of `choices` match `user_input`.

        Raises:
            BotException: if `choices` is not an Enum subclass, a list, or a tuple.
        """
        prompt = (
            Prompt()
            .text('This is the user input:')
            .text(user_input, triple_quote=True)
            .text('These are the choices:')
        )

        # Enum choices are presented by member value; unordered_random_list
        # presumably randomizes presentation order — TODO confirm in Prompt.
        if isinstance(choices, type) and issubclass(choices, Enum):
            prompt.unordered_random_list([choice.value for choice in choices], triple_quote=True)
        elif isinstance(choices, (list, tuple)):
            prompt.unordered_random_list(choices, triple_quote=True)
        else:
            raise BotException('Choices must be an Enum, a list or a tuple.')

        return cls.process_prompt_to_model_object(
            prompt=prompt,
            model=choice_response_model
        )
55
+
56
+
57
class _ChoiceOverloadMixin:
    """Typing-only mixin giving choice bots precise `process` overloads.

    The overloads declare: list/tuple choices produce str results, Enum
    choices produce Enum results. The final (non-overload) definition is the
    typing fallback; its `...` body returns None if ever called, but both
    SingleChoiceLlmBot and MultipleChoiceLlmBot define their own `process`,
    which shadows this one in the MRO.
    """

    @classmethod
    @overload
    def process(
            cls,
            user_input: str,
            choices: Union[List[str], Tuple[str]],
            choice_response_model: Type[BaseModel]
    ) -> Union[str, List[str], None]:
        ...

    @classmethod
    @overload
    def process(
            cls,
            user_input: str,
            choices: Type[Enum],
            choice_response_model: Type[BaseModel]
    ) -> Union[Enum, List[Enum], None]:
        ...

    @classmethod
    def process(
            cls,
            user_input: str,
            choices: Union[Type[Enum], List[str], Tuple[str]],
            **kwargs
    ) -> Union[Enum, List[Enum], str, List[str], None]:
        # Fallback signature for type checkers only — never the runtime path.
        ...
86
+
87
+
88
class SingleChoiceLlmBot(_ChoiceLlmBot, _ChoiceOverloadMixin):
    """Matches user input to exactly one of the provided choices, or None."""

    instructions_prompt = (
        Prompt()
        .text('Your job is to identify the intent of the user input and match it to the provided choices.')
        .text(f'If there is no good matches in the choices reply with value "{NO_CHOICE_FOUND_RESPONSE}".')
    )

    @classmethod
    def process(
            cls,
            user_input: str,
            choices: Union[Type[Enum], List[str], Tuple[str]],
            **kwargs
    ) -> Union[Enum, str, None]:
        """Return the single matching choice (Enum member or string), or None."""
        response = super().process(
            user_input=user_input,
            choices=choices,
            choice_response_model=SingleChoiceResponse,
        )

        picked = response.selected_choice

        # Sentinel value means the LLM found nothing suitable.
        if picked == NO_CHOICE_FOUND_RESPONSE:
            return None

        # Map the raw string back onto the Enum member when an Enum was given.
        if isinstance(choices, type) and issubclass(choices, Enum):
            return choices(picked)

        return picked
117
+
118
+
119
class MultipleChoiceLlmBot(_ChoiceLlmBot, _ChoiceOverloadMixin):
    """Matches user input to any number of the provided choices, or None."""

    instructions_prompt = (
        Prompt()
        .text('Your job is to identify the intent of the user input and match it to the provided choices.')
        .text('Return as many choices as you see relevant to the user input.')
        .text(f'If there is no good matches in the choices reply with value "{NO_CHOICE_FOUND_RESPONSE}".')
    )

    @classmethod
    def process(
            cls,
            user_input: str,
            choices: Union[Type[Enum], List[str], Tuple[str]],
            **kwargs
    ) -> Union[List[Enum], List[str], None]:
        """Return the matching choices (Enum members or strings), or None."""
        response = super().process(
            user_input=user_input,
            choices=choices,
            choice_response_model=MultipleChoiceResponse
        )

        picked = response.selected_choices

        # The sentinel anywhere in the list means the LLM found nothing suitable.
        if NO_CHOICE_FOUND_RESPONSE in picked:
            return None

        # Map raw strings back onto Enum members when an Enum was given.
        if isinstance(choices, type) and issubclass(choices, Enum):
            return [choices(value) for value in picked]

        return picked
File without changes
@@ -0,0 +1,2 @@
1
class DandyException(Exception):
    """Root of the dandy exception hierarchy; catch this for any dandy error."""
    pass
@@ -0,0 +1,7 @@
1
class Singleton:
    """Base class restricting a class to one shared, lazily created instance.

    NOTE(review): `cls._instance` resolves through normal attribute
    inheritance — if a base class is instantiated first, subclasses will
    find and return that same instance; confirm per-subclass instances
    are not required. Also note `__init__` still re-runs on every call.
    """

    _instance = None

    def __new__(cls, *args, **kwargs):
        if cls._instance is None:
            # object.__new__() accepts no extra arguments; forwarding
            # *args/**kwargs (as the previous code did) raised TypeError for
            # any subclass whose __init__ takes parameters.
            cls._instance = super().__new__(cls)
        return cls._instance
@@ -0,0 +1,6 @@
1
from typing import TypeVar

from pydantic import BaseModel


# Type variable bound to pydantic BaseModel subclasses; lets LLM helpers
# declare that the model class passed in is also the instance type returned.
ModelType = TypeVar('ModelType', bound=BaseModel)
@@ -0,0 +1,38 @@
1
+ from dataclasses import dataclass
2
+ from typing import List, Dict
3
+ from urllib.parse import urlencode, urlparse, ParseResult, quote
4
+
5
+
6
+ @dataclass(kw_only=True)
7
+ class Url:
8
+ host: str
9
+ path_parameters: List[str] = list
10
+ query_parameters: Dict[str, str] = dict
11
+
12
+
13
+ @property
14
+ def parsed_url(self) -> ParseResult:
15
+ return urlparse(self.host)
16
+
17
+ @property
18
+ def path(self) -> str:
19
+ return self.path_parameters_to_str + self.query_parameters_to_str
20
+
21
+ @property
22
+ def is_https(self) -> bool:
23
+ return self.parsed_url.scheme == 'https'
24
+
25
+ @property
26
+ def path_parameters_to_str(self) -> str:
27
+ if self.path_parameters:
28
+ return '/' + '/'.join([quote(parameter) for parameter in self.path_parameters])
29
+
30
+ return ''
31
+
32
+ @property
33
+ def query_parameters_to_str(self) -> str:
34
+ if self.query_parameters:
35
+ query = urlencode(self.query_parameters)
36
+ return '?' + query
37
+
38
+ return ''
File without changes
@@ -0,0 +1,9 @@
1
+ from abc import abstractmethod, ABC
2
+ from typing import Any
3
+
4
+
5
class Handler(ABC):
    """Abstract base for processors that turn keyword arguments into a result."""

    @classmethod
    @abstractmethod
    def process(cls, **kwargs: Any) -> Any:
        """Perform the handler's work; subclasses define the exact signature and meaning."""
        ...
File without changes
@@ -0,0 +1,95 @@
1
+ from abc import abstractmethod
2
+ from typing import Optional, List
3
+
4
+ from dandy.core.url import Url
5
+ from dandy.llm.service import Service
6
+
7
+
8
class LlmConfig:
    """Connection and model configuration for an LLM HTTP endpoint.

    NOTE(review): get_response_content is marked @abstractmethod, but this
    class does not inherit from abc.ABC, so instantiating LlmConfig directly
    is not actually prevented — confirm whether ABC was intended.
    """

    def __init__(
            self,
            host: str,
            port: int,
            model: str,
            path_parameters: Optional[List[str]] = None,
            query_parameters: Optional[dict] = None,
            headers: Optional[dict] = None,
            api_key: Optional[str] = None,
            retry_count: int = 10,
    ):
        # Default to JSON request/response headers when none are supplied.
        if headers is None:
            headers = {
                "Content-Type": "application/json",
                "Accept": "application/json",
            }

        # NOTE(review): when the caller *does* pass a headers dict, the
        # Authorization entry is written into that same dict (in-place
        # mutation of the caller's object) — confirm this is acceptable.
        if api_key is not None:
            headers["Authorization"] = f"Bearer {api_key}"

        self.url=Url(
            host=host,
            path_parameters=path_parameters,
            query_parameters=query_parameters,
        )
        self.port=port
        self.model=model
        self.headers=headers

        self.retry_count = retry_count

    @property
    def service(self):
        # A fresh Service wrapper is constructed on every access.
        return Service(self)

    @abstractmethod
    def get_response_content(self, response) -> str:
        """Extract the assistant text from a provider-specific response dict."""
        ...
47
+
48
+
49
class OllamaLlmConfig(LlmConfig):
    """LlmConfig preset for an Ollama server (chat endpoint at /api/chat)."""

    def __init__(
            self,
            host: str,
            port: int,
            model: str,
            api_key: Optional[str] = None,
    ):
        # Ollama exposes its chat API at /api/chat.
        chat_endpoint_segments = ['api', 'chat']

        super().__init__(
            host=host,
            port=port,
            model=model,
            path_parameters=chat_endpoint_segments,
            api_key=api_key,
        )

    def get_response_content(self, response) -> str:
        """Extract the assistant message text from an Ollama chat response."""
        message = response['message']
        return message['content']
70
+
71
+
72
class OpenaiLlmConfig(LlmConfig):
    """LlmConfig preset for an OpenAI-compatible server (/v1/chat/completions)."""

    def __init__(
            self,
            host: str,
            port: int,
            model: str,
            api_key: Optional[str] = None,
    ):
        # OpenAI-style chat endpoint lives at /v1/chat/completions.
        endpoint_segments = ['v1', 'chat', 'completions']

        super().__init__(
            host=host,
            port=port,
            model=model,
            path_parameters=endpoint_segments,
            api_key=api_key,
        )

    def get_response_content(self, response) -> str:
        """Pull the first choice's message text from an OpenAI chat response."""
        first_choice = response['choices'][0]
        return first_choice['message']['content']
94
+
95
+
@@ -0,0 +1,12 @@
1
+ from typing import List
2
+
3
+ from dandy.core.exceptions import DandyException
4
+
5
+
6
class LlmException(DandyException):
    """Base class for all LLM-related errors."""
    pass


class LlmValidationException(LlmException):
    """Raised when the LLM service response cannot be validated.

    Args:
        name: Name of the model/field that failed validation.
        choices: The valid values that were expected.
    """

    def __init__(self, name: str, choices: List[str]):
        # Previously both arguments were accepted and silently discarded,
        # and the message was an f-string with no placeholders; keep them
        # as attributes and include them in the message for debuggability.
        self.name = name
        self.choices = choices
        super().__init__(
            f'Did not get a valid response format from llm service '
            f'for "{name}" (expected one of: {", ".join(choices)})'
        )
@@ -0,0 +1,2 @@
1
+ from dandy.llm.prompt.prompt import Prompt
2
+ from dandy.llm.prompt.snippet import Snippet