primfunctions 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,54 @@
1
+ Metadata-Version: 2.4
2
+ Name: primfunctions
3
+ Version: 0.1.0
4
+ Summary: Prim AI Functions
5
+ Author-email: Prim AI <derek@primai.com>
6
+ Requires-Python: >=3.7
7
+ Description-Content-Type: text/markdown
8
+ Requires-Dist: black>=23.3.0
9
+
10
+ # prim-functions
11
+
12
+ A comprehensive SDK for interacting with Prim AI Functions.
13
+
14
+ ## Table of Contents
15
+ - [Prerequisites](#prerequisites)
16
+ - [Installation](#installation)
17
+ - [Development Setup](#development-setup)
18
+
19
+ ## Prerequisites
20
+
21
+ - Python 3.12+
22
+ - [uv](https://docs.astral.sh/uv/)
23
+
24
+ If you do not have a uv virtual environment, you can create one with:
25
+ ```bash
26
+ uv venv --python 3.12
27
+ ```
28
+
29
+ **Note:** We recommend using uv to install the package due to the incredible speed of the package manager. The package can still be installed via pip directly, but it will be slower.
30
+
31
+ ## Installation
32
+
33
+ ### From PyPI
34
+
35
+ ```bash
36
+ pip install primfunctions
37
+ ```
38
+
39
+ ### From Source
40
+
41
+ ```bash
42
+ uv pip install .
43
+ ```
44
+
45
+ ## Development Setup
46
+
47
+ ### Git Hooks
48
+ This project includes git hooks to ensure code quality. To set up the git hooks:
49
+
50
+ ```bash
51
+ python setup_hooks.py
52
+ ```
53
+
54
+ This will create a pre-commit hook that automatically runs black on Python files before each commit.
@@ -0,0 +1,45 @@
1
+ # prim-functions
2
+
3
+ A comprehensive SDK for interacting with Prim AI Functions.
4
+
5
+ ## Table of Contents
6
+ - [Prerequisites](#prerequisites)
7
+ - [Installation](#installation)
8
+ - [Development Setup](#development-setup)
9
+
10
+ ## Prerequisites
11
+
12
+ - Python 3.12+
13
+ - [uv](https://docs.astral.sh/uv/)
14
+
15
+ If you do not have a uv virtual environment, you can create one with:
16
+ ```bash
17
+ uv venv --python 3.12
18
+ ```
19
+
20
+ **Note:** We recommend using uv to install the package due to the incredible speed of the package manager. The package can still be installed via pip directly, but it will be slower.
21
+
22
+ ## Installation
23
+
24
+ ### From PyPI
25
+
26
+ ```bash
27
+ pip install primfunctions
28
+ ```
29
+
30
+ ### From Source
31
+
32
+ ```bash
33
+ uv pip install .
34
+ ```
35
+
36
+ ## Development Setup
37
+
38
+ ### Git Hooks
39
+ This project includes git hooks to ensure code quality. To set up the git hooks:
40
+
41
+ ```bash
42
+ python setup_hooks.py
43
+ ```
44
+
45
+ This will create a pre-commit hook that automatically runs black on Python files before each commit.
File without changes
@@ -0,0 +1,47 @@
1
+ from enum import Enum
2
+
3
+
4
class LLMProvider(Enum):
    """Enumerates the supported LLM backends.

    The member values are the lowercase provider names, so a provider can be
    looked up from its string form via ``LLMProvider("openai")``.
    """

    OPENAI = "openai"
    GEMINI = "gemini"
7
+
8
+
9
class Configuration:
    """
    Runtime configuration for a session.

    NOTE: for backward compatibility this class keeps ``__dict__`` defined as
    a *method* -- existing callers invoke ``config.__dict__()`` to obtain a
    plain-dict snapshot. Defining it shadows the normal instance ``__dict__``
    lookup (so ``vars(config)`` will not work); prefer ``to_dict()`` in new
    code.
    """

    def __init__(
        self,
        input_type: str = "mic",
        default_voice: str = "nova",
        llm_provider: LLMProvider = LLMProvider.OPENAI,
        stt_model: str = "nova-3",
        stt_prompt: str = "",
        stt_endpointing: int = 300,
        recording_enabled: bool = False,
        timeout: int = 5,
    ):
        """
        Args:
            input_type: Audio input source identifier (default "mic").
            default_voice: Default TTS voice name.
            llm_provider: Provider as an LLMProvider member or its string value.
            stt_model: Speech-to-text model identifier.
            stt_prompt: Optional prompt passed to the STT engine.
            stt_endpointing: STT endpointing threshold (presumably ms —
                TODO confirm against the STT backend).
            recording_enabled: Whether session recording is on.
            timeout: Timeout value (units defined by the caller).
        """
        self.input_type = input_type
        self.default_voice = default_voice
        self.stt_model = stt_model
        self.stt_prompt = stt_prompt
        self.stt_endpointing = stt_endpointing
        self.recording_enabled = recording_enabled
        self.timeout = timeout
        # Accept either an LLMProvider member or its string value.
        self.llm_provider = self._coerce_provider(llm_provider)

    @staticmethod
    def _coerce_provider(llm_provider) -> LLMProvider:
        """Normalize a provider given as a string into an LLMProvider member."""
        if isinstance(llm_provider, str):
            return LLMProvider(llm_provider)
        return llm_provider

    def set_llm_provider(self, llm_provider: LLMProvider):
        """Set the provider.

        Now coerces string values the same way ``__init__`` does; previously a
        string was stored as-is, which later broke ``.value`` access in
        ``to_dict``/``__dict__``.
        """
        self.llm_provider = self._coerce_provider(llm_provider)

    def to_dict(self) -> dict:
        """Return a JSON-serializable snapshot; keys match __init__ kwargs."""
        return {
            "input_type": self.input_type,
            "default_voice": self.default_voice,
            "llm_provider": self.llm_provider.value,
            "stt_model": self.stt_model,
            "stt_prompt": self.stt_prompt,
            "stt_endpointing": self.stt_endpointing,
            "recording_enabled": self.recording_enabled,
            "timeout": self.timeout,
        }

    def __dict__(self):
        """Backward-compatible alias for to_dict() (see class docstring)."""
        return self.to_dict()
@@ -0,0 +1,128 @@
1
+ from typing import List, Dict, Any
2
+
3
+ from .configuration import Configuration, LLMProvider
4
+
5
+
6
class Context:
    """
    Per-session conversation state.

    Holds identifiers, a Configuration, arbitrary key/value data, template
    variables, and the chat history. History is stored internally in OpenAI
    message format ({"role": ..., "content": ...}) and converted on demand
    for other providers.
    """

    def __init__(
        self,
        agent_id: str = "",
        environment: str = "",
        session_id: str = "",
        configuration: Configuration = None,
        data: dict = None,
        variables: dict = None,
    ):
        """
        Args:
            agent_id: Identifier of the agent owning this session.
            environment: Deployment environment name.
            session_id: Unique identifier of this session.
            configuration: Session Configuration; a fresh one is created
                when omitted.
            data: Initial session data; a fresh dict is created when omitted.
            variables: Initial variables; a fresh dict is created when omitted.
        """
        self.agent_id = agent_id
        self.environment = environment
        self.session_id = session_id
        # Build fresh defaults per instance: the previous signature used
        # mutable default arguments (Configuration(), {}, {}), which were
        # shared by every Context constructed without explicit values.
        self.configuration = (
            configuration if configuration is not None else Configuration()
        )
        self.data = data if data is not None else {}
        self.variables = variables if variables is not None else {}
        self.history = []

    def serialize(self) -> dict:
        """Return a JSON-serializable snapshot of this context."""
        return {
            "agent_id": self.agent_id,
            "environment": self.environment,
            "session_id": self.session_id,
            "configuration": self.configuration.__dict__(),
            "data": self.data,
            "history": self.history,
            "variables": self.variables,
        }

    def deserialize(self, state: dict):
        """Restore fields from a snapshot produced by serialize().

        Keys missing from *state* leave the corresponding current value
        untouched.
        """
        self.agent_id = state.get("agent_id", self.agent_id)
        self.environment = state.get("environment", self.environment)
        self.session_id = state.get("session_id", self.session_id)
        self.configuration = Configuration(
            **state.get("configuration", self.configuration.__dict__())
        )
        self.data = state.get("data", self.data)
        self.history = state.get("history", self.history)
        self.variables = state.get("variables", self.variables)

    def set_data(self, key: str, value: Any):
        """Store an arbitrary value under *key* in the session data."""
        self.data[key] = value

    def get_data(self, key: str, default: Any = None):
        """Fetch a value from the session data, or *default* if absent."""
        return self.data.get(key, default)

    # Backward compatible methods - these work with OpenAI format internally
    def add_system_message(self, message: str):
        """Add system message. For Gemini, this will be converted when getting formatted history."""
        self.history.append({"role": "system", "content": message})

    def add_assistant_message(self, message: str):
        """Add assistant message. For Gemini, this will be converted to 'model' role when getting formatted history."""
        self.history.append({"role": "assistant", "content": message})

    def add_user_message(self, message: str):
        """Add user message. Works the same for both OpenAI and Gemini."""
        self.history.append({"role": "user", "content": message})

    # New generic methods for flexibility
    def add_message(self, role: str, content: str):
        """Generic method to add any message with specified role."""
        self.history.append({"role": role, "content": content})

    def get_history(self, turns: int = 0) -> List[Dict[str, str]]:
        """Get history in original OpenAI format for backward compatibility.

        Args:
            turns: Number of most recent turns to return, counted as two
                messages per turn; 0 returns the full history. Note: when
                turns == 0 the *live* list is returned, not a copy.
        """
        if turns == 0:
            return self.history
        return self.history[-(turns * 2) :]

    def get_history_message(self, turns: int = 0) -> List[Dict[str, str]]:
        """Get history formatted for the current model provider."""
        history = self.get_history(turns)

        if self.configuration.llm_provider == LLMProvider.OPENAI:
            return history
        elif self.configuration.llm_provider == LLMProvider.GEMINI:
            return self._convert_to_gemini_format(history)
        else:
            # Unknown provider: fall back to the stored OpenAI format.
            return history

    def _convert_to_gemini_format(
        self, history: List[Dict[str, str]]
    ) -> List[Dict[str, str]]:
        """Convert OpenAI format to Gemini format.

        System messages are folded into the next user message (prefixed with
        "System:"), and the 'assistant' role is renamed to 'model'.
        """
        converted = []
        system_messages = []

        for message in history:
            role = message["role"]
            content = message["content"]

            if role == "system":
                # Collect system messages to prepend to first user message
                system_messages.append(content)
            elif role == "assistant":
                # Convert assistant to model for Gemini
                converted.append({"role": "model", "content": content})
            elif role == "user":
                # If we have accumulated system messages, prepend them to this user message
                if system_messages:
                    system_context = "\n".join(system_messages)
                    content = f"System: {system_context}\n\nUser: {content}"
                    system_messages = []  # Clear after using
                converted.append({"role": "user", "content": content})
            else:
                # Unknown role, keep as is
                converted.append(message)

        # If there are remaining system messages at the end, add them as a user message
        if system_messages:
            system_content = "\n".join(system_messages)
            converted.append({"role": "user", "content": f"System: {system_content}"})

        return converted

    def set_llm_provider(self, provider: LLMProvider):
        """Change the llm provider for this context."""
        self.configuration.llm_provider = provider

    def get_llm_provider(self) -> LLMProvider:
        """Get the current llm provider."""
        return self.configuration.llm_provider
@@ -0,0 +1,145 @@
1
+ import json
2
+
3
+
4
class Event:
    """Base event: a name plus an arbitrary data payload.

    The default payload is now created per instance; the previous
    ``data: dict = {}`` default was a single shared dict, so mutating the
    ``.data`` of one default-constructed event leaked into every other.
    """

    def __init__(self, name: str, data: dict = None):
        """
        Args:
            name: Event type name (used for dispatch in event_from_str).
            data: Optional payload; a fresh empty dict when omitted.
        """
        self.name = name
        self.data = data if data is not None else {}
8
+
9
+
10
class CustomEvent(Event):
    """An event flagged as user-defined: its payload gains ``custom: True``.

    Mirrors the Event fix: no shared mutable default, and the caller's dict
    is copied so it is never mutated.
    """

    def __init__(self, name: str, data: dict = None):
        payload = dict(data) if data is not None else {}
        payload["custom"] = True
        super().__init__(name, payload)
13
+
14
+
15
class StartEvent(Event):
    """Marker event named "start"; carries no data."""

    def __init__(self):
        super().__init__("start")


class StopEvent(Event):
    """Marker event named "stop"; carries no data."""

    def __init__(self):
        super().__init__("stop")


class InterruptEvent(Event):
    """Marker event named "interrupt"; carries no data."""

    def __init__(self):
        super().__init__("interrupt")


class TimeoutEvent(Event):
    """Marker event named "timeout"; carries no data."""

    def __init__(self):
        super().__init__("timeout")
33
+
34
+
35
class TextEvent(Event):
    """Text attributed to a source (payload: source, text)."""

    def __init__(self, source: str, text: str):
        payload = {"source": source, "text": text}
        super().__init__("text", payload)


class TextToSpeechEvent(Event):
    """Text to be synthesized as speech, with voice/cache/interrupt flags."""

    def __init__(self, text: str, voice="nova", cache=True, interruptable=True):
        payload = {
            "text": text,
            "voice": voice,
            "cache": cache,
            "interruptable": interruptable,
        }
        super().__init__("text_to_speech", payload)


class AudioEvent(Event):
    """References an audio resource by path."""

    def __init__(self, path: str):
        payload = {"path": path}
        super().__init__("audio", payload)


class SilenceEvent(Event):
    """A period of silence of the given duration (units set by the caller)."""

    def __init__(self, duration: int):
        payload = {"duration": duration}
        super().__init__("silence", payload)
61
+
62
+
63
class TransferCallEvent(Event):
    """Carries the phone number a call should be transferred to."""

    def __init__(self, phone_number: str):
        payload = {"phone_number": phone_number}
        super().__init__("transfer_call", payload)


class WarmTransferEvent(Event):
    """Transfer request that also carries extra hand-off data."""

    def __init__(self, phone_number: str, data: dict):
        payload = {"phone_number": phone_number, "data": data}
        super().__init__("warm_transfer", payload)


class MergeCallEvent(Event):
    """Identifies a call by SID to merge into the session."""

    def __init__(self, call_sid: str):
        payload = {"call_sid": call_sid}
        super().__init__("merge_call", payload)


class ContextUpdateEvent(Event):
    """Carries a serialized context snapshot."""

    def __init__(self, context: dict):
        payload = {"context": context}
        super().__init__("context", payload)


class ErrorEvent(Event):
    """Carries an error message."""

    def __init__(self, message: str):
        payload = {"message": message}
        super().__init__("error", payload)


class LogEvent(Event):
    """Carries a log message."""

    def __init__(self, message: str):
        payload = {"message": message}
        super().__init__("log", payload)


class CollectPaymentEvent(Event):
    """Carries the amount for a payment-collection request."""

    def __init__(self, amount: float):
        payload = {"amount": amount}
        super().__init__("collect_payment", payload)


class CollectPaymentSuccessEvent(Event):
    """Marker event named "collect_payment_success"; carries no data."""

    def __init__(self):
        super().__init__("collect_payment_success")


class AddSupervisorContextEvent(Event):
    """Carries additional context text for a supervisor."""

    def __init__(self, context: str):
        payload = {"context": context}
        super().__init__("add_supervisor_context", payload)
106
+
107
+
108
def event_to_str(event: Event) -> str:
    """JSON-encode an event as {"name": ..., "data": ...}."""
    payload = {"name": event.name, "data": event.data}
    return json.dumps(payload)
110
+
111
+
112
def event_from_str(event_str: str) -> Event:
    """
    Reconstruct an Event from the JSON string produced by event_to_str.

    Args:
        event_str: JSON string with "name" and "data" keys.

    Returns:
        Event: The concrete event subclass matching the encoded name.

    Raises:
        ValueError: If the event name is not recognized.
        KeyError: If a required data field for the event type is missing.
    """
    event = json.loads(event_str)
    name = event["name"]
    data = event["data"]

    event_types = {
        # Previously missing: serialized AddSupervisorContextEvent raised
        # ValueError on round-trip.
        "add_supervisor_context": lambda: AddSupervisorContextEvent(data["context"]),
        "audio": lambda: AudioEvent(data["path"]),
        "collect_payment": lambda: CollectPaymentEvent(data["amount"]),
        "collect_payment_success": CollectPaymentSuccessEvent,
        "context": lambda: ContextUpdateEvent(data["context"]),
        "error": lambda: ErrorEvent(data["message"]),
        "interrupt": InterruptEvent,
        "log": lambda: LogEvent(data["message"]),
        "merge_call": lambda: MergeCallEvent(data["call_sid"]),
        "silence": lambda: SilenceEvent(data["duration"]),
        "start": StartEvent,
        "stop": StopEvent,
        "text": lambda: TextEvent(data["source"], data["text"]),
        # Preserve the optional flags instead of silently resetting them to
        # their defaults on round-trip.
        "text_to_speech": lambda: TextToSpeechEvent(
            data["text"],
            data.get("voice", "nova"),
            data.get("cache", True),
            data.get("interruptable", True),
        ),
        "timeout": TimeoutEvent,
        "transfer_call": lambda: TransferCallEvent(data["phone_number"]),
        "warm_transfer": lambda: WarmTransferEvent(data["phone_number"], data["data"]),
    }

    if name in event_types:
        return event_types[name]()

    raise ValueError(f"Unknown event type: {name}")
140
+
141
+
142
def format_event(event: Event) -> bytes:
    """Serialize *event* as a newline-terminated UTF-8 byte string."""
    return (event_to_str(event) + "\n").encode("utf-8")
File without changes
@@ -0,0 +1,193 @@
1
# Helper function to handle streaming response and chunking
async def stream_sentences(streaming_response, punctuation_marks=None, clean_text=True):
    """
    Streams OpenAI or Google Gemini response and yields complete sentences.

    Args:
        streaming_response: The streaming response from OpenAI (async
            iterator) or Google Gemini (sync iterator).
        punctuation_marks: Optional list of punctuation marks to use for
            sentence boundaries. Defaults to ['.', '!', '?', '\n'].
        clean_text: Whether to clean markdown and special characters for
            speech. Defaults to True.

    Yields:
        dict: {"content": sentence} for each complete sentence, plus
            {"tool_calls": ...} when an OpenAI chunk carries tool calls.
    """
    import asyncio

    if punctuation_marks is None:
        punctuation_marks = [".", "!", "?", "\n"]

    sentence_buffer = ""

    def _split_complete(buffer):
        """Split *buffer* at the last sentence boundary.

        Returns (complete, remainder); *complete* is "" when no boundary has
        been seen yet. Extracted so the async (OpenAI) and sync (Gemini)
        branches share one copy of the logic instead of two duplicates.
        """
        if not any(punct in buffer for punct in punctuation_marks):
            return "", buffer
        last_sentence_end = max(
            (buffer.rfind(punct) for punct in punctuation_marks),
            default=-1,
        )
        if last_sentence_end == -1:
            return "", buffer
        return buffer[: last_sentence_end + 1], buffer[last_sentence_end + 1 :]

    # Check if this is an async iterator (OpenAI) or sync iterator (Gemini)
    if hasattr(streaming_response, "__aiter__"):
        # OpenAI async streaming
        async for chunk in streaming_response:
            tool_calls = _extract_tool_calls_from_chunk(chunk)
            if tool_calls:
                yield {"tool_calls": tool_calls}

            content = _extract_content_from_chunk(chunk)
            if content:
                sentence_buffer += content
                complete, sentence_buffer = _split_complete(sentence_buffer)
                if complete:
                    if clean_text:
                        complete = _clean_text_for_speech(complete)
                    yield {"content": complete}
    else:
        # Gemini sync streaming - yield control each chunk to avoid blocking
        # the event loop.
        for chunk in streaming_response:
            content = _extract_content_from_chunk(chunk)
            if content:
                sentence_buffer += content
                complete, sentence_buffer = _split_complete(sentence_buffer)
                if complete:
                    if clean_text:
                        complete = _clean_text_for_speech(complete)
                    yield {"content": complete}

            await asyncio.sleep(0)

    # Handle any remaining text in buffer
    if sentence_buffer.strip():
        if clean_text:
            sentence_buffer = _clean_text_for_speech(sentence_buffer)
        yield {"content": sentence_buffer}
93
+
94
+
95
+ def _extract_tool_calls_from_chunk(chunk):
96
+ if hasattr(chunk, "choices") and chunk.choices:
97
+ if hasattr(chunk.choices[0], "delta") and hasattr(
98
+ chunk.choices[0].delta, "tool_calls"
99
+ ):
100
+ return chunk.choices[0].delta.tool_calls or ""
101
+ return ""
102
+
103
+
104
+ def _extract_content_from_chunk(chunk):
105
+ """
106
+ Extract content from streaming chunk, supporting OpenAI and Direct Gemini API formats only.
107
+
108
+ Args:
109
+ chunk: The streaming chunk from either OpenAI or Google Gemini (direct API)
110
+
111
+ Returns:
112
+ str: The content text from the chunk, or empty string if no content
113
+ """
114
+ # OpenAI format: chunk.choices[0].delta.content
115
+ if hasattr(chunk, "choices") and chunk.choices:
116
+ if hasattr(chunk.choices[0], "delta") and hasattr(
117
+ chunk.choices[0].delta, "content"
118
+ ):
119
+ return chunk.choices[0].delta.content or ""
120
+
121
+ # Google Gemini Direct API format: chunk.text
122
+ if hasattr(chunk, "text"):
123
+ return chunk.text or ""
124
+
125
+ return ""
126
+
127
+
128
+ def _clean_text_for_speech(text):
129
+ """
130
+ Clean text for better speech synthesis by removing/replacing problematic characters.
131
+
132
+ Args:
133
+ text: The text to clean
134
+
135
+ Returns:
136
+ str: Cleaned text suitable for speech synthesis
137
+ """
138
+ import re
139
+
140
+ if not text:
141
+ return text
142
+
143
+ # Remove markdown formatting
144
+ text = re.sub(r"\*\*(.*?)\*\*", r"\1", text) # **bold** -> bold
145
+ text = re.sub(r"\*(.*?)\*", r"\1", text) # *italic* -> italic
146
+ text = re.sub(r"__(.*?)__", r"\1", text) # __bold__ -> bold
147
+ text = re.sub(r"_(.*?)_", r"\1", text) # _italic_ -> italic
148
+ text = re.sub(r"~~(.*?)~~", r"\1", text) # ~~strikethrough~~ -> strikethrough
149
+ text = re.sub(r"`(.*?)`", r"\1", text) # `code` -> code
150
+
151
+ # Remove markdown headers
152
+ text = re.sub(r"^#{1,6}\s*", "", text, flags=re.MULTILINE) # # Header -> Header
153
+
154
+ # Replace common symbols with spoken equivalents
155
+ replacements = {
156
+ "#": "hashtag ",
157
+ "@": "at ",
158
+ "&": "and ",
159
+ "%": "percent ",
160
+ "$": "dollar ",
161
+ "+": "plus ",
162
+ "=": "equals ",
163
+ "<": "less than ",
164
+ ">": "greater than ",
165
+ "|": "pipe ",
166
+ "\\": "backslash ",
167
+ "/": "slash ",
168
+ "^": "caret ",
169
+ "~": "tilde ",
170
+ }
171
+
172
+ for symbol, replacement in replacements.items():
173
+ text = text.replace(symbol, replacement)
174
+
175
+ # Remove brackets and their content (often contains technical info)
176
+ text = re.sub(r"\[.*?\]", "", text) # [link text] ->
177
+ text = re.sub(r"\{.*?\}", "", text) # {code} ->
178
+
179
+ # Clean up URLs (replace with "link")
180
+ text = re.sub(r"https?://\S+", "link", text)
181
+ text = re.sub(r"www\.\S+", "link", text)
182
+
183
+ # Clean up email addresses
184
+ text = re.sub(r"\S+@\S+\.\S+", "email address", text)
185
+
186
+ # Clean up multiple spaces and newlines
187
+ text = re.sub(r"\s+", " ", text) # Multiple spaces -> single space
188
+ text = re.sub(r"\n+", ". ", text) # Multiple newlines -> period space
189
+
190
+ # Remove leading/trailing whitespace
191
+ text = text.strip()
192
+
193
+ return text
@@ -0,0 +1,54 @@
1
+ Metadata-Version: 2.4
2
+ Name: primfunctions
3
+ Version: 0.1.0
4
+ Summary: Prim AI Functions
5
+ Author-email: Prim AI <derek@primai.com>
6
+ Requires-Python: >=3.7
7
+ Description-Content-Type: text/markdown
8
+ Requires-Dist: black>=23.3.0
9
+
10
+ # prim-functions
11
+
12
+ A comprehensive SDK for interacting with Prim AI Functions.
13
+
14
+ ## Table of Contents
15
+ - [Prerequisites](#prerequisites)
16
+ - [Installation](#installation)
17
+ - [Development Setup](#development-setup)
18
+
19
+ ## Prerequisites
20
+
21
+ - Python 3.12+
22
+ - [uv](https://docs.astral.sh/uv/)
23
+
24
+ If you do not have a uv virtual environment, you can create one with:
25
+ ```bash
26
+ uv venv --python 3.12
27
+ ```
28
+
29
+ **Note:** We recommend using uv to install the package due to the incredible speed of the package manager. The package can still be installed via pip directly, but it will be slower.
30
+
31
+ ## Installation
32
+
33
+ ### From PyPI
34
+
35
+ ```bash
36
+ pip install primfunctions
37
+ ```
38
+
39
+ ### From Source
40
+
41
+ ```bash
42
+ uv pip install .
43
+ ```
44
+
45
+ ## Development Setup
46
+
47
+ ### Git Hooks
48
+ This project includes git hooks to ensure code quality. To set up the git hooks:
49
+
50
+ ```bash
51
+ python setup_hooks.py
52
+ ```
53
+
54
+ This will create a pre-commit hook that automatically runs black on Python files before each commit.
@@ -0,0 +1,13 @@
1
+ README.md
2
+ pyproject.toml
3
+ primfunctions/__init__.py
4
+ primfunctions/configuration.py
5
+ primfunctions/context.py
6
+ primfunctions/events.py
7
+ primfunctions.egg-info/PKG-INFO
8
+ primfunctions.egg-info/SOURCES.txt
9
+ primfunctions.egg-info/dependency_links.txt
10
+ primfunctions.egg-info/requires.txt
11
+ primfunctions.egg-info/top_level.txt
12
+ primfunctions/utils/__init__.py
13
+ primfunctions/utils/streaming.py
@@ -0,0 +1 @@
1
+ black>=23.3.0
@@ -0,0 +1 @@
1
+ primfunctions
@@ -0,0 +1,16 @@
1
+ [build-system]
2
+ requires = ["setuptools>=61.0"]
3
+ build-backend = "setuptools.build_meta"
4
+
5
+ [project]
6
+ name = "primfunctions"
7
+ version = "0.1.0"
8
+ description = "Prim AI Functions"
9
+ authors = [
10
+ { name = "Prim AI", email = "derek@primai.com" }
11
+ ]
12
+ readme = "README.md"
13
+ requires-python = ">=3.7"
14
+ dependencies = [
15
+ "black>=23.3.0"
16
+ ]
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+