openaivec 0.12.5__py3-none-any.whl → 1.0.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. openaivec/__init__.py +13 -4
  2. openaivec/_cache/__init__.py +12 -0
  3. openaivec/_cache/optimize.py +109 -0
  4. openaivec/_cache/proxy.py +806 -0
  5. openaivec/{di.py → _di.py} +36 -12
  6. openaivec/_embeddings.py +203 -0
  7. openaivec/{log.py → _log.py} +2 -2
  8. openaivec/_model.py +113 -0
  9. openaivec/{prompt.py → _prompt.py} +95 -28
  10. openaivec/_provider.py +207 -0
  11. openaivec/_responses.py +511 -0
  12. openaivec/_schema/__init__.py +9 -0
  13. openaivec/_schema/infer.py +340 -0
  14. openaivec/_schema/spec.py +350 -0
  15. openaivec/_serialize.py +234 -0
  16. openaivec/{util.py → _util.py} +25 -85
  17. openaivec/pandas_ext.py +1496 -318
  18. openaivec/spark.py +485 -183
  19. openaivec/task/__init__.py +9 -7
  20. openaivec/task/customer_support/__init__.py +9 -15
  21. openaivec/task/customer_support/customer_sentiment.py +17 -15
  22. openaivec/task/customer_support/inquiry_classification.py +23 -22
  23. openaivec/task/customer_support/inquiry_summary.py +14 -13
  24. openaivec/task/customer_support/intent_analysis.py +21 -19
  25. openaivec/task/customer_support/response_suggestion.py +16 -16
  26. openaivec/task/customer_support/urgency_analysis.py +24 -25
  27. openaivec/task/nlp/__init__.py +4 -4
  28. openaivec/task/nlp/dependency_parsing.py +10 -12
  29. openaivec/task/nlp/keyword_extraction.py +11 -14
  30. openaivec/task/nlp/morphological_analysis.py +12 -14
  31. openaivec/task/nlp/named_entity_recognition.py +16 -18
  32. openaivec/task/nlp/sentiment_analysis.py +14 -11
  33. openaivec/task/nlp/translation.py +6 -9
  34. openaivec/task/table/__init__.py +2 -2
  35. openaivec/task/table/fillna.py +11 -11
  36. openaivec-1.0.10.dist-info/METADATA +399 -0
  37. openaivec-1.0.10.dist-info/RECORD +39 -0
  38. {openaivec-0.12.5.dist-info → openaivec-1.0.10.dist-info}/WHEEL +1 -1
  39. openaivec/embeddings.py +0 -172
  40. openaivec/model.py +0 -67
  41. openaivec/provider.py +0 -45
  42. openaivec/responses.py +0 -393
  43. openaivec/serialize.py +0 -225
  44. openaivec-0.12.5.dist-info/METADATA +0 -696
  45. openaivec-0.12.5.dist-info/RECORD +0 -33
  46. {openaivec-0.12.5.dist-info → openaivec-1.0.10.dist-info}/licenses/LICENSE +0 -0
@@ -32,7 +32,7 @@ Specialized tasks for customer service operations:
  ### Quick Start with Default Tasks
  ```python
  from openai import OpenAI
- from openaivec.responses import BatchResponses
+ from openaivec import BatchResponses
  from openaivec.task import nlp, customer_support

  client = OpenAI()
@@ -90,15 +90,17 @@ results_df = df.ai.extract("sentiment")

  ### Spark Integration
  ```python
- from openaivec.spark import ResponsesUDFBuilder
+ from openaivec.spark import task_udf

  # Register UDF for large-scale processing
  spark.udf.register(
  "analyze_sentiment",
- ResponsesUDFBuilder.of_openai(
- api_key=api_key,
- model_name="gpt-4.1-mini"
- ).build_from_task(task=nlp.SENTIMENT_ANALYSIS)
+ task_udf(
+ task=nlp.SENTIMENT_ANALYSIS,
+ model_name="gpt-4.1-mini",
+ batch_size=64,
+ max_concurrency=8,
+ ),
  )

  # Use in Spark SQL
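Note: the registered UDF can then be called from Spark SQL. The following is a hypothetical follow-on sketch, not part of the package docs; the table name `reviews` and column `text` are placeholders.

```python
# Hypothetical usage of the "analyze_sentiment" UDF registered above;
# "reviews" and "text" are placeholder table/column names.
result = spark.sql("SELECT text, analyze_sentiment(text) AS sentiment FROM reviews")
result.show(truncate=False)
```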
@@ -117,7 +119,7 @@ All tasks are built using the `PreparedTask` dataclass:
  @dataclass(frozen=True)
  class PreparedTask:
  instructions: str # Detailed prompt for the LLM
- response_format: Type[ResponseFormat] # Pydantic model or str for structured/plain output
+ response_format: type[ResponseFormat] # Pydantic model or str for structured/plain output
  temperature: float = 0.0 # Sampling temperature
  top_p: float = 1.0 # Nucleus sampling parameter
  ```
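Given the field list in this hunk, constructing a task by hand would look roughly like the sketch below. It assumes `PreparedTask` is importable from the internal `openaivec._model` module (the path the bundled task modules use in 1.0.10); the `Sentiment` model and prompt text are illustrative.

```python
from pydantic import BaseModel, Field

from openaivec._model import PreparedTask  # internal path used by the bundled tasks in 1.0.10


class Sentiment(BaseModel):
    label: str = Field(description="positive, neutral, or negative")


# response_format may be a Pydantic model (structured output) or str (plain text);
# temperature and top_p keep their defaults of 0.0 and 1.0.
custom_task = PreparedTask(
    instructions="Classify the sentiment of the given text.",
    response_format=Sentiment,
)
```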
@@ -1,32 +1,26 @@
  # Function imports
- from .inquiry_classification import inquiry_classification
- from .urgency_analysis import urgency_analysis
- from .customer_sentiment import customer_sentiment
- from .intent_analysis import intent_analysis
- from .inquiry_summary import inquiry_summary
- from .response_suggestion import response_suggestion
+ from .customer_sentiment import CUSTOMER_SENTIMENT, customer_sentiment

  # Backward compatibility - constant imports
- from .inquiry_classification import INQUIRY_CLASSIFICATION
- from .urgency_analysis import URGENCY_ANALYSIS
- from .customer_sentiment import CUSTOMER_SENTIMENT
- from .intent_analysis import INTENT_ANALYSIS
- from .inquiry_summary import INQUIRY_SUMMARY
- from .response_suggestion import RESPONSE_SUGGESTION
+ from .inquiry_classification import INQUIRY_CLASSIFICATION, inquiry_classification
+ from .inquiry_summary import INQUIRY_SUMMARY, inquiry_summary
+ from .intent_analysis import INTENT_ANALYSIS, intent_analysis
+ from .response_suggestion import RESPONSE_SUGGESTION, response_suggestion
+ from .urgency_analysis import URGENCY_ANALYSIS, urgency_analysis

  __all__ = [
  # Configurable functions (recommended)
  "inquiry_classification",
- "urgency_analysis",
+ "urgency_analysis",
  "customer_sentiment",
  "intent_analysis",
  "inquiry_summary",
  "response_suggestion",
  # Backward compatibility constants
  "INQUIRY_CLASSIFICATION",
- "URGENCY_ANALYSIS",
+ "URGENCY_ANALYSIS",
  "CUSTOMER_SENTIMENT",
  "INTENT_ANALYSIS",
  "INQUIRY_SUMMARY",
  "RESPONSE_SUGGESTION",
- ]
+ ]
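After this re-export change, both access styles listed in `__all__` remain available. A minimal sketch, using only names exported by the module in this diff:

```python
from openaivec.task import customer_support

# Backward-compatible constant with the default configuration
default_task = customer_support.CUSTOMER_SENTIMENT

# Configurable factory (recommended): returns a PreparedTask
custom_task = customer_support.customer_sentiment(business_context="SaaS billing support")
```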
@@ -9,7 +9,7 @@ Example:

  ```python
  from openai import OpenAI
- from openaivec.responses import BatchResponses
+ from openaivec import BatchResponses
  from openaivec.task import customer_support

  client = OpenAI()
@@ -49,7 +49,10 @@ Example:

  # Extract sentiment components
  extracted_df = df.ai.extract("sentiment")
- print(extracted_df[["inquiry", "sentiment_satisfaction_level", "sentiment_churn_risk", "sentiment_emotional_state"]])
+ print(extracted_df[[
+ "inquiry", "sentiment_satisfaction_level",
+ "sentiment_churn_risk", "sentiment_emotional_state"
+ ]])
  ```

  Attributes:
@@ -58,11 +61,11 @@ Attributes:
  top_p=1.0 for deterministic output.
  """

- from typing import List, Literal
+ from typing import Literal

  from pydantic import BaseModel, Field

- from ...model import PreparedTask
+ from openaivec._model import PreparedTask

  __all__ = ["customer_sentiment"]

@@ -83,7 +86,7 @@ class CustomerSentiment(BaseModel):
  )
  sentiment_intensity: float = Field(description="Intensity of sentiment from 0.0 (mild) to 1.0 (extreme)")
  polarity_score: float = Field(description="Polarity score from -1.0 (very negative) to 1.0 (very positive)")
- tone_indicators: List[str] = Field(description="Specific words or phrases indicating tone")
+ tone_indicators: list[str] = Field(description="Specific words or phrases indicating tone")
  relationship_status: Literal["new", "loyal", "at_risk", "detractor", "advocate"] = Field(
  description="Customer relationship status (new, loyal, at_risk, detractor, advocate)"
  )
@@ -92,21 +95,18 @@ class CustomerSentiment(BaseModel):
  )


- def customer_sentiment(
- business_context: str = "general customer support", temperature: float = 0.0, top_p: float = 1.0
- ) -> PreparedTask:
+ def customer_sentiment(business_context: str = "general customer support") -> PreparedTask:
  """Create a configurable customer sentiment analysis task.

  Args:
  business_context (str): Business context for sentiment analysis.
- temperature (float): Sampling temperature (0.0-1.0).
- top_p (float): Nucleus sampling parameter (0.0-1.0).

  Returns:
  PreparedTask configured for customer sentiment analysis.
  """

- instructions = f"""Analyze customer sentiment in the context of support interactions, focusing on satisfaction, emotional state, and business implications.
+ instructions = f"""Analyze customer sentiment in the context of support interactions, focusing on
+ satisfaction, emotional state, and business implications.

  Business Context: {business_context}

@@ -157,13 +157,15 @@ Analyze tone indicators like:
  - Urgency: "urgent", "immediately", "ASAP", "critical"
  - Threat: "cancel", "switch", "competitor", "lawyer", "report"

- IMPORTANT: Provide analysis responses in the same language as the input text, except for the predefined categorical fields (sentiment, satisfaction_level, emotional_state, churn_risk, relationship_status, response_approach) which must use the exact English values specified above. For example, if the input is in Spanish, provide tone_indicators in Spanish, but use English values like "positive" for sentiment.
+ IMPORTANT: Provide analysis responses in the same language as the input text, except for the
+ predefined categorical fields (sentiment, satisfaction_level, emotional_state, churn_risk,
+ relationship_status, response_approach) which must use the exact English values specified above.
+ For example, if the input is in Spanish, provide tone_indicators in Spanish, but use English
+ values like "positive" for sentiment.

  Provide comprehensive sentiment analysis with business context and recommended response strategy."""

- return PreparedTask(
- instructions=instructions, response_format=CustomerSentiment, temperature=temperature, top_p=top_p
- )
+ return PreparedTask(instructions=instructions, response_format=CustomerSentiment)


  # Backward compatibility - default configuration
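As the hunks above show, the 1.0.10 factory drops the temperature and top_p parameters; only business_context remains. A minimal migration sketch:

```python
from openaivec.task.customer_support import customer_sentiment

# 0.12.5: customer_sentiment(business_context=..., temperature=0.0, top_p=1.0)
# 1.0.10: sampling parameters are no longer accepted by the factory
task = customer_sentiment(business_context="e-commerce returns desk")
```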
@@ -8,7 +8,7 @@ Example:

  ```python
  from openai import OpenAI
- from openaivec.responses import BatchResponses
+ from openaivec import BatchResponses
  from openaivec.task import customer_support

  client = OpenAI()
@@ -85,15 +85,18 @@ Example:

  # Extract classification components
  extracted_df = df.ai.extract("classification")
- print(extracted_df[["inquiry", "classification_category", "classification_subcategory", "classification_confidence"]])
+ print(extracted_df[[
+ "inquiry", "classification_category",
+ "classification_subcategory", "classification_confidence"
+ ]])
  ```
  """

- from typing import Dict, List, Literal, Optional
+ from typing import Dict, Literal

  from pydantic import BaseModel, Field

- from ...model import PreparedTask
+ from openaivec._model import PreparedTask

  __all__ = ["inquiry_classification"]

@@ -103,7 +106,7 @@ class InquiryClassification(BaseModel):
  subcategory: str = Field(description="Specific subcategory within the primary category")
  confidence: float = Field(description="Confidence score for classification (0.0-1.0)")
  routing: str = Field(description="Recommended routing destination")
- keywords: List[str] = Field(description="Key terms that influenced the classification")
+ keywords: list[str] = Field(description="Key terms that influenced the classification")
  priority: Literal["low", "medium", "high", "urgent"] = Field(
  description="Suggested priority level (low, medium, high, urgent)"
  )
@@ -111,27 +114,23 @@ class InquiryClassification(BaseModel):


  def inquiry_classification(
- categories: Optional[Dict[str, List[str]]] = None,
- routing_rules: Optional[Dict[str, str]] = None,
- priority_rules: Optional[Dict[str, str]] = None,
+ categories: Dict[str, list[str]] | None = None,
+ routing_rules: Dict[str, str] | None = None,
+ priority_rules: Dict[str, str] | None = None,
  business_context: str = "general customer support",
- custom_keywords: Optional[Dict[str, List[str]]] = None,
- temperature: float = 0.0,
- top_p: float = 1.0,
+ custom_keywords: Dict[str, list[str]] | None = None,
  ) -> PreparedTask:
  """Create a configurable inquiry classification task.

  Args:
- categories (Optional[Dict[str, List[str]]]): Dictionary mapping category names to lists of subcategories.
+ categories (dict[str, list[str]] | None): Dictionary mapping category names to lists of subcategories.
  Default provides standard support categories.
- routing_rules (Optional[Dict[str, str]]): Dictionary mapping categories to routing destinations.
+ routing_rules (dict[str, str] | None): Dictionary mapping categories to routing destinations.
  Default provides standard routing options.
- priority_rules (Optional[Dict[str, str]]): Dictionary mapping keywords/patterns to priority levels.
+ priority_rules (dict[str, str] | None): Dictionary mapping keywords/patterns to priority levels.
  Default uses standard priority indicators.
  business_context (str): Description of the business context to help with classification.
- custom_keywords (Optional[Dict[str, List[str]]]): Dictionary mapping categories to relevant keywords.
- temperature (float): Sampling temperature (0.0-1.0).
- top_p (float): Nucleus sampling parameter (0.0-1.0).
+ custom_keywords (dict[str, list[str]] | None): Dictionary mapping categories to relevant keywords.

  Returns:
  PreparedTask configured for inquiry classification.
@@ -214,7 +213,8 @@ def inquiry_classification(
  for category, keywords in custom_keywords.items():
  keywords_text += f"- {category}: {', '.join(keywords)}\n"

- instructions = f"""Classify the customer inquiry into the appropriate category and subcategory based on the configured categories and business context.
+ instructions = f"""Classify the customer inquiry into the appropriate category and subcategory
+ based on the configured categories and business context.

  Business Context: {business_context}

@@ -243,13 +243,14 @@ Consider:
  - Business impact
  - Customer type indicators

- IMPORTANT: Provide analysis responses in the same language as the input text, except for the predefined categorical fields (priority) which must use the exact English values specified above. Category, subcategory, routing, and keywords should reflect the content and can be in the input language where appropriate, but priority must use English values like "high".
+ IMPORTANT: Provide analysis responses in the same language as the input text, except for the
+ predefined categorical fields (priority) which must use the exact English values specified above.
+ Category, subcategory, routing, and keywords should reflect the content and can be in the input
+ language where appropriate, but priority must use English values like "high".

  Provide accurate classification with detailed reasoning."""

- return PreparedTask(
- instructions=instructions, response_format=InquiryClassification, temperature=temperature, top_p=top_p
- )
+ return PreparedTask(instructions=instructions, response_format=InquiryClassification)


  # Backward compatibility - default configuration
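A sketch of calling the trimmed 1.0.10 signature with custom configuration; the parameter names come from the diff above, while the dictionary contents are placeholders.

```python
from openaivec.task.customer_support import inquiry_classification

# Only the parameter names are taken from the 1.0.10 signature; values are illustrative.
task = inquiry_classification(
    categories={"billing": ["refund", "invoice"], "technical": ["login", "crash"]},
    routing_rules={"billing": "finance_team", "technical": "tier2_support"},
    business_context="subscription software support",
)
```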
@@ -9,7 +9,7 @@ Example:

  ```python
  from openai import OpenAI
- from openaivec.responses import BatchResponses
+ from openaivec import BatchResponses
  from openaivec.task import customer_support

  client = OpenAI()
@@ -59,11 +59,11 @@ Attributes:
  top_p=1.0 for deterministic output.
  """

- from typing import List, Literal
+ from typing import Literal

  from pydantic import BaseModel, Field

- from ...model import PreparedTask
+ from openaivec._model import PreparedTask

  __all__ = ["inquiry_summary"]

@@ -71,15 +71,15 @@ __all__ = ["inquiry_summary"]
  class InquirySummary(BaseModel):
  summary: str = Field(description="Concise summary of the customer inquiry (2-3 sentences)")
  main_issue: str = Field(description="Primary problem or request being addressed")
- secondary_issues: List[str] = Field(description="Additional issues mentioned in the inquiry")
+ secondary_issues: list[str] = Field(description="Additional issues mentioned in the inquiry")
  customer_background: str = Field(description="Relevant customer context or history mentioned")
- actions_taken: List[str] = Field(description="Steps the customer has already attempted")
+ actions_taken: list[str] = Field(description="Steps the customer has already attempted")
  timeline: str = Field(description="Timeline of events or when the issue started")
  impact_description: str = Field(description="How the issue affects the customer")
  resolution_status: Literal["not_started", "in_progress", "needs_escalation", "resolved"] = Field(
  description="Current status (not_started, in_progress, needs_escalation, resolved)"
  )
- key_details: List[str] = Field(description="Important technical details, error messages, or specifics")
+ key_details: list[str] = Field(description="Important technical details, error messages, or specifics")
  follow_up_needed: bool = Field(description="Whether follow-up communication is required")
  summary_confidence: float = Field(description="Confidence in summary accuracy (0.0-1.0)")

@@ -87,16 +87,12 @@ class InquirySummary(BaseModel):
  def inquiry_summary(
  summary_length: str = "concise",
  business_context: str = "general customer support",
- temperature: float = 0.0,
- top_p: float = 1.0,
  ) -> PreparedTask:
  """Create a configurable inquiry summary task.

  Args:
  summary_length (str): Length of summary (concise, detailed, bullet_points).
  business_context (str): Business context for summary.
- temperature (float): Sampling temperature (0.0-1.0).
- top_p (float): Nucleus sampling parameter (0.0-1.0).

  Returns:
  PreparedTask configured for inquiry summarization.
@@ -108,7 +104,8 @@ def inquiry_summary(
  "bullet_points": "Create a bullet-point summary with key facts and actions",
  }

- instructions = f"""Create a comprehensive summary of the customer inquiry that captures all essential information for support agents and management.
+ instructions = f"""Create a comprehensive summary of the customer inquiry that captures all
+ essential information for support agents and management.

  Business Context: {business_context}
  Summary Style: {length_instructions.get(summary_length, length_instructions["concise"])}
@@ -154,11 +151,15 @@ Focus on:
  - Clear distinction between symptoms and root causes
  - Relevant background without unnecessary details

- IMPORTANT: Provide summary responses in the same language as the input text, except for the predefined categorical field (resolution_status) which must use the exact English values specified above (not_started, in_progress, needs_escalation, resolved). For example, if the input is in German, provide all summary content in German, but use English values like "in_progress" for resolution_status.
+ IMPORTANT: Provide summary responses in the same language as the input text, except for the
+ predefined categorical field (resolution_status) which must use the exact English values
+ specified above (not_started, in_progress, needs_escalation, resolved). For example, if the
+ input is in German, provide all summary content in German, but use English values like
+ "in_progress" for resolution_status.

  Provide accurate, actionable summary that enables efficient support resolution."""

- return PreparedTask(instructions=instructions, response_format=InquirySummary, temperature=temperature, top_p=top_p)
+ return PreparedTask(instructions=instructions, response_format=InquirySummary)


  # Backward compatibility - default configuration
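The summary_length values accepted by the factory ("concise", "detailed", "bullet_points") appear in the length_instructions mapping shown above. A minimal sketch of the 1.0.10 call:

```python
from openaivec.task.customer_support import inquiry_summary

# "concise" (default), "detailed", or "bullet_points", per the mapping above
task = inquiry_summary(summary_length="bullet_points", business_context="telecom helpdesk")
```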
@@ -8,7 +8,7 @@ Example:

  ```python
  from openai import OpenAI
- from openaivec.responses import BatchResponses
+ from openaivec import BatchResponses
  from openaivec.task import customer_support

  client = OpenAI()
@@ -52,16 +52,16 @@ Example:
  ```

  Attributes:
- INTENT_ANALYSIS (PreparedTask): A prepared task instance
- configured for intent analysis with temperature=0.0 and
- top_p=1.0 for deterministic output.
+ INTENT_ANALYSIS (PreparedTask): A prepared task instance configured for intent
+ analysis. Provide ``temperature=0.0`` and ``top_p=1.0`` to your API calls
+ for deterministic output.
  """

- from typing import List, Literal
+ from typing import Literal

  from pydantic import BaseModel, Field

- from ...model import PreparedTask
+ from openaivec._model import PreparedTask

  __all__ = ["intent_analysis"]

@@ -77,36 +77,34 @@ class IntentAnalysis(BaseModel):
  "request_feature",
  "provide_feedback",
  ] = Field(
- description="Primary customer intent (get_help, make_purchase, cancel_service, get_refund, report_issue, seek_information, request_feature, provide_feedback)"
+ description="Primary customer intent (get_help, make_purchase, cancel_service, "
+ "get_refund, report_issue, seek_information, request_feature, provide_feedback)"
  )
- secondary_intents: List[str] = Field(description="Additional intents if multiple goals are present")
+ secondary_intents: list[str] = Field(description="Additional intents if multiple goals are present")
  action_required: Literal[
  "provide_information", "troubleshoot", "process_request", "escalate", "redirect", "schedule_callback"
  ] = Field(
- description="Required action (provide_information, troubleshoot, process_request, escalate, redirect, schedule_callback)"
+ description="Required action (provide_information, troubleshoot, process_request, "
+ "escalate, redirect, schedule_callback)"
  )
  intent_confidence: float = Field(description="Confidence in intent detection (0.0-1.0)")
  success_likelihood: Literal["very_high", "high", "medium", "low", "very_low"] = Field(
  description="Likelihood of successful resolution (very_high, high, medium, low, very_low)"
  )
  customer_goal: str = Field(description="What the customer ultimately wants to achieve")
- implicit_needs: List[str] = Field(description="Unstated needs or concerns that may need addressing")
- blocking_factors: List[str] = Field(description="Potential obstacles to achieving customer goal")
- next_steps: List[str] = Field(description="Recommended next steps to address customer intent")
+ implicit_needs: list[str] = Field(description="Unstated needs or concerns that may need addressing")
+ blocking_factors: list[str] = Field(description="Potential obstacles to achieving customer goal")
+ next_steps: list[str] = Field(description="Recommended next steps to address customer intent")
  resolution_complexity: Literal["simple", "moderate", "complex", "very_complex"] = Field(
  description="Complexity of resolution (simple, moderate, complex, very_complex)"
  )


- def intent_analysis(
- business_context: str = "general customer support", temperature: float = 0.0, top_p: float = 1.0
- ) -> PreparedTask:
+ def intent_analysis(business_context: str = "general customer support") -> PreparedTask:
  """Create a configurable intent analysis task.

  Args:
  business_context (str): Business context for intent analysis.
- temperature (float): Sampling temperature (0.0-1.0).
- top_p (float): Nucleus sampling parameter (0.0-1.0).

  Returns:
  PreparedTask configured for intent analysis.
@@ -161,11 +159,15 @@ Pay attention to:
  - Urgency indicators: Time pressure affects resolution approach
  - Previous interactions: References to prior support contacts

- IMPORTANT: Provide analysis responses in the same language as the input text, except for the predefined categorical fields (primary_intent, action_required, success_likelihood, resolution_complexity) which must use the exact English values specified above. For example, if the input is in Japanese, provide customer_goal, implicit_needs, blocking_factors, next_steps, and reasoning in Japanese, but use English values like "get_help" for primary_intent.
+ IMPORTANT: Provide analysis responses in the same language as the input text, except for the
+ predefined categorical fields (primary_intent, action_required, success_likelihood,
+ resolution_complexity) which must use the exact English values specified above. For example,
+ if the input is in Japanese, provide customer_goal, implicit_needs, blocking_factors,
+ next_steps, and reasoning in Japanese, but use English values like "get_help" for primary_intent.

  Provide comprehensive intent analysis with actionable recommendations."""

- return PreparedTask(instructions=instructions, response_format=IntentAnalysis, temperature=temperature, top_p=top_p)
+ return PreparedTask(instructions=instructions, response_format=IntentAnalysis)


  # Backward compatibility - default configuration
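The same simplification applies here; a one-line sketch of the new factory call:

```python
from openaivec.task.customer_support import intent_analysis

# temperature/top_p were removed from the factory signature in 1.0.10
task = intent_analysis(business_context="general customer support")
```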
@@ -9,7 +9,7 @@ Example:

  ```python
  from openai import OpenAI
- from openaivec.responses import BatchResponses
+ from openaivec import BatchResponses
  from openaivec.task import customer_support

  client = OpenAI()
@@ -57,11 +57,11 @@ Attributes:
  top_p=1.0 for deterministic output.
  """

- from typing import List, Literal
+ from typing import Literal

  from pydantic import BaseModel, Field

- from ...model import PreparedTask
+ from openaivec._model import PreparedTask

  __all__ = ["response_suggestion"]

@@ -77,14 +77,14 @@ class ResponseSuggestion(BaseModel):
  response_type: Literal["acknowledgment", "solution", "escalation", "information_request", "closure"] = Field(
  description="Type of response (acknowledgment, solution, escalation, information_request, closure)"
  )
- key_points: List[str] = Field(description="Main points that must be addressed in the response")
+ key_points: list[str] = Field(description="Main points that must be addressed in the response")
  follow_up_required: bool = Field(description="Whether follow-up communication is needed")
  escalation_suggested: bool = Field(description="Whether escalation to management is recommended")
- resources_needed: List[str] = Field(description="Additional resources or information required")
+ resources_needed: list[str] = Field(description="Additional resources or information required")
  estimated_resolution_time: Literal["immediate", "hours", "days", "weeks"] = Field(
  description="Estimated time to resolution (immediate, hours, days, weeks)"
  )
- alternative_responses: List[str] = Field(description="Alternative response options for different scenarios")
+ alternative_responses: list[str] = Field(description="Alternative response options for different scenarios")
  personalization_notes: str = Field(description="Suggestions for personalizing the response")


@@ -92,8 +92,6 @@ def response_suggestion(
  response_style: str = "professional",
  company_name: str = "our company",
  business_context: str = "general customer support",
- temperature: float = 0.0,
- top_p: float = 1.0,
  ) -> PreparedTask:
  """Create a configurable response suggestion task.

@@ -101,8 +99,6 @@ def response_suggestion(
  response_style (str): Style of response (professional, friendly, empathetic, formal).
  company_name (str): Name of the company for personalization.
  business_context (str): Business context for responses.
- temperature (float): Sampling temperature (0.0-1.0).
- top_p (float): Nucleus sampling parameter (0.0-1.0).

  Returns:
  PreparedTask configured for response suggestions.
@@ -115,7 +111,8 @@ def response_suggestion(
  "formal": "Use formal business language appropriate for official communications",
  }

- instructions = f"""Generate a professional, helpful response suggestion for the customer inquiry that addresses their needs effectively.
+ instructions = f"""Generate a professional, helpful response suggestion for the customer
+ inquiry that addresses their needs effectively.

  Business Context: {business_context}
  Company Name: {company_name}
@@ -180,13 +177,16 @@ Avoid:
  - Dismissing customer concerns
  - Lengthy responses that don't address the main issue

- IMPORTANT: Generate responses in the same language as the input text, except for the predefined categorical fields (tone, priority, response_type, estimated_resolution_time) which must use the exact English values specified above. For example, if the input is in Italian, provide suggested_response, key_points, alternative_responses, and personalization_notes in Italian, but use English values like "empathetic" for tone.
+ IMPORTANT: Generate responses in the same language as the input text, except for the predefined
+ categorical fields (tone, priority, response_type, estimated_resolution_time) which must use
+ the exact English values specified above. For example, if the input is in Italian, provide
+ suggested_response, key_points, alternative_responses, and personalization_notes in Italian,
+ but use English values like "empathetic" for tone.

- Generate helpful, professional response that moves toward resolution while maintaining positive customer relationship."""
+ Generate helpful, professional response that moves toward resolution while maintaining
+ positive customer relationship."""

- return PreparedTask(
- instructions=instructions, response_format=ResponseSuggestion, temperature=temperature, top_p=top_p
- )
+ return PreparedTask(instructions=instructions, response_format=ResponseSuggestion)


  # Backward compatibility - default configuration
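A sketch of the trimmed 1.0.10 signature; the argument values below are placeholders, only the parameter names come from the diff above.

```python
from openaivec.task.customer_support import response_suggestion

task = response_suggestion(
    response_style="empathetic",      # professional, friendly, empathetic, formal
    company_name="Acme Corp",         # placeholder company name
    business_context="consumer electronics support",
)
```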
@@ -8,7 +8,7 @@ Example:

  ```python
  from openai import OpenAI
- from openaivec.responses import BatchResponses
+ from openaivec import BatchResponses
  from openaivec.task import customer_support

  client = OpenAI()
@@ -96,11 +96,11 @@ Example:
  ```
  """

- from typing import Dict, List, Literal, Optional
+ from typing import Dict, Literal

  from pydantic import BaseModel, Field

- from ...model import PreparedTask
+ from openaivec._model import PreparedTask

  __all__ = ["urgency_analysis"]

@@ -111,10 +111,11 @@ class UrgencyAnalysis(BaseModel):
  )
  urgency_score: float = Field(description="Urgency score from 0.0 (not urgent) to 1.0 (extremely urgent)")
  response_time: Literal["immediate", "within_1_hour", "within_4_hours", "within_24_hours"] = Field(
- description="Recommended response time from configured times (immediate, within_1_hour, within_4_hours, within_24_hours)"
+ description="Recommended response time from configured times "
+ "(immediate, within_1_hour, within_4_hours, within_24_hours)"
  )
  escalation_required: bool = Field(description="Whether this inquiry requires escalation to management")
- urgency_indicators: List[str] = Field(description="Specific words or phrases that indicate urgency")
+ urgency_indicators: list[str] = Field(description="Specific words or phrases that indicate urgency")
  business_impact: Literal["none", "low", "medium", "high", "critical"] = Field(
  description="Potential business impact (none, low, medium, high, critical)"
  )
@@ -126,30 +127,26 @@


  def urgency_analysis(
- urgency_levels: Optional[Dict[str, str]] = None,
- response_times: Optional[Dict[str, str]] = None,
- customer_tiers: Optional[Dict[str, str]] = None,
- escalation_rules: Optional[Dict[str, str]] = None,
- urgency_keywords: Optional[Dict[str, List[str]]] = None,
+ urgency_levels: Dict[str, str] | None = None,
+ response_times: Dict[str, str] | None = None,
+ customer_tiers: Dict[str, str] | None = None,
+ escalation_rules: Dict[str, str] | None = None,
+ urgency_keywords: Dict[str, list[str]] | None = None,
  business_context: str = "general customer support",
  business_hours: str = "24/7 support",
- sla_rules: Optional[Dict[str, str]] = None,
- temperature: float = 0.0,
- top_p: float = 1.0,
+ sla_rules: Dict[str, str] | None = None,
  ) -> PreparedTask:
  """Create a configurable urgency analysis task.

  Args:
- urgency_levels (Optional[Dict[str, str]]): Dictionary mapping urgency levels to descriptions.
- response_times (Optional[Dict[str, str]]): Dictionary mapping urgency levels to response times.
- customer_tiers (Optional[Dict[str, str]]): Dictionary mapping tier names to descriptions.
- escalation_rules (Optional[Dict[str, str]]): Dictionary mapping conditions to escalation actions.
- urgency_keywords (Optional[Dict[str, List[str]]]): Dictionary mapping urgency levels to indicator keywords.
+ urgency_levels (dict[str, str] | None): Dictionary mapping urgency levels to descriptions.
+ response_times (dict[str, str] | None): Dictionary mapping urgency levels to response times.
+ customer_tiers (dict[str, str] | None): Dictionary mapping tier names to descriptions.
+ escalation_rules (dict[str, str] | None): Dictionary mapping conditions to escalation actions.
+ urgency_keywords (dict[str, list[str]] | None): Dictionary mapping urgency levels to indicator keywords.
  business_context (str): Description of the business context.
  business_hours (str): Description of business hours for response time calculation.
- sla_rules (Optional[Dict[str, str]]): Dictionary mapping customer tiers to SLA requirements.
- temperature (float): Sampling temperature (0.0-1.0).
- top_p (float): Nucleus sampling parameter (0.0-1.0).
+ sla_rules (dict[str, str] | None): Dictionary mapping customer tiers to SLA requirements.

  Returns:
  PreparedTask configured for urgency analysis.
@@ -278,13 +275,15 @@ Consider:
  - Revenue or operational impact
  - Compliance or legal implications

- IMPORTANT: Provide analysis responses in the same language as the input text, except for the predefined categorical fields (urgency_level, response_time, business_impact, customer_tier) which must use the exact English values specified above. For example, if the input is in French, provide urgency_indicators and reasoning in French, but use English values like "critical" for urgency_level.
+ IMPORTANT: Provide analysis responses in the same language as the input text, except for the
+ predefined categorical fields (urgency_level, response_time, business_impact, customer_tier)
+ which must use the exact English values specified above. For example, if the input is in French,
+ provide urgency_indicators and reasoning in French, but use English values like "critical" for
+ urgency_level.

  Provide detailed analysis with clear reasoning for urgency level and response time recommendations."""

- return PreparedTask(
- instructions=instructions, response_format=UrgencyAnalysis, temperature=temperature, top_p=top_p
- )
+ return PreparedTask(instructions=instructions, response_format=UrgencyAnalysis)


  # Backward compatibility - default configuration
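A sketch of the new configuration-driven call; the parameter names follow the 1.0.10 signature above, and all dictionary values are placeholders.

```python
from openaivec.task.customer_support import urgency_analysis

task = urgency_analysis(
    urgency_levels={"critical": "service outage or data loss", "low": "general question"},
    response_times={"critical": "immediate", "low": "within_24_hours"},
    sla_rules={"enterprise": "1-hour first response"},   # illustrative SLA mapping
    business_context="B2B infrastructure support",
    business_hours="24/7 support",
)
```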