openaivec 0.14.10__py3-none-any.whl → 0.14.13__py3-none-any.whl

This diff compares the contents of two publicly released versions of this package as published to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registries.
@@ -100,15 +100,13 @@ class IntentAnalysis(BaseModel):
     )
 
 
-def intent_analysis(
-    business_context: str = "general customer support", temperature: float = 0.0, top_p: float = 1.0
-) -> PreparedTask:
+def intent_analysis(business_context: str = "general customer support", **api_kwargs) -> PreparedTask:
     """Create a configurable intent analysis task.
 
     Args:
         business_context (str): Business context for intent analysis.
-        temperature (float): Sampling temperature (0.0-1.0).
-        top_p (float): Nucleus sampling parameter (0.0-1.0).
+        **api_kwargs: Additional keyword arguments to pass to the OpenAI API,
+            such as temperature, top_p, etc.
 
     Returns:
         PreparedTask configured for intent analysis.
@@ -171,8 +169,8 @@ next_steps, and reasoning in Japanese, but use English values like "get_help" fo
 
 Provide comprehensive intent analysis with actionable recommendations."""
 
-    return PreparedTask(instructions=instructions, response_format=IntentAnalysis, temperature=temperature, top_p=top_p)
+    return PreparedTask(instructions=instructions, response_format=IntentAnalysis, api_kwargs=api_kwargs)
 
 
 # Backward compatibility - default configuration
-INTENT_ANALYSIS = intent_analysis()
+INTENT_ANALYSIS = intent_analysis(temperature=0.0, top_p=1.0)
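With 0.14.13, this factory stops pinning `temperature` and `top_p` and instead forwards arbitrary OpenAI API keyword arguments. A minimal sketch of the new calling convention, assuming the factory is re-exported from `openaivec.task.customer_support` alongside the `INTENT_ANALYSIS` constant (the kwarg values are illustrative):

```python
from openaivec.task import customer_support

# Pre-0.14.13 behavior, now opt-in: the backward-compatibility constant
# pins the old deterministic sampling settings explicitly.
deterministic = customer_support.intent_analysis(temperature=0.0, top_p=1.0)

# Any other OpenAI API kwargs flow straight into PreparedTask.api_kwargs.
exploratory = customer_support.intent_analysis(
    business_context="e-commerce returns desk",  # illustrative value
    temperature=0.7,
)
```

Note the behavioral shift: calling `intent_analysis()` with no keyword arguments now produces an empty `api_kwargs`, so no sampling parameters are sent and the API's own defaults apply.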
@@ -92,8 +92,7 @@ def response_suggestion(
     response_style: str = "professional",
     company_name: str = "our company",
     business_context: str = "general customer support",
-    temperature: float = 0.0,
-    top_p: float = 1.0,
+    **api_kwargs,
 ) -> PreparedTask:
     """Create a configurable response suggestion task.
 
@@ -101,8 +100,8 @@ def response_suggestion(
         response_style (str): Style of response (professional, friendly, empathetic, formal).
         company_name (str): Name of the company for personalization.
         business_context (str): Business context for responses.
-        temperature (float): Sampling temperature (0.0-1.0).
-        top_p (float): Nucleus sampling parameter (0.0-1.0).
+        **api_kwargs: Additional keyword arguments to pass to the OpenAI API,
+            such as temperature, top_p, etc.
 
     Returns:
         PreparedTask configured for response suggestions.
@@ -190,10 +189,8 @@ but use English values like "empathetic" for tone.
 Generate helpful, professional response that moves toward resolution while maintaining
 positive customer relationship."""
 
-    return PreparedTask(
-        instructions=instructions, response_format=ResponseSuggestion, temperature=temperature, top_p=top_p
-    )
+    return PreparedTask(instructions=instructions, response_format=ResponseSuggestion, api_kwargs=api_kwargs)
 
 
 # Backward compatibility - default configuration
-RESPONSE_SUGGESTION = response_suggestion()
+RESPONSE_SUGGESTION = response_suggestion(temperature=0.0, top_p=1.0)
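The same migration applies to `response_suggestion`; only the domain parameters differ. A hedged sketch, again assuming the factory is re-exported from `openaivec.task.customer_support` (the company name is an illustrative value):

```python
from openaivec.task import customer_support

task = customer_support.response_suggestion(
    response_style="empathetic",
    company_name="Contoso",  # illustrative value
    temperature=0.0,
    top_p=1.0,  # reproduces the pre-0.14.13 pinned defaults
)
```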
@@ -135,8 +135,7 @@ def urgency_analysis(
     business_context: str = "general customer support",
     business_hours: str = "24/7 support",
     sla_rules: Dict[str, str] | None = None,
-    temperature: float = 0.0,
-    top_p: float = 1.0,
+    **api_kwargs,
 ) -> PreparedTask:
     """Create a configurable urgency analysis task.
 
@@ -149,8 +148,8 @@ def urgency_analysis(
         business_context (str): Description of the business context.
         business_hours (str): Description of business hours for response time calculation.
         sla_rules (dict[str, str] | None): Dictionary mapping customer tiers to SLA requirements.
-        temperature (float): Sampling temperature (0.0-1.0).
-        top_p (float): Nucleus sampling parameter (0.0-1.0).
+        **api_kwargs: Additional keyword arguments to pass to the OpenAI API,
+            such as temperature, top_p, etc.
 
     Returns:
         PreparedTask configured for urgency analysis.
@@ -287,10 +286,8 @@ urgency_level.
 
 Provide detailed analysis with clear reasoning for urgency level and response time recommendations."""
 
-    return PreparedTask(
-        instructions=instructions, response_format=UrgencyAnalysis, temperature=temperature, top_p=top_p
-    )
+    return PreparedTask(instructions=instructions, response_format=UrgencyAnalysis, api_kwargs=api_kwargs)
 
 
 # Backward compatibility - default configuration
-URGENCY_ANALYSIS = urgency_analysis()
+URGENCY_ANALYSIS = urgency_analysis(temperature=0.0, top_p=1.0)
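`urgency_analysis` follows suit while keeping its richer configuration surface. A sketch combining `sla_rules` with pass-through API kwargs (the tier names, SLA strings, and business details are all illustrative):

```python
from openaivec.task import customer_support

task = customer_support.urgency_analysis(
    business_context="SaaS platform support",  # illustrative
    business_hours="weekdays 9:00-18:00 JST",  # illustrative
    sla_rules={"premium": "1 hour", "standard": "8 hours"},  # illustrative
    temperature=0.0,  # forwarded via **api_kwargs
)
```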
@@ -75,6 +75,5 @@ DEPENDENCY_PARSING = PreparedTask(
     "relations between words, determine the root word, and provide a tree representation of the "
     "syntactic structure.",
     response_format=DependencyParsing,
-    temperature=0.0,
-    top_p=1.0,
+    api_kwargs={"temperature": 0.0, "top_p": 1.0},
 )
@@ -75,6 +75,5 @@ KEYWORD_EXTRACTION = PreparedTask(
     instructions="Extract important keywords and phrases from the following text. Rank them "
     "by importance, provide frequency counts, identify main topics, and generate a brief summary.",
     response_format=KeywordExtraction,
-    temperature=0.0,
-    top_p=1.0,
+    api_kwargs={"temperature": 0.0, "top_p": 1.0},
 )
@@ -70,6 +70,5 @@ MORPHOLOGICAL_ANALYSIS = PreparedTask(
     "identify part-of-speech tags, provide lemmatized forms, and extract morphological features "
     "for each token.",
     response_format=MorphologicalAnalysis,
-    temperature=0.0,
-    top_p=1.0,
+    api_kwargs={"temperature": 0.0, "top_p": 1.0},
 )
@@ -78,6 +78,5 @@ NAMED_ENTITY_RECOGNITION = PreparedTask(
     "organizations, locations, dates, money, percentages, and other miscellaneous entities "
     "with their positions and confidence scores.",
     response_format=NamedEntityRecognition,
-    temperature=0.0,
-    top_p=1.0,
+    api_kwargs={"temperature": 0.0, "top_p": 1.0},
 )
@@ -78,6 +78,5 @@ SENTIMENT_ANALYSIS = PreparedTask(
     "English values specified (positive/negative/neutral for sentiment, and "
     "joy/sadness/anger/fear/surprise/disgust for emotions).",
     response_format=SentimentAnalysis,
-    temperature=0.0,
-    top_p=1.0,
+    api_kwargs={"temperature": 0.0, "top_p": 1.0},
 )
@@ -157,5 +157,5 @@ class TranslatedString(BaseModel):
 instructions = "Translate the following text into multiple languages. "
 
 MULTILINGUAL_TRANSLATION = PreparedTask(
-    instructions=instructions, response_format=TranslatedString, temperature=0.0, top_p=1.0
+    instructions=instructions, response_format=TranslatedString, api_kwargs={"temperature": 0.0, "top_p": 1.0}
 )
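For all of the pre-configured NLP constants above, the two pinned keyword arguments collapse into a single `api_kwargs` dict on the `PreparedTask` constructor. A minimal sketch of the new shape; the import path is an assumption (the diff shows only call sites) and the response model is illustrative:

```python
from pydantic import BaseModel

from openaivec import PreparedTask  # import path is an assumption


class Keyword(BaseModel):
    term: str
    importance: float


KEYWORD_TASK = PreparedTask(
    instructions="Extract important keywords from the following text.",  # illustrative
    response_format=Keyword,
    api_kwargs={"temperature": 0.0, "top_p": 1.0},  # replaces temperature=/top_p= kwargs
)
```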
@@ -125,7 +125,7 @@ class FillNaResponse(BaseModel):
     )
 
 
-def fillna(df: pd.DataFrame, target_column_name: str, max_examples: int = 500) -> PreparedTask:
+def fillna(df: pd.DataFrame, target_column_name: str, max_examples: int = 500, **api_kwargs) -> PreparedTask:
     """Create a prepared task for filling missing values in a DataFrame column.
 
     Analyzes the provided DataFrame to understand data patterns and creates
@@ -141,12 +141,14 @@ def fillna(df: pd.DataFrame, target_column_name: str, max_examples: int = 500) -
         max_examples (int): Maximum number of example rows to use for few-shot
             learning. Defaults to 500. Higher values provide more context
             but increase token usage and processing time.
+        **api_kwargs: Additional keyword arguments to pass to the OpenAI API,
+            such as temperature, top_p, etc.
 
     Returns:
         PreparedTask configured for missing value imputation with:
         - Instructions based on DataFrame patterns
         - FillNaResponse format for structured output
-        - Temperature=0.0 and top_p=1.0 for deterministic results
+        - Default deterministic settings (temperature=0.0, top_p=1.0)
 
     Raises:
         ValueError: If target_column_name doesn't exist in DataFrame,
@@ -180,4 +182,7 @@ def fillna(df: pd.DataFrame, target_column_name: str, max_examples: int = 500) -
     if df[target_column_name].notna().sum() == 0:
         raise ValueError(f"Column '{target_column_name}' contains no non-null values for training examples.")
     instructions = get_instructions(df, target_column_name, max_examples)
-    return PreparedTask(instructions=instructions, response_format=FillNaResponse, temperature=0.0, top_p=1.0)
+    # Set default values for deterministic results if not provided
+    if not api_kwargs:
+        api_kwargs = {"temperature": 0.0, "top_p": 1.0}
+    return PreparedTask(instructions=instructions, response_format=FillNaResponse, api_kwargs=api_kwargs)
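Note the defaulting logic above: the deterministic fallback applies only when `api_kwargs` is empty, so passing any single parameter drops both defaults. A usage sketch (the DataFrame is illustrative; the wheel ships `openaivec/task/table/fillna.py`, and the re-export from `openaivec.task.table` is an assumption):

```python
import pandas as pd

from openaivec.task.table import fillna  # re-export assumed

df = pd.DataFrame({"city": ["Tokyo", "Osaka", None], "country": ["JP", "JP", "JP"]})

# No kwargs: falls back to {"temperature": 0.0, "top_p": 1.0}.
task = fillna(df, target_column_name="city")

# Any explicit kwarg replaces (does not merge with) that fallback,
# so top_p is no longer sent here.
task_sampled = fillna(df, target_column_name="city", temperature=0.5)
```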
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: openaivec
-Version: 0.14.10
+Version: 0.14.13
 Summary: Generative mutation for tabular calculation
 Project-URL: Homepage, https://microsoft.github.io/openaivec/
 Project-URL: Repository, https://github.com/microsoft/openaivec
@@ -334,26 +334,34 @@ Scale to enterprise datasets with distributed processing:
 First, obtain a Spark session and configure authentication:
 
 ```python
-import os
 from pyspark.sql import SparkSession
+from openaivec.spark import setup, setup_azure
 
 spark = SparkSession.builder.getOrCreate()
-sc = spark.sparkContext
 
-# Configure authentication via SparkContext environment variables
 # Option 1: Using OpenAI
-sc.environment["OPENAI_API_KEY"] = os.environ.get("OPENAI_API_KEY")
+setup(
+    spark,
+    api_key="your-openai-api-key",
+    responses_model_name="gpt-4.1-mini",  # Optional: set default model
+    embeddings_model_name="text-embedding-3-small"  # Optional: set default model
+)
 
 # Option 2: Using Azure OpenAI
-# sc.environment["AZURE_OPENAI_API_KEY"] = os.environ.get("AZURE_OPENAI_API_KEY")
-# sc.environment["AZURE_OPENAI_BASE_URL"] = os.environ.get("AZURE_OPENAI_BASE_URL")
-# sc.environment["AZURE_OPENAI_API_VERSION"] = os.environ.get("AZURE_OPENAI_API_VERSION")
+# setup_azure(
+#     spark,
+#     api_key="your-azure-openai-api-key",
+#     base_url="https://YOUR-RESOURCE-NAME.services.ai.azure.com/openai/v1/",
+#     api_version="preview",
+#     responses_model_name="my-gpt4-deployment",  # Optional: set default deployment
+#     embeddings_model_name="my-embedding-deployment"  # Optional: set default deployment
+# )
 ```
 
 Next, create and register UDFs using the provided functions:
 
 ```python
-from openaivec.spark import responses_udf, task_udf, embeddings_udf, count_tokens_udf
+from openaivec.spark import responses_udf, task_udf, embeddings_udf, count_tokens_udf, similarity_udf, parse_udf
 from pydantic import BaseModel
 
 # --- Register Responses UDF (String Output) ---
@@ -387,6 +395,9 @@ spark.udf.register(
 # --- Register Token Counting UDF ---
 spark.udf.register("count_tokens", count_tokens_udf())
 
+# --- Register Similarity UDF ---
+spark.udf.register("compute_similarity", similarity_udf())
+
 # --- Register UDFs with Pre-configured Tasks ---
 from openaivec.task import nlp, customer_support
 
@@ -414,6 +425,17 @@ spark.udf.register(
     )
 )
 
+# --- Register Parse UDF (Dynamic Schema Inference) ---
+spark.udf.register(
+    "parse_dynamic",
+    parse_udf(
+        instructions="Extract key entities and attributes from the text",
+        example_table_name="sample_texts",  # Infer schema from examples
+        example_field_name="text",
+        max_examples=50
+    )
+)
+
 ```
 
 You can now use these UDFs in Spark SQL:
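For instance, the newly registered UDFs might be queried like this. A sketch only: the `sample_texts` table, its `text` column, the `embedded_pairs` table, and `compute_similarity` taking two embedding vectors are all assumptions:

```python
# Hypothetical queries; table and column names are assumptions.
spark.sql("""
    SELECT
        text,
        count_tokens(text)  AS n_tokens,
        parse_dynamic(text) AS parsed
    FROM sample_texts
""").show()

# Presumed two-argument form over embedding columns.
spark.sql("SELECT compute_similarity(emb_a, emb_b) AS sim FROM embedded_pairs").show()
```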
@@ -691,17 +713,19 @@ steps:
 - In the notebook, import and use `openaivec.spark` functions as you normally would. For example:
 
   ```python
-  import os
-  from openaivec.spark import responses_udf, embeddings_udf
+  from openaivec.spark import setup_azure, responses_udf, embeddings_udf
 
   # In Microsoft Fabric, spark session is automatically available
   # spark = SparkSession.builder.getOrCreate()
-  sc = spark.sparkContext
-
+
   # Configure Azure OpenAI authentication
-  sc.environment["AZURE_OPENAI_API_KEY"] = "<your-api-key>"
-  sc.environment["AZURE_OPENAI_BASE_URL"] = "https://YOUR-RESOURCE-NAME.services.ai.azure.com/openai/v1/"
-  sc.environment["AZURE_OPENAI_API_VERSION"] = "preview"
+  setup_azure(
+      spark,
+      api_key="<your-api-key>",
+      base_url="https://YOUR-RESOURCE-NAME.services.ai.azure.com/openai/v1/",
+      api_version="preview",
+      responses_model_name="my-gpt4-deployment"  # Your Azure deployment name
+  )
 
   # Register UDFs
   spark.udf.register(
@@ -0,0 +1,37 @@
+openaivec/__init__.py,sha256=mXCGNNTjYbmE4CAXGvAs78soxUsoy_mxxnvaCk_CL6Y,361
+openaivec/_di.py,sha256=Cl1ZoNBlQsJL1bpzoMDl08uT9pZFVSlqOdLbS3_MwPE,11462
+openaivec/_dynamic.py,sha256=7ZaC59w2Edemnao57XeZVO4qmSOA-Kus6TchZC3Dd5o,14821
+openaivec/_embeddings.py,sha256=nirLqOu69fTB7aSCYhbbRbwAA6ggwEYJiQoPDsHqAqQ,8200
+openaivec/_log.py,sha256=LHNs6AbJzM4weaRARZFroigxR6D148d7WSIMLk1IhbU,1439
+openaivec/_model.py,sha256=71oiENUKwpY58ilj1LE7fDOAhs7PUSiZRiUHKUIuu7Y,3235
+openaivec/_optimize.py,sha256=3nS8VehbS7iGC1tPDDQh-iAgyKHbVYmMbCRBWM77U_U,3827
+openaivec/_prompt.py,sha256=NWE7jZKYphkD856haynJLmRadPugJ68emT42pd7Ciso,20633
+openaivec/_provider.py,sha256=8z8gPYY5-Z7rzDlj_NC6hR__DUqVAH7VLHJn6LalzRg,6158
+openaivec/_proxy.py,sha256=AiGuC1MCFjZCRXCac-pHUI3Np3nf1HIpWY6nC9ZVCFY,29671
+openaivec/_responses.py,sha256=qBrYv4qblDIs5dRvj9t96r8UfAJmy4ZvtAe6csNZ7oM,20412
+openaivec/_schema.py,sha256=iOeR5J_ihZRDZtzmqvOK1ZtInKcx4OnoR38DB3VmmQw,15666
+openaivec/_serialize.py,sha256=u2Om94Sc_QgJkTlW2BAGw8wd6gYDhc6IRqvS-qevFSs,8399
+openaivec/_util.py,sha256=XfueAycVCQvgRLS7wF7e306b53lebORvZOBzbQjy4vE,6438
+openaivec/pandas_ext.py,sha256=r2jpFqDnWcQYK3pMv5hCtOStOMltccDyLkpprLmIOls,85715
+openaivec/spark.py,sha256=zaEivVOe3ukG8coa9JEUyISQ1YcMqCvAbhaarvn2SOM,32507
+openaivec/task/__init__.py,sha256=RkYIKrcE83M_9Um9cSMkeGzL9kPRAovajfRvr31YxLE,6178
+openaivec/task/customer_support/__init__.py,sha256=KWfGyXPdZyfGdRH17x7hPpJJ1N2EP9PPhZx0fvBAwSI,884
+openaivec/task/customer_support/customer_sentiment.py,sha256=d8spZUtImjePK0xWGvIW98ghbdyOZ0KEZmaUpG8QB7M,7532
+openaivec/task/customer_support/inquiry_classification.py,sha256=NKz1oTm06eU6W-plHe3T3o20lCk6M2NemVXZ4Y_IozU,9602
+openaivec/task/customer_support/inquiry_summary.py,sha256=8X1J8lZwlgX6s02cs86-K0moZ5gTrX7E7WEKiY2vpiQ,6896
+openaivec/task/customer_support/intent_analysis.py,sha256=Jnokzi0wTlHpuTRl5uqxdoHClYU71b9iFTzn3KNeNVM,7478
+openaivec/task/customer_support/response_suggestion.py,sha256=IykZE-BJ_ENhe5frnVl4bQKpArwOuNAITGlBxlu62c0,8306
+openaivec/task/customer_support/urgency_analysis.py,sha256=fdBT0Ud-InGqou-ZuFcVc3EpUNAq5N55_Q9D6D74WlQ,11531
+openaivec/task/nlp/__init__.py,sha256=QoQ0egEK9IEh5hdrE07rZ_KCmC0gy_2FPrWJYRWiipY,512
+openaivec/task/nlp/dependency_parsing.py,sha256=V7pd4_EbBBvdpnFDkfZh08u7kfJ7XJLq_qLkec48yr0,2832
+openaivec/task/nlp/keyword_extraction.py,sha256=e6niCt8XU0EPJLGYOJXQvbfWtl7w9CgfnCE188kecb4,2819
+openaivec/task/nlp/morphological_analysis.py,sha256=qTFFBkFP8CRZU87S59ju5ygXWlEBCtjYlH9Su7czLjs,2416
+openaivec/task/nlp/named_entity_recognition.py,sha256=9BFKYk0PZlyNN8pItGIEFecvZew4K_F5GgY5Ub8xDtM,3052
+openaivec/task/nlp/sentiment_analysis.py,sha256=u-zpqAaQYcr7I3mqMv_CTJXkfxtoLft3qm-qwmqb_p4,3100
+openaivec/task/nlp/translation.py,sha256=kgWj2oN8pUId3vuHTJNx636gB49AGEKXWICA_XJgE_0,6628
+openaivec/task/table/__init__.py,sha256=kJz15WDJXjyC7UIHKBvlTRhCf347PCDMH5T5fONV2sU,83
+openaivec/task/table/fillna.py,sha256=zL6m5hGD4kamV7qHETnn__B59wIY540Ks0EzNgUJgdI,6888
+openaivec-0.14.13.dist-info/METADATA,sha256=rB_WJhIVX11WUoA-r2Ryn57QIuTWj0q0JhjPlz6wXv4,28216
+openaivec-0.14.13.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+openaivec-0.14.13.dist-info/licenses/LICENSE,sha256=ws_MuBL-SCEBqPBFl9_FqZkaaydIJmxHrJG2parhU4M,1141
+openaivec-0.14.13.dist-info/RECORD,,
@@ -1,37 +0,0 @@
-openaivec/__init__.py,sha256=mXCGNNTjYbmE4CAXGvAs78soxUsoy_mxxnvaCk_CL6Y,361
-openaivec/_di.py,sha256=1MXaBzaH_ZenQnWKQzBY2z-egHwiteMvg7byoUH3ZZI,10658
-openaivec/_dynamic.py,sha256=7ZaC59w2Edemnao57XeZVO4qmSOA-Kus6TchZC3Dd5o,14821
-openaivec/_embeddings.py,sha256=upCjl8m9h1CihP6t7wvIH_vivOAPSgmgooAxIhnUMUw,7449
-openaivec/_log.py,sha256=LHNs6AbJzM4weaRARZFroigxR6D148d7WSIMLk1IhbU,1439
-openaivec/_model.py,sha256=toS2oBubrJa9jrdYy-87Fb2XivjXUlk_8Zn5gKUAcFI,3345
-openaivec/_optimize.py,sha256=3nS8VehbS7iGC1tPDDQh-iAgyKHbVYmMbCRBWM77U_U,3827
-openaivec/_prompt.py,sha256=zLv13q47CKV3jnETUyWAIlnjXFSEMs70c8m0yN7_Hek,20820
-openaivec/_provider.py,sha256=YLrEcb4aWBD1fj0n6PNcJpCtEXK6jkUuRH_WxcLDCuI,7145
-openaivec/_proxy.py,sha256=AiGuC1MCFjZCRXCac-pHUI3Np3nf1HIpWY6nC9ZVCFY,29671
-openaivec/_responses.py,sha256=lVJRa_Uc7hQJnYJRgumqwBbu6GToZqsLFS6tIAFO1Fc,24014
-openaivec/_schema.py,sha256=RKjDPqet1TlReYibah0R0NIvCV1VWN5SZxiaBeV0gCY,15492
-openaivec/_serialize.py,sha256=u2Om94Sc_QgJkTlW2BAGw8wd6gYDhc6IRqvS-qevFSs,8399
-openaivec/_util.py,sha256=XfueAycVCQvgRLS7wF7e306b53lebORvZOBzbQjy4vE,6438
-openaivec/pandas_ext.py,sha256=_MdiZWokius62zI_sTp_nd-33fMNlnRHbyqso0eF_Hw,85406
-openaivec/spark.py,sha256=Dbuhlk8Z89Fwk3fbWp1Ud9uTpfNyfjZOIx8ARJMnQf0,25371
-openaivec/task/__init__.py,sha256=lrgoc9UIox7XnxZ96dQRl88a-8QfuZRFBHshxctpMB8,6178
-openaivec/task/customer_support/__init__.py,sha256=KWfGyXPdZyfGdRH17x7hPpJJ1N2EP9PPhZx0fvBAwSI,884
-openaivec/task/customer_support/customer_sentiment.py,sha256=NHIr9nm2d2Bu1MSpxFsM3_w1UuQrQEwnHrClVbhdCUw,7612
-openaivec/task/customer_support/inquiry_classification.py,sha256=NUU_apX6ADi4SyGUbvflGt-v5Ka7heHXlJOHPAeVoGg,9640
-openaivec/task/customer_support/inquiry_summary.py,sha256=PDQvF_ZEZ9TnFhLM2yIinP-OKz_PSPeIET48P9UIgzQ,6920
-openaivec/task/customer_support/intent_analysis.py,sha256=uWdza2pkqnRJn3JtPWbsTAUDL1Sn-BwH-ZpN2cUxhe8,7504
-openaivec/task/customer_support/response_suggestion.py,sha256=Hxt5MDpdfoo5S7_I_eQ302AOIsSCyNBeaDSMMMfPYoQ,8344
-openaivec/task/customer_support/urgency_analysis.py,sha256=DRd4pmFnwuiNGBKxxkEkfp5CZZeDppmBUThs5NYOL9g,11569
-openaivec/task/nlp/__init__.py,sha256=QoQ0egEK9IEh5hdrE07rZ_KCmC0gy_2FPrWJYRWiipY,512
-openaivec/task/nlp/dependency_parsing.py,sha256=MhrHNCqSd-JmlQ21ISYwGYXazNVZGsVuX_v0ZpyI50w,2817
-openaivec/task/nlp/keyword_extraction.py,sha256=seFeuk6Z2dmlVBFoDN-tOVgCnR7jq36sTsWySjb_ric,2804
-openaivec/task/nlp/morphological_analysis.py,sha256=TcNGA0cYrPczr1ZxflBiokh-qdwMSvRDHq66fP7gi2c,2401
-openaivec/task/nlp/named_entity_recognition.py,sha256=jnVfGtf7TDCNNHrLQ5rhMYvmHc8FKXQxEzC5ib6NnVc,3037
-openaivec/task/nlp/sentiment_analysis.py,sha256=Np-yY0d4Kr5WEjGjq4tNFHDNarBLajJr8Q2E6K9ms3A,3085
-openaivec/task/nlp/translation.py,sha256=VYgiXtr2TL1tbqZkBpyVAy4ahrgd8UO4ZjhIL6xMdkI,6609
-openaivec/task/table/__init__.py,sha256=kJz15WDJXjyC7UIHKBvlTRhCf347PCDMH5T5fONV2sU,83
-openaivec/task/table/fillna.py,sha256=g_CpLnLzK1C5rCiVq15L3X0kywJK6CtSrKRYxQFuhn8,6606
-openaivec-0.14.10.dist-info/METADATA,sha256=BXQWevriu4qabbZM1paMO1PV_i8zmFPqiodTMwzeJnQ,27567
-openaivec-0.14.10.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-openaivec-0.14.10.dist-info/licenses/LICENSE,sha256=ws_MuBL-SCEBqPBFl9_FqZkaaydIJmxHrJG2parhU4M,1141
-openaivec-0.14.10.dist-info/RECORD,,