aepp 0.5.2.post1__py3-none-any.whl → 0.5.2.post3__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
aepp/synchronizer.py CHANGED
@@ -20,7 +20,12 @@ from .configs import ConnectObject
 
 class Synchronizer:
     ## TO DO -> Add support for local environment
-    def __init__(self,targets:list=None,config:'ConnectObject'=None,baseSandbox:str=None,region:str='nld2',localFolder:str|list|None=None):
+    def __init__(self,
+                 targets:list|None=None,
+                 config:ConnectObject|None=None,
+                 baseSandbox:str|None=None,
+                 region:str='nld2',
+                 localFolder:str|list|None=None):
         """
         Setup the synchronizer object with the base sandbox and target sandbox.
         Arguments:
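
The signature change above is a reflow plus stricter typing: the implicit-None defaults (targets:list=None) become explicit PEP 604 optionals (list|None=None), and the string forward reference 'ConnectObject' becomes a direct reference, since ConnectObject is already imported at the top of the module. A minimal instantiation sketch under the new signature; the sandbox names are hypothetical:

    from aepp.synchronizer import Synchronizer

    sync = Synchronizer(
        targets=['dev-sandbox', 'stage-sandbox'],  # sandboxes to synchronize into
        baseSandbox='prod-sandbox',                # sandbox used as the source of truth
        region='nld2',                             # default region, unchanged
    )
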
@@ -71,6 +76,19 @@ class Synchronizer:
             self.descriptorFolder = [folder / 'descriptor' for folder in self.localfolder]
             self.mergePolicyFolder = [folder / 'mergepolicy' for folder in self.localfolder]
             self.audienceFolder = [folder / 'audience' for folder in self.localfolder]
+            self.tagFolder = [folder / 'tag' for folder in self.localfolder]
+            self.dict_tag_name_id = {}
+            for folder in self.tagFolder:
+                try:
+                    if folder.exists():
+                        with open(folder / 'tags.json','r') as f:
+                            tags_file = json.load(f)
+                        for tag in tags_file:
+                            self.dict_tag_name_id[tag['name']] = tag['id']
+                        pass
+                except Exception as e:
+                    print(f"could not load tags from folder {folder} : {e}")
+                    pass
         if baseSandbox is not None:
             self.baseSandbox = baseSandbox
         else:
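
The new tag cache above reads an optional tags.json from each local folder's tag subdirectory and keeps only a name-to-id lookup; an unreadable file is reported and skipped rather than failing __init__. A sketch of the file shape the loader expects (only the name and id keys are consumed; the values here are hypothetical):

    [
        {"name": "marketing", "id": "tag-id-123"},
        {"name": "finance", "id": "tag-id-456"}
    ]
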
@@ -85,7 +103,7 @@ class Synchronizer:
         self.dict_baseComponents = {'schema':{},'class':{},'fieldgroup':{},'datatype':{},'datasets':{},'identities':{},"schemaDescriptors":{},'mergePolicy':{},'audience':{}}
         self.dict_targetComponents = {target:{'schema':{},'class':{},'fieldgroup':{},'datatype':{},'datasets':{},'identities':{},"schemaDescriptors":{},'mergePolicy':{},'audience':{}} for target in targets}
 
-    def getSyncFieldGroupManager(self,fieldgroup:str,sandbox:str=None)-> dict:
+    def getSyncFieldGroupManager(self,fieldgroup:str,sandbox:str|None=None)-> dict:
         """
         Get a field group Manager from the synchronizer.
         It searches through the component cache to see if the FieldGroupManager for the target sandbox is already instantiated.
@@ -119,7 +137,7 @@ class Synchronizer:
         else:
             raise ValueError(f"the field group '{fieldgroup}' has not been synchronized to the sandbox '{sandbox}'")
 
-    def getDatasetName(self,datasetId:str,sandbox:str=None)-> str:
+    def getDatasetName(self,datasetId:str,sandbox:str|None=None)-> str:
         """
         Get a dataset name from the synchronizer based on the ID of the dataset.
         Arguments:
@@ -139,7 +157,7 @@ class Synchronizer:
         else:
             raise ValueError(f"the dataset '{datasetId}' has not been synchronized to the sandbox '{sandbox}'")
 
-    def syncComponent(self,component:Union[str,dict],componentType:str=None,force:bool=False,verbose:bool=False)-> dict:
+    def syncComponent(self,component:Union[str,dict],componentType:str|None=None,force:bool=False,verbose:bool=False)-> dict:
         """
         Synchronize a component to the target sandbox.
         The component could be a string (name or id of the component in the base sandbox) or a dictionary with the definition of the component.
@@ -235,6 +253,8 @@ class Synchronizer:
                 for file in folder.glob('*.json'):
                     ds_file = json.load(FileIO(file))
                     if ds_file['id'] == component or ds_file['name'] == component:
+                        if ds_file.get('UnifiedTags',[]) != [] and self.dict_tag_name_id is not None:
+                            ds_file['unifiedTags'] = [self.dict_tag_name_id[tag_name] for tag_name in ds_file.get('UnifiedTags',[]) if tag_name in self.dict_tag_name_id.keys()]
                         component = ds_file
                         break
             if len(component) == 1: ## if the component is the catalog API response {'key': {dataset definition}}
@@ -263,6 +283,8 @@ class Synchronizer:
                 for file in folder.glob('*.json'):
                     au_file = json.load(FileIO(file))
                     if au_file.get('id','') == component or au_file.get('name','') == component:
+                        if au_file.get('tags',[]) != [] and self.dict_tag_name_id is not None:
+                            au_file['tags'] = [self.dict_tag_name_id[tag_name] for tag_name in au_file.get('tags',[]) if tag_name in self.dict_tag_name_id.keys()]
                         component = au_file
                         break
         elif type(component) == dict:
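
Both local-file branches above (datasets and audiences) apply the same remapping: tag names stored in the exported JSON are translated to ids through self.dict_tag_name_id, and names without a cached id are silently dropped. The logic in isolation, with hypothetical values:

    dict_tag_name_id = {'marketing': 'tag-id-123'}
    ds_file = {'name': 'my_dataset', 'UnifiedTags': ['marketing', 'unknown']}
    ds_file['unifiedTags'] = [dict_tag_name_id[tag_name]
                              for tag_name in ds_file.get('UnifiedTags', [])
                              if tag_name in dict_tag_name_id]
    # -> ['tag-id-123']; 'unknown' has no cached id and is dropped
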
@@ -345,6 +367,7 @@ class Synchronizer:
             raise TypeError("the baseDataType must be a DataTypeManager object")
         self.dict_baseComponents['datatype'][baseDataType.title] = baseDataType
         name_base_datatype = baseDataType.title
+        description_base_datatype = baseDataType.description
         for target in self.dict_targetsConfig.keys():
             targetSchema = schema.Schema(config=self.dict_targetsConfig[target])
             t_datatype = None
@@ -362,7 +385,7 @@ class Synchronizer:
                 base_paths = df_base['path'].tolist()
                 target_paths = df_target['path'].tolist()
                 diff_paths = list(set(base_paths) - set(target_paths))
-                if len(diff_paths) > 0 or force==True: ## there are differences
+                if len(diff_paths) > 0 or description_base_datatype != t_datatype.description or force==True: ## there are differences
                     base_datatypes_paths = baseDataType.getDataTypePaths()
                     df_base_limited = df_base[df_base['origin'] == 'self'].copy() ## exclude field group native fields
                     df_base_limited = df_base_limited[~df_base_limited['path'].isin(list(base_datatypes_paths.keys()))] ## exclude base of datatype rows
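
The update trigger is now description-aware: beyond structural drift in the field paths and the force flag, a changed description alone is enough to push an update. The same three-part predicate is applied to field groups and schemas in the hunks below. Reduced to a sketch:

    needs_update = (
        len(diff_paths) > 0                                     # structural drift
        or description_base_datatype != t_datatype.description  # description drift
        or force                                                # caller override
    )
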
@@ -394,6 +417,7 @@ class Synchronizer:
                     print(f"datatype '{name_base_datatype}' does not exist in target {target}, creating it")
                 df_base = baseDataType.to_dataframe(full=True)
                 new_datatype = datatypemanager.DataTypeManager(title=name_base_datatype,config=self.dict_targetsConfig[target],sandbox=target)
+                new_datatype.setDescription(description_base_datatype)
                 base_datatypes_paths = baseDataType.getDataTypePaths()
                 df_base_limited = df_base[df_base['origin'] == 'self'].copy() ## exclude field group native fields
                 df_base_limited = df_base_limited[~df_base_limited['path'].isin(list(base_datatypes_paths.keys()))] ## exclude base of datatype rows
@@ -413,7 +437,7 @@ class Synchronizer:
                         arrayBool = True
                         path = path[:-4] ## removing the [] from the path
                     new_datatype.addField(path=path,dataType='dataType',ref=tmp_t_dt.id,array=arrayBool)
-                new_datatype.setDescription(baseDataType.description)
+                new_datatype.setDescription(description_base_datatype)
                 res = new_datatype.createDataType()
                 if '$id' in res.keys():
                     t_datatype = datatypemanager.DataTypeManager(res['$id'],config=self.dict_targetsConfig[target],sandbox=target)
@@ -434,6 +458,7 @@ class Synchronizer:
         self.dict_baseComponents['fieldgroup'][baseFieldGroup.title] = baseFieldGroup
         name_base_fieldgroup = baseFieldGroup.title
         base_fg_classIds = baseFieldGroup.classIds
+        base_fg_description = baseFieldGroup.description
         for target in self.dict_targetsConfig.keys():
             t_fieldgroup = None
             targetSchema = schema.Schema(config=self.dict_targetsConfig[target])
@@ -465,7 +490,9 @@ class Synchronizer:
                 base_paths = df_base['path'].tolist()
                 target_paths = df_target['path'].tolist()
                 diff_paths = [path for path in base_paths if path not in target_paths]
-                if len(diff_paths) > 0 or force==True:
+                if len(diff_paths) > 0 or base_fg_description != t_fieldgroup.description or force==True:
+                    if verbose:
+                        print(f"updating field group '{name_base_fieldgroup}' in target {target}")
                     base_datatypes_paths = baseFieldGroup.getDataTypePaths()
                     ## handling fieldgroup native fields
                     df_base_limited = df_base[df_base['origin'] == 'fieldGroup'].copy() ## exclude datatypes
@@ -500,6 +527,8 @@ class Synchronizer:
                         t_fieldgroup.addField(path=path,dataType='dataType',ref=tmp_t_dt.id,array=arrayBool)
                     if len(t_fieldgroup.classIds) != len(fg_class_ids):
                         t_fieldgroup.updateClassSupported(fg_class_ids)
+                    if base_fg_description != t_fieldgroup.description:
+                        t_fieldgroup.setDescription(base_fg_description)
                     res = t_fieldgroup.updateFieldGroup()
                     if '$id' not in res.keys():
                         raise Exception(res)
@@ -547,7 +576,7 @@ class Synchronizer:
                         arrayBool = True
                         path = path[:-4] ## removing the [] from the path
                     new_fieldgroup.addField(path=path,dataType='dataType',ref=tmp_t_dt.id,array=arrayBool)
-                new_fieldgroup.setDescription(baseFieldGroup.description)
+                new_fieldgroup.setDescription(base_fg_description)
                 res = new_fieldgroup.createFieldGroup()
                 if '$id' in res.keys():
                     t_fieldgroup = fieldgroupmanager.FieldGroupManager(res['$id'],config=self.dict_targetsConfig[target],sandbox=target)
@@ -571,6 +600,7 @@ class Synchronizer:
         self.dict_baseComponents['schema'][name_base_schema] = baseSchema
         descriptors = baseSchema.getDescriptors()
         base_field_groups_names = list(baseSchema.fieldGroups.values())
+        base_schema_description = baseSchema.description
         dict_base_fg_name_id = {name:fg_id for fg_id,name in baseSchema.fieldGroups.items()}
         for target in self.dict_targetsConfig.keys():
             targetSchemaAPI = schema.Schema(config=self.dict_targetsConfig[target])
@@ -582,9 +612,13 @@ class Synchronizer:
             t_schema = schemamanager.SchemaManager(targetSchemaAPI.data.schemas_altId[name_base_schema],config=self.dict_targetsConfig[target],sandbox=target)
             new_fieldgroups = [fg for fg in base_field_groups_names if fg not in t_schema.fieldGroups.values()]
             existing_fieldgroups = [fg for fg in base_field_groups_names if fg in t_schema.fieldGroups.values()]
-            if len(new_fieldgroups) > 0 or force==True: ## if new field groups
+            if len(new_fieldgroups) > 0 or base_schema_description != t_schema.description or force==True: ## if new field groups
                 if verbose:
-                    print('found new field groups to add to the schema')
+                    if force == False:
+                        print('found difference in the schema, updating it')
+                    else:
+                        print('force flag is set to True, updating the schema')
+                ## handling field groups
                 for new_fieldgroup in new_fieldgroups:
                     if baseSchema.tenantId[1:] not in dict_base_fg_name_id[new_fieldgroup]: ## ootb field group
                         if verbose:
@@ -598,7 +632,7 @@ class Synchronizer:
                             print(f"Creating new custom field group '{tmp_FieldGroup.title}'")
                         self.__syncFieldGroup__(tmp_FieldGroup,verbose=verbose,force=force)
                     t_schema.addFieldGroup(self.dict_targetComponents[target]['fieldgroup'][new_fieldgroup].id)
-                t_schema.setDescription(baseSchema.description)
+                t_schema.setDescription(base_schema_description)
                 res = t_schema.updateSchema()
                 if '$id' not in res.keys():
                     raise Exception(res)
@@ -652,6 +686,7 @@ class Synchronizer:
             else:
                 classId_toUse = baseClassId
             new_schema = schemamanager.SchemaManager(title=name_base_schema,config=self.dict_targetsConfig[target],schemaClass=classId_toUse,sandbox=target)
+            new_schema.setDescription(base_schema_description)
             for fg_name in base_field_groups_names:
                 if baseSchema.tenantId[1:] not in dict_base_fg_name_id[fg_name]: ## ootb field group
                     new_schema.addFieldGroup(dict_base_fg_name_id[fg_name])
@@ -665,7 +700,6 @@ class Synchronizer:
                     tmp_FieldGroup = baseSchema.getFieldGroupManager(fg_name)
                     self.__syncFieldGroup__(tmp_FieldGroup,force=force,verbose=verbose)
                     new_schema.addFieldGroup(self.dict_targetComponents[target]['fieldgroup'][fg_name].id)
-            new_schema.setDescription(baseSchema.description)
             res = new_schema.createSchema()
             if '$id' in res.keys():
                 t_schema = schemamanager.SchemaManager(res['$id'],config=self.dict_targetsConfig[target],sandbox=target)
@@ -739,9 +773,10 @@ class Synchronizer:
             baseIdentities = identityConn.getIdentities()
         elif self.localfolder is not None:
             baseIdentities = []
-            for file in self.identityFolder.glob('*.json'):
-                id_file = json.load(FileIO(file))
-                baseIdentities.append(id_file)
+            for folder in self.identityFolder:
+                for file in folder.glob('*.json'):
+                    id_file = json.load(FileIO(file))
+                    baseIdentities.append(id_file)
         if baseIdentityNS not in [el['xdm:namespace'].lower() for el in target_identitiesDecs]: ## identity descriptor does not exist in target schema
             def_identity = [el for el in baseIdentities if el['code'].lower() == baseIdentityNS][0]
             self.__syncIdentity__(def_identity,verbose=verbose)
@@ -852,9 +887,10 @@ class Synchronizer:
             baseIdentities = identityConn.getIdentities()
         elif self.localfolder is not None:
             baseIdentities = []
-            for file in self.identityFolder.glob('*.json'):
-                id_file = json.load(FileIO(file))
-                baseIdentities.append(id_file)
+            for folder in self.identityFolder:
+                for file in folder.glob('*.json'):
+                    id_file = json.load(FileIO(file))
+                    baseIdentities.append(id_file)
         def_identity = [el for el in baseIdentities if el['code'] == baseIdentityNS][0]
         self.__syncIdentity__(def_identity,verbose=verbose)
         target_referenceIdentity = [desc for desc in target_descriptors if desc['@type'] == 'xdm:descriptorReferenceIdentity']
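
This fix appears in both identity paths: self.identityFolder is built in __init__ as a list of pathlib.Path objects (one per local folder, like the descriptorFolder and audienceFolder lists shown earlier), so the old single loop called .glob() on a list and could not work. The corrected nested iteration in isolation, with hypothetical paths:

    import json
    from pathlib import Path

    identityFolder = [Path('export_a/identity'), Path('export_b/identity')]
    baseIdentities = []
    for folder in identityFolder:            # iterate the folders first
        for file in folder.glob('*.json'):   # then the JSON files inside each one
            with open(file) as f:
                baseIdentities.append(json.load(f))
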
@@ -916,6 +952,7 @@ class Synchronizer:
         self.dict_baseComponents['datasets'][baseDataset['name']] = baseDataset
         base_datasetName = baseDataset['name']
         base_dataset_related_schemaId = baseDataset['schemaRef']['id']
+        base_dataset_unifiedTagIds = baseDataset.get('unifiedTags',[])
         if self.baseConfig is not None:
             baseSchemaAPI = schema.Schema(config=self.baseConfig)
             base_schemas = baseSchemaAPI.getSchemas()
@@ -929,7 +966,7 @@ class Synchronizer:
         for target in self.dict_targetsConfig.keys():
             targetCatalog = catalog.Catalog(config=self.dict_targetsConfig[target])
             t_datasets = targetCatalog.getDataSets()
-            if base_datasetName not in targetCatalog.data.ids.keys(): ## only taking care if dataset does not exist
+            if base_datasetName not in targetCatalog.data.ids.keys(): ## if dataset does not exist
                 if verbose:
                     print(f"dataset '{base_datasetName}' does not exist in target {target}, creating it")
                 targetSchema = schema.Schema(config=self.dict_targetsConfig[target])
@@ -940,15 +977,18 @@ class Synchronizer:
                     baseSchemaManager = schemamanager.SchemaManager(base_dataset_related_schemaId,config=self.baseConfig,localFolder=self.localfolder,sandbox=self.baseSandbox)
                     self.__syncSchema__(baseSchemaManager,verbose=verbose)
                     targetSchemaId = self.dict_targetComponents[target]['schema'][base_dataset_related_schemaName].id
-                    res = targetCatalog.createDataSet(name=base_datasetName,schemaId=targetSchemaId)
+                    res = targetCatalog.createDataSet(name=base_datasetName,schemaId=targetSchemaId,unifiedTags=base_dataset_unifiedTagIds)
                     self.dict_targetComponents[target]['datasets'][base_datasetName] = res
                 else: ## schema already exists in target
                     if verbose:
                         print(f"related schema '{base_dataset_related_schemaName}' does exist in target {target}, checking it")
                     baseSchemaManager = schemamanager.SchemaManager(base_dataset_related_schemaId,config=self.baseConfig,localFolder=self.localfolder,sandbox=self.baseSandbox)
                     self.__syncSchema__(baseSchemaManager,verbose=verbose)
-                    targetSchemaId = targetSchema.data.schemas_id[base_dataset_related_schemaName]
-                    res = targetCatalog.createDataSet(name=base_datasetName,schemaId=targetSchemaId)
+                    target_schema = self.dict_targetComponents[target]['schema'][base_dataset_related_schemaName]
+                    targetSchemaId = target_schema.id
+                    print(f"Target Schema ID: {targetSchemaId}")
+                    print(f"unified Tags: {base_dataset_unifiedTagIds}")
+                    res = targetCatalog.createDataSet(name=base_datasetName,schemaId=targetSchemaId,unifiedTags=base_dataset_unifiedTagIds)
                     self.dict_targetComponents[target]['datasets'][base_datasetName] = res
             else: ## dataset already exists in target
                 if verbose:
@@ -958,6 +998,16 @@ class Synchronizer:
                 t_schemas = targetSchema.getSchemas()
                 baseSchemaManager = schemamanager.SchemaManager(base_dataset_related_schemaId,config=self.baseConfig,localFolder=self.localfolder,sandbox=self.baseSandbox)
                 self.__syncSchema__(baseSchemaManager,verbose=verbose)
+                if verbose:
+                    print(f"dataset '{base_datasetName}' schema synchronized, checking unified tags")
+                if len(base_dataset_unifiedTagIds) > 0:
+                    t_dataset_unifiedTagIds = t_dataset.get('unifiedTags',[])
+                    tags_toAdd = [tagId for tagId in base_dataset_unifiedTagIds if tagId not in t_dataset_unifiedTagIds]
+                    if len(tags_toAdd) > 0:
+                        if verbose:
+                            print(f"adding unified tags to dataset '{base_datasetName}' in target {target}")
+                        t_dataset['unifiedTags'] = t_dataset_unifiedTagIds + tags_toAdd
+                        res = targetCatalog.putDataset(t_dataset['id'],t_dataset)
                 self.dict_targetComponents[target]['datasets'][base_datasetName] = t_dataset
 
     def __syncMergePolicy__(self,mergePolicy:dict,verbose:bool=False)->None:
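
For a dataset that already exists in the target, the new block above merges unified tags instead of overwriting them: only base tag ids missing from the target are appended, and putDataset is called only when there is something to add. The merge step as a standalone sketch, with hypothetical ids:

    base_tags = ['tag-id-123', 'tag-id-456']  # ids carried from the base dataset
    target_tags = ['tag-id-123']              # ids already on the target dataset
    tags_toAdd = [t for t in base_tags if t not in target_tags]
    if tags_toAdd:                            # skip the API call when nothing changed
        merged = target_tags + tags_toAdd     # -> ['tag-id-123', 'tag-id-456']
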
@@ -1036,7 +1086,8 @@ class Synchronizer:
                 "expression":baseAudience.get('expression',[]),
                 "ansibleDataModel":baseAudience.get('ansibleDataModel',{}),
                 "profileInstanceId":baseAudience.get('profileInstanceId',''),
-                "evaluationInfo":baseAudience.get('evaluationInfo',{'batch': {'enabled': True}, 'continuous': {'enabled': False},'synchronous': {'enabled': False}})
+                "evaluationInfo":baseAudience.get('evaluationInfo',{'batch': {'enabled': True}, 'continuous': {'enabled': False},'synchronous': {'enabled': False}}),
+                "tags":baseAudience.get('tags',[])
             }
             res = targetAudiences.createAudience(audienceDef)
             if 'id' in res.keys():
@@ -1052,6 +1103,7 @@ class Synchronizer:
             t_audience['expression'] = baseAudience.get('expression',[])
             t_audience['ansibleDataModel'] = baseAudience.get('ansibleDataModel',{})
             t_audience['evaluationInfo'] = baseAudience.get('evaluationInfo',{'batch': {'enabled': True}, 'continuous': {'enabled': False},'synchronous': {'enabled': False}})
+            t_audience['tags'] = baseAudience.get('tags',[])
             res = targetAudiences.putAudience(t_audience['id'],t_audience)
             self.dict_targetComponents[target]['audience'][audience_name] = res
 
aepp-0.5.2.post1.dist-info/METADATA → aepp-0.5.2.post3.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: aepp
-Version: 0.5.2.post1
+Version: 0.5.2.post3
 Summary: Package to manage AEP API endpoint and some helper functions
 Author-email: Julien Piccini <piccini.julien@gmail.com>
 License: Apache-2.0
aepp-0.5.2.post1.dist-info/RECORD → aepp-0.5.2.post3.dist-info/RECORD CHANGED
@@ -1,7 +1,7 @@
-aepp/__init__.py,sha256=rsU4OMu3pJIgy8emJAD6lhAfqH0-raZ6GyIMJanNBdM,27912
-aepp/__version__.py,sha256=AUsss0HkU-B8aUszp_T12nUU7w7iBuYD5ALVA6BTKYA,23
+aepp/__init__.py,sha256=slCyBeqU3LA_TAS59z2Ng-MnVIJ-1iKt6PrNu1QWHRk,29641
+aepp/__version__.py,sha256=p2cBUcJxYGyXCezEzS7TxbGXCMvfeDr--WndrJa0ASc,23
 aepp/accesscontrol.py,sha256=PB3FcrO4bvDjdNxjHx7p_20hp4ahBXewoOSxuTGMXC8,17423
-aepp/catalog.py,sha256=hK9m3SAP0fhgkYqu14Tcfq14qBhw54tLCOF0mH31b1M,68237
+aepp/catalog.py,sha256=4s3Uzm26gGW4a55dyDRvjhmY8laQiJ0iyzT2s2A1h6I,68484
 aepp/classmanager.py,sha256=16hx_hptg3PYwmezZCr9dLjvOkNSunih1PK3Q-iPoZY,66099
 aepp/config.py,sha256=232fcO8JaYJnS4glf8Ebnx9rCdHshZBVaVUbhoOAXkc,2543
 aepp/configs.py,sha256=5rRWJoUQDDaj3AAXWdKCZBZA_Xb7q1Hd58OkWhzwK34,16151
@@ -16,29 +16,30 @@ aepp/destination.py,sha256=_-Hrzb_LUNaRrqR4Y3EZZuTisIs0nF3KH_GZpFjryrs,24348
 aepp/destinationinstanceservice.py,sha256=zEZbKi519cOOdxWMZ3mv9ccP6yjNAlNwqrQMlzW_gO4,5378
 aepp/edge.py,sha256=F2QZApmITObXB8hRWXftHBZ82KNqVZ7iSNuovT8qnk4,16041
 aepp/exportDatasetToDataLandingZone.py,sha256=C6jg3XttFC-0mswa3ypZb6qx3MCQ8_A_3kyKspurXJA,18629
-aepp/fieldgroupmanager.py,sha256=4A7u3tx63HzcDiMqyZs5TRy-LN0Xsf7VGLdt1_exw2Q,103591
+aepp/fieldgroupmanager.py,sha256=KuU5WDDh48C_3AYwjzRBPXHoLKxHZSpzel2_W18qsYo,103578
 aepp/flowservice.py,sha256=WizgwY6TYn1kiLxQt6Y3d7XgoLAb9imXrFXtt94hhog,107612
 aepp/hygiene.py,sha256=VEspnyu9eUlcK3wLeJYclaFaOWl5G5I5MRwmVA-RnUg,15385
-aepp/identity.py,sha256=E9MCIgntScMssduqKZqehT6FqSfTjWHcq7E7wESj3Zc,20833
+aepp/identity.py,sha256=XH_prFTFWVtid-BbjD7B_4Z7vpebmIo4z5ecvAZf1II,20881
 aepp/ingestion.py,sha256=OamE7NDei2Ev5vXIDkMlzvdyBaN41nkIGmpAnUlQoZI,22372
 aepp/observability.py,sha256=bKe74nlXYB5E5syh7Lj4VqIgwUI3OjMxK383P05EdLU,9951
 aepp/policy.py,sha256=JbpvfCKJl2kE2McK2mn_ZI5HKd_6pTnrfMoUdyJesWQ,24924
 aepp/privacyservice.py,sha256=V6BkJeZG1LDBCyEQm9Gx0i68iRHG6uxSJiVnXzkHapI,8790
 aepp/queryservice.py,sha256=wB9GiaMwJszNjqkYjkfEDUhdT2IoI22jA3Kt_6ki4Hk,62373
 aepp/sandboxes.py,sha256=UwlSFkO2OOmH--6ISz8rxwDu2LcLH1MPqoH7yOEAZHc,29363
-aepp/schema.py,sha256=aLYDM5lCANNddk-NZPNxCxazg9HpELalKlFxQz55dRs,123111
-aepp/schemamanager.py,sha256=G3JhVikWkaT14F8vORDfGJGivarvU2AgKO1RB-1pzdM,54117
+aepp/schema.py,sha256=85TBLuSWZjObTsnY_xBqlQV5eQOEABiy6_XKpAOe8BA,122999
+aepp/schemamanager.py,sha256=8WhAh57Iqg2TNi74OVK7X706duUkdMipB4-EvEKVXHg,54407
 aepp/segmentation.py,sha256=oSgR2yx4nawYN5XAeHV_wefvmXEf0nb-bCguaDmp8F8,43555
 aepp/sensei.py,sha256=oYNy5BSWAEqsDkEexcQso6NfA6ntGGMnCOyHri0pJs8,7761
 aepp/som.py,sha256=XNm_Lu2wt2kpSSpldLptuER2eludFXeO9fI6i3iNCzo,34175
-aepp/synchronizer.py,sha256=3scwuimQJIBVdEqJ9fVsT1UgmFc9EkH3mpYxUwSoAOE,79363
+aepp/synchronizer.py,sha256=18SC4zCDA43ewIlwOXyp9YNyF-6n6qSv8Pxl-ccvltk,82928
 aepp/tags.py,sha256=t2qBallTcWR4IOXcDBmrPpqjbSay1z3E2bcRijzVm1s,17641
 aepp/utils.py,sha256=tG-YVXylm38-bynqfp5N_Mzyo7mhlZj-dLo7wLoO4tM,1200
 aepp/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-aepp/cli/__main__.py,sha256=B2W5iooeKlsWQdWrJr27Y72ibEIgBHZTTGlLe-YgOZU,77329
-aepp-0.5.2.post1.dist-info/licenses/LICENSE,sha256=HjYTlfne3BbS5gNHzNqJ5COCiTQLUdf87QkzRyFbE4Y,10337
-aepp-0.5.2.post1.dist-info/METADATA,sha256=mp5AAwkCCK-cruebhH-SMdo89di8CdHacoTi5H5BTdw,5344
-aepp-0.5.2.post1.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
-aepp-0.5.2.post1.dist-info/entry_points.txt,sha256=e7HAumUTymoUiCuVRzFlcchennUBLcjxvuiimySF98Y,48
-aepp-0.5.2.post1.dist-info/top_level.txt,sha256=dtZJI8SzhWVgZRl68PHKZX_fD6amvDiFR-lqD9FSJvE,5
-aepp-0.5.2.post1.dist-info/RECORD,,
+aepp/cli/__main__.py,sha256=ui3q6CdDBd5R43xg9olu-jRioSSh76vHde-S_iaUsEY,87351
+aepp/cli/upsfieldsanalyzer.py,sha256=GAVBfXN6U8_BfU7doZwcuox71NMwdqQsEpuNgM2Osjc,13124
+aepp-0.5.2.post3.dist-info/licenses/LICENSE,sha256=HjYTlfne3BbS5gNHzNqJ5COCiTQLUdf87QkzRyFbE4Y,10337
+aepp-0.5.2.post3.dist-info/METADATA,sha256=-2p5jm8PyG-0I-faMx71Tyn0AS-t8iN_DTOwLAXGAdo,5344
+aepp-0.5.2.post3.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+aepp-0.5.2.post3.dist-info/entry_points.txt,sha256=e7HAumUTymoUiCuVRzFlcchennUBLcjxvuiimySF98Y,48
+aepp-0.5.2.post3.dist-info/top_level.txt,sha256=dtZJI8SzhWVgZRl68PHKZX_fD6amvDiFR-lqD9FSJvE,5
+aepp-0.5.2.post3.dist-info/RECORD,,