brynq-sdk-bob 2.5.0__tar.gz → 2.5.2.dev0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {brynq_sdk_bob-2.5.0 → brynq_sdk_bob-2.5.2.dev0}/PKG-INFO +1 -1
- brynq_sdk_bob-2.5.2.dev0/brynq_sdk_bob/__init__.py +349 -0
- {brynq_sdk_bob-2.5.0 → brynq_sdk_bob-2.5.2.dev0}/brynq_sdk_bob/payments.py +12 -10
- {brynq_sdk_bob-2.5.0 → brynq_sdk_bob-2.5.2.dev0}/brynq_sdk_bob/people.py +7 -8
- brynq_sdk_bob-2.5.2.dev0/brynq_sdk_bob/salaries.py +38 -0
- {brynq_sdk_bob-2.5.0 → brynq_sdk_bob-2.5.2.dev0}/brynq_sdk_bob/schemas/employment.py +1 -1
- {brynq_sdk_bob-2.5.0 → brynq_sdk_bob-2.5.2.dev0}/brynq_sdk_bob/schemas/payments.py +3 -2
- {brynq_sdk_bob-2.5.0 → brynq_sdk_bob-2.5.2.dev0}/brynq_sdk_bob/schemas/people.py +2 -38
- brynq_sdk_bob-2.5.2.dev0/brynq_sdk_bob/schemas/salary.py +49 -0
- {brynq_sdk_bob-2.5.0 → brynq_sdk_bob-2.5.2.dev0}/brynq_sdk_bob/schemas/timeoff.py +13 -13
- {brynq_sdk_bob-2.5.0 → brynq_sdk_bob-2.5.2.dev0}/brynq_sdk_bob/schemas/work.py +8 -8
- {brynq_sdk_bob-2.5.0 → brynq_sdk_bob-2.5.2.dev0}/brynq_sdk_bob.egg-info/PKG-INFO +1 -1
- {brynq_sdk_bob-2.5.0 → brynq_sdk_bob-2.5.2.dev0}/setup.py +1 -1
- brynq_sdk_bob-2.5.0/brynq_sdk_bob/__init__.py +0 -73
- brynq_sdk_bob-2.5.0/brynq_sdk_bob/salaries.py +0 -24
- brynq_sdk_bob-2.5.0/brynq_sdk_bob/schemas/salary.py +0 -25
- {brynq_sdk_bob-2.5.0 → brynq_sdk_bob-2.5.2.dev0}/brynq_sdk_bob/bank.py +0 -0
- {brynq_sdk_bob-2.5.0 → brynq_sdk_bob-2.5.2.dev0}/brynq_sdk_bob/company.py +0 -0
- {brynq_sdk_bob-2.5.0 → brynq_sdk_bob-2.5.2.dev0}/brynq_sdk_bob/custom_tables.py +0 -0
- {brynq_sdk_bob-2.5.0 → brynq_sdk_bob-2.5.2.dev0}/brynq_sdk_bob/documents.py +0 -0
- {brynq_sdk_bob-2.5.0 → brynq_sdk_bob-2.5.2.dev0}/brynq_sdk_bob/employment.py +0 -0
- {brynq_sdk_bob-2.5.0 → brynq_sdk_bob-2.5.2.dev0}/brynq_sdk_bob/named_lists.py +0 -0
- {brynq_sdk_bob-2.5.0 → brynq_sdk_bob-2.5.2.dev0}/brynq_sdk_bob/payroll_history.py +0 -0
- {brynq_sdk_bob-2.5.0 → brynq_sdk_bob-2.5.2.dev0}/brynq_sdk_bob/schemas/__init__.py +0 -0
- {brynq_sdk_bob-2.5.0 → brynq_sdk_bob-2.5.2.dev0}/brynq_sdk_bob/schemas/bank.py +0 -0
- {brynq_sdk_bob-2.5.0 → brynq_sdk_bob-2.5.2.dev0}/brynq_sdk_bob/schemas/custom_tables.py +0 -0
- {brynq_sdk_bob-2.5.0 → brynq_sdk_bob-2.5.2.dev0}/brynq_sdk_bob/schemas/named_lists.py +0 -0
- {brynq_sdk_bob-2.5.0 → brynq_sdk_bob-2.5.2.dev0}/brynq_sdk_bob/schemas/payroll_history.py +0 -0
- {brynq_sdk_bob-2.5.0 → brynq_sdk_bob-2.5.2.dev0}/brynq_sdk_bob/timeoff.py +0 -0
- {brynq_sdk_bob-2.5.0 → brynq_sdk_bob-2.5.2.dev0}/brynq_sdk_bob/work.py +0 -0
- {brynq_sdk_bob-2.5.0 → brynq_sdk_bob-2.5.2.dev0}/brynq_sdk_bob.egg-info/SOURCES.txt +0 -0
- {brynq_sdk_bob-2.5.0 → brynq_sdk_bob-2.5.2.dev0}/brynq_sdk_bob.egg-info/dependency_links.txt +0 -0
- {brynq_sdk_bob-2.5.0 → brynq_sdk_bob-2.5.2.dev0}/brynq_sdk_bob.egg-info/not-zip-safe +0 -0
- {brynq_sdk_bob-2.5.0 → brynq_sdk_bob-2.5.2.dev0}/brynq_sdk_bob.egg-info/requires.txt +0 -0
- {brynq_sdk_bob-2.5.0 → brynq_sdk_bob-2.5.2.dev0}/brynq_sdk_bob.egg-info/top_level.txt +0 -0
- {brynq_sdk_bob-2.5.0 → brynq_sdk_bob-2.5.2.dev0}/setup.cfg +0 -0
|
@@ -0,0 +1,349 @@
|
|
|
1
|
+
import base64
|
|
2
|
+
import re
|
|
3
|
+
import inspect
|
|
4
|
+
from typing import Union, List, Optional, Literal
|
|
5
|
+
import pandas as pd
|
|
6
|
+
import requests
|
|
7
|
+
import os
|
|
8
|
+
from brynq_sdk_brynq import BrynQ
|
|
9
|
+
from brynq_sdk_functions import Functions
|
|
10
|
+
from .bank import Bank
|
|
11
|
+
from .company import Company
|
|
12
|
+
from .documents import CustomDocuments
|
|
13
|
+
from .employment import Employment
|
|
14
|
+
from .named_lists import NamedLists
|
|
15
|
+
from .payments import Payments
|
|
16
|
+
from .people import People
|
|
17
|
+
from .salaries import Salaries
|
|
18
|
+
from .timeoff import TimeOff
|
|
19
|
+
from .work import Work
|
|
20
|
+
from .custom_tables import CustomTables
|
|
21
|
+
from .payroll_history import History
|
|
22
|
+
|
|
23
|
+
class Bob(BrynQ):
    """Client for the HiBob (Bob) HRIS REST API.

    Bundles one sub-client per Bob resource (people, salaries, work, ...) on a
    shared authenticated ``requests.Session``.
    """

    def __init__(self, system_type: Optional[Literal['source', 'target']] = None, test_environment: bool = True, debug: bool = False, target_system: str = None):
        super().__init__()
        # Sandbox vs. production API host.
        self.base_url = "https://api.sandbox.hibob.com/v1/" if test_environment else "https://api.hibob.com/v1/"
        self.timeout = 3600
        self.headers = self._get_request_headers(system_type)
        # One shared session carries the Basic-auth headers for every sub-client.
        self.session = requests.Session()
        self.session.headers.update(self.headers)
        # Resource-specific sub-clients; each receives this instance so they
        # can reuse its session, base_url and timeout.
        self.people = People(self)
        self.salaries = Salaries(self)
        self.work = Work(self)
        self.bank = Bank(self)
        self.employment = Employment(self)
        self.payments = Payments(self)
        self.time_off = TimeOff(self)
        self.documents = CustomDocuments(self)
        self.companies = Company(self)
        self.named_lists = NamedLists(self)
        self.custom_tables = CustomTables(self)
        self.payroll_history = History(self)
        self.data_interface_id = os.getenv("DATA_INTERFACE_ID")
        self.debug = debug
        self.bob_dir = "bob_data"  # directory used when saving Bob data files
        # NOTE(review): ``target_system`` is accepted but never used here —
        # confirm whether it is consumed by the BrynQ base class or is dead.
        self.setup_schema_endpoint_mapping()
|
|
50
|
+
|
|
51
|
+
def _get_request_headers(self, system_type):
|
|
52
|
+
credentials = self.interfaces.credentials.get(system='bob', system_type=system_type)
|
|
53
|
+
auth_token = base64.b64encode(f"{credentials.get('data').get('User ID')}:{credentials.get('data').get('API Token')}".encode()).decode('utf-8')
|
|
54
|
+
headers = {
|
|
55
|
+
"accept": "application/json",
|
|
56
|
+
"Authorization": f"Basic {auth_token}",
|
|
57
|
+
"Partner-Token": "001Vg00000A6FY6IAN"
|
|
58
|
+
}
|
|
59
|
+
|
|
60
|
+
return headers
|
|
61
|
+
|
|
62
|
+
def get_paginated_result(self, request: requests.Request) -> List:
|
|
63
|
+
has_next_page = True
|
|
64
|
+
result_data = []
|
|
65
|
+
while has_next_page:
|
|
66
|
+
prepped = request.prepare()
|
|
67
|
+
prepped.headers.update(self.session.headers)
|
|
68
|
+
resp = self.session.send(prepped, timeout=self.timeout)
|
|
69
|
+
resp.raise_for_status()
|
|
70
|
+
response_data = resp.json()
|
|
71
|
+
result_data += response_data['results']
|
|
72
|
+
next_cursor = response_data.get('response_metadata').get('next_cursor')
|
|
73
|
+
# If there is no next page, set has_next_page to False, we could use the falsy value of None but this is more readable
|
|
74
|
+
has_next_page = next_cursor is not None
|
|
75
|
+
if has_next_page:
|
|
76
|
+
request.params.update({"cursor": next_cursor})
|
|
77
|
+
|
|
78
|
+
return result_data
|
|
79
|
+
|
|
80
|
+
#methods to be used in conjunction with teh scenario sdk. scenario sdks collects all schemas and correpodnign fields and passes it to the get_data_per_schema method, which needs this method to map the schema name to the corresponding endpoint.
|
|
81
|
+
def setup_schema_endpoint_mapping(self):
|
|
82
|
+
self.schema_endpoint_map = {
|
|
83
|
+
"PeopleSchema": self.people,
|
|
84
|
+
"SalarySchema": self.salaries,
|
|
85
|
+
"WorkSchema": self.work,
|
|
86
|
+
"BankSchema": self.bank,
|
|
87
|
+
"EmploymentSchema": self.employment,
|
|
88
|
+
"VariablePaymentSchema": self.payments,
|
|
89
|
+
"ActualPaymentsSchema": self.payments,
|
|
90
|
+
"TimeOffSchema": self.time_off,
|
|
91
|
+
"TimeOffBalanceSchema": self.time_off,
|
|
92
|
+
"PayrollHistorySchema": self.payroll_history,
|
|
93
|
+
"CustomTableSchema": self.custom_tables,
|
|
94
|
+
"CustomTableMetadataSchema": self.custom_tables,
|
|
95
|
+
"NamedListSchema": self.named_lists,
|
|
96
|
+
# Note: DocumentsSchema and CompanySchema don't have corresponding schema classes yet
|
|
97
|
+
# but keeping them for backward compatibility
|
|
98
|
+
"DocumentsSchema": self.documents,
|
|
99
|
+
"CompanySchema": self.companies,
|
|
100
|
+
}
|
|
101
|
+
|
|
102
|
+
def get_data_for_schemas(self, schemas: dict[str, set], save_dir = None) -> dict:
|
|
103
|
+
"""
|
|
104
|
+
Get data for each schema using the schema-to-fields mapping from the scenario SDK.
|
|
105
|
+
|
|
106
|
+
This method integrates with the BrynQ scenario SDK to retrieve data based on schema
|
|
107
|
+
definitions. It automatically maps schema names to the appropriate Bob API endpoints
|
|
108
|
+
and retrieves only the fields specified in the schema-to-fields mapping.
|
|
109
|
+
|
|
110
|
+
NOTE:
|
|
111
|
+
"endpoint_obj" is just a variable that represents the specific Bob API client (or "endpoint") for a given type of data.
|
|
112
|
+
For example, if you want to get people data, endpoint_obj would be self.people.
|
|
113
|
+
If you want salary data, endpoint_obj would be self.salaries, and so on.
|
|
114
|
+
Each of these endpoint objects knows how to fetch data for its specific schema/table from Bob.
|
|
115
|
+
So, "endpoint_obj" is basically a shortcut to the right part of the Bob SDK that knows how to get the data you want.
|
|
116
|
+
|
|
117
|
+
Args:
|
|
118
|
+
schemas: Dictionary mapping schema names to sets of fields
|
|
119
|
+
Example: {'PeopleSchema': {'firstName', 'lastName', 'email'},
|
|
120
|
+
'WorkSchema': {'title', 'department', 'site'}}
|
|
121
|
+
save_dir: Optional directory path to save parquet files. Can be a string or path object
|
|
122
|
+
(e.g., os.path.join(self.basedir, "data", "bob_to_zenegy")). If None, files are not saved to disk.
|
|
123
|
+
|
|
124
|
+
Returns:
|
|
125
|
+
Dictionary with results for each schema containing:
|
|
126
|
+
- 'dataframe': The retrieved data as pandas DataFrame
|
|
127
|
+
- 'filepath': Path where the data was saved as parquet file (None if save_dir is None)
|
|
128
|
+
- 'fields': List of fields that were requested
|
|
129
|
+
- 'status_message': Status message about field retrieval
|
|
130
|
+
- 'status_level': Status level (INFO/WARNING/ERROR)
|
|
131
|
+
|
|
132
|
+
Integration with Scenario SDK:
|
|
133
|
+
This method is designed to work seamlessly with the BrynQ scenario SDK:
|
|
134
|
+
1. Use scenarios.get_schema_field_mapping() to get schema-to-fields mapping
|
|
135
|
+
2. Pass the mapping to this method to retrieve data
|
|
136
|
+
3. The method automatically handles endpoint mapping and field selection
|
|
137
|
+
4. Field tracking shows exactly which requested fields were returned vs missing
|
|
138
|
+
|
|
139
|
+
Example usage:
|
|
140
|
+
# Initialize Bob SDK
|
|
141
|
+
bob = Bob(system_type='source')
|
|
142
|
+
|
|
143
|
+
# Get schema-to-fields mapping from scenarios
|
|
144
|
+
schema_fields = bob.interfaces.scenarios.get_schema_field_mapping()
|
|
145
|
+
|
|
146
|
+
# Get data for specific schemas
|
|
147
|
+
results = bob.get_data_for_schemas({
|
|
148
|
+
'PeopleSchema': schema_fields['PeopleSchema'],
|
|
149
|
+
'WorkSchema': schema_fields['WorkSchema']
|
|
150
|
+
}, save_dir=os.path.join('data', 'bob_to_zenegy'))
|
|
151
|
+
|
|
152
|
+
# Access results and status messages
|
|
153
|
+
for schema_name, result in results.items():
|
|
154
|
+
print(f"Schema: {schema_name}")
|
|
155
|
+
print(f"Status: {result['status_message']}")
|
|
156
|
+
print(f"Level: {result['status_level']}")
|
|
157
|
+
print(f"Data shape: {result['dataframe'].shape}")
|
|
158
|
+
print(f"Saved to: {result['filepath']}")
|
|
159
|
+
|
|
160
|
+
# Process the data
|
|
161
|
+
people_data = results['PeopleSchema']['dataframe']
|
|
162
|
+
work_data = results['WorkSchema']['dataframe']
|
|
163
|
+
|
|
164
|
+
# Example with path object
|
|
165
|
+
custom_path = os.path.join('data', 'bob_to_zenegy')
|
|
166
|
+
results_with_path = bob.get_data_for_schemas({
|
|
167
|
+
'PeopleSchema': schema_fields['PeopleSchema']
|
|
168
|
+
}, save_dir=custom_path)
|
|
169
|
+
"""
|
|
170
|
+
results = {}
|
|
171
|
+
|
|
172
|
+
# Validate input
|
|
173
|
+
if not schemas:
|
|
174
|
+
print("Warning: No schemas provided")
|
|
175
|
+
return results
|
|
176
|
+
|
|
177
|
+
# Process each schema
|
|
178
|
+
for schema_name, fields in schemas.items():
|
|
179
|
+
# Validate schema name and fields
|
|
180
|
+
if not schema_name:
|
|
181
|
+
print("Warning: Empty schema name provided, skipping")
|
|
182
|
+
continue
|
|
183
|
+
|
|
184
|
+
if not fields:
|
|
185
|
+
print(f"Warning: No fields provided for schema '{schema_name}', skipping")
|
|
186
|
+
continue
|
|
187
|
+
|
|
188
|
+
# Get the endpoint/service for this schema
|
|
189
|
+
endpoint_obj = self.schema_endpoint_map.get(schema_name)
|
|
190
|
+
|
|
191
|
+
if endpoint_obj is None:
|
|
192
|
+
print(f"Warning: No endpoint found for schema '{schema_name}'. Available schemas: {list(self.schema_endpoint_map.keys())}")
|
|
193
|
+
continue
|
|
194
|
+
|
|
195
|
+
try:
|
|
196
|
+
# Get data using the service endpoint
|
|
197
|
+
df_bob, status_message, status_level = self._handle_endpoint(endpoint_obj, list(fields), schema_name)
|
|
198
|
+
except Exception as e:
|
|
199
|
+
print(f"Error processing schema '{schema_name}': {str(e)}")
|
|
200
|
+
results[schema_name] = {
|
|
201
|
+
'dataframe': pd.DataFrame(),
|
|
202
|
+
'filepath': None,
|
|
203
|
+
'fields': list(fields),
|
|
204
|
+
'status_message': f"Error processing schema '{schema_name}': {str(e)}",
|
|
205
|
+
'status_level': 'ERROR'
|
|
206
|
+
}
|
|
207
|
+
continue
|
|
208
|
+
|
|
209
|
+
# Save the result
|
|
210
|
+
if save_dir:
|
|
211
|
+
filename = f"bob_{schema_name.replace(' ', '_')}.parquet"
|
|
212
|
+
output_dir = save_dir if save_dir is not None else self.bob_dir
|
|
213
|
+
os.makedirs(output_dir, exist_ok=True)
|
|
214
|
+
filepath = os.path.join(output_dir, filename)
|
|
215
|
+
df_bob.to_parquet(filepath)
|
|
216
|
+
else:
|
|
217
|
+
filepath = None
|
|
218
|
+
|
|
219
|
+
results[schema_name] = {
|
|
220
|
+
'dataframe': df_bob,
|
|
221
|
+
'filepath': filepath,
|
|
222
|
+
'fields': list(fields),
|
|
223
|
+
'status_message': status_message,
|
|
224
|
+
'status_level': status_level
|
|
225
|
+
}
|
|
226
|
+
return results
|
|
227
|
+
|
|
228
|
+
def _handle_endpoint(self, endpoint_obj, body_fields: List[str], schema_name: str) -> tuple[pd.DataFrame, str, str]:
|
|
229
|
+
"""
|
|
230
|
+
Handle data retrieval for a given endpoint object (e.g., self.people, self.work, etc.).
|
|
231
|
+
|
|
232
|
+
Args:
|
|
233
|
+
endpoint_obj: The endpoint object responsible for fetching data for a specific schema.
|
|
234
|
+
For example, this could be self.people, self.work, self.salaries, etc.
|
|
235
|
+
(Think of these as "API clients" or "data access classes" for each schema/table.)
|
|
236
|
+
body_fields: List of fields to retrieve
|
|
237
|
+
schema_name: Name of the schema being processed
|
|
238
|
+
|
|
239
|
+
Returns:
|
|
240
|
+
tuple[pd.DataFrame, str, str]: Dataframe, status message, and status level
|
|
241
|
+
"""
|
|
242
|
+
get_method = endpoint_obj.get
|
|
243
|
+
|
|
244
|
+
# Check if the method accepts field_selection parameter
|
|
245
|
+
sig = inspect.signature(get_method)
|
|
246
|
+
if 'field_selection' in sig.parameters and 'person_ids' not in sig.parameters:
|
|
247
|
+
bob_data_valid, _ = get_method(field_selection=body_fields)
|
|
248
|
+
# elif 'person_id' in sig.parameters:
|
|
249
|
+
# bob_data_valid, _ = self._fetch_data_with_person_id(get_method)
|
|
250
|
+
# elif 'person_ids' in sig.parameters and 'field_selection' in sig.parameters:
|
|
251
|
+
# bob_data_valid, _ = self._fetch_data_with_person_ids(get_method, body_fields)
|
|
252
|
+
else:
|
|
253
|
+
bob_data_valid, _ = get_method()
|
|
254
|
+
df_bob = pd.DataFrame(bob_data_valid)
|
|
255
|
+
|
|
256
|
+
# Track field retrieval success/failure and handle missing fields
|
|
257
|
+
status_message, status_level = self._log_field_retrieval_status(df_bob, body_fields, schema_name)
|
|
258
|
+
|
|
259
|
+
return df_bob, status_message, status_level
|
|
260
|
+
|
|
261
|
+
def _log_field_retrieval_status(self, df_bob: pd.DataFrame, body_fields: List[str], schema_name: str) -> tuple[str, str]:
|
|
262
|
+
"""
|
|
263
|
+
Checks if the data returned from the Bob API actually contains all the fields you asked for.
|
|
264
|
+
|
|
265
|
+
This function counts how many fields you requested (body_fields)
|
|
266
|
+
and how many columns you actually got back in the DataFrame (df_bob).
|
|
267
|
+
|
|
268
|
+
- If the numbers are different, it means some fields you wanted are missing from the result.
|
|
269
|
+
- If the numbers match, you got everything you asked for.
|
|
270
|
+
- If the DataFrame is empty, then Bob API returned no data at all.
|
|
271
|
+
|
|
272
|
+
Args:
|
|
273
|
+
df_bob: The DataFrame you got back from the Bob API (could be empty or missing columns).
|
|
274
|
+
body_fields: The list of field names you told the API you wanted.
|
|
275
|
+
schema_name: The name of the schema/table you were trying to get.
|
|
276
|
+
|
|
277
|
+
Returns:
|
|
278
|
+
tuple[str, str]:
|
|
279
|
+
- A human-readable status message (for logs or debugging).
|
|
280
|
+
- A status level string: "DEBUG" (all good or minor mismatch), or "ERROR" (no data at all).
|
|
281
|
+
"""
|
|
282
|
+
if not df_bob.empty:
|
|
283
|
+
requested_count = len(body_fields)
|
|
284
|
+
returned_count = len(df_bob.columns)
|
|
285
|
+
|
|
286
|
+
if requested_count != returned_count:
|
|
287
|
+
status_message = (f"Schema '{schema_name}' [INFO]:\n"
|
|
288
|
+
f"Requested {requested_count} fields, got {returned_count} fields\n"
|
|
289
|
+
f"Total records: {len(df_bob)}")
|
|
290
|
+
return status_message, "DEBUG"
|
|
291
|
+
else:
|
|
292
|
+
status_message = (f"Schema '{schema_name}': All {requested_count} requested fields "
|
|
293
|
+
f"successfully retrieved from Bob API ({len(df_bob)} records)")
|
|
294
|
+
return status_message, "DEBUG"
|
|
295
|
+
else:
|
|
296
|
+
return f"Schema '{schema_name}' [ERROR]: No data returned from Bob API", "ERROR"
|
|
297
|
+
|
|
298
|
+
def initialize_person_id_mapping(self) -> pd.DataFrame:
|
|
299
|
+
"""
|
|
300
|
+
Creates a mapping DataFrame between Bob's internal person ID (`root.id`) and the employee ID in the company
|
|
301
|
+
(`work.employeeIdInCompany`).
|
|
302
|
+
|
|
303
|
+
This is a utility function for situations where you need to join or map data between endpoints/scenarios that use different
|
|
304
|
+
identifiers for people. In scenarios maybe root.id is used as primary key, but in Bob, some API endpoints require you to use the employee ID.
|
|
305
|
+
This function helps you convert between them.
|
|
306
|
+
|
|
307
|
+
Note:
|
|
308
|
+
- This is NOT required for the Bob SDK to function, but is a convenience tool you can call from the interface
|
|
309
|
+
whenever you need to perform such a mapping.
|
|
310
|
+
- The mapping is especially useful when you have data from other sources (e.g., payroll, HRIS exports) that use
|
|
311
|
+
employee IDs, and you want to join or compare them with data from Bob, which often uses person IDs.
|
|
312
|
+
|
|
313
|
+
Returns:
|
|
314
|
+
pd.DataFrame: A DataFrame with two columns:
|
|
315
|
+
- 'person_id': The unique person identifier in Bob (formerly `root.id`)
|
|
316
|
+
- 'employee_id_in_company': The employee ID as used in your company (formerly `work.employeeIdInCompany`)
|
|
317
|
+
|
|
318
|
+
If no people are found, returns an empty DataFrame with these columns.
|
|
319
|
+
|
|
320
|
+
Example:
|
|
321
|
+
>>> df = sdk.initialize_person_id_mapping()
|
|
322
|
+
>>> # Now you can merge/join on 'person_id' or 'employee_id_in_company' as needed
|
|
323
|
+
|
|
324
|
+
"""
|
|
325
|
+
# Only fetch the two fields needed for the mapping
|
|
326
|
+
field_selection = ['work.employeeIdInCompany', 'root.id']
|
|
327
|
+
|
|
328
|
+
# Use the Bob SDK to get the people data with just those fields
|
|
329
|
+
valid_people, _ = self.people.get(field_selection=field_selection)
|
|
330
|
+
|
|
331
|
+
# The SDK renames:
|
|
332
|
+
# root.id -> id
|
|
333
|
+
# work.employeeIdInCompany -> work_employee_id_in_company
|
|
334
|
+
|
|
335
|
+
if not valid_people.empty:
|
|
336
|
+
# Rename columns to standard names for mapping
|
|
337
|
+
valid_people = valid_people.rename(
|
|
338
|
+
columns={
|
|
339
|
+
'id': 'person_id',
|
|
340
|
+
'work_employee_id_in_company': 'employee_id_in_company'
|
|
341
|
+
}
|
|
342
|
+
)
|
|
343
|
+
self.person_id_to_employee_id_in_company = valid_people[['person_id', 'employee_id_in_company']].copy()
|
|
344
|
+
else:
|
|
345
|
+
# Return empty DataFrame with expected columns if no data
|
|
346
|
+
self.person_id_to_employee_id_in_company = pd.DataFrame(
|
|
347
|
+
columns=['person_id', 'employee_id_in_company']
|
|
348
|
+
)
|
|
349
|
+
return self.person_id_to_employee_id_in_company
|
|
@@ -9,17 +9,19 @@ class Payments:
|
|
|
9
9
|
self.bob = bob
|
|
10
10
|
self.schema = VariablePaymentSchema
|
|
11
11
|
|
|
12
|
-
def get(self,
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
|
|
17
|
-
data
|
|
18
|
-
|
|
19
|
-
|
|
20
|
-
|
|
12
|
+
def get(self, person_ids: List[str]) -> (pd.DataFrame, pd.DataFrame):
|
|
13
|
+
df = pd.DataFrame()
|
|
14
|
+
for person_id in person_ids:
|
|
15
|
+
resp = self.bob.session.get(url=f"{self.bob.base_url}people/{person_id}/variable", timeout=self.bob.timeout)
|
|
16
|
+
resp.raise_for_status()
|
|
17
|
+
data = resp.json()
|
|
18
|
+
df = pd.concat([df, pd.json_normalize(
|
|
19
|
+
data,
|
|
20
|
+
record_path='values'
|
|
21
|
+
)])
|
|
22
|
+
df['employee_id'] = person_id
|
|
23
|
+
df = df.reset_index(drop=True)
|
|
21
24
|
valid_payments, invalid_payments = Functions.validate_data(df=df, schema=self.schema, debug=True)
|
|
22
|
-
|
|
23
25
|
return valid_payments, invalid_payments
|
|
24
26
|
|
|
25
27
|
def get_actual_payments(
|
|
@@ -127,7 +127,7 @@ class People:
|
|
|
127
127
|
|
|
128
128
|
if isinstance(sample_value, dict):
|
|
129
129
|
# Flatten nested structure
|
|
130
|
-
nested_df = pd.json_normalize(df_result[col].tolist())
|
|
130
|
+
nested_df = pd.json_normalize(df_result[col].tolist(), max_level=10)
|
|
131
131
|
|
|
132
132
|
# Rename columns to include the original column name as prefix
|
|
133
133
|
nested_df.columns = [f"{col}.{subcol}" for subcol in nested_df.columns]
|
|
@@ -196,8 +196,7 @@ class People:
|
|
|
196
196
|
|
|
197
197
|
Args:
|
|
198
198
|
additional_fields (list[str]): Additional fields to get (not defined in the schema)
|
|
199
|
-
field_selection (list[str]): Fields to get (defined in the schema), if not provided, all fields are returned
|
|
200
|
-
Can be either pythonic field names (e.g., 'work_start_date') or Bob API field names (e.g., 'work.startDate').
|
|
199
|
+
field_selection (list[str]): Fields to get (defined in the schema), if not provided, all fields are returned
|
|
201
200
|
add_payroll_information (list[str]): List of payroll information types to include (valid options: 'entitlement', 'variable')
|
|
202
201
|
|
|
203
202
|
Returns:
|
|
@@ -211,9 +210,6 @@ class People:
|
|
|
211
210
|
response_fields = list(set(self.field_name_in_response + additional_fields))
|
|
212
211
|
|
|
213
212
|
if field_selection:
|
|
214
|
-
# Convert pythonic field names to aliases (e.g., 'work_start_date' -> 'work.startDate')
|
|
215
|
-
field_selection = PeopleSchema.convert_pythonic_to_alias(field_selection)
|
|
216
|
-
|
|
217
213
|
body_fields = [field for field in body_fields if field in field_selection]
|
|
218
214
|
response_fields = [self.endpoint_to_response.get(field) for field in field_selection if field in self.endpoint_to_response]
|
|
219
215
|
|
|
@@ -232,7 +228,7 @@ class People:
|
|
|
232
228
|
"filters": []
|
|
233
229
|
},
|
|
234
230
|
timeout=self.bob.timeout)
|
|
235
|
-
df = pd.json_normalize(resp_additional_fields.json()['employees'])
|
|
231
|
+
df = pd.json_normalize(resp_additional_fields.json()['employees'], max_level=10)
|
|
236
232
|
|
|
237
233
|
# Validate payroll types if requested
|
|
238
234
|
valid_payroll_types = []
|
|
@@ -250,6 +246,9 @@ class People:
|
|
|
250
246
|
# Keep if it's in response_fields
|
|
251
247
|
if col in response_fields:
|
|
252
248
|
columns_to_keep.append(col)
|
|
249
|
+
# Or if it starts with any response_field followed by a dot (for nested fields)
|
|
250
|
+
elif any(col.startswith(field + '.') for field in response_fields):
|
|
251
|
+
columns_to_keep.append(col)
|
|
253
252
|
# Or if it's a payroll column (original or flattened)
|
|
254
253
|
elif valid_payroll_types:
|
|
255
254
|
for payroll_type in valid_payroll_types:
|
|
@@ -337,7 +336,7 @@ class People:
|
|
|
337
336
|
"filters": []
|
|
338
337
|
},
|
|
339
338
|
timeout=self.bob.timeout)
|
|
340
|
-
df = pd.json_normalize(resp_additional_fields.json()['employees'])
|
|
339
|
+
df = pd.json_normalize(resp_additional_fields.json()['employees'], max_level=10)
|
|
341
340
|
df = df[[col for col in response_fields if col in df.columns]]
|
|
342
341
|
# Get the valid column names from PeopleSchema
|
|
343
342
|
valid_people, invalid_people = Functions.validate_data(df=df, schema=PeopleSchema, debug=True)
|
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
import pandas as pd
|
|
2
|
+
import requests
|
|
3
|
+
from brynq_sdk_functions import Functions
|
|
4
|
+
from .schemas.salary import SalarySchema, SalaryCreateSchema
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
class Salaries:
    """Client for Bob salary entries (``/people/{id}/salaries`` and bulk)."""

    def __init__(self, bob):
        # ``bob`` is the parent Bob client: shared session, base_url, timeout.
        self.bob = bob
        self.schema = SalarySchema

    def get(self) -> tuple[pd.DataFrame, pd.DataFrame]:
        """Fetch all salary entries via the bulk endpoint and validate them.

        Returns:
            tuple of (valid rows, invalid rows) after SalarySchema validation.
        """
        request = requests.Request(method='GET',
                                   url=f"{self.bob.base_url}bulk/people/salaries",
                                   params={"limit": 100})
        data = self.bob.get_paginated_result(request)
        # Each bulk item holds its salary rows under 'values'; keep the owning
        # employeeId on every row.
        df = pd.json_normalize(
            data,
            record_path='values',
            meta=['employeeId']
        )
        valid_salaries, invalid_salaries = Functions.validate_data(df=df, schema=SalarySchema, debug=True)
        return valid_salaries, invalid_salaries

    def create(self, salary_data: dict) -> requests.Response:
        """Create a salary entry for an employee.

        Args:
            salary_data: Flat dict of salary fields (including the employee id)
                matching SalaryCreateSchema.

        Returns:
            requests.Response: the raw Bob API response.

        Raises:
            requests.HTTPError: If the API rejects the request.
        """
        # BUGFIX: was ``self.nmbrs.flat_dict_to_nested_dict`` — this class
        # never sets an ``nmbrs`` attribute (apparent copy-paste from another
        # SDK), so the call raised AttributeError. The parent client is
        # ``self.bob``. NOTE(review): confirm Bob exposes
        # flat_dict_to_nested_dict (not visible in this file).
        nested_data = self.bob.flat_dict_to_nested_dict(salary_data, SalaryCreateSchema)
        salary_model = SalaryCreateSchema(**nested_data)
        payload = salary_model.model_dump(exclude_none=True, by_alias=True)

        # Timeout added for consistency with the other sub-clients.
        resp = self.bob.session.post(url=f"{self.bob.base_url}people/{salary_model.employee_id}/salaries",
                                     json=payload,
                                     timeout=self.bob.timeout)
        resp.raise_for_status()
        return resp

    def delete(self, employee_id: str, salary_id: str) -> requests.Response:
        """Delete one salary entry for an employee.

        Args:
            employee_id: Bob person id owning the salary entry.
            salary_id: Id of the salary entry to delete.

        Returns:
            requests.Response: the raw Bob API response.

        Raises:
            requests.HTTPError: If the API rejects the request.
        """
        resp = self.bob.session.delete(url=f"{self.bob.base_url}people/{employee_id}/salaries/{salary_id}",
                                       timeout=self.bob.timeout)
        resp.raise_for_status()
        return resp
|
|
@@ -6,7 +6,7 @@ from brynq_sdk_functions import BrynQPanderaDataFrameModel
|
|
|
6
6
|
|
|
7
7
|
class EmploymentSchema(BrynQPanderaDataFrameModel):
|
|
8
8
|
id: Series[pd.Int64Dtype] = pa.Field(coerce=True, description="Employment ID", alias="id")
|
|
9
|
-
employee_id: Series[
|
|
9
|
+
employee_id: Series[pd.Int64Dtype] = pa.Field(coerce=True, description="Employee ID", alias="employeeId")
|
|
10
10
|
active_effective_date: Series[DateTime] = pa.Field(coerce=True, description="Active Effective Date", alias="activeEffectiveDate")
|
|
11
11
|
contract: Series[String] = pa.Field(coerce=True, nullable=True, description="Contract", alias="contract") # has a list of possible values
|
|
12
12
|
creation_date: Series[DateTime] = pa.Field(coerce=True, nullable=True, description="Creation Date", alias="creationDate")
|
|
@@ -21,8 +21,9 @@ class VariablePaymentSchema(BrynQPanderaDataFrameModel):
|
|
|
21
21
|
end_effective_date: Series[DateTime] = pa.Field(nullable=True, coerce=True, description="End Effective Date", alias="endEffectiveDate")
|
|
22
22
|
payment_period: Series[String] = pa.Field(coerce=True, description="Payment Period", alias="paymentPeriod")
|
|
23
23
|
effective_date: Series[DateTime] = pa.Field(coerce=True, description="Effective Date", alias="effectiveDate")
|
|
24
|
-
amount_value: Series[Float] = pa.Field(coerce=True, description="Amount Value", alias="amount.value")
|
|
25
|
-
|
|
24
|
+
amount_value: Optional[Series[Float]] = pa.Field(coerce=True, description="Amount Value", alias="amount.value")
|
|
25
|
+
amount_alternative_value: Optional[Series[Float]] = pa.Field(coerce=True, description="Amount Value", alias="amount")
|
|
26
|
+
amount_currency: Optional[Series[String]] = pa.Field(coerce=True, description="Amount Currency", alias="amount.currency")
|
|
26
27
|
change_reason: Series[String] = pa.Field(nullable=True, coerce=True, description="Change Reason", alias="change.reason")
|
|
27
28
|
change_changed_by: Series[String] = pa.Field(nullable=True, coerce=True, description="Change Changed By", alias="change.changedBy")
|
|
28
29
|
change_changed_by_id: Series[pd.Int64Dtype] = pa.Field(nullable=True, coerce=True, description="Change Changed By ID", alias="change.changedById")
|
|
@@ -1,5 +1,5 @@
|
|
|
1
1
|
from datetime import datetime
|
|
2
|
-
from typing import Optional
|
|
2
|
+
from typing import Optional
|
|
3
3
|
|
|
4
4
|
import pandas as pd
|
|
5
5
|
import pandera as pa
|
|
@@ -23,7 +23,7 @@ class PeopleSchema(BrynQPanderaDataFrameModel):
|
|
|
23
23
|
home_mobile_phone: Optional[Series[String]] = pa.Field(coerce=True, nullable=True, description="Personal Mobile Phone", alias="home.mobilePhone")
|
|
24
24
|
surname: Optional[Series[String]] = pa.Field(coerce=True, description="Surname", alias="root.surname")
|
|
25
25
|
first_name: Optional[Series[String]] = pa.Field(coerce=True, nullable=True, description="First Name", alias="root.firstName")
|
|
26
|
-
full_name: Optional[Series[String]] = pa.Field(coerce=True, nullable=True, description="Full Name", alias="fullName")
|
|
26
|
+
full_name: Optional[Series[String]] = pa.Field(coerce=True, nullable=True, description="Full Name", alias="root.fullName")
|
|
27
27
|
personal_birth_date: Optional[Series[DateTime]] = pa.Field(coerce=True, nullable=True, description="Personal Birth Date", alias="personal.birthDate")
|
|
28
28
|
personal_pronouns: Optional[Series[String]] = pa.Field(coerce=True, nullable=True, description="Personal Pronouns", alias="personal.pronouns")
|
|
29
29
|
personal_honorific: Optional[Series[String]] = pa.Field(coerce=True, nullable=True, description="Personal Honorific", alias="personal.honorific")
|
|
@@ -98,39 +98,3 @@ class PeopleSchema(BrynQPanderaDataFrameModel):
|
|
|
98
98
|
|
|
99
99
|
class Config:
|
|
100
100
|
coerce = True
|
|
101
|
-
|
|
102
|
-
@classmethod
|
|
103
|
-
def convert_pythonic_to_alias(cls, pythonic_fields: List[str]) -> List[str]:
|
|
104
|
-
"""
|
|
105
|
-
Convert pythonic field names to their Bob API aliases.
|
|
106
|
-
|
|
107
|
-
Args:
|
|
108
|
-
pythonic_fields: List of pythonic field names (e.g., ['work_start_date', 'first_name'])
|
|
109
|
-
|
|
110
|
-
Returns:
|
|
111
|
-
List of Bob API field aliases (e.g., ['work.startDate', 'root.firstName'])
|
|
112
|
-
"""
|
|
113
|
-
# Get the alias mapping from the class (populated by BrynQPanderaDataFrameModel.__init_subclass__)
|
|
114
|
-
alias_map = getattr(cls, "_alias_map", {})
|
|
115
|
-
|
|
116
|
-
# Convert the provided pythonic fields to aliases
|
|
117
|
-
result = []
|
|
118
|
-
for field in pythonic_fields:
|
|
119
|
-
if field in alias_map:
|
|
120
|
-
result.append(alias_map[field])
|
|
121
|
-
else:
|
|
122
|
-
# If not found in schema, keep the field as-is (could be a custom field)
|
|
123
|
-
result.append(field)
|
|
124
|
-
|
|
125
|
-
return result
|
|
126
|
-
|
|
127
|
-
@classmethod
|
|
128
|
-
def get_pythonic_to_alias_mapping(cls) -> Dict[str, str]:
|
|
129
|
-
"""
|
|
130
|
-
Get the complete mapping from pythonic field names to Bob API aliases.
|
|
131
|
-
|
|
132
|
-
Returns:
|
|
133
|
-
Dictionary mapping pythonic field names to aliases
|
|
134
|
-
"""
|
|
135
|
-
# Get the alias mapping from the class (populated by BrynQPanderaDataFrameModel.__init_subclass__)
|
|
136
|
-
return getattr(cls, "_alias_map", {})
|
|
@@ -0,0 +1,49 @@
|
|
|
1
|
+
import pandas as pd
|
|
2
|
+
import pandera as pa
|
|
3
|
+
from pandera import Bool
|
|
4
|
+
from pandera.typing import Series, String, Float, DateTime
|
|
5
|
+
import pandera.extensions as extensions
|
|
6
|
+
from brynq_sdk_functions import BrynQPanderaDataFrameModel
|
|
7
|
+
from pydantic import BaseModel, Field
|
|
8
|
+
from datetime import datetime, date
|
|
9
|
+
from typing import Optional, Dict, Any
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class SalarySchema(BrynQPanderaDataFrameModel):
|
|
13
|
+
id: Series[pd.Int64Dtype] = pa.Field(coerce=True, description="Salary ID", alias="id")
|
|
14
|
+
employee_id: Series[pd.Int64Dtype] = pa.Field(coerce=True, description="Employee ID", alias="employeeId")
|
|
15
|
+
pay_frequency: Series[String] = pa.Field(coerce=True, nullable=True, description="Pay Frequency", alias="payFrequency") # has a list of possible values , isin=['Monthly']
|
|
16
|
+
creation_date: Series[DateTime] = pa.Field(coerce=True, nullable=True, description="Creation Date", alias="creationDate")
|
|
17
|
+
is_current: Series[Bool] = pa.Field(coerce=True, description="Is Current", alias="isCurrent")
|
|
18
|
+
modification_date: Series[DateTime] = pa.Field(coerce=True, nullable=True, description="Modification Date", alias="modificationDate")
|
|
19
|
+
effective_date: Series[DateTime] = pa.Field(coerce=True, description="Effective Date", alias="effectiveDate")
|
|
20
|
+
end_effective_date: Series[DateTime] = pa.Field(coerce=True, nullable=True, description="End Effective Date", alias="endEffectiveDate")
|
|
21
|
+
change_reason: Series[str] = pa.Field(coerce=True, nullable=True, description="Change Reason", alias="change.reason")
|
|
22
|
+
pay_period: Series[String] = pa.Field(coerce=True, nullable=True, description="Pay Period", alias="payPeriod")
|
|
23
|
+
base_value: Series[Float] = pa.Field(coerce=True, nullable=True, description="Base Value", alias="base.value") #needs to become base.value?
|
|
24
|
+
base_currency: Series[String] = pa.Field(coerce=True, nullable=True, description="Base Currency", alias="base.currency")
|
|
25
|
+
active_effective_date: Series[DateTime] = pa.Field(coerce=True, nullable=True, description="Active Effective Date", alias="activeEffectiveDate")
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
class SalaryCreateSchema(BaseModel):
|
|
29
|
+
can_be_deleted: Optional[bool] = Field(None, description="internal flag", alias="canBeDeleted")
|
|
30
|
+
work_change_type: Optional[str] = Field(None, description="The type of the change that was performed for this work entry. This will contain the ID of the value from the Change Type list.", alias="workChangeType")
|
|
31
|
+
salary_change_reason: Optional[str] = Field(None, description="Reason for the change", alias="change.reason")
|
|
32
|
+
salary_change_changed_by: Optional[str] = Field(None, description="Name of the user who changed the entry", alias="change.changedBy")
|
|
33
|
+
salary_change_changed_by_id: Optional[str] = Field(None, description="ID of the user who changed the entry", alias="change.changedById")
|
|
34
|
+
pay_frequency: Optional[str] = Field(None, description="Represents the frequency the salary is paid. This can be one of: Monthly, Semi Monthly, Weekly, or Bi-Weekly.", alias="payFrequency")
|
|
35
|
+
creation_date: Optional[date] = Field(None, description="The date this entry was created.", alias="creationDate")
|
|
36
|
+
is_current: Optional[bool] = Field(None, description="Is 'true' when this is the effective entry which is currently active.", alias="isCurrent")
|
|
37
|
+
modification_date: Optional[date] = Field(None, description="The date this entry was modified.", alias="modificationDate")
|
|
38
|
+
id: Optional[int] = Field(None, description="The id of the entry.", alias="id")
|
|
39
|
+
end_effective_date: Optional[date] = Field(None, description="For entries that are not active - this it the date this entry became not effective.", alias="endEffectiveDate")
|
|
40
|
+
active_effective_date: Optional[date] = Field(None, description="The active effective date for this entry.", alias="activeEffectiveDate")
|
|
41
|
+
custom_columns: Optional[Dict[str, Any]] = Field(None, description="If the table has custom columns, they will appear here.", alias="customColumns")
|
|
42
|
+
base_value: float = Field(..., description="Base amount value", alias="base.value")
|
|
43
|
+
base_currency: str = Field(..., description="Three-letter currency code.", alias="base.currency")
|
|
44
|
+
pay_period: str = Field(..., description="Represents the period for this salary entry. This can be one of: Annual, Hourly, Daily, Weekly, Monthly.", alias="payPeriod")
|
|
45
|
+
effective_date: Optional[date] = Field(None, description="The date this entry becomes effective. This is a mandatory field for a work entry.", alias="effectiveDate")
|
|
46
|
+
|
|
47
|
+
class Config:
|
|
48
|
+
allow_population_by_field_name = True
|
|
49
|
+
coerce = True
|
|
@@ -6,25 +6,25 @@ from brynq_sdk_functions import BrynQPanderaDataFrameModel
|
|
|
6
6
|
|
|
7
7
|
class TimeOffSchema(BrynQPanderaDataFrameModel):
|
|
8
8
|
change_type: Series[String] = pa.Field(coerce=True, description="Change Type", alias="changeType")
|
|
9
|
-
employee_id: Series[
|
|
9
|
+
employee_id: Series[pd.Int64Dtype] = pa.Field(coerce=True, description="Employee ID", alias="employeeId")
|
|
10
10
|
employee_display_name: Series[String] = pa.Field(coerce=True, description="Employee Display Name", alias="employeeDisplayName")
|
|
11
11
|
employee_email: Series[String] = pa.Field(coerce=True, description="Employee Email", alias="employeeEmail")
|
|
12
12
|
request_id: Series[pd.Int64Dtype] = pa.Field(coerce=True, description="Request ID", alias="requestId")
|
|
13
13
|
policy_type_display_name: Series[String] = pa.Field(coerce=True, description="Policy Type Display Name", alias="policyTypeDisplayName")
|
|
14
14
|
type: Series[String] = pa.Field(coerce=True, description="Type", alias="type")
|
|
15
|
-
start_date: Series[String] = pa.Field(coerce=True, description="Start Date", alias="startDate")
|
|
16
|
-
start_portion: Series[String] = pa.Field(coerce=True, description="Start Portion", alias="startPortion")
|
|
17
|
-
end_date: Series[String] = pa.Field(coerce=True, description="End Date", alias="endDate")
|
|
18
|
-
end_portion: Series[String] = pa.Field(coerce=True, description="End Portion", alias="endPortion")
|
|
19
|
-
day_portion: Optional[Series[String]] = pa.Field(coerce=True, description="Day Portion", alias="dayPortion")
|
|
20
|
-
date: Optional[Series[String]] = pa.Field(coerce=True, description="Date", alias="date")
|
|
21
|
-
hours_on_date: Optional[Series[Float]] = pa.Field(coerce=True, description="Hours on Date", alias="hoursOnDate")
|
|
22
|
-
daily_hours: Optional[Series[Float]] = pa.Field(coerce=True, description="Daily Hours", alias="dailyHours")
|
|
23
|
-
duration_unit: Optional[Series[String]] = pa.Field(coerce=True, description="Duration Unit", alias="durationUnit")
|
|
24
|
-
total_duration: Optional[Series[Float]] = pa.Field(coerce=True, description="Total Duration", alias="totalDuration")
|
|
25
|
-
total_cost: Optional[Series[Float]] = pa.Field(coerce=True, description="Total Cost", alias="totalCost")
|
|
15
|
+
start_date: Series[String] = pa.Field(coerce=True, nullable=True, description="Start Date", alias="startDate")
|
|
16
|
+
start_portion: Series[String] = pa.Field(coerce=True, nullable=True, description="Start Portion", alias="startPortion")
|
|
17
|
+
end_date: Series[String] = pa.Field(coerce=True, nullable=True, description="End Date", alias="endDate")
|
|
18
|
+
end_portion: Series[String] = pa.Field(coerce=True, nullable=True, description="End Portion", alias="endPortion")
|
|
19
|
+
day_portion: Optional[Series[String]] = pa.Field(coerce=True, nullable=True, description="Day Portion", alias="dayPortion")
|
|
20
|
+
date: Optional[Series[String]] = pa.Field(coerce=True, nullable=True, description="Date", alias="date")
|
|
21
|
+
hours_on_date: Optional[Series[Float]] = pa.Field(coerce=True, nullable=True, description="Hours on Date", alias="hoursOnDate")
|
|
22
|
+
daily_hours: Optional[Series[Float]] = pa.Field(coerce=True, nullable=True, description="Daily Hours", alias="dailyHours")
|
|
23
|
+
duration_unit: Optional[Series[String]] = pa.Field(coerce=True, nullable=True, description="Duration Unit", alias="durationUnit")
|
|
24
|
+
total_duration: Optional[Series[Float]] = pa.Field(coerce=True, nullable=True, description="Total Duration", alias="totalDuration")
|
|
25
|
+
total_cost: Optional[Series[Float]] = pa.Field(coerce=True, nullable=True, description="Total Cost", alias="totalCost")
|
|
26
26
|
change_reason: Optional[Series[String]] = pa.Field(nullable=True, coerce=True, description="Change Reason", alias="changeReason")
|
|
27
|
-
visibility: Optional[Series[String]] = pa.Field(coerce=True, description="Visibility", alias="visibility")
|
|
27
|
+
visibility: Optional[Series[String]] = pa.Field(coerce=True, nullable=True, description="Visibility", alias="visibility")
|
|
28
28
|
|
|
29
29
|
class Config:
|
|
30
30
|
coerce = True
|
|
@@ -18,14 +18,14 @@ class WorkSchema(BrynQPanderaDataFrameModel):
|
|
|
18
18
|
active_effective_date: Series[datetime] = pa.Field(coerce=True, nullable=True, description="Active Effective Date", alias="activeEffectiveDate")
|
|
19
19
|
department: Series[str] = pa.Field(coerce=True, nullable=True, description="Department", alias="department")
|
|
20
20
|
effective_date: Series[datetime] = pa.Field(coerce=True, nullable=True, description="Effective Date", alias="effectiveDate")
|
|
21
|
-
change_reason: Series[str] = pa.Field(coerce=True, nullable=True, description="Change Reason", alias="
|
|
22
|
-
change_changed_by: Series[str] = pa.Field(coerce=True, nullable=True, description="Change Changed By", alias="
|
|
23
|
-
change_changed_by_id: Series[str] = pa.Field(coerce=True, nullable=True, description="Change Changed By ID", alias="
|
|
24
|
-
reports_to_id: Series[str] = pa.Field(coerce=True, nullable=True, description="Reports To ID", alias="
|
|
25
|
-
reports_to_first_name: Series[str] = pa.Field(coerce=True, nullable=True, description="Reports To First Name", alias="
|
|
26
|
-
reports_to_surname: Series[str] = pa.Field(coerce=True, nullable=True, description="Reports To Surname", alias="
|
|
27
|
-
reports_to_email: Series[str] = pa.Field(coerce=True, nullable=True, description="Reports To Email", alias="
|
|
28
|
-
reports_to_display_name: Series[str] = pa.Field(coerce=True, nullable=True, description="Reports To Display Name", alias="
|
|
21
|
+
change_reason: Series[str] = pa.Field(coerce=True, nullable=True, description="Change Reason", alias="change.reason")
|
|
22
|
+
change_changed_by: Series[str] = pa.Field(coerce=True, nullable=True, description="Change Changed By", alias="change.changedBy")
|
|
23
|
+
change_changed_by_id: Series[str] = pa.Field(coerce=True, nullable=True, description="Change Changed By ID", alias="change.changedById")
|
|
24
|
+
reports_to_id: Series[str] = pa.Field(coerce=True, nullable=True, description="Reports To ID", alias="reportsTo.id")
|
|
25
|
+
reports_to_first_name: Series[str] = pa.Field(coerce=True, nullable=True, description="Reports To First Name", alias="reportsTo.firstName")
|
|
26
|
+
reports_to_surname: Series[str] = pa.Field(coerce=True, nullable=True, description="Reports To Surname", alias="reportsTo.surname")
|
|
27
|
+
reports_to_email: Series[str] = pa.Field(coerce=True, nullable=True, description="Reports To Email", alias="reportsTo.email")
|
|
28
|
+
reports_to_display_name: Series[str] = pa.Field(coerce=True, nullable=True, description="Reports To Display Name", alias="reportsTo.displayName")
|
|
29
29
|
reports_to: Series[pd.Int64Dtype] = pa.Field(coerce=True, nullable=True, description="Reports To", alias="reportsTo")
|
|
30
30
|
employee_id: Series[pd.Int64Dtype] = pa.Field(coerce=True, description="Employee ID", alias="employeeId")
|
|
31
31
|
|
|
@@ -1,73 +0,0 @@
|
|
|
1
|
-
import base64
|
|
2
|
-
import re
|
|
3
|
-
from typing import Union, List, Optional, Literal
|
|
4
|
-
import pandas as pd
|
|
5
|
-
import requests
|
|
6
|
-
import os
|
|
7
|
-
from brynq_sdk_brynq import BrynQ
|
|
8
|
-
from brynq_sdk_functions import Functions
|
|
9
|
-
from .bank import Bank
|
|
10
|
-
from .company import Company
|
|
11
|
-
from .documents import CustomDocuments
|
|
12
|
-
from .employment import Employment
|
|
13
|
-
from .named_lists import NamedLists
|
|
14
|
-
from .payments import Payments
|
|
15
|
-
from .people import People
|
|
16
|
-
from .salaries import Salaries
|
|
17
|
-
from .timeoff import TimeOff
|
|
18
|
-
from .work import Work
|
|
19
|
-
from .custom_tables import CustomTables
|
|
20
|
-
|
|
21
|
-
class Bob(BrynQ):
|
|
22
|
-
def __init__(self, system_type: Optional[Literal['source', 'target']] = None, test_environment: bool = True, debug: bool = False, target_system: str = None):
|
|
23
|
-
super().__init__()
|
|
24
|
-
self.timeout = 3600
|
|
25
|
-
self.headers = self._get_request_headers(system_type)
|
|
26
|
-
if test_environment:
|
|
27
|
-
self.base_url = "https://api.sandbox.hibob.com/v1/"
|
|
28
|
-
else:
|
|
29
|
-
self.base_url = "https://api.hibob.com/v1/"
|
|
30
|
-
self.session = requests.Session()
|
|
31
|
-
self.session.headers.update(self.headers)
|
|
32
|
-
self.people = People(self)
|
|
33
|
-
self.salaries = Salaries(self)
|
|
34
|
-
self.work = Work(self)
|
|
35
|
-
self.bank = Bank(self)
|
|
36
|
-
self.employment = Employment(self)
|
|
37
|
-
self.payments = Payments(self)
|
|
38
|
-
self.time_off = TimeOff(self)
|
|
39
|
-
self.documents = CustomDocuments(self)
|
|
40
|
-
self.companies = Company(self)
|
|
41
|
-
self.named_lists = NamedLists(self)
|
|
42
|
-
self.custom_tables = CustomTables(self)
|
|
43
|
-
self.data_interface_id = os.getenv("DATA_INTERFACE_ID")
|
|
44
|
-
self.debug = debug
|
|
45
|
-
|
|
46
|
-
def _get_request_headers(self, system_type):
|
|
47
|
-
credentials = self.interfaces.credentials.get(system='bob', system_type=system_type)
|
|
48
|
-
auth_token = base64.b64encode(f"{credentials.get('data').get('User ID')}:{credentials.get('data').get('API Token')}".encode()).decode('utf-8')
|
|
49
|
-
headers = {
|
|
50
|
-
"accept": "application/json",
|
|
51
|
-
"Authorization": f"Basic {auth_token}",
|
|
52
|
-
"Partner-Token": "001Vg00000A6FY6IAN"
|
|
53
|
-
}
|
|
54
|
-
|
|
55
|
-
return headers
|
|
56
|
-
|
|
57
|
-
def get_paginated_result(self, request: requests.Request) -> List:
|
|
58
|
-
has_next_page = True
|
|
59
|
-
result_data = []
|
|
60
|
-
while has_next_page:
|
|
61
|
-
prepped = request.prepare()
|
|
62
|
-
prepped.headers.update(self.session.headers)
|
|
63
|
-
resp = self.session.send(prepped, timeout=self.timeout)
|
|
64
|
-
resp.raise_for_status()
|
|
65
|
-
response_data = resp.json()
|
|
66
|
-
result_data += response_data['results']
|
|
67
|
-
next_cursor = response_data.get('response_metadata').get('next_cursor')
|
|
68
|
-
# If there is no next page, set has_next_page to False, we could use the falsy value of None but this is more readable
|
|
69
|
-
has_next_page = next_cursor is not None
|
|
70
|
-
if has_next_page:
|
|
71
|
-
request.params.update({"cursor": next_cursor})
|
|
72
|
-
|
|
73
|
-
return result_data
|
|
@@ -1,24 +0,0 @@
|
|
|
1
|
-
import pandas as pd
|
|
2
|
-
import requests
|
|
3
|
-
from brynq_sdk_functions import Functions
|
|
4
|
-
from .schemas.salary import SalarySchema
|
|
5
|
-
|
|
6
|
-
|
|
7
|
-
class Salaries:
|
|
8
|
-
def __init__(self, bob):
|
|
9
|
-
self.bob = bob
|
|
10
|
-
self.schema = SalarySchema
|
|
11
|
-
|
|
12
|
-
def get(self) -> (pd.DataFrame, pd.DataFrame):
|
|
13
|
-
request = requests.Request(method='GET',
|
|
14
|
-
url=f"{self.bob.base_url}bulk/people/salaries",
|
|
15
|
-
params={"limit": 100})
|
|
16
|
-
data = self.bob.get_paginated_result(request)
|
|
17
|
-
df = pd.json_normalize(
|
|
18
|
-
data,
|
|
19
|
-
record_path='values',
|
|
20
|
-
meta=['employeeId']
|
|
21
|
-
)
|
|
22
|
-
valid_salaries, invalid_salaries = Functions.validate_data(df=df, schema=SalarySchema, debug=True)
|
|
23
|
-
|
|
24
|
-
return valid_salaries, invalid_salaries
|
|
@@ -1,25 +0,0 @@
|
|
|
1
|
-
import pandas as pd
|
|
2
|
-
import pandera as pa
|
|
3
|
-
from pandera import Bool
|
|
4
|
-
from pandera.typing import Series, String, Float, DateTime
|
|
5
|
-
import pandera.extensions as extensions
|
|
6
|
-
from brynq_sdk_functions import BrynQPanderaDataFrameModel
|
|
7
|
-
|
|
8
|
-
|
|
9
|
-
class SalarySchema(BrynQPanderaDataFrameModel):
|
|
10
|
-
id: Series[String] = pa.Field(coerce=True, description="Salary ID", alias="id")
|
|
11
|
-
employee_id: Series[String] = pa.Field(coerce=True, description="Employee ID", alias="employeeId")
|
|
12
|
-
pay_frequency: Series[String] = pa.Field(coerce=True, nullable=True, description="Pay Frequency", alias="payFrequency") # has a list of possible values , isin=['Monthly']
|
|
13
|
-
creation_date: Series[DateTime] = pa.Field(coerce=True, nullable=True, description="Creation Date", alias="creationDate")
|
|
14
|
-
is_current: Series[Bool] = pa.Field(coerce=True, description="Is Current", alias="isCurrent")
|
|
15
|
-
modification_date: Series[DateTime] = pa.Field(coerce=True, nullable=True, description="Modification Date", alias="modificationDate")
|
|
16
|
-
effective_date: Series[DateTime] = pa.Field(coerce=True, description="Effective Date", alias="effectiveDate")
|
|
17
|
-
end_effective_date: Series[DateTime] = pa.Field(coerce=True, nullable=True, description="End Effective Date", alias="endEffectiveDate")
|
|
18
|
-
change_reason: Series[str] = pa.Field(coerce=True, nullable=True, description="Change Reason", alias="change.reason")
|
|
19
|
-
pay_period: Series[String] = pa.Field(coerce=True, nullable=True, description="Pay Period", alias="payPeriod")
|
|
20
|
-
base_value: Series[Float] = pa.Field(coerce=True, nullable=True, description="Base Value", alias="base.value") #needs to become base.value?
|
|
21
|
-
base_currency: Series[String] = pa.Field(coerce=True, nullable=True, description="Base Currency", alias="base.currency")
|
|
22
|
-
active_effective_date: Series[DateTime] = pa.Field(coerce=True, nullable=True, description="Active Effective Date", alias="activeEffectiveDate")
|
|
23
|
-
|
|
24
|
-
class Config:
|
|
25
|
-
coerce = True
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
{brynq_sdk_bob-2.5.0 → brynq_sdk_bob-2.5.2.dev0}/brynq_sdk_bob.egg-info/dependency_links.txt
RENAMED
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|