fivetran-connector-sdk 0.4.8.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,703 @@
+ import argparse
+ import docker
+ import grpc
+ import importlib.util
+ import inspect
+ import json
+ import os
+ import requests as rq
+ import sys
+
+ from concurrent import futures
+ from datetime import datetime
+ from docker.types import Mount
+ from google.protobuf import timestamp_pb2
+
+ from fivetran_connector_sdk.protos import common_pb2
+ from fivetran_connector_sdk.protos import connector_sdk_pb2
+ from fivetran_connector_sdk.protos import connector_sdk_pb2_grpc
+
+ TESTER_IMAGE_NAME = "fivetrandocker/sdk-connector-tester"
+ TESTER_IMAGE_VERSION = "024.0408.001"
+ TESTER_CONTAINER_NAME = "fivetran_connector_tester"
+
+ BUILDER_IMAGE_NAME = "fivetrandocker/connector-sdk-binary-builder"
+ BUILDER_IMAGE_VERSION = "024.0304.002"
+ BUILDER_CONTAINER_NAME = "fivetran_binary_builder"
+
+ # Set by Connector.debug(); enables the yield checks in Operations and makes run() return the server
+ DEBUGGING = False
+ # Table schemas declared via Schema() or inferred on first upsert, keyed by table name
+ TABLES = {}
+
+
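+ # Each method below builds the gRPC UpdateResponse message(s) for one operation.
+ # A connector's update function is expected to yield them back to the runner
+ # (enforced by _yield_check), e.g. in hypothetical connector code:
+ #
+ #     def update(configuration, state):
+ #         yield Operations.upsert("users", {"id": 1, "name": "Ada"})
+ #         yield Operations.checkpoint({"cursor": "2024-01-01T00:00:00Z"})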
+ class Operations:
+     @staticmethod
+     def upsert(table: str, data: dict) -> list[connector_sdk_pb2.UpdateResponse]:
+         _yield_check(inspect.stack())
+
+         responses = []
+
+         columns = _get_columns(table)
+         if not columns:
+             global TABLES
+             for field in data.keys():
+                 columns[field] = common_pb2.Column(
+                     name=field, type=common_pb2.DataType.UNSPECIFIED, primary_key=True)
+             new_table = common_pb2.Table(name=table, columns=columns.values())
+             TABLES[table] = new_table
+
+             responses.append(connector_sdk_pb2.UpdateResponse(
+                 operation=connector_sdk_pb2.Operation(
+                     schema_change=connector_sdk_pb2.SchemaChange(
+                         without_schema=common_pb2.TableList(tables=[new_table])))))
+
+         mapped_data = _map_data_to_columns(data, columns)
+         record = connector_sdk_pb2.Record(
+             schema_name=None,
+             table_name=table,
+             type=common_pb2.OpType.UPSERT,
+             data=mapped_data
+         )
+
+         responses.append(
+             connector_sdk_pb2.UpdateResponse(
+                 operation=connector_sdk_pb2.Operation(record=record)))
+
+         return responses
+
+     @staticmethod
+     def update(table: str, modified: dict) -> connector_sdk_pb2.UpdateResponse:
+         _yield_check(inspect.stack())
+
+         columns = _get_columns(table)
+         mapped_data = _map_data_to_columns(modified, columns)
+         record = connector_sdk_pb2.Record(
+             schema_name=None,
+             table_name=table,
+             type=common_pb2.OpType.UPDATE,
+             data=mapped_data
+         )
+
+         return connector_sdk_pb2.UpdateResponse(
+             operation=connector_sdk_pb2.Operation(record=record))
+
+     @staticmethod
+     def delete(table: str, keys: dict) -> connector_sdk_pb2.UpdateResponse:
+         _yield_check(inspect.stack())
+
+         columns = _get_columns(table)
+         mapped_data = _map_data_to_columns(keys, columns)
+         record = connector_sdk_pb2.Record(
+             schema_name=None,
+             table_name=table,
+             type=common_pb2.OpType.DELETE,
+             data=mapped_data
+         )
+
+         return connector_sdk_pb2.UpdateResponse(
+             operation=connector_sdk_pb2.Operation(record=record))
+
+     @staticmethod
+     def checkpoint(state: dict) -> connector_sdk_pb2.UpdateResponse:
+         _yield_check(inspect.stack())
+         return connector_sdk_pb2.UpdateResponse(
+             operation=connector_sdk_pb2.Operation(checkpoint=connector_sdk_pb2.Checkpoint(
+                 state_json=json.dumps(state))))
+
+
+ def _get_columns(table: str) -> dict:
+     columns = {}
+     if table in TABLES:
+         for column in TABLES[table].columns:
+             columns[column.name] = column
+
+     return columns
+
+
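+ # Illustrative behavior: with no declared column type, values are mapped by
+ # Python type below, so {"id": 7, "ok": True, "meta": {"a": 1}} becomes
+ # ValueType messages int=7, bool=True and json='{"a": 1}'.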
+ def _map_data_to_columns(data: dict, columns: dict) -> dict:
+     mapped_data = {}
+     for k, v in data.items():
+         if v is None:
+             mapped_data[k] = common_pb2.ValueType(null=True)
+         elif isinstance(v, list):
+             raise ValueError("Value type cannot be list")
+         elif (k in columns) and columns[k].type != common_pb2.DataType.UNSPECIFIED:
+             if columns[k].type == common_pb2.DataType.BOOLEAN:
+                 mapped_data[k] = common_pb2.ValueType(bool=v)
+             elif columns[k].type == common_pb2.DataType.SHORT:
+                 mapped_data[k] = common_pb2.ValueType(short=v)
+             elif columns[k].type == common_pb2.DataType.INT:
+                 mapped_data[k] = common_pb2.ValueType(int=v)
+             elif columns[k].type == common_pb2.DataType.LONG:
+                 mapped_data[k] = common_pb2.ValueType(long=v)
+             elif columns[k].type == common_pb2.DataType.DECIMAL:
+                 mapped_data[k] = common_pb2.ValueType(decimal=v)
+             elif columns[k].type == common_pb2.DataType.FLOAT:
+                 mapped_data[k] = common_pb2.ValueType(float=v)
+             elif columns[k].type == common_pb2.DataType.DOUBLE:
+                 mapped_data[k] = common_pb2.ValueType(double=v)
+             elif columns[k].type == common_pb2.DataType.NAIVE_DATE:
+                 timestamp = timestamp_pb2.Timestamp()
+                 dt = datetime.strptime(v, "%Y-%m-%d")
+                 timestamp.FromDatetime(dt)
+                 mapped_data[k] = common_pb2.ValueType(naive_date=timestamp)
+             elif columns[k].type == common_pb2.DataType.NAIVE_DATETIME:
+                 if '.' not in v:
+                     v = v + ".0"
+                 timestamp = timestamp_pb2.Timestamp()
+                 dt = datetime.strptime(v, "%Y-%m-%dT%H:%M:%S.%f")
+                 timestamp.FromDatetime(dt)
+                 mapped_data[k] = common_pb2.ValueType(naive_datetime=timestamp)
+             elif columns[k].type == common_pb2.DataType.UTC_DATETIME:
+                 timestamp = timestamp_pb2.Timestamp()
+                 if '.' in v:
+                     dt = datetime.strptime(v, "%Y-%m-%dT%H:%M:%S.%f%z")
+                 else:
+                     dt = datetime.strptime(v, "%Y-%m-%dT%H:%M:%S%z")
+                 timestamp.FromDatetime(dt)
+                 mapped_data[k] = common_pb2.ValueType(utc_datetime=timestamp)
+             elif columns[k].type == common_pb2.DataType.BINARY:
+                 mapped_data[k] = common_pb2.ValueType(binary=v)
+             elif columns[k].type == common_pb2.DataType.XML:
+                 mapped_data[k] = common_pb2.ValueType(xml=v)
+             elif columns[k].type == common_pb2.DataType.STRING:
+                 incoming = v if isinstance(v, str) else str(v)
+                 mapped_data[k] = common_pb2.ValueType(string=incoming)
+             elif columns[k].type == common_pb2.DataType.JSON:
+                 mapped_data[k] = common_pb2.ValueType(json=json.dumps(v))
+             else:
+                 raise ValueError(f"Unknown data type: {columns[k].type}")
+         else:
+             # We can infer the type from the value. Note: bool must be checked
+             # before int because bool is a subclass of int in Python.
+             if isinstance(v, bool):
+                 mapped_data[k] = common_pb2.ValueType(bool=v)
+             elif isinstance(v, int):
+                 if abs(v) > 2147483647:
+                     mapped_data[k] = common_pb2.ValueType(long=v)
+                 else:
+                     mapped_data[k] = common_pb2.ValueType(int=v)
+             elif isinstance(v, float):
+                 mapped_data[k] = common_pb2.ValueType(float=v)
+             elif isinstance(v, bytes):
+                 mapped_data[k] = common_pb2.ValueType(binary=v)
+             elif isinstance(v, dict):
+                 mapped_data[k] = common_pb2.ValueType(json=json.dumps(v))
+             elif isinstance(v, str):
+                 mapped_data[k] = common_pb2.ValueType(string=v)
+             else:
+                 # Convert arbitrary objects to string
+                 mapped_data[k] = common_pb2.ValueType(string=str(v))
+
+     return mapped_data
+
+
+ def _yield_check(stack):
+     # Known issue with inspect.getmodule() and yield behavior in a frozen application.
+     # When using inspect.getmodule() on stack frames obtained by inspect.stack(), it fails
+     # to resolve the modules in a frozen application due to incompatible assumptions about
+     # the file paths. This can lead to unexpected behavior, such as yield returning None or
+     # a failure to retrieve the module inside a frozen app
+     # (Reference: https://github.com/pyinstaller/pyinstaller/issues/5963)
+     if not DEBUGGING:
+         return
+
+     called_method = stack[0].function
+     calling_code = stack[1].code_context[0]
+     if f"{called_method}(" in calling_code:
+         if 'yield' not in calling_code:
+             print(f"ERROR: Please add 'yield' to the '{called_method}' operation on line {stack[1].lineno} in file '{stack[1].filename}'")
+             os._exit(1)
+     else:
+         # This should never happen
+         raise RuntimeError(f"Unable to find '{called_method}' function in stack")
+
+
+ def _check_dict(incoming: dict, string_only: bool = False):
+     if not incoming:
+         return {}
+
+     if not isinstance(incoming, dict):
+         raise ValueError("Configuration should be a dictionary")
+
+     if string_only:
+         for k, v in incoming.items():
+             if not isinstance(v, str):
+                 print("ERROR: Use only string values in configuration")
+                 os._exit(1)
+
+     return incoming
+
+
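+ # A minimal connector module might look like this (hypothetical connector.py;
+ # the import path is assumed from the package name):
+ #
+ #     from fivetran_connector_sdk import Connector
+ #
+ #     def update(configuration, state):
+ #         yield Operations.upsert("users", {"id": 1})
+ #
+ #     connector = Connector(update=update)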
+ class Connector(connector_sdk_pb2_grpc.ConnectorServicer):
+     def __init__(self, update, schema=None):
+         self.schema_method = schema
+         self.update_method = update
+
+         self.configuration = None
+         self.state = None
+
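+     # Illustrative call (hypothetical values):
+     #     connector.deploy("./my_connector", deploy_key=BASE64_API_KEY,
+     #                      group="my_destination", connection="my_schema",
+     #                      configuration={"api_key": "..."})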
+     # Call this method to deploy the connector to the Fivetran platform
+     def deploy(self, project_path: str, deploy_key: str, group: str, connection: str, configuration: dict = None):
+         if not deploy_key:
+             print("ERROR: Missing deploy key")
+             os._exit(1)
+         if not connection:
+             print("ERROR: Missing connection name")
+             os._exit(1)
+         configuration = _check_dict(configuration)
+
+         secrets_list = []
+         for k, v in configuration.items():
+             secrets_list.append({"key": k, "value": v})
+
+         connection_config = {
+             "schema": connection,
+             "secrets_list": secrets_list,
+             "sync_method": "DIRECT",
+             "custom_payloads": [],
+         }
+         group_id, group_name = self.__get_group_info(group, deploy_key)
+         print(f"Deploying '{project_path}' to '{group_name}/{connection}'")
+         self.__write_run_py(project_path)
+         # TODO: we need to do this step on the server (upload code instead)
+         self.__create_standalone_binary(project_path)
+         upload_file_path = os.path.join(project_path, "dist", "__run")
+         if not self.__upload(upload_file_path, deploy_key, group_id, connection):
+             os._exit(1)
+         connection_id = self.__get_connection_id(connection, group, group_id, deploy_key)
+         if connection_id:
+             print(f"Connection '{connection}' already exists in group '{group}', updating configuration .. ", end="", flush=True)
+             self.__update_connection(connection_id, connection, group_name, connection_config, deploy_key)
+             self.__force_sync(connection_id, connection, deploy_key)
+             print("✓")
+         else:
+             response = self.__create_connection(deploy_key, group_id, connection_config)
+             if response.ok:
+                 print(f"New connection with name '{connection}' created")
+             else:
+                 print(f"ERROR: Failed to create new connection: {response.json()['message']}")
+                 os._exit(1)
+
+     @staticmethod
+     def __force_sync(id: str, name: str, deploy_key: str):
+         resp = rq.post(f"https://api.fivetran.com/v1/connectors/{id}/sync",
+                        headers={"Authorization": f"Basic {deploy_key}"},
+                        json={"force": True})
+
+         if not resp.ok:
+             print(f"WARNING: Unable to start sync on connection '{name}'")
+
+     @staticmethod
+     def __update_connection(id: str, name: str, group: str, config: dict, deploy_key: str):
+         resp = rq.patch(f"https://api.fivetran.com/v1/connectors/{id}",
+                         headers={"Authorization": f"Basic {deploy_key}"},
+                         json={
+                             "config": config,
+                             "run_setup_tests": True
+                         })
+
+         if not resp.ok:
+             print(f"ERROR: Unable to update connection '{name}' in group '{group}'")
+             os._exit(1)
+
+     @staticmethod
+     def __get_connection_id(name: str, group: str, group_id: str, deploy_key: str):
+         resp = rq.get(f"https://api.fivetran.com/v1/groups/{group_id}/connectors",
+                       headers={"Authorization": f"Basic {deploy_key}"},
+                       params={"schema": name})
+         if not resp.ok:
+             print(f"ERROR: Unable to fetch connection list in group '{group}'")
+             os._exit(1)
+
+         if resp.json()['data']['items']:
+             return resp.json()['data']['items'][0]['id']
+
+         return None
+
+     @staticmethod
+     def __create_connection(deploy_key: str, group_id: str, config: dict):
+         response = rq.post("https://api.fivetran.com/v1/connectors",
+                            headers={"Authorization": f"Basic {deploy_key}"},
+                            json={
+                                "group_id": group_id,
+                                "service": "my_built",
+                                "config": config,
+                                "paused": False,
+                                "run_setup_tests": True,
+                                "sync_frequency": "360",
+                            })
+         return response
+
+     @staticmethod
+     def __create_standalone_binary(project_path: str):
+         print("Preparing artifacts ..")
+         print("1 of 4 .. ", end="", flush=True)
+         docker_client = docker.from_env()
+         image = f"{BUILDER_IMAGE_NAME}:{BUILDER_IMAGE_VERSION}"
+         result = docker_client.images.list(image)
+         if not result:
+             # Pull the builder image if missing
+             docker_client.images.pull(BUILDER_IMAGE_NAME, BUILDER_IMAGE_VERSION, platform="linux/amd64")
+
+         for container in docker_client.containers.list(all=True):
+             if container.name == BUILDER_CONTAINER_NAME:
+                 if container.status == "running":
+                     print("ERROR: Another deploy process is running")
+                     os._exit(1)
+
+         container = None
+         try:
+             # TODO: Check responses in each step and look for "success" phrases
+             container = docker_client.containers.run(
+                 image=image,
+                 name=BUILDER_CONTAINER_NAME,
+                 command="/bin/sh",
+                 mounts=[Mount("/myapp", project_path, read_only=False, type="bind")],
+                 tty=True,
+                 detach=True,
+                 working_dir="/myapp",
+                 remove=True)
+             print("✓")
+
+             print("2 of 4 .. ", end="", flush=True)
+             resp = container.exec_run("pip install fivetran_connector_sdk")
+             print("✓")
+
+             print("3 of 4 .. ", end="", flush=True)
+             if os.path.isfile(os.path.join(project_path, "requirements.txt")):
+                 resp = container.exec_run("pip install -r requirements.txt")
+             print("✓")
+
+             print("4 of 4 .. ", end="", flush=True)
+             resp = container.exec_run("rm __run")
+             resp = container.exec_run("pyinstaller --onefile --clean __run.py")
+             print("✓")
+
+             if not os.path.isfile(os.path.join(project_path, "dist", "__run")):
+                 print("Failed to create deployment file")
+                 os._exit(1)
+
+         finally:
+             if container:
+                 container.stop()
+
+     @staticmethod
+     def __upload(local_path: str, deploy_key: str, group_id: str, connection: str) -> bool:
+         print("Uploading .. ", end="", flush=True)
+         with open(local_path, 'rb') as file:
+             response = rq.post(f"https://api.fivetran.com/v2/deploy/{group_id}/{connection}",
+                                files={'file': file},
+                                headers={"Authorization": f"Basic {deploy_key}"})
+         if response.ok:
+             print("✓")
+             return True
+
+         print("fail\nERROR: ", response.reason)
+         return False
+
+     @staticmethod
+     def __write_run_py(project_path: str):
+         with open(os.path.join(project_path, "__run.py"), "w") as fo:
+             fo.writelines([
+                 "import sys\n",
+                 "from connector import connector\n",
+                 "if len(sys.argv) == 3 and sys.argv[1] == '--port':\n",
+                 "    server = connector.run(port=int(sys.argv[2]))\n",
+                 "else:\n",
+                 "    server = connector.run()\n"
+             ])
+
+     @staticmethod
+     def __get_group_info(group: str, deploy_key: str) -> tuple[str, str]:
+         resp = rq.get("https://api.fivetran.com/v1/groups",
+                       headers={"Authorization": f"Basic {deploy_key}"})
+
+         if not resp.ok:
+             print(f"ERROR: Unable to fetch list of groups, status code = {resp.status_code}")
+             os._exit(1)
+
+         # TODO: Do we need to implement pagination?
+         groups = resp.json()['data']['items']
+         if not groups:
+             print("ERROR: No destinations defined in the account")
+             os._exit(1)
+
+         if len(groups) == 1:
+             return groups[0]['id'], groups[0]['name']
+         else:
+             if not group:
+                 print("ERROR: Group name is required when there are multiple destinations in the account")
+                 os._exit(1)
+
+             for grp in groups:
+                 if grp['name'] == group:
+                     return grp['id'], grp['name']
+
+             print(f"ERROR: Specified group was not found in the account: {group}")
+             os._exit(1)
+
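+     # The PyInstaller binary built from the "__run.py" stub above ends up
+     # calling this method, e.g. (illustrative): "__run --port 50051".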
+     # Call this method to run the connector in production
+     def run(self, port: int = 50051, configuration: dict = None, state: dict = None) -> grpc.Server:
+         self.configuration = _check_dict(configuration, True)
+         self.state = _check_dict(state)
+
+         server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
+         connector_sdk_pb2_grpc.add_ConnectorServicer_to_server(self, server)
+         server.add_insecure_port("[::]:" + str(port))
+         server.start()
+         print("Connector started, listening on " + str(port))
+         if DEBUGGING:
+             return server
+         server.wait_for_termination()
+
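+     # Illustrative local test run (hypothetical values):
+     #     connector.debug(configuration={"api_key": "..."}, state={})
+     # starts the gRPC server and attaches the Dockerized tester to it.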
+     # This method starts both the server and the local testing environment
+     def debug(self, project_path: str = None, port: int = 50051, configuration: dict = None, state: dict = None) -> bool:
+         global DEBUGGING
+         DEBUGGING = True
+
+         project_path = os.getcwd() if project_path is None else project_path
+         print(f"Debugging connector: {project_path}")
+         server = self.run(port, configuration, state)
+
+         # Uncomment this to run the tester manually
+         # server.wait_for_termination()
+
+         docker_client = docker.from_env()
+         image = f"{TESTER_IMAGE_NAME}:{TESTER_IMAGE_VERSION}"
+         result = docker_client.images.list(image)
+         # Pull the tester image if missing
+         if not result:
+             print(f"Downloading connector tester {TESTER_IMAGE_VERSION} .. ", end="", flush=True)
+             docker_client.images.pull(TESTER_IMAGE_NAME, TESTER_IMAGE_VERSION, platform="linux/amd64")
+             print("✓")
+
+         error = False
+         try:
+             for container in docker_client.containers.list(all=True):
+                 if container.name == TESTER_CONTAINER_NAME:
+                     if container.status == "running":
+                         container.stop()
+                     else:
+                         container.remove()
+                     break
+
+             working_dir = os.path.join(project_path, "files")
+             try:
+                 os.mkdir(working_dir)
+             except FileExistsError:
+                 pass
+
+             container = docker_client.containers.run(
+                 image=image,
+                 name=TESTER_CONTAINER_NAME,
+                 command="--connector-sdk=true",
+                 mounts=[Mount("/data", working_dir, read_only=False, type="bind")],
+                 network="host",
+                 remove=True,
+                 detach=True,
+                 environment=["GRPC_HOSTNAME=host.docker.internal"])
+
+             for line in container.attach(stdout=True, stderr=True, stream=True):
+                 msg = line.decode("utf-8")
+                 print(msg, end="")
+                 if ("Exception in thread" in msg) or ("SEVERE:" in msg):
+                     error = True
+
+         finally:
+             server.stop(grace=2.0)
+         return not error
+
+     # -- Methods below override ConnectorServicer methods
+     def ConfigurationForm(self, request, context):
+         if not self.configuration:
+             self.configuration = {}
+
+         # Not going to use the tester's configuration file
+         return common_pb2.ConfigurationFormResponse()
+
+     def Test(self, request, context):
+         return None
+
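+     # Illustrative schema() return value accepted by Schema() below:
+     #     [{"table": "users",
+     #       "primary_key": ["id"],
+     #       "columns": {"id": "LONG",
+     #                   "name": "STRING",
+     #                   "amount": {"type": "DECIMAL", "precision": 10, "scale": 2}}}]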
+     def Schema(self, request, context):
+         global TABLES
+
+         if not self.schema_method:
+             return connector_sdk_pb2.SchemaResponse(schema_response_not_supported=True)
+         else:
+             configuration = self.configuration if self.configuration else request.configuration
+             response = self.schema_method(configuration)
+
+             for entry in response:
+                 if 'table' not in entry:
+                     raise ValueError("Entry missing table name: " + str(entry))
+
+                 table_name = entry['table']
+
+                 if table_name in TABLES:
+                     raise ValueError("Table already defined: " + table_name)
+
+                 table = common_pb2.Table(name=table_name)
+                 columns = {}
+
+                 if "primary_key" not in entry:
+                     raise ValueError("Table requires at least one primary key: " + table_name)
+
+                 for pkey_name in entry["primary_key"]:
+                     column = columns[pkey_name] if pkey_name in columns else common_pb2.Column(name=pkey_name)
+                     column.primary_key = True
+                     columns[pkey_name] = column
+
+                 if "columns" in entry:
+                     for name, type in entry["columns"].items():
+                         column = columns[name] if name in columns else common_pb2.Column(name=name)
+
+                         if isinstance(type, str):
+                             if type.upper() == "BOOLEAN":
+                                 column.type = common_pb2.DataType.BOOLEAN
+                             elif type.upper() == "SHORT":
+                                 column.type = common_pb2.DataType.SHORT
+                             elif type.upper() == "LONG":
+                                 column.type = common_pb2.DataType.LONG
+                             elif type.upper() == "FLOAT":
+                                 column.type = common_pb2.DataType.FLOAT
+                             elif type.upper() == "DOUBLE":
+                                 column.type = common_pb2.DataType.DOUBLE
+                             elif type.upper() == "NAIVE_DATE":
+                                 column.type = common_pb2.DataType.NAIVE_DATE
+                             elif type.upper() == "NAIVE_DATETIME":
+                                 column.type = common_pb2.DataType.NAIVE_DATETIME
+                             elif type.upper() == "UTC_DATETIME":
+                                 column.type = common_pb2.DataType.UTC_DATETIME
+                             elif type.upper() == "BINARY":
+                                 column.type = common_pb2.DataType.BINARY
+                             elif type.upper() == "XML":
+                                 column.type = common_pb2.DataType.XML
+                             elif type.upper() == "STRING":
+                                 column.type = common_pb2.DataType.STRING
+                             elif type.upper() == "JSON":
+                                 column.type = common_pb2.DataType.JSON
+                             else:
+                                 raise ValueError(f"Unrecognized column type: {type}")
+
+                         elif isinstance(type, dict):
+                             if type['type'].upper() != "DECIMAL":
+                                 raise ValueError("Expecting DECIMAL data type")
+                             column.type = common_pb2.DataType.DECIMAL
+                             column.decimal.precision = type['precision']
+                             column.decimal.scale = type['scale']
+
+                         else:
+                             raise ValueError(f"Unrecognized column type: {type}")
+
+                         if name in entry["primary_key"]:
+                             column.primary_key = True
+
+                         columns[name] = column
+
+                 table.columns.extend(columns.values())
+                 TABLES[table_name] = table
+
+             return connector_sdk_pb2.SchemaResponse(without_schema=common_pb2.TableList(tables=TABLES.values()))
+
+     def Update(self, request, context):
+         configuration = self.configuration if self.configuration else request.configuration
+         state = self.state if self.state else json.loads(request.state_json)
+
+         try:
+             for resp in self.update_method(configuration=configuration, state=state):
+                 if isinstance(resp, list):
+                     for r in resp:
+                         yield r
+                 else:
+                     yield resp
+
+         except TypeError as e:
+             # An update function without a yield returns None instead of a
+             # generator; treat that as an empty batch rather than an error.
+             if str(e) != "'NoneType' object is not iterable":
+                 raise e
+
+
+ def find_connector_object(project_path):
+     module_name = "connector_connector_code"
+     connector_py = os.path.join(project_path, "connector.py")
+     spec = importlib.util.spec_from_file_location(module_name, connector_py)
+     module = importlib.util.module_from_spec(spec)
+     sys.modules[module_name] = module
+     spec.loader.exec_module(module)
+     for obj in dir(module):
+         if not obj.startswith('__'):  # Exclude built-in attributes
+             obj_attr = getattr(module, obj)
+             if '<fivetran_connector_sdk.Connector object at' in str(obj_attr):
+                 return obj_attr
+
+     print("Unable to find connector object")
+     sys.exit(1)
+
+
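+ # Optional flags fall back to the GROUP, CONNECTION, DEPLOY_KEY, CONFIGURATION
+ # and STATE environment variables; --configuration and --state accept either a
+ # JSON file path (resolved against the project path) or a JSON string.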
+ def main():
+     parser = argparse.ArgumentParser(allow_abbrev=False)
+
+     # Positional
+     parser.add_argument("command", help="debug|run|deploy")
+     parser.add_argument("project_path", nargs='?', default=os.getcwd(), help="Path to connector project directory")
+
+     # Optional (not all of these are valid with every command below)
+     parser.add_argument("--port", type=int, default=None, help="Provide port number to run gRPC server")
+     parser.add_argument("--state", type=str, default=None, help="Provide state as JSON string or file")
+     parser.add_argument("--configuration", type=str, default=None, help="Provide secrets as JSON string or file")
+     parser.add_argument("--deploy-key", type=str, default=None, help="Provide deploy key")
+     parser.add_argument("--group", type=str, default=None, help="Group name of the destination")
+     parser.add_argument("--connection", type=str, default=None, help="Connection name (aka 'destination schema')")
+
+     args = parser.parse_args()
+
+     connector_object = find_connector_object(args.project_path)
+
+     # Process optional args, falling back to environment variables
+     ft_group = args.group if args.group else os.getenv('GROUP', None)
+     ft_connection = args.connection if args.connection else os.getenv('CONNECTION', None)
+     deploy_key = args.deploy_key if args.deploy_key else os.getenv('DEPLOY_KEY', None)
+     configuration = args.configuration if args.configuration else os.getenv('CONFIGURATION', None)
+     state = args.state if args.state else os.getenv('STATE', None)
+
+     if configuration:
+         json_filepath = os.path.join(args.project_path, configuration)
+         if os.path.isfile(json_filepath):
+             with open(json_filepath, 'r') as fi:
+                 configuration = json.load(fi)
+         elif configuration.lstrip().startswith("{"):
+             configuration = json.loads(configuration)
+         else:
+             raise ValueError("Unrecognized format for configuration")
+     else:
+         configuration = {}
+
+     if state:
+         json_filepath = os.path.join(args.project_path, state)
+         if os.path.isfile(json_filepath):
+             with open(json_filepath, 'r') as fi:
+                 state = json.load(fi)
+         elif state.lstrip().startswith("{"):
+             state = json.loads(state)
+     else:
+         state = {}
+
+     if args.command.lower() == "deploy":
+         if args.port:
+             print("WARNING: 'port' parameter is not used for 'deploy' command")
+         if args.state:
+             print("WARNING: 'state' parameter is not used for 'deploy' command")
+         connector_object.deploy(args.project_path, deploy_key, ft_group, ft_connection, configuration)
+
+     elif args.command.lower() == "debug":
+         port = 50051 if not args.port else args.port
+         connector_object.debug(args.project_path, port, configuration, state)
+
+     elif args.command.lower() == "run":
+         port = 50051 if not args.port else args.port
+         connector_object.run(port, configuration, state)
+
+     else:
+         raise NotImplementedError(f"Invalid command: {args.command}")
+
+
+ if __name__ == "__main__":
+     main()
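+
+ # Illustrative CLI usage via main() (the console-script name is defined by the
+ # package metadata, which is not part of this diff):
+ #     <cli> debug ./my_connector --configuration configuration.json
+ #     <cli> deploy ./my_connector --deploy-key KEY --group GROUP --connection SCHEMA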