pgdbpool 0.97rc1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pgdbpool-0.97rc1/PKG-INFO +77 -0
- pgdbpool-0.97rc1/README.md +63 -0
- pgdbpool-0.97rc1/pgdbpool.egg-info/PKG-INFO +77 -0
- pgdbpool-0.97rc1/pgdbpool.egg-info/SOURCES.txt +12 -0
- pgdbpool-0.97rc1/pgdbpool.egg-info/dependency_links.txt +1 -0
- pgdbpool-0.97rc1/pgdbpool.egg-info/requires.txt +1 -0
- pgdbpool-0.97rc1/pgdbpool.egg-info/top_level.txt +1 -0
- pgdbpool-0.97rc1/pgdbpool.egg-info/zip-safe +1 -0
- pgdbpool-0.97rc1/pyproject.toml +25 -0
- pgdbpool-0.97rc1/setup.cfg +7 -0
- pgdbpool-0.97rc1/setup.py +21 -0
- pgdbpool-0.97rc1/src/__init__.py +1 -0
- pgdbpool-0.97rc1/src/pool.py +374 -0
|
@@ -0,0 +1,77 @@
|
|
|
1
|
+
Metadata-Version: 2.1
|
|
2
|
+
Name: pgdbpool
|
|
3
|
+
Version: 0.97rc1
|
|
4
|
+
Summary: A tiny database de-multiplexer primarily scoped for Web- / Application Server.
|
|
5
|
+
Author: Claus Prüfer
|
|
6
|
+
Author-email: Claus Prüfer <pruefer@webcodex.de>
|
|
7
|
+
Project-URL: Homepage, https://github.com/clauspruefer/python-db-pool
|
|
8
|
+
Project-URL: Issues, https://github.com/clauspruefer/python-db-pool/issues
|
|
9
|
+
Classifier: Programming Language :: Python :: 3
|
|
10
|
+
Classifier: License :: OSI Approved :: GNU General Public License v3 (GPLv3)
|
|
11
|
+
Classifier: Operating System :: OS Independent
|
|
12
|
+
Requires-Python: >=3.8
|
|
13
|
+
Description-Content-Type: text/markdown
|
|
14
|
+
|
|
15
|
+
# Python PgDatabase-Pool Module
|
|
16
|
+
|
|
17
|
+
## 1. Primary Scope
|
|
18
|
+
|
|
19
|
+
The **pgdbpool** Python Module is a tiny PostgreSQL Database Connection De-Multiplexer primarily scoped for *Web- / Application Server*.
|
|
20
|
+
|
|
21
|
+
## 2. Current Implementation
|
|
22
|
+
|
|
23
|
+
```bash
|
|
24
|
+
|
|
25
|
+
+----------------------+ +--------------- - - -
|
|
26
|
+
| WebServer Service.py | -- Handler Con #1 ----> | PostgreSQL
|
|
27
|
+
| Request / Thread #1 | | Backend
|
|
28
|
+
+----------------------+ |
|
|
29
|
+
|
|
|
30
|
+
+----------------------+ |
|
|
31
|
+
| WebServer Service.py | -- Handler Con #2 ----> |
|
|
32
|
+
| Request / Thread #2 | |
|
|
33
|
+
+----------------------+ +--------------- - - -
|
|
34
|
+
```
|
|
35
|
+
|
|
36
|
+
### 2.1. Concept / Simplicity
|
|
37
|
+
|
|
38
|
+
If configured in a Web-Servers WSGI Python Script, the Pooling-Logic is quite simple.
|
|
39
|
+
|
|
40
|
+
1. Check if a free connection in the pool exists
|
|
41
|
+
2. Check if connection usable (SQL ping)
|
|
42
|
+
3. Use connection and protect it from being used until query/queries finished
|
|
43
|
+
4. Release connection for usage again
|
|
44
|
+
5. Try reconnecting to endpoint if connection has been lost
|
|
45
|
+
|
|
46
|
+
## 3. Thread Safety / Global Interpreter Lock
|
|
47
|
+
|
|
48
|
+
Currently Thread Safety is guaranteed by `lock = threading.Lock()` which implies a Kernel Mutex syscall().
|
|
49
|
+
|
|
50
|
+
The concept works, but the GIL (Python Global Interpreter Lock) thwarts our plans 😞.
|
|
51
|
+
|
|
52
|
+
In detail: if used in a threaded Web-Server setup, it does not really scale well on heavy loads.
|
|
53
|
+
|
|
54
|
+
>[!IMPORTANT]
|
|
55
|
+
> Take a closer look at **"6. Future"**, problem solved probably.
|
|
56
|
+
|
|
57
|
+
## 4. Dependencies / Installation
|
|
58
|
+
|
|
59
|
+
**Python 3** and **psycopg2** module is required.
|
|
60
|
+
|
|
61
|
+
```bash
|
|
62
|
+
# apt-get install python3-psycopg2
|
|
63
|
+
# pip install pgdbpool
|
|
64
|
+
```
|
|
65
|
+
|
|
66
|
+
## 5. Documentation / Examples
|
|
67
|
+
|
|
68
|
+
See documentation [./doc](./doc) for detailed explanation / illustrative examples.
|
|
69
|
+
|
|
70
|
+
## 6. Future
|
|
71
|
+
|
|
72
|
+
DB-Pooling also should be usable in FalconAS Python Application Server (https://github.com/WEBcodeX1/http-1.2/).
|
|
73
|
+
|
|
74
|
+
The model here: 1 Process == 1 Python Interpreter (threading-less), GIL Problem solved :grin:.
|
|
75
|
+
|
|
76
|
+
>[!NOTE]
|
|
77
|
+
> Also a Pool should be configurable to use multiple (read-loadbalanced) PostgreSQL Endpoints.
|
|
@@ -0,0 +1,63 @@
|
|
|
1
|
+
# Python PgDatabase-Pool Module
|
|
2
|
+
|
|
3
|
+
## 1. Primary Scope
|
|
4
|
+
|
|
5
|
+
The **pgdbpool** Python Module is a tiny PostgreSQL Database Connection De-Multiplexer primarily scoped for *Web- / Application Server*.
|
|
6
|
+
|
|
7
|
+
## 2. Current Implementation
|
|
8
|
+
|
|
9
|
+
```bash
|
|
10
|
+
|
|
11
|
+
+----------------------+ +--------------- - - -
|
|
12
|
+
| WebServer Service.py | -- Handler Con #1 ----> | PostgreSQL
|
|
13
|
+
| Request / Thread #1 | | Backend
|
|
14
|
+
+----------------------+ |
|
|
15
|
+
|
|
|
16
|
+
+----------------------+ |
|
|
17
|
+
| WebServer Service.py | -- Handler Con #2 ----> |
|
|
18
|
+
| Request / Thread #2 | |
|
|
19
|
+
+----------------------+ +--------------- - - -
|
|
20
|
+
```
|
|
21
|
+
|
|
22
|
+
### 2.1. Concept / Simplicity
|
|
23
|
+
|
|
24
|
+
If configured in a Web-Servers WSGI Python Script, the Pooling-Logic is quite simple.
|
|
25
|
+
|
|
26
|
+
1. Check if a free connection in the pool exists
|
|
27
|
+
2. Check if connection usable (SQL ping)
|
|
28
|
+
3. Use connection and protect it from being used until query/queries finished
|
|
29
|
+
4. Release connection for usage again
|
|
30
|
+
5. Try reconnecting to endpoint if connection has been lost
|
|
31
|
+
|
|
32
|
+
## 3. Thread Safety / Global Interpreter Lock
|
|
33
|
+
|
|
34
|
+
Currently Thread Safety is guaranteed by `lock = threading.Lock()` which implies a Kernel Mutex syscall().
|
|
35
|
+
|
|
36
|
+
The concept works, but the GIL (Python Global Interpreter Lock) thwarts our plans 😞.
|
|
37
|
+
|
|
38
|
+
In detail: if used in a threaded Web-Server setup, it does not really scale well on heavy loads.
|
|
39
|
+
|
|
40
|
+
>[!IMPORTANT]
|
|
41
|
+
> Take a closer look at **"6. Future"**, problem solved probably.
|
|
42
|
+
|
|
43
|
+
## 4. Dependencies / Installation
|
|
44
|
+
|
|
45
|
+
**Python 3** and **psycopg2** module is required.
|
|
46
|
+
|
|
47
|
+
```bash
|
|
48
|
+
# apt-get install python3-psycopg2
|
|
49
|
+
# pip install pgdbpool
|
|
50
|
+
```
|
|
51
|
+
|
|
52
|
+
## 5. Documentation / Examples
|
|
53
|
+
|
|
54
|
+
See documentation [./doc](./doc) for detailed explanation / illustrative examples.
|
|
55
|
+
|
|
56
|
+
## 6. Future
|
|
57
|
+
|
|
58
|
+
DB-Pooling also should be usable in FalconAS Python Application Server (https://github.com/WEBcodeX1/http-1.2/).
|
|
59
|
+
|
|
60
|
+
The model here: 1 Process == 1 Python Interpreter (threading-less), GIL Problem solved :grin:.
|
|
61
|
+
|
|
62
|
+
>[!NOTE]
|
|
63
|
+
> Also a Pool should be configurable to use multiple (read-loadbalanced) PostgreSQL Endpoints.
|
|
@@ -0,0 +1,77 @@
|
|
|
1
|
+
Metadata-Version: 2.1
|
|
2
|
+
Name: pgdbpool
|
|
3
|
+
Version: 0.97rc1
|
|
4
|
+
Summary: A tiny database de-multiplexer primarily scoped for Web- / Application Server.
|
|
5
|
+
Author: Claus Prüfer
|
|
6
|
+
Author-email: Claus Prüfer <pruefer@webcodex.de>
|
|
7
|
+
Project-URL: Homepage, https://github.com/clauspruefer/python-db-pool
|
|
8
|
+
Project-URL: Issues, https://github.com/clauspruefer/python-db-pool/issues
|
|
9
|
+
Classifier: Programming Language :: Python :: 3
|
|
10
|
+
Classifier: License :: OSI Approved :: GNU General Public License v3 (GPLv3)
|
|
11
|
+
Classifier: Operating System :: OS Independent
|
|
12
|
+
Requires-Python: >=3.8
|
|
13
|
+
Description-Content-Type: text/markdown
|
|
14
|
+
|
|
15
|
+
# Python PgDatabase-Pool Module
|
|
16
|
+
|
|
17
|
+
## 1. Primary Scope
|
|
18
|
+
|
|
19
|
+
The **pgdbpool** Python Module is a tiny PostgreSQL Database Connection De-Multiplexer primarily scoped for *Web- / Application Server*.
|
|
20
|
+
|
|
21
|
+
## 2. Current Implementation
|
|
22
|
+
|
|
23
|
+
```bash
|
|
24
|
+
|
|
25
|
+
+----------------------+ +--------------- - - -
|
|
26
|
+
| WebServer Service.py | -- Handler Con #1 ----> | PostgreSQL
|
|
27
|
+
| Request / Thread #1 | | Backend
|
|
28
|
+
+----------------------+ |
|
|
29
|
+
|
|
|
30
|
+
+----------------------+ |
|
|
31
|
+
| WebServer Service.py | -- Handler Con #2 ----> |
|
|
32
|
+
| Request / Thread #2 | |
|
|
33
|
+
+----------------------+ +--------------- - - -
|
|
34
|
+
```
|
|
35
|
+
|
|
36
|
+
### 2.1. Concept / Simplicity
|
|
37
|
+
|
|
38
|
+
If configured in a Web-Servers WSGI Python Script, the Pooling-Logic is quite simple.
|
|
39
|
+
|
|
40
|
+
1. Check if a free connection in the pool exists
|
|
41
|
+
2. Check if connection usable (SQL ping)
|
|
42
|
+
3. Use connection and protect it from being used until query/queries finished
|
|
43
|
+
4. Release connection for usage again
|
|
44
|
+
5. Try reconnecting to endpoint if connection has been lost
|
|
45
|
+
|
|
46
|
+
## 3. Thread Safety / Global Interpreter Lock
|
|
47
|
+
|
|
48
|
+
Currently Thread Safety is guaranteed by `lock = threading.Lock()` which implies a Kernel Mutex syscall().
|
|
49
|
+
|
|
50
|
+
The concept works, but the GIL (Python Global Interpreter Lock) thwarts our plans 😞.
|
|
51
|
+
|
|
52
|
+
In detail: if used in a threaded Web-Server setup, it does not really scale well on heavy loads.
|
|
53
|
+
|
|
54
|
+
>[!IMPORTANT]
|
|
55
|
+
> Take a closer look at **"6. Future"**, problem solved probably.
|
|
56
|
+
|
|
57
|
+
## 4. Dependencies / Installation
|
|
58
|
+
|
|
59
|
+
**Python 3** and **psycopg2** module is required.
|
|
60
|
+
|
|
61
|
+
```bash
|
|
62
|
+
# apt-get install python3-psycopg2
|
|
63
|
+
# pip install pgdbpool
|
|
64
|
+
```
|
|
65
|
+
|
|
66
|
+
## 5. Documentation / Examples
|
|
67
|
+
|
|
68
|
+
See documentation [./doc](./doc) for detailed explanation / illustrative examples.
|
|
69
|
+
|
|
70
|
+
## 6. Future
|
|
71
|
+
|
|
72
|
+
DB-Pooling also should be usable in FalconAS Python Application Server (https://github.com/WEBcodeX1/http-1.2/).
|
|
73
|
+
|
|
74
|
+
The model here: 1 Process == 1 Python Interpreter (threading-less), GIL Problem solved :grin:.
|
|
75
|
+
|
|
76
|
+
>[!NOTE]
|
|
77
|
+
> Also a Pool should be configurable to use multiple (read-loadbalanced) PostgreSQL Endpoints.
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
README.md
|
|
2
|
+
pyproject.toml
|
|
3
|
+
setup.cfg
|
|
4
|
+
setup.py
|
|
5
|
+
pgdbpool.egg-info/PKG-INFO
|
|
6
|
+
pgdbpool.egg-info/SOURCES.txt
|
|
7
|
+
pgdbpool.egg-info/dependency_links.txt
|
|
8
|
+
pgdbpool.egg-info/requires.txt
|
|
9
|
+
pgdbpool.egg-info/top_level.txt
|
|
10
|
+
pgdbpool.egg-info/zip-safe
|
|
11
|
+
src/__init__.py
|
|
12
|
+
src/pool.py
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
psycopg2
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
pgdbpool
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
|
|
@@ -0,0 +1,25 @@
|
|
|
1
|
+
[build-system]
|
|
2
|
+
requires = ["setuptools>=61.0"]
|
|
3
|
+
build-backend = "setuptools.build_meta"
|
|
4
|
+
|
|
5
|
+
[project]
|
|
6
|
+
name = "pgdbpool"
|
|
7
|
+
version = "0.97rc1"
|
|
8
|
+
authors = [
|
|
9
|
+
{ name="Claus Prüfer", email="pruefer@webcodex.de" },
|
|
10
|
+
]
|
|
11
|
+
description = "A tiny database de-multiplexer primarily scoped for Web- / Application Server."
|
|
12
|
+
readme = "README.md"
|
|
13
|
+
requires-python = ">=3.8"
|
|
14
|
+
classifiers = [
|
|
15
|
+
"Programming Language :: Python :: 3",
|
|
16
|
+
"License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
|
|
17
|
+
"Operating System :: OS Independent",
|
|
18
|
+
]
|
|
19
|
+
dependencies = [
|
|
20
|
+
"psycopg2"
|
|
21
|
+
]
|
|
22
|
+
|
|
23
|
+
[project.urls]
|
|
24
|
+
Homepage = "https://github.com/clauspruefer/python-db-pool"
|
|
25
|
+
Issues = "https://github.com/clauspruefer/python-db-pool/issues"
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
from setuptools import setup
|
|
2
|
+
|
|
3
|
+
setup(
|
|
4
|
+
|
|
5
|
+
name = 'pgdbpool',
|
|
6
|
+
author = 'Claus Prüfer',
|
|
7
|
+
author_email = 'pruefer@webcodex.de',
|
|
8
|
+
description = 'A tiny database de-multiplexer primarily scoped for Web- / Application Server.',
|
|
9
|
+
long_description = open('./README.md').read(),
|
|
10
|
+
|
|
11
|
+
packages = [
|
|
12
|
+
'pgdbpool'
|
|
13
|
+
],
|
|
14
|
+
|
|
15
|
+
package_dir = {
|
|
16
|
+
'pgdbpool': 'src/'
|
|
17
|
+
},
|
|
18
|
+
|
|
19
|
+
zip_safe = True
|
|
20
|
+
|
|
21
|
+
)
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
|
|
@@ -0,0 +1,374 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import time
|
|
3
|
+
import json
|
|
4
|
+
import copy
|
|
5
|
+
import logging
|
|
6
|
+
import threading
|
|
7
|
+
|
|
8
|
+
import psycopg2
|
|
9
|
+
|
|
10
|
+
from psycopg2 import extras
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class DBConnectionError(Exception):
    """
    Raised when establishing a connection to the database backend fails
    (see Connection.connect()).
    """
    pass
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class DBQueryError(Exception):
    """
    Raised when executing a SQL statement fails (see Query.execute()).
    """
    pass
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
class DBOfflineError(Exception):
    """
    Raised when the database does not answer the SQL ping
    (see Query.check_db()).
    """
    pass
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
class UnconfiguredGroupError(Exception):
    """
    Raised when a connection group name is not present in the pool
    configuration (see Connection.get_next_connection()).
    """
    pass
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
def conn_iter(connection_group):
    """
    Endless round-robin generator over the pool of *connection_group*.

    Yields either:

    - ``(connection_id, conn_ref)`` for a connection that has been
      atomically flagged 'occupied' for the caller, or
    - ``None`` when every connection in the pool is currently occupied
      (callers back off and retry, see Handler.__init__()).

    Bugfix: the index wrap-around check now runs on *every* loop
    iteration. If it only runs on the 'occupied' branch, the 'free'
    branch can leave ``connection_id`` equal to the pool size and the
    next lookup raises IndexError.
    """
    logger = logging.getLogger(__name__)

    connection_id = 0
    max_pool_size = Connection.get_max_pool_size(connection_group)

    while True:

        connection = (connection_group, connection_id)
        (conn_ref, status) = Connection.get_connection(connection)

        logger.debug('iterator group:{} id:{} conn_ref:{} status:{}'.format(
            connection_group,
            connection_id,
            conn_ref,
            status
            )
        )

        if status == 'free':
            Connection.set_connection_status(connection, 'occupied')
            yield (connection_id, conn_ref)
            connection_id += 1
        else:
            connection_id += 1
            # whole pool occupied: signal the caller to back off / retry
            if Connection.get_connection_count(connection) == max_pool_size:
                yield None

        # wrap around at the end of the pool (checked every iteration)
        if connection_id == max_pool_size:
            connection_id = 0
|
|
69
|
+
|
|
70
|
+
|
|
71
|
+
def conn_iter_locked(iterator):
    """
    Wrap *iterator* so that every advance happens under a mutex.

    Yields each item of *iterator* unchanged and terminates cleanly
    when the wrapped iterator is exhausted.

    NOTE(review): the Lock is created per wrapped iterator, so it only
    guards the ``next()`` call inside this one generator — confirm
    whether cross-thread protection is actually achieved this way.
    """
    guard = threading.Lock()

    while True:
        with guard:
            try:
                item = next(iterator)
            except StopIteration:
                return
        yield item
|
|
80
|
+
|
|
81
|
+
|
|
82
|
+
class Connection(object):
    """
    Static connection pool manager.

    Pool state lives in the class-level ``_config`` dict:
    ``_config['groups'][group]['connections']`` is a list of
    ``(conn_ref, status)`` tuples, status one of 'connecting',
    'free', 'occupied'.
    """

    # Single class-wide mutex. Bugfix: the previous implementation
    # created a fresh threading.Lock() inside each call, which is
    # always uncontended and therefore synchronized nothing.
    _lock = threading.Lock()

    @classmethod
    def init(cls, config):
        """Store *config* and build every configured connection group."""
        cls.logger = logging.getLogger(__name__)
        cls._config = config
        cls._init_class()

    @classmethod
    def _init_class(cls):
        """Export session settings via PGOPTIONS, then set up groups."""

        db_config = cls._config['db']

        statement_timeout = 'statement_timeout={}'.format(db_config['query_timeout'])
        temp_buffers = 'temp_buffers={}MB'.format(db_config['session_tmp_buffer'])

        # libpq reads PGOPTIONS when a connection is established
        os.environ['PGOPTIONS'] = '-c {timeout} -c {buffers}'.format(
            timeout = statement_timeout,
            buffers = temp_buffers
        )

        cls._setup_groups()

    @classmethod
    def _setup_groups(cls):
        """Attach a locked round-robin iterator to each group and connect."""
        for group in cls._config['groups']:
            cls._config['groups'][group]['connection_iter'] = conn_iter_locked(
                conn_iter(group)
            )
            cls._setup_connections(group)

    @classmethod
    def _setup_connections(cls, group):
        """Open ``connection_count`` physical connections for *group*."""

        group_container = cls._config['groups'][group]
        group_container['connections'] = []
        connection_container = group_container['connections']

        # renamed loop var: 'id' shadowed the builtin
        for conn_id in range(0, group_container['connection_count']):
            connection_container.append(
                (None, 'connecting')
            )
            cls.connect((group, conn_id))
        cls.logger.debug(cls._config)

    @classmethod
    def get_max_pool_size(cls, group):
        """Return the configured connection count for *group*."""
        return cls._config['groups'][group]['connection_count']

    @classmethod
    def get_connection_iter_container(cls, group):
        """Return the shared (locked) connection iterator for *group*."""
        return cls._config['groups'][group]['connection_iter']

    @classmethod
    def get_connection_container(cls, connection):
        """Return the (conn_ref, status) tuple for ``(group, id)``."""
        (group, conn_id) = connection
        return cls._config['groups'][group]['connections'][conn_id]

    @classmethod
    def get_connection(cls, connection):
        """Alias for get_connection_container()."""
        return cls.get_connection_container(connection)

    @classmethod
    def get_connection_count(cls, connection):
        """Return the number of 'occupied' connections in the group."""
        (group, _conn_id) = connection
        connections = cls._config['groups'][group]['connections']
        return sum(
            1 for (_conn_ref, status) in connections if status == 'occupied'
        )

    @classmethod
    def set_connection_status(cls, connection, status):
        """Atomically set *status* ('free' / 'occupied') for *connection*."""
        assert status in ['occupied', 'free'], 'status must be free or occupied'
        with cls._lock:
            (group, conn_id) = connection
            connections = cls._config['groups'][group]['connections']
            new_connection = (connections[conn_id][0], status)
            connections[conn_id] = new_connection
            cls.logger.debug('set status id:{} status:{} con_ref:{}'.format(
                conn_id,
                status,
                new_connection[0]
                )
            )

    @classmethod
    def get_next_connection(cls, group):
        """
        Advance the group iterator; return ``(conn_id, conn_ref)`` or
        ``None`` (pool exhausted).

        :raises UnconfiguredGroupError: when *group* is not configured
        """
        try:
            return next(cls.get_connection_iter_container(group))
        except KeyError:
            raise UnconfiguredGroupError

    @classmethod
    def connect(cls, connection):
        """
        (Re-)establish the physical connection for ``(group, id)`` and
        mark it 'free'; apply optional autocommit / sqlprepare settings.

        :raises DBConnectionError: on any failure, chained to the cause
        """

        (conn_group, conn_id) = connection

        try:

            db_container = cls._config['db']
            group_container = cls._config['groups'][conn_group]

            with cls._lock:

                group_container['connections'][conn_id] = (
                    psycopg2.connect(
                        dbname = db_container['name'],
                        user = db_container['user'],
                        host = db_container['host'],
                        password = db_container['pass'],
                        sslmode = db_container['ssl'],
                        connect_timeout = db_container['connect_timeout']
                    ),
                    'free'
                )

            conn_ref = group_container['connections'][conn_id][0]

            if 'autocommit' in group_container and group_container['autocommit'] is True:
                extension = psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT
                conn_ref.set_isolation_level(extension)

            if 'sqlprepare' in group_container and group_container['sqlprepare'] is True:
                tmpCursor = conn_ref.cursor(
                    cursor_factory = psycopg2.extras.DictCursor
                )
                tmpCursor.callproc('"SQLPrepare"."PrepareQueries"')

        except Exception as e:
            # chain the original cause instead of discarding it
            raise DBConnectionError from e

    @classmethod
    def reconnect(cls, connection):
        """
        SQL-ping *connection*; when offline, retry connect() up to 10
        times, sleeping ``connection_retry_sleep`` seconds in between.
        After 10 failures it returns silently (existing best-effort
        behavior; the subsequent query will raise).
        """
        try:
            Query.check_db(connection)
        except DBOfflineError:
            for _attempt in range(0, 10):
                try:
                    Connection.connect(connection)
                    return
                except Exception:
                    time.sleep(cls._config['db']['connection_retry_sleep'])
|
|
261
|
+
|
|
262
|
+
|
|
263
|
+
class Query(object):
    """
    Query Class: stateless helpers that execute SQL on a pooled
    connection addressed by a ``(group, id)`` tuple.
    """

    @staticmethod
    def execute_prepared(connection, sql_params):
        """
        Call the server-side prepared-query executor and return its
        first result column (JSON, per the stored procedure contract).

        On any error a JSON error document is returned instead of
        raising.

        :param connection: ``(group, id)`` pool address
        :param sql_params: parameters for "SQLPrepare"."ExecuteQuery"
        """

        assert sql_params is not None, "sql_params must be given."

        Connection.reconnect(connection)
        (conn_ref, status) = Connection.get_connection(connection)

        try:
            tmpCursor = conn_ref.cursor(cursor_factory=psycopg2.extras.DictCursor)
            tmpCursor.callproc('"SQLPrepare"."ExecuteQuery"', sql_params)
            rec = tmpCursor.fetchone()
            return rec[0]
        except Exception as e:
            # Bugfix: Python 3 exceptions have no .message attribute;
            # the old code raised AttributeError inside this handler.
            ErrorJSON = {}
            ErrorJSON['error'] = True
            ErrorJSON['exception'] = type(e).__name__
            ErrorJSON['exceptionCause'] = str(e)
            return json.dumps(ErrorJSON)

    @staticmethod
    def execute(connection, sql_statement, sql_params=None):
        """
        Execute *sql_statement* (optionally parameterized) and return
        the open cursor for fetching.

        :raises DBQueryError: wrapping the original exception repr
        """

        Connection.reconnect(connection)
        (conn_ref, status) = Connection.get_connection(connection)

        try:
            tmpCursor = conn_ref.cursor(cursor_factory=psycopg2.extras.DictCursor)
            tmpCursor.execute(sql_statement, sql_params)
            return tmpCursor
        except Exception as e:
            raise DBQueryError(repr(e))

    @staticmethod
    def check_db(connection):
        """
        SQL-ping the connection with a trivial SELECT.

        :raises DBOfflineError: when the query cannot be executed
        """
        (conn_ref, status) = Connection.get_connection(connection)
        try:
            tmpCursor = conn_ref.cursor(cursor_factory=psycopg2.extras.DictCursor)
            tmpCursor.execute("SELECT to_char(now(), 'HH:MI:SS') AS result")
            rec = tmpCursor.fetchone()
        except Exception:
            raise DBOfflineError
|
|
316
|
+
|
|
317
|
+
|
|
318
|
+
class Handler(object):
    """
    (Query) Handler Class.

    Context manager that acquires one pooled connection per instance;
    on exit the connection is committed (best effort) and released
    back to the pool.
    """

    def __init__(self, group):
        """Poll (0.1s interval) until a free connection is acquired."""

        self.logger = logging.getLogger(__name__)
        self._group = group

        while True:
            try:
                (self._conn_id, self.conn_ref) = Connection.get_next_connection(group)
            except TypeError:
                # pool exhausted (iterator yielded None): back off, retry
                time.sleep(0.1)
                continue
            self._connection = (self._group, self._conn_id)
            self.logger.debug('handler connection:{}'.format(self._connection))
            return

    def __enter__(self):
        """Return self for use in a ``with`` statement."""
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Release the pooled connection on context exit."""
        self._cleanup()

    def query(self, statement, params=None):
        """Execute *statement* on this handler's connection, return cursor."""
        return Query.execute(self._connection, statement, params)

    def query_prepared(self, params):
        """Execute the server-side prepared query with *params*."""
        return Query.execute_prepared(self._connection, params)

    def _cleanup(self):
        """Commit best-effort, then flag the connection 'free' again."""

        self.logger.debug('cleanup connection:{}'.format(self._connection))

        try:
            self.conn_ref.commit()
        except Exception:
            # best-effort: the connection may already be unusable
            pass

        Connection.set_connection_status(
            (self._group, self._conn_id),
            'free'
        )
|