bitvavo-api-upgraded 2.0.0__py3-none-any.whl → 2.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,175 @@
1
+ """
2
+ Dataframe utilities for comprehensive dataframe library support using Narwhals.
3
+
4
+ This module provides utilities for converting API responses to dataframes
5
+ using Narwhals as a unified interface across multiple dataframe libraries:
6
+ pandas, polars, cuDF, modin, pyarrow, dask, duckdb, ibis, pyspark, and more.
7
+ """
8
+
9
+ from __future__ import annotations
10
+
11
+ from typing import Any
12
+
13
+ from bitvavo_api_upgraded.type_aliases import OutputFormat
14
+
15
+
16
def is_narwhals_available() -> bool:
    """Report whether the optional ``narwhals`` dependency is importable."""
    import importlib  # noqa: PLC0415

    try:
        importlib.import_module("narwhals")
    except ImportError:
        return False
    return True
24
+
25
+
26
def is_library_available(library_name: str) -> bool:
    """Report whether the dataframe library named *library_name* is importable."""
    # Map each user-facing library name onto the module path that must import.
    module_for = {
        "pandas": "pandas",
        "polars": "polars",
        "cudf": "cudf",
        "modin": "modin.pandas",
        "pyarrow": "pyarrow",
        "dask": "dask.dataframe",
        "duckdb": "duckdb",
        "ibis": "ibis",
        "pyspark": "pyspark.sql",
        "pyspark-connect": "pyspark.sql.connect",
        "sqlframe": "sqlframe",
    }

    target = module_for.get(library_name)
    if not target:
        # Unknown name: it cannot be one of the supported backends.
        return False

    try:
        __import__(target)
    except ImportError:
        return False
    return True
52
+
53
+
54
def _normalize_output_format(output_format: str | OutputFormat) -> OutputFormat:
    """Return *output_format* as an ``OutputFormat``, accepting plain strings.

    Raises:
        ValueError: if the string does not match any enum member's value.
    """
    if isinstance(output_format, OutputFormat):
        return output_format

    # Build a value -> member table for the string lookup.
    by_value = {member.value: member for member in OutputFormat}
    if output_format not in by_value:
        valid_values = list(by_value.keys())
        msg = f"Invalid output_format: {output_format}. Valid options: {valid_values}"
        raise ValueError(msg)

    return by_value[output_format]
67
+
68
+
69
def validate_output_format(output_format: str | OutputFormat) -> None:
    """Check that *output_format* is valid and its backing libraries exist.

    Raises:
        ValueError: for an unrecognised format string.
        ImportError: when narwhals or the target dataframe library is missing.
    """
    fmt = _normalize_output_format(output_format).value

    # Plain dict output needs no optional dependencies at all.
    if fmt == OutputFormat.DICT.value:
        return

    # Every dataframe format is driven through narwhals, so verify it first.
    if not is_narwhals_available():
        msg = f"narwhals is not available. Install with: pip install 'bitvavo-api-upgraded[{fmt}]'"
        raise ImportError(msg)

    # Then make sure the concrete dataframe library itself can be imported.
    if not is_library_available(fmt):
        msg = f"{fmt} is not available. Install with: pip install 'bitvavo-api-upgraded[{fmt}]'"
        raise ImportError(msg)
88
+
89
+
90
def convert_to_dataframe(data: Any, output_format: str | OutputFormat) -> Any:
    """Convert *data* (a list of records) to the requested dataframe format."""
    fmt = _normalize_output_format(output_format)
    validate_output_format(fmt)

    if fmt == OutputFormat.DICT:
        return data

    # Anything that is not a non-empty list is handed back untouched, keeping
    # dict-format compatibility for unusual API responses.
    if not isinstance(data, list) or not data:
        return data

    # Narwhals provides one conversion surface over all supported libraries.
    import narwhals as nw  # noqa: PLC0415

    if fmt in (OutputFormat.DASK, OutputFormat.DUCKDB):
        # dask and duckdb cannot be built straight from records; use the helper.
        native = _create_special_dataframe(data, fmt)
    else:
        # pandas acts as the intermediate representation for the common case.
        import pandas as pd  # noqa: PLC0415

        native = pd.DataFrame(data)

    # Round-trip through narwhals to guarantee a compatible native frame.
    return nw.from_native(native).to_native()
121
+
122
+
123
def _create_special_dataframe(data: Any, output_format: OutputFormat) -> Any:
    """Build dataframes for backends that cannot ingest records directly."""
    # All paths go through a pandas frame first, so build it once.
    import pandas as pd  # noqa: PLC0415

    frame = pd.DataFrame(data)

    if output_format == OutputFormat.DASK:
        import dask.dataframe as dd  # noqa: PLC0415

        # A single-partition dask frame mirrors the in-memory pandas frame.
        return dd.from_pandas(frame, npartitions=1)

    if output_format == OutputFormat.DUCKDB:
        import duckdb  # noqa: PLC0415

        # DuckDB exposes results as relations created from the pandas frame.
        return duckdb.connect().from_df(frame)

    # Fallback: return the plain pandas frame.
    return frame
146
+
147
+
148
def convert_candles_to_dataframe(data: Any, output_format: str | OutputFormat) -> Any:
    """Convert candlestick data to the requested format.

    Candlestick data comes as list of lists:
    [[timestamp, open, high, low, close, volume], ...]
    """
    fmt = _normalize_output_format(output_format)
    validate_output_format(fmt)

    if fmt == OutputFormat.DICT:
        return data

    if not isinstance(data, list) or not data:
        return data

    # Re-shape each well-formed candle row into a column-keyed record.
    columns = ["timestamp", "open", "high", "low", "close", "volume"]
    records = [
        dict(zip(columns, row))
        for row in data
        if isinstance(row, list) and len(row) >= len(columns)
    ]

    # No row matched the expected shape: hand the input back untouched.
    if not records:
        return data

    # Delegate to the standard conversion path for consistency.
    return convert_to_dataframe(records, fmt)
@@ -1,5 +1,8 @@
1
- import logging
1
+ from __future__ import annotations
2
+
3
+ import os
2
4
  from pathlib import Path
5
+ from typing import Literal
3
6
 
4
7
  from pydantic import Field, field_validator, model_validator
5
8
  from pydantic_settings import BaseSettings, SettingsConfigDict
@@ -13,44 +16,96 @@ class BitvavoApiUpgradedSettings(BaseSettings):
13
16
  then, but I figured that would be a bad idea.
14
17
  """
15
18
 
16
- LOG_LEVEL: str = Field("INFO")
17
- LOG_EXTERNAL_LEVEL: str = Field("WARNING")
18
- LAG: ms = Field(ms(50))
19
- RATE_LIMITING_BUFFER: int = Field(25)
19
+ LOG_LEVEL: Literal["CRITICAL", "FATAL", "ERROR", "WARN", "WARNING", "INFO", "DEBUG", "NOTSET"] = Field(
20
+ default="INFO",
21
+ description="Logging level for the application",
22
+ )
23
+ LOG_EXTERNAL_LEVEL: Literal["CRITICAL", "FATAL", "ERROR", "WARN", "WARNING", "INFO", "DEBUG", "NOTSET"] = Field(
24
+ default="WARNING",
25
+ description="Logging level for external libraries",
26
+ )
27
+ LAG: ms = Field(default=ms(50))
28
+ RATE_LIMITING_BUFFER: int = Field(default=25)
29
+
30
+ # Multi-API key settings
31
+ PREFER_KEYLESS: bool = Field(default=True, description="Prefer keyless requests over API key requests")
32
+ DEFAULT_RATE_LIMIT: int = Field(default=1000, description="Default rate limit for new API keys")
33
+
34
+ SSL_CERT_FILE: str | None = Field(
35
+ default=None,
36
+ description="Path to SSL certificate file for HTTPS/WSS connections",
37
+ )
20
38
 
21
39
  # Configuration for Pydantic Settings
22
- model_config = SettingsConfigDict(
40
+ model_config: SettingsConfigDict = SettingsConfigDict(
23
41
  env_file=Path.cwd() / ".env",
24
42
  env_file_encoding="utf-8",
25
43
  env_prefix="BITVAVO_API_UPGRADED_",
26
44
  extra="ignore",
27
45
  )
28
46
 
29
- @classmethod
30
47
  @field_validator("LOG_LEVEL", "LOG_EXTERNAL_LEVEL", mode="before")
48
+ @classmethod
31
49
  def validate_log_level(cls, v: str) -> str:
32
- if v not in logging._nameToLevel: # noqa: SLF001
33
- msg = f"Invalid log level: {v}"
50
+ valid_levels = {"CRITICAL", "FATAL", "ERROR", "WARN", "WARNING", "INFO", "DEBUG", "NOTSET"}
51
+ if v.upper() not in valid_levels:
52
+ msg = f"Invalid log level: {v}. Must be one of: {', '.join(valid_levels)}"
34
53
  raise ValueError(msg)
35
- return v
54
+ return v.upper()
55
+
56
+ @model_validator(mode="after")
57
+ def configure_ssl_certificate(self) -> BitvavoApiUpgradedSettings:
58
+ """Configure SSL certificate file path and set environment variable if needed."""
59
+ if self.SSL_CERT_FILE is None and "SSL_CERT_FILE" not in os.environ:
60
+ # Try to auto-detect SSL certificate file only if not already set in environment
61
+ common_ssl_cert_paths = [
62
+ "/etc/ssl/certs/ca-certificates.crt", # Debian/Ubuntu/NixOS
63
+ "/etc/ssl/certs/ca-bundle.crt", # CentOS/RHEL/Fedora
64
+ "/etc/ssl/cert.pem", # OpenBSD/macOS
65
+ "/usr/local/share/certs/ca-root-nss.crt", # FreeBSD
66
+ "/etc/pki/tls/certs/ca-bundle.crt", # Old CentOS/RHEL
67
+ ]
68
+
69
+ for cert_path in common_ssl_cert_paths:
70
+ if Path(cert_path).exists():
71
+ self.SSL_CERT_FILE = cert_path
72
+ break
73
+
74
+ # Set the environment variable if we have a certificate file
75
+ if self.SSL_CERT_FILE and Path(self.SSL_CERT_FILE).exists():
76
+ os.environ["SSL_CERT_FILE"] = self.SSL_CERT_FILE
77
+ elif self.SSL_CERT_FILE:
78
+ # User specified a path but it doesn't exist
79
+ msg = f"SSL certificate file not found: {self.SSL_CERT_FILE}"
80
+ raise FileNotFoundError(msg)
81
+
82
+ return self
36
83
 
37
84
 
38
85
  class BitvavoSettings(BaseSettings):
39
86
  """
40
87
  These are the base settings from the original library.
88
+ Enhanced to support multiple API keys.
41
89
  """
42
90
 
43
- ACCESSWINDOW: int = Field(10_000)
91
+ ACCESSWINDOW: int = Field(default=10_000)
44
92
  API_RATING_LIMIT_PER_MINUTE: int = Field(default=1000)
45
93
  API_RATING_LIMIT_PER_SECOND: int = Field(default=1000)
46
- APIKEY: str = Field(default="BITVAVO_APIKEY is missing")
47
- APISECRET: str = Field(default="BITVAVO_APISECRET is missing")
94
+ APIKEY: str = Field(default="")
95
+ APISECRET: str = Field(default="")
96
+
97
+ # Multiple API key support
98
+ APIKEYS: list[dict[str, str]] = Field(default_factory=list, description="List of API key/secret pairs")
99
+
48
100
  DEBUGGING: bool = Field(default=False)
49
101
  RESTURL: str = Field(default="https://api.bitvavo.com/v2")
50
102
  WSURL: str = Field(default="wss://ws.bitvavo.com/v2/")
51
103
 
104
+ # Multi-key specific settings
105
+ PREFER_KEYLESS: bool = Field(default=True)
106
+
52
107
  # Configuration for Pydantic Settings
53
- model_config = SettingsConfigDict(
108
+ model_config: SettingsConfigDict = SettingsConfigDict(
54
109
  env_file=Path.cwd() / ".env",
55
110
  env_file_encoding="utf-8",
56
111
  env_prefix="BITVAVO_",
@@ -58,8 +113,18 @@ class BitvavoSettings(BaseSettings):
58
113
  )
59
114
 
60
115
  @model_validator(mode="after")
61
- def set_api_rating_limit_per_second(self) -> "BitvavoSettings":
62
- self.API_RATING_LIMIT_PER_SECOND = self.API_RATING_LIMIT_PER_SECOND // 60
116
+ def set_api_rating_limit_per_second(self) -> BitvavoSettings:
117
+ # Create a new value instead of modifying the Field directly
118
+ object.__setattr__(self, "API_RATING_LIMIT_PER_SECOND", self.API_RATING_LIMIT_PER_SECOND // 60)
119
+ return self
120
+
121
+ @model_validator(mode="after")
122
+ def process_api_keys(self) -> BitvavoSettings:
123
+ """Process API keys from environment variables."""
124
+ # If single APIKEY/APISECRET provided and APIKEYS is empty, create APIKEYS list
125
+ if self.APIKEY and self.APISECRET and not self.APIKEYS:
126
+ object.__setattr__(self, "APIKEYS", [{"key": self.APIKEY, "secret": self.APISECRET}])
127
+
63
128
  return self
64
129
 
65
130
 
@@ -3,8 +3,24 @@ This file contains all type aliases that I use within the lib,
3
3
  to clarify the intention or semantics/meaning/unit of a variable
4
4
  """
5
5
 
6
+ import sys
6
7
  from typing import Any, Union
7
8
 
9
if sys.version_info >= (3, 11):
    from enum import StrEnum
else:
    # Python < 3.11 ships no StrEnum; provide a minimal drop-in replacement.
    from enum import Enum

    class StrEnum(str, Enum):
        """String enumeration for Python < 3.11 compatibility."""

        def __new__(cls, value: str) -> "StrEnum":
            # Build the str instance first, then record it as the member value.
            member = str.__new__(cls, value)
            member._value_ = value
            return member
22
+
23
+
8
24
  # type simplification
9
25
  anydict = dict[str, Any]
10
26
  strdict = dict[str, str]
@@ -23,3 +39,21 @@ us = int # microseconds, normally written as μs, but nobody has the μ (mu) sy
23
39
  s_f = float # seconds, but as float
24
40
  ms_f = float # milliseconds, but as float
25
41
  us_f = float # microseconds, but as float
42
+
43
+
44
+ # Dataframe output formats
45
+ class OutputFormat(StrEnum):
46
+ """Supported dataframe output formats."""
47
+
48
+ DICT = "dict" # standard dictionary format
49
+ PANDAS = "pandas" # pandas DataFrames
50
+ POLARS = "polars" # polars DataFrames
51
+ CUDF = "cudf" # NVIDIA cuDF (GPU-accelerated)
52
+ MODIN = "modin" # distributed pandas
53
+ PYARROW = "pyarrow" # Apache Arrow tables
54
+ DASK = "dask" # Dask DataFrames (distributed)
55
+ DUCKDB = "duckdb" # DuckDB relations
56
+ IBIS = "ibis" # Ibis expressions
57
+ PYSPARK = "pyspark" # PySpark DataFrames
58
+ PYSPARK_CONNECT = "pyspark-connect" # PySpark Connect
59
+ SQLFRAME = "sqlframe" # SQLFrame DataFrames