cryptodatapy 0.2.7-py3-none-any.whl → 0.2.8-py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as published to their public registry. It is provided for informational purposes only.
- cryptodatapy/extract/datarequest.py +27 -1
- cryptodatapy/extract/libraries/Untitled.ipynb +168 -2
- cryptodatapy/extract/libraries/ccxt_api.py +17 -17
- cryptodatapy/extract/libraries/pandasdr_api.py +151 -137
- cryptodatapy/transform/convertparams.py +73 -164
- cryptodatapy/transform/wrangle.py +43 -23
- {cryptodatapy-0.2.7.dist-info → cryptodatapy-0.2.8.dist-info}/METADATA +1 -1
- {cryptodatapy-0.2.7.dist-info → cryptodatapy-0.2.8.dist-info}/RECORD +10 -10
- {cryptodatapy-0.2.7.dist-info → cryptodatapy-0.2.8.dist-info}/LICENSE +0 -0
- {cryptodatapy-0.2.7.dist-info → cryptodatapy-0.2.8.dist-info}/WHEEL +0 -0
@@ -688,108 +688,78 @@ class ConvertParams:
         # convert tickers
         with resources.path("cryptodatapy.conf", "tickers.csv") as f:
             tickers_path = f
-        tickers_df
+        tickers_df = pd.read_csv(tickers_path, index_col=0, encoding="latin1")
 
-        if self.data_req.source_tickers is
-
-            self.data_req.tickers = self.data_req.source_tickers
-        else:
+        if self.data_req.source_tickers is None:
+            self.data_req.source_tickers = []
             for ticker in self.data_req.tickers:
                 try:
-
+                    self.data_req.source_tickers.append(tickers_df.loc[ticker, "fred_id"])
                 except KeyError:
                     logging.warning(
-                        f"{ticker} not found for Fred
+                        f"{ticker} not found for Fred source. Check tickers in"
                         f" data catalog and try again."
                     )
-
-        #
-        if self.data_req.source_freq is
-
-
-        else:
-            freq = self.data_req.freq
-        # convert quote ccy
-        quote_ccy = self.data_req.quote_ccy
+
+        # freq
+        if self.data_req.source_freq is None:
+            self.data_req.source_freq = self.data_req.freq
+
         # start date
-        if self.data_req.
-
+        if self.data_req.source_start_date is None:
+            self.data_req.source_start_date = pd.Timestamp('1920-01-01')
         else:
-
+            self.data_req.source_start_date = self.data_req.start_date
+
         # end date
         if self.data_req.end_date is None:
-
+            self.data_req.source_end_date = pd.Timestamp.utcnow().tz_localize(None)
         else:
-
+            self.data_req.source_end_date = self.data_req.end_date
+
         # fields
-        if self.data_req.source_fields is
-
-
-        else:
-            fields = self.convert_fields(data_source='fred')
+        if self.data_req.source_fields is None:
+            self.data_req.source_fields = self.convert_fields(data_source='fred')
+
         # tz
         if self.data_req.tz is None:
-            tz = "America/New_York"
-        else:
-            tz = self.data_req.tz
+            self.data_req.tz = "America/New_York"
 
-        return
-            "tickers": tickers,
-            "freq": freq,
-            "quote_ccy": quote_ccy,
-            "exch": self.data_req.exch,
-            "ctys": None,
-            "mkt_type": self.data_req.mkt_type,
-            "mkts": None,
-            "start_date": start_date,
-            "end_date": end_date,
-            "fields": fields,
-            "tz": tz,
-            "inst": None,
-            "cat": self.data_req.cat,
-            "trials": self.data_req.trials,
-            "pause": self.data_req.pause,
-            "source_tickers": self.data_req.source_tickers,
-            "source_freq": self.data_req.source_freq,
-            "source_fields": self.data_req.source_fields,
-        }
+        return self.data_req
 
     def to_wb(self) -> Dict[str, Union[list, str, int, float, datetime, None]]:
         """
         Convert tickers from CryptoDataPy to Yahoo Finance format.
         """
-        #
+        # tickers
         with resources.path("cryptodatapy.conf", "tickers.csv") as f:
             tickers_path = f
-        tickers_df
+        tickers_df = pd.read_csv(tickers_path, index_col=0, encoding="latin1")
 
-        if self.data_req.source_tickers is
-
-            self.data_req.tickers = self.data_req.source_tickers
-        else:
+        if self.data_req.source_tickers is None:
+            self.data_req.source_tickers = []
             for ticker in self.data_req.tickers:
                 try:
-
+                    self.data_req.source_tickers.append(tickers_df.loc[ticker, "wb_id"])
                 except KeyError:
                     logging.warning(
-                        f"{ticker} not found for World Bank
+                        f"{ticker} not found for World Bank source. Check tickers in"
                         f" data catalog and try again."
                    )
-                    self.data_req.tickers.remove(ticker)
         # drop dupes
-
-
-
-
-        self.data_req.
-
-            freq = self.data_req.freq
+        self.data_req.source_tickers = list(set(self.data_req.source_tickers))
+
+        # freq
+        if self.data_req.source_freq is None:
+            self.data_req.source_freq = self.data_req.freq
+
         # convert quote ccy
         if self.data_req.quote_ccy is None:
-            quote_ccy = "USD"
+            self.data_req.quote_ccy = "USD"
         else:
-            quote_ccy = self.data_req.quote_ccy.upper()
-
+            self.data_req.quote_ccy = self.data_req.quote_ccy.upper()
+
+        # ctys
         ctys_list = []
         if self.data_req.cat == "macro":
             for ticker in self.data_req.tickers:
@@ -800,62 +770,44 @@ class ConvertParams:
                         f"{ticker} not found for {self.data_req.source} source. Check tickers in "
                         f"data catalog and try again."
                     )
-
+        self.data_req.ctys = list(set(ctys_list))
+
         # start date
         if self.data_req.start_date is None:
-
+            self.data_req.source_start_date = 1920
         else:
-
+            self.data_req.source_start_date = int(self.data_req.start_date.year)
+
         # end date
         if self.data_req.end_date is None:
-
+            self.data_req.source_end_date = pd.Timestamp.utcnow().year
         else:
-
+            self.data_req.source_end_date = int(self.data_req.end_date.year)
+
         # fields
-        if self.data_req.source_fields is
-
-            self.data_req.fields = self.data_req.source_fields
-        else:
-            fields = self.convert_fields(data_source='wb')
+        if self.data_req.source_fields is None:
+            self.data_req.source_fields = self.convert_fields(data_source='wb')
 
-        return
-            "tickers": tickers,
-            "freq": freq,
-            "quote_ccy": quote_ccy,
-            "exch": self.data_req.exch,
-            "ctys": ctys_list,
-            "mkt_type": None,
-            "mkts": None,
-            "start_date": start_date,
-            "end_date": end_date,
-            "fields": fields,
-            "tz": self.data_req.tz,
-            "inst": None,
-            "cat": self.data_req.cat,
-            "trials": self.data_req.trials,
-            "pause": self.data_req.pause,
-            "source_tickers": self.data_req.source_tickers,
-            "source_freq": self.data_req.source_freq,
-            "source_fields": self.data_req.source_fields,
-        }
+        return self.data_req
 
-    def to_yahoo(self) ->
+    def to_yahoo(self) -> DataRequest:
         """
         Convert tickers from CryptoDataPy to Yahoo Finance format.
         """
         # tickers
         with resources.path("cryptodatapy.conf", "tickers.csv") as f:
             tickers_path = f
-        tickers_df
+        tickers_df = pd.read_csv(tickers_path, index_col=0, encoding="latin1")
 
         if self.data_req.source_tickers is None:
             if self.data_req.cat == 'eqty':
                 self.data_req.source_tickers = [ticker.upper() for ticker in self.data_req.tickers]
+                self.data_req.tickers = self.data_req.source_tickers
             else:
                 self.data_req.source_tickers = []
+                if self.data_req.cat == 'fx':
+                    self.data_req.tickers = [ticker.upper() for ticker in self.data_req.tickers]
                 for ticker in self.data_req.tickers:
-                    if self.data_req.cat == 'fx':
-                        ticker = ticker.upper()
                     try:
                         self.data_req.source_tickers.append(tickers_df.loc[ticker, "yahoo_id"])
                     except KeyError:
@@ -888,88 +840,45 @@ class ConvertParams:
         if self.data_req.tz is None:
             self.data_req.tz = "America/New_York"
 
-
-
-
-        # "quote_ccy": quote_ccy,
-        # "exch": self.data_req.exch,
-        # "ctys": None,
-        # "mkt_type": self.data_req.mkt_type,
-        # "mkts": None,
-        # "start_date": start_date,
-        # "end_date": end_date,
-        # "fields": fields,
-        # "tz": tz,
-        # "inst": None,
-        # "cat": self.data_req.cat,
-        # "trials": self.data_req.trials,
-        # "pause": self.data_req.pause,
-        # "source_tickers": self.data_req.source_tickers,
-        # "source_freq": self.data_req.source_freq,
-        # "source_fields": self.data_req.source_fields,
-        # }
-
-    def to_famafrench(self) -> Dict[str, Union[list, str, int, float, datetime, None]]:
+        return self.data_req
+
+    def to_famafrench(self) -> DataRequest:
         """
         Convert tickers from CryptoDataPy to Fama-French format.
         """
-        #
+        # tickers
         with resources.path("cryptodatapy.conf", "tickers.csv") as f:
             tickers_path = f
-        tickers_df
+        tickers_df = pd.read_csv(tickers_path, index_col=0, encoding="latin1")
 
-        if self.data_req.source_tickers is
-
-            self.data_req.tickers = self.data_req.source_tickers
-        else:
+        if self.data_req.source_tickers is None:
+            self.data_req.source_tickers = []
             for ticker in self.data_req.tickers:
                 try:
-
+                    self.data_req.source_tickers.append(tickers_df.loc[ticker, "famafrench_id"])
                 except KeyError:
                     logging.warning(
                         f"{ticker} not found for Fama-French source. Check tickers in"
                         f" data catalog and try again."
                     )
-
-        #
-        if self.data_req.source_freq is
-
-
-        else:
-            freq = self.data_req.freq
-        # convert quote ccy
-        quote_ccy = self.data_req.quote_ccy
+
+        # freq
+        if self.data_req.source_freq is None:
+            self.data_req.source_freq = self.data_req.freq
+
         # start date
         if self.data_req.start_date is None:
-
+            self.data_req.source_start_date = datetime(1920, 1, 1)
         else:
-
+            self.data_req.source_start_date = self.data_req.start_date
+
         # end date
         if self.data_req.end_date is None:
-
+            self.data_req.source_end_date = datetime.now()
         else:
-
+            self.data_req.source_end_date = self.data_req.end_date
 
-        return
-            "tickers": tickers,
-            "freq": freq,
-            "quote_ccy": quote_ccy,
-            "exch": self.data_req.exch,
-            "ctys": None,
-            "mkt_type": self.data_req.mkt_type,
-            "mkts": None,
-            "start_date": start_date,
-            "end_date": end_date,
-            "fields": self.data_req.fields,
-            "tz": self.data_req.tz,
-            "inst": None,
-            "cat": self.data_req.cat,
-            "trials": self.data_req.trials,
-            "pause": self.data_req.pause,
-            "source_tickers": self.data_req.source_tickers,
-            "source_freq": self.data_req.source_freq,
-            "source_fields": self.data_req.source_fields,
-        }
+        return self.data_req
 
     def to_aqr(self) -> Dict[str, Union[list, str, int, dict, float, datetime, None]]:
         """
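Note: the common thread in the ConvertParams hunks above is that to_fred, to_wb, to_yahoo and to_famafrench no longer assemble and return a parameter dictionary; they fill the source-specific attributes on the request object and return it. A minimal usage sketch of the 0.2.8 behaviour, assuming ConvertParams is constructed with the request and that DataRequest accepts the keyword arguments shown (the ticker and parameters are hypothetical, not taken from the diff):

```python
# Sketch only: illustrates the new return-the-DataRequest behaviour seen above.
# The DataRequest constructor arguments and the ticker are assumptions.
from cryptodatapy.extract.datarequest import DataRequest
from cryptodatapy.transform.convertparams import ConvertParams

data_req = DataRequest(tickers=["US_GDP_Real_QoQ"], cat="macro", freq="q")  # hypothetical request
converted = ConvertParams(data_req).to_fred()

assert converted is data_req            # the request is mutated in place and returned
print(converted.source_tickers)         # fred_id values looked up in tickers.csv
print(converted.source_freq)            # defaults to data_req.freq when not set
print(converted.tz)                     # defaults to "America/New_York" for FRED
```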
@@ -717,16 +717,22 @@ class WrangleData:
         """
         # convert fields to lib
         self.convert_fields_to_lib(data_source='dbnomics')
+
         # convert to datetime
         self.data_resp['date'] = pd.to_datetime(self.data_resp['date'])
+
         # set index
         self.data_resp = self.data_resp.set_index('date').sort_index()
+
         # resample
         self.data_resp = self.data_resp.resample(self.data_req.freq).last().ffill()
+
         # filter dates
         self.filter_dates()
+
         # type conversion
         self.data_resp = self.data_resp.apply(pd.to_numeric, errors='coerce').convert_dtypes()
+
         # remove bad data
         self.data_resp = self.data_resp[self.data_resp != 0]  # 0 values
         self.data_resp = self.data_resp[~self.data_resp.index.duplicated()]  # duplicate rows
@@ -862,24 +868,29 @@ class WrangleData:
         -------
         pd.DataFrame
             Wrangled dataframe into tidy data format.
-
         """
-        #
+        # tickers
         self.data_resp.columns = self.data_req.tickers  # convert tickers to cryptodatapy format
+
         # resample to match end of reporting period, not beginning
         self.data_resp = self.data_resp.resample('d').last().ffill().resample(self.data_req.freq).last().stack(). \
             to_frame().reset_index()
+
         # convert cols
         if self.data_req.cat == 'macro':
             self.data_resp.columns = ['DATE', 'symbol', 'actual']
         else:
             self.data_resp.columns = ['DATE', 'symbol', 'close']
-
+
+        # fields
         self.convert_fields_to_lib(data_source='fred')
-
+
+        # index
         self.data_resp.set_index(['date', 'ticker'], inplace=True)
+
         # type conversion
         self.data_resp = self.data_resp.apply(pd.to_numeric, errors='coerce').convert_dtypes()
+
         # remove bad data
         self.data_resp = self.data_resp[self.data_resp != 0]  # 0 values
         self.data_resp = self.data_resp[~self.data_resp.index.duplicated()]  # duplicate rows
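The FRED wrangling above relies on a small resampling trick worth calling out: upsample to daily and forward-fill before downsampling, so each observation is stamped at the end of its reporting period rather than the beginning. A self-contained pandas sketch of that pattern on synthetic data:

```python
import pandas as pd

# Synthetic monthly series stamped at month start, as FRED often reports it.
idx = pd.date_range("2023-01-01", periods=3, freq="MS")
monthly = pd.Series([1.0, 2.0, 3.0], index=idx)

# Upsample to daily, forward-fill, then downsample: values end up stamped at month end.
period_end = monthly.resample("d").last().ffill().resample("M").last()
print(period_end.index)  # DatetimeIndex of month-end dates
```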
@@ -896,37 +907,41 @@ class WrangleData:
         pd.DataFrame
             Wrangled dataframe into tidy data format.
         """
-        #
-
-
-
-
-
-
-        self.data_resp = self.data_resp.stack()  # stack to multi-index
+        # tickers
+        tickers_dict = {source_ticker: ticker for source_ticker, ticker in zip(self.data_req.source_tickers,
+                                                                                self.data_req.tickers)}
+        if len(self.data_req.tickers) == 1:
+            self.data_resp['Ticker'] = self.data_req.tickers[0]
+        else:
+            self.data_resp = self.data_resp.stack()
             self.data_resp.index.names = ['Date', 'Ticker']
-
-
-                    self.data_req.tickers], level=1)
-            else:
-                self.data_resp.index = self.data_resp.index.set_levels([ticker for ticker in self.data_req.tickers],
-                                                                       level=1)
+            self.data_resp.index = self.data_resp.index.set_levels(self.data_resp.index.levels[1].map(tickers_dict),
+                                                                    level=1)
         self.data_resp.reset_index(inplace=True)
-
+
+        # fields
         self.convert_fields_to_lib(data_source='yahoo')
-
+
+        # index
         self.data_resp['date'] = pd.to_datetime(self.data_resp['date'])
+        self.data_resp.set_index(['date', 'ticker'], inplace=True)
+
         # resample
-        self.data_resp = self.data_resp.
-
+        self.data_resp = self.data_resp.groupby('ticker').\
+            resample(self.data_req.freq, level='date').\
+            last().swaplevel('ticker', 'date').sort_index()
+
         # re-order cols
         self.data_resp = self.data_resp.loc[:, ['open', 'high', 'low', 'close', 'close_adj', 'volume']]
+
         # type conversion
         self.data_resp = self.data_resp.apply(pd.to_numeric, errors='coerce').convert_dtypes()
+
         # remove bad data
         self.data_resp = self.data_resp[self.data_resp != 0]  # 0 values
         self.data_resp = self.data_resp[~self.data_resp.index.duplicated()]  # duplicate rows
         self.data_resp = self.data_resp.dropna(how='all').dropna(how='all', axis=1)  # entire row or col NaNs
+
         # keep only requested fields and sort index
         self.data_resp = self.data_resp[self.data_req.fields].sort_index()
 
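In the Yahoo Finance wrangling above, source tickers in the second index level are mapped back to cryptodatapy tickers via a dict and Index.set_levels. A self-contained sketch of that pattern with synthetic data and made-up ticker names:

```python
import pandas as pd

# Synthetic (Date, Ticker) MultiIndex frame standing in for stacked yfinance output.
dates = pd.date_range("2023-01-02", periods=2, freq="D")
idx = pd.MultiIndex.from_product([dates, ["SPY", "EURUSD=X"]], names=["Date", "Ticker"])
df = pd.DataFrame({"close": [470.0, 1.07, 472.5, 1.08]}, index=idx)

# Remap the ticker level from source names to cryptodatapy names (illustrative mapping).
tickers_dict = {"SPY": "SPY", "EURUSD=X": "EURUSD"}
df.index = df.index.set_levels(df.index.levels[1].map(tickers_dict), level=1)
print(df.index.get_level_values("Ticker").unique())  # ['SPY', 'EURUSD']
```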
@@ -942,7 +957,7 @@ class WrangleData:
             Wrangled dataframe into tidy data format.
 
         """
-        #
+        # ticker
         ff_tickers_dict = {'RF': 'US_Rates_1M_RF',
                            'Mkt-RF': 'US_Eqty_CSRP_ER',
                            'HML': 'US_Eqty_Val',
@@ -951,6 +966,7 @@ class WrangleData:
                            'CMA': 'US_Eqty_Inv',
                            'Mom': 'US_Eqty_Mom',
                            'ST_Rev': 'US_Eqty_STRev'}
+
         # remove white space from cols str
         self.data_resp.columns = [col.strip() for col in self.data_resp.columns]
         # keep cols in data req tickers
@@ -959,14 +975,18 @@ class WrangleData:
         drop_cols = [col for col in self.data_resp.columns if col not in self.data_req.tickers]
         self.data_resp.drop(columns=drop_cols, inplace=True)
         self.data_resp = self.data_resp.loc[:, ~self.data_resp.columns.duplicated()]  # drop dup cols
+
         # resample freq
         self.data_resp = self.data_resp.resample(self.data_req.freq).sum()
+
         # format index
         self.data_resp.index.name = 'date'  # rename
         self.data_resp = self.data_resp.stack().to_frame('er')
         self.data_resp.index.names = ['date', 'ticker']
+
         # type and conversion to decimals
         self.data_resp = self.data_resp.apply(pd.to_numeric, errors='coerce').convert_dtypes() / 100
+
         # remove bad data
         self.data_resp = self.data_resp[self.data_resp != 0]  # 0 values
         self.data_resp = self.data_resp[~self.data_resp.index.duplicated()]  # duplicate rows
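The Fama-French wrangling above strips column whitespace, stacks the wide return matrix into a tidy (date, ticker) frame with an 'er' column, and divides by 100 because the raw files quote returns in percent. A self-contained sketch of those steps on synthetic data:

```python
import pandas as pd

# Synthetic wide frame of percent returns; the trailing space mimics raw FF column names.
wide = pd.DataFrame(
    {"Mkt-RF": [1.2, -0.5], "HML ": [0.3, 0.1]},
    index=pd.to_datetime(["2023-01-31", "2023-02-28"]),
)
wide.columns = [col.strip() for col in wide.columns]  # remove white space from cols

# Stack to tidy (date, ticker) format and convert percent to decimals.
wide.index.name = "date"
tidy = wide.stack().to_frame("er")
tidy.index.names = ["date", "ticker"]
tidy = tidy.apply(pd.to_numeric, errors="coerce").convert_dtypes() / 100
print(tidy)
```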
@@ -34,16 +34,16 @@ cryptodatapy/extract/data_vendors/cryptocompare_api.py,sha256=3oBfQioBz1vrs9JNtw
 cryptodatapy/extract/data_vendors/datavendor.py,sha256=kGKxHcPng6JiGGhcuPx87ij0DXl4E-OSqxlvxhJ1HQo,12642
 cryptodatapy/extract/data_vendors/glassnode_api.py,sha256=PuuJOjHztoJyFijb5XU1zm1S_2NAj7MX-wC89DL_bWQ,13103
 cryptodatapy/extract/data_vendors/tiingo_api.py,sha256=Bvj5nF8zCkpU3cf5ImUmCS1cd1w2UtjgQvRmQ9Wfg6g,26404
-cryptodatapy/extract/datarequest.py,sha256=
+cryptodatapy/extract/datarequest.py,sha256=gwYhodt_Du12mmH_4qdROZb6ics_8-KpaG-2RxjyXnU,25880
 cryptodatapy/extract/getdata.py,sha256=HzWQyacfmphms97LVKbx1gEgcgsQJViBT4BBxL9TBXk,8703
-cryptodatapy/extract/libraries/Untitled.ipynb,sha256=
+cryptodatapy/extract/libraries/Untitled.ipynb,sha256=qW8zLbpXG-AeJ4Vlo6-0AbrTBlM5Y4PMDz9-1TGb5QM,1218203
 cryptodatapy/extract/libraries/__init__.py,sha256=9rJ_hFHWlvkPwyIkNG5bqH6HTY2jQNPIKQjzYEsVSDo,319
 cryptodatapy/extract/libraries/ccxt.ipynb,sha256=O-xkr_jtRBY4kuKWek61UOLaU5AiyNRM7AnquNLUjFs,22865
-cryptodatapy/extract/libraries/ccxt_api.py,sha256=
+cryptodatapy/extract/libraries/ccxt_api.py,sha256=nHkNeOKAL2KEvbY-ZI-ZgTlO5v15k9KnqODPiMM5zWw,35865
 cryptodatapy/extract/libraries/dbnomics_api.py,sha256=M6kPIH-hKqkmeBQb-g56dY9jatqLCtSl_MnvPblHtAc,9421
 cryptodatapy/extract/libraries/investpy_api.py,sha256=qtGm3LDluXxJorvFv0w1bm1oBrcZIfE5cZSYzNYvttY,18409
 cryptodatapy/extract/libraries/library.py,sha256=070YsO1RJzm4z_enhCjqe5hrj8qsk-Ni0Q_QKoAwQ6U,12316
-cryptodatapy/extract/libraries/pandasdr_api.py,sha256
+cryptodatapy/extract/libraries/pandasdr_api.py,sha256=-62P0W0Pa98f-96nB_bDgDkPFshP8yiqKZ9VU-usv94,13696
 cryptodatapy/extract/libraries/yfinance_api.py,sha256=E4c8gIpDh5ta8ILsn9SBs3C1pOU1VP4OqwQb6TcOzCc,17311
 cryptodatapy/extract/web/__init__.py,sha256=8i0fweCeqSpdiPf-47jT240I4ca6SizCu9aD-qDS67w,89
 cryptodatapy/extract/web/aqr.py,sha256=LS1D7QzG6UWkLUfDMgBFtiHpznnnAUOpec5Sx3vRGME,11875
@@ -54,7 +54,7 @@ cryptodatapy/transform/clean.py,sha256=C9VypQOjdJ987TcD-qAHh7qYaoJBotvp3cWTr3ttS
 cryptodatapy/transform/clean_onchain_data.ipynb,sha256=WrVPs8_WVKEgL6XRvGUATzeinqGUDTbXv_CHivg0nXg,687176
 cryptodatapy/transform/clean_perp_futures_ohlcv.ipynb,sha256=3TFTG6riUfu5f0uYvlMC44iUtQRd27sQPxBMXBXzp6A,72758
 cryptodatapy/transform/cmdty_data.ipynb,sha256=McAMfzNDfrv61gSlzFOkw_DXaOGZE1qfqXc2E_KeSbs,1220371
-cryptodatapy/transform/convertparams.py,sha256=
+cryptodatapy/transform/convertparams.py,sha256=X80Hdi2AMHVSYTJ6i-ovOzv5L6JQlGswJlC82xCriX8,39687
 cryptodatapy/transform/credit_data.ipynb,sha256=Wvvnu9ejsmqCb0s3cTG8bLJaywWQCskgk6FBd5J5Vf8,1892822
 cryptodatapy/transform/eqty_data.ipynb,sha256=A5cA13hOPrOe7Fra0HL4QPFkJGVfArigTR0GUUBpQ3A,25609
 cryptodatapy/transform/filter.py,sha256=iQDUXthEXVGcrZUZLjevhDqwf9oywEQHTIh6n_sxOhU,9056
@@ -63,11 +63,11 @@ cryptodatapy/transform/impute.py,sha256=c7qdgFg0qs_xuQnX0jazpt0wgASC0KElLZRuxTke
 cryptodatapy/transform/od.py,sha256=z__CWiN70f1leqx12SS9pIvTggxpUPrg1falJIKMZCc,31031
 cryptodatapy/transform/rates_data.ipynb,sha256=olKY4t2j4sfjsCYlhupTgaviC6922HHGBr-y3f80qjQ,13358
 cryptodatapy/transform/us_rates_daily.csv,sha256=BIA4a6egQYrVsLk51IZ54ZXXWMwjrx_t5S4XMdvHg44,6434830
-cryptodatapy/transform/wrangle.py,sha256=
+cryptodatapy/transform/wrangle.py,sha256=KqPIY7akFtHasW5gqUNR1cCGMBBkgHmzWxyMZFw8t-Q,42564
 cryptodatapy/util/__init__.py,sha256=zSQ2HU2QIXzCuptJjknmrClwtQKCvIj4aNysZljIgrU,116
 cryptodatapy/util/datacatalog.py,sha256=qCCX6srXvaAbVAKuA0M2y5IK_2OEx5xA3yRahDZlC-g,13157
 cryptodatapy/util/datacredentials.py,sha256=fXuGgI2NKCLlcnK8M37CtdyAc3O_YCV23x3KTlfakjA,2160
-cryptodatapy-0.2.
-cryptodatapy-0.2.
-cryptodatapy-0.2.
-cryptodatapy-0.2.
+cryptodatapy-0.2.8.dist-info/LICENSE,sha256=sw4oVq8bDjT3uMtaFebQ-xeIVP4H-bXldTs9q-Jjeks,11344
+cryptodatapy-0.2.8.dist-info/METADATA,sha256=8Xb8H8X_moxjtR263dEucMEecr5UpausfU7fH_Ne0w0,6426
+cryptodatapy-0.2.8.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+cryptodatapy-0.2.8.dist-info/RECORD,,
LICENSE and WHEEL: files without changes.