pyswordfish 3.0.4.0__cp312-cp312-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyswordfish-3.0.4.0.dist-info/METADATA +53 -0
- pyswordfish-3.0.4.0.dist-info/RECORD +644 -0
- pyswordfish-3.0.4.0.dist-info/WHEEL +5 -0
- pyswordfish-3.0.4.0.dist-info/top_level.txt +1 -0
- swordfish/__init__.py +102 -0
- swordfish/_config.py +901 -0
- swordfish/_connection.py +1418 -0
- swordfish/_core.py +17 -0
- swordfish/_engine.py +1456 -0
- swordfish/_enums.py +142 -0
- swordfish/_function_bindings.py +23594 -0
- swordfish/_function_tools.py +124 -0
- swordfish/_helper.py +57 -0
- swordfish/_runtime.py +158 -0
- swordfish/_sqlbuilder.py +549 -0
- swordfish/_streaming.py +333 -0
- swordfish/_swordfishcpp.cp312-win_amd64.pyd +0 -0
- swordfish/_swordfishcpp.pyi +4784 -0
- swordfish/_translator.py +580 -0
- swordfish/asset/dolphindb.dos +71 -0
- swordfish/asset/tzdb/Africa/Abidjan +0 -0
- swordfish/asset/tzdb/Africa/Accra +0 -0
- swordfish/asset/tzdb/Africa/Addis_Ababa +0 -0
- swordfish/asset/tzdb/Africa/Algiers +0 -0
- swordfish/asset/tzdb/Africa/Asmara +0 -0
- swordfish/asset/tzdb/Africa/Asmera +0 -0
- swordfish/asset/tzdb/Africa/Bamako +0 -0
- swordfish/asset/tzdb/Africa/Bangui +0 -0
- swordfish/asset/tzdb/Africa/Banjul +0 -0
- swordfish/asset/tzdb/Africa/Bissau +0 -0
- swordfish/asset/tzdb/Africa/Blantyre +0 -0
- swordfish/asset/tzdb/Africa/Brazzaville +0 -0
- swordfish/asset/tzdb/Africa/Bujumbura +0 -0
- swordfish/asset/tzdb/Africa/Cairo +0 -0
- swordfish/asset/tzdb/Africa/Casablanca +0 -0
- swordfish/asset/tzdb/Africa/Ceuta +0 -0
- swordfish/asset/tzdb/Africa/Conakry +0 -0
- swordfish/asset/tzdb/Africa/Dakar +0 -0
- swordfish/asset/tzdb/Africa/Dar_es_Salaam +0 -0
- swordfish/asset/tzdb/Africa/Djibouti +0 -0
- swordfish/asset/tzdb/Africa/Douala +0 -0
- swordfish/asset/tzdb/Africa/El_Aaiun +0 -0
- swordfish/asset/tzdb/Africa/Freetown +0 -0
- swordfish/asset/tzdb/Africa/Gaborone +0 -0
- swordfish/asset/tzdb/Africa/Harare +0 -0
- swordfish/asset/tzdb/Africa/Johannesburg +0 -0
- swordfish/asset/tzdb/Africa/Juba +0 -0
- swordfish/asset/tzdb/Africa/Kampala +0 -0
- swordfish/asset/tzdb/Africa/Khartoum +0 -0
- swordfish/asset/tzdb/Africa/Kigali +0 -0
- swordfish/asset/tzdb/Africa/Kinshasa +0 -0
- swordfish/asset/tzdb/Africa/Lagos +0 -0
- swordfish/asset/tzdb/Africa/Libreville +0 -0
- swordfish/asset/tzdb/Africa/Lome +0 -0
- swordfish/asset/tzdb/Africa/Luanda +0 -0
- swordfish/asset/tzdb/Africa/Lubumbashi +0 -0
- swordfish/asset/tzdb/Africa/Lusaka +0 -0
- swordfish/asset/tzdb/Africa/Malabo +0 -0
- swordfish/asset/tzdb/Africa/Maputo +0 -0
- swordfish/asset/tzdb/Africa/Maseru +0 -0
- swordfish/asset/tzdb/Africa/Mbabane +0 -0
- swordfish/asset/tzdb/Africa/Mogadishu +0 -0
- swordfish/asset/tzdb/Africa/Monrovia +0 -0
- swordfish/asset/tzdb/Africa/Nairobi +0 -0
- swordfish/asset/tzdb/Africa/Ndjamena +0 -0
- swordfish/asset/tzdb/Africa/Niamey +0 -0
- swordfish/asset/tzdb/Africa/Nouakchott +0 -0
- swordfish/asset/tzdb/Africa/Ouagadougou +0 -0
- swordfish/asset/tzdb/Africa/Porto-Novo +0 -0
- swordfish/asset/tzdb/Africa/Sao_Tome +0 -0
- swordfish/asset/tzdb/Africa/Timbuktu +0 -0
- swordfish/asset/tzdb/Africa/Tripoli +0 -0
- swordfish/asset/tzdb/Africa/Tunis +0 -0
- swordfish/asset/tzdb/Africa/Windhoek +0 -0
- swordfish/asset/tzdb/America/Adak +0 -0
- swordfish/asset/tzdb/America/Anchorage +0 -0
- swordfish/asset/tzdb/America/Anguilla +0 -0
- swordfish/asset/tzdb/America/Antigua +0 -0
- swordfish/asset/tzdb/America/Araguaina +0 -0
- swordfish/asset/tzdb/America/Argentina/Buenos_Aires +0 -0
- swordfish/asset/tzdb/America/Argentina/Catamarca +0 -0
- swordfish/asset/tzdb/America/Argentina/ComodRivadavia +0 -0
- swordfish/asset/tzdb/America/Argentina/Cordoba +0 -0
- swordfish/asset/tzdb/America/Argentina/Jujuy +0 -0
- swordfish/asset/tzdb/America/Argentina/La_Rioja +0 -0
- swordfish/asset/tzdb/America/Argentina/Mendoza +0 -0
- swordfish/asset/tzdb/America/Argentina/Rio_Gallegos +0 -0
- swordfish/asset/tzdb/America/Argentina/Salta +0 -0
- swordfish/asset/tzdb/America/Argentina/San_Juan +0 -0
- swordfish/asset/tzdb/America/Argentina/San_Luis +0 -0
- swordfish/asset/tzdb/America/Argentina/Tucuman +0 -0
- swordfish/asset/tzdb/America/Argentina/Ushuaia +0 -0
- swordfish/asset/tzdb/America/Aruba +0 -0
- swordfish/asset/tzdb/America/Asuncion +0 -0
- swordfish/asset/tzdb/America/Atikokan +0 -0
- swordfish/asset/tzdb/America/Atka +0 -0
- swordfish/asset/tzdb/America/Bahia +0 -0
- swordfish/asset/tzdb/America/Bahia_Banderas +0 -0
- swordfish/asset/tzdb/America/Barbados +0 -0
- swordfish/asset/tzdb/America/Belem +0 -0
- swordfish/asset/tzdb/America/Belize +0 -0
- swordfish/asset/tzdb/America/Blanc-Sablon +0 -0
- swordfish/asset/tzdb/America/Boa_Vista +0 -0
- swordfish/asset/tzdb/America/Bogota +0 -0
- swordfish/asset/tzdb/America/Boise +0 -0
- swordfish/asset/tzdb/America/Buenos_Aires +0 -0
- swordfish/asset/tzdb/America/Cambridge_Bay +0 -0
- swordfish/asset/tzdb/America/Campo_Grande +0 -0
- swordfish/asset/tzdb/America/Cancun +0 -0
- swordfish/asset/tzdb/America/Caracas +0 -0
- swordfish/asset/tzdb/America/Catamarca +0 -0
- swordfish/asset/tzdb/America/Cayenne +0 -0
- swordfish/asset/tzdb/America/Cayman +0 -0
- swordfish/asset/tzdb/America/Chicago +0 -0
- swordfish/asset/tzdb/America/Chihuahua +0 -0
- swordfish/asset/tzdb/America/Coral_Harbour +0 -0
- swordfish/asset/tzdb/America/Cordoba +0 -0
- swordfish/asset/tzdb/America/Costa_Rica +0 -0
- swordfish/asset/tzdb/America/Creston +0 -0
- swordfish/asset/tzdb/America/Cuiaba +0 -0
- swordfish/asset/tzdb/America/Curacao +0 -0
- swordfish/asset/tzdb/America/Danmarkshavn +0 -0
- swordfish/asset/tzdb/America/Dawson +0 -0
- swordfish/asset/tzdb/America/Dawson_Creek +0 -0
- swordfish/asset/tzdb/America/Denver +0 -0
- swordfish/asset/tzdb/America/Detroit +0 -0
- swordfish/asset/tzdb/America/Dominica +0 -0
- swordfish/asset/tzdb/America/Edmonton +0 -0
- swordfish/asset/tzdb/America/Eirunepe +0 -0
- swordfish/asset/tzdb/America/El_Salvador +0 -0
- swordfish/asset/tzdb/America/Ensenada +0 -0
- swordfish/asset/tzdb/America/Fort_Nelson +0 -0
- swordfish/asset/tzdb/America/Fort_Wayne +0 -0
- swordfish/asset/tzdb/America/Fortaleza +0 -0
- swordfish/asset/tzdb/America/Glace_Bay +0 -0
- swordfish/asset/tzdb/America/Godthab +0 -0
- swordfish/asset/tzdb/America/Goose_Bay +0 -0
- swordfish/asset/tzdb/America/Grand_Turk +0 -0
- swordfish/asset/tzdb/America/Grenada +0 -0
- swordfish/asset/tzdb/America/Guadeloupe +0 -0
- swordfish/asset/tzdb/America/Guatemala +0 -0
- swordfish/asset/tzdb/America/Guayaquil +0 -0
- swordfish/asset/tzdb/America/Guyana +0 -0
- swordfish/asset/tzdb/America/Halifax +0 -0
- swordfish/asset/tzdb/America/Havana +0 -0
- swordfish/asset/tzdb/America/Hermosillo +0 -0
- swordfish/asset/tzdb/America/Indiana/Indianapolis +0 -0
- swordfish/asset/tzdb/America/Indiana/Knox +0 -0
- swordfish/asset/tzdb/America/Indiana/Marengo +0 -0
- swordfish/asset/tzdb/America/Indiana/Petersburg +0 -0
- swordfish/asset/tzdb/America/Indiana/Tell_City +0 -0
- swordfish/asset/tzdb/America/Indiana/Vevay +0 -0
- swordfish/asset/tzdb/America/Indiana/Vincennes +0 -0
- swordfish/asset/tzdb/America/Indiana/Winamac +0 -0
- swordfish/asset/tzdb/America/Indianapolis +0 -0
- swordfish/asset/tzdb/America/Inuvik +0 -0
- swordfish/asset/tzdb/America/Iqaluit +0 -0
- swordfish/asset/tzdb/America/Jamaica +0 -0
- swordfish/asset/tzdb/America/Jujuy +0 -0
- swordfish/asset/tzdb/America/Juneau +0 -0
- swordfish/asset/tzdb/America/Kentucky/Louisville +0 -0
- swordfish/asset/tzdb/America/Kentucky/Monticello +0 -0
- swordfish/asset/tzdb/America/Knox_IN +0 -0
- swordfish/asset/tzdb/America/Kralendijk +0 -0
- swordfish/asset/tzdb/America/La_Paz +0 -0
- swordfish/asset/tzdb/America/Lima +0 -0
- swordfish/asset/tzdb/America/Los_Angeles +0 -0
- swordfish/asset/tzdb/America/Louisville +0 -0
- swordfish/asset/tzdb/America/Lower_Princes +0 -0
- swordfish/asset/tzdb/America/Maceio +0 -0
- swordfish/asset/tzdb/America/Managua +0 -0
- swordfish/asset/tzdb/America/Manaus +0 -0
- swordfish/asset/tzdb/America/Marigot +0 -0
- swordfish/asset/tzdb/America/Martinique +0 -0
- swordfish/asset/tzdb/America/Matamoros +0 -0
- swordfish/asset/tzdb/America/Mazatlan +0 -0
- swordfish/asset/tzdb/America/Mendoza +0 -0
- swordfish/asset/tzdb/America/Menominee +0 -0
- swordfish/asset/tzdb/America/Merida +0 -0
- swordfish/asset/tzdb/America/Metlakatla +0 -0
- swordfish/asset/tzdb/America/Mexico_City +0 -0
- swordfish/asset/tzdb/America/Miquelon +0 -0
- swordfish/asset/tzdb/America/Moncton +0 -0
- swordfish/asset/tzdb/America/Monterrey +0 -0
- swordfish/asset/tzdb/America/Montevideo +0 -0
- swordfish/asset/tzdb/America/Montreal +0 -0
- swordfish/asset/tzdb/America/Montserrat +0 -0
- swordfish/asset/tzdb/America/Nassau +0 -0
- swordfish/asset/tzdb/America/New_York +0 -0
- swordfish/asset/tzdb/America/Nipigon +0 -0
- swordfish/asset/tzdb/America/Nome +0 -0
- swordfish/asset/tzdb/America/Noronha +0 -0
- swordfish/asset/tzdb/America/North_Dakota/Beulah +0 -0
- swordfish/asset/tzdb/America/North_Dakota/Center +0 -0
- swordfish/asset/tzdb/America/North_Dakota/New_Salem +0 -0
- swordfish/asset/tzdb/America/Ojinaga +0 -0
- swordfish/asset/tzdb/America/Panama +0 -0
- swordfish/asset/tzdb/America/Pangnirtung +0 -0
- swordfish/asset/tzdb/America/Paramaribo +0 -0
- swordfish/asset/tzdb/America/Phoenix +0 -0
- swordfish/asset/tzdb/America/Port-au-Prince +0 -0
- swordfish/asset/tzdb/America/Port_of_Spain +0 -0
- swordfish/asset/tzdb/America/Porto_Acre +0 -0
- swordfish/asset/tzdb/America/Porto_Velho +0 -0
- swordfish/asset/tzdb/America/Puerto_Rico +0 -0
- swordfish/asset/tzdb/America/Punta_Arenas +0 -0
- swordfish/asset/tzdb/America/Rainy_River +0 -0
- swordfish/asset/tzdb/America/Rankin_Inlet +0 -0
- swordfish/asset/tzdb/America/Recife +0 -0
- swordfish/asset/tzdb/America/Regina +0 -0
- swordfish/asset/tzdb/America/Resolute +0 -0
- swordfish/asset/tzdb/America/Rio_Branco +0 -0
- swordfish/asset/tzdb/America/Rosario +0 -0
- swordfish/asset/tzdb/America/Santa_Isabel +0 -0
- swordfish/asset/tzdb/America/Santarem +0 -0
- swordfish/asset/tzdb/America/Santiago +0 -0
- swordfish/asset/tzdb/America/Santo_Domingo +0 -0
- swordfish/asset/tzdb/America/Sao_Paulo +0 -0
- swordfish/asset/tzdb/America/Scoresbysund +0 -0
- swordfish/asset/tzdb/America/Shiprock +0 -0
- swordfish/asset/tzdb/America/Sitka +0 -0
- swordfish/asset/tzdb/America/St_Barthelemy +0 -0
- swordfish/asset/tzdb/America/St_Johns +0 -0
- swordfish/asset/tzdb/America/St_Kitts +0 -0
- swordfish/asset/tzdb/America/St_Lucia +0 -0
- swordfish/asset/tzdb/America/St_Thomas +0 -0
- swordfish/asset/tzdb/America/St_Vincent +0 -0
- swordfish/asset/tzdb/America/Swift_Current +0 -0
- swordfish/asset/tzdb/America/Tegucigalpa +0 -0
- swordfish/asset/tzdb/America/Thule +0 -0
- swordfish/asset/tzdb/America/Thunder_Bay +0 -0
- swordfish/asset/tzdb/America/Tijuana +0 -0
- swordfish/asset/tzdb/America/Toronto +0 -0
- swordfish/asset/tzdb/America/Tortola +0 -0
- swordfish/asset/tzdb/America/Vancouver +0 -0
- swordfish/asset/tzdb/America/Virgin +0 -0
- swordfish/asset/tzdb/America/Whitehorse +0 -0
- swordfish/asset/tzdb/America/Winnipeg +0 -0
- swordfish/asset/tzdb/America/Yakutat +0 -0
- swordfish/asset/tzdb/America/Yellowknife +0 -0
- swordfish/asset/tzdb/Antarctica/Casey +0 -0
- swordfish/asset/tzdb/Antarctica/Davis +0 -0
- swordfish/asset/tzdb/Antarctica/DumontDUrville +0 -0
- swordfish/asset/tzdb/Antarctica/Macquarie +0 -0
- swordfish/asset/tzdb/Antarctica/Mawson +0 -0
- swordfish/asset/tzdb/Antarctica/McMurdo +0 -0
- swordfish/asset/tzdb/Antarctica/Palmer +0 -0
- swordfish/asset/tzdb/Antarctica/Rothera +0 -0
- swordfish/asset/tzdb/Antarctica/South_Pole +0 -0
- swordfish/asset/tzdb/Antarctica/Syowa +0 -0
- swordfish/asset/tzdb/Antarctica/Troll +0 -0
- swordfish/asset/tzdb/Antarctica/Vostok +0 -0
- swordfish/asset/tzdb/Arctic/Longyearbyen +0 -0
- swordfish/asset/tzdb/Asia/Aden +0 -0
- swordfish/asset/tzdb/Asia/Almaty +0 -0
- swordfish/asset/tzdb/Asia/Amman +0 -0
- swordfish/asset/tzdb/Asia/Anadyr +0 -0
- swordfish/asset/tzdb/Asia/Aqtau +0 -0
- swordfish/asset/tzdb/Asia/Aqtobe +0 -0
- swordfish/asset/tzdb/Asia/Ashgabat +0 -0
- swordfish/asset/tzdb/Asia/Ashkhabad +0 -0
- swordfish/asset/tzdb/Asia/Atyrau +0 -0
- swordfish/asset/tzdb/Asia/Baghdad +0 -0
- swordfish/asset/tzdb/Asia/Bahrain +0 -0
- swordfish/asset/tzdb/Asia/Baku +0 -0
- swordfish/asset/tzdb/Asia/Bangkok +0 -0
- swordfish/asset/tzdb/Asia/Barnaul +0 -0
- swordfish/asset/tzdb/Asia/Beirut +0 -0
- swordfish/asset/tzdb/Asia/Bishkek +0 -0
- swordfish/asset/tzdb/Asia/Brunei +0 -0
- swordfish/asset/tzdb/Asia/Calcutta +0 -0
- swordfish/asset/tzdb/Asia/Chita +0 -0
- swordfish/asset/tzdb/Asia/Choibalsan +0 -0
- swordfish/asset/tzdb/Asia/Chongqing +0 -0
- swordfish/asset/tzdb/Asia/Chungking +0 -0
- swordfish/asset/tzdb/Asia/Colombo +0 -0
- swordfish/asset/tzdb/Asia/Dacca +0 -0
- swordfish/asset/tzdb/Asia/Damascus +0 -0
- swordfish/asset/tzdb/Asia/Dhaka +0 -0
- swordfish/asset/tzdb/Asia/Dili +0 -0
- swordfish/asset/tzdb/Asia/Dubai +0 -0
- swordfish/asset/tzdb/Asia/Dushanbe +0 -0
- swordfish/asset/tzdb/Asia/Famagusta +0 -0
- swordfish/asset/tzdb/Asia/Gaza +0 -0
- swordfish/asset/tzdb/Asia/Harbin +0 -0
- swordfish/asset/tzdb/Asia/Hebron +0 -0
- swordfish/asset/tzdb/Asia/Ho_Chi_Minh +0 -0
- swordfish/asset/tzdb/Asia/Hong_Kong +0 -0
- swordfish/asset/tzdb/Asia/Hovd +0 -0
- swordfish/asset/tzdb/Asia/Irkutsk +0 -0
- swordfish/asset/tzdb/Asia/Istanbul +0 -0
- swordfish/asset/tzdb/Asia/Jakarta +0 -0
- swordfish/asset/tzdb/Asia/Jayapura +0 -0
- swordfish/asset/tzdb/Asia/Jerusalem +0 -0
- swordfish/asset/tzdb/Asia/Kabul +0 -0
- swordfish/asset/tzdb/Asia/Kamchatka +0 -0
- swordfish/asset/tzdb/Asia/Karachi +0 -0
- swordfish/asset/tzdb/Asia/Kashgar +0 -0
- swordfish/asset/tzdb/Asia/Kathmandu +0 -0
- swordfish/asset/tzdb/Asia/Katmandu +0 -0
- swordfish/asset/tzdb/Asia/Khandyga +0 -0
- swordfish/asset/tzdb/Asia/Kolkata +0 -0
- swordfish/asset/tzdb/Asia/Krasnoyarsk +0 -0
- swordfish/asset/tzdb/Asia/Kuala_Lumpur +0 -0
- swordfish/asset/tzdb/Asia/Kuching +0 -0
- swordfish/asset/tzdb/Asia/Kuwait +0 -0
- swordfish/asset/tzdb/Asia/Macao +0 -0
- swordfish/asset/tzdb/Asia/Macau +0 -0
- swordfish/asset/tzdb/Asia/Magadan +0 -0
- swordfish/asset/tzdb/Asia/Makassar +0 -0
- swordfish/asset/tzdb/Asia/Manila +0 -0
- swordfish/asset/tzdb/Asia/Muscat +0 -0
- swordfish/asset/tzdb/Asia/Nicosia +0 -0
- swordfish/asset/tzdb/Asia/Novokuznetsk +0 -0
- swordfish/asset/tzdb/Asia/Novosibirsk +0 -0
- swordfish/asset/tzdb/Asia/Omsk +0 -0
- swordfish/asset/tzdb/Asia/Oral +0 -0
- swordfish/asset/tzdb/Asia/Phnom_Penh +0 -0
- swordfish/asset/tzdb/Asia/Pontianak +0 -0
- swordfish/asset/tzdb/Asia/Pyongyang +0 -0
- swordfish/asset/tzdb/Asia/Qatar +0 -0
- swordfish/asset/tzdb/Asia/Qyzylorda +0 -0
- swordfish/asset/tzdb/Asia/Rangoon +0 -0
- swordfish/asset/tzdb/Asia/Riyadh +0 -0
- swordfish/asset/tzdb/Asia/Saigon +0 -0
- swordfish/asset/tzdb/Asia/Sakhalin +0 -0
- swordfish/asset/tzdb/Asia/Samarkand +0 -0
- swordfish/asset/tzdb/Asia/Seoul +0 -0
- swordfish/asset/tzdb/Asia/Shanghai +0 -0
- swordfish/asset/tzdb/Asia/Singapore +0 -0
- swordfish/asset/tzdb/Asia/Srednekolymsk +0 -0
- swordfish/asset/tzdb/Asia/Taipei +0 -0
- swordfish/asset/tzdb/Asia/Tashkent +0 -0
- swordfish/asset/tzdb/Asia/Tbilisi +0 -0
- swordfish/asset/tzdb/Asia/Tehran +0 -0
- swordfish/asset/tzdb/Asia/Tel_Aviv +0 -0
- swordfish/asset/tzdb/Asia/Thimbu +0 -0
- swordfish/asset/tzdb/Asia/Thimphu +0 -0
- swordfish/asset/tzdb/Asia/Tokyo +0 -0
- swordfish/asset/tzdb/Asia/Tomsk +0 -0
- swordfish/asset/tzdb/Asia/Ujung_Pandang +0 -0
- swordfish/asset/tzdb/Asia/Ulaanbaatar +0 -0
- swordfish/asset/tzdb/Asia/Ulan_Bator +0 -0
- swordfish/asset/tzdb/Asia/Urumqi +0 -0
- swordfish/asset/tzdb/Asia/Ust-Nera +0 -0
- swordfish/asset/tzdb/Asia/Vientiane +0 -0
- swordfish/asset/tzdb/Asia/Vladivostok +0 -0
- swordfish/asset/tzdb/Asia/Yakutsk +0 -0
- swordfish/asset/tzdb/Asia/Yangon +0 -0
- swordfish/asset/tzdb/Asia/Yekaterinburg +0 -0
- swordfish/asset/tzdb/Asia/Yerevan +0 -0
- swordfish/asset/tzdb/Atlantic/Azores +0 -0
- swordfish/asset/tzdb/Atlantic/Bermuda +0 -0
- swordfish/asset/tzdb/Atlantic/Canary +0 -0
- swordfish/asset/tzdb/Atlantic/Cape_Verde +0 -0
- swordfish/asset/tzdb/Atlantic/Faeroe +0 -0
- swordfish/asset/tzdb/Atlantic/Faroe +0 -0
- swordfish/asset/tzdb/Atlantic/Jan_Mayen +0 -0
- swordfish/asset/tzdb/Atlantic/Madeira +0 -0
- swordfish/asset/tzdb/Atlantic/Reykjavik +0 -0
- swordfish/asset/tzdb/Atlantic/South_Georgia +0 -0
- swordfish/asset/tzdb/Atlantic/St_Helena +0 -0
- swordfish/asset/tzdb/Atlantic/Stanley +0 -0
- swordfish/asset/tzdb/Australia/ACT +0 -0
- swordfish/asset/tzdb/Australia/Adelaide +0 -0
- swordfish/asset/tzdb/Australia/Brisbane +0 -0
- swordfish/asset/tzdb/Australia/Broken_Hill +0 -0
- swordfish/asset/tzdb/Australia/Canberra +0 -0
- swordfish/asset/tzdb/Australia/Currie +0 -0
- swordfish/asset/tzdb/Australia/Darwin +0 -0
- swordfish/asset/tzdb/Australia/Eucla +0 -0
- swordfish/asset/tzdb/Australia/Hobart +0 -0
- swordfish/asset/tzdb/Australia/LHI +0 -0
- swordfish/asset/tzdb/Australia/Lindeman +0 -0
- swordfish/asset/tzdb/Australia/Lord_Howe +0 -0
- swordfish/asset/tzdb/Australia/Melbourne +0 -0
- swordfish/asset/tzdb/Australia/NSW +0 -0
- swordfish/asset/tzdb/Australia/North +0 -0
- swordfish/asset/tzdb/Australia/Perth +0 -0
- swordfish/asset/tzdb/Australia/Queensland +0 -0
- swordfish/asset/tzdb/Australia/South +0 -0
- swordfish/asset/tzdb/Australia/Sydney +0 -0
- swordfish/asset/tzdb/Australia/Tasmania +0 -0
- swordfish/asset/tzdb/Australia/Victoria +0 -0
- swordfish/asset/tzdb/Australia/West +0 -0
- swordfish/asset/tzdb/Australia/Yancowinna +0 -0
- swordfish/asset/tzdb/Brazil/Acre +0 -0
- swordfish/asset/tzdb/Brazil/DeNoronha +0 -0
- swordfish/asset/tzdb/Brazil/East +0 -0
- swordfish/asset/tzdb/Brazil/West +0 -0
- swordfish/asset/tzdb/CET +0 -0
- swordfish/asset/tzdb/CST6CDT +0 -0
- swordfish/asset/tzdb/Canada/Atlantic +0 -0
- swordfish/asset/tzdb/Canada/Central +0 -0
- swordfish/asset/tzdb/Canada/Eastern +0 -0
- swordfish/asset/tzdb/Canada/Mountain +0 -0
- swordfish/asset/tzdb/Canada/Newfoundland +0 -0
- swordfish/asset/tzdb/Canada/Pacific +0 -0
- swordfish/asset/tzdb/Canada/Saskatchewan +0 -0
- swordfish/asset/tzdb/Canada/Yukon +0 -0
- swordfish/asset/tzdb/Chile/Continental +0 -0
- swordfish/asset/tzdb/Chile/EasterIsland +0 -0
- swordfish/asset/tzdb/Cuba +0 -0
- swordfish/asset/tzdb/EET +0 -0
- swordfish/asset/tzdb/EST +0 -0
- swordfish/asset/tzdb/EST5EDT +0 -0
- swordfish/asset/tzdb/Egypt +0 -0
- swordfish/asset/tzdb/Eire +0 -0
- swordfish/asset/tzdb/Etc/GMT +0 -0
- swordfish/asset/tzdb/Etc/GMT+0 +0 -0
- swordfish/asset/tzdb/Etc/GMT+1 +0 -0
- swordfish/asset/tzdb/Etc/GMT+10 +0 -0
- swordfish/asset/tzdb/Etc/GMT+11 +0 -0
- swordfish/asset/tzdb/Etc/GMT+12 +0 -0
- swordfish/asset/tzdb/Etc/GMT+2 +0 -0
- swordfish/asset/tzdb/Etc/GMT+3 +0 -0
- swordfish/asset/tzdb/Etc/GMT+4 +0 -0
- swordfish/asset/tzdb/Etc/GMT+5 +0 -0
- swordfish/asset/tzdb/Etc/GMT+6 +0 -0
- swordfish/asset/tzdb/Etc/GMT+7 +0 -0
- swordfish/asset/tzdb/Etc/GMT+8 +0 -0
- swordfish/asset/tzdb/Etc/GMT+9 +0 -0
- swordfish/asset/tzdb/Etc/GMT-0 +0 -0
- swordfish/asset/tzdb/Etc/GMT-1 +0 -0
- swordfish/asset/tzdb/Etc/GMT-10 +0 -0
- swordfish/asset/tzdb/Etc/GMT-11 +0 -0
- swordfish/asset/tzdb/Etc/GMT-12 +0 -0
- swordfish/asset/tzdb/Etc/GMT-13 +0 -0
- swordfish/asset/tzdb/Etc/GMT-14 +0 -0
- swordfish/asset/tzdb/Etc/GMT-2 +0 -0
- swordfish/asset/tzdb/Etc/GMT-3 +0 -0
- swordfish/asset/tzdb/Etc/GMT-4 +0 -0
- swordfish/asset/tzdb/Etc/GMT-5 +0 -0
- swordfish/asset/tzdb/Etc/GMT-6 +0 -0
- swordfish/asset/tzdb/Etc/GMT-7 +0 -0
- swordfish/asset/tzdb/Etc/GMT-8 +0 -0
- swordfish/asset/tzdb/Etc/GMT-9 +0 -0
- swordfish/asset/tzdb/Etc/GMT0 +0 -0
- swordfish/asset/tzdb/Etc/Greenwich +0 -0
- swordfish/asset/tzdb/Etc/UCT +0 -0
- swordfish/asset/tzdb/Etc/UTC +0 -0
- swordfish/asset/tzdb/Etc/Universal +0 -0
- swordfish/asset/tzdb/Etc/Zulu +0 -0
- swordfish/asset/tzdb/Europe/Amsterdam +0 -0
- swordfish/asset/tzdb/Europe/Andorra +0 -0
- swordfish/asset/tzdb/Europe/Astrakhan +0 -0
- swordfish/asset/tzdb/Europe/Athens +0 -0
- swordfish/asset/tzdb/Europe/Belfast +0 -0
- swordfish/asset/tzdb/Europe/Belgrade +0 -0
- swordfish/asset/tzdb/Europe/Berlin +0 -0
- swordfish/asset/tzdb/Europe/Bratislava +0 -0
- swordfish/asset/tzdb/Europe/Brussels +0 -0
- swordfish/asset/tzdb/Europe/Bucharest +0 -0
- swordfish/asset/tzdb/Europe/Budapest +0 -0
- swordfish/asset/tzdb/Europe/Busingen +0 -0
- swordfish/asset/tzdb/Europe/Chisinau +0 -0
- swordfish/asset/tzdb/Europe/Copenhagen +0 -0
- swordfish/asset/tzdb/Europe/Dublin +0 -0
- swordfish/asset/tzdb/Europe/Gibraltar +0 -0
- swordfish/asset/tzdb/Europe/Guernsey +0 -0
- swordfish/asset/tzdb/Europe/Helsinki +0 -0
- swordfish/asset/tzdb/Europe/Isle_of_Man +0 -0
- swordfish/asset/tzdb/Europe/Istanbul +0 -0
- swordfish/asset/tzdb/Europe/Jersey +0 -0
- swordfish/asset/tzdb/Europe/Kaliningrad +0 -0
- swordfish/asset/tzdb/Europe/Kiev +0 -0
- swordfish/asset/tzdb/Europe/Kirov +0 -0
- swordfish/asset/tzdb/Europe/Lisbon +0 -0
- swordfish/asset/tzdb/Europe/Ljubljana +0 -0
- swordfish/asset/tzdb/Europe/London +0 -0
- swordfish/asset/tzdb/Europe/Luxembourg +0 -0
- swordfish/asset/tzdb/Europe/Madrid +0 -0
- swordfish/asset/tzdb/Europe/Malta +0 -0
- swordfish/asset/tzdb/Europe/Mariehamn +0 -0
- swordfish/asset/tzdb/Europe/Minsk +0 -0
- swordfish/asset/tzdb/Europe/Monaco +0 -0
- swordfish/asset/tzdb/Europe/Moscow +0 -0
- swordfish/asset/tzdb/Europe/Nicosia +0 -0
- swordfish/asset/tzdb/Europe/Oslo +0 -0
- swordfish/asset/tzdb/Europe/Paris +0 -0
- swordfish/asset/tzdb/Europe/Podgorica +0 -0
- swordfish/asset/tzdb/Europe/Prague +0 -0
- swordfish/asset/tzdb/Europe/Riga +0 -0
- swordfish/asset/tzdb/Europe/Rome +0 -0
- swordfish/asset/tzdb/Europe/Samara +0 -0
- swordfish/asset/tzdb/Europe/San_Marino +0 -0
- swordfish/asset/tzdb/Europe/Sarajevo +0 -0
- swordfish/asset/tzdb/Europe/Saratov +0 -0
- swordfish/asset/tzdb/Europe/Simferopol +0 -0
- swordfish/asset/tzdb/Europe/Skopje +0 -0
- swordfish/asset/tzdb/Europe/Sofia +0 -0
- swordfish/asset/tzdb/Europe/Stockholm +0 -0
- swordfish/asset/tzdb/Europe/Tallinn +0 -0
- swordfish/asset/tzdb/Europe/Tirane +0 -0
- swordfish/asset/tzdb/Europe/Tiraspol +0 -0
- swordfish/asset/tzdb/Europe/Ulyanovsk +0 -0
- swordfish/asset/tzdb/Europe/Uzhgorod +0 -0
- swordfish/asset/tzdb/Europe/Vaduz +0 -0
- swordfish/asset/tzdb/Europe/Vatican +0 -0
- swordfish/asset/tzdb/Europe/Vienna +0 -0
- swordfish/asset/tzdb/Europe/Vilnius +0 -0
- swordfish/asset/tzdb/Europe/Volgograd +0 -0
- swordfish/asset/tzdb/Europe/Warsaw +0 -0
- swordfish/asset/tzdb/Europe/Zagreb +0 -0
- swordfish/asset/tzdb/Europe/Zaporozhye +0 -0
- swordfish/asset/tzdb/Europe/Zurich +0 -0
- swordfish/asset/tzdb/Factory +0 -0
- swordfish/asset/tzdb/GB +0 -0
- swordfish/asset/tzdb/GB-Eire +0 -0
- swordfish/asset/tzdb/GMT +0 -0
- swordfish/asset/tzdb/GMT+0 +0 -0
- swordfish/asset/tzdb/GMT-0 +0 -0
- swordfish/asset/tzdb/GMT0 +0 -0
- swordfish/asset/tzdb/Greenwich +0 -0
- swordfish/asset/tzdb/HST +0 -0
- swordfish/asset/tzdb/Hongkong +0 -0
- swordfish/asset/tzdb/Iceland +0 -0
- swordfish/asset/tzdb/Indian/Antananarivo +0 -0
- swordfish/asset/tzdb/Indian/Chagos +0 -0
- swordfish/asset/tzdb/Indian/Christmas +0 -0
- swordfish/asset/tzdb/Indian/Cocos +0 -0
- swordfish/asset/tzdb/Indian/Comoro +0 -0
- swordfish/asset/tzdb/Indian/Kerguelen +0 -0
- swordfish/asset/tzdb/Indian/Mahe +0 -0
- swordfish/asset/tzdb/Indian/Maldives +0 -0
- swordfish/asset/tzdb/Indian/Mauritius +0 -0
- swordfish/asset/tzdb/Indian/Mayotte +0 -0
- swordfish/asset/tzdb/Indian/Reunion +0 -0
- swordfish/asset/tzdb/Iran +0 -0
- swordfish/asset/tzdb/Israel +0 -0
- swordfish/asset/tzdb/Jamaica +0 -0
- swordfish/asset/tzdb/Japan +0 -0
- swordfish/asset/tzdb/Kwajalein +0 -0
- swordfish/asset/tzdb/Libya +0 -0
- swordfish/asset/tzdb/MET +0 -0
- swordfish/asset/tzdb/MST +0 -0
- swordfish/asset/tzdb/MST7MDT +0 -0
- swordfish/asset/tzdb/Mexico/BajaNorte +0 -0
- swordfish/asset/tzdb/Mexico/BajaSur +0 -0
- swordfish/asset/tzdb/Mexico/General +0 -0
- swordfish/asset/tzdb/NZ +0 -0
- swordfish/asset/tzdb/NZ-CHAT +0 -0
- swordfish/asset/tzdb/Navajo +0 -0
- swordfish/asset/tzdb/PRC +0 -0
- swordfish/asset/tzdb/PST8PDT +0 -0
- swordfish/asset/tzdb/Pacific/Apia +0 -0
- swordfish/asset/tzdb/Pacific/Auckland +0 -0
- swordfish/asset/tzdb/Pacific/Bougainville +0 -0
- swordfish/asset/tzdb/Pacific/Chatham +0 -0
- swordfish/asset/tzdb/Pacific/Chuuk +0 -0
- swordfish/asset/tzdb/Pacific/Easter +0 -0
- swordfish/asset/tzdb/Pacific/Efate +0 -0
- swordfish/asset/tzdb/Pacific/Enderbury +0 -0
- swordfish/asset/tzdb/Pacific/Fakaofo +0 -0
- swordfish/asset/tzdb/Pacific/Fiji +0 -0
- swordfish/asset/tzdb/Pacific/Funafuti +0 -0
- swordfish/asset/tzdb/Pacific/Galapagos +0 -0
- swordfish/asset/tzdb/Pacific/Gambier +0 -0
- swordfish/asset/tzdb/Pacific/Guadalcanal +0 -0
- swordfish/asset/tzdb/Pacific/Guam +0 -0
- swordfish/asset/tzdb/Pacific/Honolulu +0 -0
- swordfish/asset/tzdb/Pacific/Johnston +0 -0
- swordfish/asset/tzdb/Pacific/Kiritimati +0 -0
- swordfish/asset/tzdb/Pacific/Kosrae +0 -0
- swordfish/asset/tzdb/Pacific/Kwajalein +0 -0
- swordfish/asset/tzdb/Pacific/Majuro +0 -0
- swordfish/asset/tzdb/Pacific/Marquesas +0 -0
- swordfish/asset/tzdb/Pacific/Midway +0 -0
- swordfish/asset/tzdb/Pacific/Nauru +0 -0
- swordfish/asset/tzdb/Pacific/Niue +0 -0
- swordfish/asset/tzdb/Pacific/Norfolk +0 -0
- swordfish/asset/tzdb/Pacific/Noumea +0 -0
- swordfish/asset/tzdb/Pacific/Pago_Pago +0 -0
- swordfish/asset/tzdb/Pacific/Palau +0 -0
- swordfish/asset/tzdb/Pacific/Pitcairn +0 -0
- swordfish/asset/tzdb/Pacific/Pohnpei +0 -0
- swordfish/asset/tzdb/Pacific/Ponape +0 -0
- swordfish/asset/tzdb/Pacific/Port_Moresby +0 -0
- swordfish/asset/tzdb/Pacific/Rarotonga +0 -0
- swordfish/asset/tzdb/Pacific/Saipan +0 -0
- swordfish/asset/tzdb/Pacific/Samoa +0 -0
- swordfish/asset/tzdb/Pacific/Tahiti +0 -0
- swordfish/asset/tzdb/Pacific/Tarawa +0 -0
- swordfish/asset/tzdb/Pacific/Tongatapu +0 -0
- swordfish/asset/tzdb/Pacific/Truk +0 -0
- swordfish/asset/tzdb/Pacific/Wake +0 -0
- swordfish/asset/tzdb/Pacific/Wallis +0 -0
- swordfish/asset/tzdb/Pacific/Yap +0 -0
- swordfish/asset/tzdb/Poland +0 -0
- swordfish/asset/tzdb/Portugal +0 -0
- swordfish/asset/tzdb/ROC +0 -0
- swordfish/asset/tzdb/ROK +0 -0
- swordfish/asset/tzdb/Singapore +0 -0
- swordfish/asset/tzdb/Turkey +0 -0
- swordfish/asset/tzdb/UCT +0 -0
- swordfish/asset/tzdb/US/Alaska +0 -0
- swordfish/asset/tzdb/US/Aleutian +0 -0
- swordfish/asset/tzdb/US/Arizona +0 -0
- swordfish/asset/tzdb/US/Central +0 -0
- swordfish/asset/tzdb/US/East-Indiana +0 -0
- swordfish/asset/tzdb/US/Eastern +0 -0
- swordfish/asset/tzdb/US/Hawaii +0 -0
- swordfish/asset/tzdb/US/Indiana-Starke +0 -0
- swordfish/asset/tzdb/US/Michigan +0 -0
- swordfish/asset/tzdb/US/Mountain +0 -0
- swordfish/asset/tzdb/US/Pacific +0 -0
- swordfish/asset/tzdb/US/Samoa +0 -0
- swordfish/asset/tzdb/UTC +0 -0
- swordfish/asset/tzdb/Universal +0 -0
- swordfish/asset/tzdb/W-SU +0 -0
- swordfish/asset/tzdb/WET +0 -0
- swordfish/asset/tzdb/Zulu +0 -0
- swordfish/asset/tzdb/iso3166.tab +274 -0
- swordfish/asset/tzdb/leapseconds +61 -0
- swordfish/asset/tzdb/posixrules +0 -0
- swordfish/asset/tzdb/tzdata.zi +4150 -0
- swordfish/asset/tzdb/tzmap_gen.py +27 -0
- swordfish/asset/tzdb/tzmapping +501 -0
- swordfish/asset/tzdb/windowsZones.xml +781 -0
- swordfish/asset/tzdb/zone.tab +448 -0
- swordfish/asset/tzdb/zone1970.tab +382 -0
- swordfish/connection.py +33 -0
- swordfish/data.py +806 -0
- swordfish/engine.py +28 -0
- swordfish/enums.py +32 -0
- swordfish/function.py +3 -0
- swordfish/infos.py +53 -0
- swordfish/io.py +11 -0
- swordfish/libSwordfish.dll +0 -0
- swordfish/libclucene-contribs-lib.dll +0 -0
- swordfish/libclucene-core.dll +0 -0
- swordfish/libclucene-shared.dll +0 -0
- swordfish/libgcc_s_seh-1.dll +0 -0
- swordfish/libstdc++-6.dll +0 -0
- swordfish/libwinpthread-1.dll +0 -0
- swordfish/module.py +57 -0
- swordfish/plugins/__init__.py +17 -0
- swordfish/plugins/backtest/__init__.py +38 -0
- swordfish/plugins/backtest/backtest.py +4228 -0
- swordfish/plugins/backtest/translator.py +820 -0
- swordfish/plugins/matching_engine_simulator.py +247 -0
- swordfish/streaming.py +19 -0
- swordfish/tools.py +71 -0
- swordfish/types.py +30 -0
swordfish/_engine.py
ADDED
|
@@ -0,0 +1,1456 @@
|
|
|
1
|
+
from ._swordfishcpp import ( # type: ignore
|
|
2
|
+
EngineType, StreamEngine, _create_engine,
|
|
3
|
+
StreamBroadcastEngine, TimeSeriesEngine, CrossSectionalEngine,
|
|
4
|
+
ReactiveStateEngine, StreamFilterEngine,
|
|
5
|
+
ProgrammingError,
|
|
6
|
+
Constant, Table, FunctionDef, MetaCode,
|
|
7
|
+
_global_exec, _global_call
|
|
8
|
+
)
|
|
9
|
+
|
|
10
|
+
from .types import TypeDict
|
|
11
|
+
from . import data as sf_data
|
|
12
|
+
|
|
13
|
+
from typing import Literal, Any, List, Union, Tuple, Optional, Dict
|
|
14
|
+
|
|
15
|
+
import abc
|
|
16
|
+
from pathlib import Path
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
class Builder(abc.ABC):
    """Abstract base class for stream-engine builders.

    A concrete builder gathers its configuration in ``__init__`` and
    materializes the engine when :meth:`submit` is called.
    """

    name: str
    """The name of the engine.
    """

    def __init__(self, name: str):
        # The engine name is the only state shared by every builder;
        # subclasses layer their own configuration on top of it.
        self.name = name

    @abc.abstractmethod
    def submit(self) -> StreamEngine:
        """
        Build a StreamEngine from the collected configuration.

        Returns
        -------
        StreamEngine
            An instance of a built StreamEngine.
        """
        ...
|
38
|
+
|
|
39
|
+
|
|
40
|
+
def generate_create_method(builder_class):
    """Return a ``create`` classmethod body that instantiates *builder_class*.

    The produced function ignores the class it is attached to and simply
    forwards every positional and keyword argument to ``builder_class``.
    """
    def _create(cls, *args, **kwargs):
        # ``cls`` is intentionally unused: creation is fully delegated
        # to the builder class captured in this closure.
        return builder_class(*args, **kwargs)

    return _create
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
def __internal_list_engine(dst_type: Optional[EngineType] = None):
    """List live stream engines, optionally filtered by engine type.

    Parameters
    ----------
    dst_type : Optional[EngineType]
        When given, only engines of this type are returned; when ``None``,
        every engine reported by the server is included.

    Returns
    -------
    list of tuple
        One ``(engine_name, engine_type, user)`` tuple per matching engine,
        with ``engine_type`` converted to an :class:`EngineType` member.
    """
    res_tb = _global_exec("getStreamEngineList()")
    engine_types = res_tb["engineType"].to_list()
    engine_names = res_tb["engineName"].to_list()
    users = res_tb["user"].to_list()
    res_list = []
    for engine_name, engine_type, user in zip(engine_names, engine_types, users):
        engine_type = EngineType.get_from_str(engine_type)
        # Compare against None explicitly: a falsy EngineType member would
        # otherwise silently disable the filter and return every engine.
        if dst_type is not None and engine_type != dst_type:
            continue
        res_list.append((engine_name, engine_type, user))
    return res_list
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
def generate_list_method(engine_class):
    """Return a ``list`` classmethod body bound to *engine_class*'s type.

    The produced function enumerates all live engines whose type equals
    ``engine_class.engine_type``.
    """
    def _list(cls):
        # Delegate to the module-level helper, filtering on the engine
        # type declared by the captured engine class.
        return __internal_list_engine(engine_class.engine_type)

    return _list
|
|
64
|
+
|
|
65
|
+
|
|
66
|
+
def generate_get_method(engine_class):
    """Return a ``get`` classmethod body that looks up an engine by name.

    The produced function fetches the named engine and verifies that its
    type matches ``engine_class.engine_type``; a mismatch raises
    ``ProgrammingError``.
    """
    def _get(cls, name: str):
        found = _global_call("getStreamEngine", name)
        # Guard clause: only hand back engines of the expected type so a
        # name collision across engine kinds surfaces as an error.
        if found.engine_type == engine_class.engine_type:
            return found
        raise ProgrammingError("Cannot get StreamEngine with name: " + name)

    return _get
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
class StreamBroadcastEngineBuilder(Builder):
    """Builder for :class:`StreamBroadcastEngine` instances.

    Holds the input table schema and the output tables; the engine itself
    is created when :meth:`submit` is called.
    """

    def __init__(self, name, table_schema: Union[Table, TypeDict], outputs: List[Table]):
        super().__init__(name)
        # A plain type dictionary is normalized into an empty table that
        # only carries the schema.
        if isinstance(table_schema, dict):
            table_schema = sf_data.table(types=table_schema, size=0, capacity=1)
        self._dummy = table_schema
        self._outs = outputs

    def submit(self) -> StreamBroadcastEngine:
        """Create and return the configured StreamBroadcastEngine."""
        return _create_engine(
            EngineType.StreamBroadcastEngine, self.name, self._dummy, self._outs
        )
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
# Attach the builder-based factory API onto the StreamBroadcastEngine class
# exported by the C extension: ``create`` returns a builder for a new engine,
# ``list`` enumerates live engines of this type, and ``get`` looks one up by
# name (raising ProgrammingError on a type mismatch).
StreamBroadcastEngine.create = classmethod(generate_create_method(StreamBroadcastEngineBuilder))
StreamBroadcastEngine.list = classmethod(generate_list_method(StreamBroadcastEngine))
StreamBroadcastEngine.get = classmethod(generate_get_method(StreamBroadcastEngine))
|
|
90
|
+
|
|
91
|
+
|
|
92
|
+
class TimeSeriesEngineBuilder(Builder):
    """Builder that configures and creates a :class:`TimeSeriesEngine`.

    Keyword parameters accepted by ``__init__`` are delegated to the
    same-named fluent setter methods, so an engine can be configured either
    in one constructor call or by chaining setters before :meth:`submit`.
    """

    def __init__(
        self, name: str, table_schema: Union[Table, TypeDict], outputs: Table,
        window_size, step, metrics, *,
        time_col: Optional[Union[List[str], str]] = None,
        use_system_time: bool = False,
        key_col: Optional[Union[List[str], str]] = None,
        garbage_size: int = 50000,
        update_time: Optional[int] = None,
        use_window_start_time: bool = False,
        round_time: bool = True,
        snapshot_dir: Optional[Union[Path, str]] = None,
        snapshot_interval_in_msg_count: Optional[int] = None,
        fill: Union[Literal["none", "null", "ffill"], Constant, List[Union[Literal["null", "ffill"], Constant]]] = "none",
        force_trigger_time: Optional[int] = None,
        key_purge_freq_in_sec: Optional[int] = None,
        closed: Literal["left", "right"] = "left",
        output_elapsed_microseconds: bool = False,
        sub_window: Optional[Union[int, Constant]] = None,
        parallelism: int = 1,
        accepted_delay: int = 0,
        output_handler: Optional[FunctionDef] = None,
        msg_as_table: bool = False,
    ):
        """Initialize the builder; see the individual setter methods for
        the meaning of each keyword parameter."""
        super().__init__(name)
        if isinstance(table_schema, dict):
            # A plain type mapping is materialised into an empty table so it
            # can serve as the engine's dummy schema table.
            table_schema = sf_data.table(types=table_schema, size=0, capacity=1)
        self._dummy = table_schema
        self._outputs = outputs
        self._window_size = window_size
        self._step = step
        self._metrics = metrics
        # Delegate every optional parameter to its fluent setter so that
        # any normalisation done there applies uniformly.
        self.time_col(time_col)
        self.use_system_time(use_system_time)
        self.key_col(key_col)
        self.garbage_size(garbage_size)
        self.update_time(update_time)
        self.use_window_start_time(use_window_start_time)
        self.round_time(round_time)
        self.snapshot_dir(snapshot_dir)
        self.snapshot_interval_in_msg_count(snapshot_interval_in_msg_count)
        self.fill(fill)
        self.force_trigger_time(force_trigger_time)
        self.key_purge_freq_in_sec(key_purge_freq_in_sec)
        self.closed(closed)
        self.output_elapsed_microseconds(output_elapsed_microseconds)
        self.sub_window(sub_window)
        self.parallelism(parallelism)
        self.accepted_delay(accepted_delay)
        self.output_handler(output_handler)
        self.msg_as_table(msg_as_table)

    def time_col(self, val: Optional[Union[List[str], str]] = None):
        """
        Sets the time column(s) for the subscribed stream table.

        Parameters
        ----------
        val : Union[List[str], str], optional
            Specifies the time column(s). If provided as a list, it must contain
            exactly two elements: a date (as a DATE type) and a time (as a TIME,
            SECOND, or NANOTIME type). In such cases, the first column of the
            output table will combine these elements into a single datetime value,
            with the data type matching the result of `concatDateTime(date, time)`.
            Defaults to None.

        Returns
        -------
        Self
            The instance itself.
        """
        self._time_col = val
        return self

    def use_system_time(self, val: bool = False):
        """
        Sets whether to perform calculations based on system time when ingesting data.

        Parameters
        ----------
        val : bool, optional
            If True, the engine will regularly window the streaming data at fixed
            time intervals for calculations according to the ingestion time (local
            system time with millisecond precision, independent of any temporal
            columns in the streaming table) of each record. As long as a window
            contains data, the calculation will be performed automatically when the
            window ends. The first column in output table indicates the timestamp
            when the calculation occurred. If False, the engine windows data based
            on a specified time column in the stream table. The calculation for a
            window is triggered by the first record arriving after the previous
            window, excluding the triggering record. Defaults to False.

        Returns
        -------
        Self
            The instance itself.
        """
        self._use_system_time = val
        return self

    def key_col(self, val: Optional[Union[List[str], str]] = None):
        """
        Sets the name of the grouping column(s).

        Parameters
        ----------
        val : Union[List[str], str], optional
            The name of the grouping column(s). Defaults to None.

        Returns
        -------
        Self
            The instance itself.
        """
        self._key_col = val
        return self

    def garbage_size(self, val: int = 50000):
        """
        Sets the threshold for garbage collection of historical data.

        Parameters
        ----------
        val : int, optional
            The threshold for garbage collection in number of rows. Defaults to
            50,000.

        Returns
        -------
        Self
            The instance itself.
        """
        self._garbage_size = val
        return self

    def update_time(self, val: Optional[int] = None):
        """
        Sets the interval to trigger window calculations before the window ends.

        Parameters
        ----------
        val : int, optional
            The interval to trigger window calculations. Defaults to None.

        Returns
        -------
        Self
            The instance itself.
        """
        self._update_time = val
        return self

    def use_window_start_time(self, val: bool = False):
        """
        Sets whether the time column in the output table uses the starting time of
        the windows.

        Parameters
        ----------
        val : bool, optional
            Whether to use the starting time of the windows. If False, the
            timestamps in the output table represent the end time of the windows.
            If `window_size` is a list, `use_window_start_time` must be False.
            Defaults to False.

        Returns
        -------
        Self
            The instance itself.
        """
        self._use_window_start_time = val
        return self

    def round_time(self, val: bool = True):
        """
        Aligns the window boundary based on the specified alignment rule.

        Parameters
        ----------
        val : bool, optional
            If True, uses the multi-minute rule for alignment. If False, uses the
            one-minute rule. Defaults to True.

        Returns
        -------
        Self
            The instance itself.
        """
        self._round_time = val
        return self

    def snapshot_dir(self, val: Optional[Union[Path, str]] = None):
        """
        Sets the directory where the streaming engine snapshot is saved.

        Parameters
        ----------
        val : Union[Path, str], optional
            The directory path for saving the snapshot. Defaults to None.

        Returns
        -------
        Self
            The instance itself.
        """
        # Path objects are normalised to str for the underlying engine call.
        self._snapshot_dir = str(val) if val is not None else None
        return self

    def snapshot_interval_in_msg_count(self, val: Optional[int] = None):
        """
        Sets the number of messages to receive before saving the next snapshot.

        Parameters
        ----------
        val : int, optional
            The number of messages before the next snapshot. Defaults to None.

        Returns
        -------
        Self
            The instance itself.
        """
        self._snapshot_interval_in_msg_count = val
        return self

    def fill(self, val: Union[Literal["none", "null", "ffill"], Constant, List[Union[Literal["null", "ffill"], Constant]]] = "none"):
        """
        Sets the filling method(s) to deal with an empty window (in a group).

        Parameters
        ----------
        val : Union[Literal["none", "null", "ffill"], Constant,
              List[Union[Literal["null", "ffill"], Constant]]], optional
            The filling method or a list of filling methods. Defaults to "none".

        Returns
        -------
        Self
            The instance itself.
        """
        self._fill = val
        return self

    def force_trigger_time(self, val: Optional[int] = None):
        """
        Sets the waiting time to force trigger calculation in uncalculated windows
        for each group.

        Parameters
        ----------
        val : int, optional
            The waiting time in milliseconds to trigger window calculation. Defaults
            to None.

        Returns
        -------
        Self
            The instance itself.
        """
        self._force_trigger_time = val
        return self

    def key_purge_freq_in_sec(self, val: Optional[int] = None):
        """
        Sets the interval in seconds to remove groups with no incoming data for a
        long time.

        Parameters
        ----------
        val : int, optional
            The interval (in seconds) to purge inactive groups. Defaults to None.

        Returns
        -------
        Self
            The instance itself.
        """
        self._key_purge_freq_in_sec = val
        return self

    def closed(self, val: Literal["left", "right"] = "left"):
        """
        Specifies whether the left or right boundary is included in the window.

        Parameters
        ----------
        val : Literal["left", "right"], optional
            Specifies which boundary is included. Defaults to "left".

        Returns
        -------
        Self
            The instance itself.
        """
        self._closed = val
        return self

    def output_elapsed_microseconds(self, val: bool = False):
        """
        Determines whether to output the elapsed time (in microseconds).

        Parameters
        ----------
        val : bool, optional
            Whether to output the elapsed time. Defaults to False.

        Returns
        -------
        Self
            The instance itself.
        """
        self._output_elapsed_microseconds = val
        return self

    def sub_window(self, val: Optional[Union[int, Constant]] = None):
        """
        Specifies the range of the subwindow within the window defined by
        `window_size`.

        Parameters
        ----------
        val : Union[int, Constant], optional
            The range of the subwindow. Defaults to None.

        Returns
        -------
        Self
            The instance itself.
        """
        self._sub_window = val
        return self

    def parallelism(self, val: int = 1):
        """
        Sets the number of worker threads for parallel computation.

        Parameters
        ----------
        val : int, optional
            The number of worker threads. Defaults to 1.

        Returns
        -------
        Self
            The instance itself.
        """
        self._parallelism = val
        return self

    def accepted_delay(self, val: int = 0):
        """
        Sets the maximum delay for each window to accept data.

        Parameters
        ----------
        val : int, optional
            A positive integer specifying the maximum delay. Defaults to 0.

        Returns
        -------
        Self
            The instance itself.
        """
        self._accepted_delay = val
        return self

    def output_handler(self, val: Optional[FunctionDef] = None):
        """
        Sets a unary or partial function to handle the output. If specified, the
        engine will not write calculation results to the output table directly.

        Parameters
        ----------
        val : FunctionDef, optional
            The function to handle the output. Defaults to None.

        Returns
        -------
        Self
            The instance itself.
        """
        self._output_handler = val
        return self

    def msg_as_table(self, val: bool = False):
        """
        Sets whether the output data is passed into the function (specified by
        `output_handler`) as a table or as an AnyVector.

        Parameters
        ----------
        val : bool, optional
            Whether to pass data as a table (`True`) or as an AnyVector (`False`).
            Defaults to False.

        Returns
        -------
        Self
            The instance itself.
        """
        self._msg_as_table = val
        return self

    def submit(self) -> TimeSeriesEngine:
        """Create the time-series engine from the accumulated configuration.

        NOTE: the argument order below is positional and must match the
        underlying `_create_engine` signature exactly — do not reorder.
        """
        return _create_engine(
            EngineType.TimeSeriesEngine, self.name,
            self._window_size, self._step, self._metrics,
            self._dummy, self._outputs,
            self._time_col,
            self._use_system_time,
            self._key_col,
            self._garbage_size,
            self._update_time,
            self._use_window_start_time,
            self._round_time,
            self._snapshot_dir,
            self._snapshot_interval_in_msg_count,
            self._fill,
            self._force_trigger_time,
            # Placeholder for a parameter this builder does not expose;
            # sf_data.Nothing keeps the positional slot occupied.
            sf_data.Nothing,
            self._key_purge_freq_in_sec,
            self._closed,
            self._output_elapsed_microseconds,
            self._sub_window,
            self._parallelism,
            self._accepted_delay,
            self._output_handler,
            self._msg_as_table,
        )
|
|
521
|
+
|
|
522
|
+
|
|
523
|
+
# Attach the generated factory/listing/lookup classmethods to TimeSeriesEngine:
# `create` builds via TimeSeriesEngineBuilder, `list` enumerates engines of
# this type, and `get` fetches an existing engine by name (type-checked).
TimeSeriesEngine.create = classmethod(generate_create_method(TimeSeriesEngineBuilder))
TimeSeriesEngine.list = classmethod(generate_list_method(TimeSeriesEngine))
TimeSeriesEngine.get = classmethod(generate_get_method(TimeSeriesEngine))
|
|
526
|
+
|
|
527
|
+
|
|
528
|
+
class CrossSectionalEngineBuilder(Builder):
|
|
529
|
+
def __init__(
|
|
530
|
+
self, name: str, table_schema: Union[Table, TypeDict],
|
|
531
|
+
key_col: Union[List[str], str], *,
|
|
532
|
+
metrics=None,
|
|
533
|
+
output: Table = None,
|
|
534
|
+
triggering_pattern: Literal["per_batch", "per_row", "interval", "key_count", "data_interval"] = "per_batch",
|
|
535
|
+
triggering_interval: Any = None,
|
|
536
|
+
use_system_time: bool = True,
|
|
537
|
+
time_col: Optional[str] = None,
|
|
538
|
+
last_batch_only: bool = False,
|
|
539
|
+
context_by_col: Optional[Union[List[str], str]] = None,
|
|
540
|
+
snapshot_dir: Optional[Union[Path, str]] = None,
|
|
541
|
+
snapshot_interval_in_msg_count: Optional[int] = None,
|
|
542
|
+
output_elapsed_microseconds: bool = False,
|
|
543
|
+
round_time: bool = True,
|
|
544
|
+
key_filter: Optional[MetaCode] = None,
|
|
545
|
+
updated_context_groups_only: bool = False,
|
|
546
|
+
):
|
|
547
|
+
super().__init__(name)
|
|
548
|
+
if isinstance(table_schema, dict):
|
|
549
|
+
table_schema = sf_data.table(types=table_schema, size=0, capacity=1)
|
|
550
|
+
self._dummy = table_schema
|
|
551
|
+
self._key_col = key_col
|
|
552
|
+
self.metrics(metrics)
|
|
553
|
+
self.output(output)
|
|
554
|
+
self.triggering_pattern(triggering_pattern)
|
|
555
|
+
self.triggering_interval(triggering_interval)
|
|
556
|
+
self.use_system_time(use_system_time)
|
|
557
|
+
self.time_col(time_col)
|
|
558
|
+
self.last_batch_only(last_batch_only)
|
|
559
|
+
self.context_by_col(context_by_col)
|
|
560
|
+
self.snapshot_dir(snapshot_dir)
|
|
561
|
+
self.snapshot_interval_in_msg_count(snapshot_interval_in_msg_count)
|
|
562
|
+
self.output_elapsed_microseconds(output_elapsed_microseconds)
|
|
563
|
+
self.round_time(round_time)
|
|
564
|
+
self.key_filter(key_filter)
|
|
565
|
+
self.updated_context_groups_only(updated_context_groups_only)
|
|
566
|
+
|
|
567
|
+
def metrics(self, val: Union[MetaCode, List[MetaCode]] = None):
|
|
568
|
+
"""
|
|
569
|
+
Specifies the formulas for calculation using MetaCode or an AnyVector.
|
|
570
|
+
|
|
571
|
+
The value can be:
|
|
572
|
+
- Built-in or user-defined aggregate functions, e.g., `<myfunc(qty)>`
|
|
573
|
+
|
|
574
|
+
>>> @F.swordfish_udf
|
|
575
|
+
>>> def myFunc(x):
|
|
576
|
+
... return x + 1
|
|
577
|
+
...
|
|
578
|
+
>>> with sf.meta_code() as m:
|
|
579
|
+
... metircs = myFunc(m.col("qty"))
|
|
580
|
+
|
|
581
|
+
- Expressions on previous results, e.g., `<avg(price1)>`.
|
|
582
|
+
|
|
583
|
+
>>> with sf.meta_code() as m:
|
|
584
|
+
... metrics = F.avg(m.col("price1"))
|
|
585
|
+
|
|
586
|
+
- Calculations on multiple columns, e.g., `<[std(price1-price2)]>`.
|
|
587
|
+
|
|
588
|
+
>>> with sf.meta_code() as m:
|
|
589
|
+
... metrics = F.std(m.col("price1") - m.col("price2"))
|
|
590
|
+
|
|
591
|
+
- Functions with multiple returns, such as `<func(price) as `col1`col2>`.
|
|
592
|
+
|
|
593
|
+
>>> with sf.meta_code() as m:
|
|
594
|
+
... metrics = m.col_alias(func(m.col("price")), ["col1", "col2"])
|
|
595
|
+
|
|
596
|
+
The column names specified in `metrics` are not case-sensitive and can be
|
|
597
|
+
inconsistent with the column names of the input tables.
|
|
598
|
+
|
|
599
|
+
Parameters
|
|
600
|
+
----------
|
|
601
|
+
val : Union[MetaCode, List[MetaCode]], optional
|
|
602
|
+
MetaCode or an AnyVector specifying the formulas. Defaults to None.
|
|
603
|
+
|
|
604
|
+
Returns
|
|
605
|
+
-------
|
|
606
|
+
Self
|
|
607
|
+
The instance itself.
|
|
608
|
+
"""
|
|
609
|
+
self._metrics = val
|
|
610
|
+
return self
|
|
611
|
+
|
|
612
|
+
def output(self, val: Table = None):
|
|
613
|
+
"""
|
|
614
|
+
Specifies the output table for the results.
|
|
615
|
+
|
|
616
|
+
- If context_by_col is not specified, the output columns are in the following
|
|
617
|
+
order:
|
|
618
|
+
|
|
619
|
+
- The first column is of TIMESTAMP type, storing the time when each
|
|
620
|
+
calculation starts. If ``time_col`` is specified, it takes the values of
|
|
621
|
+
``time_col``.
|
|
622
|
+
|
|
623
|
+
- The column(s) storing calculation results. The data types of the
|
|
624
|
+
column(s) must be the same as the results of metrics.
|
|
625
|
+
|
|
626
|
+
- A column of LONG type storing the calculation time of each batch. Output
|
|
627
|
+
only when output_elapsed_microseconds=True.
|
|
628
|
+
|
|
629
|
+
- A column of INT type storing the number of records of each batch. Output
|
|
630
|
+
only when output_elapsed_microseconds=True.
|
|
631
|
+
|
|
632
|
+
- If context_by_col is specified, the output columns are in the following
|
|
633
|
+
order:
|
|
634
|
+
|
|
635
|
+
- The first column is of TIMESTAMP type, storing the time when each
|
|
636
|
+
calculation starts. If time_col is specified, it takes the values of
|
|
637
|
+
time_col.
|
|
638
|
+
|
|
639
|
+
- The second column is the column specified by context_by_col.
|
|
640
|
+
|
|
641
|
+
- The column(s) storing calculation results. The data types of the
|
|
642
|
+
column(s) must be the same as the results of metrics.
|
|
643
|
+
|
|
644
|
+
- A column of LONG type storing the calculation time of each batch. Output
|
|
645
|
+
only when output_elapsed_microseconds=true.
|
|
646
|
+
|
|
647
|
+
- A column of INT type storing the number of records of each batch. Output
|
|
648
|
+
only when output_elapsed_microseconds=true.
|
|
649
|
+
|
|
650
|
+
Parameters
|
|
651
|
+
----------
|
|
652
|
+
val : Table, optional
|
|
653
|
+
an in-memory table or a DFS table, by default None
|
|
654
|
+
|
|
655
|
+
Returns
|
|
656
|
+
-------
|
|
657
|
+
Self
|
|
658
|
+
The instance itself.
|
|
659
|
+
"""
|
|
660
|
+
self._output = val
|
|
661
|
+
return self
|
|
662
|
+
|
|
663
|
+
_inner_pattern_map = {
|
|
664
|
+
"per_batch": "perBatch",
|
|
665
|
+
"per_row": "perRow",
|
|
666
|
+
"interval": "interval",
|
|
667
|
+
"key_count": "keyCount",
|
|
668
|
+
"data_interval": "dataInterval",
|
|
669
|
+
}
|
|
670
|
+
|
|
671
|
+
def triggering_pattern(self, val: Literal["per_batch", "per_row", "interval", "key_count", "data_interval"] = "per_batch"):
|
|
672
|
+
"""
|
|
673
|
+
Specifies how to trigger the calculations.
|
|
674
|
+
The engine returns a result every time a calculation is triggered.
|
|
675
|
+
|
|
676
|
+
Parameters
|
|
677
|
+
----------
|
|
678
|
+
val : Literal["per_batch", "per_row", "interval", "key_count", "data_interval"], optional
|
|
679
|
+
|
|
680
|
+
- 'per_batch' (default): Calculates when a batch of data arrives.
|
|
681
|
+
|
|
682
|
+
- 'per_row': Calculates when a new record arrives.
|
|
683
|
+
|
|
684
|
+
- 'interval': Calculates at intervals specified by `triggering_interval`,
|
|
685
|
+
using system time.
|
|
686
|
+
|
|
687
|
+
- 'key_count': When data with the same timestamp arrives in batches, the
|
|
688
|
+
calculation is triggered when the number of keys with the latest
|
|
689
|
+
timestamp reaches `triggering_interval`, or data with a newer timestamp
|
|
690
|
+
arrives.
|
|
691
|
+
|
|
692
|
+
- 'data_interval': Calculates at intervals based on timestamps in the data.
|
|
693
|
+
Requires `time_col` to be specified and `use_system_time` to be False.
|
|
694
|
+
|
|
695
|
+
.. note::
|
|
696
|
+
To use 'key_count' or 'data_interval', `time_col` must be specified and
|
|
697
|
+
`use_system_time` must be set to False. Out-of-order data will be
|
|
698
|
+
discarded in these cases.
|
|
699
|
+
|
|
700
|
+
Returns
|
|
701
|
+
-------
|
|
702
|
+
Self
|
|
703
|
+
The instance itself.
|
|
704
|
+
"""
|
|
705
|
+
self._triggering_pattern = val
|
|
706
|
+
return self
|
|
707
|
+
|
|
708
|
+
def triggering_interval(self, val: Any = None):
|
|
709
|
+
"""
|
|
710
|
+
Sets the triggering interval for the system based on the triggering pattern.
|
|
711
|
+
|
|
712
|
+
The behavior of `triggering_interval` depends on the value of
|
|
713
|
+
`triggering_pattern`:
|
|
714
|
+
|
|
715
|
+
- If `triggering_pattern` = 'interval', `triggering_interval` is a positive
|
|
716
|
+
integer indicating the interval in milliseconds between 2 adjacent
|
|
717
|
+
calculations. Default is 1,000 milliseconds. A calculation is triggered
|
|
718
|
+
every `triggering_interval` milliseconds if the data in the engine has not
|
|
719
|
+
been calculated.
|
|
720
|
+
|
|
721
|
+
- If `triggering_pattern` = 'keyCount', `triggering_interval` can either be:
|
|
722
|
+
|
|
723
|
+
- An integer specifying a threshold for the number of uncalculated
|
|
724
|
+
records.
|
|
725
|
+
|
|
726
|
+
- A tuple of 2 elements:
|
|
727
|
+
|
|
728
|
+
- The first element is an integer specifying the threshold of records
|
|
729
|
+
with the latest timestamp to trigger a calculation.
|
|
730
|
+
|
|
731
|
+
- The second element can be either:
|
|
732
|
+
|
|
733
|
+
- An int threshold
|
|
734
|
+
|
|
735
|
+
- A Duration value. For example, when `triggering_interval` is set to (c1, c2):
|
|
736
|
+
|
|
737
|
+
- If c2 is an integer and the number of keys with the latest
|
|
738
|
+
timestamp t1 doesn't reach c1, calculation will not be
|
|
739
|
+
triggered and the system goes on to save data with greater
|
|
740
|
+
timestamp t2 (t2>t1). Data with t1 will be calculated when
|
|
741
|
+
either of the events happens: the number of keys with
|
|
742
|
+
timestamp t2 reaches c2, or data with greater timestamp t3
|
|
743
|
+
(t3>t2) arrives. Note that c2 must be smaller than c1.
|
|
744
|
+
|
|
745
|
+
- If c2 is a duration and the number of keys with the latest
|
|
746
|
+
timestamp t1 doesn't reach c1, calculation will not be
|
|
747
|
+
triggered and the system goes on to save data with greater
|
|
748
|
+
timestamp t2 (t2>t1). Once data with t2 starts to come in,
|
|
749
|
+
data with t1 will not be calculated until any of the events
|
|
750
|
+
happens: the number of keys with timestamp t1 reaches c1, or
|
|
751
|
+
data with greater timestamp t3 (t3>t2) arrives, or the
|
|
752
|
+
Duration c2 comes to an end.
|
|
753
|
+
|
|
754
|
+
- If `triggering_pattern` = 'dataInterval', `triggering_interval` is a
|
|
755
|
+
positive integer in the same units as the timestamps in `time_col`. Default
|
|
756
|
+
is 1,000 milliseconds. A calculation is triggered for all data in the
|
|
757
|
+
current window when the first record of the next window arrives. A
|
|
758
|
+
calculation is triggered only for windows containing data.
|
|
759
|
+
|
|
760
|
+
Parameters
|
|
761
|
+
----------
|
|
762
|
+
val : Any, optional
|
|
763
|
+
The triggering interval or conditions. Defaults to None.
|
|
764
|
+
|
|
765
|
+
Returns
|
|
766
|
+
-------
|
|
767
|
+
Self
|
|
768
|
+
The instance itself.
|
|
769
|
+
"""
|
|
770
|
+
if isinstance(val, int):
|
|
771
|
+
val = sf_data.Int(val)
|
|
772
|
+
self._triggering_interval = val
|
|
773
|
+
return self
|
|
774
|
+
|
|
775
|
+
def use_system_time(self, val: bool = True):
|
|
776
|
+
"""
|
|
777
|
+
Sets whether calculations are performed based on the system time when data is
|
|
778
|
+
ingested into the engine.
|
|
779
|
+
|
|
780
|
+
- If `use_system_time` is True, the time column of output table is the system
|
|
781
|
+
time.
|
|
782
|
+
- If `use_system_time` is False, the `time_Col` parameter must be specified,
|
|
783
|
+
and the time column of output table uses the timestamp of each record.
|
|
784
|
+
|
|
785
|
+
Parameters
|
|
786
|
+
----------
|
|
787
|
+
val : bool, optional
|
|
788
|
+
Indicates whether to use system time for calculations. Defaults to True.
|
|
789
|
+
|
|
790
|
+
Returns
|
|
791
|
+
-------
|
|
792
|
+
Self
|
|
793
|
+
The instance itself.
|
|
794
|
+
"""
|
|
795
|
+
self._use_system_time = val
|
|
796
|
+
return self
|
|
797
|
+
|
|
798
|
+
def time_col(self, val: Optional[str] = None):
|
|
799
|
+
"""
|
|
800
|
+
Specifies the time column in the stream table to which the engine subscribes
|
|
801
|
+
when `use_system_time` is False. The column must be of Timestamp type.
|
|
802
|
+
|
|
803
|
+
Parameters
|
|
804
|
+
----------
|
|
805
|
+
val : Optional[str], optional
|
|
806
|
+
The name of the time column. Defaults to None.
|
|
807
|
+
|
|
808
|
+
Returns
|
|
809
|
+
-------
|
|
810
|
+
Self
|
|
811
|
+
The instance itself.
|
|
812
|
+
"""
|
|
813
|
+
self._time_col = val
|
|
814
|
+
return self
|
|
815
|
+
|
|
816
|
+
def last_batch_only(self, val: bool = False):
|
|
817
|
+
"""
|
|
818
|
+
Determines whether to keep only the records with the latest timestamp in the
|
|
819
|
+
engine.
|
|
820
|
+
|
|
821
|
+
When `last_batch_only` is true, `triggering_pattern` must be set to
|
|
822
|
+
'keyCount', and the cross-sectional engine will only maintain key values with
|
|
823
|
+
the latest timestamp for calculation.
|
|
824
|
+
|
|
825
|
+
Otherwise, the engine updates and retains all values for calculation.
|
|
826
|
+
|
|
827
|
+
Parameters
|
|
828
|
+
----------
|
|
829
|
+
val : bool, optional
|
|
830
|
+
Whether to keep only the latest timestamped records. Defaults to False.
|
|
831
|
+
|
|
832
|
+
Returns
|
|
833
|
+
-------
|
|
834
|
+
Self
|
|
835
|
+
The instance itself.
|
|
836
|
+
"""
|
|
837
|
+
self._last_batch_only = val
|
|
838
|
+
return self
|
|
839
|
+
|
|
840
|
+
def context_by_col(self, val: Optional[Union[List[str], str]] = None):
|
|
841
|
+
"""
|
|
842
|
+
Specifies the grouping column(s) by which calculations are performed within
|
|
843
|
+
groups. This parameter only takes effect if `metrics` and `output` are
|
|
844
|
+
specified.
|
|
845
|
+
|
|
846
|
+
If `metrics` contain only aggregate functions, the results will be the same as
|
|
847
|
+
a SQL query using `group by`.
|
|
848
|
+
|
|
849
|
+
Otherwise, the results will be consistent with using `context by`.
|
|
850
|
+
|
|
851
|
+
Parameters
|
|
852
|
+
----------
|
|
853
|
+
val : Optional[Union[List[str], str]], optional
|
|
854
|
+
The grouping column(s) for the calculation. Defaults to None.
|
|
855
|
+
|
|
856
|
+
Returns
|
|
857
|
+
-------
|
|
858
|
+
Self
|
|
859
|
+
The instance itself.
|
|
860
|
+
"""
|
|
861
|
+
self._context_by_col = val
|
|
862
|
+
return self
|
|
863
|
+
|
|
864
|
+
def snapshot_dir(self, val: Optional[Union[Path, str]] = None):
|
|
865
|
+
"""
|
|
866
|
+
Sets the directory where the streaming engine snapshot is saved.
|
|
867
|
+
|
|
868
|
+
The directory must already exist, or an exception will be raised. If a
|
|
869
|
+
snapshot directory is specified, the system checks for an existing snapshot in
|
|
870
|
+
the directory when creating the streaming engine.
|
|
871
|
+
|
|
872
|
+
If found, the snapshot is loaded to restore the engine's state. Multiple
|
|
873
|
+
streaming engines can share a directory, with snapshot files named after the
|
|
874
|
+
engine names.
|
|
875
|
+
|
|
876
|
+
Snapshot file extensions:
|
|
877
|
+
- `<engineName>.tmp`: Temporary snapshot.
|
|
878
|
+
- `<engineName>.snapshot`: A snapshot that is flushed to disk.
|
|
879
|
+
- `<engineName>.old`: If a snapshot with the same name exists, the previous
|
|
880
|
+
one is renamed to `<engineName>.old`.
|
|
881
|
+
|
|
882
|
+
Parameters
|
|
883
|
+
----------
|
|
884
|
+
val : Optional[Union[Path, str]], optional
|
|
885
|
+
The directory path for saving the snapshot. Defaults to None.
|
|
886
|
+
|
|
887
|
+
Returns
|
|
888
|
+
-------
|
|
889
|
+
Self
|
|
890
|
+
The instance itself.
|
|
891
|
+
"""
|
|
892
|
+
self._snapshot_dir = str(val) if val is not None else None
|
|
893
|
+
return self
|
|
894
|
+
|
|
895
|
+
def snapshot_interval_in_msg_count(self, val: Optional[int] = None):
|
|
896
|
+
"""
|
|
897
|
+
Sets the number of messages to receive before saving the next snapshot.
|
|
898
|
+
|
|
899
|
+
Parameters
|
|
900
|
+
----------
|
|
901
|
+
val : Optional[int], optional
|
|
902
|
+
The number of messages before the next snapshot. Defaults to None.
|
|
903
|
+
|
|
904
|
+
Returns
|
|
905
|
+
-------
|
|
906
|
+
Self
|
|
907
|
+
The instance itself.
|
|
908
|
+
"""
|
|
909
|
+
self._snapshot_interval_in_msg_count = val
|
|
910
|
+
return self
|
|
911
|
+
|
|
912
|
+
def output_elapsed_microseconds(self, val: bool = False):
    """
    Choose whether to output the per-window elapsed time, in microseconds.

    The elapsed time runs from the moment the calculation is triggered
    until the window's result is emitted.  When both
    `output_elapsed_microseconds` and `useSystemTime` are true, aggregate
    functions cannot appear in `metrics`.

    Parameters
    ----------
    val : bool, optional
        Whether to output the elapsed time. Defaults to False.

    Returns
    -------
    Self
        The instance itself.
    """
    self._output_elapsed_microseconds = val
    return self
|
|
933
|
+
|
|
934
|
+
def round_time(self, val: bool = True):
    """
    Select the window-boundary alignment rule.

    Relevant when the time precision is milliseconds or seconds and the
    step exceeds one minute: the flag chooses between multi-minute and
    one-minute alignment of window boundaries.

    Parameters
    ----------
    val : bool, optional
        True applies the multi-minute alignment rule; False applies the
        one-minute rule. Defaults to True.

    Returns
    -------
    Self
        The instance itself.
    """
    self._round_time = val
    return self
|
|
955
|
+
|
|
956
|
+
def key_filter(self, val: Optional[MetaCode] = None):
    """
    Set the conditions that filter keys of the keyed table returned by
    the engine.

    Only data whose keys satisfy the conditions take part in the
    calculation.  The MetaCode is an expression or function call that
    yields a bool vector.

    Parameters
    ----------
    val : Optional[MetaCode], optional
        MetaCode of the filtering conditions. Defaults to None.

    Returns
    -------
    Self
        The instance itself.
    """
    self._key_filter = val
    return self
|
|
977
|
+
|
|
978
|
+
def updated_context_groups_only(self, val: bool = False):
    """
    Choose whether only groups updated since the last output are computed.

    Parameters
    ----------
    val : bool, optional
        Whether to compute only updated groups. Defaults to False.

    Returns
    -------
    Self
        The instance itself.
    """
    self._updated_context_groups_only = val
    return self
|
|
995
|
+
|
|
996
|
+
def submit(self) -> CrossSectionalEngine:
    """Create and return the configured CrossSectionalEngine.

    Every configured option is forwarded positionally, so the argument
    order below must match the engine-creation signature exactly.
    """
    return _create_engine(
        EngineType.CrossSectionalEngine,
        self.name, self._metrics, self._dummy,
        self._output, self._key_col,
        # Map the user-facing triggering pattern to the engine's internal
        # pattern name.
        self._inner_pattern_map[str(self._triggering_pattern)],
        self._triggering_interval,
        self._use_system_time,
        self._time_col,
        self._last_batch_only,
        self._context_by_col,
        self._snapshot_dir,
        self._snapshot_interval_in_msg_count,
        sf_data.Nothing,  # fills a positional slot this builder does not expose — TODO confirm its meaning
        self._output_elapsed_microseconds,
        self._round_time,
        self._key_filter,
        # NOTE(review): the builder exposes updated_context_groups_only()
        # but the value is not forwarded — confirm engine support before
        # enabling the line below.
        # self._updated_context_groups_only
    )
|
|
1015
|
+
|
|
1016
|
+
|
|
1017
|
+
# Attach factory/lookup entry points to the engine class; the generated
# classmethods delegate to the builder and the generic list/get helpers.
CrossSectionalEngine.create = classmethod(generate_create_method(CrossSectionalEngineBuilder))
CrossSectionalEngine.list = classmethod(generate_list_method(CrossSectionalEngine))
CrossSectionalEngine.get = classmethod(generate_get_method(CrossSectionalEngine))
|
|
1020
|
+
|
|
1021
|
+
|
|
1022
|
+
class ReactiveStateEngineBuilder(Builder):
    """Fluent builder that configures and creates a ReactiveStateEngine.

    Positional arguments describe the engine's input schema, output table
    and metrics; every keyword argument is routed through the matching
    fluent setter, so construction and chained configuration behave
    identically.
    """

    def __init__(
        self, name: str, table_schema: Union[Table, TypeDict],
        output: Table, metrics,
        *,
        key_col: Optional[Union[List[str], str]] = None,
        filter: Optional[MetaCode] = None,
        snapshot_dir: Optional[Union[Path, str]] = None,
        snapshot_interval_in_msg_count: Optional[int] = None,
        keep_order: Optional[bool] = None,
        key_purge_filter: Optional[MetaCode] = None,
        key_purge_freq_in_second: Optional[int] = None,
        output_elapsed_microseconds: bool = False,
        key_capacity: int = 1024,
        parallelism: int = 1,
        output_handler: Optional[FunctionDef] = None,
        msg_as_table: bool = False,
    ):
        super().__init__(name)
        # A plain type dict is accepted as the schema: materialize it as an
        # empty table so the engine always receives a Table "dummy".
        if isinstance(table_schema, dict):
            table_schema = sf_data.table(types=table_schema, size=0, capacity=1)
        self._dummy = table_schema
        self._output = output
        self._metrics = metrics
        # Delegate to the fluent setters so any normalization (e.g. the
        # Path -> str conversion in snapshot_dir) applies regardless of how
        # the value was supplied.
        self.key_col(key_col)
        self.filter(filter)
        self.snapshot_dir(snapshot_dir)
        self.snapshot_interval_in_msg_count(snapshot_interval_in_msg_count)
        self.keep_order(keep_order)
        self.key_purge_filter(key_purge_filter)
        self.key_purge_freq_in_second(key_purge_freq_in_second)
        self.output_elapsed_microseconds(output_elapsed_microseconds)
        self.key_capacity(key_capacity)
        self.parallelism(parallelism)
        self.output_handler(output_handler)
        self.msg_as_table(msg_as_table)
|
|
1058
|
+
|
|
1059
|
+
def key_col(self, val: Optional[Union[List[str], str]] = None):
    """
    Set the grouping column(s); the calculation runs within each group
    defined by them.

    Parameters
    ----------
    val : Optional[Union[List[str], str]], optional
        The column(s) to group by. Defaults to None.

    Returns
    -------
    Self
        The instance itself.
    """
    self._key_col = val
    return self
|
|
1078
|
+
|
|
1079
|
+
def filter(self, val: Optional[MetaCode] = None):
    """
    Set the filtering conditions for the output table.

    The MetaCode must be an expression referring only to columns of
    `dummy_table`; multiple conditions can be combined with logical
    operators (and, or).  Only results satisfying the conditions are
    written to the output table.

    Parameters
    ----------
    val : Optional[MetaCode], optional
        The MetaCode representing the filtering conditions. Defaults to None.

    Returns
    -------
    Self
        The instance itself.
    """
    # Shadows the builtin `filter` only inside this class namespace.
    self._filter = val
    return self
|
|
1101
|
+
|
|
1102
|
+
def snapshot_dir(self, val: Optional[Union[Path, str]] = None):
    """
    Set the directory where this streaming engine's snapshot is saved.

    The directory must already exist or an exception is raised.  If a
    directory is specified, an existing snapshot found there is loaded to
    restore the engine's state when the engine is created.  Multiple
    engines can share one directory; snapshot files are named after the
    engine:

    - `<engineName>.tmp`: temporary snapshot.
    - `<engineName>.snapshot`: a snapshot flushed to disk.
    - `<engineName>.old`: the previous same-named snapshot, kept after a
      rename.

    Parameters
    ----------
    val : Optional[Union[Path, str]], optional
        The directory path for saving the snapshot. Defaults to None.

    Returns
    -------
    Self
        The instance itself.
    """
    # Keep None as-is; coerce Path (or anything else) to str.
    self._snapshot_dir = str(val) if val is not None else None
    return self
|
|
1132
|
+
|
|
1133
|
+
def snapshot_interval_in_msg_count(self, val: Optional[int] = None):
    """
    Set the number of messages received between consecutive snapshots.

    Parameters
    ----------
    val : Optional[int], optional
        The number of messages before the next snapshot. Defaults to None.

    Returns
    -------
    Self
        The instance itself.
    """
    self._snapshot_interval_in_msg_count = val
    return self
|
|
1149
|
+
|
|
1150
|
+
def keep_order(self, val: Optional[bool] = None):
    """
    Choose whether the output table preserves record insertion order.

    When `key_col` contains a time column the engine defaults this to
    True; otherwise to False.

    Parameters
    ----------
    val : Optional[bool], optional
        Whether to preserve the insertion order. Defaults to None.

    Returns
    -------
    Self
        The instance itself.
    """
    self._keep_order = val
    return self
|
|
1170
|
+
|
|
1171
|
+
def key_purge_filter(self, val: Optional[MetaCode] = None):
    """
    Set the conditions that identify data to purge from the cache.

    To clean up data no longer needed after calculations, specify both
    `key_purge_filter` and `key_purge_freq_in_second`.  The MetaCode is
    built from conditional expressions that must reference columns of the
    output table, and takes effect only when `key_col` is specified.

    Parameters
    ----------
    val : Optional[MetaCode], optional
        The MetaCode filter conditions. Defaults to None.

    Returns
    -------
    Self
        The instance itself.
    """
    self._key_purge_filter = val
    return self
|
|
1195
|
+
|
|
1196
|
+
def key_purge_freq_in_second(self, val: Optional[int] = None):
    """
    Set the interval, in seconds, after which a cache purge may trigger.

    Effective only when `key_col` is specified.  On each data ingestion a
    purge is triggered when all of the following hold:

    1. The time since the last ingestion is at least
       `key_purge_freq_in_second` (the first check measures from engine
       creation instead).
    2. `key_purge_filter` selects some data to purge.
    3. At least 10% of the engine's groups contain data to be purged.

    Inspect `ReactiveStateEngine.stat` (its `numGroups` field) to observe
    the group count before and after a purge.

    Parameters
    ----------
    val : Optional[int], optional
        The time interval (in seconds) to trigger the purge. Defaults to None.

    Returns
    -------
    Self
        The instance itself.
    """
    self._key_purge_freq_in_second = val
    return self
|
|
1231
|
+
|
|
1232
|
+
def output_elapsed_microseconds(self, val: bool = False):
    """
    Choose whether the elapsed time (in microseconds) is output.

    Elapsed time is measured from calculation trigger to result output
    for each window.  If both `output_elapsed_microseconds` and
    `useSystemTime` are true, aggregate functions cannot be used in
    `metrics`.

    Parameters
    ----------
    val : bool, optional
        Whether to output the elapsed time. Defaults to False.

    Returns
    -------
    Self
        The instance itself.
    """
    self._output_elapsed_microseconds = val
    return self
|
|
1253
|
+
|
|
1254
|
+
def key_capacity(self, val: int = 1024):
    """
    Set the per-group state-buffer capacity.

    A positive integer; memory is allocated on a row basis.  For data
    with many groups, tuning this value can reduce latency.

    Parameters
    ----------
    val : int, optional
        A positive integer. Defaults to 1024.

    Returns
    -------
    Self
        The instance itself.
    """
    self._key_capacity = val
    return self
|
|
1275
|
+
|
|
1276
|
+
def parallelism(self, val: int = 1):
    """
    Set the maximum number of workers that may run in parallel.

    A positive integer no greater than 63.  For heavy computation
    workloads, raising this value makes better use of available cores and
    shortens computation time.

    Note: `parallelism` cannot exceed the number of logical cores minus
    one (assumed from the original wording — confirm against the engine
    documentation).

    Parameters
    ----------
    val : int, optional
        A positive integer. Defaults to 1.

    Returns
    -------
    Self
        The instance itself.
    """
    self._parallelism = val
    return self
|
|
1300
|
+
|
|
1301
|
+
def output_handler(self, val: Optional[FunctionDef] = None):
    """
    Set a handler that receives results instead of the output table.

    A unary function, or a partial function with a single unfixed
    parameter.  When set, the engine does not write calculation results
    to the output table; it passes them to this function instead.

    Parameters
    ----------
    val : Optional[FunctionDef], optional
        A unary function or a partial function with a single unfixed
        parameter.  The default value is null, meaning results are
        written to the output table.

    Returns
    -------
    Self
        The instance itself.
    """
    self._output_handler = val
    return self
|
|
1323
|
+
|
|
1324
|
+
def msg_as_table(self, val: bool = False):
    """
    Choose the shape of data passed to the `output_handler` function.

    True passes the output as a Table; False passes it as an AnyVector
    of columns.

    Parameters
    ----------
    val : bool, optional
        Whether to pass data as a Table (True) or as an AnyVector (False).
        Defaults to False.

    Returns
    -------
    Self
        The instance itself.
    """
    self._msg_as_table = val
    return self
|
|
1343
|
+
|
|
1344
|
+
def submit(self) -> ReactiveStateEngine:
    """Create and return the configured ReactiveStateEngine.

    Arguments are positional and must stay in the exact order expected by
    ``_create_engine`` for this engine type.
    """
    return _create_engine(
        EngineType.ReactiveStateEngine,
        self.name, self._metrics, self._dummy,
        self._output,
        self._key_col,
        self._filter,
        self._snapshot_dir,
        self._snapshot_interval_in_msg_count,
        self._keep_order,
        self._key_purge_filter,
        self._key_purge_freq_in_second,
        sf_data.Nothing,  # fills a positional slot this builder does not expose — TODO confirm its meaning
        self._output_elapsed_microseconds,
        self._key_capacity,
        self._parallelism,
        self._output_handler,
        self._msg_as_table,
    )
|
|
1363
|
+
|
|
1364
|
+
|
|
1365
|
+
# Attach factory/lookup entry points to the engine class; the generated
# classmethods delegate to the builder and the generic list/get helpers.
ReactiveStateEngine.create = classmethod(generate_create_method(ReactiveStateEngineBuilder))
ReactiveStateEngine.list = classmethod(generate_list_method(ReactiveStateEngine))
ReactiveStateEngine.get = classmethod(generate_get_method(ReactiveStateEngine))
|
|
1368
|
+
|
|
1369
|
+
|
|
1370
|
+
# Shape of a single filter specification accepted by StreamFilterEngine.
# NOTE(review): snake_case for a type alias is unconventional (PEP 8 suggests
# PascalCase), but the name is kept for backward compatibility.
filter_dict = Dict[Literal["timeRange", "condition", "handler"], Any]
|
|
1371
|
+
|
|
1372
|
+
|
|
1373
|
+
class StreamFilterEngineBuilder(Builder):
    """Fluent builder that configures and creates a StreamFilterEngine.

    The input schema may be given either as a Table or as a plain type
    dict; a dict is materialized as an empty table with that schema.
    Keyword arguments are routed through the matching fluent setters.
    """

    def __init__(
        self, name: str, table_schema: Union[Table, TypeDict],
        filter: Union[filter_dict, List[filter_dict]],
        *,
        msg_schema: Optional[Dict] = None,
        time_col: Optional[str] = None,
        condition_col: Optional[str] = None,
    ):
        super().__init__(name)
        # Accept a bare type dict by turning it into an empty schema table.
        self._dummy = (
            sf_data.table(types=table_schema, size=0, capacity=1)
            if isinstance(table_schema, dict)
            else table_schema
        )
        self._filter = filter
        self.msg_schema(msg_schema)
        self.time_col(time_col)
        self.condition_col(condition_col)

    def msg_schema(self, val: Optional[Dict] = None):
        """Set the message schema dict; returns the instance for chaining."""
        self._msg_schema = val
        return self

    def time_col(self, val: Optional[str] = None):
        """Set the name of the time column; returns the instance for chaining."""
        self._time_col = val
        return self

    def condition_col(self, val: Optional[str] = None):
        """Set the name of the condition column; returns the instance for chaining."""
        self._condition_col = val
        return self

    def submit(self) -> StreamFilterEngine:
        """Create the StreamFilterEngine from the accumulated configuration.

        Arguments are positional; their order must match the
        engine-creation signature.
        """
        return _create_engine(
            EngineType.StreamFilterEngine,
            self.name, self._dummy, self._filter,
            self._msg_schema,
            self._time_col,
            self._condition_col,
        )
|
|
1411
|
+
|
|
1412
|
+
|
|
1413
|
+
# Attach factory/lookup entry points to the engine class; the generated
# classmethods delegate to the builder and the generic list/get helpers.
StreamFilterEngine.create = classmethod(generate_create_method(StreamFilterEngineBuilder))
StreamFilterEngine.list = classmethod(generate_list_method(StreamFilterEngine))
StreamFilterEngine.get = classmethod(generate_get_method(StreamFilterEngine))
|
|
1416
|
+
|
|
1417
|
+
|
|
1418
|
+
def list() -> List[Tuple[str, EngineType, str]]:
    """
    Retrieves all stream engines in the system.

    Returns
    -------
    List[Tuple[str, EngineType, str]]
        One (engine_name, engine_type, user) tuple per engine.
    """
    # NOTE: this function shadows the builtin `list` within this module; the
    # name is part of the public API and is kept for compatibility.
    return __internal_list_engine()
|
|
1428
|
+
|
|
1429
|
+
|
|
1430
|
+
def drop(name: str):
    """
    Drops a stream engine by its name.

    Parameters
    ----------
    name : str
        The name of the stream engine to be dropped.
    """
    # Delegates to the server-side dropStreamEngine function.
    _global_call("dropStreamEngine", name)
|
|
1440
|
+
|
|
1441
|
+
|
|
1442
|
+
def get(name: str) -> StreamEngine:
    """
    Retrieves a stream engine by its name.

    Parameters
    ----------
    name : str
        The name of the stream engine to retrieve.

    Returns
    -------
    StreamEngine
        The corresponding StreamEngine.
    """
    # Delegates to the server-side getStreamEngine function.
    return _global_call("getStreamEngine", name)
|