tensorbored 2.21.0rc1769983804__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tensorbored/__init__.py +112 -0
- tensorbored/_vendor/__init__.py +0 -0
- tensorbored/_vendor/bleach/__init__.py +125 -0
- tensorbored/_vendor/bleach/_vendor/__init__.py +0 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/__init__.py +35 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/_ihatexml.py +289 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/_inputstream.py +918 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/_tokenizer.py +1735 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/_trie/__init__.py +5 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/_trie/_base.py +40 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/_trie/py.py +67 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/_utils.py +159 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/constants.py +2946 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/filters/__init__.py +0 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/filters/alphabeticalattributes.py +29 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/filters/base.py +12 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/filters/inject_meta_charset.py +73 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/filters/lint.py +93 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/filters/optionaltags.py +207 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/filters/sanitizer.py +916 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/filters/whitespace.py +38 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/html5parser.py +2795 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/serializer.py +409 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/treeadapters/__init__.py +30 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/treeadapters/genshi.py +54 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/treeadapters/sax.py +50 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/treebuilders/__init__.py +88 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/treebuilders/base.py +417 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/treebuilders/dom.py +239 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/treebuilders/etree.py +343 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/treebuilders/etree_lxml.py +392 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/treewalkers/__init__.py +154 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/treewalkers/base.py +252 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/treewalkers/dom.py +43 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/treewalkers/etree.py +131 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/treewalkers/etree_lxml.py +215 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/treewalkers/genshi.py +69 -0
- tensorbored/_vendor/bleach/_vendor/parse.py +1078 -0
- tensorbored/_vendor/bleach/callbacks.py +32 -0
- tensorbored/_vendor/bleach/html5lib_shim.py +757 -0
- tensorbored/_vendor/bleach/linkifier.py +633 -0
- tensorbored/_vendor/bleach/parse_shim.py +1 -0
- tensorbored/_vendor/bleach/sanitizer.py +638 -0
- tensorbored/_vendor/bleach/six_shim.py +19 -0
- tensorbored/_vendor/webencodings/__init__.py +342 -0
- tensorbored/_vendor/webencodings/labels.py +231 -0
- tensorbored/_vendor/webencodings/mklabels.py +59 -0
- tensorbored/_vendor/webencodings/x_user_defined.py +325 -0
- tensorbored/assets.py +36 -0
- tensorbored/auth.py +102 -0
- tensorbored/backend/__init__.py +0 -0
- tensorbored/backend/application.py +604 -0
- tensorbored/backend/auth_context_middleware.py +38 -0
- tensorbored/backend/client_feature_flags.py +113 -0
- tensorbored/backend/empty_path_redirect.py +46 -0
- tensorbored/backend/event_processing/__init__.py +0 -0
- tensorbored/backend/event_processing/data_ingester.py +276 -0
- tensorbored/backend/event_processing/data_provider.py +535 -0
- tensorbored/backend/event_processing/directory_loader.py +142 -0
- tensorbored/backend/event_processing/directory_watcher.py +272 -0
- tensorbored/backend/event_processing/event_accumulator.py +950 -0
- tensorbored/backend/event_processing/event_file_inspector.py +463 -0
- tensorbored/backend/event_processing/event_file_loader.py +292 -0
- tensorbored/backend/event_processing/event_multiplexer.py +521 -0
- tensorbored/backend/event_processing/event_util.py +68 -0
- tensorbored/backend/event_processing/io_wrapper.py +223 -0
- tensorbored/backend/event_processing/plugin_asset_util.py +104 -0
- tensorbored/backend/event_processing/plugin_event_accumulator.py +721 -0
- tensorbored/backend/event_processing/plugin_event_multiplexer.py +522 -0
- tensorbored/backend/event_processing/reservoir.py +266 -0
- tensorbored/backend/event_processing/tag_types.py +29 -0
- tensorbored/backend/experiment_id.py +71 -0
- tensorbored/backend/experimental_plugin.py +51 -0
- tensorbored/backend/http_util.py +263 -0
- tensorbored/backend/json_util.py +70 -0
- tensorbored/backend/path_prefix.py +67 -0
- tensorbored/backend/process_graph.py +74 -0
- tensorbored/backend/security_validator.py +202 -0
- tensorbored/compat/__init__.py +69 -0
- tensorbored/compat/proto/__init__.py +0 -0
- tensorbored/compat/proto/allocation_description_pb2.py +35 -0
- tensorbored/compat/proto/api_def_pb2.py +82 -0
- tensorbored/compat/proto/attr_value_pb2.py +80 -0
- tensorbored/compat/proto/cluster_pb2.py +58 -0
- tensorbored/compat/proto/config_pb2.py +271 -0
- tensorbored/compat/proto/coordination_config_pb2.py +45 -0
- tensorbored/compat/proto/cost_graph_pb2.py +87 -0
- tensorbored/compat/proto/cpp_shape_inference_pb2.py +70 -0
- tensorbored/compat/proto/debug_pb2.py +65 -0
- tensorbored/compat/proto/event_pb2.py +149 -0
- tensorbored/compat/proto/full_type_pb2.py +74 -0
- tensorbored/compat/proto/function_pb2.py +157 -0
- tensorbored/compat/proto/graph_debug_info_pb2.py +111 -0
- tensorbored/compat/proto/graph_pb2.py +41 -0
- tensorbored/compat/proto/histogram_pb2.py +39 -0
- tensorbored/compat/proto/meta_graph_pb2.py +254 -0
- tensorbored/compat/proto/node_def_pb2.py +61 -0
- tensorbored/compat/proto/op_def_pb2.py +81 -0
- tensorbored/compat/proto/resource_handle_pb2.py +48 -0
- tensorbored/compat/proto/rewriter_config_pb2.py +93 -0
- tensorbored/compat/proto/rpc_options_pb2.py +35 -0
- tensorbored/compat/proto/saved_object_graph_pb2.py +193 -0
- tensorbored/compat/proto/saver_pb2.py +38 -0
- tensorbored/compat/proto/step_stats_pb2.py +116 -0
- tensorbored/compat/proto/struct_pb2.py +144 -0
- tensorbored/compat/proto/summary_pb2.py +111 -0
- tensorbored/compat/proto/tensor_description_pb2.py +38 -0
- tensorbored/compat/proto/tensor_pb2.py +68 -0
- tensorbored/compat/proto/tensor_shape_pb2.py +46 -0
- tensorbored/compat/proto/tfprof_log_pb2.py +307 -0
- tensorbored/compat/proto/trackable_object_graph_pb2.py +90 -0
- tensorbored/compat/proto/types_pb2.py +105 -0
- tensorbored/compat/proto/variable_pb2.py +62 -0
- tensorbored/compat/proto/verifier_config_pb2.py +38 -0
- tensorbored/compat/proto/versions_pb2.py +35 -0
- tensorbored/compat/tensorflow_stub/__init__.py +38 -0
- tensorbored/compat/tensorflow_stub/app.py +124 -0
- tensorbored/compat/tensorflow_stub/compat/__init__.py +131 -0
- tensorbored/compat/tensorflow_stub/compat/v1/__init__.py +20 -0
- tensorbored/compat/tensorflow_stub/dtypes.py +692 -0
- tensorbored/compat/tensorflow_stub/error_codes.py +169 -0
- tensorbored/compat/tensorflow_stub/errors.py +507 -0
- tensorbored/compat/tensorflow_stub/flags.py +124 -0
- tensorbored/compat/tensorflow_stub/io/__init__.py +17 -0
- tensorbored/compat/tensorflow_stub/io/gfile.py +1011 -0
- tensorbored/compat/tensorflow_stub/pywrap_tensorflow.py +285 -0
- tensorbored/compat/tensorflow_stub/tensor_shape.py +1035 -0
- tensorbored/context.py +129 -0
- tensorbored/data/__init__.py +0 -0
- tensorbored/data/grpc_provider.py +365 -0
- tensorbored/data/ingester.py +46 -0
- tensorbored/data/proto/__init__.py +0 -0
- tensorbored/data/proto/data_provider_pb2.py +517 -0
- tensorbored/data/proto/data_provider_pb2_grpc.py +374 -0
- tensorbored/data/provider.py +1365 -0
- tensorbored/data/server_ingester.py +301 -0
- tensorbored/data_compat.py +159 -0
- tensorbored/dataclass_compat.py +224 -0
- tensorbored/default.py +124 -0
- tensorbored/errors.py +130 -0
- tensorbored/lazy.py +99 -0
- tensorbored/main.py +48 -0
- tensorbored/main_lib.py +62 -0
- tensorbored/manager.py +487 -0
- tensorbored/notebook.py +441 -0
- tensorbored/plugin_util.py +266 -0
- tensorbored/plugins/__init__.py +0 -0
- tensorbored/plugins/audio/__init__.py +0 -0
- tensorbored/plugins/audio/audio_plugin.py +229 -0
- tensorbored/plugins/audio/metadata.py +69 -0
- tensorbored/plugins/audio/plugin_data_pb2.py +37 -0
- tensorbored/plugins/audio/summary.py +230 -0
- tensorbored/plugins/audio/summary_v2.py +124 -0
- tensorbored/plugins/base_plugin.py +367 -0
- tensorbored/plugins/core/__init__.py +0 -0
- tensorbored/plugins/core/core_plugin.py +981 -0
- tensorbored/plugins/custom_scalar/__init__.py +0 -0
- tensorbored/plugins/custom_scalar/custom_scalars_plugin.py +320 -0
- tensorbored/plugins/custom_scalar/layout_pb2.py +85 -0
- tensorbored/plugins/custom_scalar/metadata.py +35 -0
- tensorbored/plugins/custom_scalar/summary.py +79 -0
- tensorbored/plugins/debugger_v2/__init__.py +0 -0
- tensorbored/plugins/debugger_v2/debug_data_multiplexer.py +631 -0
- tensorbored/plugins/debugger_v2/debug_data_provider.py +634 -0
- tensorbored/plugins/debugger_v2/debugger_v2_plugin.py +504 -0
- tensorbored/plugins/distribution/__init__.py +0 -0
- tensorbored/plugins/distribution/compressor.py +158 -0
- tensorbored/plugins/distribution/distributions_plugin.py +116 -0
- tensorbored/plugins/distribution/metadata.py +19 -0
- tensorbored/plugins/graph/__init__.py +0 -0
- tensorbored/plugins/graph/graph_util.py +129 -0
- tensorbored/plugins/graph/graphs_plugin.py +336 -0
- tensorbored/plugins/graph/keras_util.py +328 -0
- tensorbored/plugins/graph/metadata.py +42 -0
- tensorbored/plugins/histogram/__init__.py +0 -0
- tensorbored/plugins/histogram/histograms_plugin.py +144 -0
- tensorbored/plugins/histogram/metadata.py +63 -0
- tensorbored/plugins/histogram/plugin_data_pb2.py +34 -0
- tensorbored/plugins/histogram/summary.py +234 -0
- tensorbored/plugins/histogram/summary_v2.py +292 -0
- tensorbored/plugins/hparams/__init__.py +14 -0
- tensorbored/plugins/hparams/_keras.py +93 -0
- tensorbored/plugins/hparams/api.py +130 -0
- tensorbored/plugins/hparams/api_pb2.py +208 -0
- tensorbored/plugins/hparams/backend_context.py +606 -0
- tensorbored/plugins/hparams/download_data.py +158 -0
- tensorbored/plugins/hparams/error.py +26 -0
- tensorbored/plugins/hparams/get_experiment.py +71 -0
- tensorbored/plugins/hparams/hparams_plugin.py +206 -0
- tensorbored/plugins/hparams/hparams_util_pb2.py +69 -0
- tensorbored/plugins/hparams/json_format_compat.py +38 -0
- tensorbored/plugins/hparams/list_metric_evals.py +57 -0
- tensorbored/plugins/hparams/list_session_groups.py +1040 -0
- tensorbored/plugins/hparams/metadata.py +125 -0
- tensorbored/plugins/hparams/metrics.py +41 -0
- tensorbored/plugins/hparams/plugin_data_pb2.py +69 -0
- tensorbored/plugins/hparams/summary.py +205 -0
- tensorbored/plugins/hparams/summary_v2.py +597 -0
- tensorbored/plugins/image/__init__.py +0 -0
- tensorbored/plugins/image/images_plugin.py +232 -0
- tensorbored/plugins/image/metadata.py +65 -0
- tensorbored/plugins/image/plugin_data_pb2.py +34 -0
- tensorbored/plugins/image/summary.py +159 -0
- tensorbored/plugins/image/summary_v2.py +130 -0
- tensorbored/plugins/mesh/__init__.py +14 -0
- tensorbored/plugins/mesh/mesh_plugin.py +292 -0
- tensorbored/plugins/mesh/metadata.py +152 -0
- tensorbored/plugins/mesh/plugin_data_pb2.py +37 -0
- tensorbored/plugins/mesh/summary.py +251 -0
- tensorbored/plugins/mesh/summary_v2.py +214 -0
- tensorbored/plugins/metrics/__init__.py +0 -0
- tensorbored/plugins/metrics/metadata.py +17 -0
- tensorbored/plugins/metrics/metrics_plugin.py +623 -0
- tensorbored/plugins/pr_curve/__init__.py +0 -0
- tensorbored/plugins/pr_curve/metadata.py +75 -0
- tensorbored/plugins/pr_curve/plugin_data_pb2.py +34 -0
- tensorbored/plugins/pr_curve/pr_curves_plugin.py +241 -0
- tensorbored/plugins/pr_curve/summary.py +574 -0
- tensorbored/plugins/profile_redirect/__init__.py +0 -0
- tensorbored/plugins/profile_redirect/profile_redirect_plugin.py +49 -0
- tensorbored/plugins/projector/__init__.py +67 -0
- tensorbored/plugins/projector/metadata.py +26 -0
- tensorbored/plugins/projector/projector_config_pb2.py +54 -0
- tensorbored/plugins/projector/projector_plugin.py +795 -0
- tensorbored/plugins/projector/tf_projector_plugin/index.js +32 -0
- tensorbored/plugins/projector/tf_projector_plugin/projector_binary.html +524 -0
- tensorbored/plugins/projector/tf_projector_plugin/projector_binary.js +15536 -0
- tensorbored/plugins/scalar/__init__.py +0 -0
- tensorbored/plugins/scalar/metadata.py +60 -0
- tensorbored/plugins/scalar/plugin_data_pb2.py +34 -0
- tensorbored/plugins/scalar/scalars_plugin.py +181 -0
- tensorbored/plugins/scalar/summary.py +109 -0
- tensorbored/plugins/scalar/summary_v2.py +124 -0
- tensorbored/plugins/text/__init__.py +0 -0
- tensorbored/plugins/text/metadata.py +62 -0
- tensorbored/plugins/text/plugin_data_pb2.py +34 -0
- tensorbored/plugins/text/summary.py +114 -0
- tensorbored/plugins/text/summary_v2.py +124 -0
- tensorbored/plugins/text/text_plugin.py +288 -0
- tensorbored/plugins/wit_redirect/__init__.py +0 -0
- tensorbored/plugins/wit_redirect/wit_redirect_plugin.py +49 -0
- tensorbored/program.py +910 -0
- tensorbored/summary/__init__.py +35 -0
- tensorbored/summary/_output.py +124 -0
- tensorbored/summary/_tf/__init__.py +14 -0
- tensorbored/summary/_tf/summary/__init__.py +178 -0
- tensorbored/summary/_writer.py +105 -0
- tensorbored/summary/v1.py +51 -0
- tensorbored/summary/v2.py +25 -0
- tensorbored/summary/writer/__init__.py +13 -0
- tensorbored/summary/writer/event_file_writer.py +291 -0
- tensorbored/summary/writer/record_writer.py +50 -0
- tensorbored/util/__init__.py +0 -0
- tensorbored/util/encoder.py +116 -0
- tensorbored/util/grpc_util.py +311 -0
- tensorbored/util/img_mime_type_detector.py +40 -0
- tensorbored/util/io_util.py +20 -0
- tensorbored/util/lazy_tensor_creator.py +110 -0
- tensorbored/util/op_evaluator.py +104 -0
- tensorbored/util/platform_util.py +20 -0
- tensorbored/util/tb_logging.py +24 -0
- tensorbored/util/tensor_util.py +617 -0
- tensorbored/util/timing.py +122 -0
- tensorbored/version.py +21 -0
- tensorbored/webfiles.zip +0 -0
- tensorbored-2.21.0rc1769983804.dist-info/METADATA +49 -0
- tensorbored-2.21.0rc1769983804.dist-info/RECORD +271 -0
- tensorbored-2.21.0rc1769983804.dist-info/WHEEL +5 -0
- tensorbored-2.21.0rc1769983804.dist-info/entry_points.txt +6 -0
- tensorbored-2.21.0rc1769983804.dist-info/licenses/LICENSE +739 -0
- tensorbored-2.21.0rc1769983804.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,266 @@
|
|
|
1
|
+
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
|
|
2
|
+
#
|
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
|
+
# you may not use this file except in compliance with the License.
|
|
5
|
+
# You may obtain a copy of the License at
|
|
6
|
+
#
|
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
8
|
+
#
|
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
+
# See the License for the specific language governing permissions and
|
|
13
|
+
# limitations under the License.
|
|
14
|
+
# ==============================================================================
|
|
15
|
+
|
|
16
|
+
"""A key-value[] store that implements reservoir sampling on the values."""
|
|
17
|
+
|
|
18
|
+
import collections
|
|
19
|
+
import random
|
|
20
|
+
import threading
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
class Reservoir:
    """A map-to-arrays container, with deterministic Reservoir Sampling.

    Items are added with an associated key. Items may be retrieved by key, and
    a list of keys can also be retrieved. If size is not zero, then it dictates
    the maximum number of items that will be stored with each key. Once there
    are more items for a given key, they are replaced via reservoir sampling,
    such that each item has an equal probability of being included in the
    sample.

    Deterministic means that for any given seed and bucket size, the sequence
    of values that are kept for any given tag will always be the same, and that
    this is independent of any insertions on other tags. That is:

    >>> separate_reservoir = reservoir.Reservoir(10)
    >>> interleaved_reservoir = reservoir.Reservoir(10)
    >>> for i in range(100):
    >>>   separate_reservoir.AddItem('key1', i)
    >>> for i in range(100):
    >>>   separate_reservoir.AddItem('key2', i)
    >>> for i in range(100):
    >>>   interleaved_reservoir.AddItem('key1', i)
    >>>   interleaved_reservoir.AddItem('key2', i)

    separate_reservoir and interleaved_reservoir will be in identical states.

    See: https://en.wikipedia.org/wiki/Reservoir_sampling

    Adding items has amortized O(1) runtime.

    Fields:
      always_keep_last: Whether the latest seen sample is always at the
        end of the reservoir. Defaults to True.
      size: An integer of the maximum number of samples.
    """

    def __init__(self, size, seed=0, always_keep_last=True):
        """Creates a new reservoir.

        Args:
          size: The number of values to keep in the reservoir for each tag. If
            0, all values will be kept.
          seed: The seed of the random number generator to use when sampling.
            Different values for |seed| will produce different samples from
            the same input items.
          always_keep_last: Whether to always keep the latest seen item in the
            end of the reservoir. Defaults to True.

        Raises:
          ValueError: If size is negative or not an integer.
        """
        if size < 0 or size != round(size):
            raise ValueError("size must be nonnegative integer, was %s" % size)

        def _make_bucket():
            # Every key gets its own bucket seeded identically, which is what
            # makes sampling deterministic and independent across keys.
            return _ReservoirBucket(
                size, random.Random(seed), always_keep_last
            )

        self._buckets = collections.defaultdict(_make_bucket)
        # _mutex guards the keys - creating new keys, retrieving by key, etc
        # the internal items are guarded by the ReservoirBuckets' internal
        # mutexes
        self._mutex = threading.Lock()
        self.size = size
        self.always_keep_last = always_keep_last

    def Keys(self):
        """Return all the keys in the reservoir.

        Returns:
          ['list', 'of', 'keys'] in the Reservoir.
        """
        with self._mutex:
            # Iterating the dict yields its keys; snapshot under the lock.
            return list(self._buckets)

    def Items(self, key):
        """Return items associated with given key.

        Args:
          key: The key for which we are finding associated items.

        Raises:
          KeyError: If the key is not found in the reservoir.

        Returns:
          [list, of, items] associated with that key.
        """
        with self._mutex:
            # Explicit membership test: indexing a missing key would silently
            # create an empty bucket, since _buckets is a defaultdict.
            if key not in self._buckets:
                raise KeyError("Key %s was not found in Reservoir" % key)
            return self._buckets[key].Items()

    def AddItem(self, key, item, f=lambda x: x):
        """Add a new item to the Reservoir with the given tag.

        If the reservoir has not yet reached full size, the new item is
        guaranteed to be added. If the reservoir is full, then behavior
        depends on the always_keep_last boolean.

        If always_keep_last was set to true, the new item is guaranteed to be
        added to the reservoir, and either the previous last item will be
        replaced, or (with low probability) an older item will be replaced.

        If always_keep_last was set to false, then the new item will replace
        an old item with low probability.

        If f is provided, it will be applied to transform item (lazily, iff
        item is going to be included in the reservoir).

        Args:
          key: The key to store the item under.
          item: The item to add to the reservoir.
          f: An optional function to transform the item prior to addition.
        """
        with self._mutex:
            # defaultdict creates the bucket on first use of a new key.
            self._buckets[key].AddItem(item, f)

    def FilterItems(self, filterFn, key=None):
        """Filter items within a Reservoir, using a filtering function.

        Args:
          filterFn: A function that returns True for the items to be kept.
          key: An optional bucket key to filter. If not specified, will
            filter all buckets.

        Returns:
          The number of items removed.
        """
        with self._mutex:
            if not key:
                # No key given: filter every bucket and total the removals.
                return sum(
                    bucket.FilterItems(filterFn)
                    for bucket in self._buckets.values()
                )
            if key not in self._buckets:
                return 0
            return self._buckets[key].FilterItems(filterFn)
+
|
|
163
|
+
class _ReservoirBucket:
|
|
164
|
+
"""A container for items from a stream, that implements reservoir sampling.
|
|
165
|
+
|
|
166
|
+
It always stores the most recent item as its final item.
|
|
167
|
+
"""
|
|
168
|
+
|
|
169
|
+
def __init__(self, _max_size, _random=None, always_keep_last=True):
|
|
170
|
+
"""Create the _ReservoirBucket.
|
|
171
|
+
|
|
172
|
+
Args:
|
|
173
|
+
_max_size: The maximum size the reservoir bucket may grow to. If size is
|
|
174
|
+
zero, the bucket has unbounded size.
|
|
175
|
+
_random: The random number generator to use. If not specified, defaults to
|
|
176
|
+
random.Random(0).
|
|
177
|
+
always_keep_last: Whether the latest seen item should always be included
|
|
178
|
+
in the end of the bucket.
|
|
179
|
+
|
|
180
|
+
Raises:
|
|
181
|
+
ValueError: if the size is not a nonnegative integer.
|
|
182
|
+
"""
|
|
183
|
+
if _max_size < 0 or _max_size != round(_max_size):
|
|
184
|
+
raise ValueError(
|
|
185
|
+
"_max_size must be nonnegative int, was %s" % _max_size
|
|
186
|
+
)
|
|
187
|
+
self.items = []
|
|
188
|
+
# This mutex protects the internal items, ensuring that calls to Items and
|
|
189
|
+
# AddItem are thread-safe
|
|
190
|
+
self._mutex = threading.Lock()
|
|
191
|
+
self._max_size = _max_size
|
|
192
|
+
self._num_items_seen = 0
|
|
193
|
+
if _random is not None:
|
|
194
|
+
self._random = _random
|
|
195
|
+
else:
|
|
196
|
+
self._random = random.Random(0)
|
|
197
|
+
self.always_keep_last = always_keep_last
|
|
198
|
+
|
|
199
|
+
def AddItem(self, item, f=lambda x: x):
|
|
200
|
+
"""Add an item to the ReservoirBucket, replacing an old item if
|
|
201
|
+
necessary.
|
|
202
|
+
|
|
203
|
+
The new item is guaranteed to be added to the bucket, and to be the last
|
|
204
|
+
element in the bucket. If the bucket has reached capacity, then an old item
|
|
205
|
+
will be replaced. With probability (_max_size/_num_items_seen) a random item
|
|
206
|
+
in the bucket will be popped out and the new item will be appended
|
|
207
|
+
to the end. With probability (1 - _max_size/_num_items_seen)
|
|
208
|
+
the last item in the bucket will be replaced.
|
|
209
|
+
|
|
210
|
+
Since the O(n) replacements occur with O(1/_num_items_seen) likelihood,
|
|
211
|
+
the amortized runtime is O(1).
|
|
212
|
+
|
|
213
|
+
Args:
|
|
214
|
+
item: The item to add to the bucket.
|
|
215
|
+
f: A function to transform item before addition, if it will be kept in
|
|
216
|
+
the reservoir.
|
|
217
|
+
"""
|
|
218
|
+
with self._mutex:
|
|
219
|
+
if len(self.items) < self._max_size or self._max_size == 0:
|
|
220
|
+
self.items.append(f(item))
|
|
221
|
+
else:
|
|
222
|
+
r = self._random.randint(0, self._num_items_seen)
|
|
223
|
+
if r < self._max_size:
|
|
224
|
+
self.items.pop(r)
|
|
225
|
+
self.items.append(f(item))
|
|
226
|
+
elif self.always_keep_last:
|
|
227
|
+
self.items[-1] = f(item)
|
|
228
|
+
self._num_items_seen += 1
|
|
229
|
+
|
|
230
|
+
def FilterItems(self, filterFn):
|
|
231
|
+
"""Filter items in a ReservoirBucket, using a filtering function.
|
|
232
|
+
|
|
233
|
+
Filtering items from the reservoir bucket must update the
|
|
234
|
+
internal state variable self._num_items_seen, which is used for determining
|
|
235
|
+
the rate of replacement in reservoir sampling. Ideally, self._num_items_seen
|
|
236
|
+
would contain the exact number of items that have ever seen by the
|
|
237
|
+
ReservoirBucket and satisfy filterFn. However, the ReservoirBucket does not
|
|
238
|
+
have access to all items seen -- it only has access to the subset of items
|
|
239
|
+
that have survived sampling (self.items). Therefore, we estimate
|
|
240
|
+
self._num_items_seen by scaling it by the same ratio as the ratio of items
|
|
241
|
+
not removed from self.items.
|
|
242
|
+
|
|
243
|
+
Args:
|
|
244
|
+
filterFn: A function that returns True for items to be kept.
|
|
245
|
+
|
|
246
|
+
Returns:
|
|
247
|
+
The number of items removed from the bucket.
|
|
248
|
+
"""
|
|
249
|
+
with self._mutex:
|
|
250
|
+
size_before = len(self.items)
|
|
251
|
+
self.items = list(filter(filterFn, self.items))
|
|
252
|
+
size_diff = size_before - len(self.items)
|
|
253
|
+
|
|
254
|
+
# Estimate a correction the number of items seen
|
|
255
|
+
prop_remaining = (
|
|
256
|
+
len(self.items) / float(size_before) if size_before > 0 else 0
|
|
257
|
+
)
|
|
258
|
+
self._num_items_seen = int(
|
|
259
|
+
round(self._num_items_seen * prop_remaining)
|
|
260
|
+
)
|
|
261
|
+
return size_diff
|
|
262
|
+
|
|
263
|
+
def Items(self):
|
|
264
|
+
"""Get all the items in the bucket."""
|
|
265
|
+
with self._mutex:
|
|
266
|
+
return list(self.items)
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
|
|
2
|
+
#
|
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
|
+
# you may not use this file except in compliance with the License.
|
|
5
|
+
# You may obtain a copy of the License at
|
|
6
|
+
#
|
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
8
|
+
#
|
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
+
# See the License for the specific language governing permissions and
|
|
13
|
+
# limitations under the License.
|
|
14
|
+
# ==============================================================================
|
|
15
|
+
"""String constants describing contents of an event accumulator."""
|
|
16
|
+
|
|
17
|
+
# Arbitrary strings chosen to pass the type information of the tag from
|
|
18
|
+
# the backend to the frontend.
|
|
19
|
+
TENSORS = "tensors"
|
|
20
|
+
GRAPH = "graph"
|
|
21
|
+
META_GRAPH = "meta_graph"
|
|
22
|
+
RUN_METADATA = "run_metadata"
|
|
23
|
+
|
|
24
|
+
# Legacy (pre-tensor) tag types.
|
|
25
|
+
COMPRESSED_HISTOGRAMS = "distributions"
|
|
26
|
+
HISTOGRAMS = "histograms"
|
|
27
|
+
IMAGES = "images"
|
|
28
|
+
AUDIO = "audio"
|
|
29
|
+
SCALARS = "scalars"
|
|
@@ -0,0 +1,71 @@
|
|
|
1
|
+
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
|
|
2
|
+
#
|
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
|
+
# you may not use this file except in compliance with the License.
|
|
5
|
+
# You may obtain a copy of the License at
|
|
6
|
+
#
|
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
8
|
+
#
|
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
+
# See the License for the specific language governing permissions and
|
|
13
|
+
# limitations under the License.
|
|
14
|
+
# ==============================================================================
|
|
15
|
+
"""Application-level experiment ID support."""
|
|
16
|
+
|
|
17
|
+
import re
|
|
18
|
+
|
|
19
|
+
# Value of the first path component that signals that the second path
# component represents an experiment ID.
_EXPERIMENT_PATH_COMPONENT = "experiment"

# Key into the WSGI environment used for the experiment ID.
WSGI_ENVIRON_KEY = "HTTP_TENSORBOARD_EXPERIMENT_ID"


class ExperimentIdMiddleware:
    """WSGI middleware extracting experiment IDs from URL to environment.

    Any request whose path matches `/experiment/SOME_EID[/...]` will have
    its first two path components stripped, and its experiment ID stored
    onto the WSGI environment with key taken from the `WSGI_ENVIRON_KEY`
    constant. All other requests will have paths unchanged and the
    experiment ID set to the empty string. It noops if the key taken from
    the `WSGI_ENVIRON_KEY` is already present in the environment.

    Instances of this class are WSGI applications (see PEP 3333).
    """

    def __init__(self, application):
        """Initializes an `ExperimentIdMiddleware`.

        Args:
          application: The WSGI application to wrap (see PEP 3333).
        """
        self._application = application
        # Regular expression that matches the whole `/experiment/EID` prefix
        # (without any trailing slash) and captures the experiment ID.
        self._pat = re.compile(
            r"/%s/([^/]*)" % re.escape(_EXPERIMENT_PATH_COMPONENT)
        )

    def __call__(self, environ, start_response):
        # Skip if ExperimentIdMiddleware was already called upstream.
        if WSGI_ENVIRON_KEY in environ:
            return self._application(environ, start_response)

        path = environ.get("PATH_INFO", "")
        match = self._pat.match(path)
        if match is None:
            eid = ""
            prefix = ""
        else:
            eid = match.group(1)
            # The whole matched span is the prefix to shift from PATH_INFO to
            # SCRIPT_NAME; match() anchors at the start of the string.
            prefix = match.group(0)
        environ[WSGI_ENVIRON_KEY] = eid
        environ["PATH_INFO"] = path[len(prefix):]
        environ["SCRIPT_NAME"] = environ.get("SCRIPT_NAME", "") + prefix
        return self._application(environ, start_response)
|
|
@@ -0,0 +1,51 @@
|
|
|
1
|
+
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
|
|
2
|
+
#
|
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
|
+
# you may not use this file except in compliance with the License.
|
|
5
|
+
# You may obtain a copy of the License at
|
|
6
|
+
#
|
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
8
|
+
#
|
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
+
# See the License for the specific language governing permissions and
|
|
13
|
+
# limitations under the License.
|
|
14
|
+
# ==============================================================================
|
|
15
|
+
"""Experimental plugin support for TensorBoard.
|
|
16
|
+
|
|
17
|
+
Contains the mechanism for marking plugins as experimental.
|
|
18
|
+
"""
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
class ExperimentalPlugin:
    """Marker class identifying a plugin as experimental.

    A plugin carrying this marker is hidden from users by default; it is
    enabled for a user only when that user opts in by naming the plugin in
    the experimentalPlugin query parameter of the URL.

    Either a TBPlugin or a TBLoader instance may carry the marker,
    whichever is most convenient. The typical pattern is a new class that
    mixes an existing TBPlugin/TBLoader class with this marker class. For
    example:

        class ExperimentalGraphsPlugin(
            graphs_plugin.GraphsPlugin,
            experimental_plugin.ExperimentalPlugin,
        ):
            pass

        class ExperimentalDebuggerPluginLoader(
            debugger_plugin_loader.DebuggerPluginLoader,
            experimental_plugin.ExperimentalPlugin
        ):
            pass

    Note: This class is itself an experimental mechanism and is subject to
    modification or removal without warning.
    """
|
|
@@ -0,0 +1,263 @@
|
|
|
1
|
+
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
|
|
2
|
+
#
|
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
|
+
# you may not use this file except in compliance with the License.
|
|
5
|
+
# You may obtain a copy of the License at
|
|
6
|
+
#
|
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
8
|
+
#
|
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
+
# See the License for the specific language governing permissions and
|
|
13
|
+
# limitations under the License.
|
|
14
|
+
# ==============================================================================
|
|
15
|
+
"""TensorBoard HTTP utilities."""
|
|
16
|
+
|
|
17
|
+
import gzip
|
|
18
|
+
import io
|
|
19
|
+
import json
|
|
20
|
+
import re
|
|
21
|
+
import struct
|
|
22
|
+
import time
|
|
23
|
+
import wsgiref.handlers
|
|
24
|
+
|
|
25
|
+
import werkzeug
|
|
26
|
+
|
|
27
|
+
from tensorbored.backend import json_util
|
|
28
|
+
|
|
29
|
+
_DISALLOWED_CHAR_IN_DOMAIN = re.compile(r"\s")
|
|
30
|
+
|
|
31
|
+
# TODO(stephanwlee): Refactor this to not use the module variable but
|
|
32
|
+
# instead use a configurable via some kind of assets provider which would
|
|
33
|
+
# hold configurations for the CSP.
|
|
34
|
+
|
|
35
|
+
# @vaadin/vaadin-lumo-styles/font-icons(via vaadin-grid) uses data URI for
|
|
36
|
+
# loading font icons.
|
|
37
|
+
_CSP_FONT_DOMAINS_WHITELIST = ["data:"]
|
|
38
|
+
_CSP_FRAME_DOMAINS_WHITELIST = ["https://ui.perfetto.dev"]
|
|
39
|
+
_CSP_IMG_DOMAINS_WHITELIST = []
|
|
40
|
+
_CSP_SCRIPT_DOMAINS_WHITELIST = []
|
|
41
|
+
_CSP_CONNECT_DOMAINS_WHITELIST = []
|
|
42
|
+
_CSP_SCRIPT_SELF = True
|
|
43
|
+
# numericjs (via projector) uses unsafe-eval :(.
|
|
44
|
+
_CSP_SCRIPT_UNSAFE_EVAL = True
|
|
45
|
+
_CSP_STYLE_DOMAINS_WHITELIST = []
|
|
46
|
+
|
|
47
|
+
_EXTRACT_MIMETYPE_PATTERN = re.compile(r"^[^;\s]*")
|
|
48
|
+
_EXTRACT_CHARSET_PATTERN = re.compile(r"charset=([-_0-9A-Za-z]+)")
|
|
49
|
+
|
|
50
|
+
# Allows *, gzip or x-gzip, but forbid gzip;q=0
|
|
51
|
+
# https://tools.ietf.org/html/rfc7231#section-5.3.4
|
|
52
|
+
_ALLOWS_GZIP_PATTERN = re.compile(
|
|
53
|
+
r"(?:^|,|\s)(?:(?:x-)?gzip|\*)(?!;q=0)(?:\s|,|$)"
|
|
54
|
+
)
|
|
55
|
+
|
|
56
|
+
_TEXTUAL_MIMETYPES = set(
|
|
57
|
+
[
|
|
58
|
+
"application/javascript",
|
|
59
|
+
"application/json",
|
|
60
|
+
"application/json+protobuf",
|
|
61
|
+
"image/svg+xml",
|
|
62
|
+
"text/css",
|
|
63
|
+
"text/csv",
|
|
64
|
+
"text/html",
|
|
65
|
+
"text/javascript",
|
|
66
|
+
"text/plain",
|
|
67
|
+
"text/tab-separated-values",
|
|
68
|
+
"text/x-protobuf",
|
|
69
|
+
]
|
|
70
|
+
)
|
|
71
|
+
|
|
72
|
+
_JSON_MIMETYPES = set(
|
|
73
|
+
[
|
|
74
|
+
"application/json",
|
|
75
|
+
"application/json+protobuf",
|
|
76
|
+
]
|
|
77
|
+
)
|
|
78
|
+
|
|
79
|
+
# Do not support xhtml for now.
|
|
80
|
+
_HTML_MIMETYPE = "text/html"
|
|
81
|
+
|
|
82
|
+
|
|
83
|
+
def Respond(
    request,
    content,
    content_type,
    code=200,
    expires=0,
    content_encoding=None,
    encoding="utf-8",
    csp_scripts_sha256s=None,
    headers=None,
):
    """Construct a werkzeug Response.

    Responses are transmitted to the browser with compression if: a) the browser
    supports it; b) it's sane to compress the content_type in question; and c)
    the content isn't already compressed, as indicated by the content_encoding
    parameter.

    Browser and proxy caching is completely disabled by default. If the expires
    parameter is greater than zero then the response will be able to be cached by
    the browser for that many seconds; however, proxies are still forbidden from
    caching so that developers can bypass the cache with Ctrl+Shift+R.

    For textual content that isn't JSON, the encoding parameter is used as the
    transmission charset which is automatically appended to the Content-Type
    header. That is unless of course the content_type parameter contains a
    charset parameter. If the two disagree, the characters in content will be
    transcoded to the latter.

    If content_type declares a JSON media type, then content MAY be a dict, list,
    tuple, or set, in which case this function has an implicit composition with
    json_util.Cleanse and json.dumps. The encoding parameter is used to decode
    byte strings within the JSON object; therefore transmitting binary data
    within JSON is not permitted. JSON is transmitted as ASCII unless the
    content_type parameter explicitly defines a charset parameter, in which case
    the serialized JSON bytes will use that instead of escape sequences.

    Args:
      request: A werkzeug Request object. Used mostly to check the
        Accept-Encoding header.
      content: Payload data as byte string, unicode string, or maybe JSON.
      content_type: Media type and optionally an output charset.
      code: Numeric HTTP status code to use.
      expires: Second duration for browser caching.
      content_encoding: Encoding if content is already encoded, e.g. 'gzip'.
      encoding: Input charset if content parameter has byte strings.
      csp_scripts_sha256s: List of base64 serialized sha256 of whitelisted script
        elements for script-src of the Content-Security-Policy. If it is None, the
        HTML will disallow any script to execute. It is only be used when the
        content_type is text/html.
      headers: Any additional headers to include on the response, as a
        list of key-value tuples: e.g., `[("Allow", "GET")]`. In case of
        conflict, these may be overridden with headers added by this function.

    Returns:
      A werkzeug Response object (a WSGI application).
    """

    mimetype = _EXTRACT_MIMETYPE_PATTERN.search(content_type).group(0)
    charset_match = _EXTRACT_CHARSET_PATTERN.search(content_type)
    charset = charset_match.group(1) if charset_match else encoding
    # An explicit charset parameter always implies textual content, even for
    # media types outside _TEXTUAL_MIMETYPES; `textual` may be a Match object
    # or a bool and is only ever used in boolean context.
    textual = charset_match or mimetype in _TEXTUAL_MIMETYPES
    if mimetype in _JSON_MIMETYPES and isinstance(
        content, (dict, list, set, tuple)
    ):
        # JSON is serialized to pure ASCII (escape sequences) unless the
        # caller pinned a charset in content_type.
        content = json.dumps(
            json_util.Cleanse(content, encoding), ensure_ascii=not charset_match
        )

    # Ensure correct output encoding, transcoding if necessary.
    if charset != encoding and isinstance(content, bytes):
        content = content.decode(encoding)
    if isinstance(content, str):
        content = content.encode(charset)

    # Advertise the output charset for non-JSON text; JSON responses omit it
    # (they are ASCII-safe or already carry an explicit charset parameter).
    if textual and not charset_match and mimetype not in _JSON_MIMETYPES:
        content_type += "; charset=" + charset
    gzip_accepted = _ALLOWS_GZIP_PATTERN.search(
        request.headers.get("Accept-Encoding", "")
    )
    # Automatically gzip uncompressed text data if accepted.
    if textual and not content_encoding and gzip_accepted:
        out = io.BytesIO()
        # Set mtime to zero to make payload for a given input deterministic.
        with gzip.GzipFile(
            fileobj=out, mode="wb", compresslevel=3, mtime=0
        ) as f:
            f.write(content)
        content = out.getvalue()
        content_encoding = "gzip"

    content_length = len(content)
    direct_passthrough = False
    # Automatically streamwise-gunzip precompressed data if not accepted.
    if content_encoding == "gzip" and not gzip_accepted:
        gzip_file = gzip.GzipFile(fileobj=io.BytesIO(content), mode="rb")
        # Last 4 bytes of gzip formatted data (little-endian) store the original
        # content length mod 2^32; we just assume it's the content length. That
        # means we can't streamwise-gunzip >4 GB precompressed file; this is ok.
        content_length = struct.unpack("<I", content[-4:])[0]
        content = werkzeug.wsgi.wrap_file(request.environ, gzip_file)
        content_encoding = None
        direct_passthrough = True

    # Caller-supplied headers go first so the ones appended below win on
    # conflict (werkzeug keeps later duplicates authoritative for these).
    headers = list(headers or [])
    headers.append(("Content-Length", str(content_length)))
    headers.append(("X-Content-Type-Options", "nosniff"))
    if content_encoding:
        headers.append(("Content-Encoding", content_encoding))
    if expires > 0:
        # Browser-only caching: `private` forbids shared proxy caches.
        e = wsgiref.handlers.format_date_time(time.time() + float(expires))
        headers.append(("Expires", e))
        headers.append(("Cache-Control", "private, max-age=%d" % expires))
    else:
        headers.append(("Expires", "0"))
        headers.append(("Cache-Control", "no-cache, must-revalidate"))
    if mimetype == _HTML_MIMETYPE:
        # Build the script-src list: configured domains, optional 'self' /
        # 'unsafe-eval', and per-response sha256-whitelisted inline scripts.
        frags = (
            _CSP_SCRIPT_DOMAINS_WHITELIST
            + [
                "'self'" if _CSP_SCRIPT_SELF else "",
                "'unsafe-eval'" if _CSP_SCRIPT_UNSAFE_EVAL else "",
            ]
            + [
                "'sha256-{}'".format(sha256)
                for sha256 in (csp_scripts_sha256s or [])
            ]
        )
        script_srcs = _create_csp_string(*frags)

        csp_string = ";".join(
            [
                "default-src 'self'",
                "font-src %s"
                % _create_csp_string("'self'", *_CSP_FONT_DOMAINS_WHITELIST),
                # Dynamic plugins are rendered inside an iframe.
                "frame-src %s"
                % _create_csp_string("'self'", *_CSP_FRAME_DOMAINS_WHITELIST),
                "img-src %s"
                % _create_csp_string(
                    "'self'",
                    # used by favicon
                    "data:",
                    # used by What-If tool for image sprites.
                    "blob:",
                    *_CSP_IMG_DOMAINS_WHITELIST,
                ),
                "object-src 'none'",
                "style-src %s"
                % _create_csp_string(
                    "'self'",
                    # used by google-chart
                    "https://www.gstatic.com",
                    "data:",
                    # inline styles: Polymer templates + d3 uses inline styles.
                    "'unsafe-inline'",
                    *_CSP_STYLE_DOMAINS_WHITELIST,
                ),
                "connect-src %s"
                % _create_csp_string("'self'", *_CSP_CONNECT_DOMAINS_WHITELIST),
                "script-src %s" % script_srcs,
            ]
        )

        headers.append(("Content-Security-Policy", csp_string))

    # HEAD responses carry the same headers (including Content-Length) but
    # no body.
    if request.method == "HEAD":
        content = None

    return werkzeug.wrappers.Response(
        response=content,
        status=code,
        headers=headers,
        content_type=content_type,
        direct_passthrough=direct_passthrough,
    )
|
|
259
|
+
|
|
260
|
+
|
|
261
|
+
def _create_csp_string(*csp_fragments):
|
|
262
|
+
csp_string = " ".join([frag for frag in csp_fragments if frag])
|
|
263
|
+
return csp_string if csp_string else "'none'"
|