tensorbored-2.21.0rc1769983804-py3-none-any.whl
This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- tensorbored/__init__.py +112 -0
- tensorbored/_vendor/__init__.py +0 -0
- tensorbored/_vendor/bleach/__init__.py +125 -0
- tensorbored/_vendor/bleach/_vendor/__init__.py +0 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/__init__.py +35 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/_ihatexml.py +289 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/_inputstream.py +918 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/_tokenizer.py +1735 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/_trie/__init__.py +5 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/_trie/_base.py +40 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/_trie/py.py +67 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/_utils.py +159 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/constants.py +2946 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/filters/__init__.py +0 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/filters/alphabeticalattributes.py +29 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/filters/base.py +12 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/filters/inject_meta_charset.py +73 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/filters/lint.py +93 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/filters/optionaltags.py +207 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/filters/sanitizer.py +916 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/filters/whitespace.py +38 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/html5parser.py +2795 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/serializer.py +409 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/treeadapters/__init__.py +30 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/treeadapters/genshi.py +54 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/treeadapters/sax.py +50 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/treebuilders/__init__.py +88 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/treebuilders/base.py +417 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/treebuilders/dom.py +239 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/treebuilders/etree.py +343 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/treebuilders/etree_lxml.py +392 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/treewalkers/__init__.py +154 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/treewalkers/base.py +252 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/treewalkers/dom.py +43 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/treewalkers/etree.py +131 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/treewalkers/etree_lxml.py +215 -0
- tensorbored/_vendor/bleach/_vendor/html5lib/treewalkers/genshi.py +69 -0
- tensorbored/_vendor/bleach/_vendor/parse.py +1078 -0
- tensorbored/_vendor/bleach/callbacks.py +32 -0
- tensorbored/_vendor/bleach/html5lib_shim.py +757 -0
- tensorbored/_vendor/bleach/linkifier.py +633 -0
- tensorbored/_vendor/bleach/parse_shim.py +1 -0
- tensorbored/_vendor/bleach/sanitizer.py +638 -0
- tensorbored/_vendor/bleach/six_shim.py +19 -0
- tensorbored/_vendor/webencodings/__init__.py +342 -0
- tensorbored/_vendor/webencodings/labels.py +231 -0
- tensorbored/_vendor/webencodings/mklabels.py +59 -0
- tensorbored/_vendor/webencodings/x_user_defined.py +325 -0
- tensorbored/assets.py +36 -0
- tensorbored/auth.py +102 -0
- tensorbored/backend/__init__.py +0 -0
- tensorbored/backend/application.py +604 -0
- tensorbored/backend/auth_context_middleware.py +38 -0
- tensorbored/backend/client_feature_flags.py +113 -0
- tensorbored/backend/empty_path_redirect.py +46 -0
- tensorbored/backend/event_processing/__init__.py +0 -0
- tensorbored/backend/event_processing/data_ingester.py +276 -0
- tensorbored/backend/event_processing/data_provider.py +535 -0
- tensorbored/backend/event_processing/directory_loader.py +142 -0
- tensorbored/backend/event_processing/directory_watcher.py +272 -0
- tensorbored/backend/event_processing/event_accumulator.py +950 -0
- tensorbored/backend/event_processing/event_file_inspector.py +463 -0
- tensorbored/backend/event_processing/event_file_loader.py +292 -0
- tensorbored/backend/event_processing/event_multiplexer.py +521 -0
- tensorbored/backend/event_processing/event_util.py +68 -0
- tensorbored/backend/event_processing/io_wrapper.py +223 -0
- tensorbored/backend/event_processing/plugin_asset_util.py +104 -0
- tensorbored/backend/event_processing/plugin_event_accumulator.py +721 -0
- tensorbored/backend/event_processing/plugin_event_multiplexer.py +522 -0
- tensorbored/backend/event_processing/reservoir.py +266 -0
- tensorbored/backend/event_processing/tag_types.py +29 -0
- tensorbored/backend/experiment_id.py +71 -0
- tensorbored/backend/experimental_plugin.py +51 -0
- tensorbored/backend/http_util.py +263 -0
- tensorbored/backend/json_util.py +70 -0
- tensorbored/backend/path_prefix.py +67 -0
- tensorbored/backend/process_graph.py +74 -0
- tensorbored/backend/security_validator.py +202 -0
- tensorbored/compat/__init__.py +69 -0
- tensorbored/compat/proto/__init__.py +0 -0
- tensorbored/compat/proto/allocation_description_pb2.py +35 -0
- tensorbored/compat/proto/api_def_pb2.py +82 -0
- tensorbored/compat/proto/attr_value_pb2.py +80 -0
- tensorbored/compat/proto/cluster_pb2.py +58 -0
- tensorbored/compat/proto/config_pb2.py +271 -0
- tensorbored/compat/proto/coordination_config_pb2.py +45 -0
- tensorbored/compat/proto/cost_graph_pb2.py +87 -0
- tensorbored/compat/proto/cpp_shape_inference_pb2.py +70 -0
- tensorbored/compat/proto/debug_pb2.py +65 -0
- tensorbored/compat/proto/event_pb2.py +149 -0
- tensorbored/compat/proto/full_type_pb2.py +74 -0
- tensorbored/compat/proto/function_pb2.py +157 -0
- tensorbored/compat/proto/graph_debug_info_pb2.py +111 -0
- tensorbored/compat/proto/graph_pb2.py +41 -0
- tensorbored/compat/proto/histogram_pb2.py +39 -0
- tensorbored/compat/proto/meta_graph_pb2.py +254 -0
- tensorbored/compat/proto/node_def_pb2.py +61 -0
- tensorbored/compat/proto/op_def_pb2.py +81 -0
- tensorbored/compat/proto/resource_handle_pb2.py +48 -0
- tensorbored/compat/proto/rewriter_config_pb2.py +93 -0
- tensorbored/compat/proto/rpc_options_pb2.py +35 -0
- tensorbored/compat/proto/saved_object_graph_pb2.py +193 -0
- tensorbored/compat/proto/saver_pb2.py +38 -0
- tensorbored/compat/proto/step_stats_pb2.py +116 -0
- tensorbored/compat/proto/struct_pb2.py +144 -0
- tensorbored/compat/proto/summary_pb2.py +111 -0
- tensorbored/compat/proto/tensor_description_pb2.py +38 -0
- tensorbored/compat/proto/tensor_pb2.py +68 -0
- tensorbored/compat/proto/tensor_shape_pb2.py +46 -0
- tensorbored/compat/proto/tfprof_log_pb2.py +307 -0
- tensorbored/compat/proto/trackable_object_graph_pb2.py +90 -0
- tensorbored/compat/proto/types_pb2.py +105 -0
- tensorbored/compat/proto/variable_pb2.py +62 -0
- tensorbored/compat/proto/verifier_config_pb2.py +38 -0
- tensorbored/compat/proto/versions_pb2.py +35 -0
- tensorbored/compat/tensorflow_stub/__init__.py +38 -0
- tensorbored/compat/tensorflow_stub/app.py +124 -0
- tensorbored/compat/tensorflow_stub/compat/__init__.py +131 -0
- tensorbored/compat/tensorflow_stub/compat/v1/__init__.py +20 -0
- tensorbored/compat/tensorflow_stub/dtypes.py +692 -0
- tensorbored/compat/tensorflow_stub/error_codes.py +169 -0
- tensorbored/compat/tensorflow_stub/errors.py +507 -0
- tensorbored/compat/tensorflow_stub/flags.py +124 -0
- tensorbored/compat/tensorflow_stub/io/__init__.py +17 -0
- tensorbored/compat/tensorflow_stub/io/gfile.py +1011 -0
- tensorbored/compat/tensorflow_stub/pywrap_tensorflow.py +285 -0
- tensorbored/compat/tensorflow_stub/tensor_shape.py +1035 -0
- tensorbored/context.py +129 -0
- tensorbored/data/__init__.py +0 -0
- tensorbored/data/grpc_provider.py +365 -0
- tensorbored/data/ingester.py +46 -0
- tensorbored/data/proto/__init__.py +0 -0
- tensorbored/data/proto/data_provider_pb2.py +517 -0
- tensorbored/data/proto/data_provider_pb2_grpc.py +374 -0
- tensorbored/data/provider.py +1365 -0
- tensorbored/data/server_ingester.py +301 -0
- tensorbored/data_compat.py +159 -0
- tensorbored/dataclass_compat.py +224 -0
- tensorbored/default.py +124 -0
- tensorbored/errors.py +130 -0
- tensorbored/lazy.py +99 -0
- tensorbored/main.py +48 -0
- tensorbored/main_lib.py +62 -0
- tensorbored/manager.py +487 -0
- tensorbored/notebook.py +441 -0
- tensorbored/plugin_util.py +266 -0
- tensorbored/plugins/__init__.py +0 -0
- tensorbored/plugins/audio/__init__.py +0 -0
- tensorbored/plugins/audio/audio_plugin.py +229 -0
- tensorbored/plugins/audio/metadata.py +69 -0
- tensorbored/plugins/audio/plugin_data_pb2.py +37 -0
- tensorbored/plugins/audio/summary.py +230 -0
- tensorbored/plugins/audio/summary_v2.py +124 -0
- tensorbored/plugins/base_plugin.py +367 -0
- tensorbored/plugins/core/__init__.py +0 -0
- tensorbored/plugins/core/core_plugin.py +981 -0
- tensorbored/plugins/custom_scalar/__init__.py +0 -0
- tensorbored/plugins/custom_scalar/custom_scalars_plugin.py +320 -0
- tensorbored/plugins/custom_scalar/layout_pb2.py +85 -0
- tensorbored/plugins/custom_scalar/metadata.py +35 -0
- tensorbored/plugins/custom_scalar/summary.py +79 -0
- tensorbored/plugins/debugger_v2/__init__.py +0 -0
- tensorbored/plugins/debugger_v2/debug_data_multiplexer.py +631 -0
- tensorbored/plugins/debugger_v2/debug_data_provider.py +634 -0
- tensorbored/plugins/debugger_v2/debugger_v2_plugin.py +504 -0
- tensorbored/plugins/distribution/__init__.py +0 -0
- tensorbored/plugins/distribution/compressor.py +158 -0
- tensorbored/plugins/distribution/distributions_plugin.py +116 -0
- tensorbored/plugins/distribution/metadata.py +19 -0
- tensorbored/plugins/graph/__init__.py +0 -0
- tensorbored/plugins/graph/graph_util.py +129 -0
- tensorbored/plugins/graph/graphs_plugin.py +336 -0
- tensorbored/plugins/graph/keras_util.py +328 -0
- tensorbored/plugins/graph/metadata.py +42 -0
- tensorbored/plugins/histogram/__init__.py +0 -0
- tensorbored/plugins/histogram/histograms_plugin.py +144 -0
- tensorbored/plugins/histogram/metadata.py +63 -0
- tensorbored/plugins/histogram/plugin_data_pb2.py +34 -0
- tensorbored/plugins/histogram/summary.py +234 -0
- tensorbored/plugins/histogram/summary_v2.py +292 -0
- tensorbored/plugins/hparams/__init__.py +14 -0
- tensorbored/plugins/hparams/_keras.py +93 -0
- tensorbored/plugins/hparams/api.py +130 -0
- tensorbored/plugins/hparams/api_pb2.py +208 -0
- tensorbored/plugins/hparams/backend_context.py +606 -0
- tensorbored/plugins/hparams/download_data.py +158 -0
- tensorbored/plugins/hparams/error.py +26 -0
- tensorbored/plugins/hparams/get_experiment.py +71 -0
- tensorbored/plugins/hparams/hparams_plugin.py +206 -0
- tensorbored/plugins/hparams/hparams_util_pb2.py +69 -0
- tensorbored/plugins/hparams/json_format_compat.py +38 -0
- tensorbored/plugins/hparams/list_metric_evals.py +57 -0
- tensorbored/plugins/hparams/list_session_groups.py +1040 -0
- tensorbored/plugins/hparams/metadata.py +125 -0
- tensorbored/plugins/hparams/metrics.py +41 -0
- tensorbored/plugins/hparams/plugin_data_pb2.py +69 -0
- tensorbored/plugins/hparams/summary.py +205 -0
- tensorbored/plugins/hparams/summary_v2.py +597 -0
- tensorbored/plugins/image/__init__.py +0 -0
- tensorbored/plugins/image/images_plugin.py +232 -0
- tensorbored/plugins/image/metadata.py +65 -0
- tensorbored/plugins/image/plugin_data_pb2.py +34 -0
- tensorbored/plugins/image/summary.py +159 -0
- tensorbored/plugins/image/summary_v2.py +130 -0
- tensorbored/plugins/mesh/__init__.py +14 -0
- tensorbored/plugins/mesh/mesh_plugin.py +292 -0
- tensorbored/plugins/mesh/metadata.py +152 -0
- tensorbored/plugins/mesh/plugin_data_pb2.py +37 -0
- tensorbored/plugins/mesh/summary.py +251 -0
- tensorbored/plugins/mesh/summary_v2.py +214 -0
- tensorbored/plugins/metrics/__init__.py +0 -0
- tensorbored/plugins/metrics/metadata.py +17 -0
- tensorbored/plugins/metrics/metrics_plugin.py +623 -0
- tensorbored/plugins/pr_curve/__init__.py +0 -0
- tensorbored/plugins/pr_curve/metadata.py +75 -0
- tensorbored/plugins/pr_curve/plugin_data_pb2.py +34 -0
- tensorbored/plugins/pr_curve/pr_curves_plugin.py +241 -0
- tensorbored/plugins/pr_curve/summary.py +574 -0
- tensorbored/plugins/profile_redirect/__init__.py +0 -0
- tensorbored/plugins/profile_redirect/profile_redirect_plugin.py +49 -0
- tensorbored/plugins/projector/__init__.py +67 -0
- tensorbored/plugins/projector/metadata.py +26 -0
- tensorbored/plugins/projector/projector_config_pb2.py +54 -0
- tensorbored/plugins/projector/projector_plugin.py +795 -0
- tensorbored/plugins/projector/tf_projector_plugin/index.js +32 -0
- tensorbored/plugins/projector/tf_projector_plugin/projector_binary.html +524 -0
- tensorbored/plugins/projector/tf_projector_plugin/projector_binary.js +15536 -0
- tensorbored/plugins/scalar/__init__.py +0 -0
- tensorbored/plugins/scalar/metadata.py +60 -0
- tensorbored/plugins/scalar/plugin_data_pb2.py +34 -0
- tensorbored/plugins/scalar/scalars_plugin.py +181 -0
- tensorbored/plugins/scalar/summary.py +109 -0
- tensorbored/plugins/scalar/summary_v2.py +124 -0
- tensorbored/plugins/text/__init__.py +0 -0
- tensorbored/plugins/text/metadata.py +62 -0
- tensorbored/plugins/text/plugin_data_pb2.py +34 -0
- tensorbored/plugins/text/summary.py +114 -0
- tensorbored/plugins/text/summary_v2.py +124 -0
- tensorbored/plugins/text/text_plugin.py +288 -0
- tensorbored/plugins/wit_redirect/__init__.py +0 -0
- tensorbored/plugins/wit_redirect/wit_redirect_plugin.py +49 -0
- tensorbored/program.py +910 -0
- tensorbored/summary/__init__.py +35 -0
- tensorbored/summary/_output.py +124 -0
- tensorbored/summary/_tf/__init__.py +14 -0
- tensorbored/summary/_tf/summary/__init__.py +178 -0
- tensorbored/summary/_writer.py +105 -0
- tensorbored/summary/v1.py +51 -0
- tensorbored/summary/v2.py +25 -0
- tensorbored/summary/writer/__init__.py +13 -0
- tensorbored/summary/writer/event_file_writer.py +291 -0
- tensorbored/summary/writer/record_writer.py +50 -0
- tensorbored/util/__init__.py +0 -0
- tensorbored/util/encoder.py +116 -0
- tensorbored/util/grpc_util.py +311 -0
- tensorbored/util/img_mime_type_detector.py +40 -0
- tensorbored/util/io_util.py +20 -0
- tensorbored/util/lazy_tensor_creator.py +110 -0
- tensorbored/util/op_evaluator.py +104 -0
- tensorbored/util/platform_util.py +20 -0
- tensorbored/util/tb_logging.py +24 -0
- tensorbored/util/tensor_util.py +617 -0
- tensorbored/util/timing.py +122 -0
- tensorbored/version.py +21 -0
- tensorbored/webfiles.zip +0 -0
- tensorbored-2.21.0rc1769983804.dist-info/METADATA +49 -0
- tensorbored-2.21.0rc1769983804.dist-info/RECORD +271 -0
- tensorbored-2.21.0rc1769983804.dist-info/WHEEL +5 -0
- tensorbored-2.21.0rc1769983804.dist-info/entry_points.txt +6 -0
- tensorbored-2.21.0rc1769983804.dist-info/licenses/LICENSE +739 -0
- tensorbored-2.21.0rc1769983804.dist-info/top_level.txt +1 -0
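
A listing like the one above can be reproduced locally with nothing but the standard library, since a wheel is an ordinary zip archive. This is a minimal sketch; the wheel filename is taken from the header and is assumed to be present on disk, and it prints member byte sizes rather than the added-line counts shown above.

```python
# Sketch: enumerate the members of the wheel (a zip archive).
# Assumes the wheel file has already been downloaded to the working directory.
import zipfile

WHEEL = "tensorbored-2.21.0rc1769983804-py3-none-any.whl"  # assumed local path

with zipfile.ZipFile(WHEEL) as whl:
    for info in whl.infolist():
        # Member paths match the RECORD file shipped in the dist-info directory.
        print(f"{info.filename} ({info.file_size} bytes)")
```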
@@ -0,0 +1,795 @@
+# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""The Embedding Projector plugin."""
+
+import collections
+import functools
+import mimetypes
+import os
+import threading
+
+import numpy as np
+from werkzeug import wrappers
+
+from google.protobuf import json_format
+from google.protobuf import text_format
+
+from tensorbored import context
+from tensorbored.backend.event_processing import plugin_asset_util
+from tensorbored.backend.http_util import Respond
+from tensorbored.compat import tf
+from tensorbored.plugins import base_plugin
+from tensorbored.plugins.projector import metadata
+from tensorbored.plugins.projector.projector_config_pb2 import ProjectorConfig
+from tensorbored.util import img_mime_type_detector, tb_logging
+
+logger = tb_logging.get_logger()
+
+# Number of tensors in the LRU cache.
+_TENSOR_CACHE_CAPACITY = 1
+
+# HTTP routes.
+CONFIG_ROUTE = "/info"
+TENSOR_ROUTE = "/tensor"
+METADATA_ROUTE = "/metadata"
+RUNS_ROUTE = "/runs"
+BOOKMARKS_ROUTE = "/bookmarks"
+SPRITE_IMAGE_ROUTE = "/sprite_image"
+
+
+class LRUCache:
+    """LRU cache.
+
+    Used for storing the last used tensor.
+    """
+
+    def __init__(self, size):
+        if size < 1:
+            raise ValueError("The cache size must be >=1")
+        self._size = size
+        self._dict = collections.OrderedDict()
+
+    def get(self, key):
+        try:
+            value = self._dict.pop(key)
+            self._dict[key] = value
+            return value
+        except KeyError:
+            return None
+
+    def set(self, key, value):
+        if value is None:
+            raise ValueError("value must be != None")
+        try:
+            self._dict.pop(key)
+        except KeyError:
+            if len(self._dict) >= self._size:
+                self._dict.popitem(last=False)
+        self._dict[key] = value
+
+
+class EmbeddingMetadata:
+    """Metadata container for an embedding.
+
+    The metadata holds different columns with values used for
+    visualization (color by, label by) in the "Embeddings" tab in
+    TensorBoard.
+    """
+
+    def __init__(self, num_points):
+        """Constructs a metadata for an embedding of the specified size.
+
+        Args:
+          num_points: Number of points in the embedding.
+        """
+        self.num_points = num_points
+        self.column_names = []
+        self.name_to_values = {}
+
+    def add_column(self, column_name, column_values):
+        """Adds a named column of metadata values.
+
+        Args:
+          column_name: Name of the column.
+          column_values: 1D array/list/iterable holding the column values. Must be
+            of length `num_points`. The i-th value corresponds to the i-th point.
+
+        Raises:
+          ValueError: If `column_values` is not 1D array, or of length `num_points`,
+            or the `name` is already used.
+        """
+        # Sanity checks.
+        if isinstance(column_values, list) and isinstance(
+            column_values[0], list
+        ):
+            raise ValueError(
+                '"column_values" must be a flat list, but we detected '
+                "that its first entry is a list"
+            )
+
+        if isinstance(column_values, np.ndarray) and column_values.ndim != 1:
+            raise ValueError(
+                '"column_values" should be of rank 1, '
+                "but is of rank %d" % column_values.ndim
+            )
+        if len(column_values) != self.num_points:
+            raise ValueError(
+                '"column_values" should be of length %d, but is of '
+                "length %d" % (self.num_points, len(column_values))
+            )
+        if column_name in self.name_to_values:
+            raise ValueError(
+                'The column name "%s" is already used' % column_name
+            )
+
+        self.column_names.append(column_name)
+        self.name_to_values[column_name] = column_values
+
+
+def _read_tensor_tsv_file(fpath):
+    with tf.io.gfile.GFile(fpath, "r") as f:
+        tensor = []
+        for line in f:
+            line = line.rstrip("\n")
+            if line:
+                tensor.append(list(map(float, line.split("\t"))))
+    return np.array(tensor, dtype="float32")
+
+
+def _read_tensor_binary_file(fpath, shape):
+    if len(shape) != 2:
+        raise ValueError("Tensor must be 2D, got shape {}".format(shape))
+    tensor = np.fromfile(fpath, dtype="float32")
+    return tensor.reshape(shape)
+
+
+def _assets_dir_to_logdir(assets_dir):
+    sub_path = os.path.sep + metadata.PLUGINS_DIR + os.path.sep
+    if sub_path in assets_dir:
+        two_parents_up = os.pardir + os.path.sep + os.pardir
+        return os.path.abspath(os.path.join(assets_dir, two_parents_up))
+    return assets_dir
+
+
+def _latest_checkpoints_changed(configs, run_path_pairs):
+    """Returns true if the latest checkpoint has changed in any of the runs."""
+    for run_name, assets_dir in run_path_pairs:
+        if run_name not in configs:
+            config = ProjectorConfig()
+            config_fpath = os.path.join(assets_dir, metadata.PROJECTOR_FILENAME)
+            if tf.io.gfile.exists(config_fpath):
+                with tf.io.gfile.GFile(config_fpath, "r") as f:
+                    file_content = f.read()
+                text_format.Parse(file_content, config)
+        else:
+            config = configs[run_name]
+
+        # See if you can find a checkpoint file in the logdir.
+        logdir = _assets_dir_to_logdir(assets_dir)
+        ckpt_path = _find_latest_checkpoint(logdir)
+        if not ckpt_path:
+            continue
+        if config.model_checkpoint_path != ckpt_path:
+            return True
+    return False
+
+
+def _parse_positive_int_param(request, param_name):
+    """Parses and asserts a positive (>0) integer query parameter.
+
+    Args:
+      request: The Werkzeug Request object
+      param_name: Name of the parameter.
+
+    Returns:
+      Param, or None, or -1 if parameter is not a positive integer.
+    """
+    param = request.args.get(param_name)
+    if not param:
+        return None
+    try:
+        param = int(param)
+        if param <= 0:
+            raise ValueError()
+        return param
+    except ValueError:
+        return -1
+
+
+def _rel_to_abs_asset_path(fpath, config_fpath):
+    fpath = os.path.expanduser(fpath)
+    if not os.path.isabs(fpath):
+        return os.path.join(os.path.dirname(config_fpath), fpath)
+    return fpath
+
+
+def _using_tf():
+    """Return true if we're not using the fake TF API stub implementation."""
+    return tf.__version__ != "stub"
+
+
+class ProjectorPlugin(base_plugin.TBPlugin):
+    """Embedding projector."""
+
+    plugin_name = metadata.PLUGIN_NAME
+
+    def __init__(self, context):
+        """Instantiates ProjectorPlugin via TensorBoard core.
+
+        Args:
+          context: A base_plugin.TBContext instance.
+        """
+        self.data_provider = context.data_provider
+        self.logdir = context.logdir
+        self.readers = {}
+        self._run_paths = None
+        self._configs = {}
+        self.config_fpaths = None
+        self.tensor_cache = LRUCache(_TENSOR_CACHE_CAPACITY)
+
+        # Whether the plugin is active (has meaningful data to process and serve).
+        # Once the plugin is deemed active, we no longer re-compute the value
+        # because doing so is potentially expensive.
+        self._is_active = False
+
+        # The running thread that is currently determining whether the plugin is
+        # active. If such a thread exists, do not start a duplicate thread.
+        self._thread_for_determining_is_active = None
+
+    def get_plugin_apps(self):
+        asset_prefix = "tf_projector_plugin"
+        return {
+            RUNS_ROUTE: self._serve_runs,
+            CONFIG_ROUTE: self._serve_config,
+            TENSOR_ROUTE: self._serve_tensor,
+            METADATA_ROUTE: self._serve_metadata,
+            BOOKMARKS_ROUTE: self._serve_bookmarks,
+            SPRITE_IMAGE_ROUTE: self._serve_sprite_image,
+            "/index.js": functools.partial(
+                self._serve_file,
+                os.path.join(asset_prefix, "index.js"),
+            ),
+            "/projector_binary.html": functools.partial(
+                self._serve_file,
+                os.path.join(asset_prefix, "projector_binary.html"),
+            ),
+            "/projector_binary.js": functools.partial(
+                self._serve_file,
+                os.path.join(asset_prefix, "projector_binary.js"),
+            ),
+        }
+
+    def is_active(self):
+        """Determines whether this plugin is active.
+
+        This plugin is only active if any run has an embedding, and only
+        when running against a local log directory.
+
+        Returns:
+          Whether any run has embedding data to show in the projector.
+        """
+        if not self.data_provider or not self.logdir:
+            return False
+
+        if self._is_active:
+            # We have already determined that the projector plugin should be active.
+            # Do not re-compute that. We have no reason to later set this plugin to be
+            # inactive.
+            return True
+
+        if self._thread_for_determining_is_active:
+            # We are currently determining whether the plugin is active. Do not start
+            # a separate thread.
+            return self._is_active
+
+        # The plugin is currently not active. The frontend might check again later.
+        # For now, spin off a separate thread to determine whether the plugin is
+        # active.
+        new_thread = threading.Thread(
+            target=self._determine_is_active,
+            name="ProjectorPluginIsActiveThread",
+        )
+        self._thread_for_determining_is_active = new_thread
+        new_thread.start()
+        return False
+
+    def frontend_metadata(self):
+        return base_plugin.FrontendMetadata(
+            es_module_path="/index.js",
+            disable_reload=True,
+        )
+
+    def _determine_is_active(self):
+        """Determines whether the plugin is active.
+
+        This method is run in a separate thread so that the plugin can
+        offer an immediate response to whether it is active and
+        determine whether it should be active in a separate thread.
+        """
+        self._update_configs()
+        if self._configs:
+            self._is_active = True
+        self._thread_for_determining_is_active = None
+
+    def _update_configs(self):
+        """Updates `self._configs` and `self._run_paths`."""
+        if self.data_provider and self.logdir:
+            # Create a background context; we may not be in a request.
+            ctx = context.RequestContext()
+            run_paths = {
+                run.run_name: os.path.join(self.logdir, run.run_name)
+                for run in self.data_provider.list_runs(ctx, experiment_id="")
+            }
+        else:
+            run_paths = {}
+        run_paths_changed = run_paths != self._run_paths
+        self._run_paths = run_paths
+
+        run_path_pairs = list(self._run_paths.items())
+        self._append_plugin_asset_directories(run_path_pairs)
+        # Also accept the root logdir as a model checkpoint directory,
+        # so that the projector still works when there are no runs.
+        # (Case on `run` rather than `path` to avoid issues with
+        # absolute/relative paths on any filesystems.)
+        if "." not in self._run_paths:
+            run_path_pairs.append((".", self.logdir))
+        if run_paths_changed or _latest_checkpoints_changed(
+            self._configs, run_path_pairs
+        ):
+            self.readers = {}
+            self._configs, self.config_fpaths = self._read_latest_config_files(
+                run_path_pairs
+            )
+            self._augment_configs_with_checkpoint_info()
+
+    def _augment_configs_with_checkpoint_info(self):
+        for run, config in self._configs.items():
+            for embedding in config.embeddings:
+                # Normalize the name of the embeddings.
+                if embedding.tensor_name.endswith(":0"):
+                    embedding.tensor_name = embedding.tensor_name[:-2]
+                # Find the size of embeddings associated with a tensors file.
+                if embedding.tensor_path:
+                    fpath = _rel_to_abs_asset_path(
+                        embedding.tensor_path, self.config_fpaths[run]
+                    )
+                    tensor = self.tensor_cache.get((run, embedding.tensor_name))
+                    if tensor is None:
+                        try:
+                            tensor = _read_tensor_tsv_file(fpath)
+                        except UnicodeDecodeError:
+                            tensor = _read_tensor_binary_file(
+                                fpath, embedding.tensor_shape
+                            )
+                        self.tensor_cache.set(
+                            (run, embedding.tensor_name), tensor
+                        )
+                    if not embedding.tensor_shape:
+                        embedding.tensor_shape.extend(
+                            [len(tensor), len(tensor[0])]
+                        )
+
+            reader = self._get_reader_for_run(run)
+            if not reader:
+                continue
+            # Augment the configuration with the tensors in the checkpoint file.
+            special_embedding = None
+            if config.embeddings and not config.embeddings[0].tensor_name:
+                special_embedding = config.embeddings[0]
+                config.embeddings.remove(special_embedding)
+            var_map = reader.get_variable_to_shape_map()
+            for tensor_name, tensor_shape in var_map.items():
+                if len(tensor_shape) != 2:
+                    continue
+                # Optimizer slot values are the same shape as embeddings
+                # but are not embeddings.
+                if ".OPTIMIZER_SLOT" in tensor_name:
+                    continue
+                embedding = self._get_embedding(tensor_name, config)
+                if not embedding:
+                    embedding = config.embeddings.add()
+                    embedding.tensor_name = tensor_name
+                    if special_embedding:
+                        embedding.metadata_path = (
+                            special_embedding.metadata_path
+                        )
+                        embedding.bookmarks_path = (
+                            special_embedding.bookmarks_path
+                        )
+                if not embedding.tensor_shape:
+                    embedding.tensor_shape.extend(tensor_shape)
+
+        # Remove configs that do not have any valid (2D) tensors.
+        runs_to_remove = []
+        for run, config in self._configs.items():
+            if not config.embeddings:
+                runs_to_remove.append(run)
+        for run in runs_to_remove:
+            del self._configs[run]
+            del self.config_fpaths[run]
+
+    def _read_latest_config_files(self, run_path_pairs):
+        """Reads and returns the projector config files in every run
+        directory."""
+        configs = {}
+        config_fpaths = {}
+        for run_name, assets_dir in run_path_pairs:
+            config = ProjectorConfig()
+            config_fpath = os.path.join(assets_dir, metadata.PROJECTOR_FILENAME)
+            if tf.io.gfile.exists(config_fpath):
+                with tf.io.gfile.GFile(config_fpath, "r") as f:
+                    file_content = f.read()
+                text_format.Parse(file_content, config)
+            has_tensor_files = False
+            for embedding in config.embeddings:
+                if embedding.tensor_path:
+                    if not embedding.tensor_name:
+                        embedding.tensor_name = os.path.basename(
+                            embedding.tensor_path
+                        )
+                    has_tensor_files = True
+                    break
+
+            if not config.model_checkpoint_path:
+                # See if you can find a checkpoint file in the logdir.
+                logdir = _assets_dir_to_logdir(assets_dir)
+                ckpt_path = _find_latest_checkpoint(logdir)
+                if not ckpt_path and not has_tensor_files:
+                    continue
+                if ckpt_path:
+                    config.model_checkpoint_path = ckpt_path
+
+            # Sanity check for the checkpoint file existing.
+            if (
+                config.model_checkpoint_path
+                and _using_tf()
+                and not tf.io.gfile.glob(config.model_checkpoint_path + "*")
+            ):
+                logger.warning(
+                    'Checkpoint file "%s" not found',
+                    config.model_checkpoint_path,
+                )
+                continue
+            configs[run_name] = config
+            config_fpaths[run_name] = config_fpath
+        return configs, config_fpaths
+
+    def _get_reader_for_run(self, run):
+        if run in self.readers:
+            return self.readers[run]
+
+        config = self._configs[run]
+        reader = None
+        if config.model_checkpoint_path and _using_tf():
+            try:
+                reader = tf.train.load_checkpoint(config.model_checkpoint_path)
+            except Exception:  # pylint: disable=broad-except
+                logger.warning(
+                    'Failed reading "%s"', config.model_checkpoint_path
+                )
+        self.readers[run] = reader
+        return reader
+
+    def _get_metadata_file_for_tensor(self, tensor_name, config):
+        embedding_info = self._get_embedding(tensor_name, config)
+        if embedding_info:
+            return embedding_info.metadata_path
+        return None
+
+    def _get_bookmarks_file_for_tensor(self, tensor_name, config):
+        embedding_info = self._get_embedding(tensor_name, config)
+        if embedding_info:
+            return embedding_info.bookmarks_path
+        return None
+
+    def _canonical_tensor_name(self, tensor_name):
+        if ":" not in tensor_name:
+            return tensor_name + ":0"
+        else:
+            return tensor_name
+
+    def _get_embedding(self, tensor_name, config):
+        if not config.embeddings:
+            return None
+        for info in config.embeddings:
+            if self._canonical_tensor_name(
+                info.tensor_name
+            ) == self._canonical_tensor_name(tensor_name):
+                return info
+        return None
+
+    def _append_plugin_asset_directories(self, run_path_pairs):
+        extra = []
+        plugin_assets_name = metadata.PLUGIN_ASSETS_NAME
+        for run, logdir in run_path_pairs:
+            assets = plugin_asset_util.ListAssets(logdir, plugin_assets_name)
+            if metadata.PROJECTOR_FILENAME not in assets:
+                continue
+            assets_dir = os.path.join(
+                self._run_paths[run], metadata.PLUGINS_DIR, plugin_assets_name
+            )
+            assets_path_pair = (run, os.path.abspath(assets_dir))
+            extra.append(assets_path_pair)
+        run_path_pairs.extend(extra)
+
+    @wrappers.Request.application
+    def _serve_file(self, file_path, request):
+        """Returns a resource file."""
+        res_path = os.path.join(os.path.dirname(__file__), file_path)
+        with open(res_path, "rb") as read_file:
+            mimetype = mimetypes.guess_type(file_path)[0]
+            return Respond(request, read_file.read(), content_type=mimetype)
+
+    @wrappers.Request.application
+    def _serve_runs(self, request):
+        """Returns a list of runs that have embeddings."""
+        self._update_configs()
+        return Respond(request, list(self._configs.keys()), "application/json")
+
+    @wrappers.Request.application
+    def _serve_config(self, request):
+        run = request.args.get("run")
+        if run is None:
+            return Respond(
+                request, 'query parameter "run" is required', "text/plain", 400
+            )
+        self._update_configs()
+        config = self._configs.get(run)
+        if config is None:
+            return Respond(
+                request, 'Unknown run: "%s"' % run, "text/plain", 400
+            )
+        return Respond(
+            request, json_format.MessageToJson(config), "application/json"
+        )
+
+    @wrappers.Request.application
+    def _serve_metadata(self, request):
+        run = request.args.get("run")
+        if run is None:
+            return Respond(
+                request, 'query parameter "run" is required', "text/plain", 400
+            )
+
+        name = request.args.get("name")
+        if name is None:
+            return Respond(
+                request, 'query parameter "name" is required', "text/plain", 400
+            )
+
+        num_rows = _parse_positive_int_param(request, "num_rows")
+        if num_rows == -1:
+            return Respond(
+                request,
+                "query parameter num_rows must be integer > 0",
+                "text/plain",
+                400,
+            )
+
+        self._update_configs()
+        config = self._configs.get(run)
+        if config is None:
+            return Respond(
+                request, 'Unknown run: "%s"' % run, "text/plain", 400
+            )
+        fpath = self._get_metadata_file_for_tensor(name, config)
+        if not fpath:
+            return Respond(
+                request,
+                'No metadata file found for tensor "%s" in the config file "%s"'
+                % (name, self.config_fpaths[run]),
+                "text/plain",
+                400,
+            )
+        fpath = _rel_to_abs_asset_path(fpath, self.config_fpaths[run])
+        if not tf.io.gfile.exists(fpath) or tf.io.gfile.isdir(fpath):
+            return Respond(
+                request,
+                '"%s" not found, or is not a file' % fpath,
+                "text/plain",
+                400,
+            )
+
+        num_header_rows = 0
+        with tf.io.gfile.GFile(fpath, "r") as f:
+            lines = []
+            # Stream reading the file with early break in case the file doesn't fit in
+            # memory.
+            for line in f:
+                lines.append(line)
+                if len(lines) == 1 and "\t" in lines[0]:
+                    num_header_rows = 1
+                if num_rows and len(lines) >= num_rows + num_header_rows:
+                    break
+        return Respond(request, "".join(lines), "text/plain")
+
+    @wrappers.Request.application
+    def _serve_tensor(self, request):
+        run = request.args.get("run")
+        if run is None:
+            return Respond(
+                request, 'query parameter "run" is required', "text/plain", 400
+            )
+
+        name = request.args.get("name")
+        if name is None:
+            return Respond(
+                request, 'query parameter "name" is required', "text/plain", 400
+            )
+
+        num_rows = _parse_positive_int_param(request, "num_rows")
+        if num_rows == -1:
+            return Respond(
+                request,
+                "query parameter num_rows must be integer > 0",
+                "text/plain",
+                400,
+            )
+
+        self._update_configs()
+        config = self._configs.get(run)
+        if config is None:
+            return Respond(
+                request, 'Unknown run: "%s"' % run, "text/plain", 400
+            )
+        tensor = self.tensor_cache.get((run, name))
+        if tensor is None:
+            # See if there is a tensor file in the config.
+            embedding = self._get_embedding(name, config)
+
+            if embedding and embedding.tensor_path:
+                fpath = _rel_to_abs_asset_path(
+                    embedding.tensor_path, self.config_fpaths[run]
+                )
+                if not tf.io.gfile.exists(fpath):
+                    return Respond(
+                        request,
+                        'Tensor file "%s" does not exist' % fpath,
+                        "text/plain",
+                        400,
+                    )
+                try:
+                    tensor = _read_tensor_tsv_file(fpath)
+                except UnicodeDecodeError:
+                    tensor = _read_tensor_binary_file(
+                        fpath, embedding.tensor_shape
+                    )
+            else:
+                reader = self._get_reader_for_run(run)
+                if not reader or not reader.has_tensor(name):
+                    return Respond(
+                        request,
+                        'Tensor "%s" not found in checkpoint dir "%s"'
+                        % (name, config.model_checkpoint_path),
+                        "text/plain",
+                        400,
+                    )
+                try:
+                    tensor = reader.get_tensor(name)
+                except tf.errors.InvalidArgumentError as e:
+                    return Respond(request, str(e), "text/plain", 400)
+
+            self.tensor_cache.set((run, name), tensor)
+
+        if num_rows:
+            tensor = tensor[:num_rows]
+        if tensor.dtype != "float32":
+            tensor = tensor.astype(dtype="float32", copy=False)
+        data_bytes = tensor.tobytes()
+        return Respond(request, data_bytes, "application/octet-stream")
+
+    @wrappers.Request.application
+    def _serve_bookmarks(self, request):
+        run = request.args.get("run")
+        if not run:
+            return Respond(
+                request, 'query parameter "run" is required', "text/plain", 400
+            )
+
+        name = request.args.get("name")
+        if name is None:
+            return Respond(
+                request, 'query parameter "name" is required', "text/plain", 400
+            )
+
+        self._update_configs()
+        config = self._configs.get(run)
+        if config is None:
+            return Respond(
+                request, 'Unknown run: "%s"' % run, "text/plain", 400
+            )
+        fpath = self._get_bookmarks_file_for_tensor(name, config)
+        if not fpath:
+            return Respond(
+                request,
+                'No bookmarks file found for tensor "%s" in the config file "%s"'
+                % (name, self.config_fpaths[run]),
+                "text/plain",
+                400,
+            )
+        fpath = _rel_to_abs_asset_path(fpath, self.config_fpaths[run])
+        if not tf.io.gfile.exists(fpath) or tf.io.gfile.isdir(fpath):
+            return Respond(
+                request,
+                '"%s" not found, or is not a file' % fpath,
+                "text/plain",
+                400,
+            )
+
+        bookmarks_json = None
+        with tf.io.gfile.GFile(fpath, "rb") as f:
+            bookmarks_json = f.read()
+        return Respond(request, bookmarks_json, "application/json")
+
+    @wrappers.Request.application
+    def _serve_sprite_image(self, request):
+        run = request.args.get("run")
+        if not run:
+            return Respond(
+                request, 'query parameter "run" is required', "text/plain", 400
+            )
+
+        name = request.args.get("name")
+        if name is None:
+            return Respond(
+                request, 'query parameter "name" is required', "text/plain", 400
+            )
+
+        self._update_configs()
+        config = self._configs.get(run)
+        if config is None:
+            return Respond(
+                request, 'Unknown run: "%s"' % run, "text/plain", 400
+            )
+
+        embedding_info = self._get_embedding(name, config)
+        if not embedding_info or not embedding_info.sprite.image_path:
+            return Respond(
+                request,
+                'No sprite image file found for tensor "%s" in the config file "%s"'
+                % (name, self.config_fpaths[run]),
+                "text/plain",
+                400,
+            )
+
+        fpath = os.path.expanduser(embedding_info.sprite.image_path)
+        fpath = _rel_to_abs_asset_path(fpath, self.config_fpaths[run])
+        if not tf.io.gfile.exists(fpath) or tf.io.gfile.isdir(fpath):
+            return Respond(
+                request,
+                '"%s" does not exist or is directory' % fpath,
+                "text/plain",
+                400,
+            )
+        f = tf.io.gfile.GFile(fpath, "rb")
+        encoded_image_string = f.read()
+        f.close()
+        mime_type = img_mime_type_detector.from_bytes(encoded_image_string)
+        return Respond(request, encoded_image_string, mime_type)
+
+
+def _find_latest_checkpoint(dir_path):
+    if not _using_tf():
+        return None
+    try:
+        ckpt_path = tf.train.latest_checkpoint(dir_path)
+        if not ckpt_path:
+            # Check the parent directory.
+            ckpt_path = tf.train.latest_checkpoint(
+                os.path.join(dir_path, os.pardir)
+            )
+        return ckpt_path
+    except tf.errors.NotFoundError:
+        return None