ml-dash 0.0.11__py3-none-any.whl → 0.5.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. ml_dash/__init__.py +59 -1
  2. ml_dash/auto_start.py +42 -0
  3. ml_dash/cli.py +67 -0
  4. ml_dash/cli_commands/__init__.py +1 -0
  5. ml_dash/cli_commands/download.py +797 -0
  6. ml_dash/cli_commands/list.py +343 -0
  7. ml_dash/cli_commands/upload.py +1298 -0
  8. ml_dash/client.py +955 -0
  9. ml_dash/config.py +114 -11
  10. ml_dash/experiment.py +1020 -0
  11. ml_dash/files.py +688 -0
  12. ml_dash/log.py +181 -0
  13. ml_dash/metric.py +292 -0
  14. ml_dash/params.py +188 -0
  15. ml_dash/storage.py +1115 -0
  16. ml_dash-0.5.9.dist-info/METADATA +244 -0
  17. ml_dash-0.5.9.dist-info/RECORD +20 -0
  18. ml_dash-0.5.9.dist-info/WHEEL +4 -0
  19. ml_dash-0.5.9.dist-info/entry_points.txt +3 -0
  20. ml_dash/app.py +0 -33
  21. ml_dash/file_events.py +0 -71
  22. ml_dash/file_handlers.py +0 -141
  23. ml_dash/file_utils.py +0 -5
  24. ml_dash/file_watcher.py +0 -30
  25. ml_dash/main.py +0 -60
  26. ml_dash/mime_types.py +0 -20
  27. ml_dash/schema/__init__.py +0 -110
  28. ml_dash/schema/archive.py +0 -165
  29. ml_dash/schema/directories.py +0 -59
  30. ml_dash/schema/experiments.py +0 -65
  31. ml_dash/schema/files/__init__.py +0 -204
  32. ml_dash/schema/files/file_helpers.py +0 -79
  33. ml_dash/schema/files/images.py +0 -27
  34. ml_dash/schema/files/metrics.py +0 -64
  35. ml_dash/schema/files/parameters.py +0 -50
  36. ml_dash/schema/files/series.py +0 -235
  37. ml_dash/schema/files/videos.py +0 -27
  38. ml_dash/schema/helpers.py +0 -66
  39. ml_dash/schema/projects.py +0 -65
  40. ml_dash/schema/schema_helpers.py +0 -19
  41. ml_dash/schema/users.py +0 -33
  42. ml_dash/sse.py +0 -18
  43. ml_dash-0.0.11.dist-info/METADATA +0 -67
  44. ml_dash-0.0.11.dist-info/RECORD +0 -30
  45. ml_dash-0.0.11.dist-info/WHEEL +0 -5
  46. ml_dash-0.0.11.dist-info/top_level.txt +0 -1
  47. /ml_dash/{example.py → py.typed} +0 -0
ml_dash/schema/files/images.py DELETED
@@ -1,27 +0,0 @@
- from os.path import split
- from graphene import ObjectType, relay, String
- from ml_dash import schema
-
-
- class File(ObjectType):
-     class Meta:
-         interfaces = relay.Node,
-
-     name = String(description='name of the directory')
-
-     # description = String(description='string serialized data')
-     # experiments = List(lambda: schema.Experiments)
-
-     @classmethod
-     def get_node(cls, info, id):
-         return get_file(id)
-
-
- class FileConnection(relay.Connection):
-     class Meta:
-         node = File
-
-
- def get_file(id):
-     # path = os.path.join(Args.logdir, id[1:])
-     return File(id=id, name=split(id[1:])[1])
ml_dash/schema/files/metrics.py DELETED
@@ -1,64 +0,0 @@
- from os.path import split, realpath, join
- from graphene import relay, ObjectType, String, List, JSONString, Int
- from graphene.types.generic import GenericScalar
- from graphql_relay import from_global_id
- from ml_dash.config import Args
- from ml_dash.schema.files.file_helpers import find_files, read_records, read_dataframe
-
-
- class Metrics(ObjectType):
-     class Meta:
-         interfaces = relay.Node,
-
-     path = String(description="path to the file")
-
-     def resolve_path(self, info):
-         return self.id
-
-     keys = List(String, description="list of keys for the metrics")
-
-     # value = List(GenericScalar, description="the raw value")
-     value = GenericScalar(description="The value of the metrics file",
-                           keys=List(String),
-                           k=Int(required=False),
-                           last=Int(required=False),
-                           window=Int(required=False))
-
-     def resolve_keys(self, info):
-         df = read_dataframe(join(Args.logdir, self.id[1:]))
-         keys = df.dropna().keys()
-         return list(keys)
-
-     # todo: add more complex queries.
-     def resolve_value(self, info, keys=None, k=None, last=None, window=None):
-         path = join(Args.logdir, self.id[1:])
-         realpath(path)
-         _ = read_dataframe(path)
-         if keys:
-             df = _[keys].dropna()
-             return {k: df[k].values.tolist() for k in keys}
-         else:
-             df = _.dropna()
-             return {k: v.values.tolist() for k, v in df.items()}
-
-     @classmethod
-     def get_node(cls, info, id):
-         return Metrics(id)
-
-
- class MetricsConnection(relay.Connection):
-     class Meta:
-         node = Metrics
-
-
- def get_metrics(id):
-     # note: this is where the key finding happens.
-     return Metrics(id=id)
-
-
- def find_metrics(cwd, **kwargs):
-     from ml_dash.config import Args
-     _cwd = realpath(join(Args.logdir, cwd[1:]))
-     parameter_files = find_files(_cwd, "**/metrics.pkl", **kwargs)
-     for p in parameter_files:
-         yield Metrics(id=join(cwd, p['path']))
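The resolvers above return plain JSON-serializable structures: resolve_value maps each metric column to a list of values. A minimal, self-contained sketch of that shape, using a synthetic dataframe rather than the package's own file readers:

    import pandas as pd

    # hypothetical contents of a metrics file, loaded as a dataframe
    df = pd.DataFrame({"epoch": [0, 1, 2], "loss": [0.9, 0.5, 0.3]})

    # the same dict-of-lists shape that resolve_value builds for the GenericScalar field
    value = {k: v.values.tolist() for k, v in df.dropna().items()}
    assert value == {"epoch": [0, 1, 2], "loss": [0.9, 0.5, 0.3]}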
ml_dash/schema/files/parameters.py DELETED
@@ -1,50 +0,0 @@
- from functools import reduce
- from os.path import split, join
- from graphene import ObjectType, relay, String, List
- from graphene.types.generic import GenericScalar
- from ml_dash import schema
- from ml_dash.config import Args
- from ml_dash.schema.files.file_helpers import read_json
- from ml_dash.schema.helpers import assign, dot_keys, dot_flatten
-
-
- class Parameters(ObjectType):
-     class Meta:
-         interfaces = relay.Node,
-
-     _path = String(description="The true path to the parameter file. Internal use only")
-     keys = List(String, description="list of parameter keys")
-     value = GenericScalar(description="the json value for the parameters")
-     raw = GenericScalar(description="the raw data object for the parameters")
-     flat = GenericScalar(description="the raw data object for the parameters")
-
-     def resolve_keys(self, info):
-         value = reduce(assign, read_json(join(Args.logdir, self.id[1:])) or [{}])
-         return dot_keys(value)
-
-     def resolve_value(self, info, **kwargs):
-         return reduce(assign, read_json(join(Args.logdir, self.id[1:])) or [{}])
-
-     def resolve_raw(self, info, **kwargs):
-         return read_json(join(Args.logdir, self.id[1:]))
-
-     def resolve_flat(self, info, **kwargs):
-         value = reduce(assign, read_json(join(Args.logdir, self.id[1:])) or [{}])
-         return dot_flatten(value)
-
-     # description = String(description='string serialized data')
-     # experiments = List(lambda: schema.Experiments)
-
-     @classmethod
-     def get_node(cls, info, id):
-         return get_parameters(id)
-
-
- class ParameterConnection(relay.Connection):
-     class Meta:
-         node = Parameters
-
-
- def get_parameters(id):
-     # path = os.path.join(Args.logdir, id[1:])
-     return Parameters(id=id)
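resolve_value above deep-merges a list of partial parameter records with functools.reduce and the assign helper from schema/helpers.py (also deleted in this release, shown further below). A short sketch of that merge, with made-up parameter records:

    from functools import reduce

    def assign(d1, d2):                      # copied from the deleted schema/helpers.py
        if not d2:
            return d1
        for k, v in d2.items():
            if isinstance(d1.get(k, None), dict):
                d1[k] = assign(d1[k], v)
            else:
                d1[k] = v
        return d1

    # hypothetical contents of a parameters file: a list of partial records
    records = [{"lr": 1e-3, "model": {"width": 64}},
               {"model": {"depth": 4}}]
    merged = reduce(assign, records or [{}])
    assert merged == {"lr": 1e-3, "model": {"width": 64, "depth": 4}}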
ml_dash/schema/files/series.py DELETED
@@ -1,235 +0,0 @@
- from os.path import split, realpath, join, isabs
- from graphene import relay, ObjectType, String, List, JSONString, ID, Enum, Date, Time, DateTime, Int, Float, Union
- from graphene.types.generic import GenericScalar
- from ml_dash.config import Args
- from ml_dash.schema.files.file_helpers import find_files, read_records, read_dataframe
- import numpy as np
-
-
- class Series(ObjectType):
-     class Meta:
-         interfaces = relay.Node,
-
-     path = String(description="the file path for the configuration file")
-
-     prefix = String(description='path prefix for the metrics files')
-     metrics_files = List(ID, description="List of metrics file IDs that we use to aggregate this series")
-
-     _df = GenericScalar(description='the processed dataframe object that aggregates all metrics files.')
-
-     window = Float(description="the window for the rolling average")
-
-     label = String(description="the lable for the series")
-     x_key = String(description="key for the x")
-     y_key = String(description="key for the y axis")
-     y_keys = List(String, description="tuple of keys for the y axis")
-
-     # stype = SeriesTyes(description="the type of series data")
-
-     # resolved from dataset
-     x_data = GenericScalar(description="x data")
-     y_mean = GenericScalar(description="y data from the mean of the window")
-     # y_mode = GenericScalar(description="y data as from mode of the window")
-     y_median = GenericScalar(description="y data as from mode of the window")
-     y_min = GenericScalar(description="min in each bin")
-     y_max = GenericScalar(description="max in each bin")
-     y_25 = GenericScalar(description="quarter quantile")
-     y_75 = GenericScalar(description="3/4th quantile")
-     y_95 = GenericScalar(description="95th quantile")
-     y_05 = GenericScalar(description="5th quantile")
-     y_count = GenericScalar(description="the number of datapoints used to compute each tick")
-
-     # todo: start time
-
-     # todo: need to move the keys out, so that we can dropnan on the joint table.
-     # Otherwise the different data columns would not necessarily be the same length.
-     def resolve_x_data(self, info):
-         # note: new in 0.24.1.
-         # ~> df.value.dtype does NOT work for categorical data.
-         _ = self._df['__x'].to_numpy()
-         if np.issubdtype(_.dtype, np.datetime64):
-             return (_.astype(int) / 1000).tolist()
-         elif np.issubdtype(_.dtype, np.timedelta64):
-             return (_.astype(int) / 1000).tolist()
-         return _.tolist()
-
-     def resolve_y_mean(self, info):
-         if self.y_key is not None:
-             return self._df[self.y_key]['mean'].to_numpy().tolist()
-         return {k: self._df[k]['mean'].to_numpy().tolist() for k in self.y_keys}
-
-     # def resolve_y_mode(self, info):
-     #     if self.y_key is not None:
-     #         return self._df[self.y_key]['mode'].to_numpy().tolist()
-     #     return {k: self._df[k]['mode'].to_numpy().tolist() for k in self.y_keys}
-
-     def resolve_y_min(self, info):
-         if self.y_key is not None:
-             return self._df[self.y_key]['min'].to_numpy().tolist()
-         return {k: self._df[k]['min'].to_numpy().tolist() for k in self.y_keys}
-
-     def resolve_y_max(self, info):
-         if self.y_key is not None:
-             return self._df[self.y_key]['max'].to_numpy().tolist()
-         return {k: self._df[k]['max'].to_numpy().tolist() for k in self.y_keys}
-
-     def resolve_y_median(self, info):
-         if self.y_key is not None:
-             return self._df[self.y_key]['50%'].to_numpy().tolist()
-         return {k: self._df[k]['50%'].to_numpy().tolist() for k in self.y_keys}
-
-     def resolve_y_25(self, info):
-         if self.y_key is not None:
-             return self._df[self.y_key]['25%'].to_numpy().tolist()
-         return {k: self._df[k]['25%'].to_numpy().tolist() for k in self.y_keys}
-
-     def resolve_y_75(self, info):
-         if self.y_key is not None:
-             return self._df[self.y_key]['75%'].to_numpy().tolist()
-         return {k: self._df[k]['75%'].to_numpy().tolist() for k in self.y_keys}
-
-     def resolve_y_95(self, info):
-         if self.y_key is not None:
-             return self._df[self.y_key]['95%'].to_numpy().tolist()
-         return {k: self._df[k]['95%'].to_numpy().tolist() for k in self.y_keys}
-
-     def resolve_y_05(self, info):
-         if self.y_key is not None:
-             return self._df[self.y_key]['5%'].to_numpy().tolist()
-         return {k: self._df[k]['5%'].to_numpy().tolist() for k in self.y_keys}
-
-     def resolve_y_count(self, info):
-         if self.y_key is not None:
-             return self._df[self.y_key]['count'].to_numpy().tolist()
-         return {k: self._df[k]['count'].to_numpy().tolist() for k in self.y_keys}
-
-     @classmethod
-     def get_node(cls, info, id):
-         return Series(id)
-
-
- def get_series(metrics_files=tuple(),
-                prefix=None,
-                head=None,
-                tail=None,
-                x_low=None,
-                x_high=None,
-                x_edge=None,  # OneOf('start', 'after', 'mid', 'mode')
-                k=None,
-                x_align=None,  # OneOf(int, 'left', 'right')
-                x_key=None,
-                y_key=None,
-                y_keys=None,
-                label=None):
-     assert not y_key or not y_keys, "yKey and yKeys can not be trueful at the same time"
-     assert y_key or y_keys, "yKey and yKeys can not be both falseful."
-     assert head is None or tail is None, "head and tail can not be trueful at the same time"
-     if not prefix:
-         for id in metrics_files:
-             assert isabs(id), f"metricFile need to be absolute path is prefix is {prefix}. It is {id} instead."
-
-     ids = [join(prefix or "", id) for id in metrics_files]
-     dfs = [read_dataframe(join(Args.logdir, _id[1:])) for _id in ids]
-
-     y_keys = y_keys or [y_key]
-     join_keys = [x_key, *y_keys]
-     join_keys = list(set([k for k in join_keys if k is not None]))
-
-     joined = []
-     for df in dfs:
-         if df is None:
-             continue
-         if x_key is not None:
-             df.set_index(x_key)
-             if x_align is None:
-                 pass
-             elif x_align == "start":
-                 df[x_key] -= df[x_key][0]
-             elif x_align == "end":
-                 df[x_key] -= df[x_key][-1]
-             else:
-                 df[x_key] -= x_align
-         else:
-             df = df[y_keys]
-
-         if tail is not None:
-             df = df.tail(tail)
-         if head is not None:
-             df = df.head(head)
-         inds = True
-         if x_low is not None:
-             inds &= df.index >= x_low
-         if x_high is not None:
-             inds &= df.index <= x_high
-         if inds is not True:
-             df = df.loc[inds]
-
-         # todo: only dropna if we are not using ranges. <need to test>
-         try:
-             if head is None and tail is None:
-                 joined.append(df[join_keys].dropna())
-             else:
-                 joined.append(df[join_keys])
-         except KeyError as e:
-             raise KeyError(
-                 f"{join_keys} contain keys that is not in the dataframe. "
-                 f"Keys available include {df.keys()}") from e
-
-     if not joined:  # No dataframe, return `null`.
-         return None
-
-     import pandas as pd
-     all = pd.concat(joined)
-
-     if x_key:
-         all = all.set_index(x_key)
-
-     all.rank(method='first')
-
-     if k is not None:
-         bins = pd.qcut(all.index, k, duplicates='drop')
-         grouped = all.groupby(bins)
-     else:
-         grouped = all.groupby(level=0)
-
-     df = pd.merge(grouped[y_keys].agg(['count', 'mean', 'min', 'max']).reset_index(),
-                   grouped[y_keys].describe(percentiles=[0.25, 0.75, 0.5, 0.05, 0.95]))
-
-     if k is not None:
-         if x_edge == "right" or x_edge is None:
-             df['__x'] = df['index'].apply(lambda r: r.right)
-         elif x_edge == "left":
-             df['__x'] = df['index'].apply(lambda r: r.left)
-         elif x_edge == "mean":
-             df['__x'] = df['index'].apply(lambda r: 0.5 * (r.left + r.right))
-         # todo: use mode of each bin
-         else:
-             raise KeyError(f"x_edge {[x_edge]} should be OneOf['start', 'after', 'mid', 'mode']")
-     else:
-         df['__x'] = df.index
-
-     return Series(metrics_files,
-                   _df=df.sort_values(by="__x"),
-                   metrics_files=metrics_files,
-                   prefix=prefix,
-                   x_key=x_key,
-                   y_key=y_key,
-                   y_keys=y_keys,
-                   label=label,
-                   )
-
-
- SeriesArguments = dict(
-     metrics_files=List(String, required=True),
-     prefix=String(description="prefix to the metricFiles.", required=False),
-     head=Int(description="the number of datapoints (for each metrics file) to take from the head-end"),
-     tail=Int(description="the number of datapoints (for each metrics file) to take from the tail-end"),
-     x_low=Float(description="the (inclusive) lower end of the x column"),
-     x_high=Float(description="the (inclusive) higher end of the x column"),
-     k=Int(required=False, description='the number of datapoints to return.'),
-     x_align=String(description="a number (anchor point), 'start', 'end'"),
-     x_key=String(),
-     y_key=String(description="You can leave the xKey, but the yKey is required."),
-     y_keys=List(String, description="Alternatively you can pass a list of keys to yKey*s*."),
-     label=String(),
- )
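The core of get_series above is a quantile-binning downsample: concatenate the metric dataframes, cut the x axis into k quantile bins with pandas.qcut, and report per-bin statistics. A self-contained sketch of that idea with synthetic data (the column names and variable names here are illustrative, not the package's API):

    import numpy as np
    import pandas as pd

    # synthetic metrics: 1000 steps of a noisy loss curve
    df = pd.DataFrame({"step": np.arange(1000),
                       "loss": np.random.rand(1000)}).set_index("step")

    k = 20                                             # number of points to return
    bins = pd.qcut(df.index, k, duplicates="drop")     # quantile bins over the x axis
    grouped = df.groupby(bins, observed=True)

    # per-bin statistics, analogous to the agg/describe calls above
    summary = grouped["loss"].describe(percentiles=[0.05, 0.25, 0.5, 0.75, 0.95])
    summary["__x"] = [interval.right for interval in summary.index]   # right bin edge as x
    print(summary[["__x", "count", "mean", "min", "max", "25%", "75%"]])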
ml_dash/schema/files/videos.py DELETED
@@ -1,27 +0,0 @@
- from os.path import split
- from graphene import ObjectType, relay, String
- from ml_dash import schema
-
-
- class File(ObjectType):
-     class Meta:
-         interfaces = relay.Node,
-
-     name = String(description='name of the directory')
-
-     # description = String(description='string serialized data')
-     # experiments = List(lambda: schema.Experiments)
-
-     @classmethod
-     def get_node(cls, info, id):
-         return get_file(id)
-
-
- class FileConnection(relay.Connection):
-     class Meta:
-         node = File
-
-
- def get_file(id):
-     # path = os.path.join(Args.logdir, id[1:])
-     return File(id=id, name=split(id[1:])[1])
ml_dash/schema/helpers.py DELETED
@@ -1,66 +0,0 @@
- from typing import List, Any
-
-
- def assign(d1, d2):
-     if not d2:
-         return d1
-     for k, v in d2.items():
-         if isinstance(d1.get(k, None), dict):
-             d1[k] = assign(d1[k], v)
-         else:
-             d1[k] = v
-     return d1
-
-
- if __name__ == "__main__":
-     object1 = {"a": 1, "b": 2, "c": 3}
-     object2 = assign({"c": 4, "d": 5}, object1)
-     assert object2['c'] == 3
-     assert object2['d'] == 5
-
-
- def idot_keys(d, strict=True):
-     for k, v in d.items():
-         if isinstance(v, dict):
-             if not strict:
-                 yield k
-             for _ in idot_keys(v, strict):
-                 yield k + "." + _
-         else:
-             yield k
-
-
- def dot_keys(d, strict=True):
-     return [*idot_keys(d, strict)]
-
-
- if __name__ == "__main__":
-     object = {"a": 1, "b": 2, "c": 3, "child": {"a": 3, "grandchild": {'d': 8}}}
-     assert dot_keys(object) == ['a', 'b', 'c', 'child.a', 'child.grandchild.d']
-     assert dot_keys(object, strict=False) == ['a', 'b', 'c', 'child', 'child.a', 'child.grandchild',
-                                               'child.grandchild.d']
-
-
- def idot_flatten(d, ancestors: List[Any] = tuple()):
-     """
-     returns a flattened dictionary with the keys of the nexted dictionaries converted into dot-separated keys.
-
-     :param d: map
-     :return: flat map
-     """
-     for k, v in d.items():
-         if isinstance(v, dict):
-             for _k, _v in idot_flatten(v):
-                 yield k + "." + _k, _v
-         else:
-             yield k, v
-
-
- def dot_flatten(d):
-     # note: dictionaries are ordered by default in python 3.7.
-     return dict(idot_flatten(d))
-
-
- if __name__ == "__main__":
-     object = {"a": 1, "b": 2, "c": 3, "child": {"a": 3, "grandchild": {'d': 8}}}
-     assert list(dot_flatten(object).keys()) == ['a', 'b', 'c', 'child.a', 'child.grandchild.d']
ml_dash/schema/projects.py DELETED
@@ -1,65 +0,0 @@
- from os import listdir
- from os.path import isfile, join, split
-
- from graphene import ObjectType, relay, String, List
- from ml_dash import schema
-
-
- class Project(ObjectType):
-     class Meta:
-         interfaces = relay.Node,
-
-     name = String(description='name of the project')
-
-     # description = String(description='string serialized data')
-     # experiments = List(lambda: schema.Experiments)
-
-     experiments = relay.ConnectionField(lambda: schema.experiments.ExperimentConnection)
-
-     def resolve_experiments(self, info, **kwargs):
-         return schema.experiments.find_experiments(cwd=self.id)
-
-     directories = relay.ConnectionField(lambda: schema.directories.DirectoryConnection)
-     files = relay.ConnectionField(lambda: schema.files.FileConnection)
-
-     def resolve_directories(self, info, **kwargs):
-         from ml_dash.config import Args
-         root_dir = join(Args.logdir, self.id[1:])
-         return [schema.Directory(id=join(self.id, _), name=_)
-                 for _ in listdir(root_dir) if not isfile(join(root_dir, _))]
-
-     def resolve_files(self, info, **kwargs):
-         from ml_dash.config import Args
-         root_dir = join(Args.logdir, self.id[1:])
-         return [schema.Directory(id=join(self.id, _), name=_)
-                 for _ in listdir(root_dir) if isfile(join(root_dir, _))]
-
-     # def resolve_experiments(self, info, **kargs):
-     #     from ml_dash.config import Args
-     #     root_dir = join(Args.logdir, self.id[1:])
-     #
-     #     return [schema.Directory(id=join(self.id, _), name=_)
-     #             for _ in listdir(root_dir) if isfile(join(root_dir, _))]
-
-     @classmethod
-     def get_node(cls, info, id):
-         return get_project(id)
-
-
- class ProjectConnection(relay.Connection):
-     class Meta:
-         node = Project
-
-
- def get_projects(username):
-     import os
-     from ml_dash.config import Args
-     user_root = join(Args.logdir, username)
-     return [Project(name=_, id=join('/', username, _))
-             for _ in os.listdir(user_root) if not isfile(_)]
-
-
- def get_project(id):
-     from ml_dash.config import Args
-     path = join(Args.logdir, id[1:])
-     return Project(id=id, name=split(id[1:])[1], _path=path)
ml_dash/schema/schema_helpers.py DELETED
@@ -1,19 +0,0 @@
- def bind(fn):
-     """
-     Binds the function to the class.
-
-     :param fn:
-     :return: bound_fn
-     """
-     return lambda _, *args, **kwargs: fn(*args, **kwargs)
-
-
- def bind_args(fn):
-     """
-     Binds args after info.
-
-     :param fn:
-     :return: bound_fn
-     """
-     return lambda _, info, *args, **kwargs: fn(*args, **kwargs)
-
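bind and bind_args above adapt a plain function into a graphene-style resolver by discarding the implicit root (and info) arguments. A tiny illustration (the greeting function is made up for the example):

    def bind_args(fn):
        # copied from the deleted schema_helpers.py above
        return lambda _, info, *args, **kwargs: fn(*args, **kwargs)

    def greeting(name="world"):
        return f"hello {name}"

    resolver = bind_args(greeting)
    # graphene calls resolvers as resolver(root, info, **graphql_arguments);
    # bind_args drops root and info and forwards only the GraphQL arguments.
    assert resolver(None, None, name="Ge") == "hello Ge"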
ml_dash/schema/users.py DELETED
@@ -1,33 +0,0 @@
- from os.path import isfile
- from graphene import ObjectType, relay, String
- from ml_dash import schema
-
-
- class User(ObjectType):
-     class Meta:
-         interfaces = relay.Node,
-
-     @classmethod
-     def get_node(_, info, id):
-         print(info, id)
-         return get_user(id)
-
-     username = String(description='string serialized data')
-     name = String(description='string serialized data')
-
-     projects = relay.ConnectionField(lambda: schema.projects.ProjectConnection)
-
-     def resolve_projects(self, info, **kwargs):
-         return schema.projects.get_projects(self.username)
-
-     # teams = List(lambda: schema.Team)
-
-
- def get_users(ids=None):
-     import os
-     from ml_dash.config import Args
-     return [User(username=_, name="Ge Yang") for _ in os.listdir(Args.logdir) if not isfile(_)]
-
-
- def get_user(username):
-     return User(username=username, name="Ge Yang", id=username)
ml_dash/sse.py DELETED
@@ -1,18 +0,0 @@
- # SSE "protocol" is described here: http://mzl.la/UPFyxY
- class ServerSentEvent(object):
-
-     def __init__(self, data):
-         self.data = data
-         self.event = None
-         self.id = None
-         self.desc_map = {
-             self.data: "data",
-             self.event: "event",
-             self.id: "id"
-         }
-
-     def __str__(self):
-         if not self.data:
-             return ""
-         lines = [f"{v}: {k}" for k, v in self.desc_map.items() if k]
-         return "%s\n\n" % "\n".join(lines)
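For reference, ServerSentEvent.__str__ above emits the standard SSE wire format: one "field: value" line per populated field, terminated by a blank line. A minimal stand-in (not the deleted class itself) that produces the same frames:

    def sse_frame(data, event=None, id=None):
        # illustrative stand-in for ServerSentEvent.__str__ above
        fields = {"data": data, "event": event, "id": id}
        lines = [f"{name}: {value}" for name, value in fields.items() if value]
        return "\n".join(lines) + "\n\n" if lines else ""

    assert sse_frame("hello") == "data: hello\n\n"
    assert sse_frame("42", event="tick") == "data: 42\nevent: tick\n\n"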
ml_dash-0.0.11.dist-info/METADATA DELETED
@@ -1,67 +0,0 @@
- Metadata-Version: 2.1
- Name: ml-dash
- Version: 0.0.11
- Summary: A Beautiful Visualization Dashboard For Machine Learning
- Home-page: https://github.com/episodeyang/ml_dash
- Author: Ge Yang
- Author-email: yangge1987@gmail.com
- License: UNKNOWN
- Keywords: ml_logger,ml-logger,ml dash,ml-dash,ml_dashdashboard,machine learning,vis_server,logging,debug,debugging
- Platform: UNKNOWN
- Classifier: Development Status :: 4 - Beta
- Classifier: Intended Audience :: Science/Research
- Classifier: Programming Language :: Python :: 3
- Requires-Dist: typing
- Requires-Dist: numpy
- Requires-Dist: termcolor
- Requires-Dist: params-proto
- Requires-Dist: cloudpickle
- Requires-Dist: uvloop (==0.8.1)
- Requires-Dist: requests
- Requires-Dist: requests-futures
- Requires-Dist: hachiko
- Requires-Dist: sanic
- Requires-Dist: sanic-cors
- Requires-Dist: sanic-graphql
- Requires-Dist: graphene
- Requires-Dist: graphql-core
- Requires-Dist: graphql-relay
- Requires-Dist: dill
- Requires-Dist: ruamel.yaml
-
- ML-Dash, A Beautiful Visualization Dashboard for Machine Learning
- =================================================================
-
- `Downloads <http://pepy.tech/project/ml-dash>`__
-
- ML-dash replaces visdom and tensorboard. It is the single real-time job
- visualization dashboard for machine learning.
-
- **Parallel Coordinates** **Aggregating Over Multiple Runs** **Create
- Movies out of images**
-
- Usage
- -----
-
- To **install** ``ml_dash``, do:
-
- .. code-block:: bash
-
-     pip install ml-dash
-
- **Note: the server accepts requests from ``localhost`` only, by
- default.** In order to
-
- .. code-block:: bash
-
-     python -m ml_dash.main --log-dir=<your-log-dir> --host=0.0.0.0 --port=<your-port-number> --workers=4
-
- It is the easiest if you setup a long-lived instrument server with a
- public ip for yourself or the entire lab.
-
- Implementation Notes
- ~~~~~~~~~~~~~~~~~~~~
-
- See `https://github.com/episodeyang/ml_dash/blob/master/notes/README.md <https://github.com/episodeyang/ml_dash/blob/master/notes/README.md>`__
-
-