kaggle-environments 1.17.2__py2.py3-none-any.whl → 1.17.5__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of kaggle-environments might be problematic; see the registry's security advisory for more details.

Files changed (30) hide show
  1. kaggle_environments/__init__.py +2 -2
  2. kaggle_environments/envs/open_spiel/__init__.py +0 -0
  3. kaggle_environments/envs/open_spiel/games/__init__.py +0 -0
  4. kaggle_environments/envs/open_spiel/games/chess/chess.js +294 -0
  5. kaggle_environments/envs/open_spiel/games/connect_four/__init__.py +0 -0
  6. kaggle_environments/envs/open_spiel/games/connect_four/connect_four.js +296 -0
  7. kaggle_environments/envs/open_spiel/games/connect_four/connect_four_proxy.py +86 -0
  8. kaggle_environments/envs/open_spiel/games/connect_four/connect_four_proxy_test.py +57 -0
  9. kaggle_environments/envs/open_spiel/games/go/__init__.py +0 -0
  10. kaggle_environments/envs/open_spiel/games/go/go.js +481 -0
  11. kaggle_environments/envs/open_spiel/games/go/go_proxy.py +105 -0
  12. kaggle_environments/envs/open_spiel/games/tic_tac_toe/__init__.py +0 -0
  13. kaggle_environments/envs/open_spiel/games/tic_tac_toe/tic_tac_toe.js +345 -0
  14. kaggle_environments/envs/open_spiel/games/tic_tac_toe/tic_tac_toe_proxy.py +101 -0
  15. kaggle_environments/envs/open_spiel/games/universal_poker/__init__.py +0 -0
  16. kaggle_environments/envs/open_spiel/games/universal_poker/universal_poker.js +431 -0
  17. kaggle_environments/envs/open_spiel/games/universal_poker/universal_poker_proxy.py +159 -0
  18. kaggle_environments/envs/open_spiel/games/universal_poker/universal_poker_proxy_test.py +49 -0
  19. kaggle_environments/envs/open_spiel/html_playthrough_generator.py +30 -0
  20. kaggle_environments/envs/open_spiel/observation.py +133 -0
  21. kaggle_environments/envs/open_spiel/open_spiel.py +325 -224
  22. kaggle_environments/envs/open_spiel/proxy.py +139 -0
  23. kaggle_environments/envs/open_spiel/proxy_test.py +64 -0
  24. kaggle_environments/envs/open_spiel/test_open_spiel.py +23 -8
  25. {kaggle_environments-1.17.2.dist-info → kaggle_environments-1.17.5.dist-info}/METADATA +2 -2
  26. {kaggle_environments-1.17.2.dist-info → kaggle_environments-1.17.5.dist-info}/RECORD +30 -9
  27. {kaggle_environments-1.17.2.dist-info → kaggle_environments-1.17.5.dist-info}/WHEEL +0 -0
  28. {kaggle_environments-1.17.2.dist-info → kaggle_environments-1.17.5.dist-info}/entry_points.txt +0 -0
  29. {kaggle_environments-1.17.2.dist-info → kaggle_environments-1.17.5.dist-info}/licenses/LICENSE +0 -0
  30. {kaggle_environments-1.17.2.dist-info → kaggle_environments-1.17.5.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,133 @@
1
+ # Copyright 2019 DeepMind Technologies Limited
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """An observation of a game.
16
+
17
+ This is intended to be the main way to get observations of states in Python.
18
+ The usage pattern is as follows:
19
+
20
+ 0. Create the game we will be playing
21
+ 1. Create each kind of observation required, using `make_observation`
22
+ 2. Every time a new observation is required, call:
23
+ `observation.set_from(state, player)`
24
+ The tensor contained in the Observation class will be updated with an
25
+ observation of the supplied state. This tensor is updated in-place, so if
26
+ you wish to retain it, you must make a copy.
27
+
28
+ The following options are available when creating an Observation:
29
+ - perfect_recall: if true, each observation must allow the observing player to
30
+ reconstruct their history of actions and observations.
31
+ - public_info: if true, the observation should include public information
32
+ - private_info: specifies for which players private information should be
33
+ included - all players, the observing player, or no players
34
+ - params: game-specific parameters for observations
35
+
36
+ We ultimately aim to have all games support all combinations of these arguments.
37
+ However, initially many games will only support the combinations corresponding
38
+ to ObservationTensor and InformationStateTensor:
39
+ - ObservationTensor: perfect_recall=False, public_info=True,
40
+ private_info=SinglePlayer
41
+ - InformationStateTensor: perfect_recall=True, public_info=True,
42
+ private_info=SinglePlayer
43
+
44
+ Three formats of observation are supported:
45
+ a. 1-D numpy array, accessed by `observation.tensor`
46
+ b. Dict of numpy arrays, accessed by `observation.dict`. These are pieces of the
47
+ 1-D array, reshaped. The np.array objects refer to the same memory as the
48
+ 1-D array (no copying!).
49
+ c. String, hopefully human-readable (primarily for debugging purposes)
50
+
51
+ For usage examples, see `observation_test.py`.
52
+ """
53
+
54
+ import numpy as np
55
+
56
+ import pyspiel
57
+
58
+
59
# Corresponds to the old information_state_XXX methods.
# Only perfect_recall=True is set explicitly; the remaining
# IIGObservationType fields keep their pyspiel defaults.
INFO_STATE_OBS_TYPE = pyspiel.IIGObservationType(perfect_recall=True)
61
+
62
+
63
class _Observation:
  """Contains an observation from a game."""

  def __init__(self, game, observer):
    """Binds a pyspiel observer to `game` and exposes its tensor views.

    Args:
      game: the pyspiel game being observed.
      observer: a pyspiel observer created for that game.
    """
    self._observation = pyspiel._Observation(game, observer)
    # Maps tensor names to numpy views that alias the flat tensor below.
    self.dict = {}
    if not self._observation.has_tensor():
      self.tensor = None
      return
    # Flat float32 view over the underlying buffer — no copy is made, so
    # the contents change in place on every set_from() call.
    self.tensor = np.frombuffer(self._observation, np.float32)
    start = 0
    for info in self._observation.tensors_info():
      count = np.prod(info.shape, dtype=np.int64)
      # Each named piece is a reshaped window into the same flat buffer.
      self.dict[info.name] = self.tensor[start:start + count].reshape(info.shape)
      start += count

  def set_from(self, state, player):
    """Updates the tensor in place with an observation of `state`."""
    self._observation.set_from(state, player)

  def string_from(self, state, player):
    """Returns a string observation of `state`, or None if unsupported."""
    if not self._observation.has_string():
      return None
    return self._observation.string_from(state, player)

  def compress(self):
    """Returns a compressed representation of the current observation."""
    return self._observation.compress()

  def decompress(self, compressed_observation):
    """Restores the observation from a compressed representation."""
    self._observation.decompress(compressed_observation)
92
+
93
+
94
def make_observation(
    game,
    imperfect_information_observation_type=None,
    params=None,
):
  """Builds an observation helper for `game`, or returns None.

  Args:
    game: the game to observe. Python games expose `make_py_observer`
      and build their own observer object; C++ games expose
      `make_observer` and get wrapped in `_Observation`.
    imperfect_information_observation_type: optional
      pyspiel.IIGObservationType selecting the observation semantics.
    params: optional dict of game-specific observation parameters.

  Returns:
    The game's own observer (Python games), an `_Observation`
    (C++ games), or None when no matching observer can be built.
  """
  params = params or {}
  py_factory = getattr(game, 'make_py_observer', None)
  # Python-implemented games construct their own observer objects.
  if py_factory is not None:
    return py_factory(imperfect_information_observation_type, params)
  # C++ games: build a pyspiel observer, then wrap it.
  if imperfect_information_observation_type is None:
    observer = game.make_observer(params)
  else:
    observer = game.make_observer(
        imperfect_information_observation_type, params
    )
  return None if observer is None else _Observation(game, observer)
113
+
114
+
115
class IIGObserverForPublicInfoGame:
  """Observer for imperfect-information observations of public-info games."""

  def __init__(self, iig_obs_type, params):
    """Stores the observation type; rejects any game-specific params.

    Args:
      iig_obs_type: a pyspiel.IIGObservationType-like object; only its
        `public_info` flag is consulted.
      params: must be empty/None — this observer takes no parameters.

    Raises:
      ValueError: if `params` is non-empty.
    """
    if params:
      raise ValueError(f'Observation parameters not supported; passed {params}')
    self._iig_obs_type = iig_obs_type
    # String-only observer: no tensor representation is provided.
    self.tensor = None
    self.dict = {}

  def set_from(self, state, player):
    """No-op — there is no tensor to fill in."""
    pass

  def string_from(self, state, player):
    """Returns the public history string, or '' when public info is off."""
    del player
    if not self._iig_obs_type.public_info:
      return ''  # No private information to return
    return state.history_str()