graph-games-proto 0.3.1741-py3-none-any.whl → 0.3.1752-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
graph_games_proto/__init__.py CHANGED
@@ -1,3 +1,3 @@
  # __init__.py
  from .main import hello
- from .fns import get_deadlines, get_longest_path_length, get_max_allotted_times, get_legal_actions_for_path, find_player_with_longest_path, calc_player_graph, get_edges, FrozenDek, QValueLearningPolicy, Action2, getnextstate2, isactionlegal2, LegalAction, AltAction, Fig, RandoPolicy, StaticBoardConfig, getplayerstate, autoplay, getstate, getpublicstate, generate_cards, PublicState, State, PlayerInfo, Action, Fig, get_imagined_state, getprivatescore, get_qvalue_trajectories, getnextaction, PlayerState, getnextstate, initfig, getavailablepathstatuses, initboardconfig, gettoplay, getlegalactionspecsforplayer, getcompletedroutes, getstateidx, isactionlegal, getpathstatus, printstate, getinitialstate, Card, PrivateState, getprivatestate, printaction, json_serializer, getrng, FrozenBoardConfig, initgameconfig, GameConfig
+ from .fns import get_deadlines, get_longest_path_length, get_max_allotted_times, get_legal_actions_for_path, find_player_with_longest_path, calc_player_graph, get_edges, FrozenDek, QValueLearningPolicy, Action2, getnextstate2, isactionlegal2, LegalAction, AltAction, Fig, RandoPolicy, StaticBoardConfig, autoplay, getpublicstate, generate_cards, PublicState, State, PlayerInfo, Action, Fig, get_imagined_state, getprivatescore, get_qvalue_trajectories, getnextaction, PlayerState, initfig, getavailablepathstatuses, initboardconfig, gettoplay, getlegalactionspecsforplayer, getpathstatus, printstate, getinitialstate, Card, PrivateState, getprivatestate, printaction, json_serializer, getrng, FrozenBoardConfig, initgameconfig, GameConfig
graph_games_proto/fns.py CHANGED
@@ -1008,7 +1008,6 @@ class ActionDrawUnit:
  # struct PublicState
  # fig::Fig
  # logged_game_uuid::UUID
- # action_history::Vector{Action}
  # to_play::Vector{Int}
  # num_route_cards::Int
  # num_route_discards::Int
@@ -1020,8 +1019,6 @@ class ActionDrawUnit:
  # captured_points::Vector{CapturedPoint}
  # last_to_play::Union{Nothing,Int}
  # terminal::Bool
- # longest_trail_player_idxs::Vector{Int}
- # most_clusters_player_idxs::Vector{Int}
  # winners::Vector{Int}
  # market_refills::Vector{MarketRefill}

@@ -2184,15 +2181,12 @@ class State(PClass):
  rng = field(type=random.Random)
  terminal = field(type=bool)
  initial_to_play = field(type=list) # List[int]
- action_history = field(type=list) # List[Action]
  route_cards = field(type=PVector) # List[int]
  route_discards = field(type=PVector) # List[int]
  player_hands = field(type=PVector) # List[PlayerInfo]
  unit_cards = field(type=PVector) # List[int]
  faceup_spots = field(type=PVector) # List[Union{Nothing, int}]
  unit_discards = field(type=PVector) # List[int]
- most_clusters_player_idxs = field(type=list) # List[int]
- longest_trail_player_idxs = field(type=list) # List[int]
  last_to_play = field(type=(int, type(None)), initial=None)
  winners = field(type=list) # List[int]
  # market_refills::Vector{MarketRefill}
@@ -2228,15 +2222,12 @@ class State(PClass):
  "rng": rng2json(self.rng),
  "terminal": self.terminal,
  "initial_to_play": self.initial_to_play,
- "action_history": [x.__todict__() for x in self.action_history],
  "route_cards": list(self.route_cards),
  "route_discards": list(self.route_discards),
  "player_hands": [x.__todict__() for x in self.player_hands],
  "unit_cards": list(self.unit_cards),
  "faceup_spots": list(self.faceup_spots),
  "unit_discards": list(self.unit_discards),
- "most_clusters_player_idxs": self.most_clusters_player_idxs,
- "longest_trail_player_idxs": self.longest_trail_player_idxs,
  "last_to_play": self.last_to_play,
  "winners": self.winners,
  }
@@ -2273,15 +2264,12 @@ class State(PClass):
  rng=json2rng(d["rng"]),
  terminal=d["terminal"],
  initial_to_play=d["initial_to_play"],
- action_history=[AltAction.__fromdict__(a) for a in d["action_history"]],
  route_cards=pvector(d["route_cards"]),
  route_discards=pvector(d["route_discards"]),
  player_hands=pvector([PlayerInfo.__fromdict__(h) for h in d["player_hands"]]),
  unit_cards=pvector(d["unit_cards"]),
  faceup_spots=pvector(d["faceup_spots"]),
  unit_discards=pvector(d["unit_discards"]),
- most_clusters_player_idxs=d["most_clusters_player_idxs"],
- longest_trail_player_idxs=d["longest_trail_player_idxs"],
  last_to_play=d.get("last_to_play"),
  winners=d["winners"],
  )
@@ -2546,7 +2534,6 @@ class PublicState(PClass):
  player_idxs = field(type=list) # List[int]
  game_idx = field(type=int)
  initial_to_play = field(type=list) # List[int]
- action_history = field(type=list) # List[AltAction]
  to_play = field(type=list) # List[int]
  unit_discards = field(type=list) # List[int]
  num_route_cards = field(type=int)
@@ -2554,11 +2541,9 @@ class PublicState(PClass):
  num_unit_cards = field(type=int)
  num_unit_discards = field(type=int)
  faceup_spots = field(type=list) # List[Union{Nothing, int}]
- most_clusters_player_idxs = field(type=list)
  players = field(type=list) # List[PublicPlayer]
  player_hands = field(type=list) # List[PublicPlayerInfo]
  last_to_play = field(type=(int, type(None)), initial=None)
- longest_trail_player_idxs = field(type=list)
  winners = field(type=list)
  terminal = field(type=bool)
  captured_points = field(type=list) # List[CapturedPoint]
@@ -2583,9 +2568,7 @@ class PublicState(PClass):
  "decks": [deck.__todict__() for deck in self.decks],
  "piles": [pile.__todict__() for pile in self.piles],
  "player_idxs": self.player_idxs,
- "game_idx": self.game_idx,
  "initial_to_play": self.initial_to_play,
- "action_history": [x.__todict__() for x in self.action_history],
  "to_play": self.to_play,
  "unit_discards": self.unit_discards,
  "num_route_cards": self.num_route_cards,
@@ -2593,11 +2576,9 @@ class PublicState(PClass):
  "num_unit_cards": self.num_unit_cards,
  "num_unit_discards": self.num_unit_discards,
  "faceup_spots": self.faceup_spots,
- "most_clusters_player_idxs": self.most_clusters_player_idxs,
  "players": [x.__todict__() for x in self.players],
  "player_hands": [x.__todict__() for x in self.player_hands],
  "last_to_play": self.last_to_play,
- "longest_trail_player_idxs": self.longest_trail_player_idxs,
  "winners": self.winners,
  "terminal": self.terminal,
  "captured_points": [x.__todict__() for x in self.captured_points],
@@ -2626,7 +2607,6 @@ class PublicState(PClass):
  player_idxs=d["player_idxs"],
  game_idx=d["game_idx"],
  initial_to_play=d["initial_to_play"],
- action_history=[AltAction.__fromdict__(x) for x in d["action_history"]],
  to_play=d["to_play"],
  unit_discards=d["unit_discards"],
  num_route_cards=d["num_route_cards"],
@@ -2634,11 +2614,9 @@ class PublicState(PClass):
  num_unit_cards=d["num_unit_cards"],
  num_unit_discards=d["num_unit_discards"],
  faceup_spots=d["faceup_spots"],
- most_clusters_player_idxs=d["most_clusters_player_idxs"],
  players=[PublicPlayer.__fromdict__(x) for x in d["players"]],
  player_hands=[PublicPlayerInfo.__fromdict__(x) for x in d["player_hands"]],
  last_to_play=d.get("last_to_play"),
- longest_trail_player_idxs=d["longest_trail_player_idxs"],
  winners=d["winners"],
  terminal=d["terminal"],
  captured_points=[CapturedPoint.__fromdict__(x) for x in d["captured_points"]],
@@ -2676,8 +2654,6 @@ def autoplay(seed, fig, num_players, policy, log=False):
  if log:
  printstate(s)
  a = getnextaction(s, policy)
- if log:
- printaction(a, getstateidx(s))
  s = getnextstate2(s, a)
  actions.append(a)

@@ -2776,6 +2752,19 @@ def generate_cards(dek):
  return cards


+ @dispatch(Fig, str)
+ def getsettingvalue(f, setting_name):
+ for setting in f.board_config.settings:
+ if setting.name == setting_name:
+ return json.loads(setting.value_json)
+ return None
+
+
+ @dispatch(State, str)
+ def getsettingvalue(s, setting_name):
+ return getsettingvalue(s.game_config.fig, setting_name)
+
+
  @dispatch(GameConfig)
  def getinitialstate(game_config):
  fig = game_config.fig
@@ -2941,15 +2930,12 @@ def getinitialstate(game_config):
  game_config=game_config,
  initial_to_play=initial_to_play,
  rng=rng,
- action_history=[],
  route_cards=pvector(route_cards),
  route_discards=pvector([]),
  player_hands=pvector(player_hands),
  unit_cards=pvector(unit_cards),
  unit_discards=pvector([]),
  faceup_spots=pvector(faceup_spots),
- most_clusters_player_idxs=[],
- longest_trail_player_idxs=[],
  last_to_play=None,
  winners=[],
  terminal=False,
@@ -4425,9 +4411,7 @@ def getpublicstate(s):
  decks=[getpublicdeck(s, deck) for deck in s.decks],
  piles=s.piles,
  player_idxs=s.player_idxs,
- game_idx=len(s.action_history),
  initial_to_play=s.initial_to_play,
- action_history=s.action_history,
  to_play=gettoplay(s),
  unit_discards=list(s.unit_discards),
  num_route_cards=len(s.route_cards),
@@ -4435,11 +4419,9 @@ def getpublicstate(s):
  num_unit_cards=len(s.unit_cards),
  num_unit_discards=len(s.unit_discards),
  faceup_spots=list(s.faceup_spots),
- most_clusters_player_idxs=s.most_clusters_player_idxs,
  players=[getpublicplayer(s, p) for p in s.players],
  player_hands=[getpublicplayerinfo(s, p) for p in s.player_hands],
  last_to_play=s.last_to_play,
- longest_trail_player_idxs=s.longest_trail_player_idxs,
  winners=s.winners,
  terminal=s.terminal,
  captured_points=getcapturedpoints(s),
@@ -4569,14 +4551,6 @@ def getplayerpathidxs(s, player_idx):
  return s.player_hands[player_idx].paths


- # Implementing the following Julia function:
- # getlastaction(s::State) = isempty(s.actions) ? nothing : s.actions[end]
- def getlastaction(s):
- if not s.action_history:
- return None
- return s.action_history[-1]
-
-
  # Function implements the following Julia function:
  # function getlastactionkey(s)
  # last_action = getlastaction(s)
@@ -4587,10 +4561,7 @@ def getlastaction(s):
  # end
  @dispatch(State)
  def getlastactiontype(s):
- last_action = getlastaction(s)
- if last_action is None:
- return NoAction()
- return getactiontype(last_action.action_name)
+ pass


  def getactiontype(action_name):
@@ -4612,10 +4583,7 @@ def getactiontype(action_name):
  # Function implements the following Julia function:
  # getlastplayeridxplus1(s) = mod1(getlastaction(s).player_idx + 1, s.game_config.num_players)
  def getlastplayeridxplus1(s):
- last_action = getlastaction(s)
- if last_action is None:
- return 0
- return (last_action.player_idx + 1) % s.game_config.num_players
+ pass


  @dispatch(State)
@@ -4764,10 +4732,7 @@ def getlegalactionspecs(s, player_idx):
  # Val(player_idx == last_action.player_idx)
  # end
  def getrepeatplayerbooltype(s, player_idx):
- last_action = getlastaction(s)
- if last_action is None:
- return getbooltype(False)
- return getbooltype(player_idx == last_action.player_idx)
+ pass


  def combinations(a, n=None):
@@ -4878,22 +4843,8 @@ def getrouteoptionsets(s, player_idx, min_required):
  # end
  @dispatch(State, int, object, object)
  def getlegalactionspecsforplayer(s, player_idx, repeat_player, last_action):
- min_initial_routes = getsettingvalue(s, 'min_initial_routes')
  min_chosen_routes = getsettingvalue(s, 'min_chosen_routes')

- # Initial Route Card Discard
- if getsettingvalue(s, 'action_route_discard') and len(s.action_history) < s.game_config.num_players:
- return [
- ActionSpec(
- player_idx=player_idx,
- action_name="ROUTE_DISCARD",
- return_route_option_sets = getrouteoptionsets(s, player_idx, min_initial_routes),
- draw_faceup_spots={},
- points = [],
- paths = [],
- )
- ]
-
  action_specs = []
  if getsettingvalue(s, 'action_draw_unit_faceup') and s.faceup_spots:

@@ -5751,11 +5702,6 @@ def getunavailablepoints(s):
  return unavailable_points


- def getstateidx(s):
- return len(s.action_history)
-
-
-
  # Implementing the following Julia function:
  # function calcfinalscores(s::State)
  # if !s.terminal
@@ -5820,9 +5766,7 @@ def printplayer(s, player_idx):


  def printstate(s):
- state_idx = getstateidx(s)
  print(f"*************** State {state_idx} ***************")
- print(f"Most clusters: {list(s.most_clusters_player_idxs)}")
  print(f"Last to play: {s.last_to_play}")
  print(f"Winners: {list(s.winners)}")
  print(f"Route Deck: {list(s.route_cards)}")
@@ -5948,24 +5892,6 @@ def getprivatescore(s, hand):
  amount=path_scores[len],
  ))

- # Bonus: most clusters
- if getsettingvalue(s, 'most_clusters_bonus'):
- bonus_most_clusters_score = getsettingvalue(s.game_config.fig, 'bonus_most_clusters_score')
- if player_idx in s.most_clusters_player_idxs:
- breakdown.append(ScoreItem(
- code_idx=getscorecodeidx(s.game_config.fig, 'MOST_CLUSTERS'),
- amount=bonus_most_clusters_score,
- ))
-
- # Longest trail
- if not getsettingvalue(s, 'disable_longest_path_bonus'):
- longest_path_score = getsettingvalue(s.game_config.fig, 'longest_path_score')
- if player_idx in s.longest_trail_player_idxs:
- breakdown.append(ScoreItem(
- code_idx=getscorecodeidx(s.game_config.fig, 'LONGEST_ROAD'),
- amount=longest_path_score,
- ))
-
  # Completed routes
  if False and getsettingvalue(s, 'route_scoring'):
  routes = s.game_config.fig.board_config.routes
@@ -6466,10 +6392,6 @@ def get_imagined_state(static_board_config, player_state):
  for unit_card in my_hand.unit_cards:
  remove_card_idx(possible_unit_card_idxs, unit_card-1)

- for action in public_state.action_history:
- if action.action_name == "DRAW_UNIT_DECK":
- remove_card_idx(possible_unit_card_idxs, action.unit_card_num - 1)
-
  imagined_unit_card_idxs = rng.sample(possible_unit_card_idxs, public_state.num_unit_cards)
  imagined_unit_cards = [x+1 for x in imagined_unit_card_idxs]
  remove_card_idxs(possible_unit_card_idxs, imagined_unit_card_idxs)
@@ -6539,15 +6461,12 @@ def get_imagined_state(static_board_config, player_state):
  rng = rng, # TODO: again figure out this stochasticity
  terminal = public_state.terminal,
  initial_to_play = public_state.initial_to_play,
- action_history = public_state.action_history,
  route_cards = pvector(imagined_route_cards), # Guess at this.
  route_discards = pvector(imagined_route_discards), # Guess at this.
  player_hands = pvector(imagined_player_hands), # Guess at this.
  unit_cards = pvector(imagined_unit_cards), # Guess at this.
  faceup_spots = pvector(public_state.faceup_spots),
  unit_discards = pvector(public_state.unit_discards),
- most_clusters_player_idxs = public_state.most_clusters_player_idxs,
- longest_trail_player_idxs = public_state.longest_trail_player_idxs,
  last_to_play = public_state.last_to_play,
  winners = public_state.winners,
  )
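Note: the `getsettingvalue` helpers added in the fns.py hunks above resolve a named board-config setting by scanning `board_config.settings` and JSON-decoding its `value_json` field, with `None` returned when the setting is absent. The following is a minimal, self-contained sketch of that lookup behavior only; the dataclasses below are stand-ins, and the package's actual Fig/StaticBoardConfig/setting types (and its multipledispatch registration) may differ.

# Illustrative sketch only: stand-in types mirroring the fields used by getsettingvalue.
import json
from dataclasses import dataclass, field
from typing import List, Optional

@dataclass
class Setting:
    name: str
    value_json: str  # setting values are stored as JSON-encoded strings

@dataclass
class BoardConfig:
    settings: List[Setting] = field(default_factory=list)

@dataclass
class Fig:
    board_config: BoardConfig

def getsettingvalue(f: Fig, setting_name: str) -> Optional[object]:
    # Same lookup shape as the added @dispatch(Fig, str) method:
    # first name match wins, value is decoded from JSON, None if absent.
    for setting in f.board_config.settings:
        if setting.name == setting_name:
            return json.loads(setting.value_json)
    return None

fig = Fig(BoardConfig([Setting("min_chosen_routes", "2"),
                       Setting("action_draw_unit_faceup", "true")]))
assert getsettingvalue(fig, "min_chosen_routes") == 2
assert getsettingvalue(fig, "action_draw_unit_faceup") is True
assert getsettingvalue(fig, "missing_setting") is None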
graph_games_proto-0.3.1752.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: graph_games_proto
- Version: 0.3.1741
+ Version: 0.3.1752
  Requires-Dist: multipledispatch==1.0.0
  Requires-Dist: pyrsistent==0.20.0
  Requires-Dist: numpy==2.2.4
graph_games_proto-0.3.1752.dist-info/RECORD ADDED
@@ -0,0 +1,9 @@
+ graph_games_proto/__init__.py,sha256=O5XjRfe3DlxbJn4zezDvvy7cXvL4IzIRPZCL3Y-n7s8,776
+ graph_games_proto/all_types.py,sha256=IpbwftEcHS5Ewz-saFNk0lO9FvcbuHG36odRTayCXUk,54911
+ graph_games_proto/fns.py,sha256=CEs1hScC6_KPdLxWM13SrcunwYg4SICQPo2blYG7o_o,235957
+ graph_games_proto/main.py,sha256=fj2U7KcwrpZtuUhjOX5yVxY18LZvvsxDFYZ_S5mxe04,145
+ graph_games_proto/state.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ graph_games_proto-0.3.1752.dist-info/METADATA,sha256=ponF3VF7XgXC18_uuCgAWnm3jBlXhIA1RVeqDEPJS5Q,188
+ graph_games_proto-0.3.1752.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
+ graph_games_proto-0.3.1752.dist-info/top_level.txt,sha256=-4QSrBMf_MM4BGsr2QXBpqDx8c8k_OPnzGyFjqjakes,18
+ graph_games_proto-0.3.1752.dist-info/RECORD,,
@@ -1,9 +0,0 @@
1
- graph_games_proto/__init__.py,sha256=sI31dBPkrs_UHYsuc1Q2sdYkJfpVPKpB--FuqghA208,864
2
- graph_games_proto/all_types.py,sha256=IpbwftEcHS5Ewz-saFNk0lO9FvcbuHG36odRTayCXUk,54911
3
- graph_games_proto/fns.py,sha256=Yjdzi-ST7rSv5PLcJiECu5fgWXSbOjNl_LCYmulfuJc,239934
4
- graph_games_proto/main.py,sha256=fj2U7KcwrpZtuUhjOX5yVxY18LZvvsxDFYZ_S5mxe04,145
5
- graph_games_proto/state.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
6
- graph_games_proto-0.3.1741.dist-info/METADATA,sha256=iaIQuc2Cf9DaYgB4dq4A1j85nGei4di7auJx4wb3Ans,188
7
- graph_games_proto-0.3.1741.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
8
- graph_games_proto-0.3.1741.dist-info/top_level.txt,sha256=-4QSrBMf_MM4BGsr2QXBpqDx8c8k_OPnzGyFjqjakes,18
9
- graph_games_proto-0.3.1741.dist-info/RECORD,,