include("board.jl")
module AI
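#Monte Carlo tree search player: per-(player,state) win/play counts are kept in
#`wins` and `plays`, moves are selected with the UCB1 rule where statistics exist,
#and playouts otherwise use a seeded pseudo-random policy. The time budget per
#move is set by the difficulty passed to `init`.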
using AIHelp
using BM
global states = Vector{Dict{Tuple{Int64,Int64},Tuple{String,Char}}}() #History of board states, oldest first
global gameType,board,seed,calculationTime
function init(igameType,iboard,iseed,difficulty)
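#igameType: rule set name passed through to AIHelp.legalMovesPlayer
#iboard: board object whose .state dictionary becomes the first entry of `states`
#iseed: number used to seed the pseudo-random rollout moves in run_simulation
#difficulty: key into difficultyDict below, giving the thinking time per move in seconds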
global difficultyDict = Dict("normal"=>20,"hard"=>300,"suicidal"=>5,"protacted death"=>5)
global calculationTime = difficultyDict[difficulty] #Max calculation time permitted in seconds
global states
global gameType = igameType
global board = iboard
global seed = iseed
push!(states,board.state)
end
#gameType = "standard"
#board = startGame(gameType)
#seed = time()
#Maximum number of moves played out in a single simulation
max_moves = 500
global wins = Dict() #(player,state) => number of recorded wins for that node
global plays = Dict() #(player,state) => number of simulations that visited that node
global C = 1.4 #UCB1 exploration constant (approximately sqrt(2))
function update(state)
push!(states,state)
end
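#get_play: choose a move for the player to act in the most recent recorded state.
#Runs simulations until calculationTime expires and returns the legal move with the
#highest observed win rate; returns (0,0,0,0) when there is no legal move.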
function get_play()
global max_depth,states,wins,plays
global max_depth = 0
state = states[end]
player = currentPlayer(states)
legal = AIHelp.legalMovesPlayer(state,player,gameType)
if length(legal) == 0
return (0,0,0,0)
elseif length(legal) == 1
return legal[1]
end
games = 0
bTime = time()
while time()-bTime < calculationTime
# println(time()-bTime)
run_simulation()
games += 1
print(STDOUT, "\u1b[1G") # go to first column
print(STDOUT,time()-bTime)
print(STDOUT, "\u1b[K") # clear the rest of the line
end
moves_states = collect( (p,next_state(state,p)) for p in legal)
println("$games, $(time()-bTime)")
(percent_wins,moveIndex) = findmax(
collect( get(wins,(player,S),0)/get(plays,(player,S),1)
for (p,S) in moves_states ))
move = moves_states[moveIndex][1]
#=Print stats?=#
println("Maximum depth searched: $(max_depth)")
println("with win rate $percent_wins for move $move")
return move
end
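#run_simulation: play out one game from the most recent recorded state.
#Where every child of a node already has statistics the move is chosen by UCB1,
#the first unseen (player,state) of the playout is added to the tables (expansion),
#remaining moves are seeded pseudo-random, and the result is backpropagated to
#every visited node that has an entry in `plays`.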
function run_simulation() #2,3,5
global max_depth,states,plays,wins
visited_states = Set()
states_copy = deepcopy(states)
state = states_copy[end]
player = currentPlayer(states_copy)
#println(player)
expandP = true
winner = '?' #Result of this playout; stays '?' if no winner is reached within max_moves
for i in 2:(max_moves+1)
state = states_copy[end]
player = currentPlayer(states_copy)
legalC = AIHelp.legalMovesPlayer(state,player,gameType)
moves_states = collect( (p,next_state(state,p)) for p in legalC)
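#Selection: if statistics exist for every child use UCB1, otherwise fall back to
#the seeded pseudo-random policy in the else branch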
if all(collect(get(plays,(player,S),0)!=0 for (p,S) in moves_states))#Have stats
log_total = log(
sum(plays[(player,S)] for (p,S) in moves_states)
)
# println("have all stats")
(value,moveIndex)=findmax(
collect(wins[(player,S)]/plays[(player,S)] + C*sqrt(log_total / plays[(player,S)]) for (p,S) in moves_states))
(move,state) = moves_states[moveIndex]
else #Random choice
move = legalC[mod(Int(ceil(seed))*i,length(legalC))+1] #Choose a "random" move for CURRENT PLAYER from legalC seeded from 'seed'
state = next_state(state, move)
end
push!(states_copy,state)
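#Expansion: add the first previously unseen (player,state) of this playout to the tables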
if expandP && !haskey(plays,(player,state))
expandP = false
plays[(player,state)] = 0
wins[(player,state)] = 0
if i > max_depth
max_depth = i
end
end
push!(visited_states,(player,state))
player = currentPlayer(states_copy)
winner = BM.winner(states_copy[end]) #Check the simulated game, not the real history in `states`
if winner!='?' #The simulated game has a winner
#println(winner)
break #Stop searching
end
end
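#Backpropagation: update every visited node that has an entry in the statistics tables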
for (player,state) in visited_states
if !haskey(plays,(player,state))
continue
end
plays[(player,state)] += 1
if player == winner #Credit every visited node belonging to the playout's winner
wins[(player,state)] += 1
end
end
end
export init,get_play
end
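#Example usage -- a sketch only: `startGame` matches the commented-out default above and is
#assumed to be provided by board.jl, and `new_state` stands for whatever state the game loop
#produces after each move.
#  include("AI.jl")
#  board = startGame("standard")
#  AI.init("standard", board, time(), "normal")
#  move = AI.get_play()        #(0,0,0,0) signals that no legal move exists
#  AI.update(new_state)        #record every new state so the AI sees the full history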