#!/usr/bin/env python3
# fit-np-mala.py
# Bayesian fit using numpy for MALA
import os
import pandas as pd
import numpy as np
import scipy as sp
import scipy.stats
from scipy.optimize import minimize
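
# Read the Pima diabetes data (parquet file in the parent directory)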
df = pd.read_parquet(os.path.join("..", "pima.parquet"))
print(df)
n, p = df.shape
print(n, p)
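
# Encode the binary response (1 = "Yes") and build the design matrix,
# prepending a column of ones for the intercept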
y = pd.get_dummies(df["type"])["Yes"].to_numpy(dtype='float32')
X = df.drop(columns="type").to_numpy()
X = np.hstack((np.ones((n,1)), X))
print(X)
print(y)
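
# Log-likelihood of the logistic regression model, written with the
# +/-1 response coding: log p(y|beta) = -sum_i log(1 + exp(-(2*y_i - 1)*x_i.beta))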
def ll(beta):
    return np.sum(-np.log(1 + np.exp(-(2*y - 1)*(X.dot(beta)))))
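
# Random starting point for optimisation and sampling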
init = np.random.randn(p)*0.1
print(init)
print("MAP:")
pscale = np.array([10.,1.,1.,1.,1.,1.,1.,1.])

def lprior(beta):
    return np.sum(sp.stats.norm.logpdf(beta, loc=0, scale=pscale))

def lpost(beta):
    return ll(beta) + lprior(beta)
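
# MAP estimate: maximise the log-posterior with BFGS, first letting
# the optimiser difference the objective numerically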
print("without gradients")
print(lpost(init))
res = minimize(lambda x: -lpost(x), init, method='BFGS')
print(res.x)
print(ll(res.x))
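
# Gradient of the log-posterior: -beta/pscale^2 from the Gaussian prior
# plus X'(y - expit(X.beta)) from the likelihood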
def glp(beta):
    glpr = -beta/(pscale*pscale)
    gll = (X.T).dot(y - 1/(1 + np.exp(-X.dot(beta))))
    return glpr + gll
print(glp(init))
print(glp(res.x))
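
# Re-run the optimiser, this time supplying the analytic gradient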
print("with gradients")
res = minimize(lambda x: -lpost(x), init, jac=lambda x: -glp(x), method='BFGS')
print(res.x)
print(ll(res.x))
print(glp(res.x))
print("MALA:")
def mhKernel(lpost, rprop, dprop = lambda new, old: 0.):
    def kernel(x, ll):
        prop = rprop(x)
        lp = lpost(prop)
        a = lp - ll + dprop(x, prop) - dprop(prop, x)
        if (np.log(np.random.rand()) < a):
            x = prop
            ll = lp
        return x, ll
    return kernel
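
# MALA kernel: Langevin proposal with mean x + (dt/2)*pre*grad log pi(x)
# and variance dt*pre, where pre is a diagonal preconditioner; the
# log-density passed to mhKernel corrects for the proposal's asymmetry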
def malaKernel(lpi, glpi, dt = 1e-4, pre = 1):
    p = len(init)  # state dimension, taken from the global init
    sdt = np.sqrt(dt)
    spre = np.sqrt(pre)
    advance = lambda x: x + 0.5*pre*glpi(x)*dt
    return mhKernel(lpi,
                    lambda x: advance(x) + np.random.randn(p)*spre*sdt,
                    lambda new, old: np.sum(sp.stats.norm.logpdf(new, loc=advance(old), scale=spre*sdt)))
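
# Simple MCMC driver: applies the kernel `thin` times per stored sample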
def mcmc(init, kernel, thin = 10, iters = 10000, verb = True):
    p = len(init)
    ll = -np.inf
    mat = np.zeros((iters, p))
    x = init
    if (verb):
        print(str(iters) + " iterations")
    for i in range(iters):
        if (verb):
            print(str(i), end=" ", flush=True)
        for j in range(thin):
            x, ll = kernel(x, ll)
        mat[i,:] = x
    if (verb):
        print("\nDone.", flush=True)
    return mat
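
# Diagonal preconditioner permitting bigger moves in the components with
# larger posterior scale (notably the intercept), paired with a small step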
pre = np.array([100.,1.,1.,1.,1.,1.,25.,1.])
out = mcmc(res.x, malaKernel(lpost, glp, dt=1e-5, pre=pre), thin=1000)
print(out)
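
# Save the thinned chain for later analysis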
odf = pd.DataFrame(out, columns=["b0","b1","b2","b3","b4","b5","b6","b7"])
odf.to_parquet("fit-np-mala.parquet")
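
# Quick numerical summaries of the posterior samples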
print("Posterior summaries:")
summ = sp.stats.describe(out)
print(summ)
print("\nMean: " + str(summ.mean))
print("Variance: " + str(summ.variance))
# eof