Compare commits
10 Commits
e711892d79
...
master
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
5ae93a9a6a | ||
|
|
8bb99e507f | ||
|
|
6fe9044cdc | ||
|
|
cfe03b7014 | ||
|
|
d9ea9ddb86 | ||
|
|
792a4c4398 | ||
|
|
d6d3b7f18e | ||
|
|
eebd9b5e3d | ||
|
|
f9e66aa340 | ||
|
|
a755cba5fb |
84
bayesian/normalizing_flows/flows.py
Normal file
@@ -0,0 +1,84 @@
|
|||||||
|
import torch
|
||||||
|
from torch import nn
|
||||||
|
|
||||||
|
|
||||||
|
class Planar(nn.Module):
    """A single planar normalizing-flow layer.

    Implements the invertible map f(z) = z + u * h(w^T z + b) with
    h = tanh, from Rezende & Mohamed, "Variational Inference with
    Normalizing Flows" (https://arxiv.org/pdf/1505.05770.pdf).

    Calling the layer returns the transformed sample together with the
    accumulated log |det Jacobian|, so several layers can be chained
    with nn.Sequential.
    """

    def __init__(self, size=1, init_sigma=0.01):
        """
        :param size: dimensionality of the latent variable z.
        :param init_sigma: std of the normal initialisation of u and w.
        """
        super().__init__()
        # u, w have shape (1, size); b is a single scalar bias.
        self.u = nn.Parameter(torch.randn(1, size).normal_(0, init_sigma))
        self.w = nn.Parameter(torch.randn(1, size).normal_(0, init_sigma))
        self.b = nn.Parameter(torch.zeros(1))

    @property
    def normalized_u(self):
        """
        Return u_hat such that w^T u_hat >= -1, needed for the
        invertibility condition.

        See Appendix A.1
        Rezende et al. Variational Inference with Normalizing Flows
        https://arxiv.org/pdf/1505.05770.pdf
        """

        # m(x) = -1 + softplus(x) maps any real x into (-1, inf)
        def m(x):
            return -1 + torch.log(1 + torch.exp(x))

        wtu = torch.matmul(self.w, self.u.t())
        # BUG FIX: the paper projects along w / ||w||^2 (SQUARED norm).
        # The original divided by ||w|| only, so w^T u_hat >= -1 (and
        # hence invertibility) was not guaranteed.
        w_div_w2 = self.w / torch.norm(self.w) ** 2
        return self.u + (m(wtu) - wtu) * w_div_w2

    def psi(self, z):
        """
        ψ(z) =h′(w^tz+b)w

        See eq(11)
        Rezende et al. Variational Inference with Normalizing Flows
        https://arxiv.org/pdf/1505.05770.pdf
        """
        return self.h_prime(z @ self.w.t() + self.b) @ self.w

    def h(self, x):
        # Invertible element-wise non-linearity used by the flow.
        return torch.tanh(x)

    def h_prime(self, z):
        # Derivative of tanh: 1 - tanh(z)^2.
        return 1 - torch.tanh(z) ** 2

    def forward(self, z):
        """
        Apply the flow to a batch z of shape (batch, size).

        Accepts either a tensor or a (tensor, accumulated_ldj) tuple so
        that layers can be stacked inside nn.Sequential.

        :return: (f(z), log|det J| + accumulated_ldj)
        """
        if isinstance(z, tuple):
            z, accumulating_ldj = z
        else:
            z, accumulating_ldj = z, 0
        psi = self.psi(z)

        u = self.normalized_u

        # determinant of jacobian, eq (12): 1 + u^T psi(z)
        det = (1 + psi @ u.t())

        # log |det Jac|; the epsilon guards against log(0)
        ldj = torch.log(torch.abs(det) + 1e-6)

        wzb = z @ self.w.t() + self.b

        fz = z + (u * self.h(wzb))

        return fz, ldj + accumulating_ldj
|
if __name__ == '__main__':
    import matplotlib.pyplot as plt

    # Demo: repeatedly apply one planar layer (with its randomly
    # initialised parameters) to uniform base samples and plot the
    # warped point cloud.
    z0 = torch.rand((1000, 2))

    with torch.no_grad():
        planar = Planar(size=2)

        zk = z0
        for _ in range(10):
            zk, ldj = planar(zk)

        plt.scatter(zk[:, 0], zk[:, 1], alpha=0.2)
        plt.show()
428
bayesian/normalizing_flows/planar_flow.ipynb
Normal file
93
bayesian/normalizing_flows/planar_flow/simple.py
Normal file
@@ -0,0 +1,93 @@
|
|||||||
|
import matplotlib.pyplot as plt
|
||||||
|
import torch
|
||||||
|
from torch import nn
|
||||||
|
from torch import distributions as dist
|
||||||
|
from flows import Planar
|
||||||
|
|
||||||
|
|
||||||
|
def target_density(z):
    """Unnormalised density exp(-U(z)) of the ring-shaped target.

    The energy U combines a ring of radius 4 with two modes along the
    z1 axis. `z` may carry any batch shape, with the last axis holding
    the two coordinates (z1, z2); the returned tensor drops that axis.
    """
    z1 = z[..., 0]
    z2 = z[..., 1]
    radius = (z1 ** 2 + z2 ** 2) ** 0.5
    ring_term = 0.5 * ((radius - 4) / 0.4) ** 2
    mode_right = torch.exp(-0.2 * ((z1 - 2) / 0.8) ** 2)
    mode_left = torch.exp(-0.2 * ((z1 + 2) / 0.8) ** 2)
    energy = ring_term - torch.log(mode_right + mode_left)
    return torch.exp(-energy)
||||||
|
class Flow(nn.Module):
    """A diagonal-Gaussian base distribution followed by a chain of
    planar flow layers.

    :param dim: dimensionality of the latent variable.
    :param n_flows: number of Planar layers to stack.
    """

    def __init__(self, dim=2, n_flows=10):
        super().__init__()
        layers = [Planar(dim) for _ in range(n_flows)]
        self.flow = nn.Sequential(*layers)
        # Learnable parameters of the Gaussian base q0(z).
        self.mu = nn.Parameter(torch.randn(dim, ).normal_(0, 0.01))
        self.log_var = nn.Parameter(torch.randn(dim, ).normal_(1, 0.01))

    def forward(self, shape):
        """Sample `shape` base points and push them through the flow.

        :return: (z0, zk, sum of log|det J|, mu, log_var)
        """
        # Reparameterisation trick: z0 = mu + eps * sigma.
        std = torch.exp(0.5 * self.log_var)
        eps = torch.randn(shape)  # unit gaussian
        z0 = self.mu + eps * std

        # Each Planar layer threads (z, accumulated ldj) through.
        zk, ldj = self.flow(z0)
        return z0, zk, ldj, self.mu, self.log_var
|
def det_loss(mu, log_var, z_0, z_k, ldj, beta):
    """Free-energy style loss for fitting the flow to `target_density`.

    Note that a uniform prior is assumed here, so P(z) is constant and
    is not modelled in this loss function.
    """
    batch_size = z_0.size(0)

    # log q0(z0) under the Gaussian base distribution
    base_log_prob = dist.Normal(mu, torch.exp(0.5 * log_var)).log_prob(z_0)
    # log qk(zk) = log q0(z0) - sum(log |det Jac|)
    flow_log_prob = base_log_prob.sum() - ldj.sum()
    # negative log target density, tempered by beta
    nll = -torch.log(target_density(z_k) + 1e-7).sum() * beta
    return (flow_log_prob + nll) / batch_size
|
def train_flow(flow, shape, epochs=1000):
    """Optimise the flow parameters with Adam against `det_loss`,
    printing the loss every 100 epochs.

    :param flow: a Flow module; flow(shape=...) must yield
        (z0, zk, ldj, mu, log_var).
    :param shape: sample shape drawn each epoch, e.g. (1000, 2).
    :param epochs: number of optimisation steps.
    """
    optim = torch.optim.Adam(flow.parameters(), lr=1e-2)

    for epoch in range(epochs):
        z0, zk, ldj, mu, log_var = flow(shape=shape)
        loss = det_loss(
            mu=mu, log_var=log_var, z_0=z0, z_k=zk, ldj=ldj, beta=1)

        loss.backward()
        optim.step()
        # Clear gradients after stepping, ready for the next backward.
        optim.zero_grad()
        if epoch % 100 == 0:
            print(loss.item())
|
if __name__ == '__main__':
    import numpy as np

    # Evaluate the target density on a regular grid for plotting.
    axis = np.linspace(-7.5, 7.5)
    grid_x1, grid_x2 = np.meshgrid(axis, axis)
    grid = np.stack([grid_x1, grid_x2], axis=-1)
    grid = torch.tensor(grid, dtype=torch.float)

    plt.figure(figsize=(8, 8))
    plt.title("Target distribution")
    plt.xlabel('$z_1$')
    plt.ylabel('$z_2$')
    plt.contourf(grid_x1, grid_x2, target_density(grid))
    plt.show()

    def show_samples(s):
        # Scatter-plot one batch of 2-d samples.
        plt.figure(figsize=(6, 6))
        plt.scatter(s[:, 0], s[:, 1], alpha=0.1)
        plt.show()

    # Fit the flow to the target, then draw a larger sample to inspect.
    flow = Flow(dim=2, n_flows=10)
    shape = (1000, 2)
    train_flow(flow, shape, epochs=5000)
    z0, zk, ldj, mu, log_var = flow((5000, 2))
    show_samples(zk.data)
520
bayesian/vae.ipynb
Normal file
1496
collaborative_filtering/svds.ipynb
Normal file
357
nearest_neighbors/locality-sensitive-hashing.ipynb
Normal file
|
Before Width: | Height: | Size: 113 KiB After Width: | Height: | Size: 113 KiB |
|
Before Width: | Height: | Size: 10 KiB After Width: | Height: | Size: 10 KiB |
|
Before Width: | Height: | Size: 6.1 KiB After Width: | Height: | Size: 6.1 KiB |
|
Before Width: | Height: | Size: 2.9 KiB After Width: | Height: | Size: 2.9 KiB |
|
Before Width: | Height: | Size: 14 KiB After Width: | Height: | Size: 14 KiB |
|
Before Width: | Height: | Size: 19 KiB After Width: | Height: | Size: 19 KiB |
|
Before Width: | Height: | Size: 35 KiB After Width: | Height: | Size: 35 KiB |
|
Before Width: | Height: | Size: 33 KiB After Width: | Height: | Size: 33 KiB |
|
Before Width: | Height: | Size: 74 KiB After Width: | Height: | Size: 74 KiB |
|
Before Width: | Height: | Size: 14 KiB After Width: | Height: | Size: 14 KiB |
|
Before Width: | Height: | Size: 480 KiB After Width: | Height: | Size: 480 KiB |
|
Before Width: | Height: | Size: 10 KiB After Width: | Height: | Size: 10 KiB |
|
Before Width: | Height: | Size: 13 KiB After Width: | Height: | Size: 13 KiB |
|
Before Width: | Height: | Size: 91 KiB After Width: | Height: | Size: 91 KiB |
|
Before Width: | Height: | Size: 12 KiB After Width: | Height: | Size: 12 KiB |
|
Before Width: | Height: | Size: 80 KiB After Width: | Height: | Size: 80 KiB |
4
trainings/variational_inference_core_team/.gitignore
vendored
Normal file
@@ -0,0 +1,4 @@
|
|||||||
|
.ipynb_checkpoints/
|
||||||
|
.idea/
|
||||||
|
__pycache__/
|
||||||
|
data/
|
||||||
BIN
trainings/variational_inference_core_team/img/auto-encoder.png
Normal file
|
After Width: | Height: | Size: 134 KiB |
BIN
trainings/variational_inference_core_team/img/dafuq.jpg
Normal file
|
After Width: | Height: | Size: 66 KiB |
BIN
trainings/variational_inference_core_team/img/easydist.png
Normal file
|
After Width: | Height: | Size: 42 KiB |
|
After Width: | Height: | Size: 772 KiB |
BIN
trainings/variational_inference_core_team/img/plate-vae.png
Normal file
|
After Width: | Height: | Size: 22 KiB |
63
trainings/variational_inference_core_team/models.py
Normal file
@@ -0,0 +1,63 @@
|
|||||||
|
import torch
|
||||||
|
from torch import nn
|
||||||
|
import torch.nn.functional as F
|
||||||
|
|
||||||
|
|
||||||
|
class AutoEncoder(nn.Module):
    """A plain deterministic auto-encoder with one hidden layer on each
    side of the bottleneck.

    In training mode the raw decoder logits are returned (suitable for
    a with-logits loss); in eval mode the output is squashed through a
    sigmoid into [0, 1].

    :param input_size: flattened size of one input sample.
    :param z_size: size of the latent bottleneck.
    """

    def __init__(self, input_size=784, z_size=20):
        super().__init__()
        # Remember the configured size so forward() can flatten correctly.
        self.input_size = input_size
        # Hidden width halfway between input and bottleneck.
        hidden_size = int((input_size - z_size) / 2 + z_size)
        self.encoder = nn.Sequential(
            nn.Linear(input_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, z_size)
        )
        self.decoder = nn.Sequential(
            nn.Linear(z_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, input_size),
        )

    def forward(self, x):
        """Encode then decode x; x is flattened to (batch, input_size)."""
        # BUG FIX: was hard-coded to x.view(-1, 784), which broke any
        # model constructed with input_size != 784.
        x = x.view(-1, self.input_size)
        z = self.encoder(x)
        x = self.decoder(z)

        if self.training:
            return x
        else:
            # torch.sigmoid replaces the deprecated F.sigmoid.
            return torch.sigmoid(x)
|
class VAE(nn.Module):
    """Variational auto-encoder with a diagonal Gaussian posterior.

    The encoder emits 2 * z_size values interpreted as the
    concatenation [mu, log_var]; sampling uses the reparameterisation
    trick so gradients flow through the stochastic node. The decoder
    ends in a Sigmoid, so reconstructions lie in [0, 1].

    :param input_size: flattened size of one input sample.
    :param z_size: dimensionality of the latent variable.
    """

    def __init__(self, input_size=784, z_size=20):
        super().__init__()
        hidden_size = int((input_size - z_size) / 2 + z_size)
        # Stored so forward() can flatten/split correctly.
        self.input_size = input_size
        self.z_size = z_size
        self.encoder = nn.Sequential(
            nn.Linear(input_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, z_size * 2)
        )
        self.decoder = nn.Sequential(
            nn.Linear(z_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, input_size),
            nn.Sigmoid()
        )

    def reparameterize(self, mu, log_var):
        """Sample z ~ N(mu, sigma^2) differentiably: z = mu + eps * sigma."""
        std = torch.exp(0.5 * log_var)
        eps = torch.randn_like(std)  # unit gaussian
        z = mu + eps * std
        return z

    def forward(self, x):
        """Return (reconstruction, z, mu, log_var) for input x."""
        # BUG FIX: was hard-coded to x.view(-1, 784); use the configured
        # input size so non-MNIST shapes work as well.
        x = x.view(-1, self.input_size)
        variational_params = self.encoder(x)
        # First z_size outputs are mu, the remaining are log-variance.
        mu = variational_params[..., :self.z_size]
        log_var = variational_params[..., self.z_size:]
        z = self.reparameterize(mu, log_var)
        return self.decoder(z), z, mu, log_var
||||||
@@ -0,0 +1,5 @@
|
|||||||
|
torch==1.3.0
|
||||||
|
torchvision>=0.4
|
||||||
|
numpy==1.17.2
|
||||||
|
matplotlib==3.1.1
|
||||||
|
scipy==1.3.1
|
||||||