deleted and moved .py files

This commit is contained in:
Roger Labbe
2014-09-01 19:52:29 -07:00
parent 42d4ace9d1
commit d77c63e87a
13 changed files with 0 additions and 30 deletions

149
exp/DiscreteBayes1D.py Normal file
View File

@@ -0,0 +1,149 @@
# -*- coding: utf-8 -*-
"""
Created on Fri May 2 11:48:55 2014
@author: rlabbe
"""
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
import bar_plot
import numpy.random as random
import matplotlib.pyplot as plt
''' should this be a class? seems like both sense and update are very
problem specific
'''
class DiscreteBayes1D(object):
    """Histogram (discrete Bayes) filter over a 1-D circular world.

    A probability is tracked for every cell of `world_map`; `sense`
    performs the measurement update and `update` performs the motion
    (convolution) step, with wrap-around at the world edges.
    """

    def __init__(self, world_map, belief=None):
        self.world_map = copy.deepcopy(world_map)
        self.N = len(world_map)
        if belief is None:
            # no prior supplied: start from a uniform distribution
            self.belief = np.empty(self.N)
            self.belief.fill(1. / self.N)
        else:
            self.belief = copy.deepcopy(belief)
        # Scratch buffer holding the distribution computed in the current
        # iteration; swapped with `belief` to avoid per-call allocation.
        self.belief_k = np.empty(self.N)
        assert self.belief.shape == self.world_map.shape

    def _normalize(self):
        # scale belief so it sums to one
        self.belief = self.belief / sum(self.belief)

    def sense(self, Z, pHit, pMiss):
        # Weight each cell by how well the map there matches measurement Z.
        for idx in range(self.N):
            match = (self.world_map[idx] == Z)
            self.belief_k[idx] = self.belief[idx] * (pHit*match + pMiss*(1 - match))
        # swap-copy: new distribution into place, old array becomes scratch
        self.belief, self.belief_k = self.belief_k, self.belief
        self._normalize()

    def update(self, U, kernel):
        # Convolve the belief with the motion kernel, shifted by control U.
        # np.roll(b, s)[i] == b[(i - s) % N], so shift s = U - half + k
        # reproduces index (i + (half - k) - U) % N of the naive double loop.
        half = int((len(kernel) - 1) / 2)
        self.belief_k.fill(0)
        for k, weight in enumerate(kernel):
            self.belief_k += weight * np.roll(self.belief, U - half + k)
        self.belief, self.belief_k = self.belief_k, self.belief
def add_noise(Z, count):
    """Corrupt `count` randomly chosen entries of Z in place with 0/1 values."""
    size = len(Z)
    for _ in range(count):
        pos = random.randint(0, size)
        Z[pos] = random.randint(0, 2)
def animate_three_doors(loops=5):
    """Animate the discrete Bayes filter localizing in a cyclic hallway.

    The hallway is traversed `loops` times; a few measurements are
    corrupted so the filter can be seen recovering from bad data.
    """
    hallway = np.array([1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0])
    #world = np.array([1,1,1,1,1])
    #world = np.array([1,0,1,0,1,0])
    filt = DiscreteBayes1D(hallway)
    zs = np.tile(hallway, loops)
    add_noise(zs, 4)
    for z in zs:
        filt.sense(z, .8, .2)
        filt.update(1, (.05, .9, .05))
        bar_plot.plot(filt.belief)
        plt.pause(0.01)
def _test_filter():
    """Unit tests for DiscreteBayes1D.update: exact, under- and overshoot."""

    def is_near_equal(a, b):
        # Element-wise comparison within tolerance; prints both arrays on
        # failure so the offending values are visible.
        try:
            assert sum(abs(a - b)) < 0.001
        except:
            print(a, b)
            assert False

    def test_update_1():
        # A [1] kernel moves the belief exactly U cells, wrapping at edges.
        f = DiscreteBayes1D(np.array([0, 0, 1, 0, 0]), np.array([0, 0, .8, 0, 0]))
        f.update(1, [1])
        is_near_equal(f.belief, np.array([0, 0, 0, .8, 0]))
        f.update(1, [1])
        is_near_equal(f.belief, np.array([0, 0, 0, 0, .8]))
        f.update(1, [1])
        is_near_equal(f.belief, np.array([.8, 0, 0, 0, 0]))
        f.update(-1, [1])
        is_near_equal(f.belief, np.array([0, 0, 0, 0, .8]))
        f.update(2, [1])
        is_near_equal(f.belief, np.array([0, .8, 0, 0, 0]))
        f.update(5, [1])
        # moving a full world length wraps back to the same position
        is_near_equal(f.belief, np.array([0, .8, 0, 0, 0]))

    def test_undershoot():
        f = DiscreteBayes1D(np.array([0, 0, 1, 0, 0]), np.array([0, 0, .8, 0, 0]))
        f.update(2, [.2, .8, 0.])
        is_near_equal(f.belief, np.array([0, 0, 0, .16, .64]))

    def test_overshoot():
        f = DiscreteBayes1D(np.array([0, 0, 1, 0, 0]), np.array([0, 0, .8, 0, 0]))
        f.update(2, [0, .8, .2])
        is_near_equal(f.belief, np.array([.16, 0, 0, 0, .64]))

    test_update_1()
    test_undershoot()
    test_overshoot()  # BUG FIX: was defined but never invoked
if __name__ == "__main__":
    # Run the unit tests; the animation demo stays disabled by default.
    _test_filter()
    #animate_three_doors(loops=1)

26
exp/DogSensor.py Normal file
View File

@@ -0,0 +1,26 @@
# -*- coding: utf-8 -*-
"""
Created on Sun May 11 13:21:39 2014
@author: rlabbe
"""
from __future__ import print_function, division
import numpy.random as random
import math
class DogSensor(object):
    """Simulated position sensor for a dog moving at constant velocity."""

    def __init__(self, x0=0, velocity=1, noise=0.0):
        """ x0 - initial position
        velocity - (+=right, -=left)
        noise - scaling factor for noise, 0== no noise
        """
        self.x = x0
        self.velocity = velocity
        # `noise` is a variance; keep the standard deviation for sampling
        self.noise = math.sqrt(noise)

    def sense(self):
        """Advance one time step and return the noise-corrupted position."""
        self.x += self.velocity
        return self.x + random.randn() * self.noise

192
exp/baseball.py Normal file
View File

@@ -0,0 +1,192 @@
# -*- coding: utf-8 -*-
"""
Computes the trajectory of a stitched baseball with air drag.
Takes altitude into account (higher altitude means further travel) and the
stitching on the baseball influencing drag.
This is based on the book Computational Physics by N. Giordano.
The takeaway point is that the drag coefficient on a stitched baseball is
LOWER the higher its velocity, which is somewhat counterintuitive.
"""
from __future__ import division
import math
import matplotlib.pyplot as plt
from numpy.random import randn
import numpy as np
class BaseballPath(object):
    """Simulates the 2-D flight of a baseball subject to air drag.

    Euler integration; the drag model follows Giordano's Computational
    Physics (drag coefficient drops at high speed due to the stitching).
    """

    def __init__(self, x0, y0, launch_angle_deg, velocity_ms, noise=(1.0, 1.0)):
        """ Create baseball path object in 2D (y=height above ground)

        x0, y0           initial position
        launch_angle_deg angle ball is travelling respective to ground plane
        velocity_ms      speed of ball in meters/second
        noise            std dev of noise added to each reported (x, y)
        """
        # BUG FIX: radians/cos/sin were called unqualified, but only the
        # `math` module is imported -- every call raised NameError.
        omega = math.radians(launch_angle_deg)
        self.v_x = velocity_ms * math.cos(omega)
        self.v_y = velocity_ms * math.sin(omega)
        self.x = x0
        self.y = y0
        self.noise = noise

    def drag_force(self, velocity):
        """ Returns the force on a baseball due to air drag at
        the specified velocity. Units are SI
        """
        # BUG FIX: exp -> math.exp (was a NameError)
        B_m = 0.0039 + 0.0058 / (1. + math.exp((velocity - 35.) / 5.))
        return B_m * velocity

    def update(self, dt, vel_wind=0.):
        """ compute the ball position based on the specified time step and
        wind velocity. Returns (x,y) position tuple.
        """
        # Euler equations for x and y
        self.x += self.v_x * dt
        self.y += self.v_y * dt

        # force due to air drag; drag acts on the air-relative velocity
        v_x_wind = self.v_x - vel_wind
        # BUG FIX: sqrt -> math.sqrt (was a NameError)
        v = math.sqrt(v_x_wind**2 + self.v_y**2)
        F = self.drag_force(v)

        # Euler's equations for velocity
        self.v_x = self.v_x - F * v_x_wind * dt
        self.v_y = self.v_y - 9.81 * dt - F * self.v_y * dt

        # BUG FIX: the file imports `from numpy.random import randn`;
        # `random.randn` referenced an undefined name `random`.
        return (self.x + randn() * self.noise[0],
                self.y + randn() * self.noise[1])
def a_drag(vel, altitude):
    """ returns the drag coefficient of a baseball at a given velocity (m/s)
    and altitude (m).
    """
    V_D = 35       # velocity (m/s) at which the drag coefficient transitions
    DELTA = 5      # width of the transition, m/s
    Y_0 = 1.0e4    # atmospheric scale height, m
    coeff = 0.0039 + 0.0058 / (1 + math.exp((vel - V_D) / DELTA))
    return coeff * math.exp(-altitude / Y_0)
def compute_trajectory_vacuum(v_0_mph, theta, dt=0.01, noise_scale=0.0,
                              x0=0., y0=0.):
    """Drag-free ballistic trajectory sampled every `dt` seconds.

    Returns (xs, ys) position lists, optionally corrupted by gaussian
    noise scaled by `noise_scale`. Sampling stops after the first point
    whose true height is below zero (that point is included).
    """
    angle = math.radians(theta)
    vy0 = v_0_mph * math.sin(angle)
    vx0 = v_0_mph * math.cos(angle)
    xs, ys = [], []
    x, y = x0, y0
    t = dt
    while y >= 0:
        x = vx0 * t + x0
        y = -0.5 * 9.8 * t**2 + vy0 * t + y0
        xs.append(x + randn() * noise_scale)
        ys.append(y + randn() * noise_scale)
        t += dt
    return (xs, ys)
def compute_trajectory(v_0_mph, theta, v_wind_mph=0, alt_ft=0.0, dt=0.01):
    """Trajectory of a baseball with air drag, returned in yards.

    v_0_mph     initial speed, mph
    theta       launch angle, degrees
    v_wind_mph  horizontal wind speed, mph
    alt_ft      field altitude, feet (drag falls with altitude)
    dt          integration step, seconds

    Returns (x, y) lists of positions in yards; integration stops once the
    ball drops back to field level.
    """
    g = 9.8
    theta = math.radians(theta)

    # initial velocity in direction of travel
    v_0 = v_0_mph * 0.447      # mph to m/s

    # velocity components in x and y
    v_x = v_0 * math.cos(theta)
    v_y = v_0 * math.sin(theta)

    v_wind = v_wind_mph * 0.447  # mph to m/s
    altitude = alt_ft / 3.28     # ft to m (comment said "m/s" -- fixed)
    ground_level = altitude

    x = [0.0]
    y = [altitude]
    i = 0
    while y[i] >= ground_level:
        # drag acts on the air-relative velocity.
        # NOTE(review): a_drag is evaluated at the launch altitude, not at
        # y[i]; presumably intentional for short trajectories -- confirm.
        v = math.sqrt((v_x - v_wind)**2 + v_y**2)
        x.append(x[i] + v_x * dt)
        y.append(y[i] + v_y * dt)
        v_x = v_x - a_drag(v, altitude) * v * (v_x - v_wind) * dt
        v_y = v_y - a_drag(v, altitude) * v * v_y * dt - g * dt
        i += 1

    # meters to yards
    # BUG FIX: np.multiply's return value was discarded, so the intended
    # conversion never happened; assign the results (as lists) instead.
    x = np.multiply(x, 1.09361).tolist()
    y = np.multiply(y, 1.09361).tolist()
    return (x, y)
def predict(x0, y0, x1, y1, dt, time):
    """Extrapolate a ball position `time` seconds ahead of (x1, y1).

    (x0, y0) and (x1, y1) are consecutive samples dt apart; the velocity
    is taken from their difference and integrated forward with drag and
    gravity in Euler steps of length dt.
    """
    g = 10.724 * dt * dt
    vx = x1 - x0
    vy = y1 - y0
    speed = math.sqrt(vx**2 + vy**2)
    x, y = x1, y1
    remaining = time
    while remaining > 0:
        drag = a_drag(speed, y)
        vx -= drag * speed * vx
        vy -= drag * speed * vy + g
        x += vx
        y += vy
        remaining -= dt
    return (x, y)
if __name__ == "__main__":
    # Demo: 110 mph hit at 35 degrees at 5000 ft altitude.
    x,y = compute_trajectory(v_0_mph = 110., theta=35., v_wind_mph = 0., alt_ft=5000.)
    plt.plot (x, y)
    plt.show()

219
exp/bb_test.py Normal file
View File

@@ -0,0 +1,219 @@
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 4 12:33:38 2014
@author: rlabbe
"""
from __future__ import print_function,division
from filterpy.kalman import KalmanFilter
import numpy as np
import matplotlib.pyplot as plt
import baseball
from numpy.random import randn
def ball_filter6(dt, R=1., Q=0.1):
    """Build a 6-state Kalman filter for ball tracking.

    State ordering: (x, vx, ax, y, vy, gravity term); gravity is folded
    into the state transition rather than a control input.
    """
    kf = KalmanFilter(dim=6)
    g = 10

    kf.F = np.mat([[1., dt, dt**2, 0,  0,  0],
                   [0., 1., dt,    0,  0,  0],
                   [0., 0., 1.,    0,  0,  0],
                   [0., 0., 0.,    1., dt, -0.5*dt*dt*g],
                   [0., 0., 0.,    0., 1., -g*dt],
                   [0., 0., 0.,    0., 0., 1.]])

    # only the positions (states 0 and 3) are measured
    kf.H = np.mat([[1, 0, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0, 0],
                   [0, 0, 0, 1, 0, 0],
                   [0, 0, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0, 0]])

    kf.R = np.mat(np.eye(6)) * R
    kf.Q = np.zeros((6, 6))
    kf.Q[2, 2] = Q
    kf.Q[5, 5] = Q
    kf.x = np.mat([0, 0, 0, 0, 0, 0]).T
    kf.P = np.eye(6) * 50.
    kf.B = 0.
    kf.u = 0
    return kf
def ball_filter4(dt, R=1., Q=0.1):
    """Build a 4-state Kalman filter (x, vx, y, vy) for ball tracking.

    Unlike ball_filter6, gravity enters through the control input u.
    """
    kf = KalmanFilter(dim=4)
    g = 10

    kf.F = np.mat([[1., dt, 0,  0],
                   [0., 1., 0,  0],
                   [0., 0., 1., dt],
                   [0., 0., 0., 1.]])

    # measure x (state 0) and y (state 2) only
    kf.H = np.mat([[1, 0, 0, 0],
                   [0, 0, 0, 0],
                   [0, 0, 1, 0],
                   [0, 0, 0, 0]])

    # control model applies the constant gravity pull to y and vy
    kf.B = np.mat([[0, 0, 0, 0],
                   [0, 0, 0, 0],
                   [0, 0, 1., 0],
                   [0, 0, 0, 1.]])
    kf.u = np.mat([[0],
                   [0],
                   [-0.5*g*dt**2],
                   [-g*dt]])

    kf.R = np.mat(np.eye(4)) * R
    kf.Q = np.zeros((4, 4))
    kf.Q[1, 1] = Q
    kf.Q[3, 3] = Q
    kf.x = np.mat([0, 0, 0, 0]).T
    kf.P = np.eye(4) * 50.
    return kf
def plot_ball_filter6 (f1, zs, skip_start=-1, skip_end=-1):
    # Run the 6-state filter over measurements zs and plot filter output
    # against the raw measurements. Between skip_start and skip_end the
    # measurement is replaced by a drag-model extrapolation (simulated
    # occlusion of the ball).
    xs, ys = [],[]
    pxs, pys = [],[]
    for i,z in enumerate(zs):
        m = np.mat([z[0], 0, 0, z[1], 0, 0]).T
        f1.predict ()
        if i == skip_start:
            # capture the last two filtered points when the occlusion
            # starts (currently unused -- the commented line below used them)
            x2 = xs[-2]
            x1 = xs[-1]
            y2 = ys[-2]
            y1 = ys[-1]
        if i >= skip_start and i <= skip_end:
            # during occlusion, substitute a one-step drag-model prediction
            # from the latest filter estimates for the measurement
            #x,y = baseball.predict (x2, y2, x1, y1, 1/30, 1/30* (i-skip_start+1))
            x,y = baseball.predict (xs[-2], ys[-2], xs[-1], ys[-1], 1/30, 1/30)
            m[0] = x
            m[3] = y
            #print ('skip', i, f1.x[2],f1.x[5])
        f1.update(m)
        '''
        if i >= skip_start and i <= skip_end:
            #f1.x[2] = -0.1
            #f1.x[5] = -17.
            pass
        else:
            f1.update (m)
        '''
        xs.append (f1.x[0,0])
        ys.append (f1.x[3,0])
        pxs.append (z[0])
        pys.append(z[1])
        if i > 0 and z[1] < 0:
            # stop once the measured ball height drops below the ground
            break;
    p1, = plt.plot (xs, ys, 'r--')
    p2, = plt.plot (pxs, pys)
    plt.axis('equal')
    plt.legend([p1,p2], ['filter', 'measurement'], 2)
    plt.xlim([0,xs[-1]])
    plt.show()
def plot_ball_filter4 (f1, zs, skip_start=-1, skip_end=-1):
    # 4-state counterpart of plot_ball_filter6: y is state index 2 here.
    # Measurements between skip_start and skip_end are replaced by a
    # drag-model extrapolation (simulated occlusion).
    xs, ys = [],[]
    pxs, pys = [],[]
    for i,z in enumerate(zs):
        m = np.mat([z[0], 0, z[1], 0]).T
        f1.predict ()
        if i == skip_start:
            # capture the last two filtered points when the occlusion
            # starts (currently unused -- the commented line below used them)
            x2 = xs[-2]
            x1 = xs[-1]
            y2 = ys[-2]
            y1 = ys[-1]
        if i >= skip_start and i <= skip_end:
            # during occlusion, substitute a one-step drag-model prediction
            # from the latest filter estimates for the measurement
            #x,y = baseball.predict (x2, y2, x1, y1, 1/30, 1/30* (i-skip_start+1))
            x,y = baseball.predict (xs[-2], ys[-2], xs[-1], ys[-1], 1/30, 1/30)
            m[0] = x
            m[2] = y
        f1.update (m)
        '''
        if i >= skip_start and i <= skip_end:
            #f1.x[2] = -0.1
            #f1.x[5] = -17.
            pass
        else:
            f1.update (m)
        '''
        xs.append (f1.x[0,0])
        ys.append (f1.x[2,0])
        pxs.append (z[0])
        pys.append(z[1])
        if i > 0 and z[1] < 0:
            # stop once the measured ball height drops below the ground
            break;
    p1, = plt.plot (xs, ys, 'r--')
    p2, = plt.plot (pxs, pys)
    plt.axis('equal')
    plt.legend([p1,p2], ['filter', 'measurement'], 2)
    plt.xlim([0,xs[-1]])
    plt.show()
# measurement indices over which the ball is treated as occluded; the
# plot_ball_filter* routines substitute model predictions on this span
start_skip = 20
end_skip = 60
def run_6():
    """Track a simulated hit with the 6-state filter, occluding some data."""
    dt = 1/30
    noise = 0.0
    kf = ball_filter6(dt, R=.16, Q=0.1)
    plt.cla()
    xs, ys = baseball.compute_trajectory(v_0_mph=100., theta=50., dt=dt)
    znoise = [(a + randn()*noise, b + randn()*noise) for (a, b) in zip(xs, ys)]
    plot_ball_filter6(kf, znoise, start_skip, end_skip)
def run_4():
    """Track a simulated hit with the 4-state filter, occluding some data."""
    dt = 1/30
    noise = 0.0
    kf = ball_filter4(dt, R=.16, Q=0.1)
    plt.cla()
    xs, ys = baseball.compute_trajectory(v_0_mph=100., theta=50., dt=dt)
    znoise = [(a + randn()*noise, b + randn()*noise) for (a, b) in zip(xs, ys)]
    plot_ball_filter4(kf, znoise, start_skip, end_skip)
if __name__ == "__main__":
    # run the 4-state variant by default; call run_6() for the 6-state model
    run_4()

36
exp/dog_track_1d.py Normal file
View File

@@ -0,0 +1,36 @@
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 30 10:35:19 2014
@author: rlabbe
"""
import numpy.random as random
import math
class dog_sensor(object):
    """Simulated dog position sensor: constant motion plus gaussian noise."""

    def __init__(self, x0=0, motion=1, noise=0.0):
        self.x = x0
        self.motion = motion
        # `noise` is a variance; keep the standard deviation for sampling
        self.noise = math.sqrt(noise)

    def sense(self):
        """Move one step and return the (noisy) current position."""
        self.x += self.motion
        self.x += random.randn() * self.noise
        return self.x
def measure_dog ():
    # NOTE(review): appears unfinished -- it initializes persistent state on
    # the function object but never advances it or returns a measurement.
    if not hasattr(measure_dog, "x"):
        measure_dog.x = 0
        measure_dog.motion = 1
if __name__ == '__main__':
    # demo: print ten noisy readings of a dog walking one unit per step
    dog = dog_sensor(noise = 1)
    for i in range(10):
        print (dog.sense())

78
exp/gauss.py Normal file
View File

@@ -0,0 +1,78 @@
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 22 11:38:49 2014
@author: rlabbe
"""
from __future__ import division, print_function
import math
import matplotlib.pyplot as plt
import numpy.random as random
class gaussian(object):
    """Univariate gaussian N(mean, variance) with filter-friendly operators.

    `+` adds two independent gaussians (the predict step); `*` forms the
    Bayesian product of two gaussians (the update step). Calling the
    object evaluates its pdf at x.
    """

    def __init__(self, mean, variance):
        try:
            self.mean = float(mean)
            self.variance = float(variance)
        except:
            # non-scalar inputs (e.g. arrays) are stored unconverted
            self.mean = mean
            self.variance = variance

    def __add__ (a, b):
        # sum of independent gaussians: means and variances add
        return gaussian (a.mean + b.mean, a.variance + b.variance)

    def __mul__ (a, b):
        # product of gaussians (unnormalized Bayesian posterior)
        m = (a.variance*b.mean + b.variance*a.mean) / (a.variance + b.variance)
        v = 1. / (1./a.variance + 1./b.variance)
        return gaussian (m, v)

    def __call__(self, x):
        """ Evaluate the gaussian's pdf at x. """
        return math.exp (-0.5 * (x-self.mean)**2 / self.variance) / \
               math.sqrt(2.*math.pi*self.variance)

    def __str__(self):
        # BUG FIX: referenced nonexistent attribute self.sigma, which made
        # every str()/print of a gaussian raise AttributeError.
        return "(%f, %f)" % (self.mean, self.variance)

    def stddev(self):
        return math.sqrt (self.variance)

    def as_tuple(self):
        return (self.mean, self.variance)

    def __tuple__(self):
        # NOTE: not a real special method (Python never calls it); kept
        # only for backward compatibility with any direct callers.
        return (self.mean, self.variance)

    def __getitem__ (self, index):
        """ maybe silly, allows you to access obect as a tuple:
        a = gaussian(3,4)
        print (tuple(a))
        """
        if index == 0: return self.mean
        elif index == 1: return self.variance
        # BUG FIX: out-of-range access now raises IndexError, the sequence
        # protocol's termination signal, instead of StopIteration.
        # Iteration (e.g. tuple(a)) still stops after two elements.
        else: raise IndexError(index)
class KF1D(object):
    """Minimal one-dimensional Kalman filter built on the gaussian class."""

    def __init__(self, pos, sigma):
        # state estimate as a gaussian (position, variance)
        self.estimate = gaussian(pos, sigma)

    def update(self, Z, var):
        # measurement step: multiply the prior by the measurement gaussian
        self.estimate = self.estimate * gaussian(Z, var)

    def predict(self, U, var):
        # motion step: add the control gaussian
        self.estimate = self.estimate + gaussian(U, var)
def mul2(a, b):
    """Product of two gaussians given as dicts with 'mean'/'variance' keys."""
    va, vb = a['variance'], b['variance']
    mean = (va * b['mean'] + vb * a['mean']) / (va + vb)
    var = 1. / (1. / va + 1. / vb)
    return gaussian(mean, var)
#varying_error_kf( noise_factor=100)

57
exp/histogram.py Normal file
View File

@@ -0,0 +1,57 @@
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 22 10:43:38 2014
@author: rlabbe
"""
# prior belief: uniform over the 5 cells
p = [.2, .2, .2, .2, .2]
# color of each cell in the (cyclic) world
world = ['green', 'red', 'red', 'green', 'green']
measurements = ['red', 'green']
pHit = 0.6         # likelihood weight when the measurement matches the map
pMiss = 0.2        # likelihood weight when it does not
pOvershoot = 0.1   # motion model: probability of moving one cell too far
pUndershoot = 0.1  # motion model: probability of moving one cell short
pExact = 0.8       # motion model: probability of moving exactly U cells
def normalize (p):
    """Scale the list p in place so its elements sum to one."""
    total = sum(p)
    for i in range(len(p)):
        p[i] = p[i] / total
def sense(p, Z):
    """Measurement update: reweight p by how well each cell matches Z."""
    posterior = []
    for i in range(len(p)):
        match = (world[i] == Z)
        posterior.append(p[i] * (pHit * match + pMiss * (1 - match)))
    normalize(posterior)
    return posterior
def move(p, U):
    """Motion update on a cyclic world: shift p by U with under/overshoot."""
    prior = []
    n = len(p)
    for i in range(n):
        prob = p[(i - U) % n] * pExact
        prob += p[(i - U - 1) % n] * pOvershoot
        prob += p[(i - U + 1) % n] * pUndershoot
        prior.append(prob)
    normalize(prior)
    return prior
if __name__ == "__main__":
    # BUG FIX: converted Python 2 `print p` statements (syntax errors in
    # Python 3, and inconsistent with the other files in this commit) to
    # print() calls, and removed the call to pause(), which is not defined
    # anywhere and raised NameError.
    p = sense(p, 'red')
    print(p)
    for z in measurements:
        p = sense(p, z)
        p = move(p, 1)
    print(p)

47
exp/image_tracker.py Normal file
View File

@@ -0,0 +1,47 @@
# -*- coding: utf-8 -*-
"""
Created on Sat May 24 14:42:55 2014
@author: rlabbe
"""
from KalmanFilter import KalmanFilter
import numpy as np
import matplotlib.pyplot as plt
import numpy.random as random
# Track a target moving diagonally at constant velocity with a 4-state
# (x, vx, y, vy) Kalman filter fed noisy (x, y) measurements.
tracker = KalmanFilter(dim=4)
dt = 1

tracker.F = np.mat([[1, dt, 0, 0],
                    [0, 1,  0, 0],
                    [0, 0,  1, dt],
                    [0, 0,  0, 1]])
# only the two positions are observed
tracker.H = np.mat([[1, 0, 0, 0],
                    [0, 0, 1, 0]])
tracker.Q *= 4.
tracker.R = np.mat([[50, 0],
                    [0, 50]])
tracker.x = np.mat([0, 0, 0, 0]).T
tracker.P *= 100.

est_x, est_y = [], []
count = 200
for i in range(count):
    # true path is x = y = i, corrupted with unit gaussian noise
    z = np.mat([[i + random.randn()*1], [i + random.randn()*1]])
    tracker.predict()
    tracker.update(z)
    est_x.append(tracker.x[0, 0])
    est_y.append(tracker.x[2, 0])

plt.plot(est_x, est_y)
plt.show()

57
exp/mkf_ellipse_test.py Normal file
View File

@@ -0,0 +1,57 @@
# -*- coding: utf-8 -*-
"""
Created on Sun May 11 20:47:52 2014
@author: rlabbe
"""
from DogSensor import DogSensor
from filterpy.kalman import KalmanFilter
import numpy as np
import matplotlib.pyplot as plt
import stats
def dog_tracking_filter(R, Q=0, cov=1.):
    """Build a 2-state (position, velocity) KF for the dog-tracking demo."""
    kf = KalmanFilter(dim_x=2, dim_z=1)
    kf.x = np.matrix([[0], [0]])      # initial state (location and velocity)
    kf.F = np.matrix([[1, 1],
                      [0, 1]])        # constant-velocity transition
    kf.H = np.matrix([[1, 0]])        # position is the only measurement
    kf.R = R                          # measurement uncertainty
    kf.P *= cov                       # scale the initial state covariance
    kf.Q = Q                          # process noise
    return kf
def plot_track(noise, count, R, Q=0, plot_P=True, title='Kalman Filter'):
    # Run the dog-tracking filter for `count` steps, plot ground truth,
    # measurements and filter estimates, then draw a covariance ellipse
    # for every step.
    # NOTE(review): plot_P is accepted but never used in this function.
    dog = DogSensor(velocity=1, noise=noise)
    f = dog_tracking_filter(R=R, Q=Q, cov=10.)
    ps = []     # filtered position estimate per step
    zs = []     # raw measurement per step
    cov = []    # covariance matrix recorded after each update
    for t in range (count):
        z = dog.sense()
        f.update (z)
        #print (t,z)
        ps.append (f.x[0,0])
        cov.append(f.P)
        zs.append(z)
        f.predict()
    # ground truth is the line y = x (dog advances one unit per step)
    p0, = plt.plot([0,count],[0,count],'g')
    p1, = plt.plot(range(1,count+1),zs,c='r', linestyle='dashed')
    p2, = plt.plot(range(1,count+1),ps, c='b')
    plt.axis('equal')
    plt.legend([p0,p1,p2], ['actual','measurement', 'filter'], 2)
    plt.title(title)
    # one sigma ellipse per step, centered on that step's estimate
    for i,p in enumerate(cov):
        print(i,p)
        e = stats.sigma_ellipse (p, i+1, ps[i])
        stats.plot_sigma_ellipse(e, axis_equal=False)
    plt.xlim((-1,count))
    plt.show()
if __name__ == "__main__":
    # demo: quite noisy sensor, modest R, some process noise
    plot_track (noise=30, R=5, Q=2, count=20)

13
exp/noise.py Normal file
View File

@@ -0,0 +1,13 @@
# -*- coding: utf-8 -*-
"""
Created on Fri May 2 10:28:27 2014
@author: rlabbe
"""
import numpy.random
def white_noise (sigma2=1.):
    """Return one sample of zero-mean gaussian white noise with variance
    `sigma2`.

    BUG FIX: the sample must be scaled by the standard deviation
    sqrt(sigma2); the original multiplied by the variance itself, giving
    noise of variance sigma2**2 (cf. DogSensor in this commit, which
    stores math.sqrt(noise) for exactly this reason).
    """
    return (sigma2 ** 0.5) * numpy.random.randn()

if __name__ == "__main__":
    # sanity check: zero variance must yield exactly zero noise
    assert white_noise(.0) == 0.

88
exp/test_stats.py Normal file
View File

@@ -0,0 +1,88 @@
# -*- coding: utf-8 -*-
"""
py.test module to test stats.py module.
Created on Wed Aug 27 06:45:06 2014
@author: rlabbe
"""
from __future__ import division
from math import pi, exp
import numpy as np
from stats import gaussian, multivariate_gaussian, _to_cov
from numpy.linalg import inv
from numpy import linalg
def near_equal(x, y):
    """True when x and y differ by less than 1e-15 in absolute value."""
    return abs(x - y) < 1.e-15
def test_gaussian():
    """Check stats.gaussian against scipy's normal pdf over a grid."""
    import scipy.stats
    mean = 3.
    var = 1.5
    std = var ** 0.5
    for x in np.arange(-5, 5, 0.1):
        expected = scipy.stats.norm(mean, std).pdf(x)
        assert near_equal(expected, gaussian(x, mean, var))
def norm_pdf_multivariate(x, mu, sigma):
    """Literal textbook multivariate normal pdf.

    Slow, but easy to verify by eye against the reference equation.
    """
    dim = len(x)
    cov = _to_cov(sigma, dim)
    norm_const = 1.0 / (pow(2*pi, dim/2) * pow(linalg.det(cov), .5))
    diff = x - mu
    return norm_const * exp(-0.5 * (diff.dot(inv(cov)).dot(diff.T)))
def test_multivariate():
from scipy.stats import multivariate_normal as mvn
from numpy.random import rand
mean = 3
var = 1.5
assert near_equal(mvn(mean,var).pdf(0.5),
multivariate_gaussian(0.5, mean, var))
mean = np.array([2.,17.])
var = np.array([[10., 1.2], [1.2, 4.]])
x = np.array([1,16])
assert near_equal(mvn(mean,var).pdf(x),
multivariate_gaussian(x, mean, var))
for i in range(100):
x = np.array([rand(), rand()])
assert near_equal(mvn(mean,var).pdf(x),
multivariate_gaussian(x, mean, var))
assert near_equal(mvn(mean,var).pdf(x),
norm_pdf_multivariate(x, mean, var))
mean = np.array([1,2,3,4])
var = np.eye(4)*rand()
x = np.array([2,3,4,5])
assert near_equal(mvn(mean,var).pdf(x),
norm_pdf_multivariate(x, mean, var))