
Commit 41ffb7a

First part
1 parent bf23216 commit 41ffb7a

File tree: methods.py, script.p.py, script.py

3 files changed: +127 -0 lines changed


methods.py

Lines changed: 27 additions & 0 deletions
@@ -0,0 +1,27 @@
import numpy as np
import matplotlib.pyplot as pp


def abs2(vec):
    """ Norm of the vector squared """
    return np.sum(vec*vec)


def k_n_m(xn, xm, thetas):
    # exponential-quadratic kernel with bias and linear terms (Bishop Eq. 6.63)
    return thetas[0]*np.exp(-thetas[1]/2.0 * abs2(xn-xm)) + thetas[2] + thetas[3]*np.dot(xn.T, xm)


def computeK(X, thetas):
    # Gram matrix: K[n, m] = k(x_n, x_m) for every pair of inputs in X
    N = X.shape[0]
    K = np.zeros((N, N))
    for n in range(N):
        for m in range(N):
            K[n, m] = k_n_m(X[n], X[m], thetas)
    return K
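A quick way to sanity-check this kernel, k(xn, xm) = theta0*exp(-theta1/2 * |xn - xm|^2) + theta2 + theta3*xn'xm: a minimal sketch, not part of the commit, assuming methods.py is importable and using illustrative inputs and theta values:

    import numpy as np
    from methods import computeK

    x = np.linspace(-1, 1, 5)              # five scalar inputs
    K = computeK(x, [1.0, 4.0, 0.0, 0.0])  # exponential-quadratic term only
    print(K.shape)                         # (5, 5)
    print(np.allclose(K, K.T))             # True: Gram matrices are symmetric
    print(np.diag(K))                      # all ones: k(x, x) = theta0 when theta2 = theta3 = 0

With the linear term theta3 switched on, the diagonal becomes theta0 + theta2 + theta3*x^2 and varies with x, which is a cheap check that the dot-product term is wired in.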

script.p.py

Lines changed: 26 additions & 0 deletions
@@ -0,0 +1,26 @@
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 09 12:08:57 2013

@author: AlbertoEAF
"""

import numpy as np
import matplotlib.pyplot as pp

sigma = 0.1
beta = 1.0 / pow(sigma, 2)  # this is the beta used in Bishop Eqn. 6.59
N_test = 100
x_test = np.linspace(-1, 1, N_test)
mu_test = np.zeros(N_test)


def true_mean_function(x):
    return np.sin(2*np.pi*(x+1))


def add_noise(y, sigma):
    # additive Gaussian noise with standard deviation sigma
    return y + sigma*np.random.randn(len(y))


def generate_t(x, sigma):
    # noisy targets around the true mean function
    return add_noise(true_mean_function(x), sigma)


y_test = true_mean_function(x_test)
t_test = add_noise(y_test, sigma)
pp.plot(x_test, y_test, 'b-', lw=2)
pp.plot(x_test, t_test, 'go')
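generate_t composes the two helpers but is never called in this part of the commit; a minimal sketch of using it to draw a small noisy training set (the training-set size here is my choice, not the commit's):

    N_train = 10
    x_train = np.linspace(-1, 1, N_train)
    t_train = generate_t(x_train, sigma)   # sin(2*pi*(x+1)) plus N(0, sigma^2) noise
    pp.plot(x_train, t_train, 'ro', ms=8)

Since beta = 1/sigma^2 is the noise precision, a larger beta means a tighter scatter of the noisy points around the true curve.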

script.py

Lines changed: 74 additions & 0 deletions
@@ -0,0 +1,74 @@
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 09 12:08:57 2013

@author: AlbertoEAF
"""

from methods import *


#sigma = 0.1
#beta = 1.0 / pow(sigma, 2)  # this is the beta used in Bishop Eqn. 6.59
N_test = 100
x_test = np.linspace(-1, 1, N_test)
mu_test = np.zeros(N_test)
#
#def true_mean_function(x):
#    return np.sin(2*pi*(x+1))
#
#def add_noise(y, sigma):
#    return y + sigma*np.random.randn(len(y))
#
#def generate_t(x, sigma):
#    return add_noise(true_mean_function(x), sigma)
#
#y_test = true_mean_function(x_test)
#t_test = add_noise(y_test, sigma)
#pp.plot(x_test, y_test, 'b-', lw=2)
#pp.plot(x_test, t_test, 'go')


def gp_plot(x_test, y_test, mu_test, var_test, x_train, t_train, theta, beta):
    # x_test:   the test inputs
    # y_test:   the true function at x_test
    # mu_test:  predictive mean at x_test
    # var_test: predictive covariance at x_test
    # t_train:  the training values
    # theta:    the kernel parameters
    # beta:     the precision (known)

    # the reason for the manipulation is to allow plots separating model and data stddevs.
    std_total = np.sqrt(np.diag(var_test))        # includes all uncertainty, model and target noise
    std_model = np.sqrt(std_total**2 - 1.0/beta)  # remove data noise to get model uncertainty in stddev
    std_combo = std_model + np.sqrt(1.0/beta)     # add stddev (note: not the same as full)

    pp.plot(x_test, y_test, 'b', lw=3)
    pp.plot(x_test, mu_test, 'k--', lw=2)
    pp.fill_between(x_test, mu_test + 2*std_combo, mu_test - 2*std_combo, color='k', alpha=0.25)
    pp.fill_between(x_test, mu_test + 2*std_model, mu_test - 2*std_model, color='r', alpha=0.25)
    pp.plot(x_train, t_train, 'ro', ms=10)


THETAS = np.array([[1, 4, 0, 0], [9, 4, 0, 0], [1, 64, 0, 0], [1, 0.25, 0, 0], [1, 4, 10, 0], [1, 4, 0, 5]])

mean = np.zeros((N_test, 1))

# N_test samples from the zero-mean GP prior with kernel parameters THETAS[5]
y_test = np.random.multivariate_normal(mu_test, computeK(x_test, THETAS[5]), N_test)

print(y_test.shape)  # (100, 100): one sample per row

for i in range(6):
    pp.plot(x_test, y_test[i])  # plot the first six sample paths
#pp.plot(x_test, y_test.T)  # or: plot all N_test samples at once

pp.show()
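Only THETAS[5] is sampled above; a minimal sketch of drawing prior samples for every hyper-parameter row (the 2x3 subplot layout and 5 samples per panel are my choices, not the commit's):

    for i, theta in enumerate(THETAS):
        # 5 samples from the zero-mean GP prior under this kernel setting
        samples = np.random.multivariate_normal(mu_test, computeK(x_test, theta), 5)
        pp.subplot(2, 3, i + 1)
        pp.plot(x_test, samples.T)  # rows are samples, so transpose for plotting
        pp.title(str(theta))
    pp.show()

Comparing panels shows how theta1 sets the length-scale (samples for [1, 64, 0, 0] wiggle much faster than for [1, 4, 0, 0]) and how theta3 adds a linear component ([1, 4, 0, 5]).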
