
function J = computeCost(X, y, theta)
%COMPUTECOST Compute cost for linear regression
%   J = COMPUTECOST(X, y, theta) computes the cost of using theta as the
%   parameter for linear regression to fit the data points in X and y
m = length(y); % number of training examples
J = 0;
for i = 1:m
  % Hypothesis for the i-th example is X(i,:) * theta (row of features times
  % column of parameters); accumulate the squared error against the target
  J += (X(i,:) * theta - y(i)) ^ 2;
end
J = J / (2 * m);
end
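The same cost can also be computed without an explicit loop. Below is a minimal vectorized sketch, assuming X is the m-by-2 design matrix whose first column is all ones and theta is a 2-by-1 column vector; the name computeCostVectorized is only an illustrative alias and is not part of the original code.

function J = computeCostVectorized(X, y, theta)
%COMPUTECOSTVECTORIZED Vectorized form of the cost above (illustrative sketch)
%   Assumes X is m-by-(n+1) with a leading column of ones and theta is (n+1)-by-1
m = length(y);           % number of training examples
errors = X * theta - y;  % m-by-1 vector of prediction errors
J = (errors' * errors) / (2 * m);
end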
function [theta, j_history] = gradientDescent(X, y, theta, alpha, num_iters)
%GRADIENTDESCENT Performs gradient descent to learn theta
%   theta = GRADIENTDESCENT(X, y, theta, alpha, num_iters) updates theta by
%   taking num_iters gradient steps with learning rate alpha

% Initialize some useful values
m = length(y);                   % number of training examples
j_history = zeros(num_iters, 1); % cost at each iteration

for iter = 1:num_iters
  % Record and print the cost for the current theta so convergence can be monitored
  j_history(iter) = computeCost(X, y, theta);
  disp(j_history(iter))

  % Remember the previous value of theta: both components must be updated
  % simultaneously, using the values from before this iteration's step
  old_theta = theta;

  % Update theta(1), the intercept term (X(:,1) is the column of ones,
  % so the usual factor X(tmp, 1) equals 1 and is omitted from the sum)
  sum_diff = 0;
  for tmp = 1:m
    hyp = old_theta(1) * X(tmp, 1) + old_theta(2) * X(tmp, 2);
    sum_diff += hyp - y(tmp);
  end
  theta(1) = old_theta(1) - alpha * (1 / m) * sum_diff;

  % Update theta(2), the slope term
  sum_diff = 0;
  for tmp = 1:m
    hyp = old_theta(1) * X(tmp, 1) + old_theta(2) * X(tmp, 2);
    sum_diff += (hyp - y(tmp)) * X(tmp, 2);
  end
  theta(2) = old_theta(2) - alpha * (1 / m) * sum_diff;
end
end
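The two per-component loops above can be collapsed into a single vectorized update, and the functions can be exercised end to end on a small data set. The snippet below is a sketch under the same assumptions (X carries a leading column of ones, theta is a 2-by-1 column vector); the sample data, alpha, and num_iters are made up for illustration and are not part of the original code.

% Vectorized gradient step, equivalent to the two per-component loops above:
%   theta = theta - (alpha / m) * X' * (X * theta - y);

% Illustrative usage with made-up data: fit y = 1 + 2*x
x = (1:10)';                  % raw feature values
y = 1 + 2 * x;                % targets generated from a known line
X = [ones(length(x), 1), x];  % design matrix with intercept column
theta = zeros(2, 1);          % initial parameters
alpha = 0.02;                 % learning rate (illustrative)
num_iters = 1500;             % number of gradient steps (illustrative)
[theta, j_history] = gradientDescent(X, y, theta, alpha, num_iters);
fprintf('Learned theta: [%.3f, %.3f]\n', theta(1), theta(2));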