Date: 2014-05-16
function J = computeCostMulti(X, y, theta)
% Compute the squared-error cost for linear regression with multiple variables.
m = length(y);                % number of training examples
predictions = X * theta;      % hypothesis values h_theta(x) for all examples
J = 1 / (2 * m) * (predictions - y)' * (predictions - y);   % vectorized sum of squared errors
end
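A quick sanity check of computeCostMulti (the toy X, y, and theta below are illustrative values, not the exercise data): theta = [0; 1] fits this data exactly, so the cost should come out as 0.

X = [1 1; 1 2; 1 3];    % m = 3 examples: intercept column plus one feature
y = [1; 2; 3];
theta = [0; 1];         % h(x) = x, a perfect fit for this data
J = computeCostMulti(X, y, theta)   % expected output: J = 0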
function [theta, J_history] = gradientDescentMulti(X, y, theta, alpha, num_iters)
% Run gradient descent for num_iters iterations, recording the cost after each step.
m = length(y);                       % number of training examples
J_history = zeros(num_iters, 1);
feature_number = size(X, 2);
temp = zeros(feature_number, 1);
for iter = 1:num_iters
    % Compute every new parameter from the OLD theta so that
    % all parameters are updated simultaneously.
    for i = 1:feature_number
        temp(i) = theta(i) - (alpha / m) * sum((X * theta - y) .* X(:, i));
    end
    theta = temp;                    % commit the simultaneous update
    J_history(iter) = computeCostMulti(X, y, theta);
end
end
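Each inner-loop pass applies the update theta(j) := theta(j) - (alpha/m) * sum((X*theta - y) .* X(:,j)), always reading the old theta, which is what makes the update simultaneous. A minimal run on the same toy data (alpha and num_iters here are arbitrary choices for this sketch, not tuned values):

X = [1 1; 1 2; 1 3];
y = [1; 2; 3];
theta = zeros(2, 1);
alpha = 0.1;
num_iters = 400;
[theta, J_history] = gradientDescentMulti(X, y, theta, alpha, num_iters);
% If alpha is small enough, J_history decreases monotonically;
% plotting it is the usual convergence check:
plot(1:num_iters, J_history); xlabel('Iteration'); ylabel('Cost J');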
function [X_norm, mu, sigma] = featureNormalize(X)
% Scale every feature to zero mean and unit standard deviation.
mu = mean(X);       % 1 x n row vector of per-column means
sigma = std(X);     % 1 x n row vector of per-column standard deviations
X_norm = X;
for i = 1:size(mu, 2)
    X_norm(:, i) = (X(:, i) - mu(i)) ./ sigma(i);
end
end
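One detail worth spelling out: the mu and sigma returned here must be saved and applied to any new example before predicting, since theta is learned on the normalized features. A sketch of the intended workflow (the house-size/bedroom numbers below are placeholders):

X = [2104 3; 1600 3; 2400 3; 1416 2];
[X_norm, mu, sigma] = featureNormalize(X);
X_norm = [ones(size(X_norm, 1), 1) X_norm];   % add the intercept column AFTER normalizing
% ... learn theta with gradientDescentMulti on X_norm, y ...
x_new = ([1650 3] - mu) ./ sigma;             % reuse the stored mu and sigma
x_new = [1 x_new];                            % prepend the intercept term
% prediction = x_new * theta;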
function [theta] = normalEqn(X, y)
% Closed-form least-squares solution; pinv (rather than inv) keeps this
% working even when X' * X is singular.
theta = pinv(X' * X) * X' * y;
end
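Unlike gradient descent, the normal equation needs no feature scaling and no choice of alpha or iteration count. A quick check on the earlier toy data (illustrative only) recovers the exact solution:

X = [1 1; 1 2; 1 3];
y = [1; 2; 3];
theta = normalEqn(X, y)        % expected output: theta = [0; 1]
prediction = [1 2.5] * theta   % predict for a new example (intercept already included)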