Commit 6cf0ad81 authored by Jacopo Moretti

feat: start implementations

parent d51f66f0
Branch: main
import numpy as np
### HELPERS: added by quartztz
def compute_loss_mse(y, tx, w):
    """Computes the MSE loss 1/(2N) * e.T @ e for the error e = y - tx @ w."""
    e = y - tx @ w
    N = tx.shape[0]
    return 1 / (2 * N) * (e @ e)
def compute_gradient_mse(y, tx, w):
    """
    Computes the gradient of the MSE loss function.
    Args:
        y: ground truth (N, )
        tx: data (N, D)
        w: weights (D, )
    Returns:
        grad: gradient of the loss function (D, )
    """
    e = y - tx @ w
    N = tx.shape[0]
    return -1 / N * (tx.T @ e)
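# Note on the -1/N factor: with L(w) = 1/(2N) * ||y - tx @ w||^2 and e = y - tx @ w,
# the gradient is dL/dw = -1/N * tx.T @ e, which is what compute_gradient_mse returns.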
def compute_gradient_mse_stoch(y, tx, w):
    """
    Computes the stochastic gradient of the MSE loss for a single sample.
    Args:
        y: ground truth for one sample (scalar)
        tx: data for one sample (D, )
        w: weights (D, )
    Returns:
        grad: gradient of the loss function (D, )
    """
    e = y - tx @ w
    # For a single sample the MSE gradient is simply -e * x (no averaging over N).
    return -e * tx
def sigmoid(t):
    """Applies the logistic function element-wise: 1 / (1 + exp(-t))."""
    return 1.0 / (1.0 + np.exp(-t))
def compute_loss_logistic(y, tx, w):
    """Negative log-likelihood loss for binary labels y in {0, 1}."""
    p = sigmoid(tx @ w)
    return -np.mean(y * np.log(p) + (1 - y) * np.log(1 - p))
def compute_gradient_logistic(y, tx, w):
    """Gradient of the logistic loss: 1/N * tx.T @ (sigmoid(tx @ w) - y)."""
    return tx.T @ (sigmoid(tx @ w) - y) / tx.shape[0]
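# Optional sketch of a numerically stable variant of the logistic loss (not required
# by the functions below): it uses log(1 + exp(t)) = logaddexp(0, t) to avoid overflow
# for large |tx @ w|, assuming binary labels y in {0, 1}.
def compute_loss_logistic_stable(y, tx, w):
    """Equivalent to compute_loss_logistic, computed as mean(log(1 + exp(t)) - y * t)."""
    t = tx @ w
    return np.mean(np.logaddexp(0.0, t) - y * t)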
def mean_squared_error_gd(y, tx, initial_w, max_iters, gamma):
    # performs gradient descent on ground data tx, y.
    # computes the step w <- w - gamma * grad, where grad is the gradient computed
    # from the given data using the mean squared loss function.
    w = initial_w
    for _ in range(max_iters):
        grad = compute_gradient_mse(y, tx, w)
        w = w - gamma * grad
    loss = compute_loss_mse(y, tx, w)
    return w, loss
def mean_squared_error_sgd(y, tx, initial_w, max_iters, gamma):
    '''
    Performs stochastic gradient descent on ground data tx, y.
    Args:
        y: ground truth (N, )
        tx: data (N, D)
        initial_w: initial weights (D, )
        max_iters: maximum number of iterations (int)
        gamma: learning rate (scalar)
    Returns:
        w: weights (D, )
        loss: loss (scalar)
    '''
    w = initial_w
    for _ in range(max_iters):
        # sample one data point uniformly at random and take a gradient step on it
        idx = np.random.randint(len(y))
        m_y = y[idx]
        m_tx = tx[idx]
        grad = compute_gradient_mse_stoch(m_y, m_tx, w)
        w = w - gamma * grad
    loss = compute_loss_mse(y, tx, w)
    return w, loss
def least_squares(y, tx):
    """Solves the normal equations (tx.T @ tx) w = tx.T @ y for the least-squares weights."""
    a = tx.T @ tx
    b = tx.T @ y
    w = np.linalg.solve(a, b)
    loss = compute_loss_mse(y, tx, w)
    return w, loss
def ridge_regression(y, tx, lambda_):
    '''
    Performs ridge regression on the given data.
    Args:
        y: ground truth (N, )
        tx: data (N, D)
        lambda_: regularization parameter (scalar)
    Returns:
        w: weights (D, )
        loss: loss (scalar)
    '''
    N, d = tx.shape[0], tx.shape[1]
    # the factor 2 * N folds the 1/(2N) normalization of the MSE into the penalty lambda * ||w||^2
    a = tx.T @ tx + 2 * N * lambda_ * np.eye(d)
    b = tx.T @ y
    w = np.linalg.solve(a, b)
    loss = compute_loss_mse(y, tx, w)
    return w, loss
def logistic_regression(y, tx, initial_w, max_iters, gamma):
    # performs logistic regression on ground data tx, y.
    # computes the step w <- w - gamma * grad, where grad is the gradient computed
    # from the given data using the logistic loss function.
    w = initial_w
    for _ in range(max_iters):
        grad = compute_gradient_logistic(y, tx, w)
        w = w - gamma * grad
    loss = compute_loss_logistic(y, tx, w)
    return w, loss
def reg_logistic_regression(y, tx, lambda_, initial_w, max_iters, gamma):
    # performs regularized logistic regression on ground data tx, y.
    # computes the step w <- w - gamma * grad, where grad is the gradient computed
    # from the given data using the regularized logistic loss function.
    w = initial_w
    for _ in range(max_iters):
        grad = compute_gradient_logistic(y, tx, w) + 2 * lambda_ * w
        w = w - gamma * grad
    loss = compute_loss_logistic(y, tx, w)
    return w, loss
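# Illustrative usage sketch on synthetic data: the data, seed, and hyperparameters
# below are assumptions chosen for demonstration only, not fixed by the functions above.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    N, D = 100, 3
    tx = np.c_[np.ones(N), rng.normal(size=(N, D - 1))]  # design matrix with a bias column
    w_true = np.array([0.5, -1.0, 2.0])
    y = tx @ w_true + 0.1 * rng.normal(size=N)
    # linear regression: closed form, full-batch gradient descent, and ridge
    w_ls, loss_ls = least_squares(y, tx)
    w_gd, loss_gd = mean_squared_error_gd(y, tx, np.zeros(D), max_iters=500, gamma=0.1)
    w_ridge, loss_ridge = ridge_regression(y, tx, lambda_=1e-3)
    print("least squares loss:", loss_ls)
    print("gradient descent loss:", loss_gd)
    print("ridge regression loss:", loss_ridge)
    # logistic regression on binary labels derived from the same features
    y_bin = (tx @ w_true > 0).astype(float)
    w_log, loss_log = logistic_regression(y_bin, tx, np.zeros(D), max_iters=500, gamma=0.5)
    print("logistic loss:", loss_log)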