This content originally appeared on DEV Community and was authored by Super Kai (Kazuya Ito)
*Memos:
- My post explains Linear Regression in PyTorch.
- My post explains Batch, Mini-Batch and Stochastic Gradient Descent with DataLoader() in PyTorch.
- My post explains Batch Gradient Descent without DataLoader() in PyTorch.
You can save a model with save() after training and testing it as shown below:
*Memos:
- Saving the model's state_dict(), which holds its parameters and buffers, is recommended according to the doc (see the sketch after this list). *The doc explains What is a state_dict?.
- My post explains save() and load().
- My post explains Path.mkdir(parents, exist_ok) in Python.
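As a quick illustration of what a state_dict() contains, here is a minimal standalone sketch (separate from the training code that follows) that prints the parameter entries of a one-layer nn.Linear model; an nn.Linear(1, 1) layer has just a 'weight' and a 'bias' entry:

import torch
from torch import nn

model = nn.Linear(in_features=1, out_features=1)

# state_dict() is an ordered dictionary mapping each parameter (and buffer)
# name to its tensor.
for name, tensor in model.state_dict().items():
    print(name, tensor.shape)
# weight torch.Size([1, 1])
# bias torch.Size([1])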
import torch
from torch import nn
from torch import optim
import matplotlib.pyplot as plt
# Set device
device = "cuda" if torch.cuda.is_available() else "cpu"
""" Prepare dataset """
weight = 0.8
bias = 0.5
X = torch.tensor([[0.00], [0.02], [0.04], [0.06], [0.08], # Size(50, 1)
                  [0.10], [0.12], [0.14], [0.16], [0.18],
                  [0.20], [0.22], [0.24], [0.26], [0.28],
                  [0.30], [0.32], [0.34], [0.36], [0.38],
                  [0.40], [0.42], [0.44], [0.46], [0.48],
                  [0.50], [0.52], [0.54], [0.56], [0.58],
                  [0.60], [0.62], [0.64], [0.66], [0.68],
                  [0.70], [0.72], [0.74], [0.76], [0.78],
                  [0.80], [0.82], [0.84], [0.86], [0.88],
                  [0.90], [0.92], [0.94], [0.96], [0.98]], device=device)
Y = weight * X + bias
l = int(0.8 * len(X))
X_train, Y_train, X_test, Y_test = X[:l], Y[:l], X[l:], Y[l:]
""" Prepare dataset """
""" Prepare model, loss function and optimizer """
class LinearRegressionModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear_layer = nn.Linear(in_features=1, out_features=1)

    def forward(self, x):
        return self.linear_layer(x)
torch.manual_seed(42)
my_model = LinearRegressionModel().to(device)
loss_fn = nn.L1Loss()
optimizer = optim.SGD(params=my_model.parameters(), lr=0.01)
""" Prepare model, loss function and optimizer """
""" Train and test model """
epochs = 50
epoch_count = []
loss_values = []
test_loss_values = []
for epoch in range(epochs):

    """ Train """
    my_model.train()

    # 1. Calculate predictions (forward propagation)
    Y_pred = my_model(X_train)

    # 2. Calculate loss
    loss = loss_fn(Y_pred, Y_train)

    # 3. Zero out gradients
    optimizer.zero_grad()

    # 4. Calculate gradients (backpropagation)
    loss.backward()

    # 5. Update parameters
    optimizer.step()
    """ Train """

    """ Test """
    my_model.eval()

    with torch.inference_mode():
        Y_test_pred = my_model(x=X_test)
        test_loss = loss_fn(Y_test_pred, Y_test)

    if epoch % 10 == 0:
        epoch_count.append(epoch)
        loss_values.append(loss)
        test_loss_values.append(test_loss)
        # print(f"Epoch: {epoch} | Loss: {loss} | Test loss: {test_loss}")
        # ↑ Uncomment it to see the details
    """ Test """
""" Train and test model """
""" Visualize train and test data and predictions"""
with torch.inference_mode():
Y_pred = my_model(x=X_test)
def plot_predictions(X_train, Y_train, X_test, Y_test, predictions=None):
    plt.figure(figsize=[6, 4])
    plt.scatter(X_train, Y_train, c='g', s=5, label='Train data(Green)')
    plt.scatter(X_test, Y_test, c='b', s=15, label='Test data(Blue)')
    if predictions is not None:
        plt.scatter(X_test, predictions, c='r', s=15, label='Predictions(Red)')
    plt.title(label="Train and test data and predictions", fontsize=14)
    plt.legend(fontsize=14)

plot_predictions(X_train=X_train.cpu(),
                 Y_train=Y_train.cpu(),
                 X_test=X_test.cpu(),
                 Y_test=Y_test.cpu(),
                 predictions=Y_pred.cpu())
""" Visualize train and test data and predictions """
""" Visualize train and test loss """
def plot_loss_curves(epoch_count, loss_values, test_loss_values):
    plt.figure(figsize=[6, 4])
    plt.plot(epoch_count, loss_values, label="Train loss")
    plt.plot(epoch_count, test_loss_values, label="Test loss")
    plt.title(label="Train and test loss curves", fontsize=14)
    plt.ylabel(ylabel="Loss", fontsize=14)
    plt.xlabel(xlabel="Epochs", fontsize=14)
    plt.legend(fontsize=14)

plot_loss_curves(epoch_count=epoch_count,
                 loss_values=torch.tensor(loss_values).cpu(),
                 test_loss_values=torch.tensor(test_loss_values).cpu())
""" Visualize train and test loss """
""" Save model """
from pathlib import Path
MODEL_PATH = Path("models")
MODEL_PATH.mkdir(parents=True, exist_ok=True)
MODEL_NAME = "linear_regression_0.pth"
MODEL_SAVE_PATH = MODEL_PATH / MODEL_NAME
torch.save(obj=my_model.state_dict(), f=MODEL_SAVE_PATH)
""" Save model """
Now, the model is saved as shown below:
Colab / JupyterLab: (screenshots of the saved models/linear_regression_0.pth file)
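To use the saved model later, you can load the saved state_dict() back into a new instance of the same model class with load() and load_state_dict(). This is a minimal sketch which assumes the LinearRegressionModel class, device and MODEL_SAVE_PATH from the code above are still defined:

""" Load model """
loaded_model = LinearRegressionModel().to(device)
loaded_model.load_state_dict(torch.load(f=MODEL_SAVE_PATH, map_location=device))
loaded_model.eval()

# The loaded parameters match the trained ones, so the loaded model
# produces the same predictions as my_model.
with torch.inference_mode():
    loaded_Y_pred = loaded_model(X_test)
""" Load model """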