How to use the Sequential method to build a neural network in PyTorch

[This tutorial is under development]

Writing a class is not the only way to create a neural network; PyTorch also offers a much simpler alternative, the nn.Sequential approach, which applies the layers you pass it in the order they are listed.
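
For comparison, here is a minimal sketch of the same two-layer network written both ways (the class-based version is hypothetical and shown only for illustration):

import torch.nn as nn

# class-based definition: layers are declared in __init__ and wired up in forward()
class TwoLayerNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.hidden = nn.Linear(2, 10)
        self.activation = nn.ReLU()
        self.output = nn.Linear(10, 1)
    def forward(self, x):
        return self.output(self.activation(self.hidden(x)))

# equivalent Sequential definition: layers are applied in the order they are listed
model = nn.Sequential(nn.Linear(2, 10), nn.ReLU(), nn.Linear(10, 1))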

import torch
import torch.nn as nn
from torch.optim import SGD
from torch.utils.data import Dataset, DataLoader
from torchsummary import summary
import matplotlib.pyplot as plt
import random

# generate synthetic data
X = []
Y = []
for _ in range(10):
    x1 = random.randint(1,20)
    x2 = random.randint(1,20)
    X.append([x1, x2])
    Y.append([x1 + x2])

# convert the lists into tensor objects (it is good practice to cast the
# elements to float, since they will be multiplied by float weights)
X = torch.tensor(X).float()
Y = torch.tensor(Y).float()

device = 'cuda' if torch.cuda.is_available() else 'cpu'
X = X.to(device)
Y = Y.to(device)

class ThisDataset(Dataset):
    def __init__(self, x, y):
        # x and y are already float tensors, so store them directly
        # (calling torch.tensor() on an existing tensor raises a warning)
        self.x = x
        self.y = y
    def __len__(self):
        return len(self.x)
    def __getitem__(self, ix):
        return self.x[ix], self.y[ix]
ds = ThisDataset(X, Y)
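
As a quick sanity check, the dataset behaves like any indexable Python sequence; each item is an (input, target) pair of float tensors (the exact values below depend on the random data):

print(len(ds))  # 10
print(ds[0])    # e.g. (tensor([ 4., 17.]), tensor([21.]))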

# create a neural network which can learn the addition operation
model = nn.Sequential(nn.Linear(2, 10),
                      nn.ReLU(),
                      nn.Linear(10, 1)).to(device)
summary(model)  # summary() prints the table itself; wrapping it in print() shows the table twice
loss_func = nn.MSELoss()
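
One convenience of nn.Sequential is that its submodules can be accessed by index, which is handy for inspecting the learnable parameters (a minimal illustration):

print(model[0].weight.shape)  # first nn.Linear: torch.Size([10, 2])
print(model[2].bias.shape)    # second nn.Linear: torch.Size([1])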

opt = SGD(model.parameters(), lr = 0.001)
loss_track = []
dl = DataLoader(ds, batch_size=5, shuffle=True)
for _ in range(100):
    for data in dl:
        x, y = data
        opt.zero_grad()
        loss_value = loss_func(model(x),y)
        loss_value.backward()
        opt.step()
        loss_track.append(loss_value.item())  # .item() extracts the float so the graph is freed

# validate on an unseen input pair; the prediction should be close to 3 + 15 = 18
val_x = [[3, 15]]
val_x = torch.tensor(val_x).float().to(device)
print(model(val_x))

Output:

=================================================================
Layer (type:depth-idx)                   Param #
=================================================================
├─Linear: 1-1                            30
├─ReLU: 1-2                              --
├─Linear: 1-3                            11
=================================================================
Total params: 41
Trainable params: 41
Non-trainable params: 0
=================================================================
tensor([[17.8943]], device='cuda:0', grad_fn=<AddmmBackward>)
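
Since loss_track holds one scalar per batch, the matplotlib import can now be put to use to check convergence (a minimal sketch):

plt.plot(loss_track)
plt.title('Loss value over increasing batches')
plt.xlabel('batch')
plt.ylabel('MSE loss')
plt.show()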