(打卡)task04

使用pytorch进行神经网络的搭建,掌握了PyTorch Lightning的用法。

  • 尝试了MLP神经网络
class LitMLP(pl.LightningModule):
    """Equal-width MLP classifier (205 input features -> 4 classes).

    Trains with cross-entropy; logs batch accuracy/loss during training,
    epoch accuracy and an L1 "score" on a held-out validation set
    (val_x, val_y) supplied at construction time.
    """

    def __init__(self, val_x, val_y):
        super().__init__()

        # Register the held-out validation tensors as buffers so Lightning
        # moves them to the module's device automatically (works on CPU too,
        # unlike the previous hard-coded .cuda() calls).
        self.register_buffer("val_x", val_x)
        self.register_buffer("val_y", val_y)

        num_linear = 500  # equal-width MLP: every hidden layer has this width
        self.layer1 = nn.Sequential(
            nn.Dropout(0.1),
            nn.Linear(205, num_linear),
            nn.Dropout(0.2),
            nn.ReLU(inplace=True),
            nn.Linear(num_linear, num_linear),
            nn.Dropout(0.2),
            nn.ReLU(inplace=True),
            nn.Linear(num_linear, num_linear),
            nn.Dropout(0.2),
            nn.ReLU(inplace=True),
            nn.Linear(num_linear, num_linear),
            nn.Dropout(0.3),
            nn.ReLU(inplace=True),
            nn.Linear(num_linear, 4),
        )

    def forward(self, x):
        """Return class probabilities of shape (batch, 4)."""
        logits = self.layer1(x)
        # dim=1 is the class dimension; calling F.softmax without dim is
        # deprecated (for 2-D input the implicit choice was also dim 1).
        return F.softmax(logits, dim=1)

    def training_step(self, batch, batch_idx):
        x, y = batch
        # Single forward pass: F.cross_entropy expects raw logits (it applies
        # log-softmax internally), and argmax over logits equals argmax over
        # softmax probabilities, so the second pass through the network that
        # the old code did is unnecessary.
        logits = self.layer1(x)
        loss = F.cross_entropy(logits, y)

        # number of correct predictions in this batch
        correct = logits.argmax(dim=1).eq(y).sum().detach()
        # total number of labels in this batch
        total = len(y)
        train_acc = correct / total
        self.logger.experiment.add_scalar("train_loss", loss, self.global_step)
        self.logger.experiment.add_scalar("train_acc", train_acc, self.global_step)

        # Lightning requires the "loss" key; anything else is optional.
        return {"loss": loss}

    def validation_step(self, batch, batch_idx):
        x, y = batch
        pred = self.forward(x)

        # number of correct predictions in this batch
        correct = pred.argmax(dim=1).eq(y).sum().detach()
        # total number of labels in this batch
        total = len(y)

        return {"correct": correct, "total": total}

    def oneHotEncoder(self, num_class, y):
        """One-hot encode integer labels y into a (len(y), num_class) tensor."""
        index = y.view(-1, 1)
        # Allocate on y's device so this works on CPU and GPU alike
        # (previously hard-coded to .cuda()).
        out = torch.zeros(len(index), num_class, device=y.device)
        return out.scatter(dim=1, index=index, value=1)

    def validation_epoch_end(self, validation_step_out):
        correct = sum(x["correct"] for x in validation_step_out)
        total = sum(x["total"] for x in validation_step_out)
        valid_acc = correct / total
        self.logger.experiment.add_scalar("valid_acc", valid_acc, self.global_step)
        # Score on the held-out validation set: total L1 distance between the
        # predicted probabilities and the one-hot targets. A single tensor
        # reduction replaces the old nested Python sum(sum(abs(...))).
        pred = self.forward(self.val_x)
        y_hat = self.oneHotEncoder(4, self.val_y)
        score = (pred - y_hat).abs().sum()
        self.logger.experiment.add_scalar("score", score, self.global_step)

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=1e-3)
  • 还尝试了FCN全卷积神经网络,但是还没有调试好,目前效果不如MLP,与文献不一致