🍨 本文为 🔗365天深度学习训练营 中的学习记录博客
🍦 参考文章地址：365天深度学习训练营 - 第P8周：YOLOv5-Backbone模块实现
🍖 作者：K同学啊

一、前期准备
1. 设置GPU
import torch
from torch import nn
import torchvision
from torchvision import transforms,datasets,models
import matplotlib.pyplot as plt
import os,PIL,pathlibdevice torch.device(cuda if torch.cuda.is_available() else cpu)
devicedevice(typecuda)2.导入数据data_dir ./weather_photos/
data_dir pathlib.Path(data_dir)data_paths list(data_dir.glob(*))
classNames [str(path).split(\\)[1] for path in data_paths]
classNames[cloudy, rain, shine, sunrise]# 关于transforms.Compose的更多介绍可以参考https://blog.csdn.net/qq_38251616/article/details/124878863
train_transforms transforms.Compose([transforms.Resize([224, 224]), # 将输入图片resize成统一尺寸# transforms.RandomHorizontalFlip(), # 随机水平翻转transforms.ToTensor(), # 将PIL Image或numpy.ndarray转换为tensor并归一化到[0,1]之间transforms.Normalize( # 标准化处理--转换为标准正太分布高斯分布使模型更容易收敛mean[0.485, 0.456, 0.406], std[0.229, 0.224, 0.225]) # 其中 mean[0.485,0.456,0.406]与std[0.229,0.224,0.225] 从数据集中随机抽样计算得到的。
])test_transform transforms.Compose([transforms.Resize([224, 224]), # 将输入图片resize成统一尺寸transforms.ToTensor(), # 将PIL Image或numpy.ndarray转换为tensor并归一化到[0,1]之间transforms.Normalize( # 标准化处理--转换为标准正太分布高斯分布使模型更容易收敛mean[0.485, 0.456, 0.406], std[0.229, 0.224, 0.225]) # 其中 mean[0.485,0.456,0.406]与std[0.229,0.224,0.225] 从数据集中随机抽样计算得到的。
])total_data datasets.ImageFolder(data_dir,transformtrain_transforms)
total_dataDataset ImageFolder Number of datapoints: 1125 Root location: weather_photos StandardTransformTransform: Compose( Resize(size[224, 224], interpolationPIL.Image.BILINEAR) ToTensor() Normalize(mean[0.485, 0.456, 0.406], std[0.229, 0.224, 0.225]) )total_data.class_to_idx{cloudy: 0, rain: 1, shine: 2, sunrise: 3}3.划分数据集train_size int(0.8*len(total_data))
test_size len(total_data) - train_size
train_dataset, test_dataset torch.utils.data.random_split(total_data,[train_size,test_size])
train_dataset,test_dataset(torch.utils.data.dataset.Subset at 0x1e42b97f4f0, torch.utils.data.dataset.Subset at 0x1e42b196a30)batch_size 4
train_dl torch.utils.data.DataLoader(train_dataset,batch_sizebatch_size,shuffleTrue,num_workers1)
test_dl torch.utils.data.DataLoader(test_dataset,batch_sizebatch_size,shuffleTrue,num_workers1)for X,y in test_dl:print(Shape of X [N, C, H, W]:, X.shape)print(Shape of y:, y.shape)breakShape of X [N, C, H, W]: torch.Size([4, 3, 224, 224])Shape of y: torch.Size([4])二、搭建包含Backbone模块的模型1.搭建模型import torch.nn.functional as Fdef autopad(k, pNone): # kernel, padding# Pad to sameif p is None:p k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-padreturn pclass Conv(nn.Module):# Standard convolutiondef __init__(self, c1, c2, k1, s1, pNone, g1, actTrue): # ch_in, ch_out, kernel, stride, padding, groupssuper().__init__()self.conv nn.Conv2d(c1, c2, k, s, autopad(k, p), groupsg, biasFalse)self.bn nn.BatchNorm2d(c2)self.act nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity())def forward(self, x):return self.act(self.bn(self.conv(x)))class Bottleneck(nn.Module):# Standard bottleneckdef __init__(self, c1, c2, shortcutTrue, g1, e0.5): # ch_in, ch_out, shortcut, groups, expansionsuper().__init__()c_ int(c2 * e) # hidden channelsself.cv1 Conv(c1, c_, 1, 1)self.cv2 Conv(c_, c2, 3, 1, gg)self.add shortcut and c1 c2def forward(self, x):return x self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))class C3(nn.Module):# CSP Bottleneck with 3 convolutionsdef __init__(self, c1, c2, n1, shortcutTrue, g1, e0.5): # ch_in, ch_out, number, shortcut, groups, expansionsuper().__init__()c_ int(c2 * e) # hidden channelsself.cv1 Conv(c1, c_, 1, 1)self.cv2 Conv(c1, c_, 1, 1)self.cv3 Conv(2 * c_, c2, 1) # actFReLU(c2)self.m nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e1.0) for _ in range(n)))def forward(self, x):return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), dim1))class SPPF(nn.Module):# Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocherdef __init__(self, c1, c2, k5): # equivalent to SPP(k(5, 9, 13))super().__init__()c_ c1 // 2 # hidden channelsself.cv1 Conv(c1, c_, 1, 1)self.cv2 Conv(c_ * 4, c2, 1, 1)self.m 
nn.MaxPool2d(kernel_sizek, stride1, paddingk // 2)def forward(self, x):x self.cv1(x)with warnings.catch_warnings():warnings.simplefilter(ignore) # suppress torch 1.9.0 max_pool2d() warningy1 self.m(x)y2 self.m(y1)return self.cv2(torch.cat([x, y1, y2, self.m(y2)], 1))这个是YOLOv5, 6.0版本的主干网络这里进行复现
注有部分删改详细讲解将在后续进行展开class YOLOv5_backbone(nn.Module):def __init__(self):super(YOLOv5_backbone, self).__init__()self.Conv_1 Conv(3, 64, 3, 2, 2) self.Conv_2 Conv(64, 128, 3, 2) self.C3_3 C3(128,128)self.Conv_4 Conv(128, 256, 3, 2) self.C3_5 C3(256,256)self.Conv_6 Conv(256, 512, 3, 2) self.C3_7 C3(512,512)self.Conv_8 Conv(512, 1024, 3, 2) self.C3_9 C3(1024, 1024)self.SPPF SPPF(1024, 1024, 5)# 全连接网络层用于分类self.classifier nn.Sequential(nn.Linear(in_features65536, out_features100),nn.ReLU(),nn.Linear(in_features100, out_features4))def forward(self, x):x self.Conv_1(x)x self.Conv_2(x)x self.C3_3(x)x self.Conv_4(x)x self.C3_5(x)x self.Conv_6(x)x self.C3_7(x)x self.Conv_8(x)x self.C3_9(x)x self.SPPF(x)x torch.flatten(x, start_dim1)x self.classifier(x)return xdevice cuda if torch.cuda.is_available() else cpu
print(Using {} device.format(device))model YOLOv5_backbone().to(device)
model略2.查看详细模型# 统计模型参数量以及其他指标
import torchsummary as summary
summary.summary(model, (3, 224, 224))略三、训练模型1.编写训练函数# 训练循环
def train(dataloader, model, loss_fn, optimizer):size len(dataloader.dataset) # 训练集的大小一共900张图片num_batches len(dataloader) # 批次数目29900/32train_loss, train_acc 0, 0 # 初始化训练损失和正确率for X, y in dataloader: # 获取图片及其标签X, y X.to(device), y.to(device)# 计算预测误差pred model(X) # 网络输出loss loss_fn(pred, y) # 计算网络输出和真实值之间的差距targets为真实值计算二者差值即为损失# 反向传播optimizer.zero_grad() # grad属性归零loss.backward() # 反向传播optimizer.step() # 每一步自动更新# 记录acc与losstrain_acc (pred.argmax(1) y).type(torch.float).sum().item()train_loss loss.item()train_acc / sizetrain_loss / num_batchesreturn train_acc, train_loss2.编写测试函数def test (dataloader, model, loss_fn):size len(dataloader.dataset) # 测试集的大小一共10000张图片num_batches len(dataloader) # 批次数目8255/328向上取整test_loss, test_acc 0, 0# 当不进行训练时停止梯度更新节省计算内存消耗with torch.no_grad():for imgs, target in dataloader:imgs, target imgs.to(device), target.to(device)# 计算losstarget_pred model(imgs)loss loss_fn(target_pred, target)test_loss loss.item()test_acc (target_pred.argmax(1) target).type(torch.float).sum().item()test_acc / sizetest_loss / num_batchesreturn test_acc, test_loss3.正式训练import copyoptimizer torch.optim.Adam(model.parameters(), lr 1e-4)
loss_fn nn.CrossEntropyLoss() # 创建损失函数epochs 20train_loss []
train_acc []
test_loss []
test_acc []best_acc 0 # 设置一个最佳准确率作为最佳模型的判别指标for epoch in range(epochs):model.train()epoch_train_acc, epoch_train_loss train(train_dl, model, loss_fn, optimizer)model.eval()epoch_test_acc, epoch_test_loss test(test_dl, model, loss_fn)# 保存最佳模型到 best_modelif epoch_test_acc best_acc:best_acc epoch_test_accbest_model copy.deepcopy(model)train_acc.append(epoch_train_acc)train_loss.append(epoch_train_loss)test_acc.append(epoch_test_acc)test_loss.append(epoch_test_loss)# 获取当前的学习率lr optimizer.state_dict()[param_groups][0][lr]template (Epoch:{:2d}, Train_acc:{:.1f}%, Train_loss:{:.3f}, Test_acc:{:.1f}%, Test_loss:{:.3f}, Lr:{:.2E})print(template.format(epoch1, epoch_train_acc*100, epoch_train_loss, epoch_test_acc*100, epoch_test_loss, lr))# 保存最佳模型到文件中
PATH ./best_model.pth # 保存的参数文件名
torch.save(model.state_dict(), PATH)print(Done)。。。Epoch:18, Train_acc:95.0%, Train_loss:0.142, Test_acc:91.6%, Test_loss:0.236, Lr:1.00E-04Epoch:19, Train_acc:92.8%, Train_loss:0.193, Test_acc:88.0%, Test_loss:0.278, Lr:1.00E-04Epoch:20, Train_acc:94.6%, Train_loss:0.160, Test_acc:92.0%, Test_loss:0.220, Lr:1.00E-04Done四、结果可视化1.Loss与Accuracy图import matplotlib.pyplot as plt
#隐藏警告
import warnings
warnings.filterwarnings(ignore) #忽略警告信息
plt.rcParams[font.sans-serif] [SimHei] # 用来正常显示中文标签
plt.rcParams[axes.unicode_minus] False # 用来正常显示负号
plt.rcParams[figure.dpi] 100 #分辨率epochs_range range(epochs)plt.figure(figsize(12, 3))
plt.subplot(1, 2, 1)plt.plot(epochs_range, train_acc, labelTraining Accuracy)
plt.plot(epochs_range, test_acc, labelTest Accuracy)
plt.legend(loclower right)
plt.title(Training and Validation Accuracy)plt.subplot(1, 2, 2)
plt.plot(epochs_range, train_loss, labelTraining Loss)
plt.plot(epochs_range, test_loss, labelTest Loss)
plt.legend(locupper right)
plt.title(Training and Validation Loss)
plt.show()2.模型评估# 将参数加载到model当中
best_model.load_state_dict(torch.load(PATH, map_locationdevice))
epoch_test_acc, epoch_test_loss test(test_dl, best_model, loss_fn)
epoch_test_acc, epoch_test_loss(0.92, 0.21799196774352886)# 查看是否与我们记录的最高准确率一致
epoch_test_acc0.92