700字范文,内容丰富有趣,生活中的好帮手!
700字范文 > 深度学习7日入门-CV疫情特辑心得

深度学习7日入门-CV疫情特辑心得

时间:2020-12-17 09:08:56

相关推荐

深度学习7日入门-CV疫情特辑心得

深度学习7日入门-CV疫情特辑心得

学习后的整体感觉:内容安排非常紧凑,课件内容很准确,作业有针对性,比赛题目比较难。

下面从内容上回顾一下课程内容:

首先,小白需要自学预习课(不过这部分内容在day5才发出来,稍微有点晚)。预习课主要熟悉Python语言,Paddle基本使用,NoteBook使用及深度学习基础等。这一部分重点是熟悉Python语法和工具使用。学习内容中提供的深度学习基本概念和数学概念,基本上可以略过(数学部分只是把涉及的数学公式做了列举,没有展开;深度学习在day1和day2都有讲解)。通过MNIST和波士顿房价案例,可以熟悉Paddle的一般使用过程,不过该过程属于静态图模型,大概浏览即可。

Day01 新冠疫情可视化

主要学习数据爬取和PyEchart的使用。该部分没有涉及深度学习内容。

Day02 手势识别

熟悉数据处理和DNN的书写与训练,目标是手势识别(分类问题)。这一天算是正式开始接触深度学习,基本是使用全连接层,还不能算是卷积网络。这部分我觉得应该掌握动态图下设计的基本程序结构,大体上分为6步。

(1)准备训练和测试数据

该部分主要学习训练数据和测试数据的划分方法。

# Build train/test list files ("path\tlabel" per line) from the dataset folder.
# Every 10th image of each class goes to the test list, the rest to the train list.
data_path = '/home/aistudio/data/data23668/Dataset'
character_folders = os.listdir(data_path)
# print(character_folders)

# Start from clean list files.
if os.path.exists('./train_data.txt'):
    os.remove('./train_data.txt')
if os.path.exists('./test_data.txt'):
    os.remove('./test_data.txt')

# Open both list files once (original reopened them per folder in append mode;
# the resulting file contents are identical).
with open('./train_data.txt', 'a') as f_train, open('./test_data.txt', 'a') as f_test:
    for character_folder in character_folders:
        if character_folder == '.DS_Store':  # macOS metadata folder, skip
            continue
        character_imgs = os.listdir(os.path.join(data_path, character_folder))
        count = 0
        for img in character_imgs:
            if img == '.DS_Store':
                continue
            if count % 10 == 0:  # controls the train/test split ratio (1 in 10 -> test)
                # line format: "<path>\t<label>"
                f_test.write(os.path.join(data_path, character_folder, img) + '\t' + character_folder + '\n')
            else:
                f_train.write(os.path.join(data_path, character_folder, img) + '\t' + character_folder + '\n')
            count += 1
print('列表已生成')

(2)数据读取器

# Reader definitions for the day-2 gesture dataset.
# NOTE(review): in the scraped original, `from PIL import Image` was fused into the
# middle of this cell; it is hoisted to the top here.
from PIL import Image


def data_mapper(sample):
    """Load one (path, label) sample: resize to 100x100, HWC->CHW, scale to [0, 1]."""
    img, label = sample
    img = Image.open(img)
    img = img.resize((100, 100), Image.ANTIALIAS)
    img = np.array(img).astype('float32')
    img = img.transpose((2, 0, 1))  # HWC -> CHW
    img = img / 255.0               # normalize pixel values
    return img, label


def data_reader(data_list_path):
    """Return a parallel reader over a "path\tlabel" list file."""
    def reader():
        with open(data_list_path, 'r') as f:
            lines = f.readlines()
            for line in lines:
                img, label = line.split('\t')
                yield img, int(label)  # generator of (path, label)
    # xmap_readers applies data_mapper in parallel worker threads, buffer size 512.
    return paddle.reader.xmap_readers(data_mapper, reader, cpu_count(), 512)

(3)定义网络

# Fully-connected network for 10-class gesture classification.
class MyDNN(fluid.dygraph.Layer):
    """Three ReLU Linear layers followed by a softmax output layer.

    In Paddle 1.x, Linear acts on the last axis, so hidden1..3 operate on the
    width-100 axis of the (N, 3, 100, 100) input; the tensor is then flattened
    to 3*100*100 features for the final classifier.
    """

    def __init__(self):
        super(MyDNN, self).__init__()
        self.hidden1 = Linear(100, 100, act='relu')
        self.hidden2 = Linear(100, 100, act='relu')
        self.hidden3 = Linear(100, 100, act='relu')
        self.hidden4 = Linear(3 * 100 * 100, 10, act='softmax')  # 10 gesture classes

    def forward(self, input):
        x = self.hidden1(input)
        x = self.hidden2(x)
        x = self.hidden3(x)
        # flatten to (N, 3*100*100) before the classifier
        x = fluid.layers.reshape(x, shape=[-1, 3 * 100 * 100])
        y = self.hidden4(x)
        return y

(4)训练

# Train MyDNN in dygraph mode.
# FIX(review): in the scraped original the closing parens of SGDOptimizer(...)
# were swallowed by an inline comment, leaving the call unbalanced.
with fluid.dygraph.guard():
    model = MyDNN()  # instantiate the model
    model.train()    # switch to training mode
    # SGD optimizer, learning rate 0.001
    opt = fluid.optimizer.SGDOptimizer(learning_rate=0.001,
                                       parameter_list=model.parameters())
    epochs_num = 5  # number of epochs
    for pass_num in range(epochs_num):
        for batch_id, data in enumerate(train_reader()):
            # batch of (img, label) -> NCHW float array / int64 column vector
            images = np.array([x[0].reshape(3, 100, 100) for x in data], np.float32)
            labels = np.array([x[1] for x in data]).astype('int64')
            labels = labels[:, np.newaxis]
            # print(images.shape)
            image = fluid.dygraph.to_variable(images)
            label = fluid.dygraph.to_variable(labels)
            predict = model(image)  # forward pass
            # print(predict)
            loss = fluid.layers.cross_entropy(predict, label)
            avg_loss = fluid.layers.mean(loss)          # scalar loss
            acc = fluid.layers.accuracy(predict, label)  # batch accuracy
            if batch_id != 0 and batch_id % 50 == 0:
                print("train_pass:{},batch_id:{},train_loss:{},train_acc:{}".format(
                    pass_num, batch_id, avg_loss.numpy(), acc.numpy()))
            avg_loss.backward()
            opt.minimize(avg_loss)
            model.clear_gradients()
    fluid.save_dygraph(model.state_dict(), 'MyDNN')  # save trained parameters

(5)模型测试

# Validate the saved MyDNN model on the test set.
with fluid.dygraph.guard():
    accs = []
    model_dict, _ = fluid.load_dygraph('MyDNN')
    model = MyDNN()
    model.load_dict(model_dict)  # load trained parameters
    model.eval()  # evaluation mode (original comment wrongly said "training mode")
    for batch_id, data in enumerate(test_reader()):
        images = np.array([x[0].reshape(3, 100, 100) for x in data], np.float32)
        labels = np.array([x[1] for x in data]).astype('int64')
        labels = labels[:, np.newaxis]
        image = fluid.dygraph.to_variable(images)
        label = fluid.dygraph.to_variable(labels)
        predict = model(image)
        acc = fluid.layers.accuracy(predict, label)
        accs.append(acc.numpy()[0])
    avg_acc = np.mean(accs)  # mean accuracy over all test batches
    print(avg_acc)

(6)预测

# Predict the class of a single gesture image with the saved model.
with fluid.dygraph.guard():
    infer_path = '手势.JPG'
    model = MyDNN()  # instantiate the model
    model_dict, _ = fluid.load_dygraph('MyDNN')
    model.load_dict(model_dict)  # load trained parameters
    model.eval()  # evaluation mode
    infer_img = load_image(infer_path)  # helper defined in an earlier cell
    infer_img = np.array(infer_img).astype('float32')
    infer_img = infer_img[np.newaxis, :, :, :]  # add batch dimension
    infer_img = fluid.dygraph.to_variable(infer_img)
    result = model(infer_img)
    display(Image.open('手势.JPG'))
    print(np.argmax(result.numpy()))  # index of the most probable class

Day03 车牌识别

主要学习卷积网络LeNet,生成字符图像列表,目标是车牌字符分类问题;另外使用了OpenCV来分割并存储字符图像,进而预测结果。

对于该部分数据准备过程,与day2基本相似,不过这边种类更多一些。数据读取器的buffer也要相应增加。不过源图为灰度图,所以不再有转置处理。

# Readers over the character list files generated in the previous step.
def data_mapper(sample):
    """Load one (path, label) sample as a flattened grayscale image in [0, 1]."""
    img, label = sample
    img = paddle.dataset.image.load_image(file=img, is_color=False)  # grayscale, no transpose needed
    img = img.flatten().astype('float32') / 255.0
    return img, label


def data_reader(data_list_path):
    """Return a parallel reader over a "path\tlabel" list file."""
    def reader():
        with open(data_list_path, 'r') as f:
            lines = f.readlines()
            for line in lines:
                img, label = line.split('\t')
                yield img, int(label)
    # larger buffer (1024) than day 2 because there are more classes/images
    return paddle.reader.xmap_readers(data_mapper, reader, cpu_count(), 1024)

在网络实现代码上,与经典LeNet稍有差别,主要在Pooling部分。

class MyLeNet(fluid.dygraph.Layer):
    """LeNet variant for 65-class license-plate character recognition.

    Differs from the classic LeNet mainly in the pooling layers (stride 1).
    The final reshape to 32*10*10 implies a 1x20x20 grayscale input
    (20 -conv5-> 16 -pool2/s1-> 15 -conv3-> 13 -pool2/s1-> 12 -conv3-> 10).
    """

    def __init__(self):
        super(MyLeNet, self).__init__()
        self.hidden1_1 = Conv2D(1, 28, 5, 1)   # in=1 ch, out=28 ch, kernel 5, stride 1
        self.hidden1_2 = Pool2D(pool_size=2, pool_type='max', pool_stride=1)
        self.hidden2_1 = Conv2D(28, 32, 3, 1)
        self.hidden2_2 = Pool2D(pool_size=2, pool_type='max', pool_stride=1)
        self.hidden3 = Conv2D(32, 32, 3, 1)
        self.hidden4 = Linear(32 * 10 * 10, 65, act='softmax')  # 65 plate characters

    def forward(self, input):
        x = self.hidden1_1(input)
        x = self.hidden1_2(x)
        x = self.hidden2_1(x)
        x = self.hidden2_2(x)
        x = self.hidden3(x)
        x = fluid.layers.reshape(x, shape=[-1, 32 * 10 * 10])  # flatten for classifier
        y = self.hidden4(x)
        return y

训练和验证过程与day2基本类似。

预测过程由于要分割图片,使用opencv。利用cv2.threshold提取文字。

# Segment the plate image into single-character images and save them to disk.
license_plate = cv2.imread('./车牌.png')
gray_plate = cv2.cvtColor(license_plate, cv2.COLOR_RGB2GRAY)
# binarize: text pixels become 255, background 0
ret, binary_plate = cv2.threshold(gray_plate, 175, 255, cv2.THRESH_BINARY)
result = []
print(binary_plate.shape)  # debug output kept from the original notebook
print(binary_plate)
print(binary_plate.shape[1])
# column-wise sum of white pixels; zero columns are gaps between characters
for col in range(binary_plate.shape[1]):  # e.g. 722 columns
    result.append(0)  # default value
    for row in range(binary_plate.shape[0]):  # e.g. 170 rows
        result[col] = result[col] + binary_plate[row][col] / 255
character_dict = {}
num = 0
i = 0
while i < len(result):  # scan columns for runs of non-zero sums
    if result[i] == 0:
        i += 1
    else:
        index = i + 1
        # NOTE(review): if the last character touches the image edge this inner
        # loop can index past the end of `result` — confirm inputs always have a
        # trailing blank column.
        while result[index] != 0:
            index += 1
        character_dict[num] = [i, index - 1]  # column span of one character
        num += 1
        i = index
for i in range(8):
    if i == 2:  # index 2 is the separator dot on the plate, not a character
        continue
    padding = (170 - (character_dict[i][1] - character_dict[i][0])) / 2
    # pad both sides with zeros to a roughly square 170-wide crop
    ndarray = np.pad(binary_plate[:, character_dict[i][0]:character_dict[i][1]],
                     ((0, 0), (int(padding), int(padding))),
                     'constant', constant_values=(0, 0))
    ndarray = cv2.resize(ndarray, (20, 20))  # downscale to the model's input size
    cv2.imwrite('./' + str(i) + '.png', ndarray)


def load_image(path):
    """Load a grayscale image as a float array in [0, 1] with a leading channel axis."""
    img = paddle.dataset.image.load_image(file=path, is_color=False)
    img = img.astype('float32')
    img = img[np.newaxis, ] / 255.0
    return img

构建标签字典,用于映射显示车牌

# Map folder names (pinyin abbreviations) to display characters for the plate.
match = {'A': 'A', 'B': 'B', 'C': 'C', 'D': 'D', 'E': 'E', 'F': 'F', 'G': 'G',
         'H': 'H', 'I': 'I', 'J': 'J', 'K': 'K', 'L': 'L', 'M': 'M', 'N': 'N',
         'O': 'O', 'P': 'P', 'Q': 'Q', 'R': 'R', 'S': 'S', 'T': 'T', 'U': 'U',
         'V': 'V', 'W': 'W', 'X': 'X', 'Y': 'Y', 'Z': 'Z',
         'yun': '云', 'cuan': '川', 'hei': '黑', 'zhe': '浙', 'ning': '宁',
         'jin': '津', 'gan': '赣', 'hu': '沪', 'liao': '辽', 'jl': '吉',
         'qing': '青', 'zang': '藏', 'e1': '鄂', 'meng': '蒙', 'gan1': '甘',
         'qiong': '琼', 'shan': '陕', 'min': '闽', 'su': '苏', 'xin': '新',
         'wan': '皖', 'jing': '京', 'xiang': '湘', 'gui': '贵', 'yu1': '渝',
         'yu': '豫', 'ji': '冀', 'yue': '粤', 'gui1': '桂', 'sx': '晋', 'lu': '鲁',
         '0': '0', '1': '1', '2': '2', '3': '3', '4': '4',
         '5': '5', '6': '6', '7': '7', '8': '8', '9': '9'}
# Build LABEL: model class index (as str) -> display character.
# NOTE(review): LABEL_temp is defined in an earlier notebook cell (not shown here);
# it is presumably the class-index -> folder-name mapping built with the list files.
L = 0
LABEL = {}
for V in LABEL_temp.values():
    LABEL[str(L)] = match[V]
    L += 1
print(LABEL)

预测过程

# 构建预测动态图过程 (FIX(review): this leading comment lost its '#' in the original)
with fluid.dygraph.guard():
    model = MyLeNet()  # instantiate the model
    model_dict, _ = fluid.load_dygraph('MyLeNet')
    model.load_dict(model_dict)  # load trained parameters
    model.eval()  # evaluation mode
    lab = []
    for i in range(8):
        if i == 2:  # index 2 is the plate's separator dot, skipped at segmentation
            continue
        infer_imgs = []
        infer_imgs.append(load_image('./' + str(i) + '.png'))
        infer_imgs = np.array(infer_imgs)
        infer_imgs = fluid.dygraph.to_variable(infer_imgs)
        result = model(infer_imgs)
        lab.append(np.argmax(result.numpy()))  # predicted class index
    print(lab)
    display(Image.open('./车牌.png'))
    print('\n车牌识别结果为:', end='')
    for i in range(len(lab)):
        print(LABEL[str(lab[i])], end='')  # map class index back to the character

Day04 口罩分类

主要学习构造VGG网络实现分类处理。

代码结构较之前有所变化,基本结构如下:

(1)参数字典

# Central configuration for the day-4 mask-classification training run.
train_parameters = {
    "input_size": [3, 224, 224],                            # input image shape (CHW)
    "class_dim": -1,                                        # number of classes; filled in by get_data_list
    "src_path": "/home/aistudio/work/maskDetect.zip",       # raw dataset archive
    "target_path": "/home/aistudio/data/",                  # extraction target
    "train_list_path": "/home/aistudio/data/train.txt",     # train list file
    "eval_list_path": "/home/aistudio/data/eval.txt",       # eval list file
    "readme_path": "/home/aistudio/data/readme.json",       # dataset summary json
    "label_dict": {},                                       # class index -> class name; filled in later
    "num_epochs": 1,                                        # training epochs
    "train_batch_size": 8,                                  # batch size
    "learning_strategy": {                                  # optimizer settings
        "lr": 0.001                                         # learning rate
    }
}

(2)数据准备

最大变化是乱序部分

def get_data_list(target_path, train_list_path, eval_list_path):
    """Scan the extracted dataset, write shuffled train/eval list files and a
    readme.json summary; fills train_parameters['label_dict'] and ['class_dim']."""
    class_detail = []          # per-class summary records for readme.json
    data_list_path = target_path + "maskDetect/"
    class_dirs = os.listdir(data_list_path)  # one folder per class
    all_class_images = 0       # total image count
    class_label = 0            # running class index
    class_dim = 0              # number of classes
    trainer_list = []          # lines destined for train.txt
    eval_list = []             # lines destined for eval.txt
    # iterate classes, e.g. ['maskimages', 'nomaskimages']
    for class_dir in class_dirs:
        if class_dir != ".DS_Store":  # skip macOS metadata
            class_dim += 1
            class_detail_list = {}
            eval_sum = 0
            trainer_sum = 0
            class_sum = 0          # images in this class
            path = data_list_path + class_dir
            img_paths = os.listdir(path)
            for img_path in img_paths:  # every image of this class
                name_path = path + '/' + img_path
                if class_sum % 10 == 0:  # every 10th image goes to eval
                    eval_sum += 1
                    eval_list.append(name_path + "\t%d" % class_label + "\n")
                else:
                    trainer_sum += 1
                    trainer_list.append(name_path + "\t%d" % class_label + "\n")
                class_sum += 1
                all_class_images += 1
            # per-class entry for readme.json
            class_detail_list['class_name'] = class_dir
            class_detail_list['class_label'] = class_label
            class_detail_list['class_eval_images'] = eval_sum
            class_detail_list['class_trainer_images'] = trainer_sum
            class_detail.append(class_detail_list)
            train_parameters['label_dict'][str(class_label)] = class_dir
            class_label += 1
    train_parameters['class_dim'] = class_dim
    # shuffle manually (not via paddle) and write the list files
    random.shuffle(eval_list)
    with open(eval_list_path, 'a') as f:
        for eval_image in eval_list:
            f.write(eval_image)
    random.shuffle(trainer_list)
    with open(train_list_path, 'a') as f2:
        for train_image in trainer_list:
            f2.write(train_image)
    # dataset summary json
    readjson = {}
    readjson['all_class_name'] = data_list_path  # parent directory
    readjson['all_class_images'] = all_class_images
    readjson['class_detail'] = class_detail
    jsons = json.dumps(readjson, sort_keys=True, indent=4, separators=(',', ': '))
    with open(train_parameters['readme_path'], 'w') as f:
        f.write(jsons)
    print('生成数据列表完成!')


def custom_reader(file_list):
    """Return a reader over a "path\tlabel" list file yielding (CHW float image, label)."""
    def reader():
        with open(file_list, 'r') as f:
            lines = [line.strip() for line in f]
            for line in lines:
                img_path, lab = line.strip().split('\t')
                img = Image.open(img_path)
                if img.mode != 'RGB':
                    img = img.convert('RGB')
                img = img.resize((224, 224), Image.BILINEAR)  # bilinear resize
                img = np.array(img).astype('float32')
                img = img.transpose((2, 0, 1))  # HWC -> CHW
                img = img / 255                 # normalize pixels
                yield img, int(lab)
    return reader


# ---- parameter initialization ----
src_path = train_parameters['src_path']
target_path = train_parameters['target_path']
train_list_path = train_parameters['train_list_path']
eval_list_path = train_parameters['eval_list_path']
batch_size = train_parameters['train_batch_size']
# extract the raw archive (helper defined in an earlier cell)
unzip_data(src_path, target_path)
# truncate train.txt / eval.txt before regenerating them
with open(train_list_path, 'w') as f:
    f.seek(0)
    f.truncate()
with open(eval_list_path, 'w') as f:
    f.seek(0)
    f.truncate()
get_data_list(target_path, train_list_path, eval_list_path)
# batched data providers
train_reader = paddle.batch(custom_reader(train_list_path), batch_size=batch_size, drop_last=True)
eval_reader = paddle.batch(custom_reader(eval_list_path), batch_size=batch_size, drop_last=True)

(3)定义模型

目标实现一个简化的VGG

(此处原有VGG网络结构示意图,原图已丢失)

class ConvPool(fluid.dygraph.Layer):
    """A group of `groups` stacked Conv2D layers followed by one Pool2D."""

    def __init__(self,
                 num_channels,      # input channels of the group
                 num_filters,       # output channels of each conv
                 filter_size,       # conv kernel size
                 pool_size,         # pooling kernel size
                 pool_stride,       # pooling stride
                 groups,            # number of stacked convs
                 pool_padding=0,
                 pool_type='max',
                 conv_stride=1,
                 conv_padding=1,
                 act=None):
        super(ConvPool, self).__init__()
        self._conv2d_list = []
        in_channels = num_channels
        for i in range(groups):
            # FIX(review): the original passed `num_channels` to every conv in the
            # group, but after the first conv the input has `num_filters` channels.
            conv2d = self.add_sublayer(
                'bb_%d' % i,
                fluid.dygraph.Conv2D(num_channels=in_channels,
                                     num_filters=num_filters,
                                     filter_size=filter_size,
                                     stride=conv_stride,
                                     padding=conv_padding,
                                     act=act))
            self._conv2d_list.append(conv2d)
            in_channels = num_filters
        self._pool2d = fluid.dygraph.Pool2D(pool_size=pool_size,
                                            pool_type=pool_type,
                                            pool_stride=pool_stride,
                                            pool_padding=pool_padding)

    def forward(self, inputs):
        x = inputs
        for conv in self._conv2d_list:
            x = conv(x)
        x = self._pool2d(x)
        return x


class VGGNet(fluid.dygraph.Layer):
    """Simplified VGG: five conv/pool groups, then three FC layers -> 2-way softmax."""

    def __init__(self):
        super(VGGNet, self).__init__()
        # ConvPool(in_ch, out_ch, kernel, pool_size, pool_stride, groups)
        self.convpool01 = ConvPool(3, 64, 3, 2, 2, 2, act='relu')
        self.convpool02 = ConvPool(64, 128, 3, 2, 2, 2, act='relu')
        self.convpool03 = ConvPool(128, 256, 3, 2, 2, 3, act='relu')
        self.convpool04 = ConvPool(256, 512, 3, 2, 2, 3, act='relu')
        self.convpool05 = ConvPool(512, 512, 3, 2, 2, 3, act='relu')
        self.pool_5_shape = 512 * 7 * 7  # flattened size for 224x224 inputs
        self.fc01 = fluid.dygraph.Linear(self.pool_5_shape, 4096, act='relu')
        self.fc02 = fluid.dygraph.Linear(4096, 4096, act='relu')
        self.fc03 = fluid.dygraph.Linear(4096, 2, act='softmax')  # mask / no-mask

    def forward(self, inputs, label=None):
        """Forward pass; if `label` is given, also returns batch accuracy.

        (Per-layer debug `print(out.shape)` calls from the original were removed.)
        """
        out = self.convpool01(inputs)
        out = self.convpool02(out)
        out = self.convpool03(out)
        out = self.convpool04(out)
        out = self.convpool05(out)
        out = fluid.layers.reshape(out, shape=[-1, 512 * 7 * 7])
        out = self.fc01(out)
        out = self.fc02(out)
        out = self.fc03(out)
        if label is not None:
            acc = fluid.layers.accuracy(input=out, label=label)
            return out, acc
        else:
            return out

(4)模型训练

主要使用Adam优化器,较day3有所变化

# Model training with the Adam optimizer (a change from day 3's SGD).
# NOTE(review): all_train_iter / all_train_iters / all_train_costs / all_train_accs
# and the draw_* helpers are presumably defined in an earlier notebook cell.
# with fluid.dygraph.guard(place=fluid.CUDAPlace(0)):  # GPU variant
with fluid.dygraph.guard():
    print(train_parameters['class_dim'])
    print(train_parameters['label_dict'])
    vgg = VGGNet()
    optimizer = fluid.optimizer.AdamOptimizer(
        learning_rate=train_parameters['learning_strategy']['lr'],
        parameter_list=vgg.parameters())
    for epoch_num in range(train_parameters['num_epochs']):
        for batch_id, data in enumerate(train_reader()):
            dy_x_data = np.array([x[0] for x in data]).astype('float32')
            y_data = np.array([x[1] for x in data]).astype('int64')
            y_data = y_data[:, np.newaxis]
            # numpy -> dygraph variables
            img = fluid.dygraph.to_variable(dy_x_data)
            label = fluid.dygraph.to_variable(y_data)
            out, acc = vgg(img, label)
            loss = fluid.layers.cross_entropy(out, label)
            avg_loss = fluid.layers.mean(loss)
            avg_loss.backward()        # backward pass
            optimizer.minimize(avg_loss)
            vgg.clear_gradients()      # reset grads for the next step
            # accumulate curves for plotting
            all_train_iter = all_train_iter + train_parameters['train_batch_size']
            all_train_iters.append(all_train_iter)
            all_train_costs.append(loss.numpy()[0])
            all_train_accs.append(acc.numpy()[0])
            if batch_id % 1 == 0:  # log every batch
                print("Loss at epoch {} step {}: {}, acc: {}".format(
                    epoch_num, batch_id, avg_loss.numpy(), acc.numpy()))
    draw_train_process("training", all_train_iters, all_train_costs,
                       all_train_accs, "trainning cost", "trainning acc")
    draw_process("trainning loss", "red", all_train_iters, all_train_costs, "trainning loss")
    draw_process("trainning acc", "green", all_train_iters, all_train_accs, "trainning acc")
    fluid.save_dygraph(vgg.state_dict(), "vgg")  # save model parameters
    print("Final loss: {}".format(avg_loss.numpy()))

Day05 PaddleHub体验

主要学习的模型迁移的使用方法。

迁移使用过程大体如下:

(1)查询结构

# PaddleHub CLI: list hub modules whose names match "resnet"
hub search resnet

(2)加载预训练模型

# Load the pretrained ResNet-50 v2 (ImageNet) module and get its program context;
# trainable=True allows its weights to be fine-tuned.
import paddlehub as hub

module = hub.Module(name="resnet_v2_50_imagenet")
input_dict, output_dict, program = module.context(trainable=True)

(3)数据准备

# Use a dataset bundled with PaddleHub, with preprocessing parameters
# (size, mean, std) taken from the pretrained module itself.
dataset = hub.dataset.DogCat()
data_reader = hub.reader.ImageClassificationReader(
    image_width=module.get_expected_image_width(),
    image_height=module.get_expected_image_height(),
    images_mean=module.get_pretrained_images_mean(),
    images_std=module.get_pretrained_images_std(),
    dataset=dataset)

(4)配置策略

# Fine-tune run configuration.
config = hub.RunConfig(
    use_cuda=False,                                   # train on GPU? default False
    num_epoch=1,                                      # fine-tune epochs
    checkpoint_dir="cv_finetune_turtorial_demo",      # checkpoint path (auto-generated if unset)
    batch_size=32,                                    # adjust for GPU memory if use_cuda
    eval_interval=50,                                 # evaluate every 50 steps (default 100)
    strategy=hub.finetune.strategy.DefaultFinetuneStrategy())  # fine-tune strategy

(5)迁移组网

# Build the transfer-learning task: ImageClassifierTask appends fully-connected
# layer(s) on top of the module's feature map for fine-tuning.
feature_map = output_dict["feature_map"]
feed_list = [input_dict["image"].name]
task = hub.ImageClassifierTask(
    data_reader=data_reader,          # data provider
    feed_list=feed_list,              # names of variables to feed
    feature=feature_map,              # input feature tensor
    num_classes=dataset.num_labels,   # number of target classes
    config=config)                    # run configuration

(6)Fine-tune

# Run fine-tuning with periodic evaluation as configured in `config`.
run_states = task.finetune_and_eval()

(7)预测

# Predict labels for image files with the fine-tuned task.
import numpy as np

data = ["test_img_dog.jpg"]
label_map = dataset.label_dict()  # class index -> label name
index = 0
run_states = task.predict(data=data)
results = [run_state.run_results for run_state in run_states]
for batch_result in results:
    print(batch_result)
    # argmax over the class axis -> predicted class index per sample
    batch_result = np.argmax(batch_result, axis=2)[0]
    print(batch_result)
    for result in batch_result:
        index += 1
        result = label_map[result]
        print("input %i is %s, and the predict result is %s" %
              (index, data[index - 1], result))

Day06-PaddleSlim模型压缩

主要学习模型压缩优化方法,了解了四种主流方法。

本讲作业关于模型量化,其一般处理过程:

(1)构建模型

# Build a MobileNet classification program (1x28x28 inputs, 10 classes) via
# PaddleSlim's model zoo; pick GPU or CPU according to the build.
use_gpu = fluid.is_compiled_with_cuda()  # GPU available?
exe, train_program, val_program, inputs, outputs = slim.models.image_classification(
    "MobileNet", [1, 28, 28], 10, use_gpu=use_gpu)
place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace()  # executor place

(2)定义输入数据

# MNIST batch readers and a feeder binding numpy batches to program inputs.
import paddle.dataset.mnist as reader

train_reader = paddle.batch(reader.train(), batch_size=128, drop_last=True)
test_reader = paddle.batch(reader.test(), batch_size=128, drop_last=True)
data_feeder = fluid.DataFeeder(inputs, place)

(3)训练和测试

def train(prog):
    """Run one pass over the train set with `prog`, logging every 100 steps."""
    # `step` replaces the original's `iter`, which shadowed the builtin
    step = 0
    for data in train_reader():
        acc1, acc5, loss = exe.run(prog, feed=data_feeder.feed(data), fetch_list=outputs)
        if step % 100 == 0:
            print('train iter={}, top1={}, top5={}, loss={}'.format(
                step, acc1.mean(), acc5.mean(), loss.mean()))
        step += 1


def test(prog):
    """Evaluate `prog` over the test set; print per-100-step and final top1/top5."""
    step = 0
    res = [[], []]  # res[0]: per-batch top1, res[1]: per-batch top5
    for data in test_reader():
        acc1, acc5, loss = exe.run(prog, feed=data_feeder.feed(data), fetch_list=outputs)
        if step % 100 == 0:
            print('test iter={}, top1={}, top5={}, loss={}'.format(
                step, acc1.mean(), acc5.mean(), loss.mean()))
        res[0].append(acc1.mean())
        res[1].append(acc5.mean())
        step += 1
    print('final test result top1={}, top5={}'.format(
        np.array(res[0]).mean(), np.array(res[1]).mean()))

(4)量化模型

# Build quantization-aware training/eval programs with PaddleSlim.
place = exe.place
import paddleslim.quant as quant

# Full config options kept for reference (defaults are used below):
# config = {
#     'weight_quantize_type': 'abs_max',
#     'activation_quantize_type': 'moving_average_abs_max',
#     'weight_bits': 8,
#     'activation_bits': 8,
#     'not_quant_pattern': ['skip_quant'],
#     'quantize_op_types': ['conv2d', 'depthwise_conv2d', 'mul'],
#     'dtype': 'int8',
#     'window_size': 10000,
#     'moving_rate': 0.9
# }
config = {'weight_quantize_type': 'abs_max',
          'activation_quantize_type': 'moving_average_abs_max'}
quant_program = quant.quant_aware(train_program, place, config, for_test=False)      # training program
val_quant_program = quant.quant_aware(val_program, place, config, for_test=True)     # eval program

(5)训练和测试量化后的模型

# Train the quantization-aware program with the train() helper defined above.
train(quant_program)

比赛 人流密度检测

主要学习人流密度检测的基本方法,根据提供的base版本开发网络,实现对人流的识别。讲解中明确强调了样本增强的内容,这部分很重要。

'''Load required libraries.'''
import zipfile
import paddle
import paddle.fluid as fluid
import matplotlib.pyplot as plt
import matplotlib.image as mping
import json
import numpy as np
import cv2
import sys
import time
import h5py
from matplotlib import pyplot as plt
from scipy.ndimage.filters import gaussian_filter
import scipy
from matplotlib import cm as CM
from paddle.utils.plot import Ploter
from PIL import Image
from PIL import ImageFile

ImageFile.LOAD_TRUNCATED_IMAGES = True

'''Inspect train.json; the 'annotations' entries carry the labeling info.'''
f = open('/home/aistudio/data/data1917/train.json', encoding='utf-8')
content = json.load(f)

'''Strip the leading "stage1/" from every annotation's name.'''
# NOTE(review): str.lstrip strips a *character set*, not a prefix; it works here
# because '/' stops the first strip, but removeprefix would be the safe idiom.
for j in range(len(content['annotations'])):
    content['annotations'][j]['name'] = \
        content['annotations'][j]['name'].lstrip('stage1').lstrip('/')


def gaussian_filter_density(gt):
    """Turn a binary head-position map into a density map via Gaussian smoothing."""
    density = np.zeros(gt.shape, dtype=np.float32)
    gt_count = np.count_nonzero(gt)  # number of annotated heads
    if gt_count == 0:                # nothing annotated: all-zero density
        return density
    # (x, y) coordinates of each non-zero pixel
    pts = np.array(list(zip(np.nonzero(gt)[1].ravel(), np.nonzero(gt)[0].ravel())))
    for i, pt in enumerate(pts):
        pt2d = np.zeros(gt.shape, dtype=np.float32)
        pt2d[pt[1], pt[0]] = 1.
        if gt_count > 1:
            # fixed sigma; the adaptive kNN formula was left commented out:
            # sigma = (distances[i][1]+distances[i][2]+distances[i][3])*0.1
            sigma = 25
        else:
            sigma = np.average(np.array(gt.shape)) / 2. / 2.
        density += scipy.ndimage.filters.gaussian_filter(pt2d, sigma, mode='constant')
    return density


def picture_opt(img, ann):
    """Resize/normalize the image and convert box annotations to point annotations.

    Returns (resized image array in [0,1], list of (x, y) points scaled to the
    1/8-resolution density-map grid).
    """
    size_x, size_y = img.size
    train_img_size = (640, 480)
    img = img.resize(train_img_size, Image.ANTIALIAS)
    img = np.array(img)
    img = img / 255.0
    gt = []
    for b_l in range(len(ann)):
        # boxes ('w' present) are reduced to a point via the horizontal midpoint
        if 'w' in ann[b_l].keys():
            x = (ann[b_l]['x'] + (ann[b_l]['x'] + ann[b_l]['w'])) / 2
            y = ann[b_l]['y'] + 20
            x = (x * 640 / size_x) / 8
            y = (y * 480 / size_y) / 8
            gt.append((x, y))
        else:
            x = ann[b_l]['x']
            y = ann[b_l]['y']
            x = (x * 640 / size_x) / 8
            y = (y * 480 / size_y) / 8
            gt.append((x, y))
    return img, gt


def ground(img, gt):
    """Build the ground-truth density map (1/8 of the image size) from points."""
    imgs = img
    x = imgs.shape[0] / 8
    y = imgs.shape[1] / 8
    k = np.zeros((int(x), int(y)))
    for i in range(0, len(gt)):
        if int(gt[i][1]) < int(x) and int(gt[i][0]) < int(y):
            k[int(gt[i][1]), int(gt[i][0])] = 1
    k = gaussian_filter_density(k)
    return k


'''Data generator.'''
def train_set():
    def inner():
        for ig_index in range(2000):  # iterate all images
            # skip malformed annotation entries
            if len(content['annotations'][ig_index]['annotation']) == 2:
                continue
            if len(content['annotations'][ig_index]['annotation']) == 3:
                continue
            if content['annotations'][ig_index]['ignore_region']:
                # zero out ignore regions (at most 2 per image)
                ig_list = []   # polygon points of ignore region 1
                ig_list1 = []  # polygon points of ignore region 2
                if len(content['annotations'][ig_index]['ignore_region']) == 1:
                    ign_rge = content['annotations'][ig_index]['ignore_region'][0]
                    for ig_len in range(len(ign_rge)):
                        ig_list.append([ign_rge[ig_len]['x'], ign_rge[ig_len]['y']])
                    ig_cv_img = cv2.imread(content['annotations'][ig_index]['name'])
                    pts = np.array(ig_list, np.int32)
                    # fill the ignore polygon with black pixels
                    cv2.fillPoly(ig_cv_img, [pts], (0, 0, 0), cv2.LINE_AA)
                    ig_img = Image.fromarray(cv2.cvtColor(ig_cv_img, cv2.COLOR_BGR2RGB))
                    ann = content['annotations'][ig_index]['annotation']
                    ig_im, gt = picture_opt(ig_img, ann)
                    k = ground(ig_im, gt)
                    groundtruth = np.asarray(k)
                    groundtruth = groundtruth.T.astype('float32')
                    ig_im = ig_im.transpose().astype('float32')
                    yield ig_im, groundtruth
                if len(content['annotations'][ig_index]['ignore_region']) == 2:
                    ign_rge = content['annotations'][ig_index]['ignore_region'][0]
                    ign_rge1 = content['annotations'][ig_index]['ignore_region'][1]
                    for ig_len in range(len(ign_rge)):
                        ig_list.append([ign_rge[ig_len]['x'], ign_rge[ig_len]['y']])
                    for ig_len1 in range(len(ign_rge1)):
                        ig_list1.append([ign_rge1[ig_len1]['x'], ign_rge1[ig_len1]['y']])
                    ig_cv_img2 = cv2.imread(content['annotations'][ig_index]['name'])
                    pts = np.array(ig_list, np.int32)
                    pts1 = np.array(ig_list1, np.int32)
                    cv2.fillPoly(ig_cv_img2, [pts], (0, 0, 0), cv2.LINE_AA)
                    cv2.fillPoly(ig_cv_img2, [pts1], (0, 0, 0), cv2.LINE_AA)
                    ig_img2 = Image.fromarray(cv2.cvtColor(ig_cv_img2, cv2.COLOR_BGR2RGB))
                    ann = content['annotations'][ig_index]['annotation']
                    ig_im, gt = picture_opt(ig_img2, ann)
                    k = ground(ig_im, gt)
                    # NOTE(review): the original immediately overwrote k with zeros
                    # here (k = np.zeros(...)), discarding the density map for all
                    # two-ignore-region images — almost certainly a bug, removed.
                    groundtruth = np.asarray(k)
                    groundtruth = groundtruth.T.astype('float32')
                    ig_im = ig_im.transpose().astype('float32')
                    yield ig_im, groundtruth
            else:
                img = Image.open(content['annotations'][ig_index]['name'])
                ann = content['annotations'][ig_index]['annotation']
                im, gt = picture_opt(img, ann)
                k = ground(im, gt)
                groundtruth = np.asarray(k)
                groundtruth = groundtruth.T.astype('float32')
                im = im.transpose().astype('float32')
                yield im, groundtruth
    return inner


BATCH_SIZE = 3  # samples per batch
# shuffled, batched training reader
# (the original defined BATCH_SIZE/train_reader twice; the duplicate was removed)
train_reader = paddle.batch(paddle.reader.shuffle(train_set(), buf_size=512),
                            batch_size=BATCH_SIZE)

这就是7日我总结的笔记,收获满满,学到了很多。

本内容不代表本网观点和政治立场,如有侵犯你的权益请联系我们处理。
网友评论
网友评论仅供其表达个人看法,并不表明网站立场。