Optimizing Neural Networks with Genetic Algorithms (GA/NSGA): GA-BP


After reading some papers and code online, I wrote two versions of GA-BP optimization code (strictly speaking, EGA-BP), going from simple to complex. The code in this article is written in Python 3 with PyTorch.

Version overview: Part 1 uses the GA to search for the network's optimal learning rate and number of hidden-layer neurons. Part 2 additionally applies genetic operations such as mutation to the network weights during training itself. Combined merges Part 1 and Part 2.

Note: the code below is written mainly to make plugging in your own data easy. To adapt it, modify the network structure in Net, point load_data in Train at your own file, choose a suitable loss function, and so on. geatpy is a genetic-algorithm library written by a well-known Chinese developer; this article assumes you already know how to use it. The only difference between GA and NSGA here is which algorithm template the code invokes, as the sketch below shows.
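
This is my own illustration of that template swap: build_algorithm and multi_objective are invented names, and it assumes geatpy's moea_NSGA2_templet as the NSGA-II template, taking the same keyword arguments the article already uses:

import geatpy as ea

def build_algorithm(problem, nind=10, maxgen=30, multi_objective=False):
    # the GA/NSGA choice is purely a template swap; population and settings are unchanged
    population = ea.Population(Encoding='RI', NIND=nind)
    if multi_objective:
        # NSGA-II template; the problem must then define M >= 2 objectives
        return ea.moea_NSGA2_templet(problem, population, MAXGEN=maxgen, logTras=0)
    # elitist single-objective GA template, as used throughout this article
    return ea.soea_EGA_templet(problem, population, MAXGEN=maxgen, logTras=0)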

The code is commented throughout; try reading through it. I wrote it when I was still a beginner, so there may be small mistakes.

A data file is available for testing. Format: the last column is the label; every other column is the data. The sketch below shows one way to generate such a file yourself.
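
This is my own addition, not the author's data: it writes a synthetic data.csv in the expected shape, seven feature columns plus a final label column, matching input_dimension = 7:

import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=(1000, 7))                                        # 7 feature columns
y = x @ rng.normal(size=(7, 1)) + 0.1 * rng.normal(size=(1000, 1))    # 1 label column
np.savetxt("data.csv", np.hstack([x, y]), delimiter=",")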

Comparing the two versions: using R^2 >= 0.96 on the test file above as the benchmark, Part 1 takes roughly 1 minute on my aging laptop and Combined roughly 20 seconds. I have not timed any other examples, and Combined is not guaranteed to work on every problem; it has some flaws, so pick whichever version suits your problem.

GA-BP Part 1: using the GA to find the network's optimal learning rate and number of hidden-layer neurons

import torch.nn as nn
import torch
import geatpy as ea
import numpy as np
import os
from sklearn.model_selection import train_test_split

input_dimension = 7
output_dimension = 1
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


# Custom network
class Net(nn.Module):
    def __init__(self, neurons_num):
        super(Net, self).__init__()
        self.hidden0 = torch.nn.Linear(input_dimension, neurons_num)
        self.hidden1 = torch.nn.Linear(neurons_num, neurons_num)
        self.hidden2 = torch.nn.Linear(neurons_num, neurons_num)
        self.hidden3 = torch.nn.Linear(neurons_num, output_dimension)

    def forward(self, x):
        x = torch.relu(self.hidden0(x))
        x = torch.relu(self.hidden1(x))
        x = torch.relu(self.hidden2(x))
        x = self.hidden3(x)
        return x


# R^2 (coefficient of determination)
def r2(y_test, y):
    return 1 - ((y_test - y) ** 2).sum(axis=0) / ((y.mean() - y) ** 2).sum(axis=0)


# Network training
class Train:
    train_x, train_y, test_x, test_y, model, lr, neurons_num, x, y, optimizer = \
        None, None, None, None, None, None, None, None, None, None

    def __init__(self):
        self.use_gpu = torch.cuda.is_available()
        # pick a loss function that suits your task
        # self.loss_fn = torch.nn.CrossEntropyLoss()
        self.loss_fn = torch.nn.MSELoss()
        self.load_data()

    # customize data loading here
    def load_data(self):
        with open('data.csv') as f:
            df = np.loadtxt(f, delimiter=",", skiprows=0)
        self.x = df[:, :-1]
        self.y = df[:, -1:]

    # rebuild the train/test split and the model
    def reload(self, learning_rate, neurons_num):
        train_x, test_x, train_y, test_y = train_test_split(self.x, self.y, test_size=0.3, random_state=42)
        self.train_x = torch.from_numpy(train_x).float().to(device)
        self.train_y = torch.from_numpy(train_y).float().to(device)
        self.test_x = torch.from_numpy(test_x).float().to(device)
        self.test_y = torch.from_numpy(test_y).float().to(device)
        self.model = Net(neurons_num).to(device)
        self.lr = learning_rate
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr)

    # train the network and return R^2 on the test set
    def train(self, n=10):
        for epoch in range(n):
            model_output = self.model(self.train_x)
            loss = self.loss_fn(model_output, self.train_y)
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
        model_output = self.model(self.test_x)
        return float(r2(model_output.data, self.test_y).cpu())


# Custom GA problem: evolve the learning rate and the number of hidden neurons
class My_nsga(ea.Problem):
    def __init__(self, epoch):
        if "result" not in os.listdir():
            os.makedirs("./result")
        name = 'GA-NET'
        M = 1                 # number of objectives
        maxormins = [-1] * M  # -1 means maximize
        Dim = 2               # decision variables: scaled learning rate, neurons_num
        varTypes = [1] * Dim  # 1 means integer variables
        lb = [10, 10]
        ub = [5000, 100]
        lbin = [1] * Dim
        ubin = [1] * Dim
        self.epoch = epoch
        self.train = Train()
        ea.Problem.__init__(self, name, M, maxormins, Dim, varTypes, lb, ub, lbin, ubin)

    # the objective is the R^2 returned by the trained network
    def evalVars(self, Vars):
        ans = np.zeros(len(Vars), dtype=float).reshape(len(Vars), 1)
        for i in range(len(Vars)):
            self.train.reload(Vars[i][0] / 100000, Vars[i][1])
            # the argument is the number of training epochs per network
            data = self.train.train(self.epoch)
            print("learning rate = {}, neurons num = {}, R^2 = {}".format(Vars[i][0] / 100000, Vars[i][1], data))
            torch.save(self.train.model, "./result/lr={}num={}epoch={}r2={}.pt".format(Vars[i][0] / 100000, Vars[i][1], self.epoch, round(data, 3)))
            # stop once the target accuracy is reached
            if data >= 1:
                torch.save(self.train.model, "lr={}num={}epoch={}r2={}.pt".format(Vars[i][0] / 100000, Vars[i][1], self.epoch, round(data, 3)))
                exit("Find!")
            ans[i][0] = data
        return ans


# Run the GA
class Run_nsga:
    def __init__(self, epoch=10, ndind=10, maxgen=10):
        problem = My_nsga(epoch)
        myAlgorithm = ea.soea_EGA_templet(problem, ea.Population(Encoding='RI', NIND=ndind), MAXGEN=maxgen, logTras=0)
        myAlgorithm.drawing = 0
        res = ea.optimize(myAlgorithm, seed=1, verbose=False, drawing=0, outputMsg=True, drawLog=False, saveFlag=False, dirName='result')
        print(res)
        print(res['Vars'][0])


if __name__ == "__main__":
    # arguments: epochs per network, population size, GA generations
    Run_nsga(10000, 10, 30)
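
Every evaluated network is saved whole under result/ via torch.save, so a checkpoint can be reloaded directly for prediction. A small sketch of my own (the filename is illustrative; the Net class must be defined or imported in the loading session):

import torch

model = torch.load("result/lr=0.001num=30epoch=10000r2=0.96.pt", map_location="cpu")
model.eval()

with torch.no_grad():
    sample = torch.rand(1, 7)  # one row with input_dimension = 7 features
    print(model(sample))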

GA-BP Part 2 and Combined: using the GA to perturb the network weights as well, to speed up training

Original reference code: "Genetic algorithm (GA) - optimizing a neural network (CNN) - pytorch (personally tested and working)", Vertira's blog on CSDN.

import random
import torch.nn as nn
import torch
import geatpy as ea
import numpy as np
import os
import copy
from sklearn.model_selection import train_test_split
from torch.distributions import Categorical

input_dimension = 7
output_dimension = 1
# shrink this for small datasets; ideally data_size / batch_size = NetTrainGA pop_size
batch_size = 100
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


# Custom network
class Net(torch.nn.Module):
    def __init__(self, neurons_num, lr):
        super(Net, self).__init__()
        self.layers = torch.nn.Sequential(
            torch.nn.Linear(input_dimension, neurons_num),
            torch.nn.ReLU(),
            torch.nn.Linear(neurons_num, neurons_num),
            torch.nn.ReLU(),
            torch.nn.Linear(neurons_num, neurons_num),
            torch.nn.ReLU(),
            torch.nn.Linear(neurons_num, output_dimension))
        self.optimizer = torch.optim.Adam(self.parameters(), lr=lr)

    def forward(self, x):
        return self.layers(x)

    def set_layer(self, layers):
        self.layers = layers


# R^2 (coefficient of determination)
def r2(y_test, y):
    return 1 - ((y_test - y) ** 2).sum(axis=0) / ((y.mean() - y) ** 2).sum(axis=0)


# Dataset wrapper, so you can plug in your own data
class Data(torch.utils.data.Dataset):
    def __init__(self, data, label):
        self.x = data
        self.y = label
        self.len = len(self.y)

    def __len__(self):
        return self.len

    def __getitem__(self, item):
        return self.x[item], self.y[item]


# GA-assisted network training
class NetTrainGA:
    def __init__(self, _pop_size=10, _r_mutation=0.1, _p_mutation=0.1, _elite_num=6, stddev=0.1):
        self.test_x, self.test_y, self.trainSetLoader, self.x, self.y = None, None, None, None, None  # data storage
        self.pop_size = _pop_size      # population size
        self.r_mutation = _r_mutation  # probability that a given layer mutates within a mutating individual
        self.p_mutation = _p_mutation  # probability that an individual mutates
        self.elite_num = _elite_num    # number of elites
        self.chroms = []               # stores every model
        self.stddev = stddev           # maximum step size of the weight perturbation
        self.criterion = nn.MSELoss()  # loss function
        self.model = None              # globally best model
        self.load_data()               # load the data
        self.lr = 0.001                # learning rate

    # customize data loading here
    def load_data(self):
        with open('data.csv') as f:
            df = np.loadtxt(f, delimiter=",", skiprows=0)
        self.x = torch.from_numpy(df[:, :-1]).float().to(device)
        self.y = torch.from_numpy(df[:, -1:]).float().to(device)
        # build the train and test sets
        self.re_data_split()

    # rebuild the train/test split; used by reload
    def re_data_split(self):
        train_x, self.test_x, train_y, self.test_y = train_test_split(self.x, self.y, test_size=0.3, random_state=42)
        trainSet = Data(train_x, train_y)
        # batch the training set
        self.trainSetLoader = torch.utils.data.DataLoader(trainSet, batch_size=batch_size, shuffle=False)

    def reload(self, learning_rate, neurons_num):
        # optionally re-split train and test:
        # self.re_data_split()
        self.lr = learning_rate
        self.chroms = []  # start from a fresh population
        for i in range(self.pop_size):
            net = Net(neurons_num, learning_rate).to(device)
            self.chroms.append(net)

    # train the networks and return the best R^2
    # (improved from the blog code at /Vertira/article/details/122561056)
    def train(self, n):
        for epoch in range(n):
            result = [{'pop': i, 'train_acc': float("-inf")} for i in range(self.pop_size)]
            # feed different batches to different population members
            for step, (batch_x, batch_y) in enumerate(self.trainSetLoader):
                self.netTrain(batch_x, batch_y, (step + epoch) % self.pop_size)
            # compute train accuracy
            for i in range(self.pop_size):
                output = self.chroms[i](self.test_x)
                result[i]["train_acc"] = float(r2(output.data, self.test_y).cpu())
            result = sorted(result, key=lambda x: x['train_acc'], reverse=True)
            # self.model is the best individual so far; use it directly for prediction
            self.model = self.chroms[result[0]['pop']]
            self.selection(result)
        model_output = self.model(self.test_x)
        return float(r2(model_output.data, self.test_y).cpu())

    def netTrain(self, batch_x, batch_y, now):
        model = self.chroms[now]
        optimizer = model.optimizer
        # number of gradient steps per batch: this affects speed, but more often the gradients;
        # too small and the gradient may become 0 or None and training stalls
        # (I never fully understood why the gradient sometimes disappears),
        # too large and training may slow down
        for j in range(100):
            output = model(batch_x)
            optimizer.zero_grad()
            train_loss = self.criterion(output, batch_y).requires_grad_()
            train_loss.backward()
            optimizer.step()

    # keep the elites, crossover until the population is full, then mutate
    def selection(self, result):
        elites = [e['pop'] for e in result[:self.elite_num]]
        # keep elite_num elites
        children = [copy.deepcopy(self.chroms[i]) for i in elites]
        # roulette-wheel parent selection; softmax handles negative fitness values
        prob = torch.softmax(torch.tensor([i["train_acc"] for i in result]), dim=0)
        m = Categorical(prob)
        # pick two random parents until the population is full
        while len(children) < self.pop_size:
            # crossover two randomly chosen individuals
            pair = [result[m.sample()]['pop'], result[m.sample()]['pop']]
            children.append(self.crossover(pair))
        del self.chroms[:]
        self.chroms[:] = children
        # mutate everyone except the elites
        for i in range(self.elite_num, self.pop_size):
            # with probability p_mutation
            if random.random() < self.p_mutation:
                mutated_child = self.mutation(i)
                del self.chroms[i]
                self.chroms.insert(i, mutated_child)

    def crossover(self, _selected_pop):
        if _selected_pop[0] == _selected_pop[1]:
            return copy.deepcopy(self.chroms[_selected_pop[0]])
        chrom1 = copy.deepcopy(self.chroms[_selected_pop[0]])
        chrom2 = copy.deepcopy(self.chroms[_selected_pop[1]])
        chrom1_layers = nn.ModuleList(chrom1.modules())
        chrom2_layers = nn.ModuleList(chrom2.modules())
        child = torch.nn.Sequential()
        for i in range(len(chrom1_layers)):
            layer1 = chrom1_layers[i]
            layer2 = chrom2_layers[i]
            # swap Linear layers at random
            if isinstance(layer1, nn.Linear):
                child.add_module(str(i - 2), layer1 if random.random() < 0.5 else layer2)
            elif isinstance(layer1, (torch.nn.Sequential, Net)):
                pass
            else:
                child.add_module(str(i - 2), layer1)
        chrom1.set_layer(child)
        chrom1.optimizer = torch.optim.Adam(chrom1.parameters(), lr=self.lr)
        return chrom1

    def mutation(self, _selected_pop):
        child = torch.nn.Sequential()
        chrom = copy.deepcopy(self.chroms[_selected_pop])
        chrom_layers = nn.ModuleList(chrom.modules())
        # mutate a subset of the layers
        for i, layer in enumerate(chrom_layers):
            if isinstance(layer, nn.Linear):
                # mutate a Linear layer with probability r_mutation
                if random.random() < self.r_mutation:
                    # extract the weights
                    weights = layer.weight.detach()
                    # perturb them with Gaussian noise
                    w = weights + torch.normal(0, self.stddev, weights.shape).float().to(device)
                    # write them back
                    layer.weight = torch.nn.Parameter(w)
                child.add_module(str(i - 2), layer)
            elif isinstance(layer, (torch.nn.Sequential, Net)):
                pass
            else:
                child.add_module(str(i - 2), layer)
        chrom.set_layer(child)
        chrom.optimizer = torch.optim.Adam(chrom.parameters(), lr=self.lr)
        return chrom


# Custom GA problem: evolve the learning rate and the number of hidden neurons
class My_nsga(ea.Problem):
    def __init__(self, epoch):
        if "result" not in os.listdir():
            os.makedirs("./result")
        name = 'GA-NET'
        M = 1
        maxormins = [-1] * M
        Dim = 2
        varTypes = [1] * Dim
        lb = [10, 10]
        ub = [500, 100]
        lbin = [1] * Dim
        ubin = [1] * Dim
        self.count = 1
        self.epoch = epoch
        self.train = NetTrainGA()
        ea.Problem.__init__(self, name, M, maxormins, Dim, varTypes, lb, ub, lbin, ubin)

    # the objective is the R^2 returned by the trained network
    def evalVars(self, Vars):
        ans = np.zeros(len(Vars)).reshape(len(Vars), 1)
        for i in range(len(Vars)):
            self.train.reload(Vars[i][0] / 10000, Vars[i][1])
            # the argument is the number of training epochs per network
            data = self.train.train(self.epoch)
            print("learning rate = {}, neurons num = {}, R^2 = {}".format(Vars[i][0] / 10000, Vars[i][1], round(data, 3)))
            torch.save(self.train.model, "./result/lr{}num{}epoch{}r2{}.pt".format(Vars[i][0] / 10000, Vars[i][1], self.epoch, round(data, 3)))
            # stop once the target accuracy is reached
            if data >= 1:
                torch.save(self.train.model, "lr{}num{}epoch{}r2{}.pt".format(Vars[i][0] / 10000, Vars[i][1], self.epoch, round(data, 3)))
                exit("Find!")
            ans[i] = float(data)
        return ans


# Run the GA
class Run_nsga:
    def __init__(self, epoch=10, ndind=10, maxgen=10):
        problem = My_nsga(epoch)
        myAlgorithm = ea.soea_EGA_templet(problem, ea.Population(Encoding='RI', NIND=ndind), MAXGEN=maxgen, logTras=0)
        myAlgorithm.drawing = 0
        res = ea.optimize(myAlgorithm, seed=1, verbose=False, drawing=0, outputMsg=True, drawLog=False, saveFlag=False, dirName='result')
        print(res)
        print(res['Vars'][0])


if __name__ == "__main__":
    # arguments: epochs per network, population size, GA generations
    Run_nsga(100, 10, 10)
    """
    # NetTrainGA can also be called on its own with initial parameters:
    netga = NetTrainGA()
    # learning rate, neurons_num
    netga.reload(0.001, 30)
    # epochs
    print(netga.train(1000))
    """

Everything above computes a regression loss; to compute a classification loss instead, only the following changes are needed.

More details can be found in the article linked above.

# change the output dimension
output_dimension = 28

# make y one-dimensional
self.y = df[:, -1]

# switch the loss function
self.criterion = nn.CrossEntropyLoss()

# class labels must be long, not float
train_y = torch.from_numpy(train_y).long()

# compute train_acc as classification accuracy
result[i]["train_acc"] = float((output.argmax(dim=1) == self.test_y).sum()) / len(self.test_y)

# change train's return value accordingly
return float((model_output.argmax(dim=1) == self.test_y).sum() / len(self.test_y))
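
As a sanity check on the shapes these changes imply (my own sketch, standard PyTorch rather than the author's code): CrossEntropyLoss expects float logits of shape (N, 28) and long class indices of shape (N,), which is exactly why y loses its trailing dimension and train_y becomes long above:

import torch
import torch.nn as nn

criterion = nn.CrossEntropyLoss()
logits = torch.randn(5, 28)           # (N, output_dimension) raw scores
targets = torch.randint(0, 28, (5,))  # (N,) long class indices, not one-hot
loss = criterion(logits, targets)
accuracy = float((logits.argmax(dim=1) == targets).sum()) / len(targets)
print(loss.item(), accuracy)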

The Part 2 version has flaws: the optimizer is where the bug in the whole GA weight-optimization scheme lives (the sketch below shows the underlying issue). This article was last updated on /4/15: added GPU training. If you have a better fix, I would be glad to discuss it.
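
One way to see why the optimizer is the trouble spot (a sketch of my own, assuming the stale-parameter issue is what is meant here): after set_layer swaps in a freshly assembled Sequential, any optimizer built earlier still references the old parameter tensors. That is why crossover and mutation rebuild the optimizer, at the cost of throwing away Adam's moment estimates every generation.

import torch

net = Net(neurons_num=30, lr=0.001)  # Net as defined in Part 2 above
before = {id(p) for p in net.parameters()}

# rebuild the layers the way crossover/mutation do
net.set_layer(torch.nn.Sequential(
    torch.nn.Linear(7, 30), torch.nn.ReLU(), torch.nn.Linear(30, 1)))
after = {id(p) for p in net.parameters()}

# the parameter tensors are now different objects, so the optimizer created
# in Net.__init__ would still be stepping the old, disconnected weights
print(before.isdisjoint(after))  # True -> the optimizer must be rebuilt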

My personal blog, feel free to visit: https://www.pancake.work/?p=1511
