
Illustrated Artificial Neural Networks: BP Algorithm Code and Comments

[Animated diagram of the algorithm; the GIF is long.]

Code

# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as pyp

# Note: np.mat/np.matrix is deprecated in recent NumPy; np.asmatrix is a drop-in replacement.

OUTPUT = 'output'
SUM = 'sum'
INPUT = 'input'
EXPECT = 'exp'
WEIGHT = 'weight'
LOST = 'lost'
START = 'start'
END = 'end'
B = 1.0  # extra (bias) input


def af(v):
    """Activation function (sigmoid).

    Args:
        v (matrix): argument

    Returns:
        matrix: function value.
    """
    return 1 / (1 + np.exp(-v))


def af_d(v):
    """Derivative of the activation function.

    Args:
        v (matrix): argument

    Returns:
        matrix: function value.
    """
    d = af(v)
    return np.multiply(d, 1.0 - d)


def loss(y_hat, y):  # cross-entropy loss L(y_hat, y) = -y*log(y_hat) - (1-y)*log(1-y_hat)
    """Cross-entropy loss.

    Args:
        y_hat (matrix): observed results, i.e. the network's output-layer values
        y (matrix): target output

    Returns:
        list: per-row difference between the targets and the observed outputs.
    """
    ret = []
    row_count, col_count = y_hat.shape
    for row_index in range(row_count):
        y_hat_row_t = y_hat[row_index, ].T
        left = np.dot(-y, np.log(y_hat_row_t))
        right = np.dot((np.array([[1]]) - y), np.log(1 - y_hat_row_t))
        res = np.sum(left - right)
        ret.append(res)
    return ret


def init_weights(*widths):
    """Initialize the weights.

    Returns:
        list: initial weights.
    """
    weights = []
    depth = len(widths)
    for i in range(1, depth):
        miu = np.sqrt(1 / widths[i])  # standard deviation; keeps the initial weights from spreading too widely
        w = miu * np.random.randn(widths[i], widths[i - 1] + 1)  # random initialization
        weights.append(np.mat(w))
    # Length is network depth - 1 (the input layer has no weights); each layer has
    # n rows (this layer's width) and m+1 columns (previous layer's width plus the bias).
    return weights


def fp_layer(input, weights):
    """Forward propagation through one layer.

    Args:
        input (matrix): this layer's input; each row is one previous-layer neuron's output
        weights (matrix): this layer's weights

    Returns:
        dict: this layer's input (with the extra bias entry), its weighted sums,
            and the result of the nonlinear transform.
    """
    iab = np.insert(np.mat([[B]]), [1], input, axis=0)  # prepend the bias to the input
    sums = np.dot(weights, iab)  # weighted sum
    res = af(sums)  # nonlinear transform
    return {INPUT: iab, SUM: sums, OUTPUT: res}


def bp_layer(exp, weights, sum, inputs):
    """Backpropagation through one layer.

    Args:
        exp (matrix): this layer's error term δ, one row per neuron
        weights (matrix): this layer's weights
        sum (matrix): the previous layer's weighted sums
        inputs (matrix): this layer's input (with the extra bias entry)

    Returns:
        dict: the previous layer's error term and this layer's weight delta.
    """
    # exp is n*1
    delta_weights = np.dot(exp, inputs.T)  # not yet scaled by the learning rate
    grad = af_d(sum)  # grad is n*1
    propagate = np.dot(weights.T[1:], exp)  # the bias column does not propagate
    propagate = np.multiply(grad, propagate)  # δ = f'(s) ⊙ (w^T · δ^{l+1})
    return {EXPECT: propagate, WEIGHT: delta_weights}


def fit(data, weights):
    """Fit a single sample.

    Args:
        data (dict): holds the input and the target output
        weights (list): the network's weights

    Returns:
        dict: the observed output and the weight deltas.
    """
    depth = len(weights)
    fp_results = []
    input = data[INPUT]
    for i in range(depth):
        fp_res = fp_layer(input, weights[i])  # forward propagation
        fp_results.append(fp_res)
        input = fp_res[OUTPUT]
    delta_weights = []
    exp = input - data[EXPECT]  # output-layer δ = y_hat - y (sigmoid plus cross-entropy)
    for i in reversed(range(depth)):
        net = fp_results[i - 1][SUM]  # for i == 0 this wraps around, but that δ is never used
        bp_res = bp_layer(exp, weights[i], net, fp_results[i][INPUT])  # BP
        exp = bp_res[EXPECT]  # propagate backwards
        delta_weights.append(bp_res[WEIGHT])  # save ΔW
    delta_weights.reverse()  # the layers were collected in reverse order; restore it here
    return {OUTPUT: input, WEIGHT: delta_weights}


times = 3000
eta = 0.25  # learning rate
data_inputs = [[1, 0], [1, 1], [0, 1], [0, 0]]  # an XOR data set
data_outputs = [[1], [0], [1], [0]]
data_size = len(data_inputs)
weights = init_weights(2, 3, 1)
depth = len(weights)
result = {LOST: [], START: None, END: []}
for t in range(times):
    delta_weights = []
    res = []
    for data_index in range(data_size):  # fit every sample
        data = {INPUT: np.mat([data_inputs[data_index]]).T,
                EXPECT: np.mat([data_outputs[data_index]]).T}
        fit_res = fit(data, weights)
        delta_weights.append(fit_res[WEIGHT])
        res.append(fit_res[OUTPUT].T.tolist()[0])  # res is only used for logging from here on
    if result[START] is None:  # store the network outputs for printing
        result[START] = res
    result[END] = res
    lost = loss(np.mat(res).T, np.mat(data_outputs).T)
    result[LOST].append(np.dot(1 / data_size, lost))  # mean loss over the batch
    for delta_weight in delta_weights:
        for layerIndex in range(depth):
            layer = delta_weight[layerIndex]
            weights[layerIndex] -= np.dot(eta, layer)  # update the weights, scaled by the learning rate
print('\033[31m', 'Output before training:', np.mat(result[START]).T, '\033[0m')
print('\033[35m', 'Output after training:', np.mat(result[END]).T, '\033[0m')
pyp.plot(result[LOST])  # plot the loss curve
pyp.show()  # show the plot
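
Since the output-layer error term exp = y_hat - y is exactly the delta of the sigmoid/cross-entropy pairing, the ΔW values returned by fit are the gradients of the per-sample cross-entropy loss. A quick way to convince yourself of this is a finite-difference gradient check. The sketch below assumes the definitions above are in scope; sample_loss and grad_check are hypothetical helper names, not part of the original code.

def sample_loss(data, weights):
    # Cross-entropy of the network output for one sample (single output neuron).
    y_hat = fit(data, weights)[OUTPUT][0, 0]
    y = data[EXPECT][0, 0]
    return -y * np.log(y_hat) - (1 - y) * np.log(1 - y_hat)

def grad_check(data, weights, h=1e-5):
    # Compare fit()'s analytic ΔW against a central finite difference.
    analytic = fit(data, weights)[WEIGHT]
    for l in range(len(weights)):
        numeric = np.zeros(weights[l].shape)
        rows, cols = weights[l].shape
        for r in range(rows):
            for c in range(cols):
                orig = weights[l][r, c]
                weights[l][r, c] = orig + h
                loss_plus = sample_loss(data, weights)
                weights[l][r, c] = orig - h
                loss_minus = sample_loss(data, weights)
                weights[l][r, c] = orig  # restore the weight
                numeric[r, c] = (loss_plus - loss_minus) / (2 * h)
        print('layer', l, 'max abs diff:', np.max(np.abs(numeric - analytic[l])))

grad_check({INPUT: np.mat([[1, 0]]).T, EXPECT: np.mat([[1]]).T},
           init_weights(2, 3, 1))

If the BP code is correct, the printed differences should be tiny, roughly on the order of 1e-9 or smaller for each layer.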

Output

Output before training: [[0.57172222 0.55289663 0.61069527 0.63006013]]
Output after training: [[0.99686652 0.00336393 0.9959174 0.00293138]]
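
These values are sigmoid activations, so reading them as hard XOR predictions just means thresholding at 0.5. A one-line post-processing step (hypothetical, not in the original script) could be:

predictions = (np.array(result[END]) > 0.5).astype(int)
print(predictions.T)  # with the outputs above this gives [[1 0 1 0]], i.e. data_outputs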

Implemented with TensorFlow the code is even shorter; a sketch follows below.
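
As a rough illustration (this is an assumption, not the code behind the original link), a TensorFlow/Keras version of the same 2-3-1 sigmoid network, trained with SGD at learning rate 0.25 and binary cross-entropy, might look like this:

import numpy as np
import tensorflow as tf

X = np.array([[1, 0], [1, 1], [0, 1], [0, 0]], dtype=np.float32)
y = np.array([[1], [0], [1], [0]], dtype=np.float32)

model = tf.keras.Sequential([
    tf.keras.Input(shape=(2,)),
    tf.keras.layers.Dense(3, activation='sigmoid'),  # hidden layer, width 3
    tf.keras.layers.Dense(1, activation='sigmoid'),  # output layer
])
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.25),
              loss='binary_crossentropy')
model.fit(X, y, epochs=3000, verbose=0)
print(model.predict(X))  # should approach [[1], [0], [1], [0]]

Keras handles the forward pass, backpropagation, and weight updates that the hand-written version spells out explicitly.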
