您好,登錄后才能下訂單哦!
這篇文章主要介紹“怎么使用C#完成常用神經網絡”,在日常操作中,相信很多人在怎么使用C#完成常用神經網絡問題上存在疑惑,小編查閱了各式資料,整理出簡單好用的操作方法,希望對大家解答“怎么使用C#完成常用神經網絡”的疑惑有所幫助!接下來,請跟著小編一起來學習吧!
WeaveNetC#編寫的用于神經網絡的計算圖框架
這是一個由 C# 編寫的神經網絡框架,可以看到內部任何細節的實現,有助于學習神經網絡以及理解其中的計算方式。為了展示更多的計算細節,此框架不包含自動向后傳播(Backward)。
源碼包含 cnn,bp,fcn,lstm,convlstm,GRU 等示例內容,包含示例所用的數據內容。
LOSS支持:MESLOSS,cross-entropy
激活函數支持:ReLu,Tanh,Sigmod,Softmax
數據類型支持: float[][] 與 float[][][,],二維與四維
池化支持:平均池化Averpooling,最大池化Maxpooling
其他支持:ConvLayer,Conv2DLayer,MulLayer,ConvTranspose2DLayer
每個支持類都包含前向傳播(Forward)與后向傳播(Backward)的方法
以下幾個小例子
CNN的訓練實現
/// <summary>
/// Small LeNet-style CNN: two conv → sigmoid → max-pool stages, then a
/// fully-connected head with a softmax output. Backpropagation is wired by
/// hand in backward(); update() applies plain SGD with the stored gradients.
/// </summary>
public class CNN
{
    // Convolution stages. conv3 acts as the fully-connected head
    // (constructed with _full: true).
    Conv2DLayer conv1;
    Conv2DLayer conv2;
    Conv2DLayer conv3;

    Maxpooling pool1;
    Maxpooling pool2;

    // Activations between stages.
    SigmodLayer act1 = new SigmodLayer();
    SigmodLayer act2 = new SigmodLayer();
    Softmax softmax = new Softmax();

    public CNN()
    {
        // NOTE(review): positional argument meaning assumed to be
        // (stride, padding, kernel, in_channels, out_channels) — confirm
        // against Conv2DLayer's constructor.
        conv1 = new Conv2DLayer(1, 0, 5, 1, 6);   // 1 -> 6 channels
        pool1 = new Maxpooling(2);                 // 2x2 max pool
        conv2 = new Conv2DLayer(1, 0, 5, 6, 12);  // 6 -> 12 channels
        pool2 = new Maxpooling(2);
        // Fully-connected head: 12 channels of 4x4 maps -> 10 classes.
        conv3 = new Conv2DLayer(in_channels: 12, out_channels: 10, _inSize: 4, _full: true);
    }

    /// <summary>Runs one forward pass over a batch of single-channel images.</summary>
    /// <param name="matrices">Input batch as float[][][,].</param>
    /// <returns>Softmax class probabilities (dynamic; shape set by Softmax).</returns>
    public dynamic Forward(float[][][,] matrices)
    {
        dynamic x = conv1.Forward(matrices);
        x = act1.Forward(x);
        x = pool1.Forward(x);
        x = conv2.Forward(x);
        x = act2.Forward(x);
        x = pool2.Forward(x);
        x = conv3.Forward(x);
        x = softmax.Forward(x);
        return x;
    }

    // Per-layer weight/bias gradients cached by backward() for update().
    dynamic gradConv3;
    dynamic gradConv2;
    dynamic gradConv1;

    /// <summary>
    /// Propagates the loss gradient back through the network (reverse of
    /// Forward), caching each conv layer's weight gradient along the way.
    /// </summary>
    public void backward(dynamic grid)
    {
        dynamic g = softmax.Backward(grid);
        gradConv3 = conv3.backweight(g); // weight/bias gradient for the head

        g = conv3.Backward(g);
        g = pool2.Backward(g);
        g = act2.Backward(g);
        gradConv2 = conv2.backweight(g); // weight/bias gradient for stage 2

        g = conv2.Backward(g);
        g = pool1.Backward(g);
        g = act1.Backward(g);
        gradConv1 = conv1.backweight(g); // weight/bias gradient for stage 1
    }

    float lr = 1.0f; // SGD step size

    /// <summary>Applies one SGD step: w -= lr * dw for every layer.</summary>
    public void update()
    {
        // NOTE(review): the head stores its weights in `wdata` while the conv
        // stages use `weights` — presumably a consequence of _full: true;
        // confirm against Conv2DLayer.
        conv3.wdata = Matrix.MatrixSub(conv3.wdata, Matrix.multiply(gradConv3.grid, lr));
        conv3.basicData = Matrix.MatrixSub(conv3.basicData, Matrix.multiply(gradConv3.basic, lr));

        conv2.weights = Matrix.MatrixSub(conv2.weights, Matrix.multiply(gradConv2.grid, lr));
        conv2.basicData = Matrix.MatrixSub(conv2.basicData, Matrix.multiply(gradConv2.basic, lr));

        conv1.weights = Matrix.MatrixSub(conv1.weights, Matrix.multiply(gradConv1.grid, lr));
        conv1.basicData = Matrix.MatrixSub(conv1.basicData, Matrix.multiply(gradConv1.basic, lr));
    }
}
LSTM 實現例子
/// <summary>
/// Single LSTM cell with hand-wired forward and backward passes. Two fused
/// linear layers project the input and previous hidden state to all four gate
/// pre-activations at once (i | f | c~ | o); backward() reassembles the fused
/// gradient in the same layout.
/// </summary>
public class LSTMCELL
{
    ConvLayer ihLayer; // input        -> 4 * hidden (fused gate projection)
    ConvLayer hhLayer; // hidden state -> 4 * hidden (fused gate projection)
    int input_size;
    int hidden_size;

    public LSTMCELL(int _input_size, int _hidden_size)
    {
        input_size = _input_size;
        hidden_size = _hidden_size;
        // One linear layer per source produces all four gates' pre-activations.
        ihLayer = new ConvLayer(input_size, hidden_size * 4);
        hhLayer = new ConvLayer(hidden_size, hidden_size * 4);
    }

    // Activation layers are fields so backward() can reuse their cached state.
    SigmodLayer input_gate_s = new SigmodLayer();
    SigmodLayer forget_gate_s = new SigmodLayer();
    SigmodLayer output_gate_s = new SigmodLayer();
    TanhLayer cell_memory_tl = new TanhLayer();
    TanhLayer cell_tl = new TanhLayer();

    // Elementwise-multiply layers; each caches its operands for backward().
    MulLayer c_next_mul = new MulLayer();     // f * c_prev
    MulLayer mulin_gate_mul = new MulLayer(); // i * c~
    MulLayer h_next_mul = new MulLayer();     // o * tanh(c_next)

    /// <summary>
    /// One LSTM step: computes the next hidden state and cell state from the
    /// current input and the previous (h, c).
    /// </summary>
    /// <returns>Tuple (h_next, c_next).</returns>
    public dynamic Forward(float[][] input, float[][] h_prev, float[][] c_prev)
    {
        // Cache inputs for the backward pass.
        Xinput = input;
        xh_prev = h_prev;
        xc_prev = c_prev;

        // a = W_ih * x + W_hh * h_prev (biases live inside the ConvLayers).
        var ih = ihLayer.Forward(input);
        var hh = hhLayer.Forward(h_prev);
        var preAct = Matrix.MatrixAdd(ih, hh);

        // Split the fused pre-activation into the four gates: i, f, c~, o.
        List<float[][]> gates = Matrix.chunk(preAct, 4, 1);
        input_gate = input_gate_s.Forward(gates[0]);
        forget_gate = forget_gate_s.Forward(gates[1]);
        cell_memory = cell_memory_tl.Forward(gates[2]);
        output_gate = output_gate_s.Forward(gates[3]);

        // c_next = f * c_prev + i * c~
        var retained = c_next_mul.Forward(forget_gate, c_prev);
        var written = mulin_gate_mul.Forward(input_gate, cell_memory);
        var c_next = Matrix.MatrixAdd(retained, written);

        // h_next = o * tanh(c_next)
        var h_next = h_next_mul.Forward(output_gate, cell_tl.Forward(c_next));

        return (h_next, c_next); // (next hidden state, next cell state)
    }

    // Cached forward-pass values and gate activations.
    dynamic Xinput, xh_prev, xc_prev, input_gate, forget_gate, cell_memory, output_gate;
    // Weight/bias gradients produced by backward(), consumed by update().
    dynamic ihweight, hhweight;

    /// <summary>
    /// Backpropagates the hidden-state gradient through the cell and returns
    /// the gradient with respect to the input.
    /// </summary>
    public dynamic backward(dynamic grid)
    {
        // dL/d(tanh(c_next)) via the output-gate multiply, then through tanh.
        var dh = h_next_mul.BackwardY(grid);
        var dTanhC = cell_tl.Backward(dh);

        // Distribute the cell-state gradient to each gate's output.
        var dInputGate = mulin_gate_mul.Backward(dTanhC);
        var dForgetGate = c_next_mul.Backward(dTanhC);
        var dCellMemory = mulin_gate_mul.BackwardY(dTanhC);
        var dOutputGate = h_next_mul.Backward(grid);

        // Through each gate's activation to the pre-activation gradients.
        var dAi = input_gate_s.Backward(dInputGate);
        var dAf = forget_gate_s.Backward(dForgetGate);
        var dAo = output_gate_s.Backward(dOutputGate);
        var dAc = cell_memory_tl.Backward(dCellMemory);

        // Reassemble in the same i | f | c~ | o layout used by Forward.
        var left = Matrix.cat(dAi, dAf, 1);
        var right = Matrix.cat(dAc, dAo, 1);
        var dPreAct = Matrix.cat(left, right, 1);

        ihweight = ihLayer.backweight(dPreAct);
        hhweight = hhLayer.backweight(dPreAct);
        return ihLayer.backward(dPreAct);
    }

    float lr = 0.1f; // SGD step size

    /// <summary>Applies one SGD step to both fused projection layers.</summary>
    public void update()
    {
        ihLayer.weights = Matrix.MatrixSub(ihLayer.weights, Matrix.multiply(ihweight.grid, lr));
        ihLayer.basicData = Matrix.MatrixSub(ihLayer.basicData, Matrix.multiply(ihweight.basic, lr));

        hhLayer.weights = Matrix.MatrixSub(hhLayer.weights, Matrix.multiply(hhweight.grid, lr));
        hhLayer.basicData = Matrix.MatrixSub(hhLayer.basicData, Matrix.multiply(hhweight.basic, lr));
    }
}
FCN實現例子
/// <summary>
/// Fully convolutional network: three conv → sigmoid stages (the first two
/// followed by max pooling), a transposed convolution that upsamples the
/// result, and a softmax on the output. backward() mirrors Forward() in
/// reverse and returns the input gradient; no weight update is defined here.
/// </summary>
public class FCN
{
    Conv2DLayer conv1;
    Conv2DLayer conv2;
    Conv2DLayer conv3;
    ConvTranspose2DLayer deconv;

    Maxpooling pool1 = new Maxpooling();
    Maxpooling pool2 = new Maxpooling();

    SigmodLayer act1 = new SigmodLayer();
    SigmodLayer act2 = new SigmodLayer();
    SigmodLayer act3 = new SigmodLayer();
    Softmax softmax = new Softmax();

    /// <param name="weightssize">Convolution kernel size shared by all stages.</param>
    public FCN(int weightssize)
    {
        // NOTE(review): padding of kernel/2 looks like "same"-style padding —
        // confirm against Conv2DLayer's (stride, padding, kernel, in, out) order.
        conv1 = new Conv2DLayer(1, weightssize / 2, weightssize, 1, 6, bias: false);
        conv2 = new Conv2DLayer(1, weightssize / 2, weightssize, 6, 12, bias: false);
        conv3 = new Conv2DLayer(1, weightssize / 2, weightssize, 12, 24, bias: false);
        // Transposed conv collapses 24 channels back to 1 while upsampling.
        deconv = new ConvTranspose2DLayer(2, 1, weightssize + 1, 24, 1, bias: false);
    }

    /// <summary>Runs one forward pass and returns the softmax output.</summary>
    public dynamic Forward(dynamic data)
    {
        dynamic x = conv1.Forward(data);
        x = act1.Forward(x);
        x = pool1.Forward(x);
        x = conv2.Forward(x);
        x = act2.Forward(x);
        x = pool2.Forward(x);
        x = conv3.Forward(x);
        x = act3.Forward(x);
        x = deconv.Forward(x);
        x = softmax.Forward(x);
        return x;
    }

    /// <summary>
    /// Propagates the loss gradient back through every layer in reverse order
    /// and returns the gradient with respect to the network input.
    /// </summary>
    public dynamic backward(dynamic grid)
    {
        dynamic g = softmax.Backward(grid);
        g = deconv.Backward(g);
        g = act3.Backward(g);
        g = conv3.Backward(g);
        g = pool2.Backward(g);
        g = act2.Backward(g);
        g = conv2.Backward(g);
        g = pool1.Backward(g);
        g = act1.Backward(g);
        g = conv1.Backward(g);
        return g;
    }
}
到此,關于“怎么使用C#完成常用神經網絡”的學習就結束了,希望能夠解決大家的疑惑。理論與實踐的搭配能更好的幫助大家學習,快去試試吧!若想繼續學習更多相關知識,請繼續關注億速云網站,小編會繼續努力為大家帶來更多實用的文章!
免責聲明:本站發布的內容(圖片、視頻和文字)以原創、轉載和分享為主,文章觀點不代表本網站立場,如果涉及侵權請聯系站長郵箱:is@yisu.com進行舉報,并提供相關證據,一經查實,將立刻刪除涉嫌侵權內容。