いつだか、chatGPTにXORの学習のC#コードをお願いして(聞くたびに違うプログラムが動くのが凄い〜)、やっと動くコードができた。
Unityじゃなくて、Windowsの.NetFrameworkのC#コンパイラーのコンソールアプリでやってみたけど、
XORだけじゃなくて、ANDやORもちゃんと学習出来た
sigmoid dSigmoidからReLU dReLUに変えても学習出来た
入力を3入力のXORにしても学習出来たので、大丈夫っぽい
これをUnity上で動かせるようにすれば、ゲーム中でも自分でなにか学習できるかも
フレームワークを使うとメモリーとかかなり食いそうなんで、スマホゲームは自分でやってみてもいいかなと
using System;

namespace NeuralNetwork
{
    /// <summary>Console entry point: trains a small network on the XOR truth table.</summary>
    class Program
    {
        static void Main(string[] args)
        {
            // 2 inputs -> 4 hidden -> 1 output: the classic minimal XOR topology.
            int[] layerSizes = { 2, 4, 1 };
            NeuralNetwork nn = new NeuralNetwork(layerSizes);

            double[][] inputs =
            {
                new double[] { 0, 0 },
                new double[] { 0, 1 },
                new double[] { 1, 0 },
                new double[] { 1, 1 }
            };
            double[][] targets =
            {
                new double[] { 0 },
                new double[] { 1 },
                new double[] { 1 },
                new double[] { 0 }
            };

            // Train for 10000 epochs with learning rate 0.1.
            nn.Train(inputs, targets, 10000, 0.1);
        }
    }

    /// <summary>
    /// Minimal fully-connected feed-forward network trained with online (per-sample)
    /// back-propagation. Every non-input layer uses the sigmoid activation;
    /// ReLU/dReLU are provided as drop-in alternatives but are not wired in.
    /// </summary>
    class NeuralNetwork
    {
        // Number of neurons per layer, input layer first.
        private int[] layerSizes;
        // activations[l][i]: pre-activation sum from the forward pass.
        // NOTE: BackPropagation reuses this same buffer to hold the deltas.
        private double[][] activations;
        // outputs[l][i]: post-activation value of neuron i in layer l.
        private double[][] outputs;
        // weights[l][i][j]: weight from neuron j in layer l to neuron i in layer l+1.
        private double[][][] weights;
        // biases[l][i]: bias of neuron i in layer l+1.
        private double[][] biases;
        private Random rand = new Random();

        /// <summary>
        /// Builds the buffers for the given topology and initializes all weights
        /// and biases uniformly at random in [-1, 1).
        /// </summary>
        public NeuralNetwork(int[] layerSizes)
        {
            this.layerSizes = layerSizes;
            activations = new double[layerSizes.Length][];
            outputs = new double[layerSizes.Length][];
            weights = new double[layerSizes.Length - 1][][];
            biases = new double[layerSizes.Length - 1][];

            for (int l = 0; l < layerSizes.Length; l++)
            {
                activations[l] = new double[layerSizes[l]];
                outputs[l] = new double[layerSizes[l]];
                if (l > 0)
                {
                    // Weight/bias slot l-1 feeds layer l.
                    weights[l - 1] = new double[layerSizes[l]][];
                    biases[l - 1] = new double[layerSizes[l]];
                    for (int i = 0; i < layerSizes[l]; i++)
                    {
                        weights[l - 1][i] = new double[layerSizes[l - 1]];
                        for (int j = 0; j < layerSizes[l - 1]; j++)
                        {
                            weights[l - 1][i][j] = rand.NextDouble() * 2 - 1;
                        }
                        biases[l - 1][i] = rand.NextDouble() * 2 - 1;
                    }
                }
            }
        }

        /// <summary>Logistic function; maps any real onto (0, 1).</summary>
        public double sigmoid(double x) { return 1.0 / (1.0 + Math.Exp(-x)); }

        /// <summary>
        /// Sigmoid derivative expressed in terms of the sigmoid's OUTPUT value
        /// (i.e. pass x = sigmoid(s), not s itself).
        /// </summary>
        public double dSigmoid(double x) { return x * (1.0 - x); }

        /// <summary>Rectified linear unit (alternative activation, unused by default).</summary>
        public double ReLU(double x) { return Math.Max(0, x); }

        /// <summary>ReLU derivative: 1 for positive argument, otherwise 0.</summary>
        public double dReLU(double x) { return x > 0 ? 1 : 0; }

        /// <summary>
        /// Forward pass: loads <paramref name="input"/> into layer 0 and propagates
        /// through all layers. Returns a reference to the output layer's values
        /// (overwritten by the next call).
        /// </summary>
        public double[] FeedForward(double[] input)
        {
            for (int i = 0; i < layerSizes[0]; i++)
            {
                activations[0][i] = input[i];
                outputs[0][i] = input[i];
            }

            // Forward propagation through every non-input layer.
            for (int l = 1; l < layerSizes.Length; l++)
            {
                for (int i = 0; i < layerSizes[l]; i++)
                {
                    double sum = biases[l - 1][i];
                    for (int j = 0; j < layerSizes[l - 1]; j++)
                    {
                        sum += weights[l - 1][i][j] * outputs[l - 1][j];
                    }
                    activations[l][i] = sum;
                    outputs[l][i] = sigmoid(sum);
                }
            }

            // Return the output layer.
            return outputs[layerSizes.Length - 1];
        }

        /// <summary>
        /// One back-propagation step for the sample most recently run through
        /// <see cref="FeedForward"/>. Deltas are stored in the activations buffer,
        /// then weights and biases are adjusted by simple gradient descent.
        /// </summary>
        private void BackPropagation(double[] target, double learningRate)
        {
            // Output-layer deltas: (target - output) * sigmoid'(output).
            for (int i = 0; i < layerSizes[layerSizes.Length - 1]; i++)
            {
                double output = outputs[layerSizes.Length - 1][i];
                double error = target[i] - output;
                double derivative = dSigmoid(output);
                activations[layerSizes.Length - 1][i] = error * derivative;
            }

            // Hidden-layer deltas, propagated backwards (input layer has none).
            for (int l = layerSizes.Length - 2; l >= 1; l--)
            {
                for (int i = 0; i < layerSizes[l]; i++)
                {
                    double output = outputs[l][i];
                    double derivative = dSigmoid(output);
                    double sum = 0;
                    for (int j = 0; j < layerSizes[l + 1]; j++)
                    {
                        sum += activations[l + 1][j] * weights[l][j][i];
                    }
                    activations[l][i] = sum * derivative;
                }
            }

            // Weight and bias update: w += lr * delta * upstream output.
            for (int l = 0; l < layerSizes.Length - 1; l++)
            {
                for (int i = 0; i < layerSizes[l + 1]; i++)
                {
                    for (int j = 0; j < layerSizes[l]; j++)
                    {
                        double output = outputs[l][j];
                        double delta = learningRate * activations[l + 1][i] * output;
                        weights[l][i][j] += delta;
                    }
                    biases[l][i] += learningRate * activations[l + 1][i];
                }
            }
        }

        /// <summary>
        /// Online training: for each epoch, runs a forward + backward pass per sample.
        /// Every 1000 epochs prints the current prediction for every training sample.
        /// </summary>
        public void Train(double[][] inputs, double[][] targets, int iterations, double learningRate)
        {
            for (int i = 0; i < iterations; i++)
            {
                for (int j = 0; j < inputs.Length; j++)
                {
                    // The forward pass fills the state BackPropagation reads.
                    FeedForward(inputs[j]);
                    BackPropagation(targets[j], learningRate);
                }

                // Progress report.
                if ((i % 1000) == 0)
                {
                    string str = "";
                    // BUG FIX: the original hard-coded 4 samples and exactly 2 input
                    // components, crashing for any other training set. Iterate over
                    // whatever the caller provided; string.Join keeps the old
                    // "a x b = y, " format for the 2-input case.
                    for (int j = 0; j < inputs.Length; j++)
                    {
                        str += string.Join("x", inputs[j]) + " = " + FeedForward(inputs[j])[0] + ", ";
                    }
                    Console.WriteLine("#" + i + " " + str);
                }
            }
        }
    }
}