User Tools

Site Tools


summer2015nn:design

Neural Net Design Document

import java.util.*;
 
public class NeuralNet {
    /** defining layers */
    protected double[] input;
    protected ArrayList<NetUnit>[] hidden;
    protected double[] output;
    protected double learningRate;
 
    /** NeuralNet constructor
	@param numberIn number of input units
	@param numberOut number of output layers
	@param layers number 
    */
    public NeuralNet(int numberIn, int numberOut, int layers) {
	// create input and output arrays
	// create each requested hidden layer
    }
 
    public void configureNet(double learningRate) { // highly likely to be overidden by subclasses
	this.learningRate = learningRate;
    }
 
    public double learningRate() {
	return this.learningRate;
    }
 
    /** import - load trained network weights into a new set */
    public void importNet(String importFile) {
 
    }
 
    /** export - output trained network weights from current net */
    public void exportNet(String exportFile) {
 
    }
 
}
 
import java.util.*;
import static java.lang.Math.*;
 
public class NetUnit {
 
    protected double bias = 0;
 
    protected double strength;
 
    protected double[] weights;
 
    protected double output;
 
    protected NeuralNet parentNet;
 
    public NetUnit (double t[], double bias, NeuralNet parentNet) {
	// initialize weights field
 
	weights = new double[t.length];
 
	for (int i = 0; i < t.length; i++) {
	    weights[i] = t[i];
	}
 
	this.bias = bias;
	this.parentNet = parentNet;
    }
 
    public double activation (double[] inputs, double alpha) {
        strength = NetMath.weightedSum(inputs, weights) + bias;
        output = NetMath.sigmoid(strength, alpha);
        return output;
    }
 
    protected void computeDeltas (double[] deltas, double [] pDelts, double alpha) {
	for (int i = 0; i < deltas.length; i++) {
	    deltas[i] = NetMath.sigmoidPrime(strength, alpha) * NetMath.weightedSum(pDelts, weights);
	}
    }
 
    public void updateWeights(double[] deltas) {
	for (int i = 0; i < weights.length; i++){
	    weights[i] += parentNet.learningRate * deltas[i] * output;
        }            
    }
 
 
}
 
import java.util.*;
import static java.lang.Math.*;
 
/** Stateless math helpers for the neural-net classes. */
public class NetMath {

	private NetMath() {} // utility class — not instantiable

	/** Logistic sigmoid with steepness alpha: 1 / (1 + e^(-alpha*t)).
		Bug fix: was 1 / (1 + e^t * alpha), which is not the logistic
		function and was inconsistent with sigmoidPrime's
		alpha*s*(1-s) derivative formula.
		@param t pre-activation strength
		@param alpha steepness parameter
		@return value in (0, 1), 0.5 at t == 0
	*/
	public static double sigmoid (double t, double alpha){
		return 1.0 / (1.0 + exp(-alpha * t));
	}

	/** Derivative of the sigmoid above: alpha * s * (1 - s) with s = sigmoid(t, alpha).
		@param t pre-activation strength
		@param alpha steepness parameter
		@return slope of the sigmoid at t
	*/
	public static double sigmoidPrime (double t, double alpha){
		double s = sigmoid(t, alpha); // compute once instead of twice
		return alpha * s * (1 - s);
	}

	/** Dot product of two equal-length vectors.
		Bug fix: was t[i] + c[i] (a plain sum), not the product a
		"weighted sum" requires.
		Precondition: t and c have the same length.
		@param t input vector
		@param c weight vector
		@return sum over i of t[i] * c[i]
	*/
	public static double weightedSum (double t[], double c[]) {
		double sum = 0;
		for (int i = 0; i < t.length; i++){
			sum += t[i] * c[i];
		}
		return sum;
	}


	/** Weighted sum of each row of t against the vector c.
		@param t matrix; each row has c's length
		@param c weight vector
		@return one weighted sum per row of t
	*/
	public static double[] deltaSum (double t[][], double c[]) {

		double x[] = new double[t.length];

		for(int i = 0; i < x.length; i++){
			x[i] = weightedSum(t[i], c);
		}

		return x;
	}

	/** Root of (outNodes * numPat) / t.
		Bug fix: the division was done in int arithmetic, truncating the
		ratio before the square root.
		NOTE(review): the ratio looks inverted for an RMS error
		(usually error-sum / (nodes * patterns)) — confirm against the
		training design before relying on this.
		@param t accumulated value to normalize
		@return sqrt of the (now floating-point) ratio
	*/
	public static double calcRMS (int t) {
		int outNodes = 5;
		int numPat = 6;

		return sqrt((double) (outNodes * numPat) / t);
	}

	/** Placeholder; this responsibility is moving to the BackProp class.
		@return 0 always
	*/
	public static double diffSum() {
		return 0;
	}
}
 
public class BackProp extends NeuralNet {
    protected static final double THRESHOLD = 0.01; // can come from training
 
    protected double alpha;
 
    public BackProp(int numberIn, int numberOut, int layers) {
	super(numberIn, numberOut, layers);
    }
 
    public void configureNet(double learningRate, double threshold) {
	this.learningRate = learningRate;
    }
 
    /** training - train netwotk on inputs and expected outputs
	@param inputs set of input vectors
	@param expected set of outputs corresponding to the inputs
	@param epochs number of interations to train
    */
    public void training(double[] inputs, double[] expected, int epochs) {
	for (int i = 0; i < inputs.length; i++) {
	    input[i] = inputs[i];
	}
 
	for (int epoch = 1; epoch <= epochs; epoch++) {
 
	}
	return;
    }
 
    /** BackProp implements the following algorithm:
	1.) initialize all weights to small random values x where 0 < x <= 1.0
	2.) assign the first input vector to the input units
	3.) propagate the signal forward through each layer of the net
	4.) compute the deltas at the output layer using calcRMS and sigmoid-prime (the error correction function)
	5.) backpropagate the error correction through each hidden layer of the net
	6.) update the weights for each layer
	7.) repeat steps 2 through 7 for each subsequent input pattern, accumulating the sum of all differnece vectors
	Continue training on each test set until either the maximum trials is reached, or the sum of all difference
	vectors is less than THRESHOLD, whichever comes first
 
	We should break down this algorithm into AT LEAST 2 methods. One will control the number of interations and
	test that we're above or below THRESHOLD, and call the second, which will iterate through steps 2-7 for
	each taining set and return the difference vectors.
    */
 
}
 
/** Extended Boltzmann machine variant — design stub, not yet implemented. */
public class ExtendedBM extends Boltzmann {

}
 
/** Boltzmann machine network — design stub, not yet implemented. */
public class Boltzmann extends NeuralNet {

}
 
/** Plain feed-forward network — design stub, not yet implemented. */
public class FeedForward extends NeuralNet {

}
 
/** Restricted Boltzmann machine — design stub, not yet implemented. */
public class RestrictedBM extends Boltzmann {

}
summer2015nn/design.txt · Last modified: 2015/06/12 15:12 by mccarpr0