Initial Release
Initial Release — with bugs
suquark committed Jan 23, 2017
1 parent 7e20866 commit 006c3f6
Showing 22 changed files with 2,232 additions and 0 deletions.
21 changes: 21 additions & 0 deletions Readme.md
@@ -0,0 +1,21 @@
# Modern ConvNetJS

This project reimplements [ConvNetJS](https://github.com/karpathy/convnetjs) in a modern style (ECMAScript 6, functional programming), making its code shorter, more readable for beginners, and easier to extend.

Of course, training a neural network in the browser will never be a workhorse, but it is useful for demonstrations and for building understanding.

It is still under development.

The following is a short introduction to ConvNetJS itself:

ConvNetJS is a JavaScript implementation of neural networks, together with nice browser-based demos. It currently supports:

- Common **Neural Network modules** (fully connected layers, non-linearities)
- Classification (SVM/Softmax) and Regression (L2) **cost functions**
- Ability to specify and train **Convolutional Networks** that process images
- An experimental **Reinforcement Learning** module, based on Deep Q Learning
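
A sketch of the intended API, based on the `Net` class in this commit (the input-layer fields and the `Vol` constructor follow the original ConvNetJS and are assumed here, since the layer implementations live under `layers/`):

```js
import { Net } from 'convnet';
import { Vol } from 'vol';

let net = new Net();
net.makeLayers([
  {type: 'input', out_sx: 1, out_sy: 1, out_depth: 2}, // a 2-dimensional data point
  {type: 'fc', num_neurons: 20, activation: 'relu'},   // 'relu' is desugared into its own layer
  {type: 'softmax', num_classes: 10}                    // an fc layer is inserted automatically
]);

let x = new Vol([0.5, -1.3]);  // 1x1x2 volume holding one input
let scores = net.forward(x);   // a Vol whose .w holds the class probabilities
let cls = net.getPrediction(); // index of the most likely class
```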

For much more information, see the main page at [convnetjs.com](http://convnetjs.com)

## License
MIT
64 changes: 64 additions & 0 deletions cnnutil.js
@@ -0,0 +1,64 @@

// contains various utility functions
var cnnutil = (function(exports){

// a Window stores the last `size` values
// and returns their average. Useful for keeping a
// running estimate of validation or training accuracy during SGD
var Window = function(size, minsize) {
this.v = [];
this.size = typeof(size)==='undefined' ? 100 : size;
this.minsize = typeof(minsize)==='undefined' ? 20 : minsize;
this.sum = 0;
};
Window.prototype = {
add: function(x) {
this.v.push(x);
this.sum += x;
if(this.v.length>this.size) {
var xold = this.v.shift();
this.sum -= xold;
}
},
get_average: function() {
if(this.v.length < this.minsize) return -1;
else return this.sum/this.v.length;
},
reset: function() {
this.v = [];
this.sum = 0;
}
};
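
// Example usage (illustrative sketch): track a running training accuracy.
//   var acc = new Window(100, 20);  // average the last 100 values; need at least 20
//   acc.add(1.0); acc.add(0.0);     // record per-example or per-batch accuracies
//   acc.get_average();              // -1 until `minsize` values have been added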

// returns the min and max of an array, together with their indices (and the range dv)
var maxmin = function(w) {
if(w.length === 0) { return {}; } // empty input: nothing to report

var maxv = w[0];
var minv = w[0];
var maxi = 0;
var mini = 0;
for(var i=1;i<w.length;i++) {
if(w[i] > maxv) { maxv = w[i]; maxi = i; }
if(w[i] < minv) { minv = w[i]; mini = i; }
}
return {maxi: maxi, maxv: maxv, mini: mini, minv: minv, dv:maxv-minv};
};

// returns a string representation of a float,
// truncated to d decimal digits
var f2t = function(x, d) {
  if (typeof d === 'undefined') { d = 5; }
  var dd = 1.0 * Math.pow(10, d);
  return '' + Math.floor(x*dd)/dd;
};
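
// Examples (illustrative sketches):
//   maxmin([3, 1, 4, 1, 5]);  // {maxi: 4, maxv: 5, mini: 1, minv: 1, dv: 4}
//   f2t(3.14159265, 3);       // '3.141' (floor-truncated, not rounded)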

exports = exports || {};
exports.Window = Window;
exports.maxmin = maxmin;
exports.f2t = f2t;
return exports;

})(typeof module != 'undefined' && module.exports); // add exports to module.exports if in node.js


138 changes: 138 additions & 0 deletions convnet.js
@@ -0,0 +1,138 @@
import { Vol } from 'vol';
import { assert, indexOfMax } from 'util';
import { get_layer } from 'layers/layer_export';

// Net manages a set of layers.
// Constraints for now: layers form a simple linear chain; the first layer
// must be an input layer and the last a cost (loss) layer.

class Net {
constructor(options) {
this.layers = [];
}
// takes a list of layer definitions and creates the network layer objects
makeLayers(defs) {
// a few sanity checks
assert(defs.length >= 2, 'Error! At least one input layer and one loss layer are required.');
assert(defs[0].type === 'input', 'Error! First layer must be the input layer, to declare size of inputs');

// desugar layer_defs: automatically insert activation, dropout, etc. layers

var new_defs = [];
defs.forEach(function(def) {
if (def.type==='softmax' || def.type==='svm') {
// add an fc layer here, there is no reason the user should
// have to worry about this and we almost always want to
new_defs.push({type:'fc', num_neurons: def.num_classes});
}

if (def.type==='regression') {
// add an fc layer here, there is no reason the user should
// have to worry about this and we almost always want to
new_defs.push({type:'fc', num_neurons: def.num_neurons});
}

if((def.type==='fc' || def.type==='conv') && typeof(def.bias_pref) === 'undefined') {
def.bias_pref = 0.0;
if(typeof def.activation !== 'undefined' && def.activation === 'relu') {
def.bias_pref = 0.1; // relus like a bit of positive bias to get gradients early
// otherwise it's technically possible that a relu unit will never turn on (by chance)
// and will never get any gradient and never contribute any computation. Dead relu.
}
}
new_defs.push(def);
if (typeof def.activation !== 'undefined') {
if (def.activation==='relu') { new_defs.push({type:'relu'}); }
else if (def.activation==='sigmoid') { new_defs.push({type:'sigmoid'}); }
else if (def.activation==='tanh') { new_defs.push({type:'tanh'}); }
else if (def.activation==='maxout') {
// create maxout activation, and pass along group size, if provided
new_defs.push({type:'maxout', group_size: def.group_size || 2});
}
else { console.log('ERROR unsupported activation ' + def.activation); }
}
if (typeof def.drop_prob !== 'undefined' && def.type !== 'dropout') {
new_defs.push({type:'dropout', drop_prob: def.drop_prob});
}
});
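
// e.g. (illustrative) {type:'fc', num_neurons:20, activation:'relu', drop_prob:0.5}
// desugars into three defs: the fc layer itself, {type:'relu'}, {type:'dropout', drop_prob:0.5}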

defs = new_defs;

// create the layers
this.layers = [];
for(var i = 0; i < defs.length; i++) {
let def = defs[i];
if (i > 0) {
let prev = this.layers[i - 1];
def.in_sx = prev.out_sx;
def.in_sy = prev.out_sy;
def.in_depth = prev.out_depth;
}
this.layers.push(get_layer(def));
}
}

// forward prop the network.
// The trainer class passes is_training = true, but when this function is
// called from outside (not from the trainer), it defaults to prediction mode
forward(V, is_training=false) {
return this.layers.reduce((input, layer) => layer.forward(input, is_training), V);
}
// (the reduce above is equivalent to passing the activation through each layer in turn)


getCostLoss(V, y) {
this.forward(V, false);
let loss = this.outputLayer().backward(y);
return loss;
}

// backprop: compute gradients wrt all parameters
backward(y) {
// walk the layers in reverse (a plain loop rather than reduceRight, since only the loss layer takes y)
var N = this.layers.length;
var loss = this.layers[N-1].backward(y); // last layer assumed to be loss layer
for(var i = N - 2; i >= 0; i--) { // first layer assumed input
this.layers[i].backward();
}
return loss;
}

getParamsAndGrads() {
// accumulate parameters and gradients for the entire network
return this.layers.reduce((acc, cur) => acc.concat(cur.getParamsAndGrads()), []);
// (equivalent to concatenating each layer's params/grads into one flat list)
}
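
// Minimal manual SGD step (illustrative sketch; a Trainer class would normally wrap this):
//   net.forward(x, true);           // forward pass in training mode
//   net.backward(y);                // backprop from the loss layer
//   net.getParamsAndGrads().forEach(pg => {
//     for (let j = 0; j < pg.params.length; j++) {
//       pg.params[j] -= learning_rate * pg.grads[j];
//     }
//   });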

getPrediction() {
// this is a convenience function for returning the argmax
// prediction, assuming the last layer of the net is a softmax
let S = this.outputLayer();
assert(S.layer_type === 'softmax', 'getPrediction function assumes softmax as last layer of the net!');
// return index of the class with highest class probability
let p = S.out_act.w;
return indexOfMax(p);
}

compile(options) { this.layers.forEach(function(l) { l.compile(options); }); }

toJSON() { return {'layers': this.layers.map(x => x.toJSON())}; }

fromJSON(json) {
// NOTE: layer .fromJSON() returns undefined, so each layer's JSON object is passed directly to get_layer as its definition (opts)
this.layers = json.layers.map(Lj => get_layer(Lj));
}
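
// Round-trip sketch (illustrative): serialize and restore a net.
//   let json = JSON.stringify(net.toJSON());
//   let net2 = new Net(); net2.fromJSON(JSON.parse(json));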

outputLayer() { return this.layers[this.layers.length - 1]; }

}


export { Net };