OK, turing.

<- leave blank

Wed Oct 17 23:49:20 EDT 2018

ip=10.30.67.233 ipmask=255.255.254.0 ipgw=10.30.67.254
	sys=chiri
	dom=chiri.student.iastate.edu
	dns=129.186.1.200
	dns=129.186.142.200
	dns=129.186.140.200
	dns=129.186.78.200
	ntp=129.186.1.244
	ntp=129.186.87.244

ipbootinfo=
	fs=10.30.67.10
	auth=10.30.67.233
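
(sanity check: ndb/query sys chiri should print this entry back; see ndb(8))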


Wed Oct 17 23:46:14 EDT 2018
#
# files comprising the database, use as many as you like, see ndb(6)
#
database=
	file=/net/ndb
	file=/lib/ndb/local
	file=/lib/ndb/common

#
# entries defining the dns root.  these will be overridden by any
# authentic info obtained from the root.
#
dom=
	ns=A.ROOT-SERVERS.NET
	ns=B.ROOT-SERVERS.NET
	ns=C.ROOT-SERVERS.NET
	ns=D.ROOT-SERVERS.NET
	ns=E.ROOT-SERVERS.NET
	ns=F.ROOT-SERVERS.NET
	ns=G.ROOT-SERVERS.NET
	ns=H.ROOT-SERVERS.NET
	ns=I.ROOT-SERVERS.NET
	ns=J.ROOT-SERVERS.NET
	ns=K.ROOT-SERVERS.NET
	ns=L.ROOT-SERVERS.NET
	ns=M.ROOT-SERVERS.NET

#
# because the public demands the name localsource
#
ip=127.0.0.1 sys=localhost dom=localhost

# example: adjust to fit your network
#auth=cirno authdom=9front
#ipnet=9front ip=192.168.0.0 ipmask=255.255.255.0
# ipgw=192.168.0.1
# dns=192.168.0.1
# auth=cirno
# dnsdom=9front
# cpu=cirno
# smtp=cirno
#
#ip=192.168.0.99 sys=cirno dom=cirno.9front ether=112233445566


ipnet=zetsubou ip=10.0.0.0 ipmask=255.255.254.0
	auth=chiri
	cpu=chiri
	authdom=ufo
	fs=kiri

sys=chiri ether=00a0989e96e7

sys=kiri ether=00a09813037



Wed Oct 17 17:43:04 EDT 2018
#include <stdlib.h>
#include <stdarg.h>
#include <stdio.h>
#include <math.h>
#include <time.h>

typedef struct Ann Ann;
typedef struct Layer Layer;
typedef struct Neuron Neuron;
typedef struct Weights Weights;

struct Ann {
	int n;
	double rate;
	Layer **layers;
	Weights **weights;
	Weights **deltas;
	void *user;
	void *internal;
};

struct Layer {
	int n;
	Neuron **neurons;
};

struct Neuron {
	double (*activation)(Neuron*);
	double (*gradient)(Neuron*);
	double steepness;
	double value;
	double sum;
	void *user;
	void *internal;
};

struct Weights {
	int inputs;
	int outputs;
	double **values;
};

double activation_sigmoid(Neuron*);
double gradient_sigmoid(Neuron*);
double activation_tanh(Neuron*);
double gradient_tanh(Neuron*);
double activation_leaky_relu(Neuron*);
double gradient_leaky_relu(Neuron*);

Ann *anncreate(int, ...);
Layer *layercreate(int, double(*)(Neuron*), double(*)(Neuron*));
Neuron *neuroninit(Neuron*, double (*)(Neuron*), double (*)(Neuron*), double);
Neuron *neuroncreate(double (*)(Neuron*), double (*)(Neuron*), double);
Weights *weightsinitrand(Weights*);
Weights *weightsinitrandscale(Weights*, double);
Weights *weightsinitdouble(Weights*, double);
Weights *weightsinitdoubles(Weights*, double*);
Weights *weightscreate(int, int, int);
double *annrun(Ann*, double*);
double anntrain(Ann*, double*, double*);

typedef struct Adam Adam;
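
/* first/second hold per-weight running estimates of the gradient mean
 * and uncentered variance (the m and v vectors of the Adam paper) */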

struct Adam {
	double rate;
	double beta1;
	double beta2;
	Weights **first;
	Weights **second;
	double epsilon;
	int timestep;
};

double anntrain_adam(Ann*, double*, double*);

double
activation_sigmoid(Neuron *in)
{
	return 1.0/(1.0+exp(-in->sum));
}
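
/* note: the gradients below are written in terms of the neuron's cached
 * output (in->value) where possible, so annrun must have stored the
 * activation in value before a gradient is evaluated */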

double
gradient_sigmoid(Neuron *in)
{
	double y = in->value;
	return y * (1.0 - y);
}

double
activation_tanh(Neuron *in)
{
	return tanh(in->sum);
}

double
gradient_tanh(Neuron *in)
{
	return 1.0 - in->value*in->value;
}

double
activation_leaky_relu(Neuron *in)
{
	if (in->sum > 0)
		return in->sum;
	return in->sum * 0.01;
}

double
gradient_leaky_relu(Neuron *in)
{
	if (in->sum > 0)
		return 1.0;
	return 0.01;
}

Weights*
weightsinitdoubles(Weights *in, double *init)
{
	int i, o;

	for (i = 0; i <= in->inputs; i++)
		for (o = 0; o < in->outputs; o++)
			in->values[i][o] = init[o];

	return in;
}

Weights*
weightsinitdouble(Weights *in, double init)
{
	int i, o;

	for (i = 0; i <= in->inputs; i++)
		for (o = 0; o < in->outputs; o++)
			in->values[i][o] = init;

	return in;
}

Weights*
weightsinitrandscale(Weights *in, double scale)
{
	int i, o;
	static int seeded = 0;

	/* seed once: calling srand(time(0)) on every invocation restarts
	 * the sequence and yields identical matrices within the same second */
	if (!seeded) {
		srand(time(0));
		seeded = 1;
	}
	for (i = 0; i <= in->inputs; i++)
		for (o = 0; o < in->outputs; o++)
			in->values[i][o] = (((double)rand()/RAND_MAX) - 0.5) *
			scale;

	return in;
}

Weights*
weightsinitrand(Weights *in)
{
	weightsinitrandscale(in, 0.2);
	return in;
}

Neuron*
neuroninit(Neuron *in, double (*activation)(Neuron*), double (*gradient)(Neuron*),
double steepness)
{
	in->activation = activation;
	in->gradient = gradient;
	in->steepness = steepness;
	in->value = 1.0;
	in->sum = 0;
	return in;
}

Neuron*
neuroncreate(double (*activation)(Neuron*), double (*gradient)(Neuron*), double
steepness)
{
	Neuron *ret = calloc(1, sizeof(Neuron));
	neuroninit(ret, activation, gradient, steepness);
	return ret;
}

Layer*
layercreate(int num_neurons, double(*activation)(Neuron*),
double(*gradient)(Neuron*))
{
	Layer *ret = calloc(1, sizeof(Layer));
	int i;

	ret->n = num_neurons;
	ret->neurons = calloc(num_neurons+1, sizeof(Neuron*));
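	/* n+1 entries: the extra neuron is the bias, left at its initial
	 * value of 1.0 and read when updating the bias weight row */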
	for (i = 0; i <= ret->n; i++) {
		ret->neurons[i] = neuroncreate(activation, gradient, 1.0);
	}
	return ret;
}

Weights*
weightscreate(int inputs, int outputs, int initialize)
{
	int i;
	Weights *ret = calloc(1, sizeof(Weights));
	ret->inputs = inputs;
	ret->outputs = outputs;
	ret->values = calloc(inputs+1, sizeof(double*));
	for (i = 0; i <= inputs; i++)
		ret->values[i] = calloc(outputs, sizeof(double));
	if (initialize)
		weightsinitrand(ret);
	return ret;
}

Ann*
anncreate(int num_layers, ...)
{
	Ann *ret = calloc(1, sizeof(Ann));
	va_list args;
	int arg;
	int i;

	va_start(args, num_layers);
	ret->n = num_layers;
	ret->rate = 0.25;
	ret->layers = calloc(num_layers, sizeof(Layer*));
	ret->weights = calloc(num_layers-1, sizeof(Weights*));
	ret->deltas = calloc(num_layers-1, sizeof(Weights*));

	for (i = 0; i < num_layers; i++) {
		arg = va_arg(args, int);
		if (arg < 0 || arg > 1000000)
			arg = 0;
		ret->layers[i] = layercreate(arg, activation_leaky_relu,
		gradient_leaky_relu);
		if (i > 0) {
			ret->weights[i-1] =
			weightscreate(ret->layers[i-1]->n,
			ret->layers[i]->n, 1);
			ret->deltas[i-1] =
			weightscreate(ret->layers[i-1]->n,
			ret->layers[i]->n, 0);
		}
	}

	va_end(args);

	return ret;
}

double*
annrun(Ann *ann, double *input)
{
	int l, i, o;
	int outputs = ann->layers[ann->n - 1]->n;
	double *ret = calloc(outputs, sizeof(double));
	Neuron *O;

	for (i = 0; i < ann->layers[0]->n; i++)
		ann->layers[0]->neurons[i]->value = input[i];

	for (l = 1; l < ann->n; l++) {
		for (o = 0; o < ann->layers[l]->n; o++) {
			O = ann->layers[l]->neurons[o];
			O->sum =
			ann->weights[l-1]->values[ann->weights[l-1]->inputs][o];
			// bias
			for (i = 0; i < ann->layers[l-1]->n; i++)
				O->sum +=
				ann->layers[l-1]->neurons[i]->value *
				ann->weights[l-1]->values[i][o];
			O->value = O->activation(O);
		}
	}

	for (o = 0; o < outputs; o++)
		ret[o] = ann->layers[ann->n - 1]->neurons[o]->value;

	return ret;
}
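
/* annrun returns a freshly calloc'd array; the caller owns and frees it
 * (anntrain frees the copy it receives as error) */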

double
anntrain(Ann *ann, double *inputs, double *outputs)
{
	double *error = annrun(ann, inputs);
	double ret = 0.0;
	int noutputs = ann->layers[ann->n-1]->n;
	double acc, sum;
	int o, i, w, n;
	Neuron *O, *I;
	Weights *W, *D, *D2;

	for (o = 0; o < noutputs; o++) {
		// error = outputs[o] - result
		error[o] -= outputs[o];
		error[o] = -error[o];
		ret += pow(error[o], 2.0) * 0.5;
	}
	D = ann->deltas[ann->n-2];
	weightsinitdoubles(D, error);
	for (i = 0; i < (ann->n-2); i++) {
		D = ann->deltas[i];
		weightsinitdouble(D, 1.0);
	}
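	// seeding: the output layer's deltas start as the raw error; hidden
	// layers start at 1.0 and accumulate their factors below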

	// backpropagate MSE
	D2 = ann->deltas[ann->n-2];
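	// D2 lags one layer behind: on the first (output) pass D2 == D, so
	// sum stays 1.0 and the seeded error is scaled only by the gradient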
	for (w = ann->n-2; w >= 0; w--) {
		D = ann->deltas[w];

		for (o = 0; o < ann->layers[w+1]->n; o++) {
			O = ann->layers[w+1]->neurons[o];
			acc = O->gradient(O) * O->steepness;
			sum = 1.0;
			if (D2 != D) {
				W = ann->weights[w + 1];
				sum = 0.0;
				for (n = 0; n < D2->outputs; n++)
					sum += D2->values[o][n] *
					W->values[o][n];
			}
			for (i = 0; i <= ann->layers[w]->n; i++) {
				D->values[i][o] *= acc * sum;
			}
		}

		D2 = D;
	}

	// update weights
	for (w = 0; w < ann->n-1; w++) {
		W = ann->weights[w];
		D = ann->deltas[w];

		for (i = 0; i <= W->inputs; i++) {
			I = ann->layers[w]->neurons[i];
			for (o = 0; o < W->outputs; o++) {
				W->values[i][o] += D->values[i][o] *
				ann->rate * I->value;
			}
		}
	}

	free(error);
	return ret;
}

Ann*
adaminit(Ann *ann)
{
	int i;
	Adam *I = calloc(1, sizeof(Adam));

	I->rate = 0.001;
	I->beta1 = 0.75;
	I->beta2 = 0.9;
	I->epsilon = 10e-8;
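	/* note: beta1/beta2 here are lower than the 0.9/0.999 defaults of
	 * the Adam paper, and 10e-8 is 1e-7, not the usual 1e-8 */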
	I->timestep = 0;
	I->first = calloc(ann->n-1, sizeof(Weights*));
	I->second = calloc(ann->n-1, sizeof(Weights*));

	for (i = 0; i < (ann->n-1); i++) {
		I->first[i] = weightscreate(ann->layers[i]->n,
		ann->layers[i+1]->n, 0);
		I->second[i] = weightscreate(ann->layers[i]->n,
		ann->layers[i+1]->n, 0);
	}

	ann->internal = I;

	return ann;
}

double
anntrain_adam(Ann *ann, double *inputs, double *outputs)
{
	double *error = annrun(ann, inputs);
	double ret = 0.0;
	int noutputs = ann->layers[ann->n-1]->n;
	double acc, sum, m, v;
	int o, i, w, n;
	Neuron *O, *I;
	Weights *W, *D, *D2, *M, *V;
	Adam *annI;

	if (ann->internal == 0)
		adaminit(ann);
	annI = ann->internal;
	annI->timestep++;

	for (o = 0; o < noutputs; o++) {
		// error = outputs[o] - result
		error[o] -= outputs[o];
		error[o] = -error[o];
		ret += pow(error[o], 2.0) * 0.5;
	}
	D = ann->deltas[ann->n-2];
	weightsinitdoubles(D, error);
	for (i = 0; i < (ann->n-2); i++) {
		D = ann->deltas[i];
		weightsinitdouble(D, 1.0);
	}

	// backpropagate MSE
	D2 = ann->deltas[ann->n-2];
	for (w = ann->n-2; w >= 0; w--) {
		D = ann->deltas[w];
		M = annI->first[w];
		V = annI->second[w];

		for (o = 0; o < ann->layers[w+1]->n; o++) {
			O = ann->layers[w+1]->neurons[o];
			acc = O->gradient(O) * O->steepness;
			sum = 1.0;
			if (D2 != D) {
				W = ann->weights[w+1];
				sum = 0.0;
				for (n = 0; n < D2->outputs; n++)
					sum += D2->values[o][n] *
					W->values[o][n];
			}
			for (i = 0; i <= ann->layers[w]->n; i++) {
				D->values[i][o] *= acc * sum;
				M->values[i][o] *= annI->beta1;
				M->values[i][o] += (1.0 - annI->beta1) *
				D->values[i][o];
				V->values[i][o] *= annI->beta2;
				V->values[i][o] += (1.0 - annI->beta2) *
				D->values[i][o] * D->values[i][o];
			}
		}

		D2 = D;
	}

	// update weights
	for (w = 0; w < ann->n-1; w++) {
		W = ann->weights[w];
		M = annI->first[w];
		V = annI->second[w];

		for (i = 0; i <= W->inputs; i++) {
			I = ann->layers[w]->neurons[i];
			for (o = 0; o < W->outputs; o++) {
				m = M->values[i][o] / (1.0 -
				pow(annI->beta1, annI->timestep));
				v = V->values[i][o] / (1.0 -
				pow(annI->beta2, annI->timestep));
				W->values[i][o] += (m / (sqrt(v) +
				annI->epsilon)) * annI->rate * I->value;
			}
		}
	}

	free(error);
	return ret;
}

int
main()
{
	int i, counter = 0;
	Ann *test = anncreate(3, 2, 16, 1);
	double inputs[4][2] = { { 1.0, 1.0 }, {1.0, 0.0}, {0.0, 1.0}, {0.0, 0.0}};
	double outputs[4] = { 0.0, 1.0, 1.0, 0.0 };
	double error = 1000;

	printf("testing anntrain()\n");
	while (error >= 0.001) {
		error = 0;
		for (i = 0; i < 4; i++)
			error += anntrain(test, inputs[i], &outputs[i]);
		counter++;
		if (counter % 10000 == 1)
			printf("error: %f\n", error);
	}
	printf("error: %f, done after %d epochs\n", error, counter);

	error = 1000;
	counter = 0;
	for (i = test->n-2; i >= 0; i--)
		weightsinitrand(test->weights[i]);

	printf("testing anntrain_adam()\n");
	while (error >= 0.001) {
		error = 0;
		for (i = 0; i < 4; i++)
			error += anntrain_adam(test, inputs[i], &outputs[i]);
		counter++;
		if (counter % 10000 == 1)
			printf("error: %f\n", error);
	}
	printf("error: %f, done after %d epochs\n", error, counter);
}


Wed Oct 17 14:10:21 EDT 2018
In Praise of Electronically Monitoring Employees

Wed Oct 17 13:45:39 EDT 2018
in function didtype of sdiahci.c

; hg diff sys/src/9/pc/sdiahci.c
diff -r d6cb2bd03583 sys/src/9/pc/sdiahci.c
--- a/sys/src/9/pc/sdiahci.c Sun Sep 09 15:38:53 2018 +0200
+++ b/sys/src/9/pc/sdiahci.c Wed Oct 17 10:37:48 2018 -0700

@@ -2087,9 +2092,14 @@
			return Tich; /* pch */
		break;
	case 0x1002:
+ print("sdiahci didtype p->vid %hux, p->did %hux, p->ccru %ux,
p->ccrp %ux\n", p->vid, p->did, p->ccru, p->ccrp);
		if(p->ccru == 1 || p->ccrp != 1)
- if(p->did == 0x4380 || p->did == 0x4390)
- sbsetupahci(p);
+ if(p->did == 0x4380 || p->did == 0x4390)
+ sbsetupahci(p);
+ if(p->did == 0x4391 || p->did == 0x4394) {
+ type = Tahci;
+ break;
+ }
		type = Tsb600;
		break;
	case 0x1106:


Wed Oct 17 12:05:24 EDT 2018
in function didtype of 9/pc/sdiahci.c
	case 0x1002:
		if(p->ccru == 1 || p->ccrp != 1)
		if(p->did == 0x4380 || p->did == 0x4390)
			sbsetupahci(p);
		type = Tsb600;
		break;

changed to
	case 0x1002:
		if(p->ccru == 1 || p->ccrp != 1)
		if(p->did == 0x4380 || p->did == 0x4390) {
			sbsetupahci(p);
			type = Tsb600;
		} else if (p->did == 0x4391) {
			sbsetupahci(p);
			type = Tahci;
		}
		break;

This change is needed because using Tsb600 caps the AHCI maximum in function
ahcibio at 255.  That limit is specific to sb600 SATA controllers (judging
from the Linux source) and does not apply to sb700 SATA controllers.

My SATA controller is the one below, and the 8192 maximum works fine:
0.17.0: disk 01.06.01 1002/4391 11 0:0000b001 16 1:0000a001 16 2:00009001 16
3:00008001 16 4:00007001 16 5:fdcffc00 1024
	Advanced Micro Devices, Inc.  [AMD/ATI] SB7x0/SB8x0/SB9x0 SATA Controller
	[AHCI mode]
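
For reference, a minimal sketch of the kind of per-controller clamp
described above (illustrative only: the names and structure are
assumptions, not the actual sdiahci.c code):

enum { Tsb600, Tahci };	/* stand-ins for the driver's controller types */

/* sb600 parts are capped at 255 sectors per transfer; sb700-class
 * AHCI controllers are fine with the larger 8192 */
static int
maxsectors(int type)
{
	if(type == Tsb600)
		return 255;
	return 8192;
}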


Wed Oct 17 11:30:46 EDT 2018
diff --git a/rc/bin/man b/rc/bin/man
--- a/rc/bin/man
+++ b/rc/bin/man
@@ -57,6 +57,7 @@ fn page {


 search=yes
+fpath=no
 while(~ $d 0) {
	if(~ $#* 0) {
		echo 'Usage: man [-bntpPSw] [0-9] [0-9] ...  name1 name2 ...' >[1=2]
@@ -69,6 +70,7 @@ while(~ $d 0) {
	if not
		switch($1) {
		case -b ; cmd=b ; shift
+		case -f ; fpath=yes ; shift
		case -n ; cmd=n ; shift
		case -P ; cmd=P ; shift
		case -p ; cmd=p ; shift
@@ -85,7 +87,7 @@ ix=$S/$sec/INDEX
 if(~ $#* 1) pat='^'^$1^' '
 if not pat='^('^`{echo $* | sed 's/ /|/g'}^') '
 fils=()
-if(~ $search yes)
+if(~ $search yes && ~ $fpath no)
 for(i in $S/$sec){
	if(/bin/test -f $i/INDEX){
		try=`{grep -i $pat $i/INDEX | sed 's/^[^ ]* //' | sort -u}
@@ -94,7 +96,7 @@ for(i in $S/$sec){
	}
 }
 # bug: should also do following loop if not all pages found
-if(~ $#fils 0) {
+if(~ $#fils 0 && ~ $fpath no) {
	# nothing in INDEX.  try for file of given name
	for(i) {
		if(~ $i intro) i=0intro
@@ -104,10 +106,14 @@ if(~ $#fils 0) {
				fils=($fils $try)
		}
	}
-	if(~ $#fils 0) {
-		echo 'man: no manual page' >[1=2]
-		exit 'no man'
-	}
+}
+if(~ $fpath yes)
+	for(i)
+		if(/bin/test -f $i)
+			fils=($fils $i)
+if(~ $#fils 0) {
+	echo 'man: no manual page' >[1=2]
+	exit 'no man'
 }
 switch($cmd) {
 case p; out=proof
diff --git a/sys/man/1/man b/sys/man/1/man
--- a/sys/man/1/man
+++ b/sys/man/1/man
@@ -4,7 +4,7 @@ man, lookman, sig \- print or find pages
 .SH SYNOPSIS
 .B man
 [
-.B -bnpPStw
+.B -bfnpPStw
 ]
 [
 .I section ...
@@ -66,6 +66,12 @@ on the specified man pages.
 Do not search the manual indices for the names.
 Only print pages whose file names match the names.
 .TP
+.B -f
+Instead of looking for
+.I title
+in the manual, treat it as a path to the man
+page source file.
+.TP
 .B -t
 Run
 .IR troff (1)
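
With this, a page can be rendered straight from its source file, e.g.
man -f /sys/man/1/man, instead of going through the indices.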


Wed Oct 17 06:33:18 EDT 2018
interface Groupoid g where
	(<+>) : g -> g -> g

IsLeftUnit : Groupoid g => g -> Type
IsLeftUnit s = (e : g) -> (s <+> e = e)

IsRightUnit : Groupoid g => g -> Type
IsRightUnit s = (e : g) -> (e <+> s = e)

IsUnit : Groupoid g => g -> Type
IsUnit s = (IsRightUnit s, IsLeftUnit s)

[GNatWithAdd] Groupoid Nat where
	(<+>) = (+)

interface Groupoid g => Semigroup g where
	isAssociative : (a,b,c : g) -> (a <+> (b <+> c)) = ((a <+> b) <+> c)

ZeroIsRightUnit : IsRightUnit @{GNatWithAdd} Z
ZeroIsRightUnit = plusZeroRightNeutral

ZeroIsLeftUnit : IsLeftUnit @{GNatWithAdd} Z
ZeroIsLeftUnit = plusZeroLeftNeutral

ZeroIsUnit : IsUnit @{GNatWithAdd} Z
ZeroIsUnit = (ZeroIsRightUnit, ZeroIsLeftUnit)

[SGNatWithAdd] Main.Semigroup Nat @{GNatWithAdd} where
	isAssociative = plusAssociative

interface Main.Semigroup g => UnitSemigroup g where
	hasUnit : DPair g $ \s => IsUnit s
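
-- a UnitSemigroup Nat instance would carry the unit as evidence, e.g.
-- hasUnit = (Z ** ZeroIsUnit) under the named implementations above
-- (untested sketch)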


Tue Oct 16 23:17:49 EDT 2018
#include <stdlib.h>
#include <stdarg.h>
#include <stdio.h>
#include <math.h>
#include <time.h>

typedef struct Ann Ann;
typedef struct Layer Layer;
typedef struct Neuron Neuron;
typedef struct Weights Weights;

struct Ann {
	int n;
	double rate;
	double beta1;
	double beta2;
	double epsilon;
	int timestep;
	Layer **layers;
	Weights **weights;
	Weights **deltas;
	Weights **first;
	Weights **second;
};

struct Layer {
	int n;
	Neuron **neurons;
};

struct Neuron {
	double (*activation)(Neuron*);
	double (*gradient)(Neuron*);
	double steepness;
	double value;
	double sum;
};

struct Weights {
	int inputs;
	int outputs;
	double **values;
};

double activation_sigmoid(Neuron*);
double gradient_sigmoid(Neuron*);
double activation_tanh(Neuron*);
double gradient_tanh(Neuron*);
double activation_leaky_relu(Neuron*);
double gradient_leaky_relu(Neuron*);

Ann *anncreate(int, ...);
Layer *layercreate(int, double(*)(Neuron*), double(*)(Neuron*));
Neuron *neuroninit(Neuron*, double (*)(Neuron*), double (*)(Neuron*), double);
Neuron *neuroncreate(double (*)(Neuron*), double (*)(Neuron*), double);
Weights *weightsinitrand(Weights*);
Weights *weightsinitrandscale(Weights*, double);
Weights *weightsinitdouble(Weights*, double);
Weights *weightsinitdoubles(Weights*, double*);
Weights *weightscreate(int, int, int);
double *annrun(Ann*, double*);
double anntrain(Ann*, double*, double*);

double
activation_sigmoid(Neuron *in)
{
	return 1.0/(1.0+exp(-in->sum));
}

double
gradient_sigmoid(Neuron *in)
{
	double y = in->value;
	return y * (1.0 - y);
}

double
activation_tanh(Neuron *in)
{
	return tanh(in->sum);
}

double
gradient_tanh(Neuron *in)
{
	return 1.0 - in->value*in->value;
}

double
activation_leaky_relu(Neuron *in)
{
	if (in->sum > 0)
		return in->sum;
	return in->sum * 0.01;
}

double
gradient_leaky_relu(Neuron *in)
{
	if (in->sum > 0)
		return 1.0;
	return 0.01;
}

Weights*
weightsinitdoubles(Weights *in, double *init)
{
	int i, o;

	for (i = 0; i <= in->inputs; i++)
		for (o = 0; o < in->outputs; o++)
			in->values[i][o] = init[o];

	return in;
}

Weights*
weightsinitdouble(Weights *in, double init)
{
	int i, o;

	for (i = 0; i <= in->inputs; i++)
		for (o = 0; o < in->outputs; o++)
			in->values[i][o] = init;

	return in;
}

Weights*
weightsinitrandscale(Weights *in, double scale)
{
	int i, o;
	static int seeded = 0;

	/* seed once: calling srand(time(0)) on every invocation restarts
	 * the sequence and yields identical matrices within the same second */
	if (!seeded) {
		srand(time(0));
		seeded = 1;
	}
	for (i = 0; i <= in->inputs; i++)
		for (o = 0; o < in->outputs; o++)
			in->values[i][o] = (((double)rand()/RAND_MAX) - 0.5) *
			scale;

	return in;
}

Weights*
weightsinitrand(Weights *in)
{
	weightsinitrandscale(in, 1.0);
	return in;
}

Neuron*
neuroninit(Neuron *in, double (*activation)(Neuron*), double (*gradient)(Neuron*),
double steepness)
{
	in->activation = activation;
	in->gradient = gradient;
	in->steepness = steepness;
	in->value = 1.0;
	in->sum = 0;
	return in;
}

Neuron*
neuroncreate(double (*activation)(Neuron*), double (*gradient)(Neuron*), double
steepness)
{
	Neuron *ret = calloc(1, sizeof(Neuron));
	neuroninit(ret, activation, gradient, steepness);
	return ret;
}

Layer*
layercreate(int num_neurons, double(*activation)(Neuron*),
double(*gradient)(Neuron*))
{
	Layer *ret = calloc(1, sizeof(Layer));
	int i;

	ret->n = num_neurons;
	ret->neurons = calloc(num_neurons+1, sizeof(Neuron*));
	for (i = 0; i <= ret->n; i++) {
		ret->neurons[i] = neuroncreate(activation, gradient, 1.0);
	}
	return ret;
}

Weights*
weightscreate(int inputs, int outputs, int initialize)
{
	int i;
	Weights *ret = calloc(1, sizeof(Weights));
	ret->inputs = inputs;
	ret->outputs = outputs;
	ret->values = calloc(inputs+1, sizeof(double*));
	for (i = 0; i <= inputs; i++)
		ret->values[i] = calloc(outputs, sizeof(double));
	if (initialize)
		weightsinitrand(ret);
	else
		weightsinitdouble(ret, 0.0);
	return ret;
}

Ann*
adaminit(Ann *ret)
{
	int i;

	ret->beta1 = 0.9;
	ret->beta2 = 0.999;
	ret->epsilon = 10e-8;
	ret->first = calloc(ret->n-1, sizeof(Weights*));
	ret->second = calloc(ret->n-1, sizeof(Weights*));

	for (i = 0; i < (ret->n-1); i++) {
		ret->first[i] = weightscreate(ret->layers[i]->n,
		ret->layers[i+1]->n, 0);
		ret->second[i] = weightscreate(ret->layers[i]->n,
		ret->layers[i+1]->n, 0);
	}

	ret->rate = 0.001;

	return ret;
}

Ann*
anncreate(int num_layers, ...)
{
	Ann *ret = calloc(1, sizeof(Ann));
	va_list args;
	int arg;
	int i;

	va_start(args, num_layers);
	ret->n = num_layers;
	ret->rate = 0.25;
	ret->layers = calloc(num_layers, sizeof(Layer*));
	ret->weights = calloc(num_layers-1, sizeof(Weights*));
	ret->deltas = calloc(num_layers-1, sizeof(Weights*));
	ret->timestep = 0;
	ret->first = NULL;
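	/* first == NULL selects plain gradient descent; calling adaminit
	 * allocates the moment buffers and switches anntrain to Adam updates */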

	for (i = 0; i < num_layers; i++) {
		arg = va_arg(args, int);
		if (arg < 0 || arg > 1000000)
			arg = 0;
		ret->layers[i] = layercreate(arg, activation_leaky_relu,
		gradient_leaky_relu);
		if (i > 0) {
			ret->weights[i-1] =
			weightscreate(ret->layers[i-1]->n,
			ret->layers[i]->n, 1);
			ret->deltas[i-1] =
			weightscreate(ret->layers[i-1]->n,
			ret->layers[i]->n, 0);
		}
	}
	va_end(args);

	return ret;
}

double*
annrun(Ann *ann, double *input)
{
	int l, i, o;
	int outputs = ann->layers[ann->n - 1]->n;
	double *ret = calloc(outputs, sizeof(double));
	Neuron *O;

	for (i = 0; i < ann->layers[0]->n; i++)
		ann->layers[0]->neurons[i]->value = input[i];

	for (l = 1; l < ann->n; l++) {
		for (o = 0; o < ann->layers[l]->n; o++) {
			O = ann->layers[l]->neurons[o];
			O->sum =
			ann->weights[l-1]->values[ann->weights[l-1]->inputs][o];
			// bias
			for (i = 0; i < ann->layers[l-1]->n; i++)
				O->sum +=
				ann->layers[l-1]->neurons[i]->value *
				ann->weights[l-1]->values[i][o];
			O->value = O->activation(O);
		}
	}

	for (o = 0; o < outputs; o++)
		ret[o] = ann->layers[ann->n - 1]->neurons[o]->value;

	return ret;
}

double
anntrain(Ann *ann, double *inputs, double *outputs)
{
	double *error = annrun(ann, inputs);
	double ret = 0.0;
	int noutputs = ann->layers[ann->n-1]->n;
	double acc, sum;
	int o, i, w, n;
	Neuron *O, *I;
	Weights *W, *W2, *W3, *D, *D2;
	double m, v;

	ann->timestep++;

	for (o = 0; o < noutputs; o++) {
		// error = outputs[o] - result
		error[o] -= outputs[o];
		error[o] = -error[o];
		ret += pow(error[o], 2.0) * 0.5;
	}
	D = ann->deltas[ann->n-2];
	weightsinitdoubles(D, error);
	for (i = 0; i < (ann->n-2); i++) {
		D = ann->deltas[i];
		weightsinitdouble(D, 1.0);
	}

	// backpropagate MSE
	D2 = ann->deltas[ann->n-2];
	for (w = ann->n-2; w >= 0; w--) {
		D = ann->deltas[w];
		if (ann->first != NULL) {
			W = ann->first[w];
			W2 = ann->second[w];
		}

		for (o = 0; o < ann->layers[w+1]->n; o++) {
			O = ann->layers[w+1]->neurons[o];
			acc = O->gradient(O) * O->steepness;
			sum = 1.0;
			if (D2 != D) {
				W3 = ann->weights[w + 1];
				sum = 0.0;
				for (n = 0; n < D2->outputs; n++)
					sum += D2->values[o][n] *
					W3->values[o][n];
			}
			if (ann->first != NULL) {
				for (i = 0; i <= ann->layers[w]->n; i++)
				{
					D->values[i][o] *= acc * sum;
					W->values[i][o] *= ann->beta1;
					W->values[i][o] += (1.0 -
					ann->beta1) * D->values[i][o];
					W2->values[i][o] *= ann->beta2;
					W2->values[i][o] += (1.0 -
					ann->beta2) * D->values[i][o] *
					D->values[i][o];
				}
			} else {
				for (i = 0; i <= ann->layers[w]->n; i++)
					D->values[i][o] *= acc * sum;
			}
		}

		D2 = D;
	}

	// update weights
	for (w = 0; w < ann->n-1; w++) {
		D = ann->deltas[w];
		D2 = ann->weights[w];
		if (ann->first != NULL) {
			W = ann->first[w];
			W2 = ann->second[w];
		}

		for (i = 0; i <= D2->inputs; i++) {
			I = ann->layers[w]->neurons[i];
			if (ann->first != NULL) {
				for (o = 0; o < D2->outputs; o++) {
					m = W->values[i][o] / (1.0 -
					pow(ann->beta1, ann->timestep));
					v = W2->values[i][o] / (1.0 -
					pow(ann->beta2, ann->timestep));
					D2->values[i][o] += ann->rate * (m /
					(sqrt(v) + ann->epsilon)) *
					I->value;
				}
			} else {
				for (o = 0; o < D2->outputs; o++)
					D2->values[i][o] += D->values[i][o]
					* ann->rate * I->value;
			}
		}
	}

	free(error);
	return ret;
}

int
main(int argc, char **argv)
{
	int i, counter = 0;
	Ann *test = anncreate(3, 2, 16, 1);
	double inputs[4][2] = { { 1.0, 1.0 }, {1.0, 0.0}, {0.0, 1.0}, {0.0, 0.0}};
	double outputs[4] = { 0.0, 1.0, 1.0, 0.0 };
	double error = 1000;

//	test->layers[test->n-1]->neurons[0]->activation = activation_sigmoid;
//	test->layers[test->n-1]->neurons[0]->gradient = gradient_sigmoid;
	if (argc == 2 && argv[1][0] == '-' && argv[1][1] == 'a')
		adaminit(test);

	while (error >= 0.001) {
		error = 0;
		for (i = 0; i < 4; i++)
			error += anntrain(test, inputs[i], &outputs[i]);

		counter++;
		if (counter % 100 == 1)
			printf("error: %f\n", error);
	}

	printf("error: %f, done after %d epochs\n", error, counter);

	return 0;
}




Tue Oct 16 20:06:08 EDT 2018
#include <stdlib.h>
#include <stdarg.h>
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <omp.h>

typedef struct Ann Ann;
typedef struct Layer Layer;
typedef struct Neuron Neuron;
typedef struct Weights Weights;

struct Ann {
	int n;
	double rate;
	double beta1;
	double beta2;
	double epsilon;
	int timestep;
	Layer **layers;
	Weights **weights;
	Weights **deltas;
	Weights **first;
	Weights **second;
};

struct Layer {
	int n;
	Neuron **neurons;
};

struct Neuron {
	double (*activation)(Neuron*);
	double (*gradient)(Neuron*);
	double steepness;
	double value;
	double sum;
};

struct Weights {
	int inputs;
	int outputs;
	double **values;
};

double activation_sigmoid(Neuron*);
double gradient_sigmoid(Neuron*);
double activation_tanh(Neuron*);
double gradient_tanh(Neuron*);
double activation_leaky_relu(Neuron*);
double gradient_leaky_relu(Neuron*);

Ann *anncreate(int, ...);
Layer *layercreate(int, double(*)(Neuron*), double(*)(Neuron*));
Neuron *neuroninit(Neuron*, double (*)(Neuron*), double (*)(Neuron*), double);
Neuron *neuroncreate(double (*)(Neuron*), double (*)(Neuron*), double);
Weights *weightsinitrand(Weights*);
Weights *weightsinitrandscale(Weights*, double);
Weights *weightsinitdouble(Weights*, double);
Weights *weightsinitdoubles(Weights*, double*);
Weights *weightscreate(int, int, int);
double *annrun(Ann*, double*);
double anntrain(Ann*, double*, double*);

double
activation_sigmoid(Neuron *in)
{
	return 1.0/(1.0+exp(-in->sum));
}

double
gradient_sigmoid(Neuron *in)
{
	double y = in->value;
	return y * (1.0 - y);
}

double
activation_tanh(Neuron *in)
{
	return tanh(in->sum);
}

double
gradient_tanh(Neuron *in)
{
	return 1.0 - in->value*in->value;
}

double
activation_leaky_relu(Neuron *in)
{
	if (in->sum > 0)
		return in->sum;
	return in->sum * 0.01;
}

double
gradient_leaky_relu(Neuron *in)
{
	if (in->sum > 0)
		return 1.0;
	return 0.01;
}

Weights*
weightsinitdoubles(Weights *in, double *init)
{
	int i, o;

	for (i = 0; i <= in->inputs; i++)
		for (o = 0; o < in->outputs; o++)
			in->values[i][o] = init[o];

	return in;
}

Weights*
weightsinitdouble(Weights *in, double init)
{
	int i, o;

	for (i = 0; i <= in->inputs; i++)
		for (o = 0; o < in->outputs; o++)
			in->values[i][o] = init;

	return in;
}

Weights*
weightsinitrandscale(Weights *in, double scale)
{
	int i, o;
	static int seeded = 0;

	/* seed once: calling srand(time(0)) on every invocation restarts
	 * the sequence and yields identical matrices within the same second */
	if (!seeded) {
		srand(time(0));
		seeded = 1;
	}
	for (i = 0; i <= in->inputs; i++)
		for (o = 0; o < in->outputs; o++)
			in->values[i][o] = (((double)rand()/RAND_MAX) - 0.5) *
			scale;

	return in;
}

Weights*
weightsinitrand(Weights *in)
{
	weightsinitrandscale(in, 4.0);
	return in;
}

Neuron*
neuroninit(Neuron *in, double (*activation)(Neuron*), double (*gradient)(Neuron*),
double steepness)
{
	in->activation = activation;
	in->gradient = gradient;
	in->steepness = steepness;
	in->value = 1.0;
	in->sum = 0;
	return in;
}

Neuron*
neuroncreate(double (*activation)(Neuron*), double (*gradient)(Neuron*), double
steepness)
{
	Neuron *ret = calloc(1, sizeof(Neuron));
	neuroninit(ret, activation, gradient, steepness);
	return ret;
}

Layer*
layercreate(int num_neurons, double(*activation)(Neuron*),
double(*gradient)(Neuron*))
{
	Layer *ret = calloc(1, sizeof(Layer));
	int i;

	ret->n = num_neurons;
	ret->neurons = calloc(num_neurons+1, sizeof(Neuron*));
	for (i = 0; i <= ret->n; i++) {
		ret->neurons[i] = neuroncreate(activation, gradient, 1.0);
	}
	return ret;
}

Weights*
weightscreate(int inputs, int outputs, int initialize)
{
	int i;
	Weights *ret = calloc(1, sizeof(Weights));
	ret->inputs = inputs;
	ret->outputs = outputs;
	ret->values = calloc(inputs+1, sizeof(double*));
	for (i = 0; i <= inputs; i++)
		ret->values[i] = calloc(outputs, sizeof(double));
	if (initialize)
		weightsinitrand(ret);
	else
		weightsinitdouble(ret, 0.0);
	return ret;
}

Ann*
anncreate(int num_layers, ...)
{
	Ann *ret = calloc(1, sizeof(Ann));
	va_list args;
	int arg;
	int i;

	va_start(args, num_layers);
	ret->n = num_layers;
	ret->rate = 0.00001;
	ret->beta1 = 0.9;
	ret->beta2 = 0.999;
	ret->epsilon = 10e-8;
	ret->timestep = 0;
	ret->layers = calloc(num_layers, sizeof(Layer*));
	ret->weights = calloc(num_layers-1, sizeof(Weights*));
	ret->deltas = calloc(num_layers-1, sizeof(Weights*));
	ret->first = calloc(num_layers-1, sizeof(Weights*));
	ret->second = calloc(num_layers-1, sizeof(Weights*));

	for (i = 0; i < num_layers; i++) {
		arg = va_arg(args, int);
		if (arg < 0 || arg > 1000000)
			arg = 0;
		ret->layers[i] = layercreate(arg, activation_leaky_relu,
		gradient_leaky_relu);
		if (i > 0) {
			ret->weights[i-1] =
			weightscreate(ret->layers[i-1]->n,
			ret->layers[i]->n, 1);
			ret->deltas[i-1] =
			weightscreate(ret->layers[i-1]->n,
			ret->layers[i]->n, 0);
			ret->first[i-1] =
			weightscreate(ret->layers[i-1]->n,
			ret->layers[i]->n, 0);
			ret->second[i-1] =
			weightscreate(ret->layers[i-1]->n,
			ret->layers[i]->n, 0);
		}
	}
	va_end(args);

	return ret;
}

double*
annrun(Ann *ann, double *input)
{
	int l, i, o;
	int outputs = ann->layers[ann->n - 1]->n;
	double *ret = calloc(outputs, sizeof(double));
	Neuron *O;

	#pragma omp parallel for shared(ann) private(i)
	for (i = 0; i < ann->layers[0]->n; i++)
		ann->layers[0]->neurons[i]->value = input[i];

	for (l = 1; l < ann->n; l++) {
		for (o = 0; o < ann->layers[l]->n; o++) {
			O = ann->layers[l]->neurons[o];
			O->sum =
			ann->weights[l-1]->values[ann->weights[l-1]->inputs][o];
			// bias
			#pragma omp parallel for shared(ann) private(i)
			for (i = 0; i < ann->layers[l-1]->n; i++)
				O->sum +=
				ann->layers[l-1]->neurons[i]->value *
				ann->weights[l-1]->values[i][o];
			O->value = O->activation(O);
		}
	}

	#pragma omp parallel for shared(ret,ann) private(o)
	for (o = 0; o < outputs; o++)
		ret[o] = ann->layers[ann->n - 1]->neurons[o]->value;

	return ret;
}

double
anntrain(Ann *ann, double *inputs, double *outputs)
{
	double *error = annrun(ann, inputs);
	double ret = 0.0;
	int noutputs = ann->layers[ann->n-1]->n;
	double acc, sum;
	int o, i, w, n;
	Neuron *O, *I;
	Weights *W, *W2, *D, *D2;
	double m, v;

	ann->timestep++;

	/* reduction avoids a data race on ret */
	#pragma omp parallel for private(o) reduction(+:ret)
	for (o = 0; o < noutputs; o++) {
		// error = outputs[o] - result
		error[o] -= outputs[o];
		error[o] = -error[o];
		ret += pow(error[o], 2.0) * 0.5;
	}
	D = ann->deltas[ann->n-2];
	weightsinitdoubles(D, error);
	#pragma omp parallel for private(i)
	for (i = 0; i < (ann->n-2); i++)
		weightsinitdouble(ann->deltas[i], 1.0);

	// backpropagate MSE
	D2 = ann->deltas[ann->n-2];
	for (w = ann->n-2; w >= 0; w--) {
		D = ann->deltas[w];

		for (o = 0; o < ann->layers[w+1]->n; o++) {
			O = ann->layers[w+1]->neurons[o];
			acc = O->gradient(O) * O->steepness;
			sum = 1.0;
			if (D2 != D) {
				W = ann->weights[w + 1];
				sum = 0.0;
				#pragma omp parallel for private(n) reduction(+:sum)
				for (n = 0; n < D2->outputs; n++)
					sum += D2->values[o][n] *
					W->values[o][n];
			}
			#pragma omp parallel for shared(D) private(i)
			for (i = 0; i <= ann->layers[w]->n; i++) {
				D->values[i][o] *= acc * sum;
			}
			W = ann->first[w];
			W2 = ann->second[w];
			for (i = 0; i <= W->inputs; i++) {	/* <=: include the bias row */
				W->values[i][o] *= ann->beta1;
				W->values[i][o] += (1.0 - ann->beta1) *
				D->values[i][o];
				W2->values[i][o] *= ann->beta2;
				W2->values[i][o] += (1.0 - ann->beta2) *
				D->values[i][o] * D->values[i][o];
			}
		}

		D2 = D;
	}

	// update weights
	#pragma omp parallel for shared(ann) private(w,D,D2,W,W2,i,o,I,m,v)
	for (w = 0; w < ann->n-1; w++) {
		D2 = ann->weights[w];
		D = ann->deltas[w];
		W = ann->first[w];
		W2 = ann->second[w];

		for (i = 0; i <= D2->inputs; i++) {
			I = ann->layers[w]->neurons[i];
			for (o = 0; o < D2->outputs; o++) {
				m = W->values[i][o] / (1.0 - pow(ann->beta1,
				ann->timestep));
				v = W2->values[i][o] / (1.0 -
				pow(ann->beta2, ann->timestep));
				D2->values[i][o] += ann->rate * (m /
				(sqrt(v) + ann->epsilon)) * I->value;
			}
		}
	}

	free(error);
	return ret;
}

int
main()
{
	int i, counter = 0;
	Ann *test = anncreate(3, 2, 4, 1);
	double inputs[4][2] = { { 1.0, 1.0 }, {1.0, 0.0}, {0.0, 1.0}, {0.0, 0.0}};
	double outputs[4] = { 0.0, 1.0, 1.0, 0.0 };
	double error = 1000;

//	test->layers[test->n-1]->neurons[0]->activation = activation_sigmoid;
//	test->layers[test->n-1]->neurons[0]->gradient = gradient_sigmoid;

	while (error >= 0.001) {
		error = 0;
		for (i = 0; i < 4; i++)
			error += anntrain(test, inputs[i], &outputs[i]);

		counter++;
		if (counter % 100 == 1)
			printf("error: %f\n", error);
	}

	printf("error: %f, done after %d epochs\n", error, counter);

	return 0;
}




Tue Oct 16 19:47:08 EDT 2018
#include <u.h>
#include <libc.h>

#define width 49
#define b1 1
#define b2 2
#define b3 4

/* Tracks which mouse buttons are pressed at a given time */
void
main()
{
	int fd = open("/dev/mouse", OREAD);
	if(fd < 0)
		sysfatal("open /dev/mouse: %r");
	char last = 'x';
	for(;;){
		// see: mouse(3)
		char buf[width];
		read(fd, buf, width);
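		// a message is 'm' plus four 12-byte decimal fields
		// (x, y, buttons, msec); buf[35] is the last digit of the
		// buttons field, enough for values 0..7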
		char button = buf[35];
		if(button != last){
			switch(button){
			case '0'+b1:
				print("\Left button pressed.\n");
				break;
			case '0'+b2:
				print("Middle button pressed.\n");
				break;
			case '0'+b3:
				print("Right button pressed.\n");
				break;
			case '0'+(b1|b2):
				print("Left and Middle buttons pressed.\n");
				break;
			case '0'+(b1|b3):
				print("Left and Right buttons pressed.\n");
				break;
			case '0'+(b2|b3):
				print("Middle and Right buttons pressed.\n");
				break;
			case '0'+(b1|b2|b3):
				print("All buttons pressed.\n");
				break;
			case '0':
				print("No buttons pressed.\n");
				break;
			}
			last = button;
		}
	}
}


next