OK, turing.

<- leave blank

Mon Oct 15 23:27:52 EDT 2018

Even a scapegoat is liable to butt.

Mon Oct 15 10:13:43 EDT 2018
More than a dozen F-22s were left behind as Hurricane Michael bore down on the
base Oct.  10.  Now, in Michael's wake, many of those are damaged, and some beyond
repair, at a cost of more than $1 billion, Air Force officials said.

Sun Oct 14 19:32:56 EDT 2018
--- /sys/src/ants/grio/data.c Thu Mar 1 03:11:57 2018
+++ ./data.c Thu Oct 11 13:51:41 2018
@@ -7,6 +7,7 @@
 #include <keyboard.h>
 #include <frame.h>
 #include <fcall.h>
+#include <theme.h>
 #include "dat.h"
 #include "fns.h"

@@ -189,22 +190,23 @@
 {
	int fd;
	Image *bimg = nil;
+ themeinit("rio");

- background = allocimage(display, Rect(0,0,1,1), RGB24, 1, bgtrans);
+ background = allocimage(display, Rect(0,0,1,1), RGB24, 1, themeget("background",
bgtrans));

	/* greys are multiples of 0x11111100+0xFF, 14* being palest */
- cols[BACK] = allocimage(display, Rect(0,0,1,1), CMAP8, 1, winbgcolor^reverse);
- cols[BORD] = allocimage(display, Rect(0,0,1,1), CMAP8, 1,
scrollbarcolor^reverse);
- cols[TEXT] = allocimage(display, Rect(0,0,1,1), CMAP8, 1, textcolor^reverse);
- cols[HTEXT] = allocimage(display, Rect(0,0,1,1), CMAP8, 1, 0x000000FF);
+ cols[BACK] = allocimage(display, Rect(0,0,1,1), CMAP8, 1, themeget("termback",
winbgcolor^reverse));
+ cols[BORD] = allocimage(display, Rect(0,0,1,1), CMAP8, 1, themeget("termbord",
scrollbarcolor^reverse));
+ cols[TEXT] = allocimage(display, Rect(0,0,1,1), CMAP8, 1, themeget("termtext",
textcolor^reverse));
+ cols[HTEXT] = allocimage(display, Rect(0,0,1,1), CMAP8, 1, themeget("termhtext",
0x000000FF));
	if(!reverse) {
- cols[HIGH] = allocimage(display, Rect(0,0,1,1), CMAP8, 1, 0xCCCCCCFF);
- titlecol = allocimage(display, Rect(0,0,1,1), CMAP8, 1, borderactivecolor);
- lighttitlecol = allocimage(display, Rect(0,0,1,1), CMAP8, 1, borderbgcolor);
+ cols[HIGH] = allocimage(display, Rect(0,0,1,1), CMAP8, 1, themeget("termhigh",
0xCCCCCCFF));
+ titlecol = allocimage(display, Rect(0,0,1,1), CMAP8, 1, themeget("title",
borderactivecolor));
+ lighttitlecol = allocimage(display, Rect(0,0,1,1), CMAP8, 1,
themeget("lighttitle", borderbgcolor));
	} else {
- cols[HIGH] = allocimage(display, Rect(0,0,1,1), CMAP8, 1, DPurpleblue);
- titlecol = allocimage(display, Rect(0,0,1,1), CMAP8, 1, borderactivecolor);
- lighttitlecol = allocimage(display, Rect(0,0,1,1), CMAP8, 1, borderbgcolor);
+ cols[HIGH] = allocimage(display, Rect(0,0,1,1), CMAP8, 1,themeget("termhigh",
DPurpleblue));
+ titlecol = allocimage(display, Rect(0,0,1,1), CMAP8, 1, themeget("title",
borderactivecolor));
+ lighttitlecol = allocimage(display, Rect(0,0,1,1), CMAP8, 1,
themeget("lighttitle", borderbgcolor));
	}
	if(bf != nil){
		fd = open(bf, OREAD);
@@ -218,14 +220,16 @@
		background = allocimage(display, Rect(0, 0, Dx(bimg->r),
		Dy(bimg->r)), RGB24, 1, 0xFFFFFFFF);
		draw(background, background->r, bimg, 0, bimg->r.min);
	}
- dholdcol = allocimage(display, Rect(0,0,1,1), CMAP8, 1, DMedblue);
- lightholdcol = allocimage(display, Rect(0,0,1,1), CMAP8, 1, DGreyblue);
- paleholdcol = allocimage(display, Rect(0,0,1,1), CMAP8, 1, DPalegreyblue);
- paletextcol = allocimage(display, Rect(0,0,1,1), CMAP8, 1, 0x666666FF^reverse);
- sizecol = allocimage(display, Rect(0,0,1,1), CMAP8, 1, DRed);
+ dholdcol = allocimage(display, Rect(0,0,1,1), CMAP8, 1, themeget("hold",
DMedblue));
+ lightholdcol = allocimage(display, Rect(0,0,1,1), CMAP8, 1,
themeget("lighthold", DGreyblue));
+ paleholdcol = allocimage(display, Rect(0,0,1,1), CMAP8, 1, themeget("palehold",
DPalegreyblue));
+ paletextcol = allocimage(display, Rect(0,0,1,1), CMAP8, 1, themeget("paletext",
0x666666FF^reverse));
+ sizecol = allocimage(display, Rect(0,0,1,1), CMAP8, 1, themeget("size", DRed));

	if(reverse == 0)
		holdcol = dholdcol;
	else
		holdcol = paleholdcol;
+
+ themeend();
 }
--- /sys/src/ants/grio/mkfile Fri Jan 5 14:57:30 2018
+++ ./mkfile Wed Oct 10 00:31:57 2018
@@ -24,7 +24,8 @@
 </sys/src/cmd/mkone

 $O.out: /$objtype/lib/libdraw.a /$objtype/lib/libframe.a \
- /$objtype/lib/libthread.a /$objtype/lib/libplumb.a /$objtype/lib/libc.a
+ /$objtype/lib/libthread.a /$objtype/lib/libplumb.a \
+ /$objtype/lib/libtheme.a /$objtype/lib/libc.a
 syms:V:
	$CC -a $CFLAGS rio.c > syms
	$CC -aa $CFLAGS *.c >>syms


Sat Oct 13 18:28:13 EDT 2018
sam /lib/glass
assert failed: (p.flags & 1) != 0
truetypefs 225659: suicide: sys: trap: fault read addr=0x0 pc=0x204072
getsubfont: can't read /n/ttf/cour.ttf.16/s.1ea0-1ef9: mount rpc error
getsubfont: can't open /n/ttf/cour.ttf.16/s.1ea0-1ef9: '/n/ttf/cour.ttf.16' i/o on
hungup channel
getsubfont: can't open /n/ttf/cour.ttf.16/s.1ea0-1ef9: '/n/ttf/cour.ttf.16' i/o on
hungup channel
getsubfont: can't open /n/ttf/cour.ttf.16/s.1ea0-1ef9: '/n/ttf/cour.ttf.16' i/o on
hungup channel
getsubfont: can't open /n/ttf/cour.ttf.16/s.1ea0-1ef9: '/n/ttf/cour.ttf.16' i/o on
hungup channel
getsubfont: can't open /n/ttf/cour.ttf.16/s.1ea0-1ef9: '/n/ttf/cour.ttf.16' i/o on
hungup channel
getsubfont: can't open /n/ttf/cour.ttf.16/s.1ea0-1ef9: '/n/ttf/cour.ttf.16' i/o on
hungup channel
getsubfont: can't open /n/ttf/cour.ttf.16/s.1ea0-1ef9: '/n/ttf/cour.ttf.16' i/o on
hungup channel
getsubfont: can't open /n/ttf/cour.ttf.16/s.1ea0-1ef9: '/n/ttf/cour.ttf.16' i/o on
hungup channel
getsubfont: can't open /n/ttf/cour.ttf.16/s.1ea0-1ef9: '/n/ttf/cour.ttf.16' i/o on
hungup channel
getsubfont: can't open /n/ttf/cour.ttf.16/s.1ea0-1ef9: '/n/ttf/cour.ttf.16' i/o on
hungup channel
getsubfont: can't open /n/ttf/cour.ttf.16/s.1ea0-1ef9: '/n/ttf/cour.ttf.16' i/o on
hungup channel
getsubfont: can't open /n/ttf/cour.ttf.16/s.1ea0-1ef9: '/n/ttf/cour.ttf.16' i/o on
hungup channel
getsubfont: can't open /n/ttf/cour.ttf.16/s.1ea0-1ef9: '/n/ttf/cour.ttf.16' i/o on
hungup channel
getsubfont: can't open /n/ttf/cour.ttf.16/s.1ea0-1ef9: '/n/ttf/cour.ttf.16' i/o on
hungup channel
getsubfont: can't open /n/ttf/cour.ttf.16/s.1ea0-1ef9: '/n/ttf/cour.ttf.16' i/o on
hungup channel
getsubfont: can't open /n/ttf/cour.ttf.16/s.1ea0-1ef9: '/n/ttf/cour.ttf.16' i/o on
hungup channel
getsubfont: can't open /n/ttf/cour.ttf.16/s.1ea0-1ef9: '/n/ttf/cour.ttf.16' i/o on
hungup channel

Sat Oct 13 13:03:42 EDT 2018
#include <stdlib.h>
#include <stdarg.h>
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <omp.h>

typedef struct Ann Ann;
typedef struct Layer Layer;
typedef struct Neuron Neuron;
typedef struct Weights Weights;

struct Ann {
	int n;
	double rate;
	Layer **layers;
	Weights **weights;
	Weights **deltas;
};

struct Layer {
	int n;
	Neuron **neurons;
};

struct Neuron {
	double (*activation)(Neuron*);
	double (*gradient)(Neuron*);
	double steepness;
	double value;
	double sum;
};

struct Weights {
	int inputs;
	int outputs;
	double **values;
};

double activation_sigmoid(Neuron*);
double gradient_sigmoid(Neuron*);
double activation_tanh(Neuron*);
double gradient_tanh(Neuron*);
double activation_leaky_relu(Neuron*);
double gradient_leaky_relu(Neuron*);

Ann *anncreate(int, ...);
Layer *layercreate(int, double(*)(Neuron*), double(*)(Neuron*));
Neuron *neuroninit(Neuron*, double (*)(Neuron*), double (*)(Neuron*), double);
Neuron *neuroncreate(double (*)(Neuron*), double (*)(Neuron*), double);
Weights *weightsinitrand(Weights*);
Weights *weightsinitrandscale(Weights*, double);
Weights *weightsinitdouble(Weights*, double);
Weights *weightsinitdoubles(Weights*, double*);
Weights *weightscreate(int, int, int);
double *annrun(Ann*, double*);
double anntrain(Ann*, double*, double*);

/* Logistic activation: maps the neuron's weighted input sum into (0, 1). */
double
activation_sigmoid(Neuron *in)
{
	double x = in->sum;

	return 1.0 / (1.0 + exp(-x));
}

/* Derivative of the logistic function, expressed via the cached output value. */
double
gradient_sigmoid(Neuron *in)
{
	return in->value * (1.0 - in->value);
}

/* Hyperbolic-tangent activation: maps the input sum into (-1, 1). */
double
activation_tanh(Neuron *in)
{
	double x = in->sum;

	return tanh(x);
}

/* Derivative of tanh via the cached output: d/dx tanh(x) = 1 - tanh(x)^2. */
double
gradient_tanh(Neuron *in)
{
	double y = in->value;

	return 1.0 - y*y;
}

/* Leaky ReLU: identity for positive sums, slope 0.01 otherwise. */
double
activation_leaky_relu(Neuron *in)
{
	double x = in->sum;

	return x > 0 ? x : x * 0.01;
}

/* Derivative of the leaky ReLU: 1 on the positive side, 0.01 on the negative. */
double
gradient_leaky_relu(Neuron *in)
{
	return in->sum > 0 ? 1.0 : 0.01;
}

/*
 * Set every row of the weight matrix (including the bias row at index
 * in->inputs) to the per-output values in init.
 * init must hold at least in->outputs entries.
 */
Weights*
weightsinitdoubles(Weights *in, double *init)
{
	int row, col;

	for (row = 0; row <= in->inputs; row++)
		for (col = 0; col < in->outputs; col++)
			in->values[row][col] = init[col];

	return in;
}

/* Fill the whole weight matrix (bias row included) with a single constant. */
Weights*
weightsinitdouble(Weights *in, double init)
{
	int row, col;

	for (row = 0; row <= in->inputs; row++)
		for (col = 0; col < in->outputs; col++)
			in->values[row][col] = init;

	return in;
}

/*
 * Fill the weight matrix (bias row included) with uniform random values
 * in [-scale/2, scale/2].
 *
 * The PRNG is seeded exactly once per process: the original called
 * srand(time(0)) on every invocation, so every matrix created within
 * the same second received an identical value sequence.
 */
Weights*
weightsinitrandscale(Weights *in, double scale)
{
	static int seeded = 0;
	int i, o;

	if (!seeded) {
		srand(time(0));
		seeded = 1;
	}
	for (i = 0; i <= in->inputs; i++)
		for (o = 0; o < in->outputs; o++)
			in->values[i][o] = (((double)rand()/RAND_MAX) - 0.5) *
			scale;

	return in;
}

/* Convenience wrapper: random initialization with unit scale. */
Weights*
weightsinitrand(Weights *in)
{
	return weightsinitrandscale(in, 1.0);
}

/*
 * Install the activation/gradient pair and steepness on an existing
 * neuron and reset its state: value starts at 1.0 (so a bias neuron
 * contributes its weight directly), sum at 0.
 */
Neuron*
neuroninit(Neuron *in, double (*activation)(Neuron*), double (*gradient)(Neuron*),
double steepness)
{
	in->sum = 0;
	in->value = 1.0;
	in->steepness = steepness;
	in->gradient = gradient;
	in->activation = activation;
	return in;
}

/*
 * Allocate and initialize a neuron.  Returns NULL if allocation fails;
 * the original dereferenced the calloc result unchecked.
 */
Neuron*
neuroncreate(double (*activation)(Neuron*), double (*gradient)(Neuron*), double
steepness)
{
	Neuron *ret = calloc(1, sizeof(Neuron));

	if (ret == NULL)
		return NULL;
	return neuroninit(ret, activation, gradient, steepness);
}

/*
 * Create a layer of num_neurons neurons plus one extra neuron at index
 * n (its value stays 1.0 from neuroninit, serving as the bias unit).
 * NOTE(review): calloc results are unchecked here, matching the rest of
 * the file -- confirm the abort-on-OOM policy.
 */
Layer*
layercreate(int num_neurons, double(*activation)(Neuron*),
double(*gradient)(Neuron*))
{
	int i;
	Layer *ret = calloc(1, sizeof(Layer));

	ret->n = num_neurons;
	ret->neurons = calloc(num_neurons+1, sizeof(Neuron*));
	for (i = 0; i <= num_neurons; i++)
		ret->neurons[i] = neuroncreate(activation, gradient, 1.0);
	return ret;
}

/*
 * Allocate an (inputs+1) x outputs weight matrix; the extra row holds
 * the bias weights.  When initialize is nonzero the values start
 * random, otherwise all 1.0 (the form used for delta matrices).
 */
Weights*
weightscreate(int inputs, int outputs, int initialize)
{
	Weights *ret = calloc(1, sizeof(Weights));
	int row;

	ret->inputs = inputs;
	ret->outputs = outputs;
	ret->values = calloc(inputs+1, sizeof(double*));
	for (row = 0; row <= inputs; row++)
		ret->values[row] = calloc(outputs, sizeof(double));
	if (initialize)
		weightsinitrand(ret);
	else
		weightsinitdouble(ret, 1.0);
	return ret;
}

Ann*
anncreate(int num_layers, ...)
{
	Ann *ret = calloc(1, sizeof(Ann));
	va_list args;
	int arg;
	int i;

	va_start(args, num_layers);
	ret->n = num_layers;
	ret->rate = 0.25;
	ret->layers = calloc(num_layers, sizeof(Layer*));
	ret->weights = calloc(num_layers-1, sizeof(Weights*));
	ret->deltas = calloc(num_layers-1, sizeof(Weights*));

	for (i = 0; i < num_layers; i++) {
		arg = va_arg(args, int);
		if (arg < 0 || arg > 1000000)
			arg = 0;
		ret->layers[i] = layercreate(arg, activation_leaky_relu,
		gradient_leaky_relu);
		if (i > 0) {
			ret->weights[i-1] =
			weightscreate(ret->layers[i-1]->n,
			ret->layers[i]->n, 1);
			ret->deltas[i-1] =
			weightscreate(ret->layers[i-1]->n,
			ret->layers[i]->n, 0);
		}
	} va_end(args);

	return ret;
}

/*
 * Forward pass: copy input into layer 0, propagate through each layer
 * (the bias weight is the extra row of each weight matrix), and return
 * a freshly allocated array of the output layer's values.  Caller
 * frees the result.
 */
double*
annrun(Ann *ann, double *input)
{
	int l, i, o;
	int outputs = ann->layers[ann->n - 1]->n;
	double *ret = calloc(outputs, sizeof(double));
	double sum;
	Neuron *O;

	#pragma omp parallel for shared(ann) private(i)
	for (i = 0; i < ann->layers[0]->n; i++)
		ann->layers[0]->neurons[i]->value = input[i];

	for (l = 1; l < ann->n; l++) {
		for (o = 0; o < ann->layers[l]->n; o++) {
			O = ann->layers[l]->neurons[o];
			/* start from the bias weight (extra input row) */
			sum = ann->weights[l-1]->values[ann->weights[l-1]->inputs][o];
			/*
			 * The original accumulated straight into O->sum from a
			 * parallel loop with no reduction clause -- a data
			 * race.  Accumulate into a private scalar instead.
			 */
			#pragma omp parallel for shared(ann) private(i) reduction(+:sum)
			for (i = 0; i < ann->layers[l-1]->n; i++)
				sum += ann->layers[l-1]->neurons[i]->value *
					ann->weights[l-1]->values[i][o];
			O->sum = sum;
			O->value = O->activation(O);
		}
	}

	#pragma omp parallel for shared(ret,ann) private(o)
	for (o = 0; o < outputs; o++)
		ret[o] = ann->layers[ann->n - 1]->neurons[o]->value;

	return ret;
}

/*
 * One training step: run the network forward, compute the error against
 * the targets, backpropagate deltas, and apply a gradient update scaled
 * by ann->rate.  Returns the summed squared error (0.5 * sum(err^2))
 * measured before the update.
 * NOTE(review): assumes ann->n >= 2 (deltas[ann->n-2]) -- confirm at
 * the construction site.
 */
double
anntrain(Ann *ann, double *inputs, double *outputs)
{
	double *error = annrun(ann, inputs);
	double ret = 0.0;
	int noutputs = ann->layers[ann->n-1]->n;
	double acc, sum;
	int o, i, w, n;
	Neuron *O, *I;
	Weights *W, *D, *D2;

	/*
	 * The original pragma lacked a reduction on ret, so the parallel
	 * iterations raced on the accumulator.
	 */
	#pragma omp parallel for shared(error,outputs) private(o) reduction(+:ret)
	for (o = 0; o < noutputs; o++) {
		error[o] = outputs[o] - error[o];	/* target minus result */
		ret += pow(error[o], 2.0) * 0.5;
	}
	/* seed the last delta matrix with the output error... */
	D = ann->deltas[ann->n-2];
	weightsinitdoubles(D, error);
	/* ...and reset all earlier delta matrices to 1.0 */
	#pragma omp parallel for shared(ann) private(i)
	for (i = 0; i < (ann->n-2); i++)
		weightsinitdouble(ann->deltas[i], 1.0);

	/* backpropagate MSE */
	D2 = ann->deltas[ann->n-2];
	for (w = ann->n-2; w >= 0; w--) {
		D = ann->deltas[w];

		for (o = 0; o < ann->layers[w+1]->n; o++) {
			O = ann->layers[w+1]->neurons[o];
			acc = O->gradient(O) * O->steepness;
			sum = 1.0;
			if (D2 != D) {
				W = ann->weights[w + 1];
				sum = 0.0;
				/*
				 * The original split this pragma across two
				 * source lines without a continuation (invalid
				 * preprocessor syntax) and raced on sum; join
				 * it and add the reduction.
				 */
				#pragma omp parallel for shared(D2,W) private(n) reduction(+:sum)
				for (n = 0; n < D2->outputs; n++)
					sum += D2->values[o][n] *
						W->values[o][n];
			}
			#pragma omp parallel for shared(D) private(i)
			for (i = 0; i <= ann->layers[w]->n; i++)
				D->values[i][o] *= acc * sum;
		}

		D2 = D;
	}

	/* update weights; each w owns disjoint matrices, so the loop is safe */
	#pragma omp parallel for shared(ann) private(w,W,D,i,o,I)
	for (w = 0; w < ann->n-1; w++) {
		W = ann->weights[w];
		D = ann->deltas[w];

		for (i = 0; i <= W->inputs; i++) {
			I = ann->layers[w]->neurons[i];
			for (o = 0; o < W->outputs; o++)
				W->values[i][o] += D->values[i][o] *
					ann->rate * I->value;
		}
	}

	free(error);
	return ret;
}

/*
 * Train a 2-16-1 network on XOR until the per-epoch error drops below
 * 0.001, reporting progress every 100 epochs.
 * Fixes: stray double semicolon and the unused `results` variable.
 */
int
main()
{
	int i, counter = 0;
	Ann *test = anncreate(3, 2, 16, 1);
	double inputs[4][2] = { { 1.0, 1.0 }, {1.0, 0.0}, {0.0, 1.0}, {0.0, 0.0}};
	double outputs[4] = { 0.0, 1.0, 1.0, 0.0 };
	double error = 1000;

	while (error >= 0.001) {
		error = 0;
		for (i = 0; i < 4; i++)
			error += anntrain(test, inputs[i], &outputs[i]);

		counter++;
		if (counter % 100 == 1)
			printf("error: %f\n", error);
	}

	printf("error: %f, done after %d epochs\n", error, counter);

	return 0;
}



Sat Oct 13 11:54:54 EDT 2018
UPDATE: After publication, Marvel contacted us with a “no comment.”

Sat Oct 13 11:48:24 EDT 2018
#include <stdlib.h>
#include <stdarg.h>
#include <stdio.h>
#include <math.h>
#include <time.h>

typedef struct Ann Ann;
typedef struct Layer Layer;
typedef struct Neuron Neuron;
typedef struct Weights Weights;

struct Ann {
	int n;
	double rate;
	Layer **layers;
	Weights **weights;
	Weights **deltas;
};

struct Layer {
	int n;
	Neuron **neurons;
};

struct Neuron {
	double (*activation)(Neuron*);
	double (*gradient)(Neuron*);
	double steepness;
	double value;
	double sum;
};

struct Weights {
	int inputs;
	int outputs;
	double **values;
};

double activation_sigmoid(Neuron*);
double gradient_sigmoid(Neuron*);
double activation_tanh(Neuron*);
double gradient_tanh(Neuron*);
double activation_leaky_relu(Neuron*);
double gradient_leaky_relu(Neuron*);

Ann *anncreate(int, ...);
Layer *layercreate(int, double(*)(Neuron*), double(*)(Neuron*));
Neuron *neuroninit(Neuron*, double (*)(Neuron*), double (*)(Neuron*), double);
Neuron *neuroncreate(double (*)(Neuron*), double (*)(Neuron*), double);
Weights *weightsinitrand(Weights*);
Weights *weightsinitrandscale(Weights*, double);
Weights *weightsinitdouble(Weights*, double);
Weights *weightsinitdoubles(Weights*, double*);
Weights *weightscreate(int, int, int);
double *annrun(Ann*, double*);
double anntrain(Ann*, double*, double*);

/* Logistic activation: maps the neuron's weighted input sum into (0, 1). */
double
activation_sigmoid(Neuron *in)
{
	double x = in->sum;

	return 1.0 / (1.0 + exp(-x));
}

/* Derivative of the logistic function, expressed via the cached output value. */
double
gradient_sigmoid(Neuron *in)
{
	return in->value * (1.0 - in->value);
}

/* Hyperbolic-tangent activation: maps the input sum into (-1, 1). */
double
activation_tanh(Neuron *in)
{
	double x = in->sum;

	return tanh(x);
}

/* Derivative of tanh via the cached output: d/dx tanh(x) = 1 - tanh(x)^2. */
double
gradient_tanh(Neuron *in)
{
	double y = in->value;

	return 1.0 - y*y;
}

/* Leaky ReLU: identity for positive sums, slope 0.01 otherwise. */
double
activation_leaky_relu(Neuron *in)
{
	double x = in->sum;

	return x > 0 ? x : x * 0.01;
}

/* Derivative of the leaky ReLU: 1 on the positive side, 0.01 on the negative. */
double
gradient_leaky_relu(Neuron *in)
{
	return in->sum > 0 ? 1.0 : 0.01;
}

Weights*
weightsinitdoubles(Weights *in, double *init)
{
	int i, o;

	for (i = 0; i <= in->inputs; i++)
		for (o = 0; o < in->outputs; o++)
			in->values[i][o] = init[o];

	return in;
}

Weights*
weightsinitdouble(Weights *in, double init)
{
	int i, o;

	for (i = 0; i <= in->inputs; i++)
		for (o = 0; o < in->outputs; o++)
			in->values[i][o] = init;

	return in;
}

/*
 * Fill the weight matrix (bias row included) with uniform random values
 * in [-scale/2, scale/2].
 *
 * The PRNG is seeded exactly once per process: the original called
 * srand(time(0)) on every invocation, so every matrix created within
 * the same second received an identical value sequence.
 */
Weights*
weightsinitrandscale(Weights *in, double scale)
{
	static int seeded = 0;
	int i, o;

	if (!seeded) {
		srand(time(0));
		seeded = 1;
	}
	for (i = 0; i <= in->inputs; i++)
		for (o = 0; o < in->outputs; o++)
			in->values[i][o] = (((double)rand()/RAND_MAX) - 0.5) *
			scale;

	return in;
}

Weights*
weightsinitrand(Weights *in)
{
	weightsinitrandscale(in, 1.0);
	return in;
}

Neuron*
neuroninit(Neuron *in, double (*activation)(Neuron*), double (*gradient)(Neuron*),
double steepness)
{
	in->activation = activation;
	in->gradient = gradient;
	in->steepness = steepness;
	in->value = 1.0;
	in->sum = 0;
	return in;
}

Neuron*
neuroncreate(double (*activation)(Neuron*), double (*gradient)(Neuron*), double
steepness)
{
	Neuron *ret = calloc(1, sizeof(Neuron));
	neuroninit(ret, activation, gradient, steepness);
	return ret;
}

Layer*
layercreate(int num_neurons, double(*activation)(Neuron*),
double(*gradient)(Neuron*))
{
	Layer *ret = calloc(1, sizeof(Layer));
	int i;

	ret->n = num_neurons;
	ret->neurons = calloc(num_neurons+1, sizeof(Neuron*));
	for (i = 0; i <= ret->n; i++) {
		ret->neurons[i] = neuroncreate(activation, gradient, 1.0);
	}
	return ret;
}

Weights*
weightscreate(int inputs, int outputs, int initialize)
{
	int i;
	Weights *ret = calloc(1, sizeof(Weights));
	ret->inputs = inputs;
	ret->outputs = outputs;
	ret->values = calloc(inputs+1, sizeof(double*));
	for (i = 0; i <= inputs; i++)
		ret->values[i] = calloc(outputs, sizeof(double));
	if (initialize)
		weightsinitrand(ret);
	else
		weightsinitdouble(ret, 1.0);
	return ret;
}

Ann*
anncreate(int num_layers, ...)
{
	Ann *ret = calloc(1, sizeof(Ann));
	va_list args;
	int arg;
	int i;

	va_start(args, num_layers);
	ret->n = num_layers;
	ret->rate = 0.25;
	ret->layers = calloc(num_layers, sizeof(Layer*));
	ret->weights = calloc(num_layers-1, sizeof(Weights*));
	ret->deltas = calloc(num_layers-1, sizeof(Weights*));

	for (i = 0; i < num_layers; i++) {
		arg = va_arg(args, int);
		if (arg < 0 || arg > 1000000)
			arg = 0;
		ret->layers[i] = layercreate(arg, activation_leaky_relu,
		gradient_leaky_relu);
		if (i > 0) {
			ret->weights[i-1] =
			weightscreate(ret->layers[i-1]->n,
			ret->layers[i]->n, 1);
			ret->deltas[i-1] =
			weightscreate(ret->layers[i-1]->n,
			ret->layers[i]->n, 0);
		}
	} va_end(args);

	return ret;
}

double*
annrun(Ann *ann, double *input)
{
	int l, i, o;
	int outputs = ann->layers[ann->n - 1]->n;
	double *ret = calloc(outputs, sizeof(double));
	Neuron *O;

	for (i = 0; i < ann->layers[0]->n; i++)
		ann->layers[0]->neurons[i]->value = input[i];

	for (l = 1; l < ann->n; l++) {
		for (o = 0; o < ann->layers[l]->n; o++) {
			O = ann->layers[l]->neurons[o];
			O->sum =
			ann->weights[l-1]->values[ann->weights[l-1]->inputs][o];
			// bias
			for (i = 0; i < ann->layers[l-1]->n; i++)
				O->sum +=
				ann->layers[l-1]->neurons[i]->value *
				ann->weights[l-1]->values[i][o];
			O->value = O->activation(O);
		}
	}

	for (o = 0; o < outputs; o++)
		ret[o] = ann->layers[ann->n - 1]->neurons[o]->value;

	return ret;
}

double
anntrain(Ann *ann, double *inputs, double *outputs)
{
	double *error = annrun(ann, inputs);
	double ret = 0.0;
	int noutputs = ann->layers[ann->n-1]->n;
	double acc, sum;
	int o, i, w, n;
	Neuron *O, *I;
	Weights *W, *D, *D2;

	for (o = 0; o < noutputs; o++) {
		// error = outputs[o] - result
		error[o] -= outputs[o];
		error[o] = -error[o];
		ret += pow(error[o], 2.0) * 0.5;
	}
	D = ann->deltas[ann->n-2];
	weightsinitdoubles(D, error);
	for (i = 0; i < (ann->n-2); i++) {
		D = ann->deltas[i];
		weightsinitdouble(D, 1.0);
	}

	// backpropagate MSE
	D2 = ann->deltas[ann->n-2];
	for (w = ann->n-2; w >= 0; w--) {
		D = ann->deltas[w];

		for (o = 0; o < ann->layers[w+1]->n; o++) {
			O = ann->layers[w+1]->neurons[o];
			acc = O->gradient(O) * O->steepness;
			sum = 1.0;
			if (D2 != D) {
				W = ann->weights[w + 1];
				sum = 0.0;
				for (n = 0; n < D2->outputs; n++)
					sum += D2->values[o][n] *
					W->values[o][n];
			}
			for (i = 0; i <= ann->layers[w]->n; i++) {
				D->values[i][o] *= acc * sum;
			}
		}

		D2 = D;
	}

	// update weights
	for (w = 0; w < ann->n-1; w++) {
		W = ann->weights[w];
		D = ann->deltas[w];

		for (i = 0; i <= W->inputs; i++) {
			I = ann->layers[w]->neurons[i];
			for (o = 0; o < W->outputs; o++) {
				W->values[i][o] += D->values[i][o] *
				ann->rate * I->value;
			}
		}
	}

	free(error);
	return ret;
}

/*
 * Train a 2-16-1 network on XOR until the per-epoch error drops below
 * 0.001, reporting progress every 100 epochs.
 * Fixes: stray double semicolon and the unused `results` variable.
 */
int
main()
{
	int i, counter = 0;
	Ann *test = anncreate(3, 2, 16, 1);
	double inputs[4][2] = { { 1.0, 1.0 }, {1.0, 0.0}, {0.0, 1.0}, {0.0, 0.0}};
	double outputs[4] = { 0.0, 1.0, 1.0, 0.0 };
	double error = 1000;

	while (error >= 0.001) {
		error = 0;
		for (i = 0; i < 4; i++)
			error += anntrain(test, inputs[i], &outputs[i]);

		counter++;
		if (counter % 100 == 1)
			printf("error: %f\n", error);
	}

	printf("error: %f, done after %d epochs\n", error, counter);

	return 0;
}



Sat Oct 13 11:44:44 EDT 2018
Date: Sat, 13 Oct 2018 09:59:29 -0600
From: <uriel@cat-v.org>
To: <uriel@cat-v.org>
Subject: Your Account Was Hacked!

8Hello!
I'm a member of an international hacker group.

As you could probably have guessed, your account uriel@cat-v.org was hacked,
I sent message you from your account (please look on "from address").

Now I have access to all your accounts!

Within a period from July 30, 2018 to October 9, 2018, you were infected by the
virus we've created, through an adult website you've visited.
So far, we have access to your messages, social media accounts, and messengers.
Moreover, we've gotten full damps of these data.

We are aware of your little and big secrets...yeah, you do have them.  We saw and
recorded your doings on porn websites.  Your tastes are so weird, you know..

But the key thing is that sometimes we recorded you with your webcam, syncing the
recordings with what you watched!
I think you are not interested show this video to your friends, relatives, and
your intimate one...

Transfer $450 to our Bitcoin wallet: 1MN7A7QqQaAVoxV4zdjdrnEHXmjhzcQ4Bq
If you don't know about Bitcoin please input in Google "buy BTC".  It's really
easy.

I guarantee that after that, we'll erase all your "data" :)

A timer will start once you read this message.  You have 48 hours to pay the
above-mentioned amount.

Your data will be erased once the money are transferred.
If they are not, all your messages and videos recorded will be automatically sent
to all your contacts found on your devices at the moment of infection.

You should always think about your security.
We hope this case will teach you to keep secrets.
Take care of yourself.




Sat Oct 13 11:13:49 EDT 2018
#include <stdlib.h>
#include <stdarg.h>
#include <stdio.h>
#include <math.h>
#include <time.h>

typedef struct Ann Ann;
typedef struct Layer Layer;
typedef struct Neuron Neuron;
typedef struct Weights Weights;

struct Ann {
	int n;
	double rate;
	Layer **layers;
	Weights **weights;
	Weights **deltas;
};

struct Layer {
	int n;
	Neuron **neurons;
};

struct Neuron {
	double (*activation)(Neuron*);
	double (*gradient)(Neuron*);
	double steepness;
	double value;
	double sum;
};

struct Weights {
	int inputs;
	int outputs;
	double **values;
};

double activation_sigmoid(Neuron*);
double gradient_sigmoid(Neuron*);
Ann *anncreate(int, ...);
Layer *layercreate(int, double(*)(Neuron*), double(*)(Neuron*));
Neuron *neuroninit(Neuron*, double (*)(Neuron*), double (*)(Neuron*), double);
Neuron *neuroncreate(double (*)(Neuron*), double (*)(Neuron*), double);
Weights *weightsinitrand(Weights*);
Weights *weightsinitrandscale(Weights*, double);
Weights *weightsinitdouble(Weights*, double);
Weights *weightsinitdoubles(Weights*, double*);
Weights *weightscreate(int, int, int);
double *annrun(Ann*, double*);
double anntrain(Ann*, double*, double*);

double
activation_sigmoid(Neuron *in)
{
	return 1.0/(1.0+exp(-in->sum));
}

double
gradient_sigmoid(Neuron *in)
{
	double y = in->value;
	return y * (1.0 - y);
}

Weights*
weightsinitdoubles(Weights *in, double *init)
{
	int i, o;

	for (i = 0; i <= in->inputs; i++)
		for (o = 0; o < in->outputs; o++)
			in->values[i][o] = init[o];

	return in;
}

Weights*
weightsinitdouble(Weights *in, double init)
{
	int i, o;

	for (i = 0; i <= in->inputs; i++)
		for (o = 0; o < in->outputs; o++)
			in->values[i][o] = init;

	return in;
}

/*
 * Fill the weight matrix (bias row included) with uniform random values
 * in [-scale/2, scale/2].
 *
 * The PRNG is seeded exactly once per process: the original called
 * srand(time(0)) on every invocation, so every matrix created within
 * the same second received an identical value sequence.
 */
Weights*
weightsinitrandscale(Weights *in, double scale)
{
	static int seeded = 0;
	int i, o;

	if (!seeded) {
		srand(time(0));
		seeded = 1;
	}
	for (i = 0; i <= in->inputs; i++)
		for (o = 0; o < in->outputs; o++)
			in->values[i][o] = (((double)rand()/RAND_MAX) - 0.5) *
			scale;

	return in;
}

/* Convenience wrapper: random initialization with scale 4.0 (this copy
 * of the program uses a wider starting range than the others). */
Weights*
weightsinitrand(Weights *in)
{
	return weightsinitrandscale(in, 4.0);
}

Neuron*
neuroninit(Neuron *in, double (*activation)(Neuron*), double (*gradient)(Neuron*),
double steepness)
{
	in->activation = activation;
	in->gradient = gradient;
	in->steepness = steepness;
	in->value = 1.0;
	in->sum = 0;
	return in;
}

Neuron*
neuroncreate(double (*activation)(Neuron*), double (*gradient)(Neuron*), double
steepness)
{
	Neuron *ret = calloc(1, sizeof(Neuron));
	neuroninit(ret, activation, gradient, steepness);
	return ret;
}

Layer*
layercreate(int num_neurons, double(*activation)(Neuron*),
double(*gradient)(Neuron*))
{
	Layer *ret = calloc(1, sizeof(Layer));
	int i;

	ret->n = num_neurons;
	ret->neurons = calloc(num_neurons+1, sizeof(Neuron*));
	for (i = 0; i <= ret->n; i++) {
		ret->neurons[i] = neuroncreate(activation, gradient, 1.0);
	}
	return ret;
}

Weights*
weightscreate(int inputs, int outputs, int initialize)
{
	int i;
	Weights *ret = calloc(1, sizeof(Weights));
	ret->inputs = inputs;
	ret->outputs = outputs;
	ret->values = calloc(inputs+1, sizeof(double*));
	for (i = 0; i <= inputs; i++)
		ret->values[i] = calloc(outputs, sizeof(double));
	if (initialize)
		weightsinitrand(ret);
	else
		weightsinitdouble(ret, 1.0);
	return ret;
}

Ann*
anncreate(int num_layers, ...)
{
	Ann *ret = calloc(1, sizeof(Ann));
	va_list args;
	int arg;
	int i;

	va_start(args, num_layers);
	ret->n = num_layers;
	ret->rate = 0.25;
	ret->layers = calloc(num_layers, sizeof(Layer*));
	ret->weights = calloc(num_layers-1, sizeof(Weights*));
	ret->deltas = calloc(num_layers-1, sizeof(Weights*));

	for (i = 0; i < num_layers; i++) {
		arg = va_arg(args, int);
		if (arg < 0 || arg > 1000000)
			arg = 0;
		ret->layers[i] = layercreate(arg, activation_sigmoid,
		gradient_sigmoid);
		if (i > 0) {
			ret->weights[i-1] =
			weightscreate(ret->layers[i-1]->n,
			ret->layers[i]->n, 1);
			ret->deltas[i-1] =
			weightscreate(ret->layers[i-1]->n,
			ret->layers[i]->n, 0);
		}
	} va_end(args);

	return ret;
}

double*
annrun(Ann *ann, double *input)
{
	int l, i, o;
	int outputs = ann->layers[ann->n - 1]->n;
	double *ret = calloc(outputs, sizeof(double));
	Neuron *O;

	for (i = 0; i < ann->layers[0]->n; i++)
		ann->layers[0]->neurons[i]->value = input[i];

	for (l = 1; l < ann->n; l++) {
		for (o = 0; o < ann->layers[l]->n; o++) {
			O = ann->layers[l]->neurons[o];
			O->sum =
			ann->weights[l-1]->values[ann->weights[l-1]->inputs][o];
			// bias
			for (i = 0; i < ann->layers[l-1]->n; i++)
				O->sum +=
				ann->layers[l-1]->neurons[i]->value *
				ann->weights[l-1]->values[i][o];
			O->value = O->activation(O);
		}
	}

	for (o = 0; o < outputs; o++)
		ret[o] = ann->layers[ann->n - 1]->neurons[o]->value;

	return ret;
}

double
anntrain(Ann *ann, double *inputs, double *outputs)
{
	double *error = annrun(ann, inputs);
	double ret = 0.0;
	int noutputs = ann->layers[ann->n-1]->n;
	double acc, sum;
	int o, i, w, n;
	Neuron *O, *I;
	Weights *W, *D, *D2;

	for (o = 0; o < noutputs; o++) {
		// error = outputs[o] - result
		error[o] -= outputs[o];
		error[o] = -error[o];
		ret += pow(error[o], 2.0) * 0.5;
	}
	D = ann->deltas[ann->n-2];
	weightsinitdoubles(D, error);
	for (i = 0; i < (ann->n-2); i++) {
		D = ann->deltas[i];
		weightsinitdouble(D, 1.0);
	}

	// backpropagate MSE
	D2 = ann->deltas[ann->n-2];
	for (w = ann->n-2; w >= 0; w--) {
		D = ann->deltas[w];

		for (o = 0; o < ann->layers[w+1]->n; o++) {
			O = ann->layers[w+1]->neurons[o];
			acc = O->gradient(O) * O->steepness;
			sum = 1.0;
			if (D2 != D) {
				W = ann->weights[w + 1];
				sum = 0.0;
				for (n = 0; n < D2->outputs; n++)
					sum += D2->values[o][n] *
					W->values[o][n];
			}
			for (i = 0; i <= ann->layers[w]->n; i++) {
				D->values[i][o] *= acc * sum;
			}
		}

		D2 = D;
	}

	// update weights
	for (w = 0; w < ann->n-1; w++) {
		W = ann->weights[w];
		D = ann->deltas[w];

		for (i = 0; i <= W->inputs; i++) {
			I = ann->layers[w]->neurons[i];
			for (o = 0; o < W->outputs; o++) {
				W->values[i][o] += D->values[i][o] *
				ann->rate * I->value;
			}
		}
	}

	free(error);
	return ret;
}

/*
 * Train a 2-16-1 sigmoid network on XOR (learning rate raised to 4.0)
 * until the per-epoch error drops below 0.001, reporting progress every
 * 100 epochs.
 * Fixes: stray double semicolon and the unused `results` variable.
 */
int
main()
{
	int i, counter = 0;
	Ann *test = anncreate(3, 2, 16, 1);
	double inputs[4][2] = { { 1.0, 1.0 }, {1.0, 0.0}, {0.0, 1.0}, {0.0, 0.0}};
	double outputs[4] = { 0.0, 1.0, 1.0, 0.0 };
	double error = 1000;

	test->rate = 4.0;

	while (error >= 0.001) {
		error = 0;
		for (i = 0; i < 4; i++)
			error += anntrain(test, inputs[i], &outputs[i]);

		counter++;
		if (counter % 100 == 1)
			printf("error: %f\n", error);
	}

	printf("error: %f, done after %d epochs\n", error, counter);

	return 0;
}


Sat Oct 13 11:00:02 EDT 2018
fgdfgdfgdfgdfg

Fri Oct 12 22:33:33 EDT 2018
diff -r 5b18d18ae709 sys/src/libdisk/proto.c
--- a/sys/src/libdisk/proto.c Sat Oct 13 00:07:46 2018 +0200
+++ b/sys/src/libdisk/proto.c Sat Oct 13 04:33:31 2018 +0200
@@ -64,6 +64,8 @@

 static int copyfile(Mkaux*, File*, Dir*, int);
 static void freefile(File*);
+static void freeoptptr(Opt*, void*);
+static char* getline(Mkaux*);
 static File* getfile(Mkaux*, File*);
 static char* getmode(Mkaux*, char*, ulong*);
 static char* getname(Mkaux*, char*, char**);
@@ -72,16 +74,11 @@
 static char* mkpath(Mkaux*, char*, char*);
 static void mktree(Mkaux*, File*, int);
 static void setname(Mkaux*, Name*, File*);
+static void setopt(Mkaux*, char*, char*);
 static void skipdir(Mkaux*);
 static void warn(Mkaux*, char *, ...);
 static void popopt(Mkaux *mkaux);

-//static void
-//mprint(char *new, char *old, Dir *d, void*)
-//{
-// print("%s %s %D\n", new, old, d);
-//}
-
 int
 rdproto(char *proto, char *root, Mkfsenum *mkenum, Mkfserr *mkerr, void *a)
 {
@@ -151,7 +148,7 @@
	int rec;

	child = getfile(mkaux, me);
- if(!child)
+ if(child == nil)
		return;
	if((child->elem[0] == '+' || child->elem[0] == '*') &&
	child->elem[1] == '\0'){
		rec = child->elem[0] == '+';
@@ -162,13 +159,13 @@
		freefile(child);
		child = getfile(mkaux, me);
	}
- while(child && mkaux->indent > level){
+ while(child != nil && mkaux->indent > level){
		if(mkfile(mkaux, child))
			domkfs(mkaux, child, mkaux->indent);
		freefile(child);
		child = getfile(mkaux, me);
	}
- if(child){
+ if(child != nil){
		freefile(child);
		Bseek(mkaux->b, -Blinelen(mkaux->b), 1);
		mkaux->lineno--;
@@ -198,15 +195,14 @@
					continue;
			}
			child.new = mkpath(mkaux, me->new, d[i].name);
- if(me->old)
+ if(me->old != nil)
				child.old = mkpath(mkaux, me->old, d[i].name);
			child.elem = d[i].name;
			setname(mkaux, &mkaux->oldfile, &child);
			if((!(d[i].mode&DMDIR) || rec) && copyfile(mkaux, &child,
			&d[i], 1) && rec)
				mktree(mkaux, &child, rec);
			free(child.new);
- if(child.old)
- free(child.old);
+ free(child.old);
		}
		free(d);
	}
@@ -281,11 +277,11 @@
	o = mkaux->opt;
	if(strcmp(f->uid, "-") != 0)
		d->uid = f->uid;
- else if(o && o->uid)
+ else if(o != nil && o->uid != nil)
		d->uid = o->uid;
	if(strcmp(f->gid, "-") != 0)
		d->gid = f->gid;
- else if(o && o->gid)
+ else if(o != nil && o->gid != nil)
		d->gid = o->gid;
	if(f->mode != ~0){
		if(permonly)
@@ -294,10 +290,10 @@
			warn(mkaux, "inconsistent mode for %s", f->new);
		else
			d->mode = f->mode;
- } else if(o && o->mask)
+ } else if(o != nil && o->mask)
		d->mode = (d->mode & ~o->mask) | (o->mode &
		o->mask);

- if(p = strrchr(f->new, '/'))
+ if((p = strrchr(f->new, '/')) != nil)
		d->name = p+1;
	else
		d->name = f->new;
@@ -415,28 +411,22 @@
	o = mkaux->opt;
	if(o == nil || mkaux->indent > o->level){
		o = emalloc(mkaux, sizeof(*o));
- if(o == nil)
- longjmp(mkaux->jmp, 1);
- if(mkaux->opt){
+ if(mkaux->opt != nil)
			*o = *mkaux->opt;
- if(o->uid)
- o->uid = estrdup(mkaux, o->uid);
- if(o->gid)
- o->gid = estrdup(mkaux, o->gid);
- }else
- memset(o, 0, sizeof(*o));
		o->level = mkaux->indent;
		o->prev = mkaux->opt;
		mkaux->opt = o;
	} else if(mkaux->indent < o->level)
		return;
	if(strcmp(key, "skip") == 0){
- o->skip = regcomp(val);
+ freeoptptr(o, &o->skip);
+ if((o->skip = regcomp(val)) == nil)
+ warn(mkaux, "bad regular expression %s", val);
	} else if(strcmp(key, "uid") == 0){
- free(o->uid);
+ freeoptptr(o, &o->uid);
		o->uid = *val ? estrdup(mkaux, val) : nil;
	} else if(strcmp(key, "gid") == 0){
- free(o->gid);
+ freeoptptr(o, &o->gid);
		o->gid = *val ? estrdup(mkaux, val) : nil;
	} else if(strcmp(key, "mode") == 0){
		if(!parsemode(val, &o->mask, &o->mode))
@@ -451,23 +441,37 @@
 {
	Opt *o;

- while(o = mkaux->opt){
+ while((o = mkaux->opt) != nil){
		if(o->level <= mkaux->indent)
			break;
		mkaux->opt = o->prev;
- free(o->uid);
- free(o->gid);
+ freeoptptr(o, &o->skip);
+ freeoptptr(o, &o->uid);
+ freeoptptr(o, &o->gid);
		free(o);
	}
 }

 static void
+freeoptptr(Opt *o, void *p)
+{
+ int x = (void**)p - (void**)o;
+ void *v = ((void**)o)[x];
+ if(v == nil)
+ return;
+ ((void**)o)[x] = nil;
+ if((o = o->prev) != nil)
+ if(((void**)o)[x] == v)
+ return;
+ free(v);
+}
+
+
+static void
 freefile(File *f)
 {
- if(f->old)
- free(f->old);
- if(f->new)
- free(f->new);
+ free(f->old);
+ free(f->new);
	free(f);
 }

@@ -478,27 +482,10 @@
 static void
 skipdir(Mkaux *mkaux)
 {
- char *p, c;
	int level;

- if(mkaux->indent < 0)
- return;
	level = mkaux->indent;
- for(;;){
- mkaux->indent = 0;
- p = Brdline(mkaux->b, '\n');
- mkaux->lineno++;
- if(!p){
- mkaux->indent = -1;
- return;
- }
- while((c = *p++) != '\n')
- if(c == ' ')
- mkaux->indent++;
- else if(c == '\t')
- mkaux->indent += 8;
- else
- break;
+ while(getline(mkaux) != nil){
		if(mkaux->indent <= level){
			popopt(mkaux);
			Bseek(mkaux->b, -Blinelen(mkaux->b), 1);
@@ -508,23 +495,26 @@
	}
 }

-static File*
-getfile(Mkaux *mkaux, File *old)
+static char*
+getline(Mkaux *mkaux)
 {
- File *f;
- char *elem;
- char *p, *s;
+ char *p;
	int c;

	if(mkaux->indent < 0)
- return 0;
+ return nil;
 loop:
	mkaux->indent = 0;
	p = Brdline(mkaux->b, '\n');
	mkaux->lineno++;
- if(!p){
+ if(p == nil){
		mkaux->indent = -1;
- return 0;
+ return nil;
+ }
+ if(memchr(p, 0, Blinelen(mkaux->b)) != nil){
+ warn(mkaux, "null bytes in proto");
+ longjmp(mkaux->jmp, 1);
+ return nil;
	}
	while((c = *p++) != '\n')
		if(c == ' ')
@@ -535,41 +525,62 @@
			break;
	if(c == '\n' || c == '#')
		goto loop;
- p--;
+ return --p;
+}
+
+static File*
+getfile(Mkaux *mkaux, File *old)
+{
+ File *f;
+ char *elem;
+ char *p, *s;
+
+loop:
+ if((p = getline(mkaux)) == nil)
+ return nil;
	popopt(mkaux);
+
	*strchr(p, '\n') = 0;
- if(s = strchr(p, '=')){
+ if((s = strchr(p, '=')) != nil){
		*s++ = 0;
		setopt(mkaux, p, s);
		goto loop;
	}else
		p[strlen(p)] = '\n';
- f = emalloc(mkaux, sizeof *f);
- p = getname(mkaux, p, &elem);
- if(p == nil)
+
+ if((p = getname(mkaux, p, &elem)) == nil)
		return nil;

+ f = emalloc(mkaux, sizeof *f);
	f->new = mkpath(mkaux, old->new, elem);
	free(elem);
	f->elem = utfrrune(f->new, L'/') + 1;
- p = getmode(mkaux, p, &f->mode);
- p = getname(mkaux, p, &f->uid); /* LEAK */
- if(p == nil)
+
+ if((p = getmode(mkaux, p, &f->mode)) == nil){
+ freefile(f);
		return nil;
+ }

- if(!*f->uid)
+ if((p = getname(mkaux, p, &f->uid)) == nil){
+ freefile(f);
+ return nil;
+ }
+ if(*f->uid == 0)
		strcpy(f->uid, "-");
- p = getname(mkaux, p, &f->gid); /* LEAK */
- if(p == nil)
+
+ if((p = getname(mkaux, p, &f->gid)) == nil){
+ freefile(f);
		return nil;
+ }
+ if(*f->gid == 0)
+ strcpy(f->gid, "-");

- if(!*f->gid)
- strcpy(f->gid, "-");
	f->old = getpath(mkaux, p);
- if(f->old && strcmp(f->old, "-") == 0){
+ if(f->old != nil && strcmp(f->old, "-") == 0){
		free(f->old);
- f->old = 0;
+ f->old = nil;
	}
+
	setname(mkaux, &mkaux->oldfile, f);

	return f;
@@ -587,7 +598,7 @@
	while((c = *q) != '\n' && c != ' ' && c != '\t')
		q++;
	if(q == p)
- return 0;
+ return nil;
	n = q - p;
	new = emalloc(mkaux, n + 1);
	memcpy(new, p, n);
@@ -613,14 +624,14 @@
		return nil;
	memmove(*buf, start, p-start);

- (*buf)[p-start] = '\0';
+ (*buf)[p-start] = 0;

	if(**buf == '$'){
		s = getenv(*buf+1);
- if(s == 0){
+ if(s == nil){
			warn(mkaux, "can't read environment variable %s", *buf+1);
+ free(*buf);
			skipdir(mkaux);
- free(*buf);
			return nil;
		}
		free(*buf);
@@ -636,12 +647,11 @@
	ulong m;

	*xmode = ~0;
- p = getname(mkaux, p, &buf);
- if(p == nil)
+ if((p = getname(mkaux, p, &buf)) == nil)
		return nil;

	s = buf;
- if(!*s || strcmp(s, "-") == 0)
+ if(*s == 0 || strcmp(s, "-") == 0)
		return p;
	m = 0;
	if(*s == 'd'){
@@ -679,7 +689,7 @@
	vseprint(buf, buf+sizeof(buf), fmt, va);
	va_end(va);

- if(mkaux->warn)
+ if(mkaux->warn != nil)
		mkaux->warn(buf, mkaux->a);
	else
		fprint(2, "warning: %s\n", buf);


prev | next