#!/usr/bin/bash
#
# Parallel VACUUM benchmark driver.
#
# Usage: ./<script> MACHINE
#   MACHINE - label recorded as the first column of results.csv
#
# Assumes: a PostgreSQL instance at /mnt/pgdata/data-parallel-vacuum with
# template databases named test-<size>-<logged>, builds installed under
# /mnt/data/builds/{master,patched}/bin, and a ./drop-caches.sh helper
# runnable via sudo.

set -e

MACHINE=$1
PATH_OLD=$PATH

# Regenerate the list of parallel-worker counts to benchmark.
# -f: don't abort under "set -e" on the first run, when the file doesn't exist.
rm -f workers.list
for w in 0 1 2 3 4 6 8; do
	echo "$w" >> workers.list
done

# per-iteration psql output is collected here (redirects below fail otherwise)
mkdir -p logs

# Benchmark matrix: 3 runs x size x delete-fraction x logged/unlogged
# x index count x build x worker count.
for run in 1 2 3; do

	#for size in small large verylarge; do
	for size in small; do

		# what fraction of table to delete, each page has ~200 rows
		for m in 100 500 1000 5000; do

			for logged in logged unlogged; do

				# Reset PATH from the saved original instead of prepending
				# to the current value, so it does not grow on every
				# iteration (and stale build dirs don't shadow this one).
				PATH=/mnt/data/builds/master/bin:$PATH_OLD

				# create copy of the database from the template
				dropdb --if-exists test
				createdb test -T "test-$size-$logged" -S file_copy

				# per-combination counter; doubles as the modulus residue
				# for the DELETE, so each iteration deletes different rows
				r=0

				# drop one more index on each pass (from 5 down to 0)
				for indexes in 5 4 3 2 1 0; do

					psql test -c "drop index if exists idx_${indexes}"

					for build in master patched; do

						# shuffle worker counts to spread out cache effects
						for workers in $(shuf workers.list); do

							# master does not support parallel vacuum
							if [ "$build" == "master" ] && [ "$workers" != "0" ]; then
								continue
							fi

							ts=$(date +%s)
							echo "$(date)" "=========== ts: $ts run: $run size: $size build: $build workers: $workers indexes: $indexes logged: $logged m: $m r: $r =========="

							# again: reset from the original, don't accumulate
							PATH=/mnt/data/builds/$build/bin:$PATH_OLD

							# stop/start, to use correct build (|| true: don't
							# abort under "set -e" if the server is not running)
							pg_ctl -D /mnt/pgdata/data-parallel-vacuum -l pg.log stop || true
							pg_ctl -D /mnt/pgdata/data-parallel-vacuum -l pg.log start

							# clean page cache
							echo "$(date)" "drop page cache"
							sudo ./drop-caches.sh

							# log the running backends (|| true: a grep miss
							# must not kill the script under "set -e")
							ps ax | grep postgres || true

							datoid=$(psql -t -A test -c "select oid from pg_database where datname = current_database()")
							relfilenodes=$(psql -t -A test -c "select pg_relation_filenode(oid) from pg_class where relname like 'idx_%' order by relname")

							# warm the OS page cache for every index segment
							for f in $relfilenodes; do
								echo "$(date)" "page cache warmup index relfilenode $datoid/$f"
								cat "/mnt/pgdata/data-parallel-vacuum/base/$datoid/$f"* > /dev/null
							done

							relfilenode=$(psql -t -A test -c "select pg_relation_filenode(oid) from pg_class where relname like 'test_vacuum'")

							# ... and for the table itself
							echo "$(date)" "page cache warmup table relfilenode $datoid/$relfilenode"
							cat "/mnt/pgdata/data-parallel-vacuum/base/$datoid/$relfilenode"* > /dev/null

							# record schema state in the console log
							psql test -c "\d+"
							psql test -c "\di+"

							psql test -c "\d+ test_vacuum"

							# settle the table before the timed vacuum
							psql test -c "vacuum analyze test_vacuum"
							psql test -c "checkpoint"

							sync

							psql test > "logs/$ts.delete.log" 2>&1 <<EOF
-- deletes two rows on each page
delete from test_vacuum where mod(a,$m) = $r;
checkpoint;
EOF

							cat "logs/$ts.delete.log"

							# timestamps taken server-side, so the delta only
							# covers the vacuum statement itself
							s=$(psql -t -A test -c "select extract(epoch from now())")
							start_time=$(psql -t -A test -c "select now()")

							echo "$(date)" "vacuum (parallel $workers, verbose) test_vacuum"

							psql test > "logs/$ts.vacuum.log" 2>&1 <<EOF
vacuum (parallel $workers, verbose) test_vacuum;
EOF
							d=$(psql -t -A test -c "select extract(epoch from now()) - $s")
							e=$(psql -t -A test -c "select extract(epoch from now())")
							end_time=$(psql -t -A test -c "select now()")

							# scrape VACUUM VERBOSE counters out of the log
							is=$(grep 'index scans:' "logs/$ts.vacuum.log" | awk '{print $7}')

							pages_removed=$(grep '^pages:' "logs/$ts.vacuum.log" | awk '{print $2}')
							pages_remain=$(grep '^pages:' "logs/$ts.vacuum.log" | awk '{print $4}')
							pages_scanned=$(grep '^pages:' "logs/$ts.vacuum.log" | awk '{print $6}')
							pages_frozen=$(grep '^frozen:' "logs/$ts.vacuum.log" | awk '{print $2}')

							tuples_removed=$(grep '^tuples:' "logs/$ts.vacuum.log" | awk '{print $2}')
							tuples_remain=$(grep '^tuples:' "logs/$ts.vacuum.log" | awk '{print $4}')

							cat "logs/$ts.vacuum.log"

							echo "$(date)" "START TIME: $start_time ($s)"
							echo "$(date)" "END TIME: $end_time ($e)"
							echo "$(date)" "DELTA: $d"

							# one space-separated result row per vacuum
							echo "$MACHINE" "$ts" "$run" "$size" "$m" "$r" "$build" "$workers" "$indexes" "$logged" "$d" "$is" "$pages_removed" "$pages_remain" "$pages_scanned" "$pages_frozen" "$tuples_removed" "$tuples_remain" >> results.csv

							r=$((r+1))

						done

					done

				done

			done

		done

	done

done
