#!/usr/bin/env bash
# Pre-condition: the database cluster has already been created and
# initialized under $PGDATA before this benchmark driver is run.

# Install root shared by the data directory and libraries; edit per machine.
PG_HOME=/data/amit.kapila/workspace/master

export PGDATA="$PG_HOME/data"
export PGPORT=5433
# Prepend our lib dir; the ${VAR:+...} form avoids a trailing ':' (which
# would add the current directory to the search path) when the variable
# was previously unset or empty.
export LD_LIBRARY_PATH="$PG_HOME/installation/lib${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}"

# Each configuration string holds four space-separated fields:
#   shared_buffers  time_for_reading(s)  no_of_readings  orig_or_patch
# where orig_or_patch selects the binary: 0=original, 1=64 clog buffers,
# 2=128 clog buffers.
for config in "8GB 600 3 0" "8GB 600 3 1" "8GB 600 3 2"
do
	# Split the four fields in one builtin read instead of four
	# `echo | cut` subshell pipelines.
	read -r shared_bufs time_for_reading no_of_readings orig_or_patch <<EOF
$config
EOF

	# Map the selector to a binary suffix; abort on an unknown value
	# rather than silently reusing the previous iteration's run_bin
	# (the original if/elif chain had no else branch).
	case "$orig_or_patch" in
		0) run_bin="org" ;;
		1) run_bin="clog_bufs_64" ;;
		2) run_bin="clog_bufs_128" ;;
		*) echo "unknown orig_or_patch value: $orig_or_patch" >&2; exit 1 ;;
	esac
	# -----------------------------------------------

	echo "Start of script for $shared_bufs " >> test_results.txt

	echo "============== $run_bin =============" >> test_results.txt
	# Install the binary under test as ./postgres for this configuration.
	cp "postgres_${run_bin}" postgres || exit 1

	for threads in 1 64
	do
		# Take no_of_readings repeated measurements per thread count.
		for ((readcnt = 1; readcnt <= no_of_readings; readcnt++))
		do
			echo "================================================" >> test_results.txt
			echo "$shared_bufs, $threads, $threads, $time_for_reading Reading - ${readcnt}" >> test_results.txt
			# Start the server in the background and give it time to
			# begin accepting connections.
			./postgres -c shared_buffers="$shared_bufs" -c max_connections=200 \
				-c maintenance_work_mem=1GB -c checkpoint_timeout=35min \
				-c min_wal_size=10GB -c max_wal_size=15GB \
				-c checkpoint_completion_target=0.9 -c wal_buffers=256MB &
			sleep 5
			# Recreate the benchmark database from scratch for every run
			# so readings do not influence each other.
			./dropdb postgres
			./createdb postgres
			./psql -f access_clog_prep.sql -d postgres
			# Run pgbench with the custom clog-access workload.
			./pgbench -n -M prepared -f access_clog.sql \
				-c "$threads" -j "$threads" -T "$time_for_reading" postgres >> test_results.txt
			sleep 10
			# Force a checkpoint so the next run starts from a clean slate,
			# then shut the server down (pg_ctl finds it via $PGDATA).
			./psql -d postgres -c "checkpoint" >> test_results.txt
			./pg_ctl stop
			sleep 10
		done
	done

	sleep 1

	# Archive this configuration's results under a descriptive name so the
	# next iteration starts a fresh test_results.txt.
	mv test_results.txt "test_results_list_${shared_bufs}_${run_bin}_${time_for_reading}.txt"
done
