#!/bin/bash
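#
# score.sh: submit one of the com.piki_ds.ver1 scoring classes to YARN via
# spark-submit, append all output to a dated log under ./logs, and delete the
# corresponding log file from 7 days ago (simple log rotation).
#
# usage : sh score.sh <scalaClassName>
# ex)     sh score.sh EditorScore
#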
BASEDIR=$(dirname "$0")
if [ "$#" -ne 1 ]
then
echo "usage : sh score.sh <scalaClassName>"
echo "ex) sh score.sh EditorScore"
exit 1
fi
# Script working directory (kept out of $HOME so the environment seen by
# spark-submit is not clobbered)
APP_HOME=$BASEDIR
LOG_DIR="$APP_HOME/logs"
mkdir -p "$LOG_DIR"
TODAY=$(date +"%Y%m%d")
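# Logs older than DATE_SUB days are rotated out: build the filename of the log
# written DATE_SUB days ago so it can be removed at the end of this run.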
DATE_SUB="7"
DAY_TO_DELETE="$(date "+%Y%m%d" -d "$DATE_SUB days ago")"
DELETE_LOG="${LOG_DIR}/$1_$DAY_TO_DELETE.log"
LOG="${LOG_DIR}/$1_$TODAY.log"
#HADOOP_CONF_DIR=/etc/hadoop/conf
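# Submit the selected class to YARN in cluster mode with dynamic allocation
# bounded to 5-20 executors; spark-submit output is appended to today's log.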
/data/spark/bin/spark-submit \
--class com.piki_ds.ver1.$1 \
--master yarn-cluster \
--conf "spark.yarn.maxAppAttempts=1" \
--conf "spark.default.parallelism=120" \
--conf "spark.executor.memory=3g" \
--conf "spark.dynamicAllocation.minExecutors=5" \
--conf "spark.dynamicAllocation.maxExecutors=20" \
"$BASEDIR"/target/scala-2.11/dsquality-assembly-0.1.0-SNAPSHOT.jar >> "$LOG" 2>&1
#target/scala-2.11/dsmakingscore-assembly-0.1.0-SNAPSHOT.jar >> $LOG 2>&1
#target/scala-2.11/dsmakingscore_2.11-0.1.0-SNAPSHOT.jar >> $LOG 2>&1
#--jars "lib_managed/jars/mysql/mysql-connector-java/mysql-connector-java-5.1.36.jar" \
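# Mark the end of this run in the log before rotating old logs.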
echo "END END END END END" >> $LOG
echo "END END END END END" >> $LOG
echo "END END END END END" >> $LOG
echo "END END END END END" >> $LOG
echo "END END END END END" >> $LOG
echo "IF LOG FILE $DELETE_LOG EXISTS DELETE" >> $LOG
# Log rotation
if [ -f "$DELETE_LOG" ]
then
rm -f "$DELETE_LOG" >> "$LOG" 2>&1
echo "$DELETE_LOG deleted" >> "$LOG"
fi