summary refs log tree commit diff
path: root/scripts/05-dataproc-submit.sh
diff options
context:
space:
mode:
author    Santo Cariotti <santo@dcariotti.me>  2024-12-28 15:17:04 +0100
committer Santo Cariotti <santo@dcariotti.me>  2024-12-28 15:17:04 +0100
commit2aa010e47387f5c60d63824dce65f76f22eecddc (patch)
treecf39154c7e58c697da3d9c0e00fad711fe9e59c0 /scripts/05-dataproc-submit.sh
parent246369828ecdaf879923b19ff222881cbe6c3953 (diff)
Check on scripts + update num worker
Diffstat (limited to 'scripts/05-dataproc-submit.sh')
-rwxr-xr-xscripts/05-dataproc-submit.sh41
1 files changed, 35 insertions, 6 deletions
diff --git a/scripts/05-dataproc-submit.sh b/scripts/05-dataproc-submit.sh
index dfc5498..b70e138 100755
--- a/scripts/05-dataproc-submit.sh
+++ b/scripts/05-dataproc-submit.sh
#!/bin/sh
#
# Submit the co-purchase-analysis Spark job to a Dataproc cluster.
#
# Required environment variables:
#   BUCKET_NAME - GCS bucket holding the job jar, input/ and output data
#   CLUSTER     - Dataproc cluster name
#   REGION      - Dataproc region (e.g. europe-west1)
#
# If gs://${BUCKET_NAME}/output already exists, it is renamed to a unique
# path first so the Spark job does not fail on a pre-existing output dir.
set -eu

# Validate the environment BEFORE deriving any paths from it.
if [ -z "${BUCKET_NAME:-}" ] || [ -z "${CLUSTER:-}" ] || [ -z "${REGION:-}" ]; then
    echo "Error: BUCKET_NAME, CLUSTER, and REGION environment variables must be set." >&2
    exit 1
fi

INPUT_PATH="gs://${BUCKET_NAME}/input/"
OUTPUT_PATH="gs://${BUCKET_NAME}/output"

if gsutil ls "${OUTPUT_PATH}" > /dev/null 2>&1; then
    # Prefer uuidgen (portable); /proc/sys/kernel/random/uuid is Linux-only.
    UUID=$( (uuidgen 2>/dev/null || cat /proc/sys/kernel/random/uuid) | tr 'A-Z' 'a-z')
    NEW_OUTPUT_PATH="${OUTPUT_PATH}-${UUID}"

    echo ">>>> Output folder already exists. Renaming to ${NEW_OUTPUT_PATH}..."
    # Server-side rename in one step instead of copy-then-delete: cheaper,
    # and avoids the window where the data exists twice or is lost halfway.
    if gsutil -m mv "${OUTPUT_PATH}" "${NEW_OUTPUT_PATH}"; then
        echo ">>>> Original output folder successfully renamed to ${NEW_OUTPUT_PATH}"
    else
        echo "Error: Failed to rename the existing output folder to ${NEW_OUTPUT_PATH}." >&2
        exit 1
    fi
fi

gcloud dataproc jobs submit spark \
    --cluster="${CLUSTER}" \
    --jar="gs://${BUCKET_NAME}/scala/co-purchase-analysis_2.12-1.0.jar" \
    --region="${REGION}" \
    --properties="spark.hadoop.fs.gs.impl=com.google.cloud.hadoop.fs.gcs.GoogleHadoopFileSystem" \
    -- "${INPUT_PATH}" "${OUTPUT_PATH}"