diff --git a/Jenkinsfile b/Jenkinsfile index ccad6e17b..4383c1a7a 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -218,22 +218,24 @@ void makeReport() { } void clusterRunner(String cluster) { - def clusterCreated=0 + withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AMI/OVF', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]){ + def clusterCreated=0 - for (int i=0; i= 1) { - shutdownCluster(cluster) + if (clusterCreated >= 1) { + shutdownCluster(cluster) + } } } @@ -306,6 +308,35 @@ EOF curl -sL https://github.com/mitchellh/golicense/releases/latest/download/golicense_0.2.0_linux_x86_64.tar.gz | sudo tar -C /usr/local/bin -xzf - golicense """ + installAzureCLI() + azureAuth() +} + +void azureAuth() { + withCredentials([azureServicePrincipal('PERCONA-OPERATORS-SP')]) { + sh ''' + az login --service-principal -u "$AZURE_CLIENT_ID" -p "$AZURE_CLIENT_SECRET" -t "$AZURE_TENANT_ID" --allow-no-subscriptions + az account set -s "$AZURE_SUBSCRIPTION_ID" + ''' + } +} + +void installAzureCLI() { + sh """ + if ! command -v az &>/dev/null; then + echo "Installing Azure CLI for Hetzner instances..." 
+ sudo rpm --import https://packages.microsoft.com/keys/microsoft.asc + cat </dev/null + setup_gcs_credentials + check_backup_existence_gcs "$backup_dest_gcp" "/rs0/myApp.test.gz" run_mongos 'use myApp\n db.test.insert({ x: 100501 })' "myApp:myPass@$cluster-mongos.$namespace" compare_mongos_cmd "find" "myApp:myPass@$cluster-mongos.$namespace" "-2nd" run_restore "$backup_name_gcp" diff --git a/e2e-tests/demand-backup-eks-credentials-irsa/run b/e2e-tests/demand-backup-eks-credentials-irsa/run index 3284475ac..9c084ff96 100755 --- a/e2e-tests/demand-backup-eks-credentials-irsa/run +++ b/e2e-tests/demand-backup-eks-credentials-irsa/run @@ -116,7 +116,8 @@ sleep 5 desc 'check backup and restore -- aws-s3' backup_dest_aws=$(get_backup_dest "$backup_name_aws") -curl -s "https://s3.amazonaws.com/${backup_dest_aws}/rs0/myApp.test.gz" | gunzip >/dev/null +setup_aws_credentials +check_backup_existence_aws "$backup_dest_aws" "/rs0/myApp.test.gz" run_mongo 'use myApp\n db.test.insert({ x: 100501 })' "myApp:myPass@$cluster-rs0.$namespace" compare_mongo_cmd "find" "myApp:myPass@$cluster-rs0-0.$cluster-rs0.$namespace" "-2nd" compare_mongo_cmd "find" "myApp:myPass@$cluster-rs0-1.$cluster-rs0.$namespace" "-2nd" diff --git a/e2e-tests/demand-backup-eks-credentials/run b/e2e-tests/demand-backup-eks-credentials/run index 96ffed793..35ed886fe 100755 --- a/e2e-tests/demand-backup-eks-credentials/run +++ b/e2e-tests/demand-backup-eks-credentials/run @@ -57,7 +57,8 @@ sleep 5 desc 'check backup and restore -- aws-s3' backup_dest_aws=$(get_backup_dest "$backup_name_aws") -curl -s "https://s3.amazonaws.com/${backup_dest_aws}/rs0/myApp.test.gz" | gunzip >/dev/null +setup_aws_credentials +check_backup_existence_aws "$backup_dest_aws" "/rs0/myApp.test.gz" run_mongo 'use myApp\n db.test.insert({ x: 100501 })' "myApp:myPass@$cluster.$namespace" compare_mongo_cmd "find" "myApp:myPass@$cluster-0.$cluster.$namespace" "-2nd" compare_mongo_cmd "find" "myApp:myPass@$cluster-1.$cluster.$namespace" 
"-2nd" @@ -70,7 +71,7 @@ compare_mongo_cmd "find" "myApp:myPass@$cluster-2.$cluster.$namespace" desc 'delete backup and check if it is removed from bucket -- aws-s3' kubectl_bin delete psmdb-backup --all -check_backup_deletion "https://s3.amazonaws.com/${backup_dest_aws}" "aws-s3" +check_backup_deletion_aws "$backup_dest_aws" "/rs0/myApp.test.gz" destroy $namespace diff --git a/e2e-tests/demand-backup-sharded/run b/e2e-tests/demand-backup-sharded/run index 7be597b69..43c81b0cb 100755 --- a/e2e-tests/demand-backup-sharded/run +++ b/e2e-tests/demand-backup-sharded/run @@ -108,9 +108,10 @@ sleep 5 if [ -z "$SKIP_BACKUPS_TO_AWS_GCP_AZURE" ]; then desc 'check backup and restore -- aws-s3' backup_dest_aws=$(get_backup_dest "$backup_name_aws") - curl -s "https://s3.amazonaws.com/${backup_dest_aws}/rs0/myApp.test.gz" | gunzip >/dev/null - curl -s "https://s3.amazonaws.com/${backup_dest_aws}/rs1/myApp1.test.gz" | gunzip >/dev/null - curl -s "https://s3.amazonaws.com/${backup_dest_aws}/rs2/myApp2.test.gz" | gunzip >/dev/null + setup_aws_credentials + check_backup_existence_aws "$backup_dest_aws" "/rs0/myApp.test.gz" + check_backup_existence_aws "$backup_dest_aws" "/rs1/myApp1.test.gz" + check_backup_existence_aws "$backup_dest_aws" "/rs2/myApp2.test.gz" insert_data_mongos "100501" "myApp" "" "$custom_port" insert_data_mongos "100501" "myApp1" "" "$custom_port" insert_data_mongos "100501" "myApp2" "" "$custom_port" @@ -121,9 +122,10 @@ if [ -z "$SKIP_BACKUPS_TO_AWS_GCP_AZURE" ]; then desc 'check backup and restore -- gcp-cs' backup_dest_gcp=$(get_backup_dest "$backup_name_gcp") - curl -s "https://storage.googleapis.com/${backup_dest_gcp}/rs0/myApp.test.gz" | gunzip >/dev/null - curl -s "https://storage.googleapis.com/${backup_dest_gcp}/rs1/myApp1.test.gz" | gunzip >/dev/null - curl -s "https://storage.googleapis.com/${backup_dest_gcp}/rs2/myApp2.test.gz" | gunzip >/dev/null + setup_gcs_credentials + check_backup_existence_gcs "$backup_dest_gcp" "/rs0/myApp.test.gz" + 
check_backup_existence_gcs "$backup_dest_gcp" "/rs1/myApp1.test.gz" + check_backup_existence_gcs "$backup_dest_gcp" "/rs2/myApp2.test.gz" insert_data_mongos "100501" "myApp" "" "$custom_port" insert_data_mongos "100501" "myApp1" "" "$custom_port" insert_data_mongos "100501" "myApp2" "" "$custom_port" @@ -134,9 +136,10 @@ if [ -z "$SKIP_BACKUPS_TO_AWS_GCP_AZURE" ]; then desc 'check backup and restore -- azure-blob' backup_dest_azure=$(get_backup_dest "$backup_name_azure") - curl -s "https://engk8soperators.blob.core.windows.net/${backup_dest_azure}/rs0/myApp.test.gz" | gunzip >/dev/null - curl -s "https://engk8soperators.blob.core.windows.net/${backup_dest_azure}/rs1/myApp1.test.gz" | gunzip >/dev/null - curl -s "https://engk8soperators.blob.core.windows.net/${backup_dest_azure}/rs2/myApp2.test.gz" | gunzip >/dev/null + setup_azure_credentials + check_backup_existence_azure "$backup_dest_azure" "/rs0/myApp.test.gz" + check_backup_existence_azure "$backup_dest_azure" "/rs1/myApp1.test.gz" + check_backup_existence_azure "$backup_dest_azure" "/rs2/myApp2.test.gz" insert_data_mongos "100501" "myApp" "" "$custom_port" insert_data_mongos "100501" "myApp1" "" "$custom_port" insert_data_mongos "100501" "myApp2" "" "$custom_port" @@ -169,9 +172,9 @@ if [[ $backup_exists -eq 1 ]]; then fi if [ -z "$SKIP_BACKUPS_TO_AWS_GCP_AZURE" ]; then - check_backup_deletion "https://s3.amazonaws.com/${backup_dest_aws}" "aws-s3" - check_backup_deletion "https://storage.googleapis.com/${backup_dest_gcp}" "gcp-cs" - check_backup_deletion "https://engk8soperators.blob.core.windows.net/${backup_dest_azure}" "azure-blob" + check_backup_deletion_aws "$backup_dest_aws" "/rs0/myApp.test.gz" + check_backup_deletion_gcs "$backup_dest_gcp" "/rs0/myApp.test.gz" + check_backup_deletion_azure "$backup_dest_azure" "/rs0/myApp.test.gz" fi # Temporarily skipping this check @@ -180,3 +183,4 @@ fi kubectl_bin delete -f "$conf_dir/container-rc.yaml" destroy "$namespace" +desc 'test passed' diff --git 
a/e2e-tests/demand-backup/run b/e2e-tests/demand-backup/run index 2c14b609d..09a4b77a7 100755 --- a/e2e-tests/demand-backup/run +++ b/e2e-tests/demand-backup/run @@ -188,7 +188,7 @@ wait_backup_agent $cluster-1 wait_backup_agent $cluster-2 if [ -z "$SKIP_BACKUPS_TO_AWS_GCP_AZURE" ]; then - desc 'Check GCS pfofiles' + desc 'Check GCS profiles' compare_pbm_profile_setup "some-name" "gcp-cs-s3" compare_pbm_profile_setup "some-name" "gcp-cs-sa" fi @@ -223,22 +223,25 @@ sleep 5 if [ -z "$SKIP_BACKUPS_TO_AWS_GCP_AZURE" ]; then desc 'check backup and restore -- aws-s3' backup_dest_aws=$(get_backup_dest "$backup_name_aws") - check_backup_in_storage ${backup_name_aws} s3 rs0 'myApp.test.gz' + setup_aws_credentials + check_backup_existence_aws ${backup_dest_aws} '/rs0/myApp.test.gz' run_recovery_check "$backup_name_aws" "$cluster" desc 'check backup and restore -- gcp-cs-s3' backup_dest_gcp_s3=$(get_backup_dest "$backup_name_gcp_s3") - check_backup_in_storage ${backup_name_gcp_s3} gcs rs0 'myApp.test.gz' + setup_gcs_credentials + check_backup_existence_gcs ${backup_dest_gcp_s3} '/rs0/myApp.test.gz' run_recovery_check "$backup_name_gcp_s3" "$cluster" desc 'check backup and restore -- gcp-cs-sa' backup_dest_gcp_sa=$(get_backup_dest "$backup_name_gcp_sa") - check_backup_in_storage ${backup_name_gcp_sa} gcs rs0 'myApp.test.gz' + check_backup_existence_gcs ${backup_dest_gcp_sa} '/rs0/myApp.test.gz' run_recovery_check "$backup_name_gcp_sa" "$cluster" desc 'check backup and restore -- azure-blob' backup_dest_azure=$(get_backup_dest "$backup_name_azure") - check_backup_in_storage ${backup_name_azure} azure rs0 'myApp.test.gz' + setup_azure_credentials + check_backup_existence_azure ${backup_dest_azure} '/rs0/myApp.test.gz' run_recovery_check "$backup_name_azure" "$cluster" fi @@ -285,10 +288,10 @@ if [[ $backup_exists -eq 1 ]]; then fi if [ -z "$SKIP_BACKUPS_TO_AWS_GCP_AZURE" ]; then - check_backup_deletion "https://s3.amazonaws.com/${backup_dest_aws}" "aws-s3" - 
check_backup_deletion "https://storage.googleapis.com/${backup_dest_gcp_s3}" "gcp-cs-s3" - check_backup_deletion "https://storage.googleapis.com/${backup_dest_gcp_sa}" "gcp-cs-sa" - check_backup_deletion "${backup_dest_azure}" "azure-blob" + check_backup_deletion_aws "$backup_dest_aws" "/rs0/myApp.test.gz" + check_backup_deletion_gcs "$backup_dest_gcp_s3" "/rs0/myApp.test.gz" + check_backup_deletion_gcs "$backup_dest_gcp_sa" "/rs0/myApp.test.gz" + check_backup_deletion_azure "$backup_dest_azure" "/rs0/myApp.test.gz" fi desc 'checking backup deletion without cr' @@ -327,10 +330,10 @@ if [[ $backup_exists -eq 1 ]]; then fi if [ -z "$SKIP_BACKUPS_TO_AWS_GCP_AZURE" ]; then - check_backup_deletion "https://s3.amazonaws.com/${backup_dest_aws}" "aws-s3" - check_backup_deletion "https://storage.googleapis.com/${backup_dest_gcp_s3}" "gcp-cs-s3" - check_backup_deletion "https://storage.googleapis.com/${backup_dest_gcp_sa}" "gcp-cs-sa" - check_backup_deletion "${backup_dest_azure}" "azure-blob" + check_backup_deletion_aws "$backup_dest_aws" "/rs0/myApp.test.gz" + check_backup_deletion_gcs "$backup_dest_gcp_s3" "/rs0/myApp.test.gz" + check_backup_deletion_gcs "$backup_dest_gcp_sa" "/rs0/myApp.test.gz" + check_backup_deletion_azure "$backup_dest_azure" "/rs0/myApp.test.gz" fi # Temporarily skipping this check diff --git a/e2e-tests/functions b/e2e-tests/functions index a6e4259f6..9bace1660 100755 --- a/e2e-tests/functions +++ b/e2e-tests/functions @@ -10,7 +10,7 @@ IMAGE_MONGOD_CHAIN=${IMAGE_MONGOD_CHAIN:-$' perconalab/percona-server-mongodb-operator:main-mongod6.0 perconalab/percona-server-mongodb-operator:main-mongod7.0 perconalab/percona-server-mongodb-operator:main-mongod8.0'} -IMAGE_BACKUP=${IMAGE_BACKUP:-"perconalab/percona-server-mongodb-operator:main-backup"} +IMAGE_BACKUP=${IMAGE_BACKUP:-"percona/percona-backup-mongodb:2.11.0"} SKIP_BACKUPS_TO_AWS_GCP_AZURE=${SKIP_BACKUPS_TO_AWS_GCP_AZURE:-1} PMM_SERVER_VER=${PMM_SERVER_VER:-"9.9.9"} 
IMAGE_PMM_CLIENT=${IMAGE_PMM_CLIENT:-"percona/pmm-client:2.44.1-1"} @@ -1569,6 +1569,72 @@ run_backup() { | kubectl_bin apply -f - } +function check_backup_deletion_aws() { + bucket=$(echo "$1" | cut -d'/' -f1) + key_prefix=$(echo "$1" | cut -d'/' -f2-) + key=$2 + storage_name="aws-s3" + retry=0 + + while aws s3api head-object --bucket "$bucket" --key "${key_prefix}${key}" &>/dev/null; do + if [ $retry -ge 15 ]; then + echo "max retry count $retry reached. something went wrong with operator or kubernetes cluster" + echo "Backup still exists in $storage_name (expected it to be deleted)" + exit 1 + fi + echo "waiting for backup to be deleted from $storage_name" + sleep 10 + ((retry += 1)) + done + + echo "Backup ${key_prefix}${key} in bucket $bucket not found in $storage_name" +} + +function check_backup_deletion_gcs() { + backup_dest_gcp=$1 + obj=$2 + storage_name="gcp-cs" + retry=0 + gcs_path="gs://${backup_dest_gcp}${obj}" + + while gsutil ls "$gcs_path" >/dev/null 2>&1; do + if [ $retry -ge 15 ]; then + echo "max retry count $retry reached. something went wrong with operator or kubernetes cluster" + echo "Backup $gcs_path still exists in $storage_name (expected it to be deleted)" + exit 1 + fi + echo "waiting for backup to be deleted from $storage_name" + sleep 10 + ((retry += 1)) + done + + echo "Backup $gcs_path not found in $storage_name" +} + +function check_backup_deletion_azure() { + url_path=$(echo "$1" | sed 's|https://[^/]*\.blob\.core\.windows\.net/||') + container=$(echo "$url_path" | cut -d'/' -f1) + blob_prefix=$(echo "$url_path" | cut -d'/' -f2-) + + blob=$2 + storage_name="azure-blob" + retry=0 + blob_path="${blob_prefix}${blob}" + + while az storage blob show --container-name "$container" --name "$blob_path" &>/dev/null; do + if [ $retry -ge 15 ]; then + echo "max retry count $retry reached. 
something went wrong with operator or kubernetes cluster" + echo "Backup still exists in $storage_name (expected it to be deleted)" + exit 1 + fi + echo "waiting for backup to be deleted from $storage_name" + sleep 10 + ((retry += 1)) + done + + echo "Backup ${blob_path} in container $container not found in $storage_name" +} + check_backup_deletion() { local path=$1 local storage_name=$2 @@ -1586,6 +1652,119 @@ check_backup_deletion() { done } +function setup_aws_credentials() { + local secret_name="aws-s3-secret" + + if [[ -n "$AWS_ACCESS_KEY_ID" ]] && [[ -n "$AWS_SECRET_ACCESS_KEY" ]]; then + echo "AWS credentials already set in environment" + return 0 + fi + + echo "Setting up AWS credentials from secret: $secret_name" + + # Disable tracing for the entire credential section + local trace_was_on=0 + if [[ $- == *x* ]]; then + trace_was_on=1 + set +x + fi + + AWS_ACCESS_KEY_ID=$(kubectl get secret "$secret_name" -o jsonpath='{.data.AWS_ACCESS_KEY_ID}' 2>/dev/null | base64 -d 2>/dev/null) + AWS_SECRET_ACCESS_KEY=$(kubectl get secret "$secret_name" -o jsonpath='{.data.AWS_SECRET_ACCESS_KEY}' 2>/dev/null | base64 -d 2>/dev/null) + + if [[ -z "$AWS_ACCESS_KEY_ID" ]] || [[ -z "$AWS_SECRET_ACCESS_KEY" ]]; then + # Re-enable tracing before error message if it was on + [[ $trace_was_on -eq 1 ]] && set -x + echo "Failed to extract AWS credentials from secret" + return 1 + fi + + export AWS_ACCESS_KEY_ID + export AWS_SECRET_ACCESS_KEY + + # Re-enable tracing if it was on + [[ $trace_was_on -eq 1 ]] && set -x + + echo "AWS credentials configured successfully" +} + +function setup_gcs_credentials() { + local secret_name="gcp-cs-secret" + + if gsutil ls >/dev/null 2>&1; then + echo "GCS credentials already set in environment" + return 0 + fi + + echo "Setting up GCS credentials from K8s secret: $secret_name" + + # Disable tracing for the entire credential section + local trace_was_on=0 + if [[ $- == *x* ]]; then + trace_was_on=1 + set +x + fi + + ACCESS_KEY_ID=$(kubectl get 
secret "$secret_name" -o jsonpath='{.data.AWS_ACCESS_KEY_ID}' 2>/dev/null | base64 -d 2>/dev/null) + SECRET_ACCESS_KEY=$(kubectl get secret "$secret_name" -o jsonpath='{.data.AWS_SECRET_ACCESS_KEY}' 2>/dev/null | base64 -d 2>/dev/null) + + if [[ -z "$ACCESS_KEY_ID" ]] || [[ -z "$SECRET_ACCESS_KEY" ]]; then + # Re-enable tracing before error message if it was on + [[ $trace_was_on -eq 1 ]] && set -x + echo "Failed to extract GCS credentials from secret" >&2 + return 1 + fi + + boto_tmp=$(mktemp /tmp/boto.XXXXXX) + chmod 600 "$boto_tmp" + + cat <<EOF >"$boto_tmp" +[Credentials] +gs_access_key_id = ${ACCESS_KEY_ID} +gs_secret_access_key = ${SECRET_ACCESS_KEY} +EOF + + export BOTO_CONFIG="$boto_tmp" + + unset ACCESS_KEY_ID + unset SECRET_ACCESS_KEY + + [[ $trace_was_on -eq 1 ]] && set -x + + echo "GCS credentials configured successfully" +} + +function setup_azure_credentials() { + local secret_name="azure-secret" + + echo "Setting up Azure credentials from K8s secret: $secret_name" + + # Disable tracing for the entire credential section + local trace_was_on=0 + if [[ $- == *x* ]]; then + trace_was_on=1 + set +x + fi + + AZURE_STORAGE_ACCOUNT=$(kubectl_bin get secret "$secret_name" -o jsonpath='{.data.AZURE_STORAGE_ACCOUNT_NAME}' 2>/dev/null | base64 -d 2>/dev/null) + AZURE_STORAGE_KEY=$(kubectl_bin get secret "$secret_name" -o jsonpath='{.data.AZURE_STORAGE_ACCOUNT_KEY}' 2>/dev/null | base64 -d 2>/dev/null) + + if [[ -z "$AZURE_STORAGE_ACCOUNT" ]] || [[ -z "$AZURE_STORAGE_KEY" ]]; then + # Re-enable tracing before error message if it was on + [[ $trace_was_on -eq 1 ]] && set -x + echo "Failed to extract Azure credentials from secret" >&2 + return 1 + fi + + export AZURE_STORAGE_ACCOUNT + export AZURE_STORAGE_KEY + + # Re-enable tracing if it was on + [[ $trace_was_on -eq 1 ]] && set -x + + echo "Azure credentials configured successfully" +} + create_infra() { + local ns="$1" @@ -2152,6 +2331,73 @@ wait_for_oplogs() { done } + +function check_backup_existence_aws() { + 
bucket=$(echo "$1" | cut -d'/' -f1) + key_prefix=$(echo "$1" | cut -d'/' -f2-) + key=$2 + storage_name="aws-s3" + retry=0 + + until aws s3api head-object --bucket "$bucket" --key "${key_prefix}${key}" &>/dev/null; do + if [ $retry -ge 30 ]; then + echo "max retry count $retry reached. something went wrong with operator or kubernetes cluster" + echo "Backup was not found in bucket -- $storage_name" + exit 1 + fi + echo "waiting for backup $bucket/${key_prefix}${key} in $storage_name" + sleep 10 + ((retry += 1)) + done + + echo "Backup $bucket/${key_prefix}${key} found in $storage_name" +} + +function check_backup_existence_gcs() { + backup_dest_gcp=$1 + obj=$2 + storage_name="gcp-cs" + retry=0 + + gcs_path="gs://${backup_dest_gcp}${obj}" + + until gsutil ls "$gcs_path" >/dev/null 2>&1; do + if [ $retry -ge 30 ]; then + echo "Max retry count $retry reached. Something went wrong with operator or Kubernetes cluster." + echo "Backup was not found in bucket -- $storage_name" + exit 1 + fi + echo "Waiting for backup $gcs_path in $storage_name" + sleep 10 + ((retry += 1)) + done + + echo "Backup $gcs_path found in $storage_name" +} + +function check_backup_existence_azure() { + url_path=$(echo "$1" | sed 's|https://[^/]*\.blob\.core\.windows\.net/||') + container=$(echo "$url_path" | cut -d'/' -f1) + blob_prefix=$(echo "$url_path" | cut -d'/' -f2-) + + blob=$2 + storage_name="azure-blob" + retry=0 + blob_path="${blob_prefix}${blob}" + + until az storage blob show --container-name "$container" --name "$blob_path" &>/dev/null; do + if [ $retry -ge 30 ]; then + echo "max retry count $retry reached. 
something went wrong with operator or kubernetes cluster" + echo "Backup was not found in container -- $storage_name" + exit 1 + fi + echo "waiting for backup $blob_path in $storage_name" + sleep 10 + ((retry += 1)) + done + + echo "Backup $blob_path found in $storage_name" +} + check_backup_in_storage() { local backup=$1 local storage_type=$2 @@ -2159,41 +2405,40 @@ check_backup_in_storage() { local file=${4:-"filelist.pbm"} local endpoint + backup_dest=$(get_backup_dest "$backup" | $sed 's|https://engk8soperators.blob.core.windows.net/||') case ${storage_type} in s3) endpoint="s3.amazonaws.com" + setup_aws_credentials + check_backup_existence_aws "$backup_dest" "/${replset}/${file}" ;; gcs) endpoint="storage.googleapis.com" + setup_gcs_credentials + check_backup_existence_gcs "$backup_dest" "/${replset}/${file}" ;; azure) endpoint="engk8soperators.blob.core.windows.net" + setup_azure_credentials + check_backup_existence_azure "$backup_dest" "/${replset}/${file}" ;; minio) endpoint="minio-service" + until kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \ + /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \ + /usr/bin/aws --endpoint-url http://${endpoint}:9000 s3 ls "s3://${backup_dest}/${replset}/${file}" \ + | grep "${file}"; do + sleep 1 + let retry+=1 + if [ $retry -ge 60 ]; then + log "Max retry count $retry reached. 
Something went wrong with writing backup" + exit 1 + fi + done ;; *) echo "unsupported storage type: ${storage_type}" exit 1 ;; esac - - backup_dest=$(get_backup_dest "$backup" | $sed 's|https://engk8soperators.blob.core.windows.net/||') - if [[ ${storage_type} == 'minio' ]]; then - until kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \ - /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \ - /usr/bin/aws --endpoint-url http://${endpoint}:9000 s3 ls "s3://${backup_dest}/${replset}/${file}" \ - | grep "${file}"; do - sleep 1 - let retry+=1 - if [ $retry -ge 60 ]; then - log "Max retry count $retry reached. Something went wrong with writing backup" - exit 1 - fi - done - else - local url="https://${endpoint}/${backup_dest}/${replset}/${file}" - log "checking if ${url} exists" - curl --fail --head "${url}" - fi } diff --git a/e2e-tests/scheduled-backup/run b/e2e-tests/scheduled-backup/run index 35cebb3a7..32cee0ffc 100755 --- a/e2e-tests/scheduled-backup/run +++ b/e2e-tests/scheduled-backup/run @@ -134,7 +134,8 @@ compare_mongo_cmd "find" "myApp:myPass@$cluster-2.$cluster.$namespace" if [ -z "$SKIP_BACKUPS_TO_AWS_GCP_AZURE" ]; then desc 'check backup and restore -- aws-s3' backup_dest_aws=$(get_backup_dest "$backup_name_aws") - curl -s "https://s3.amazonaws.com/${backup_dest_aws}/rs0/myApp.test.gz" | gunzip >/dev/null + setup_aws_credentials + check_backup_existence_aws "$backup_dest_aws" "/rs0/myApp.test.gz" run_mongo 'use myApp\n db.test.insert({ x: 100501 })' "myApp:myPass@$cluster.$namespace" compare_mongo_cmd "find" "myApp:myPass@$cluster-0.$cluster.$namespace" "-2nd" compare_mongo_cmd "find" "myApp:myPass@$cluster-1.$cluster.$namespace" "-2nd" @@ -147,7 +148,8 @@ if [ -z "$SKIP_BACKUPS_TO_AWS_GCP_AZURE" ]; then desc 'check backup and restore -- gcp-cs' backup_dest_gcp=$(get_backup_dest "$backup_name_gcp") - curl -s 
"https://storage.googleapis.com/${backup_dest_gcp}/rs0/myApp.test.gz" | gunzip >/dev/null + setup_gcs_credentials + check_backup_existence_gcs "$backup_dest_gcp" "/rs0/myApp.test.gz" run_mongo 'use myApp\n db.test.insert({ x: 100501 })' "myApp:myPass@$cluster.$namespace" compare_mongo_cmd "find" "myApp:myPass@$cluster-0.$cluster.$namespace" "-2nd" compare_mongo_cmd "find" "myApp:myPass@$cluster-1.$cluster.$namespace" "-2nd" @@ -160,7 +162,8 @@ if [ -z "$SKIP_BACKUPS_TO_AWS_GCP_AZURE" ]; then desc 'check backup and restore -- azure-blob' backup_dest_azure=$(get_backup_dest "$backup_name_azure") - curl -s "https://engk8soperators.blob.core.windows.net/${backup_dest_azure}/rs0/myApp.test.gz" | gunzip >/dev/null + setup_azure_credentials + check_backup_existence_azure "$backup_dest_azure" "/rs0/myApp.test.gz" run_mongo 'use myApp\n db.test.insert({ x: 100501 })' "myApp:myPass@$cluster.$namespace" compare_mongo_cmd "find" "myApp:myPass@$cluster-0.$cluster.$namespace" "-2nd" compare_mongo_cmd "find" "myApp:myPass@$cluster-1.$cluster.$namespace" "-2nd" diff --git a/e2e-tests/upgrade-partial-backup/run b/e2e-tests/upgrade-partial-backup/run index eb2cfb9a2..4a5c0d96a 100755 --- a/e2e-tests/upgrade-partial-backup/run +++ b/e2e-tests/upgrade-partial-backup/run @@ -209,7 +209,7 @@ test_backup_deletion() { local logical_dest=$(get_backup_dest "${BACKUP_NAME_MINIO}-logical") kubectl_bin delete psmdb-backup ${BACKUP_NAME_MINIO}-incremental-base - check_backup_deletion "https://storage.googleapis.com/${incremental_base_dest}" "gcp-cs" + check_backup_deletion_gcs "$incremental_base_dest" # base backup can only be deleted if PiTR is disabled log "disabling PiTR on ${cluster}" @@ -217,7 +217,7 @@ test_backup_deletion() { --patch '{"spec": {"backup": {"pitr": {"enabled": false}}}}' kubectl_bin delete psmdb-backup ${BACKUP_NAME_MINIO}-logical - check_backup_deletion "https://storage.googleapis.com/${logical_dest}" "gcp-cs" + check_backup_deletion_gcs "$logical_dest" }