diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 7aa5ac9a..b4768071 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,4 +1,4 @@ # https://docs.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners # Order is important; the last matching pattern takes the most precedence. -* @yurkovychv @puneet0191 @olexandr-havryliak @sandraromanchenko +* @yurkovychv @puneet0191 @olexandr-havryliak @sandraromanchenko @peterSirotnak diff --git a/.github/workflows/PMM_PDPGSQL.yaml b/.github/workflows/PMM_PDPGSQL.yaml index 2499a8ba..241333a8 100644 --- a/.github/workflows/PMM_PDPGSQL.yaml +++ b/.github/workflows/PMM_PDPGSQL.yaml @@ -3,86 +3,89 @@ on: workflow_dispatch: inputs: pdpgsql_version: - description: "PDPGSQL Docker hub example perconalab/percona-distribution-postgresql:16.0 latest available Image" + description: "PDPGSQL Version available Image versions on github, example: 15, 16, 17" required: true - pmm_image: - description: "pmm_image, example: perconalab/pmm-server:dev-latest" + default: '17' + type: string + pmm_qa_branch: + description: "Branch for qa-integration to checkout" required: false - repo: - description: "Percona Release Repo defaults to Experiemental example: experimental" + type: string + pmm_ui_tests_branch: + description: "Branch for PMM-UI tests to checkout" required: false + type: string + pmm_server_image: + description: "pmm_image, example: perconalab/pmm-server:3-dev-latest" + default: 'perconalab/pmm-server:3-dev-latest' + required: false + type: string + pmm_client_version: + description: 'PMM Client version (3-dev-latest|pmm3-rc|x.xx.x|https...)' + default: '3-dev-latest' + required: false + type: string push: branches: - - main + - v3 pull_request: branches: - - main + - v3 jobs: PMM_PDPGSQL_TEST: - runs-on: ubuntu-latest - timeout-minutes: 20 + runs-on: ubuntu-22.04 + timeout-minutes: 40 env: - PDPGSQL_VERSION: ${{ github.event.inputs.pdpgsql_version || 
'perconalab/percona-distribution-postgresql:16.0' }} - PMM_IMAGE: ${{ github.event.inputs.pmm_image || 'perconalab/pmm-server:dev-latest' }} - REPO: ${{ github.event.inputs.repo || 'experimental' }} - + ADMIN_PASSWORD: 'admin' + PDPGSQL_VERSION: ${{ github.event.inputs.pdpgsql_version || '17' }} + PMM_IMAGE: ${{ github.event.inputs.pmm_server_image || 'perconalab/pmm-server:3-dev-latest' }} + PMM_QA_BRANCH: ${{ github.event.inputs.pmm_qa_branch || 'v3' }} + PMM_UI_BRANCH: ${{ github.event.inputs.pmm_ui_tests_branch || 'v3' }} + PMM_CLIENT_VERSION: ${{ github.event.inputs.pmm_client_version || '3-dev-latest' }} + steps: - uses: actions/checkout@v2 - - name: Concatenate values to environment file - run: | - echo "PMM_QA_REPO_URL=https://github.com/percona/pmm-qa/" >> $GITHUB_ENV - echo "PMM_QA_REPO_BRANCH=main" >> $GITHUB_ENV - - - name: Install Ansible and update the node - run: sudo apt-get update -y && sudo apt-get install ansible -y - - - name: Install Percona Release Package - run: wget https://repo.percona.com/apt/percona-release_latest.generic_all.deb && sudo dpkg -i percona-release_latest.generic_all.deb - - - name: Install Enable percona-release experimental for dev-latest installation of pmm2-client - if: ${{ inputs.repo == 'experimental' }} - run: sudo percona-release enable-only original experimental + - name: Run the PMM Server container + run: docker run --detach --restart always -p 443:8443 -p 8081:8080 --name pmm-server ${{ env.PMM_IMAGE }} - - name: Install Enable percona-release testing for rc installation of pmm2-client - if: ${{ inputs.repo == 'testing' }} - run: sudo percona-release enable-only original testing - - - name: Install PMM Client - run: sudo apt update -y && sudo apt-get upgrade -y && sudo apt-get install -y pmm2-client + - name: Checkout qa-integration repo + uses: actions/checkout@v4 + with: + ref: ${{ env.PMM_QA_BRANCH }} + repository: Percona-Lab/qa-integration + path: ./qa-integration - - name: Delete if the Repo already checked 
out - run: sudo rm -r pmm-qa || true + - name: Export path to qa-integration repo + working-directory: qa-integration + run: echo "PATH_TO_PMM_QA=$(pwd)" >> $GITHUB_ENV - - name: Clone the PMM_QA repo - run: git clone -b ${{ env.PMM_QA_REPO_BRANCH }} ${{ env.PMM_QA_REPO_URL }} - - - name: Give write perimssion to the bash script - run: chmod +x ./pmm-tests/pmm-framework.sh - working-directory: pmm-qa - - - name: Run the pmm container in backgrounds - run: docker run --detach --restart always -p 443:443 -p 80:80 --name pmm-server ${{ env.PMM_IMAGE }} - - - name: Wait before pmm-admin config command - run: sleep 120 - - - name: setup pmm-admin config - run: sudo pmm-admin config --server-insecure-tls --server-url=https://admin:admin@localhost:443 + - name: Run Setup for E2E Tests + working-directory: qa-integration/pmm_qa + run: | + mkdir -m 777 -p /tmp/backup_data + python3 -m venv virtenv + . virtenv/bin/activate + pip install --upgrade pip + pip install --force-reinstall -U setuptools + pip install -r requirements.txt + python pmm-framework.py --verbose --pmm-server-password=${{ env.ADMIN_PASSWORD }} --client-version=${{ env.PMM_CLIENT_VERSION }} --database PDPGSQL=${{ env.PDPGSQL_VERSION }} - - name: Run for PDPGSQL tests - run: sudo ./pmm-framework.sh --pdpgsql-version=16 --pmm2 --download --addclient=pdpgsql,1 - working-directory: pmm-qa/pmm-tests - - - name: Checkout the repo and install node packages - run: git clone https://github.com/percona/pmm-ui-tests.git && cd pmm-ui-tests && npm ci + - name: Checkout PMM UI tests + uses: actions/checkout@v4 + with: + ref: ${{ env.PMM_UI_BRANCH }} + repository: percona/pmm-ui-tests + path: ./pmm-ui-tests - name: Install npx dependencies - run: sudo npx playwright install-deps && npx codeceptjs def pr.codecept.js + run: npm ci && npx playwright install --with-deps && npx codeceptjs def pr.codecept.js working-directory: pmm-ui-tests - name: Run the Integration tests of pdpgsql - run: npx codeceptjs run -c pr.codecept.js 
tests/qa-integration/pmm_pdpgsql_integration_test.js --steps --debug working-directory: pmm-ui-tests + run: | + export PMM_UI_URL="http://127.0.0.1:8081/" + npx codeceptjs run -c pr.codecept.js tests/qa-integration/pmm_pdpgsql_integration_test.js diff --git a/.github/workflows/PMM_PROXYSQL.yaml b/.github/workflows/PMM_PROXYSQL.yaml index f2be9a7a..79c2e9da 100644 --- a/.github/workflows/PMM_PROXYSQL.yaml +++ b/.github/workflows/PMM_PROXYSQL.yaml @@ -1,37 +1,61 @@ -name: PMM_PXC_PROXYSQL +name: PMM_PROXYSQL on: workflow_dispatch: inputs: pxc_version: - description: "pxc version, example: 8.0.33-25 Fetched from https://github.com/Percona-QA/package-testing/blob/master/VERSIONS by default" - required: false + description: "pxc version, example: 8.0.33-25 Fetched from https://github.com/Percona-QA/package-testing/blob/master/VERSIONS" + default: 'https://github.com/Percona-QA/package-testing/blob/master/VERSIONS' + required: true + type: string pxc_glibc: description: "pxc glibc version, example: 2.35" + default: '2.35' required: true - pmm_image: - description: "pmm_image, example: perconalab/pmm-server:dev-latest" + pmm_qa_branch: + description: "Branch for qa-integration to checkout" + required: false + type: string + pmm_ui_tests_branch: + description: "Branch for PMM-UI tests to checkout" + required: false + type: string + pmm_server_image: + description: "pmm_image, example: perconalab/pmm-server:3-dev-latest" + default: 'perconalab/pmm-server:3-dev-latest' required: false + type: string + pmm_client_version: + description: 'PMM Client version (3-dev-latest|pmm3-rc|x.xx.x|https...)' + default: '3-dev-latest' + required: false + type: string push: branches: - - main + - v3 pull_request: branches: - - main + - v3 jobs: PMM_PXC_TEST: - runs-on: ubuntu-latest - timeout-minutes: 20 + runs-on: ubuntu-22.04 + timeout-minutes: 40 env: + ADMIN_PASSWORD: 'admin' + PXC_VERSION: ${{ github.event.inputs.pxc_version || '8.0' }} PXC_GLIBC: ${{ github.event.inputs.pxc_glibc || 
'2.35' }} - PMM_IMAGE: ${{ github.event.inputs.pmm_image || 'perconalab/pmm-server:dev-latest' }} + PMM_SERVER_IMAGE: ${{ github.event.inputs.pmm_server_image || 'perconalab/pmm-server:3-dev-latest' }} + PMM_QA_BRANCH: ${{ github.event.inputs.pmm_qa_branch || 'v3' }} + PMM_UI_BRANCH: ${{ github.event.inputs.pmm_ui_tests_branch || 'v3' }} + PMM_CLIENT_VERSION: ${{ github.event.inputs.pmm_client_version || '3-dev-latest' }} + steps: - uses: actions/checkout@v2 - name: PXC_VERSION ENV Setup run: | - if [[ -n "${{ github.event.inputs.pxc_version }}" ]]; then + if [[ "${{ github.event.inputs.pxc_version }}" != http* ]]; then PXC_VERSION="${{ github.event.inputs.pxc_version }}" PXC_VERSION_PATH=$(echo "pxc-$PXC_VERSION") PXC_VERSION_TRIMED=$(echo ${PXC_VERSION%-*}) @@ -50,50 +74,53 @@ jobs: run: | echo "PXC_TARBALL_PATH=https://downloads.percona.com/downloads/TESTING/${{env.PXC_VERSION_PATH}}/Percona-XtraDB-Cluster_${{env.PXC_VERSION_TAR}}_Linux.x86_64.glibc${{env.PXC_GLIBC}}.tar.gz" >> $GITHUB_ENV echo "PMM_QA_REPO_URL=https://github.com/percona/pmm-qa/" >> $GITHUB_ENV - echo "PMM_QA_REPO_BRANCH=main" >> $GITHUB_ENV - name: Install Ansible and update the node run: sudo apt-get update -y && sudo apt-get install ansible -y - - name: Install Percona Release Package - run: wget https://repo.percona.com/apt/percona-release_latest.generic_all.deb && sudo dpkg -i percona-release_latest.generic_all.deb - - - name: Install Enable percona-release experimental for dev-latest installation of pmm2-client - run: sudo percona-release enable-only original experimental - - - name: Install PMM Client - run: sudo apt update -y && sudo apt-get upgrade -y && sudo apt-get install -y pmm2-client - - - name: Delete if the Repo already checked out - run: sudo rm -r pmm-qa || true + - name: Run the PMM Server container + run: docker run --detach --restart always -p 443:8443 -p 8081:8080 --name pmm-server ${{ env.PMM_SERVER_IMAGE }} - - name: Clone the PMM_QA repo - run: git clone -b ${{ 
env.PMM_QA_REPO_BRANCH }} ${{ env.PMM_QA_REPO_URL }} - - - name: Give write perimssion to the bash script - run: chmod +x ./pmm-tests/pmm-framework.sh - working-directory: pmm-qa + - name: Checkout qa-integration repo + uses: actions/checkout@v4 + with: + ref: ${{ env.PMM_QA_BRANCH }} + repository: Percona-Lab/qa-integration + path: ./qa-integration - - name: Run the pmm container in backgrounds - run: docker run --detach --restart always -p 443:443 -p 80:80 --name pmm-server ${{ env.PMM_IMAGE }} + - name: Export path to qa-integration repo + working-directory: qa-integration + run: echo "PATH_TO_PMM_QA=$(pwd)" >> $GITHUB_ENV - - name: Wait before pmm-admin config command - run: sleep 120 - - - name: setup pmm-admin config - run: sudo pmm-admin config --server-insecure-tls --server-url=https://admin:admin@localhost:443 + - name: Run Setup for E2E Tests + working-directory: qa-integration/pmm_qa + run: | + mkdir -m 777 -p /tmp/backup_data + python3 -m venv virtenv + . virtenv/bin/activate + pip install --upgrade pip + pip install --force-reinstall -U setuptools + pip install -r requirements.txt + if [[ "${{ github.event.inputs.pxc_version }}" != http* ]]; then + python pmm-framework.py --verbose --pmm-server-password=${{ env.ADMIN_PASSWORD }} --client-version=${{ env.PMM_CLIENT_VERSION }} --database PXC=${{ env.PXC_VERSION }} + else + python pmm-framework.py --verbose --pmm-server-password=${{ env.ADMIN_PASSWORD }} --client-version=${{ env.PMM_CLIENT_VERSION }} --database PXC,TARBALL=${{ env.PXC_TARBALL_PATH }} + fi - - name: Run for PXC tests - run: ./pmm-framework.sh --with-proxysql --pxc-version ${{ env.PXC_VERSION }} --addclient=pxc,1 --pmm2 --pxc-tarball ${{ env.PXC_TARBALL_PATH }} - working-directory: pmm-qa/pmm-tests - - - name: Checkout the repo and install node packages - run: git clone https://github.com/percona/pmm-ui-tests.git && cd pmm-ui-tests && npm ci + - name: Checkout PMM UI tests + uses: actions/checkout@v4 + with: + ref: ${{ env.PMM_UI_BRANCH }} 
+ repository: percona/pmm-ui-tests + path: ./pmm-ui-tests - name: Install npx dependencies - run: cd pmm-ui-tests && sudo npx playwright install-deps && npx codeceptjs def pr.codecept.js - + run: npm ci && npx playwright install --with-deps && npx codeceptjs def pr.codecept.js + working-directory: pmm-ui-tests + - name: Run the Integration tests of PXC - run: npx codeceptjs run -c pr.codecept.js tests/qa-integration/pmm_pxc_integration_test.js --steps --debug + run: | + export PMM_UI_URL="http://127.0.0.1:8081/" + npx codeceptjs run -c pr.codecept.js tests/qa-integration/pmm_pxc_integration_test.js working-directory: pmm-ui-tests diff --git a/.github/workflows/PMM_PS.yaml b/.github/workflows/PMM_PS.yaml index 3c7525d7..4ffa9c29 100644 --- a/.github/workflows/PMM_PS.yaml +++ b/.github/workflows/PMM_PS.yaml @@ -3,36 +3,58 @@ on: workflow_dispatch: inputs: ps_version: - description: "ps version example: 8.0.34-26 , Fetched from https://github.com/Percona-QA/package-testing/blob/master/VERSIONS by default" + description: "PS Version ex: 5.7/8.0/8.4, Minor versions from https://github.com/Percona-QA/package-testing/blob/master/VERSIONS" + default: '8.4' required: true ps_glibc: - description: "ps glibc version, example: 2.28" + description: "PS Glibc version, example: 2.35" + default: '2.35' required: false - pmm_image: - description: "pmm_image, example: perconalab/pmm-server:dev-latest" + pmm_qa_branch: + description: "Branch for qa-integration to checkout" required: false + type: string + pmm_ui_tests_branch: + description: "Branch for PMM-UI tests to checkout" + required: false + type: string + pmm_server_image: + description: "PMM Server image, example: perconalab/pmm-server:3-dev-latest" + default: 'perconalab/pmm-server:3-dev-latest' + required: false + pmm_client_version: + description: 'PMM Client version (3-dev-latest|pmm3-rc|3.xx.x|https...)' + default: '3-dev-latest' + required: false + type: string push: branches: - - main + - v3 pull_request: branches: - - 
main + - v3 jobs: PMM_PS_TEST: - runs-on: ubuntu-latest - timeout-minutes: 20 + runs-on: ubuntu-22.04 + timeout-minutes: 40 env: - PS_GLIBC: ${{ github.event.inputs.ps_glibc || '2.28' }} - PMM_IMAGE: ${{ github.event.inputs.pmm_image || 'perconalab/pmm-server:dev-latest' }} + ADMIN_PASSWORD: 'admin' + PS_VERSION: ${{ github.event.inputs.ps_version || '8.4' }} + PS_GLIBC: ${{ github.event.inputs.ps_glibc || '2.35' }} + PMM_SERVER_IMAGE: ${{ github.event.inputs.pmm_server_image || 'perconalab/pmm-server:3-dev-latest' }} + PMM_QA_BRANCH: ${{ github.event.inputs.pmm_qa_branch || 'v3' }} + PMM_UI_BRANCH: ${{ github.event.inputs.pmm_ui_tests_branch || 'v3' }} + PMM_CLIENT_VERSION: ${{ github.event.inputs.pmm_client_version || '3-dev-latest' }} + steps: - uses: actions/checkout@v2 - name: PS_VERSION ENV Setup run: | - if [[ -n "${{ github.event.inputs.ps_version }}" ]]; then + if [[ "${{ github.event.inputs.ps_version }}" != http* ]]; then PS_VERSION="${{ github.event.inputs.ps_version }}" PS_VERSION_PATH=$(echo "ps-$PS_VERSION") PS_VERSION_TRIMED=$(echo ${PS_VERSION%-*}) @@ -56,45 +78,58 @@ jobs: - name: Install Ansible and update the node run: sudo apt-get update -y && sudo apt-get install ansible -y - - name: Install Percona Release Package - run: wget https://repo.percona.com/apt/percona-release_latest.generic_all.deb && sudo dpkg -i percona-release_latest.generic_all.deb - - - name: Install Enable percona-release experimental for dev-latest installation of pmm2-client - run: sudo percona-release enable-only original experimental - - - name: Install PMM Client - run: sudo apt update -y && sudo apt-get upgrade -y && sudo apt-get install -y pmm2-client + - name: Run the PMM Server container + run: docker run --detach --restart always -p 443:8443 -p 8081:8080 --name pmm-server ${{ env.PMM_SERVER_IMAGE }} - - name: Delete if the Repo already checked out - run: sudo rm -r pmm-qa || true + - name: Checkout qa-integration repo + uses: actions/checkout@v4 + with: + ref: ${{ 
env.PMM_QA_BRANCH }} + repository: Percona-Lab/qa-integration + path: ./qa-integration - - name: Clone the PMM_QA repo - run: git clone -b ${{ env.PMM_QA_REPO_BRANCH }} ${{ env.PMM_QA_REPO_URL }} - - - name: Give write perimssion to the bash script - run: chmod +x ./pmm-tests/pmm-framework.sh - working-directory: pmm-qa - - - name: Run the pmm container in backgrounds - run: docker run --detach --restart always -p 443:443 -p 80:80 --name pmm-server ${{ env.PMM_IMAGE }} + - name: Export path to qa-integration repo + working-directory: qa-integration + run: echo "PATH_TO_PMM_QA=$(pwd)" >> $GITHUB_ENV - - name: Wait before pmm-admin config command - run: sleep 120 + - name: Run Setup for PS E2E Tests + working-directory: qa-integration/pmm_qa + run: | + mkdir -m 777 -p /tmp/backup_data + python3 -m venv virtenv + . virtenv/bin/activate + pip install --upgrade pip + pip install --force-reinstall -U setuptools + pip install -r requirements.txt + if [[ "${{ github.event.inputs.ps_version }}" != http* ]]; then + python pmm-framework.py --verbose --pmm-server-password=${{ env.ADMIN_PASSWORD }} --client-version=${{ env.PMM_CLIENT_VERSION }} --database PS=${{ env.PS_VERSION }} + else + python pmm-framework.py --verbose --pmm-server-password=${{ env.ADMIN_PASSWORD }} --client-version=${{ env.PMM_CLIENT_VERSION }} --database PS,TARBALL=${{ env.PS_TARBALL_PATH }} + fi - - name: setup pmm-admin config - run: sudo pmm-admin config --server-insecure-tls --server-url=https://admin:admin@localhost:443 + - name: Run Setup for PS Replica E2E Tests + working-directory: qa-integration/pmm_qa + run: | + . 
virtenv/bin/activate + if [[ "${{ github.event.inputs.ps_version }}" != http* ]]; then + python pmm-framework.py --verbose --pmm-server-password=${{ env.ADMIN_PASSWORD }} --client-version=${{ env.PMM_CLIENT_VERSION }} --database PS=${{ env.PS_VERSION }},SETUP_TYPE=replica + else + python pmm-framework.py --verbose --pmm-server-password=${{ env.ADMIN_PASSWORD }} --client-version=${{ env.PMM_CLIENT_VERSION }} --database PS,TARBALL=${{ env.PS_TARBALL_PATH }},SETUP_TYPE=replica + fi - - name: Run for PS tests - run: ./pmm-framework.sh --ps-version ${{ env.PS_VERSION }} --setup-pmm-ps-integration --pmm2 --ps-tarball ${{ env.PS_TARBALL_PATH }} --query-source=slowlog - working-directory: pmm-qa/pmm-tests - - - name: Checkout the repo and install node packages - run: git clone https://github.com/percona/pmm-ui-tests.git && cd pmm-ui-tests && npm ci + - name: Checkout PMM UI tests + uses: actions/checkout@v4 + with: + ref: ${{ env.PMM_UI_BRANCH }} + repository: percona/pmm-ui-tests + path: ./pmm-ui-tests - name: Install npx dependencies - run: sudo npx playwright install-deps && npx codeceptjs def pr.codecept.js + run: npm ci && npx playwright install --with-deps && npx codeceptjs def pr.codecept.js working-directory: pmm-ui-tests - - name: Run the Integration tests of PS - run: npx codeceptjs run -c pr.codecept.js tests/qa-integration/pmm_ps_integration_test.js --steps --debug + - name: Run the Integration tests of PS and PS Replica + run: | + export PMM_UI_URL="http://127.0.0.1:8081/" + npx codeceptjs run -c pr.codecept.js tests/qa-integration/pmm_ps_integration_test.js tests/qa-integration/pmm_ps_replica_integration_test.js working-directory: pmm-ui-tests diff --git a/.github/workflows/PMM_PSMDB_PBM.yml b/.github/workflows/PMM_PSMDB_PBM.yml index 8531a75d..f610617a 100644 --- a/.github/workflows/PMM_PSMDB_PBM.yml +++ b/.github/workflows/PMM_PSMDB_PBM.yml @@ -5,34 +5,35 @@ on: inputs: psmdb_version: description: "psmdb version" + default: "latest" required: false 
pbm_version: description: "pbm version" + default: "latest" required: false pmm_version: - description: "pmm2-client version" + description: "pmm3-client version" + default: "3-dev-latest" + required: false + pmm_repo: + description: "pmm3-client repo" + default: "experimental" required: false pmm_image: description: "pmm-server docker image" + default: "perconalab/pmm-server:3-dev-latest" required: false - push: - branches: - - main - - pull_request: - branches: - - main - jobs: test_replica_set: - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 timeout-minutes: 20 env: - PSMDB_VERSION: ${{ github.event.inputs.psmdb_version || 'latest' }} - PBM_VERSION: ${{ github.event.inputs.pbm_version || 'latest' }} - PMM_CLIENT_VERSION: ${{ github.event.inputs.pmm_version || 'latest' }} - PMM_IMAGE: ${{ github.event.inputs.pmm_image || 'perconalab/pmm-server:dev-latest' }} + PSMDB_VERSION: ${{ inputs.psmdb_version || 'latest' }} + PBM_VERSION: ${{ inputs.pbm_version || 'latest' }} + PMM_CLIENT_VERSION: ${{ inputs.pmm_version || '3-dev-latest' }} + PMM_REPO: ${{ github.event.inputs.pmm_repo || 'experimental' }} + PMM_IMAGE: ${{ inputs.pmm_image || 'perconalab/pmm-server:3-dev-latest' }} steps: - uses: actions/checkout@v2 - name: test-rs @@ -41,13 +42,14 @@ jobs: working-directory: ./pmm_psmdb-pbm_setup test_sharded_cluster: - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 timeout-minutes: 20 env: - PSMDB_VERSION: ${{ github.event.inputs.psmdb_version || 'latest' }} - PBM_VERSION: ${{ github.event.inputs.pbm_version || 'latest' }} - PMM_CLIENT_VERSION: ${{ github.event.inputs.pmm_version || 'latest' }} - PMM_IMAGE: ${{ github.event.inputs.pmm_image || 'perconalab/pmm-server:dev-latest' }} + PSMDB_VERSION: ${{ inputs.psmdb_version || 'latest' }} + PBM_VERSION: ${{ inputs.pbm_version || 'latest' }} + PMM_CLIENT_VERSION: ${{ inputs.pmm_version || '3-dev-latest' }} + PMM_REPO: ${{ github.event.inputs.pmm_repo || 'experimental' }} + PMM_IMAGE: ${{ inputs.pmm_image || 
'perconalab/pmm-server:3-dev-latest' }} steps: - uses: actions/checkout@v2 - name: test-sharded @@ -56,17 +58,15 @@ jobs: working-directory: ./pmm_psmdb-pbm_setup test_diff_auth: - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 timeout-minutes: 20 env: - PSMDB_VERSION: ${{ github.event.inputs.psmdb_version || 'latest' }} - PBM_VERSION: ${{ github.event.inputs.pbm_version || 'latest' }} - PMM_CLIENT_VERSION: ${{ github.event.inputs.pmm_version || 'latest' }} - PMM_IMAGE: ${{ github.event.inputs.pmm_image || 'perconalab/pmm-server:dev-latest' }} + PSMDB_VERSION: ${{ inputs.psmdb_version || 'latest' }} + PBM_VERSION: ${{ inputs.pbm_version || 'latest' }} + PMM_CLIENT_VERSION: ${{ inputs.pmm_version || '3-dev-latest' }} + PMM_REPO: ${{ github.event.inputs.pmm_repo || 'experimental' }} + PMM_IMAGE: ${{ inputs.pmm_image || 'perconalab/pmm-server:3-dev-latest' }} steps: - - uses: KengoTODA/actions-setup-docker-compose@v1 - with: - version: '2.17.2' - uses: actions/checkout@v3 - name: test-auth run: | diff --git a/.github/workflows/PMM_PSMDB_PBM_FULL.yml b/.github/workflows/PMM_PSMDB_PBM_FULL.yml new file mode 100644 index 00000000..2c102ebf --- /dev/null +++ b/.github/workflows/PMM_PSMDB_PBM_FULL.yml @@ -0,0 +1,83 @@ +name: PMM_PSMDB_PBM_FULL + +on: + workflow_dispatch: + inputs: + pmm_version: + description: "pmm2-client version" + default: "3-dev-latest" + required: false + pmm_image: + description: "pmm-server docker image" + default: "perconalab/pmm-server:3-dev-latest" + required: false + + push: + branches: + - main + - v3 + + pull_request: + branches: + - main + - v3 + +jobs: + test_replica_set: + runs-on: ubuntu-22.04 + timeout-minutes: 20 + strategy: + fail-fast: false + matrix: + psmdb: ["6.0", "7.0", "8.0"] + env: + PMM_CLIENT_VERSION: ${{ inputs.pmm_version || '3-dev-latest' }} + PMM_IMAGE: ${{ inputs.pmm_image || 'perconalab/pmm-server:3-dev-latest' }} + steps: + - uses: actions/checkout@v2 + - name: Test RS with PSMDB ${{ matrix.psmdb }} + run: | + 
PSMDB_VERSION=${{ matrix.psmdb }} ./start-rs.sh + working-directory: ./pmm_psmdb-pbm_setup + + test_sharded_cluster: + runs-on: ubuntu-22.04 + timeout-minutes: 20 + strategy: + fail-fast: false + matrix: + psmdb: ["6.0", "7.0", "8.0"] + env: + PMM_CLIENT_VERSION: ${{ inputs.pmm_version || '3-dev-latest' }} + PMM_IMAGE: ${{ inputs.pmm_image || 'perconalab/pmm-server:3-dev-latest' }} + steps: + - uses: actions/checkout@v2 + - name: Test sharded with PSMDB ${{ matrix.psmdb }} + run: | + PSMDB_VERSION=${{ matrix.psmdb }} ./start-sharded.sh + working-directory: ./pmm_psmdb-pbm_setup + + test_diff_auth: + runs-on: ubuntu-22.04 + timeout-minutes: 20 + strategy: + fail-fast: false + matrix: + psmdb: ["6.0", "7.0", "8.0"] + env: + PMM_CLIENT_VERSION: ${{ inputs.pmm_version || '3-dev-latest' }} + PMM_IMAGE: ${{ inputs.pmm_image || 'perconalab/pmm-server:3-dev-latest' }} + steps: + - uses: actions/checkout@v3 + - name: Test auth with PSMDB ${{ matrix.psmdb }} + run: | + PSMDB_VERSION=${{ matrix.psmdb }} ./test-auth.sh + working-directory: ./pmm_psmdb_diffauth_setup + - name: PMM Server container logs + if: failure() + run: | + docker logs pmm-server || true + - name: PSMDB Server container logs + if: failure() + run: | + docker logs psmdb-server || true diff --git a/.github/workflows/PMM_PXC.yaml b/.github/workflows/PMM_PXC.yaml deleted file mode 100644 index da112599..00000000 --- a/.github/workflows/PMM_PXC.yaml +++ /dev/null @@ -1,99 +0,0 @@ -name: PMM_PXC -on: - workflow_dispatch: - inputs: - pxc_version: - description: "pxc version, example: 8.0.33-25 Fetched from https://github.com/Percona-QA/package-testing/blob/master/VERSIONS by default" - required: false - pxc_glibc: - description: "pxc glibc version, example: 2.35" - required: true - pmm_image: - description: "pmm_image, example: perconalab/pmm-server:dev-latest" - required: false - push: - branches: - - main - - pull_request: - branches: - - main - -jobs: - PMM_PXC_TEST: - runs-on: ubuntu-latest - timeout-minutes: 
20 - env: - PXC_GLIBC: ${{ github.event.inputs.pxc_glibc || '2.35' }} - PMM_IMAGE: ${{ github.event.inputs.pmm_image || 'perconalab/pmm-server:dev-latest' }} - steps: - - uses: actions/checkout@v2 - - - name: PXC_VERSION ENV Setup - run: | - if [[ -n "${{ github.event.inputs.pxc_version }}" ]]; then - PXC_VERSION="${{ github.event.inputs.pxc_version }}" - PXC_VERSION_PATH=$(echo "pxc-$PXC_VERSION") - PXC_VERSION_TRIMED=$(echo ${PXC_VERSION%-*}) - else - wget https://raw.githubusercontent.com/Percona-QA/package-testing/master/VERSIONS - PXC_VERSION="$(grep 'PXC80_VER' VERSIONS | cut -d = -f 2)" - PXC_VERSION_PATH=$(echo "pxc-$PXC_VERSION.1") - PXC_VERSION_TRIMED=$(echo ${PXC_VERSION%-*}) - fi - echo "PXC_VERSION=${PXC_VERSION}" | sed 's/"//g' >> $GITHUB_ENV - echo "PXC_VERSION_TAR=${PXC_VERSION}.1" | sed 's/"//g' >> $GITHUB_ENV - echo "PXC_VERSION_PATH=${PXC_VERSION_PATH}" >> $GITHUB_ENV - echo "PXC_VERSION_TRIMED=${PXC_VERSION_TRIMED}" >> $GITHUB_ENV - - - name: Concatenate values to environment file - run: | - echo "PXC_TARBALL_PATH=https://downloads.percona.com/downloads/TESTING/${{env.PXC_VERSION_PATH}}/Percona-XtraDB-Cluster_${{env.PXC_VERSION_TAR}}_Linux.x86_64.glibc${{env.PXC_GLIBC}}.tar.gz" >> $GITHUB_ENV - echo "PMM_QA_REPO_URL=https://github.com/percona/pmm-qa/" >> $GITHUB_ENV - echo "PMM_QA_REPO_BRANCH=main" >> $GITHUB_ENV - - - name: Install Ansible and update the node - run: sudo apt-get update -y && sudo apt-get install ansible -y - - - name: Install Percona Release Package - run: wget https://repo.percona.com/apt/percona-release_latest.generic_all.deb && sudo dpkg -i percona-release_latest.generic_all.deb - - - name: Install Enable percona-release experimental for dev-latest installation of pmm2-client - run: sudo percona-release enable-only original experimental - - - name: Install PMM Client - run: sudo apt update -y && sudo apt-get upgrade -y && sudo apt-get install -y pmm2-client - - - name: Delete if the Repo already checked out - run: sudo rm -r 
pmm-qa || true - - - name: Clone the PMM_QA repo - run: git clone -b ${{ env.PMM_QA_REPO_BRANCH }} ${{ env.PMM_QA_REPO_URL }} - - - name: Give write perimssion to the bash script - run: chmod +x ./pmm-tests/pmm-framework.sh - working-directory: pmm-qa - - - name: Run the pmm container in backgrounds - run: docker run --detach --restart always -p 443:443 -p 80:80 --name pmm-server ${{ env.PMM_IMAGE }} - - - name: Wait before pmm-admin config command - run: sleep 120 - - - name: setup pmm-admin config - run: sudo pmm-admin config --server-insecure-tls --server-url=https://admin:admin@localhost:443 - - - name: Run for PXC tests - run: ./pmm-framework.sh --pxc-version ${{ env.PXC_VERSION }} --addclient=pxc,1 --pmm2 --pxc-tarball ${{ env.PXC_TARBALL_PATH }} - working-directory: pmm-qa/pmm-tests - - - name: Checkout the repo and install node packages - run: git clone https://github.com/percona/pmm-ui-tests.git && cd pmm-ui-tests && npm ci - - - name: Install npx dependencies - run: cd pmm-ui-tests && sudo npx playwright install-deps && npx codeceptjs def pr.codecept.js - - - name: Run the Integration tests of PXC - run: npx codeceptjs run -c pr.codecept.js tests/qa-integration/pmm_pxc_integration_test.js --steps --debug - working-directory: pmm-ui-tests - diff --git a/pmm_psmdb-pbm_setup/Dockerfile b/pmm_psmdb-pbm_setup/Dockerfile index b2d27fb9..d750262a 100644 --- a/pmm_psmdb-pbm_setup/Dockerfile +++ b/pmm_psmdb-pbm_setup/Dockerfile @@ -1,4 +1,5 @@ -FROM oraclelinux:9 +ARG OL_VERSION=9 +FROM oraclelinux:${OL_VERSION} ARG REPO=testing ARG PMM_REPO=experimental ARG PBM_VERSION=latest @@ -18,15 +19,17 @@ VOLUME [ "/sys/fs/cgroup" ] CMD ["/usr/sbin/init"] +ARG OL_VERSION RUN set -ex; \ export GNUPGHOME="$(mktemp -d)"; \ export PERCONA_TELEMETRY_URL="https://check-dev.percona.com/v1/telemetry/GenericReport"; \ - if [ $PSMDB_VERSION != "latest" ]; then export PSMDB_REPO="$(echo "${PSMDB_VERSION}" | awk -F '.' 
'{print "psmdb-"$1$2}')" ; else export PSMDB_REPO=psmdb-60 ; fi ; \ - gpg --batch --keyserver keyserver.ubuntu.com --recv-keys 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A 99DB70FAE1D7CE227FB6488205B555B38483C65D 94E279EB8D8F25B21810ADF121EA45AB2F86D6A1; \ - gpg --batch --export --armor 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A > ${GNUPGHOME}/RPM-GPG-KEY-Percona; \ + dnf install -y gnupg2; \ + if [ $PSMDB_VERSION != "latest" ]; then export PSMDB_REPO="$(echo "${PSMDB_VERSION}" | awk -F '.' '{print "psmdb-"$1$2}')" ; else export PSMDB_REPO=psmdb-80 ; fi ; \ + gpg --batch --keyserver keyserver.ubuntu.com --recv-keys 4D1BB29D63D98E422B2113B19334A25F8507EFA5 99DB70FAE1D7CE227FB6488205B555B38483C65D 94E279EB8D8F25B21810ADF121EA45AB2F86D6A1; \ + gpg --batch --export --armor 4D1BB29D63D98E422B2113B19334A25F8507EFA5 > ${GNUPGHOME}/PERCONA-PACKAGING-KEY; \ gpg --batch --export --armor 99DB70FAE1D7CE227FB6488205B555B38483C65D > ${GNUPGHOME}/RPM-GPG-KEY-centosofficial; \ gpg --batch --export --armor 94E279EB8D8F25B21810ADF121EA45AB2F86D6A1 > ${GNUPGHOME}/RPM-GPG-KEY-EPEL-9; \ - rpmkeys --import ${GNUPGHOME}/RPM-GPG-KEY-Percona ${GNUPGHOME}/RPM-GPG-KEY-centosofficial ${GNUPGHOME}/RPM-GPG-KEY-EPEL-9; \ + rpmkeys --import ${GNUPGHOME}/PERCONA-PACKAGING-KEY ${GNUPGHOME}/RPM-GPG-KEY-centosofficial ${GNUPGHOME}/RPM-GPG-KEY-EPEL-9; \ curl -Lf -o /tmp/percona-release.rpm https://repo.percona.com/yum/percona-release-latest.noarch.rpm; \ rpmkeys --checksig /tmp/percona-release.rpm; \ rpm -i /tmp/percona-release.rpm; \ @@ -34,50 +37,59 @@ RUN set -ex; \ rpm --import /etc/pki/rpm-gpg/PERCONA-PACKAGING-KEY; \ percona-release enable pbm ${REPO} && \ percona-release enable ${PSMDB_REPO} ${REPO} && \ - percona-release enable original ${PMM_REPO} && \ - yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm && \ - yum update --refresh -y && \ - if [ $PSMDB_VERSION == "latest" ]; then \ - yum -y install percona-server-mongodb \ + percona-release enable pmm3-client 
${PMM_REPO} && \ + dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm && \ + dnf update --refresh -y && \ + if [[ $PSMDB_VERSION == "latest" || "$PSMDB_VERSION" =~ ^[0-9]\.[0-9]$ ]] ; then \ + dnf -y install percona-server-mongodb \ percona-server-mongodb-tools \ percona-server-mongodb-server \ percona-server-mongodb-mongos ; \ else \ - yum -y install percona-server-mongodb-${PSMDB_VERSION}.el9 \ - percona-server-mongodb-tools-${PSMDB_VERSION}.el9 \ - percona-server-mongodb-server-${PSMDB_VERSION}.el9 \ - percona-server-mongodb-mongos-${PSMDB_VERSION}.el9 ; \ + dnf -y install percona-server-mongodb-${PSMDB_VERSION}.el${OL_VERSION} \ + percona-server-mongodb-tools-${PSMDB_VERSION}.el${OL_VERSION} \ + percona-server-mongodb-server-${PSMDB_VERSION}.el${OL_VERSION} \ + percona-server-mongodb-mongos-${PSMDB_VERSION}.el${OL_VERSION} ; \ fi && \ - if [ $PBM_VERSION == "latest" ]; then \ - yum -y install percona-backup-mongodb ; \ + if [ "$PBM_VERSION" == "latest" ]; then \ + dnf -y install percona-backup-mongodb ; \ else \ - yum -y install percona-backup-mongodb-${PBM_VERSION}.el9 ; \ - fi && \ - if [[ $PMM_CLIENT_VERSION == http* ]]; then \ - yum -y install pmm2-client && \ - curl -Lf -o /tmp/pmm2-client.tar.gz $PMM_CLIENT_VERSION && \ - cd /tmp && tar -xvf pmm2-client.tar.gz --transform 's,^/*[^/]*,/pmm2-client,S' && \ - cd /tmp/pmm2-client && ./install_tarball && cd ../ && rm -rf pmm2* ;\ - elif [[ "$PMM_CLIENT_VERSION" =~ latest|dev-latest ]]; then \ - yum -y install pmm2-client ; \ - elif [ $PMM_CLIENT_VERSION == "pmm2-rc" ]; then \ - percona-release enable original testing && \ - yum update --refresh -y && \ - yum -y install pmm2-client ; \ + dnf -y install percona-backup-mongodb-${PBM_VERSION}.el${OL_VERSION} ; \ + fi + +ARG OL_VERSION +RUN if [[ "$PMM_CLIENT_VERSION" == http* ]]; then \ + dnf -y install pmm-client && \ + curl -Lf -o /tmp/pmm-client.tar.gz $PMM_CLIENT_VERSION && \ + cd /tmp && tar -xvf pmm-client.tar.gz 
--transform 's,^/*[^/]*,/pmm-client,S' && \ + cd /tmp/pmm-client && ./install_tarball && cd ../ && rm -rf pmm* ;\ + elif [[ "$PMM_CLIENT_VERSION" =~ 3-dev-latest|latest ]]; then \ + dnf -y install pmm-client ; \ + elif [[ "$PMM_CLIENT_VERSION" = "pmm3-rc" ]]; then \ + percona-release enable pmm3-client testing && \ + dnf update --refresh -y && \ + dnf -y install pmm-client ; \ else \ - yum -y install pmm2-client-${PMM_CLIENT_VERSION}-6.el9 ; \ + percona-release enable pmm3-client release && \ + if [[ "$PMM_CLIENT_VERSION" =~ ^([3-9])\.([1-9][0-9]*)\.([0-9]+)$ ]]; then \ + dnf -y install pmm-client-${PMM_CLIENT_VERSION}-7.el${OL_VERSION} ; \ + else \ + dnf -y install pmm-client-${PMM_CLIENT_VERSION}-6.el${OL_VERSION} ; \ + fi \ fi && \ sed 's/Type=forking/#Type=forking/' -i /usr/lib/systemd/system/mongod.service && \ systemctl enable mongod && \ systemctl enable pbm-agent && \ + sed -i '/ExecStart/a StandardError=file:/var/log/pmm-agent.log' /usr/lib/systemd/system/pmm-agent.service && \ systemctl enable pmm-agent && \ curl -Lf -o /tmp/mgodatagen.tar.gz https://github.com/feliixx/mgodatagen/releases/download/v0.11.2/mgodatagen_0.11.2_Linux_x86_64.tar.gz && \ tar -xf /tmp/mgodatagen.tar.gz -C /usr/bin && \ - yum clean all; \ - rm -rf /var/cache/dnf /var/cache/yum /data/db && mkdir -p /data/db; + dnf clean all; \ + rm -rf /var/cache/dnf /var/cache/dnf /data/db && mkdir -p /data/db; COPY conf/sysconfig/mongod /etc/sysconfig/ COPY keyfile /etc/keyfile +COPY conf/krb/krb5.conf /etc/krb5.conf RUN ln -s /usr/bin/mongosh /usr/bin/mongo || true RUN chown mongod /etc/keyfile && chmod 400 /etc/keyfile EXPOSE 27017 diff --git a/pmm_psmdb-pbm_setup/Dockerfile-kerberos b/pmm_psmdb-pbm_setup/Dockerfile-kerberos new file mode 100644 index 00000000..97e412ba --- /dev/null +++ b/pmm_psmdb-pbm_setup/Dockerfile-kerberos @@ -0,0 +1,5 @@ +FROM alpine +RUN apk add --no-cache bash krb5 krb5-server krb5-pkinit +COPY conf/configure_krb5.sh /var/lib/krb5kdc/ +EXPOSE 88/udp +ENTRYPOINT [ "sh", 
"/var/lib/krb5kdc/configure_krb5.sh"] diff --git a/pmm_psmdb-pbm_setup/conf/configure_krb5.sh b/pmm_psmdb-pbm_setup/conf/configure_krb5.sh new file mode 100755 index 00000000..8aa99138 --- /dev/null +++ b/pmm_psmdb-pbm_setup/conf/configure_krb5.sh @@ -0,0 +1,33 @@ +#! /env/sh + +cat > /etc/krb5.conf << EOL +[libdefaults] + default_realm = PERCONATEST.COM + forwardable = true + dns_lookup_realm = false + dns_lookup_kdc = false + ignore_acceptor_hostname = true + rdns = false +[realms] + PERCONATEST.COM = { + kdc_ports = 88 + kdc = kerberos + admin_server = kerberos + } +[domain_realm] + .perconatest.com = PERCONATEST.COM + perconatest.com = PERCONATEST.COM + kerberos = PERCONATEST.COM +EOL + +kdb5_util -P password create -s +kadmin.local -q "addprinc -pw password root/admin" +for i in 101 102 103 201 202 203; do + kadmin.local -q "addprinc -pw mongodb mongodb/rs$i" +done +kadmin.local -q "addprinc -pw password1 pmm" +for i in 101 102 103 201 202 203; do + kadmin.local -q "ktadd -k /keytabs/mongodb.keytab mongodb/rs$i@PERCONATEST.COM" +done + +krb5kdc -n diff --git a/pmm_psmdb-pbm_setup/conf/krb/krb5.conf b/pmm_psmdb-pbm_setup/conf/krb/krb5.conf new file mode 100644 index 00000000..527d6078 --- /dev/null +++ b/pmm_psmdb-pbm_setup/conf/krb/krb5.conf @@ -0,0 +1,16 @@ +[libdefaults] + default_realm = PERCONATEST.COM + forwardable = true + dns_lookup_realm = false + dns_lookup_kdc = false + ignore_acceptor_hostname = true + rdns = false +[realms] + PERCONATEST.COM = { + kdc_ports = 88 + kdc = kerberos + } +[domain_realm] + .perconatest.com = PERCONATEST.COM + perconatest.com = PERCONATEST.COM + kerberos = PERCONATEST.COM diff --git a/pmm_psmdb-pbm_setup/conf/mongod-cfg/mongod.conf b/pmm_psmdb-pbm_setup/conf/mongod-cfg/mongod.conf index d69f78d8..19d783ad 100644 --- a/pmm_psmdb-pbm_setup/conf/mongod-cfg/mongod.conf +++ b/pmm_psmdb-pbm_setup/conf/mongod-cfg/mongod.conf @@ -9,10 +9,6 @@ storage: systemLog: destination: syslog -processManagement: -# fork: true - pidFilePath: 
/var/run/mongod.pid - net: port: 27017 bindIp: 0.0.0.0 @@ -29,3 +25,6 @@ operationProfiling: security: keyFile: /etc/keyfile + authorization: enabled +setParameter: + authenticationMechanisms: SCRAM-SHA-1,GSSAPI diff --git a/pmm_psmdb-pbm_setup/conf/mongod-rs/mongod.conf b/pmm_psmdb-pbm_setup/conf/mongod-rs/mongod.conf index 2dc676f8..987a28c6 100644 --- a/pmm_psmdb-pbm_setup/conf/mongod-rs/mongod.conf +++ b/pmm_psmdb-pbm_setup/conf/mongod-rs/mongod.conf @@ -9,10 +9,6 @@ storage: systemLog: destination: syslog -processManagement: -# fork: true - pidFilePath: /var/run/mongod.pid - net: port: 27017 bindIp: 0.0.0.0 @@ -26,3 +22,6 @@ operationProfiling: security: keyFile: /etc/keyfile + authorization: enabled +setParameter: + authenticationMechanisms: SCRAM-SHA-1,GSSAPI \ No newline at end of file diff --git a/pmm_psmdb-pbm_setup/conf/mongod-rs1/mongod.conf b/pmm_psmdb-pbm_setup/conf/mongod-rs1/mongod.conf index b2fa66f9..7ae73c91 100644 --- a/pmm_psmdb-pbm_setup/conf/mongod-rs1/mongod.conf +++ b/pmm_psmdb-pbm_setup/conf/mongod-rs1/mongod.conf @@ -9,10 +9,6 @@ storage: systemLog: destination: syslog -processManagement: -# fork: true - pidFilePath: /var/run/mongod.pid - net: port: 27017 bindIp: 0.0.0.0 @@ -29,3 +25,6 @@ operationProfiling: security: keyFile: /etc/keyfile + authorization: enabled +setParameter: + authenticationMechanisms: SCRAM-SHA-1,GSSAPI diff --git a/pmm_psmdb-pbm_setup/conf/mongod-rs2/mongod.conf b/pmm_psmdb-pbm_setup/conf/mongod-rs2/mongod.conf index e0ee0ea9..ab71a29d 100644 --- a/pmm_psmdb-pbm_setup/conf/mongod-rs2/mongod.conf +++ b/pmm_psmdb-pbm_setup/conf/mongod-rs2/mongod.conf @@ -9,10 +9,6 @@ storage: systemLog: destination: syslog -processManagement: -# fork: true - pidFilePath: /var/run/mongod.pid - net: port: 27017 bindIp: 0.0.0.0 @@ -29,3 +25,6 @@ operationProfiling: security: keyFile: /etc/keyfile + authorization: enabled +setParameter: + authenticationMechanisms: SCRAM-SHA-1,GSSAPI diff --git a/pmm_psmdb-pbm_setup/conf/sysconfig/mongod 
b/pmm_psmdb-pbm_setup/conf/sysconfig/mongod index 5040e7c1..263bb7dc 100644 --- a/pmm_psmdb-pbm_setup/conf/sysconfig/mongod +++ b/pmm_psmdb-pbm_setup/conf/sysconfig/mongod @@ -1,3 +1,4 @@ OPTIONS="-f /etc/mongod/mongod.conf" STDOUT="/var/log/mongo/mongod.stdout" STDERR="/var/log/mongo/mongod.stderr" +KRB5_KTNAME=/keytabs/mongodb.keytab diff --git a/pmm_psmdb-pbm_setup/configure-agents.sh b/pmm_psmdb-pbm_setup/configure-agents.sh index c28ed668..89f70d06 100644 --- a/pmm_psmdb-pbm_setup/configure-agents.sh +++ b/pmm_psmdb-pbm_setup/configure-agents.sh @@ -5,6 +5,25 @@ pmm_mongo_user=${PMM_MONGO_USER:-pmm} pmm_mongo_user_pass=${PMM_MONGO_USER_PASS:-pmmpass} pbm_user=${PBM_USER:-pbm} pbm_pass=${PBM_PASS:-pbmpass} +mongo_setup_type=${MONGO_SETUP_TYPE:-pss} +gssapi_enabled=${GSSAPI:-false} +gssapi_username=${GSSAPI_USERNAME:-pmm@PERCONATEST.COM} +gssapi_password=${GSSAPI_PASSWORD:-password1} +client_credentials_flags="--username=${pmm_mongo_user} --password=${pmm_mongo_user_pass}" +gssapi_service_name_part="" + +if [[ $gssapi_enabled == "true" ]]; then + client_credentials_flags=( + --username="$gssapi_username" + --password="$gssapi_password" + --authentication-mechanism=GSSAPI + --authentication-database='$external' + ) + gssapi_service_name_part="_gssapi" +fi + +echo +echo "gssapi enabled: $gssapi_enabled. 
Using credentials: ${client_credentials_flags[*]}" echo echo "configuring pbm agents" @@ -12,23 +31,36 @@ nodes="rs101 rs102 rs103" for node in $nodes do echo "congiguring pbm agent on $node" - docker-compose -f docker-compose-rs.yaml exec -T $node bash -c "echo \"PBM_MONGODB_URI=mongodb://${pbm_user}:${pbm_pass}@127.0.0.1:27017\" > /etc/sysconfig/pbm-agent" + docker compose -f docker-compose-rs.yaml exec -T $node bash -c "echo \"PBM_MONGODB_URI=mongodb://${pbm_user}:${pbm_pass}@127.0.0.1:27017\" > /etc/sysconfig/pbm-agent" echo "restarting pbm agent on $node" - docker-compose -f docker-compose-rs.yaml exec -T $node systemctl restart pbm-agent + docker compose -f docker-compose-rs.yaml exec -T $node systemctl restart pbm-agent done + +docker compose -f docker-compose-rs.yaml exec -T rs101 pbm config --file /etc/pbm/minio.yaml + +if [[ $mongo_setup_type == "psa" ]]; then + echo "stop pbm agent for arbiter node" + docker compose -f docker-compose-rs.yaml exec -T rs103 systemctl stop pbm-agent +fi echo echo "configuring pmm agents" +random_number=$RANDOM nodes="rs101 rs102 rs103" for node in $nodes do - echo "congiguring pmm agent on $node" - docker-compose -f docker-compose-rs.yaml exec -T $node pmm-agent setup - docker-compose -f docker-compose-rs.yaml exec -T $node pmm-admin add mongodb --enable-all-collectors --cluster=replicaset --replication-set=rs --username=${pmm_mongo_user} --password=${pmm_mongo_user_pass} $node 127.0.0.1:27017 + echo "configuring pmm agent on $node" + docker compose -f docker-compose-rs.yaml exec -T -e PMM_AGENT_SETUP_NODE_NAME=${node}._${random_number} $node pmm-agent setup + if [[ $mongo_setup_type == "psa" && $node == "rs103" ]]; then + docker compose -f docker-compose-rs.yaml exec -T $node pmm-admin add mongodb --enable-all-collectors --agent-password=mypass --environment=psmdb-dev --cluster=replicaset --replication-set=rs --host=${node} --port=27017 ${node}${gssapi_service_name_part}_${random_number} + else + echo + docker compose -f 
docker-compose-rs.yaml exec -T $node pmm-admin add mongodb --enable-all-collectors --agent-password=mypass --environment=psmdb-dev --cluster=replicaset --replication-set=rs ${client_credentials_flags[*]} --host=${node} --port=27017 ${node}${gssapi_service_name_part}_${random_number} + fi done echo echo "adding some data" -docker-compose -f docker-compose-rs.yaml exec -T rs101 mgodatagen -f /etc/datagen/replicaset.json --uri=mongodb://${pmm_mongo_user}:${pmm_mongo_user_pass}@127.0.0.1:27017/?replicaSet=rs -docker-compose -f docker-compose-rs.yaml exec -T rs101 mongo "mongodb://${pmm_mongo_user}:${pmm_mongo_user_pass}@localhost/?replicaSet=rs" --quiet << EOF +docker compose -f docker-compose-rs.yaml exec -T rs101 mgodatagen -f /etc/datagen/replicaset.json --uri=mongodb://${pmm_mongo_user}:${pmm_mongo_user_pass}@127.0.0.1:27017/?replicaSet=rs +docker compose -f docker-compose-rs.yaml exec -T rs101 mongo "mongodb://${pmm_mongo_user}:${pmm_mongo_user_pass}@localhost/?replicaSet=rs" --quiet << EOF use students; db.students.insertMany([ { diff --git a/pmm_psmdb-pbm_setup/configure-extra-agents.sh b/pmm_psmdb-pbm_setup/configure-extra-agents.sh index 75019b16..c71c16d8 100644 --- a/pmm_psmdb-pbm_setup/configure-extra-agents.sh +++ b/pmm_psmdb-pbm_setup/configure-extra-agents.sh @@ -5,6 +5,25 @@ pmm_mongo_user=${PMM_MONGO_USER:-pmm} pmm_mongo_user_pass=${PMM_MONGO_USER_PASS:-pmmpass} pbm_user=${PBM_USER:-pbm} pbm_pass=${PBM_PASS:-pbmpass} +mongo_setup_type=${MONGO_SETUP_TYPE:-pss} +gssapi_enabled=${GSSAPI:-false} +gssapi_username=${GSSAPI_USERNAME:-pmm@PERCONATEST.COM} +gssapi_password=${GSSAPI_PASSWORD:-password1} +client_credentials_flags="--username=${pmm_mongo_user} --password=${pmm_mongo_user_pass}" +gssapi_service_name_part="" + +if [[ $gssapi_enabled == "true" ]]; then + client_credentials_flags=( + --username="$gssapi_username" + --password="$gssapi_password" + --authentication-mechanism=GSSAPI + --authentication-database='$external' + ) + 
gssapi_service_name_part="_gssapi" +fi + +echo +echo "gssapi enabled: $gssapi_enabled. Using credentials: ${client_credentials_flags[*]}" echo echo "configuring pbm agents" @@ -12,17 +31,26 @@ nodes="rs201 rs202 rs203" for node in $nodes do echo "configuring pbm agent on $node" - docker-compose -f docker-compose-rs.yaml exec -T $node bash -c "echo \"PBM_MONGODB_URI=mongodb://${pbm_user}:${pbm_pass}@127.0.0.1:27017\" > /etc/sysconfig/pbm-agent" + docker compose -f docker-compose-rs.yaml exec -T $node bash -c "echo \"PBM_MONGODB_URI=mongodb://${pbm_user}:${pbm_pass}@127.0.0.1:27017\" > /etc/sysconfig/pbm-agent" echo "restarting pbm agent on $node" - docker-compose -f docker-compose-rs.yaml exec -T $node systemctl restart pbm-agent + docker compose -f docker-compose-rs.yaml exec -T $node systemctl restart pbm-agent done + +if [[ $mongo_setup_type == "psa" ]]; then + echo "stop pbm agent for arbiter node rs203" + docker compose -f docker-compose-rs.yaml exec -T rs203 systemctl stop pbm-agent +fi echo echo "configuring pmm agents" +random_number=$RANDOM nodes="rs201 rs202 rs203" for node in $nodes do echo "configuring pmm agent on $node" - docker-compose -f docker-compose-rs.yaml exec -T $node pmm-agent setup - docker-compose -f docker-compose-rs.yaml exec -T $node pmm-admin add mongodb --cluster=replicaset1 --replication-set=rs1 --username=${pmm_mongo_user} --password=${pmm_mongo_user_pass} $node 127.0.0.1:27017 + docker compose -f docker-compose-rs.yaml exec -T -e PMM_AGENT_SETUP_NODE_NAME=${node}._${random_number} $node pmm-agent setup + if [[ $mongo_setup_type == "psa" && $node == "rs203" ]]; then + docker compose -f docker-compose-rs.yaml exec -T $node pmm-admin add mongodb --enable-all-collectors --agent-password=mypass --cluster=replicaset --replication-set=rs1 --host=${node} --port=27017 ${node}${gssapi_service_name_part}_${random_number} + else + docker compose -f docker-compose-rs.yaml exec -T $node pmm-admin add mongodb --enable-all-collectors 
--agent-password=mypass --cluster=replicaset ${client_credentials_flags[*]} --host=${node} --port=27017 ${node}${gssapi_service_name_part}_${random_number} + fi done -echo diff --git a/pmm_psmdb-pbm_setup/configure-extra-psa.sh b/pmm_psmdb-pbm_setup/configure-extra-psa.sh new file mode 100755 index 00000000..00bf29a4 --- /dev/null +++ b/pmm_psmdb-pbm_setup/configure-extra-psa.sh @@ -0,0 +1,103 @@ +#!/bin/bash +set -e + +pmm_mongo_user=${PMM_MONGO_USER:-pmm} +pmm_mongo_user_pass=${PMM_MONGO_USER_PASS:-pmmpass} +pbm_user=${PBM_USER:-pbm} +pbm_pass=${PBM_PASS:-pbmpass} + +echo +echo "configuring extra PSA replicaset with members priorities" +docker compose -f docker-compose-rs.yaml exec -T rs201 mongo --quiet << EOF + config = { + "_id" : "rs", + "members" : [ + { + "_id" : 0, + "host" : "rs201:27017", + "priority": 2 + }, + { + "_id" : 1, + "host" : "rs202:27017", + "priority": 1 + }, + { + "_id" : 2, + "host" : "rs203:27017", + "arbiterOnly": true + } + ] + }; + rs.initiate(config); +EOF +echo +sleep 60 +echo +echo "configuring root user on primary" +docker compose -f docker-compose-rs.yaml exec -T rs201 mongo --quiet << EOF +db.getSiblingDB("admin").createUser({ user: "root", pwd: "root", roles: [ "root", "userAdminAnyDatabase", "clusterAdmin" ] }); +EOF +echo +echo "configuring pbm and pmm roles" +docker compose -f docker-compose-rs.yaml exec -T rs201 mongo "mongodb://root:root@localhost/?replicaSet=rs" --quiet << EOF +db.getSiblingDB("admin").createRole({ + "role": "pbmAnyAction", + "privileges": [{ + "resource": { "anyResource": true }, + "actions": [ "anyAction" ] + }], + "roles": [] +}); +db.getSiblingDB("admin").createRole({ + role: "explainRole", + privileges: [{ + resource: { + db: "", + collection: "" + }, + actions: [ + "listIndexes", + "listCollections", + "dbStats", + "dbHash", + "collStats", + "find" + ] + }], + roles:[] +}); +EOF +echo +echo "creating pbm user" +docker compose -f docker-compose-rs.yaml exec -T rs201 mongo 
"mongodb://root:root@localhost/?replicaSet=rs" --quiet << EOF +db.getSiblingDB("admin").createUser({ + user: "${pbm_user}", + pwd: "${pbm_pass}", + "roles" : [ + { "db" : "admin", "role" : "readWrite", "collection": "" }, + { "db" : "admin", "role" : "backup" }, + { "db" : "admin", "role" : "clusterMonitor" }, + { "db" : "admin", "role" : "restore" }, + { "db" : "admin", "role" : "pbmAnyAction" } + ] +}); +EOF +echo +echo "creating pmm user" +docker compose -f docker-compose-rs.yaml exec -T rs201 mongo "mongodb://root:root@localhost/?replicaSet=rs" --quiet << EOF +db.getSiblingDB("admin").createUser({ + user: "${pmm_mongo_user}", + pwd: "${pmm_mongo_user_pass}", + roles: [ + { role: "explainRole", db: "admin" }, + { role: "clusterMonitor", db: "admin" }, + { role: "read", db: "local" }, + { "db" : "admin", "role" : "readWrite", "collection": "" }, + { "db" : "admin", "role" : "backup" }, + { "db" : "admin", "role" : "clusterMonitor" }, + { "db" : "admin", "role" : "restore" }, + { "db" : "admin", "role" : "pbmAnyAction" } + ] +}); +EOF diff --git a/pmm_psmdb-pbm_setup/configure-extra-replset.sh b/pmm_psmdb-pbm_setup/configure-extra-replset.sh index cff511d3..5dc94cf5 100644 --- a/pmm_psmdb-pbm_setup/configure-extra-replset.sh +++ b/pmm_psmdb-pbm_setup/configure-extra-replset.sh @@ -8,7 +8,7 @@ pbm_pass=${PBM_PASS:-pbmpass} echo echo "configuring replicaset with members priorities" -docker-compose -f docker-compose-rs.yaml exec -T rs201 mongo --quiet << EOF +docker compose -f docker-compose-rs.yaml exec -T rs201 mongo --quiet << EOF config = { "_id" : "rs", "members" : [ @@ -35,12 +35,12 @@ echo sleep 60 echo echo "configuring root user on primary" -docker-compose -f docker-compose-rs.yaml exec -T rs201 mongo --quiet << EOF +docker compose -f docker-compose-rs.yaml exec -T rs201 mongo --quiet << EOF db.getSiblingDB("admin").createUser({ user: "root", pwd: "root", roles: [ "root", "userAdminAnyDatabase", "clusterAdmin" ] }); EOF echo echo "configuring pbm and pmm 
roles" -docker-compose -f docker-compose-rs.yaml exec -T rs201 mongo "mongodb://root:root@localhost/?replicaSet=rs" --quiet << EOF +docker compose -f docker-compose-rs.yaml exec -T rs201 mongo "mongodb://root:root@localhost/?replicaSet=rs" --quiet << EOF db.getSiblingDB("admin").createRole({ "role": "pbmAnyAction", "privileges": [{ @@ -70,7 +70,7 @@ db.getSiblingDB("admin").createRole({ EOF echo echo "creating pbm user" -docker-compose -f docker-compose-rs.yaml exec -T rs201 mongo "mongodb://root:root@localhost/?replicaSet=rs" --quiet << EOF +docker compose -f docker-compose-rs.yaml exec -T rs201 mongo "mongodb://root:root@localhost/?replicaSet=rs" --quiet << EOF db.getSiblingDB("admin").createUser({ user: "${pbm_user}", pwd: "${pbm_pass}", @@ -85,7 +85,7 @@ db.getSiblingDB("admin").createUser({ EOF echo echo "creating pmm user" -docker-compose -f docker-compose-rs.yaml exec -T rs201 mongo "mongodb://root:root@localhost/?replicaSet=rs" --quiet << EOF +docker compose -f docker-compose-rs.yaml exec -T rs201 mongo "mongodb://root:root@localhost/?replicaSet=rs" --quiet << EOF db.getSiblingDB("admin").createUser({ user: "${pmm_mongo_user}", pwd: "${pmm_mongo_user_pass}", @@ -101,3 +101,20 @@ db.getSiblingDB("admin").createUser({ ] }); EOF + +echo "creating pmm kerberos user" +docker compose -f docker-compose-rs.yaml exec -T rs201 mongo "mongodb://root:root@localhost/?replicaSet=rs" --quiet << EOF +db.getSiblingDB("\$external").createUser({ + user: "${pmm_mongo_user}@PERCONATEST.COM", + roles: [ + { role: "explainRole", db: "admin" }, + { role: "clusterMonitor", db: "admin" }, + { role: "read", db: "local" }, + { "db" : "admin", "role" : "readWrite", "collection": "" }, + { "db" : "admin", "role" : "backup" }, + { "db" : "admin", "role" : "clusterMonitor" }, + { "db" : "admin", "role" : "restore" }, + { "db" : "admin", "role" : "pbmAnyAction" } + ] +}); +EOF diff --git a/pmm_psmdb-pbm_setup/configure-psa.sh b/pmm_psmdb-pbm_setup/configure-psa.sh index 9ba3e178..391b6df2 
100755 --- a/pmm_psmdb-pbm_setup/configure-psa.sh +++ b/pmm_psmdb-pbm_setup/configure-psa.sh @@ -7,8 +7,8 @@ pbm_user=${PBM_USER:-pbm} pbm_pass=${PBM_PASS:-pbmpass} echo -echo "configuring replicaset with members priorities" -docker-compose -f docker-compose-rs.yaml exec -T rs101 mongo --quiet << EOF +echo "configuring PSA replicaset with members priorities" +docker compose -f docker-compose-rs.yaml exec -T rs101 mongo --quiet << EOF config = { "_id" : "rs", "members" : [ @@ -35,12 +35,12 @@ echo sleep 60 echo echo "configuring root user on primary" -docker-compose -f docker-compose-rs.yaml exec -T rs101 mongo --quiet << EOF +docker compose -f docker-compose-rs.yaml exec -T rs101 mongo --quiet << EOF db.getSiblingDB("admin").createUser({ user: "root", pwd: "root", roles: [ "root", "userAdminAnyDatabase", "clusterAdmin" ] }); EOF echo echo "configuring pbm and pmm roles" -docker-compose -f docker-compose-rs.yaml exec -T rs101 mongo "mongodb://root:root@localhost/?replicaSet=rs" --quiet << EOF +docker compose -f docker-compose-rs.yaml exec -T rs101 mongo "mongodb://root:root@localhost/?replicaSet=rs" --quiet << EOF db.getSiblingDB("admin").createRole({ "role": "pbmAnyAction", "privileges": [{ @@ -70,7 +70,7 @@ db.getSiblingDB("admin").createRole({ EOF echo echo "creating pbm user" -docker-compose -f docker-compose-rs.yaml exec -T rs101 mongo "mongodb://root:root@localhost/?replicaSet=rs" --quiet << EOF +docker compose -f docker-compose-rs.yaml exec -T rs101 mongo "mongodb://root:root@localhost/?replicaSet=rs" --quiet << EOF db.getSiblingDB("admin").createUser({ user: "${pbm_user}", pwd: "${pbm_pass}", @@ -85,7 +85,7 @@ db.getSiblingDB("admin").createUser({ EOF echo echo "creating pmm user" -docker-compose -f docker-compose-rs.yaml exec -T rs101 mongo "mongodb://root:root@localhost/?replicaSet=rs" --quiet << EOF +docker compose -f docker-compose-rs.yaml exec -T rs101 mongo "mongodb://root:root@localhost/?replicaSet=rs" --quiet << EOF 
db.getSiblingDB("admin").createUser({ user: "${pmm_mongo_user}", pwd: "${pmm_mongo_user_pass}", diff --git a/pmm_psmdb-pbm_setup/configure-replset.sh b/pmm_psmdb-pbm_setup/configure-replset.sh index f50bc39e..2b6f7b59 100644 --- a/pmm_psmdb-pbm_setup/configure-replset.sh +++ b/pmm_psmdb-pbm_setup/configure-replset.sh @@ -8,7 +8,7 @@ pbm_pass=${PBM_PASS:-pbmpass} echo echo "configuring replicaset with members priorities" -docker-compose -f docker-compose-rs.yaml exec -T rs101 mongo --quiet << EOF +docker compose -f docker-compose-rs.yaml exec -T rs101 mongo --quiet << EOF config = { "_id" : "rs", "members" : [ @@ -35,12 +35,12 @@ echo sleep 60 echo echo "configuring root user on primary" -docker-compose -f docker-compose-rs.yaml exec -T rs101 mongo --quiet << EOF +docker compose -f docker-compose-rs.yaml exec -T rs101 mongo --quiet << EOF db.getSiblingDB("admin").createUser({ user: "root", pwd: "root", roles: [ "root", "userAdminAnyDatabase", "clusterAdmin" ] }); EOF echo echo "configuring pbm and pmm roles" -docker-compose -f docker-compose-rs.yaml exec -T rs101 mongo "mongodb://root:root@localhost/?replicaSet=rs" --quiet << EOF +docker compose -f docker-compose-rs.yaml exec -T rs101 mongo "mongodb://root:root@localhost/?replicaSet=rs" --quiet << EOF db.getSiblingDB("admin").createRole({ "role": "pbmAnyAction", "privileges": [{ @@ -70,7 +70,7 @@ db.getSiblingDB("admin").createRole({ EOF echo echo "creating pbm user" -docker-compose -f docker-compose-rs.yaml exec -T rs101 mongo "mongodb://root:root@localhost/?replicaSet=rs" --quiet << EOF +docker compose -f docker-compose-rs.yaml exec -T rs101 mongo "mongodb://root:root@localhost/?replicaSet=rs" --quiet << EOF db.getSiblingDB("admin").createUser({ user: "${pbm_user}", pwd: "${pbm_pass}", @@ -84,8 +84,8 @@ db.getSiblingDB("admin").createUser({ }); EOF echo -echo "creating pmm user" -docker-compose -f docker-compose-rs.yaml exec -T rs101 mongo "mongodb://root:root@localhost/?replicaSet=rs" --quiet << EOF +echo 
"creating pmm regular user" +docker compose -f docker-compose-rs.yaml exec -T rs101 mongo "mongodb://root:root@localhost/?replicaSet=rs" --quiet << EOF db.getSiblingDB("admin").createUser({ user: "${pmm_mongo_user}", pwd: "${pmm_mongo_user_pass}", @@ -101,3 +101,19 @@ db.getSiblingDB("admin").createUser({ ] }); EOF +echo "creating pmm kerberos user" +docker compose -f docker-compose-rs.yaml exec -T rs101 mongo "mongodb://root:root@localhost/?replicaSet=rs" --quiet << EOF +db.getSiblingDB("\$external").createUser({ + user: "${pmm_mongo_user}@PERCONATEST.COM", + roles: [ + { role: "explainRole", db: "admin" }, + { role: "clusterMonitor", db: "admin" }, + { role: "read", db: "local" }, + { "db" : "admin", "role" : "readWrite", "collection": "" }, + { "db" : "admin", "role" : "backup" }, + { "db" : "admin", "role" : "clusterMonitor" }, + { "db" : "admin", "role" : "restore" }, + { "db" : "admin", "role" : "pbmAnyAction" } + ] +}); +EOF diff --git a/pmm_psmdb-pbm_setup/docker-compose-pmm.yaml b/pmm_psmdb-pbm_setup/docker-compose-pmm.yaml index 6d6b23f0..0672c619 100644 --- a/pmm_psmdb-pbm_setup/docker-compose-pmm.yaml +++ b/pmm_psmdb-pbm_setup/docker-compose-pmm.yaml @@ -1,14 +1,14 @@ version: "3" services: pmm-server: - image: ${PMM_IMAGE:-perconalab/pmm-server:dev-latest} + image: ${PMM_IMAGE:-perconalab/pmm-server:3-dev-latest} container_name: pmm-server environment: - "PMM_DEBUG=1" - "ENABLE_BACKUP_MANAGEMENT=1" ports: - - "443:443" - - "8081:80" + - "443:8443" + - "8081:8080" networks: - qa-integration volumes: @@ -18,6 +18,8 @@ services: build: dockerfile: ./Dockerfile-testinfra context: . 
+ environment: + PMM_SERVER_CONTAINER_ADDRESS: ${PMM_SERVER_CONTAINER_ADDRESS:-pmm-server:8443} volumes: - ./test:/test - /var/run/docker.sock:/var/run/docker.sock:ro diff --git a/pmm_psmdb-pbm_setup/docker-compose-rs.yaml b/pmm_psmdb-pbm_setup/docker-compose-rs.yaml index c8cc9de7..68b6f934 100644 --- a/pmm_psmdb-pbm_setup/docker-compose-rs.yaml +++ b/pmm_psmdb-pbm_setup/docker-compose-rs.yaml @@ -11,12 +11,16 @@ services: - PMM_REPO=${PMM_REPO:-experimental} - PBM_VERSION=${PBM_VERSION:-latest} - PSMDB_VERSION=${PSMDB_VERSION:-latest} - - PMM_CLIENT_VERSION=${PMM_CLIENT_VERSION:-latest} + - PMM_CLIENT_VERSION=${PMM_CLIENT_VERSION:-3-dev-latest} + - OL_VERSION=${OL_VERSION:-9} command: /bin/bash rs101: depends_on: - - build_member + build_member: + condition: service_started + kerberos: + condition: service_healthy image: replica_member/local profiles: ["classic", "extra"] ports: @@ -32,20 +36,26 @@ services: - ./conf/mongod-rs:/etc/mongod - ./conf/datagen:/etc/datagen:ro - /sys/fs/cgroup:/sys/fs/cgroup:rw - - /tmp/backup_data:/tmp/backup_data + - /tmp/backup_data:/tmp/backup_data:rw + - keytabs:/keytabs privileged: true + cgroup: host environment: PBM_MONGODB_URI: mongodb://${PBM_USER:-pbm}:${PBM_PASS:-pbmpass}@127.0.0.1:27017 - PMM_AGENT_SERVER_ADDRESS: ${PMM_SERVER_CONTAINER_ADDRESS:-pmm-server:443} + PMM_AGENT_SERVER_ADDRESS: ${PMM_SERVER_CONTAINER_ADDRESS:-pmm-server:8443} PMM_AGENT_SERVER_USERNAME: admin PMM_AGENT_SERVER_PASSWORD: ${ADMIN_PASSWORD:-password} PMM_AGENT_SERVER_INSECURE_TLS: 1 container_name: rs101 hostname: rs101 + entrypoint: bash -c "chown -R mongod:mongod /keytabs && exec /usr/sbin/init" rs102: depends_on: - - build_member + build_member: + condition: service_started + kerberos: + condition: service_healthy image: replica_member/local profiles: ["classic", "extra"] networks: @@ -58,20 +68,26 @@ services: - ./conf/pbm:/etc/pbm - ./conf/mongod-rs:/etc/mongod - /sys/fs/cgroup:/sys/fs/cgroup:rw - - /tmp/backup_data:/tmp/backup_data + - 
/tmp/backup_data:/tmp/backup_data:rw + - keytabs:/keytabs privileged: true + cgroup: host environment: PBM_MONGODB_URI: mongodb://${PBM_USER:-pbm}:${PBM_PASS:-pbmpass}@127.0.0.1:27017 - PMM_AGENT_SERVER_ADDRESS: ${PMM_SERVER_CONTAINER_ADDRESS:-pmm-server:443} + PMM_AGENT_SERVER_ADDRESS: ${PMM_SERVER_CONTAINER_ADDRESS:-pmm-server:8443} PMM_AGENT_SERVER_USERNAME: admin PMM_AGENT_SERVER_PASSWORD: ${ADMIN_PASSWORD:-password} PMM_AGENT_SERVER_INSECURE_TLS: 1 container_name: rs102 hostname: rs102 + entrypoint: bash -c "chown -R mongod:mongod /keytabs && exec /usr/sbin/init" rs103: depends_on: - - build_member + build_member: + condition: service_started + kerberos: + condition: service_healthy image: replica_member/local profiles: ["classic", "extra"] networks: @@ -84,20 +100,26 @@ services: - ./conf/pbm:/etc/pbm - ./conf/mongod-rs:/etc/mongod - /sys/fs/cgroup:/sys/fs/cgroup:rw - - /tmp/backup_data:/tmp/backup_data + - /tmp/backup_data:/tmp/backup_data:rw + - keytabs:/keytabs privileged: true + cgroup: host environment: PBM_MONGODB_URI: mongodb://${PBM_USER:-pbm}:${PBM_PASS:-pbmpass}@127.0.0.1:27017 - PMM_AGENT_SERVER_ADDRESS: ${PMM_SERVER_CONTAINER_ADDRESS:-pmm-server:443} + PMM_AGENT_SERVER_ADDRESS: ${PMM_SERVER_CONTAINER_ADDRESS:-pmm-server:8443} PMM_AGENT_SERVER_USERNAME: admin PMM_AGENT_SERVER_PASSWORD: ${ADMIN_PASSWORD:-password} PMM_AGENT_SERVER_INSECURE_TLS: 1 container_name: rs103 hostname: rs103 + entrypoint: bash -c "chown -R mongod:mongod /keytabs && exec /usr/sbin/init" rs201: depends_on: - - build_member + build_member: + condition: service_started + kerberos: + condition: service_healthy image: replica_member/local profiles: ["extra"] ports: @@ -112,20 +134,26 @@ services: - ./conf/pbm:/etc/pbm - ./conf/mongod-rs:/etc/mongod - /sys/fs/cgroup:/sys/fs/cgroup:rw - - /tmp/backup_data:/tmp/backup_data + - /tmp/backup_data:/tmp/backup_data:rw + - keytabs:/keytabs privileged: true + cgroup: host environment: PBM_MONGODB_URI: 
mongodb://${PBM_USER:-pbm}:${PBM_PASS:-pbmpass}@127.0.0.1:27017 - PMM_AGENT_SERVER_ADDRESS: ${PMM_SERVER_CONTAINER_ADDRESS:-pmm-server:443} + PMM_AGENT_SERVER_ADDRESS: ${PMM_SERVER_CONTAINER_ADDRESS:-pmm-server:8443} PMM_AGENT_SERVER_USERNAME: admin PMM_AGENT_SERVER_PASSWORD: ${ADMIN_PASSWORD:-password} PMM_AGENT_SERVER_INSECURE_TLS: 1 container_name: rs201 hostname: rs201 + entrypoint: bash -c "chown -R mongod:mongod /keytabs && exec /usr/sbin/init" rs202: depends_on: - - build_member + build_member: + condition: service_started + kerberos: + condition: service_healthy image: replica_member/local profiles: ["extra"] networks: @@ -138,20 +166,26 @@ services: - ./conf/pbm:/etc/pbm - ./conf/mongod-rs:/etc/mongod - /sys/fs/cgroup:/sys/fs/cgroup:rw - - /tmp/backup_data:/tmp/backup_data + - /tmp/backup_data:/tmp/backup_data:rw + - keytabs:/keytabs privileged: true + cgroup: host environment: PBM_MONGODB_URI: mongodb://${PBM_USER:-pbm}:${PBM_PASS:-pbmpass}@127.0.0.1:27017 - PMM_AGENT_SERVER_ADDRESS: ${PMM_SERVER_CONTAINER_ADDRESS:-pmm-server:443} + PMM_AGENT_SERVER_ADDRESS: ${PMM_SERVER_CONTAINER_ADDRESS:-pmm-server:8443} PMM_AGENT_SERVER_USERNAME: admin PMM_AGENT_SERVER_PASSWORD: ${ADMIN_PASSWORD:-password} PMM_AGENT_SERVER_INSECURE_TLS: 1 container_name: rs202 hostname: rs202 + entrypoint: bash -c "chown -R mongod:mongod /keytabs && exec /usr/sbin/init" rs203: depends_on: - - build_member + build_member: + condition: service_started + kerberos: + condition: service_healthy image: replica_member/local profiles: ["extra"] networks: @@ -164,21 +198,26 @@ services: - ./conf/pbm:/etc/pbm - ./conf/mongod-rs:/etc/mongod - /sys/fs/cgroup:/sys/fs/cgroup:rw - - /tmp/backup_data:/tmp/backup_data + - /tmp/backup_data:/tmp/backup_data:rw + - keytabs:/keytabs privileged: true + cgroup: host environment: PBM_MONGODB_URI: mongodb://${PBM_USER:-pbm}:${PBM_PASS:-pbmpass}@127.0.0.1:27017 - PMM_AGENT_SERVER_ADDRESS: ${PMM_SERVER_CONTAINER_ADDRESS:-pmm-server:443} + 
PMM_AGENT_SERVER_ADDRESS: ${PMM_SERVER_CONTAINER_ADDRESS:-pmm-server:8443} PMM_AGENT_SERVER_USERNAME: admin PMM_AGENT_SERVER_PASSWORD: ${ADMIN_PASSWORD:-password} PMM_AGENT_SERVER_INSECURE_TLS: 1 container_name: rs203 hostname: rs203 + entrypoint: bash -c "chown -R mongod:mongod /keytabs && exec /usr/sbin/init" minio: image: minio/minio profiles: ["classic", "extra"] container_name: minio + ports: + - "9001:9000" depends_on: - build_member networks: @@ -207,7 +246,30 @@ services: depends_on: - minio entrypoint: > - /bin/sh -c " sleep 5; /usr/bin/mc config host add myminio http://minio:9000 minio1234 minio1234; /usr/bin/mc mb myminio/bcp; exit 0; " + /bin/sh -c " sleep 5; /usr/bin/mc alias set myminio http://minio:9000 minio1234 minio1234; /usr/bin/mc mb myminio/bcp; exit 0; " + + kerberos: + image: kerberos/local + build: + dockerfile: ./Dockerfile-kerberos + context: . + container_name: kerberos + hostname: kerberos + networks: + - pmm-qa + - pmm-ui-tests1 + - qa-integration + - pmm-ui-tests3 + - pmm-ui-tests2 + environment: + - "KRB5_TRACE=/dev/stderr" + volumes: + - keytabs:/keytabs + healthcheck: + test: ["CMD", "kadmin.local", "-q", "listprincs"] + interval: 2s + timeout: 1s + retries: 5 networks: qa-integration: @@ -224,3 +286,7 @@ networks: pmm-qa: name: pmm-qa external: true + +volumes: + keytabs: + driver: local \ No newline at end of file diff --git a/pmm_psmdb-pbm_setup/docker-compose-sharded.yaml b/pmm_psmdb-pbm_setup/docker-compose-sharded.yaml index 33850863..4f4598a5 100644 --- a/pmm_psmdb-pbm_setup/docker-compose-sharded.yaml +++ b/pmm_psmdb-pbm_setup/docker-compose-sharded.yaml @@ -7,10 +7,10 @@ services: context: . 
args: - REPO=${REPO:-testing} - - PMM_REPO=${PMM_REPO:-release} + - PMM_REPO=${PMM_REPO:-experimental} - PBM_VERSION=${PBM_VERSION:-latest} - PSMDB_VERSION=${PSMDB_VERSION:-latest} - - PMM_CLIENT_VERSION=${PMM_CLIENT_VERSION:-latest} + - PMM_CLIENT_VERSION=${PMM_CLIENT_VERSION:-3-dev-latest} command: /bin/bash rs101: @@ -21,15 +21,19 @@ services: - ./conf/pbm:/etc/pbm - ./conf/mongod-rs1:/etc/mongod - /sys/fs/cgroup:/sys/fs/cgroup:rw + - /tmp/mongodb:/tmp privileged: true + cgroup: host environment: - "PBM_MONGODB_URI=mongodb://${PBM_USER:-pbm}:${PBM_PASS:-pbmpass}@127.0.0.1:27017" - - "PMM_AGENT_SERVER_ADDRESS=pmm-server:443" + - "PMM_AGENT_SERVER_ADDRESS=${PMM_SERVER_CONTAINER_ADDRESS:-pmm-server:8443}" - "PMM_AGENT_SERVER_USERNAME=admin" - - "PMM_AGENT_SERVER_PASSWORD=password" + - "PMM_AGENT_SERVER_PASSWORD=${ADMIN_PASSWORD:-password}" - "PMM_AGENT_SERVER_INSECURE_TLS=1" container_name: rs101 hostname: rs101 + ports: + - "27027:27017" networks: - test-network @@ -42,11 +46,12 @@ services: - ./conf/mongod-rs1:/etc/mongod - /sys/fs/cgroup:/sys/fs/cgroup:rw privileged: true + cgroup: host environment: - "PBM_MONGODB_URI=mongodb://${PBM_USER:-pbm}:${PBM_PASS:-pbmpass}@127.0.0.1:27017" - - "PMM_AGENT_SERVER_ADDRESS=pmm-server:443" + - "PMM_AGENT_SERVER_ADDRESS=${PMM_SERVER_CONTAINER_ADDRESS:-pmm-server:8443}" - "PMM_AGENT_SERVER_USERNAME=admin" - - "PMM_AGENT_SERVER_PASSWORD=password" + - "PMM_AGENT_SERVER_PASSWORD=${ADMIN_PASSWORD:-password}" - "PMM_AGENT_SERVER_INSECURE_TLS=1" container_name: rs102 hostname: rs102 @@ -62,11 +67,12 @@ services: - ./conf/mongod-rs1:/etc/mongod - /sys/fs/cgroup:/sys/fs/cgroup:rw privileged: true + cgroup: host environment: - "PBM_MONGODB_URI=mongodb://${PBM_USER:-pbm}:${PBM_PASS:-pbmpass}@127.0.0.1:27017" - - "PMM_AGENT_SERVER_ADDRESS=pmm-server:443" + - "PMM_AGENT_SERVER_ADDRESS=${PMM_SERVER_CONTAINER_ADDRESS:-pmm-server:8443}" - "PMM_AGENT_SERVER_USERNAME=admin" - - "PMM_AGENT_SERVER_PASSWORD=password" + - 
"PMM_AGENT_SERVER_PASSWORD=${ADMIN_PASSWORD:-password}" - "PMM_AGENT_SERVER_INSECURE_TLS=1" container_name: rs103 hostname: rs103 @@ -82,11 +88,12 @@ services: - ./conf/mongod-rs2:/etc/mongod - /sys/fs/cgroup:/sys/fs/cgroup:rw privileged: true + cgroup: host environment: - "PBM_MONGODB_URI=mongodb://${PBM_USER:-pbm}:${PBM_PASS:-pbmpass}@127.0.0.1:27017" - - "PMM_AGENT_SERVER_ADDRESS=pmm-server:443" + - "PMM_AGENT_SERVER_ADDRESS=${PMM_SERVER_CONTAINER_ADDRESS:-pmm-server:8443}" - "PMM_AGENT_SERVER_USERNAME=admin" - - "PMM_AGENT_SERVER_PASSWORD=password" + - "PMM_AGENT_SERVER_PASSWORD=${ADMIN_PASSWORD:-password}" - "PMM_AGENT_SERVER_INSECURE_TLS=1" container_name: rs201 hostname: rs201 @@ -102,11 +109,12 @@ services: - ./conf/mongod-rs2:/etc/mongod - /sys/fs/cgroup:/sys/fs/cgroup:rw privileged: true + cgroup: host environment: - "PBM_MONGODB_URI=mongodb://${PBM_USER:-pbm}:${PBM_PASS:-pbmpass}@127.0.0.1:27017" - - "PMM_AGENT_SERVER_ADDRESS=pmm-server:443" + - "PMM_AGENT_SERVER_ADDRESS=${PMM_SERVER_CONTAINER_ADDRESS:-pmm-server:8443}" - "PMM_AGENT_SERVER_USERNAME=admin" - - "PMM_AGENT_SERVER_PASSWORD=password" + - "PMM_AGENT_SERVER_PASSWORD=${ADMIN_PASSWORD:-password}" - "PMM_AGENT_SERVER_INSECURE_TLS=1" container_name: rs202 hostname: rs202 @@ -122,11 +130,12 @@ services: - ./conf/mongod-rs2:/etc/mongod - /sys/fs/cgroup:/sys/fs/cgroup:rw privileged: true + cgroup: host environment: - "PBM_MONGODB_URI=mongodb://${PBM_USER:-pbm}:${PBM_PASS:-pbmpass}@127.0.0.1:27017" - - "PMM_AGENT_SERVER_ADDRESS=pmm-server:443" + - "PMM_AGENT_SERVER_ADDRESS=${PMM_SERVER_CONTAINER_ADDRESS:-pmm-server:8443}" - "PMM_AGENT_SERVER_USERNAME=admin" - - "PMM_AGENT_SERVER_PASSWORD=password" + - "PMM_AGENT_SERVER_PASSWORD=${ADMIN_PASSWORD:-password}" - "PMM_AGENT_SERVER_INSECURE_TLS=1" container_name: rs203 hostname: rs203 @@ -142,11 +151,12 @@ services: - ./conf/mongod-cfg:/etc/mongod - /sys/fs/cgroup:/sys/fs/cgroup:rw privileged: true + cgroup: host environment: - 
"PBM_MONGODB_URI=mongodb://${PBM_USER:-pbm}:${PBM_PASS:-pbmpass}@127.0.0.1:27017" - - "PMM_AGENT_SERVER_ADDRESS=pmm-server:443" + - "PMM_AGENT_SERVER_ADDRESS=${PMM_SERVER_CONTAINER_ADDRESS:-pmm-server:8443}" - "PMM_AGENT_SERVER_USERNAME=admin" - - "PMM_AGENT_SERVER_PASSWORD=password" + - "PMM_AGENT_SERVER_PASSWORD=${ADMIN_PASSWORD:-password}" - "PMM_AGENT_SERVER_INSECURE_TLS=1" container_name: rscfg01 hostname: rscfg01 @@ -162,11 +172,12 @@ services: - ./conf/mongod-cfg:/etc/mongod - /sys/fs/cgroup:/sys/fs/cgroup:rw privileged: true + cgroup: host environment: - "PBM_MONGODB_URI=mongodb://${PBM_USER:-pbm}:${PBM_PASS:-pbmpass}@127.0.0.1:27017" - - "PMM_AGENT_SERVER_ADDRESS=pmm-server:443" + - "PMM_AGENT_SERVER_ADDRESS=${PMM_SERVER_CONTAINER_ADDRESS:-pmm-server:8443}" - "PMM_AGENT_SERVER_USERNAME=admin" - - "PMM_AGENT_SERVER_PASSWORD=password" + - "PMM_AGENT_SERVER_PASSWORD=${ADMIN_PASSWORD:-password}" - "PMM_AGENT_SERVER_INSECURE_TLS=1" container_name: rscfg02 hostname: rscfg02 @@ -182,11 +193,12 @@ services: - ./conf/mongod-cfg:/etc/mongod - /sys/fs/cgroup:/sys/fs/cgroup:rw privileged: true + cgroup: host environment: - "PBM_MONGODB_URI=mongodb://${PBM_USER:-pbm}:${PBM_PASS:-pbmpass}@127.0.0.1:27017" - - "PMM_AGENT_SERVER_ADDRESS=pmm-server:443" + - "PMM_AGENT_SERVER_ADDRESS=${PMM_SERVER_CONTAINER_ADDRESS:-pmm-server:8443}" - "PMM_AGENT_SERVER_USERNAME=admin" - - "PMM_AGENT_SERVER_PASSWORD=password" + - "PMM_AGENT_SERVER_PASSWORD=${ADMIN_PASSWORD:-password}" - "PMM_AGENT_SERVER_INSECURE_TLS=1" container_name: rscfg03 hostname: rscfg03 @@ -203,6 +215,7 @@ services: - /sys/fs/cgroup:/sys/fs/cgroup:rw - ./conf/datagen:/etc/datagen:ro privileged: true + cgroup: host command: "mongos --keyFile=/etc/keyfile --configdb rscfg/rscfg01:27017,rscfg02:27017,rscfg03:27017 --port 27017 --bind_ip 0.0.0.0" networks: - test-network @@ -216,7 +229,7 @@ services: image: minio/minio container_name: minio ports: - - "9000:9000" + - "9001:9000" networks: - test-network volumes: @@ 
-234,17 +247,17 @@ services: depends_on: - minio entrypoint: > - /bin/sh -c " sleep 5; /usr/bin/mc config host add myminio http://minio:9000 minio1234 minio1234; /usr/bin/mc mb myminio/bcp; exit 0; " + /bin/sh -c " sleep 5; /usr/bin/mc alias set myminio http://minio:9000 minio1234 minio1234; /usr/bin/mc mb myminio/bcp; exit 0; " pmm-server: - image: ${PMM_IMAGE:-perconalab/pmm-server:dev-latest} + image: ${PMM_IMAGE:-perconalab/pmm-server:3-dev-latest} container_name: pmm-server environment: - "PMM_DEBUG=1" - "ENABLE_BACKUP_MANAGEMENT=1" ports: - - "443:443" - - "8081:80" + - "443:8443" + - "8081:8080" networks: - test-network volumes: @@ -254,6 +267,8 @@ services: build: dockerfile: ./Dockerfile-testinfra context: . + environment: + PMM_SERVER_CONTAINER_ADDRESS: ${PMM_SERVER_CONTAINER_ADDRESS:-pmm-server:8443} volumes: - ./test:/test - /var/run/docker.sock:/var/run/docker.sock:ro diff --git a/pmm_psmdb-pbm_setup/start-rs-only.sh b/pmm_psmdb-pbm_setup/start-rs-only.sh index b21c4600..7f88ad95 100755 --- a/pmm_psmdb-pbm_setup/start-rs-only.sh +++ b/pmm_psmdb-pbm_setup/start-rs-only.sh @@ -2,6 +2,8 @@ set -e profile=${COMPOSE_PROFILES:-classic} +mongo_setup_type=${MONGO_SETUP_TYPE:-pss} +ol_version=${OL_VERSION:-9} docker network create qa-integration || true docker network create pmm-qa || true @@ -10,18 +12,28 @@ docker network create pmm2-upgrade-tests_pmm-network || true docker network create pmm2-ui-tests_pmm-network || true export COMPOSE_PROFILES=${profile} +export MONGO_SETUP_TYPE=${mongo_setup_type} +export OL_VERSION=${ol_version} -docker-compose -f docker-compose-rs.yaml down -v --remove-orphans -docker-compose -f docker-compose-rs.yaml build --no-cache -docker-compose -f docker-compose-rs.yaml up -d +docker compose -f docker-compose-rs.yaml down -v --remove-orphans +docker compose -f docker-compose-rs.yaml build --no-cache +docker compose -f docker-compose-rs.yaml up -d echo -echo "waiting 30 seconds for replica set members to start" -sleep 30 +echo 
"waiting 60 seconds for replica set members to start" +sleep 60 echo -bash -x ./configure-replset.sh +if [ $mongo_setup_type == "pss" ]; then + bash -e ./configure-replset.sh +else + bash -e ./configure-psa.sh +fi bash -x ./configure-agents.sh if [ $profile = "extra" ]; then - bash -x ./configure-extra-replset.sh + if [ $mongo_setup_type == "pss" ]; then + bash -x ./configure-extra-replset.sh + else + bash -x ./configure-extra-psa.sh + fi bash -x ./configure-extra-agents.sh fi diff --git a/pmm_psmdb-pbm_setup/start-rs.sh b/pmm_psmdb-pbm_setup/start-rs.sh index 527003e0..aa9e3c8b 100755 --- a/pmm_psmdb-pbm_setup/start-rs.sh +++ b/pmm_psmdb-pbm_setup/start-rs.sh @@ -3,6 +3,8 @@ set -e pmm_server_admin_pass=${ADMIN_PASSWORD:-password} profile=${COMPOSE_PROFILES:-classic} +mongo_setup_type=${MONGO_SETUP_TYPE:-pss} +ol_version=${OL_VERSION:-9} docker network create qa-integration || true docker network create pmm-qa || true @@ -11,27 +13,33 @@ docker network create pmm2-upgrade-tests_pmm-network || true docker network create pmm2-ui-tests_pmm-network || true export COMPOSE_PROFILES=${profile} +export MONGO_SETUP_TYPE=${mongo_setup_type} +export OL_VERSION=${ol_version} -docker-compose -f docker-compose-rs.yaml -f docker-compose-pmm.yaml down -v --remove-orphans -docker-compose -f docker-compose-rs.yaml -f docker-compose-pmm.yaml build -docker-compose -f docker-compose-pmm.yaml -f docker-compose-rs.yaml up -d +docker compose -f docker-compose-rs.yaml -f docker-compose-pmm.yaml down -v --remove-orphans +docker compose -f docker-compose-rs.yaml -f docker-compose-pmm.yaml build +docker compose -f docker-compose-pmm.yaml -f docker-compose-rs.yaml up -d echo echo "waiting 30 seconds for pmm-server to start" sleep 30 echo "configuring pmm-server" -docker-compose -f docker-compose-pmm.yaml exec -T pmm-server change-admin-password $pmm_server_admin_pass +docker compose -f docker-compose-pmm.yaml exec -T pmm-server change-admin-password $pmm_server_admin_pass echo "restarting 
pmm-server" -docker-compose -f docker-compose-pmm.yaml restart pmm-server +docker compose -f docker-compose-pmm.yaml restart pmm-server echo "waiting 30 seconds for pmm-server to start" sleep 30 -bash -e ./configure-replset.sh +if [ $mongo_setup_type == "pss" ]; then + bash -e ./configure-replset.sh +else + bash -e ./configure-psa.sh +fi bash -e ./configure-agents.sh tests=${TESTS:-yes} if [ $tests != "no" ]; then echo echo "running tests" - docker-compose -f docker-compose-pmm.yaml run test pytest -s -x --verbose test.py - docker-compose -f docker-compose-pmm.yaml run test chmod -R 777 . + docker compose -f docker-compose-pmm.yaml run test pytest -s -x --verbose test.py + docker compose -f docker-compose-pmm.yaml run test chmod -R 777 . else echo echo "skipping tests" @@ -40,7 +48,7 @@ cleanup=${CLEANUP:-yes} if [ $cleanup != "no" ]; then echo echo "cleanup" - docker-compose -f docker-compose-rs.yaml -f docker-compose-pmm.yaml down -v --remove-orphans + docker compose -f docker-compose-rs.yaml -f docker-compose-pmm.yaml down -v --remove-orphans else echo echo "skipping cleanup" diff --git a/pmm_psmdb-pbm_setup/start-sharded.sh b/pmm_psmdb-pbm_setup/start-sharded.sh index e60b8e39..e63274eb 100755 --- a/pmm_psmdb-pbm_setup/start-sharded.sh +++ b/pmm_psmdb-pbm_setup/start-sharded.sh @@ -5,16 +5,16 @@ pmm_pass=${PMM_PASS:-pmmpass} pbm_user=${PBM_USER:-pbm} pbm_pass=${PBM_PASS:-pbmpass} -docker-compose -f docker-compose-sharded.yaml down -v --remove-orphans -docker-compose -f docker-compose-sharded.yaml build -docker-compose -f docker-compose-sharded.yaml up -d +docker compose -f docker-compose-sharded.yaml down -v --remove-orphans +docker compose -f docker-compose-sharded.yaml build +docker compose -f docker-compose-sharded.yaml up -d echo "waiting 30 seconds for pmm-server to start" sleep 30 echo "configuring pmm-server" -docker-compose -f docker-compose-sharded.yaml exec -T pmm-server change-admin-password password +docker compose -f docker-compose-sharded.yaml 
exec -T pmm-server change-admin-password password echo "restarting pmm-server" -docker-compose -f docker-compose-sharded.yaml restart pmm-server +docker compose -f docker-compose-sharded.yaml restart pmm-server echo "waiting 30 seconds for pmm-server to start" sleep 30 @@ -23,7 +23,7 @@ for node in $nodes do rs=$(echo $node | awk -F "0" '{print $1}') echo "configuring replicaset ${rs} with members priorities" - docker-compose -f docker-compose-sharded.yaml exec -T $node mongo --quiet << EOF + docker compose -f docker-compose-sharded.yaml exec -T $node mongo --quiet << EOF config = { "_id" : "${rs}", "members" : [ @@ -49,12 +49,12 @@ EOF sleep 60 echo echo "configuring root user on primary $node replicaset $rs" - docker-compose -f docker-compose-sharded.yaml exec -T $node mongo --quiet << EOF + docker compose -f docker-compose-sharded.yaml exec -T $node mongo --quiet << EOF db.getSiblingDB("admin").createUser({ user: "root", pwd: "root", roles: [ "root", "userAdminAnyDatabase", "clusterAdmin" ] }); EOF echo echo "configuring pbm and pmm roles on replicaset $rs" - docker-compose -f docker-compose-sharded.yaml exec -T $node mongo "mongodb://root:root@localhost/?replicaSet=${rs}" --quiet << EOF + docker compose -f docker-compose-sharded.yaml exec -T $node mongo "mongodb://root:root@localhost/?replicaSet=${rs}" --quiet << EOF db.getSiblingDB("admin").createRole({ "role": "pbmAnyAction", "privileges": [{ @@ -84,7 +84,7 @@ EOF EOF echo echo "creating pbm user for replicaset ${rs}" - docker-compose -f docker-compose-sharded.yaml exec -T $node mongo "mongodb://root:root@localhost/?replicaSet=${rs}" --quiet << EOF + docker compose -f docker-compose-sharded.yaml exec -T $node mongo "mongodb://root:root@localhost/?replicaSet=${rs}" --quiet << EOF db.getSiblingDB("admin").createUser({ user: "${pbm_user}", pwd: "${pbm_pass}", @@ -99,7 +99,7 @@ EOF EOF echo echo "creating pmm user for replicaset ${rs}" - docker-compose -f docker-compose-sharded.yaml exec -T $node mongo 
"mongodb://root:root@localhost/?replicaSet=${rs}" --quiet << EOF + docker compose -f docker-compose-sharded.yaml exec -T $node mongo "mongodb://root:root@localhost/?replicaSet=${rs}" --quiet << EOF db.getSiblingDB("admin").createUser({ user: "${pmm_user}", pwd: "${pmm_pass}", @@ -118,7 +118,7 @@ EOF done echo "configuring configserver replicaset with members priorities" -docker-compose -f docker-compose-sharded.yaml exec -T rscfg01 mongo --quiet << EOF +docker compose -f docker-compose-sharded.yaml exec -T rscfg01 mongo --quiet << EOF config = { "_id" : "rscfg", "members" : [ @@ -144,18 +144,18 @@ EOF sleep 60 echo echo "adding shards and creating global mongo user" -docker-compose -f docker-compose-sharded.yaml exec -T mongos mongo --quiet << EOF +docker compose -f docker-compose-sharded.yaml exec -T mongos mongo --quiet << EOF db.getSiblingDB("admin").createUser({ user: "root", pwd: "root", roles: [ "root", "userAdminAnyDatabase", "clusterAdmin" ] }); EOF -docker-compose -f docker-compose-sharded.yaml exec -T mongos mongo "mongodb://root:root@localhost" --quiet --eval 'sh.addShard( "rs1/rs101:27017,rs102:27017,rs103:27017" )' +docker compose -f docker-compose-sharded.yaml exec -T mongos mongo "mongodb://root:root@localhost" --quiet --eval 'sh.addShard( "rs1/rs101:27017,rs102:27017,rs103:27017" )' echo sleep 20 -docker-compose -f docker-compose-sharded.yaml exec -T mongos mongo "mongodb://root:root@localhost" --quiet --eval 'sh.addShard( "rs2/rs201:27017,rs202:27017,rs203:27017" )' +docker compose -f docker-compose-sharded.yaml exec -T mongos mongo "mongodb://root:root@localhost" --quiet --eval 'sh.addShard( "rs2/rs201:27017,rs202:27017,rs203:27017" )' echo sleep 20 echo echo "configuring pbm and pmm roles" -docker-compose -f docker-compose-sharded.yaml exec -T mongos mongo "mongodb://root:root@localhost" --quiet << EOF +docker compose -f docker-compose-sharded.yaml exec -T mongos mongo "mongodb://root:root@localhost" --quiet << EOF 
db.getSiblingDB("admin").createRole({ "role": "pbmAnyAction", "privileges": [{ @@ -185,7 +185,7 @@ db.getSiblingDB("admin").createRole({ EOF echo echo "creating pbm user" -docker-compose -f docker-compose-sharded.yaml exec -T mongos mongo "mongodb://root:root@localhost" --quiet << EOF +docker compose -f docker-compose-sharded.yaml exec -T mongos mongo "mongodb://root:root@localhost" --quiet << EOF db.getSiblingDB("admin").createUser({ user: "${pbm_user}", pwd: "${pbm_pass}", @@ -200,7 +200,7 @@ db.getSiblingDB("admin").createUser({ EOF echo echo "creating pmm user" -docker-compose -f docker-compose-sharded.yaml exec -T mongos mongo "mongodb://root:root@localhost" --quiet << EOF +docker compose -f docker-compose-sharded.yaml exec -T mongos mongo "mongodb://root:root@localhost" --quiet << EOF db.getSiblingDB("admin").createUser({ user: "${pmm_user}", pwd: "${pmm_pass}", @@ -224,37 +224,38 @@ nodes="rs101 rs102 rs103 rs201 rs202 rs203 rscfg01 rscfg02 rscfg03" for node in $nodes do echo "congiguring pbm agent on $node" - docker-compose -f docker-compose-sharded.yaml exec -T $node bash -c "echo \"PBM_MONGODB_URI=mongodb://${pbm_user}:${pbm_pass}@127.0.0.1:27017\" > /etc/sysconfig/pbm-agent" + docker compose -f docker-compose-sharded.yaml exec -T $node bash -c "echo \"PBM_MONGODB_URI=mongodb://${pbm_user}:${pbm_pass}@127.0.0.1:27017\" > /etc/sysconfig/pbm-agent" echo "restarting pbm agent on $node" - docker-compose -f docker-compose-sharded.yaml exec -T $node systemctl restart pbm-agent + docker compose -f docker-compose-sharded.yaml exec -T $node systemctl restart pbm-agent done echo echo "configuring pmm agents" +random_number=$RANDOM nodes="rs101 rs102 rs103 rs201 rs202 rs203 rscfg01 rscfg02 rscfg03" for node in $nodes do echo "congiguring pmm agent on $node" rs=$(echo $node | awk -F "0" '{print $1}') - docker-compose -f docker-compose-sharded.yaml exec -T $node pmm-agent setup - docker-compose -f docker-compose-sharded.yaml exec -T $node pmm-admin add mongodb 
--cluster=sharded --username=${pmm_user} --password=${pmm_pass} $node 127.0.0.1:27017 + docker compose -f docker-compose-sharded.yaml exec -T -e PMM_AGENT_SETUP_NODE_NAME=${node}_${random_number} $node pmm-agent setup + docker compose -f docker-compose-sharded.yaml exec -T $node pmm-admin add mongodb --agent-password=mypass --cluster=sharded --environment=mongo-sharded-dev --username=${pmm_user} --password=${pmm_pass} ${node}_${random_number} 127.0.0.1:27017 done echo "configuring pmm-agent on primary rscfg01 for mongos instance" -docker-compose -f docker-compose-sharded.yaml exec -T rscfg01 pmm-admin add mongodb --cluster=sharded --username=${pmm_user} --password=${pmm_pass} mongos mongos:27017 +docker compose -f docker-compose-sharded.yaml exec -T rscfg01 pmm-admin add mongodb --agent-password=mypass --cluster=sharded --environment=mongo-sharded-dev --username=${pmm_user} --password=${pmm_pass} mongos_${random_number} mongos:27017 echo "adding some data" -docker-compose -f docker-compose-sharded.yaml exec -T mongos mgodatagen -f /etc/datagen/sharded.json --uri=mongodb://root:root@127.0.0.1:27017 +docker compose -f docker-compose-sharded.yaml exec -T mongos mgodatagen -f /etc/datagen/sharded.json --uri=mongodb://root:root@127.0.0.1:27017 tests=${TESTS:-yes} if [ $tests != "no" ]; then echo "running tests" - docker-compose -f docker-compose-sharded.yaml run test pytest -s -x --verbose test.py - docker-compose -f docker-compose-sharded.yaml run test chmod -R 777 . + docker compose -f docker-compose-sharded.yaml run test pytest -s -x --verbose test.py + docker compose -f docker-compose-sharded.yaml run test chmod -R 777 . 
else echo "skipping tests" fi cleanup=${CLEANUP:-yes} if [ $cleanup != "no" ]; then echo "cleanup" - docker-compose -f docker-compose-sharded.yaml down -v --remove-orphans + docker compose -f docker-compose-sharded.yaml down -v --remove-orphans else echo "skipping cleanup" fi diff --git a/pmm_psmdb-pbm_setup/test/expected_metrics.txt b/pmm_psmdb-pbm_setup/test/expected_metrics.txt new file mode 100644 index 00000000..4a65db21 --- /dev/null +++ b/pmm_psmdb-pbm_setup/test/expected_metrics.txt @@ -0,0 +1,38 @@ +mongodb_up +mongodb_mongod_global_lock_client +mongodb_mongod_global_lock_current_queue +mongodb_mongod_instance_uptime_seconds +mongodb_mongod_locks_time_acquiring_global_microseconds_total +mongodb_mongod_metrics_cursor_open +mongodb_mongod_metrics_cursor_timed_out_total +mongodb_mongod_metrics_document_total +mongodb_mongod_metrics_get_last_error_wtime_num_total +mongodb_mongod_metrics_get_last_error_wtime_total_milliseconds +mongodb_mongod_metrics_get_last_error_wtimeouts_total +mongodb_mongod_metrics_operation_total +mongodb_mongod_metrics_query_executor_total +mongodb_mongod_metrics_ttl_deleted_documents_total +mongodb_mongod_metrics_ttl_passes_total +mongodb_mongod_op_counters_repl_total +mongodb_mongod_op_latencies_latency_total +mongodb_mongod_op_latencies_ops_total +mongodb_mongod_storage_engine +mongodb_mongod_wiredtiger_blockmanager_bytes_total +mongodb_mongod_wiredtiger_cache_bytes +mongodb_mongod_wiredtiger_cache_bytes_total +mongodb_mongod_wiredtiger_cache_evicted_total +mongodb_mongod_wiredtiger_cache_max_bytes +mongodb_mongod_wiredtiger_cache_overhead_percent +mongodb_mongod_wiredtiger_cache_pages +mongodb_mongod_wiredtiger_cache_pages_total +mongodb_mongod_wiredtiger_concurrent_transactions_available_tickets +mongodb_mongod_wiredtiger_concurrent_transactions_out_tickets +mongodb_mongod_wiredtiger_concurrent_transactions_total_tickets +mongodb_mongod_wiredtiger_log_bytes_total +mongodb_mongod_wiredtiger_log_operations_total 
+mongodb_mongod_wiredtiger_log_records_scanned_total +mongodb_mongod_wiredtiger_log_records_total +mongodb_mongod_wiredtiger_session_open_sessions_total +mongodb_mongod_wiredtiger_transactions_checkpoint_milliseconds +mongodb_mongod_wiredtiger_transactions_checkpoint_milliseconds_total +mongodb_mongod_wiredtiger_transactions_total diff --git a/pmm_psmdb-pbm_setup/test/test.py b/pmm_psmdb-pbm_setup/test/test.py index 77be1be6..6314f7ca 100755 --- a/pmm_psmdb-pbm_setup/test/test.py +++ b/pmm_psmdb-pbm_setup/test/test.py @@ -1,5 +1,6 @@ +import os + import requests -import docker import pytest import testinfra import time @@ -8,7 +9,9 @@ docker_rs101 = testinfra.get_host('docker://rs101') docker_rs102 = testinfra.get_host('docker://rs102') docker_rs103 = testinfra.get_host('docker://rs103') -testinfra_hosts = ['docker://rs101','docker://rs102','docker://rs103'] +testinfra_hosts = ['docker://rs101', 'docker://rs102', 'docker://rs103'] + +pmm_server_url = os.getenv('PMM_SERVER_CONTAINER_ADDRESS') pytest.location_id = '' pytest.service_id = '' @@ -18,37 +21,42 @@ pytest.pbm_backup_name = '' pytest.restore_id = '' + def test_pmm_services(): - req = requests.post('https://pmm-server/v1/inventory/Services/List',json={},headers = {"authorization": "Basic YWRtaW46cGFzc3dvcmQ="},verify=False) + req = requests.get(f"https://{pmm_server_url}/v1/inventory/services", json={}, + headers={"authorization": "Basic YWRtaW46cGFzc3dvcmQ="}, verify=False) print('\nGetting all mongodb services:') mongodb = req.json()['mongodb'] print(mongodb) assert mongodb - assert "service_id" in mongodb[0]['service_id'] + assert "service_id" in mongodb[0] for service in mongodb: assert "rs" or "mongos" in service['service_name'] if not "mongos" in service['service_name']: - pytest.service_id = service['service_id'] + pytest.service_id = service['service_id'] print('This service_id will be used in the next steps') print(pytest.service_id) + def test_pmm_add_location(): data = { 'name': 'test', 
'description': 'test', 's3_config': { - 'endpoint': 'http://minio:9000', - 'access_key': 'minio1234', - 'secret_key': 'minio1234', - 'bucket_name': 'bcp' - } + 'endpoint': 'http://minio:9000', + 'access_key': 'minio1234', + 'secret_key': 'minio1234', + 'bucket_name': 'bcp' } - req = requests.post('https://pmm-server/v1/management/backup/Locations/Add',json=data,headers = {"authorization": "Basic YWRtaW46cGFzc3dvcmQ="},verify=False) + } + req = requests.post(f"https://{pmm_server_url}/v1/backups/locations", json=data, + headers={"authorization": "Basic YWRtaW46cGFzc3dvcmQ="}, verify=False) print('\nAdding new location:') print(req.json()) - assert "location_id" in req.json()['location_id'] + assert "location_id" in req.json() pytest.location_id = req.json()['location_id'] + def test_pmm_logical_backup(): data = { 'service_id': pytest.service_id, @@ -56,19 +64,24 @@ def test_pmm_logical_backup(): 'name': 'test', 'description': 'test', 'retries': 0, - 'data_model': 'LOGICAL' - } - req = requests.post('https://pmm-server/v1/management/backup/Backups/Start',json=data,headers = {"authorization": "Basic YWRtaW46cGFzc3dvcmQ="},verify=False) + 'data_model': 'DATA_MODEL_LOGICAL' + } + + print(data) + req = requests.post(f"https://{pmm_server_url}/v1/backups:start", json=data, + headers={"authorization": "Basic YWRtaW46cGFzc3dvcmQ="}, verify=False) print('\nCreating logical backup:') print(req.json()) - assert "artifact_id" in req.json()['artifact_id'] + assert "artifact_id" in req.json() pytest.artifact_id = req.json()['artifact_id'] + def test_pmm_artifact(): backup_complete = False for i in range(600): done = False - req = requests.post('https://pmm-server/v1/management/backup/Artifacts/List',json={},headers = {"authorization": "Basic YWRtaW46cGFzc3dvcmQ="},verify=False) + req = requests.get(f"https://{pmm_server_url}/v1/backups/artifacts", json={}, + headers={"authorization": "Basic YWRtaW46cGFzc3dvcmQ="}, verify=False) assert req.json()['artifacts'] for artifact in 
req.json()['artifacts']: if artifact['artifact_id'] == pytest.artifact_id: @@ -80,7 +93,7 @@ def test_pmm_artifact(): print(artifact) pytest.artifact_pbm_meta = artifact['metadata_list'][0]['pbm_metadata']['name'] if "is_sharded_cluster" in artifact: - pytest.artifact_is_sharded = artifact['is_sharded_cluster'] + pytest.artifact_is_sharded = artifact['is_sharded_cluster'] break if done: backup_complete = True @@ -89,6 +102,7 @@ def test_pmm_artifact(): time.sleep(1) assert backup_complete + def test_pbm_artifact(): status = docker_rs101.check_output('pbm status --out json') parsed_status = json.loads(status) @@ -98,26 +112,30 @@ def test_pbm_artifact(): assert parsed_status['backups']['snapshot'][0]['status'] == "done" pytest.pbm_backup_name = parsed_status['backups']['snapshot'][0]['name'] + def test_pmm_start_restore(): if pytest.artifact_is_sharded == True: pytest.skip("Unsupported setup for restore from UI") data = { 'service_id': pytest.service_id, 'artifact_id': pytest.artifact_id - } - req = requests.post('https://pmm-server/v1/management/backup/Backups/Restore',json=data,headers = {"authorization": "Basic YWRtaW46cGFzc3dvcmQ="},verify=False) + } + req = requests.post(f"https://{pmm_server_url}/v1/backups/restores:start", json=data, + headers={"authorization": "Basic YWRtaW46cGFzc3dvcmQ="}, verify=False) print('\nRestoring logical backup:') print(req.json()) - assert "restore_id" in req.json()['restore_id'] + assert "restore_id" in req.json() pytest.restore_id = req.json()['restore_id'] + def test_pmm_restore(): if pytest.artifact_is_sharded == True: pytest.skip("Unsupported setup for restore from UI") restore_complete = False for i in range(600): done = False - req = requests.post('https://pmm-server/v1/management/backup/RestoreHistory/List',json={},headers = {"authorization": "Basic YWRtaW46cGFzc3dvcmQ="},verify=False) + req = requests.get(f"https://{pmm_server_url}/v1/backups/restores", json={}, + headers={"authorization": "Basic YWRtaW46cGFzc3dvcmQ="}, 
verify=False) assert req.json()['items'] for item in req.json()['items']: if item['restore_id'] == pytest.restore_id: @@ -135,6 +153,7 @@ def test_pmm_restore(): time.sleep(1) assert restore_complete + def test_pbm_restore(): if pytest.artifact_is_sharded == True: pytest.skip("Unsupported setup for restore from UI") @@ -149,3 +168,27 @@ def test_pbm_restore(): restore_complete = True assert restore_complete + +def test_metrics(): + pmm_admin_list = json.loads(docker_rs101.check_output('pmm-admin list --json', timeout=30)) + for agent in pmm_admin_list['agent']: + if agent['agent_type'] == 'AGENT_TYPE_MONGODB_EXPORTER': + agent_id = "mypass" + agent_port = agent['port'] + break + try: + command = f"curl -s http://pmm:{agent_id}@127.0.0.1:{agent_port}/metrics" + metrics = docker_rs101.run(command, timeout=30) + assert metrics.exit_status == 0, f"Curl command failed with exit status {metrics.exit_status}" + except Exception as e: + pytest.fail(f"Fail to get metrics from exporter") + + try: + with open("expected_metrics.txt", "r") as f: + expected_metrics = {line.strip() for line in f if line.strip()} + except FileNotFoundError: + pytest.fail("Expected metrics file not found") + + for metric in expected_metrics: + if metric not in metrics.stdout: + pytest.fail(f"Metric '{metric}' is missing from the exporter output") diff --git a/pmm_psmdb_diffauth_setup/Dockerfile-kerberos b/pmm_psmdb_diffauth_setup/Dockerfile-kerberos new file mode 100644 index 00000000..97e412ba --- /dev/null +++ b/pmm_psmdb_diffauth_setup/Dockerfile-kerberos @@ -0,0 +1,5 @@ +FROM alpine +RUN apk add --no-cache bash krb5 krb5-server krb5-pkinit +COPY conf/configure_krb5.sh /var/lib/krb5kdc/ +EXPOSE 88/udp +ENTRYPOINT [ "sh", "/var/lib/krb5kdc/configure_krb5.sh"] diff --git a/pmm_psmdb_diffauth_setup/conf/configure_krb5.sh b/pmm_psmdb_diffauth_setup/conf/configure_krb5.sh new file mode 100755 index 00000000..a46c1926 --- /dev/null +++ b/pmm_psmdb_diffauth_setup/conf/configure_krb5.sh @@ -0,0 +1,29 @@ 
+#! /env/sh + +cat > /etc/krb5.conf << EOL +[libdefaults] + default_realm = PERCONATEST.COM + forwardable = true + dns_lookup_realm = false + dns_lookup_kdc = false + ignore_acceptor_hostname = true + rdns = false +[realms] + PERCONATEST.COM = { + kdc_ports = 88 + kdc = kerberos + admin_server = kerberos + } +[domain_realm] + .perconatest.com = PERCONATEST.COM + perconatest.com = PERCONATEST.COM + kerberos = PERCONATEST.COM +EOL + +kdb5_util -P password create -s +kadmin.local -q "addprinc -pw password root/admin" +kadmin.local -q "addprinc -pw mongodb mongodb/psmdb-server" +kadmin.local -q "addprinc -pw password1 pmm-test" +kadmin.local -q "ktadd -k /keytabs/mongodb.keytab mongodb/psmdb-server@PERCONATEST.COM" + +krb5kdc -n diff --git a/pmm_psmdb_diffauth_setup/conf/mongod.conf b/pmm_psmdb_diffauth_setup/conf/mongod.conf index 4800c3fb..125aa695 100644 --- a/pmm_psmdb_diffauth_setup/conf/mongod.conf +++ b/pmm_psmdb_diffauth_setup/conf/mongod.conf @@ -9,10 +9,6 @@ storage: systemLog: destination: syslog -processManagement: -# fork: true - pidFilePath: /var/run/mongod.pid - net: port: 27017 bindIp: 0.0.0.0 @@ -30,16 +26,16 @@ security: validateLDAPServerConfig: false transportSecurity: none servers: ldap-server:1389 - userToDNMapping: '[{match: "arn:aws:iam::(.+):user/(.+)|CN=(.+)", substitution: "cn={1}{2},ou=users,dc=example,dc=org"}]' + userToDNMapping: '[{match: "arn:aws:iam::(.+):user/(.+)|CN=(.+)|([^@]+)@PERCONATEST.COM", substitution: "cn={1}{2}{3},ou=users,dc=example,dc=org"}]' authz: queryTemplate: 'dc=example,dc=org??sub?(&(objectClass=groupOfNames)(member={USER}))' setParameter: - authenticationMechanisms: SCRAM-SHA-1,PLAIN,MONGODB-X509,MONGODB-AWS + authenticationMechanisms: SCRAM-SHA-1,PLAIN,MONGODB-X509,MONGODB-AWS,GSSAPI replication: replSetName: rs0 operationProfiling: - slowOpThresholdMs: 200 mode: all - rateLimit: 100 + slowOpThresholdMs: 1 + diff --git a/pmm_psmdb_diffauth_setup/docker-compose-pmm-psmdb.yml 
b/pmm_psmdb_diffauth_setup/docker-compose-pmm-psmdb.yml index e6fd74be..c31373c8 100644 --- a/pmm_psmdb_diffauth_setup/docker-compose-pmm-psmdb.yml +++ b/pmm_psmdb_diffauth_setup/docker-compose-pmm-psmdb.yml @@ -10,7 +10,7 @@ services: - PMM_REPO=${PMM_REPO:-experimental} - PBM_VERSION=${PBM_VERSION:-latest} - PSMDB_VERSION=${PSMDB_VERSION:-latest} - - PMM_CLIENT_VERSION=${PMM_CLIENT_VERSION:-latest} + - PMM_CLIENT_VERSION=${PMM_CLIENT_VERSION:-3-dev-latest} command: /bin/bash test: @@ -34,21 +34,27 @@ services: - PBM_MONGODB_URI=mongodb://${PBM_USER:-pbm}:${PBM_PASS:-pbmpass}@127.0.0.1:27017 - PMM_AGENT_SERVER_USERNAME=admin - PMM_AGENT_SERVER_PASSWORD=admin - - PMM_AGENT_SERVER_ADDRESS=pmm-server:443 + - PMM_AGENT_SERVER_ADDRESS=pmm-server:8443 + - PMM_AGENT_PORTS_MIN=43000 + - PMM_AGENT_PORTS_MAX=43020 entrypoint: - bash - -c - | cp /mongodb_certs/ca-certs.pem /etc/pki/tls/certs/ + chown -R mongod:mongod /keytabs exec /usr/sbin/init volumes: - ./conf:/etc/mongod - ./certs:/mongodb_certs - /sys/fs/cgroup:/sys/fs/cgroup:rw - 'psmdb-server-data:/data/db' + - keytabs:/keytabs + - ../pmm_psmdb-pbm_setup/conf/datagen:/etc/datagen:ro privileged: true + cgroup: host ports: - - "42000-42020:42000-42020" + - "43000-43020:43000-43020" healthcheck: test: | test $$(mongo --quiet --eval "try { rs.initiate().ok } catch (_) {} rs.status().ok") -eq 1 @@ -58,16 +64,23 @@ services: depends_on: pmm-server: condition: service_healthy + kerberos: + condition: service_healthy pmm-server: container_name: pmm-server hostname: pmm-server - image: ${PMM_IMAGE:-perconalab/pmm-server:dev-latest} + image: ${PMM_IMAGE:-perconalab/pmm-server:3-dev-latest} + healthcheck: + test: [ "CMD", "curl", "-f", "http://127.0.0.1:8080/ping" ] + interval: 3s + timeout: 2s + retries: 20 environment: - "PMM_DEBUG=1" ports: - - "443:443" - - "8081:80" + - "443:8443" + - "8081:8080" volumes: - ./certs:/srv/nginx - 'pmm-server-data:/srv' @@ -75,7 +88,7 @@ services: ldap-server: container_name: ldap-server 
hostname: ldap-server - image: bitnami/openldap:2 + image: bitnamilegacy/openldap environment: - LDAP_ADMIN_USERNAME=admin - LDAP_ADMIN_PASSWORD=adminpassword @@ -85,11 +98,30 @@ services: - '1389:1389' - '1636:1636' volumes: - - 'openldap-data:/bitnami/openldap' + - 'openldap-data:/bitnamilegacy/openldap' + + kerberos: + image: kerberos/local + build: + dockerfile: ./Dockerfile-kerberos + context: . + container_name: kerberos + hostname: kerberos + environment: + - "KRB5_TRACE=/dev/stderr" + volumes: + - keytabs:/keytabs + healthcheck: + test: ["CMD", "kadmin.local", "-q", "listprincs"] + interval: 2s + timeout: 1s + retries: 5 minio: image: minio/minio container_name: minio + ports: + - "9001:9000" volumes: - /tmp/minio/backups:/backups environment: @@ -106,7 +138,7 @@ services: depends_on: - minio entrypoint: > - /bin/sh -c " sleep 5; /usr/bin/mc config host add myminio http://minio:9000 minio1234 minio1234; /usr/bin/mc mb myminio/bcp; exit 0; " + /bin/sh -c " sleep 5; /usr/bin/mc alias set myminio http://minio:9000 minio1234 minio1234; /usr/bin/mc mb myminio/bcp; exit 0; " volumes: psmdb-server-data: @@ -117,3 +149,5 @@ volumes: driver: local openldap-data: driver: local + keytabs: + driver: local diff --git a/pmm_psmdb_diffauth_setup/generate-certs.sh b/pmm_psmdb_diffauth_setup/generate-certs.sh new file mode 100644 index 00000000..441360b3 --- /dev/null +++ b/pmm_psmdb_diffauth_setup/generate-certs.sh @@ -0,0 +1,16 @@ +#Generate certificates for tests +rm -rf easy-rsa pki certs && mkdir certs +git clone https://github.com/OpenVPN/easy-rsa.git +./easy-rsa/easyrsa3/easyrsa init-pki +./easy-rsa/easyrsa3/easyrsa --req-cn=Percona --batch build-ca nopass +./easy-rsa/easyrsa3/easyrsa --req-ou=server --subject-alt-name=DNS:pmm-server --batch build-server-full pmm-server nopass +./easy-rsa/easyrsa3/easyrsa --req-ou=server --subject-alt-name=DNS:psmdb-server --batch build-server-full psmdb-server nopass +./easy-rsa/easyrsa3/easyrsa --req-ou=client --batch 
build-client-full pmm-test nopass +openssl dhparam -out certs/dhparam.pem 2048 + +cp pki/ca.crt certs/ca-certs.pem +cp pki/private/pmm-server.key certs/certificate.key +cp pki/issued/pmm-server.crt certs/certificate.crt +cat pki/private/psmdb-server.key pki/issued/psmdb-server.crt > certs/psmdb-server.pem +cat pki/private/pmm-test.key pki/issued/pmm-test.crt > certs/client.pem +find certs -type f -exec chmod 644 {} \; diff --git a/pmm_psmdb_diffauth_setup/init/setup_psmdb.js b/pmm_psmdb_diffauth_setup/init/setup_psmdb.js index f1ac4c16..3614f545 100644 --- a/pmm_psmdb_diffauth_setup/init/setup_psmdb.js +++ b/pmm_psmdb_diffauth_setup/init/setup_psmdb.js @@ -1,4 +1,4 @@ -var db = connect("mongodb://dba:secret@localhost:27017/admin"); +var db = connect("mongodb://root:root@localhost:27017/admin"); db.getSiblingDB("admin").createRole({ "role": "pbmAnyAction", "privileges": [{ @@ -26,7 +26,7 @@ db.getSiblingDB("admin").createRole({ roles:[] }); db.getSiblingDB("admin").createRole({ - role: "cn=readers,ou=users,dc=example,dc=org", + role: "cn=readers,ou=groups,dc=example,dc=org", privileges: [], roles: [ { role: "explainRole", db: "admin" }, diff --git a/pmm_psmdb_diffauth_setup/test-auth.sh b/pmm_psmdb_diffauth_setup/test-auth.sh index 3abe7a99..cfc7ebb9 100755 --- a/pmm_psmdb_diffauth_setup/test-auth.sh +++ b/pmm_psmdb_diffauth_setup/test-auth.sh @@ -19,42 +19,32 @@ if [[ -n "$PSMDB_VERSION" ]] && [[ "$PSMDB_VERSION" == *"4.2."* ]]; then export SKIP_AWS_TESTS="true" fi -#Generate certificates for tests -rm -rf easy-rsa pki certs && mkdir certs -git clone https://github.com/OpenVPN/easy-rsa.git -./easy-rsa/easyrsa3/easyrsa init-pki -./easy-rsa/easyrsa3/easyrsa --req-cn=Percona --batch build-ca nopass -./easy-rsa/easyrsa3/easyrsa --req-ou=server --subject-alt-name=DNS:pmm-server --batch build-server-full pmm-server nopass -./easy-rsa/easyrsa3/easyrsa --req-ou=server --subject-alt-name=DNS:psmdb-server --batch build-server-full psmdb-server nopass 
-./easy-rsa/easyrsa3/easyrsa --req-ou=client --batch build-client-full pmm-test nopass -openssl dhparam -out certs/dhparam.pem 2048 +if [ -z "$ADMIN_PASSWORD" ]; then + export ADMIN_PASSWORD=admin +fi -cp pki/ca.crt certs/ca-certs.pem -cp pki/private/pmm-server.key certs/certificate.key -cp pki/issued/pmm-server.crt certs/certificate.crt -cat pki/private/psmdb-server.key pki/issued/psmdb-server.crt > certs/psmdb-server.pem -cat pki/private/pmm-test.key pki/issued/pmm-test.crt > certs/client.pem -find certs -type f -exec chmod 644 {} \; +bash -e ./generate-certs.sh #Start setup -docker-compose -f docker-compose-pmm-psmdb.yml build -docker-compose -f docker-compose-pmm-psmdb.yml up -d +docker compose -f docker-compose-pmm-psmdb.yml down -v --remove-orphans +docker compose -f docker-compose-pmm-psmdb.yml build +docker compose -f docker-compose-pmm-psmdb.yml up -d #Add users -docker-compose -f docker-compose-pmm-psmdb.yml exec -T psmdb-server mongo --quiet << EOF +docker compose -f docker-compose-pmm-psmdb.yml exec -T psmdb-server mongo --quiet << EOF db.getSiblingDB("admin").createUser({ user: "root", pwd: "root", roles: [ "root", "userAdminAnyDatabase", "clusterAdmin" ] }); EOF -docker-compose -f docker-compose-pmm-psmdb.yml exec -T psmdb-server mongo --quiet "mongodb://root:root@localhost/?replicaSet=rs0" < init/setup_psmdb.js +docker compose -f docker-compose-pmm-psmdb.yml exec -T psmdb-server mongo --quiet "mongodb://root:root@localhost/?replicaSet=rs0" < init/setup_psmdb.js #Configure PBM -docker-compose -f docker-compose-pmm-psmdb.yml exec -T psmdb-server bash -c "echo \"PBM_MONGODB_URI=mongodb://pbm:pbmpass@127.0.0.1:27017\" > /etc/sysconfig/pbm-agent" -docker-compose -f docker-compose-pmm-psmdb.yml exec -T psmdb-server systemctl restart pbm-agent +docker compose -f docker-compose-pmm-psmdb.yml exec -T psmdb-server bash -c "echo \"PBM_MONGODB_URI=mongodb://pbm:pbmpass@127.0.0.1:27017\" > /etc/sysconfig/pbm-agent" +docker compose -f docker-compose-pmm-psmdb.yml 
exec -T psmdb-server systemctl restart pbm-agent #Configure PMM set +e i=1 while [ $i -le 3 ]; do - output=$(docker-compose -f docker-compose-pmm-psmdb.yml exec -T psmdb-server pmm-agent setup 2>&1) + output=$(docker compose -f docker-compose-pmm-psmdb.yml exec -T psmdb-server pmm-agent setup --config-file=/usr/local/percona/pmm/config/pmm-agent.yaml --server-address=pmm-server:8443 --metrics-mode=auto --server-username=admin --server-password=${ADMIN_PASSWORD} --server-insecure-tls) exit_code=$? if [ $exit_code -ne 0 ] && [[ $output == *"500 Internal Server Error"* ]]; then @@ -65,10 +55,16 @@ while [ $i -le 3 ]; do sleep 1 done +#Add Mongo Service +random_number=$RANDOM +docker compose -f docker-compose-pmm-psmdb.yml exec -T psmdb-server pmm-admin add mongodb psmdb-server_${random_number} --agent-password=mypass --username=pmm_mongodb --password="5M](Q%q/U+YQ<^m" --host psmdb-server --port 27017 --tls --tls-certificate-key-file=/mongodb_certs/client.pem --tls-ca-file=/mongodb_certs/ca-certs.pem --cluster=mycluster +#Add some data +docker compose -f docker-compose-pmm-psmdb.yml exec -T psmdb-server mgodatagen -f /etc/datagen/replicaset.json --username=pmm_mongodb --password="5M](Q%q/U+YQ<^m" --host psmdb-server --port 27017 --tlsCertificateKeyFile=/mongodb_certs/client.pem --tlsCAFile=/mongodb_certs/ca-certs.pem + tests=${TESTS:-yes} if [ $tests = "yes" ]; then echo "running tests" - docker-compose -f docker-compose-pmm-psmdb.yml run test pytest -s -x --verbose test.py + output=$(docker compose -f docker-compose-pmm-psmdb.yml run test pytest -s --verbose test.py) else echo "skipping tests" fi @@ -76,10 +72,15 @@ fi cleanup=${CLEANUP:-yes} if [ $cleanup = "yes" ]; then echo "cleanup" - docker-compose -f docker-compose-pmm-psmdb.yml down -v --remove-orphans + docker compose -f docker-compose-pmm-psmdb.yml down -v --remove-orphans if [[ -n "$PSMDB_VERSION" ]] && [[ "$PSMDB_VERSION" == *"4.2"* ]]; then sed -i 's/MONGODB-X509/MONGODB-X509,MONGODB-AWS/' conf/mongod.conf 
fi else echo "skipping cleanup" fi + +echo "$output" +if echo "$output" | grep -q "\bFAILED\b"; then + exit 1 +fi diff --git a/pmm_psmdb_diffauth_setup/test/expected_metrics.txt b/pmm_psmdb_diffauth_setup/test/expected_metrics.txt new file mode 100644 index 00000000..4a65db21 --- /dev/null +++ b/pmm_psmdb_diffauth_setup/test/expected_metrics.txt @@ -0,0 +1,38 @@ +mongodb_up +mongodb_mongod_global_lock_client +mongodb_mongod_global_lock_current_queue +mongodb_mongod_instance_uptime_seconds +mongodb_mongod_locks_time_acquiring_global_microseconds_total +mongodb_mongod_metrics_cursor_open +mongodb_mongod_metrics_cursor_timed_out_total +mongodb_mongod_metrics_document_total +mongodb_mongod_metrics_get_last_error_wtime_num_total +mongodb_mongod_metrics_get_last_error_wtime_total_milliseconds +mongodb_mongod_metrics_get_last_error_wtimeouts_total +mongodb_mongod_metrics_operation_total +mongodb_mongod_metrics_query_executor_total +mongodb_mongod_metrics_ttl_deleted_documents_total +mongodb_mongod_metrics_ttl_passes_total +mongodb_mongod_op_counters_repl_total +mongodb_mongod_op_latencies_latency_total +mongodb_mongod_op_latencies_ops_total +mongodb_mongod_storage_engine +mongodb_mongod_wiredtiger_blockmanager_bytes_total +mongodb_mongod_wiredtiger_cache_bytes +mongodb_mongod_wiredtiger_cache_bytes_total +mongodb_mongod_wiredtiger_cache_evicted_total +mongodb_mongod_wiredtiger_cache_max_bytes +mongodb_mongod_wiredtiger_cache_overhead_percent +mongodb_mongod_wiredtiger_cache_pages +mongodb_mongod_wiredtiger_cache_pages_total +mongodb_mongod_wiredtiger_concurrent_transactions_available_tickets +mongodb_mongod_wiredtiger_concurrent_transactions_out_tickets +mongodb_mongod_wiredtiger_concurrent_transactions_total_tickets +mongodb_mongod_wiredtiger_log_bytes_total +mongodb_mongod_wiredtiger_log_operations_total +mongodb_mongod_wiredtiger_log_records_scanned_total +mongodb_mongod_wiredtiger_log_records_total +mongodb_mongod_wiredtiger_session_open_sessions_total 
+mongodb_mongod_wiredtiger_transactions_checkpoint_milliseconds +mongodb_mongod_wiredtiger_transactions_checkpoint_milliseconds_total +mongodb_mongod_wiredtiger_transactions_total diff --git a/pmm_psmdb_diffauth_setup/test/test.py b/pmm_psmdb_diffauth_setup/test/test.py index 0598adde..7b6a5c93 100755 --- a/pmm_psmdb_diffauth_setup/test/test.py +++ b/pmm_psmdb_diffauth_setup/test/test.py @@ -2,22 +2,33 @@ import pytest import testinfra import re -import datetime import time import os import json -import requests -env_vars = ['AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY','AWS_USERNAME'] +env_vars = ['AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY', 'AWS_USERNAME'] client = docker.from_env() docker_pmm_client = testinfra.get_host('docker://psmdb-server') + def run_test(add_db_command): try: - docker_pmm_client.check_output('pmm-admin remove mongodb psmdb-server', timeout=30) - except AssertionError: - pass + output = docker_pmm_client.check_output('pmm-admin list --json', timeout=30) + services_info = json.loads(output) + except (AssertionError, json.JSONDecodeError): + pytest.fail("Failed to get or parse service list from pmm-admin") + services_to_remove = [] + for service in services_info.get("service", []): + service_type = service.get("service_type") + service_name = service.get("service_name", "") + if service_type == "SERVICE_TYPE_MONGODB_SERVICE" and service_name.startswith("psmdb-server"): + services_to_remove.append(service_name) + for service_name in services_to_remove: + try: + docker_pmm_client.check_output(f'pmm-admin remove mongodb {service_name}', timeout=30) + except AssertionError: + pass try: docker_pmm_client.check_output(add_db_command, timeout=30) except AssertionError: @@ -26,67 +37,107 @@ def run_test(add_db_command): pmm_admin_list = json.loads(docker_pmm_client.check_output('pmm-admin list --json', timeout=30)) for agent in pmm_admin_list['agent']: - if agent['agent_type'] == 'MONGODB_EXPORTER': - agent_id = agent['agent_id'] - agent_port = 
agent['port'] - break + if agent['agent_type'] == 'AGENT_TYPE_MONGODB_EXPORTER': + agent_id = "mypass" if "GSSAPI" not in add_db_command else agent['agent_id'] + agent_port = agent['port'] + break + try: + command = f"curl -s http://pmm:{agent_id}@127.0.0.1:{agent_port}/metrics" + metrics = docker_pmm_client.run(command, timeout=30) + assert metrics.exit_status == 0, f"Curl command failed with exit status {metrics.exit_status}" + except Exception as e: + pytest.fail(f"Fail to get metrics from exporter") - url = f'http://psmdb-server:{agent_port}/metrics' try: - response = requests.get(url, auth=('pmm', agent_id), timeout=5) - assert response.status_code == 200, f"Request for metrics failed with status code {response.status_code}" - pattern = r'mongodb_up (\d+)' - result = re.search(pattern, response.text) - assert result is not None, "MongoDB related data isn't exported" - except requests.exceptions.ConnectionError: - pytest.fail(f"Connection to {url} failed") + with open("expected_metrics.txt", "r") as f: + expected_metrics = {line.strip() for line in f if line.strip()} + except FileNotFoundError: + pytest.fail("Expected metrics file not found") + + for metric in expected_metrics: + if metric not in metrics.stdout: + pytest.fail(f"Metric '{metric}' is missing from the exporter output") + def test_simple_auth_wo_tls(): - run_test('pmm-admin add mongodb psmdb-server --username=pmm_mongodb --password="5M](Q%q/U+YQ<^m" '\ - '--host psmdb-server --port 27017') + run_test('pmm-admin add mongodb psmdb-server --agent-password=mypass --username=pmm_mongodb --password="5M](Q%q/U+YQ<^m" ''--host ' + 'psmdb-server --port 27017') + def test_simple_auth_tls(): - run_test('pmm-admin add mongodb psmdb-server --username=pmm_mongodb --password="5M](Q%q/U+YQ<^m" '\ - '--host psmdb-server --port 27017 '\ - '--tls --tls-certificate-key-file=/mongodb_certs/client.pem --tls-ca-file=/mongodb_certs/ca-certs.pem '\ - '--cluster=mycluster') + run_test('pmm-admin add mongodb psmdb-server 
--agent-password=mypass --username=pmm_mongodb --password="5M](Q%q/U+YQ<^m" ' + '--host psmdb-server --port 27017 ' + '--tls --tls-certificate-key-file=/mongodb_certs/client.pem --tls-ca-file=/mongodb_certs/ca-certs.pem ' + '--cluster=mycluster') + +##### +# All tests for external authentication methods (X509, LDAP, Kerberos, AWS) rely on the `mongod` configuration to handle +# authentication using the selected method, followed by authorization via LDAP. +# +# Therefore, no users are added to `$external` database before testing. Instead, after successful authentication +# against the selected service, the username is transformed based on the pattern below to match LDAP user +# `cn=pmm-test,ou=users,dc=example,dc=org`. +# This user is preconfigured on LDAP server and, after authorization, inherits the privileges assigned in +# MongoDB to its default group, `cn=readers,ou=users,dc=example,dc=org`. +# +# Transformation pattern from `mongod` configuration: +# [{match: "arn:aws:iam::(.+):user/(.+)|CN=(.+)|([^@]+)@PERCONATEST.COM", substitution: "cn={1}{2}{3},ou=users,dc=example,dc=org"}] +##### def test_x509_auth(): - run_test('pmm-admin add mongodb psmdb-server --host=psmdb-server --port 27017 '\ - '--tls --tls-certificate-key-file=/mongodb_certs/client.pem --tls-ca-file=/mongodb_certs/ca-certs.pem '\ - '--authentication-mechanism=MONGODB-X509 --authentication-database=\'$external\' '\ + run_test('pmm-admin add mongodb psmdb-server --agent-password=mypass --host=psmdb-server --port 27017 ' + '--tls --tls-certificate-key-file=/mongodb_certs/client.pem --tls-ca-file=/mongodb_certs/ca-certs.pem ' + '--authentication-mechanism=MONGODB-X509 --authentication-database=\'$external\' ' '--cluster=mycluster') + def test_ldap_auth_wo_tls(): - run_test('pmm-admin add mongodb psmdb-server --username="CN=pmm-test" --password=password1 '\ - '--host=psmdb-server --port 27017 '\ - '--authentication-mechanism=PLAIN --authentication-database=\'$external\' '\ + run_test('pmm-admin add 
mongodb psmdb-server --agent-password=mypass --username="CN=pmm-test" --password=password1 ' + '--host=psmdb-server --port 27017 ' + '--authentication-mechanism=PLAIN --authentication-database=\'$external\' ' '--cluster=mycluster') + def test_ldap_auth_tls(): - run_test('pmm-admin add mongodb psmdb-server --username="CN=pmm-test" --password=password1 '\ - '--host=psmdb-server --port 27017 '\ - '--authentication-mechanism=PLAIN --authentication-database=\'$external\' '\ - '--tls --tls-certificate-key-file=/mongodb_certs/client.pem --tls-ca-file=/mongodb_certs/ca-certs.pem '\ + run_test('pmm-admin add mongodb psmdb-server --agent-password=mypass --username="CN=pmm-test" --password=password1 ' + '--host=psmdb-server --port 27017 ' + '--authentication-mechanism=PLAIN --authentication-database=\'$external\' ' + '--tls --tls-certificate-key-file=/mongodb_certs/client.pem --tls-ca-file=/mongodb_certs/ca-certs.pem ' + '--cluster=mycluster') + +@pytest.mark.skip(reason="Kerberos support in PMM was reverted") +def test_kerberos_auth_wo_tls(): + run_test('pmm-admin add mongodb psmdb-server --username="pmm-test@PERCONATEST.COM" --password=password1 ' + '--host=psmdb-server --port 27017 ' + '--authentication-mechanism=GSSAPI --authentication-database=\'$external\' ' + '--cluster=mycluster') + +@pytest.mark.skip(reason="Kerberos support in PMM was reverted") +def test_kerberos_auth_tls(): + run_test('pmm-admin add mongodb psmdb-server --username="pmm-test@PERCONATEST.COM" --password=password1 ' + '--host=psmdb-server --port 27017 ' + '--authentication-mechanism=GSSAPI --authentication-database=\'$external\' ' + '--tls --tls-certificate-key-file=/mongodb_certs/client.pem --tls-ca-file=/mongodb_certs/ca-certs.pem ' '--cluster=mycluster') @pytest.mark.skipif( any(not os.environ.get(var) for var in env_vars) or os.environ.get('SKIP_AWS_TESTS') == 'true', reason=f"One or more of AWS env var isn't defined or SKIP_AWS_TESTS is set to true") def test_aws_auth_wo_tls(): - 
run_test('pmm-admin add mongodb psmdb-server --username='+ os.environ.get('AWS_ACCESS_KEY_ID') +' '\ - '--password='+ os.environ.get('AWS_SECRET_ACCESS_KEY') +' '\ - '--host=psmdb-server --port 27017 '\ - '--authentication-mechanism=MONGODB-AWS --authentication-database=\'$external\' '\ + run_test(f'pmm-admin add mongodb psmdb-server --agent-password=mypass --username={os.environ.get("AWS_ACCESS_KEY_ID")}' + f'--password={os.environ.get("AWS_SECRET_ACCESS_KEY")}' + '--host=psmdb-server --port 27017 ' + '--authentication-mechanism=MONGODB-AWS --authentication-database=\'$external\' ' '--cluster=mycluster') + @pytest.mark.skipif( any(not os.environ.get(var) for var in env_vars) or os.environ.get('SKIP_AWS_TESTS') == 'true', reason=f"One or more of AWS env var isn't defined or SKIP_AWS_TESTS is set to true") def test_aws_auth_tls(): - run_test('pmm-admin add mongodb psmdb-server --username='+ os.environ.get('AWS_ACCESS_KEY_ID') +' '\ - '--password='+ os.environ.get('AWS_SECRET_ACCESS_KEY') +' '\ - '--host=psmdb-server --port 27017 '\ - '--authentication-mechanism=MONGODB-AWS --authentication-database=\'$external\' '\ - '--tls --tls-certificate-key-file=/mongodb_certs/client.pem --tls-ca-file=/mongodb_certs/ca-certs.pem '\ + run_test(f'pmm-admin add mongodb psmdb-server --agent-password=mypass --username={os.environ.get("AWS_ACCESS_KEY_ID")}' + f'--password={os.environ.get("AWS_SECRET_ACCESS_KEY")}' + '--host=psmdb-server --port 27017 ' + '--authentication-mechanism=MONGODB-AWS --authentication-database=\'$external\' ' + '--tls --tls-certificate-key-file=/mongodb_certs/client.pem --tls-ca-file=/mongodb_certs/ca-certs.pem ' '--cluster=mycluster') diff --git a/pmm_qa/Dockerfile b/pmm_qa/Dockerfile new file mode 100644 index 00000000..f78c2c2c --- /dev/null +++ b/pmm_qa/Dockerfile @@ -0,0 +1,16 @@ +FROM php:8.1-alpine + +RUN set -ex \ + && apk --no-cache add postgresql-libs postgresql-dev \ + && docker-php-ext-install pgsql pdo_pgsql \ + && docker-php-ext-install mysqli 
&& docker-php-ext-enable mysqli \ + && apk del postgresql-dev + +RUN apk --update add --virtual build-dependencies build-base openssl-dev autoconf \ + && pecl install mongodb \ + && docker-php-ext-enable mongodb \ + && apk del build-dependencies build-base openssl-dev autoconf \ + && rm -rf /var/cache/apk/* + +RUN curl -sS https://getcomposer.org/installer | php -- --install-dir=/usr/bin/ --filename=composer +RUN composer require mongodb/mongodb diff --git a/pmm_qa/Makefile b/pmm_qa/Makefile new file mode 100644 index 00000000..888b96b4 --- /dev/null +++ b/pmm_qa/Makefile @@ -0,0 +1,21 @@ +default: help + +help: ## Display this help message. + @echo "Please use \`make \` where is one of:" + @grep '^[a-zA-Z]' $(MAKEFILE_LIST) | \ + awk -F ':.*?## ' 'NF==2 {printf " %-26s%s\n", $$1, $$2}' + +init: ## Create venv and install dependencies + python3 -m venv pmm_framework + . pmm_framework/bin/activate + python3 -m pip install -r requirements.txt + +init-ubuntu: ## Create venv and install dependencies for ubuntu + sudo chown $(whoami) -R . + apt install -y python3.10-venv + python3 -m venv virtenv + . virtenv/bin/activate + python --version + which python + python -m pip install -r requirements.txt + diff --git a/pmm_qa/README.md b/pmm_qa/README.md new file mode 100644 index 00000000..fa9c2685 --- /dev/null +++ b/pmm_qa/README.md @@ -0,0 +1,17 @@ +# WIP - PMM-QA Framework Documentation +Sets up all types of dbs base one or with replication sets. + +Available flags: +- +- ```--database``` Sets up selected DB available options: + - ```ps``` - Sets up Percona server, example: ```--database ps=8.4,SETUP_TYPE=gr,QUERY_SOURCE=perfschema``` + - parameters: + - SETUP_TYPE: + - gr - Group replication + - QUERY_SOURCE - The Performance Schema provides detailed, real-time metrics on various server + performance aspects, while the Slow Query Log records queries that exceed a defined execution + time threshold, helping to identify inefficient queries. 
+ - perfschema + - slowlog + - COUNT - Count of percona server dbs created. + - Available versions: ```8.4```, ```8.0```, ```5.7``` \ No newline at end of file diff --git a/pmm_qa/__init__.py b/pmm_qa/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/pmm_qa/client_container_ms_setup.sh b/pmm_qa/client_container_ms_setup.sh new file mode 100644 index 00000000..c4004c24 --- /dev/null +++ b/pmm_qa/client_container_ms_setup.sh @@ -0,0 +1,130 @@ +#!/bin/bash + +while [ $# -gt 0 ]; do + + if [[ $1 == *"--"* ]]; then + param="${1/--/}" + declare $param="$2" + fi + + shift +done + +if [ -z "$number_of_nodes" ] +then + export number_of_nodes=1 +fi + +if [ -z "$ms_version" ] +then + export ms_version="8.0" +fi + +if [ -z "$ms_tarball" ] +then + export ms_tarball="https://dev.mysql.com/get/Downloads/MySQL-8.0/mysql-8.0.40-linux-glibc2.17-x86_64-minimal.tar.xz" +fi + +if [ -z "$query_source" ] +then + export query_source=perfschema +fi + +touch sysbench_prepare.txt +touch sysbench_run.txt + +## Setup DB deployer +curl -L -s https://bit.ly/dbdeployer | bash || true + +### Get the tarball +wget ${ms_tarball} +mkdir ~/ms${ms_version} || true +mkdir /tmp || true +chmod 1777 /tmp || true + +## Deploy DB deployer +export MS_PORT=3308 +export tar_ball_name=$(ls mysql-*) +dbdeployer unpack ${tar_ball_name} --sandbox-binary=~/ms${ms_version} --overwrite +export db_version_sandbox=$(ls ~/ms${ms_version}) +export SERVICE_RANDOM_NUMBER=$((1 + $RANDOM % 9999)) + +# Initialize my_cnf_options +my_cnf_options="" + +# Check if ps_version is 8.4 or greater to enable the plugin to change the password +if [[ "$ms_version" =~ ^8\.[4-9]([0-9])? || "$ms_version" =~ ^[9-9][0-9]\. ]]; then + my_cnf_options="mysql-native-password=ON" +fi + +if [[ "$number_of_nodes" == 1 ]];then + if [[ ! 
-z $group_replication ]]; then + dbdeployer deploy --topology=group replication ${db_version_sandbox} --single-primary --sandbox-binary=~/ms${ms_version} --remote-access=% --bind-address=0.0.0.0 --force ${my_cnf_options:+--my-cnf-options="$my_cnf_options"} + export db_sandbox=$(dbdeployer sandboxes | awk -F' ' '{print $1}') + node_port=`dbdeployer sandboxes --header | grep ${db_version_sandbox} | grep 'group-single-primary' | awk -F'[' '{print $2}' | awk -F' ' '{print $1}'` + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $node_port -e "ALTER USER 'msandbox'@'localhost' IDENTIFIED WITH mysql_native_password BY 'msandbox';" + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $node_port -e "ALTER USER 'root'@'localhost' IDENTIFIED WITH mysql_native_password BY 'GRgrO9301RuF';" + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $node_port -e "SET GLOBAL innodb_monitor_enable=all;" + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $node_port -e "UPDATE performance_schema.setup_consumers SET ENABLED = 'YES' WHERE NAME LIKE '%statements%';" + else + dbdeployer deploy single ${db_version_sandbox} --sandbox-binary=~/ms${ms_version} --port=$MS_PORT --remote-access=% --bind-address=0.0.0.0 --force ${my_cnf_options:+--my-cnf-options="$my_cnf_options"} + export db_sandbox=$(dbdeployer sandboxes | awk -F' ' '{print $1}') + node_port=`dbdeployer sandboxes --header | grep ${db_version_sandbox} | grep 'single' | awk -F'[' '{print $2}' | awk -F' ' '{print $1}'` + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $node_port -e "ALTER USER 'msandbox'@'localhost' IDENTIFIED WITH mysql_native_password BY 'msandbox';" + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $node_port -e "ALTER USER 'root'@'localhost' IDENTIFIED WITH mysql_native_password BY 'GRgrO9301RuF';" + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $node_port -e "SET GLOBAL innodb_monitor_enable=all;" + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $node_port -e "UPDATE performance_schema.setup_consumers SET 
ENABLED = 'YES' WHERE NAME LIKE '%statements%';" + if [[ "${query_source}" == "slowlog" ]]; then + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $node_port -e "SET GLOBAL slow_query_log='ON';" + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $node_port -e "SET GLOBAL long_query_time=0;" + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $node_port -e "SET GLOBAL log_slow_admin_statements=ON;" + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $node_port -e "SET GLOBAL log_slow_slave_statements=ON;" + fi + fi + if [[ ! -z $group_replication ]]; then + for j in `seq 1 3`;do + if [[ "${query_source}" == "slowlog" ]]; then + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $node_port -e "SET GLOBAL slow_query_log='ON';" + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $node_port -e "SET GLOBAL long_query_time=0;" + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $node_port -e "SET GLOBAL log_slow_admin_statements=ON;" + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $node_port -e "SET GLOBAL log_slow_slave_statements=ON;" + fi + #run_workload 127.0.0.1 msandbox msandbox $node_port mysql mysql-group-replication-node + pmm-admin add mysql --query-source=$query_source --username=msandbox --password=msandbox --environment=ms-prod --cluster=ms-prod-cluster --replication-set=ms-repl ms-group-replication-node-$j-${SERVICE_RANDOM_NUMBER} --debug 127.0.0.1:$node_port + node_port=$(($node_port + 1)) + sleep 20 + done + else + #run_workload 127.0.0.1 msandbox msandbox $node_port mysql mysql-single + pmm-admin add mysql --query-source=$query_source --username=msandbox --password=msandbox --environment=dev --cluster=dev-cluster --replication-set=repl1 ms-single-${SERVICE_RANDOM_NUMBER} 127.0.0.1:$node_port + fi +else + dbdeployer deploy multiple ${db_version_sandbox} --sandbox-binary=~/ms${ms_version} --nodes $number_of_nodes --force --remote-access=% --bind-address=0.0.0.0 ${my_cnf_options:+--my-cnf-options="$my_cnf_options"} + export db_sandbox=$(dbdeployer sandboxes | 
awk -F' ' '{print $1}') + node_port=`dbdeployer sandboxes --header | grep ${db_version_sandbox} | grep 'multiple' | awk -F'[' '{print $2}' | awk -F' ' '{print $1}'` + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $node_port -e "ALTER USER 'msandbox'@'localhost' IDENTIFIED WITH mysql_native_password BY 'msandbox';" + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $node_port -e "ALTER USER 'root'@'localhost' IDENTIFIED WITH mysql_native_password BY 'GRgrO9301RuF';" + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $node_port -e "SET GLOBAL innodb_monitor_enable=all;" + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $node_port -e "UPDATE performance_schema.setup_consumers SET ENABLED = 'YES' WHERE NAME LIKE '%statements%';" + for j in `seq 1 $number_of_nodes`; do + if [[ "${query_source}" == "slowlog" ]]; then + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $node_port -e "SET GLOBAL slow_query_log='ON';" + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $node_port -e "SET GLOBAL long_query_time=0;" + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $node_port -e "SET GLOBAL log_slow_admin_statements=ON;" + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $node_port -e "SET GLOBAL log_slow_slave_statements=ON;" + fi + if [ $(( ${j} % 2 )) -eq 0 ]; then + pmm-admin add mysql --query-source=$query_source --username=msandbox --password=msandbox --environment=ms-prod --cluster=ms-prod-cluster --replication-set=ms-repl2 ms-multiple-node-$j-${SERVICE_RANDOM_NUMBER} --debug 127.0.0.1:$node_port + else + pmm-admin add mysql --query-source=$query_source --username=msandbox --password=msandbox --environment=ms-dev --cluster=ms-dev-cluster --replication-set=ms-repl1 ms-multiple-node-$j-${SERVICE_RANDOM_NUMBER} --debug 127.0.0.1:$node_port + fi + #run_workload 127.0.0.1 msandbox msandbox $node_port mysql mysql-multiple-node + node_port=$(($node_port + 1)) + sleep 20 + done +fi + +## Start Running Load +~/sandboxes/${db_sandbox}/sysbench_ready prepare > 
sysbench_prepare.txt 2>&1 & +sleep 120 +~/sandboxes/${db_sandbox}/sysbench_ready run > sysbench_run.txt 2>&1 & diff --git a/pmm_qa/client_container_proxysql_setup.sh b/pmm_qa/client_container_proxysql_setup.sh new file mode 100644 index 00000000..c3e864a9 --- /dev/null +++ b/pmm_qa/client_container_proxysql_setup.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +mysql -A -uroot -S /home/pxc/PXC/node1/socket.sock -e "drop database if exists sbtest;create database sbtest;" +mysql -A -uroot -S /home/pxc/PXC/node1/socket.sock -e "GRANT ALL PRIVILEGES ON sbtest.* TO 'proxysql_user'@'127.%';" + +## Start Running Load +sysbench /usr/share/sysbench/oltp_insert.lua --mysql-db=sbtest --mysql-user=proxysql_user --mysql-host=127.0.0.1 --mysql-port=6033 --mysql-password=passw0rd --db-driver=mysql --threads=1 --tables=10 --table-size=1000 prepare > sysbench_run_node1_prepare.txt 2>&1 & +sleep 20 +sysbench /usr/share/sysbench/oltp_read_only.lua --mysql-db=sbtest --mysql-user=proxysql_user --mysql-host=127.0.0.1 --mysql-port=6033 --mysql-password=passw0rd --db-driver=mysql --threads=1 --tables=10 --table-size=1000 --time=12000 run > sysbench_run_node1_read_only.txt 2>&1 & +sysbench /usr/share/sysbench/oltp_read_write.lua --mysql-db=sbtest --mysql-user=proxysql_user --mysql-host=127.0.0.1 --mysql-port=6033 --mysql-password=passw0rd --db-driver=mysql --threads=1 --tables=10 --table-size=1000 --time=0 run > sysbench_run_node1_read_write.txt 2>&1 & diff --git a/pmm_qa/client_container_ps_setup.sh b/pmm_qa/client_container_ps_setup.sh new file mode 100644 index 00000000..57337a1d --- /dev/null +++ b/pmm_qa/client_container_ps_setup.sh @@ -0,0 +1,185 @@ +#!/bin/bash + +while [ $# -gt 0 ]; do + + if [[ $1 == *"--"* ]]; then + param="${1/--/}" + declare $param="$2" + fi + + shift +done + +if [ -z "$number_of_nodes" ] +then + export number_of_nodes=3 +fi + +if [ -z "$ps_version" ] +then + export ps_version=8.0 +fi + +if [ -z "$ps_tarball" ] +then + export 
ps_tarball="https://downloads.percona.com/downloads/Percona-Server-8.0/Percona-Server-8.0.40-31/binary/tarball/Percona-Server-8.0.40-31-Linux.x86_64.glibc2.35-minimal.tar.gz" +fi + +if [ -z "$query_source" ] +then + export query_source=slowlog +fi + +export PS_PORT=3307 +export PS_USER=msandbox +export PS_PASSWORD=msandbox +touch sysbench_prepare.txt +touch sysbench_run.txt + +## Setup DB deployer +curl -L -s https://bit.ly/dbdeployer | bash || true + +### Get the tarball +wget ${ps_tarball} +mkdir ~/ps${ps_version} || true +mkdir /tmp || true +chmod 1777 /tmp || true + +## Deploy DB deployer +export tar_ball_name=$(ls Percona-Server*) +dbdeployer unpack ${tar_ball_name} --sandbox-binary=~/ps${ps_version} --flavor=percona +export db_version_sandbox=$(ls ~/ps${ps_version}) + +export db_sandbox=$(dbdeployer sandboxes | awk -F' ' '{print $1}') +export SERVICE_RANDOM_NUMBER=$((1 + $RANDOM % 9999)) + +# Initialize my_cnf_options +my_cnf_options="" + +if [[ "$number_of_nodes" -gt 1 ]]; then + # Check if ps_version is greater than 8.0 + if [[ "$ps_version" =~ ^8\.[1-9]([0-9])? || "$ps_version" =~ ^9\.[0-9]+ ]]; then + my_cnf_options="caching_sha2_password_auto_generate_rsa_keys=ON" + else + # MySQL 5.7, create user fails which already exists, to ignore this we do: + my_cnf_options="replicate-ignore-table=mysql.user" + fi +fi + +if [[ "$number_of_nodes" == 1 ]];then + if [[ ! 
-z $group_replication ]]; then + dbdeployer deploy --topology=group replication ${db_version_sandbox} --single-primary --sandbox-binary=~/ps${ps_version} --remote-access=% --bind-address=0.0.0.0 --force ${my_cnf_options:+--my-cnf-options="$my_cnf_options"} + export db_sandbox=$(dbdeployer sandboxes | awk -F' ' '{print $1}') + node_port=`dbdeployer sandboxes --header | grep ${db_version_sandbox} | grep 'group-single-primary' | awk -F'[' '{print $2}' | awk -F' ' '{print $1}'` + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $node_port -e "ALTER USER 'root'@'localhost' IDENTIFIED BY 'GRgrO9301RuF';" + else + dbdeployer deploy single ${db_version_sandbox} --sandbox-binary=~/ps${ps_version} --port=${PS_PORT} --remote-access=% --bind-address=0.0.0.0 --force ${my_cnf_options:+--my-cnf-options="$my_cnf_options"} + export db_sandbox=$(dbdeployer sandboxes | awk -F' ' '{print $1}') + node_port=`dbdeployer sandboxes --header | grep ${db_version_sandbox} | grep 'single' | awk -F'[' '{print $2}' | awk -F' ' '{print $1}'` + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $node_port -e "ALTER USER 'root'@'localhost' IDENTIFIED BY 'GRgrO9301RuF';" + if [[ "${query_source}" == "slowlog" ]]; then + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $node_port -e "SET GLOBAL slow_query_log='ON';" + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $node_port -e "SET GLOBAL long_query_time=0;" + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $node_port -e "SET GLOBAL log_slow_rate_limit=1;" + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $node_port -e "SET GLOBAL log_slow_admin_statements=ON;" + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $node_port -e "SET GLOBAL log_slow_slave_statements=ON;" + else + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $node_port -e "SET GLOBAL userstat=1;" + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $node_port -e "SET GLOBAL innodb_monitor_enable=all;" + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $node_port -e "UPDATE 
performance_schema.setup_consumers SET ENABLED = 'YES' WHERE NAME LIKE '%statements%';" + if echo "$ps_version" | grep '5.7'; then + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $node_port -e "INSTALL PLUGIN QUERY_RESPONSE_TIME_AUDIT SONAME 'query_response_time.so';" + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $node_port -e "INSTALL PLUGIN QUERY_RESPONSE_TIME SONAME 'query_response_time.so';" + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $node_port -e "INSTALL PLUGIN QUERY_RESPONSE_TIME_READ SONAME 'query_response_time.so';" + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $node_port -e "INSTALL PLUGIN QUERY_RESPONSE_TIME_WRITE SONAME 'query_response_time.so';" + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $node_port -e "SET GLOBAL query_response_time_stats=ON;" + fi + fi + fi + if [[ ! -z $group_replication ]]; then + for j in `seq 1 3`;do + if [[ "${query_source}" == "slowlog" ]]; then + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $node_port -e "SET GLOBAL slow_query_log='ON';" + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $node_port -e "SET GLOBAL long_query_time=0;" + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $node_port -e "SET GLOBAL log_slow_rate_limit=1;" + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $node_port -e "SET GLOBAL log_slow_admin_statements=ON;" + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $node_port -e "SET GLOBAL log_slow_slave_statements=ON;" + else + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $node_port -e "SET GLOBAL userstat=1;" + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $node_port -e "SET GLOBAL innodb_monitor_enable=all;" + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $node_port -e "UPDATE performance_schema.setup_consumers SET ENABLED = 'YES' WHERE NAME LIKE '%statements%';" + if echo "$ps_version" | grep '5.7'; then + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $node_port -e "INSTALL PLUGIN QUERY_RESPONSE_TIME_AUDIT SONAME 'query_response_time.so';" + mysql -h 
127.0.0.1 -u msandbox -pmsandbox --port $node_port -e "INSTALL PLUGIN QUERY_RESPONSE_TIME SONAME 'query_response_time.so';" + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $node_port -e "INSTALL PLUGIN QUERY_RESPONSE_TIME_READ SONAME 'query_response_time.so';" + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $node_port -e "INSTALL PLUGIN QUERY_RESPONSE_TIME_WRITE SONAME 'query_response_time.so';" + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $node_port -e "SET GLOBAL query_response_time_stats=ON;" + fi + fi + #run_workload 127.0.0.1 msandbox msandbox $node_port mysql mysql-group-replication-node + pmm-admin add mysql --query-source=$query_source --username=msandbox --password=msandbox --environment=ms-prod --cluster=ps-prod-cluster --replication-set=ps-repl ps-group-replication-node-$j-${SERVICE_RANDOM_NUMBER} --debug 127.0.0.1:$node_port + node_port=$(($node_port + 1)) + sleep 20 + done + else + #run_workload 127.0.0.1 msandbox msandbox $node_port mysql mysql-single + pmm-admin add mysql --query-source=$query_source --username=msandbox --password=msandbox --environment=dev --cluster=dev-cluster --replication-set=repl1 ps-single-${SERVICE_RANDOM_NUMBER} 127.0.0.1:$node_port + sleep 20 + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $node_port -e "set long_query_time = 0; INSERT INTO T2033 ( ID, Value ) VALUES (1,1)" || true + fi +else + dbdeployer deploy multiple ${db_version_sandbox} --sandbox-binary=~/ps${ps_version} --nodes $number_of_nodes --force --remote-access=% --bind-address=0.0.0.0 --my-cnf-options=gtid_mode=ON --my-cnf-options=enforce-gtid-consistency=ON --my-cnf-options=binlog-format=ROW --my-cnf-options=log-slave-updates=ON --my-cnf-options=binlog-checksum=NONE ${my_cnf_options:+--my-cnf-options="$my_cnf_options"} + export db_sandbox=$(dbdeployer sandboxes | awk -F' ' '{print $1}') + master_port=`dbdeployer sandboxes --header | grep ${db_version_sandbox} | grep 'multiple' | awk -F'[' '{print $2}' | awk -F' ' '{print $1}'` + if echo 
"$ps_version" | grep '5.7'; then + slave_port=`dbdeployer sandboxes --header | grep ${db_version_sandbox} | grep 'multiple' | awk -F'[' '{print $2}' | awk -F' ' '{print $2}'` + else + slave_port=`dbdeployer sandboxes --header | grep ${db_version_sandbox} | grep 'multiple' | awk -F'[' '{print $2}' | awk -F' ' '{print $3}'` + fi + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $master_port -e "ALTER USER 'root'@'localhost' IDENTIFIED BY 'GRgrO9301RuF';" + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $master_port -e "FLUSH PRIVILEGES;" + mysql -h 127.0.0.1 -u root -p'GRgrO9301RuF' --port $master_port -e "CREATE USER 'repl'@'%' IDENTIFIED BY 'repl_pass';" + mysql -h 127.0.0.1 -u root -p'GRgrO9301RuF' --port $master_port -e "GRANT REPLICATION SLAVE, REPLICATION CLIENT ON *.* TO 'repl'@'%';" + mysql -h 127.0.0.1 -u root -p'GRgrO9301RuF' --port $master_port -e "FLUSH PRIVILEGES;" + if echo "$ps_version" | grep '5.7'; then + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $slave_port -e "CHANGE MASTER TO MASTER_HOST='127.0.0.1', MASTER_USER='repl', MASTER_PASSWORD='repl_pass', MASTER_PORT=${master_port}, MASTER_AUTO_POSITION=1;" + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $slave_port -e "START SLAVE;" + else + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $slave_port -e "CHANGE REPLICATION SOURCE TO SOURCE_HOST='127.0.0.1', SOURCE_USER='repl', SOURCE_PASSWORD='repl_pass', SOURCE_PORT=${master_port}, SOURCE_AUTO_POSITION=1, SOURCE_SSL=1;" + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $slave_port -e "START REPLICA;" + fi + for port in $master_port $slave_port; do + if [[ "${query_source}" == "slowlog" ]]; then + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $port -e "SET GLOBAL slow_query_log='ON';" + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $port -e "SET GLOBAL long_query_time=0;" + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $port -e "SET GLOBAL log_slow_rate_limit=1;" + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $port -e "SET 
GLOBAL log_slow_admin_statements=ON;" + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $port -e "SET GLOBAL log_slow_slave_statements=ON;" + else + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $port -e "SET GLOBAL userstat=1;" + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $port -e "SET GLOBAL innodb_monitor_enable=all;" + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $port -e "UPDATE performance_schema.setup_consumers SET ENABLED = 'YES' WHERE NAME LIKE '%statements%';" + if echo "$ps_version" | grep '5.7'; then + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $port -e "INSTALL PLUGIN QUERY_RESPONSE_TIME_AUDIT SONAME 'query_response_time.so';" + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $port -e "INSTALL PLUGIN QUERY_RESPONSE_TIME SONAME 'query_response_time.so';" + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $port -e "INSTALL PLUGIN QUERY_RESPONSE_TIME_READ SONAME 'query_response_time.so';" + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $port -e "INSTALL PLUGIN QUERY_RESPONSE_TIME_WRITE SONAME 'query_response_time.so';" + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $port -e "SET GLOBAL query_response_time_stats=ON;" + fi + fi + if [[ "$port" == "$master_port" ]]; then + pmm-admin add mysql --query-source=$query_source --username=msandbox --password=msandbox --environment=ps-dev --cluster=ps-dev-cluster --replication-set=ps-repl1 ps-multiple-master-${SERVICE_RANDOM_NUMBER} --debug 127.0.0.1:$master_port + else + pmm-admin add mysql --query-source=$query_source --username=msandbox --password=msandbox --environment=ps-dev --cluster=ps-dev-cluster --replication-set=ps-repl1 ps-multiple-slave-${SERVICE_RANDOM_NUMBER} --debug 127.0.0.1:$slave_port + fi + #run_workload 127.0.0.1 msandbox msandbox $node_port mysql mysql-multiple-node + sleep 20 + + mysql -h 127.0.0.1 -u msandbox -pmsandbox --port $node_port -e "set long_query_time = 0; INSERT INTO T2033 ( ID, Value ) VALUES (1,1)" || true + done +fi + +## Start Running Load 
+~/sandboxes/${db_sandbox}/sysbench_ready prepare > sysbench_prepare.txt 2>&1 & +sleep 120 +~/sandboxes/${db_sandbox}/sysbench_ready run > sysbench_run.txt 2>&1 & diff --git a/pmm_qa/client_container_pxc_setup.sh b/pmm_qa/client_container_pxc_setup.sh new file mode 100644 index 00000000..a84ef870 --- /dev/null +++ b/pmm_qa/client_container_pxc_setup.sh @@ -0,0 +1,83 @@ +#!/bin/bash + +while [ $# -gt 0 ]; do + + if [[ $1 == *"--"* ]]; then + param="${1/--/}" + declare $param="$2" + fi + + shift +done + +if [ -z "$number_of_nodes" ] +then + export number_of_nodes=3 +fi + +if [ -z "$pxc_version" ] +then + export pxc_version=8 +fi + +if [ -z "$pxc_tarball" ] +then + export pxc_tarball=https://downloads.percona.com/downloads/Percona-XtraDB-Cluster-80/Percona-XtraDB-Cluster-8.0.32/binary/tarball/Percona-XtraDB-Cluster_8.0.32-24.1_Linux.x86_64.glibc2.34-minimal.tar.gz +fi + +if [ -z "$query_source" ] +then + export query_source=perfschema +fi + +if [ -z "$pxc_dev_cluster" ] +then + export pxc_dev_cluster=pxc-dev-cluster +fi + +whoami +cd ~ +wget https://raw.githubusercontent.com/Percona-QA/percona-qa/master/pxc-tests/pxc-startup.sh +sed -i 's/log-output=none/log-output=file/g' pxc-startup.sh +## bug https://bugs.mysql.com/bug.php?id=90553 workaround +sed -i 's+${MID} --datadir+${MID} --socket=\\${node}/socket.sock --port=\\${RBASE1} --datadir+g' pxc-startup.sh + +## Download right PXC version +if echo "$pxc_version" | grep '8'; then + sed -i 's+wsrep_node_incoming_address=$ADDR+wsrep_node_incoming_address=$ADDR:$RBASE1+g' pxc-startup.sh +fi + +curl ${pxc_tarball} -o Percona-XtraDB-Cluster.tar.gz +sleep 10 +tar -xzf Percona-XtraDB-Cluster.tar.gz +sleep 10 +rm -r Percona-XtraDB-Cluster.tar.gz +mv Percona-XtraDB-Cluster* PXC +cd PXC + +## start PXC +bash ../pxc-startup.sh +bash ./start_pxc $number_of_nodes +touch sysbench_run_node1_prepare.txt +touch sysbench_run_node1_read_write.txt +touch sysbench_run_node1_read_only.txt + +### enable slow log +if [ "$query_source" == 
"slowlog" ]; then + for j in `seq 1 ${number_of_nodes}`; + do + bin/mysql -A -uroot -Snode$j/socket.sock -e "SET GLOBAL slow_query_log='ON';" + bin/mysql -A -uroot -Snode$j/socket.sock -e "SET GLOBAL long_query_time=0;" + bin/mysql -A -uroot -Snode$j/socket.sock -e "SET GLOBAL log_slow_rate_limit=1;" + bin/mysql -A -uroot -Snode$j/socket.sock -e "SET GLOBAL log_slow_verbosity='full';" + bin/mysql -A -uroot -Snode$j/socket.sock -e "SET GLOBAL log_slow_rate_type='query';" + done +fi + +bin/mysql -A -uroot -S/home/pxc/PXC/node1/socket.sock -e "create user 'admin'@'%' identified with mysql_native_password by 'admin';" +bin/mysql -A -uroot -S/home/pxc/PXC/node1/socket.sock -e "grant all on *.* to 'admin'@'%';" + +export SERVICE_RANDOM_NUMBER=$((1 + $RANDOM % 9999)) +for j in `seq 1 ${number_of_nodes}`;do + pmm-admin add mysql --query-source=${query_source} --username=admin --password=admin --host=127.0.0.1 --port=$(cat /home/pxc/PXC/node$j.cnf | grep port | awk -F"=" '{print $2}') --environment=pxc-dev --cluster=${pxc_dev_cluster} --replication-set=pxc-repl pxc_node__${j}_${SERVICE_RANDOM_NUMBER} +done diff --git a/pmm_qa/data/load_pgsql.sql b/pmm_qa/data/load_pgsql.sql new file mode 100644 index 00000000..227e904c --- /dev/null +++ b/pmm_qa/data/load_pgsql.sql @@ -0,0 +1,32 @@ +-- Step 1: Show initial buffers_alloc value +SELECT 'Initial buffers_alloc' AS info, buffers_alloc FROM pg_stat_bgwriter; + +-- Step 2: Drop and create a large test table (~500MB+) +DROP TABLE IF EXISTS buffer_test; + +CREATE TABLE buffer_test AS +SELECT + generate_series(1, 1_000_000) AS id, + md5(random()::text) AS filler; + +ANALYZE buffer_test; + +-- Step 3: Perform repeated full-table scans to stress buffer allocation +DO $$ +BEGIN + FOR i IN 1..10 LOOP + RAISE NOTICE 'Running scan iteration %', i; + PERFORM COUNT(*) FROM buffer_test; + END LOOP; +END $$; + +---- Step 4: Show final buffers_alloc value +--SELECT 'Final buffers_alloc' AS info, buffers_alloc FROM pg_stat_bgwriter; +VACUUM; 
+SELECT pg_switch_wal(); + +-- Step 5: Query the data +SELECT * FROM buffer_test; + +-- Step 6: Delete the data +DELETE FROM buffer_test; diff --git a/pmm_qa/docker-compose-clients.yaml b/pmm_qa/docker-compose-clients.yaml new file mode 100644 index 00000000..b7830746 --- /dev/null +++ b/pmm_qa/docker-compose-clients.yaml @@ -0,0 +1,64 @@ +version: '3.7' + +services: + pmm-server: + container_name: pmm-server-1 + image: ${DOCKER_VERSION:-perconalab/pmm-server:3-dev-latest} + ports: + - "2553:8443" + - "8085:8080" + environment: + - PMM_DEBUG=1 + networks: + - docker-client-check + + pmm-client: + container_name: pmm-client-1 + image: ${CLIENT_DOCKER_VERSION:-perconalab/pmm-client:3-dev-latest} + environment: + - PMM_AGENT_SERVER_ADDRESS=pmm-server:8443 + - PMM_AGENT_SERVER_USERNAME=admin + - PMM_AGENT_SERVER_PASSWORD=admin + - PMM_AGENT_SERVER_INSECURE_TLS=1 + - PMM_AGENT_SETUP=1 + - PMM_AGENT_SETUP_FORCE=1 + - PMM_AGENT_CONFIG_FILE=/usr/local/percona/pmm/config/pmm-agent.yaml + depends_on: + pmm-server: + condition: service_healthy + networks: + - docker-client-check + + ps: + container_name: ps-1 + image: percona:8.0 + environment: + - MYSQL_ROOT_PASSWORD=root + - MYSQL_USER=pmm + - MYSQL_PASSWORD=pmm-pass + - PMM_AGENT_SERVER_ADDRESS=pmm-server:8443 + networks: + - docker-client-check + + mongodb: + container_name: psmdb-1 + image: percona/percona-server-mongodb:7.0 + environment: + - PMM_AGENT_SERVER_ADDRESS=pmm-server:8443 + - MONGO_INITDB_ROOT_USERNAME=pmm + - MONGO_INITDB_ROOT_PASSWORD=pmm-pass + networks: + - docker-client-check + + postgres: + container_name: pdpgsql-1 + image: percona/percona-distribution-postgresql:16 + environment: + - POSTGRES_USER=pmm + - POSTGRES_PASSWORD=pmm-pass + - PMM_AGENT_SERVER_ADDRESS=pmm-server:8443 + networks: + - docker-client-check + +networks: + docker-client-check: diff --git a/pmm_qa/external_setup.sh b/pmm_qa/external_setup.sh new file mode 100644 index 00000000..b2199d57 --- /dev/null +++ b/pmm_qa/external_setup.sh 
@@ -0,0 +1,49 @@ +#!/bin/sh + + +while [ $# -gt 0 ]; do + + if [[ $1 == *"--"* ]]; then + param="${1/--/}" + declare $param="$2" + fi + + shift +done + +if [ -z "$metrics_mode" ] +then + export metrics_mode=push +fi + +if [ -z "$setup_version" ] +then + export setup_version="1.14.0" +fi + +if [ -z "$setup_type" ] +then + export setup_type=redis +fi + +# Install the dependencies +source ~/.bash_profile || true; +apt-get update +apt-get -y install wget curl git gnupg2 lsb-release +apt-get -y install -y git ca-certificates gcc libc6-dev liblua5.3-dev libpcre3-dev libssl-dev libsystemd-dev make wget zlib1g-dev + +if [[ "$setup_type" == "redis" ]]; then + wget https://github.com/oliver006/redis_exporter/releases/download/v${setup_version}/redis_exporter-v${setup_version}.linux-386.tar.gz + tar -xvf redis_exporter-v${setup_version}.linux-386.tar.gz + sleep 10 + rm redis_exporter*.tar.gz + mv redis_exporter-* redis_exporter || exit +elif [[ "$setup_type" == "nodeprocess" ]]; then + wget https://github.com/ncabatoff/process-exporter/releases/download/v${setup_version}/process-exporter-${setup_version}.linux-amd64.tar.gz + tar -xvf process-exporter-${setup_version}.linux-amd64.tar.gz || exit + sleep 10 + rm process-exporter*.tar.gz + mv process-exporter-* process-exporter || exit +fi + + diff --git a/pmm_qa/external_setup.yml b/pmm_qa/external_setup.yml new file mode 100644 index 00000000..a3f66d62 --- /dev/null +++ b/pmm_qa/external_setup.yml @@ -0,0 +1,104 @@ +--- +# This playbook does following: +# enables Percona testing repository + +- hosts: all + vars: + external_container: "{{ lookup('vars', 'extra_external_container', default=lookup('env','EXTERNAL_CONTAINER') | default('EXTERNAL', true) ) }}" + redis_version: "{{ lookup('vars', 'extra_redis_version', default=lookup('env','REDIS_EXPORTER_VERSION') | default('0.7.5', true) ) }}" + nodeprocess_version: "{{ lookup('vars', 'extra_nodeprocess_version', default=lookup('env','NODE_PROCESS_EXPORTER_VERSION') |
default('0.7.5', true) ) }}" + pmm_server_ip: "{{ lookup('vars', 'extra_pmm_server_ip', default=lookup('env','PMM_SERVER_IP') | default('127.0.0.1', true) ) }}" + client_version: "{{ lookup('vars', 'extra_client_version', default=lookup('env','CLIENT_VERSION') | default('dev-latest', true) ) }}" + admin_password: "{{ lookup('vars', 'extra_admin_password', default=lookup('env','ADMIN_PASSWORD') | default('admin', true) ) }}" + pmm_qa_branch: "{{ lookup('vars', 'extra_pmm_qa_branch', default=lookup('env','PMM_QA_GIT_BRANCH') | default('main', true) ) }}" + + tasks: + - name: cleanup container for client and DB setup + shell: > + docker ps -a --filter "name={{ external_container }}" | grep -q . && docker stop {{ external_container }} && docker rm -fv {{ external_container }} + docker stop redis_container + docker rm -fv redis_container + ignore_errors: true + tags: + - cleanup + + - name: Create pmm-qa network if not exist + shell: docker network create pmm-qa + ignore_errors: true + + - name: Prepare Container for External Exporters + shell: | + docker run -d \ + -p 43100:43100 \ + --name={{ external_container }} \ + --network=pmm-qa \ + phusion/baseimage:noble-1.0.2 + + - name: Copy all required Artifacts to the docker external_container + shell: "{{ item }}" + with_items: + - docker cp ./external_setup.sh {{ external_container }}:/ + - docker exec {{ external_container }} apt-get update + - docker exec {{ external_container }} apt-get -y install wget curl git gnupg2 lsb-release + + - name: Run Redis Container in Host + shell: | + docker run -d \ + --name=redis_container \ + --network=pmm-qa \ + -p 6379:6379 \ + redis '--requirepass oFukiBRg7GujAJXq3tmd' + + - name: Setup External Exporters for monitoring + shell: "{{ item }}" + with_items: + - docker exec {{ external_container }} bash -x ./external_setup.sh > setup_external.log + + - name: Install PMM Client inside of container + include_tasks: ./tasks/install_pmm_client.yml + vars: + container_name: "{{ 
external_container }}" + + - name: Execute Setup script inside the External container for Redis + shell: "{{ item }}" + with_items: + - docker exec {{ external_container }} bash -xe ./external_setup.sh --setup_type redis --setup_version {{ redis_version }} > setup_external_redis.log + delay: 10 + + - name: Start Redis for monitoring + shell: "{{ item }}" + with_items: + - docker exec {{ external_container }} nohup bash -c './redis_exporter/redis_exporter -redis.password=oFukiBRg7GujAJXq3tmd -redis.addr=redis_container:6379 -web.listen-address=:42200 > redis.log 2>&1 &' + + - name: Execute Setup script inside the External container for Process Exporter + shell: "{{ item }}" + with_items: + - docker exec {{ external_container }} bash -xe ./external_setup.sh --setup_type nodeprocess --setup_version {{ nodeprocess_version }} > setup_external_nodeprocess.log + + - name: Start Node Process Exporter for monitoring + shell: "{{ item }}" + with_items: + - docker exec {{ external_container }} nohup bash -c './process-exporter/process-exporter > process-exporter.log 2>&1 &' + + - name: Set Random Number Fact + set_fact: + random_number: "{{ (10000 | random) | int }}" + + - name: Remove EXTERNAL process for monitoring from PMM if already exist with same service name + shell: "{{ item }}" + with_items: + - docker exec {{ external_container }} bash -c 'source ~/.bash_profile || true; pmm-admin remove external redis_service_{{ random_number }}' + - docker exec {{ external_container }} bash -c 'source ~/.bash_profile || true; pmm-admin remove external nodeprocess_service_{{ random_number }}' + ignore_errors: true + + - name: Add Redis for Monitoring to PMM via pmm-admin add external command + shell: "{{ item }}" + with_items: + - docker exec {{ external_container }} pmm-admin list + - docker exec {{ external_container }} pmm-admin add external --listen-port=42200 --group="redis" --service-name=redis_external_service_{{ random_number }} + + - name: Add Node Process for Monitoring 
to PMM via pmm-admin add external command + shell: "{{ item }}" + with_items: + - docker exec {{ external_container }} pmm-admin list + - docker exec {{ external_container }} pmm-admin add external --group=processes --listen-port=9256 --service-name=nodeprocess_service_{{ random_number }} diff --git a/pmm_qa/haproxy.cfg b/pmm_qa/haproxy.cfg new file mode 100644 index 00000000..956b06cd --- /dev/null +++ b/pmm_qa/haproxy.cfg @@ -0,0 +1,18 @@ +frontend stats + bind *:42100 + stats enable + stats uri /stats + stats refresh 10s + mode http + timeout client 60s + http-request use-service prometheus-exporter if { path /metrics } + default_backend allservers + +backend allservers + timeout connect 10s + timeout server 100s + mode http + server server2222 127.0.0.1:2222 + server server3333 127.0.0.1:3333 + server server4444 127.0.0.1:4444 + server server5555 127.0.0.1:5555 diff --git a/pmm_qa/haproxy_setup.sh b/pmm_qa/haproxy_setup.sh new file mode 100644 index 00000000..6aff6239 --- /dev/null +++ b/pmm_qa/haproxy_setup.sh @@ -0,0 +1,30 @@ +#!/bin/sh + + +while [ $# -gt 0 ]; do + + if [[ $1 == *"--"* ]]; then + param="${1/--/}" + declare $param="$2" + fi + + shift +done + +if [ -z "$metrics_mode" ] +then + export metrics_mode=push +fi + +# Install the dependencies +source ~/.bash_profile || true; +apt-get update +apt-get -y install wget curl git gnupg2 lsb-release +apt-get -y install -y git ca-certificates gcc libc6-dev liblua5.3-dev libpcre3-dev libssl-dev libsystemd-dev make wget zlib1g-dev + +## Get Haproxy +git clone https://github.com/haproxy/haproxy.git +cd haproxy +make TARGET=linux-glibc USE_LUA=1 USE_OPENSSL=1 USE_PCRE=1 USE_ZLIB=1 USE_SYSTEMD=1 USE_PROMEX=1 +make install-bin +cp /usr/local/sbin/haproxy /usr/sbin/haproxy diff --git a/pmm_qa/haproxy_setup.yml b/pmm_qa/haproxy_setup.yml new file mode 100644 index 00000000..6f92b786 --- /dev/null +++ b/pmm_qa/haproxy_setup.yml @@ -0,0 +1,77 @@ +--- +# This playbook does following: +# enables Percona testing 
repository + +- hosts: all + vars: + haproxy_container: "{{ lookup('vars', 'extra_haproxy_container', default=lookup('env','HAPROXY_CONTAINER') | default('HAPROXY', true) ) }}" + pmm_server_ip: "{{ lookup('vars', 'extra_pmm_server_ip', default=lookup('env','PMM_SERVER_IP') | default('127.0.0.1', true) ) }}" + client_version: "{{ lookup('vars', 'extra_client_version', default=lookup('env','CLIENT_VERSION') | default('dev-latest', true) ) }}" + admin_password: "{{ lookup('vars', 'extra_admin_password', default=lookup('env','ADMIN_PASSWORD') | default('admin', true) ) }}" + pmm_qa_branch: "{{ lookup('vars', 'extra_pmm_qa_branch', default=lookup('env','PMM_QA_GIT_BRANCH') | default('main', true) ) }}" + + tasks: + - name: cleanup container for client and DB setup + shell: > + docker ps -a --filter "name={{ haproxy_container }}" | grep -q . && docker stop {{ haproxy_container }} && docker rm -fv {{ haproxy_container }} + ignore_errors: true + tags: + - cleanup + + - name: Create pmm-qa network if not exist + shell: docker network create pmm-qa + ignore_errors: true + + - name: Prepare Container for HAPROXY + shell: > + sudo docker run -d -p 42100:42100 --name={{ haproxy_container }} + phusion/baseimage:jammy-1.0.1 + + - name: Copy all required Artifacts to the docker haproxy_container + shell: "{{ item }}" + with_items: + - docker cp ./haproxy_setup.sh {{ haproxy_container }}:/ + - docker cp ./haproxy.cfg {{ haproxy_container }}:/ + - docker cp ./pmm3-client-setup.sh {{ haproxy_container }}:/ + - docker exec {{ haproxy_container }} apt-get update + - docker exec {{ haproxy_container }} apt-get -y install wget curl git gnupg2 lsb-release + + - name: Setup haproxy for monitoring + shell: "{{ item }}" + with_items: + - docker exec {{ haproxy_container }} bash -x ./haproxy_setup.sh > setup_haproxy.log + + - name: Install pmm2-client on the haproxy_container + shell: "{{ item }}" + with_items: + - docker network connect pmm-qa {{ haproxy_container }} + - docker exec {{ 
haproxy_container }} bash -x ./pmm3-client-setup.sh --pmm_server_ip {{ pmm_server_ip }} --client_version {{ client_version }} --admin_password {{ admin_password }} --use_metrics_mode no + + - name: Wait for Haproxy building to be finished and binary available + shell: docker exec {{ haproxy_container }} ls -la /usr/sbin/haproxy + retries: 6 + delay: 10 + + - name: Start haproxy for monitoring + shell: "{{ item }}" + with_items: + - docker exec {{ haproxy_container }} haproxy -f haproxy.cfg -D + + - name: Set Random Number Fact + set_fact: + random_number: "{{ (10000 | random) | int }}" + + - name: Remove HAPROXY for monitoring from PMM if already exist with same service name + shell: "{{ item }}" + with_items: + - docker exec {{ haproxy_container }} bash -c 'source ~/.bash_profile || true; pmm-admin remove haproxy {{ haproxy_container }}_service_{{ random_number }}' + ignore_errors: true + + - name: Add HAPROXY for Monitoring to PMM via pmm-admin add haproxy command + shell: "{{ item }}" + with_items: + - docker exec {{ haproxy_container }} bash -c 'source ~/.bash_profile || true; pmm-admin list' + - docker exec {{ haproxy_container }} bash -c 'source ~/.bash_profile || true; pmm-admin add haproxy --listen-port=42100 --environment=haproxy {{ haproxy_container }}_service_{{ random_number }}' + + - name: Run ping to FE of HAPROXY to monitor the load balancing + shell: while true; do curl http://localhost:42100/; sleep 10; done >/dev/null 2>&1 & diff --git a/pmm_qa/mlaunch_modb_setup.sh b/pmm_qa/mlaunch_modb_setup.sh new file mode 100755 index 00000000..924d458d --- /dev/null +++ b/pmm_qa/mlaunch_modb_setup.sh @@ -0,0 +1,100 @@ +#!/bin/sh + +while [ $# -gt 0 ]; do + if [[ $1 == *"--"* ]]; then + param="${1/--/}" + declare $param="$2" + fi + shift +done + +if [ -z "$mongodb_version" ]; then + export mongodb_version=7.0 +fi + +if [ -z "$mongodb_setup" ]; then + export mongodb_setup=pss +fi + +if [ -z "$metrics_mode" ]; then + export metrics_mode=push +fi + +# Mongo user
credtials for the replicat set cluster +export user="dba" +export pwd="test1234" + +# Install the dependencies +source ~/.bash_profile || true; +apt-get update +apt-get -y install wget curl jq git gnupg2 lsb-release +apt-get -y install libreadline6-dev systemtap-sdt-dev zlib1g-dev libssl-dev libpam0g-dev python-dev bison make flex libipc-run-perl +sleep 10 + +#wget https://raw.githubusercontent.com/Percona-QA/percona-qa/master/mongo_startup.sh +#chmod +x mongo_startup.sh +export SERVICE_RANDOM_NUMBER=$(echo $((1 + $RANDOM % 9999))) + +## Detect latest tarball link for specified mongodb_version: 7.0 | 6.0 | 5.0 | 4.4 | 4.2 at the moment +modb_tarball=$(curl -s "https://www.mongodb.com/try/download/community" | \ + grep -oP "https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-ubuntu2004-${mongodb_version//./\\.}\.\d+\.tgz" | \ + sort -V | tail -n 1) + +echo "Downloading ${mongodb_version} from ${modb_tarball}..." +wget -O percona_server_mongodb.tar.gz ${modb_tarball} +tar -xvf percona_server_mongodb.tar.gz + +export extracted_folder_name=$(ls | grep mongodb-linux) +echo "Extracted folder name ${extracted_folder_name}" +mv ${extracted_folder_name} modb_${mongodb_version} +rm percona_server_mongodb.tar.gz* + +# For mongodb dependency in Debian +wget http://http.us.debian.org/debian/pool/main/o/openldap/libldap-2.4-2_2.4.47+dfsg-3+deb10u7_amd64.deb +apt install -y ./libldap-2.4-2_2.4.47+dfsg-3+deb10u7_amd64.deb + +if [ "$mongodb_setup" == "pss" ]; then + mlaunch init --bind_ip 0.0.0.0 --binarypath "./modb_${mongodb_version}/bin" --replicaset --name rs1 --nodes 3 + sleep 20 + pmm-admin remove mongodb mongodb_rs1_1_${SERVICE_RANDOM_NUMBER} || true; pmm-admin add mongodb --enable-all-collectors --cluster mongodb_node_cluster --replication-set=rs1 --environment=mongodb_rs_node --metrics-mode=$metrics_mode mongodb_rs1_1_${SERVICE_RANDOM_NUMBER} --debug 127.0.0.1:27017 + sleep 2 + pmm-admin remove mongodb mongodb_rs1_2_${SERVICE_RANDOM_NUMBER} || true; pmm-admin add mongodb 
--enable-all-collectors --cluster mongodb_node_cluster --replication-set=rs1 --environment=mongodb_rs_node --metrics-mode=$metrics_mode mongodb_rs1_2_${SERVICE_RANDOM_NUMBER} --debug 127.0.0.1:27018 + sleep 2 + pmm-admin remove mongodb mongodb_rs1_3_${SERVICE_RANDOM_NUMBER} || true; pmm-admin add mongodb --enable-all-collectors --cluster mongodb_node_cluster --replication-set=rs1 --environment=mongodb_rs_node --metrics-mode=$metrics_mode mongodb_rs1_3_${SERVICE_RANDOM_NUMBER} --debug 127.0.0.1:27019 + sleep 20 +fi + +if [ "$mongodb_setup" == "psa" ]; then + mlaunch init --bind_ip 0.0.0.0 --binarypath "./modb_${mongodb_version}/bin" --replicaset --name rs1 --nodes 2 --arbiter + sleep 20 + pmm-admin remove mongodb mongodb_rs1_1_${SERVICE_RANDOM_NUMBER} || true; pmm-admin add mongodb --enable-all-collectors --cluster mongodb_node_cluster --replication-set=rs1 --environment=mongodb_rs_node --metrics-mode=$metrics_mode mongodb_rs1_1_${SERVICE_RANDOM_NUMBER} --debug 127.0.0.1:27017 + sleep 2 + pmm-admin remove mongodb mongodb_rs1_2_${SERVICE_RANDOM_NUMBER} || true; pmm-admin add mongodb --enable-all-collectors --cluster mongodb_node_cluster --replication-set=rs1 --environment=mongodb_rs_node --metrics-mode=$metrics_mode mongodb_rs1_2_${SERVICE_RANDOM_NUMBER} --debug 127.0.0.1:27018 + sleep 2 + pmm-admin remove mongodb mongodb_rs1_3_${SERVICE_RANDOM_NUMBER} || true; pmm-admin add mongodb --enable-all-collectors --cluster mongodb_node_cluster --replication-set=rs1 --environment=mongodb_rs_node --metrics-mode=$metrics_mode mongodb_rs1_3_${SERVICE_RANDOM_NUMBER} --debug 127.0.0.1:27019 + sleep 20 +fi + +if [ "$mongodb_setup" == "sharded" ] || [ "$mongodb_setup" == "shards" ]; then + mlaunch init --bind_ip 0.0.0.0 --binarypath "./modb_${mongodb_version}/bin" --replicaset --sharded rs1 rs2 --config 3 + pmm-admin add mongodb --enable-all-collectors --cluster mongodb_node_cluster --environment=mongos_shraded_node mongos_shraded_node_${SERVICE_RANDOM_NUMBER} 
--metrics-mode=$metrics_mode --debug 127.0.0.1:27017 + sleep 2 + pmm-admin add mongodb --enable-all-collectors --cluster mongodb_node_cluster --replication-set=config --environment=mongodb_config_node mongodb_config_1_${SERVICE_RANDOM_NUMBER} --metrics-mode=$metrics_mode --debug 127.0.0.1:27024 + sleep 2 + pmm-admin add mongodb --enable-all-collectors --cluster mongodb_node_cluster --replication-set=config --environment=mongodb_config_node mongodb_config_2_${SERVICE_RANDOM_NUMBER} --metrics-mode=$metrics_mode --debug 127.0.0.1:27025 + sleep 2 + pmm-admin add mongodb --enable-all-collectors --cluster mongodb_node_cluster --replication-set=config --environment=mongodb_config_node mongodb_config_3_${SERVICE_RANDOM_NUMBER} --metrics-mode=$metrics_mode --debug 127.0.0.1:27026 + sleep 2 + pmm-admin add mongodb --enable-all-collectors --cluster mongodb_node_cluster --replication-set=rs1 --environment=mongodb_rs_node mongodb_rs1_1_${SERVICE_RANDOM_NUMBER} --metrics-mode=$metrics_mode --debug 127.0.0.1:27018 + sleep 2 + pmm-admin add mongodb --enable-all-collectors --cluster mongodb_node_cluster --replication-set=rs1 --environment=mongodb_rs_node mongodb_rs1_2_${SERVICE_RANDOM_NUMBER} --metrics-mode=$metrics_mode --debug 127.0.0.1:27019 + sleep 2 + pmm-admin add mongodb --enable-all-collectors --cluster mongodb_node_cluster --replication-set=rs1 --environment=mongodb_rs_node mongodb_rs1_3_${SERVICE_RANDOM_NUMBER} --metrics-mode=$metrics_mode --debug 127.0.0.1:27020 + sleep 2 + pmm-admin add mongodb --enable-all-collectors --cluster mongodb_node_cluster --replication-set=rs2 --environment=mongodb_rs_node mongodb_rs2_1_${SERVICE_RANDOM_NUMBER} --metrics-mode=$metrics_mode --debug 127.0.0.1:27021 + sleep 2 + pmm-admin add mongodb --enable-all-collectors --cluster mongodb_node_cluster --replication-set=rs2 --environment=mongodb_rs_node mongodb_rs2_2_${SERVICE_RANDOM_NUMBER} --metrics-mode=$metrics_mode --debug 127.0.0.1:27022 + sleep 2 + pmm-admin add mongodb 
--enable-all-collectors --cluster mongodb_node_cluster --replication-set=rs2 --environment=mongodb_rs_node mongodb_rs2_3_${SERVICE_RANDOM_NUMBER} --metrics-mode=$metrics_mode --debug 127.0.0.1:27023 + sleep 20 +fi diff --git a/pmm_qa/mlaunch_modb_setup.yml b/pmm_qa/mlaunch_modb_setup.yml new file mode 100644 index 00000000..dfb749c0 --- /dev/null +++ b/pmm_qa/mlaunch_modb_setup.yml @@ -0,0 +1,85 @@ +--- + +- hosts: all + become: true + become_method: sudo + vars: + modb_version: "{{ lookup('vars', 'extra_modb_version', default=lookup('env','MODB_VERSION') | default('4.4', true) ) }}" + modb_tarball: "{{ lookup('vars', 'extra_modb_tarball', default=lookup('env','MODB_TARBALL') | default('', true) ) }}" + modb_setup: "{{ lookup('vars', 'extra_modb_setup', default=lookup('env','MODB_SETUP') | default('pss', true) ) }}" + modb_container: "{{ lookup('vars', 'extra_modb_container', default=lookup('env','MODB_CONTAINER') | default('MODB', true) ) }}" + pmm_server_ip: "{{ lookup('vars', 'extra_pmm_server_ip', default=lookup('env','PMM_SERVER_IP') | default('127.0.0.1', true) ) }}" + client_version: "{{ lookup('vars', 'extra_client_version', default=lookup('env','CLIENT_VERSION') | default('dev-latest', true) ) }}" + admin_password: "{{ lookup('vars', 'extra_admin_password', default=lookup('env','ADMIN_PASSWORD') | default('admin', true) ) }}" + pmm_qa_branch: "{{ lookup('vars', 'extra_pmm_qa_branch', default=lookup('env','PMM_QA_GIT_BRANCH') | default('v3', true) ) }}" + + tasks: + - name: cleanup container for client and DB setup + shell: > + docker ps -a --filter "name={{ modb_container }}" | grep -q . 
&& docker stop {{ modb_container }} && docker rm -fv {{ modb_container }} + ignore_errors: true + tags: + - cleanup + + - name: Create pmm-qa network if not exist + shell: docker network create pmm-qa + ignore_errors: true + + - name: Prepare Container for modb + shell: > + docker run -d --name={{ modb_container }} + -p 27017:27017 + phusion/baseimage:focal-1.2.0 + + - name: Copy all required Artifacts to the docker modb_container + shell: "{{ item }}" + with_items: + - docker cp ./mlaunch_modb_setup.sh {{ modb_container }}:/ + - docker cp ./pmm3-client-setup.sh {{ modb_container }}:/ + - docker exec {{ modb_container }} apt-get update + - docker exec {{ modb_container }} apt-get -y install wget curl git gnupg2 lsb-release jq python3 pip + + - name: Install required software's to the docker modb_container + shell: "{{ item }}" + with_items: + - docker exec {{ modb_container }} python3 -m pip install --upgrade pip + - docker exec {{ modb_container }} pip3 install 'mtools[all]' + + - name: Install pmm2-client on the modb_container + shell: "{{ item }}" + with_items: + - docker network connect pmm-qa {{ modb_container }} + - docker exec {{ modb_container }} bash -x ./pmm3-client-setup.sh --pmm_server_ip {{ pmm_server_ip }} --client_version {{ client_version }} --admin_password {{ admin_password }} --use_metrics_mode no + + - name: Setup modb for monitoring + shell: "{{ item }}" + with_items: + - docker exec {{ modb_container }} bash -x ./mlaunch_modb_setup.sh --mongodb_version {{ modb_version }} --mongodb_setup {{ modb_setup }} > setup_modb_{{ modb_version }}_{{ modb_setup }}.log + + - name: Setup Load Running Docker Container + shell: "{{ item }}" + with_items: + - rm -rf ~/modb_{{ modb_version }} || true; mkdir -p ~/modb_{{ modb_version }} + - wget -P ~/modb_{{ modb_version }}/ "https://raw.githubusercontent.com/Percona-Lab/qa-integration/{{pmm_qa_branch}}/pmm_qa/Dockerfile" + - wget -P ~/modb_{{ modb_version }}/ 
"https://raw.githubusercontent.com/Percona-Lab/qa-integration/{{pmm_qa_branch}}/pmm_qa/mongodb_query.php" + - docker build --tag php-db ~/modb_{{ modb_version }}/ > ~/docker-build_mongodb_load_{{ modb_version }}_{{ modb_setup }}.log || true + - docker rm mongodb_load_{{ modb_version }}_{{ modb_setup }} || true + - docker run --rm --name mongodb_load_{{ modb_version }}_{{ modb_setup }} --network=pmm-qa -v $(pwd):/usr/src/myapp -w /usr/src/myapp php-db composer require mongodb/mongodb || true + + - name: Run load on Replica Set Master(PSS) + shell: "{{ item }}" + with_items: + - docker run --name mongodb_load_{{ modb_version }}_{{ modb_setup }} -d -e MONGODB_HOST={{ modb_container }} -e MONGODB_PORT=27017 -e TEST_TARGET_QPS=10 -e TEST_COLLECTION=10 -e TEST_DB=30 --network=pmm-qa -v $(pwd):/usr/src/myapp -w /usr/src/myapp php-db php mongodb_query.php >> setup_modb_{{ modb_version }}_{{ modb_setup }}.log + when: modb_setup == "pss" + + - name: Run load on Replica Set Master(PSA) + shell: "{{ item }}" + with_items: + - docker run --name mongodb_load_{{ modb_version }}_{{ modb_setup }} -d -e MONGODB_HOST={{ modb_container }} -e MONGODB_PORT=27017 -e TEST_TARGET_QPS=10 -e TEST_COLLECTION=10 -e TEST_DB=30 --network=pmm-qa -v $(pwd):/usr/src/myapp -w /usr/src/myapp php-db php mongodb_query.php >> setup_modb_{{ modb_version }}_{{ modb_setup }}.log + when: modb_setup == "psa" + + - name: Run load on Sharded Clusters Master + shell: "{{ item }}" + with_items: + - docker run --name mongodb_load_{{ modb_version }}_{{ modb_setup }} -d -e MONGODB_HOST={{ modb_container }} -e MONGODB_PORT=27017 -e TEST_TARGET_QPS=10 -e TEST_COLLECTION=10 -e TEST_DB=30 --network=pmm-qa -v $(pwd):/usr/src/myapp -w /usr/src/myapp php-db php mongodb_query.php >> setup_modb_{{ modb_version }}_{{ modb_setup }}.log + when: modb_setup == "sharded" \ No newline at end of file diff --git a/pmm_qa/mlaunch_psmdb_setup.sh b/pmm_qa/mlaunch_psmdb_setup.sh new file mode 100755 index 00000000..fc4b274a --- 
/dev/null +++ b/pmm_qa/mlaunch_psmdb_setup.sh @@ -0,0 +1,136 @@ +#!/bin/sh + +while [ $# -gt 0 ]; do + if [[ $1 == *"--"* ]]; then + param="${1/--/}" + declare $param="$2" + fi + shift +done + +if [ -z "$mongodb_version" ]; then + export mongodb_version=7.0 +fi + +if [ -z "$mongodb_setup" ]; then + export mongodb_setup=pss +fi + +if [ -z "$metrics_mode" ]; then + export metrics_mode=push +fi + +# Mongo user credentials for the replica set cluster +export user="dba" +export pwd="test1234" + +# Install the dependencies +source ~/.bash_profile || true; +apt-get update +apt-get -y install wget curl jq git gnupg2 lsb-release +apt-get -y install libreadline6-dev systemtap-sdt-dev zlib1g-dev libssl-dev libpam0g-dev python-dev bison make flex libipc-run-perl +sleep 10 + +#wget https://raw.githubusercontent.com/Percona-QA/percona-qa/master/mongo_startup.sh +#chmod +x mongo_startup.sh +export SERVICE_RANDOM_NUMBER=$(echo $((1 + $RANDOM % 9999))) + +### Detect latest tarball link for specified mongodb_version: 8.0 | 7.0 | 6.0 | 5.0 | 4.4 | 4.2 at the moment +psmdb_latest=$( wget -q --post-data "version=percona-server-mongodb-${mongodb_version}" https://www.percona.com/products-api.php -O - | grep -oP "(?<=value\=\")[^\"]*" | sort -V | tail -1) +if [[ "$mongodb_version" == "4.4" ]]; then + psmdb_tarball=$(wget -q --post-data "version_files=${psmdb_latest}&software_files=binary" https://www.percona.com/products-api.php -O - | jq -r '.[] | select(.link | contains("sha") | not) | .link' | grep glibc2\.17-minimal) +else + psmdb_tarball=$(wget -q --post-data "version_files=${psmdb_latest}&software_files=binary" https://www.percona.com/products-api.php -O - | jq -r '.[] | select(.link | contains("sha") | not) | .link' | grep jammy-minimal) +fi + +echo "Downloading ${mongodb_version} ..." 
+wget -O percona_server_mongodb.tar.gz ${psmdb_tarball} +tar -xvf percona_server_mongodb.tar.gz + +export extracted_folder_name=$(ls | grep percona-server-mongodb) +echo "Extracted folder name ${extracted_folder_name}" +mv ${extracted_folder_name} psmdb_${mongodb_version} + +## Detect latest tarball link for specified mongodb_version: 7.0 | 6.0 | 5.0 | 4.4 | 4.2 at the moment +psmdb_tarball=$(curl -s "https://www.mongodb.com/try/download/community" | \ + grep -oP "https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-ubuntu2004-${mongodb_version//./\\.}\.\d+\.tgz" | \ + sort -V | tail -n 1) + +echo "Downloading ${mongodb_version} from ${psmdb_tarball}..." +wget -O percona_server_mongodb.tar.gz ${psmdb_tarball} +tar -xvf percona_server_mongodb.tar.gz + +export extracted_folder_name=$(ls | grep mongodb-linux) +echo "Extracted folder name ${extracted_folder_name}" +mv ${extracted_folder_name} psmdb_${mongodb_version} + +# TODO: refactor if to match range of versions 6.0+ +if [[ "$mongodb_version" == "6.0" || "$mongodb_version" == "7.0" || "$mongodb_version" == "8.0" ]]; then +### PSMDB 6+ requires "percona-mongodb-mongosh" additionally + if [[ "$mongodb_version" == "8.0" ]]; then + # Use Mongo 7.0 mongosh itself for 8.0 + psmdb_latest=$(wget -q --post-data "version=percona-server-mongodb-7.0" https://www.percona.com/products-api.php -O - | grep -oP "(?<=value\=\")[^\"]*" | sort -V | tail -1) + mongosh_link=$(wget -q --post-data "version_files=${psmdb_latest}&software_files=binary" https://www.percona.com/products-api.php -O - | jq -r '.[] | select(.link | contains("sha") | not) | .link' | grep mongosh || true) + if [ -z "$mongosh_link" ]; then + psmdb_latest=$(wget -q --post-data "version=percona-server-mongodb-6.0" https://www.percona.com/products-api.php -O - | grep -oP "(?<=value\=\")[^\"]*" | sort -V | tail -1) + fi + fi + mongosh_link=$(wget -q --post-data "version_files=${psmdb_latest}&software_files=binary" https://www.percona.com/products-api.php -O - | jq -r 
'.[] | select(.link | contains("sha") | not) | .link' | grep mongosh) + echo "Downloading mongosh ${mongosh_link}..." + wget -O mongosh.tar.gz ${mongosh_link} + tar -xvf mongosh.tar.gz + mv percona-mongodb-mongosh* mongosh + cp mongosh/bin/mongosh ./psmdb_${mongodb_version}/bin/mongo + rm mongosh.tar.gz +fi +rm percona_server_mongodb.tar.gz* + +# For mongodb dependency in Debian +wget http://http.us.debian.org/debian/pool/main/o/openldap/libldap-2.4-2_2.4.47+dfsg-3+deb10u7_amd64.deb +apt install -y ./libldap-2.4-2_2.4.47+dfsg-3+deb10u7_amd64.deb + +if [ "$mongodb_setup" == "pss" ]; then + mlaunch init --bind_ip 0.0.0.0 --binarypath "./psmdb_${mongodb_version}/bin" --replicaset --name rs1 --nodes 3 + sleep 20 + pmm-admin remove mongodb mongodb_rs1_1_${SERVICE_RANDOM_NUMBER} || true; pmm-admin add mongodb --enable-all-collectors --cluster mongodb_node_cluster --replication-set=rs1 --environment=mongodb_rs_node --metrics-mode=$metrics_mode mongodb_rs1_1_${SERVICE_RANDOM_NUMBER} --debug 127.0.0.1:27017 + sleep 2 + pmm-admin remove mongodb mongodb_rs1_2_${SERVICE_RANDOM_NUMBER} || true; pmm-admin add mongodb --enable-all-collectors --cluster mongodb_node_cluster --replication-set=rs1 --environment=mongodb_rs_node --metrics-mode=$metrics_mode mongodb_rs1_2_${SERVICE_RANDOM_NUMBER} --debug 127.0.0.1:27018 + sleep 2 + pmm-admin remove mongodb mongodb_rs1_3_${SERVICE_RANDOM_NUMBER} || true; pmm-admin add mongodb --enable-all-collectors --cluster mongodb_node_cluster --replication-set=rs1 --environment=mongodb_rs_node --metrics-mode=$metrics_mode mongodb_rs1_3_${SERVICE_RANDOM_NUMBER} --debug 127.0.0.1:27019 + sleep 20 +fi + +if [ "$mongodb_setup" == "psa" ]; then + mlaunch init --bind_ip 0.0.0.0 --binarypath "./psmdb_${mongodb_version}/bin" --replicaset --name rs1 --nodes 2 --arbiter + sleep 20 + pmm-admin remove mongodb mongodb_rs1_1_${SERVICE_RANDOM_NUMBER} || true; pmm-admin add mongodb --enable-all-collectors --cluster mongodb_node_cluster --replication-set=rs1 
--environment=mongodb_rs_node --metrics-mode=$metrics_mode mongodb_rs1_1_${SERVICE_RANDOM_NUMBER} --debug 127.0.0.1:27017 + sleep 2 + pmm-admin remove mongodb mongodb_rs1_2_${SERVICE_RANDOM_NUMBER} || true; pmm-admin add mongodb --enable-all-collectors --cluster mongodb_node_cluster --replication-set=rs1 --environment=mongodb_rs_node --metrics-mode=$metrics_mode mongodb_rs1_2_${SERVICE_RANDOM_NUMBER} --debug 127.0.0.1:27018 + sleep 2 + pmm-admin remove mongodb mongodb_rs1_3_${SERVICE_RANDOM_NUMBER} || true; pmm-admin add mongodb --enable-all-collectors --cluster mongodb_node_cluster --replication-set=rs1 --environment=mongodb_rs_node --metrics-mode=$metrics_mode mongodb_rs1_3_${SERVICE_RANDOM_NUMBER} --debug 127.0.0.1:27019 + sleep 20 +fi + +if [ "$mongodb_setup" == "sharded" ] || [ "$mongodb_setup" == "shards" ]; then + mlaunch init --bind_ip 0.0.0.0 --binarypath "./psmdb_${mongodb_version}/bin" --replicaset --sharded rs1 rs2 --config 3 + pmm-admin add mongodb --enable-all-collectors --cluster mongodb_node_cluster --environment=mongos_shraded_node mongos_shraded_node_${SERVICE_RANDOM_NUMBER} --metrics-mode=$metrics_mode --debug 127.0.0.1:27017 + sleep 2 + pmm-admin add mongodb --enable-all-collectors --cluster mongodb_node_cluster --replication-set=config --environment=mongodb_config_node mongodb_config_1_${SERVICE_RANDOM_NUMBER} --metrics-mode=$metrics_mode --debug 127.0.0.1:27024 + sleep 2 + pmm-admin add mongodb --enable-all-collectors --cluster mongodb_node_cluster --replication-set=config --environment=mongodb_config_node mongodb_config_2_${SERVICE_RANDOM_NUMBER} --metrics-mode=$metrics_mode --debug 127.0.0.1:27025 + sleep 2 + pmm-admin add mongodb --enable-all-collectors --cluster mongodb_node_cluster --replication-set=config --environment=mongodb_config_node mongodb_config_3_${SERVICE_RANDOM_NUMBER} --metrics-mode=$metrics_mode --debug 127.0.0.1:27026 + sleep 2 + pmm-admin add mongodb --enable-all-collectors --cluster mongodb_node_cluster 
--replication-set=rs1 --environment=mongodb_rs_node mongodb_rs1_1_${SERVICE_RANDOM_NUMBER} --metrics-mode=$metrics_mode --debug 127.0.0.1:27018 + sleep 2 + pmm-admin add mongodb --enable-all-collectors --cluster mongodb_node_cluster --replication-set=rs1 --environment=mongodb_rs_node mongodb_rs1_2_${SERVICE_RANDOM_NUMBER} --metrics-mode=$metrics_mode --debug 127.0.0.1:27019 + sleep 2 + pmm-admin add mongodb --enable-all-collectors --cluster mongodb_node_cluster --replication-set=rs1 --environment=mongodb_rs_node mongodb_rs1_3_${SERVICE_RANDOM_NUMBER} --metrics-mode=$metrics_mode --debug 127.0.0.1:27020 + sleep 2 + pmm-admin add mongodb --enable-all-collectors --cluster mongodb_node_cluster --replication-set=rs2 --environment=mongodb_rs_node mongodb_rs2_1_${SERVICE_RANDOM_NUMBER} --metrics-mode=$metrics_mode --debug 127.0.0.1:27021 + sleep 2 + pmm-admin add mongodb --enable-all-collectors --cluster mongodb_node_cluster --replication-set=rs2 --environment=mongodb_rs_node mongodb_rs2_2_${SERVICE_RANDOM_NUMBER} --metrics-mode=$metrics_mode --debug 127.0.0.1:27022 + sleep 2 + pmm-admin add mongodb --enable-all-collectors --cluster mongodb_node_cluster --replication-set=rs2 --environment=mongodb_rs_node mongodb_rs2_3_${SERVICE_RANDOM_NUMBER} --metrics-mode=$metrics_mode --debug 127.0.0.1:27023 + sleep 20 +fi diff --git a/pmm_qa/mlaunch_psmdb_setup.yml b/pmm_qa/mlaunch_psmdb_setup.yml new file mode 100644 index 00000000..7b6fa354 --- /dev/null +++ b/pmm_qa/mlaunch_psmdb_setup.yml @@ -0,0 +1,85 @@ +--- + +- hosts: all + become: true + become_method: sudo + vars: + psmdb_version: "{{ lookup('vars', 'extra_psmdb_version', default=lookup('env','PSMDB_VERSION') | default('4.4', true) ) }}" + psmdb_tarball: "{{ lookup('vars', 'extra_psmdb_tarball', default=lookup('env','PSMDB_TARBALL') | default('', true) ) }}" + psmdb_setup: "{{ lookup('vars', 'extra_psmdb_setup', default=lookup('env','PSMDB_SETUP') | default('pss', true) ) }}" + psmdb_container: "{{ lookup('vars', 
'extra_psmdb_container', default=lookup('env','PSMDB_CONTAINER') | default('psmdb', true) ) }}" + pmm_server_ip: "{{ lookup('vars', 'extra_pmm_server_ip', default=lookup('env','PMM_SERVER_IP') | default('127.0.0.1', true) ) }}" + client_version: "{{ lookup('vars', 'extra_client_version', default=lookup('env','CLIENT_VERSION') | default('dev-latest', true) ) }}" + admin_password: "{{ lookup('vars', 'extra_admin_password', default=lookup('env','ADMIN_PASSWORD') | default('admin', true) ) }}" + pmm_qa_branch: "{{ lookup('vars', 'extra_pmm_qa_branch', default=lookup('env','PMM_QA_GIT_BRANCH') | default('v3', true) ) }}" + + tasks: + - name: cleanup container for client and DB setup + shell: > + docker ps -a --filter "name={{ psmdb_container }}" | grep -q . && docker stop {{ psmdb_container }} && docker rm -fv {{ psmdb_container }} + ignore_errors: true + tags: + - cleanup + + - name: Create pmm-qa network if not exist + shell: docker network create pmm-qa + ignore_errors: true + + - name: Prepare Container for PSMDB + shell: > + docker run -d --name={{ psmdb_container }} + -p 27017:27017 + phusion/baseimage:jammy-1.0.1 + + - name: Copy all required Artifacts to the docker psmdb_container + shell: "{{ item }}" + with_items: + - docker cp ./mlaunch_psmdb_setup.sh {{ psmdb_container }}:/ + - docker cp ./pmm3-client-setup.sh {{ psmdb_container }}:/ + - docker exec {{ psmdb_container }} apt-get update + - docker exec {{ psmdb_container }} apt-get -y install wget curl git gnupg2 lsb-release jq python3 pip + + - name: Install required software's to the docker psmdb_container + shell: "{{ item }}" + with_items: + - docker exec {{ psmdb_container }} python3 -m pip install --upgrade pip + - docker exec {{ psmdb_container }} pip3 install 'mtools[all]' + + - name: Install pmm2-client on the psmdb_container + shell: "{{ item }}" + with_items: + - docker network connect pmm-qa {{ psmdb_container }} + - docker exec {{ psmdb_container }} bash -x ./pmm3-client-setup.sh --pmm_server_ip 
{{ pmm_server_ip }} --client_version {{ client_version }} --admin_password {{ admin_password }} --use_metrics_mode no + + - name: Setup psmdb for monitoring + shell: "{{ item }}" + with_items: + - docker exec {{ psmdb_container }} bash -x ./mlaunch_psmdb_setup.sh --mongodb_version {{ psmdb_version }} --mongodb_setup {{ psmdb_setup }} > setup_psmdb_{{ psmdb_version }}_{{ psmdb_setup }}.log + + - name: Setup Load Running Docker Container + shell: "{{ item }}" + with_items: + - rm -rf ~/psmdb_{{ psmdb_version }} || true; mkdir -p ~/psmdb_{{ psmdb_version }} + - wget -P ~/psmdb_{{ psmdb_version }}/ "https://raw.githubusercontent.com/Percona-Lab/qa-integration/{{pmm_qa_branch}}/pmm_qa/Dockerfile" + - wget -P ~/psmdb_{{ psmdb_version }}/ "https://raw.githubusercontent.com/Percona-Lab/qa-integration/{{pmm_qa_branch}}/pmm_qa/mongodb_query.php" + - docker build --tag php-db ~/psmdb_{{ psmdb_version }}/ > ~/docker-build_mongodb_load_{{ psmdb_version }}_{{ psmdb_setup }}.log || true + - docker rm mongodb_load_{{ psmdb_version }}_{{ psmdb_setup }} || true + - docker run --rm --name mongodb_load_{{ psmdb_version }}_{{ psmdb_setup }} --network=pmm-qa -v $(pwd):/usr/src/myapp -w /usr/src/myapp php-db composer require mongodb/mongodb || true + + - name: Run load on Replica Set Master(PSS) + shell: "{{ item }}" + with_items: + - docker run --name mongodb_load_{{ psmdb_version }}_{{ psmdb_setup }} -d -e MONGODB_HOST={{ psmdb_container }} -e MONGODB_PORT=27017 -e TEST_TARGET_QPS=10 -e TEST_COLLECTION=10 -e TEST_DB=30 --network=pmm-qa -v $(pwd):/usr/src/myapp -w /usr/src/myapp php-db php mongodb_query.php >> setup_psmdb_{{ psmdb_version }}_{{ psmdb_setup }}.log + when: psmdb_setup == "pss" + + - name: Run load on Replica Set Master(PSA) + shell: "{{ item }}" + with_items: + - docker run --name mongodb_load_{{ psmdb_version }}_{{ psmdb_setup }} -d -e MONGODB_HOST={{ psmdb_container }} -e MONGODB_PORT=27017 -e TEST_TARGET_QPS=10 -e TEST_COLLECTION=10 -e TEST_DB=30 --network=pmm-qa -v 
$(pwd):/usr/src/myapp -w /usr/src/myapp php-db php mongodb_query.php >> setup_psmdb_{{ psmdb_version }}_{{ psmdb_setup }}.log + when: psmdb_setup == "psa" + + - name: Run load on Sharded Clusters Master + shell: "{{ item }}" + with_items: + - docker run --name mongodb_load_{{ psmdb_version }}_{{ psmdb_setup }} -d -e MONGODB_HOST={{ psmdb_container }} -e MONGODB_PORT=27017 -e TEST_TARGET_QPS=10 -e TEST_COLLECTION=10 -e TEST_DB=30 --network=pmm-qa -v $(pwd):/usr/src/myapp -w /usr/src/myapp php-db php mongodb_query.php >> setup_psmdb_{{ psmdb_version }}_{{ psmdb_setup }}.log + when: psmdb_setup == "sharded" \ No newline at end of file diff --git a/pmm_qa/mongodb_query.php b/pmm_qa/mongodb_query.php new file mode 100644 index 00000000..494e8a7f --- /dev/null +++ b/pmm_qa/mongodb_query.php @@ -0,0 +1,108 @@ + false]); + +function run_query($db,$collection) +{ + global $client; + $collectionName = "beers" . $collection; + $dbName = "demo" . $db; + $collectionObj = $client->$dbName->$collectionName; + //read + $cursor = $collectionObj->find(); + //update + $collectionObj->updateMany(array("a"=>"a"), + array('$set'=>array("a"=>"a_u"))); + + //count + $collectionObj->count(); + + //distinct + $collectionObj->distinct("a"); + + //aggregate + $collectionObj->aggregate([array('$match' =>array("a"=>"a_u"))]); + + //findAndModify + $collectionObj->findOneAndUpdate(array("a"=>"a_u"), array('$set'=>array("a"=>"a_m"))); + + //delete + $collectionObj->deleteOne(array("a"=>"a_m")); + //create + $result = $collectionObj->insertOne( [ 'a' => 'a', 'b' => 'B', 'c' => $i ] ); +} + +echo("Running Queries...\n"); + +//lets create all db's and data +for($i = 1; $i <= $db; $i++) +{ + $dbName = "demo" . $i; + for ($j = 1; $j <= $collection; $j++) + { + $collectionName = "beers" . 
$j; + $collectionObj = $client->$dbName->$collectionName; + $result = $collectionObj->insertOne( [ 'a' => 'a', 'b' => 'B', 'c' => $j ] ); + echo "Inserted with Object ID '{$result->getInsertedId()}'"; + } +} + +/* How long we want target to take */ +$target_round_time=1/$target_qps; + +while(1) +{ + $start=microtime(1); + $dbNumber=skewed_rnd(1,$db); + $collectionNumber=skewed_rnd(1,$collection); + run_query($dbNumber,$collectionNumber); + $end=microtime(1); + $round_time=$end-$start; +# echo("Round Took: $round_time\n"); + if($round_time<$target_round_time) /* Went faster than needed */ + { + $sleep=($target_round_time-$round_time)*1000000; +# echo("Sleeping $sleep microseconds\n"); + usleep($sleep); + } +} +?> diff --git a/pmm_qa/ms_pmm_setup.yml b/pmm_qa/ms_pmm_setup.yml new file mode 100644 index 00000000..62915e54 --- /dev/null +++ b/pmm_qa/ms_pmm_setup.yml @@ -0,0 +1,73 @@ +--- +# This playbook does following: +# enables Percona testing repository + +- hosts: all + become: true + become_method: sudo + vars: + ms_version: "{{ lookup('vars', 'extra_ms_version', default=lookup('env','MS_VERSION') | default('8.0', true) ) }}" + ms_tarball: "{{ lookup('vars', 'extra_ms_tarball', default=lookup('env','MS_TARBALL') | default('', true) ) }}" + ms_container: "{{ lookup('vars', 'extra_ms_container', default=lookup('env','MS_CONTAINER') | default('ms_container', true) ) }}" + query_source: "{{ lookup('vars', 'extra_query_source', default=lookup('env','QUERY_SOURCE') | default('slowlog', true) ) }}" + pmm_server_ip: "{{ lookup('vars', 'extra_pmm_server_ip', default=lookup('env','PMM_SERVER_IP') | default('127.0.0.1', true) ) }}" + client_version: "{{ lookup('vars', 'extra_client_version', default=lookup('env','CLIENT_VERSION') | default('dev-latest', true) ) }}" + admin_password: "{{ lookup('vars', 'extra_admin_password', default=lookup('env','ADMIN_PASSWORD') | default('admin', true) ) }}" + pmm_qa_branch: "{{ lookup('vars', 'extra_pmm_qa_branch', 
default=lookup('env','PMM_QA_GIT_BRANCH') | default('main', true) ) }}" + number_of_nodes: "{{ lookup('vars', 'extra_number_of_nodes', default=lookup('env','MS_NODES') | default('1', true) ) }}" + group_replication: "{{ lookup('vars', 'extra_group_replication', default=lookup('env','GROUP_REPLICATION') | default('', true) ) }}" + + tasks: + - name: cleanup container for client and DB setup + shell: > + docker ps -a --filter "name={{ ms_container }}" | grep -q . && docker stop {{ ms_container }} && docker rm -fv {{ ms_container }} + ignore_errors: true + tags: + - cleanup + + - name: Create pmm-qa network if not exist + shell: docker network create pmm-qa + ignore_errors: true + + - name: Prepare Container for MySQL + shell: > + docker run -d --name={{ ms_container }} + -p 3318:3308 + -v /tmp:/tmp + phusion/baseimage:jammy-1.0.1 + + - name: Copy all required Artifacts to the docker ms_container + shell: "{{ item }}" + with_items: + - docker exec {{ ms_container }} mkdir -p artifacts + - docker cp ./client_container_ms_setup.sh {{ ms_container }}:/ + - docker cp ./pmm3-client-setup.sh {{ ms_container }}:/ + + - name: Get Product tarball URL based on the version + shell: awk -F'# ' '/-{{ ms_version | regex_escape }}/ && /ms/ {print $3; exit}' product_version_download_helper + register: tarball + when: lookup('env', 'MS_TARBALL') == '' + + - name: Set Product URL if environment or paramater are not defined + set_fact: + ms_tarball: "{{tarball.stdout | default(ms_tarball,true)}}" + + - name: Setup libraries required inside the container + shell: "{{ item }}" + with_items: + - docker exec {{ ms_container }} apt-get update + - docker exec {{ ms_container }} apt-get -y install wget curl git gnupg2 lsb-release libncurses5 + - docker exec {{ ms_container }} apt-get -y install libaio1 libaio-dev libnuma-dev socat libtirpc-dev + - docker exec {{ ms_container }} apt install -y sysbench + + - name: Install pmm2-client on the ms_container + shell: "{{ item }}" + with_items: + - 
docker network connect pmm-qa {{ ms_container }} + - docker exec {{ ms_container }} bash -x ./pmm3-client-setup.sh --pmm_server_ip {{ pmm_server_ip }} --client_version {{ client_version }} --admin_password {{ admin_password }} --use_metrics_mode no + + - name: Execute Setup script inside the MySQL ms_container + shell: "{{ item }}" + with_items: + - docker exec {{ ms_container }} bash -xe ./client_container_ms_setup.sh --ms_version {{ ms_version }} --ms_tarball {{ ms_tarball }} --number_of_nodes {{ number_of_nodes }} --query_source {{ query_source }} --group_replication {{ group_replication }} > setup_ms_{{ ms_version }}.log + diff --git a/pmm_qa/pdpgsql_pgsm_setup.yml b/pmm_qa/pdpgsql_pgsm_setup.yml new file mode 100644 index 00000000..92668c69 --- /dev/null +++ b/pmm_qa/pdpgsql_pgsm_setup.yml @@ -0,0 +1,101 @@ +--- +# This playbook does following: +# enables Percona testing repository + +- hosts: all + become: true + become_method: sudo + vars: + pdpgsql_version: "{{ lookup('vars', 'extra_pdpgsql_version', default=lookup('env','PDPGSQL_VERSION') | default('15', true) ) }}" + pdpgsql_pgsm_container: "{{ lookup('vars', 'extra_pdpgsql_pgsm_container', default=lookup('env','PDPGSQL_PGSM_CONTAINER') | default('pdpgsql_pgsm', true) ) }}" + pmm_server_ip: "{{ lookup('vars', 'extra_pmm_server_ip', default=lookup('env','PMM_SERVER_IP') | default('127.0.0.1', true) ) }}" + client_version: "{{ lookup('vars', 'extra_client_version', default=lookup('env','CLIENT_VERSION') | default('3-dev-latest', true) ) }}" + admin_password: "{{ lookup('vars', 'extra_admin_password', default=lookup('env','ADMIN_PASSWORD') | default('admin', true) ) }}" + pmm_qa_branch: "{{ lookup('vars', 'extra_pmm_qa_branch', default=lookup('env','PMM_QA_GIT_BRANCH') | default('main', true) ) }}" + pgstat_monitor_branch: "{{ lookup('vars', 'extra_pgstat_monitor_branch', default=lookup('env','PGSTAT_MONITOR_BRANCH') | default('2.1.0', true) ) }}" + pgstat_monitor_repo: "{{ lookup('vars', 
'extra_pgstat_monitor_repo', default=lookup('env','PGSTAT_MONITOR_REPO') | default('percona/pg_stat_monitor', true) ) }}" + use_socket: "{{ lookup('vars', 'extra_pdpgsql_version', default=lookup('env','USE_SOCKET') | default('', true) ) }}" + pdpgsql_pgsm_port: "{{ lookup('vars', 'extra_pdpgsql_port', default=lookup('env','PDPGSQL_PGSM_PORT') | default(5447, true) ) }}" + distribution: "{{ lookup('vars', 'extra_pdpgsql_distribution', default=lookup('env','DISTRIBUTION') | default('PPG', true) ) }}" + + tasks: + - name: cleanup container for client and DB setup + shell: > + docker ps -a --filter "name={{ pdpgsql_pgsm_container }}" | grep -q . && docker stop {{ pdpgsql_pgsm_container }} && docker rm -fv {{ pdpgsql_pgsm_container }} + ignore_errors: true + tags: + - cleanup + + - name: Create pmm-qa network if not exist + shell: docker network create pmm-qa + ignore_errors: true + + - name: Prepare Container for PostgreSQL + shell: > + sudo docker run -d --name={{ pdpgsql_pgsm_container }} + -p {{ pdpgsql_pgsm_port }}:5432 + phusion/baseimage:jammy-1.0.1 + + - name: Copy all required Artifacts to the docker pdpgsql_pgsm_container + shell: "{{ item }}" + with_items: + - docker exec {{ pdpgsql_pgsm_container }} mkdir -p artifacts + - docker cp ./pg_stat_monitor_setup.sh {{ pdpgsql_pgsm_container }}:/ + - docker cp ./pgsm_run_queries.sh {{ pdpgsql_pgsm_container }}:/ + - docker cp ./pmm3-client-setup.sh {{ pdpgsql_pgsm_container }}:/ + + - name: Execute Setup script inside the pdpgsql pdpgsql_pgsm_container + shell: "{{ item }}" + with_items: + - docker exec {{ pdpgsql_pgsm_container }} bash -xe ./pg_stat_monitor_setup.sh --distribution {{ distribution }} --pgsql_version {{ pdpgsql_version }} --pgstat_monitor_branch {{ pgstat_monitor_branch }} --pgstat_monitor_repo {{ pgstat_monitor_repo }} > setup_{{ pdpgsql_pgsm_container }}.log + + - name: Install pmm2-client on the pdpgsql_pgsm_container + shell: "{{ item }}" + with_items: + - docker network connect pmm-qa {{ 
pdpgsql_pgsm_container }} + - docker exec {{ pdpgsql_pgsm_container }} bash -x ./pmm3-client-setup.sh --pmm_server_ip {{ pmm_server_ip }} --client_version {{ client_version }} --admin_password {{ admin_password }} --use_metrics_mode no + + - name: Set Random Number Fact + set_fact: + random_number: "{{ (10000 | random) | int }}" + + - name: Remove pdpgsql service if already added previously + shell: "{{ item }}" + with_items: + - docker exec {{ pdpgsql_pgsm_container }} bash -c 'source ~/.bash_profile || true; pmm-admin remove postgresql {{ pdpgsql_pgsm_container }}_service__{{ random_number }}' + ignore_errors: true + + - name: Add pdpgsql_pgsm for monitoring + shell: "{{ item }}" + with_items: + - docker exec {{ pdpgsql_pgsm_container }} bash -c 'source ~/.bash_profile || true; pmm-admin list' + - docker exec {{ pdpgsql_pgsm_container }} bash -c 'source ~/.bash_profile || true; pmm-admin add postgresql --username=pmm --password=pmm --query-source=pgstatmonitor {{ pdpgsql_pgsm_container }}_service_{{ random_number }}' + + - name: Add pdpgsql_pgsm for monitoring + shell: "{{ item }}" + with_items: + - docker exec {{ pdpgsql_pgsm_container }} bash -c 'source ~/.bash_profile || true; pmm-admin list' + - docker exec {{ pdpgsql_pgsm_container }} bash -c 'source ~/.bash_profile || true; pmm-admin add postgresql --username=pmm --password=pmm --query-source=pgstatmonitor --socket=/var/run/postgresql socket_{{ pdpgsql_pgsm_container }}_service_{{ random_number }}' + + - name: Run queries for generating data + shell: "{{ item }}" + with_items: + - docker exec {{ pdpgsql_pgsm_container }} bash ./pgsm_run_queries.sh & + + - name: Copy a file into the container + community.docker.docker_container_copy_into: + container: "{{ pdpgsql_pgsm_container }}" + path: ./scripts/pgsql_load.sql + container_path: /pgsql_load.sql + + - name: Create database if it doesn't exist + community.docker.docker_container_exec: + container: "{{ pdpgsql_pgsm_container }}" + command: > + psql -U 
postgres -c "CREATE DATABASE school;" + + - name: Run SQL script using docker exec + community.docker.docker_container_exec: + container: "{{ pdpgsql_pgsm_container }}" + command: > + psql -U postgres -d school -f /pgsql_load.sql diff --git a/pmm_qa/percona-distribution-postgresql/data/background_load.sql b/pmm_qa/percona-distribution-postgresql/data/background_load.sql new file mode 100644 index 00000000..d0dbb9a6 --- /dev/null +++ b/pmm_qa/percona-distribution-postgresql/data/background_load.sql @@ -0,0 +1,25 @@ +-- Step 1: Create a test table +CREATE TABLE IF NOT EXISTS test_users ( + id SERIAL PRIMARY KEY, + name VARCHAR(100), + email VARCHAR(150), + created_at TIMESTAMP DEFAULT NOW(), + bio TEXT +); + +-- Step 2: Insert 10,000 rows +INSERT INTO test_users (name, email, bio) +SELECT + 'User_' || gs::TEXT AS name, + 'user_' || gs::TEXT || '@example.com' AS email, + 'This is a generated bio for user #' || gs::TEXT +FROM generate_series(1, 10000) AS gs; + +-- Step 3: Query the data +SELECT * FROM test_users; + +-- Step 4: Delete the data +DELETE FROM test_users; + +-- Step 5: Drop the table +DROP TABLE test_users; diff --git a/pmm_qa/percona-distribution-postgresql/data/etcd.conf.yaml.j2 b/pmm_qa/percona-distribution-postgresql/data/etcd.conf.yaml.j2 new file mode 100644 index 00000000..6ecb4235 --- /dev/null +++ b/pmm_qa/percona-distribution-postgresql/data/etcd.conf.yaml.j2 @@ -0,0 +1,12 @@ +name: 'node{{item}}' +data-dir: /var/lib/etcd +initial-cluster-token: PostgreSQL_HA_Cluster_1 +initial-cluster-state: new + +listen-peer-urls: http://0.0.0.0:2380 +initial-advertise-peer-urls: http://pdpgsql_pmm_patroni_{{ pg_version }}_{{ item }}:2380 + +listen-client-urls: http://0.0.0.0:2379 +advertise-client-urls: http://pdpgsql_pmm_patroni_{{ pg_version }}_{{ item }}:2379 + +initial-cluster: node1=http://pdpgsql_pmm_patroni_{{ pg_version }}_1:2380,node2=http://pdpgsql_pmm_patroni_{{ pg_version }}_2:2380,node3=http://pdpgsql_pmm_patroni_{{ pg_version }}_3:2380 diff --git 
a/pmm_qa/percona-distribution-postgresql/data/patroni.yml.j2 b/pmm_qa/percona-distribution-postgresql/data/patroni.yml.j2 new file mode 100644 index 00000000..13ec02f5 --- /dev/null +++ b/pmm_qa/percona-distribution-postgresql/data/patroni.yml.j2 @@ -0,0 +1,83 @@ +scope: patroni_cls +namespace: /var/lib/pgsql/config/ +name: pg_node_{{ item }} + +restapi: + listen: 0.0.0.0:8008 + connect_address: "pdpgsql_pmm_patroni_{{ pg_version }}_{{ item }}:8008" + +etcd3: + host: "pdpgsql_pmm_patroni_{{ pg_version }}_1:2379" + +bootstrap: + dcs: + ttl: 30 + loop_wait: 10 + retry_timeout: 10 + maximum_lag_on_failover: 1048576 + + postgresql: + use_pg_rewind: true + use_slots: true + parameters: + shared_preload_libraries: 'pg_stat_monitor' + wal_level: replica + hot_standby: "on" + wal_keep_size: 128MB + max_wal_senders: 10 + max_replication_slots: 10 + wal_log_hints: "on" + archive_mode: "on" + archive_timeout: 600s + archive_command: "cp -f %p /home/postgres/archived/%f" + pg_hba: + - host replication replicator 127.0.0.1/32 md5 + - host replication replicator 172.18.0.0/16 md5 + - host all all 0.0.0.0/0 md5 + - local all postgres trust + + initdb: + - encoding: UTF8 + - data-checksums + - waldir: /pg_wal + - wal-segsize=512 + + users: + admin: + password: admin + options: + - createrole + - createdb + +postgresql: + listen: 0.0.0.0:5432 + connect_address: "pdpgsql_pmm_patroni_{{ pg_version }}_{{ item }}:5432" + data_dir: /var/lib/pgsql/{{ pg_version}}/data + bin_dir: /usr/pgsql-{{ pg_version }}/bin + pgpass: /tmp/pgpass + + authentication: + replication: + username: replicator + password: replPasswd + superuser: + username: postgres + password: postgres + + create_replica_methods: + - pgbackrest + - basebackup + + pgbackrest: + command: pgbackrest --stanza=patroni_backup restore --type=none + keep_data: true + no_params: true + + basebackup: + checkpoint: fast + +tags: + nofailover: false + noloadbalance: false + clonefrom: false + nosync: false diff --git 
a/pmm_qa/percona-distribution-postgresql/data/pg_hba.conf.j2 b/pmm_qa/percona-distribution-postgresql/data/pg_hba.conf.j2 new file mode 100644 index 00000000..8488d8cc --- /dev/null +++ b/pmm_qa/percona-distribution-postgresql/data/pg_hba.conf.j2 @@ -0,0 +1,3 @@ +host replication {{ replication_user }} 0.0.0.0/0 md5 +host all all 0.0.0.0/0 md5 +local all postgres trust diff --git a/pmm_qa/percona-distribution-postgresql/data/pg_hba_replica.conf.j2 b/pmm_qa/percona-distribution-postgresql/data/pg_hba_replica.conf.j2 new file mode 100644 index 00000000..8488d8cc --- /dev/null +++ b/pmm_qa/percona-distribution-postgresql/data/pg_hba_replica.conf.j2 @@ -0,0 +1,3 @@ +host replication {{ replication_user }} 0.0.0.0/0 md5 +host all all 0.0.0.0/0 md5 +local all postgres trust diff --git a/pmm_qa/percona-distribution-postgresql/data/postgres-replica.conf b/pmm_qa/percona-distribution-postgresql/data/postgres-replica.conf new file mode 100644 index 00000000..3336ed77 --- /dev/null +++ b/pmm_qa/percona-distribution-postgresql/data/postgres-replica.conf @@ -0,0 +1,14 @@ +hba_file = '/etc/postgresql/pg_hba.conf' +shared_preload_libraries = 'pg_stat_monitor' +track_activity_query_size=2048 +track_io_timing=ON +max_connections=1000 +listen_addresses = '*' +pg_stat_monitor.pgsm_enable_query_plan = 'yes' +pg_stat_monitor.pgsm_query_max_len = 2048 +pg_stat_monitor.pgsm_normalized_query=1 +pg_stat_monitor.pgsm_enable_query_plan=1 +log_connections = on +log_disconnections = on +log_replication_commands = on +log_statement = 'all' diff --git a/pmm_qa/percona-distribution-postgresql/data/postgresql-primary.conf b/pmm_qa/percona-distribution-postgresql/data/postgresql-primary.conf new file mode 100644 index 00000000..eb09610b --- /dev/null +++ b/pmm_qa/percona-distribution-postgresql/data/postgresql-primary.conf @@ -0,0 +1,16 @@ +wal_level = logical +max_wal_senders = 10 +wal_keep_size = 64MB +hot_standby = on +listen_addresses = '*' +hba_file = '/etc/postgresql/pg_hba.conf' 
+shared_preload_libraries = 'pg_stat_monitor' +track_activity_query_size=2048 +track_io_timing=ON +max_connections=1000 +listen_addresses = '*' +pg_stat_monitor.pgsm_enable_query_plan = 'yes' +log_connections = on +log_disconnections = on +log_replication_commands = on +log_statement = 'all' diff --git a/pmm_qa/percona-distribution-postgresql/percona-distribution-postgres-setup.yml b/pmm_qa/percona-distribution-postgresql/percona-distribution-postgres-setup.yml new file mode 100644 index 00000000..ce64aed4 --- /dev/null +++ b/pmm_qa/percona-distribution-postgresql/percona-distribution-postgres-setup.yml @@ -0,0 +1,36 @@ +--- +# Percona Distribution Postgresql Replication +- name: Setup Postgresql replication + hosts: localhost + connection: local + gather_facts: yes + vars: + pg_version: "{{ lookup('env', 'PGSQL_VERSION') | default('17', true) }}" + replication_user: "repl_user" + replication_password: "GRgrO9301RuF" + root_password: "GRgrO9301RuF" + pdpgsql_port: 5432 + nodes_count: "{{ (lookup('env', 'NODES_COUNT') | default('2', true)) | int }}" + network_name: "pmm-qa" + data_dir: "{{ lookup('env', 'HOME') }}/pgsql_cluster_data" + pmm_server_ip: "{{ lookup('vars', 'extra_pmm_server_ip', default=lookup('env','PMM_SERVER_IP') | default('127.0.0.1', true) ) }}" + client_version: "{{ lookup('vars', 'extra_client_version', default=lookup('env','CLIENT_VERSION') | default('3-dev-latest', true) ) }}" + admin_password: "{{ lookup('vars', 'extra_admin_password', default=lookup('env','ADMIN_PASSWORD') | default('admin', true) ) }}" + query_source: "{{ lookup('env', 'QUERY_SOURCE') | default('pgstatements', true) }}" + metrics_mode: "auto" + setup_type: "{{ lookup('env', 'SETUP_TYPE') }}" + random_service_name_value: "" + docker_repo: "percona/percona-distribution-postgresql" + + tasks: + - name: Display binary log status for primary + debug: + msg: "{{ setup_type }}" + + - name: Install Percona Distribution for Postgres with Replication + include_tasks: 
tasks/percona-distribution-postgres-replication-setup.yml + when: setup_type == "replication" + + - name: Install Percona Distribution for Postgres with Patroni replication + include_tasks: tasks/percona-distribution-postgres-patroni-setup.yml + when: setup_type == "patroni" diff --git a/pmm_qa/percona-distribution-postgresql/tasks/install_pg_stat-monitor.yml b/pmm_qa/percona-distribution-postgresql/tasks/install_pg_stat-monitor.yml new file mode 100644 index 00000000..146f17e7 --- /dev/null +++ b/pmm_qa/percona-distribution-postgresql/tasks/install_pg_stat-monitor.yml @@ -0,0 +1,49 @@ +- name: Detect OS inside the container + community.docker.docker_container_exec: + container: "{{ container_name }}_1" + command: cat /etc/os-release + register: container_os_info + +- name: Set distro family (debian/rhel) + set_fact: + distro_family: >- + {{ + ( + 'debian' if 'debian' in container_os_info.stdout | lower else + 'rhel' if 'rhel' in container_os_info.stdout | lower or 'centos' in container_os_info.stdout | lower or 'fedora' in container_os_info.stdout | lower + else 'unknown' + ) | trim + }} + +- name: Install dependencies inside Debian-family container + community.docker.docker_container_exec: + container: "{{ container_name }}_{{ item }}" + command: > + sh -c ' + apt-get update && + apt-get install percona-pg-stat-monitor{{ pg_version }} + ' + user: "root" + when: distro_family == "debian" + loop: "{{ range(1, nodes_count | int + 1) | list }}" + +- name: Install dependencies inside RHEL-family container + community.docker.docker_container_exec: + container: "{{ container_name }}_{{ item }}" + command: > + sh -c ' + microdnf install percona-pg-stat-monitor{{ pg_version }} + ' + user: "root" + when: distro_family == "rhel" + loop: "{{ range(1, nodes_count | int + 1) | list }}" + +- name: Create pg_stat_statements extension + community.docker.docker_container_exec: + container: "{{ container_name }}_1" + user: postgres + command: > + psql -U postgres -d 'postgres' -c " 
+ CREATE EXTENSION IF NOT EXISTS pg_stat_monitor; + SELECT pg_stat_monitor_version(); + " diff --git a/pmm_qa/percona-distribution-postgresql/tasks/percona-distribution-postgres-patroni-setup.yml b/pmm_qa/percona-distribution-postgresql/tasks/percona-distribution-postgres-patroni-setup.yml new file mode 100644 index 00000000..fdf71fc0 --- /dev/null +++ b/pmm_qa/percona-distribution-postgresql/tasks/percona-distribution-postgres-patroni-setup.yml @@ -0,0 +1,199 @@ +- name: Set correct nodes count for patroni setup + set_fact: + nodes_count: 3 + when: nodes_count | int < 3 +- name: Set external facing port in patroni setup + set_fact: + pdpgsql_port: 6432 + +- name: Remove old data folders + shell: 'rm -fr {{ data_dir }}' + +- name: Create data directories + file: + path: "{{ data_dir }}/node{{ item }}/data" + state: directory + mode: '0755' + loop: "{{ range(1, nodes_count | int + 1) | list }}" + +- name: Fix permissions on data directory + become: true + file: + path: "{{ data_dir }}/node{{ item }}/data" + owner: 1001 + group: 1001 + recurse: yes + loop: "{{ range(1, nodes_count | int + 1) | list }}" + +- name: Generate etcd configuration + template: + src: data/etcd.conf.yaml.j2 + dest: "{{ data_dir }}/node{{ item }}/etcd.conf.yaml" + loop: "{{ range(1, nodes_count | int + 1) | list }}" + +- name: Generate patroni configuration + template: + src: data/patroni.yml.j2 + dest: "{{ data_dir }}/node{{ item }}/patroni.yml" + loop: "{{ range(1, nodes_count | int + 1) | list }}" + +- name: Create Docker network + community.docker.docker_network: + name: "{{ network_name }}" + state: present + ignore_errors: yes + +- name: Start PostgreSQL containers + community.docker.docker_container: + name: "pdpgsql_pmm_patroni_{{ pg_version }}_{{ item }}" + image: oraclelinux:9 + state: started + restart_policy: always + command: sleep infinity + networks: + - name: "{{ network_name }}" + volumes: + - "{{ data_dir }}/node{{ item }}/data:/data/db" + - "{{ data_dir }}/node{{ item 
}}/etcd.conf.yaml:/etcd.conf.yaml:ro" + - "{{ data_dir }}/node{{ item }}/patroni.yml:/etc/patroni/patroni.yml:ro" + ports: + - "{{ pdpgsql_port + item - 1 }}:5432" + loop: "{{ range(1, nodes_count | int + 1) | list }}" + +- name: Install required packages in Oracle Linux container + community.docker.docker_container_exec: + container: "pdpgsql_pmm_patroni_{{ pg_version }}_{{ item }}" + user: root + command: > + /bin/sh -c ' + dnf config-manager --set-enabled ol9_codeready_builder + dnf install perl-IPC-Run -y + dnf install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm + dnf config-manager --set-enabled crb + dnf install -y python3-pip python3-devel binutils python3-click + ' + loop: "{{ range(1, nodes_count | int + 1) | list }}" + +- name: Install Percona Distribution for PostgresSQL + community.docker.docker_container_exec: + container: "pdpgsql_pmm_patroni_{{ pg_version }}_{{ item }}" + user: root + command: > + /bin/sh -c ' + dnf module disable postgresql + dnf install -y https://repo.percona.com/yum/percona-release-latest.noarch.rpm + percona-release enable ppg-{{ pg_version }} + dnf install -y percona-pg-stat-monitor{{ pg_version }} percona-postgresql{{ pg_version }}-server percona-patroni etcd python3-python-etcd percona-pgbackrest + patroni --version + ' + loop: "{{ range(1, nodes_count | int + 1) | list }}" + +- name: Start etcd service + community.docker.docker_container_exec: + container: "pdpgsql_pmm_patroni_{{ pg_version }}_{{ item }}" + user: root + command: > + /bin/sh -c ' + rm -rf /var/lib/pgsql/{{ pg_version }}/data/* + ps aux | grep postgres + rm -f /data/db/postmaster.pid + mkdir -p /data/db/logs + mkdir /pg_wal + touch /dev/watchdog + nohup etcd --config-file /etcd.conf.yaml > /data/db/logs/etcd.log 2>&1 & + chown -R postgres:postgres /data/db/logs + chown -R postgres:postgres /dev/watchdog + chown -R postgres:postgres /pg_wal + ' + loop: "{{ range(1, nodes_count | int + 1) | list }}" + + +- name: Wait 5 seconds to 
etcd to start + pause: + seconds: 5 + +- name: Start patroni service + community.docker.docker_container_exec: + container: "pdpgsql_pmm_patroni_{{ pg_version }}_{{ item }}" + user: postgres + command: > + /bin/sh -c ' + nohup patroni /etc/patroni/patroni.yml > /data/db/logs/patroni.log 2>&1 & + ' + loop: "{{ range(1, nodes_count | int + 1) | list }}" + +- name: Install microdnf + community.docker.docker_container_exec: + container: "pdpgsql_pmm_patroni_{{ pg_version }}_{{ item }}" + user: root + command: > + /bin/sh -c ' + dnf install -y microdnf + ' + loop: "{{ range(1, nodes_count | int + 1) | list }}" + +- name: Install pg stat monitor. + include_tasks: ./tasks/install_pg_stat-monitor.yml + vars: + container_name: "pdpgsql_pmm_patroni_{{ pg_version }}" + +- name: Grant pg_monitor to postgres user + community.docker.docker_container_exec: + container: "pdpgsql_pmm_patroni_{{ pg_version }}_1" + user: postgres + command: > + bash -c ' + psql -U postgres -d {{ db_name | default("postgres") }} -c " + GRANT pg_monitor TO postgres; + " + ' + +- name: Install and add pmm client. 
+ include_tasks: ../tasks/install_pmm_client.yml + vars: + container_name: "pdpgsql_pmm_patroni_{{ pg_version }}_{{ item }}" + loop: "{{ range(1, nodes_count | int + 1) | list }}" + +- name: Get already connected services to pmm server + community.docker.docker_container_exec: + container: "pdpgsql_pmm_patroni_{{ pg_version }}_1" + command: > + sh -c 'curl --location --insecure -u"admin:{{ admin_password }}" -s --request GET "http://{{ pmm_server_ip }}:{{ '80' if pmm_server_ip is match('^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.|$)){4}$') else '8080' }}/v1/management/services" | jq -r ".services[].service_name"' + register: pmm_server_services + +- name: Display already connected services to pmm server + debug: + msg: "{{ pmm_server_services.stdout | split('\n') }}" + +- name: Find out if service is already connected to pmm server + block: + - name: Loop through percona servers + set_fact: + random_service_name_value: "_{{ 9999 | random + 1 }}" + loop: "{{ range(1, nodes_count | int + 1) | list }}" + when: "('pdpgsql_pmm_patroni_' ~ pg_version ~ '_' ~ item) in pmm_server_services.stdout" + +- name: Add service to pmm server + community.docker.docker_container_exec: + container: "pdpgsql_pmm_patroni_{{ pg_version }}_{{ item }}" + command: pmm-admin add postgresql --username=postgres --cluster=pdpgsql_patroni_cluster --environment=pdpgsql_patroni_environment --password=postgres --query-source=pgstatmonitor pdpgsql_pmm_patroni_{{ pg_version }}_{{ item }}{{ random_service_name_value }} --debug 127.0.0.1:5432 + loop: "{{ range(1, nodes_count | int + 1) | list }}" + +- name: Add patroni service to pmm server + community.docker.docker_container_exec: + container: "pdpgsql_pmm_patroni_{{ pg_version }}_1" + command: pmm-admin add external --listen-port=8008 --service-name=patroni_service_1{{ random_service_name_value }} + +- name: Add patroni service to pmm server + community.docker.docker_container_exec: + container: "pdpgsql_pmm_patroni_{{ pg_version }}_{{ item }}" + 
command: pmm-admin add external --listen-port=8008 --cluster=pdpgsql_patroni_service_cluster --environment=pdpgsql_patroni_service_environment --service-name=patroni_service_{{ item }}{{ random_service_name_value }} + loop: "{{ range(2, nodes_count | int + 1) | list }}" + +- name: Log Patroni cluster + community.docker.docker_container_exec: + container: "pdpgsql_pmm_patroni_{{ pg_version }}_1" + command: > + /bin/sh -c ' + patronictl -c /etc/patroni/patroni.yml list + ' diff --git a/pmm_qa/percona-distribution-postgresql/tasks/percona-distribution-postgres-replication-setup.yml b/pmm_qa/percona-distribution-postgresql/tasks/percona-distribution-postgres-replication-setup.yml new file mode 100644 index 00000000..1dc6a06d --- /dev/null +++ b/pmm_qa/percona-distribution-postgresql/tasks/percona-distribution-postgres-replication-setup.yml @@ -0,0 +1,273 @@ +- name: Create Docker network + community.docker.docker_network: + name: "{{ network_name }}" + state: present + ignore_errors: yes + +- name: Remove old data folders + shell: 'rm -fr {{ data_dir }}' + +- name: Create data directories + file: + path: "{{ data_dir }}/node{{ item }}/data" + state: directory + mode: '0755' + loop: "{{ range(1, nodes_count | int + 1) | list }}" + +- name: Generate pg_hba.conf for primary node + template: + src: data/pg_hba.conf.j2 + dest: "{{ data_dir }}/node1/pg_hba.conf" + +- name: Generate pg_hba.conf for replica node + template: + src: data/pg_hba_replica.conf.j2 + dest: "{{ data_dir }}/node{{ item }}/pg_hba.conf" + loop: "{{ range(2, nodes_count | int + 1) | list }}" + +- name: Fix permissions on data directory + become: true + file: + path: "{{ data_dir }}/node{{ item }}/data" + owner: 1001 + group: 1001 + recurse: yes + loop: "{{ range(1, nodes_count | int + 1) | list }}" + +- name: Remove old PostgreSQL primary container + community.docker.docker_container: + name: "pdpgsql_pmm_replication_{{ pg_version }}_1" + image: "{{ docker_repo }}:{{ pg_version }}" + restart_policy: 
always + state: absent + ignore_errors: yes + +- name: Start PostgreSQL primary container + community.docker.docker_container: + name: "pdpgsql_pmm_replication_{{ pg_version }}_1" + image: "{{ docker_repo }}:{{ pg_version }}" + restart_policy: always + state: started + recreate: true + networks: + - name: "{{ network_name }}" + env: + POSTGRES_PASSWORD: "{{ root_password }}" + volumes: + - "{{ data_dir }}/node1/data:/data/db" + - "./data/postgresql-primary.conf:/etc/postgresql/postgresql.conf:ro" + - "{{ data_dir }}/node1/pg_hba.conf:/etc/postgresql/pg_hba.conf:ro" + command: -c config_file=/etc/postgresql/postgresql.conf + ports: + - "{{ pdpgsql_port }}:5432" + +- name: Wait for PgSQL to be available + wait_for: + host: localhost + port: "{{ pdpgsql_port }}" + delay: 10 + timeout: 300 + +- name: Create replication user + community.docker.docker_container_exec: + container: "pdpgsql_pmm_replication_{{ pg_version }}_1" + user: postgres + command: > + psql -c " + CREATE ROLE {{ replication_user }} WITH REPLICATION LOGIN ENCRYPTED PASSWORD '{{ replication_password }}'; + " + +- name: Stop and remove replica if exists + community.docker.docker_container: + name: "pdpgsql_pmm_replication_{{ pg_version }}_{{ item }}" + state: absent + loop: "{{ range(2, nodes_count | int + 1) | list }}" + ignore_errors: yes + +- name: Start Percona Distribution PostgreSQL replica container + community.docker.docker_container: + name: "pdpgsql_pmm_replication_{{ pg_version }}_{{ item }}" + image: "{{ docker_repo }}:{{ pg_version }}" + restart_policy: "no" + state: started + command: sleep infinity + networks: + - name: "{{ network_name }}" + env: + POSTGRES_INITDB_SKIP: "yes" + POSTGRES_PASSWORD: "{{ root_password }}" + volumes: + - "{{ data_dir }}/node{{ item }}/data:/data/db" + - "./data/postgres-replica.conf:/etc/postgresql/postgresql.conf:ro" + - "{{ data_dir }}/node{{ item }}/pg_hba.conf:/etc/postgresql/pg_hba.conf:ro" + loop: "{{ range(2, nodes_count | int + 1) | list }}" + +- name: 
Wipe replica data directory before basebackup + community.docker.docker_container_exec: + container: "pdpgsql_pmm_replication_{{ pg_version }}_{{ item }}" + user: root + command: rm -rf /data/db/* + loop: "{{ range(2, nodes_count | int + 1) | list }}" + +- name: Create PostgreSQL user 'pmm' with password + community.docker.docker_container_exec: + container: "pdpgsql_pmm_replication_{{ pg_version }}_1" + user: postgres + command: > + bash -c ' + psql -U postgres -d {{ db_name | default("postgres") }} -c " + CREATE USER pmm WITH PASSWORD '\''pmm'\''; + GRANT pg_monitor TO pmm; + " + ' + +- name: Create custom database + community.docker.docker_container_exec: + container: "pdpgsql_pmm_replication_{{ pg_version }}_1" + user: postgres + command: > + bash -c " + echo \" + CREATE DATABASE test_database; + \\c test_database + GRANT CONNECT ON DATABASE test_database TO pmm; + GRANT USAGE ON SCHEMA public TO pmm; + GRANT SELECT, INSERT, UPDATE, DELETE ON ALL TABLES IN SCHEMA public TO pmm; + ALTER DEFAULT PRIVILEGES IN SCHEMA public + GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES TO pmm; + \" | psql -U postgres -v ON_ERROR_STOP=1 + " + +- name: Run pg_basebackup from primary to replica + community.docker.docker_container_exec: + container: "pdpgsql_pmm_replication_{{ pg_version }}_{{ item }}" + user: root + command: > + bash -c " + export PGPASSWORD='{{ replication_password }}' && \ + timeout 120s \ + pg_basebackup --pgdata=/data/db -R -v -Fp -Xs -P \ + --host=pdpgsql_pmm_replication_{{ pg_version }}_1 --port=5432 -U {{ replication_user }} > /tmp/pg_basebackup.log 2>&1 + " + loop: "{{ range(2, nodes_count | int + 1) | list }}" + +- name: Remove temporary backup container + docker_container: + name: "pdpgsql_pmm_replication_{{ pg_version }}_{{ item }}" + state: absent + loop: "{{ range(2, nodes_count | int + 1) | list }}" + +- name: Fix permissions on data directory + become: true + file: + path: "{{ data_dir }}/node{{ item }}/data" + owner: 1001 + group: 1001 + recurse: 
yes + loop: "{{ range(2, nodes_count | int + 1) | list }}" + +- name: Restart Percona Distribution PostgreSQL container with custom command + community.docker.docker_container: + name: "pdpgsql_pmm_replication_{{ pg_version }}_{{ item }}" + image: "{{ docker_repo }}:{{ pg_version }}" + restart: true + state: started + command: -c config_file=/etc/postgresql/postgresql.conf + networks: + - name: "{{ network_name }}" + env: + POSTGRES_PASSWORD: "{{ root_password }}" + volumes: + - "{{ data_dir }}/node{{ item }}/data:/data/db" + - "./data/postgres-replica.conf:/etc/postgresql/postgresql.conf:ro" + - "{{ data_dir }}/node1/pg_hba.conf:/etc/postgresql/pg_hba.conf:ro" + ports: + - "{{ pdpgsql_port + item - 1}}:5432" + loop: "{{ range(2, nodes_count | int + 1) | list }}" + +- name: Install pg stat monitor. + include_tasks: ./tasks/install_pg_stat-monitor.yml + vars: + container_name: "pdpgsql_pmm_replication_{{ pg_version }}" + +- name: Install and add pmm client. + include_tasks: ../tasks/install_pmm_client.yml + vars: + container_name: "pdpgsql_pmm_replication_{{ pg_version }}_{{ item }}" + loop: "{{ range(1, nodes_count | int + 1) | list }}" + +- name: Get already connected services to pmm server + community.docker.docker_container_exec: + container: "pdpgsql_pmm_replication_{{ pg_version }}_1" + command: > + sh -c 'curl --location --insecure -u"admin:{{ admin_password }}" -s --request GET "http://{{ pmm_server_ip }}:{{ '80' if pmm_server_ip is match('^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.|$)){4}$') else '8080' }}/v1/management/services" | jq -r ".services[].service_name"' + register: pmm_server_services + +- name: Display already connected services to pmm server + debug: + msg: "{{ pmm_server_services.stdout | split('\n') }}" + +- name: Find out if service is already connected to pmm server + block: + - name: Loop through percona servers + set_fact: + random_service_name_value: "_{{ 9999 | random + 1 }}" + loop: "{{ range(1, nodes_count | int + 1) | list }}" + 
when: "('pdpgsql_pmm_' ~ pg_version ~ '_' ~ item) in pmm_server_services.stdout" + +- name: Add service to pmm server + community.docker.docker_container_exec: + container: "pdpgsql_pmm_replication_{{ pg_version }}_{{ item }}" + command: pmm-admin add postgresql --username=pmm --password=pmm --cluster=pdpgsql_replication_cluster --environment=pdpgsql_replication_environment --query-source=pgstatmonitor pdpgsql_pmm_replication_{{ pg_version }}_{{ item }}{{ random_service_name_value }} --debug 127.0.0.1:5432 + loop: "{{ range(1, nodes_count | int + 1) | list }}" + +- name: Create pg_custom_publication view using psql + community.docker.docker_container_exec: + container: "pdpgsql_pmm_replication_{{ pg_version }}_1" + user: postgres + command: > + bash -c 'psql -U postgres -d postgres < + psql -U postgres -d test_database -c " + CREATE EXTENSION IF NOT EXISTS pg_stat_monitor; + SELECT pg_stat_monitor_version(); + " + +- name: Copy sample load into container + community.docker.docker_container_copy_into: + container: "pdpgsql_pmm_replication_{{ pg_version }}_1" + path: ../data/load_pgsql.sql + container_path: /load_pgsql.sql + +- name: Start permanent SQL load in background + community.docker.docker_container_exec: + container: "pdpgsql_pmm_replication_{{ pg_version }}_1" + command: > + sh -c "nohup bash -c 'while true; do echo Starting insert at $(date +\"%Y-%m-%d %H:%M:%S\"); psql -U postgres -d test_database -f /load_pgsql.sql; sleep 30; done' > /tmp/sql_loop.log 2>&1 &" + +- name: Create logical replication slots + community.docker.docker_container_exec: + container: "pdpgsql_pmm_replication_{{ pg_version }}_1" + user: postgres + command: psql -U postgres -d postgres -c "SELECT * FROM pg_create_logical_replication_slot('test_slot', 'test_decoding');" + +- name: Check replication status on primary + community.docker.docker_container_exec: + container: "pdpgsql_pmm_replication_{{ pg_version }}_1" + user: postgres + command: psql -c "SELECT * FROM 
pg_stat_replication;" diff --git a/pmm_qa/percona_server/data/init-async-replication.sql.j2 b/pmm_qa/percona_server/data/init-async-replication.sql.j2 new file mode 100644 index 00000000..a96ef137 --- /dev/null +++ b/pmm_qa/percona_server/data/init-async-replication.sql.j2 @@ -0,0 +1,13 @@ +-- Create replication user and grant necessary privileges +SET SQL_LOG_BIN=0; +CREATE USER '{{ replication_user }}'@'%' IDENTIFIED WITH 'caching_sha2_password' BY '{{ replication_password }}' REQUIRE NONE; +GRANT REPLICATION SLAVE ON *.* TO '{{ replication_user }}'@'%'; +GRANT CONNECTION_ADMIN ON *.* TO '{{ replication_user }}'@'%'; +GRANT BACKUP_ADMIN ON *.* TO '{{ replication_user }}'@'%'; +FLUSH PRIVILEGES; +SET SQL_LOG_BIN=1; + +{% if item == 1 %} +-- Primary server: enable binary logging for replication +FLUSH BINARY LOGS; +{% endif %} diff --git a/pmm_qa/percona_server/data/init-group-replication.sql.j2 b/pmm_qa/percona_server/data/init-group-replication.sql.j2 new file mode 100644 index 00000000..19185831 --- /dev/null +++ b/pmm_qa/percona_server/data/init-group-replication.sql.j2 @@ -0,0 +1,15 @@ +-- Create replication user and grant necessary privileges +SET SQL_LOG_BIN=0; +CREATE USER '{{ replication_user }}'@'%' IDENTIFIED BY '{{ replication_password }}'; +GRANT REPLICATION SLAVE ON *.* TO '{{ replication_user }}'@'%'; +GRANT CONNECTION_ADMIN ON *.* TO '{{ replication_user }}'@'%'; +GRANT BACKUP_ADMIN ON *.* TO '{{ replication_user }}'@'%'; +GRANT GROUP_REPLICATION_STREAM ON *.* TO '{{ replication_user }}'@'%'; +-- GRANT SERVICE_CONNECTION_ADMIN ON *.* TO '{{ replication_user }}'@'%'; +-- GRANT SYSTEM_VARIABLES_ADMIN ON *.* TO '{{ replication_user }}'@'%'; +FLUSH PRIVILEGES; +SET SQL_LOG_BIN=1; + +-- Configure group replication recovery credentials +CHANGE REPLICATION SOURCE TO SOURCE_USER='{{ replication_user }}', SOURCE_PASSWORD='{{ replication_password }}' FOR CHANNEL 'group_replication_recovery'; + diff --git 
a/pmm_qa/percona_server/data/my-async-replication.cnf.j2 b/pmm_qa/percona_server/data/my-async-replication.cnf.j2 new file mode 100644 index 00000000..014d37f7 --- /dev/null +++ b/pmm_qa/percona_server/data/my-async-replication.cnf.j2 @@ -0,0 +1,43 @@ +[mysqld] +# General server configuration +server_id={{ item }} +bind-address=0.0.0.0 +port={{ mysql_listen_port }} +userstat=1 + +# Authentication settings for caching_sha2_password +caching_sha2_password_auto_generate_rsa_keys=ON +# The following two parameters tell MySQL where to store the RSA key pair +caching_sha2_password_private_key_path=private_key.pem +caching_sha2_password_public_key_path=public_key.pem + +# Replication settings +gtid_mode=ON +enforce_gtid_consistency=ON +log_bin=binlog +log_replica_updates=ON +sync_binlog=1 +binlog_checksum=NONE +disabled_storage_engines="MyISAM,BLACKHOLE,FEDERATED,ARCHIVE,MEMORY" +# MacOS-specific, where table names are case-sensitive +lower_case_table_names=2 + +# MySQL 8.4 compatibility settings +report_host=ps_pmm_{{ ps_version }}_{{ item }} + +# Replica configuration - applies to all nodes except primary (they'll be able to become replicas) +{% if item != 1 %} +# Replica specific settings +replica_parallel_workers=4 +replica_parallel_type=LOGICAL_CLOCK +replica_preserve_commit_order=1 +{% endif %} + +# Crash-safe replication settings +relay-log=ps_pmm_{{ ps_version }}_{{ item }}-relay-bin +relay_log_recovery=ON +relay_log_purge=ON + +# Performance and connection settings +max_connections=1000 +innodb_buffer_pool_size=256M diff --git a/pmm_qa/percona_server/data/my-group-replication.cnf.j2 b/pmm_qa/percona_server/data/my-group-replication.cnf.j2 new file mode 100644 index 00000000..8fcdbbf6 --- /dev/null +++ b/pmm_qa/percona_server/data/my-group-replication.cnf.j2 @@ -0,0 +1,45 @@ +[mysqld] +# General server configuration +server_id={{ server_id_start + item - 1 }} +bind-address=0.0.0.0 +port={{ mysql_listen_port }} +userstat=1 + +# General replication settings 
+gtid_mode=ON +enforce_gtid_consistency=ON +binlog_checksum=NONE +log_bin=binlog +log_replica_updates=ON +disabled_storage_engines="MyISAM,BLACKHOLE,FEDERATED,ARCHIVE,MEMORY" +lower_case_table_names=2 # MacOS-specific, but also good generally + +# MySQL 8.4 compatibility settings +report_host=ps_pmm_{{ps_version}}_{{ item }} + +# Group Replication Settings +plugin_load_add='group_replication.so' +loose-group_replication_group_name='aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee' +loose-group_replication_local_address='ps_pmm_{{ps_version}}_{{ item }}:{{ group_seeds_port }}' +loose-group_replication_group_seeds='{% for i in range(1, nodes_count | int + 1) %}ps_pmm_{{ps_version}}_{{ i }}:{{ group_seeds_port }}{% if not loop.last %},{% endif %}{% endfor %}' +loose-group_replication_communication_stack=XCOM + +# Group replication behavior +loose-group_replication_start_on_boot=OFF +loose-group_replication_bootstrap_group=OFF +loose-group_replication_single_primary_mode=ON +loose-group_replication_enforce_update_everywhere_checks=OFF + +# Recovery settings +loose-group_replication_recovery_get_public_key=ON +loose-group_replication_recovery_retry_count=10 +loose-group_replication_recovery_reconnect_interval=60 + +# Crash-safe replication settings +relay-log=ps_pmm_{{ps_version}}_{{ item }}-relay-bin +relay_log_recovery=ON +relay_log_purge=ON + +# Performance and connection settings +max_connections=1000 +innodb_buffer_pool_size=256M diff --git a/pmm_qa/percona_server/data/my.cnf.j2 b/pmm_qa/percona_server/data/my.cnf.j2 new file mode 100644 index 00000000..fd4b27f2 --- /dev/null +++ b/pmm_qa/percona_server/data/my.cnf.j2 @@ -0,0 +1,2 @@ +[mysqld] +userstat=1 \ No newline at end of file diff --git a/pmm_qa/percona_server/data/ps_load.sql b/pmm_qa/percona_server/data/ps_load.sql new file mode 100644 index 00000000..6df95275 --- /dev/null +++ b/pmm_qa/percona_server/data/ps_load.sql @@ -0,0 +1,94 @@ +-- ======================================== +-- CREATE TABLES +-- 
======================================== + +CREATE TABLE students ( + student_id INT AUTO_INCREMENT PRIMARY KEY, + first_name VARCHAR(50), + last_name VARCHAR(50), + birth_date DATE +); + +CREATE TABLE classes ( + class_id INT AUTO_INCREMENT PRIMARY KEY, + name VARCHAR(100), + teacher VARCHAR(100) +); + +CREATE TABLE enrollments ( + enrollment_id INT AUTO_INCREMENT PRIMARY KEY, + student_id INT, + class_id INT, + enrollment_date TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + FOREIGN KEY (student_id) REFERENCES students(student_id), + FOREIGN KEY (class_id) REFERENCES classes(class_id) +); + +-- ======================================== +-- INSERT INITIAL DATA +-- ======================================== + +INSERT INTO students (first_name, last_name, birth_date) VALUES +('Alice', 'Smith', '2005-04-10'), +('Bob', 'Johnson', '2006-08-15'), +('Charlie', 'Brown', '2004-12-01'); + +INSERT INTO classes (name, teacher) VALUES +('Mathematics', 'Mrs. Taylor'), +('History', 'Mr. Anderson'), +('Science', 'Dr. 
Reynolds'); + +INSERT INTO enrollments (student_id, class_id) VALUES +(1, 1), +(1, 2), +(2, 2), +(3, 1), +(3, 3); + +-- ======================================== +-- SELECT: View all data after insert +-- ======================================== + +-- View all students +SELECT * FROM students; + +-- View all classes +SELECT * FROM classes; + +-- View all enrollments +SELECT * FROM enrollments; + +-- View students enrolled in Mathematics +SELECT s.first_name, s.last_name +FROM students s +JOIN enrollments e ON s.student_id = e.student_id +JOIN classes c ON e.class_id = c.class_id +WHERE c.name = 'Mathematics'; + +-- Count students per class +SELECT c.name AS class_name, COUNT(e.student_id) AS student_count +FROM classes c +LEFT JOIN enrollments e ON c.class_id = e.class_id +GROUP BY c.name; + +-- ======================================== +-- UPDATE DATA +-- ======================================== + +UPDATE students +SET last_name = 'Williams' +WHERE first_name = 'Bob' AND last_name = 'Johnson'; + +UPDATE classes +SET teacher = 'Ms. Carter' +WHERE name = 'History'; + +-- ======================================== +-- DELETE DATA +-- ======================================== + +DELETE FROM enrollments +WHERE student_id = (SELECT student_id FROM students WHERE first_name = 'Alice' AND last_name = 'Smith'); + +DELETE FROM students +WHERE first_name = 'Alice' AND last_name = 'Smith'; \ No newline at end of file diff --git a/pmm_qa/percona_server/percona-server-setup.yml b/pmm_qa/percona_server/percona-server-setup.yml new file mode 100644 index 00000000..34bd2658 --- /dev/null +++ b/pmm_qa/percona_server/percona-server-setup.yml @@ -0,0 +1,269 @@ +--- +# Percona Server 8.4 and higher single instance and also Cluster with Group Replication +- name: Setup Percona Server 8.4 and higher. 
Cluster with Group Replication in Docker + hosts: localhost + connection: local + gather_facts: yes + vars: + ps_version: "{{ lookup('env', 'PS_VERSION') | default('8.4', true) }}" + cluster_name: "mysql_cluster" + replication_user: "repl_user" + replication_password: "GRgrO9301RuF" + root_password: "GRgrO9301RuF" + mysql_port: 33066 + mysql_listen_port: 3306 + group_seeds_port: 34061 + nodes_count: "{{ (lookup('env', 'NODES_COUNT') | default('3', true)) | int }}" + network_name: "pmm-qa" + data_dir: "{{ lookup('env', 'HOME') }}/mysql_cluster_data" + server_id_start: 1 + pmm_server_ip: "{{ lookup('vars', 'extra_pmm_server_ip', default=lookup('env','PMM_SERVER_IP') | default('127.0.0.1', true) ) }}" + client_version: "{{ lookup('vars', 'extra_client_version', default=lookup('env','CLIENT_VERSION') | default('3-dev-latest', true) ) }}" + admin_password: "{{ lookup('vars', 'extra_admin_password', default=lookup('env','ADMIN_PASSWORD') | default('admin', true) ) }}" + query_source: "{{ lookup('env', 'QUERY_SOURCE') | default('perfschema', true) }}" + metrics_mode: "{{ lookup('env', 'metrics_mode') }}" + setup_type: "{{ lookup('env', 'SETUP_TYPE') }}" + random_service_name_value: "" + my_rocks: "{{ lookup('env', 'MY_ROCKS') | default(false, true) }}" + + + tasks: + - name: Mofidy the node count for group replication + set_fact: + nodes_count: 3 + when: nodes_count | int < 3 and setup_type == "gr" + + - name: Chance to correct nodes count for async replication + set_fact: + nodes_count: 2 + when: nodes_count | int < 2 and setup_type == "replication" + + - name: Create Docker network + community.docker.docker_network: + name: "{{ network_name }}" + state: present + + - name: Remove old data folders + shell: 'rm -fr {{ data_dir }}' + loop: "{{ range(1, nodes_count | int + 1) | list }}" + + - name: Create data directories + file: + path: "{{ data_dir }}/node{{ item }}/data" + state: directory + mode: '0755' + loop: "{{ range(1, nodes_count | int + 1) | list }}" + + - name: 
Remove old percona server containers + community.docker.docker_container: + name: "ps_pmm_{{ ps_version }}_{{ item }}" + image: "percona/percona-server:{{ ps_version }}" + restart_policy: always + state: absent + loop: "{{ range(1, nodes_count | int + 1) | list }}" + ignore_errors: yes + + - name: Recursively change ownership of a directory + shell: "sudo chown -R 1001:1001 {{ data_dir }}/node{{ item }}/data" + loop: "{{ range(1, nodes_count | int + 1) | list }}" + + - name: Setup Percona Server group replication + include_tasks: ./tasks/percona-server-group-replication-setup.yml + when: setup_type == "gr" + + - name: Setup Percona Server with async replication + include_tasks: ./tasks/percona-server-async-replication-setup.yml + when: setup_type == "replication" + + - name: Setup Percona Server + include_tasks: tasks/percona-server-setup-single.yml + when: setup_type != "gr" and setup_type != "replication" + + - name: Wait 10 seconds for setup to finish + pause: + seconds: 10 + + - name: Create slowlog configuration for mysql nodes + community.docker.docker_container_exec: + container: "ps_pmm_{{ ps_version }}_{{ item }}" + command: > + mysql -uroot -p{{ root_password }} -e " + SET GLOBAL slow_query_log='ON'; + SET GLOBAL long_query_time=0; + SET GLOBAL log_slow_admin_statements=ON; + SET GLOBAL log_slow_slave_statements=ON; + " + loop: "{{ range(1, nodes_count | int + 1) | list }}" + when: query_source == "slowlog" + + - name: Install and add pmm client. 
+ include_tasks: ../tasks/install_pmm_client.yml + vars: + container_name: "ps_pmm_{{ ps_version }}_{{ item }}" + loop: "{{ range(1, nodes_count | int + 1) | list }}" + + - name: Generate random service name suffix + set_fact: + random_service_name_value: "_{{ 99999 | random + 1 }}" + + - name: Add service to pmm server + community.docker.docker_container_exec: + container: "ps_pmm_{{ ps_version }}_{{ item }}" + command: pmm-admin add mysql --query-source={{ query_source }} --username=root --password={{ root_password }} --environment=ps-gr-dev --cluster=ps-gr-dev-cluster --replication-set=ps-gr-replication ps_pmm_{{ ps_version }}_{{ item }}{{ random_service_name_value }} --debug 127.0.0.1:3306 + loop: "{{ range(1, nodes_count | int + 1) | list }}" + when: setup_type == "gr" + + - name: Add service to pmm server + community.docker.docker_container_exec: + container: "ps_pmm_{{ ps_version }}_{{ item }}" + command: pmm-admin add mysql --query-source={{ query_source }} --username=root --password={{ root_password }} --environment=ps-replication-dev --cluster=ps-replication-dev-cluster --replication-set=ps-async-replication ps_pmm_{{ ps_version }}_{{ item }}{{ random_service_name_value }} --debug 127.0.0.1:3306 + loop: "{{ range(1, nodes_count | int + 1) | list }}" + when: setup_type == "replication" + + - name: Add service to pmm server + community.docker.docker_container_exec: + container: "ps_pmm_{{ ps_version }}_{{ item }}" + command: pmm-admin add mysql --query-source={{ query_source }} --username=root --password={{ root_password }} --cluster=ps-single-dev-cluster --environment=ps-dev ps_pmm_{{ ps_version }}_{{ item }}{{ random_service_name_value }} --debug 127.0.0.1:3306 + loop: "{{ range(1, nodes_count | int + 1) | list }}" + when: setup_type != "gr" and setup_type != "replication" + + - name: Install sysbench inside of all percona server nodes + community.docker.docker_container_exec: + container: "ps_pmm_{{ ps_version }}_{{ item }}" + user: "root" + command: > + 
/bin/sh -c " + wget -O epel-release.rpm --progress=dot:giga https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm && + rpm -i epel-release.rpm && + microdnf install -y sysbench + " + loop: "{{ range(1, nodes_count | int + 1) | list }}" + + - name: Prepare sysbench inside of all percona server nodes + community.docker.docker_container_exec: + container: "ps_pmm_{{ ps_version }}_{{ item }}" + command: > + mysql -uroot -p{{ root_password }} -e " + SET GLOBAL super_read_only = OFF; + SET GLOBAL read_only = OFF; + " + loop: "{{ range(1, nodes_count | int + 1) | list }}" + + - name: Prepare sysbench inside of all percona server nodes + community.docker.docker_container_exec: + container: "ps_pmm_{{ ps_version }}_{{ item }}" + command: > + mysql -uroot -p{{ root_password }} -e " + CREATE DATABASE sbtest; + CREATE USER 'sbtest'@'localhost' IDENTIFIED BY 'password'; + GRANT ALL PRIVILEGES ON *.* TO 'sbtest'@'localhost'; + CREATE USER 'sbtest'@'127.0.0.1' IDENTIFIED BY 'password'; + GRANT ALL PRIVILEGES ON *.* TO 'sbtest'@'127.0.0.1'; + FLUSH PRIVILEGES; + " + loop: "{{ range(1, nodes_count | int + 1) | list }}" + when: setup_type != "gr" and setup_type != "replication" + + - name: Prepare sysbench inside of all percona server nodes + community.docker.docker_container_exec: + container: "ps_pmm_{{ ps_version }}_1" + command: > + mysql -uroot -p{{ root_password }} -e " + CREATE DATABASE sbtest; + CREATE USER 'sbtest'@'localhost' IDENTIFIED BY 'password'; + GRANT ALL PRIVILEGES ON *.* TO 'sbtest'@'localhost'; + CREATE USER 'sbtest'@'127.0.0.1' IDENTIFIED BY 'password'; + GRANT ALL PRIVILEGES ON *.* TO 'sbtest'@'127.0.0.1'; + FLUSH PRIVILEGES; + " + when: setup_type == "gr" or setup_type == "replication" + + - name: Prepare data for sysbench inside of all percona server nodes + community.docker.docker_container_exec: + container: "ps_pmm_{{ ps_version }}_{{ item }}" + command: > + sysbench /usr/share/sysbench/oltp_read_write.lua + --mysql-host=127.0.0.1 + 
--mysql-port=3306 + --mysql-user=sbtest + --mysql-password=password + --mysql-db=sbtest + --tables=10 + --table-size=100000 + prepare + when: setup_type != "gr" and setup_type != "replication" + loop: "{{ range(1, nodes_count | int + 1) | list }}" + + - name: Prepare data for sysbench inside of first percona server nodes + community.docker.docker_container_exec: + container: "ps_pmm_{{ ps_version }}_1" + command: > + sysbench /usr/share/sysbench/oltp_read_write.lua + --mysql-host=127.0.0.1 + --mysql-port=3306 + --mysql-user=sbtest + --mysql-password=password + --mysql-db=sbtest + --tables=10 + --table-size=100000 + prepare + when: setup_type == "gr" or setup_type == "replication" + + - name: Run load for sysbench inside of all percona server nodes + community.docker.docker_container_exec: + container: "ps_pmm_{{ ps_version }}_{{ item }}" + command: > + sysbench /usr/share/sysbench/oltp_read_write.lua + --mysql-host=127.0.0.1 + --mysql-port=3306 + --mysql-user=sbtest + --mysql-password=password + --mysql-db=sbtest + --tables=10 + --table-size=100000 + --threads=16 + --time=60 + run + loop: "{{ range(1, nodes_count | int + 1) | list }}" + + - name: Copy a load file into the container + community.docker.docker_container_copy_into: + container: "ps_pmm_{{ ps_version }}_{{ item }}" + path: ./data/ps_load.sql + container_path: /ps_load.sql + loop: "{{ range(1, nodes_count | int + 1) | list }}" + + - name: Wait 10 seconds for node to be connected + pause: + seconds: 10 + + - name: Run load inside of first percona server node + community.docker.docker_container_exec: + container: "ps_pmm_{{ ps_version }}_1" + command: > + /bin/sh -c ' + mysql -uroot -p{{ root_password }} -e "CREATE DATABASE school;" + mysql -uroot -p{{ root_password }} school < /ps_load.sql + ' + when: setup_type == "gr" or setup_type == "replication" + + - name: Run load inside of all percona server nodes + community.docker.docker_container_exec: + container: "ps_pmm_{{ ps_version }}_{{ item }}" + 
command: > + /bin/sh -c ' + mysql -uroot -p{{ root_password }} -e "CREATE DATABASE school;" + mysql -uroot -p{{ root_password }} school < /ps_load.sql + ' + loop: "{{ range(1, nodes_count | int + 1) | list }}" + when: setup_type != "gr" and setup_type != "replication" + + - name: Enable MySQL MyRocks + community.docker.docker_container_exec: + container: "ps_pmm_{{ ps_version }}_{{ item }}" + command: > + /bin/sh -c ' + ps-admin --enable-rocksdb -u root -p{{ root_password }} + ' + loop: "{{ range(1, nodes_count | int + 1) | list }}" + when: my_rocks | bool diff --git a/pmm_qa/percona_server/tasks/percona-server-async-replication-setup.yml b/pmm_qa/percona_server/tasks/percona-server-async-replication-setup.yml new file mode 100644 index 00000000..67cebb47 --- /dev/null +++ b/pmm_qa/percona_server/tasks/percona-server-async-replication-setup.yml @@ -0,0 +1,129 @@ +- name: Generate my.cnf for each node + template: + src: ./data/my-async-replication.cnf.j2 + dest: "{{ data_dir }}/node{{ item }}/my.cnf" + loop: "{{ range(1, nodes_count | int + 1) | list }}" + +- name: Create initialization script for each node + template: + src: ./data/init-async-replication.sql.j2 + dest: "{{ data_dir }}/node{{ item }}/init.sql" + loop: "{{ range(1, nodes_count | int + 1) | list }}" + +- name: Start Percona Server containers with async replication + community.docker.docker_container: + name: "ps_pmm_{{ ps_version }}_{{ item }}" + image: "percona/percona-server:{{ ps_version }}" + restart_policy: always + state: started + networks: + - name: "{{ network_name }}" + env: + MYSQL_ROOT_PASSWORD: "{{ root_password }}" + ports: + - "{{ mysql_port + item - 1 }}:{{ mysql_listen_port }}" + volumes: + - "{{ data_dir }}/node{{ item }}/data:/var/lib/mysql" + - "{{ data_dir }}/node{{ item }}/my.cnf:/etc/mysql/my.cnf" + - "{{ data_dir }}/node{{ item }}/init.sql:/docker-entrypoint-initdb.d/init.sql" + loop: "{{ range(1, nodes_count | int + 1) | list }}" + +- name: Wait for MySQL to be available + 
wait_for: + host: localhost + port: "{{ mysql_port + item - 1 }}" + delay: 10 + timeout: 300 + loop: "{{ range(1, nodes_count | int + 1) | list }}" + +- name: Wait 5 seconds for percona server start to complete + pause: + seconds: 5 + +- name: Reset configuration for all nodes + community.docker.docker_container_exec: + container: "ps_pmm_{{ ps_version }}_{{ item }}" + command: > + mysql -uroot -p{{ root_password }} -e " + RESET BINARY LOGS AND GTIDS; + RESET REPLICA ALL; + " + loop: "{{ range(1, nodes_count | int + 1) | list }}" + ignore_errors: yes + +- name: Get primary ps_pmm_{{ ps_version }}_1 binary log status + community.docker.docker_container_exec: + container: "ps_pmm_{{ ps_version }}_1" + command: > + mysql -uroot -p{{ root_password }} -e " + SHOW BINARY LOG STATUS\G + " + register: primary_status + changed_when: false + + +- name: Display binary log status for primary + debug: + msg: "{{ primary_status.stdout | split('\n') }}" + +- name: Configure replica servers (container2-containerN) + community.docker.docker_container_exec: + container: "ps_pmm_{{ ps_version }}_{{ item }}" + command: > + mysql -uroot -p{{ root_password }} -e " + CHANGE REPLICATION SOURCE TO + SOURCE_HOST='ps_pmm_{{ ps_version }}_1', + SOURCE_PORT={{ mysql_listen_port }}, + SOURCE_USER='{{ replication_user }}', + SOURCE_PASSWORD='{{ replication_password }}', + SOURCE_AUTO_POSITION=1, + SOURCE_PUBLIC_KEY_PATH='', + GET_SOURCE_PUBLIC_KEY=1; + START REPLICA; + " + loop: "{{ range(2, nodes_count | int + 1) | list }}" + +- name: Create and seed a test database on primary + community.docker.docker_container_exec: + container: "ps_pmm_{{ ps_version }}_1" + command: > + mysql -uroot -p{{ root_password}} -e " + CREATE DATABASE testdb; + USE testdb; + CREATE TABLE testdb (id INT PRIMARY KEY, data VARCHAR(100)); + INSERT INTO testdb VALUES (1, 'Initial data from node mysql1');" + +- name: Check replication status on replica nodes + community.docker.docker_container_exec: + container: "ps_pmm_{{ 
ps_version }}_{{ item }}" + command: mysql -uroot -p{{ root_password }} -e "SHOW REPLICA STATUS\G" + register: replication_status + loop: "{{ range(2, nodes_count | int + 1) | list }}" + changed_when: false + +- name: Display replication status for each replica + debug: + msg: "{{ replication_status.results[item - 2].stdout_lines }}" + loop: "{{ range(2, nodes_count | int + 1) | list }}" + +- name: Set verification instructions + set_fact: + verification_msg: | + MySQL Cluster setup complete with asynchronous replication! + + To verify replication is working: + 1. Connect to the primary (ps_pmm_{{ ps_version }}_1): + docker exec -it ps_pmm_{{ ps_version }}_1 mysql -uroot -p{{ root_password }} + + 2. Insert data in the test database: + USE testdb; + INSERT INTO testdb VALUES (100, 'Test replication'); + + 3. Connect to replicas and verify data is replicated: + docker exec -it ps_pmm_{{ ps_version }}_2 mysql -uroot -p{{ root_password }} + USE testdb; + SELECT * FROM testdb; + +- name: Display verification instructions + debug: + msg: "{{ verification_msg | split('\n') }}" diff --git a/pmm_qa/percona_server/tasks/percona-server-group-replication-setup.yml b/pmm_qa/percona_server/tasks/percona-server-group-replication-setup.yml new file mode 100644 index 00000000..cd179239 --- /dev/null +++ b/pmm_qa/percona_server/tasks/percona-server-group-replication-setup.yml @@ -0,0 +1,127 @@ +- name: Generate my.cnf for each node + template: + src: ./data/my-group-replication.cnf.j2 + dest: "{{ data_dir }}/node{{ item }}/my.cnf" + loop: "{{ range(1, nodes_count | int + 1) | list }}" + +- name: Create initialization script for each node + template: + src: ./data/init-group-replication.sql.j2 + dest: "{{ data_dir }}/node{{ item }}/init.sql" + loop: "{{ range(1, nodes_count | int + 1) | list }}" + +- name: Start Percona Server containers with group replication + community.docker.docker_container: + name: "ps_pmm_{{ ps_version }}_{{ item }}" + image: "percona/percona-server:{{ 
ps_version }}" + restart_policy: always + state: started + networks: + - name: "{{ network_name }}" + env: + MYSQL_ROOT_PASSWORD: "{{ root_password }}" + ports: + - "{{ mysql_port + item - 1 }}:{{ mysql_listen_port }}" + - "{{ group_seeds_port + item - 1 }}:{{ group_seeds_port }}" + volumes: + - "{{ data_dir }}/node{{ item }}/data:/var/lib/mysql" + - "{{ data_dir }}/node{{ item }}/my.cnf:/etc/mysql/my.cnf" + - "{{ data_dir }}/node{{ item }}/init.sql:/docker-entrypoint-initdb.d/init.sql" + loop: "{{ range(1, nodes_count | int + 1) | list }}" + +- name: Wait for MySQL to be available + wait_for: + host: localhost + port: "{{ mysql_port + item - 1 }}" + delay: 10 + timeout: 300 + loop: "{{ range(1, nodes_count | int + 1) | list }}" + +- name: Reset configuration for all nodes + community.docker.docker_container_exec: + container: "ps_pmm_{{ ps_version }}_{{ item }}" + command: > + mysql -uroot -p{{ root_password }} -e " + RESET BINARY LOGS AND GTIDS; + RESET REPLICA ALL; + SET GLOBAL gtid_purged=''; + " + loop: "{{ range(1, nodes_count | int + 1) | list }}" + +- name: Bootstrap first node in the cluster + community.docker.docker_container_exec: + container: "ps_pmm_{{ ps_version }}_1" + command: > + mysql -uroot -p{{ root_password }} -e " + SET GLOBAL group_replication_bootstrap_group=ON; + START GROUP_REPLICATION; + SET GLOBAL group_replication_bootstrap_group=OFF;" + retries: 5 + delay: 10 + +- name: Wait 5 seconds for bootstrap to complete + pause: + seconds: 5 + +- name: Start group replication on other nodes + community.docker.docker_container_exec: + container: "ps_pmm_{{ ps_version }}_{{ item }}" + command: mysql -uroot -p{{ root_password }} -e "START GROUP_REPLICATION;" + loop: "{{ range(2, nodes_count | int + 1) | list }}" + ignore_errors: yes + +- name: Wait 10 seconds for the other nodes to join + pause: + seconds: 10 + +- name: Create and seed a test database on primary + community.docker.docker_container_exec: + container: "ps_pmm_{{ ps_version }}_1" + 
command: > + mysql -uroot -p{{ root_password}} -e " + CREATE DATABASE testdb; + USE testdb; + CREATE TABLE testdb (id INT PRIMARY KEY, data VARCHAR(100)); + INSERT INTO testdb VALUES (1, 'Initial data from node mysql1');" + +- name: Check replication status on first node + community.docker.docker_container_exec: + container: "ps_pmm_{{ ps_version }}_1" + command: mysql -uroot -p{{ root_password }} -e "SELECT * FROM performance_schema.replication_group_members;" + register: replication_status + +- name: Display replication status + debug: + var: replication_status.stdout + +- name: Check replication group members count + community.docker.docker_container_exec: + container: "ps_pmm_{{ ps_version }}_1" + command: mysql -uroot -p{{ root_password }} -e "SELECT COUNT(*) AS count FROM performance_schema.replication_group_members;" + register: member_count + +- name: Display member count + debug: + var: member_count.stdout + +- name: Set verification instructions + set_fact: + verification_msg: | + MySQL Cluster setup complete! + + To verify replication is working: + 1. Connect to the first node: + docker exec -it ps_pmm_{{ ps_version }}_1 mysql -uroot -p{{ root_password }} + + 2. Insert data in the test database: + USE testdb; + INSERT INTO testdb VALUES (100, 'Test replication'); + + 3. 
Connect to other nodes and verify data is replicated: + docker exec -it ps_pmm_{{ ps_version }}_2 mysql -uroot -p{{ root_password }} + USE testdb; + SELECT * FROM testdb; + +- name: Display verification instructions + debug: + msg: "{{ verification_msg | split('\n') }}" diff --git a/pmm_qa/percona_server/tasks/percona-server-setup-single.yml b/pmm_qa/percona_server/tasks/percona-server-setup-single.yml new file mode 100644 index 00000000..9fbad84c --- /dev/null +++ b/pmm_qa/percona_server/tasks/percona-server-setup-single.yml @@ -0,0 +1,31 @@ +- name: Generate my.cnf for each node + template: + src: ./data/my.cnf.j2 + dest: "{{ data_dir }}/node{{ item }}/my.cnf" + loop: "{{ range(1, nodes_count | int + 1) | list }}" + +- name: Start Percona Server containers + community.docker.docker_container: + name: "ps_pmm_{{ ps_version }}_{{ item }}" + image: "percona/percona-server:{{ ps_version }}" + restart_policy: always + state: started + networks: + - name: "{{ network_name }}" + env: + MYSQL_ROOT_PASSWORD: "{{ root_password }}" + ports: + - "{{ mysql_port + item - 1 }}:{{ mysql_listen_port }}" + - "{{ group_seeds_port + item - 1 }}:{{ group_seeds_port }}" + volumes: + - "{{ data_dir }}/node{{ item }}/my.cnf:/etc/mysql/my.cnf" + - "{{ data_dir }}/node{{ item }}/data:/var/lib/mysql" + loop: "{{ range(1, nodes_count | int + 1) | list }}" + +- name: Wait for MySQL to be available + wait_for: + host: localhost + port: "{{ mysql_port + item - 1 }}" + delay: 10 + timeout: 300 + loop: "{{ range(1, nodes_count | int + 1) | list }}" diff --git a/pmm_qa/pg_stat_monitor_setup.sh b/pmm_qa/pg_stat_monitor_setup.sh new file mode 100644 index 00000000..cc6fb54e --- /dev/null +++ b/pmm_qa/pg_stat_monitor_setup.sh @@ -0,0 +1,118 @@ +#!/bin/sh + +while [ $# -gt 0 ]; do + + if [[ $1 == *"--"* ]]; then + param="${1/--/}" + declare $param="$2" + fi + + shift +done + +# If postgres server version is not provided then it will default to version 14. 
+if [ -z "$pgsql_version" ] +then + export pgsql_version=14 +fi + +# If branch/tag is not provided then it will default to main branch +if [ -z "$pgstat_monitor_branch" ] +then + export pgstat_monitor_branch="2.1.0" +fi + +# If repo is not provided then it will default to percona PGSM repository +if [ -z "$pgstat_monitor_repo" ] +then + export pgstat_monitor_repo=percona/pg_stat_monitor +fi + +# If distribution is not provided then it will default to percona distribution 'PPG' +# For PG community distribution please use 'PGDG' +if [ -z "$distribution" ] +then + export distribution=PPG +fi + +# Need to add a user postgres either here or in Dockerfile +cd /home +mkdir postgres +useradd postgres +chown -R postgres:postgres postgres +cd postgres + +# Install the dependencies +apt-get update +apt-get -y install wget curl git gnupg2 lsb-release +apt-get -y install libreadline6-dev systemtap-sdt-dev zlib1g-dev libssl-dev libpam0g-dev bison make flex libipc-run-perl wget +sleep 10 + +# Install the PG server from selected distribution +if [[ $distribution == "PGDG" ]]; +then + wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - + sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list' + apt update + apt -y install postgresql-${pgsql_version} postgresql-server-dev-${pgsql_version} +else + wget https://repo.percona.com/apt/percona-release_latest.generic_all.deb + dpkg -i percona-release_latest.generic_all.deb + percona-release setup ppg-${pgsql_version} + apt-get -y update + apt-get -y install percona-postgresql-${pgsql_version} percona-postgresql-contrib percona-postgresql-server-dev-all +fi + +sleep 10 +sed -i 's/\(host\s*all\s*all\s*127.0.0.1.*\) md5/\1 trust/g' /etc/postgresql/${pgsql_version}/main/pg_hba.conf +sed -i 's/\(host\s*all\s*all\s*::1.*\) md5/\1 trust/g' /etc/postgresql/${pgsql_version}/main/pg_hba.conf +sed -i 's/\(local\s*all\s*postgres.*\) peer/\1 trust/g' 
/etc/postgresql/${pgsql_version}/main/pg_hba.conf +sed -i 's/\(local\s*all\s*all.*\) peer/\1 trust/g' /etc/postgresql/${pgsql_version}/main/pg_hba.conf +service postgresql restart + +sleep 10 +chown -R postgres:postgres /var/lib/postgresql/${pgsql_version}/main +chmod 0700 -R /var/lib/postgresql/${pgsql_version}/main +sed -i "s/#listen_addresses.*/listen_addresses = '*'/g" /etc/postgresql/${pgsql_version}/main/postgresql.conf +echo "host all all 0.0.0.0/0 md5" >> /etc/postgresql/${pgsql_version}/main/pg_hba.conf + +sleep 10 +service postgresql restart + +export PATH="/usr/lib/postgresql/${pgsql_version}/bin:$PATH" +echo $PATH +cp /usr/lib/postgresql/${pgsql_version}/bin/pg_config /usr/bin + +# Clone PGSM repo and move to /home/postgres/pg_stat_monitor dir +cd /home/postgres +git clone --depth 1 --branch ${pgstat_monitor_branch} https://github.com/${pgstat_monitor_repo} +chown -R postgres:postgres pg_stat_monitor +cd pg_stat_monitor + +# Build PGSM +make USE_PGXS=1 + +# Install built PGSM library into server +make USE_PGXS=1 install + +# Stop server and edit postgresql.conf to load PGSM library with required configurations +service postgresql stop +echo "shared_preload_libraries = 'pg_stat_monitor'" >> /etc/postgresql/${pgsql_version}/main/postgresql.conf +echo "track_activity_query_size=2048" >> /etc/postgresql/${pgsql_version}/main/postgresql.conf +echo "track_io_timing=ON" >> /etc/postgresql/${pgsql_version}/main/postgresql.conf +echo "max_connections=1000" >> /etc/postgresql/${pgsql_version}/main/postgresql.conf +echo "listen_addresses = '*'" >> /etc/postgresql/${pgsql_version}/main/postgresql.conf +echo "pg_stat_monitor.pgsm_enable_query_plan = 'yes'" >> /etc/postgresql/${pgsql_version}/main/postgresql.conf + +# Create init.sql file required by PMM +echo "CREATE DATABASE sbtest1;" >> /home/postgres/init.sql +echo "CREATE DATABASE sbtest2;" >> /home/postgres/init.sql +echo "CREATE USER pmm WITH SUPERUSER ENCRYPTED PASSWORD 'pmm';" >> /home/postgres/init.sql 
+echo "GRANT pg_monitor TO pmm;" >> /home/postgres/init.sql +echo "ALTER USER postgres PASSWORD 'pass+this';" >> /home/postgres/init.sql + +# Start server, run init.sql and Create extension PGSM +service postgresql start +su postgres bash -c 'psql -f /home/postgres/init.sql' +su postgres bash -c 'psql -c "CREATE DATABASE contrib_regression;"' +su postgres bash -c 'psql -U postgres -c "CREATE EXTENSION pg_stat_monitor;"' diff --git a/pmm_qa/pg_stat_statements_setup.sh b/pmm_qa/pg_stat_statements_setup.sh new file mode 100644 index 00000000..2521c4c4 --- /dev/null +++ b/pmm_qa/pg_stat_statements_setup.sh @@ -0,0 +1,93 @@ +#!/bin/sh + +while [ $# -gt 0 ]; do + + if [[ $1 == *"--"* ]]; then + param="${1/--/}" + declare $param="$2" + fi + + shift +done + +# If postgres server version is not provided then it will default to version 14. +if [ -z "$pgsql_version" ] +then + export pgsql_version=15 +fi + +# If distribution is not provided then it will default to percona distribution 'PPG' +# For PG community distribution please use 'PGDG' +if [ -z "$distribution" ] +then + export distribution=PGDG +fi + +# Need to add a user postgres either here or in Dockerfile +cd /home +mkdir postgres +useradd postgres +chown -R postgres:postgres postgres +cd postgres + +# Install the dependencies +apt-get update +apt-get -y install wget curl git gnupg2 lsb-release +apt-get -y install libreadline6-dev systemtap-sdt-dev zlib1g-dev libssl-dev libpam0g-dev bison make flex libipc-run-perl wget +sleep 10 + +# Install the PG server from selected distribution +if [[ $distribution == "PGDG" ]]; +then + wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - + sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list' + apt update + apt -y install postgresql-${pgsql_version} postgresql-server-dev-${pgsql_version} postgresql-contrib-${pgsql_version} +else + wget 
https://repo.percona.com/apt/percona-release_latest.generic_all.deb + dpkg -i percona-release_latest.generic_all.deb + percona-release setup ppg-${pgsql_version} + apt-get -y update + apt-get -y install percona-postgresql-${pgsql_version} percona-postgresql-contrib percona-postgresql-server-dev-all +fi + +sleep 10 +sed -i 's/\(host\s*all\s*all\s*127.0.0.1.*\) md5/\1 trust/g' /etc/postgresql/${pgsql_version}/main/pg_hba.conf +sed -i 's/\(host\s*all\s*all\s*::1.*\) md5/\1 trust/g' /etc/postgresql/${pgsql_version}/main/pg_hba.conf +sed -i 's/\(local\s*all\s*postgres.*\) peer/\1 trust/g' /etc/postgresql/${pgsql_version}/main/pg_hba.conf +sed -i 's/\(local\s*all\s*all.*\) peer/\1 trust/g' /etc/postgresql/${pgsql_version}/main/pg_hba.conf +service postgresql restart + +sleep 10 +chown -R postgres:postgres /var/lib/postgresql/${pgsql_version}/main +chmod 0700 -R /var/lib/postgresql/${pgsql_version}/main +sed -i "s/#listen_addresses.*/listen_addresses = '*'/g" /etc/postgresql/${pgsql_version}/main/postgresql.conf +echo "host all all 0.0.0.0/0 md5" >> /etc/postgresql/${pgsql_version}/main/pg_hba.conf + +sleep 10 +service postgresql restart + +export PATH="/usr/lib/postgresql/${pgsql_version}/bin:$PATH" +echo $PATH +cp /usr/lib/postgresql/${pgsql_version}/bin/pg_config /usr/bin + +# Stop server and edit postgresql.conf to load pg_stat_sstatement library with required configurations +service postgresql stop +echo "shared_preload_libraries = 'pg_stat_statements'" >> /etc/postgresql/${pgsql_version}/main/postgresql.conf +echo "track_activity_query_size=2048" >> /etc/postgresql/${pgsql_version}/main/postgresql.conf +echo "track_io_timing=ON" >> /etc/postgresql/${pgsql_version}/main/postgresql.conf +echo "pg_stat_statements.track=all" >> /etc/postgresql/${pgsql_version}/main/postgresql.conf + +# Create init.sql file required by PMM +echo "CREATE DATABASE sbtest1;" >> /home/postgres/init.sql +echo "CREATE DATABASE sbtest2;" >> /home/postgres/init.sql +echo "CREATE USER pmm WITH 
PASSWORD 'pmm';" >> /home/postgres/init.sql +echo "GRANT pg_monitor TO pmm;" >> /home/postgres/init.sql +echo "ALTER USER postgres PASSWORD 'pass+this';" >> /home/postgres/init.sql +echo "ALTER SYSTEM SET max_locks_per_transaction = 1024;" >> /home/postgres/init.sql + +# Start server, run init.sql and Create extension PGSM +service postgresql start +su postgres bash -c 'psql -f /home/postgres/init.sql' +su postgres bash -c 'psql -c "CREATE DATABASE contrib_regression;"' +su postgres bash -c 'psql -U postgres -c "CREATE EXTENSION pg_stat_statements;"' diff --git a/pmm_qa/pgsm_run_queries.sh b/pmm_qa/pgsm_run_queries.sh new file mode 100644 index 00000000..0afaa2be --- /dev/null +++ b/pmm_qa/pgsm_run_queries.sh @@ -0,0 +1,10 @@ +#!/bin/sh + +## Running Queries +wget https://raw.githubusercontent.com/percona/pmm-agent/main/testqueries/postgres/pg_stat_monitor_load.sql +while true +do + su postgres bash -c 'psql -d contrib_regression -f pg_stat_monitor_load.sql' + su postgres bash -c 'psql -d sbtest1 -f pg_stat_monitor_load.sql' + sleep 30 +done diff --git a/pmm_qa/pgsql-vacuum.sh b/pmm_qa/pgsql-vacuum.sh new file mode 100644 index 00000000..184bed48 --- /dev/null +++ b/pmm_qa/pgsql-vacuum.sh @@ -0,0 +1,51 @@ +#!/bin/bash + +export DOCKER_CONTAINER_NAME=pgsql_vacuum_db +export PGSQL_VACUUM_VERSION="latest" +echo "Setting up Postgres for vacuum monitoring" +if [ ! 
-z $@ ]; then + PGSQL_VACUUM_VERSION=$1 +fi +docker stop pgsql_vacuum_db || true +docker rm pgsql_vacuum_db || true +docker run --name pgsql_vacuum_db -p 7432:5432 -e POSTGRES_PASSWORD=YIn7620U1SUc -d postgres:$PGSQL_VACUUM_VERSION \ + -c shared_preload_libraries='pg_stat_statements' -c pg_stat_statements.max=10000 -c pg_stat_statements.track=all +sleep 20 +# --network pmm-qa \ +docker exec pgsql_vacuum_db apt-get update +docker exec pgsql_vacuum_db apt-get install -y wget unzip +docker exec pgsql_vacuum_db wget https://www.postgresqltutorial.com/wp-content/uploads/2019/05/dvdrental.zip +docker exec pgsql_vacuum_db unzip dvdrental.zip +docker exec pgsql_vacuum_db psql -U postgres -c "CREATE EXTENSION pg_stat_statements;" +docker exec pgsql_vacuum_db psql -U postgres -c 'create database dvdrental;' +docker exec pgsql_vacuum_db pg_restore -U postgres -d dvdrental dvdrental.tar + +## Prepare Data with 1000 tables and each table having around 10k records +## Get 10000 Rows into Testing Table, Import the sample Database +rm dvdrental.tar.xz || true +rm dvdrental.sql || true +wget https://github.com/percona/pmm-qa/raw/PMM-10244-2/pmm-tests/postgres/SampleDB/dvdrental.tar.xz +tar -xvf dvdrental.tar.xz ## only works on Linux/Mac based OS +docker cp dvdrental.sql pgsql_vacuum_db:/ +docker exec pgsql_vacuum_db psql -d dvdrental -f dvdrental.sql -U postgres + +pmm-admin add postgresql --username=postgres --password=YIn7620U1SUc pgsql_vacuum_db localhost:7432 + +## Update & Delete tables using a while loop with sleep +j=0 +while [ $j -lt 3 ] +do + export LENGTH=$(shuf -i 100-120 -n 1) + export LENGTH_NEW=$(shuf -i 100-120 -n 1) + export TABLE=$(shuf -i 1-1000 -n 1) + export COUNT=$(docker exec pgsql_vacuum_db psql -U postgres -d dvdrental -c "select count(*) from film_testing_${TABLE} where length=${LENGTH};" | tail -3 | head -1 | xargs) + docker exec pgsql_vacuum_db psql -U postgres -d dvdrental -c "delete from film_testing_${TABLE} where length=${LENGTH};" + i=0 + while [ 
"$i" -le ${COUNT} ]; do + docker exec pgsql_vacuum_db psql -U postgres -d dvdrental -c "insert into film_testing_${TABLE} values (${i}, 'title for ${i}', 'Description for ${i}', ${LENGTH});" + i=$(( i + 1 )) + done + docker exec pgsql_vacuum_db psql -U postgres -d dvdrental -c "update film_testing_${TABLE} set length=${LENGTH_NEW} where length=${LENGTH};" + sleep 5 + j=$(( j + 1 )) +done diff --git a/pmm_qa/pgsql_pgss_setup.yml b/pmm_qa/pgsql_pgss_setup.yml new file mode 100644 index 00000000..b1674c07 --- /dev/null +++ b/pmm_qa/pgsql_pgss_setup.yml @@ -0,0 +1,73 @@ +--- +# This playbook does following: +# install postgres with pgstatement enabled for QAN and PMM Integration testing + +- hosts: all + become: true + become_method: sudo + vars: + pgsql_version: "{{ lookup('vars', 'extra_pgsql_version', default=lookup('env','PGSQL_VERSION') | default('14', true) ) }}" + pgsql_pgss_container: "{{ lookup('vars', 'extra_pgsql_pgss_container', default=lookup('env','PGSQL_PGSS_CONTAINER') | default('pgsql_pgss', true) ) }}" + pgsql_pgss_port: "{{ lookup('vars', 'extra_pgsql_port', default=lookup('env','PGSQL_PGSS_PORT') | default('5448', true) ) }}" + pmm_server_ip: "{{ lookup('vars', 'extra_pmm_server_ip', default=lookup('env','PMM_SERVER_IP') | default('127.0.0.1', true) ) }}" + client_version: "{{ lookup('vars', 'extra_client_version', default=lookup('env','CLIENT_VERSION') | default('dev-latest', true) ) }}" + admin_password: "{{ lookup('vars', 'extra_admin_password', default=lookup('env','ADMIN_PASSWORD') | default('admin', true) ) }}" + pmm_qa_branch: "{{ lookup('vars', 'extra_pmm_qa_branch', default=lookup('env','PMM_QA_GIT_BRANCH') | default('main', true) ) }}" + + tasks: + - name: cleanup container for client and DB setup + shell: > + docker ps -a --filter "name={{ pgsql_pgss_container }}" | grep -q . 
&& docker stop {{ pgsql_pgss_container }} && docker rm -fv {{ pgsql_pgss_container }} + ignore_errors: true + tags: + - cleanup + + - name: Create pmm-qa network if not exist + shell: docker network create pmm-qa + ignore_errors: true + + - name: Prepare Container for PostgreSQL + shell: > + sudo docker run -d --name={{ pgsql_pgss_container }} + -p {{ pgsql_pgss_port }}:5432 + phusion/baseimage:jammy-1.0.1 + + - name: Copy all required Artifacts to the docker pgsql_pgss_container + shell: "{{ item }}" + with_items: + - docker exec {{ pgsql_pgss_container }} mkdir -p artifacts + - docker cp ./pg_stat_statements_setup.sh {{ pgsql_pgss_container }}:/ + - docker cp ./pgsm_run_queries.sh {{ pgsql_pgss_container }}:/ + - docker cp ./pmm3-client-setup.sh {{ pgsql_pgss_container }}:/ + + - name: Execute Setup script inside the pgsql pgsql_pgss_container + shell: "{{ item }}" + with_items: + - docker exec {{ pgsql_pgss_container }} bash -xe ./pg_stat_statements_setup.sh --pgsql_version {{ pgsql_version }} > setup_pgsql_pgss_{{ pgsql_version }}.log + + - name: Install pmm2-client on the pgsql_pgss_container + shell: "{{ item }}" + with_items: + - docker network connect pmm-qa {{ pgsql_pgss_container }} + - docker exec {{ pgsql_pgss_container }} bash -x ./pmm3-client-setup.sh --pmm_server_ip {{ pmm_server_ip }} --client_version {{ client_version }} --admin_password {{ admin_password }} --use_metrics_mode no + + - name: Set Random Number Fact + set_fact: + random_number: "{{ (10000 | random) | int }}" + + - name: Remove pgsql service if already added previously + shell: "{{ item }}" + with_items: + - docker exec {{ pgsql_pgss_container }} bash -c 'source ~/.bash_profile || true; pmm-admin remove postgresql {{ pgsql_pgss_container }}_service_{{ random_number }}' + ignore_errors: true + + - name: Add pgsql_pgss for monitoring + shell: "{{ item }}" + with_items: + - docker exec {{ pgsql_pgss_container }} bash -c 'source ~/.bash_profile || true; pmm-admin list' + - docker exec {{ 
pgsql_pgss_container }} bash -c 'source ~/.bash_profile || true; pmm-admin add postgresql --username=pmm --password=pmm --query-source=pgstatements {{ pgsql_pgss_container }}_service_{{ random_number }}' + + - name: Run queries for generating data, using the same script for pgsm + shell: "{{ item }}" + with_items: + - docker exec {{ pgsql_pgss_container }} bash ./pgsm_run_queries.sh & diff --git a/pmm_qa/pgstatmonitor_metrics_queries.sh b/pmm_qa/pgstatmonitor_metrics_queries.sh new file mode 100644 index 00000000..c208da4a --- /dev/null +++ b/pmm_qa/pgstatmonitor_metrics_queries.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +export PDPGSQL_CONTAINER=$1 +db=1 +docker exec ${PDPGSQL_CONTAINER} mkdir /tmp/sql +wget https://raw.githubusercontent.com/percona/pmm-agent/main/testqueries/postgres/pg_stat_monitor_load.sql +docker exec ${PDPGSQL_CONTAINER} bash -c "psql -h localhost -U postgres -c 'create database test1'" +docker cp pg_stat_monitor_load.sql ${PDPGSQL_CONTAINER}:/tmp/sql +while true +do + docker exec -u postgres ${PDPGSQL_CONTAINER} psql test1 postgres -f /tmp/sql/pg_stat_monitor_load.sql + sleep 30 +done diff --git a/pmm_qa/pmm-framework.py b/pmm_qa/pmm-framework.py new file mode 100755 index 00000000..7b477332 --- /dev/null +++ b/pmm_qa/pmm-framework.py @@ -0,0 +1,962 @@ +import subprocess +import argparse +import os +import sys +import requests +import re +import shutil +import yaml +from scripts.get_env_value import get_value +from scripts.database_options import database_options as database_configs +from scripts.run_ansible_playbook import run_ansible_playbook + +def get_running_container_name(): + container_image_name = "pmm-server" + container_name = '' + try: + # Run 'docker ps' to get a list of running containers + output = subprocess.check_output(['docker', 'ps', '--format', 'table {{.ID}}\t{{.Image}}\t{{.Names}}']) + # Split the output into a list of container + containers = output.strip().decode('utf-8').split('\n')[1:] + # Check each line for the docker 
image name + for line in containers: + # Extract the image name + info_parts = line.split('\t')[0] + image_info = info_parts.split()[1] + # Check if the container is in the list of running containers + # and establish N/W connection with it. + if container_image_name in image_info: + container_name = info_parts.split()[2] + # Check if pmm-qa n/w exists and already connected to running container n/w + # if not connect it. + result = subprocess.run(['docker', 'network', 'inspect', 'pmm-qa'], capture_output=True, text=True) + if result.returncode != 0: + subprocess.run(['docker', 'network', 'create', 'pmm-qa']) + subprocess.run(['docker', 'network', 'connect', 'pmm-qa', container_name]) + else: + networks = result.stdout + if container_name not in networks: + subprocess.run(['docker', 'network', 'connect', 'pmm-qa', container_name]) + return container_name + + except subprocess.CalledProcessError: + # Handle the case where the 'docker ps' command fails + return None + + return None + +def setup_ps(db_type, db_version=None, db_config=None, args=None): + # Check if PMM server is running + container_name = get_running_container_name() + if container_name is None and args.pmm_server_ip is None: + print(f"Check if PMM Server is Up and Running..Exiting") + exit() + + # Check Setup Types + setup_type = '' + no_of_nodes = 1 + setup_type_value = get_value('SETUP_TYPE', db_type, args, db_config).lower() + if setup_type_value in ("group_replication", "gr"): + setup_type = 1 + no_of_nodes = 1 + elif setup_type_value in ("replication", "replica"): + setup_type = '' + no_of_nodes = 2 + + # Gather Version details + ps_version = os.getenv('PS_VERSION') or db_version or database_configs[db_type]["versions"][-1] + ps_version_int = int(ps_version.replace(".", "")) + if ps_version_int >= 84: + # Define environment variables for playbook + env_vars = { + 'PMM_SERVER_IP': args.pmm_server_ip or container_name or '127.0.0.1', + 'SETUP_TYPE': setup_type_value, + 'NODES_COUNT': 
get_value('NODES_COUNT', db_type, args, db_config), + 'QUERY_SOURCE': get_value('QUERY_SOURCE', db_type, args, db_config), + 'PS_VERSION': ps_version, + 'CLIENT_VERSION': get_value('CLIENT_VERSION', db_type, args, db_config), + 'ADMIN_PASSWORD': os.getenv('ADMIN_PASSWORD') or args.pmm_server_password or 'admin', + 'MY_ROCKS': get_value('MY_ROCKS', db_type, args, db_config), + } + + run_ansible_playbook('percona_server/percona-server-setup.yml', env_vars, args) + else: + # Define environment variables for playbook + env_vars = { + 'GROUP_REPLICATION': setup_type, + 'PS_NODES': no_of_nodes, + 'PS_VERSION': ps_version, + 'PMM_SERVER_IP': args.pmm_server_ip or container_name or '127.0.0.1', + 'PS_CONTAINER': 'ps_pmm_' + str(ps_version) + ( + '_replica' if setup_type_value in ("replication", "replica") else ''), + 'PS_PORT': 3318 if setup_type_value in ("replication", "replica") else 3317, + 'CLIENT_VERSION': get_value('CLIENT_VERSION', db_type, args, db_config), + 'QUERY_SOURCE': get_value('QUERY_SOURCE', db_type, args, db_config), + 'PS_TARBALL': get_value('TARBALL', db_type, args, db_config), + 'ADMIN_PASSWORD': os.getenv('ADMIN_PASSWORD') or args.pmm_server_password or 'admin', + 'PMM_QA_GIT_BRANCH': os.getenv('PMM_QA_GIT_BRANCH') or 'v3' + } + + # Ansible playbook filename + playbook_filename = 'ps_pmm_setup.yml' + + # Call the function to run the Ansible playbook + run_ansible_playbook(playbook_filename, env_vars, args) + + +def setup_mysql(db_type, db_version=None, db_config=None, args=None): + # Check if PMM server is running + container_name = get_running_container_name() + if container_name is None and args.pmm_server_ip is None: + print(f"Check if PMM Server is Up and Running.., Exiting") + exit() + + # Gather Version details + ms_version = os.getenv('MS_VERSION') or db_version or database_configs[db_type]["versions"][-1] + ms_version_int = int(ms_version.replace(".", "")) + + # Check Setup Types + setup_type = '' + no_of_nodes = 1 + setup_type_value = 
get_value('SETUP_TYPE', db_type, args, db_config).lower() + if setup_type_value in ("group_replication", "gr"): + setup_type = 1 + no_of_nodes = 1 + elif setup_type_value in ("replication", "replica"): + setup_type = '' + no_of_nodes = 2 + + # Define environment variables for playbook + env_vars = { + 'GROUP_REPLICATION': setup_type, + 'MS_NODES': no_of_nodes, + 'MS_VERSION': ms_version, + 'PMM_SERVER_IP': args.pmm_server_ip or container_name or '127.0.0.1', + 'MS_CONTAINER': 'mysql_pmm_' + str(ms_version), + 'CLIENT_VERSION': get_value('CLIENT_VERSION', db_type, args, db_config), + 'QUERY_SOURCE': get_value('QUERY_SOURCE', db_type, args, db_config), + 'MS_TARBALL': get_value('TARBALL', db_type, args, db_config), + 'ADMIN_PASSWORD': os.getenv('ADMIN_PASSWORD') or args.pmm_server_password or 'admin', + 'PMM_QA_GIT_BRANCH': os.getenv('PMM_QA_GIT_BRANCH') or 'v3' + } + + # Ansible playbook filename + playbook_filename = 'ms_pmm_setup.yml' + + # Call the function to run the Ansible playbook + run_ansible_playbook(playbook_filename, env_vars, args) + +def setup_ssl_mysql(db_type, db_version=None, db_config=None, args=None): + # Check if PMM server is running + container_name = get_running_container_name() + if container_name is None and args.pmm_server_ip is None: + print(f"Check if PMM Server is Up and Running..Exiting") + exit() + + # Check Setup Types + setup_type = None + no_of_nodes = 1 + setup_type_value = get_value('SETUP_TYPE', db_type, args, db_config).lower() + + # Gather Version details + ms_version = os.getenv('MS_VERSION') or db_version or database_configs[db_type]["versions"][-1] + # Define environment variables for playbook + env_vars = { + 'MYSQL_VERSION': ms_version, + 'PMM_SERVER_IP': args.pmm_server_ip or container_name or '127.0.0.1', + 'MYSQL_SSL_CONTAINER': 'mysql_ssl_' + str(ms_version), + 'CLIENT_VERSION': get_value('CLIENT_VERSION', db_type, args, db_config), + 'ADMIN_PASSWORD': os.getenv('ADMIN_PASSWORD') or args.pmm_server_password or 'admin', 
+ 'PMM_QA_GIT_BRANCH': os.getenv('PMM_QA_GIT_BRANCH') or 'v3' + } + + # Ansible playbook filename + playbook_filename = 'tls-ssl-setup/mysql_tls_setup.yml' + + # Call the function to run the Ansible playbook + run_ansible_playbook(playbook_filename, env_vars, args) + + +def setup_pdpgsql(db_type, db_version=None, db_config=None, args=None): + # Check if PMM server is running + container_name = get_running_container_name() + if container_name is None and args.pmm_server_ip is None: + print(f"Check if PMM Server is Up and Running..Exiting") + exit() + + # Gather Version details + pdpgsql_version = os.getenv('PDPGSQL_VERSION') or db_version or database_configs[db_type]["versions"][-1] + setup_type_value = get_value('SETUP_TYPE', db_type, args, db_config).lower() + print(f"Setup type is {setup_type_value}") + + if setup_type_value in ("replication", "replica", "patroni"): + # Define environment variables for playbook + env_vars = { + 'PGSTAT_MONITOR_BRANCH': 'main', + 'PDPGSQL_VERSION': pdpgsql_version, + 'PMM_SERVER_IP': args.pmm_server_ip or container_name or '127.0.0.1', + 'PDPGSQL_PGSM_CONTAINER': 'pdpgsql_pgsm_pmm_' + str(pdpgsql_version), + 'CLIENT_VERSION': get_value('CLIENT_VERSION', db_type, args, db_config), + 'USE_SOCKET': get_value('USE_SOCKET', db_type, args, db_config), + 'ADMIN_PASSWORD': os.getenv('ADMIN_PASSWORD') or args.pmm_server_password or 'admin', + 'PDPGSQL_PGSM_PORT': 5447, + 'DISTRIBUTION': '', + 'PMM_QA_GIT_BRANCH': os.getenv('PMM_QA_GIT_BRANCH') or 'v3', + 'SETUP_TYPE': setup_type_value + } + + # Ansible playbook filename + playbook_filename = 'percona-distribution-postgresql/percona-distribution-postgres-setup.yml' + else: + # Define environment variables for playbook + env_vars = { + 'PGSTAT_MONITOR_BRANCH': 'main', + 'PDPGSQL_VERSION': pdpgsql_version, + 'PMM_SERVER_IP': args.pmm_server_ip or container_name or '127.0.0.1', + 'PDPGSQL_PGSM_CONTAINER': 'pdpgsql_pgsm_pmm_' + str(pdpgsql_version), + 'CLIENT_VERSION': 
get_value('CLIENT_VERSION', db_type, args, db_config), + 'USE_SOCKET': get_value('USE_SOCKET', db_type, args, db_config), + 'ADMIN_PASSWORD': os.getenv('ADMIN_PASSWORD') or args.pmm_server_password or 'admin', + 'PDPGSQL_PGSM_PORT': 5447, + 'DISTRIBUTION': '', + 'PMM_QA_GIT_BRANCH': os.getenv('PMM_QA_GIT_BRANCH') or 'v3' + } + + # Ansible playbook filename + playbook_filename = 'pdpgsql_pgsm_setup.yml' + + # Call the function to run the Ansible playbook + run_ansible_playbook(playbook_filename, env_vars, args) + + +def setup_ssl_pdpgsql(db_type, db_version=None, db_config=None, args=None): + # Check if PMM server is running + container_name = get_running_container_name() + if container_name is None and args.pmm_server_ip is None: + print(f"Check if PMM Server is Up and Running..Exiting") + exit() + + # Gather Version details + pdpgsql_version = os.getenv('PDPGSQL_VERSION') or db_version or database_configs[db_type]["versions"][-1] + + # Define environment variables for playbook + env_vars = { + 'PGSTAT_MONITOR_BRANCH': 'main', + 'PGSQL_VERSION': pdpgsql_version, + 'PMM_SERVER_IP': args.pmm_server_ip or container_name or '127.0.0.1', + 'PGSQL_SSL_CONTAINER': 'pdpgsql_pgsm_ssl_' + str(pdpgsql_version), + 'CLIENT_VERSION': get_value('CLIENT_VERSION', db_type, args, db_config), + 'USE_SOCKET': get_value('USE_SOCKET', db_type, args, db_config), + 'ADMIN_PASSWORD': os.getenv('ADMIN_PASSWORD') or args.pmm_server_password or 'admin', + 'PMM_QA_GIT_BRANCH': os.getenv('PMM_QA_GIT_BRANCH') or 'v3' + } + + # Ansible playbook filename + playbook_filename = 'tls-ssl-setup/postgresql_tls_setup.yml' + + # Call the function to run the Ansible playbook + run_ansible_playbook(playbook_filename, env_vars, args) + + +def setup_pgsql(db_type, db_version=None, db_config=None, args=None): + # Check if PMM server is running + container_name = get_running_container_name() + if container_name is None and args.pmm_server_ip is None: + print(f"Check if PMM Server is Up and Running..Exiting") + 
exit() + + # Gather Version details + pgsql_version = os.getenv('PGSQL_VERSION') or db_version or database_configs[db_type]["versions"][-1] + setup_type_value = get_value('SETUP_TYPE', db_type, args, db_config).lower() + + print(f"Setup type is {setup_type_value}") + + if setup_type_value in ("replication", "replica"): + # Define environment variables for playbook + env_vars = { + 'PGSQL_VERSION': pgsql_version, + 'PMM_SERVER_IP': args.pmm_server_ip or container_name or '127.0.0.1', + 'PGSQL_PGSS_CONTAINER': 'pgsql_pgss_pmm_' + str(pgsql_version), + 'CLIENT_VERSION': get_value('CLIENT_VERSION', db_type, args, db_config), + 'USE_SOCKET': get_value('USE_SOCKET', db_type, args, db_config), + 'ADMIN_PASSWORD': os.getenv('ADMIN_PASSWORD') or args.pmm_server_password or 'admin', + 'PGSQL_PGSS_PORT': 5448, + 'PMM_QA_GIT_BRANCH': os.getenv('PMM_QA_GIT_BRANCH') or 'v3', + 'SETUP_TYPE': setup_type_value + } + + # Ansible playbook filename + playbook_filename = 'postgresql/postgresql-setup.yml' + else: + # Define environment variables for playbook + env_vars = { + 'PGSQL_VERSION': pgsql_version, + 'PMM_SERVER_IP': args.pmm_server_ip or container_name or '127.0.0.1', + 'PGSQL_PGSS_CONTAINER': 'pgsql_pgss_pmm_' + str(pgsql_version), + 'CLIENT_VERSION': get_value('CLIENT_VERSION', db_type, args, db_config), + 'USE_SOCKET': get_value('USE_SOCKET', db_type, args, db_config), + 'ADMIN_PASSWORD': os.getenv('ADMIN_PASSWORD') or args.pmm_server_password or 'admin', + 'PGSQL_PGSS_PORT': 5448, + 'PMM_QA_GIT_BRANCH': os.getenv('PMM_QA_GIT_BRANCH') or 'v3' + } + + # Ansible playbook filename + playbook_filename = 'pgsql_pgss_setup.yml' + + # Call the function to run the Ansible playbook + run_ansible_playbook(playbook_filename, env_vars, args) + + +def setup_haproxy(db_type, db_version=None, db_config=None, args=None): + # Check if PMM server is running + container_name = get_running_container_name() + if container_name is None and args.pmm_server_ip is None: + print(f"Check if PMM Server 
is Up and Running..Exiting") + exit() + + # Define environment variables for playbook + env_vars = { + 'PMM_SERVER_IP': args.pmm_server_ip or container_name or '127.0.0.1', + 'HAPROXY_CONTAINER': 'haproxy_pmm', + 'CLIENT_VERSION': get_value('CLIENT_VERSION', db_type, args, db_config), + 'ADMIN_PASSWORD': os.getenv('ADMIN_PASSWORD') or args.pmm_server_password or 'admin', + 'PMM_QA_GIT_BRANCH': os.getenv('PMM_QA_GIT_BRANCH') or 'v3' + } + + # Ansible playbook filename + playbook_filename = 'haproxy_setup.yml' + + # Call the function to run the Ansible playbook + run_ansible_playbook(playbook_filename, env_vars, args) + + +def setup_external(db_type, db_version=None, db_config=None, args=None): + # Check if PMM server is running + container_name = get_running_container_name() + if container_name is None and args.pmm_server_ip is None: + print(f"Check if PMM Server is Up and Running..Exiting") + exit() + + # Gather Version details + redis_version = os.getenv('REDIS_VERSION') or db_version or database_configs["EXTERNAL"]["REDIS"]["versions"][-1] + nodeprocess_version = os.getenv('NODE_PROCESS_VERSION') or db_version or \ + database_configs["EXTERNAL"]["NODEPROCESS"]["versions"][-1] + + # Define environment variables for playbook + env_vars = { + 'PMM_SERVER_IP': args.pmm_server_ip or container_name or '127.0.0.1', + 'REDIS_EXPORTER_VERSION': redis_version, + 'NODE_PROCESS_EXPORTER_VERSION': nodeprocess_version, + 'EXTERNAL_CONTAINER': 'external_pmm', + 'CLIENT_VERSION': get_value('CLIENT_VERSION', db_type, args, db_config), + 'ADMIN_PASSWORD': os.getenv('ADMIN_PASSWORD') or args.pmm_server_password or 'admin', + 'PMM_QA_GIT_BRANCH': os.getenv('PMM_QA_GIT_BRANCH') or 'v3' + } + + # Ansible playbook filename + playbook_filename = 'external_setup.yml' + + # Call the function to run the Ansible playbook + run_ansible_playbook(playbook_filename, env_vars, args) + + +def setup_mlaunch_psmdb(db_type, db_version=None, db_config=None, args=None): + # Check if PMM server is 
running + container_name = get_running_container_name() + if container_name is None and args.pmm_server_ip is None: + print(f"Check if PMM Server is Up and Running..Exiting") + exit() + + # Gather Version details + psmdb_version = os.getenv('PSMDB_VERSION') or db_version or \ + database_configs[db_type]["versions"][-1] + + # Define environment variables for playbook + env_vars = { + 'PSMDB_VERSION': psmdb_version, + 'PMM_SERVER_IP': args.pmm_server_ip or container_name or '127.0.0.1', + 'PSMDB_CONTAINER': 'psmdb_pmm_' + str(psmdb_version), + 'PSMDB_SETUP': get_value('SETUP_TYPE', db_type, args, db_config), + 'CLIENT_VERSION': get_value('CLIENT_VERSION', db_type, args, db_config), + 'ADMIN_PASSWORD': os.getenv('ADMIN_PASSWORD') or args.pmm_server_password or 'admin', + 'PMM_QA_GIT_BRANCH': os.getenv('PMM_QA_GIT_BRANCH') or 'v3' + } + + # Ansible playbook filename + playbook_filename = 'mlaunch_psmdb_setup.yml' + + # Call the function to run the Ansible playbook + run_ansible_playbook(playbook_filename, env_vars, args) + + +def setup_mlaunch_modb(db_type, db_version=None, db_config=None, args=None): + # Check if PMM server is running + container_name = get_running_container_name() + if container_name is None and args.pmm_server_ip is None: + print(f"Check if PMM Server is Up and Running..Exiting") + exit() + + # Gather Version details + modb_version = os.getenv('MODB_VERSION') or db_version or \ + database_configs[db_type]["versions"][-1] + + # Define environment variables for playbook + env_vars = { + 'MODB_VERSION': modb_version, + 'PMM_SERVER_IP': args.pmm_server_ip or container_name or '127.0.0.1', + 'MODB_CONTAINER': 'modb_pmm_' + str(modb_version), + 'MODB_SETUP': get_value('SETUP_TYPE', db_type, args, db_config), + 'CLIENT_VERSION': get_value('CLIENT_VERSION', db_type, args, db_config), + 'ADMIN_PASSWORD': os.getenv('ADMIN_PASSWORD') or args.pmm_server_password or 'admin', + 'PMM_QA_GIT_BRANCH': os.getenv('PMM_QA_GIT_BRANCH') or 'v3' + } + + # Ansible playbook 
filename + playbook_filename = 'mlaunch_modb_setup.yml' + + # Call the function to run the Ansible playbook + run_ansible_playbook(playbook_filename, env_vars, args) + + +def execute_shell_scripts(shell_scripts, project_relative_scripts_dir, env_vars, args): + # Get script directory + current_directory = os.getcwd() + shell_scripts_path = os.path.abspath(os.path.join(current_directory, os.pardir, project_relative_scripts_dir)) + + # Get the original working directory + original_dir = os.getcwd() + + if args.verbose: + print(f'Options set after considering defaults: {env_vars}') + + # Set environment variables if provided + if env_vars: + for key, value in env_vars.items(): + os.environ[key] = value + + # Execute each shell script + for script in shell_scripts: + result: subprocess.CompletedProcess + try: + print(f'running script {script}') + # Change directory to where the script is located + os.chdir(shell_scripts_path) + print(f'changed directory {os.getcwd()}') + result = subprocess.run(['bash', script], capture_output=True, text=True, check=True) + print("Output:") + print(result.stdout) + print(f"Shell script '{script}' executed successfully.") + except subprocess.CalledProcessError as e: + print( + f"Shell script '{script}' failed with return code: {e.returncode}! \n {e.stderr} \n Output: \n {e.stdout} ") + exit(e.returncode) + except Exception as e: + print("Unexpected error occurred:", e) + finally: + # Return to the original working directory + os.chdir(original_dir) + + +# Temporary method for Sharding Setup. 
+def mongo_sharding_setup(script_filename, args): + # Get script directory + script_path = os.path.abspath(sys.argv[0]) + script_dir = os.path.dirname(script_path) + scripts_path = script_dir + "/../pmm_psmdb-pbm_setup/" + + # Temporary shell script filename + shell_file_path = scripts_path + script_filename + + # Temporary docker compose filename + compose_filename = f'docker-compose-sharded-no-server.yaml' + compose_file_path = scripts_path + compose_filename + + # Create pmm-qa n/w used in workaround + result = subprocess.run(['docker', 'network', 'inspect', 'pmm-qa'], capture_output=True) + if not result: + subprocess.run(['docker', 'network', 'create', 'pmm-qa']) + + no_server = True + # Add workaround (copy files) till sharding only support is ready. + try: + if no_server: + # Search & Replace content in the temporary compose files + subprocess.run( + ['cp', f'{scripts_path}docker-compose-sharded.yaml', f'{compose_file_path}']) + admin_password = os.getenv('ADMIN_PASSWORD') or args.pmm_server_password or 'admin' + subprocess.run(['sed', '-i', f's/password/{admin_password}/g', f'{compose_file_path}']) + subprocess.run(['sed', '-i', '/- test-network/a\\ - pmm-qa', f'{compose_file_path}']) + subprocess.run(['sed', '-i', '/driver: bridge/a\\ pmm-qa:\\n name: pmm-qa\\n external: true', + f'{compose_file_path}']) + subprocess.run( + ['sed', '-i', '/^ pmm-server:/,/^$/{/^ test:/!d}', f'{compose_file_path}']) + with open(f'{compose_file_path}', 'a') as f: + subprocess.run(['echo', ' backups: null'], stdout=f, text=True, check=True) + + # Search replace content in the temporary shell files + subprocess.run(['cp', f'{scripts_path}start-sharded.sh', f'{shell_file_path}']) + subprocess.run(['sed', '-i', '/echo "configuring pmm-server/,/sleep 30/d', + f'{shell_file_path}']) + subprocess.run(['sed', '-i', f's/docker-compose-sharded.yaml/{compose_filename}/g', + f'{shell_file_path}']) + except subprocess.CalledProcessError as e: + print(f"Error occurred: {e}") + + +def 
get_latest_psmdb_version(psmdb_version): + if psmdb_version == "latest": + return psmdb_version + + # Define the data to be sent in the POST request + data = { + 'version': f'percona-server-mongodb-{psmdb_version}' + } + + # Make the POST request + response = requests.post('https://www.percona.com/products-api.php', data=data) + + # Extract the version number using regular expression + version_number = [v.split('|')[0] for v in re.findall(r'value="([^"]*)"', response.text)] + + + if version_number: + # Sort the version numbers and extract the latest one + latest_version = sorted(version_number, key=lambda x: tuple(map(int, x.split('-')[-1].split('.'))))[-1] + + # Extract the full version number + major_version = latest_version.split('-')[3].strip() # Trim spaces + minor_version = latest_version.split('-')[4].strip() # Trim spaces + + return f'{major_version}-{minor_version}' + else: + return None + + +def setup_psmdb(db_type, db_version=None, db_config=None, args=None): + # Check if PMM server is running + container_name = get_running_container_name() + if container_name is None and args.pmm_server_ip is None: + print(f"Check if PMM Server is Up and Running...Exiting") + exit(1) + + # Gather Version details + psmdb_version = os.getenv('PSMDB_VERSION') or get_latest_psmdb_version(db_version) or \ + database_configs[db_type]["versions"][-1] + + # Handle port address for external or internal address + server_hostname = container_name + port = 8443 + + if args.pmm_server_ip: + port = 443 + server_hostname = args.pmm_server_ip + + server_address = f'{server_hostname}:{port}' + + # Define environment variables for playbook + env_vars = { + 'PSMDB_VERSION': psmdb_version, + 'PMM_SERVER_CONTAINER_ADDRESS': server_address, + 'PSMDB_CONTAINER': 'psmdb_pmm_' + str(psmdb_version), + 'ADMIN_PASSWORD': os.getenv('ADMIN_PASSWORD') or args.pmm_server_password or 'admin', + 'PMM_CLIENT_VERSION': get_value('CLIENT_VERSION', db_type, args, db_config), + 'COMPOSE_PROFILES': 
get_value('COMPOSE_PROFILES', db_type, args, db_config), + 'MONGO_SETUP_TYPE': get_value('SETUP_TYPE', db_type, args, db_config), + 'OL_VERSION': get_value('OL_VERSION', db_type, args, db_config), + 'GSSAPI': get_value('GSSAPI', db_type, args, db_config), + 'TESTS': 'no', + 'CLEANUP': 'no' + } + + shell_scripts = [] + scripts_folder = "pmm_psmdb-pbm_setup" + setup_type = get_value('SETUP_TYPE', db_type, args, db_config).lower() + + if setup_type in ("pss", "psa"): + shell_scripts = ['start-rs-only.sh'] + elif setup_type in ("shards", "sharding"): + shell_scripts = ['start-sharded-no-server.sh'] + mongo_sharding_setup(shell_scripts[0], args) + + # Execute shell scripts + if not shell_scripts == []: + execute_shell_scripts(shell_scripts, scripts_folder, env_vars, args) + + +# Temporary method for Mongo SSL Setup. +def mongo_ssl_setup(script_filename, args): + # Get script directory + script_path = os.path.abspath(sys.argv[0]) + script_dir = os.path.dirname(script_path) + scripts_path = script_dir + "/../pmm_psmdb_diffauth_setup/" + + # Temporary shell script filename + shellscript_file_path = scripts_path + script_filename + + # Temporary docker compose filename + compose_filename = f'docker-compose-psmdb.yml' + compose_file_path = scripts_path + compose_filename + compose_file_folder = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + '/pmm_psmdb_diffauth_setup/' + + # Create pmm-qa n/w used in workaround + result = subprocess.run(['docker', 'network', 'inspect', 'pmm-qa'], capture_output=True) + if not result: + subprocess.run(['docker', 'network', 'create', 'pmm-qa']) + + no_server = True + # Add workaround (copy files) till sharding only support is ready. 
+ try: + if no_server: + shutil.copy(compose_file_folder + 'docker-compose-pmm-psmdb.yml', compose_file_folder + compose_filename) + print(f'File location is: {compose_file_folder + compose_filename}') + admin_password = os.getenv('ADMIN_PASSWORD') or args.pmm_server_password or 'admin' + with open(compose_file_folder + compose_filename, 'r') as f: + data = yaml.safe_load(f) + + if 'services' in data and 'pmm-server' in data['services']: + del data['services']['pmm-server'] + + if 'services' in data and 'kerberos' in data['services']: + del data['services']['kerberos'] + + if 'pmm-agent setup 2' in data: + data = data.replace('pmm-agent setup 2', 'pmm-agent setup --server-insecure-tls 2') + + for service in data.get('services', {}).values(): + networks = service.get('networks', []) + if isinstance(networks, list): + if 'pmm-qa' not in networks: + networks.append('pmm-qa') + service['networks'] = networks + elif isinstance(networks, dict): + networks['pmm-qa'] = {} + else: + service['networks'] = ['pmm-qa'] + + # Ensure the network is declared globally + if 'networks' not in data: + data['networks'] = {} + + data['networks']['pmm-qa'] = {'external': True, 'name': 'pmm-qa'} + + psmdb_service = data.get('services', {}).get('psmdb-server') + if psmdb_service: + env = psmdb_service.get('environment', []) + + # If environment is a list (common in Docker Compose) + if isinstance(env, list): + for i, entry in enumerate(env): + if entry.startswith('PMM_AGENT_SERVER_PASSWORD='): + env[i] = f'PMM_AGENT_SERVER_PASSWORD={admin_password}' + break + else: + env.append(f'PMM_AGENT_SERVER_PASSWORD={admin_password}') + psmdb_service['environment'] = env + + # If environment is a dict (less common but valid) + elif isinstance(env, dict): + env['PMM_AGENT_SERVER_PASSWORD'] = admin_password + psmdb_service['environment'] = env + + depends_on = psmdb_service.get('depends_on') + print(f'Service depends on: {depends_on}') + if 'pmm-server' in depends_on or 'kerberos' in depends_on: + del 
psmdb_service['depends_on'] + + # Save it back + with open(compose_file_path, 'w') as f: + yaml.dump(data, f, sort_keys=False, default_flow_style=False) + except yaml.YAMLError as e: + print(f"Error occurred: {e}") + + try: + subprocess.run(['sed', '-i', f's/docker-compose-pmm-psmdb.yml/{compose_filename}/g', f'{shellscript_file_path}']) + except subprocess.CalledProcessError as e: + print(f"Error occurred: {e}") + +def setup_ssl_psmdb(db_type, db_version=None, db_config=None, args=None): + # Check if PMM server is running + container_name = get_running_container_name() + if container_name is None and args.pmm_server_ip is None: + print(f"Check if PMM Server is Up and Running...Exiting") + exit(1) + + # Gather Version details + psmdb_version = os.getenv('PSMDB_VERSION') or get_latest_psmdb_version(db_version) or \ + database_configs[db_type]["versions"][-1] + + # Handle port address for external or internal address + server_hostname = container_name + port = 8443 + + if args.pmm_server_ip: + port = 443 + server_hostname = args.pmm_server_ip + + server_address = f'{server_hostname}:{port}' + + # Define environment variables for playbook + env_vars = { + 'PSMDB_VERSION': psmdb_version, + 'PMM_SERVER_CONTAINER_ADDRESS': server_address, + 'PSMDB_CONTAINER': 'psmdb_pmm_' + str(psmdb_version), + 'ADMIN_PASSWORD': os.getenv('ADMIN_PASSWORD') or args.pmm_server_password or 'admin', + 'PMM_CLIENT_VERSION': get_value('CLIENT_VERSION', db_type, args, db_config), + 'COMPOSE_PROFILES': get_value('COMPOSE_PROFILES', db_type, args, db_config), + 'MONGO_SETUP_TYPE': get_value('SETUP_TYPE', db_type, args, db_config), + 'TESTS': 'no', + 'CLEANUP': 'no' + } + + scripts_folder = "pmm_psmdb_diffauth_setup" + + shell_scripts = ['test-auth.sh'] + mongo_ssl_setup(shell_scripts[0], args) + + # Execute shell scripts + if not shell_scripts == []: + execute_shell_scripts(shell_scripts, scripts_folder, env_vars, args) + + +def setup_ssl_mlaunch(db_type, db_version=None, db_config=None, 
args=None): + # Check if PMM server is running + container_name = get_running_container_name() + if container_name is None and args.pmm_server_ip is None: + print(f"Check if PMM Server is Up and Running...Exiting") + exit(1) + + # Gather Version details + psmdb_version = os.getenv('PSMDB_VERSION') or db_version or \ + database_configs[db_type]["versions"][-1] + + # Define environment variables for playbook + env_vars = { + 'MONGODB_VERSION': psmdb_version, + 'PMM_SERVER_IP': args.pmm_server_ip or container_name or '127.0.0.1', + 'MONGODB_SSL_CONTAINER': 'psmdb_ssl_pmm_' + str(psmdb_version), + 'CLIENT_VERSION': get_value('CLIENT_VERSION', db_type, args, db_config), + 'ADMIN_PASSWORD': os.getenv('ADMIN_PASSWORD') or args.pmm_server_password or 'admin', + 'PMM_QA_GIT_BRANCH': os.getenv('PMM_QA_GIT_BRANCH') or 'v3' + } + + # Ansible playbook filename + playbook_filename = 'tls-ssl-setup/mlaunch_tls_setup.yml' + + # Call the function to run the Ansible playbook + run_ansible_playbook(playbook_filename, env_vars, args) + + +def setup_pxc_proxysql(db_type, db_version=None, db_config=None, args=None): + # Check if PMM server is running + container_name = get_running_container_name() + if container_name is None and args.pmm_server_ip is None: + print(f"Check if PMM Server is Up and Running..Exiting") + exit() + + # Gather Version details + pxc_version = os.getenv('PXC_VERSION') or db_version or database_configs[db_type]["versions"][-1] + proxysql_version = os.getenv('PROXYSQL_VERSION') or database_configs["PROXYSQL"]["versions"][-1] + + # Define environment variables for playbook + env_vars = { + 'PXC_NODES': '3', + 'PXC_VERSION': pxc_version, + 'PROXYSQL_VERSION': proxysql_version, + 'PXC_TARBALL': get_value('TARBALL', db_type, args, db_config), + 'PROXYSQL_PACKAGE': get_value('PACKAGE', 'PROXYSQL', args, db_config), + 'PMM_SERVER_IP': args.pmm_server_ip or container_name or '127.0.0.1', + 'PXC_CONTAINER': 'pxc_proxysql_pmm_' + str(pxc_version), + 'CLIENT_VERSION': 
get_value('CLIENT_VERSION', db_type, args, db_config), + 'ADMIN_PASSWORD': os.getenv('ADMIN_PASSWORD') or args.pmm_server_password or 'admin', + 'QUERY_SOURCE': get_value('QUERY_SOURCE', db_type, args, db_config), + 'PMM_QA_GIT_BRANCH': os.getenv('PMM_QA_GIT_BRANCH') or 'v3' + } + + # Ansible playbook filename + playbook_filename = 'pxc_proxysql_setup.yml' + + # Call the function to run the Ansible playbook + run_ansible_playbook(playbook_filename, env_vars, args) + + +def setup_dockerclients(db_type, db_version=None, db_config=None, args=None): + # Define environment variables for shell script + env_vars = {} + + # Shell script filename + shell_scripts = ['setup_docker_client_images.sh'] + shell_scripts_path = 'pmm_qa' + + # Call the function to run the setup_docker_client_images script + execute_shell_scripts(shell_scripts, shell_scripts_path, env_vars, args) + +def setup_valkey(db_type, db_version=None, db_config=None, args=None): + + # Check if PMM server is running + container_name = get_running_container_name() + if container_name is None and args.pmm_server_ip is None: + print(f"Check if PMM Server is Up and Running..Exiting") + exit() + + # Gather Version details + valkey_version = os.getenv('VALKEY_VERSION') or db_version or database_configs[db_type]["versions"][-1] + setup_type_value = get_value('SETUP_TYPE', db_type, args, db_config).lower() + + # Define environment variables for playbook + env_vars = { + 'PMM_SERVER_IP': args.pmm_server_ip or container_name or '127.0.0.1', + 'VALKEY_VERSION': valkey_version, + 'CLIENT_VERSION': get_value('CLIENT_VERSION', db_type, args, db_config), + 'ADMIN_PASSWORD': os.getenv('ADMIN_PASSWORD') or args.pmm_server_password or 'admin', + 'PMM_QA_GIT_BRANCH': os.getenv('PMM_QA_GIT_BRANCH') or 'v3', + 'SETUP_TYPE': setup_type_value + } + + # Choose playbook based on SETUP_TYPE (cluster is default; sentinel only when explicitly requested) + if setup_type_value in ("sentinel", "sentinels"): + playbook_filename = 
'valkey/valkey-sentinel.yml' + else: + playbook_filename = 'valkey/valkey-cluster.yml' + + # Call the function to run the Ansible playbook + run_ansible_playbook(playbook_filename, env_vars, args) + +# Set up databases based on arguments received +def setup_database(db_type, db_version=None, db_config=None, args=None): + if args.verbose: + if db_version: + print(f"Setting up {db_type} version {db_version}", end=" ") + else: + print(f"Setting up {db_type}", end=" ") + + if db_config: + print(f"with configuration: {db_config}") + else: + print() + + if db_type == 'MYSQL': + setup_mysql(db_type, db_version, db_config, args) + elif db_type == 'PS': + setup_ps(db_type, db_version, db_config, args) + elif db_type == 'PGSQL': + setup_pgsql(db_type, db_version, db_config, args) + elif db_type == 'PDPGSQL': + setup_pdpgsql(db_type, db_version, db_config, args) + elif db_type == 'PSMDB': + setup_psmdb(db_type, db_version, db_config, args) + elif db_type == 'PXC': + setup_pxc_proxysql(db_type, db_version, db_config, args) + elif db_type == 'HAPROXY': + setup_haproxy(db_type, db_version, db_config, args) + elif db_type == 'EXTERNAL': + setup_external(db_type, db_version, db_config, args) + elif db_type == 'DOCKERCLIENTS': + setup_dockerclients(db_type, db_version, db_config, args) + elif db_type == 'SSL_MYSQL': + setup_ssl_mysql(db_type, db_version, db_config, args) + elif db_type == 'SSL_PDPGSQL': + setup_ssl_pdpgsql(db_type, db_version, db_config, args) + elif db_type == 'SSL_PSMDB': + setup_ssl_psmdb(db_type, db_version, db_config, args) + elif db_type == 'MLAUNCH_PSMDB': + setup_mlaunch_psmdb(db_type, db_version, db_config, args) + elif db_type == 'MLAUNCH_MODB': + setup_mlaunch_modb(db_type, db_version, db_config, args) + elif db_type == 'SSL_MLAUNCH': + setup_ssl_mlaunch(db_type, db_version, db_config, args) + elif db_type == 'BUCKET': + setup_bucket(db_type, db_version, db_config, args) + elif db_type == 'VALKEY': + setup_valkey(db_type, db_version, db_config, args) + 
else: + print(f"Database type {db_type} is not recognised, Exiting...") + exit(1) + + +def setup_bucket(db_type, db_version=None, db_config=None, args=None): + print("Setting up bucket") + bucket_names_value = get_value('BUCKET_NAMES', db_type, args, db_config).lower().replace('"', '').split(';') + print(bucket_names_value) + env_vars = { + 'BUCKETS': bucket_names_value + } + + run_ansible_playbook('tasks/create_minio_container.yml', env_vars, args) + + +# Main +if __name__ == "__main__": + parser = argparse.ArgumentParser(description='PMM Framework Script to setup Multiple Databases', + usage=argparse.SUPPRESS) + # Add subparsers for database types + subparsers = parser.add_subparsers(dest='database_type', help='Choose database type above') + + # Add subparser for each database type dynamically + for db_type, options in database_configs.items(): + db_parser = subparsers.add_parser(db_type.lower()) + for config, value in options['configurations'].items(): + db_parser.add_argument(f'{config}', metavar='', help=f'{config} for {db_type} (default: {value})') + + # Add arguments + parser.add_argument("--database", action='append', nargs=1, + metavar='db_name[,=version][,option1=value1,option2=value2,...]') + parser.add_argument("--pmm-server-ip", nargs='?', help='PMM Server IP to connect') + parser.add_argument("--pmm-server-password", nargs='?', help='PMM Server password') + parser.add_argument("--client-version", nargs='?', help='PMM Client version/tarball') + parser.add_argument("--verbose", "--v", action='store_true', help='Display verbose information') + parser.add_argument("--verbosity-level", nargs='?', help='Display verbose information level') + args = parser.parse_args() + + if args.verbosity_level is not None and not args.verbosity_level.isnumeric(): + print(f"Option {args.verbosity_level} is invalid verbosity level option, please provide number 1-5") + exit(1) + + # Parse arguments + try: + for db in args.database: + db_parts = db[0].split(',') + configs = 
db_parts[0:] if len(db_parts) > 1 else db[0:] + db_type = None + db_version = None + db_config = {} + + if configs: + for config in configs: + if "=" in config: + key, value = config.split('=') + else: + key, value = config, None + + # Convert all arguments/options only to uppercase + key = key.upper() + + try: + if key in database_configs: + db_type = key + if "versions" in database_configs[db_type]: + if value in database_configs[db_type]["versions"]: + db_version = value + else: + if args.verbose: + print( + f"Value {value} is not recognised for Option {key}, will be using Default value") + elif key in database_configs[db_type]["configurations"]: + db_config[key] = value + else: + if args.verbose: + print(f"Option {key} is not recognised, will be using default option") + continue + except KeyError as e: + print(f"Option {key} is not recognised with error {e}, Please check and try again") + parser.print_help() + exit(1) + # Set up the specified databases + setup_database(db_type, db_version, db_config, args) + except argparse.ArgumentError as e: + print(f"Option is not recognised:", e) + parser.print_help() + exit(1) + except Exception as e: + print("An unexpected error occurred:", e) + parser.print_help() + exit(1) diff --git a/pmm_qa/pmm3-client-setup-centos.sh b/pmm_qa/pmm3-client-setup-centos.sh new file mode 100644 index 00000000..8b2ed8b2 --- /dev/null +++ b/pmm_qa/pmm3-client-setup-centos.sh @@ -0,0 +1,123 @@ +#!/bin/bash + +echo "start installing pmm-agent" + +while [ $# -gt 0 ]; do + + if [[ $1 == *"--"* ]]; then + param="${1/--/}" + declare $param="$2" + fi + + shift +done + +if [ -z "$admin_password" ]; then + export admin_password=admin +fi + +if [ -z "$pmm_server_ip" ]; then + export pmm_server_ip=127.0.0.1 +fi + +if [ -z "$client_version" ]; then + export client_version=dev-latest +fi + +if [ -z "$install_client" ]; then + export install_client=yes +fi + +if [ -z "$metrics_mode" ]; then + export metrics_mode=auto +fi + +if [ -z "$use_metrics_mode" 
]; then + export use_metrics_mode=yes +fi + +if [ ! -z "$upgrade" ]; then + upgrade="-u" +fi + +port=8443 +if [[ "$pmm_server_ip" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then + port=443 +fi + +microdnf install -y wget gnupg2 jq +wget https://repo.percona.com/yum/percona-release-latest.noarch.rpm +rpm -i ./percona-release-latest.noarch.rpm +export PMM_AGENT_SETUP_NODE_NAME=client_container_$(echo $((1 + $RANDOM % 9999))) + +if [[ "$client_version" == "3-dev-latest" ]]; then + echo "Installing 3-dev-latest pmm client" + percona-release enable-only pmm3-client experimental + microdnf install -y pmm-client +fi + +if [[ "$client_version" == "pmm3-rc" ]]; then + echo "Installing testing pmm client" + percona-release enable-only pmm3-client testing + microdnf install -y pmm-client +fi + +if [[ "$client_version" == "pmm3-latest" ]]; then + echo "Installing release pmm client" + microdnf -y install pmm-client +fi + +if [[ "$client_version" =~ ^3\.[0-9]+\.[0-9]+$ ]]; then + wget -O pmm-client.deb https://repo.percona.com/pmm3-client/yum/release/9/RPMS/x86_64/pmm-client-${client_version}-7.el9.x86_64.rpm + rpm -i pmm-client.deb +fi + +## Default Binary path +path="/usr/local/percona/pmm"; +## As export PATH is not working link the paths +ln -sf ${path}/bin/pmm-admin /usr/local/bin/pmm-admin +ln -sf ${path}/bin/pmm-agent /usr/local/bin/pmm-agent + +if [[ "$client_version" == http* ]]; then + if [[ "$install_client" == "yes" ]]; then + wget -O pmm-client.tar.gz --progress=dot:giga "${client_version}" + fi + tar -zxpf pmm-client.tar.gz + rm -r pmm-client.tar.gz + PMM_CLIENT=`ls -1td pmm-client* 2>/dev/null | grep -v ".tar" | grep -v ".sh" | head -n1` + echo ${PMM_CLIENT} + rm -rf pmm-client + mv ${PMM_CLIENT} pmm-client + rm -rf /usr/local/bin/pmm-client + mv -f pmm-client /usr/local/bin + pushd /usr/local/bin/pmm-client + ## only setting up all binaries in default path /usr/local/percona/pmm + bash -x ./install_tarball ${upgrade} + pwd + popd + pmm-admin --version +fi + +## Check 
if we are upgrading or attempting fresh install. +if [[ -z "$upgrade" ]]; then + if [[ "$use_metrics_mode" == "yes" ]]; then + echo "setup pmm-agent when metrics mode yes" + pmm-agent setup --config-file=/usr/local/percona/pmm/config/pmm-agent.yaml --server-address=${pmm_server_ip}:${port} --server-insecure-tls --metrics-mode=${metrics_mode} --server-username=admin --server-password=${admin_password} + else + echo "setup pmm-agent when metrics mode no" + pmm-agent setup --config-file=/usr/local/percona/pmm/config/pmm-agent.yaml --server-address=${pmm_server_ip}:${port} --server-insecure-tls --server-username=admin --server-password=${admin_password} + fi + sleep 10 + pmm-agent --config-file=/usr/local/percona/pmm/config/pmm-agent.yaml > pmm-agent.log 2>&1 & + sleep 10 +else + pid=`ps -ef | grep pmm-agent | grep -v grep | awk -F ' ' '{print $2}'` + if [ -n "$pid" ]; then + kill -9 $pid + echo "Killing and restarting pmm agent...." + pmm-agent --config-file=/usr/local/percona/pmm/config/pmm-agent.yaml >> pmm-agent.log 2>&1 & + sleep 10 + fi +fi +echo "pmm-admin status" +pmm-admin status diff --git a/pmm_qa/pmm3-client-setup.sh b/pmm_qa/pmm3-client-setup.sh new file mode 100755 index 00000000..15a02b35 --- /dev/null +++ b/pmm_qa/pmm3-client-setup.sh @@ -0,0 +1,129 @@ +#!/bin/bash + +echo "start installing pmm-agent" + +while [ $# -gt 0 ]; do + + if [[ $1 == *"--"* ]]; then + param="${1/--/}" + declare $param="$2" + fi + + shift +done + +if [ -z "$admin_password" ]; then + export admin_password=admin +fi + +if [ -z "$pmm_server_ip" ]; then + export pmm_server_ip=127.0.0.1 +fi + +if [ -z "$client_version" ]; then + export client_version=dev-latest +fi + +if [ -z "$install_client" ]; then + export install_client=yes +fi + +if [ -z "$metrics_mode" ]; then + export metrics_mode=auto +fi + +if [ -z "$use_metrics_mode" ]; then + export use_metrics_mode=yes +fi + +if [ ! -z "$upgrade" ]; then + upgrade="-u" +fi + +port=8443 +if [[ "$pmm_server_ip" =~ \. 
]]; then + port=443 +fi + +apt-get update +apt-get install -y wget gnupg2 libtinfo-dev libnuma-dev mysql-client postgresql-client +wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb +dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb +apt-get update +export PMM_AGENT_SETUP_NODE_NAME=client_container_$(echo $((1 + $RANDOM % 9999))) +mv -v /artifacts/* . + +if [[ "$client_version" == "3-dev-latest" ]]; then + percona-release enable-only pmm3-client experimental + apt-get update + apt-get -y install pmm-client +fi + +if [[ "$client_version" == "pmm3-rc" ]]; then + percona-release enable-only pmm3-client testing + apt-get update + apt-get -y install pmm-client +fi + +if [[ "$client_version" == "pmm3-latest" ]]; then + percona-release enable-only pmm3-client release + apt-get -y install pmm-client + apt-get -y update + percona-release enable-only pmm3-client experimental +fi + +## Only supported for debian based systems for now +if [[ "$client_version" =~ ^3\.[0-9]+\.[0-9]+$ ]]; then + wget -O pmm-client.deb https://repo.percona.com/pmm3-client/apt/pool/main/p/pmm-client/pmm-client_${client_version}-7.$(lsb_release -sc)_amd64.deb + dpkg -i pmm-client.deb +fi + +## Default Binary path +path="/usr/local/percona/pmm"; +## As export PATH is not working link the paths +ln -sf ${path}/bin/pmm-admin /usr/local/bin/pmm-admin +ln -sf ${path}/bin/pmm-agent /usr/local/bin/pmm-agent + +if [[ "$client_version" == http* ]]; then + if [[ "$install_client" == "yes" ]]; then + wget -O pmm-client.tar.gz --progress=dot:giga "${client_version}" + fi + tar -zxpf pmm-client.tar.gz + rm -r pmm-client.tar.gz + PMM_CLIENT=`ls -1td pmm-client* 2>/dev/null | grep -v ".tar" | grep -v ".sh" | head -n1` + echo ${PMM_CLIENT} + rm -rf pmm-client + mv ${PMM_CLIENT} pmm-client + rm -rf /usr/local/bin/pmm-client + mv -f pmm-client /usr/local/bin + pushd /usr/local/bin/pmm-client + ## only setting up all binaries in default path /usr/local/percona/pmm + bash -x 
./install_tarball ${upgrade} + pwd + popd + pmm-admin --version +fi + +## Check if we are upgrading or attempting fresh install. +if [[ -z "$upgrade" ]]; then + if [[ "$use_metrics_mode" == "yes" ]]; then + echo "setup pmm-agent" + pmm-agent setup --config-file=/usr/local/percona/pmm/config/pmm-agent.yaml --server-address=${pmm_server_ip}:${port} --server-insecure-tls --metrics-mode=${metrics_mode} --server-username=admin --server-password=${admin_password} + else + echo "setup pmm-agent" + pmm-agent setup --config-file=/usr/local/percona/pmm/config/pmm-agent.yaml --server-address=${pmm_server_ip}:${port} --server-insecure-tls --server-username=admin --server-password=${admin_password} + fi + sleep 10 + pmm-agent --config-file=/usr/local/percona/pmm/config/pmm-agent.yaml > pmm-agent.log 2>&1 & + sleep 10 +else + pid=`ps -ef | grep pmm-agent | grep -v grep | awk -F ' ' '{print $2}'` + if [[ ! -z "$pid" ]]; then + kill -9 $pid + echo "Killing and restarting pmm agent...." + pmm-agent --config-file=/usr/local/percona/pmm/config/pmm-agent.yaml >> pmm-agent.log 2>&1 & + sleep 10 + fi +fi +echo "pmm-admin status" +pmm-admin status diff --git a/pmm_qa/postgresql/data/pg_hba.conf.j2 b/pmm_qa/postgresql/data/pg_hba.conf.j2 new file mode 100644 index 00000000..43a9816a --- /dev/null +++ b/pmm_qa/postgresql/data/pg_hba.conf.j2 @@ -0,0 +1,3 @@ +host replication {{ replication_user }} 0.0.0.0/0 md5 +host all all 0.0.0.0/0 md5 +local all postgres trust diff --git a/pmm_qa/postgresql/data/postgres-replica.conf b/pmm_qa/postgresql/data/postgres-replica.conf new file mode 100644 index 00000000..9005fb12 --- /dev/null +++ b/pmm_qa/postgresql/data/postgres-replica.conf @@ -0,0 +1,6 @@ +hba_file = '/etc/postgresql/pg_hba.conf' +shared_preload_libraries = 'pg_stat_statements' +pg_stat_statements.track=all +track_io_timing=ON +track_activity_query_size=2048 +listen_addresses = '*' diff --git a/pmm_qa/postgresql/data/postgresql-primary.conf 
b/pmm_qa/postgresql/data/postgresql-primary.conf new file mode 100644 index 00000000..4ca543a8 --- /dev/null +++ b/pmm_qa/postgresql/data/postgresql-primary.conf @@ -0,0 +1,10 @@ +wal_level = replica +max_wal_senders = 10 +wal_keep_size = 64MB +hot_standby = on +listen_addresses = '*' +hba_file = '/etc/postgresql/pg_hba.conf' +shared_preload_libraries = 'pg_stat_statements' +pg_stat_statements.track=all +track_io_timing=ON +track_activity_query_size=2048 diff --git a/pmm_qa/postgresql/postgresql-setup.yml b/pmm_qa/postgresql/postgresql-setup.yml new file mode 100644 index 00000000..42b14ea7 --- /dev/null +++ b/pmm_qa/postgresql/postgresql-setup.yml @@ -0,0 +1,233 @@ +--- +# Postgresql Replication +- name: Setup Postgresql replication + hosts: localhost + connection: local + gather_facts: yes + vars: + pg_version: "{{ lookup('env', 'PGSQL_VERSION') | default('17', true) }}" + replication_user: "repl_user" + replication_password: "GRgrO9301RuF" + root_password: "GRgrO9301RuF" + pgsql_port: 6432 + nodes_count: "{{ (lookup('env', 'NODES_COUNT') | default('2', true)) | int }}" + network_name: "pmm-qa" + data_dir: "{{ lookup('env', 'HOME') }}/pgsql_cluster_data" + pmm_server_ip: "{{ lookup('vars', 'extra_pmm_server_ip', default=lookup('env','PMM_SERVER_IP') | default('127.0.0.1', true) ) }}" + client_version: "{{ lookup('vars', 'extra_client_version', default=lookup('env','CLIENT_VERSION') | default('3-dev-latest', true) ) }}" + admin_password: "{{ lookup('vars', 'extra_admin_password', default=lookup('env','ADMIN_PASSWORD') | default('admin', true) ) }}" + query_source: "{{ lookup('env', 'QUERY_SOURCE') | default('pgstatements', true) }}" + metrics_mode: "auto" + setup_type: "{{ lookup('env', 'SETUP_TYPE') }}" + random_service_name_value: "" + + tasks: + - name: Create Docker network + community.docker.docker_network: + name: "{{ network_name }}" + state: present + ignore_errors: yes + + - name: Remove old data folders + shell: 'rm -fr {{ data_dir }}' + + - name: Create 
data directories + file: + path: "{{ data_dir }}/node{{ item }}/data" + state: directory + mode: '0755' + loop: "{{ range(1, nodes_count | int + 1) | list }}" + + - name: Generate pg_hba.conf for primary node + template: + src: data/pg_hba.conf.j2 + dest: "{{ data_dir }}/node1/pg_hba.conf" + + - name: Remove old PostgreSQL primary container + community.docker.docker_container: + name: "pgsql_pmm_{{ pg_version }}_1" + image: "postgres:{{ pg_version }}-bookworm" + restart_policy: always + state: absent + ignore_errors: yes + + - name: Start PostgreSQL primary container + community.docker.docker_container: + name: "pgsql_pmm_{{ pg_version }}_1" + image: "postgres:{{ pg_version }}-bookworm" + restart_policy: always + state: started + recreate: true + networks: + - name: "{{ network_name }}" + env: + POSTGRES_PASSWORD: "{{ root_password }}" + volumes: + - "{{ data_dir }}/node1/data:/var/lib/postgresql/data" + - "./data/postgresql-primary.conf:/etc/postgresql/postgresql.conf:ro" + - "{{ data_dir }}/node1/pg_hba.conf:/etc/postgresql/pg_hba.conf:ro" + command: -c config_file=/etc/postgresql/postgresql.conf + ports: + - "{{ pgsql_port }}:5432" + + - name: Wait for PgSQL to be available + wait_for: + host: localhost + port: "{{ pgsql_port }}" + delay: 10 + timeout: 300 + + - name: Create replication user + community.docker.docker_container_exec: + container: "pgsql_pmm_{{ pg_version }}_1" + user: postgres + command: > + psql -c " + CREATE ROLE {{ replication_user }} WITH REPLICATION LOGIN ENCRYPTED PASSWORD '{{ replication_password }}'; + " + + - name: Stop and remove replica if exists + community.docker.docker_container: + name: "pgsql_pmm_{{ pg_version }}_{{ item }}" + state: absent + loop: "{{ range(2, nodes_count | int + 1) | list }}" + ignore_errors: yes + + - name: Start PostgreSQL replica container + community.docker.docker_container: + name: "pgsql_pmm_{{ pg_version }}_{{ item }}" + image: "postgres:{{ pg_version }}-bookworm" + restart_policy: "no" + state: started + 
command: sleep infinity + networks: + - name: "{{ network_name }}" + env: + POSTGRES_PASSWORD: "{{ root_password }}" + volumes: + - "{{ data_dir }}/node{{ item }}/data:/var/lib/postgresql/data" + - "./data/postgresql-replica.conf:/etc/postgresql/postgresql.conf:ro" + - "{{ data_dir }}/node1/pg_hba.conf:/etc/postgresql/pg_hba.conf:ro" + loop: "{{ range(2, nodes_count | int + 1) | list }}" + + - name: Wipe replica data directory before basebackup + community.docker.docker_container_exec: + container: "pgsql_pmm_{{ pg_version }}_{{ item }}" + user: root + command: rm -rf /var/lib/postgresql/data/* + loop: "{{ range(2, nodes_count | int + 1) | list }}" + + - name: Create PostgreSQL user 'pmm' with password + community.docker.docker_container_exec: + container: "pgsql_pmm_{{ pg_version }}_1" + user: postgres + command: > + bash -c ' + psql -U postgres -d {{ db_name | default("postgres") }} -c " + CREATE USER pmm WITH PASSWORD '\''pmm'\''; + GRANT pg_monitor TO pmm; + " + ' + + - name: Create custom database for pgbench + community.docker.docker_container_exec: + container: "pgsql_pmm_{{ pg_version }}_1" + user: postgres + command: > + bash -c " + echo \" + CREATE DATABASE pgbench; + \\c pgbench + GRANT CONNECT ON DATABASE pgbench TO pmm; + GRANT USAGE ON SCHEMA public TO pmm; + GRANT SELECT, INSERT, UPDATE, DELETE ON ALL TABLES IN SCHEMA public TO pmm; + ALTER DEFAULT PRIVILEGES IN SCHEMA public + GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES TO pmm; + \" | psql -U postgres -v ON_ERROR_STOP=1 + " + + - name: Run pg_basebackup from primary to replica + community.docker.docker_container_exec: + container: "pgsql_pmm_{{ pg_version }}_{{ item }}" + user: root + command: > + bash -c " + export PGPASSWORD='{{ replication_password }}' && \ + timeout 120s \ + pg_basebackup --pgdata=/var/lib/postgresql/data -R -v -Fp -Xs -P \ + --host=pgsql_pmm_{{ pg_version }}_1 --port=5432 -U {{ replication_user }} + " + loop: "{{ range(2, nodes_count | int + 1) | list }}" + + - name: Restart 
PostgreSQL container with custom command + community.docker.docker_container: + name: "pgsql_pmm_{{ pg_version }}_{{ item }}" + image: "postgres:{{ pg_version }}-bookworm" + restart: true + state: started + command: -c config_file=/etc/postgresql/postgresql.conf + networks: + - name: "{{ network_name }}" + env: + POSTGRES_PASSWORD: "{{ root_password }}" + volumes: + - "{{ data_dir }}/node{{ item }}/data:/var/lib/postgresql/data" + - "./data/postgres-replica.conf:/etc/postgresql/postgresql.conf:ro" + - "{{ data_dir }}/node1/pg_hba.conf:/etc/postgresql/pg_hba.conf:ro" + ports: + - "{{ pgsql_port + item - 1 }}:5432" + loop: "{{ range(2, nodes_count | int + 1) | list }}" + + - name: Create pg_stat_statements extension + community.docker.docker_container_exec: + container: "pgsql_pmm_{{ pg_version }}_1" + user: postgres + command: > + psql -U postgres -d 'postgres' -c " + CREATE EXTENSION IF NOT EXISTS pg_stat_statements; + " + + - name: Install and add pmm client. + include_tasks: ../tasks/install_pmm_client.yml + vars: + container_name: "pgsql_pmm_{{ pg_version }}_{{ item }}" + loop: "{{ range(1, nodes_count | int + 1) | list }}" + + - name: Get already connected services to pmm server + community.docker.docker_container_exec: + container: "pgsql_pmm_{{ pg_version }}_1" + command: > + sh -c 'curl --location --insecure -u"admin:{{ admin_password }}" -s --request GET "http://{{ pmm_server_ip }}:{{ '80' if pmm_server_ip is match('^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.|$)){4}$') else '8080' }}/v1/management/services" | jq -r ".services[].service_name"' + register: pmm_server_services + + - name: Display already connected services to pmm server + debug: + msg: "{{ pmm_server_services.stdout | split('\n') }}" + + - name: Find out if service is already connected to pmm server + block: + - name: Loop through percona servers + set_fact: + random_service_name_value: "_{{ 9999 | random + 1 }}" + loop: "{{ range(1, nodes_count | int + 1) | list }}" + when: "('pgsql_pmm_' ~ 
pg_version ~ '_' ~ item) in pmm_server_services.stdout" + + - name: Add service to pmm server + community.docker.docker_container_exec: + container: "pgsql_pmm_{{ pg_version }}_{{ item }}" + command: pmm-admin add postgresql --username=pmm --password=pmm --query-source=pgstatements pgsql_pmm_{{ pg_version }}_{{ item }}{{ random_service_name_value }} --debug 127.0.0.1:5432 + loop: "{{ range(1, nodes_count | int + 1) | list }}" + + - name: Run load on primary node. + include_tasks: ./tasks/run_load_pgsql.yml + vars: + node_name: "pgsql_pmm_{{ pg_version }}_1" + pgbench_clients: 10 + pgbench_time: 120 + pgbench_scale: 1000 + + - name: Check replication status on primary + community.docker.docker_container_exec: + container: "pgsql_pmm_{{ pg_version }}_1" + user: postgres + command: psql -c "SELECT * FROM pg_stat_replication;" diff --git a/pmm_qa/postgresql/tasks/run_load_pgsql.yml b/pmm_qa/postgresql/tasks/run_load_pgsql.yml new file mode 100644 index 00000000..7e2cc3b0 --- /dev/null +++ b/pmm_qa/postgresql/tasks/run_load_pgsql.yml @@ -0,0 +1,57 @@ +# This ansible file run load on docker container of pgsql with variable name node_name + +- name: Detect OS inside the container + community.docker.docker_container_exec: + container: "{{ node_name }}" + command: cat /etc/os-release + register: container_os_info + +- name: Set distro family (debian/rhel) + set_fact: + distro_family: >- + {{ + ( + 'debian' if 'debian' in container_os_info.stdout | lower else + 'rhel' if 'rhel' in container_os_info.stdout | lower or 'centos' in container_os_info.stdout | lower or 'fedora' in container_os_info.stdout | lower + else 'unknown' + ) | trim + }} + + + +- name: Ensure pgbench is installed (Debian-based container) + community.docker.docker_container_exec: + container: "{{ node_name }}" + user: root + command: > + /bin/sh -c " + apt-get update && + apt-get install -y postgresql-contrib + " + when: distro_family == "debian" + +- name: Ensure pgbench is installed (RHEL-based container) 
+ community.docker.docker_container_exec: + container: "{{ node_name }}" + user: root + command: microdnf install -y postgresql-contrib + when: distro_family == "rhel" + +- name: Initialize pgbench database + community.docker.docker_container_exec: + container: "{{ node_name }}" + user: postgres + command: > + pgbench -i -s {{ pgbench_scale }} pgbench + +- name: Run pgbench benchmark + community.docker.docker_container_exec: + container: "{{ node_name }}" + user: postgres + command: > + pgbench -c {{ pgbench_clients }} -T {{ pgbench_time }} -j 4 pgbench + register: pgbench_result + +- name: Print pgbench results + debug: + var: pgbench_result.stdout_lines diff --git a/pmm_qa/product_version_download_helper b/pmm_qa/product_version_download_helper new file mode 100644 index 00000000..f774f47d --- /dev/null +++ b/pmm_qa/product_version_download_helper @@ -0,0 +1,30 @@ +##ProductName ##Version ##Tarball/Package Link + +pxc # 5.7.43 # https://downloads.percona.com/downloads/Percona-XtraDB-Cluster-57/Percona-XtraDB-Cluster-5.7.43/binary/tarball/Percona-XtraDB-Cluster-5.7.43-rel47-65.1.Linux.x86_64.glibc2.17-minimal.tar.gz + +pxc # 8.0.33 # https://downloads.percona.com/downloads/Percona-XtraDB-Cluster-80/Percona-XtraDB-Cluster-8.0.33/binary/tarball/Percona-XtraDB-Cluster_8.0.33-25.1_Linux.x86_64.glibc2.17-minimal.tar.gz + +psmdb # 4.4.16 # https://downloads.percona.com/downloads/percona-server-mongodb-4.4/percona-server-mongodb-4.4.16-16/binary/tarball/percona-server-mongodb-4.4.16-16-x86_64.glibc2.17-minimal.tar.gz + +psmdb # 4.2.23 # https://downloads.percona.com/downloads/TESTING/psmdb-4.2.23/percona-server-mongodb-4.2.23-23-x86_64.glibc2.17-minimal.tar.gz + +psmdb # 4.2.22 # https://downloads.percona.com/downloads/percona-server-mongodb-4.2/percona-server-mongodb-4.2.22-22/binary/tarball/percona-server-mongodb-4.2.22-22-x86_64.glibc2.17-minimal.tar.gz + +psmdb # 5.0.11 # 
https://downloads.percona.com/downloads/percona-server-mongodb-LATEST/percona-server-mongodb-5.0.11-10/binary/tarball/percona-server-mongodb-5.0.11-10-x86_64.glibc2.17-minimal.tar.gz + +psmdb # 6.0.2 # https://downloads.percona.com/downloads/percona-distribution-mongodb-6.0/percona-distribution-mongodb-6.0.12/binary/tarball/percona-server-mongodb-6.0.12-9-x86_64.glibc2.17-minimal.tar.gz + +psmdb # 7.0.2 # https://downloads.percona.com/downloads/percona-server-mongodb-7.0/percona-server-mongodb-7.0.2-1/binary/tarball/percona-server-mongodb-7.0.2-1-x86_64.glibc2.17.tar.gz + +ps # 8.4.3 # https://downloads.percona.com/downloads/Percona-Server-8.4/Percona-Server-8.4.3-3/binary/tarball/Percona-Server-8.4.3-3-Linux.x86_64.glibc2.35-minimal.tar.gz + +ps # 8.0.40 # https://downloads.percona.com/downloads/Percona-Server-8.0/Percona-Server-8.0.40-31/binary/tarball/Percona-Server-8.0.40-31-Linux.x86_64.glibc2.35-minimal.tar.gz + +ps # 5.7.44 # https://downloads.percona.com/downloads/Percona-Server-5.7/Percona-Server-5.7.44-48/binary/tarball/Percona-Server-5.7.44-48-Linux.x86_64.glibc2.35-minimal.tar.gz + +ms # 8.4 # https://dev.mysql.com/get/Downloads/MySQL-8.4/mysql-8.4.3-linux-glibc2.17-x86_64-minimal.tar.xz + +ms # 8.0.40 # https://dev.mysql.com/get/Downloads/MySQL-8.0/mysql-8.0.40-linux-glibc2.17-x86_64-minimal.tar.xz + +proxysql # 2.6.2 # https://downloads.percona.com/downloads/proxysql2/proxysql2-2.6.2/binary/debian/jammy/x86_64/proxysql2_2.6.2-1.1.jammy_amd64.deb + diff --git a/pmm_qa/prometheus.base.yml b/pmm_qa/prometheus.base.yml new file mode 100644 index 00000000..7f69fc89 --- /dev/null +++ b/pmm_qa/prometheus.base.yml @@ -0,0 +1,23 @@ +scrape_configs: +- job_name: blackbox80 + params: + module: + - http_2xx + scrape_interval: 1m + scrape_timeout: 10s + metrics_path: /probe + scheme: http + + static_configs: + - targets: + - http://www.alfaexploit.com/archive + + relabel_configs: + - source_labels: [__address__] + target_label: __param_target + - source_labels: 
[__param_target] + target_label: instance + - source_labels: [__param_target] + target_label: node_name + - target_label: __address__ + replacement: pmm.alfaexploit.com:9115 diff --git a/pmm_qa/ps_pmm_setup.yml b/pmm_qa/ps_pmm_setup.yml new file mode 100644 index 00000000..2e28ac77 --- /dev/null +++ b/pmm_qa/ps_pmm_setup.yml @@ -0,0 +1,73 @@ +--- +# This playbook does following: +# enables Percona testing repository + +- hosts: all + become: true + become_method: sudo + vars: + ps_version: "{{ lookup('vars', 'extra_ps_version', default=lookup('env','PS_VERSION') | default('8.0', true) ) }}" + ps_tarball: "{{ lookup('vars', 'extra_ps_tarball', default=lookup('env','PS_TARBALL') | default('', true) ) }}" + ps_container: "{{ lookup('vars', 'extra_ps_container', default=lookup('env','PS_CONTAINER') | default('ps_container', true) ) }}" + query_source: "{{ lookup('vars', 'extra_query_source', default=lookup('env','QUERY_SOURCE') | default('slowlog', true) ) }}" + pmm_server_ip: "{{ lookup('vars', 'extra_pmm_server_ip', default=lookup('env','PMM_SERVER_IP') | default('127.0.0.1', true) ) }}" + client_version: "{{ lookup('vars', 'extra_client_version', default=lookup('env','CLIENT_VERSION') | default('dev-latest', true) ) }}" + admin_password: "{{ lookup('vars', 'extra_admin_password', default=lookup('env','ADMIN_PASSWORD') | default('admin', true) ) }}" + pmm_qa_branch: "{{ lookup('vars', 'extra_pmm_qa_branch', default=lookup('env','PMM_QA_GIT_BRANCH') | default('main', true) ) }}" + number_of_nodes: "{{ lookup('vars', 'extra_number_of_nodes', default=lookup('env','PS_NODES') | default('3', true) ) }}" + ps_port: "{{ lookup('vars', 'extra_number_of_nodes', default=lookup('env','PS_PORT') | default('3317', true) ) }}" + group_replication: "{{ lookup('vars', 'extra_group_replication', default=lookup('env','GROUP_REPLICATION') | default('', true) ) }}" + + tasks: + - name: cleanup container for client and DB setup + shell: > + docker ps -a --filter "name={{ ps_container }}" 
| grep -q . && docker stop {{ ps_container }} && docker rm -fv {{ ps_container }} + ignore_errors: true + tags: + - cleanup + + - name: Create pmm-qa network if not exist + shell: docker network create pmm-qa + ignore_errors: true + + - name: Prepare Container for ps and Proxysql + shell: > + docker run -d --name={{ ps_container }} + -p {{ ps_port }}:3307 + phusion/baseimage:jammy-1.0.1 + + - name: Copy all required Artifacts to the docker ps_container + shell: "{{ item }}" + with_items: + - docker exec {{ ps_container }} mkdir -p artifacts + - docker cp ./client_container_ps_setup.sh {{ ps_container }}:/ + - docker cp ./pmm3-client-setup.sh {{ ps_container }}:/ + + - name: Get Product tarball URL based on the version + shell: awk -F'# ' '/-{{ ps_version | regex_escape }}/ && !/pxc/ && /ps/ {print $3; exit}' product_version_download_helper + register: tarball + when: lookup('env', 'PS_TARBALL') == '' + + - name: Set Product URL if environment or paramater are not defined + set_fact: + ps_tarball: "{{tarball.stdout | default(ps_tarball,true)}}" + + - name: Setup libraries required inside the container + shell: "{{ item }}" + with_items: + - docker exec {{ ps_container }} apt-get update + - docker exec {{ ps_container }} apt-get -y install wget curl git gnupg2 lsb-release + - docker exec {{ ps_container }} apt-get -y install libaio1 libaio-dev libnuma-dev socat + - docker exec {{ ps_container }} apt install -y sysbench + + - name: Install pmm2-client on the ps_container + shell: "{{ item }}" + with_items: + - docker network connect pmm-qa {{ ps_container }} + - docker exec {{ ps_container }} bash -x ./pmm3-client-setup.sh --pmm_server_ip {{ pmm_server_ip }} --client_version {{ client_version }} --admin_password {{ admin_password }} --use_metrics_mode no + + - name: Execute Setup script inside the ps ps_container + shell: "{{ item }}" + with_items: + - docker exec {{ ps_container }} bash -xe ./client_container_ps_setup.sh --ps_version {{ ps_version }} --ps_tarball {{ 
ps_tarball }} --number_of_nodes {{ number_of_nodes }} --query_source {{ query_source }} --group_replication {{ group_replication }} > setup_ps_{{ ps_container }}.log + diff --git a/pmm_qa/psmdb_setup.sh b/pmm_qa/psmdb_setup.sh new file mode 100644 index 00000000..dfb243c0 --- /dev/null +++ b/pmm_qa/psmdb_setup.sh @@ -0,0 +1,129 @@ +#!/bin/bash + +while [ $# -gt 0 ]; do + if [[ $1 == *"--"* ]]; then + param="${1/--/}" + declare $param="$2" + fi + shift +done + +if [ -z "$mongodb_version" ]; then + export mongodb_version=4.4 +fi + +if [ -z "$mongodb_setup" ]; then + export mongodb_setup=replica +fi + +if [ -z "$metrics_mode" ]; then + export metrics_mode=push +fi + +# Mongo user credentials for the replica set cluster +export user="dba" +export pwd="test1234" + +# Install the dependencies +source ~/.bash_profile || true; +apt-get update +apt-get -y install wget curl jq git gnupg2 lsb-release +apt-get -y install libreadline6-dev systemtap-sdt-dev zlib1g-dev libssl-dev libpam0g-dev python-dev bison make flex libipc-run-perl +sleep 10 + +wget https://raw.githubusercontent.com/Percona-QA/percona-qa/master/mongo_startup.sh +chmod +x mongo_startup.sh +export SERVICE_RANDOM_NUMBER=$(echo $((1 + $RANDOM % 9999))) + +### Detect latest tarball link for specified mongodb_version: 8.0 | 7.0 | 6.0 | 5.0 | 4.4 | 4.2 at the moment +psmdb_latest=$(wget -q --post-data "version=percona-server-mongodb-${mongodb_version}" https://www.percona.com/products-api.php -O - | grep -oP "(?<=value\=\")[^\"]*" | sort -V | tail -1) +if [[ "$mongodb_version" == "4.4" ]]; then + psmdb_tarball=$(wget -q --post-data "version_files=${psmdb_latest}&software_files=binary" https://www.percona.com/products-api.php -O - | jq -r '.[] | select(.link | contains("sha") | not) | .link' | grep glibc2\.17-minimal) +else + psmdb_tarball=$(wget -q --post-data "version_files=${psmdb_latest}&software_files=binary" https://www.percona.com/products-api.php -O - | jq -r '.[] | select(.link | contains("sha") | not) | .link' | 
grep focal-minimal) +fi + +echo "Downloading ${psmdb_latest} ..." +wget -O percona_server_mongodb.tar.gz ${psmdb_tarball} +tar -xvf percona_server_mongodb.tar.gz + +export extracted_folder_name=$(ls | grep percona-server-mongodb) +echo "Extracted folder name ${extracted_folder_name}" +mv ${extracted_folder_name} psmdb_${mongodb_version} + +# TODO: refactor if to match range of versions 6.0+ +if [[ "$mongodb_version" == "6.0" || "$mongodb_version" == "7.0" || "$mongodb_version" == "8.0" ]]; then +### PSMDB 6+ requires "percona-mongodb-mongosh" additionally + if [[ "$mongodb_version" == "8.0" ]]; then + # Use Mongo 7.0 mongosh itself for 8.0 + psmdb_latest=$(wget -q --post-data "version=percona-server-mongodb-7.0" https://www.percona.com/products-api.php -O - | grep -oP "(?<=value\=\")[^\"]*" | sort -V | tail -1) + mongosh_link=$(wget -q --post-data "version_files=${psmdb_latest}&software_files=binary" https://www.percona.com/products-api.php -O - | jq -r '.[] | select(.link | contains("sha") | not) | .link' | grep mongosh || true) + if [ -z "$mongosh_link" ]; then + psmdb_latest=$(wget -q --post-data "version=percona-server-mongodb-6.0" https://www.percona.com/products-api.php -O - | grep -oP "(?<=value\=\")[^\"]*" | sort -V | tail -1) + fi + fi + mongosh_link=$(wget -q --post-data "version_files=${psmdb_latest}&software_files=binary" https://www.percona.com/products-api.php -O - | jq -r '.[] | select(.link | contains("sha") | not) | .link' | grep mongosh) + echo "Downloading mongosh ${mongosh_link}..." 
+ wget -O mongosh.tar.gz ${mongosh_link} + tar -xvf mongosh.tar.gz + mv percona-mongodb-mongosh* mongosh + cp mongosh/bin/mongosh ./psmdb_${mongodb_version}/bin/mongo + rm mongosh.tar.gz +fi +rm percona_server_mongodb.tar.gz* + +if [ "$mongodb_setup" == "sharded" ]; then + bash ./mongo_startup.sh -s -e wiredTiger --mongosExtra="--slowms 1" --mongodExtra="--profile 2 --slowms 1" --configExtra="--profile 2 --slowms 1" --b=./psmdb_${mongodb_version}/bin + pmm-admin add mongodb --cluster mongodb_node_cluster --environment=mongodb_shraded_node mongodb_shraded_node_${SERVICE_RANDOM_NUMBER} --metrics-mode=$metrics_mode --debug 127.0.0.1:27017 + sleep 2 + pmm-admin add mongodb --cluster mongodb_node_cluster --replication-set=config --environment=mongodb_config_node mongodb_config_1_${SERVICE_RANDOM_NUMBER} --metrics-mode=$metrics_mode --debug 127.0.0.1:27027 + sleep 2 + pmm-admin add mongodb --cluster mongodb_node_cluster --replication-set=config --environment=mongodb_config_node mongodb_config_2_${SERVICE_RANDOM_NUMBER} --metrics-mode=$metrics_mode --debug 127.0.0.1:27028 + sleep 2 + pmm-admin add mongodb --cluster mongodb_node_cluster --replication-set=config --environment=mongodb_config_node mongodb_config_3_${SERVICE_RANDOM_NUMBER} --metrics-mode=$metrics_mode --debug 127.0.0.1:27029 + sleep 2 + pmm-admin add mongodb --cluster mongodb_node_cluster --replication-set=rs1 --environment=mongodb_rs_node mongodb_rs1_1_${SERVICE_RANDOM_NUMBER} --metrics-mode=$metrics_mode --debug 127.0.0.1:27018 + sleep 2 + pmm-admin add mongodb --cluster mongodb_node_cluster --replication-set=rs1 --environment=mongodb_rs_node mongodb_rs1_2_${SERVICE_RANDOM_NUMBER} --metrics-mode=$metrics_mode --debug 127.0.0.1:27019 + sleep 2 + pmm-admin add mongodb --cluster mongodb_node_cluster --replication-set=rs1 --environment=mongodb_rs_node mongodb_rs1_3_${SERVICE_RANDOM_NUMBER} --metrics-mode=$metrics_mode --debug 127.0.0.1:27020 + sleep 2 + pmm-admin add mongodb --cluster mongodb_node_cluster 
--replication-set=rs2 --environment=mongodb_rs_node mongodb_rs2_1_${SERVICE_RANDOM_NUMBER} --metrics-mode=$metrics_mode --debug 127.0.0.1:28018 + sleep 2 + pmm-admin add mongodb --cluster mongodb_node_cluster --replication-set=rs2 --environment=mongodb_rs_node mongodb_rs2_2_${SERVICE_RANDOM_NUMBER} --metrics-mode=$metrics_mode --debug 127.0.0.1:28019 + sleep 2 + pmm-admin add mongodb --cluster mongodb_node_cluster --replication-set=rs2 --environment=mongodb_rs_node mongodb_rs2_3_${SERVICE_RANDOM_NUMBER} --metrics-mode=$metrics_mode --debug 127.0.0.1:28020 + sleep 20 + #./nodes/cl_mongos.sh mongodb_user_setup.js +fi + +if [ "$mongodb_setup" == "replica" ]; then + bash ./mongo_startup.sh -r -e wiredTiger --mongosExtra="--slowms 1" --mongodExtra="--profile 2 --slowms 1" --configExtra="--profile 2 --slowms 1" --b=./psmdb_${mongodb_version}/bin + sleep 20 + pmm-admin remove mongodb mongodb_rs1_1 || true; pmm-admin add mongodb --cluster mongodb_node_cluster --replication-set=rs1 --environment=mongodb_rs_node --metrics-mode=$metrics_mode mongodb_rs1_1_${SERVICE_RANDOM_NUMBER} --debug 127.0.0.1:27017 + sleep 2 + pmm-admin remove mongodb mongodb_rs1_2 || true; pmm-admin add mongodb --cluster mongodb_node_cluster --replication-set=rs1 --environment=mongodb_rs_node --metrics-mode=$metrics_mode mongodb_rs1_2_${SERVICE_RANDOM_NUMBER} --debug 127.0.0.1:27018 + sleep 2 + pmm-admin remove mongodb mongodb_rs1_3 || true; pmm-admin add mongodb --cluster mongodb_node_cluster --replication-set=rs1 --environment=mongodb_rs_node --metrics-mode=$metrics_mode mongodb_rs1_3_${SERVICE_RANDOM_NUMBER} --debug 127.0.0.1:27019 + sleep 20 +fi + +#Arbiter setup with Auth enabled (keyfile) +if [ "$mongodb_setup" == "arbiter" ]; then + bash ./mongo_startup.sh -x -r -a -e wiredTiger --mongosExtra="--slowms 1" --mongodExtra="--profile 2 --slowms 1" --configExtra="--profile 2 --slowms 1" --b=./psmdb_${mongodb_version}/bin + sleep 20 + pmm-admin remove mongodb mongodb_rs2_1 || true; pmm-admin add 
mongodb --cluster mongodb_node_cluster2 --replication-set=rs2 --environment=mongodb_rs_node --metrics-mode=$metrics_mode mongodb_rs2_1_${SERVICE_RANDOM_NUMBER} --debug --username=${user} --password=${pwd} 127.0.0.1:27017 + sleep 2 + pmm-admin remove mongodb mongodb_rs2_2 || true; pmm-admin add mongodb --cluster mongodb_node_cluster2 --replication-set=rs2 --environment=mongodb_rs_node --metrics-mode=$metrics_mode mongodb_rs2_2_${SERVICE_RANDOM_NUMBER} --debug --username=${user} --password=${pwd} 127.0.0.1:27018 + sleep 2 + pmm-admin remove mongodb mongodb_rs2_3 || true; pmm-admin add mongodb --cluster mongodb_node_cluster2 --replication-set=rs2 --environment=mongodb_rs_node --metrics-mode=$metrics_mode mongodb_rs2_3_${SERVICE_RANDOM_NUMBER} --debug 127.0.0.1:27019 + sleep 20 +fi + + +if [ "$mongodb_setup" == "regular" ]; then + bash ./mongo_startup.sh -m -e wiredTiger --mongosExtra="--slowms 1" --mongodExtra="--profile 2 --slowms 1" --configExtra="--profile 2 --slowms 1" --b=./psmdb_${mongodb_version}/bin + pmm-admin add mongodb --cluster mongodb_node_cluster --environment=mongodb_single_node mongodb_rs_single_${SERVICE_RANDOM_NUMBER} --metrics-mode=$metrics_mode --debug 127.0.0.1:27017 + sleep 20 +fi +rm percona_server_mongodb.tar.gz* diff --git a/pmm_qa/psmdb_setup.yml b/pmm_qa/psmdb_setup.yml new file mode 100644 index 00000000..c27e60c5 --- /dev/null +++ b/pmm_qa/psmdb_setup.yml @@ -0,0 +1,89 @@ +--- + +- hosts: all + become: true + become_method: sudo + vars: + psmdb_version: "{{ lookup('vars', 'extra_psmdb_version', default=lookup('env','PSMDB_VERSION') | default('4.4', true) ) }}" + psmdb_tarball: "{{ lookup('vars', 'extra_psmdb_tarball', default=lookup('env','PSMDB_TARBALL') | default('', true) ) }}" + psmdb_setup: "{{ lookup('vars', 'extra_psmdb_setup', default=lookup('env','PSMDB_SETUP') | default('regular', true) ) }}" + psmdb_container: "{{ lookup('vars', 'extra_psmdb_container', default=lookup('env','PSMDB_CONTAINER') | default('psmdb', true) ) }}" + 
pmm_server_ip: "{{ lookup('vars', 'extra_pmm_server_ip', default=lookup('env','PMM_SERVER_IP') | default('127.0.0.1', true) ) }}" + client_version: "{{ lookup('vars', 'extra_client_version', default=lookup('env','CLIENT_VERSION') | default('dev-latest', true) ) }}" + admin_password: "{{ lookup('vars', 'extra_admin_password', default=lookup('env','ADMIN_PASSWORD') | default('admin', true) ) }}" + pmm_qa_branch: "{{ lookup('vars', 'extra_pmm_qa_branch', default=lookup('env','PMM_QA_GIT_BRANCH') | default('main', true) ) }}" + + tasks: + - name: cleanup container for client and DB setup + shell: > + docker ps -a --filter "name={{ psmdb_container }}" | grep -q . && docker stop {{ psmdb_container }} && docker rm -fv {{ psmdb_container }} + ignore_errors: true + tags: + - cleanup + + - name: Create pmm-qa network if not exist + shell: docker network create pmm-qa + ignore_errors: true + + - name: Prepare Container for PSMDB + shell: > + docker run -d --name={{ psmdb_container }} + phusion/baseimage:jammy-1.0.1 + + - name: Copy all required Artifacts to the docker psmdb_container + shell: "{{ item }}" + with_items: + - docker cp ./psmdb_setup.sh {{ psmdb_container }}:/ + - docker cp ./pmm3-client-setup.sh {{ psmdb_container }}:/ + - docker exec {{ psmdb_container }} apt-get update + - docker exec {{ psmdb_container }} apt-get -y install wget curl git gnupg2 lsb-release jq + + - name: Install pmm2-client on the psmdb_container + shell: "{{ item }}" + with_items: + - docker exec {{ psmdb_container }} wget https://raw.githubusercontent.com/percona/pmm-qa/{{ pmm_qa_branch }}/pmm-tests/mongodb_user_setup.js + - docker network connect pmm-qa {{ psmdb_container }} + - docker exec {{ psmdb_container }} bash -x ./pmm3-client-setup.sh --pmm_server_ip {{ pmm_server_ip }} --client_version {{ client_version }} --admin_password {{ admin_password }} --use_metrics_mode no + + - name: Setup psmdb for monitoring + shell: "{{ item }}" + with_items: + - docker exec {{ psmdb_container }} bash 
-x ./psmdb_setup.sh --mongodb_version {{ psmdb_version }} --mongodb_setup {{ psmdb_setup }} > setup_psmdb_{{ psmdb_version }}_{{ psmdb_setup }}.log + + - name: Setup Load Running Docker Container + shell: "{{ item }}" + with_items: + - rm -r ~/psmdb_{{ psmdb_version }} || true; mkdir ~/psmdb_{{ psmdb_version }} + - wget -P ~/psmdb_{{ psmdb_version }}/ "https://raw.githubusercontent.com/percona/pmm-qa/{{ pmm_qa_branch }}/pmm-tests/Dockerfile" + - wget -P ~/psmdb_{{ psmdb_version }}/ "https://raw.githubusercontent.com/percona/pmm-qa/{{ pmm_qa_branch }}/pmm-tests/mongodb_query.php" + - docker build --tag php-db ~/psmdb_{{ psmdb_version }}/ > ~/docker-build_mongodb_load_{{ psmdb_version }}_{{ psmdb_setup }}.log || true + - docker rm mongodb_load_{{ psmdb_version }}_{{ psmdb_setup }} || true + - docker run --rm --name mongodb_load_{{ psmdb_version }}_{{ psmdb_setup }} --network=pmm-qa -v $(pwd):/usr/src/myapp -w /usr/src/myapp php-db composer require mongodb/mongodb || true + + - name: Run User setup script when Running Regular MongoDB setup + shell: docker exec {{ psmdb_container }} ./nodes/cl.sh mongodb_user_setup.js + when: psmdb_setup == "regular" + + - name: Run load on Replica Set Master(PSS) + shell: "{{ item }}" + with_items: + - docker run --name mongodb_load_{{ psmdb_version }}_{{ psmdb_setup }} -d -e MONGODB_HOST={{ psmdb_container }} -e MONGODB_PORT=27018 -e TEST_TARGET_QPS=10 -e TEST_COLLECTION=10 -e TEST_DB=30 --network=pmm-qa -v $(pwd):/usr/src/myapp -w /usr/src/myapp php-db php mongodb_query.php >> setup_psmdb_{{ psmdb_version }}_{{ psmdb_setup }}.log + when: psmdb_setup == "replica" + + - name: Run load on Replica Set Master(PSA) + shell: "{{ item }}" + with_items: + - docker run --name mongodb_load_{{ psmdb_version }}_{{ psmdb_setup }} -d -e MONGODB_HOST={{ psmdb_container }} -e MONGODB_PORT=27018 -e TEST_TARGET_QPS=10 -e TEST_COLLECTION=10 -e TEST_DB=30 -e MONGODB_USER="dba" -e MONGODB_PASSWORD="test1234" --network=pmm-qa -v 
$(pwd):/usr/src/myapp -w /usr/src/myapp php-db php mongodb_query.php >> setup_psmdb_{{ psmdb_version }}_{{ psmdb_setup }}.log + when: psmdb_setup == "arbiter" + + - name: Run load on Regular + shell: "{{ item }}" + with_items: + - docker run --name mongodb_load_{{ psmdb_version }}_{{ psmdb_setup }} -d -e MONGODB_HOST={{ psmdb_container }} -e MONGODB_PORT=27017 -e TEST_TARGET_QPS=10 -e TEST_COLLECTION=10 -e TEST_DB=30 --network=pmm-qa -v $(pwd):/usr/src/myapp -w /usr/src/myapp php-db php mongodb_query.php >> setup_psmdb_{{ psmdb_version }}_{{ psmdb_setup }}.log + when: psmdb_setup == "regular" + + - name: Run load on Sharded Clusters Master + shell: "{{ item }}" + with_items: + - docker run --name mongodb_load_{{ psmdb_version }}_{{ psmdb_setup }} -d -e MONGODB_HOST={{ psmdb_container }} -e MONGODB_PORT=27019 -e TEST_TARGET_QPS=10 -e TEST_COLLECTION=10 -e TEST_DB=30 --network=pmm-qa -v $(pwd):/usr/src/myapp -w /usr/src/myapp php-db php mongodb_query.php >> setup_psmdb_{{ psmdb_version }}_{{ psmdb_setup }}.log + when: psmdb_setup == "sharded" diff --git a/pmm_qa/pxc_proxysql_setup.sh b/pmm_qa/pxc_proxysql_setup.sh new file mode 100644 index 00000000..08511106 --- /dev/null +++ b/pmm_qa/pxc_proxysql_setup.sh @@ -0,0 +1,66 @@ +#!/bin/bash +export number_of_nodes=$1 +export pxc_version=$2 +export query_source=$3 + +sudo dnf install -y socat +wget https://raw.githubusercontent.com/Percona-QA/percona-qa/master/pxc-tests/pxc-startup.sh +sed -i 's/log-output=none/log-output=file/g' pxc-startup.sh +## bug https://bugs.mysql.com/bug.php?id=90553 workaround +sed -i 's+${MID} --datadir+${MID} --socket=\\${node}/socket.sock --port=\\${RBASE1} --datadir+g' pxc-startup.sh + +## Download right PXC version +if [ "$pxc_version" == "5.7" ]; then + wget https://downloads.percona.com/downloads/Percona-XtraDB-Cluster-57/Percona-XtraDB-Cluster-5.7.34-31.51/binary/tarball/Percona-XtraDB-Cluster-5.7.34-rel37-51.1.Linux.x86_64.glibc2.12-minimal.tar.gz + sudo dnf install -y 
percona-xtrabackup-24 +fi +if [ "$pxc_version" == "8.0" ]; then + sed -i 's+wsrep_node_incoming_address=$ADDR+wsrep_node_incoming_address=$ADDR:$RBASE1+g' pxc-startup.sh + wget https://downloads.percona.com/downloads/Percona-XtraDB-Cluster-LATEST/Percona-XtraDB-Cluster-8.0.27/binary/tarball/Percona-XtraDB-Cluster_8.0.27-18.1_Linux.x86_64.glibc2.17-minimal.tar.gz +fi +tar -xzf Percona-XtraDB-Cluster* +rm -r Percona-XtraDB-Cluster*.tar.gz +mv Percona-XtraDB-Cluster* PXC +cd PXC + +## start PXC +bash ../pxc-startup.sh +bash ./start_pxc $number_of_nodes +touch sysbench_run_node1_prepare.txt +touch sysbench_run_node1_read_write.txt +touch sysbench_run_node1_read_only.txt + +## Install proxysql2 +sudo dnf install -y proxysql2 + +### enable slow log +if [ "$query_source" == "slowlog" ]; then + for j in `seq 1 ${number_of_nodes}`; + do + bin/mysql -A -uroot -Snode$j/socket.sock -e "SET GLOBAL slow_query_log='ON';" + bin/mysql -A -uroot -Snode$j/socket.sock -e "SET GLOBAL long_query_time=0;" + bin/mysql -A -uroot -Snode$j/socket.sock -e "SET GLOBAL log_slow_rate_limit=1;" + bin/mysql -A -uroot -Snode$j/socket.sock -e "SET GLOBAL log_slow_verbosity='full';" + bin/mysql -A -uroot -Snode$j/socket.sock -e "SET GLOBAL log_slow_rate_type='query';" + done +fi + +bin/mysql -A -uroot -Snode1/socket.sock -e "create user admin@localhost identified with mysql_native_password by 'admin';" +bin/mysql -A -uroot -Snode1/socket.sock -e "grant all on *.* to admin@localhost;" +bin/mysql -A -uroot -Snode1/socket.sock -e "create user sysbench@'%' identified with mysql_native_password by 'test';" +bin/mysql -A -uroot -Snode1/socket.sock -e "grant all on *.* to sysbench@'%';" +bin/mysql -A -uroot -Snode1/socket.sock -e "drop database if exists sbtest;create database sbtest;" + +### update proxysql configuration use, correct port +export node1_port=$(cat node1.cnf | grep port | awk -F"=" '{print $2}') +sudo sed -i "s/3306/${node1_port}/" /etc/proxysql-admin.cnf + +sudo service proxysql start 
+sleep 20 +sudo proxysql-admin -e + +## Start Running Load +#sysbench /usr/share/sysbench/oltp_insert.lua --mysql-db=sbtest --mysql-user=sysbench --mysql-socket=node1/socket.sock --mysql-password=test --db-driver=mysql --threads=5 --tables=10 --table-size=1000 prepare > sysbench_run_node1_prepare.txt 2>&1 & +#sleep 20 +#sysbench /usr/share/sysbench/oltp_read_only.lua --mysql-db=sbtest --mysql-user=sysbench --mysql-socket=node1/socket.sock --mysql-password=test --db-driver=mysql --threads=5 --tables=10 --table-size=1000 --time=12000 run > sysbench_run_node1_read_only.txt 2>&1 & +#sysbench /usr/share/sysbench/oltp_read_write.lua --mysql-db=sbtest --mysql-user=sysbench --mysql-socket=node1/socket.sock --mysql-password=test --db-driver=mysql --threads=5 --tables=10 --table-size=1000 --time=12000 run > sysbench_run_node1_read_write.txt 2>&1 & diff --git a/pmm_qa/pxc_proxysql_setup.yml b/pmm_qa/pxc_proxysql_setup.yml new file mode 100644 index 00000000..0e9b7f18 --- /dev/null +++ b/pmm_qa/pxc_proxysql_setup.yml @@ -0,0 +1,125 @@ +# This playbook does following: +# enables Percona testing repository + +- hosts: all + become: true + become_method: sudo + vars: + pxc_version: "{{ lookup('vars', 'extra_pxc_version', default=lookup('env','PXC_VERSION') | default('8.0', true) ) }}" + pxc_tarball: "{{ lookup('vars', 'extra_pxc_tarball', default=lookup('env','PXC_TARBALL') | default('', true) ) }}" + proxysql_version: "{{ lookup('vars', 'extra_proxysql_version', default=lookup('env','PROXYSQL_VERSION') | default('2', true) ) }}" + proxysql_package: "{{ lookup('vars', 'extra_proxysql_package', default=lookup('env','PROXYSQL_PACKAGE') | default('', true) ) }}" + pxc_container: "{{ lookup('vars', 'extra_pxc_container', default=lookup('env','PXC_CONTAINER') | default('pxc_container', true) ) }}" + pmm_server_ip: "{{ lookup('vars', 'extra_pmm_server_ip', default=lookup('env','PMM_SERVER_IP') | default('host.docker.internal', true) ) }}" + client_version: "{{ lookup('vars', 
'extra_client_version', default=lookup('env','CLIENT_VERSION') | default('dev-latest', true) ) }}" + admin_password: "{{ lookup('vars', 'extra_admin_password', default=lookup('env','ADMIN_PASSWORD') | default('admin', true) ) }}" + pmm_qa_branch: "{{ lookup('vars', 'extra_pmm_qa_branch', default=lookup('env','PMM_QA_GIT_BRANCH') | default('main', true) ) }}" + number_of_nodes: "{{ lookup('vars', 'extra_number_of_nodes', default=lookup('env','PXC_NODES') | default('3', true) ) }}" + pxc_dev_cluster: "{{ lookup('vars', 'extra_pxc_cluster_name', default=lookup('env','PXC_CLUSTER_NAME') | default('pxc-dev-cluster', true) ) }}" + query_source: "{{ lookup('vars', 'extra_query_source', default=lookup('env','QUERY_SOURCE') | default('perfschema', true) ) }}" + + tasks: + - name: cleanup container for client and DB setup + shell: > + docker ps -a --filter "name={{ pxc_container }}" | grep -q . && docker stop {{ pxc_container }} && docker rm -fv {{ pxc_container }} + ignore_errors: true + tags: + - cleanup + + - name: Create pmm-qa network if not exist + shell: docker network create pmm-qa + ignore_errors: true + + - name: Prepare Container for PXC and Proxysql + shell: > + docker run -d --name={{ pxc_container }} + -p 6033:6033 + phusion/baseimage:jammy-1.0.1 + + - name: Copy all required Artifacts to the docker pxc_container + shell: "{{ item }}" + with_items: + - docker exec {{ pxc_container }} mkdir -p artifacts + - docker cp ./client_container_pxc_setup.sh {{ pxc_container }}:/ + - docker cp ./client_container_proxysql_setup.sh {{ pxc_container }}:/ + - docker cp ./pmm3-client-setup.sh {{ pxc_container }}:/ + + - name: Get PXC Product tarball URL based on the version + shell: cat product_version_download_helper | grep pxc | grep "\-{{ pxc_version }}" | head -1 | awk -F'# ' '{print $3}' + register: tarball + when: lookup('env', 'PXC_TARBALL') == '' + + - name: Set PXC Product URL if environment or paramater are not defined + set_fact: + pxc_tarball: "{{tarball.stdout | 
default(pxc_tarball,true)}}" + + - name: Get ProxySQL Product package URL based on the version + shell: cat product_version_download_helper | grep proxysql | grep "\-{{ proxysql_version }}" | head -1 | awk -F'# ' '{print $3}' + register: package + when: lookup('env', 'PROXYSQL_PACKAGE') == '' + + - name: Set ProxySQL Product URL if environment or paramater are not defined + set_fact: + proxysql_package: "{{package.stdout | default(proxysql_package,true)}}" + + - name : Get filename from URL for ProxySQL + shell: basename {{ proxysql_package }} + register: file_name + + - name: Execute User & Proxysql Setup inside the PXC pxc_container + shell: "{{ item }}" + with_items: + - docker exec {{ pxc_container }} apt-get update + - docker exec {{ pxc_container }} apt-get -y install wget curl git gnupg2 lsb-release debconf-utils + - docker exec {{ pxc_container }} apt-get -y install libaio1 libaio-dev libnuma-dev socat + - docker exec {{ pxc_container }} adduser --disabled-password --gecos "" pxc + - docker exec {{ pxc_container }} wget {{ proxysql_package }} + - docker exec {{ pxc_container }} dpkg -i {{ file_name.stdout }} + - docker exec {{ pxc_container }} apt install -y sysbench + + - name: Install pmm2-client on the pxc_container + shell: "{{ item }}" + with_items: + - docker network connect pmm-qa {{ pxc_container }} + - docker exec {{ pxc_container }} bash -x ./pmm3-client-setup.sh --pmm_server_ip {{ pmm_server_ip }} --client_version {{ client_version }} --admin_password {{ admin_password }} --use_metrics_mode no + + - name: Configure PXC for PMM Client if non-tarball install + shell: "{{ item }}" + with_items: + - docker exec {{ pxc_container }} usermod -aG pmm-agent pxc + when: "'http' not in client_version" + + - name: Execute User & Proxysql Setup inside the PXC pxc_container + shell: "{{ item }}" + with_items: + - docker exec {{ pxc_container }} percona-release enable-only tools release + - docker exec {{ pxc_container }} apt-get update + - docker exec {{ 
pxc_container }} apt-get install -y percona-xtrabackup-24 + when: pxc_version == "5.7" + + - name: Execute Setup script inside the PXC pxc_container + shell: "{{ item }}" + with_items: + - docker exec {{ pxc_container }} chmod 1777 /tmp + - docker exec --user pxc {{ pxc_container }} bash -xe ./client_container_pxc_setup.sh --pxc_version {{ pxc_version }} --pxc_tarball {{ pxc_tarball }} --number_of_nodes {{ number_of_nodes }} --pxc_dev_cluster {{ pxc_dev_cluster }} --query_source {{query_source}} > setup_pxc_{{ pxc_version }}.log + + - name: Set Random Number Fact + set_fact: + random_number: "{{ (10000 | random) | int }}" + + - name: Start ProxySQL inside the PXC extra_pxc_container + shell: "{{ item }}" + with_items: + - docker exec {{ pxc_container }} bash -c 'sed -i s#3306#'"\$(grep 'port' /home/pxc/PXC/node1.cnf | cut -d= -f2)"'# /etc/proxysql.cnf' + - docker exec {{ pxc_container }} proxysql -c /etc/proxysql.cnf + - docker exec {{ pxc_container }} sleep 20 + - docker exec {{ pxc_container }} bash -c 'sed -i s#3306#'"\$(grep 'port' /home/pxc/PXC/node1.cnf | cut -d= -f2)"'# /etc/proxysql-admin.cnf' + - docker exec {{ pxc_container }} proxysql-admin --config-file=/etc/proxysql-admin.cnf --enable + - docker exec {{ pxc_container }} sleep 20 + - docker exec {{ pxc_container }} pmm-admin add proxysql --username=admin --password=admin --service-name=my-new-proxysql_{{ pxc_container }}_{{ random_number }} --host=127.0.0.1 --port=6032 + + - name: Execute Service, Sysbench Script inside the PXC pxc_container + shell: "{{ item }}" + with_items: + - docker exec {{ pxc_container }} bash -xe ./client_container_proxysql_setup.sh > setup_proxy_{{ pxc_version }}.log + diff --git a/pmm_qa/requirements.txt b/pmm_qa/requirements.txt new file mode 100644 index 00000000..4566316a --- /dev/null +++ b/pmm_qa/requirements.txt @@ -0,0 +1,2 @@ +ansible-runner==2.3.2 +requests==2.26.0 diff --git a/pmm_qa/scripts/__init__.py b/pmm_qa/scripts/__init__.py new file mode 100644 index 
00000000..e69de29b diff --git a/pmm_qa/scripts/database_options.py b/pmm_qa/scripts/database_options.py new file mode 100644 index 00000000..b58373fd --- /dev/null +++ b/pmm_qa/scripts/database_options.py @@ -0,0 +1,84 @@ +database_options = { + "PSMDB": { + "versions": ["4.4", "5.0", "6.0", "7.0", "8.0", "latest"], + "configurations": {"CLIENT_VERSION": "3-dev-latest", "SETUP_TYPE": "pss", "COMPOSE_PROFILES": "classic", + "TARBALL": "", "OL_VERSION": "9", "GSSAPI": "false"} + }, + "MLAUNCH_PSMDB": { + "versions": ["4.4", "5.0", "6.0", "7.0", "8.0"], + "configurations": {"CLIENT_VERSION": "3-dev-latest", "SETUP_TYPE": "pss", "TARBALL": ""} + }, + "MLAUNCH_MODB": { + "versions": ["4.4", "5.0", "6.0", "7.0", "8.0"], + "configurations": {"CLIENT_VERSION": "3-dev-latest", "SETUP_TYPE": "pss", "TARBALL": ""} + }, + "SSL_MLAUNCH": { + "versions": ["4.4", "5.0", "6.0", "7.0", "8.0"], + "configurations": {"CLIENT_VERSION": "3-dev-latest", "SETUP_TYPE": "pss", "COMPOSE_PROFILES": "classic", + "TARBALL": ""} + }, + "SSL_PSMDB": { + "versions": ["4.4", "5.0", "6.0", "7.0", "8.0", "latest"], + "configurations": {"CLIENT_VERSION": "3-dev-latest", "SETUP_TYPE": "pss", "COMPOSE_PROFILES": "classic", + "TARBALL": ""} + }, + "MYSQL": { + "versions": ["8.0", "8.4"], + "configurations": {"QUERY_SOURCE": "perfschema", "SETUP_TYPE": "", "CLIENT_VERSION": "3-dev-latest", + "TARBALL": ""} + }, + "PS": { + "versions": ["5.7", "8.4", "8.0"], + "configurations": {"QUERY_SOURCE": "perfschema", "SETUP_TYPE": "", "CLIENT_VERSION": "3-dev-latest", + "TARBALL": "", "NODES_COUNT": 1, "MY_ROCKS": "false"} + }, + "SSL_MYSQL": { + "versions": ["5.7", "8.4", "8.0"], + "configurations": {"QUERY_SOURCE": "perfschema", "SETUP_TYPE": "", "CLIENT_VERSION": "3-dev-latest", + "TARBALL": ""} + }, + "PGSQL": { + "versions": ["11", "12", "13", "14", "15", "16", "17"], + "configurations": {"QUERY_SOURCE": "pgstatements", "CLIENT_VERSION": "3-dev-latest", "USE_SOCKET": "", + "SETUP_TYPE": ""} + }, + "PDPGSQL": { 
+ "versions": ["11", "12", "13", "14", "15", "16", "17"], + "configurations": {"CLIENT_VERSION": "3-dev-latest", "USE_SOCKET": "", "SETUP_TYPE": ""} + }, + "SSL_PDPGSQL": { + "versions": ["11", "12", "13", "14", "15", "16", "17"], + "configurations": {"CLIENT_VERSION": "3-dev-latest", "USE_SOCKET": ""} + }, + "PXC": { + "versions": ["5.7", "8.0"], + "configurations": {"CLIENT_VERSION": "3-dev-latest", "QUERY_SOURCE": "perfschema", "TARBALL": ""} + }, + "PROXYSQL": { + "versions": ["2"], + "configurations": {"PACKAGE": ""} + }, + "HAPROXY": { + "versions": [""], + "configurations": {"CLIENT_VERSION": "3-dev-latest"} + }, + "EXTERNAL": { + "REDIS": { + "versions": ["1.14.0", "1.58.0"], + }, + "NODEPROCESS": { + "versions": ["0.7.5", "0.7.10"], + }, + "configurations": {"CLIENT_VERSION": "3-dev-latest"} + }, + "DOCKERCLIENTS": { + "configurations": {} # Empty dictionary for consistency + }, + "BUCKET": { + "configurations": {"BUCKET_NAMES": 'bcp'} + }, + "VALKEY": { + "versions": ["7", "8"], + "configurations": {"CLIENT_VERSION": "3-dev-latest", "SETUP_TYPE": "", "TARBALL": ""} + } +} diff --git a/pmm_qa/scripts/get_env_value.py b/pmm_qa/scripts/get_env_value.py new file mode 100644 index 00000000..74d65149 --- /dev/null +++ b/pmm_qa/scripts/get_env_value.py @@ -0,0 +1,20 @@ +import os +from .database_options import database_options + +def get_value(key, db_type, args, db_config): + # Check if the variable exists in the environment + env_value = os.environ.get(key) + if env_value is not None: + return env_value + + # Only for client_version we accept global command line argument + if key == "CLIENT_VERSION" and args.client_version is not None: + return args.client_version + + # Check if the variable exists in the args config + config_value = db_config.get(key) + if config_value is not None: + return config_value + + # Fall back to default configs value or empty '' + return database_options[db_type]["configurations"].get(key, '') diff --git 
a/pmm_qa/scripts/pgsql_load.sql b/pmm_qa/scripts/pgsql_load.sql new file mode 100644 index 00000000..81a4fa90 --- /dev/null +++ b/pmm_qa/scripts/pgsql_load.sql @@ -0,0 +1,115 @@ +-- ======================================== +-- CREATE TABLES +-- ======================================== + +CREATE TABLE students ( + student_id SERIAL PRIMARY KEY, + first_name VARCHAR(50), + last_name VARCHAR(50), + birth_date DATE +); + +CREATE TABLE classes ( + class_id SERIAL PRIMARY KEY, + name VARCHAR(100), + teacher VARCHAR(100) +); + +CREATE TABLE enrollments ( + enrollment_id SERIAL PRIMARY KEY, + student_id INTEGER REFERENCES students(student_id), + class_id INTEGER REFERENCES classes(class_id), + enrollment_date DATE DEFAULT CURRENT_DATE +); + +-- ======================================== +-- INSERT MOCK DATA +-- ======================================== + +INSERT INTO students (first_name, last_name, birth_date) VALUES +('Alice', 'Smith', '2005-04-10'), +('Bob', 'Johnson', '2006-08-15'), +('Charlie', 'Brown', '2004-12-01'); + +INSERT INTO classes (name, teacher) VALUES +('Mathematics', 'Mrs. Taylor'), +('History', 'Mr. Anderson'), +('Science', 'Dr. 
Reynolds'); + +INSERT INTO enrollments (student_id, class_id) VALUES +(1, 1), +(1, 2), +(2, 2), +(3, 1), +(3, 3); + +-- ======================================== +-- SIMULATE DEAD TUPLES +-- ======================================== + + +INSERT INTO students (first_name, last_name, birth_date) +SELECT 'John', 'Doe', CURRENT_DATE - (random() * 5000)::int +FROM generate_series(1, 100000); + +-- These updates and deletes will create dead tuples + +-- Update records (old versions become dead) +UPDATE students +SET last_name = last_name || '_updated' +WHERE student_id IN (1, 2); + +-- Delete records (deleted rows become dead) +DELETE FROM enrollments +WHERE enrollment_id IN (SELECT enrollment_id FROM enrollments LIMIT 2); + +-- Disable autovacuum temporarily (for demo) +ALTER TABLE students SET (autovacuum_enabled = false); +ALTER TABLE enrollments SET (autovacuum_enabled = false); + +-- ======================================== +-- SELECT QUERIES +-- ======================================== + +-- Get all students +SELECT * FROM students; + +-- Get all students enrolled in Mathematics +SELECT s.first_name, s.last_name +FROM students s +JOIN enrollments e ON s.student_id = e.student_id +JOIN classes c ON e.class_id = c.class_id +WHERE c.name = 'Mathematics'; + +-- Count students per class +SELECT c.name, COUNT(e.student_id) AS student_count +FROM classes c +LEFT JOIN enrollments e ON c.class_id = e.class_id +GROUP BY c.name; + +-- ======================================== +-- UPDATE QUERIES +-- ======================================== + +-- Change Bob's last name +UPDATE students +SET last_name = 'Williams' +WHERE first_name = 'Bob' AND last_name = 'Johnson'; + +-- Update the teacher for the History class +UPDATE classes +SET teacher = 'Ms. 
Carter' +WHERE name = 'History'; + +-- ======================================== +-- DELETE QUERIES +-- ======================================== + +-- Remove Charlie from Science class +DELETE FROM enrollments +WHERE student_id = (SELECT student_id FROM students WHERE first_name = 'Charlie') + AND class_id = (SELECT class_id FROM classes WHERE name = 'Science'); + +-- Delete a student completely +DELETE FROM students +WHERE first_name = 'Alice' AND last_name = 'Smith'; diff --git a/pmm_qa/scripts/run_ansible_playbook.py b/pmm_qa/scripts/run_ansible_playbook.py new file mode 100644 index 00000000..b683ee9c --- /dev/null +++ b/pmm_qa/scripts/run_ansible_playbook.py @@ -0,0 +1,34 @@ +import os +import ansible_runner +import sys +import subprocess + +def run_ansible_playbook(playbook_filename, env_vars, args): + # Get Script Dir + script_path = os.path.abspath(sys.argv[0]) + script_dir = os.path.dirname(script_path) + playbook_path = script_dir + "/" + playbook_filename + verbosity_level = 1 + # Install community docker plugin for ansible + subprocess.run(["ansible-galaxy", "collection", "install", "community.docker"], capture_output=True, text=True) + if args.verbosity_level is not None: + verbosity_level = int(args.verbosity_level) + + if args.verbose: + print(f'Options set after considering Defaults: {env_vars}') + + r = ansible_runner.run( + private_data_dir=script_dir, + playbook=playbook_path, + inventory='127.0.0.1', + cmdline='-l localhost, --connection=local', + envvars=env_vars, + suppress_env_files=True, + verbosity=verbosity_level, + ) + + print(f'{playbook_filename} playbook execution {r.status}') + + if r.rc != 0: + exit(1) + diff --git a/pmm_qa/setup_docker_client_images.sh b/pmm_qa/setup_docker_client_images.sh new file mode 100755 index 00000000..076a4d69 --- /dev/null +++ b/pmm_qa/setup_docker_client_images.sh @@ -0,0 +1,15 @@ +#!/bin/bash +set -xe + +echo +echo "Configuring Multiple Docker Images with PMM Server and Client" +echo "Please wait...." 
+docker network create docker-client-check || true +docker compose -f docker-compose-clients.yaml down -v --remove-orphans +docker compose -f docker-compose-clients.yaml build --no-cache +CLIENT_DOCKER_VERSION=$CLIENT_DOCKER_VERSION docker compose -f docker-compose-clients.yaml up -d +sleep 20 +echo "Adding DB Clients to PMM Server" +docker exec pmm-client-1 pmm-admin add mysql --username=pmm --password=pmm-pass --service-name=ps-8.0 --query-source=perfschema --host=ps-1 --port=3306 --server-url=https://admin:admin@pmm-server-1:8443 --server-insecure-tls=true +docker exec pmm-client-1 pmm-admin add postgresql --query-source=pgstatements --username=pmm --password=pmm-pass --service-name=pdpgsql-1 --host=pdpgsql-1 --port=5432 --server-url=https://admin:admin@pmm-server-1:8443 --server-insecure-tls=true +docker exec pmm-client-1 pmm-admin add mongodb --username=pmm --password=pmm-pass --service-name=mongodb-7.0 --host=psmdb-1 --port=27017 --server-url=https://admin:admin@pmm-server-1:8443 --server-insecure-tls=true diff --git a/pmm_qa/tasks/add_mysql_to_pmm_server.yml b/pmm_qa/tasks/add_mysql_to_pmm_server.yml new file mode 100644 index 00000000..55d54ebf --- /dev/null +++ b/pmm_qa/tasks/add_mysql_to_pmm_server.yml @@ -0,0 +1,17 @@ +- name: Install and add pmm client + include_tasks: install_pmm_client.yml + +- name: Set unique service name + include_tasks: ./set_unique_service_name.yml + +- name: Add group replication service to pmm server + community.docker.docker_container_exec: + container: "{{ container_name }}" + command: pmm-admin add mysql --query-source={{ query_source }} --username=root --password={{ root_password }} --environment=ps-gr-dev --cluster=ps-gr-dev-cluster --replication-set=ps-gr-replication {{ service_name }} --debug 127.0.0.1:3306 + when: setup_type == "gr" + +- name: Add service to pmm server + community.docker.docker_container_exec: + container: "{{ container_name }}" + command: pmm-admin add mysql --query-source={{ query_source }} 
--username=root --password={{ root_password }} --environment=ps-dev {{ service_name }} --debug 127.0.0.1:3306 + when: setup_type != "gr" diff --git a/pmm_qa/tasks/create_minio_container.yml b/pmm_qa/tasks/create_minio_container.yml new file mode 100644 index 00000000..326043c6 --- /dev/null +++ b/pmm_qa/tasks/create_minio_container.yml @@ -0,0 +1,80 @@ +--- +- name: Deploy MinIO bucket. + hosts: localhost + connection: local + gather_facts: yes + vars: + network_name: "pmm-qa" + buckets: "{{ lookup('env', 'BUCKETS') | default('bcp', true) }}" + minio_access_key: minio1234 + minio_secret_key: minio1234 + minio_volume_name: minio_backups + minio_ports: + - "9010:9000" + - "9001:9001" + + tasks: + - name: Run docker ps with port info + shell: docker ps -a + register: docker_ps_output + + - name: Create Docker network + community.docker.docker_network: + name: "{{ network_name }}" + state: present + ignore_errors: yes + + - name: Remove old MinIO docker container + community.docker.docker_container: + name: minio + image: minio/minio + restart_policy: always + state: absent + ignore_errors: yes + + - name: Remove MinIO Docker volume + community.docker.docker_volume: + name: "{{ minio_volume_name }}" + state: absent + ignore_errors: true + + - name: Create MinIO Docker volume + community.docker.docker_volume: + name: "{{ minio_volume_name }}" + + - name: Run MinIO container + community.docker.docker_container: + name: minio + image: minio/minio + restart_policy: unless-stopped + ports: "{{ minio_ports }}" + networks: + - name: "{{ network_name }}" + volumes: + - "{{ minio_volume_name }}:/backups" + env: + MINIO_ACCESS_KEY: "{{ minio_access_key }}" + MINIO_SECRET_KEY: "{{ minio_secret_key }}" + command: server /backups --address 0.0.0.0:9000 --console-address 0.0.0.0:9001 + + - name: Show the list of buckets + debug: + var: buckets + + - name: Set MinIO alias inside the container + community.docker.docker_container_exec: + container: "minio" + command: > + /bin/sh -c " + 
sleep 5; + /usr/bin/mc alias set myminio http://127.0.0.1:9000 minio1234 minio1234; + exit 0;" + + - name: Create MinIO buckets + community.docker.docker_container_exec: + container: "minio" + command: > + /bin/sh -c " + /usr/bin/mc mb myminio/{{ item }} || echo 'Bucket {{ item }} already exists'; + exit 0;" + loop: "{{ buckets }}" diff --git a/pmm_qa/tasks/install_pmm_client.yml b/pmm_qa/tasks/install_pmm_client.yml new file mode 100644 index 00000000..430f2b05 --- /dev/null +++ b/pmm_qa/tasks/install_pmm_client.yml @@ -0,0 +1,186 @@ +- name: Set default metrics mode + set_fact: + metrics_mode: 'auto' + when: metrics_mode is not defined + +- name: Get PMM server address + shell: 'docker ps -f name=-server --format "{{ "{{" }}.Names{{ "}}" }}"' + register: pmm_server_ip_output + when: pmm_server_ip == "127.0.0.1" + +- name: Set correct pmm server address + set_fact: + pmm_server_ip: "{{ pmm_server_ip_output.stdout }}" + when: pmm_server_ip == "127.0.0.1" + +- name: Set correct pmm server port + set_fact: + pmm_server_port: 443 + when: pmm_server_ip | default('') | regex_search('\.') + +- name: Set correct pmm server port + set_fact: + pmm_server_port: 8443 + when: not (pmm_server_ip | default('') | regex_search('\.')) + +- name: Detect OS inside the container + shell: docker exec {{ container_name }} cat /etc/os-release + register: container_os_info + +- name: Set distro family (debian/rhel) + set_fact: + distro_family: >- + {{ + ( + 'debian' if 'debian' in container_os_info.stdout | lower else + 'rhel' if 'rhel' in container_os_info.stdout | lower or 'centos' in container_os_info.stdout | lower or 'fedora' in container_os_info.stdout | lower + else 'unknown' + ) | trim + }} + +- name: Install dependencies inside Debian-family container + shell: | + docker exec --user root {{ container_name }} apt-get update + docker exec --user root {{ container_name }} apt-get install -y wget gnupg2 jq lsb-base lsb-release curl + when: distro_family == "debian" + +- name: Install 
microdnf on RHEL-family containers + shell: docker exec --user root {{ container_name }} dnf install -y microdnf + when: distro_family == "rhel" + ignore_errors: true + +- name: Install dependencies inside RHEL-family container + shell: docker exec --user root {{ container_name }} microdnf install -y wget gnupg2 jq + when: distro_family == "rhel" + +- name: Install curl on RHEL-family containers + shell: docker exec --user root {{ container_name }} microdnf install -y curl-minimal + when: distro_family == "rhel" + ignore_errors: true + +- name: Install percona release on Debian-family containers + shell: | + docker exec --user root {{ container_name }} wget https://repo.percona.com/apt/percona-release_latest.generic_all.deb + docker exec --user root {{ container_name }} apt install -y ./percona-release_latest.generic_all.deb + when: + - distro_family == 'debian' + ignore_errors: true + +- name: Install percona release on RHEL-family containers + shell: | + docker exec --user root {{ container_name }} wget https://repo.percona.com/yum/percona-release-latest.noarch.rpm + docker exec --user root {{ container_name }} microdnf -y install ./percona-release-latest.noarch.rpm + when: distro_family == 'rhel' + ignore_errors: true + +- name: Install pmm client experimental on Debian-family containers + shell: | + docker exec --user root {{ container_name }} percona-release enable-only pmm3-client experimental + docker exec --user root {{ container_name }} apt-get update + docker exec --user root {{ container_name }} apt-get -y install pmm-client + when: distro_family == "debian" and client_version == "3-dev-latest" + +- name: Install pmm client experimental on RHEL-family containers + shell: | + docker exec --user root {{ container_name }} percona-release enable-only pmm3-client experimental + docker exec --user root {{ container_name }} microdnf install -y pmm-client + when: distro_family == "rhel" and client_version == "3-dev-latest" + +- name: Install pmm client testing 
on Debian-family containers + shell: | + docker exec --user root {{ container_name }} percona-release enable-only pmm3-client testing + docker exec --user root {{ container_name }} apt-get update + docker exec --user root {{ container_name }} apt-get -y install pmm-client + when: distro_family == "debian" and client_version == "pmm3-rc" + +- name: Install pmm client testing on RHEL-family containers + shell: | + docker exec --user root {{ container_name }} percona-release enable-only pmm3-client testing + docker exec --user root {{ container_name }} microdnf install -y pmm-client + when: distro_family == "rhel" and client_version == "pmm3-rc" + +- name: Install pmm client release on Debian-family containers + shell: | + docker exec --user root {{ container_name }} percona-release enable-only pmm3-client release + docker exec --user root {{ container_name }} apt-get update + docker exec --user root {{ container_name }} apt-get -y install pmm-client + when: distro_family == "debian" and client_version == "pmm3-latest" + +- name: Install pmm client release on RHEL-family containers + shell: | + docker exec --user root {{ container_name }} percona-release enable-only pmm3-client release + docker exec --user root {{ container_name }} microdnf install -y pmm-client + when: distro_family == "rhel" and client_version == "pmm3-latest" + +- name: Install specific PMM client version on Debian-family containers + shell: | + docker exec --user root {{ container_name }} bash -c 'wget -O /pmm-client.deb "https://repo.percona.com/pmm3-client/apt/pool/main/p/pmm-client/pmm-client_{{ client_version }}-7.$(lsb_release -sc)_amd64.deb"' + docker exec --user root {{ container_name }} dpkg -i /pmm-client.deb + when: + - distro_family == 'debian' + - client_version is match('^3\\.[0-9]+\\.[0-9]+$') + +- name: Install specific PMM client version on RHEL-family containers + shell: | + docker exec --user root {{ container_name }} wget -O /pmm-client.rpm 
https://repo.percona.com/pmm3-client/yum/release/9/RPMS/x86_64/pmm-client-{{ client_version }}-7.el9.x86_64.rpm + docker exec --user root {{ container_name }} rpm -i /pmm-client.rpm + when: + - distro_family == 'rhel' + - client_version is match('^3\\.[0-9]+\\.[0-9]+$') + +- name: Install tarball PMM client version + shell: | + docker exec --user root {{ container_name }} sh -c ' + wget -O /pmm-client.tar.gz "{{ client_version }}" && + tar -zxpf /pmm-client.tar.gz && + PMM_CLIENT=`ls -1td pmm-client* 2>/dev/null | grep -v ".tar" | grep -v ".sh" | head -n1` && + echo ${PMM_CLIENT} && + rm -rf pmm-client && + mv ${PMM_CLIENT} pmm-client && + rm -rf /usr/local/bin/pmm-client && + mv -f pmm-client /usr/local/bin && + bash -x /usr/local/bin/pmm-client/install_tarball && + ln -sf /usr/local/percona/pmm/bin/pmm-admin /usr/local/bin/pmm-admin && + ln -sf /usr/local/percona/pmm/bin/pmm-agent /usr/local/bin/pmm-agent && + pmm-admin --version + ' + when: + - client_version | regex_search('^https?://.*\\.tar\\.gz$') is not none + +- name: Connect pmm client to pmm server using metrics mode + shell: | + docker exec --user root {{ container_name }} \ + pmm-agent setup \ + --config-file=/usr/local/percona/pmm/config/pmm-agent.yaml \ + --server-address={{ pmm_server_ip }}:{{ pmm_server_port }} \ + --server-insecure-tls \ + --metrics-mode={{ metrics_mode }} \ + --server-username=admin \ + --server-password={{ admin_password }} \ + {{ container_name }} + when: metrics_mode | length > 0 + +- name: Connect pmm client to pmm server using default metrics mode + shell: | + docker exec --user root {{ container_name }} \ + pmm-agent setup \ + --config-file=/usr/local/percona/pmm/config/pmm-agent.yaml \ + --server-address={{ pmm_server_ip }}:{{ pmm_server_port }} \ + --server-insecure-tls \ + --server-username=admin \ + --server-password={{ admin_password }} \ + {{ container_name }} + when: metrics_mode | length == 0 + +- name: Wait 5 seconds for connection to complete + pause: + seconds: 5 
+ +- name: Start pmm client + shell: | + docker exec --user root {{ container_name }} \ + sh -c 'nohup pmm-agent --config-file=/usr/local/percona/pmm/config/pmm-agent.yaml > /var/log/pmm-agent.log 2>&1 &' + +- name: Wait 5 seconds for start to complete + pause: + seconds: 5 diff --git a/pmm_qa/tasks/set_unique_service_name.yml b/pmm_qa/tasks/set_unique_service_name.yml new file mode 100644 index 00000000..e2492dff --- /dev/null +++ b/pmm_qa/tasks/set_unique_service_name.yml @@ -0,0 +1,44 @@ +- name: Install jq via appropriate package manager + become: true + block: + - name: Install jq on Debian/Ubuntu + apt: + name: jq + state: present + when: ansible_facts['os_family'] == 'Debian' + + - name: Install jq on RHEL/CentOS/Alma/Rocky + dnf: + name: jq + state: present + when: ansible_facts['os_family'] == 'RedHat' + +- name: Verify that service with expected name is not connected to pmm server + shell: | + SERVICE_NAME="{{ container_name }}" + echo "$SERVICE_NAME" + curl -u admin:{{ admin_password }} --location 'http://{{ pmm_server_ip}}/v1/management/services' | jq -r '.services[].service_name' | grep -q $SERVICE_NAME + register: service_exists_old + ignore_errors: yes + +- name: Set correct service name in pmm server + set_fact: + service_name: "{{ service_exists_old.stdout }}" + when: service_exists_old.rc == 1 + +- name: Create a new service name that is not already connected to pmm server + shell: | + RANDOM_ID="_$(shuf -i 1-10000 -n 1)" + SERVICE_NAME="{{ container_name }}$RANDOM_ID" + echo "$SERVICE_NAME" + curl -u admin:{{ admin_password }} --location 'http://{{ pmm_server_ip}}/v1/management/services' | jq -r '.services[].service_name' | grep -q $SERVICE_NAME + register: service_exists_new + ignore_errors: yes + retries: 5 + until: service_exists_new.rc == 1 + when: service_exists_old.rc == 0 + +- name: Set correct service name in pmm server + set_fact: + service_name: "{{ service_exists_new.stdout }}" + when: service_exists_old.rc == 0 diff --git 
a/pmm_qa/tls-ssl-setup/create_certs.sh b/pmm_qa/tls-ssl-setup/create_certs.sh new file mode 100644 index 00000000..809723f3 --- /dev/null +++ b/pmm_qa/tls-ssl-setup/create_certs.sh @@ -0,0 +1,22 @@ +#!/bin/sh + +export PWD=$(pwd) +export HOST=localhost +mkdir -p certificates +pushd certificates +echo -e "\n=== Generating SSL certificates in ${PWD}/certificates ===" +# Generate self signed root CA cert +openssl req -nodes -x509 -newkey rsa:4096 -keyout ca.key -out ca.crt -subj "/C=US/ST=California/L=San Francisco/O=Percona/OU=root/CN=${HOST}/emailAddress=test@percona.com" +# Generate server cert to be signed +openssl req -nodes -newkey rsa:4096 -keyout server.key -out server.csr -subj "/C=US/ST=California/L=San Francisco/O=Percona/OU=server/CN=${HOST}/emailAddress=test@percona.com" +# Sign server sert +openssl x509 -req -in server.csr -CA ca.crt -CAkey ca.key -set_serial 01 -out server.crt +# Create server PEM file +cat server.key server.crt > server.pem +# Generate client cert to be signed +openssl req -nodes -newkey rsa:4096 -keyout client.key -out client.csr -subj "/C=US/ST=California/L=San Francisco/O=Percona/OU=client/CN=${HOST}/emailAddress=test@percona.com" +# Sign the client cert +openssl x509 -req -in client.csr -CA ca.crt -CAkey ca.key -set_serial 02 -out client.crt +# Create client PEM file +cat client.key client.crt > client.pem +popd diff --git a/pmm_qa/tls-ssl-setup/mlaunch_tls_setup.yml b/pmm_qa/tls-ssl-setup/mlaunch_tls_setup.yml new file mode 100644 index 00000000..f47d8852 --- /dev/null +++ b/pmm_qa/tls-ssl-setup/mlaunch_tls_setup.yml @@ -0,0 +1,94 @@ +--- + +- hosts: all + become: true + become_method: sudo + vars: + mongodb_version: "{{ lookup('vars', 'extra_mongodb_version', default=lookup('env','MONGODB_VERSION') | default('4.4', true) ) }}" + mongodb_ssl_container: "{{ lookup('vars', 'extra_mongodb_ssl_container', default=lookup('env','MONGODB_SSL_CONTAINER') | default('mongodb_ssl', true) ) }}" + pmm_server_ip: "{{ lookup('vars', 
'extra_pmm_server_ip', default=lookup('env','PMM_SERVER_IP') | default('127.0.0.1', true) ) }}" + client_version: "{{ lookup('vars', 'extra_client_version', default=lookup('env','CLIENT_VERSION') | default('dev-latest', true) ) }}" + admin_password: "{{ lookup('vars', 'extra_admin_password', default=lookup('env','ADMIN_PASSWORD') | default('admin', true) ) }}" + pmm_qa_branch: "{{ lookup('vars', 'extra_pmm_qa_branch', default=lookup('env','PMM_QA_GIT_BRANCH') | default('main', true) ) }}" + + tasks: + - name: Cleanup Docker container for client and DB setup + shell: > + docker ps -a --filter "name={{ mongodb_ssl_container }}" | grep -q . && docker stop {{ mongodb_ssl_container }} && docker rm -fv {{ mongodb_ssl_container }} + ignore_errors: true + tags: + - cleanup + - name: delete network if exist + shell: docker network rm "{{ mongodb_ssl_container }}_network" + ignore_errors: true + tags: + - cleanup + + - name: Create a network + shell: docker network create "{{ mongodb_ssl_container }}_network" + + - name: Create pmm-qa network if not exist + shell: docker network create pmm-qa + ignore_errors: true + + - name: Prepare Container for mongodb ssl container + shell: > + docker run -d --name={{ mongodb_ssl_container }} + --network "{{ mongodb_ssl_container }}_network" + phusion/baseimage:jammy-1.0.1 + + - name: Copy all required Artifacts to the docker mongodb_ssl_container + shell: "{{ item }}" + with_items: + - docker exec {{ mongodb_ssl_container }} mkdir -p artifacts + - docker cp ./mongodb/mlaunch_ssl_setup.sh {{ mongodb_ssl_container }}:/ + - docker cp ./create_certs.sh {{ mongodb_ssl_container }}:/ + - docker cp ../pmm3-client-setup.sh {{ mongodb_ssl_container }}:/ + + - name: Install required software's to the docker psmdb_container + shell: "{{ item }}" + with_items: + - docker exec {{ mongodb_ssl_container }} apt-get update + - docker exec {{ mongodb_ssl_container }} apt-get -y install wget curl git gnupg2 lsb-release jq python3 pip + - docker exec {{ 
mongodb_ssl_container }} python3 -m pip install --upgrade pip + - docker exec {{ mongodb_ssl_container }} pip3 install 'mtools[all]' + + - name: Execute Certs Script inside the mongodb mongodb_ssl_container + shell: "{{ item }}" + with_items: + - docker exec {{ mongodb_ssl_container }} bash -xe ./create_certs.sh > mongodb/setup_mongodb_ssl_{{ mongodb_version }}.log + + - name: Execute Setup script inside the mongodb mongodb_ssl_container + shell: "{{ item }}" + with_items: + - docker exec {{ mongodb_ssl_container }} bash -xe ./mlaunch_ssl_setup.sh --mongodb_version {{ mongodb_version }} >> mongodb/setup_mongodb_ssl_{{ mongodb_version }}.log + + - name: Install pmm2-client on the mongodb_ssl_container + shell: "{{ item }}" + with_items: + - docker network connect pmm-qa {{ mongodb_ssl_container }} + - docker exec {{ mongodb_ssl_container }} bash -x ./pmm3-client-setup.sh --pmm_server_ip {{ pmm_server_ip }} --client_version {{ client_version }} --admin_password {{ admin_password }} --use_metrics_mode no + + - name: Add pmm-admin binary to path when tar ball installation + shell: docker exec {{ mongodb_ssl_container }} echo "export PATH=$PATH:/pmm2-client/bin" > setup_path.sh + when: '"http" in client_version' + + - name: Remove mongodb service if already added previously + shell: "{{ item }}" + with_items: + - docker exec {{ mongodb_ssl_container }} bash -c 'source ~/.bash_profile || true; pmm-admin remove mongodb {{ mongodb_ssl_container }}_service' + ignore_errors: true + + - name: Add mongodb_ssl for monitoring + shell: "{{ item }}" + with_items: + - docker exec {{ mongodb_ssl_container }} bash -c 'source ~/.bash_profile || true; pmm-admin list' + - docker exec {{ mongodb_ssl_container }} bash -c 'source ~/.bash_profile || true; pmm-admin add mongodb --tls --tls-skip-verify --authentication-mechanism=MONGODB-X509 --authentication-database=$external --tls-certificate-key-file=/certificates/client.pem --tls-certificate-key-file-password=/certificates/client.key 
--tls-ca-file=/certificates/ca.crt {{ mongodb_ssl_container }}_ssl_service' + + - name: Get client cert Files on host + shell: "{{ item }}" + with_items: + - mkdir -p mongodb/{{ mongodb_version }} || true + - docker exec {{ mongodb_ssl_container }} cat /certificates/ca.crt > mongodb/{{ mongodb_version }}/ca.crt + - docker exec {{ mongodb_ssl_container }} cat /certificates/client.key > mongodb/{{ mongodb_version }}/client.key + - docker exec {{ mongodb_ssl_container }} cat /certificates/client.pem > mongodb/{{ mongodb_version }}/client.pem diff --git a/pmm_qa/tls-ssl-setup/mongodb/mlaunch_ssl_setup.sh b/pmm_qa/tls-ssl-setup/mongodb/mlaunch_ssl_setup.sh new file mode 100644 index 00000000..92d44816 --- /dev/null +++ b/pmm_qa/tls-ssl-setup/mongodb/mlaunch_ssl_setup.sh @@ -0,0 +1,70 @@ +#!/bin/sh + +while [ $# -gt 0 ]; do + if [[ $1 == *"--"* ]]; then + param="${1/--/}" + declare $param="$2" + fi + shift +done + +if [ -z "$mongodb_version" ]; then + export mongodb_version=4.4 +fi + +wget https://raw.githubusercontent.com/percona/pmm-qa/main/pmm-tests/mongodb_user_setup.js +### Detect latest tarball link for specified mongodb_version: 7.0 | 6.0 | 5.0 | 4.4 | 4.2 at the moment +#psmdb_latest=$(wget -q --post-data "version=percona-server-mongodb-${mongodb_version}" https://www.percona.com/products-api.php -O - | grep -oP "(?<=value\=\")[^\"]*" | sort -V | tail -1) +psmdb_tarball=$(wget -q --post-data "version_files=percona-server-mongodb-${mongodb_version}&software_files=binary" https://www.percona.com/products-api.php -O - | jq -r '.[] | select(.link | contains("sha") | not) | .link' | grep glibc2\.17-minimal) + +echo "Downloading ${mongodb_version} ..." 
+wget -O percona_server_mongodb.tar.gz ${psmdb_tarball} +tar -xvf percona_server_mongodb.tar.gz +mv percona-server-mongodb-${mongodb_version}.* psmdb_${mongodb_version} +rm percona_server_mongodb.tar.gz* + +# TODO: refactor if to match range of versions 6.0+ +if [[ "$mongodb_version" == "6.0" || "$mongodb_version" == "7.0" ]]; then + ### PSMDB 6+ requires "percona-mongodb-mongosh" additionally + echo "Downloading mongosh ..." + mongosh_link=$(wget -q --post-data "version_files=percona-server-mongodb-${mongodb_version}&software_files=binary" https://www.percona.com/products-api.php -O - | jq -r '.[] | select(.link | contains("sha") | not) | .link' | grep mongosh) + wget -O mongosh.tar.gz ${mongosh_link} + tar -xvf mongosh.tar.gz + mv percona-mongodb-mongosh* mongosh + cp mongosh/bin/mongosh ./psmdb_${mongodb_version}/bin/mongo + rm mongosh.tar.gz +fi + +# For mongodb dependency in Debian +wget http://http.us.debian.org/debian/pool/main/o/openldap/libldap-2.4-2_2.4.47+dfsg-3+deb10u7_amd64.deb +apt install -y ./libldap-2.4-2_2.4.47+dfsg-3+deb10u7_amd64.deb + +mlaunch init --bind_ip 0.0.0.0 --binarypath "./psmdb_${mongodb_version}/bin" --replicaset --name rs1 --nodes 3 --sslMode requireSSL --sslPEMKeyFile /certificates/server.pem --sslCAFile /certificates/ca.crt --sslClientCertificate /certificates/client.pem +#bash ./mongo_startup.sh -m --ssl -x -e wiredTiger --mongodExtra="--profile 2 --slowms 1 --bind_ip_all" --b=/psmdb_${mongodb_version}/bin +sleep 20 +./psmdb_${mongodb_version}/bin/mongo --tls --host localhost --port 27017 --tlsCAFile /certificates/ca.crt --tlsCertificateKeyFile /certificates/client.pem mongodb_user_setup.js +cat > add_new_ssl_user.js < add_new_ssl_user.js < + docker ps -a --filter "name={{ mongodb_ssl_container }}" | grep -q . 
&& docker stop {{ mongodb_ssl_container }} && docker rm -fv {{ mongodb_ssl_container }} + ignore_errors: true + tags: + - cleanup + - name: delete network if exist + shell: docker network rm "{{ mongodb_ssl_container }}_network" + ignore_errors: true + tags: + - cleanup + + - name: Create a network + shell: docker network create "{{ mongodb_ssl_container }}_network" + + - name: Create pmm-qa network if not exist + shell: docker network create pmm-qa + ignore_errors: true + + - name: Prepare Container for mongodb ssl container + shell: > + docker run -d --name={{ mongodb_ssl_container }} + --network "{{ mongodb_ssl_container }}_network" + phusion/baseimage:focal-1.1.0 + + - name: Copy all required Artifacts to the docker mongodb_ssl_container + shell: "{{ item }}" + with_items: + - docker exec {{ mongodb_ssl_container }} mkdir -p artifacts + - docker cp ./mongodb/mongodb_ssl_setup.sh {{ mongodb_ssl_container }}:/ + + - name: Execute Setup script inside the mongodb mongodb_ssl_container + shell: "{{ item }}" + with_items: + - docker exec {{ mongodb_ssl_container }} bash -xe ./mongodb_ssl_setup.sh --mongodb_version {{ mongodb_version }} > mongodb/setup_mongodb_ssl_{{ mongodb_version }}.log + + - name: Install pmm2-client on the mongodb_ssl_container + shell: "{{ item }}" + with_items: + - docker exec {{ mongodb_ssl_container }} wget https://raw.githubusercontent.com/percona/pmm-qa/{{ pmm_qa_branch }}/pmm-tests/pmm2-client-setup.sh + - docker network connect pmm-qa {{ mongodb_ssl_container }} + - docker exec {{ mongodb_ssl_container }} bash -x ./pmm2-client-setup.sh --pmm_server_ip {{ pmm_server_ip }} --client_version {{ client_version }} --admin_password {{ admin_password }} --use_metrics_mode no + + - name: Add pmm-admin binary to path when tar ball installation + shell: docker exec {{ mongodb_ssl_container }} echo "export PATH=$PATH:/pmm2-client/bin" > setup_path.sh + when: '"http" in client_version' + + - name: Remove mongodb service if already added previously + 
shell: "{{ item }}" + with_items: + - docker exec {{ mongodb_ssl_container }} bash -c 'source ~/.bash_profile || true; pmm-admin remove mongodb {{ mongodb_ssl_container }}_service' + ignore_errors: true + + - name: Add mongodb_ssl for monitoring + shell: "{{ item }}" + with_items: + - docker exec {{ mongodb_ssl_container }} bash -c 'source ~/.bash_profile || true; pmm-admin list' + - docker exec {{ mongodb_ssl_container }} bash -c 'source ~/.bash_profile || true; pmm-admin add mongodb --tls --tls-skip-verify --authentication-mechanism=MONGODB-X509 --authentication-database=$external --tls-certificate-key-file=/nodes/certificates/client.pem --tls-certificate-key-file-password=/nodes/certificates/client.key --tls-ca-file=/nodes/certificates/ca.crt {{ mongodb_ssl_container }}_ssl_service' + + - name: Get client cert Files on host + shell: "{{ item }}" + with_items: + - mkdir -p mongodb/{{ mongodb_version }} || true + - docker exec {{ mongodb_ssl_container }} cat /nodes/certificates/ca.crt > mongodb/{{ mongodb_version }}/ca.crt + - docker exec {{ mongodb_ssl_container }} cat /nodes/certificates/client.key > mongodb/{{ mongodb_version }}/client.key + - docker exec {{ mongodb_ssl_container }} cat /nodes/certificates/client.pem > mongodb/{{ mongodb_version }}/client.pem diff --git a/pmm_qa/tls-ssl-setup/mysql/mysql_ssl_setup.sh b/pmm_qa/tls-ssl-setup/mysql/mysql_ssl_setup.sh new file mode 100644 index 00000000..7e9278ca --- /dev/null +++ b/pmm_qa/tls-ssl-setup/mysql/mysql_ssl_setup.sh @@ -0,0 +1,93 @@ +#!/bin/sh + + +while [ $# -gt 0 ]; do + + if [[ $1 == *"--"* ]]; then + param="${1/--/}" + declare $param="$2" + fi + + shift +done + +if [ -z "$mysql_version" ] +then + export mysql_version=8.0 +fi + +apt-get update +apt-get -y install wget curl git gnupg2 lsb-release +wget https://repo.percona.com/apt/percona-release_latest.generic_all.deb +dpkg -i percona-release_latest.generic_all.deb +sleep 10 +if [ "$mysql_version" == "8.0" ]; then + percona-release setup ps80 + sleep 
10 + DEBIAN_FRONTEND=noninteractive apt-get -y install percona-server-server sysbench bc screen +cat > /etc/mysql/my.cnf << EOF +[mysqld] +innodb_buffer_pool_size=256M +innodb_buffer_pool_instances=1 +innodb_log_file_size=1G +innodb_flush_method=O_DIRECT +innodb_numa_interleave=1 +innodb_flush_neighbors=0 +log_bin +server_id=1 +binlog_expire_logs_seconds=600 +log_output=file +slow_query_log=ON +long_query_time=0 +log_slow_rate_limit=1 +log_slow_rate_type=query +log_slow_verbosity=full +log_slow_admin_statements=ON +log_slow_slave_statements=ON +slow_query_log_always_write_time=1 +slow_query_log_use_global_control=all +innodb_monitor_enable=all +userstat=1 +bind-address=0.0.0.0 +require_secure_transport=ON +EOF + +fi + +if [ "$mysql_version" == "5.7" ]; then + percona-release setup ps57 + sleep 10 + DEBIAN_FRONTEND=noninteractive apt-get -y install percona-server-server-5.7 +cat > /etc/mysql/my.cnf << EOF +[mysqld] +innodb_buffer_pool_size=256M +innodb_buffer_pool_instances=1 +innodb_log_file_size=1G +innodb_flush_method=O_DIRECT +innodb_numa_interleave=1 +innodb_flush_neighbors=0 +log_bin +server_id=1 +expire_logs_days=1 +log_output=file +slow_query_log=ON +long_query_time=0 +log_slow_rate_limit=1 +log_slow_rate_type=query +log_slow_verbosity=full +log_slow_admin_statements=ON +log_slow_slave_statements=ON +slow_query_log_always_write_time=1 +slow_query_log_use_global_control=all +innodb_monitor_enable=all +userstat=1 +bind-address=0.0.0.0 +require_secure_transport=ON +EOF + +fi +service mysql restart +mysql -e "create user pmm@'%' identified by \"pmm\"" +mysql -e "grant all on *.* to pmm@'%'" +mysql -e "CREATE USER 'pmm_tls'@'%' REQUIRE X509" +service mysql restart diff --git a/pmm_qa/tls-ssl-setup/mysql_tls_setup.yml b/pmm_qa/tls-ssl-setup/mysql_tls_setup.yml new file mode 100644 index 00000000..e1e07b21 --- /dev/null +++ b/pmm_qa/tls-ssl-setup/mysql_tls_setup.yml @@ -0,0 +1,88 @@ +--- +# This playbook does following: +# enables Percona testing repository +# 
Install Percona Server at Version 8.0.25 +# Install all required tools for backups in compatible version + +- hosts: all + become: true + become_method: sudo + vars: + mysql_version: "{{ lookup('vars', 'extra_mysql_version', default=lookup('env','MYSQL_VERSION') | default('8.0', true) ) }}" + mysql_ssl_container: "{{ lookup('vars', 'extra_mysql_ssl_container', default=lookup('env','MYSQL_SSL_CONTAINER') | default('mysql_ssl', true) ) }}" + pmm_server_ip: "{{ lookup('vars', 'extra_pmm_server_ip', default=lookup('env','PMM_SERVER_IP') | default('127.0.0.1', true) ) }}" + client_version: "{{ lookup('vars', 'extra_client_version', default=lookup('env','CLIENT_VERSION') | default('3-dev-latest', true) ) }}" + admin_password: "{{ lookup('vars', 'extra_admin_password', default=lookup('env','ADMIN_PASSWORD') | default('pmm3admin!', true) ) }}" + pmm_qa_branch: "{{ lookup('vars', 'extra_pmm_qa_branch', default=lookup('env','PMM_QA_GIT_BRANCH') | default('main', true) ) }}" + + tasks: + - name: Cleanup Docker container for client and DB setup + shell: > + docker ps -a --filter "name={{ mysql_ssl_container }}" | grep -q . 
&& docker stop {{ mysql_ssl_container }} && docker rm -fv {{ mysql_ssl_container }} + ignore_errors: true + tags: + - cleanup + - name: delete network if exist + shell: docker network rm "{{ mysql_ssl_container }}_network" + ignore_errors: true + tags: + - cleanup + + - name: Create a network + shell: docker network create "{{ mysql_ssl_container }}_network" + + - name: Create pmm-qa network if not exist + shell: docker network create pmm-qa + ignore_errors: true + + - name: Prepare Container for mysql ssl container + shell: > + docker run -d --name={{ mysql_ssl_container }} + --network "{{ mysql_ssl_container }}_network" + phusion/baseimage:jammy-1.0.1 + + - name: Copy all required Artifacts to the docker mysql_ssl_container + shell: "{{ item }}" + with_items: + - docker exec {{ mysql_ssl_container }} mkdir -p artifacts + - docker cp ./mysql/mysql_ssl_setup.sh {{ mysql_ssl_container }}:/ + - docker cp ../pmm3-client-setup.sh {{ mysql_ssl_container }}:/ + + - name: Execute Setup script inside the mysql mysql_ssl_container + shell: "{{ item }}" + with_items: + - docker exec {{ mysql_ssl_container }} bash -xe ./mysql_ssl_setup.sh --mysql_version {{ mysql_version }} > mysql/setup_mysql_ssl_{{ mysql_version }}.log + + - name: Install pmm2-client on the mysql_ssl_container + shell: "{{ item }}" + with_items: + - docker network connect pmm-qa {{ mysql_ssl_container }} + - docker exec {{ mysql_ssl_container }} bash -x ./pmm3-client-setup.sh --pmm_server_ip {{ pmm_server_ip }} --client_version {{ client_version }} --admin_password {{ admin_password }} --use_metrics_mode no + + - name: Add pmm-admin binary to path when tar ball installation + shell: docker exec {{ mysql_ssl_container }} echo "export PATH=$PATH:/pmm2-client/bin" > setup_path.sh + when: '"http" in client_version' + + - name: Remove mysql service if already added previously + shell: "{{ item }}" + with_items: + - docker exec {{ mysql_ssl_container }} bash -c 'source ~/.bash_profile || true; pmm-admin remove 
mysql {{ mysql_ssl_container }}_service' + ignore_errors: true + + - name: Set Random Number Fact + set_fact: + random_number: "{{ (10000 | random) | int }}" + + - name: Add mysql_ssl for monitoring + shell: "{{ item }}" + with_items: + - docker exec {{ mysql_ssl_container }} bash -c 'source ~/.bash_profile || true; pmm-admin list' + - docker exec {{ mysql_ssl_container }} bash -c 'source ~/.bash_profile || true; pmm-admin add mysql --username=pmm --password=pmm --query-source=perfschema --tls --tls-skip-verify --tls-ca=/var/lib/mysql/ca.pem --tls-cert=/var/lib/mysql/client-cert.pem --tls-key=/var/lib/mysql/client-key.pem {{ mysql_ssl_container }}_ssl_service_{{ random_number }}' + + - name: Get client cert Files on host + shell: "{{ item }}" + with_items: + - mkdir -p mysql/{{ mysql_version }} || true + - docker exec {{ mysql_ssl_container }} cat /var/lib/mysql/ca.pem > mysql/{{ mysql_version }}/ca.pem + - docker exec {{ mysql_ssl_container }} cat /var/lib/mysql/client-key.pem > mysql/{{ mysql_version }}/client-key.pem + - docker exec {{ mysql_ssl_container }} cat /var/lib/mysql/client-cert.pem > mysql/{{ mysql_version }}/client-cert.pem diff --git a/pmm_qa/tls-ssl-setup/postgres/init.sql b/pmm_qa/tls-ssl-setup/postgres/init.sql new file mode 100644 index 00000000..e11a91a9 --- /dev/null +++ b/pmm_qa/tls-ssl-setup/postgres/init.sql @@ -0,0 +1,8 @@ +CREATE DATABASE sbtest1; +CREATE DATABASE sbtest2; +CREATE USER pmm WITH PASSWORD 'pmm'; +GRANT pg_monitor TO pmm; +CREATE EXTENSION pg_stat_statements; +ALTER SYSTEM SET shared_preload_libraries TO 'pg_stat_statements'; +ALTER SYSTEM SET track_activity_query_size=2048; +ALTER SYSTEM SET track_io_timing=ON; diff --git a/pmm_qa/tls-ssl-setup/postgres/setup_pgsql.sh b/pmm_qa/tls-ssl-setup/postgres/setup_pgsql.sh new file mode 100644 index 00000000..5c2d70fa --- /dev/null +++ b/pmm_qa/tls-ssl-setup/postgres/setup_pgsql.sh @@ -0,0 +1,53 @@ +#!/bin/sh + + +while [ $# -gt 0 ]; do + + if [[ $1 == *"--"* ]]; then + 
param="${1/--/}" + declare $param="$2" + fi + + shift +done + +if [ -z "$pgsql_version" ] +then + export pgsql_version=13 +fi + +apt-get update +apt-get -y install wget curl git gnupg2 lsb-release +wget https://repo.percona.com/apt/percona-release_latest.generic_all.deb +dpkg -i percona-release_latest.generic_all.deb +percona-release setup ppg${pgsql_version} +sleep 10 +pushd artifacts +bash -x create_certs.sh +popd +sleep 10 +pwd +apt -y install percona-postgresql-${pgsql_version} +apt -y install percona-postgresql-contrib +sleep 10 +sed -i 's/\(host\s*all\s*all\s*127.0.0.1.*\) md5/\1 trust/g' /etc/postgresql/${pgsql_version}/main/pg_hba.conf +sed -i 's/\(host\s*all\s*all\s*::1.*\) md5/\1 trust/g' /etc/postgresql/${pgsql_version}/main/pg_hba.conf +sed -i 's/\(local\s*all\s*postgres.*\) peer/\1 trust/g' /etc/postgresql/${pgsql_version}/main/pg_hba.conf +sed -i 's/\(local\s*all\s*all.*\) peer/\1 trust/g' /etc/postgresql/${pgsql_version}/main/pg_hba.conf +service postgresql restart +sleep 10 +cp -a ./artifacts/certificates/. 
/var/lib/postgresql/${pgsql_version}/main/ +ls -la ./artifacts/certificates/ +chown -R postgres:postgres /var/lib/postgresql/${pgsql_version}/main +chmod 0700 -R /var/lib/postgresql/${pgsql_version}/main +sed -i "s/ssl_cert_file.*/ssl_cert_file = 'server.crt'/g" /etc/postgresql/${pgsql_version}/main/postgresql.conf +sed -i "s/#listen_addresses.*/listen_addresses = '*'/g" /etc/postgresql/${pgsql_version}/main/postgresql.conf +sed -i "s/ssl_key_file.*/ssl_key_file = 'server.key'/g" /etc/postgresql/${pgsql_version}/main/postgresql.conf +sed -i "s/ssl_ca_file.*/ssl_ca_file = 'ca.crt'/g" /etc/postgresql/${pgsql_version}/main/postgresql.conf +sed -i "s/#ssl_prefer_server_ciphers.*/ssl_prefer_server_ciphers = on/g" /etc/postgresql/${pgsql_version}/main/postgresql.conf +echo "hostssl all all 0.0.0.0/0 md5" >> /etc/postgresql/${pgsql_version}/main/pg_hba.conf +echo "host all all 0.0.0.0/0 md5" >> /etc/postgresql/${pgsql_version}/main/pg_hba.conf +sleep 10 +service postgresql restart +su postgres bash -c 'psql -f init.sql' +service postgresql restart diff --git a/pmm_qa/tls-ssl-setup/postgresql_tls_setup.yml b/pmm_qa/tls-ssl-setup/postgresql_tls_setup.yml new file mode 100644 index 00000000..ff6bbc3f --- /dev/null +++ b/pmm_qa/tls-ssl-setup/postgresql_tls_setup.yml @@ -0,0 +1,87 @@ +--- +# This playbook does following: +# enables Percona testing repository +# Install Percona Server at Version 8.0.25 +# Install all required tools for backups in compatible version + +- hosts: all + become: true + become_method: sudo + vars: + pgsql_version: "{{ lookup('vars', 'extra_pgsql_version', default=lookup('env','PGSQL_VERSION') | default('13', true) ) }}" + pgsql_ssl_container: "{{ lookup('vars', 'extra_pgsql_ssl_container', default=lookup('env','PGSQL_SSL_CONTAINER') | default('pgsql_ssl', true) ) }}" + pmm_server_ip: "{{ lookup('vars', 'extra_pmm_server_ip', default=lookup('env','PMM_SERVER_IP') | default('127.0.0.1', true) ) }}" + client_version: "{{ lookup('vars', 
'extra_client_version', default=lookup('env','CLIENT_VERSION') | default('3-dev-latest', true) ) }}" + admin_password: "{{ lookup('vars', 'extra_admin_password', default=lookup('env','ADMIN_PASSWORD') | default('pmm3admin!', true) ) }}" + pmm_qa_branch: "{{ lookup('vars', 'extra_pmm_qa_branch', default=lookup('env','PMM_QA_GIT_BRANCH') | default('main', true) ) }}" + + tasks: + - name: cleanup container for client and DB setup + shell: > + docker ps -a --filter "name={{ pgsql_ssl_container }}" | grep -q . && docker stop {{ pgsql_ssl_container }} && docker rm -fv {{ pgsql_ssl_container }} + ignore_errors: true + tags: + - cleanup + - name: delete network if exist + shell: docker network rm "{{ pgsql_ssl_container }}_network" + ignore_errors: true + tags: + - cleanup + + - name: Create a network + shell: docker network create "{{ pgsql_ssl_container }}_network" + + - name: Create pmm-qa network if not exist + shell: docker network create pmm-qa + ignore_errors: true + + - name: Prepare Container for PostgreSQL + shell: > + docker run -d --name={{ pgsql_ssl_container }} + --network "{{ pgsql_ssl_container }}_network" + phusion/baseimage:jammy-1.0.1 + + - name: Copy all required Artifacts to the docker pgsql_ssl_container + shell: "{{ item }}" + with_items: + - docker exec {{ pgsql_ssl_container }} mkdir -p artifacts + - docker cp ./create_certs.sh {{ pgsql_ssl_container }}:/artifacts/ + - docker cp ./postgres/setup_pgsql.sh {{ pgsql_ssl_container }}:/ + - docker cp ./postgres/init.sql {{ pgsql_ssl_container }}:/ + - docker cp ../pmm3-client-setup.sh {{ pgsql_ssl_container }}:/ + + - name: Execute Setup script inside the pgsql pgsql_ssl_container + shell: "{{ item }}" + with_items: + - docker exec {{ pgsql_ssl_container }} bash -xe ./setup_pgsql.sh --pgsql_version {{ pgsql_version }} > postgres/setup_pgsql_{{ pgsql_version }}.log + + - name: Install pmm2-client on the pgsql_ssl_container + shell: "{{ item }}" + with_items: + - docker network connect pmm-qa {{ 
pgsql_ssl_container }} + - docker exec {{ pgsql_ssl_container }} bash -x ./pmm3-client-setup.sh --pmm_server_ip {{ pmm_server_ip }} --client_version {{ client_version }} --admin_password {{ admin_password }} --use_metrics_mode no + + - name: Remove pgsql service if already added previously + shell: "{{ item }}" + with_items: + - docker exec {{ pgsql_ssl_container }} bash -c 'source ~/.bash_profile || true; pmm-admin remove postgresql {{ pgsql_ssl_container }}_ssl_service' + ignore_errors: true + + - name: Set Random Number Fact + set_fact: + random_number: "{{ (10000 | random) | int }}" + + - name: Add pgsql_ssl for monitoring + shell: "{{ item }}" + with_items: + - docker exec {{ pgsql_ssl_container }} bash -c 'source ~/.bash_profile || true; pmm-admin list' + - docker exec {{ pgsql_ssl_container }} bash -c 'source ~/.bash_profile || true; pmm-admin add postgresql --username=pmm --password=pmm --query-source="pgstatements" --tls --tls-ca-file=./certificates/ca.crt --tls-cert-file=./certificates/client.crt --tls-key-file=./certificates/client.pem {{ pgsql_ssl_container }}_ssl_service{{ random_number }}' + # - docker exec {{ pgsql_ssl_container }} bash -c 'source ~/.bash_profile; pmm-admin add postgresql --socket=/var/run/postgresql postgresql_socket' + + - name: Get client cert Files on host + shell: "{{ item }}" + with_items: + - mkdir -p postgres/{{ pgsql_version }} + - docker exec {{ pgsql_ssl_container }} cat ./certificates/ca.crt > postgres/{{ pgsql_version }}/ca.crt + - docker exec {{ pgsql_ssl_container }} cat ./certificates/client.crt > postgres/{{ pgsql_version }}/client.crt + - docker exec {{ pgsql_ssl_container }} cat ./certificates/client.pem > postgres/{{ pgsql_version }}/client.pem diff --git a/pmm_qa/tls-ssl-setup/setup_mysql b/pmm_qa/tls-ssl-setup/setup_mysql new file mode 100644 index 00000000..66ce3320 --- /dev/null +++ b/pmm_qa/tls-ssl-setup/setup_mysql @@ -0,0 +1,84 @@ +--- +# This playbook does following: +# enables Percona testing repository 
+# Install Percona Server at Version 8.0.25 +# Install all required tools for backups in compatible version + +- hosts: all + become: true + become_method: sudo + vars: + mysql_version: "{{ lookup('vars', 'extra_mysql_version', default=lookup('env','MYSQL_VERSION') | default('8.0', true) ) }}" + mysql_ssl_container: "{{ lookup('vars', 'extra_mysql_ssl_container', default=lookup('env','MYSQL_SSL_CONTAINER') | default('mysql_ssl', true) ) }}" + pmm_server_ip: "{{ lookup('vars', 'extra_pmm_server_ip', default=lookup('env','PMM_SERVER_IP') | default('127.0.0.1', true) ) }}" + client_version: "{{ lookup('vars', 'extra_client_version', default=lookup('env','CLIENT_VERSION') | default('dev-latest', true) ) }}" + admin_password: "{{ lookup('vars', 'extra_admin_password', default=lookup('env','ADMIN_PASSWORD') | default('admin', true) ) }}" + pmm_qa_branch: "{{ lookup('vars', 'extra_pmm_qa_branch', default=lookup('env','PMM_QA_GIT_BRANCH') | default('main', true) ) }}" + + tasks: + - name: Cleanup Docker container for client and DB setup + shell: > + docker ps -a --filter "name={{ mysql_ssl_container }}" | grep -q . 
&& docker stop {{ mysql_ssl_container }} && docker rm -fv {{ mysql_ssl_container }} + ignore_errors: true + tags: + - cleanup + - name: delete network if exist + shell: docker network rm "{{ mysql_ssl_container }}_network" + ignore_errors: true + tags: + - cleanup + + - name: Create a network + shell: docker network create "{{ mysql_ssl_container }}_network" + + - name: Create pmm-qa network if not exist + shell: docker network create pmm-qa + ignore_errors: true + + - name: Prepare Container for mysql ssl container + shell: > + docker run -d --name={{ mysql_ssl_container }} + --network "{{ mysql_ssl_container }}_network" + phusion/baseimage:focal-1.1.0 + + - name: Copy all required Artifacts to the docker mysql_ssl_container + shell: "{{ item }}" + with_items: + - docker exec {{ mysql_ssl_container }} mkdir -p artifacts + - docker cp ./mysql/mysql_ssl_setup.sh {{ mysql_ssl_container }}:/ + + - name: Execute Setup script inside the mysql mysql_ssl_container + shell: "{{ item }}" + with_items: + - docker exec {{ mysql_ssl_container }} bash -xe ./mysql_ssl_setup.sh --mysql_version {{ mysql_version }} > mysql/setup_mysql_ssl_{{ mysql_version }}.log + + - name: Install pmm2-client on the mysql_ssl_container + shell: "{{ item }}" + with_items: + - docker exec {{ mysql_ssl_container }} wget https://raw.githubusercontent.com/percona/pmm-qa/{{ pmm_qa_branch }}/pmm-tests/pmm2-client-setup.sh + - docker network connect pmm-qa {{ mysql_ssl_container }} + - docker exec {{ mysql_ssl_container }} bash -x ./pmm2-client-setup.sh --pmm_server_ip {{ pmm_server_ip }} --client_version {{ client_version }} --admin_password {{ admin_password }} --use_metrics_mode no + + - name: Add pmm-admin binary to path when tar ball installation + shell: docker exec {{ mysql_ssl_container }} echo "export PATH=$PATH:/pmm2-client/bin" > setup_path.sh + when: '"http" in client_version' + + - name: Remove mysql service if already added previously + shell: "{{ item }}" + with_items: + - docker exec {{ 
mysql_ssl_container }} bash -c 'source ~/.bash_profile || true; pmm-admin remove mysql {{ mysql_ssl_container }}_service' + ignore_errors: true + + - name: Add mysql_ssl for monitoring + shell: "{{ item }}" + with_items: + - docker exec {{ mysql_ssl_container }} bash -c 'source ~/.bash_profile || true; pmm-admin list' + - docker exec {{ mysql_ssl_container }} bash -c 'source ~/.bash_profile || true; pmm-admin add mysql --username=pmm --password=pmm --query-source=perfschema --tls --tls-skip-verify --tls-ca=/var/lib/mysql/ca.pem --tls-cert=/var/lib/mysql/client-cert.pem --tls-key=/var/lib/mysql/client-key.pem {{ mysql_ssl_container }}_ssl_service' + + - name: Get client cert Files on host + shell: "{{ item }}" + with_items: + - mkdir -p mysql/{{ mysql_version }} || true + - docker exec {{ mysql_ssl_container }} cat /var/lib/mysql/ca.pem > mysql/{{ mysql_version }}/ca.pem + - docker exec {{ mysql_ssl_container }} cat /var/lib/mysql/client-key.pem > mysql/{{ mysql_version }}/client-key.pem + - docker exec {{ mysql_ssl_container }} cat /var/lib/mysql/client-cert.pem > mysql/{{ mysql_version }}/client-cert.pem diff --git a/pmm_qa/valkey/cleanup.sh b/pmm_qa/valkey/cleanup.sh new file mode 100644 index 00000000..63be4187 --- /dev/null +++ b/pmm_qa/valkey/cleanup.sh @@ -0,0 +1,15 @@ +#!/bin/bash -e + +docker exec -it pmm-server pmm-admin remove valkey valkey-primary-svc || : +docker exec -it pmm-server pmm-admin remove valkey valkey-replica1-svc || : +docker exec -it pmm-server pmm-admin remove valkey valkey-replica2-svc || : +docker exec -it pmm-server pmm-admin remove valkey sentinel1-svc || : +docker exec -it pmm-server pmm-admin remove valkey sentinel2-svc || : +docker exec -it pmm-server pmm-admin remove valkey sentinel3-svc || : + +docker rm -vf valkey-primary valkey-replica-1 valkey-replica-2 || : +docker rm -vf sentinel-1 sentinel-2 sentinel-3 || : + +docker volume rm -f valkey-primary-data valkey-replica-1-data valkey-replica-2-data || : + +rm -rf "$HOME/valkey" 
diff --git a/pmm_qa/valkey/sentinel.conf.j2 b/pmm_qa/valkey/sentinel.conf.j2 new file mode 100644 index 00000000..521b07d9 --- /dev/null +++ b/pmm_qa/valkey/sentinel.conf.j2 @@ -0,0 +1,24 @@ +# sentinel.conf +bind 0.0.0.0 + +port 26379 + +# Monitor the master +sentinel monitor valkey-primary valkey-primary 6379 {{ sentinel_quorum }} +sentinel auth-user valkey-primary default +sentinel auth-pass valkey-primary "{{ valkey_password }}" +sentinel resolve-hostnames yes + +# Failover timeouts +sentinel down-after-milliseconds valkey-primary 5000 +sentinel failover-timeout valkey-primary 10000 +sentinel parallel-syncs valkey-primary 1 + +# Security +protected-mode no + +# Logging +loglevel notice +logfile "" + +maxmemory 1gb diff --git a/pmm_qa/valkey/valkey-cluster.yml b/pmm_qa/valkey/valkey-cluster.yml new file mode 100644 index 00000000..aaa3d065 --- /dev/null +++ b/pmm_qa/valkey/valkey-cluster.yml @@ -0,0 +1,263 @@ +--- +- name: Deploy Valkey Native Cluster + hosts: localhost + gather_facts: false + vars: + pmm_server_ip: "{{ lookup('vars', 'extra_pmm_server_ip', default=lookup('env','PMM_SERVER_IP') | default('127.0.0.1', true) ) }}" + metrics_mode: "{{ lookup('env', 'metrics_mode') }}" + client_version: "{{ lookup('vars', 'extra_client_version', default=lookup('env','CLIENT_VERSION') | default('3-dev-latest', true) ) }}" + admin_password: "{{ lookup('vars', 'extra_admin_password', default=lookup('env','ADMIN_PASSWORD') | default('admin', true) ) }}" + valkey_version: "{{ lookup('env', 'VALKEY_VERSION') | default('7', true) }}" + valkey_image: "valkey/valkey:{{ valkey_version }}-bookworm" + valkey_network_name: "pmm-qa" + valkey_password: "VKvl41568AsE" + valkey_cluster_node_count: 6 # total nodes + valkey_cluster_primaries: 3 # number of primary nodes + valkey_cluster_replicas: 1 # replicas per primary + valkey_cluster_start_port: 6379 # Base host port to map sequentially + valkey_config_dir: "{{ lookup('env', 'HOME') }}/valkey/cluster-config" + 
valkey_primary_prefix: "valkey-primary-" + valkey_replica_prefix: "valkey-replica-" + pmm_server_name: "pmm-server" + + tasks: + - name: Set Random Number Fact + set_fact: + random_number: "{{ (10000 | random) | int }}" + + - name: Validate cluster counts + assert: + that: + - valkey_cluster_node_count == valkey_cluster_primaries + (valkey_cluster_primaries * valkey_cluster_replicas) + fail_msg: "Mismatch: total nodes must equal primaries + primaries*replicas" + + - name: Create Docker network + community.docker.docker_network: + name: "{{ valkey_network_name }}" + driver: bridge + state: present + + - name: Create cluster config directory + file: + path: "{{ valkey_config_dir }}" + state: directory + mode: "0755" + + - name: Create per-primary config directories + file: + path: "{{ valkey_config_dir }}/{{ valkey_primary_prefix }}{{ item }}" + state: directory + mode: "0755" + loop: "{{ range(1, valkey_cluster_primaries + 1) | list }}" + + - name: Create per-replica config directories + file: + path: "{{ valkey_config_dir }}/{{ valkey_replica_prefix }}{{ item }}" + state: directory + mode: "0755" + loop: "{{ range(valkey_cluster_primaries + 1, valkey_cluster_node_count + 1) | list }}" + + - name: Generate base configuration for each primary + copy: + dest: "{{ valkey_config_dir }}/{{ valkey_primary_prefix }}{{ item }}/valkey.conf" + mode: "0644" + content: | + port 6379 + requirepass {{ valkey_password }} + masterauth {{ valkey_password }} + protected-mode no + appendonly yes + cluster-enabled yes + cluster-config-file nodes.conf + cluster-node-timeout 5000 + # Minimal persistence & logging + save 900 1 + save 300 10 + save 60 10000 + loglevel notice + maxmemory-policy allkeys-lru + maxmemory 1gb + loop: "{{ range(1, valkey_cluster_primaries + 1) | list }}" + + - name: Generate base configuration for each replica + copy: + dest: "{{ valkey_config_dir }}/{{ valkey_replica_prefix }}{{ item }}/valkey.conf" + mode: "0644" + content: | + port 6379 + requirepass {{ 
valkey_password }} + masterauth {{ valkey_password }} + protected-mode no + appendonly yes + cluster-enabled yes + cluster-config-file nodes.conf + cluster-node-timeout 5000 + # Minimal persistence & logging + save 900 1 + save 300 10 + save 60 10000 + loglevel notice + maxmemory-policy allkeys-lru + maxmemory 1gb + loop: "{{ range(valkey_cluster_primaries + 1, valkey_cluster_node_count + 1) | list }}" + + - name: Create docker volumes for primary nodes + community.docker.docker_volume: + name: "{{ valkey_primary_prefix }}{{ item }}-data" + state: present + loop: "{{ range(1, valkey_cluster_primaries + 1) | list }}" + + - name: Create docker volumes for replica nodes + community.docker.docker_volume: + name: "{{ valkey_replica_prefix }}{{ item }}-data" + state: present + loop: "{{ range(valkey_cluster_primaries + 1, valkey_cluster_node_count + 1) | list }}" + + - name: Start primary node containers + community.docker.docker_container: + name: "{{ valkey_primary_prefix }}{{ item }}" + hostname: "{{ valkey_primary_prefix }}{{ item }}-node-{{ random_number }}" + image: "{{ valkey_image }}" + state: started + restart_policy: unless-stopped + networks: + - name: "{{ valkey_network_name }}" + ports: + - "{{ valkey_cluster_start_port + item - 1 }}:6379" + volumes: + - "{{ valkey_primary_prefix }}{{ item }}-data:/data" + - "{{ valkey_config_dir }}/{{ valkey_primary_prefix }}{{ item }}/valkey.conf:/usr/local/etc/valkey/valkey.conf:ro" + command: ["valkey-server", "/usr/local/etc/valkey/valkey.conf"] + healthcheck: + test: ["CMD", "valkey-cli", "-a", "{{ valkey_password }}", "ping"] + interval: 10s + timeout: 5s + retries: 5 + loop: "{{ range(1, valkey_cluster_primaries + 1) | list }}" + + - name: Start replica node containers + community.docker.docker_container: + name: "{{ valkey_replica_prefix }}{{ item }}" + hostname: "{{ valkey_replica_prefix }}{{ item }}-node-{{ random_number }}" + image: "{{ valkey_image }}" + state: started + restart_policy: unless-stopped + 
networks: + - name: "{{ valkey_network_name }}" + ports: + - "{{ (valkey_cluster_start_port + valkey_cluster_primaries - 1) + item }}:6379" + volumes: + - "{{ valkey_replica_prefix }}{{ item }}-data:/data" + - "{{ valkey_config_dir }}/{{ valkey_replica_prefix }}{{ item }}/valkey.conf:/usr/local/etc/valkey/valkey.conf:ro" + command: ["valkey-server", "/usr/local/etc/valkey/valkey.conf"] + healthcheck: + test: ["CMD", "valkey-cli", "-a", "{{ valkey_password }}", "ping"] + interval: 10s + timeout: 5s + retries: 5 + loop: "{{ range(valkey_cluster_primaries + 1, valkey_cluster_node_count + 1) | list }}" + + - name: Wait for primary node ports + wait_for: + host: localhost + port: "{{ valkey_cluster_start_port + item - 1 }}" + timeout: 30 + delay: 1 + loop: "{{ range(1, valkey_cluster_primaries + 1) | list }}" + + - name: Wait for replica node ports + wait_for: + host: localhost + port: "{{ (valkey_cluster_start_port + valkey_cluster_primaries - 1) + item }}" + timeout: 30 + delay: 1 + loop: "{{ range(valkey_cluster_primaries + 1, valkey_cluster_node_count + 1) | list }}" + + - name: Build list of internal container addresses + set_fact: + primary_nodes: "{{ range(1, valkey_cluster_primaries + 1) | map('string') | map('regex_replace', '^(.*)$', valkey_primary_prefix ~ '\\1') | list }}" + replica_nodes: "{{ range(valkey_cluster_primaries + 1, valkey_cluster_node_count + 1) | map('string') | map('regex_replace', '^(.*)$', valkey_replica_prefix ~ '\\1') | list }}" + + - name: Build combined list of internal container addresses + set_fact: + cluster_node_addresses: "{{ (primary_nodes | map('regex_replace', '^(.*)$', '\\1:6379') | list) + (replica_nodes | map('regex_replace', '^(.*)$', '\\1:6379') | list) }}" + + - name: Display cluster node addresses + debug: + var: cluster_node_addresses + + - name: Create the cluster (run once) + community.docker.docker_container_exec: + container: "{{ valkey_primary_prefix }}1" + command: >- + bash -c "yes 'yes' | valkey-cli --cluster 
create {{ cluster_node_addresses | join(' ') }} --cluster-replicas {{ valkey_cluster_replicas }} -a {{ valkey_password }}" + register: cluster_create_output + changed_when: "'[OK]' in cluster_create_output.stdout" + + - name: Show cluster creation output + debug: + msg: "{{ cluster_create_output.stdout_lines }}" + + - name: Check cluster info on first primary + community.docker.docker_container_exec: + container: "{{ valkey_primary_prefix }}1" + command: valkey-cli -a "{{ valkey_password }}" cluster info + register: cluster_info + + - name: Display cluster info + debug: + msg: "{{ cluster_info.stdout_lines }}" + + - name: Install PMM Client in each container + ansible.builtin.include_tasks: ../tasks/install_pmm_client.yml + loop: "{{ primary_nodes + replica_nodes }}" + loop_control: + loop_var: current_container_name + vars: + container_name: "{{ current_container_name }}" + + - name: Add primary nodes to monitoring + community.docker.docker_container_exec: + container: "{{ item }}" + command: >- + pmm-admin add valkey --cluster=valkey-native-cluster --environment=valkey-test --username=default + --password="{{ valkey_password }}" --service-name={{ item }}-svc-{{ random_number }} + --host={{ item }} --port=6379 --custom-labels='role=primary' + loop: "{{ primary_nodes }}" + ignore_errors: yes + + - name: Add replica nodes to monitoring + community.docker.docker_container_exec: + container: "{{ item }}" + command: >- + pmm-admin add valkey --cluster=valkey-native-cluster --environment=valkey-test --username=default + --password="{{ valkey_password }}" --service-name={{ item }}-svc-{{ random_number }} + --host={{ item }} --port=6379 --custom-labels='role=replica' + loop: "{{ replica_nodes }}" + ignore_errors: yes + + - name: Seed sample list data on first primary + community.docker.docker_container_exec: + container: "{{ valkey_primary_prefix }}1" + command: valkey-cli -a "{{ valkey_password }}" RPUSH mylist "one" "two" "three" "four" "five" + + - name: Pop one item 
from sample list on first primary + community.docker.docker_container_exec: + container: "{{ valkey_primary_prefix }}1" + command: valkey-cli -a "{{ valkey_password }}" RPOP mylist + + - name: Generate workload on primary nodes (latency metrics) + community.docker.docker_container_exec: + container: "{{ item }}" + command: >- + bash -c "for i in $(seq 1 50); do valkey-cli -a {{ valkey_password }} SET k$i v$i >/dev/null; valkey-cli -a {{ valkey_password }} GET k$i >/dev/null; valkey-cli -a {{ valkey_password }} HSET h$i f v >/dev/null; valkey-cli -a {{ valkey_password }} LPUSH l$i a b c >/dev/null; valkey-cli -a {{ valkey_password }} RPUSH l$i d e f >/dev/null; valkey-cli -a {{ valkey_password }} LRANGE l$i 0 -1 >/dev/null; valkey-cli -a {{ valkey_password }} LPOP l$i >/dev/null || true; valkey-cli -a {{ valkey_password }} RPOP l$i >/dev/null || true; done" + loop: "{{ primary_nodes }}" + ignore_errors: yes + + - name: Generate read workload on replica nodes (latency metrics) + community.docker.docker_container_exec: + container: "{{ item }}" + command: >- + bash -c "for i in $(seq 1 50); do valkey-cli -a {{ valkey_password }} GET k$i >/dev/null || true; valkey-cli -a {{ valkey_password }} LRANGE l$i 0 -1 >/dev/null || true; done" + loop: "{{ replica_nodes }}" + ignore_errors: yes diff --git a/pmm_qa/valkey/valkey-primary.conf.j2 b/pmm_qa/valkey/valkey-primary.conf.j2 new file mode 100644 index 00000000..04a8f158 --- /dev/null +++ b/pmm_qa/valkey/valkey-primary.conf.j2 @@ -0,0 +1,28 @@ +# Basic Valkey configuration for primary +bind 0.0.0.0 +port 6379 + +requirepass "{{ valkey_password }}" +masterauth "{{ valkey_password }}" + +# Persistence +save 900 1 +save 300 10 +save 60 10000 + +# Replication +replica-serve-stale-data yes +replica-read-only yes +repl-diskless-sync no +repl-diskless-sync-delay 5 + +# Security +protected-mode no + +# Logging +loglevel notice +logfile "" + +# Memory management +maxmemory-policy allkeys-lru +maxmemory 1gb diff --git 
a/pmm_qa/valkey/valkey-replica.conf.j2 b/pmm_qa/valkey/valkey-replica.conf.j2 new file mode 100644 index 00000000..b2e255c3 --- /dev/null +++ b/pmm_qa/valkey/valkey-replica.conf.j2 @@ -0,0 +1,29 @@ +# Basic Valkey configuration for replica +bind 0.0.0.0 +port 6379 + +requirepass "{{ valkey_password }}" +masterauth "{{ valkey_password }}" + +# Replication +replicaof valkey-primary 6379 +replica-serve-stale-data yes +replica-read-only yes +repl-diskless-sync no +repl-diskless-sync-delay 5 + +# Persistence +save 900 1 +save 300 10 +save 60 10000 + +# Security +protected-mode no + +# Logging +loglevel notice +logfile "" + +# Memory management +maxmemory-policy allkeys-lru +maxmemory 1gb diff --git a/pmm_qa/valkey/valkey-sentinel.yml b/pmm_qa/valkey/valkey-sentinel.yml new file mode 100644 index 00000000..fe838d8e --- /dev/null +++ b/pmm_qa/valkey/valkey-sentinel.yml @@ -0,0 +1,254 @@ +--- +- name: Deploy Valkey Cluster with Sentinel + hosts: localhost + gather_facts: false + vars: + pmm_server_ip: "{{ lookup('vars', 'extra_pmm_server_ip', default=lookup('env','PMM_SERVER_IP') | default('127.0.0.1', true) ) }}" + metrics_mode: "{{ lookup('env', 'metrics_mode') }}" + client_version: "{{ lookup('vars', 'extra_client_version', default=lookup('env','CLIENT_VERSION') | default('3-dev-latest', true) ) }}" + admin_password: "{{ lookup('vars', 'extra_admin_password', default=lookup('env','ADMIN_PASSWORD') | default('admin', true) ) }}" + valkey_version: "{{ lookup('env', 'VALKEY_VERSION') | default('7', true) }}" + valkey_image: "valkey/valkey:{{ valkey_version }}-bookworm" + valkey_network_name: "pmm-qa" + valkey_password: "VKvl41568AsE" + valkey_data_dir: "{{ lookup('env', 'HOME') }}/valkey/data" + valkey_config_dir: "{{ lookup('env', 'HOME') }}/valkey/config" + valkey_primary_port: 6379 + valkey_replica_count: 2 + valkey_replica_start_port: 6380 + sentinel_count: 3 + sentinel_start_port: 26379 + sentinel_quorum: 2 + + pmm_server_name: "pmm-server" + + tasks: + - name: Set 
Random Number Fact + set_fact: + random_number: "{{ (10000 | random) | int }}" + + - name: Create Docker network + community.docker.docker_network: + name: "{{ valkey_network_name }}" + driver: bridge + state: present + + - name: Create config directory + file: + path: "{{ valkey_config_dir }}" + state: directory + mode: "0755" + + - name: Create a config directory per Sentinel + file: + path: "{{ valkey_config_dir }}/sentinel-{{ item }}" + state: directory + mode: "0755" + loop: "{{ range(1, sentinel_count + 1) | list }}" + + - name: Create data directory + file: + path: "{{ valkey_data_dir }}" + state: directory + mode: "0755" + + - name: Generate Valkey primary configuration + template: + src: valkey-primary.conf.j2 + dest: "{{ valkey_config_dir }}/valkey-primary.conf" + mode: "0644" + + - name: Generate Valkey replica configurations + template: + src: valkey-replica.conf.j2 + dest: "{{ valkey_config_dir }}/valkey-replica-{{ item }}.conf" + mode: "0644" + loop: "{{ range(1, valkey_replica_count + 1) | list }}" + + - name: Generate Sentinel configurations + template: + src: sentinel.conf.j2 + dest: "{{ valkey_config_dir }}/sentinel-{{ item }}/sentinel.conf" + mode: "0664" + loop: "{{ range(1, sentinel_count + 1) | list }}" + + - name: Create Docker volume for primary data + community.docker.docker_volume: + name: "valkey-primary-data" + state: present + + - name: Create Docker volumes for replica data + community.docker.docker_volume: + name: "valkey-replica-{{ item }}-data" + state: present + loop: "{{ range(1, valkey_replica_count + 1) | list }}" + + - name: Start Valkey primary container + community.docker.docker_container: + name: "valkey-primary" + hostname: "valkey-primary-node-{{ random_number }}" + image: "{{ valkey_image }}" + state: started + restart_policy: unless-stopped + networks: + - name: "{{ valkey_network_name }}" + ports: + - "{{ valkey_primary_port }}:6379" + volumes: + - "valkey-primary-data:/data" + - "{{ valkey_config_dir 
}}/valkey-primary.conf:/usr/local/etc/valkey/valkey.conf:ro" + command: ["valkey-server", "/usr/local/etc/valkey/valkey.conf"] + healthcheck: + test: ["CMD", "valkey-cli", "-a", "{{ valkey_password }}", "ping"] + interval: 10s + timeout: 5s + retries: 5 + + - name: Wait for the primary to be ready + wait_for: + host: localhost + port: "{{ valkey_primary_port }}" + timeout: 30 + delay: 1 + + - name: Start Valkey replica containers + community.docker.docker_container: + name: "valkey-replica-{{ item }}" + hostname: "valkey-replica-{{ item }}-node-{{ random_number }}" + image: "{{ valkey_image }}" + state: started + restart_policy: unless-stopped + networks: + - name: "{{ valkey_network_name }}" + ports: + - "{{ valkey_replica_start_port + item - 1 }}:6379" + volumes: + - "valkey-replica-{{ item }}-data:/data" + - "{{ valkey_config_dir }}/valkey-replica-{{ item }}.conf:/usr/local/etc/valkey/valkey.conf:ro" + command: ["valkey-server", "/usr/local/etc/valkey/valkey.conf"] + healthcheck: + test: ["CMD", "valkey-cli", "-a", "{{ valkey_password }}", "ping"] + interval: 10s + timeout: 5s + retries: 5 + loop: "{{ range(1, valkey_replica_count + 1) | list }}" + + - name: Wait for replicas to be ready + wait_for: + host: localhost + port: "{{ valkey_replica_start_port + item - 1 }}" + timeout: 30 + delay: 1 + loop: "{{ range(1, valkey_replica_count + 1) | list }}" + + - name: Start Sentinel containers + community.docker.docker_container: + name: "sentinel-{{ item }}" + hostname: "sentinel-{{ item }}-node-{{ random_number }}" + image: "{{ valkey_image }}" + state: started + restart_policy: unless-stopped + networks: + - name: "{{ valkey_network_name }}" + ports: + - "{{ sentinel_start_port + item - 1 }}:26379" + volumes: + - "{{ valkey_config_dir }}/sentinel-{{ item }}:/usr/local/etc/valkey" + command: ["valkey-sentinel", "/usr/local/etc/valkey/sentinel.conf"] + healthcheck: + test: ["CMD", "valkey-cli", "-p", "{{ sentinel_start_port }}", "ping"] + interval: 10s + timeout: 5s 
+ retries: 5 + loop: "{{ range(1, sentinel_count + 1) | list }}" + + - name: Wait for Sentinels to be ready + wait_for: + host: localhost + port: "{{ sentinel_start_port + item - 1 }}" + timeout: 30 + delay: 1 + loop: "{{ range(1, sentinel_count + 1) | list }}" + + - name: Verify cluster status + community.docker.docker_container_exec: + container: "valkey-primary" + command: valkey-cli -a "{{ valkey_password }}" info replication + register: cluster_status + + - name: Display cluster status + debug: + msg: "{{ cluster_status.stdout_lines }}" + + - name: Run Sentinel status command + community.docker.docker_container_exec: + container: "sentinel-1" + command: valkey-cli -p {{ sentinel_start_port }} sentinel masters + register: sentinel_status + + - name: Display Sentinel status + debug: + msg: "{{ sentinel_status.stdout_lines }}" + + - name: Install PMM Client in each container + ansible.builtin.include_tasks: ../tasks/install_pmm_client.yml + loop: >- + {{ ['valkey-primary'] + + (range(1, valkey_replica_count + 1) | map('string') | map('regex_replace', '^(.*)$', 'valkey-replica-\1') | list) + + (range(1, sentinel_count + 1) | map('string') | map('regex_replace', '^(.*)$', 'sentinel-\1') | list) + }} + loop_control: + loop_var: current_container_name + vars: + container_name: "{{ current_container_name }}" + - name: Add the primary to monitoring + community.docker.docker_container_exec: + container: "valkey-primary" + command: >- + pmm-admin add valkey --cluster=valkey-cluster --replication-set=valkey-repl --environment=valkey-test + --username=default --password="{{ valkey_password }}" --service-name=valkey-primary-svc-{{ random_number }} + --host=valkey-primary --port=6379 --custom-labels='role=primary' + ignore_errors: yes + - name: Add the replicas to monitoring + community.docker.docker_container_exec: + container: "valkey-replica-{{ item }}" + command: >- + pmm-admin add valkey --cluster=valkey-cluster --replication-set=valkey-repl --environment=valkey-test + 
--username=default --password="{{ valkey_password }}" --service-name=valkey-replica{{ item }}-svc-{{ random_number }} + --host=valkey-replica-{{ item }} --port=6379 --custom-labels='role=replica' + loop: "{{ range(1, valkey_replica_count + 1) | list }}" + ignore_errors: yes + - name: Add Sentinels to monitoring + community.docker.docker_container_exec: + container: "sentinel-{{ item }}" + command: >- + pmm-admin add valkey --cluster=valkey-cluster --environment=valkey-test --username=default + --password="{{ valkey_password }}" --service-name=sentinel{{ item }}-svc-{{ random_number }} + --host=sentinel-{{ item }} --port={{ sentinel_start_port }} --custom-labels='role=sentinel' + loop: "{{ range(1, sentinel_count + 1) | list }}" + ignore_errors: yes + + - name: Seed sample list data on primary (sentinel setup) + community.docker.docker_container_exec: + container: "valkey-primary" + command: valkey-cli -a "{{ valkey_password }}" RPUSH mylist "one" "two" "three" "four" "five" + + - name: Pop one item from sample list (sentinel setup) + community.docker.docker_container_exec: + container: "valkey-primary" + command: valkey-cli -a "{{ valkey_password }}" RPOP mylist + + - name: Generate workload on primary (sentinel setup) + community.docker.docker_container_exec: + container: "valkey-primary" + command: >- + bash -c "for i in $(seq 1 50); do valkey-cli -a {{ valkey_password }} SET k$i v$i >/dev/null; valkey-cli -a {{ valkey_password }} GET k$i >/dev/null; valkey-cli -a {{ valkey_password }} HSET h$i f v >/dev/null; valkey-cli -a {{ valkey_password }} LPUSH l$i a b c >/dev/null; valkey-cli -a {{ valkey_password }} RPUSH l$i d e f >/dev/null; valkey-cli -a {{ valkey_password }} LRANGE l$i 0 -1 >/dev/null; valkey-cli -a {{ valkey_password }} LPOP l$i >/dev/null || true; valkey-cli -a {{ valkey_password }} RPOP l$i >/dev/null || true; done" + ignore_errors: yes + + - name: Generate read workload on replicas (sentinel setup) + community.docker.docker_container_exec: + 
container: "valkey-replica-{{ item }}" + command: >- + bash -c "for i in $(seq 1 50); do valkey-cli -a {{ valkey_password }} GET k$i >/dev/null || true; valkey-cli -a {{ valkey_password }} LRANGE l$i 0 -1 >/dev/null || true; done" + loop: "{{ range(1, valkey_replica_count + 1) | list }}" + ignore_errors: yes