diff --git a/jobs/director/monit b/jobs/director/monit index f21140259b8..d418410cc6c 100644 --- a/jobs/director/monit +++ b/jobs/director/monit @@ -7,8 +7,17 @@ check process director <% (1..(p('director.workers'))).each do |index| %> check process worker_<%= index %> with pidfile /var/vcap/sys/run/director/worker_<%= index %>.pid - start program "/var/vcap/jobs/director/bin/worker_ctl start <%= index %>" - stop program "/var/vcap/jobs/director/bin/worker_ctl stop <%= index %>" + start program "/var/vcap/jobs/director/bin/worker_ctl start worker_<%= index %>" + stop program "/var/vcap/jobs/director/bin/worker_ctl stop worker_<%= index %>" + group vcap + depends on director +<% end %> + +<% (1..(p('director.dynamic_disks_workers'))).each do |index| %> +check process dynamic_disks_worker_<%= index %> + with pidfile /var/vcap/sys/run/director/dynamic_disks_worker_<%= index %>.pid + start program "/var/vcap/jobs/director/bin/worker_ctl start dynamic_disks_worker_<%= index %> dynamic_disks" + stop program "/var/vcap/jobs/director/bin/worker_ctl stop dynamic_disks_worker_<%= index %> dynamic_disks" group vcap depends on director <% end %> diff --git a/jobs/director/spec b/jobs/director/spec index a7c88883521..32a39767ed6 100644 --- a/jobs/director/spec +++ b/jobs/director/spec @@ -75,6 +75,9 @@ properties: director.workers: description: Number of director workers default: 3 + director.dynamic_disks_workers: + description: Number of director workers dedicated for dynamic disks jobs + default: 0 director.enable_dedicated_status_worker: description: "Separate worker for 'bosh vms' and 'bosh ssh'" default: false diff --git a/jobs/director/templates/drain b/jobs/director/templates/drain index 8965f88989d..0ea66e40d70 100755 --- a/jobs/director/templates/drain +++ b/jobs/director/templates/drain @@ -18,7 +18,7 @@ mkdir -p $LOG_DIR export BOSH_DIRECTOR_LOG_FILE=$LOG_DIR/drain.workers.stdout.log stop_worker() { - pidfile=/var/vcap/sys/run/director/worker_$1.pid + pidfile=/var/vcap/sys/run/director/$1.pid if [ -f "$pidfile" ] ; then pid="$( cat "$pidfile" )" @@ -33,16 +33,25 @@ stop_dedicated_worker () { stop_worker "$@"; } <% (1..p('director.workers')).each do |index| %> <% if index > dedicated_workers %> - stop_worker <%= index %> + stop_worker worker_<%= index %> <% end %> <% end %> +<% (1..p('director.dynamic_disks_workers')).each do |index| %> + stop_worker dynamic_disks_worker_<%= index %> +<% end %> + <% if dedicated_workers > 0 %> /var/vcap/packages/director/bin/bosh-director-drain-workers -c /var/vcap/jobs/director/config/director.yml --queue normal \ 2>>$LOG_DIR/drain.stderr.log + <% if p('director.dynamic_disks_workers') > 0 %> + /var/vcap/packages/director/bin/bosh-director-drain-workers -c /var/vcap/jobs/director/config/director.yml --queue dynamic_disks \ + 2>>$LOG_DIR/drain.stderr.log + <% end %> + <% (1..dedicated_workers).each do |index| %> - stop_dedicated_worker <%= index %> + stop_dedicated_worker worker_<%= index %> <% end %> <% end %> diff --git a/jobs/director/templates/ps_utils.sh b/jobs/director/templates/ps_utils.sh index d59d08cd26f..fb588e81bed 100755 --- a/jobs/director/templates/ps_utils.sh +++ b/jobs/director/templates/ps_utils.sh @@ -22,7 +22,7 @@ function kill_process { function list_child_processes { ps -eo pid,command | grep bosh-director-worker | - grep -- "-i $1" | + grep -- "-n $1" | awk '{print $1}' | grep -v ^$1$ } diff --git a/jobs/director/templates/worker_ctl.erb b/jobs/director/templates/worker_ctl.erb index 15675695ffb..31367e005ba 100755 --- 
a/jobs/director/templates/worker_ctl.erb +++ b/jobs/director/templates/worker_ctl.erb @@ -1,10 +1,10 @@ #!/bin/bash -INDEX=$2 +NAME=$2 RUN_DIR=/var/vcap/sys/run/director LOG_DIR=/var/vcap/sys/log/director -PIDFILE=$RUN_DIR/worker_$INDEX.pid +PIDFILE=${RUN_DIR}/${NAME}.pid RUNAS=vcap # Postgres @@ -25,9 +25,9 @@ export LANG=en_US.UTF-8 export TMPDIR=/var/vcap/data/director/tmp -export QUEUE="normal,urgent" +export QUEUE="${3:-normal,urgent}" <% if (p('director.enable_dedicated_status_worker')) && (p('director.workers') > 1) %> -if [ $INDEX -eq 1 ]; then +if [ "${NAME}" = "worker_1" ]; then export QUEUE="urgent" fi <% end %> @@ -49,15 +49,15 @@ case $1 in exec chpst -u $RUNAS:$RUNAS \ /var/vcap/packages/director/bin/bosh-director-worker \ - -c /var/vcap/jobs/director/config/director.yml -i $INDEX \ - >>$LOG_DIR/worker_$INDEX.stdout.log \ - 2>>$LOG_DIR/worker_$INDEX.stderr.log + -c /var/vcap/jobs/director/config/director.yml -n $NAME \ + >>$LOG_DIR/${NAME}.stdout.log \ + 2>>$LOG_DIR/${NAME}.stderr.log ;; stop) PID=$(head -1 $PIDFILE) kill_process $PID # prevent the parent from fork()ing new children - for CHILD in $(list_child_processes $INDEX); do + for CHILD in $(list_child_processes $NAME); do kill_process $CHILD done rm -f $PIDFILE diff --git a/spec/director_templates_spec.rb b/spec/director_templates_spec.rb index 9534c7b3731..d69774e548b 100644 --- a/spec/director_templates_spec.rb +++ b/spec/director_templates_spec.rb @@ -271,25 +271,52 @@ let(:rendered_template) { template.render(properties) } let(:enable_dedicated_status_worker) { false } + let(:dynamic_disks_workers) { 0 } let(:properties) do properties = default_properties.dup properties['director']['enable_dedicated_status_worker'] = enable_dedicated_status_worker + properties['director']['dynamic_disks_workers'] = dynamic_disks_workers properties end - it 'renders' do - expect(rendered_template).to match(/.+stop_worker 1.+stop_worker 2.+stop_worker 3/m) - expect(rendered_template).to include('bosh-director-drain-workers') + it 'renders to drain all jobs' do + expect(rendered_template).to match(/.+stop_worker worker_1.+stop_worker worker_2.+stop_worker worker_3/m) + expect(rendered_template).to include('bosh-director-drain-workers -c /var/vcap/jobs/director/config/director.yml') + expect(rendered_template).to_not include('--queue normal') + expect(rendered_template).to_not include('--queue dynamic_disks') + end + + context 'dynamic disks workers' do + let(:dynamic_disks_workers) { 2 } + + it 'renders to drain all jobs' do + expect(rendered_template).to match(/.+stop_worker worker_2.+stop_worker worker_3.+.+stop_worker dynamic_disks_worker_1.+stop_worker dynamic_disks_worker_2/m) + + expect(rendered_template).to include('bosh-director-drain-workers -c /var/vcap/jobs/director/config/director.yml') + expect(rendered_template).to_not include('--queue normal') + expect(rendered_template).to_not include('--queue dynamic_disks') + end end context 'dedicated status workers' do let(:enable_dedicated_status_worker) { true } - it 'renders' do - expect(rendered_template).to match(/.+stop_worker 2.+stop_worker 3.+stop_dedicated_worker 1/m) + it 'renders to drain normal jobs and then the rest' do + expect(rendered_template).to match(/.+stop_worker worker_2.+stop_worker worker_3.+stop_dedicated_worker worker_1/m) + + expect(rendered_template).to include('bosh-director-drain-workers -c /var/vcap/jobs/director/config/director.yml --queue normal') + expect(rendered_template).to_not include('bosh-director-drain-workers -c 
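Note on the QUEUE change above: the "${3:-normal,urgent}" default plus the optional third ctl argument is what routes the new dynamic_disks workers to the job classes added later in this diff, which declare @queue = :dynamic_disks. A rough, illustrative sketch of the queue filtering this relies on (delayed_job selects jobs by queue name; the standalone invocation below is hypothetical, not part of the patch):

    # Illustration only: a worker started with QUEUE="dynamic_disks" dequeues
    # only jobs whose class sets @queue = :dynamic_disks.
    require 'delayed_job'

    queues = ENV.fetch('QUEUE', 'normal,urgent').split(',')
    Delayed::Worker.new(queues: queues).start  # hypothetical standalone invocation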
/var/vcap/jobs/director/config/director.yml --queue dynamic_disks') + end + + context 'dynamic disks workers' do + let(:dynamic_disks_workers) { 2 } - expect(rendered_template).to include('--queue normal') - expect(rendered_template).to include('bosh-director-drain-workers') + it 'renders to drain normal jobs, then dynamic_disks jobs and then the rest' do + expect(rendered_template).to match(/.+stop_worker worker_2.+stop_worker worker_3.+.+stop_worker dynamic_disks_worker_1.+stop_worker dynamic_disks_worker_2.+stop_dedicated_worker worker_1/m) + + expect(rendered_template).to include('bosh-director-drain-workers -c /var/vcap/jobs/director/config/director.yml --queue normal') + expect(rendered_template).to include('bosh-director-drain-workers -c /var/vcap/jobs/director/config/director.yml --queue dynamic_disks') + end end end end @@ -438,6 +465,30 @@ end end end + + describe 'worker_ctl' do + let(:template) { job.template('bin/worker_ctl') } + let(:rendered_template) { template.render(properties) } + + let(:enable_dedicated_status_worker) { false } + let(:properties) do + properties = default_properties.dup + properties['director']['enable_dedicated_status_worker'] = enable_dedicated_status_worker + properties + end + + it 'renders' do + expect(rendered_template).to include('export QUEUE="${3:-normal,urgent}"') + end + + context 'dedicated status workers' do + let(:enable_dedicated_status_worker) { true } + + it 'renders' do + expect(rendered_template).to include('export QUEUE="urgent"') + end + end + end end end diff --git a/spec/support/ps_utils_tests.sh b/spec/support/ps_utils_tests.sh index b51dcbc7797..dbf4af90d43 100755 --- a/spec/support/ps_utils_tests.sh +++ b/spec/support/ps_utils_tests.sh @@ -39,14 +39,14 @@ function test_kill_process { function test_list_child_processes { function ps { cat < false, :key => [:id] + foreign_key :vm_id, :vms, :key => [:id], :on_delete => :set_null + column :disk_cid, 'varchar(255)', :null => false + column :disk_hint_json, 'varchar(255)' + column :name, 'varchar(255)', :null => false + column :disk_pool_name, 'varchar(255)', :null => false + column :cpi, 'varchar(255)', :default => '' + column :size, 'integer', :null => false + column :metadata_json, 'text' + column :availability_zone, 'varchar(255)' + index [:name], :unique => true + end + when :mysql2 + create_table(:dynamic_disks) do + primary_key :id + foreign_key :deployment_id, :deployments, :null => false, :key => [:id] + foreign_key :vm_id, :vms, :key => [:id], :on_delete => :set_null + column :disk_cid, 'varchar(255)', :null => false + column :disk_hint_json, 'varchar(255)' + column :name, 'varchar(255)', :null => false + column :disk_pool_name, 'varchar(255)', :null => false + column :cpi, 'varchar(255)', :default => '' + column :size, 'integer', :null => false + column :metadata_json, 'longtext' + column :availability_zone, 'varchar(255)' + index [:name], :unique => true + end + when :sqlite + create_table(:dynamic_disks) do + primary_key :id + foreign_key :deployment_id, :deployments, :null => false, :key => [:id] + foreign_key :vm_id, :vms, :key => [:id], :on_delete => :set_null + column :disk_cid, 'varchar(255)', :null => false + column :disk_hint_json, 'varchar(255)' + column :name, 'varchar(255)', :null => false + column :disk_pool_name, 'varchar(255)', :null => false + column :cpi, 'varchar(255)', :default => '' + column :size, 'integer', :null => false + column :metadata_json, 'TEXT' + column :availability_zone, 'varchar(255)' + index [:name], :unique => true + end + else + raise 
"Unknown adapter_scheme: #{adapter_scheme}" + end + end + + down do + delete_table(:dynamic_disks) + end +end diff --git a/src/bosh-director/lib/bosh/director.rb b/src/bosh-director/lib/bosh/director.rb index 8696bb2d36a..f38efe42f7d 100644 --- a/src/bosh-director/lib/bosh/director.rb +++ b/src/bosh-director/lib/bosh/director.rb @@ -128,6 +128,7 @@ module Director require 'bosh/director/instance_updater/state_applier' require 'bosh/director/instance_updater/update_procedure' require 'bosh/director/disk_manager' +require 'bosh/director/disk_deleter' require 'bosh/director/orphan_disk_manager' require 'bosh/director/orphan_network_manager' require 'bosh/director/stopper' @@ -229,6 +230,9 @@ module Director require 'bosh/director/jobs/helpers' require 'bosh/director/jobs/db_job' require 'bosh/director/jobs/orphan_disk' +require 'bosh/director/jobs/dynamic_disks/provide_dynamic_disk' +require 'bosh/director/jobs/dynamic_disks/detach_dynamic_disk' +require 'bosh/director/jobs/dynamic_disks/delete_dynamic_disk' require 'bosh/director/models/helpers/model_helper' @@ -251,6 +255,7 @@ module Bosh::Director require 'bosh/director/api/controllers/deployments_controller' require 'bosh/director/api/controllers/disks_controller' require 'bosh/director/api/controllers/director_controller' +require 'bosh/director/api/controllers/dynamic_disks_controller' require 'bosh/director/api/controllers/networks_controller' require 'bosh/director/api/controllers/orphan_disks_controller' require 'bosh/director/api/controllers/orphaned_vms_controller' diff --git a/src/bosh-director/lib/bosh/director/agent_client.rb b/src/bosh-director/lib/bosh/director/agent_client.rb index c71d1fa72d6..8cbcaba5297 100644 --- a/src/bosh-director/lib/bosh/director/agent_client.rb +++ b/src/bosh-director/lib/bosh/director/agent_client.rb @@ -115,6 +115,14 @@ def remove_persistent_disk(*args) safe_send_message(:remove_persistent_disk, *args) end + def add_dynamic_disk(*args) + safe_send_message(:add_dynamic_disk, *args) + end + + def remove_dynamic_disk(*args) + safe_send_message(:remove_dynamic_disk, *args) + end + def shutdown fire_and_forget(:shutdown) end diff --git a/src/bosh-director/lib/bosh/director/api/controllers/dynamic_disks_controller.rb b/src/bosh-director/lib/bosh/director/api/controllers/dynamic_disks_controller.rb new file mode 100644 index 00000000000..bb940ccda8b --- /dev/null +++ b/src/bosh-director/lib/bosh/director/api/controllers/dynamic_disks_controller.rb @@ -0,0 +1,54 @@ +require 'bosh/director/api/controllers/base_controller' + +module Bosh::Director + module Api::Controllers + class DynamicDisksController < BaseController + include ValidationHelper + + post '/provide', scope: :update_dynamic_disks, consumes: :json do + request_hash = JSON.parse(request.body.read) + + instance_id = safe_property(request_hash, 'instance_id', class: String, min_length: 1) + disk_name = safe_property(request_hash, 'disk_name', class: String, min_length: 1) + disk_pool_name = safe_property(request_hash, 'disk_pool_name', class: String, min_length: 1) + disk_size = safe_property(request_hash, 'disk_size', class: Integer, min: 1) + metadata = safe_property(request_hash, 'metadata', class: Hash, optional: true) + + task = JobQueue.new.enqueue( + current_user, + Jobs::DynamicDisks::ProvideDynamicDisk, + 'provide dynamic disk', + [instance_id, disk_name, disk_pool_name, disk_size, metadata] + ) + + redirect "/tasks/#{task.id}" + end + + post '/:disk_name/detach', scope: :update_dynamic_disks do + disk_name = safe_property(params, 
'disk_name', class: String, min_length: 1) + + task = JobQueue.new.enqueue( + current_user, + Jobs::DynamicDisks::DetachDynamicDisk, + 'detach dynamic disk', + [disk_name] + ) + + redirect "/tasks/#{task.id}" + end + + delete '/:disk_name', scope: :delete_dynamic_disks do + disk_name = safe_property(params, 'disk_name', class: String, min_length: 1) + + task = JobQueue.new.enqueue( + current_user, + Jobs::DynamicDisks::DeleteDynamicDisk, + 'delete dynamic disk', + [disk_name] + ) + + redirect "/tasks/#{task.id}" + end + end + end +end diff --git a/src/bosh-director/lib/bosh/director/api/route_configuration.rb b/src/bosh-director/lib/bosh/director/api/route_configuration.rb index 10098a00215..b93e3c7389c 100644 --- a/src/bosh-director/lib/bosh/director/api/route_configuration.rb +++ b/src/bosh-director/lib/bosh/director/api/route_configuration.rb @@ -18,6 +18,7 @@ def controllers controllers['/deployment_configs'] = Bosh::Director::Api::Controllers::DeploymentConfigsController.new(@config) controllers['/disks'] = Bosh::Director::Api::Controllers::DisksController.new(@config) controllers['/director'] = Bosh::Director::Api::Controllers::DirectorController.new(@config) + controllers['/dynamic_disks'] = Bosh::Director::Api::Controllers::DynamicDisksController.new(@config) controllers['/networks'] = Bosh::Director::Api::Controllers::NetworksController.new(@config) controllers['/orphan_disks'] = Bosh::Director::Api::Controllers::OrphanDisksController.new(@config) controllers['/orphaned_vms'] = Bosh::Director::Api::Controllers::OrphanedVmsController.new(@config) diff --git a/src/bosh-director/lib/bosh/director/cloudcheck_helper.rb b/src/bosh-director/lib/bosh/director/cloudcheck_helper.rb index 617d8ddb25d..fbb4ea32979 100644 --- a/src/bosh-director/lib/bosh/director/cloudcheck_helper.rb +++ b/src/bosh-director/lib/bosh/director/cloudcheck_helper.rb @@ -1,5 +1,7 @@ module Bosh::Director module CloudcheckHelper + include LockHelper + # Helper functions that come in handy for # cloudcheck: # 1. VM/agent interactions @@ -10,7 +12,7 @@ def reboot_vm(instance) vm = instance.active_vm cloud = CloudFactory.create.get(vm.cpi) - cloud.reboot_vm(vm.cid) + with_vm_lock(vm.cid) { cloud.reboot_vm(vm.cid) } begin agent_client(instance.agent_id, instance.name).wait_until_ready diff --git a/src/bosh-director/lib/bosh/director/deployment_deleter.rb b/src/bosh-director/lib/bosh/director/deployment_deleter.rb index cfe7e203071..011f14355d5 100644 --- a/src/bosh-director/lib/bosh/director/deployment_deleter.rb +++ b/src/bosh-director/lib/bosh/director/deployment_deleter.rb @@ -8,7 +8,7 @@ def initialize(event_log, logger, max_threads) @variables_interpolator = ConfigServer::VariablesInterpolator.new end - def delete(deployment_model, instance_deleter, vm_deleter) + def delete(deployment_model, instance_deleter, vm_deleter, disk_deleter) instance_plans = deployment_model.instances.map do |instance_model| DeploymentPlan::InstancePlan.new( existing_instance: instance_model, @@ -41,6 +41,11 @@ def delete(deployment_model, instance_deleter, vm_deleter) end end + event_log_stage.advance_and_track('Deleting dynamic disks') do + @logger.info('Deleting dynamic disks') + disk_deleter.delete_dynamic_disks(deployment_model, event_log_stage, max_threads: @max_threads) + end + if Config.network_lifecycle_enabled? 
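For orientation, a rough sketch of how a client could exercise the new /dynamic_disks endpoints registered above; the director address and credentials are placeholders, and TLS verification is disabled only for brevity:

    require 'net/http'
    require 'openssl'
    require 'json'
    require 'uri'

    # Placeholder director URL and basic-auth credentials.
    uri = URI('https://10.0.0.6:25555/dynamic_disks/provide')
    req = Net::HTTP::Post.new(uri, 'Content-Type' => 'application/json')
    req.basic_auth('admin', 'admin-password')
    req.body = JSON.generate(
      'instance_id'    => 'fake-instance-uuid',
      'disk_name'      => 'my-dynamic-disk',
      'disk_pool_name' => 'fast-disks',
      'disk_size'      => 10_240,
      'metadata'       => { 'owner' => 'some-service' },
    )

    res = Net::HTTP.start(uri.host, uri.port, use_ssl: true, verify_mode: OpenSSL::SSL::VERIFY_NONE) do |http|
      http.request(req)
    end
    # The controller replies with a redirect to /tasks/:id; the finished task's
    # result output contains {"disk_cid": "..."}.
    puts res['Location']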
deployment_model.networks.each do |network| with_network_lock(network.name) do diff --git a/src/bosh-director/lib/bosh/director/deployment_plan/steps.rb b/src/bosh-director/lib/bosh/director/deployment_plan/steps.rb index 0c2e802d33a..d55d43279ac 100644 --- a/src/bosh-director/lib/bosh/director/deployment_plan/steps.rb +++ b/src/bosh-director/lib/bosh/director/deployment_plan/steps.rb @@ -12,7 +12,9 @@ module Steps require 'bosh/director/deployment_plan/steps/commit_instance_network_settings_step' require 'bosh/director/deployment_plan/steps/delete_vm_step' require 'bosh/director/deployment_plan/steps/detach_disk_step' +require 'bosh/director/deployment_plan/steps/detach_dynamic_disk_step' require 'bosh/director/deployment_plan/steps/detach_instance_disks_step' +require 'bosh/director/deployment_plan/steps/delete_dynamic_disk_step' require 'bosh/director/deployment_plan/steps/elect_active_vm_step' require 'bosh/director/deployment_plan/steps/mount_disk_step' require 'bosh/director/deployment_plan/steps/mount_instance_disks_step' diff --git a/src/bosh-director/lib/bosh/director/deployment_plan/steps/attach_disk_step.rb b/src/bosh-director/lib/bosh/director/deployment_plan/steps/attach_disk_step.rb index 6d46222523a..87ec4ec22ec 100644 --- a/src/bosh-director/lib/bosh/director/deployment_plan/steps/attach_disk_step.rb +++ b/src/bosh-director/lib/bosh/director/deployment_plan/steps/attach_disk_step.rb @@ -2,6 +2,8 @@ module Bosh::Director module DeploymentPlan module Steps class AttachDiskStep + include LockHelper + def initialize(disk, tags) @disk = disk @logger = Config.logger @@ -17,7 +19,7 @@ def perform(report) cloud_factory = CloudFactory.create attach_disk_cloud = cloud_factory.get(@disk.cpi, instance_active_vm.stemcell_api_version) @logger.info("Attaching disk #{@disk.disk_cid}") - disk_hint = attach_disk_cloud.attach_disk(@disk.instance.vm_cid, @disk.disk_cid) + disk_hint = with_vm_lock(@disk.instance.vm_cid) { attach_disk_cloud.attach_disk(@disk.instance.vm_cid, @disk.disk_cid) } if disk_hint agent_client(@disk.instance).wait_until_ready diff --git a/src/bosh-director/lib/bosh/director/deployment_plan/steps/delete_dynamic_disk_step.rb b/src/bosh-director/lib/bosh/director/deployment_plan/steps/delete_dynamic_disk_step.rb new file mode 100644 index 00000000000..7f8c9fd0783 --- /dev/null +++ b/src/bosh-director/lib/bosh/director/deployment_plan/steps/delete_dynamic_disk_step.rb @@ -0,0 +1,22 @@ +module Bosh::Director + module DeploymentPlan + module Steps + class DeleteDynamicDiskStep + def initialize(disk) + @disk = disk + @logger = Config.logger + end + + def perform(_report) + return if @disk.nil? 
+ + @logger.info("Deleting dynamic disk '#{@disk.disk_cid}'") + cloud = Bosh::Director::CloudFactory.create.get(@disk.cpi) + cloud.delete_disk(@disk.disk_cid) if cloud.has_disk(@disk.disk_cid) + + @disk.destroy + end + end + end + end +end diff --git a/src/bosh-director/lib/bosh/director/deployment_plan/steps/delete_vm_step.rb b/src/bosh-director/lib/bosh/director/deployment_plan/steps/delete_vm_step.rb index a1186c9a694..9857baae966 100644 --- a/src/bosh-director/lib/bosh/director/deployment_plan/steps/delete_vm_step.rb +++ b/src/bosh-director/lib/bosh/director/deployment_plan/steps/delete_vm_step.rb @@ -2,6 +2,8 @@ module Bosh::Director module DeploymentPlan module Steps class DeleteVmStep + include LockHelper + def initialize(store_event = true, force = false, enable_virtual_delete_vm = false) @store_event = store_event @logger = Config.logger @@ -24,7 +26,7 @@ def perform(report) cloud = CloudFactory.create.get(vm.cpi, vm.stemcell_api_version) begin - cloud.delete_vm(vm_cid) unless @enable_virtual_delete_vm + with_vm_lock(vm_cid) { cloud.delete_vm(vm_cid) } unless @enable_virtual_delete_vm rescue Bosh::Clouds::VMNotFound @logger.warn("VM '#{vm_cid}' might have already been deleted from the cloud") end diff --git a/src/bosh-director/lib/bosh/director/deployment_plan/steps/detach_disk_step.rb b/src/bosh-director/lib/bosh/director/deployment_plan/steps/detach_disk_step.rb index ff27bd63b12..be855c09c77 100644 --- a/src/bosh-director/lib/bosh/director/deployment_plan/steps/detach_disk_step.rb +++ b/src/bosh-director/lib/bosh/director/deployment_plan/steps/detach_disk_step.rb @@ -2,6 +2,8 @@ module Bosh::Director module DeploymentPlan module Steps class DetachDiskStep + include LockHelper + def initialize(disk) @disk = disk @logger = Config.logger @@ -18,7 +20,7 @@ def perform(_report) cloud_factory = CloudFactory.create cloud = cloud_factory.get(@disk.cpi, instance_active_vm.stemcell_api_version) @logger.info("Detaching disk #{@disk.disk_cid}") - cloud.detach_disk(@disk.instance.vm_cid, @disk.disk_cid) + with_vm_lock(@disk.instance.vm_cid) { cloud.detach_disk(@disk.instance.vm_cid, @disk.disk_cid) } rescue Bosh::Clouds::DiskNotAttached if @disk.active raise CloudDiskNotAttached, diff --git a/src/bosh-director/lib/bosh/director/deployment_plan/steps/detach_dynamic_disk_step.rb b/src/bosh-director/lib/bosh/director/deployment_plan/steps/detach_dynamic_disk_step.rb new file mode 100644 index 00000000000..d03ffcc4eb4 --- /dev/null +++ b/src/bosh-director/lib/bosh/director/deployment_plan/steps/detach_dynamic_disk_step.rb @@ -0,0 +1,30 @@ +module Bosh::Director + module DeploymentPlan + module Steps + class DetachDynamicDiskStep + def initialize(disk) + @disk = disk + @logger = Config.logger + end + + def perform(_report) + return if @disk.nil? || @disk.vm.nil? + + cloud = CloudFactory.create.get(@disk.cpi, @disk.vm.stemcell_api_version) + @logger.info("Detaching dynamic disk #{@disk.disk_cid}") + + instance_name = @disk.vm.instance.nil? ? 
nil : @disk.vm.instance.name + agent_client = AgentClient.with_agent_id(@disk.vm.agent_id, instance_name) + agent_client.remove_dynamic_disk(@disk.disk_cid) + + cloud.detach_disk(@disk.vm.cid, @disk.disk_cid) + @disk.update(vm_id: nil) + rescue Bosh::Clouds::DiskNotAttached + raise CloudDiskNotAttached, + "'#{@disk.vm.cid}' VM should have dynamic disk " \ + "'#{@disk.disk_cid}' attached but it doesn't (according to CPI)" + end + end + end +end diff --git a/src/bosh-director/lib/bosh/director/deployment_plan/steps/detach_instance_disks_step.rb index 5add89a373c..23d469c3624 100644 --- a/src/bosh-director/lib/bosh/director/deployment_plan/steps/detach_instance_disks_step.rb +++ b/src/bosh-director/lib/bosh/director/deployment_plan/steps/detach_instance_disks_step.rb @@ -13,6 +13,10 @@ def perform(report) @instance_model.persistent_disks.each do |disk| DetachDiskStep.new(disk).perform(report) end + + @instance_model.active_vm.dynamic_disks.each do |disk| + DetachDynamicDiskStep.new(disk).perform(report) + end end end end diff --git a/src/bosh-director/lib/bosh/director/disk_deleter.rb new file mode 100644 index 00000000000..8d5c3422bd4 --- /dev/null +++ b/src/bosh-director/lib/bosh/director/disk_deleter.rb @@ -0,0 +1,50 @@ +module Bosh::Director + class DiskDeleter + def initialize(logger, disk_manager, options = {}) + @logger = logger + @disk_manager = disk_manager + force = options.fetch(:force, false) + @error_ignorer = ErrorIgnorer.new(force, @logger) + end + + def delete_dynamic_disks(deployment_model, event_log_stage, options = {}) + max_threads = options[:max_threads] || Config.max_threads + ThreadPool.new(:max_threads => max_threads).wrap do |pool| + deployment_model.dynamic_disks.each do |dynamic_disk| + pool.process { delete_dynamic_disk(dynamic_disk, event_log_stage) } + end + end + end + + private + + def delete_dynamic_disk(dynamic_disk, event_log_stage) + parent_id = add_event(dynamic_disk.deployment.name, dynamic_disk.name) + event_log_stage.advance_and_track(dynamic_disk.to_s) do + @error_ignorer.with_force_check do + @disk_manager.delete_dynamic_disk(dynamic_disk) + end + + dynamic_disk.destroy + end + rescue Exception => e + raise e + ensure + add_event(dynamic_disk.deployment.name, dynamic_disk.name, parent_id, e) if parent_id + end + + def add_event(deployment_name, disk_name, parent_id = nil, error = nil) + event = Config.current_job.event_manager.create_event( + parent_id: parent_id, + user: Config.current_job.username, + action: 'delete', + object_type: 'disk', + object_name: disk_name, + task: Config.current_job.task_id, + deployment: deployment_name, + error: error, + ) + event.id + end + end +end diff --git a/src/bosh-director/lib/bosh/director/disk_manager.rb index e82693719e1..7efb6d1e303 100644 --- a/src/bosh-director/lib/bosh/director/disk_manager.rb +++ b/src/bosh-director/lib/bosh/director/disk_manager.rb @@ -65,6 +65,10 @@ def delete_persistent_disks(instance_model) end end + def delete_dynamic_disk(disk) + DeploymentPlan::Steps::DeleteDynamicDiskStep.new(disk).perform(step_report) + end + def attach_disk(disk, tags) report = step_report DeploymentPlan::Steps::AttachDiskStep.new(disk, tags).perform(report) diff --git a/src/bosh-director/lib/bosh/director/jobs/delete_deployment.rb index df113996a7b..ef1e946f1a4 100644 ---
a/src/bosh-director/lib/bosh/director/jobs/delete_deployment.rb +++ b/src/bosh-director/lib/bosh/director/jobs/delete_deployment.rb @@ -30,10 +30,11 @@ def perform force: @force, stop_intent: :delete_deployment, ) + disk_deleter = DiskDeleter.new(logger, disk_manager, force: @force) deployment_deleter = DeploymentDeleter.new(Config.event_log, logger, Config.max_threads) vm_deleter = Bosh::Director::VmDeleter.new(logger, @force, Config.enable_virtual_delete_vms) - deployment_deleter.delete(deployment_model, instance_deleter, vm_deleter) + deployment_deleter.delete(deployment_model, instance_deleter, vm_deleter, disk_deleter) add_event(parent_id) "/deployments/#{@deployment_name}" diff --git a/src/bosh-director/lib/bosh/director/jobs/dynamic_disks/delete_dynamic_disk.rb b/src/bosh-director/lib/bosh/director/jobs/dynamic_disks/delete_dynamic_disk.rb new file mode 100644 index 00000000000..a8f8bdf4d4d --- /dev/null +++ b/src/bosh-director/lib/bosh/director/jobs/dynamic_disks/delete_dynamic_disk.rb @@ -0,0 +1,30 @@ +module Bosh::Director + module Jobs::DynamicDisks + class DeleteDynamicDisk < Jobs::BaseJob + include Jobs::Helpers::DynamicDiskHelpers + + @queue = :dynamic_disks + + def self.job_type + :delete_dynamic_disk + end + + def initialize(disk_name) + @disk_name = disk_name + end + + def perform + disk_model = Models::DynamicDisk.find(name: @disk_name) + return "disk with name `#{@disk_name}` was already deleted" if disk_model.nil? + + cloud = Bosh::Director::CloudFactory.create.get(disk_model.cpi) + disk_cid = disk_model.disk_cid + + cloud.delete_disk(disk_cid) if cloud.has_disk(disk_cid) + disk_model.destroy + + "deleted disk `#{disk_cid}`" + end + end + end +end \ No newline at end of file diff --git a/src/bosh-director/lib/bosh/director/jobs/dynamic_disks/detach_dynamic_disk.rb b/src/bosh-director/lib/bosh/director/jobs/dynamic_disks/detach_dynamic_disk.rb new file mode 100644 index 00000000000..c87a9bd73c6 --- /dev/null +++ b/src/bosh-director/lib/bosh/director/jobs/dynamic_disks/detach_dynamic_disk.rb @@ -0,0 +1,41 @@ +module Bosh::Director + module Jobs::DynamicDisks + class DetachDynamicDisk < Jobs::BaseJob + include Jobs::Helpers::DynamicDiskHelpers + include LockHelper + + @queue = :dynamic_disks + + def self.job_type + :detach_dynamic_disk + end + + def initialize( disk_name) + @disk_name = disk_name + end + + def perform + disk_model = Models::DynamicDisk.find(name: @disk_name) + raise "disk `#{@disk_name}` can not be found in the database" if disk_model.nil? + + return "disk `#{disk_model.disk_cid}` was already detached" if disk_model.vm.nil? + + cloud = Bosh::Director::CloudFactory.create.get(disk_model.cpi, disk_model.vm.stemcell_api_version) + vm_cid = disk_model.vm.cid + + instance_name = disk_model.vm.instance.nil? ? 
nil : disk_model.vm.instance.name + agent_client = AgentClient.with_agent_id(disk_model.vm.agent_id, instance_name) + agent_client.remove_dynamic_disk(disk_model.disk_cid) + + with_vm_lock(vm_cid, timeout: VM_LOCK_TIMEOUT) { cloud.detach_disk(vm_cid, disk_model.disk_cid) } + + disk_model.update(vm_id: nil, disk_hint: '') + + "detached disk `#{disk_model.disk_cid}` from vm `#{vm_cid}`" + rescue Bosh::Clouds::DiskNotAttached + disk_model.update(vm_id: nil) + "disk `#{disk_model.disk_cid}` was already detached" + end + end + end +end \ No newline at end of file diff --git a/src/bosh-director/lib/bosh/director/jobs/dynamic_disks/provide_dynamic_disk.rb b/src/bosh-director/lib/bosh/director/jobs/dynamic_disks/provide_dynamic_disk.rb new file mode 100644 index 00000000000..63484f71286 --- /dev/null +++ b/src/bosh-director/lib/bosh/director/jobs/dynamic_disks/provide_dynamic_disk.rb @@ -0,0 +1,73 @@ +module Bosh::Director + module Jobs::DynamicDisks + class ProvideDynamicDisk < Jobs::BaseJob + include Jobs::Helpers::DynamicDiskHelpers + include LockHelper + + @queue = :dynamic_disks + + def self.job_type + :provide_dynamic_disk + end + + def initialize(instance_id, disk_name, disk_pool_name, disk_size, metadata) + @instance_id = instance_id + @disk_name = disk_name + @disk_pool_name = disk_pool_name + @disk_size = disk_size + @metadata = metadata + end + + def perform + instance = Models::Instance.find(uuid: @instance_id) + raise "instance `#{@instance_id}` not found" if instance.nil? + + vm = instance.active_vm + raise "no active vm found for instance `#{@instance_id}`" if vm.nil? + + cloud_properties = find_disk_cloud_properties(instance, @disk_pool_name) + cloud = Bosh::Director::CloudFactory.create.get(vm.cpi) + + disk_model = Models::DynamicDisk.find(name: @disk_name) + if disk_model.nil? + cloud_properties['name'] = @disk_name + disk_cid = cloud.create_disk(@disk_size, cloud_properties, vm.cid) + disk_model = Models::DynamicDisk.create( + name: @disk_name, + disk_cid: disk_cid, + deployment_id: instance.deployment.id, + size: @disk_size, + disk_pool_name: @disk_pool_name, + cpi: vm.cpi, + ) + end + + if disk_model.vm.nil? + disk_hint = with_vm_lock(vm.cid, timeout: VM_LOCK_TIMEOUT) { cloud.attach_disk(vm.cid, disk_model.disk_cid) } + disk_model.update(vm_id: vm.id, availability_zone: vm.instance.availability_zone, disk_hint: disk_hint) + else + if disk_model.vm.id != vm.id + raise "disk is attached to a different vm `#{disk_model.vm.cid}`" + end + end + + if !@metadata.nil? && disk_model.metadata != @metadata + MetadataUpdater.build.update_dynamic_disk_metadata(cloud, disk_model, @metadata) + disk_model.update(metadata: @metadata) + end + + unless disk_model.disk_hint.nil? 
+ agent_client = AgentClient.with_agent_id(vm.agent_id, instance.name) + agent_client.add_dynamic_disk(disk_model.disk_cid, disk_model.disk_hint) + end + + disk_info = { "disk_cid": disk_model.disk_cid } + + task_result.write(JSON.generate(disk_info)) + task_result.write("\n") + + "attached disk `#{@disk_name}` to `#{vm.cid}` in deployment `#{vm.instance.deployment.name}`" + end + end + end +end \ No newline at end of file diff --git a/src/bosh-director/lib/bosh/director/jobs/helpers.rb b/src/bosh-director/lib/bosh/director/jobs/helpers.rb index 744faeb028c..3402db7e263 100644 --- a/src/bosh-director/lib/bosh/director/jobs/helpers.rb +++ b/src/bosh-director/lib/bosh/director/jobs/helpers.rb @@ -8,3 +8,4 @@ require 'bosh/director/jobs/helpers/release_version_deleter' require 'bosh/director/jobs/helpers/release_deleter' require 'bosh/director/jobs/helpers/name_version_release_deleter' +require 'bosh/director/jobs/helpers/dynamic_disk_helpers' diff --git a/src/bosh-director/lib/bosh/director/jobs/helpers/dynamic_disk_helpers.rb b/src/bosh-director/lib/bosh/director/jobs/helpers/dynamic_disk_helpers.rb new file mode 100644 index 00000000000..2e46f7736e1 --- /dev/null +++ b/src/bosh-director/lib/bosh/director/jobs/helpers/dynamic_disk_helpers.rb @@ -0,0 +1,19 @@ +module Bosh::Director + module Jobs::Helpers + module DynamicDiskHelpers + VM_LOCK_TIMEOUT = 60 + + def find_disk_cloud_properties(instance, disk_pool_name) + teams = instance.deployment.teams + configs = Models::Config.latest_set_for_teams('cloud', *teams) + raise 'No cloud configs provided' if configs.empty? + + consolidated_configs = Bosh::Director::CloudConfig::CloudConfigsConsolidator.new(configs) + cloud_config_disk_type = DeploymentPlan::CloudManifestParser.new(logger).parse(consolidated_configs.raw_manifest).disk_type(disk_pool_name) + raise "Could not find disk pool by name `#{disk_pool_name}`" if cloud_config_disk_type.nil? 
+ + cloud_config_disk_type.cloud_properties + end + end + end +end \ No newline at end of file diff --git a/src/bosh-director/lib/bosh/director/lock_helper.rb b/src/bosh-director/lib/bosh/director/lock_helper.rb index a3237a7c4e3..4a471414332 100644 --- a/src/bosh-director/lib/bosh/director/lock_helper.rb +++ b/src/bosh-director/lib/bosh/director/lock_helper.rb @@ -16,6 +16,12 @@ def locked_deployments Models::Lock.where { Sequel.like(:name, 'lock:deployment:%') & (expired_at > Time.now) }.all end + def with_vm_lock(vm_cid, opts = {}) + timeout = opts[:timeout] || 10 + Config.logger.info("Acquiring VM lock on #{vm_cid}") + Lock.new("lock:vm:#{vm_cid}", timeout: timeout).lock { yield } + end + def with_network_lock(name, opts = {}) timeout = opts[:timeout] || 10 Config.logger.info("Acquiring network lock on #{name}") diff --git a/src/bosh-director/lib/bosh/director/metadata_updater.rb b/src/bosh-director/lib/bosh/director/metadata_updater.rb index 7b294be8a94..46207c675d6 100644 --- a/src/bosh-director/lib/bosh/director/metadata_updater.rb +++ b/src/bosh-director/lib/bosh/director/metadata_updater.rb @@ -42,5 +42,16 @@ def update_disk_metadata(cloud, disk, metadata) rescue Bosh::Clouds::NotImplemented => e @logger.debug(e.inspect) end + + def update_dynamic_disk_metadata(cloud, disk, metadata) + if cloud.respond_to?(:set_disk_metadata) + metadata = metadata.merge(@director_metadata) + metadata['deployment'] = disk.deployment.name + + cloud.set_disk_metadata(disk.disk_cid, metadata) + end + rescue Bosh::Clouds::NotImplemented => e + @logger.debug(e.inspect) + end end end diff --git a/src/bosh-director/lib/bosh/director/models.rb b/src/bosh-director/lib/bosh/director/models.rb index 97c392328dc..74e8d2b87a2 100644 --- a/src/bosh-director/lib/bosh/director/models.rb +++ b/src/bosh-director/lib/bosh/director/models.rb @@ -31,6 +31,7 @@ require 'bosh/director/models/orphaned_vm' require 'bosh/director/models/package' require 'bosh/director/models/persistent_disk' +require 'bosh/director/models/dynamic_disk' require 'bosh/director/models/release' require 'bosh/director/models/release_version' require 'bosh/director/models/rendered_templates_archive' diff --git a/src/bosh-director/lib/bosh/director/models/deployment.rb b/src/bosh-director/lib/bosh/director/models/deployment.rb index e14881994a3..09a2b64fe54 100644 --- a/src/bosh-director/lib/bosh/director/models/deployment.rb +++ b/src/bosh-director/lib/bosh/director/models/deployment.rb @@ -12,6 +12,7 @@ def self.join_table_block(type) many_to_many :release_versions one_to_many :job_instances, :class => 'Bosh::Director::Models::Instance' one_to_many :instances + one_to_many :dynamic_disks one_to_many :properties, :class => "Bosh::Director::Models::DeploymentProperty" one_to_many :problems, :class => "Bosh::Director::Models::DeploymentProblem" one_to_many :link_consumers, :class => 'Bosh::Director::Models::Links::LinkConsumer' diff --git a/src/bosh-director/lib/bosh/director/models/dynamic_disk.rb b/src/bosh-director/lib/bosh/director/models/dynamic_disk.rb new file mode 100644 index 00000000000..7a1d63b34f3 --- /dev/null +++ b/src/bosh-director/lib/bosh/director/models/dynamic_disk.rb @@ -0,0 +1,33 @@ +module Bosh::Director::Models + class DynamicDisk < Sequel::Model(Bosh::Director::Config.db) + many_to_one :deployment + many_to_one :vm + + def validate + validates_presence [:name, :disk_cid] + validates_unique [:name, :disk_cid] + end + + def disk_hint + result = disk_hint_json + result ? 
JSON.parse(result) : nil + end + + def disk_hint=(disk_hint) + self.disk_hint_json = JSON.generate(disk_hint) + end + + def metadata + result = self.metadata_json + result ? JSON.parse(result) : {} + end + + def metadata=(metadata) + self.metadata_json = JSON.generate(metadata) + end + + def to_s + "#{self.name}/#{self.disk_cid}" + end + end +end diff --git a/src/bosh-director/lib/bosh/director/models/vm.rb b/src/bosh-director/lib/bosh/director/models/vm.rb index b6790f57830..a5c1c0dffa2 100644 --- a/src/bosh-director/lib/bosh/director/models/vm.rb +++ b/src/bosh-director/lib/bosh/director/models/vm.rb @@ -2,6 +2,7 @@ module Bosh::Director::Models class Vm < Sequel::Model(Bosh::Director::Config.db) many_to_one :instance one_to_many :ip_addresses + one_to_many :dynamic_disks def before_destroy ip_addresses_dataset.each do |ip_address| diff --git a/src/bosh-director/lib/bosh/director/permission_authorizer.rb b/src/bosh-director/lib/bosh/director/permission_authorizer.rb index 9de640a130b..ebe28986944 100644 --- a/src/bosh-director/lib/bosh/director/permission_authorizer.rb +++ b/src/bosh-director/lib/bosh/director/permission_authorizer.rb @@ -86,7 +86,7 @@ def get_director_scopes(expected_scope, permission, user_scopes) expected_scope << bosh_team_admin_scopes(user_scopes) when :update_configs expected_scope << bosh_team_admin_scopes(user_scopes) - when :read, :upload_releases, :upload_stemcells + when :read, :upload_releases, :upload_stemcells, :update_dynamic_disks, :delete_dynamic_disks expected_scope << director_permissions[permission] else raise ArgumentError, "Unexpected permission for director: #{permission}" @@ -111,6 +111,8 @@ def director_permissions admin: ['bosh.admin', "bosh.#{@uuid_provider.uuid}.admin"], upload_stemcells: ['bosh.stemcells.upload'], upload_releases: ['bosh.releases.upload'], + update_dynamic_disks: ['bosh.dynamic_disks.update'], + delete_dynamic_disks: ['bosh.dynamic_disks.delete'], } end diff --git a/src/bosh-director/lib/bosh/director/problem_handlers/inactive_disk.rb b/src/bosh-director/lib/bosh/director/problem_handlers/inactive_disk.rb index e3ef9a99e11..66031369798 100644 --- a/src/bosh-director/lib/bosh/director/problem_handlers/inactive_disk.rb +++ b/src/bosh-director/lib/bosh/director/problem_handlers/inactive_disk.rb @@ -81,7 +81,7 @@ def delete_disk if active_vm begin cloud = CloudFactory.create.get(active_vm.cpi, active_vm.stemcell_api_version) - cloud.detach_disk(active_vm.cid, @disk.disk_cid) + with_vm_lock(active_vm.cid) { cloud.detach_disk(active_vm.cid, @disk.disk_cid) } rescue => e # We are going to delete this disk anyway # and we know it's not in use, so we can ignore diff --git a/src/bosh-director/lib/bosh/director/problem_handlers/missing_disk.rb b/src/bosh-director/lib/bosh/director/problem_handlers/missing_disk.rb index e49f12636a2..c433093acc6 100644 --- a/src/bosh-director/lib/bosh/director/problem_handlers/missing_disk.rb +++ b/src/bosh-director/lib/bosh/director/problem_handlers/missing_disk.rb @@ -75,7 +75,7 @@ def delete_disk_reference begin @logger.debug('Sending cpi request: detach_disk') - cloud.detach_disk(@instance.vm_cid, @disk.disk_cid) + with_vm_lock(@instance.vm_cid) { cloud.detach_disk(@instance.vm_cid, @disk.disk_cid) } rescue Bosh::Clouds::DiskNotAttached, Bosh::Clouds::DiskNotFound end end diff --git a/src/bosh-director/lib/bosh/director/problem_handlers/mount_info_mismatch.rb b/src/bosh-director/lib/bosh/director/problem_handlers/mount_info_mismatch.rb index 943d87cb4a7..2205a4e0c00 100644 --- 
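The JSON-backed accessors on the DynamicDisk model above simply round-trip hashes through the *_json columns; a small illustrative sketch (the values are made up):

    disk = Bosh::Director::Models::DynamicDisk.new
    disk.disk_hint = { 'path' => '/dev/sdc' }    # stored in disk_hint_json as '{"path":"/dev/sdc"}'
    disk.metadata  = { 'owner' => 'some-team' }  # stored in metadata_json
    disk.disk_hint  # => { 'path' => '/dev/sdc' }
    disk.metadata   # => { 'owner' => 'some-team' }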
a/src/bosh-director/lib/bosh/director/problem_handlers/mount_info_mismatch.rb +++ b/src/bosh-director/lib/bosh/director/problem_handlers/mount_info_mismatch.rb @@ -67,7 +67,7 @@ def instance_group def reattach_disk(reboot = false) az_cloud_factory = AZCloudFactory.create_with_latest_configs(@instance.deployment) cloud_for_attach_disk = az_cloud_factory.get_for_az(@instance.availability_zone, @vm_stemcell_api_version) - disk_hint = cloud_for_attach_disk.attach_disk(@vm_cid, @disk_cid) + disk_hint = with_vm_lock(@vm_cid) { cloud_for_attach_disk.attach_disk(@vm_cid, @disk_cid) } cloud_for_update_metadata = az_cloud_factory.get_for_az(@instance.availability_zone) MetadataUpdater.build.update_disk_metadata(cloud_for_update_metadata, @disk, @disk.instance.deployment.tags) diff --git a/src/bosh-director/lib/bosh/director/tasks/db.rake b/src/bosh-director/lib/bosh/director/tasks/db.rake index 6f99afe9757..1d7f243e717 100644 --- a/src/bosh-director/lib/bosh/director/tasks/db.rake +++ b/src/bosh-director/lib/bosh/director/tasks/db.rake @@ -6,7 +6,7 @@ namespace :db do timestamp = Time.new.getutc.strftime('%Y%m%d%H%M%S') new_migration_path = "bosh-director/db/migrations/#{timestamp}_#{name}.rb" - new_migration_spec_path = "bosh-director/spec/unit/db/migrations/#{timestamp}_#{name}_spec.rb" + new_migration_spec_path = "bosh-director/spec/unit/bosh/director/db/migrations/#{timestamp}_#{name}_spec.rb" puts "Creating #{new_migration_spec_path}" File.write new_migration_spec_path, < options[:max_length] + raise ValidationViolatedMax, validation_message.invalid_max_length(options, property, result) + end + result end end @@ -59,6 +67,14 @@ def invalid_max(options, property, result) def invalid_min(options, property, result) "'#{property}' value (#{result.inspect}) should be greater than #{options[:min].inspect}" end + + def invalid_max_length(options, property, result) + "'#{property}' length (#{result.length.inspect}) should be less than #{options[:max_length].inspect}" + end + + def invalid_min_length(options, property, result) + "'#{property}' length (#{result.length.inspect}) should be greater than #{options[:min_length].inspect}" + end end class DefaultPropertyValidationMessage diff --git a/src/bosh-director/lib/bosh/director/vm_deleter.rb b/src/bosh-director/lib/bosh/director/vm_deleter.rb index 2cdd7a31fe6..7b114a2f505 100644 --- a/src/bosh-director/lib/bosh/director/vm_deleter.rb +++ b/src/bosh-director/lib/bosh/director/vm_deleter.rb @@ -1,5 +1,7 @@ module Bosh::Director class VmDeleter + include LockHelper + def initialize(logger, force = false, enable_virtual_delete_vm = false) @logger = logger @error_ignorer = ErrorIgnorer.new(force, @logger) @@ -20,7 +22,7 @@ def delete_vm_by_cid(cid, stemcell_api_version, cpi_name = nil) @logger.info('Deleting VM') @error_ignorer.with_force_check do cloud_factory = CloudFactory.create - cloud_factory.get(cpi_name, stemcell_api_version).delete_vm(cid) unless @enable_virtual_delete_vm + with_vm_lock(cid) { cloud_factory.get(cpi_name, stemcell_api_version).delete_vm(cid) } unless @enable_virtual_delete_vm end end diff --git a/src/bosh-director/lib/bosh/director/worker.rb b/src/bosh-director/lib/bosh/director/worker.rb index 0dcf3f35731..e174a34a607 100644 --- a/src/bosh-director/lib/bosh/director/worker.rb +++ b/src/bosh-director/lib/bosh/director/worker.rb @@ -4,9 +4,9 @@ module Bosh module Director class Worker - def initialize(config, index = 0) + def initialize(config, name) @config = config - @index = index + @name = name @retry_interval = 5 end @@ -36,7 
+36,7 @@ def prep end def start - @delayed_job_worker.name = "worker_#{@index}" + @delayed_job_worker.name = @name @delayed_job_worker.logger.info("Starting worker #{@delayed_job_worker.name}.") Bosh::Director::Config.log_director_start_event('worker', @delayed_job_worker.name, {}) diff --git a/src/bosh-director/spec/factories.rb b/src/bosh-director/spec/factories.rb index 4cb0101ab5d..81353482d45 100644 --- a/src/bosh-director/spec/factories.rb +++ b/src/bosh-director/spec/factories.rb @@ -289,6 +289,15 @@ association :instance, factory: :models_instance, strategy: :create end + factory :models_dynamic_disk, class: Bosh::Director::Models::DynamicDisk do + sequence(:name) { |i| "dynamic-disk-name-#{i}" } + sequence(:disk_cid) { |i| "dynamic-disk-cid-#{i}" } + sequence(:disk_pool_name) { |i| "dynamic-disk-pool-name-#{i}" } + sequence(:cpi) { |i| "dynamic-disk-cpi-#{i}" } + sequence(:size) { |i| 1024 + i } + association :deployment, factory: :models_deployment, strategy: :create + end + factory :models_release, class: Bosh::Director::Models::Release do sequence(:name) { |i| "release-#{i}" } end diff --git a/src/bosh-director/spec/support/test_identity_provider.rb b/src/bosh-director/spec/support/test_identity_provider.rb index fd4820edb22..5abf3db1bdb 100644 --- a/src/bosh-director/spec/support/test_identity_provider.rb +++ b/src/bosh-director/spec/support/test_identity_provider.rb @@ -39,6 +39,8 @@ def user_scopes 'director-reader' => ["bosh.#{@uuid_provider.uuid}.read"], 'dev-team-member' => ['bosh.teams.dev.admin'], 'dev-team-read-member' => ['bosh.teams.dev.read'], + 'dynamic-disks-updater' => ['bosh.dynamic_disks.update'], + 'dynamic-disks-deleter' => ['bosh.dynamic_disks.delete'], 'outsider' => ['uaa.admin'], } end diff --git a/src/bosh-director/spec/unit/bosh/director/agent_client_spec.rb b/src/bosh-director/spec/unit/bosh/director/agent_client_spec.rb index 411c93cc961..e22c01a04aa 100644 --- a/src/bosh-director/spec/unit/bosh/director/agent_client_spec.rb +++ b/src/bosh-director/spec/unit/bosh/director/agent_client_spec.rb @@ -136,6 +136,8 @@ def self.it_acts_as_message_with_timeout(message_name) it_acts_as_asynchronous_message :start it_acts_as_asynchronous_message :add_persistent_disk it_acts_as_asynchronous_message :remove_persistent_disk + it_acts_as_asynchronous_message :add_dynamic_disk + it_acts_as_asynchronous_message :remove_dynamic_disk end describe 'update_settings' do @@ -288,6 +290,30 @@ def self.it_acts_as_message_with_timeout(message_name) end end + describe 'add_dynamic_disk' do + it 'logs a warning if the message is not handled' do + allow(client).to receive(:handle_method).and_raise(RpcRemoteException, 'unknown message add_dynamic_disk') + + expect(Config.logger).to receive(:warn).with( + "Ignoring add_dynamic_disk 'unknown message' error from the agent: "\ + '#', + ) + expect { client.add_dynamic_disk('diskCID', 'hint') }.to_not raise_error + end + end + + describe 'remove_dynamic_disk' do + it 'logs a warning if the message is not handled' do + allow(client).to receive(:handle_method).and_raise(RpcRemoteException, 'unknown message remove_dynamic_disk') + + expect(Config.logger).to receive(:warn).with( + "Ignoring remove_dynamic_disk 'unknown message' error from the agent: "\ + '#', + ) + expect { client.remove_dynamic_disk('diskCID') }.to_not raise_error + end + end + context 'task can time out' do it_acts_as_message_with_timeout :stop end diff --git a/src/bosh-director/spec/unit/bosh/director/api/controllers/dynamic_disks_controller_spec.rb 
b/src/bosh-director/spec/unit/bosh/director/api/controllers/dynamic_disks_controller_spec.rb new file mode 100644 index 00000000000..bcefb35926a --- /dev/null +++ b/src/bosh-director/spec/unit/bosh/director/api/controllers/dynamic_disks_controller_spec.rb @@ -0,0 +1,244 @@ +require 'spec_helper' +require 'rack/test' + +module Bosh::Director + module Api + describe Controllers::DynamicDisksController do + include Rack::Test::Methods + + subject(:app) { linted_rack_app(described_class.new(config)) } + let(:config) do + config = Config.load_hash(SpecHelper.director_config_hash) + identity_provider = Support::TestIdentityProvider.new(config.get_uuid_provider) + allow(config).to receive(:identity_provider).and_return(identity_provider) + config + end + before { App.new(config) } + + let(:instance_id) { 'fake-instance-id' } + let(:disk_pool_name) { 'fake_disk_pool_name' } + let(:disk_name) { 'fake_disk_name' } + let(:disk_size) { 1000 } + let(:metadata) { { 'some-key' => 'some-value' } } + + describe 'POST', '/provide' do + let(:content) do + JSON.generate({ + 'instance_id' => instance_id, + 'disk_pool_name' => disk_pool_name, + 'disk_name' => disk_name, + 'disk_size' => disk_size, + 'metadata' => metadata + }) + end + + context 'when user is reader' do + before { basic_authorize('reader', 'reader') } + + it 'forbids access' do + expect(post('/provide', content, { 'CONTENT_TYPE' => 'application/json' }).status).to eq(401) + end + end + + context 'user has admin permissions' do + before { authorize 'admin', 'admin' } + + it 'enqueues a ProvideDynamicDisk task' do + expect_any_instance_of(Bosh::Director::JobQueue).to receive(:enqueue).with( + 'admin', + Jobs::DynamicDisks::ProvideDynamicDisk, + 'provide dynamic disk', + [instance_id, disk_name, disk_pool_name, disk_size, metadata], + ).and_call_original + + post '/provide', content, { 'CONTENT_TYPE' => 'application/json' } + + expect_redirect_to_queued_task(last_response) + end + + context 'content is invalid' do + context 'disk_pool_name is nil' do + let(:disk_pool_name) { nil } + + it 'raises an error' do + post '/provide', content, { 'CONTENT_TYPE' => 'application/json' } + + expect(last_response.status).to eq(400) + expect(JSON.parse(last_response.body)).to eq( + 'code' => 40000, + 'description' => "Property 'disk_pool_name' value (nil) did not match the required type 'String'", + ) + end + end + + context 'disk_pool_name is empty' do + let(:disk_pool_name) { '' } + + it 'raises an error' do + post '/provide', content, { 'CONTENT_TYPE' => 'application/json' } + + expect(last_response.status).to eq(400) + expect(JSON.parse(last_response.body)).to eq( + 'code' => 40002, + 'description' => "'disk_pool_name' length (0) should be greater than 1", + ) + end + end + + context 'disk_name is nil' do + let(:disk_name) { nil } + + it 'raises an error' do + post '/provide', content, { 'CONTENT_TYPE' => 'application/json' } + + expect(last_response.status).to eq(400) + expect(JSON.parse(last_response.body)).to eq( + 'code' => 40000, + 'description' => "Property 'disk_name' value (nil) did not match the required type 'String'", + ) + end + end + + context 'disk_name is empty' do + let(:disk_name) { '' } + + it 'raises an error' do + post '/provide', content, { 'CONTENT_TYPE' => 'application/json' } + + expect(last_response.status).to eq(400) + expect(JSON.parse(last_response.body)).to eq( + 'code' => 40002, + 'description' => "'disk_name' length (0) should be greater than 1", + ) + end + end + + context 'disk_size is empty' do + let(:disk_size) { nil } + + 
it 'raises an error' do + post '/provide', content, { 'CONTENT_TYPE' => 'application/json' } + + expect(last_response.status).to eq(400) + expect(JSON.parse(last_response.body)).to eq( + 'code' => 40000, + 'description' => "Property 'disk_size' value (nil) did not match the required type 'Integer'", + ) + end + end + + context 'disk_size is 0' do + let(:disk_size) { 0 } + + it 'raises an error' do + post '/provide', content, { 'CONTENT_TYPE' => 'application/json' } + + expect(last_response.status).to eq(400) + expect(JSON.parse(last_response.body)).to eq( + 'code' => 40002, + 'description' => "'disk_size' value (0) should be greater than 1", + ) + end + end + end + end + end + + describe 'POST', '/:disk_name/detach' do + context 'when user is reader' do + before { basic_authorize('reader', 'reader') } + + it 'forbids access' do + expect(post('/disk_name/detach').status).to eq(401) + end + end + + context 'when user has bosh.dynamic_disks.update scope' do + before { basic_authorize('dynamic-disks-updater', 'dynamic-disks-updater') } + + it 'enqueues a ProvideDynamicDisk task' do + expect_any_instance_of(Bosh::Director::JobQueue).to receive(:enqueue).with( + 'dynamic-disks-updater', + Jobs::DynamicDisks::DetachDynamicDisk, + 'detach dynamic disk', + ['disk_name'], + ).and_call_original + + post '/disk_name/detach' + + expect_redirect_to_queued_task(last_response) + end + end + + context 'user has admin permissions' do + before { authorize 'admin', 'admin' } + + it 'enqueues a ProvideDynamicDisk task' do + expect_any_instance_of(Bosh::Director::JobQueue).to receive(:enqueue).with( + 'admin', + Jobs::DynamicDisks::DetachDynamicDisk, + 'detach dynamic disk', + ['disk_name'], + ).and_call_original + + post '/disk_name/detach' + + expect_redirect_to_queued_task(last_response) + end + end + end + + describe 'DELETE', '/:disk_name' do + context 'when user is reader' do + before { basic_authorize('reader', 'reader') } + + it 'forbids access' do + expect(delete('/disk_name').status).to eq(401) + end + end + + context 'when user has bosh.dynamic_disks.update scope' do + before { basic_authorize('dynamic-disks-updater', 'dynamic-disks-updater') } + + it 'forbids access' do + expect(delete('/disk_name').status).to eq(401) + end + end + + context 'when user has bosh.dynamic_disks.delete scope' do + before { authorize 'dynamic-disks-deleter', 'dynamic-disks-deleter' } + + it 'enqueues a ProvideDynamicDisk task' do + expect_any_instance_of(Bosh::Director::JobQueue).to receive(:enqueue).with( + 'dynamic-disks-deleter', + Jobs::DynamicDisks::DeleteDynamicDisk, + 'delete dynamic disk', + ['disk_name'], + ).and_call_original + + delete '/disk_name' + + expect_redirect_to_queued_task(last_response) + end + end + + context 'user has admin permissions' do + before { authorize 'admin', 'admin' } + + it 'enqueues a ProvideDynamicDisk task' do + expect_any_instance_of(Bosh::Director::JobQueue).to receive(:enqueue).with( + 'admin', + Jobs::DynamicDisks::DeleteDynamicDisk, + 'delete dynamic disk', + ['disk_name'], + ).and_call_original + + delete '/disk_name' + + expect_redirect_to_queued_task(last_response) + end + end + end + end + end +end \ No newline at end of file diff --git a/src/bosh-director/spec/unit/bosh/director/cloudcheck_helper_spec.rb b/src/bosh-director/spec/unit/bosh/director/cloudcheck_helper_spec.rb index a606e901afe..edbd57123f4 100644 --- a/src/bosh-director/spec/unit/bosh/director/cloudcheck_helper_spec.rb +++ b/src/bosh-director/spec/unit/bosh/director/cloudcheck_helper_spec.rb @@ -119,7 +119,8 
@@ def fake_job_context expect(cloud_factory).to receive(:get).with(instance.active_vm.cpi).and_return(cloud) end - it 'reboots the vm on success' do + it 'reboots the vm on success with vm lock' do + expect(test_problem_handler).to receive(:with_vm_lock).with(vm.cid).and_yield allow(agent_client).to receive(:wait_until_ready) test_problem_handler.reboot_vm(instance) end diff --git a/src/bosh-director/spec/unit/bosh/director/db/migrations/20250623223600_add_dynamic_disks_spec.rb b/src/bosh-director/spec/unit/bosh/director/db/migrations/20250623223600_add_dynamic_disks_spec.rb new file mode 100644 index 00000000000..c7e19513afa --- /dev/null +++ b/src/bosh-director/spec/unit/bosh/director/db/migrations/20250623223600_add_dynamic_disks_spec.rb @@ -0,0 +1,29 @@ +require 'db_spec_helper' + +module Bosh::Director + describe '20250623223600_add_dynamic_disks.rb' do + let(:db) { DBSpecHelper.db } + + before { DBSpecHelper.migrate_all_before(subject) } + + it 'creates dynamic_disks table' do + expect(db.table_exists?(:dynamic_disks)).to be_falsy + + DBSpecHelper.migrate(subject) + + expect(db.table_exists?(:dynamic_disks)).to be_truthy + expect(db[:dynamic_disks].columns).to include( + :id, + :deployment_id, + :vm_id, + :disk_cid, + :name, + :disk_pool_name, + :cpi, + :size, + :metadata_json, + :availability_zone + ) + end + end +end diff --git a/src/bosh-director/spec/unit/bosh/director/db/migrations/verify_migration_digest_spec.rb b/src/bosh-director/spec/unit/bosh/director/db/migrations/verify_migration_digest_spec.rb index 3c2ae7a9ef6..d356c2aca30 100644 --- a/src/bosh-director/spec/unit/bosh/director/db/migrations/verify_migration_digest_spec.rb +++ b/src/bosh-director/spec/unit/bosh/director/db/migrations/verify_migration_digest_spec.rb @@ -24,6 +24,10 @@ module Bosh::Director filename: '20250618102610_migrate_ip_address_representation_from_integer_to_cidr_notation.rb', sha1: '83f219a46b0943b5e27355cb3792e639c7507707', }, + { + filename: '20250623223600_add_dynamic_disks.rb', + sha1: '25e2bb3567fbc1c853b078920989699bca95277d', + }, ] end diff --git a/src/bosh-director/spec/unit/bosh/director/deployment_deleter_spec.rb b/src/bosh-director/spec/unit/bosh/director/deployment_deleter_spec.rb index c7575c25f84..44dd9941aba 100644 --- a/src/bosh-director/spec/unit/bosh/director/deployment_deleter_spec.rb +++ b/src/bosh-director/spec/unit/bosh/director/deployment_deleter_spec.rb @@ -9,6 +9,7 @@ module Bosh::Director let(:blobstore) { instance_double(Bosh::Director::Blobstore::Client) } let(:instance_deleter) { instance_double(InstanceDeleter) } let(:vm_deleter) { instance_double(VmDeleter) } + let(:disk_deleter) { instance_double(DiskDeleter) } let(:dns_enabled) { false } let(:task) { FactoryBot.create(:models_task, id: 42) } let(:task_writer) { Bosh::Director::TaskDBWriter.new(:event_output, task.id) } @@ -31,6 +32,7 @@ module Bosh::Director deployment_model.add_property(FactoryBot.create(:models_deployment_property)) allow(instance_deleter).to receive(:delete_instance_plans) + allow(disk_deleter).to receive(:delete_dynamic_disks) allow(deployment_model).to receive(:destroy) end @@ -41,29 +43,38 @@ module Bosh::Director expect(options).to eq(max_threads: 3) end - deleter.delete(deployment_model, instance_deleter, vm_deleter) + deleter.delete(deployment_model, instance_deleter, vm_deleter, disk_deleter) end it 'removes all stemcells' do expect(deployment_stemcell.deployments).to include(deployment_model) - deleter.delete(deployment_model, instance_deleter, vm_deleter) + 
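Because verify_migration_digest_spec.rb pins each migration file to a SHA-1 (as in the hunk above), any further change to 20250623223600_add_dynamic_disks.rb requires recomputing that digest; assuming the migration sits at the path used by the db:create_migration rake task, something like:

    require 'digest'

    puts Digest::SHA1.file('bosh-director/db/migrations/20250623223600_add_dynamic_disks.rb').hexdigest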
deleter.delete(deployment_model, instance_deleter, vm_deleter, disk_deleter) expect(deployment_stemcell.reload.deployments).to be_empty end it 'removes all releases' do expect(deployment_release_version.deployments).to include(deployment_model) - deleter.delete(deployment_model, instance_deleter, vm_deleter) + deleter.delete(deployment_model, instance_deleter, vm_deleter, disk_deleter) expect(deployment_release_version.reload.deployments).to be_empty end it 'deletes all properties' do - deleter.delete(deployment_model, instance_deleter, vm_deleter) + deleter.delete(deployment_model, instance_deleter, vm_deleter, disk_deleter) expect(Models::DeploymentProperty.all.size).to eq(0) end + it 'deletes all dynamic disks' do + expect(disk_deleter).to receive(:delete_dynamic_disks).with( + deployment_model, + instance_of(EventLog::Stage), + {max_threads: 3} + ) + deleter.delete(deployment_model, instance_deleter, vm_deleter, disk_deleter) + end + it 'destroys deployment model' do expect(deployment_model).to receive(:destroy) - deleter.delete(deployment_model, instance_deleter, vm_deleter) + deleter.delete(deployment_model, instance_deleter, vm_deleter, disk_deleter) end end end diff --git a/src/bosh-director/spec/unit/bosh/director/deployment_plan/steps/attach_disk_step_spec.rb b/src/bosh-director/spec/unit/bosh/director/deployment_plan/steps/attach_disk_step_spec.rb index 33aaa040682..65da4cad93e 100644 --- a/src/bosh-director/spec/unit/bosh/director/deployment_plan/steps/attach_disk_step_spec.rb +++ b/src/bosh-director/spec/unit/bosh/director/deployment_plan/steps/attach_disk_step_spec.rb @@ -19,6 +19,7 @@ module Steps let(:meta_updater) { instance_double(MetadataUpdater, update_disk_metadata: nil) } let(:report) { instance_double(Stages::Report).as_null_object } let(:agent_client) { instance_double(AgentClient).as_null_object } + let(:job) { instance_double(Bosh::Director::Jobs::BaseJob, username: 'user', task_id: 42) } before do allow(CloudFactory).to receive(:create).and_return(cloud_factory) @@ -27,9 +28,11 @@ module Steps allow(cloud).to receive(:attach_disk) allow(MetadataUpdater).to receive(:build).and_return(meta_updater) allow(AgentClient).to receive(:with_agent_id).and_return(agent_client) + allow(Config).to receive(:current_job).and_return(job) end - it 'calls out to cpi associated with disk to attach disk' do + it 'calls out to cpi associated with disk to attach disk with vm lock' do + expect(step).to receive(:with_vm_lock).with(vm.cid).and_yield expect(cloud_factory).to receive(:get).with(disk&.cpi, stemcell_api_version).once.and_return(cloud) expect(cloud).to receive(:attach_disk).with(vm.cid, disk.disk_cid) @@ -68,6 +71,7 @@ module Steps end it 'logs notification of attaching' do + expect(per_spec_logger).to receive(:info).with("Acquiring VM lock on #{vm.cid}") expect(per_spec_logger).to receive(:info).with("Attaching disk #{disk.disk_cid}") step.perform(report) diff --git a/src/bosh-director/spec/unit/bosh/director/deployment_plan/steps/delete_dynamic_disk_step_spec.rb b/src/bosh-director/spec/unit/bosh/director/deployment_plan/steps/delete_dynamic_disk_step_spec.rb new file mode 100644 index 00000000000..c020812f6a3 --- /dev/null +++ b/src/bosh-director/spec/unit/bosh/director/deployment_plan/steps/delete_dynamic_disk_step_spec.rb @@ -0,0 +1,66 @@ +require 'spec_helper' + +module Bosh::Director + module DeploymentPlan + module Steps + describe DeleteDynamicDiskStep do + subject(:step) { DeleteDynamicDiskStep.new(disk) } + + let!(:disk) { FactoryBot.create(:models_dynamic_disk, 
name: 'disk-name') } + let(:cloud_factory) { instance_double(CloudFactory) } + let(:cloud) { instance_double(Bosh::Clouds::ExternalCpi) } + let(:report) { Stages::Report.new } + + before do + allow(CloudFactory).to receive(:create).and_return(cloud_factory) + allow(cloud_factory).to receive(:get).with(disk&.cpi).once.and_return(cloud) + allow(cloud).to receive(:delete_disk) + allow(cloud).to receive(:has_disk).and_return(true) + end + + it 'calls out to cpi associated with disk to delete disk' do + expect(cloud_factory).to receive(:get).with(disk&.cpi).once.and_return(cloud) + expect(cloud).to receive(:delete_disk).with(disk.disk_cid) + + step.perform(report) + end + + it 'deletes dynamic disk from the database' do + step.perform(report) + + expect(Models::DynamicDisk.find(disk_cid: disk.disk_cid)).to be_nil + end + + context 'when disk does not exist' do + before do + allow(cloud).to receive(:has_disk).and_return(false) + end + + it 'does not perform any cloud actions' do + expect(cloud).to_not receive(:delete_disk) + + step.perform(report) + + expect(Models::DynamicDisk.find(disk_cid: disk.disk_cid)).to be_nil + end + end + + it 'logs notification of deleting' do + expect(per_spec_logger).to receive(:info).with("Deleting dynamic disk '#{disk.disk_cid}'") + + step.perform(report) + end + + context 'when given nil disk' do + let(:disk) { nil } + + it 'does not perform any cloud actions' do + expect(cloud).to_not receive(:delete_disk) + + step.perform(report) + end + end + end + end + end +end diff --git a/src/bosh-director/spec/unit/bosh/director/deployment_plan/steps/delete_vm_step_spec.rb b/src/bosh-director/spec/unit/bosh/director/deployment_plan/steps/delete_vm_step_spec.rb index 826022186bb..f152cf7c595 100644 --- a/src/bosh-director/spec/unit/bosh/director/deployment_plan/steps/delete_vm_step_spec.rb +++ b/src/bosh-director/spec/unit/bosh/director/deployment_plan/steps/delete_vm_step_spec.rb @@ -58,11 +58,12 @@ module Steps it 'deletes the instances vm and stores an event' do expect(job).to receive(:event_manager).twice.and_return(event_manager) - expect(job).to receive(:username).twice.and_return('fake-username') - expect(job).to receive(:task_id).twice.and_return('fake-task-id') + expect(job).to receive(:username).exactly(4).times.and_return('fake-username') + expect(job).to receive(:task_id).exactly(3).times.and_return('fake-task-id') expect(per_spec_logger).to receive(:info).with('Deleting VM') - expect(Config).to receive(:current_job).and_return(job).exactly(6).times + expect(per_spec_logger).to receive(:info).with('Acquiring VM lock on vm-cid') + expect(Config).to receive(:current_job).and_return(job).exactly(9).times expect(Models::LocalDnsRecord.all).to eq([local_dns_record]) expect(cloud).to receive(:delete_vm).with('vm-cid') @@ -100,6 +101,7 @@ module Steps context 'when vm has already been deleted from the IaaS' do it 'should log a warning' do expect(per_spec_logger).to receive(:info).with('Deleting VM') + expect(per_spec_logger).to receive(:info).with('Acquiring VM lock on vm-cid') expect(per_spec_logger).to receive(:warn) .with("VM '#{vm_model.cid}' might have already been deleted from the cloud") expect(cloud).to receive(:delete_vm).with(vm_model.cid).and_raise Bosh::Clouds::VMNotFound @@ -131,11 +133,12 @@ module Steps it 'creates requests a cloud instance with that stemcell api version' do expect(cloud_factory).to receive(:get).with('cpi1', 25).and_return(cloud) expect(job).to receive(:event_manager).twice.and_return(event_manager) - expect(job).to 
receive(:username).twice.and_return('fake-username') - expect(job).to receive(:task_id).twice.and_return('fake-task-id') + expect(job).to receive(:username).exactly(4).times.and_return('fake-username') + expect(job).to receive(:task_id).exactly(3).times.and_return('fake-task-id') expect(per_spec_logger).to receive(:info).with('Deleting VM') - expect(Config).to receive(:current_job).and_return(job).exactly(6).times + expect(per_spec_logger).to receive(:info).with('Acquiring VM lock on vm-cid') + expect(Config).to receive(:current_job).and_return(job).exactly(9).times expect(Models::LocalDnsRecord.all).to eq([local_dns_record]) expect(cloud).to receive(:delete_vm).with('vm-cid') @@ -146,6 +149,7 @@ module Steps context 'when trying to delete VM mutiple times' do it 'deletes the instances vm and stores an event' do expect(per_spec_logger).to receive(:info).with('Deleting VM').twice + expect(per_spec_logger).to receive(:info).with('Acquiring VM lock on vm-cid').twice expect(Models::LocalDnsRecord.all).to eq([local_dns_record]) expect(cloud).to receive(:delete_vm).with('vm-cid').twice diff --git a/src/bosh-director/spec/unit/bosh/director/deployment_plan/steps/detach_disk_step_spec.rb b/src/bosh-director/spec/unit/bosh/director/deployment_plan/steps/detach_disk_step_spec.rb index 10d50d43e12..a36d3ea7135 100644 --- a/src/bosh-director/spec/unit/bosh/director/deployment_plan/steps/detach_disk_step_spec.rb +++ b/src/bosh-director/spec/unit/bosh/director/deployment_plan/steps/detach_disk_step_spec.rb @@ -15,12 +15,14 @@ module Steps let(:agent_client) do instance_double(AgentClient, list_disk: [disk&.disk_cid], remove_persistent_disk: nil) end + let(:job) { instance_double(Bosh::Director::Jobs::BaseJob, username: 'user', task_id: 42) } before do allow(AgentClient).to receive(:with_agent_id).with(vm.agent_id, vm.instance.name).and_return(agent_client) allow(CloudFactory).to receive(:create).and_return(cloud_factory) allow(cloud_factory).to receive(:get).with(disk&.cpi, 25).once.and_return(cloud) allow(cloud).to receive(:detach_disk) + allow(Config).to receive(:current_job).and_return(job) end it 'sends remove_persistent_disk method to agent' do @@ -29,8 +31,9 @@ module Steps step.perform(report) end - it 'calls out to cpi associated with disk to detach disk' do + it 'calls out to cpi associated with disk to detach disk with vm lock' do expect(cloud_factory).to receive(:get).with(disk&.cpi, 25).once.and_return(cloud) + expect(step).to receive(:with_vm_lock).with(vm.cid).and_yield expect(cloud).to receive(:detach_disk).with(vm.cid, disk.disk_cid) step.perform(report) @@ -38,6 +41,7 @@ module Steps it 'logs notification of detaching' do expect(per_spec_logger).to receive(:info).with("Detaching disk #{disk.disk_cid}") + expect(per_spec_logger).to receive(:info).with("Acquiring VM lock on #{vm.cid}") step.perform(report) end diff --git a/src/bosh-director/spec/unit/bosh/director/deployment_plan/steps/detach_dynamic_disk_step_spec.rb b/src/bosh-director/spec/unit/bosh/director/deployment_plan/steps/detach_dynamic_disk_step_spec.rb new file mode 100644 index 00000000000..a995e20b4de --- /dev/null +++ b/src/bosh-director/spec/unit/bosh/director/deployment_plan/steps/detach_dynamic_disk_step_spec.rb @@ -0,0 +1,75 @@ +require 'spec_helper' + +module Bosh::Director + module DeploymentPlan + module Steps + describe DetachDynamicDiskStep do + subject(:step) { DetachDynamicDiskStep.new(disk) } + + let!(:vm) { FactoryBot.create(:models_vm, instance: instance, stemcell_api_version: 25) } + let(:instance) { 
FactoryBot.create(:models_instance) } + let!(:disk) { FactoryBot.create(:models_dynamic_disk, vm: vm, name: 'disk-name') } + let(:cloud_factory) { instance_double(CloudFactory) } + let(:cloud) { instance_double(Bosh::Clouds::ExternalCpi) } + let(:report) { Stages::Report.new } + let(:agent_client) { instance_double(AgentClient) } + + before do + allow(CloudFactory).to receive(:create).and_return(cloud_factory) + allow(cloud_factory).to receive(:get).with(disk&.cpi, 25).once.and_return(cloud) + allow(cloud).to receive(:detach_disk) + allow(agent_client).to receive(:remove_dynamic_disk) + allow(AgentClient).to receive(:with_agent_id).with(vm.agent_id, instance.name).and_return(agent_client) + end + + it 'calls out to cpi associated with disk to detach disk' do + expect(cloud_factory).to receive(:get).with(disk&.cpi, 25).once.and_return(cloud) + expect(agent_client).to receive(:remove_dynamic_disk).with(disk.disk_cid) + expect(cloud).to receive(:detach_disk).with(vm.cid, disk.disk_cid) + + step.perform(report) + end + + it 'clears vm_cid from disk' do + step.perform(report) + expect(Models::DynamicDisk.find(disk_cid: disk.disk_cid).vm_id).to be_nil + end + + it 'logs notification of detaching' do + expect(per_spec_logger).to receive(:info).with("Detaching dynamic disk #{disk.disk_cid}") + + step.perform(report) + end + + context 'when the CPI reports that a disk is not attached' do + before do + allow(cloud).to receive( :detach_disk) + .with(vm.cid, disk.disk_cid) + .and_raise(Bosh::Clouds::DiskNotAttached.new('foo')) + end + + it 'raises a CloudDiskNotAttached error' do + expect { + step.perform(report) + }.to raise_error( + CloudDiskNotAttached, + "'#{vm.cid}' VM should have dynamic disk '#{disk.disk_cid}' attached " \ + "but it doesn't (according to CPI)", + ) + end + end + + context 'when given nil disk' do + let(:disk) { nil } + + it 'does not perform any cloud actions' do + expect(agent_client).to_not receive(:remove_dynamic_disk) + expect(cloud).to_not receive(:detach_disk) + + step.perform(report) + end + end + end + end + end +end diff --git a/src/bosh-director/spec/unit/bosh/director/deployment_plan/steps/detach_instance_disks_step_spec.rb b/src/bosh-director/spec/unit/bosh/director/deployment_plan/steps/detach_instance_disks_step_spec.rb index 0829bd564ae..6e409be3720 100644 --- a/src/bosh-director/spec/unit/bosh/director/deployment_plan/steps/detach_instance_disks_step_spec.rb +++ b/src/bosh-director/spec/unit/bosh/director/deployment_plan/steps/detach_instance_disks_step_spec.rb @@ -10,21 +10,29 @@ module Steps let!(:vm) { FactoryBot.create(:models_vm, instance: instance, active: true, cpi: 'vm-cpi') } let!(:disk1) { FactoryBot.create(:models_persistent_disk, instance: instance, name: '') } let!(:disk2) { FactoryBot.create(:models_persistent_disk, instance: instance, name: 'unmanaged') } + let!(:dynamic_disk_1) { FactoryBot.create(:models_dynamic_disk, vm: vm) } + let!(:dynamic_disk_2) { FactoryBot.create(:models_dynamic_disk, vm: vm) } let(:detach_disk_1) { instance_double(DetachDiskStep) } let(:detach_disk_2) { instance_double(DetachDiskStep) } + let(:detach_dynamic_disk_1) { instance_double(DetachDynamicDiskStep) } + let(:detach_dynamic_disk_2) { instance_double(DetachDynamicDiskStep) } let(:report) { Stages::Report.new } before do allow(DetachDiskStep).to receive(:new).with(disk1).and_return(detach_disk_1) allow(DetachDiskStep).to receive(:new).with(disk2).and_return(detach_disk_2) + allow(DetachDynamicDiskStep).to 
receive(:new).with(dynamic_disk_1).and_return(detach_dynamic_disk_1) + allow(DetachDynamicDiskStep).to receive(:new).with(dynamic_disk_2).and_return(detach_dynamic_disk_2) allow(detach_disk_1).to receive(:perform).with(report).once allow(detach_disk_2).to receive(:perform).with(report).once + allow(detach_dynamic_disk_1).to receive(:perform).with(report).once + allow(detach_dynamic_disk_2).to receive(:perform).with(report).once end - it 'calls out to vms cpi to detach all attached disks' do + it 'calls out to vms cpi to detach all attached persistent disks' do expect(DetachDiskStep).to receive(:new).with(disk1) expect(DetachDiskStep).to receive(:new).with(disk2) expect(detach_disk_1).to receive(:perform).with(report).once @@ -33,6 +41,15 step.perform(report) end + it 'calls out to vms cpi to detach all attached dynamic disks' do + expect(DetachDynamicDiskStep).to receive(:new).with(dynamic_disk_1) + expect(DetachDynamicDiskStep).to receive(:new).with(dynamic_disk_2) + expect(detach_dynamic_disk_1).to receive(:perform).with(report).once + expect(detach_dynamic_disk_2).to receive(:perform).with(report).once + + step.perform(report) + end + context 'when the instance does not have an active vm' do before do vm.update(active: false) @@ -43,6 +60,9 end expect(DetachDiskStep).not_to receive(:new) expect(detach_disk_1).not_to receive(:perform).with(report) expect(detach_disk_2).not_to receive(:perform).with(report) + expect(DetachDynamicDiskStep).not_to receive(:new) + expect(detach_dynamic_disk_1).not_to receive(:perform).with(report) + expect(detach_dynamic_disk_2).not_to receive(:perform).with(report) step.perform(report) end diff --git a/src/bosh-director/spec/unit/bosh/director/disk_deleter_spec.rb b/src/bosh-director/spec/unit/bosh/director/disk_deleter_spec.rb new file mode 100644 index 00000000000..395b8419cef --- /dev/null +++ b/src/bosh-director/spec/unit/bosh/director/disk_deleter_spec.rb @@ -0,0 +1,43 @@ +require 'spec_helper' + +module Bosh::Director + describe DiskDeleter do + subject(:deleter) { described_class.new(per_spec_logger, disk_manager, {}) } + let(:disk_manager) { DiskManager.new(per_spec_logger) } + let(:options) { {} } + let(:event_log) { Bosh::Director::EventLog::Log.new(task_writer) } + let(:event_log_stage) { instance_double('Bosh::Director::EventLog::Stage') } + let!(:deployment_model) { FactoryBot.create(:models_deployment, name: 'fake-deployment') } + let(:delete_job) { Jobs::DeleteDeployment.new('test_deployment', {}) } + let(:task) { FactoryBot.create(:models_task, id: 42, username: 'user') } + + before do + allow(event_log_stage).to receive(:advance_and_track).and_yield + allow(delete_job).to receive(:task_id).and_return(task.id) + allow(Config).to receive(:current_job).and_return(delete_job) + end + + describe '#delete_dynamic_disks' do + let(:five_disks) { + 5.times.map do |index| + FactoryBot.create(:models_dynamic_disk, deployment: deployment_model) + end + } + + it 'should delete disks with the config max threads option' do + allow(Config).to receive(:max_threads).and_return(5) + pool = double('pool') + expect(ThreadPool).to receive(:new).with(max_threads: 5).and_return(pool) + expect(pool).to receive(:wrap).and_yield(pool) + expect(pool).to receive(:process).exactly(5).times.and_yield + + + 5.times do |index| + expect(disk_manager).to receive(:delete_dynamic_disk).with(five_disks[index]) + end + + deleter.delete_dynamic_disks(deployment_model, event_log_stage) + end + end
+ end +end diff --git a/src/bosh-director/spec/unit/bosh/director/disk_manager_spec.rb b/src/bosh-director/spec/unit/bosh/director/disk_manager_spec.rb index fb8a7e38ce7..e8c925fbf8b 100644 --- a/src/bosh-director/spec/unit/bosh/director/disk_manager_spec.rb +++ b/src/bosh-director/spec/unit/bosh/director/disk_manager_spec.rb @@ -778,6 +778,22 @@ module Bosh::Director end end + describe '#delete_dynamic_disk' do + let!(:dynamic_disk) { FactoryBot.create(:models_dynamic_disk, vm: instance_model.active_vm) } + before do + allow(cloud_factory).to receive(:get).with(dynamic_disk.cpi).and_return(cloud) + allow(cloud).to receive(:has_disk).and_return(true) + end + + it 'deletes disks for instance' do + expect(Models::DynamicDisk.all.size).to eq(1) + allow(cloud).to receive(:delete_disk).with(dynamic_disk.disk_cid) + + disk_manager.delete_dynamic_disk(dynamic_disk) + expect(Models::DynamicDisk.all.size).to eq(0) + end + end + describe '#attach_disks_if_needed' do let(:cloud_for_set_disk_metadata) { instance_double(Bosh::Clouds::ExternalCpi) } diff --git a/src/bosh-director/spec/unit/bosh/director/instance_deleter_spec.rb b/src/bosh-director/spec/unit/bosh/director/instance_deleter_spec.rb index ee87dfee2c4..2c0cb8b5295 100644 --- a/src/bosh-director/spec/unit/bosh/director/instance_deleter_spec.rb +++ b/src/bosh-director/spec/unit/bosh/director/instance_deleter_spec.rb @@ -120,7 +120,7 @@ module Bosh::Director expect do deleter.delete_instance_plans([instance_plan], event_log_stage) - end.to change { Bosh::Director::Models::Event.count }.from(0).to(8) + end.to change { Bosh::Director::Models::Event.count }.from(0).to(10) event1 = Bosh::Director::Models::Event.order(:id).first expect(event1.user).to eq(task.username) @@ -204,7 +204,7 @@ module Bosh::Director expect do deleter.delete_instance_plans([instance_plan], event_log_stage) - end.to change { Bosh::Director::Models::Event.count }.from(0).to(8) + end.to change { Bosh::Director::Models::Event.count }.from(0).to(10) end end diff --git a/src/bosh-director/spec/unit/bosh/director/jobs/delete_vm_spec.rb b/src/bosh-director/spec/unit/bosh/director/jobs/delete_vm_spec.rb index f0aa7596def..a56b507d388 100644 --- a/src/bosh-director/spec/unit/bosh/director/jobs/delete_vm_spec.rb +++ b/src/bosh-director/spec/unit/bosh/director/jobs/delete_vm_spec.rb @@ -76,7 +76,7 @@ module Bosh::Director expect(cloud).to receive(:delete_vm).with(vm_cid) expect do job.perform - end.to change { Bosh::Director::Models::Event.count }.by(2) + end.to change { Bosh::Director::Models::Event.count }.by(4) event1 = Bosh::Director::Models::Event.order_by(:id).first expect(event1.user).to eq(task.username) diff --git a/src/bosh-director/spec/unit/bosh/director/jobs/dynamic_disks/delete_dynamic_disk_spec.rb b/src/bosh-director/spec/unit/bosh/director/jobs/dynamic_disks/delete_dynamic_disk_spec.rb new file mode 100644 index 00000000000..dcc9d7fb11b --- /dev/null +++ b/src/bosh-director/spec/unit/bosh/director/jobs/dynamic_disks/delete_dynamic_disk_spec.rb @@ -0,0 +1,83 @@ +require 'spec_helper' + +module Bosh::Director + describe Jobs::DynamicDisks::DeleteDynamicDisk do + let(:disk_name) { 'fake-disk-name' } + let(:disk_cid) { 'fake-disk-cid' } + let(:disk_pool_name) { 'fake-disk-pool-name' } + + let(:cloud) { instance_double(Bosh::Clouds::ExternalCpi) } + let(:delete_dynamic_disk_job) { Jobs::DynamicDisks::DeleteDynamicDisk.new(disk_name) } + let!(:vm) { FactoryBot.create(:models_vm) } + let(:cloud_factory) { instance_double(Bosh::Director::CloudFactory, get: cloud) } + + before 
do + allow(Bosh::Director::Config).to receive(:name).and_return('fake-director-name') + allow(Bosh::Director::Config).to receive(:cloud_options).and_return('provider' => { 'path' => '/path/to/default/cpi' }) + allow(Bosh::Director::Config).to receive(:preferred_cpi_api_version).and_return(2) + allow(CloudFactory).to receive(:create).and_return(cloud_factory) + allow(cloud).to receive(:has_disk).and_return(false) + end + + describe 'DJ job class expectations' do + let(:job_type) { :delete_dynamic_disk } + let(:queue) { :dynamic_disks } + it_behaves_like 'a DelayedJob job' + end + + describe '#perform' do + context 'when disk exists in database but not in the cloud' do + let!(:disk) do + FactoryBot.create( + :models_dynamic_disk, + name: disk_name, + disk_cid: disk_cid, + deployment: vm.instance.deployment, + disk_pool_name: disk_pool_name + ) + end + + it 'deletes the disk' do + expect(delete_dynamic_disk_job.perform).to eq("deleted disk `#{disk_cid}`") + expect(Models::DynamicDisk.where(disk_cid: disk_cid).count).to eq(0) + end + end + + context 'when disk exists in database and in the cloud' do + let!(:disk) do + FactoryBot.create( + :models_dynamic_disk, + name: disk_name, + disk_cid: disk_cid, + deployment: vm.instance.deployment, + disk_pool_name: disk_pool_name + ) + end + + before do + allow(cloud).to receive(:has_disk).and_return(true) + end + + it 'deletes the disk' do + expect(cloud).to receive(:delete_disk).with(disk_cid) + expect(delete_dynamic_disk_job.perform).to eq("deleted disk `#{disk_cid}`") + expect(Models::DynamicDisk.where(disk_cid: disk_cid).count).to eq(0) + end + + context 'when deleting disk returns an error' do + it 'returns an error from delete_disk call' do + expect(cloud).to receive(:delete_disk).with(disk_cid).and_raise('some-error') + expect { delete_dynamic_disk_job.perform }.to raise_error('some-error') + end + end + end + + context 'when disk does not exist' do + it 'does not delete disk and returns no error' do + expect(cloud).not_to receive(:delete_disk).with(disk_cid) + expect(delete_dynamic_disk_job.perform).to eq("disk with name `#{disk_name}` was already deleted") + end + end + end + end +end diff --git a/src/bosh-director/spec/unit/bosh/director/jobs/dynamic_disks/detach_dynamic_disk_spec.rb b/src/bosh-director/spec/unit/bosh/director/jobs/dynamic_disks/detach_dynamic_disk_spec.rb new file mode 100644 index 00000000000..906343b5d22 --- /dev/null +++ b/src/bosh-director/spec/unit/bosh/director/jobs/dynamic_disks/detach_dynamic_disk_spec.rb @@ -0,0 +1,90 @@ +require 'spec_helper' + +module Bosh::Director + describe Jobs::DynamicDisks::DetachDynamicDisk do + let(:disk_name) { 'fake-disk-name' } + let(:disk_cid) { 'fake-disk-cid' } + let(:disk_pool_name) { 'fake-disk-pool-name' } + + let(:cloud) { instance_double(Bosh::Clouds::ExternalCpi) } + let(:agent_client) { instance_double(AgentClient) } + let(:detach_dynamic_disk_job) { Jobs::DynamicDisks::DetachDynamicDisk.new(disk_name) } + let!(:vm) { FactoryBot.create(:models_vm, instance: instance) } + let(:instance) { FactoryBot.create(:models_instance) } + let(:cloud_factory) { instance_double(Bosh::Director::CloudFactory, get: cloud) } + + before do + allow(Bosh::Director::Config).to receive(:name).and_return('fake-director-name') + allow(Bosh::Director::Config).to receive(:cloud_options).and_return('provider' => { 'path' => '/path/to/default/cpi' }) + allow(Bosh::Director::Config).to receive(:preferred_cpi_api_version).and_return(2) + allow(CloudFactory).to receive(:create).and_return(cloud_factory) + 
allow(cloud).to receive(:has_disk).and_return(true) + allow(AgentClient).to receive(:with_agent_id).with(vm.agent_id, instance.name).and_return(agent_client) + end + + describe 'DJ job class expectations' do + let(:job_type) { :detach_dynamic_disk } + let(:queue) { :dynamic_disks } + it_behaves_like 'a DelayedJob job' + end + + describe '#perform' do + context 'when disk exists in database' do + context 'when disk has no vm assigned' do + let!(:disk) do + FactoryBot.create( + :models_dynamic_disk, + name: disk_name, + disk_cid: disk_cid, + deployment: vm.instance.deployment, + disk_pool_name: disk_pool_name + ) + end + + it 'does not detach the disk' do + expect(detach_dynamic_disk_job.perform).to eq("disk `#{disk_cid}` was already detached") + end + end + + context 'when disk has vm assigned' do + let!(:disk) do + FactoryBot.create( + :models_dynamic_disk, + name: disk_name, + disk_cid: disk_cid, + deployment: vm.instance.deployment, + vm: vm, + disk_pool_name: disk_pool_name + ) + end + + it 'detaches the disk' do + expect(detach_dynamic_disk_job).to receive(:with_vm_lock).with(vm.cid, timeout: 60).and_yield + expect(cloud).to receive(:detach_disk).with(vm.cid, disk_cid) + expect(agent_client).to receive(:remove_dynamic_disk).with(disk_cid) + expect(detach_dynamic_disk_job.perform).to eq("detached disk `#{disk_cid}` from vm `#{vm.cid}`") + updated_disk_model = Models::DynamicDisk.find(disk_cid: disk_cid) + expect(updated_disk_model.vm_id).to be_nil + expect(updated_disk_model.disk_hint).to be_empty + end + + context 'when disk is already detached' do + it 'does not return an error' do + expect(detach_dynamic_disk_job).to receive(:with_vm_lock).with(vm.cid, timeout: 60).and_yield + expect(cloud).to receive(:detach_disk).with(vm.cid, disk_cid).and_raise(Bosh::Clouds::DiskNotAttached.new(false)) + expect(agent_client).to receive(:remove_dynamic_disk).with(disk_cid) + expect(detach_dynamic_disk_job.perform).to eq("disk `#{disk_cid}` was already detached") + expect(Models::DynamicDisk.find(disk_cid: disk_cid).vm_id).to be_nil + end + end + end + end + + context 'when disk does not exist in database' do + it 'raises an error' do + expect { detach_dynamic_disk_job.perform }.to raise_error("disk `#{disk_name}` can not be found in the database") + end + end + end + end +end diff --git a/src/bosh-director/spec/unit/bosh/director/jobs/dynamic_disks/provide_dynamic_disk_spec.rb b/src/bosh-director/spec/unit/bosh/director/jobs/dynamic_disks/provide_dynamic_disk_spec.rb new file mode 100644 index 00000000000..160fbf49546 --- /dev/null +++ b/src/bosh-director/spec/unit/bosh/director/jobs/dynamic_disks/provide_dynamic_disk_spec.rb @@ -0,0 +1,224 @@ +require 'spec_helper' + +module Bosh::Director + describe Jobs::DynamicDisks::ProvideDynamicDisk do + let(:disk_name) { 'fake-disk-name' } + let(:disk_cid) { 'fake-disk-cid' } + let(:disk_pool_name) { 'fake-disk-pool-name' } + let(:disk_cloud_properties) { { 'fake-disk-cloud-property-key' => 'fake-disk-cloud-property-value' } } + let(:disk_size) { 1000 } + let(:metadata) { { 'fake-key' => 'fake-value' } } + let(:disk_hint) { { "id" => "fake-disk-id" } } + + let(:agent_client) { instance_double(AgentClient) } + let(:cloud) { instance_double(Bosh::Clouds::ExternalCpi) } + + let(:instance) { FactoryBot.create(:models_instance) } + let!(:vm) { FactoryBot.create(:models_vm, instance: instance, active: true) } + + let(:task_result) { TaskDBWriter.new(:result_output, task.id) } + let(:task) {FactoryBot.create(:models_task, id: 42, username: 'user')} + + def 
parsed_task_result + JSON.parse(Models::Task.first(id: 42).result_output) + end + + let(:provide_dynamic_disk_job) { Jobs::DynamicDisks::ProvideDynamicDisk.new(instance.uuid, disk_name, disk_pool_name, disk_size, metadata) } + + let!(:cloud_config) do + FactoryBot.create(:models_config_cloud, content: YAML.dump( + SharedSupport::DeploymentManifestHelper.simple_cloud_config.merge( + 'disk_types' => [ + { 'name' => disk_pool_name, + 'disk_size' => 1024, + 'cloud_properties' => disk_cloud_properties + } + ] + ) + )) + end + let(:cloud_factory) { instance_double(CloudFactory, get: cloud) } + + before do + allow(Config).to receive(:name).and_return('fake-director-name') + allow(Config).to receive(:cloud_options).and_return('provider' => { 'path' => '/path/to/default/cpi' }) + allow(Config).to receive(:preferred_cpi_api_version).and_return(2) + allow(Config).to receive(:result).and_return(task_result) + allow(CloudFactory).to receive(:create).and_return(cloud_factory) + allow(cloud).to receive(:respond_to?).with(:set_disk_metadata).and_return(false) + allow(AgentClient).to receive(:with_agent_id).with(vm.agent_id, instance.name).and_return(agent_client) + allow(provide_dynamic_disk_job).to receive(:task_id).and_return(task.id) + end + + describe 'DJ job class expectations' do + let(:job_type) { :provide_dynamic_disk } + let(:queue) { :dynamic_disks } + it_behaves_like 'a DelayedJob job' + end + + describe '#perform' do + context 'when disk exists' do + let!(:disk) do + FactoryBot.create( + :models_dynamic_disk, + name: disk_name, + disk_cid: disk_cid, + deployment: vm.instance.deployment, + disk_pool_name: disk_pool_name, + ) + end + + it 'attaches the disk to VM and updates disk vm and availability zone' do + expect(cloud).not_to receive(:create_disk) + expect(provide_dynamic_disk_job).to receive(:with_vm_lock).with(vm.cid, timeout: 60).and_yield + expect(cloud).to receive(:attach_disk).with(vm.cid, disk_cid).and_return(disk_hint) + expect(agent_client).to receive(:add_dynamic_disk).with(disk_cid, disk_hint) + expect(provide_dynamic_disk_job.perform).to eq("attached disk `#{disk_name}` to `#{vm.cid}` in deployment `#{vm.instance.deployment.name}`") + expect(parsed_task_result).to eq({'disk_cid' => disk_cid}) + + model = Models::DynamicDisk.where(disk_cid: disk_cid).first + expect(model.vm).to eq(vm) + expect(model.availability_zone).to eq(vm.instance.availability_zone) + end + end + + context 'when disk does not exist' do + it 'creates the disk and attaches it to VM' do + expected_cloud_properties = disk_cloud_properties.merge('name' => 'fake-disk-name') + expect(cloud).to receive(:create_disk).with(disk_size, expected_cloud_properties, vm.cid).and_return(disk_cid) + expect(provide_dynamic_disk_job).to receive(:with_vm_lock).with(vm.cid, timeout: 60).and_yield + expect(cloud).to receive(:attach_disk).with(vm.cid, disk_cid).and_return(disk_hint) + + expect(agent_client).to receive(:add_dynamic_disk).with(disk_cid, disk_hint) + expect(provide_dynamic_disk_job.perform).to eq("attached disk `#{disk_name}` to `#{vm.cid}` in deployment `#{vm.instance.deployment.name}`") + expect(parsed_task_result).to eq({'disk_cid' => disk_cid}) + + model = Models::DynamicDisk.where(disk_cid: disk_cid).first + expect(model.name).to eq(disk_name) + expect(model.size).to eq(disk_size) + expect(model.deployment_id).to eq(vm.instance.deployment.id) + expect(model.disk_pool_name).to eq(disk_pool_name) + expect(model.cpi).to eq(vm.cpi) + expect(model.vm).to eq(vm) + expect(model.availability_zone).to 
eq(vm.instance.availability_zone) + expect(model.metadata).to eq(metadata) + end + end + + context 'when disk exists in database but not in the cloud' do + let!(:disk) do + FactoryBot.create( + :models_dynamic_disk, + name: disk_name, + disk_cid: disk_cid, + deployment: vm.instance.deployment, + disk_pool_name: disk_pool_name, + ) + end + + it 'returns an error from attach_disk call' do + expect(provide_dynamic_disk_job).to receive(:with_vm_lock).with(vm.cid, timeout: 60).and_yield + expect(cloud).to receive(:attach_disk).with(vm.cid, disk_cid).and_raise('some-error') + expect { provide_dynamic_disk_job.perform }.to raise_error('some-error') + end + end + + context 'when disk type can not be found in cloud config' do + let!(:cloud_config) do + FactoryBot.create(:models_config_cloud, content: YAML.dump( + SharedSupport::DeploymentManifestHelper.simple_cloud_config.merge( + 'disk_types' => [ + { 'name' => 'different-disk-type', + 'disk_size' => 1024, + 'cloud_properties' => {} + } + ] + ) + )) + end + + it 'responds with error' do + expect { provide_dynamic_disk_job.perform }.to raise_error("Could not find disk pool by name `fake-disk-pool-name`") + end + end + + context 'when cpi supports set_disk_metadata' do + it 'sets disk metadata' do + expected_cloud_properties = disk_cloud_properties.merge('name' => 'fake-disk-name') + expect(cloud).to receive(:create_disk).with(disk_size, expected_cloud_properties, vm.cid).and_return('fake-disk-cid') + expect(cloud).to receive(:respond_to?).with(:set_disk_metadata).and_return(true) + expect(cloud).to receive(:set_disk_metadata).with( + 'fake-disk-cid', + { + "deployment" => vm.instance.deployment.name, + "director" => 'fake-director-name', + "fake-key" => "fake-value" + } + ) + expect(provide_dynamic_disk_job).to receive(:with_vm_lock).with(vm.cid, timeout: 60).and_yield + expect(cloud).to receive(:attach_disk).with(vm.cid, disk_cid).and_return(disk_hint) + + expect(agent_client).to receive(:add_dynamic_disk).with(disk_cid, disk_hint) + expect(provide_dynamic_disk_job.perform).to eq("attached disk `#{disk_name}` to `#{vm.cid}` in deployment `#{vm.instance.deployment.name}`") + end + end + + context 'when teams are set on the deployment' do + let(:team_1) { FactoryBot.create(:models_team, name: 'team-1') } + let(:team_2) { FactoryBot.create(:models_team, name: 'team-2') } + let(:other_team) { FactoryBot.create(:models_team, name: 'other_team') } + let(:other_disk_cloud_properties) { { 'other-disk-cloud-properties' => {} } } + let!(:latest_other_cloud_config) do + FactoryBot.create(:models_config_cloud, team_id: other_team.id, content: YAML.dump( + SharedSupport::DeploymentManifestHelper.simple_cloud_config.merge( + 'disk_types' => [ + { 'name' => disk_pool_name, + 'disk_size' => 1024, + 'cloud_properties' => other_disk_cloud_properties, + } + ] + ) + )) + end + + before do + vm.instance.deployment.update(teams: [team_1, team_2]) + cloud_config.update(team_id: team_1.id) + end + + it 'gets the disk cloud properties from the latest cloud config for those teams' do + expected_cloud_properties = disk_cloud_properties.merge('name' => 'fake-disk-name') + expect(cloud).to receive(:create_disk).with(disk_size, expected_cloud_properties, vm.cid).and_return(disk_cid) + expect(provide_dynamic_disk_job).to receive(:with_vm_lock).with(vm.cid, timeout: 60).and_yield + expect(cloud).to receive(:attach_disk).with(vm.cid, disk_cid).and_return(disk_hint) + expect(agent_client).to receive(:add_dynamic_disk).with(disk_cid, disk_hint) + 
expect(provide_dynamic_disk_job.perform).to eq("attached disk `#{disk_name}` to `#{vm.cid}` in deployment `#{vm.instance.deployment.name}`") + end + end + + context 'when instance cannot be found' do + let(:provide_dynamic_disk_job) { Jobs::DynamicDisks::ProvideDynamicDisk.new('unknown-instance-id', disk_name, disk_pool_name, disk_size, metadata) } + + it 'responds with error' do + expect { provide_dynamic_disk_job.perform }.to raise_error("instance `unknown-instance-id` not found") + end + end + + context 'when active vm for instance cannot be found' do + let(:vm) { FactoryBot.create(:models_vm, instance: instance, active: false) } + + it 'responds with error' do + expect { provide_dynamic_disk_job.perform }.to raise_error("no active vm found for instance `#{instance.uuid}`") + end + end + + context 'when instance has no vms' do + let(:vm) { FactoryBot.create(:models_vm) } + + it 'responds with error' do + expect { provide_dynamic_disk_job.perform }.to raise_error("no active vm found for instance `#{instance.uuid}`") + end + end + end + end +end diff --git a/src/bosh-director/spec/unit/bosh/director/lock_helper_spec.rb b/src/bosh-director/spec/unit/bosh/director/lock_helper_spec.rb index cf885c506e2..50e8494130c 100644 --- a/src/bosh-director/spec/unit/bosh/director/lock_helper_spec.rb +++ b/src/bosh-director/spec/unit/bosh/director/lock_helper_spec.rb @@ -124,6 +124,20 @@ class TestClass end end + describe :with_vm_lock do + it 'creates a lock for the given vm cid' do + lock = double(:lock) + allow(Lock).to receive(:new).with('lock:vm:bar', timeout: 5).and_return(lock) + expect(lock).to receive(:lock).and_yield + + called = false + @test_instance.with_vm_lock('bar', timeout: 5) do + called = true + end + expect(called).to be(true) + end + end + describe :with_release_lock do it 'creates a lock for the given name' do lock = double(:lock) diff --git a/src/bosh-director/spec/unit/bosh/director/permission_authorizer_spec.rb b/src/bosh-director/spec/unit/bosh/director/permission_authorizer_spec.rb index 3f38db7aa8d..7518100a54a 100644 --- a/src/bosh-director/spec/unit/bosh/director/permission_authorizer_spec.rb +++ b/src/bosh-director/spec/unit/bosh/director/permission_authorizer_spec.rb @@ -328,6 +328,60 @@ module Bosh::Director it_behaves_like :admin_read_team_admin_scopes end + describe 'checking update_dynamic_disks rights' do + let(:acl_right) { :update_dynamic_disks } + it 'allows bosh.dynamic_disks.update scope' do + expect(subject.is_granted?(acl_subject, acl_right, ['bosh.dynamic_disks.update'])).to eq(true) + end + + it 'allows bosh.admin scope' do + expect(subject.is_granted?(acl_subject, acl_right, ['bosh.admin'])).to eq(true) + end + + it 'allows bosh.X.admin scope' do + expect(subject.is_granted?(acl_subject, acl_right, ['bosh.fake-director-uuid.admin'])).to eq(true) + end + + it 'denies others' do + expect(subject.is_granted?(acl_subject, acl_right, [ + 'bosh.unexpected-uuid.admin', + 'bosh.read', + 'bosh.teams.anything.admin', + 'bosh.teams.anything.read', + 'bosh.fake-director-uuid.read', + 'bosh.unexpected-uuid.read', + 'bosh.teams.security.unexpected', + ])).to eq(false) + end + end + + describe 'checking delete_dynamic_disks rights' do + let(:acl_right) { :delete_dynamic_disks } + it 'allows bosh.dynamic_disks.delete scope' do + expect(subject.is_granted?(acl_subject, acl_right, ['bosh.dynamic_disks.delete'])).to eq(true) + end + + it 'allows bosh.admin scope' do + expect(subject.is_granted?(acl_subject, acl_right, ['bosh.admin'])).to eq(true) + end + + it 'allows 
bosh.X.admin scope' do + expect(subject.is_granted?(acl_subject, acl_right, ['bosh.fake-director-uuid.admin'])).to eq(true) + end + + it 'denies others' do + expect(subject.is_granted?(acl_subject, acl_right, [ + 'bosh.unexpected-uuid.admin', + 'bosh.read', + 'bosh.teams.anything.admin', + 'bosh.teams.anything.read', + 'bosh.fake-director-uuid.read', + 'bosh.unexpected-uuid.read', + 'bosh.teams.security.unexpected', + ])).to eq(false) + end + end + describe 'checking for invalid rights' do let(:acl_right) { :what_I_fancy } diff --git a/src/bosh-director/spec/unit/bosh/director/problem_handlers/inactive_disk_spec.rb b/src/bosh-director/spec/unit/bosh/director/problem_handlers/inactive_disk_spec.rb index 48f459c3bca..e8da45cd59d 100644 --- a/src/bosh-director/spec/unit/bosh/director/problem_handlers/inactive_disk_spec.rb +++ b/src/bosh-director/spec/unit/bosh/director/problem_handlers/inactive_disk_spec.rb @@ -108,8 +108,9 @@ def make_handler(disk_id, data = {}) }.to raise_error(Bosh::Director::ProblemHandlerError, 'Disk is currently in use') end - it 'detaches disk from VM and deletes it and its snapshots from DB (if instance has VM)' do + it 'detaches disk from VM with VM lock and deletes it and its snapshots from DB (if instance has VM)' do expect(@agent).to receive(:list_disk).and_return(['other-disk']) + expect(@handler).to receive(:with_vm_lock).with(@instance.vm_cid).and_yield expect(cloud).to receive(:detach_disk).with(@instance.vm_cid, 'disk-cid') expect(cloud_factory).to receive(:get).with(@instance.active_vm.cpi, 25).and_return(cloud) @handler.apply_resolution(:delete_disk) diff --git a/src/bosh-director/spec/unit/bosh/director/problem_handlers/missing_disk_spec.rb b/src/bosh-director/spec/unit/bosh/director/problem_handlers/missing_disk_spec.rb index 1193c426319..cb8dfff3211 100644 --- a/src/bosh-director/spec/unit/bosh/director/problem_handlers/missing_disk_spec.rb +++ b/src/bosh-director/spec/unit/bosh/director/problem_handlers/missing_disk_spec.rb @@ -92,12 +92,13 @@ def self.it_ignores_cloud_disk_errors it_ignores_cloud_disk_errors - it 'deactivates, unmounts, detaches, deletes snapshots, deletes disk reference' do + it 'deactivates, unmounts, detaches with VM lock, deletes snapshots, deletes disk reference' do expect(agent_client).to receive(:unmount_disk) do db_disk = Bosh::Director::Models::PersistentDisk[disk.id] expect(db_disk.active).to be(false) end.ordered + expect(handler).to receive(:with_vm_lock).with('vm-cid').and_yield.ordered expect(cloud).to receive(:detach_disk).with('vm-cid', 'disk-cid').ordered handler.apply_resolution(:delete_disk_reference) @@ -115,6 +116,7 @@ def self.it_ignores_cloud_disk_errors end it 'detaches disk, deletes snapshots, deletes disk reference' do + expect(handler).to receive(:with_vm_lock).with('vm-cid').and_yield.ordered expect(cloud).to receive(:detach_disk).with('vm-cid', 'disk-cid').ordered handler.apply_resolution(:delete_disk_reference) @@ -132,6 +134,7 @@ def self.it_ignores_cloud_disk_errors end it 'raises the error' do + expect(handler).to_not receive(:with_vm_lock).with('vm-cid').ordered expect(cloud).to_not receive(:detach_disk).with('vm-cid', 'disk-cid').ordered expect(cloud).to_not receive(:delete_snapshot).with('snapshot-cid').ordered @@ -155,6 +158,7 @@ def self.it_ignores_cloud_disk_errors it 'deactivates, detaches, deletes snapshots, deletes disk reference' do expect(agent_client).to_not receive(:unmount_disk) + expect(handler).to receive(:with_vm_lock).with('vm-cid').and_yield.ordered expect(cloud).to
receive(:detach_disk).with('vm-cid', 'disk-cid') do db_disk = Bosh::Director::Models::PersistentDisk[disk.id] expect(db_disk.active).to be(false) @@ -177,6 +181,7 @@ def self.it_ignores_cloud_disk_errors it 'detaches disk, deletes snapshots, deletes disk reference' do expect(agent_client).to_not receive(:unmount_disk) + expect(handler).to receive(:with_vm_lock).with('vm-cid').and_yield.ordered expect(cloud).to receive(:detach_disk).ordered handler.apply_resolution(:delete_disk_reference) @@ -197,6 +202,7 @@ def self.it_ignores_cloud_disk_errors it 'deletes snapshots, deletes disk reference' do expect(agent_client).to_not receive(:unmount_disk) + expect(handler).to_not receive(:with_vm_lock) expect(cloud).to_not receive(:detach_disk) handler.apply_resolution(:delete_disk_reference) diff --git a/src/bosh-director/spec/unit/bosh/director/problem_handlers/mount_info_mismatch_spec.rb b/src/bosh-director/spec/unit/bosh/director/problem_handlers/mount_info_mismatch_spec.rb index 6d139c33f6b..77028b42fd1 100644 --- a/src/bosh-director/spec/unit/bosh/director/problem_handlers/mount_info_mismatch_spec.rb +++ b/src/bosh-director/spec/unit/bosh/director/problem_handlers/mount_info_mismatch_spec.rb @@ -11,6 +11,7 @@ def make_handler(disk_id, data = {}) let(:manifest) do { 'tags' => { 'mytag' => 'myvalue' } } end + let(:current_job) { instance_double(Bosh::Director::Jobs::CloudCheck::ApplyResolutions, username: 'user', task_id: 42) } before(:each) do @agent = double('agent') @@ -37,6 +38,7 @@ def make_handler(disk_id, data = {}) allow(Bosh::Director::AZCloudFactory).to receive(:create_from_deployment).and_return(az_cloud_factory) allow(Bosh::Director::CloudFactory).to receive(:create).and_return(base_cloud_factory) allow(az_cloud_factory).to receive(:get_for_az).with('az1', 25).and_return(cloud) + allow(Bosh::Director::Config).to receive(:current_job).and_return(current_job) end it 'registers under inactive_disk type' do @@ -96,6 +98,7 @@ def make_handler(disk_id, data = {}) end it 'attaches disk and reboots the vm' do + expect(@handler).to receive(:with_vm_lock).twice.with(@instance.vm_cid).and_yield expect(cloud).to receive(:attach_disk).with(@instance.vm_cid, @disk.disk_cid) expect(cloud).to receive(:reboot_vm).with(@instance.vm_cid) expect(cloud_for_update_metadata).to_not receive(:attach_disk) @@ -107,6 +110,7 @@ def make_handler(disk_id, data = {}) end it 'sets disk metadata with deployment information' do + expect(@handler).to receive(:with_vm_lock).twice.with(@instance.vm_cid).and_yield expect(cloud).to receive(:attach_disk).with(@instance.vm_cid, @disk.disk_cid) expect(cloud).to receive(:reboot_vm).with(@instance.vm_cid) expect(cloud_for_update_metadata).to_not receive(:attach_disk) @@ -144,6 +148,7 @@ def make_handler(disk_id, data = {}) end it 'attaches disk and reboots the vm' do + expect(@handler).to receive(:with_vm_lock).twice.with(@instance.vm_cid).and_yield expect(cloud).to receive(:attach_disk).with(@instance.vm_cid, @disk.disk_cid).and_return(disk_hint) expect(cloud).to receive(:reboot_vm).with(@instance.vm_cid) expect(cloud_for_update_metadata).to_not receive(:attach_disk) diff --git a/src/bosh-director/spec/unit/bosh/director/validation_helper_spec.rb b/src/bosh-director/spec/unit/bosh/director/validation_helper_spec.rb index af9ceed94fb..a38c9975780 100644 --- a/src/bosh-director/spec/unit/bosh/director/validation_helper_spec.rb +++ b/src/bosh-director/spec/unit/bosh/director/validation_helper_spec.rb @@ -179,6 +179,38 @@ def self.it_raises_unknown_object_error(hash, options) end 
end + describe 'when length is constrained' do + it 'should pass if strings do not have constraints' do + expect(obj.safe_property({ 'test' => '' }, 'test', class: String)).to eql('') + end + + it 'should pass if strings pass min constraints' do + expect(obj.safe_property({ 'test' => 'abc' }, 'test', min_length: 2)).to eql('abc') + end + + it 'should pass if strings pass max constraints' do + expect(obj.safe_property({ 'test' => 'abc' }, 'test', max_length: 4)).to eql('abc') + end + + it 'should fail if strings do not pass min constraints' do + expect do + expect(obj.safe_property({ 'test' => 'abc' }, 'test', min_length: 4)).to eql('abc') + end.to raise_error( + Bosh::Director::ValidationViolatedMin, + "'test' length (3) should be greater than 4", + ) + end + + it 'should fail if strings do not pass max constraints' do + expect do + expect(obj.safe_property({ 'test' => 'abc' }, 'test', max_length: 2)).to eql('abc') + end.to raise_error( + Bosh::Director::ValidationViolatedMax, + "'test' length (3) should be less than 2", + ) + end + end + describe 'when numeric constraints' do it 'should pass if numbers do not have constraints' do expect(obj.safe_property({ 'test' => 1 }, 'test', class: Numeric)).to eql(1) diff --git a/src/bosh-director/spec/unit/bosh/director/vm_deleter_spec.rb b/src/bosh-director/spec/unit/bosh/director/vm_deleter_spec.rb index 1bcec0c5208..84564bfe0a9 100644 --- a/src/bosh-director/spec/unit/bosh/director/vm_deleter_spec.rb +++ b/src/bosh-director/spec/unit/bosh/director/vm_deleter_spec.rb @@ -8,6 +8,7 @@ module Director let(:cloud) { instance_double(Bosh::Clouds::ExternalCpi) } let(:cloud_factory) { instance_double(CloudFactory) } let(:deployment) { FactoryBot.create(:models_deployment, name: 'deployment_name') } + let(:current_job) { instance_double(Bosh::Director::Jobs::CloudCheck::ApplyResolutions, username: 'user', task_id: 42) } let(:vm_model) { FactoryBot.create(:models_vm, cid: 'vm-cid', instance_id: instance_model.id, cpi: 'cpi1') } let(:instance_model) do FactoryBot.create(:models_instance, @@ -30,6 +31,7 @@ module Director allow(CloudFactory).to receive(:create).and_return(cloud_factory) allow(cloud_factory).to receive(:get).with(nil, nil).and_return(cloud) + allow(Bosh::Director::Config).to receive(:current_job).and_return(current_job) end describe '#delete_for_instance' do @@ -95,8 +97,9 @@ module Director allow(cloud_factory).to receive(:get).with('cpi1', 25).and_return(cloud) end - it 'calls delete_vm' do + it 'calls delete_vm with vm lock' do expect(per_spec_logger).to receive(:info).with('Deleting VM') + expect(per_spec_logger).to receive(:info).with("Acquiring VM lock on vm-cid") expect(cloud).to receive(:delete_vm).with(vm_model.cid) subject.delete_vm_by_cid(vm_model.cid, 25, 'cpi1') end diff --git a/src/bosh-director/spec/unit/bosh/director/worker_spec.rb b/src/bosh-director/spec/unit/bosh/director/worker_spec.rb index 67342c6db1f..eabf2748c76 100644 --- a/src/bosh-director/spec/unit/bosh/director/worker_spec.rb +++ b/src/bosh-director/spec/unit/bosh/director/worker_spec.rb @@ -12,7 +12,7 @@ def require(path) module Bosh::Director describe Worker do - subject(:worker) { Worker.new(config, 0) } + subject(:worker) { Worker.new(config, "worker_1") } let(:config) { Config.load_hash(SpecHelper.director_config_hash) } @@ -111,7 +111,7 @@ module Bosh::Director expect(event.user).to eq('_director') expect(event.action).to eq('start') expect(event.object_type).to eq('worker') - expect(event.object_name).to eq('worker_0') + expect(event.object_name).to 
eq('worker_1') expect(event.context).to eq({}) end end
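
Reviewer note, not part of the patch: the lock_helper_spec addition above drives a new `with_vm_lock` helper through `Lock.new('lock:vm:bar', timeout: 5)` and a yielded `#lock` block, the step and problem-handler specs expect an "Acquiring VM lock on <cid>" log line, and the dynamic-disk job specs pass `timeout: 60`. A minimal sketch of what those expectations imply is below; the logger call, the default timeout, and the exact module layout are assumptions, since the implementation file is not part of this excerpt.

module Bosh::Director
  module LockHelper
    # Sketch only: serialize disk attach/detach/delete against a single VM by
    # taking a named lock on the VM cid, mirroring with_deployment_lock et al.
    def with_vm_lock(vm_cid, opts = {})
      Config.logger.info("Acquiring VM lock on #{vm_cid}") # logger choice is an assumption
      timeout = opts.fetch(:timeout, 10)                   # default timeout is an assumption
      Lock.new("lock:vm:#{vm_cid}", timeout: timeout).lock { yield }
    end
  end
end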