diff --git a/.ansible-lint b/.ansible-lint new file mode 100644 index 000000000..03e343dd1 --- /dev/null +++ b/.ansible-lint @@ -0,0 +1,37 @@ +--- +# NOTE(priteau): Rule file imported from kolla-ansible +strict: true +use_default_rules: true +skip_list: + # [E301] Commands should not change things if nothing needs doing + # TODO(mnasiadka): Fix tasks that fail this check in a later iteration + - no-changed-when + # [E503] Tasks that run when changed should likely be handlers + - no-handler + # [unnamed-task] All tasks should be named + # FIXME(mgoddard): Add names to all tasks + - unnamed-task + # Package installs should not use latest + - package-latest + # NOTE(frickler): Agreed at Zed PTG not to use FQCN for builtin actions for now, due to + # conflicts with open patches and backports. + - fqcn-builtins + # Allow Jinja templating inside task and play names + - name[template] + # FQCNs again, now for module actions + - fqcn[action] + # role name check matching ^*$ + - role-name + # Allow long lines + - yaml[line-length] + # TODO(frickler): Discuss these in detail, skipping for now to unblock things + - command-instead-of-module + - command-instead-of-shell + - deprecated-local-action + - key-order[task] + - name[play] + - risky-file-permissions + - risky-shell-pipe + - run-once[task] + - var-naming[no-reserved] + - var-naming[no-role-prefix] diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 000000000..e9a948a0d --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1 @@ +* @stackhpc/kayobe diff --git a/.github/workflows/tag-and-release.yml b/.github/workflows/tag-and-release.yml new file mode 100644 index 000000000..cff2f940d --- /dev/null +++ b/.github/workflows/tag-and-release.yml @@ -0,0 +1,12 @@ +--- +name: Tag & Release +'on': + push: + branches: + - stackhpc/master +permissions: + actions: read + contents: write +jobs: + tag-and-release: + uses: stackhpc/.github/.github/workflows/tag-and-release.yml@main diff --git 
a/.github/workflows/tox.yml b/.github/workflows/tox.yml new file mode 100644 index 000000000..8713f0e02 --- /dev/null +++ b/.github/workflows/tox.yml @@ -0,0 +1,7 @@ +--- +name: Tox Continuous Integration +'on': + pull_request: +jobs: + tox: + uses: stackhpc/.github/.github/workflows/tox.yml@main diff --git a/.gitignore b/.gitignore index f8b8b4942..edc6eab5a 100644 --- a/.gitignore +++ b/.gitignore @@ -58,6 +58,7 @@ ansible/*.retry ansible/roles/*/tests/*.retry # Ansible Galaxy roles & collections +.ansible ansible/roles/*\.*/ ansible/collections/ diff --git a/ansible/action_plugins/template_content.py b/ansible/action_plugins/template_content.py new file mode 100644 index 000000000..88eae2588 --- /dev/null +++ b/ansible/action_plugins/template_content.py @@ -0,0 +1,19 @@ +# Copyright (c) 2025 StackHPC Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +__metaclass__ = type + +import kayobe.plugins.action.template_content + +ActionModule = kayobe.plugins.action.template_content.ActionModule diff --git a/ansible/apt.yml b/ansible/apt.yml index 0172249e0..1dabb4038 100644 --- a/ansible/apt.yml +++ b/ansible/apt.yml @@ -1,6 +1,6 @@ --- - name: Ensure APT is configured - hosts: seed-hypervisor:seed:overcloud:infra-vms + hosts: seed-hypervisor:seed:overcloud:infra-vms:ansible-control max_fail_percentage: >- {{ apt_max_fail_percentage | default(host_configure_max_fail_percentage) | @@ -11,7 +11,7 @@ tags: - apt tasks: - - name: include apt role + - name: Include apt role include_role: name: apt when: ansible_facts.os_family == 'Debian' diff --git a/ansible/baremetal-compute-introspection-data-save.yml b/ansible/baremetal-compute-introspection-data-save.yml index 85f17ce31..28ae86c96 100644 --- a/ansible/baremetal-compute-introspection-data-save.yml +++ b/ansible/baremetal-compute-introspection-data-save.yml @@ -8,11 +8,11 @@ - name: Set up openstack cli virtualenv pip: virtualenv: "{{ venv }}" - virtualenv_command: python3 -m venv name: - python-openstackclient - - python-ironic-inspector-client + - python-ironicclient state: latest + virtualenv_command: python3.{{ ansible_facts.python.version.minor }} -m venv extra_args: "{% if pip_upper_constraints_file %}-c {{ pip_upper_constraints_file }}{% endif %}" - name: Ensure the baremetal compute nodes' hardware introspection data is saved @@ -31,7 +31,7 @@ tasks: - name: Query baremetal compute nodes' hardware introspection data command: > - {{ venv }}/bin/openstack baremetal introspection data save {{ inventory_hostname }} + {{ venv }}/bin/openstack baremetal node inventory save {{ inventory_hostname }} register: save_result changed_when: False # Ignore errors, log a message later. 
@@ -44,14 +44,14 @@ ansible_host: "{{ hostvars[controller_host].ansible_host | default(controller_host) }}" - name: Ensure introspection data output directory exists - local_action: - module: file + delegate_to: localhost + file: path: "{{ output_dir }}" state: directory - name: Ensure introspection data is saved locally - local_action: - module: copy + delegate_to: localhost + copy: content: "{{ introspection_data_map[output_format | lower] }}" dest: "{{ output_dir }}/{{ inventory_hostname }}.{{ output_format | lower }}" when: save_result.rc == 0 diff --git a/ansible/baremetal-compute-register.yml b/ansible/baremetal-compute-register.yml index 0dedfe527..95b18f97e 100644 --- a/ansible/baremetal-compute-register.yml +++ b/ansible/baremetal-compute-register.yml @@ -39,42 +39,42 @@ fail_msg: One or more Ironic variables are undefined. - block: - - name: Show baremetal node - ansible.builtin.command: - cmd: "{{ venv }}/bin/openstack baremetal node show {{ inventory_hostname }}" - register: node_show - failed_when: - - '"HTTP 404" not in node_show.stderr' - - node_show.rc != 0 - changed_when: false + - name: Show baremetal node + ansible.builtin.command: + cmd: "{{ venv }}/bin/openstack baremetal node show {{ inventory_hostname }}" + register: node_show + failed_when: + - '"HTTP 404" not in node_show.stderr' + - node_show.rc != 0 + changed_when: false - # NOTE: The openstack.cloud.baremetal_node module cannot be used in this - # script due to requiring a MAC address pre-defined, instead, this should - # be discovered by inpsection following this script. - # - # NOTE: IPMI address must be passed with Redfish address to ensure existing - # Ironic nodes match with new nodes during inspection. 
- - name: Create baremetal nodes - ansible.builtin.shell: - cmd: | - {{ venv }}/bin/openstack baremetal node create \ - --name {{ inventory_hostname }} \ - --driver {{ ironic_driver }} \ - {% for key, value in ironic_driver_info.items() %} - --driver-info {{ key }}={{ value }} \ - {% endfor %} - {% for key, value in ironic_properties.items() %} - --property {{ key }}={{ value }} \ - {% endfor %} - --resource-class {{ ironic_resource_class }} - when: - - node_show.rc != 0 + # NOTE: The openstack.cloud.baremetal_node module cannot be used in this + # script due to requiring a MAC address pre-defined, instead, this should + # be discovered by inpsection following this script. + # + # NOTE: IPMI address must be passed with Redfish address to ensure existing + # Ironic nodes match with new nodes during inspection. + - name: Create baremetal nodes + ansible.builtin.shell: + cmd: | + {{ venv }}/bin/openstack baremetal node create \ + --name {{ inventory_hostname }} \ + --driver {{ ironic_driver }} \ + {% for key, value in ironic_driver_info.items() %} + --driver-info {{ key }}={{ value }} \ + {% endfor %} + {% for key, value in ironic_properties.items() %} + --property {{ key }}={{ value }} \ + {% endfor %} + --resource-class {{ ironic_resource_class }} + when: + - node_show.rc != 0 - - name: Manage baremetal nodes - ansible.builtin.command: - cmd: "{{ venv }}/bin/openstack baremetal node manage {{ inventory_hostname }} --wait" - when: - - node_show.rc != 0 + - name: Manage baremetal nodes + ansible.builtin.command: + cmd: "{{ venv }}/bin/openstack baremetal node manage {{ inventory_hostname }} --wait" + when: + - node_show.rc != 0 delegate_to: "{{ controller_host }}" vars: # NOTE: Without this, the controller's ansible_host variable will not diff --git a/ansible/baremetal-compute-rename.yml b/ansible/baremetal-compute-rename.yml index d1ec5ddf8..7a4ef1cdc 100644 --- a/ansible/baremetal-compute-rename.yml +++ b/ansible/baremetal-compute-rename.yml @@ -5,7 +5,7 @@ - name: 
Rename baremetal compute nodes hosts: controllers[0] - gather_facts: False + gather_facts: True vars: venv: "{{ virtualenv_path }}/openstack-cli" pre_tasks: @@ -16,6 +16,7 @@ - python-openstackclient - python-ironicclient state: latest + virtualenv_command: "python3.{{ ansible_facts.python.version.minor }} -m venv" extra_args: "{% if pip_upper_constraints_file %}-c {{ pip_upper_constraints_file }}{% endif %}" - name: Rename baremetal compute nodes @@ -53,7 +54,7 @@ - name: Rename baremetal compute nodes command: > - {{ venv }}/bin/openstack baremetal node set --name "{{ inventory_hostname }}" "{{ node['UUID'] }}" + {{ venv }}/bin/openstack baremetal node set --name "{{ inventory_hostname }}" "{{ node['uuid'] }}" delegate_to: "{{ controller_host }}" environment: "{{ openstack_auth_env }}" vars: @@ -61,8 +62,8 @@ # be respected when using delegate_to. ansible_host: "{{ hostvars[controller_host].ansible_host | default(controller_host) }}" ipmi_address: "{{ hostvars[inventory_hostname].ipmi_address }}" - matching_nodes: "{{ (nodes.stdout | from_json) | selectattr('Driver Info.ipmi_address', 'defined') | selectattr('Driver Info.ipmi_address', 'equalto', ipmi_address) | list }}" + matching_nodes: "{{ (nodes.stdout | from_json) | selectattr('driver_info.ipmi_address', 'defined') | selectattr('driver_info.ipmi_address', 'equalto', ipmi_address) | list }}" node: "{{ matching_nodes | first }}" when: - matching_nodes | length > 0 - - node['Name'] != inventory_hostname + - node['name'] != inventory_hostname diff --git a/ansible/baremetal-compute-serial-console.yml b/ansible/baremetal-compute-serial-console.yml index d6fedccb0..cd987a485 100644 --- a/ansible/baremetal-compute-serial-console.yml +++ b/ansible/baremetal-compute-serial-console.yml @@ -5,7 +5,7 @@ - name: Setup OpenStack Environment hosts: controllers[0] - gather_facts: False + gather_facts: True vars: venv: "{{ virtualenv_path }}/openstack-cli" pre_tasks: @@ -16,38 +16,37 @@ - python-openstackclient - 
python-ironicclient state: latest + virtualenv_command: "python3.{{ ansible_facts.python.version.minor }} -m venv" extra_args: "{% if pip_upper_constraints_file %}-c {{ pip_upper_constraints_file }}{% endif %}" - block: - - name: Fail if allocation pool start not defined - fail: - msg: > - The variable, ironic_serial_console_tcp_pool_start is not defined. - This variable is required to run this playbook. - when: not ironic_serial_console_tcp_pool_start + - name: Fail if allocation pool start not defined + fail: + msg: > + The variable, ironic_serial_console_tcp_pool_start is not defined. + This variable is required to run this playbook. + when: not ironic_serial_console_tcp_pool_start - - name: Fail if allocation pool end not defined - fail: - msg: > - The variable, ironic_serial_console_tcp_pool_end is not defined. - This variable is required to run this playbook. - when: - - not ironic_serial_console_tcp_pool_end + - name: Fail if allocation pool end not defined + fail: + msg: > + The variable, ironic_serial_console_tcp_pool_end is not defined. + This variable is required to run this playbook. 
+ when: + - not ironic_serial_console_tcp_pool_end - - name: Get list of nodes that we should configure serial consoles on - set_fact: - baremetal_nodes: >- - {{ query('inventory_hostnames', console_compute_node_limit | - default('baremetal-compute') ) | unique }} + - name: Get list of nodes that we should configure serial consoles on + set_fact: + baremetal_nodes: "{{ query('inventory_hostnames', console_compute_node_limit | default('baremetal-compute')) | unique }}" # noqa jinja[invalid] - - name: Reserve TCP ports for ironic serial consoles - include_role: - name: console-allocation - vars: - console_allocation_pool_start: "{{ ironic_serial_console_tcp_pool_start }}" - console_allocation_pool_end: "{{ ironic_serial_console_tcp_pool_end }}" - console_allocation_ironic_nodes: "{{ baremetal_nodes }}" - console_allocation_filename: "{{ kayobe_env_config_path }}/console-allocation.yml" + - name: Reserve TCP ports for ironic serial consoles + include_role: + name: console-allocation + vars: + console_allocation_pool_start: "{{ ironic_serial_console_tcp_pool_start }}" + console_allocation_pool_end: "{{ ironic_serial_console_tcp_pool_end }}" + console_allocation_ironic_nodes: "{{ baremetal_nodes }}" + console_allocation_filename: "{{ kayobe_env_config_path }}/console-allocation.yml" when: cmd == "enable" - name: Enable serial console @@ -76,41 +75,41 @@ ansible_host: "{{ hostvars[controller_host].ansible_host | default(controller_host) }}" - block: - - name: Fail if console interface is not ipmitool-socat - fail: - msg: >- - In order to use the serial console you must set the console_interface to ipmitool-socat. - when: node["Console Interface"] != "ipmitool-socat" + - name: Fail if console interface is not ipmitool-socat + fail: + msg: >- + In order to use the serial console you must set the console_interface to ipmitool-socat. 
+ when: node["console_interface"] != "ipmitool-socat" - - name: Set IPMI serial console terminal port - vars: - name: "{{ node['Name'] }}" - port: "{{ hostvars[controller_host].console_allocation_result.ports[name] }}" - # NOTE: Without this, the controller's ansible_host variable will not - # be respected when using delegate_to. - ansible_host: "{{ hostvars[controller_host].ansible_host | default(controller_host) }}" - command: > - {{ venv }}/bin/openstack baremetal node set {{ name }} --driver-info ipmi_terminal_port={{ port }} - delegate_to: "{{ controller_host }}" - environment: "{{ openstack_auth_env }}" - when: >- - node['Driver Info'].ipmi_terminal_port is not defined or - node['Driver Info'].ipmi_terminal_port | int != port | int + - name: Set IPMI serial console terminal port + vars: + name: "{{ node['name'] }}" + port: "{{ hostvars[controller_host].console_allocation_result.ports[name] }}" + # NOTE: Without this, the controller's ansible_host variable will not + # be respected when using delegate_to. + ansible_host: "{{ hostvars[controller_host].ansible_host | default(controller_host) }}" + command: > + {{ venv }}/bin/openstack baremetal node set {{ name }} --driver-info ipmi_terminal_port={{ port }} + delegate_to: "{{ controller_host }}" + environment: "{{ openstack_auth_env }}" + when: >- + node['driver_info'].ipmi_terminal_port is not defined or + node['driver_info'].ipmi_terminal_port | int != port | int - - name: Enable the IPMI socat serial console - vars: - # NOTE: Without this, the controller's ansible_host variable will not - # be respected when using delegate_to. 
- ansible_host: "{{ hostvars[controller_host].ansible_host | default(controller_host) }}" - command: > - {{ venv }}/bin/openstack baremetal node console enable {{ node['Name'] }} - delegate_to: "{{ controller_host }}" - environment: "{{ openstack_auth_env }}" - when: not node['Console Enabled'] + - name: Enable the IPMI socat serial console + vars: + # NOTE: Without this, the controller's ansible_host variable will not + # be respected when using delegate_to. + ansible_host: "{{ hostvars[controller_host].ansible_host | default(controller_host) }}" + command: > + {{ venv }}/bin/openstack baremetal node console enable {{ node['name'] }} + delegate_to: "{{ controller_host }}" + environment: "{{ openstack_auth_env }}" + when: not node['console_enabled'] vars: matching_nodes: >- - {{ (nodes.stdout | from_json) | selectattr('Name', 'defined') | - selectattr('Name', 'equalto', inventory_hostname ) | list }} + {{ (nodes.stdout | from_json) | selectattr('name', 'defined') | + selectattr('name', 'equalto', inventory_hostname) | list }} node: "{{ matching_nodes | first }}" when: - cmd == "enable" @@ -123,14 +122,14 @@ # be respected when using delegate_to. 
ansible_host: "{{ hostvars[controller_host].ansible_host | default(controller_host) }}" command: > - {{ venv }}/bin/openstack baremetal node console disable {{ node['Name'] }} + {{ venv }}/bin/openstack baremetal node console disable {{ node['name'] }} delegate_to: "{{ controller_host }}" environment: "{{ openstack_auth_env }}" - when: node['Console Enabled'] + when: node['console_enabled'] vars: matching_nodes: >- - {{ (nodes.stdout | from_json) | selectattr('Name', 'defined') | - selectattr('Name', 'equalto', inventory_hostname ) | list }} + {{ (nodes.stdout | from_json) | selectattr('name', 'defined') | + selectattr('name', 'equalto', inventory_hostname) | list }} node: "{{ matching_nodes | first }}" when: - cmd == "disable" diff --git a/ansible/compute-node-discovery.yml b/ansible/compute-node-discovery.yml index 84f8bca35..60d5674af 100644 --- a/ansible/compute-node-discovery.yml +++ b/ansible/compute-node-discovery.yml @@ -51,7 +51,7 @@ # be respected when using delegate_to. ansible_host: "{{ hostvars[controller_host].ansible_host | default(controller_host) }}" - - name: Pause to prevent overwhelming BMCs + - name: Pause again to prevent overwhelming BMCs pause: seconds: 5 diff --git a/ansible/container-engine.yml b/ansible/container-engine.yml index 988bed210..7e6b30a23 100644 --- a/ansible/container-engine.yml +++ b/ansible/container-engine.yml @@ -19,7 +19,7 @@ docker_http_proxy: "{{ kolla_http_proxy }}" docker_https_proxy: "{{ kolla_https_proxy }}" docker_no_proxy: "{{ kolla_no_proxy | select | join(',') }}" - when: container_engine == "docker" + when: container_engine_enabled | default(true) | bool and container_engine == "docker" - name: Ensure podman is configured hosts: container-engine @@ -34,4 +34,4 @@ tasks: - include_role: name: openstack.kolla.podman - when: container_engine == "podman" + when: container_engine_enabled | default(true) | bool and container_engine == "podman" diff --git a/ansible/container-image-build.yml 
b/ansible/container-image-build.yml index 16e4f8071..026c5cec7 100644 --- a/ansible/container-image-build.yml +++ b/ansible/container-image-build.yml @@ -53,7 +53,7 @@ kolla-build --config-dir {{ kolla_build_config_path }} --engine {{ container_engine }} - {% if kolla_docker_registry is not none %}--registry {{ kolla_docker_registry }}{% endif %} + {% if kolla_docker_registry %}--registry {{ kolla_docker_registry }}{% endif %} {% if push_images | bool %}--push{% endif %} {% if nocache | bool %}--nocache{% endif %} {% if kolla_base_arch != ansible_facts.architecture %}--platform {{ platform }}{% endif %} diff --git a/ansible/control-host-configure.yml b/ansible/control-host-configure.yml new file mode 100644 index 000000000..fb58add50 --- /dev/null +++ b/ansible/control-host-configure.yml @@ -0,0 +1,24 @@ +--- +- import_playbook: "ssh-known-host.yml" +- import_playbook: "kayobe-ansible-user.yml" +- import_playbook: "logging.yml" +- import_playbook: "proxy.yml" +- import_playbook: "apt.yml" +- import_playbook: "dnf.yml" +- import_playbook: "pip.yml" +- import_playbook: "kayobe-target-venv.yml" +- import_playbook: "wipe-disks.yml" +- import_playbook: "users.yml" +- import_playbook: "dev-tools.yml" +- import_playbook: "selinux.yml" +- import_playbook: "network.yml" +- import_playbook: "firewall.yml" +- import_playbook: "fail2ban.yml" +- import_playbook: "tuned.yml" +- import_playbook: "sysctl.yml" +- import_playbook: "time.yml" +- import_playbook: "mdadm.yml" +- import_playbook: "luks.yml" +- import_playbook: "lvm.yml" +- import_playbook: "swap.yml" +- import_playbook: "container-engine.yml" diff --git a/ansible/dev-tools.yml b/ansible/dev-tools.yml index 5f6f3ed32..228d1643c 100644 --- a/ansible/dev-tools.yml +++ b/ansible/dev-tools.yml @@ -1,6 +1,6 @@ --- - name: Ensure development tools are installed - hosts: seed-hypervisor:seed:overcloud:infra-vms + hosts: seed-hypervisor:seed:overcloud:infra-vms:ansible-control max_fail_percentage: >- {{ 
dev_tools_max_fail_percentage | default(host_configure_max_fail_percentage) | diff --git a/ansible/dnf.yml b/ansible/dnf.yml index 73999b911..3aa99c486 100644 --- a/ansible/dnf.yml +++ b/ansible/dnf.yml @@ -1,6 +1,6 @@ --- - name: Ensure DNF repos are configured - hosts: seed-hypervisor:seed:overcloud:infra-vms + hosts: seed-hypervisor:seed:overcloud:infra-vms:ansible-control max_fail_percentage: >- {{ dnf_max_fail_percentage | default(host_configure_max_fail_percentage) | diff --git a/ansible/docker-registry.yml b/ansible/docker-registry.yml index c721e023a..2fb05bb90 100644 --- a/ansible/docker-registry.yml +++ b/ansible/docker-registry.yml @@ -12,5 +12,5 @@ - docker-registry roles: - role: docker-registry - docker_registry_action: "{{ kayobe_action }}" + docker_registry_action: "{{ kayobe_action | default('deploy') }}" docker_registry_config_path: "{{ config_path }}/docker-registry" diff --git a/ansible/drac-facts.yml b/ansible/drac-facts.yml index 41bf7e486..83af97d00 100644 --- a/ansible/drac-facts.yml +++ b/ansible/drac-facts.yml @@ -11,8 +11,8 @@ - role: stackhpc.drac-facts tasks: - name: Gather facts via DRAC - local_action: - module: drac_facts + delegate_to: localhost + drac_facts: address: "{{ ipmi_address }}" username: "{{ ipmi_username }}" password: "{{ ipmi_password }}" @@ -23,7 +23,7 @@ var: result - name: Write facts to a file - local_action: - module: copy + delegate_to: localhost + copy: content: "{{ result }}" dest: "/tmp/drac-facts-{{ inventory_hostname }}.json" diff --git a/ansible/dump-config.yml b/ansible/dump-config.yml index 46ebc089f..f740a063a 100644 --- a/ansible/dump-config.yml +++ b/ansible/dump-config.yml @@ -20,27 +20,27 @@ dump_hosts: all tasks: - name: Create configuration dump directory - local_action: - module: file + delegate_to: localhost + file: path: "{{ dump_path }}" state: directory - name: Write host config to file - local_action: - module: copy + delegate_to: localhost + copy: content: "{{ hostvars[inventory_hostname] | 
to_nice_yaml }}" dest: "{{ dump_path }}/{{ inventory_hostname }}.yml" when: dump_var_name is not defined - name: Write host variable to file - local_action: - module: copy + delegate_to: localhost + copy: content: "{{ hostvars[inventory_hostname][dump_var_name] | to_nice_yaml }}" dest: "{{ dump_path }}/{{ inventory_hostname }}.yml" when: dump_var_name is defined # - name: Write merged config to file -# local_action: -# module: copy +# delegate_to: localhost +# copy: # content: "{{ hostvars | merge_config | to_nice_yaml }}" # dest: "{{ dump_path }}/merged.yml diff --git a/ansible/external-net.yml b/ansible/external-net.yml deleted file mode 100644 index e3e7a89fd..000000000 --- a/ansible/external-net.yml +++ /dev/null @@ -1,25 +0,0 @@ ---- -- name: Ensure external network and subnet are registered in neutron - # Only required to run on a single host. - hosts: controllers[0] - roles: - - role: stackhpc.openstack.os_networks - os_openstacksdk_install_epel: "{{ dnf_install_epel }}" - os_openstacksdk_upper_constraints_file: "{{ openstacksdk_upper_constraints_file }}" - os_networks_venv: "{{ virtualenv_path }}/openstacksdk" - os_networks_openstack_auth_type: "{{ openstack_auth_type }}" - os_networks_openstack_auth: "{{ openstack_auth }}" - # Network configuration. - os_networks_name: "{{ item }}" - os_networks_type: "{% if item | net_vlan %}vlan{% else %}flat{% endif %}" - os_networks_physical_network: "physnet1" - os_networks_segmentation_id: "{{ item | net_vlan }}" - os_networks_shared: True - os_networks_external: True - # Subnet configuration. 
- os_networks_subnet_name: "{{ item }}" - os_networks_cidr: "{{ item | net_cidr }}" - os_networks_gateway_ip: "{{ item | net_gateway }}" - os_networks_allocation_pool_start: "{{ item | net_neutron_allocation_pool_start }}" - os_networks_allocation_pool_end: "{{ item | net_neutron_allocation_pool_end }}" - with_items: "{{ external_net_names }}" diff --git a/ansible/fail2ban.yml b/ansible/fail2ban.yml new file mode 100644 index 000000000..5f85f0a0e --- /dev/null +++ b/ansible/fail2ban.yml @@ -0,0 +1,17 @@ +--- +- name: Configure fail2ban + hosts: seed:seed-hypervisor:overcloud:infra-vms:ansible-control + max_fail_percentage: >- + {{ fail2ban_max_fail_percentage | + default(host_configure_max_fail_percentage) | + default(kayobe_max_fail_percentage) | + default(100) }} + tags: + - fail2ban + roles: + - role: robertdebock.fail2ban + become: true + when: fail2ban_enabled | bool + vars: + # TODO (L-Chams): Remove fail2ban_sender override when PR https://github.com/robertdebock/ansible-role-fail2ban/pull/18 is merged. 
+ fail2ban_sender: root@{{ ansible_facts.fqdn }} diff --git a/ansible/firewall.yml b/ansible/firewall.yml index 8455d05f2..066b2d82c 100644 --- a/ansible/firewall.yml +++ b/ansible/firewall.yml @@ -1,6 +1,6 @@ --- - name: Ensure firewall is configured - hosts: seed-hypervisor:seed:overcloud:infra-vms + hosts: seed-hypervisor:seed:overcloud:infra-vms:ansible-control max_fail_percentage: >- {{ firewall_max_fail_percentage | default(host_configure_max_fail_percentage) | diff --git a/ansible/host-command-run.yml b/ansible/host-command-run.yml index ba5497db6..258994aa7 100644 --- a/ansible/host-command-run.yml +++ b/ansible/host-command-run.yml @@ -1,6 +1,6 @@ --- - name: Run a command - hosts: seed-hypervisor:seed:overcloud:infra-vms + hosts: seed-hypervisor:seed:overcloud:infra-vms:ansible-control gather_facts: False max_fail_percentage: >- {{ host_command_run_max_fail_percentage | diff --git a/ansible/host-package-update.yml b/ansible/host-package-update.yml index 94b014ba1..77be7c722 100644 --- a/ansible/host-package-update.yml +++ b/ansible/host-package-update.yml @@ -1,6 +1,6 @@ --- - name: Update host packages - hosts: seed-hypervisor:seed:overcloud:infra-vms + hosts: seed-hypervisor:seed:overcloud:infra-vms:ansible-control max_fail_percentage: >- {{ host_package_update_max_fail_percentage | default(kayobe_max_fail_percentage) | diff --git a/ansible/infra-vm-host-configure.yml b/ansible/infra-vm-host-configure.yml index 35d13a092..2d1595074 100644 --- a/ansible/infra-vm-host-configure.yml +++ b/ansible/infra-vm-host-configure.yml @@ -13,6 +13,7 @@ - import_playbook: "selinux.yml" - import_playbook: "network.yml" - import_playbook: "firewall.yml" +- import_playbook: "fail2ban.yml" - import_playbook: "tuned.yml" - import_playbook: "sysctl.yml" - import_playbook: "disable-glean.yml" @@ -21,4 +22,5 @@ - import_playbook: "mdadm.yml" - import_playbook: "luks.yml" - import_playbook: "lvm.yml" +- import_playbook: "swap.yml" - import_playbook: "container-engine.yml" diff 
--git a/ansible/infra-vm-provision.yml b/ansible/infra-vm-provision.yml index d13754725..9ed3834f4 100644 --- a/ansible/infra-vm-provision.yml +++ b/ansible/infra-vm-provision.yml @@ -44,5 +44,5 @@ state: started # NOTE: Ensure we exceed the 5 minute DHCP timeout of the eth0 # interface if necessary. - timeout: 360 + timeout: "{{ infra_vm_provision_timeout | default(360) | int }}" delegate_to: localhost diff --git a/ansible/inspection-store.yml b/ansible/inspection-store.yml deleted file mode 100644 index d33968e6c..000000000 --- a/ansible/inspection-store.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -# Deploy/pull/reconfigure/stop/upgrade inspection data store. -# -# Follows kolla-ansible service deployment patterns. -# -# Variables: -# action: One of deploy, destroy, pull, reconfigure, upgrade - -- name: Ensure inspection store is deployed - hosts: controllers[0] - tags: - - inspection-store - roles: - - role: inspection-store - inspection_store_action: "{{ kayobe_action }}" - inspection_store_enabled: "{{ inspector_store_enabled }}" - inspection_store_port: "{{ inspector_store_port }}" - inspection_store_config_path: "{{ config_path }}/inspection-store" diff --git a/ansible/inventory/group_vars/all/ansible-control b/ansible/inventory/group_vars/all/ansible-control new file mode 100644 index 000000000..635024df0 --- /dev/null +++ b/ansible/inventory/group_vars/all/ansible-control @@ -0,0 +1,159 @@ +--- +############################################################################### +# Ansible control host configuration. + +# User with which to access the Ansible control host via SSH during bootstrap, +# in order to setup the Kayobe user account. Default is {{ os_distribution }}. +ansible_control_bootstrap_user: "{{ os_distribution }}" + +############################################################################### +# Ansible control host network interface configuration. + +# List of networks to which Ansible control host are attached. 
+ansible_control_network_interfaces: > + {{ (ansible_control_default_network_interfaces + + ansible_control_extra_network_interfaces) | select | unique | list }} + +# List of default networks to which Ansible control host are attached. +ansible_control_default_network_interfaces: > + {{ [admin_oc_net_name] | select | unique | list }} + +# List of extra networks to which Ansible control host are attached. +ansible_control_extra_network_interfaces: [] + +############################################################################### +# Ansible control host software RAID configuration. + +# List of software RAID arrays. See mrlesmithjr.mdadm role for format. +ansible_control_mdadm_arrays: [] + +############################################################################### +# Ansible control host encryption configuration. + +# List of block devices to encrypt. See stackhpc.luks role for format. +ansible_control_luks_devices: [] + +############################################################################### +# Ansible control host LVM configuration. + +# List of Ansible control host volume groups. See mrlesmithjr.manage_lvm role +# for format. +ansible_control_lvm_groups: "{{ ansible_control_lvm_groups_default + ansible_control_lvm_groups_extra }}" + +# Default list of Ansible control host volume groups. See +# mrlesmithjr.manage_lvm role for format. +ansible_control_lvm_groups_default: "{{ [ansible_control_lvm_group_data] if ansible_control_lvm_group_data_enabled | bool else [] }}" + +# Additional list of Ansible control host volume groups. See +# mrlesmithjr.manage_lvm role for format. +ansible_control_lvm_groups_extra: [] + +# Whether a 'data' LVM volume group should exist on the Ansible control host. +# By default this contains a 'docker-volumes' logical volume for Docker volume +# storage. Default is false. +ansible_control_lvm_group_data_enabled: false + +# Ansible control host LVM volume group for data. See mrlesmithjr.manage_lvm +# role for format. 
+ansible_control_lvm_group_data: + vgname: data + disks: "{{ ansible_control_lvm_group_data_disks }}" + create: True + lvnames: "{{ ansible_control_lvm_group_data_lvs }}" + +# List of disks for use by Ansible control host LVM data volume group. Default +# to an invalid value to require configuration. +ansible_control_lvm_group_data_disks: + - changeme + +# List of LVM logical volumes for the data volume group. +ansible_control_lvm_group_data_lvs: + - "{{ ansible_control_lvm_group_data_lv_docker_volumes }}" + +# Docker volumes LVM backing volume. +ansible_control_lvm_group_data_lv_docker_volumes: + lvname: docker-volumes + size: "{{ ansible_control_lvm_group_data_lv_docker_volumes_size }}" + create: True + filesystem: "{{ ansible_control_lvm_group_data_lv_docker_volumes_fs }}" + mount: True + mntp: /var/lib/docker/volumes + +# Size of docker volumes LVM backing volume. +ansible_control_lvm_group_data_lv_docker_volumes_size: 75%VG + +# Filesystem for docker volumes LVM backing volume. ext4 allows for shrinking. +ansible_control_lvm_group_data_lv_docker_volumes_fs: ext4 + +############################################################################### +# Ansible control host sysctl configuration. + +# Dict of sysctl parameters to set. +ansible_control_sysctl_parameters: {} + +############################################################################### +# Ansible control host tuned configuration. + +# Builtin tuned profile to use. Format is same as that used by giovtorres.tuned +# role. Default is throughput-performance. +ansible_control_tuned_active_builtin_profile: "throughput-performance" + +############################################################################### +# Ansible control host user configuration. + +# List of users to create. This should be in a format accepted by the +# singleplatform-eng.users role. 
+ansible_control_users: "{{ users_default }}" + +############################################################################### +# Ansible control host firewalld configuration. + +# Whether to install and enable firewalld. +ansible_control_firewalld_enabled: false + +# A list of zones to create. Each item is a dict containing a 'zone' item. +ansible_control_firewalld_zones: [] + +# A firewalld zone to set as the default. Default is unset, in which case the +# default zone will not be changed. +ansible_control_firewalld_default_zone: + +# A list of firewall rules to apply. Each item is a dict containing arguments +# to pass to the firewalld module. Arguments are omitted if not provided, with +# the following exceptions: +# - offline: true +# - permanent: true +# - state: enabled +ansible_control_firewalld_rules: [] + +############################################################################### +# Ansible control host fail2ban configuration. + +# Whether to install and enable fail2ban. Default is false. +ansible_control_fail2ban_enabled: false + +# List of fail2ban jails for the Ansible control host. +ansible_control_fail2ban_jail_configuration: >- + {{ ansible_control_fail2ban_jail_configuration_default + + ansible_control_fail2ban_jail_configuration_extra }} + +# List of default fail2ban jails for the Ansible control host. +ansible_control_fail2ban_jail_configuration_default: + - option: enabled + value: "true" + section: sshd + +# List of extra fail2ban jails for the Ansible control host. +ansible_control_fail2ban_jail_configuration_extra: [] + +############################################################################### +# Ansible control host swap configuration. + +# List of swap devices. Each item is a dict containing a 'device' item. +ansible_control_swap: [] + +############################################################################### +# Ansible control host container engine configuration. + +# Whether a container engine should be configured. 
Default is false. +ansible_control_container_engine_enabled: false diff --git a/ansible/inventory/group_vars/all/bifrost b/ansible/inventory/group_vars/all/bifrost index 4d2e41aec..f307c4118 100644 --- a/ansible/inventory/group_vars/all/bifrost +++ b/ansible/inventory/group_vars/all/bifrost @@ -4,6 +4,9 @@ ############################################################################### # Bifrost installation. +# Whether to install Bifrost. Default is true. +kolla_enable_bifrost: true + # URL of Bifrost source code repository. kolla_bifrost_source_url: "https://opendev.org/openstack/bifrost" @@ -65,8 +68,8 @@ kolla_bifrost_deploy_image_filename: "deployment_image.qcow2" # UUID of the root filesystem contained within the deployment image. # See below URL for instructions on how to extract it: # https://docs.openstack.org/ironic/latest/admin/raid.html#image-requirements -# Default is none. -kolla_bifrost_deploy_image_rootfs: +# Default is an empty string. +kolla_bifrost_deploy_image_rootfs: '' # Custom cloud-init user-data passed to deploy of the deployment image. # Default is an empty string. @@ -86,13 +89,17 @@ kolla_bifrost_extra_kernel_options: [] ############################################################################### # Ironic Inspector configuration. -# List of of inspector processing plugins. -kolla_bifrost_inspector_processing_hooks: "{{ inspector_processing_hooks }}" +# List of of inspector plugins. +kolla_bifrost_inspector_hooks: "{{ inspector_hooks }}" # Which MAC addresses to add as ports during introspection. One of 'all', # 'active' or 'pxe'. kolla_bifrost_inspector_port_addition: "{{ inspector_add_ports }}" +# Which ports to keep after introspection. One of 'all', 'present', or 'added'. +# Default follows Bifrost's default of 'present'. +kolla_bifrost_inspector_keep_ports: "present" + # List of extra kernel parameters for the inspector default PXE configuration. # Default is {{ inspector_extra_kernel_options }}, defined in inspector.yml. 
# When customising this variable, the default extra kernel parameters should be @@ -100,7 +107,7 @@ kolla_bifrost_inspector_port_addition: "{{ inspector_add_ports }}" kolla_bifrost_inspector_extra_kernel_options: "{{ inspector_extra_kernel_options }}" # List of introspection rules for Bifrost's Ironic Inspector service. -kolla_bifrost_inspector_rules: "{{ inspector_rules + [inspector_rule_legacy_deploy_kernel] }}" +kolla_bifrost_inspector_rules: "{{ inspector_rules }}" # Ironic inspector IPMI username to set. kolla_bifrost_inspector_ipmi_username: "{{ ipmi_username }}" @@ -124,9 +131,6 @@ kolla_bifrost_inspector_deploy_kernel: "http://{{ provision_oc_net_name | net_ip # Ironic inspector deployment ramdisk location. kolla_bifrost_inspector_deploy_ramdisk: "http://{{ provision_oc_net_name | net_ip }}:8080/ipa.initramfs" -# Ironic inspector legacy deployment kernel location. -kolla_bifrost_inspector_legacy_deploy_kernel: "http://{{ provision_oc_net_name | net_ip }}:8080/ipa.vmlinuz" - # Timeout of hardware inspection on overcloud nodes, in seconds. Default is # {{ inspector_inspection_timeout }}. kolla_bifrost_inspection_timeout: "{{ inspector_inspection_timeout }}" diff --git a/ansible/inventory/group_vars/all/compute b/ansible/inventory/group_vars/all/compute index 6e0dd86c7..99fb9dc3c 100644 --- a/ansible/inventory/group_vars/all/compute +++ b/ansible/inventory/group_vars/all/compute @@ -2,9 +2,10 @@ ############################################################################### # Compute node configuration. -# User with which to access the computes via SSH during bootstrap, in order -# to setup the Kayobe user account. Default is {{ os_distribution }}. -compute_bootstrap_user: "{{ os_distribution }}" +# User with which to access the compute nodes via SSH during bootstrap, in +# order to setup the Kayobe user account. Default is 'cloud-user' if +# os_distribution is set to centos, otherwise 'os_distribution'. 
+compute_bootstrap_user: "{{ 'cloud-user' if os_distribution == 'centos' else os_distribution }}" ############################################################################### # Compute network interface configuration. @@ -184,6 +185,26 @@ compute_firewalld_default_zone: # - state: enabled compute_firewalld_rules: [] +############################################################################### +# Compute node fail2ban configuration. + +# Whether to install and enable fail2ban. +compute_fail2ban_enabled: false + +# List of fail2ban jails for the compute node. +compute_fail2ban_jail_configuration: >- + {{ compute_fail2ban_jail_configuration_default + + compute_fail2ban_jail_configuration_extra }} + +# List of default fail2ban jails for the compute node. +compute_fail2ban_jail_configuration_default: + - option: enabled + value: "true" + section: sshd + +# List of extra fail2ban jails for the compute node. +compute_fail2ban_jail_configuration_extra: [] + ############################################################################### # Compute node host libvirt configuration. @@ -239,8 +260,8 @@ compute_libvirt_enable_tls: false compute_libvirt_ceph_repo_install: true # Ceph package repository release to install on CentOS and Rocky hosts when -# compute_libvirt_ceph_repo_install is true. Default is 'pacific'. -compute_libvirt_ceph_repo_release: pacific +# compute_libvirt_ceph_repo_install is true. Default is 'squid'. +compute_libvirt_ceph_repo_release: squid ############################################################################### # Compute node swap configuration. diff --git a/ansible/inventory/group_vars/all/controllers b/ansible/inventory/group_vars/all/controllers index b1fa12b07..f8be1616a 100644 --- a/ansible/inventory/group_vars/all/controllers +++ b/ansible/inventory/group_vars/all/controllers @@ -3,8 +3,9 @@ # Controller node configuration. 
# User with which to access the controllers via SSH during bootstrap, in order -# to setup the Kayobe user account. Default is {{ os_distribution }}. -controller_bootstrap_user: "{{ os_distribution }}" +# to setup the Kayobe user account. Default is 'cloud-user' if os_distribution +# is set to centos, otherwise 'os_distribution'. +controller_bootstrap_user: "{{ 'cloud-user' if os_distribution == 'centos' else os_distribution }}" ############################################################################### # Controller groups. @@ -161,6 +162,7 @@ controller_lvm_group_data_lv_docker_volumes: mount: True mntp: "{{ docker_volumes_path }}" +# Podman volumes LVM backing volume. controller_lvm_group_data_lv_podman_volumes: lvname: podman-volumes size: "{{ controller_lvm_group_data_lv_podman_volumes_size }}" @@ -222,6 +224,26 @@ controller_firewalld_default_zone: # - state: enabled controller_firewalld_rules: [] +############################################################################### +# Controller node fail2ban configuration. + +# Whether to install and enable fail2ban. +controller_fail2ban_enabled: false + +# List of fail2ban jails for the controller node. +controller_fail2ban_jail_configuration: >- + {{ controller_fail2ban_jail_configuration_default + + controller_fail2ban_jail_configuration_extra }} + +# List of default fail2ban jails for the controller node. +controller_fail2ban_jail_configuration_default: + - option: enabled + value: "true" + section: sshd + +# List of extra fail2ban jails for the controller node. +controller_fail2ban_jail_configuration_extra: [] + ############################################################################### # Controller node swap configuration. 
diff --git a/ansible/inventory/group_vars/all/globals b/ansible/inventory/group_vars/all/globals index 5e50c513b..719368a6f 100644 --- a/ansible/inventory/group_vars/all/globals +++ b/ansible/inventory/group_vars/all/globals @@ -52,12 +52,12 @@ kayobe_ansible_user: "stack" # is "rocky". os_distribution: "rocky" -# OS release. Valid options are "9-stream" when os_distribution is "centos", or -# "9" when os_distribution is "rocky", or "noble" when os_distribution is +# OS release. Valid options are "10-stream" when os_distribution is "centos", +# "10" when os_distribution is "rocky", or "noble" when os_distribution is # "ubuntu". os_release: >- - {{ '9-stream' if os_distribution == 'centos' - else '9' if os_distribution == 'rocky' + {{ '10-stream' if os_distribution == 'centos' + else '10' if os_distribution == 'rocky' else 'noble' }} ############################################################################### diff --git a/ansible/inventory/group_vars/all/infra-vms b/ansible/inventory/group_vars/all/infra-vms index 6ae0c1991..df0b8bcaa 100644 --- a/ansible/inventory/group_vars/all/infra-vms +++ b/ansible/inventory/group_vars/all/infra-vms @@ -42,19 +42,18 @@ infra_vm_root_format: qcow2 # Base image for the infra VM root volume. Default is # "https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img" -# when os_distribution is "ubuntu", or -# https://dl.rockylinux.org/pub/rocky/9/images/x86_64/Rocky-9-GenericCloud.latest.x86_64.qcow2 -# when os_distribution is "rocky", -# or -# "https://cloud.centos.org/centos/9-stream/x86_64/images/CentOS-Stream-GenericCloud-9-20221206.0.x86_64.qcow2" +# when os_distribution is "ubuntu", +# "https://dl.rockylinux.org/pub/rocky/10/images/x86_64/Rocky-10-GenericCloud-Base.latest.x86_64.qcow2" +# when os_distribution is "rocky", or +# "https://cloud.centos.org/centos/10-stream/x86_64/images/CentOS-Stream-GenericCloud-x86_64-10-latest.x86_64.qcow2" # otherwise. 
infra_vm_root_image: >- {%- if os_distribution == 'ubuntu' %} https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img {%- elif os_distribution == 'rocky' %} - https://dl.rockylinux.org/pub/rocky/9/images/x86_64/Rocky-9-GenericCloud.latest.x86_64.qcow2 + https://dl.rockylinux.org/pub/rocky/10/images/x86_64/Rocky-10-GenericCloud-Base.latest.x86_64.qcow2 {%- else -%} - https://cloud.centos.org/centos/9-stream/x86_64/images/CentOS-Stream-GenericCloud-9-20221206.0.x86_64.qcow2 + https://cloud.centos.org/centos/10-stream/x86_64/images/CentOS-Stream-GenericCloud-x86_64-10-latest.x86_64.qcow2 {%- endif %} # Capacity of the infra VM data volume. @@ -86,8 +85,9 @@ infra_vm_machine: # Infrastructure VM node configuration. # User with which to access the infrastructure vm via SSH during bootstrap, in -# order to setup the Kayobe user account. Default is {{ os_distribution }}. -infra_vm_bootstrap_user: "{{ os_distribution }}" +# order to setup the Kayobe user account. Default is 'cloud-user' if +# os_distribution is set to centos, otherwise 'os_distribution'. +infra_vm_bootstrap_user: "{{ 'cloud-user' if os_distribution == 'centos' else os_distribution }}" ############################################################################### # Infrastructure VM network interface configuration. @@ -230,6 +230,26 @@ infra_vm_firewalld_default_zone: # - state: enabled infra_vm_firewalld_rules: [] +############################################################################### +# Infrastructure VM node fail2ban configuration. + +# Whether to install and enable fail2ban. +infra_vm_fail2ban_enabled: false + +# List of fail2ban jails for the infrastructure VM node. +infra_vm_fail2ban_jail_configuration: >- + {{ infra_vm_fail2ban_jail_configuration_default + + infra_vm_fail2ban_jail_configuration_extra }} + +# List of default fail2ban jails for the infrastructure VM node. 
+infra_vm_fail2ban_jail_configuration_default: + - option: enabled + value: "true" + section: sshd + +# List of extra fail2ban jails for the infrastructure VM node. +infra_vm_fail2ban_jail_configuration_extra: [] + ############################################################################### # Infrastructure VM node swap configuration. diff --git a/ansible/inventory/group_vars/all/inspector b/ansible/inventory/group_vars/all/inspector index 5b0272e70..b65faaf80 100644 --- a/ansible/inventory/group_vars/all/inspector +++ b/ansible/inventory/group_vars/all/inspector @@ -31,40 +31,47 @@ inspector_ipa_ramdisk_checksum_url: "{{ ipa_ramdisk_checksum_url }}" inspector_ipa_ramdisk_checksum_algorithm: "{{ ipa_ramdisk_checksum_algorithm }}" ############################################################################### -# Ironic inspector processing configuration. +# Ironic inspector processing configuration for the inspector implementation +# built-in to Ironic. + +# List of hooks to enable for inspection. Default is [$default_hooks, memory, +# boot-mode, cpu-capabilities, pci-devices, parse-lldp]. +inspector_hooks_default: + - $default_hooks + - memory + - boot-mode + - cpu-capabilities + - pci-devices + - parse-lldp + +# List of extra inspection hooks to enable. Default is an empty list. +inspector_hooks_extra: [] + +# List of of additional inspector hooks to enable. Default is +# {{ inspector_hooks_default + inspector_hooks_extra }}. +inspector_hooks: > + {{ inspector_hooks_default + inspector_hooks_extra }} -# List of of default inspector processing plugins. -inspector_processing_hooks_default: - - ramdisk_error - - scheduler - - validate_interfaces - - capabilities - - pci_devices - - lldp_basic - - local_link_connection - -# List of of additional inspector processing plugins. -inspector_processing_hooks_extra: [] - -# List of of additional inspector processing plugins. 
-inspector_processing_hooks: > - {{ inspector_processing_hooks_default + inspector_processing_hooks_extra }} +############################################################################### +# Common Ironic Inspector processing configuration. # Which MAC addresses to add as ports during introspection. One of 'all', -# 'active' or 'pxe'. +# 'active' or 'pxe'. Default is 'pxe'. inspector_add_ports: "pxe" # Which ports to keep after introspection. One of 'all', 'present', or 'added'. +# Default is 'added'. inspector_keep_ports: "added" -# Whether to enable discovery of nodes not managed by Ironic. +# Whether to enable discovery of nodes not managed by Ironic. Default is true. inspector_enable_discovery: True -# The Ironic driver with which to register newly discovered nodes. +# The Ironic driver with which to register newly discovered nodes. Default is +# 'ipmi'. inspector_discovery_enroll_node_driver: "ipmi" ############################################################################### -# Ironic inspector configuration. +# Ironic inspector introspection rules configuration. # Ironic inspector IPMI username to set. inspector_ipmi_username: "{{ ipmi_username }}" @@ -86,9 +93,6 @@ inspector_lldp_switch_port_interface_default: eth0 # check for an LLDP switch port description to use as the node's name. inspector_lldp_switch_port_interface_map: {} -############################################################################### -# Ironic inspector introspection rules configuration. - # Enable IPMI rules: inspector_rules_ipmi_enabled: True @@ -114,17 +118,26 @@ inspector_rule_var_redfish_verify_ca: True inspector_rule_ipmi_credentials: description: "Set IPMI driver_info if no credentials" conditions: - - field: "node://driver_info.ipmi_username" - op: "is-empty" - - field: "node://driver_info.ipmi_password" - op: "is-empty" + - args: + # If value matches itself as a regex, this is becaused interpolation + # failed which means the ipmi_username key was not set. 
+ value: "{node.driver_info[ipmi_username]}" + regex: "{node\\.driver_info\\[ipmi_username\\]}" + op: "matches" + - args: + value: "{node.driver_info[ipmi_password]}" + regex: "{node\\.driver_info\\[ipmi_password\\]}" + op: "matches" + sensitive: "true" actions: - - action: "set-attribute" - path: "driver_info/ipmi_username" - value: "{{ inspector_rule_var_ipmi_username }}" - - action: "set-attribute" - path: "driver_info/ipmi_password" - value: "{{ inspector_rule_var_ipmi_password }}" + - op: "set-attribute" + args: + path: "/driver_info/ipmi_username" + value: "{{ inspector_rule_var_ipmi_username }}" + - op: "set-attribute" + args: + path: "/driver_info/ipmi_password" + value: "{{ inspector_rule_var_ipmi_password }}" # Deployment kernel referenced by inspector rule. inspector_rule_var_deploy_kernel: @@ -133,24 +146,15 @@ inspector_rule_var_deploy_kernel: inspector_rule_deploy_kernel: description: "Set deploy kernel" conditions: - - field: "node://driver_info.deploy_kernel" - op: "is-empty" - actions: - - action: "set-attribute" - path: "driver_info/deploy_kernel" - value: "{{ inspector_rule_var_deploy_kernel }}" - -# Ironic inspector rule to update deployment kernel from legacy location. -inspector_rule_legacy_deploy_kernel: - description: "Update deploy kernel from legacy" - conditions: - - field: "node://driver_info.deploy_kernel" - op: "eq" - value: "{{ inspector_rule_var_legacy_deploy_kernel }}" + - args: + value: "{node.driver_info[deploy_kernel]}" + regex: "{node\\.driver_info\\[deploy_kernel\\]}" + op: "matches" actions: - - action: "set-attribute" - path: "driver_info/deploy_kernel" - value: "{{ inspector_rule_var_deploy_kernel }}" + - op: "set-attribute" + args: + path: "/driver_info/deploy_kernel" + value: "{{ inspector_rule_var_deploy_kernel }}" # Deployment ramdisk referenced by inspector rule. 
inspector_rule_var_deploy_ramdisk: @@ -159,220 +163,235 @@ inspector_rule_var_deploy_ramdisk: inspector_rule_deploy_ramdisk: description: "Set deploy ramdisk" conditions: - - field: "node://driver_info.deploy_ramdisk" - op: "is-empty" - actions: - - action: "set-attribute" - path: "driver_info/deploy_ramdisk" - value: "{{ inspector_rule_var_deploy_ramdisk }}" - -# Ironic inspector rule to initialise root device hints. -inspector_rule_root_hint_init: - description: "Initialise root device hint" - conditions: - - field: "node://properties.root_device" - op: "is-empty" + - args: + value: "{node.driver_info[deploy_ramdisk]}" + regex: "{node\\.driver_info\\[deploy_ramdisk\\]}" + op: "matches" actions: - # Inspector can't combine references to introspection data with non-string - # types, see https://bugs.launchpad.net/ironic-inspector/+bug/1670768. We - # must therefore first set the root_device property to an empty dict, then - # update the fields within it. - - action: "set-attribute" - path: "properties/root_device" - value: {} + - op: "set-attribute" + args: + path: "/driver_info/deploy_ramdisk" + value: "{{ inspector_rule_var_deploy_ramdisk }}" # Ironic inspector rule to set serial root device hint. inspector_rule_root_hint_serial: description: "Set serial root device hint" conditions: - - field: "data://root_disk.serial" - op: "is-empty" - invert: True + - args: + value: "{node.properties[root_device]}" + regex: "{node\\.properties\\[root_device\\]}" + op: "matches" + - args: + value: "{plugin_data[root_disk][serial]}" + op: "!is-empty" actions: - - action: "set-attribute" - path: "properties/root_device/serial" - value: "{data[root_disk][serial]}" + - op: "set-attribute" + args: + path: "/properties/root_device/serial" + value: "{plugin_data[root_disk][serial]}" # Ironic inspector rule to set the interface on which the node PXE booted. 
inspector_rule_set_pxe_interface_mac: description: "Set node PXE interface MAC address" conditions: - - field: "data://boot_interface" - op: "is-empty" - invert: True + - args: + value: "{plugin_data[boot_interface]}" + regex: "{plugin_data\\[boot_interface\\]}" + op: "!matches" actions: - - action: "set-attribute" - path: "extra/pxe_interface_mac" - value: "{data[boot_interface]}" + - op: "set-attribute" + args: + path: "/extra/pxe_interface_mac" + value: "{plugin_data[boot_interface]}" # Name of network interface to use for LLDP referenced by switch port # description rule. inspector_rule_var_lldp_switch_port_interface: +# Internal variables. Not meant for use externally. +_inspector_rule_interface_path: "{all_interfaces.{{ inspector_rule_var_lldp_switch_port_interface }}}" +_inspector_rule_lldp_processed_path: "{all_interfaces.{{ inspector_rule_var_lldp_switch_port_interface }}.lldp_processed}" +_inspector_rule_switch_port_description_path: "{all_interfaces.{{inspector_rule_var_lldp_switch_port_interface}}.lldp_processed.switch_port_description}" + # Ironic inspector rule to set the node's name from an interface's LLDP switch # port description. inspector_rule_lldp_switch_port_desc_to_name: description: "Set node name from {{ inspector_rule_var_lldp_switch_port_interface }} LLDP switch port description" conditions: # Check for the existence of the switch_port_description field. 
- - field: "data://all_interfaces.{{ inspector_rule_var_lldp_switch_port_interface }}" - op: "is-empty" - invert: True - - field: "data://all_interfaces.{{ inspector_rule_var_lldp_switch_port_interface }}.lldp_processed" - op: "is-empty" - invert: True - - field: "data://all_interfaces.{{ inspector_rule_var_lldp_switch_port_interface }}.lldp_processed.switch_port_description" - op: "is-empty" - invert: True + - args: + value: "{{ _inspector_rule_interface_path }}" + regex: "{{ _inspector_rule_interface_path | regex_escape }}" + op: "!matches" + - args: + value: "{{ _inspector_rule_lldp_processed_path }}" + regex: "{{ _inspector_rule_lldp_processed_path | regex_escape }}" + op: "!matches" + - args: + value: "{{ _inspector_rule_switch_port_description_path }}" + regex: "{{ _inspector_rule_switch_port_description_path | regex_escape }}" + op: "!matches" # Filter out switch port descriptions using the switch's interface names. # On some switches (including Dell Network OS 9.10(0.1) and some Ruckus # switches), the port description TLV is sent but contains the interface # name rather than the interface's description. Dell switches use a space # character between port type and port number, while Ruckus switches don't. 
- - field: "data://all_interfaces.{{inspector_rule_var_lldp_switch_port_interface}}.lldp_processed.switch_port_description" - op: "matches" - value: "^GigabitEthernet ?([0-9/]*)$" - invert: True - - field: "data://all_interfaces.{{inspector_rule_var_lldp_switch_port_interface}}.lldp_processed.switch_port_description" - op: "matches" - value: "^TenGigabitEthernet ?([0-9/]*)$" - invert: True - - field: "data://all_interfaces.{{inspector_rule_var_lldp_switch_port_interface}}.lldp_processed.switch_port_description" - op: "matches" - value: "^twentyFiveGigE ?([0-9/]*)$" - invert: True - - field: "data://all_interfaces.{{inspector_rule_var_lldp_switch_port_interface}}.lldp_processed.switch_port_description" - op: "matches" - value: "^fortyGigE ?([0-9/]*)$" - invert: True - - field: "data://all_interfaces.{{inspector_rule_var_lldp_switch_port_interface}}.lldp_processed.switch_port_description" - op: "matches" - value: "^Port-channel ?([0-9/]*)$" - invert: True - actions: - - action: "set-attribute" - path: "name" - value: "{data[all_interfaces][{{ inspector_rule_var_lldp_switch_port_interface }}][lldp_processed][switch_port_description]}" - -# Ironic inspector rule to initialise system vendor data in the node's metadata. 
-inspector_rule_save_system_vendor_init: - description: "Initialise system vendor data in Ironic node metadata" - conditions: - - field: "data://inventory.system_vendor" - op: "is-empty" - invert: True - - field: "node://extra.system_vendor" - op: "is-empty" + - op: "!matches" + args: + value: "{{ _inspector_rule_switch_port_description_path }}" + regex: "^GigabitEthernet ?([0-9/]*)$" + - op: "!matches" + args: + value: "{{ _inspector_rule_switch_port_description_path }}" + regex: "^TenGigabitEthernet ?([0-9/]*)$" + - op: "!matches" + args: + value: "{{ _inspector_rule_switch_port_description_path }}" + regex: "^twentyFiveGigE ?([0-9/]*)$" + - op: "!matches" + args: + value: "{{ _inspector_rule_switch_port_description_path }}" + regex: "^fortyGigE ?([0-9/]*)$" + - op: "!matches" + args: + value: "{{ _inspector_rule_switch_port_description_path }}" + regex: "^Port-channel ?([0-9/]*)$" actions: - - action: "set-attribute" - path: "extra/system_vendor" - value: {} + - op: "set-attribute" + args: + path: "/name" + value: "{{ _inspector_rule_switch_port_description_path }}" # Ironic inspector rule to save system vendor manufacturer data in the node's # metadata. 
inspector_rule_save_system_vendor_manufacturer: description: "Save system vendor manufacturer data in Ironic node metadata" conditions: - - field: "data://inventory.system_vendor" - op: "is-empty" - invert: True - - field: "data://inventory.system_vendor.manufacturer" - op: "is-empty" - invert: True + - args: + value: "{inventory[system_vendor]}" + regex: "{inventory\\[system_vendor\\]}" + op: "!matches" + - args: + value: "{inventory[system_vendor][manufacturer]}" + regex: "{inventory\\[system_vendor\\]\\[manufacturer\\]}" + op: "!matches" actions: - - action: "set-attribute" - path: "extra/system_vendor/manufacturer" - value: "{data[inventory][system_vendor][manufacturer]}" + - op: "set-attribute" + args: + path: "/extra/system_vendor/manufacturer" + value: "{inventory[system_vendor][manufacturer]}" # Ironic inspector rule to save system vendor serial number in the node's # metadata. inspector_rule_save_system_vendor_serial_number: description: "Save system vendor serial number in Ironic node metadata" conditions: - - field: "data://inventory.system_vendor" - op: "is-empty" - invert: True - - field: "data://inventory.system_vendor.serial_number" - op: "is-empty" - invert: True + - args: + value: "{inventory[system_vendor]}" + regex: "{inventory\\[system_vendor\\]}" + op: "!matches" + - args: + value: "{inventory[system_vendor][serial_number]}" + regex: "{inventory\\[system_vendor\\]\\[serial_number\\]}" + op: "!matches" actions: - - action: "set-attribute" - path: "extra/system_vendor/serial_number" - value: "{data[inventory][system_vendor][serial_number]}" + - op: "set-attribute" + args: + path: "/extra/system_vendor/serial_number" + value: "{inventory[system_vendor][serial_number]}" # Ironic inspector rule to save system vendor product name in the node's # metadata. 
inspector_rule_save_system_vendor_product_name: description: "Save system vendor product name in Ironic node metadata" conditions: - - field: "data://inventory.system_vendor" - op: "is-empty" - invert: True - - field: "data://inventory.system_vendor.product_name" - op: "is-empty" - invert: True + - args: + value: "{inventory[system_vendor]}" + regex: "{inventory\\[system_vendor\\]}" + op: "!matches" + - args: + value: "{inventory[system_vendor][product_name]}" + regex: "{inventory\\[system_vendor\\]\\[product_name\\]}" + op: "!matches" actions: - - action: "set-attribute" - path: "extra/system_vendor/product_name" - value: "{data[inventory][system_vendor][product_name]}" + - op: "set-attribute" + args: + path: "/extra/system_vendor/product_name" + value: "{inventory[system_vendor][product_name]}" # Ironic inspector rule to save introspection data to the node. inspector_rule_save_data: description: "Save introspection data to Ironic node" conditions: [] actions: - - action: "set-attribute" - path: "extra/introspection_data" - value: "{data}" + - op: "set-attribute" + args: + path: "/extra/introspection_data/inventory" + value: "{inventory}" + - op: "set-attribute" + args: + path: "/extra/introspection_data/plugin_data" + value: "{plugin_data}" # Redfish rules # Ironic inspector rule to set Redfish credentials. 
inspector_rule_redfish_credentials: description: "Set Redfish driver_info if no credentials" conditions: - - field: "node://driver_info.redfish_username" - op: "is-empty" - - field: "node://driver_info.redfish_password" - op: "is-empty" + - args: + value: "{node.driver_info[redfish_username]}" + regex: "{node\\.driver_info\\[redfish_username\\]}" + op: "matches" + - args: + value: "{node.driver_info[redfish_password]}" + regex: "{node\\.driver_info\\[redfish_password\\]}" + op: "matches" + sensitive: true actions: - - action: "set-attribute" - path: "driver_info/redfish_username" - value: "{{ inspector_rule_var_redfish_username }}" - - action: "set-attribute" - path: "driver_info/redfish_password" - value: "{{ inspector_rule_var_redfish_password }}" + - op: "set-attribute" + args: + path: "/driver_info/redfish_username" + value: "{{ inspector_rule_var_redfish_username }}" + - op: "set-attribute" + args: + path: "/driver_info/redfish_password" + value: "{{ inspector_rule_var_redfish_password }}" # Ironic inspector rule to set Redfish address. inspector_rule_redfish_address: description: "Set Redfish address" conditions: - - field: "node://driver_info.redfish_address" - op: "is-empty" + - args: + value: "{node.driver_info[redfish_address]}" + regex: "{node\\.driver_info\\[redfish_address\\]}" + op: "matches" actions: - - action: "set-attribute" - path: "driver_info/redfish_address" - value: "{data[inventory][bmc_address]}" + - op: "set-attribute" + args: + path: "/driver_info/redfish_address" + value: "{inventory[bmc_address]}" # Ironic inspector rule to set Redfish certificate authority. 
inspector_rule_redfish_verify_ca: description: "Set Redfish Verify CA" conditions: - - field: "node://driver_info.redfish_verify_ca" - op: "is-empty" + - args: + value: "{node.driver_info[redfish_verify_ca]}" + regex: "{node\\.driver_info\\[redfish_verify_ca\\]}" + op: "matches" actions: - - action: "set-attribute" - path: "driver_info/redfish_verify_ca" - value: "{{ inspector_rule_var_redfish_verify_ca }}" + - op: "set-attribute" + args: + path: "/driver_info/redfish_verify_ca" + value: "{{ inspector_rule_var_redfish_verify_ca }}" # List of default ironic inspector rules. inspector_rules_default: - "{{ inspector_rule_deploy_kernel }}" - "{{ inspector_rule_deploy_ramdisk }}" - - "{{ inspector_rule_root_hint_init }}" - "{{ inspector_rule_root_hint_serial }}" - "{{ inspector_rule_set_pxe_interface_mac }}" - "{{ inspector_rule_lldp_switch_port_desc_to_name }}" - - "{{ inspector_rule_save_system_vendor_init }}" - "{{ inspector_rule_save_system_vendor_manufacturer }}" - "{{ inspector_rule_save_system_vendor_serial_number }}" - "{{ inspector_rule_save_system_vendor_product_name }}" @@ -406,14 +425,3 @@ inspector_rules: "{{ inspector_rules_default + inspector_rules_extra + (inspecto # Ansible group containing switch hosts to which the workaround should be # applied. inspector_dell_switch_lldp_workaround_group: - -############################################################################### -# Inspection store configuration. -# The inspection store provides a Swift-like service for storing inspection -# data which may be useful in environments without Swift. - -# Whether the inspection data store is enabled. -inspector_store_enabled: "{{ kolla_enable_ironic_inspector | bool and not kolla_enable_swift | bool }}" - -# Port on which the inspection data store should listen. 
-inspector_store_port: 8080 diff --git a/ansible/inventory/group_vars/all/ipa b/ansible/inventory/group_vars/all/ipa index 21fbff2df..d8542b929 100644 --- a/ansible/inventory/group_vars/all/ipa +++ b/ansible/inventory/group_vars/all/ipa @@ -20,19 +20,19 @@ ipa_builder_source_url: "https://opendev.org/openstack/ironic-python-agent-build ipa_builder_source_version: "{{ openstack_branch }}" # List of additional build host packages to install. -ipa_build_dib_host_packages_extra: [] +ipa_build_dib_host_packages_extra: [ 'zstd' ] # List of default Diskimage Builder (DIB) elements to use when building IPA -# images. Default is ["centos", "dynamic-login", "enable-serial-console", -# "ironic-python-agent-ramdisk"] when os_distribution is "rocky", and -# ["ubuntu", "dynamic-login", "enable-serial-console", +# images. Default is ["rocky-container", "dynamic-login", +# "enable-serial-console", "ironic-python-agent-ramdisk"] when os_distribution +# is "rocky", and ["ubuntu", "dynamic-login", "enable-serial-console", # "ironic-python-agent-ramdisk"] otherwise. ipa_build_dib_elements_default: - # TODO(mattcrees): Use {{ os_distribution }} here when Rocky IPA builds work. - - "{{ 'centos' if os_distribution == 'rocky' else os_distribution }}" + - "{{ 'rocky-container' if os_distribution == 'rocky' else os_distribution }}" - dynamic-login - enable-serial-console - ironic-python-agent-ramdisk + - baremetal # List of additional Diskimage Builder (DIB) elements to use when building IPA # images. Default is none. @@ -48,11 +48,13 @@ ipa_build_dib_elements: > # Dictionary of default environment variables to provide to Diskimage Builder # (DIB) during IPA image build. ipa_build_dib_env_default: - # TODO(mattcrees): Use {{ os_release }} here when Rocky IPA builds work. 
- DIB_RELEASE: "{{ '9-stream' if os_distribution == 'rocky' else os_release }}" + DIB_RELEASE: "{{ os_release }}" + DIB_CONTAINERFILE_RUNTIME: "{{ container_engine }}" + DIB_CONTAINERFILE_RUNTIME_ROOT: "{{ (container_engine == 'podman') | int }}" DIB_REPOLOCATION_ironic_python_agent: "{{ ipa_build_source_url }}" DIB_REPOREF_ironic_python_agent: "{{ ipa_build_source_version }}" DIB_REPOREF_requirements: "{{ ipa_build_source_version }}" + DIB_IPA_COMPRESS_CMD: 'zstd -19' # Dictionary of additional environment variables to provide to Diskimage # Builder (DIB) during IPA image build. @@ -83,8 +85,10 @@ ipa_build_dib_git_elements_extra: [] ipa_build_dib_git_elements: >- {{ ipa_build_dib_git_elements_default + ipa_build_dib_git_elements_extra }} -# List of DIB packages to install. Default is none. -ipa_build_dib_packages: [] +# List of DIB packages to install. Default is ["python3-yaml"] when +# os_distribution is "rocky", otherwise []. +ipa_build_dib_packages: >- + {{ ["python3-yaml"] if os_distribution == 'rocky' else [] }} # Upper constraints file for installing packages in the virtual environment # used for building IPA images. Default is {{ pip_upper_constraints_file }}. diff --git a/ansible/inventory/group_vars/all/ironic b/ansible/inventory/group_vars/all/ironic index 72f26e2f7..0e37aba9d 100644 --- a/ansible/inventory/group_vars/all/ironic +++ b/ansible/inventory/group_vars/all/ironic @@ -94,24 +94,34 @@ kolla_ironic_default_vendor_interface: # Name of the Neutron network to use for cleaning. kolla_ironic_cleaning_network: "{{ kolla_ironic_provisioning_network if cleaning_net_name == provision_wl_net_name else 'cleaning-net' }}" +# Name of the Neutron network to use for inspection. +kolla_ironic_inspection_network: "{{ kolla_ironic_provisioning_network if inspection_net_name == provision_wl_net_name else 'inspection-net' }}" + # Name of the Neutron network to use for provisioning. 
kolla_ironic_provisioning_network: 'provision-net' # List of default kernel parameters to append for baremetal PXE boot. -kolla_ironic_pxe_append_params_default: +kolla_ironic_pxe_append_params_default: "{{ kolla_ironic_kernel_append_params_default }}" + +# List of additional kernel parameters to append for baremetal PXE boot. +kolla_ironic_pxe_append_params_extra: "{{ kolla_ironic_kernel_append_params_extra }}" + +# List of kernel parameters to append for baremetal PXE boot. +kolla_ironic_pxe_append_params: "{{ kolla_ironic_pxe_append_params_default + kolla_ironic_pxe_append_params_extra }}" + +# List of default kernel parameters to append for baremetal boot. +kolla_ironic_kernel_append_params_default: - nofb - nomodeset - vga=normal - console=tty0 - console=ttyS0,115200n8 -# List of additional kernel parameters to append for baremetal PXE boot. -kolla_ironic_pxe_append_params_extra: [] +# List of additional kernel parameters to append for baremetal boot. +kolla_ironic_kernel_append_params_extra: [] -# List of kernel parameters to append for baremetal PXE boot. -kolla_ironic_pxe_append_params: > - {{ kolla_ironic_pxe_append_params_default + - kolla_ironic_pxe_append_params_extra }} +# List of kernel parameters to append for baremetal boot. +kolla_ironic_kernel_append_params: "{{ kolla_ironic_kernel_append_params_default + kolla_ironic_kernel_append_params_extra }}" ############################################################################### # Ironic Node Configuration diff --git a/ansible/inventory/group_vars/all/kolla b/ansible/inventory/group_vars/all/kolla index e0e4c949e..411b116e6 100644 --- a/ansible/inventory/group_vars/all/kolla +++ b/ansible/inventory/group_vars/all/kolla @@ -63,9 +63,9 @@ kolla_base_distro: "{{ os_distribution }}" # Kolla base container image distribution version default map. # Defines default versions for each distribution. 
kolla_base_distro_version_default_map: { - "centos": "stream9", - "debian": "bullseye", - "rocky": "9", + "centos": "stream10", + "debian": "bookworm", + "rocky": "10", "ubuntu": "noble", } @@ -153,7 +153,7 @@ overcloud_container_image_regex_map: - regex: ^designate enabled: "{{ kolla_enable_designate | bool }}" - regex: ^dnsmasq - enabled: "{{ kolla_enable_ironic | bool }}" + enabled: "{{ kolla_enable_ironic_dnsmasq | bool }}" - regex: ^etcd enabled: "{{ kolla_enable_etcd | bool }}" - regex: ^fluentd @@ -208,8 +208,10 @@ overcloud_container_image_regex_map: enabled: "{{ kolla_enable_multipathd | bool }}" - regex: "neutron-\\(server\\|metadata-agent\\)" enabled: "{{ kolla_enable_neutron | bool }}" - - regex: "neutron-\\(dhcp\\|l3\\|linuxbridge\\|openvswitch\\)-agent" + - regex: "neutron-\\(dhcp\\|l3\\|openvswitch\\)-agent" enabled: "{{ kolla_build_neutron_ovs | default(kolla_enable_neutron | bool and not kolla_enable_ovn | bool) }}" + - regex: neutron-bgp-dragent + enabled: "{{ kolla_enable_neutron_bgp_dragent | bool }}" - regex: neutron-mlnx-agent enabled: "{{ kolla_enable_neutron_mlnx | bool }}" - regex: neutron-ovn-agent @@ -546,7 +548,8 @@ kolla_enable_heat: "{{ kolla_enable_openstack_core | bool }}" kolla_enable_horizon: "{{ kolla_enable_openstack_core | bool }}" kolla_enable_influxdb: "{{ kolla_enable_cloudkitty | bool }}" kolla_enable_ironic: "no" -kolla_enable_ironic_inspector: "{{ kolla_enable_ironic | bool }}" +kolla_enable_ironic_dnsmasq: "{{ kolla_enable_ironic | bool and kolla_inspector_enable_discovery | bool }}" +kolla_enable_ironic_pxe_filter: "{{ kolla_enable_ironic | bool and kolla_inspector_enable_discovery | bool }}" kolla_enable_ironic_neutron_agent: "{{ kolla_enable_neutron | bool and kolla_enable_ironic | bool }}" kolla_enable_iscsid: "{{ kolla_enable_cinder | bool and kolla_enable_cinder_backend_iscsi | bool }}" kolla_enable_kuryr: "no" @@ -556,6 +559,7 @@ kolla_enable_manila: "no" kolla_enable_masakari: "no" kolla_enable_mistral: "no" 
kolla_enable_multipathd: "no" +kolla_enable_neutron_bgp_dragent: "no" kolla_enable_neutron_mlnx: "no" kolla_enable_neutron_provider_networks: "no" kolla_enable_neutron_sriov: "no" @@ -693,3 +697,7 @@ kolla_https_proxy: "{{ https_proxy }}" # List of domains, hostnames, IP addresses and networks for which no proxy is # used. Default value is "{{ no_proxy }}". kolla_no_proxy: "{{ no_proxy }}" + +############################################################################## +# Inspector configuration +kolla_inspector_enable_discovery: "{{ inspector_enable_discovery | bool }}" \ No newline at end of file diff --git a/ansible/inventory/group_vars/all/monitoring b/ansible/inventory/group_vars/all/monitoring index 56da639e9..61492f2b2 100644 --- a/ansible/inventory/group_vars/all/monitoring +++ b/ansible/inventory/group_vars/all/monitoring @@ -3,7 +3,8 @@ # Monitoring node configuration. # User with which to access the monitoring nodes via SSH during bootstrap, in -# order to setup the Kayobe user account. +# order to setup the Kayobe user account. Default is the same as +# controller_bootstrap_user. monitoring_bootstrap_user: "{{ controller_bootstrap_user }}" ############################################################################### @@ -123,6 +124,26 @@ monitoring_firewalld_default_zone: "{{ controller_firewalld_default_zone }}" # - state: enabled monitoring_firewalld_rules: "{{ controller_firewalld_rules }}" +############################################################################### +# Monitoring node fail2ban configuration. + +# Whether to install and enable fail2ban. +monitoring_fail2ban_enabled: false + +# List of fail2ban jails for the monitoring node. +monitoring_fail2ban_jail_configuration: >- + {{ monitoring_fail2ban_jail_configuration_default + + monitoring_fail2ban_jail_configuration_extra }} + +# List of default fail2ban jails for the monitoring node. 
+monitoring_fail2ban_jail_configuration_default: + - option: enabled + value: "true" + section: sshd + +# List of extra fail2ban jails for the monitoring node. +monitoring_fail2ban_jail_configuration_extra: [] + ############################################################################### # Monitoring node swap configuration. diff --git a/ansible/inventory/group_vars/all/openstack b/ansible/inventory/group_vars/all/openstack index e0cb9f783..430293d3f 100644 --- a/ansible/inventory/group_vars/all/openstack +++ b/ansible/inventory/group_vars/all/openstack @@ -2,13 +2,18 @@ ############################################################################### # OpenStack release configuration. -# Name of the current OpenStack release. Default is "2025.1". -openstack_release: "2025.1" +# Name of the current OpenStack release. Default is "master". +openstack_release: "master" -# Name of the current OpenStack branch. Default is "stable/2025.1". +# Name of the current OpenStack branch. Default is "master". openstack_branch: >- {% if openstack_release != 'master' %}stable/{% endif %}{{ openstack_release | lower }} +############################################################################### +# OpenStack virtualenv configuration. + +os_virtualenv_python: "{{ '/usr/bin/python3.12' if ansible_facts.os_family == 'RedHat' else '/usr/bin/python3' }}" + ############################################################################### # OpenStack authentication configuration. @@ -28,6 +33,19 @@ openstack_auth: auth_url: "{{ lookup('env', 'OS_AUTH_URL') }}" system_scope: "{{ lookup('env', 'OS_SYSTEM_SCOPE') }}" +# Internal variable to set the system scope authentication. +openstack_auth_system_scope_value: 'all' + +# Overcloud authentication parameters for system scope. By default this will +# use the user defined in openstack_auth. +# NOTE(wszumski): Not all projects support system scope yet and we sometimes need +# to use system scope and project scope in the same ansible run. 
+openstack_auth_system_scope: >- + {{ openstack_auth | combine( + {'system_scope': openstack_auth_system_scope_value, + 'project_domain_name': '', + 'project_name': ''})}} + # Overcloud CA certificate path. openstack_cacert: "{{ lookup('env', 'OS_CACERT') }}" @@ -49,6 +67,14 @@ openstack_auth_env: OS_CACERT: "{{ lookup('env', 'OS_CACERT') }}" OS_SYSTEM_SCOPE: "{{ lookup('env', 'OS_SYSTEM_SCOPE') }}" +# Overcloud authentication environment variables for system scope. By default +# this will use the user defined in openstack_auth_env. +openstack_auth_env_system_scope: >- + {{ openstack_auth_env | combine( + {'OS_SYSTEM_SCOPE': openstack_auth_system_scope_value, + 'OS_PROJECT_DOMAIN_NAME': '', + 'OS_PROJECT_NAME': ''})}} + # List of parameters required in openstack_auth when openstack_auth_type is # password. openstack_auth_password_required_params: @@ -56,3 +82,10 @@ openstack_auth_password_required_params: - "username" - "password" - "auth_url" + +# List of parameters required in openstack_auth when openstack_auth_type is +# password and using system scope +openstack_auth_password_required_params_system: + - "username" + - "password" + - "auth_url" diff --git a/ansible/inventory/group_vars/all/seed b/ansible/inventory/group_vars/all/seed index eddee7426..22c0bc6d4 100644 --- a/ansible/inventory/group_vars/all/seed +++ b/ansible/inventory/group_vars/all/seed @@ -3,8 +3,9 @@ # Seed node configuration. # User with which to access the seed via SSH during bootstrap, in order to -# setup the Kayobe user account. Default is {{ os_distribution }}. -seed_bootstrap_user: "{{ os_distribution }}" +# setup the Kayobe user account. Default is 'cloud-user' if os_distribution is +# set to centos, otherwise 'os_distribution'. +seed_bootstrap_user: "{{ 'cloud-user' if os_distribution == 'centos' else os_distribution }}" ############################################################################### # Seed network interface configuration. 
@@ -136,9 +137,10 @@ seed_users: "{{ users_default }}" # Example: # seed_containers: # squid: -# image: "stackhpc/squid:3.5.20-1" +# image: "docker.io/stackhpc/squid" # pre: "{{ kayobe_env_config_path }}/containers/squid/pre.yml" # post: "{{ kayobe_env_config_path }}/containers/squid/post.yml" +# tag: "3.5.20-1" # seed_containers: {} @@ -167,6 +169,26 @@ seed_firewalld_default_zone: # - state: enabled seed_firewalld_rules: [] +############################################################################### +# Seed node fail2ban configuration. + +# Whether to install and enable fail2ban. +seed_fail2ban_enabled: false + +# List of fail2ban jails for the seed node. +seed_fail2ban_jail_configuration: >- + {{ seed_fail2ban_jail_configuration_default + + seed_fail2ban_jail_configuration_extra }} + +# List of default fail2ban jails for the seed node. +seed_fail2ban_jail_configuration_default: + - option: enabled + value: "true" + section: sshd + +# List of extra fail2ban jails for the seed node. +seed_fail2ban_jail_configuration_extra: [] + ############################################################################### # Seed node swap configuration. diff --git a/ansible/inventory/group_vars/all/seed-hypervisor b/ansible/inventory/group_vars/all/seed-hypervisor index 9b9cf889f..47c64b3f7 100644 --- a/ansible/inventory/group_vars/all/seed-hypervisor +++ b/ansible/inventory/group_vars/all/seed-hypervisor @@ -3,8 +3,9 @@ # Seed hypervisor node configuration. # User with which to access the seed hypervisor via SSH during bootstrap, in -# order to setup the Kayobe user account. Default is {{ os_distribution }}. -seed_hypervisor_bootstrap_user: "{{ os_distribution }}" +# order to setup the Kayobe user account. Default is 'cloud-user' if +# os_distribution is set to centos, otherwise 'os_distribution'. 
+seed_hypervisor_bootstrap_user: "{{ 'cloud-user' if os_distribution == 'centos' else os_distribution }}" ############################################################################### # Seed hypervisor network interface configuration. @@ -43,7 +44,8 @@ seed_hypervisor_luks_devices: [] # volume group for libvirt storage. seed_hypervisor_lvm_groups: [] -# Suggested list of seed hypervisor volume groups for libvirt. Not used by default. +# Suggested list of seed hypervisor volume groups for libvirt. Not used by +# default. seed_hypervisor_lvm_groups_with_data: - "{{ seed_hypervisor_lvm_group_data }}" @@ -160,6 +162,26 @@ seed_hypervisor_firewalld_default_zone: # - state: enabled seed_hypervisor_firewalld_rules: [] +############################################################################### +# Seed hypervisor node fail2ban configuration. + +# Whether to install and enable fail2ban. +seed_hypervisor_fail2ban_enabled: false + +# List of fail2ban jails for the seed hypervisor node. +seed_hypervisor_fail2ban_jail_configuration: >- + {{ seed_hypervisor_fail2ban_jail_configuration_default + + seed_hypervisor_fail2ban_jail_configuration_extra }} + +# List of default fail2ban jails for the seed hypervisor node. +seed_hypervisor_fail2ban_jail_configuration_default: + - option: enabled + value: "true" + section: sshd + +# List of extra fail2ban jails for the seed hypervisor node. +seed_hypervisor_fail2ban_jail_configuration_extra: [] + ############################################################################### # Seed hypervisor node swap configuration. diff --git a/ansible/inventory/group_vars/all/seed-vm b/ansible/inventory/group_vars/all/seed-vm index 9687e6de3..73c50ed73 100644 --- a/ansible/inventory/group_vars/all/seed-vm +++ b/ansible/inventory/group_vars/all/seed-vm @@ -43,18 +43,17 @@ seed_vm_root_format: qcow2 # Base image for the seed VM root volume. 
Default is # "https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img" # when os_distribution is "ubuntu", -# https://dl.rockylinux.org/pub/rocky/9/images/x86_64/Rocky-9-GenericCloud.latest.x86_64.qcow2 -# when os_distribution is "rocky", -# or -# "https://cloud.centos.org/centos/9-stream/x86_64/images/CentOS-Stream-GenericCloud-9-20221206.0.x86_64.qcow2" +# "https://dl.rockylinux.org/pub/rocky/10/images/x86_64/Rocky-10-GenericCloud-Base.latest.x86_64.qcow2" +# when os_distribution is "rocky", or +# "https://cloud.centos.org/centos/10-stream/x86_64/images/CentOS-Stream-GenericCloud-x86_64-10-latest.x86_64.qcow2" # otherwise. seed_vm_root_image: >- {%- if os_distribution == 'ubuntu' %} https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img {%- elif os_distribution == 'rocky' %} - https://dl.rockylinux.org/pub/rocky/9/images/x86_64/Rocky-9-GenericCloud.latest.x86_64.qcow2 + https://dl.rockylinux.org/pub/rocky/10/images/x86_64/Rocky-10-GenericCloud-Base.latest.x86_64.qcow2 {%- else -%} - https://cloud.centos.org/centos/9-stream/x86_64/images/CentOS-Stream-GenericCloud-9-20221206.0.x86_64.qcow2 + https://cloud.centos.org/centos/10-stream/x86_64/images/CentOS-Stream-GenericCloud-x86_64-10-latest.x86_64.qcow2 {%- endif %} # Capacity of the seed VM data volume. @@ -63,7 +62,20 @@ seed_vm_data_capacity: 100G # Format of the seed VM data volume. seed_vm_data_format: qcow2 -# List of network interfaces to attach to the seed VM. +# List of network interfaces to attach to the seed VM. Format is as accepted by +# the stackhpc.libvirt-vm role's libvirt_vms.interfaces variable. Default is +# one interface for each network in 'network_interfaces'. 
+# +# Example with one interface connected to a libvirt network called +# 'libvirt-net', and another interface directly plugged into a host device +# called 'eth1': +# +# seed_vm_interfaces: +# - network: libvirt-net +# - type: direct +# source: +# dev: eth1 +# seed_vm_interfaces: "{{ network_interfaces | sort | map('net_libvirt_vm_network') | list }}" # Boot firmware. Possible values are 'bios' or 'efi'. Default is 'efi'. diff --git a/ansible/inventory/group_vars/all/storage b/ansible/inventory/group_vars/all/storage index ff4f20d5f..46ea4bc79 100644 --- a/ansible/inventory/group_vars/all/storage +++ b/ansible/inventory/group_vars/all/storage @@ -2,9 +2,10 @@ ############################################################################### # Storage node configuration. -# User with which to access the storages via SSH during bootstrap, in order -# to setup the Kayobe user account. Default is {{ os_distribution }}. -storage_bootstrap_user: "{{ os_distribution }}" +# User with which to access the storage nodes via SSH during bootstrap, in +# order to setup the Kayobe user account. Default is 'cloud-user' if +# os_distribution is set to centos, otherwise 'os_distribution'. +storage_bootstrap_user: "{{ 'cloud-user' if os_distribution == 'centos' else os_distribution }}" ############################################################################### # Storage network interface configuration. @@ -172,6 +173,26 @@ storage_firewalld_default_zone: # - state: enabled storage_firewalld_rules: [] +############################################################################### +# Storage node fail2ban configuration. + +# Whether to install and enable fail2ban. +storage_fail2ban_enabled: false + +# List of fail2ban jails for the storage node. +storage_fail2ban_jail_configuration: >- + {{ storage_fail2ban_jail_configuration_default + + storage_fail2ban_jail_configuration_extra }} + +# List of default fail2ban jails for the storage node. 
+storage_fail2ban_jail_configuration_default: + - option: enabled + value: "true" + section: sshd + +# List of extra fail2ban jails for the storage node. +storage_fail2ban_jail_configuration_extra: [] + ############################################################################### # Storage node swap configuration. diff --git a/ansible/inventory/group_vars/all/switches/junos b/ansible/inventory/group_vars/all/switches/junos index d833c2937..df82bb48c 100644 --- a/ansible/inventory/group_vars/all/switches/junos +++ b/ansible/inventory/group_vars/all/switches/junos @@ -1,28 +1,9 @@ --- # Switch configuration. -############################################################################### -# Authentication configuration. - -# For Juniper switches, this defines a 'provider' argument to the junos_* -# modules. -switch_junos_provider: - host: "{{ ansible_host|default(inventory_hostname) }}" - username: "{{ ansible_user }}" - password: "{{ ansible_ssh_pass|default(omit) }}" - ssh_keyfile: "{{ ansible_ssh_private_key_file|default(omit) }}" - timeout: "{{ switch_junos_timeout }}" - ############################################################################### # Configuration format. # Format of configuration in junos_switch_config and # junos_switch_interface_config. May be one of 'set', 'text' or 'json'. switch_junos_config_format: text - -############################################################################### -# Timeout. - -# Timeout in seconds for communicating with the network device either for -# connecting or sending commands. -switch_junos_timeout: 10 diff --git a/ansible/inventory/group_vars/ansible-control/ansible-host b/ansible/inventory/group_vars/ansible-control/ansible-host new file mode 100644 index 000000000..3d291ad41 --- /dev/null +++ b/ansible/inventory/group_vars/ansible-control/ansible-host @@ -0,0 +1,3 @@ +--- +# Host/IP with which to access the Ansible control host via SSH. 
+ansible_host: "{{ admin_oc_net_name | net_ip }}" diff --git a/ansible/inventory/group_vars/ansible-control/ansible-user b/ansible/inventory/group_vars/ansible-control/ansible-user new file mode 100644 index 000000000..d9a48787e --- /dev/null +++ b/ansible/inventory/group_vars/ansible-control/ansible-user @@ -0,0 +1,7 @@ +--- +# User with which to access the Ansible control host via SSH. +ansible_user: "{{ kayobe_ansible_user }}" + +# User with which to access the Ansible control host before the +# kayobe_ansible_user account has been created. +bootstrap_user: "{{ ansible_control_bootstrap_user }}" diff --git a/ansible/inventory/group_vars/ansible-control/container-engine b/ansible/inventory/group_vars/ansible-control/container-engine new file mode 100644 index 000000000..e92c6388d --- /dev/null +++ b/ansible/inventory/group_vars/ansible-control/container-engine @@ -0,0 +1,5 @@ +--- +############################################################################### +# Ansible control host container engine configuration. + +container_engine_enabled: "{{ ansible_control_container_engine_enabled }}" diff --git a/ansible/inventory/group_vars/ansible-control/fail2ban b/ansible/inventory/group_vars/ansible-control/fail2ban new file mode 100644 index 000000000..d3b3e149b --- /dev/null +++ b/ansible/inventory/group_vars/ansible-control/fail2ban @@ -0,0 +1,6 @@ +--- +# Whether to install and enable fail2ban +fail2ban_enabled: "{{ ansible_control_fail2ban_enabled }}" + +# List of fail2ban jails for the Ansible control host. 
+fail2ban_jail_configuration: "{{ ansible_control_fail2ban_jail_configuration }}" diff --git a/ansible/inventory/group_vars/ansible-control/firewall b/ansible/inventory/group_vars/ansible-control/firewall new file mode 100644 index 000000000..24bbf8ec6 --- /dev/null +++ b/ansible/inventory/group_vars/ansible-control/firewall @@ -0,0 +1,21 @@ +--- +############################################################################### +# Ansible control host firewalld configuration. + +# Whether to install and enable firewalld. +firewalld_enabled: "{{ ansible_control_firewalld_enabled }}" + +# A list of zones to create. Each item is a dict containing a 'zone' item. +firewalld_zones: "{{ ansible_control_firewalld_zones }}" + +# A firewalld zone to set as the default. Default is unset, in which case the +# default zone will not be changed. +firewalld_default_zone: "{{ ansible_control_firewalld_default_zone }}" + +# A list of firewall rules to apply. Each item is a dict containing arguments +# to pass to the firewalld module. Arguments are omitted if not provided, with +# the following exceptions: +# - offline: true +# - permanent: true +# - state: enabled +firewalld_rules: "{{ ansible_control_firewalld_rules }}" diff --git a/ansible/inventory/group_vars/ansible-control/luks b/ansible/inventory/group_vars/ansible-control/luks new file mode 100644 index 000000000..842e10c64 --- /dev/null +++ b/ansible/inventory/group_vars/ansible-control/luks @@ -0,0 +1,6 @@ +--- +############################################################################### +# Ansible control host encryption configuration. + +# List of block devices to encrypt. See stackhpc.luks role for format. 
+luks_devices: "{{ ansible_control_luks_devices }}" diff --git a/ansible/inventory/group_vars/ansible-control/lvm b/ansible/inventory/group_vars/ansible-control/lvm new file mode 100644 index 000000000..ad913dfde --- /dev/null +++ b/ansible/inventory/group_vars/ansible-control/lvm @@ -0,0 +1,6 @@ +--- +############################################################################### +# Ansible control host LVM configuration. + +# List of LVM volume groups. +lvm_groups: "{{ ansible_control_lvm_groups }}" diff --git a/ansible/inventory/group_vars/ansible-control/mdadm b/ansible/inventory/group_vars/ansible-control/mdadm new file mode 100644 index 000000000..d5a5cccea --- /dev/null +++ b/ansible/inventory/group_vars/ansible-control/mdadm @@ -0,0 +1,6 @@ +--- +############################################################################### +# Ansible control host software RAID configuration. + +# List of software RAID arrays. See mrlesmithjr.mdadm role for format. +mdadm_arrays: "{{ ansible_control_mdadm_arrays }}" diff --git a/ansible/inventory/group_vars/ansible-control/network b/ansible/inventory/group_vars/ansible-control/network new file mode 100644 index 000000000..a14971c67 --- /dev/null +++ b/ansible/inventory/group_vars/ansible-control/network @@ -0,0 +1,6 @@ +--- +############################################################################### +# Network interface attachments. + +# List of networks to which these nodes are attached. +network_interfaces: "{{ ansible_control_network_interfaces | unique | list }}" diff --git a/ansible/inventory/group_vars/ansible-control/swap b/ansible/inventory/group_vars/ansible-control/swap new file mode 100644 index 000000000..c2d990bd8 --- /dev/null +++ b/ansible/inventory/group_vars/ansible-control/swap @@ -0,0 +1,6 @@ +--- +############################################################################### +# Ansible control host swap configuration. + +# List of swap devices. Each item is a dict containing a 'device' item. 
+swap: "{{ ansible_control_swap }}" diff --git a/ansible/inventory/group_vars/ansible-control/sysctl b/ansible/inventory/group_vars/ansible-control/sysctl new file mode 100644 index 000000000..dba23c496 --- /dev/null +++ b/ansible/inventory/group_vars/ansible-control/sysctl @@ -0,0 +1,3 @@ +--- +# Dict of sysctl parameters to set. +sysctl_parameters: "{{ ansible_control_sysctl_parameters }}" diff --git a/ansible/inventory/group_vars/ansible-control/tuned b/ansible/inventory/group_vars/ansible-control/tuned new file mode 100644 index 000000000..a442126f4 --- /dev/null +++ b/ansible/inventory/group_vars/ansible-control/tuned @@ -0,0 +1,7 @@ +--- +############################################################################### +# Ansible control host tuned configuration. + +# Builtin tuned profile to use. Format is same as that used by giovtorres.tuned +# role. +tuned_active_builtin_profile: "{{ ansible_control_tuned_active_builtin_profile }}" diff --git a/ansible/inventory/group_vars/ansible-control/users b/ansible/inventory/group_vars/ansible-control/users new file mode 100644 index 000000000..5414b4cac --- /dev/null +++ b/ansible/inventory/group_vars/ansible-control/users @@ -0,0 +1,4 @@ +--- +# List of users to create. This should be in a format accepted by the +# singleplatform-eng.users role. +users: "{{ ansible_control_users }}" diff --git a/ansible/inventory/group_vars/compute/fail2ban b/ansible/inventory/group_vars/compute/fail2ban new file mode 100644 index 000000000..bef1836ec --- /dev/null +++ b/ansible/inventory/group_vars/compute/fail2ban @@ -0,0 +1,6 @@ +--- +# Whether to install and enable fail2ban +fail2ban_enabled: "{{ compute_fail2ban_enabled }}" + +# List of fail2ban jails for the compute node. 
+fail2ban_jail_configuration: "{{ compute_fail2ban_jail_configuration }}" diff --git a/ansible/inventory/group_vars/controllers/fail2ban b/ansible/inventory/group_vars/controllers/fail2ban new file mode 100644 index 000000000..72693b64a --- /dev/null +++ b/ansible/inventory/group_vars/controllers/fail2ban @@ -0,0 +1,6 @@ +--- +# Whether to install and enable fail2ban +fail2ban_enabled: "{{ controller_fail2ban_enabled }}" + +# List of fail2ban jails for the controller node. +fail2ban_jail_configuration: "{{ controller_fail2ban_jail_configuration }}" diff --git a/ansible/inventory/group_vars/infra-vms/fail2ban b/ansible/inventory/group_vars/infra-vms/fail2ban new file mode 100644 index 000000000..f1e6f2a4a --- /dev/null +++ b/ansible/inventory/group_vars/infra-vms/fail2ban @@ -0,0 +1,6 @@ +--- +# Whether to install and enable fail2ban +fail2ban_enabled: "{{ infra_vm_fail2ban_enabled }}" + +# List of fail2ban jails for the infrastructure VM node. +fail2ban_jail_configuration: "{{ infra_vm_fail2ban_jail_configuration }}" diff --git a/ansible/inventory/group_vars/monitoring/fail2ban b/ansible/inventory/group_vars/monitoring/fail2ban new file mode 100644 index 000000000..9160d962f --- /dev/null +++ b/ansible/inventory/group_vars/monitoring/fail2ban @@ -0,0 +1,6 @@ +--- +# Whether to install and enable fail2ban +fail2ban_enabled: "{{ monitoring_fail2ban_enabled }}" + +# List of fail2ban jails for the monitoring node. +fail2ban_jail_configuration: "{{ monitoring_fail2ban_jail_configuration }}" diff --git a/ansible/inventory/group_vars/seed-hypervisor/fail2ban b/ansible/inventory/group_vars/seed-hypervisor/fail2ban new file mode 100644 index 000000000..f1106c883 --- /dev/null +++ b/ansible/inventory/group_vars/seed-hypervisor/fail2ban @@ -0,0 +1,6 @@ +--- +# Whether to install and enable fail2ban +fail2ban_enabled: "{{ seed_hypervisor_fail2ban_enabled }}" + +# List of fail2ban jails for the seed hypervisor node. 
+fail2ban_jail_configuration: "{{ seed_hypervisor_fail2ban_jail_configuration }}" diff --git a/ansible/inventory/group_vars/seed/fail2ban b/ansible/inventory/group_vars/seed/fail2ban new file mode 100644 index 000000000..dcdf156f9 --- /dev/null +++ b/ansible/inventory/group_vars/seed/fail2ban @@ -0,0 +1,6 @@ +--- +# Whether to install and enable fail2ban +fail2ban_enabled: "{{ seed_fail2ban_enabled }}" + +# List of fail2ban jails for the seed node. +fail2ban_jail_configuration: "{{ seed_fail2ban_jail_configuration }}" diff --git a/ansible/inventory/group_vars/storage/fail2ban b/ansible/inventory/group_vars/storage/fail2ban new file mode 100644 index 000000000..024920388 --- /dev/null +++ b/ansible/inventory/group_vars/storage/fail2ban @@ -0,0 +1,6 @@ +--- +# Whether to install and enable fail2ban +fail2ban_enabled: "{{ storage_fail2ban_enabled }}" + +# List of fail2ban jails for the storage node. +fail2ban_jail_configuration: "{{ storage_fail2ban_jail_configuration }}" diff --git a/ansible/ip-allocation.yml b/ansible/ip-allocation.yml index b82d934a8..46c49b2a5 100644 --- a/ansible/ip-allocation.yml +++ b/ansible/ip-allocation.yml @@ -1,6 +1,6 @@ --- - name: Ensure IP addresses are allocated - hosts: seed-hypervisor:seed:overcloud:infra-vms + hosts: seed-hypervisor:seed:overcloud:infra-vms:ansible-control max_fail_percentage: >- {{ ip_allocation_max_fail_percentage | default(host_configure_max_fail_percentage) | diff --git a/ansible/kayobe-ansible-user.yml b/ansible/kayobe-ansible-user.yml index c54aa703f..c0d826453 100644 --- a/ansible/kayobe-ansible-user.yml +++ b/ansible/kayobe-ansible-user.yml @@ -7,7 +7,7 @@ # bootstrap process if the account is inaccessible. 
- name: Determine whether user bootstrapping is required - hosts: seed-hypervisor:seed:overcloud:infra-vms + hosts: seed-hypervisor:seed:overcloud:infra-vms:ansible-control gather_facts: false max_fail_percentage: >- {{ kayobe_ansible_user_max_fail_percentage | @@ -36,8 +36,8 @@ attempting bootstrap when: ssh_result.unreachable | default(false) -- name: Ensure python is installed and the Kayobe Ansible user account exists - hosts: kayobe_user_bootstrap_required_True +- name: Ensure Python is installed + hosts: seed-hypervisor:seed:overcloud:infra-vms:ansible-control gather_facts: no max_fail_percentage: >- {{ kayobe_ansible_user_max_fail_percentage | @@ -46,14 +46,14 @@ default(100) }} vars: ansible_user: "{{ bootstrap_user }}" - # We can't assume that a virtualenv exists at this point, so use the system - # python interpreter. - ansible_python_interpreter: /usr/bin/python3 + apt_options: + - "-y" + - "{% if apt_proxy_http %}-o Acquire::http::proxy='{{ apt_proxy_http }}'{% endif %}" + - "{% if apt_proxy_https %}-o Acquire::https::proxy='{{ apt_proxy_https }}'{% endif %}" dnf_options: - "-y" - "{% if 'proxy' in dnf_config %}--setopt=proxy={{ dnf_config['proxy'] }}{% endif %}" tags: - - kayobe-ansible-user - ensure-python tasks: - name: Check if python is installed @@ -62,11 +62,26 @@ failed_when: false register: check_python - # TODO(priteau): Support apt proxy - - name: Ensure python is installed - raw: "test -e /usr/bin/apt && (sudo apt -y update && sudo apt install -y python3-minimal) || (sudo dnf {{ dnf_options | select | join(' ') }} install python3)" + - name: Ensure Python is installed + raw: "(test -e /usr/bin/apt && sudo apt {{ apt_options | select | join(' ') }} update && sudo apt install {{ apt_options | select | join(' ') }} python3-minimal) || (test -e /usr/bin/dnf && sudo dnf {{ dnf_options | select | join(' ') }} install python3)" when: check_python.rc != 0 +- name: Ensure the Kayobe Ansible user account exists + hosts: 
kayobe_user_bootstrap_required_True + gather_facts: no + max_fail_percentage: >- + {{ kayobe_ansible_user_max_fail_percentage | + default(host_configure_max_fail_percentage) | + default(kayobe_max_fail_percentage) | + default(100) }} + vars: + ansible_user: "{{ bootstrap_user }}" + # We can't assume that a virtualenv exists at this point, so use the system + # python interpreter. + ansible_python_interpreter: /usr/bin/python3 + tags: + - kayobe-ansible-user + tasks: - import_role: name: singleplatform-eng.users vars: @@ -88,7 +103,7 @@ become: True - name: Verify that the Kayobe Ansible user account is accessible - hosts: seed-hypervisor:seed:overcloud:infra-vms + hosts: seed-hypervisor:seed:overcloud:infra-vms:ansible-control gather_facts: false max_fail_percentage: >- {{ kayobe_ansible_user_max_fail_percentage | diff --git a/ansible/kayobe-target-venv.yml b/ansible/kayobe-target-venv.yml index 53c0ef0c2..4d4ae092f 100644 --- a/ansible/kayobe-target-venv.yml +++ b/ansible/kayobe-target-venv.yml @@ -3,7 +3,7 @@ # when running kayobe. 
- name: Ensure a virtualenv exists for kayobe - hosts: seed:seed-hypervisor:overcloud:infra-vms + hosts: seed:seed-hypervisor:overcloud:infra-vms:ansible-control gather_facts: False max_fail_percentage: >- {{ kayobe_target_venv_max_fail_percentage | @@ -15,20 +15,29 @@ tasks: - name: Set a fact about the kayobe target virtualenv set_fact: - virtualenv: "{{ ansible_python_interpreter | dirname | dirname }}" + kayobe_virtualenv: "{{ ansible_python_interpreter | dirname | dirname }}" when: - ansible_python_interpreter is defined - not ansible_python_interpreter.startswith('/bin') - not ansible_python_interpreter.startswith('/usr/bin') - - block: - - name: Gather facts - setup: - filter: "{{ kayobe_ansible_setup_filter }}" - gather_subset: "{{ kayobe_ansible_setup_gather_subset }}" - when: not ansible_facts - register: gather_facts + - name: Gather facts with system Python interpreter + setup: + filter: "{{ kayobe_ansible_setup_filter }}" + gather_subset: "{{ kayobe_ansible_setup_gather_subset }}" + when: + #TODO(mattcrees): Enable this check once this bug is fixed: + # https://bugs.launchpad.net/kayobe/+bug/2144548 + # - ansible_facts is undefined or ansible_facts is falsy + - kayobe_virtualenv is defined + register: gather_facts_result + # Before any facts are gathered, ansible doesn't know about + # python virtualenv. + # Use default python3 to be safe for this task. + vars: + ansible_python_interpreter: /usr/bin/python3 + - block: - name: Ensure the Python venv package is installed on Debian family systems package: name: python3-venv @@ -47,12 +56,12 @@ mode: 0755 # Check whether the virtualenv directory is a subdirectory of the # global virtualenv directory. 
- when: virtualenv.startswith(virtualenv_path) + when: kayobe_virtualenv.startswith(virtualenv_path) become: True - name: Ensure kayobe virtualenv directory exists file: - path: "{{ virtualenv }}" + path: "{{ kayobe_virtualenv }}" state: directory owner: "{{ ansible_facts.user_uid }}" group: "{{ ansible_facts.user_gid }}" @@ -69,7 +78,7 @@ pip: name: pip state: latest - virtualenv: "{{ virtualenv }}" + virtualenv: "{{ kayobe_virtualenv }}" # Site packages are required for using the dnf module, which is not # available via PyPI. virtualenv_site_packages: True @@ -79,25 +88,25 @@ pip: name: selinux state: latest - virtualenv: "{{ virtualenv }}" + virtualenv: "{{ kayobe_virtualenv }}" when: - ansible_facts.os_family == 'RedHat' vars: # Use the system python interpreter since the virtualenv might not # exist. - ansible_python_interpreter: /usr/bin/python3 - when: virtualenv is defined + ansible_python_interpreter: "{{ ansible_facts.python.executable }}" + when: kayobe_virtualenv is defined # If we gathered facts earlier it would have been with a different Python # interpreter. For gathering modes that may use a fact cache, gather facts # again using the interpreter from the virtual environment. 
- - name: Gather facts + - name: Gather facts with virtualenv Python interpreter setup: filter: "{{ kayobe_ansible_setup_filter }}" gather_subset: "{{ kayobe_ansible_setup_gather_subset }}" when: - - virtualenv is defined - - gather_facts is not skipped + - kayobe_virtualenv is defined + - gather_facts_result is not skipped - lookup('config', 'DEFAULT_GATHERING') != 'implicit' - block: @@ -110,15 +119,15 @@ name: "{{ packages | select | list }}" state: present become: True - when: virtualenv is not defined + when: kayobe_virtualenv is not defined - - name: Ensure kolla-ansible virtualenv has docker SDK for python installed + - name: Ensure kayobe virtualenv has docker SDK for python installed pip: name: docker state: latest - virtualenv: "{{ virtualenv | default(omit) }}" + virtualenv: "{{ kayobe_virtualenv | default(omit) }}" extra_args: "{% if docker_upper_constraints_file %}-c {{ docker_upper_constraints_file }}{% endif %}" - become: "{{ virtualenv is not defined }}" + become: "{{ kayobe_virtualenv is not defined }}" vars: docker_upper_constraints_file: "{{ pip_upper_constraints_file }}" when: @@ -127,9 +136,9 @@ - name: Ensure kayobe virtualenv has podman SDK installed import_role: - name: openstack.kolla.podman_sdk + name: openstack.kolla.podman_sdk vars: - virtualenv: "{{ virtualenv }}" + virtualenv: "{{ kayobe_virtualenv }}" podman_sdk_upper_constraints_file: "{{ pip_upper_constraints_file }}" when: - "'container-engine' in group_names" diff --git a/ansible/kolla-ansible.yml b/ansible/kolla-ansible.yml index d1d67d7a7..5a0505e52 100644 --- a/ansible/kolla-ansible.yml +++ b/ansible/kolla-ansible.yml @@ -9,6 +9,7 @@ tags: - kolla-ansible - config-validation + - kayobe-generate-config tasks: - name: Validate serial console configuration block: @@ -26,6 +27,7 @@ hosts: localhost tags: - kolla-ansible + - kayobe-generate-config gather_facts: false pre_tasks: - block: @@ -93,17 +95,18 @@ kolla_ansible_passwords_path: "{{ kayobe_env_config_path 
}}/kolla/passwords.yml" kolla_overcloud_inventory_search_paths_static: - "{{ kayobe_config_path }}" - kolla_overcloud_inventory_search_paths: "{{ kolla_overcloud_inventory_search_paths_static + kayobe_env_search_paths }}" + kolla_overcloud_inventory_search_paths: "{{ kolla_overcloud_inventory_search_paths_static + kayobe_env_search_paths | default([]) }}" kolla_ansible_certificates_path: "{{ kayobe_env_config_path }}/kolla/certificates" kolla_inspector_dhcp_pool_start: "{{ inspection_net_name | net_inspection_allocation_pool_start }}" kolla_inspector_dhcp_pool_end: "{{ inspection_net_name | net_inspection_allocation_pool_end }}" kolla_inspector_netmask: "{{ inspection_net_name | net_mask }}" kolla_inspector_default_gateway: "{{ inspection_net_name | net_inspection_gateway or inspection_net_name | net_gateway }}" + kolla_inspector_dns_servers: "{{ inspection_net_name | net_inspection_dns_servers }}" kolla_inspector_extra_kernel_options: "{{ inspector_extra_kernel_options }}" kolla_libvirt_tls: "{{ compute_libvirt_enable_tls | bool }}" kolla_globals_paths_static: - "{{ kayobe_config_path }}" - kolla_globals_paths_extra: "{{ kolla_globals_paths_static + kayobe_env_search_paths }}" + kolla_globals_paths_extra: "{{ kolla_globals_paths_static + kayobe_env_search_paths | default([]) }}" kolla_ironic_inspector_host: "{{ groups[controller_ironic_inspector_group][0] if groups[controller_ironic_inspector_group] | length > 0 else '' }}" - name: Generate Kolla Ansible host vars for the seed host @@ -112,6 +115,7 @@ - config - config-validation - kolla-ansible + - kayobe-generate-config gather_facts: False tasks: - name: Set Kolla Ansible host variables @@ -120,13 +124,13 @@ - var_name: "kolla_bifrost_network_interface" description: "Bifrost provisioning network" network: "{{ provision_oc_net_name }}" - required: True + required: "{{ kolla_enable_bifrost | bool }}" # Strictly api_interface is not required but kolla-ansible currently # references it in prechecks. 
- var_name: "kolla_api_interface" description: "Bifrost provisioning network" network: "{{ provision_oc_net_name }}" - required: True + required: "{{ kolla_enable_bifrost | bool }}" - import_role: name: kolla-ansible-host-vars @@ -141,6 +145,7 @@ - config - config-validation - kolla-ansible + - kayobe-generate-config gather_facts: False tasks: - name: Set Kolla Ansible host variables diff --git a/ansible/kolla-bifrost-hostvars.yml b/ansible/kolla-bifrost-hostvars.yml index ce330fe15..21e1849c4 100644 --- a/ansible/kolla-bifrost-hostvars.yml +++ b/ansible/kolla-bifrost-hostvars.yml @@ -19,14 +19,14 @@ bifrost_hostvars: addressing_mode: static deploy_image_filename: "{{ kolla_bifrost_deploy_image_filename }}" - deploy_image_rootfs: "{{ kolla_bifrost_deploy_image_rootfs | default(omit, true) }}" + deploy_image_rootfs: "{{ kolla_bifrost_deploy_image_rootfs }}" ipv4_interface_mac: "{% if kolla_bifrost_ipv4_interface_mac is defined %}{{ kolla_bifrost_ipv4_interface_mac }}{% else %}{% raw %}{{ extra.pxe_interface_mac | default }}{% endraw %}{% endif %}" ipv4_address: "{{ admin_oc_net_name | net_ip }}" ipv4_subnet_mask: "{{ admin_oc_net_name | net_mask }}" # If the admin network does not have a gateway defined and seed SNAT is # enabled, use the seed as a gateway to allow external access until other # networks have been configured. Otherwise, do not set any gateway. 
- ipv4_gateway: "{{ (admin_oc_net_name | net_gateway) or (admin_oc_net_name | net_ip(seed_host) if seed_enable_snat | bool) }}" + ipv4_gateway: "{{ (admin_oc_net_name | net_gateway) or (admin_oc_net_name | net_ip(seed_host) if seed_enable_snat | bool) or '' }}" ipv4_nameserver: "{{ resolv_nameservers }}" network_mtu: "{{ admin_oc_net_name | net_mtu or '1500' }}" vlan_id: "{{ '' if admin_oc_net_name == provision_oc_net_name else (admin_oc_net_name | net_vlan) }}" @@ -61,7 +61,7 @@ -e @/etc/bifrost/dib.yml --limit {{ inventory_hostname }} -m shell - -a "env OS_CLOUD=bifrost baremetal introspection data save {% raw %}{{ inventory_hostname }}{% endraw %}"' + -a "env OS_CLOUD=bifrost baremetal node inventory save {% raw %}{{ inventory_hostname }}{% endraw %}"' register: save_result changed_when: False # Ignore errors, log a message later. diff --git a/ansible/kolla-bifrost.yml b/ansible/kolla-bifrost.yml index b22c9eb42..88b7b4a1b 100644 --- a/ansible/kolla-bifrost.yml +++ b/ansible/kolla-bifrost.yml @@ -3,6 +3,7 @@ hosts: localhost tags: - kolla-bifrost + - bifrost roles: - role: kolla-bifrost @@ -17,3 +18,4 @@ kolla_bifrost_config_paths_static: - "{{ kayobe_config_path }}" kolla_bifrost_config_paths_extra: "{{ kolla_bifrost_config_paths_static + kayobe_env_search_paths }}" + when: kolla_enable_bifrost | bool diff --git a/ansible/kolla-openstack.yml b/ansible/kolla-openstack.yml index c7b9d56a7..a2c1152f0 100644 --- a/ansible/kolla-openstack.yml +++ b/ansible/kolla-openstack.yml @@ -7,6 +7,7 @@ - config-validation - kolla-ansible - kolla-openstack + - kayobe-generate-config tasks: - name: Create controllers group with ironic enabled group_by: @@ -19,6 +20,7 @@ tags: - kolla-ansible - kolla-openstack + - kayobe-generate-config vars: # These are the filenames generated by overcloud-ipa-build.yml. 
ipa_image_name: "ipa" @@ -57,6 +59,7 @@ tags: - kolla-ansible - kolla-openstack + - kayobe-generate-config vars: switch_type_to_device_type: arista: netmiko_arista_eos @@ -131,17 +134,11 @@ roles: - role: kolla-openstack # Ironic inspector configuration. - kolla_inspector_processing_hooks: "{{ inspector_processing_hooks }}" + kolla_inspector_hooks: "{{ inspector_hooks }}" kolla_inspector_add_ports: "{{ inspector_add_ports }}" kolla_inspector_keep_ports: "{{ inspector_keep_ports }}" kolla_inspector_enable_discovery: "{{ inspector_enable_discovery }}" kolla_inspector_discovery_enroll_node_driver: "{{ inspector_discovery_enroll_node_driver }}" - # Ironic inspector swift store configuration. Currently only supports the - # 'fake' inspection store. - kolla_inspector_enable_swift: "{{ inspector_store_enabled }}" - kolla_inspector_swift_auth: - auth_type: none - endpoint_override: "http://{% raw %}{{ api_interface_address }}{% endraw %}:{{ inspector_store_port }}" kolla_inspector_ipa_host: "{{ groups['controllers_with_ironic_enabled_True'][0] }}" kolla_openstack_custom_config_paths_extra_multi_env_static: - "{{ kayobe_config_path }}" diff --git a/ansible/kolla-target-venv.yml b/ansible/kolla-target-venv.yml index 698623996..8e3b2e9c1 100644 --- a/ansible/kolla-target-venv.yml +++ b/ansible/kolla-target-venv.yml @@ -57,7 +57,7 @@ virtualenv: "{{ kolla_ansible_target_venv }}" extra_args: "{% if kolla_upper_constraints_file %}-c {{ kolla_upper_constraints_file }}{% endif %}" become: True - when: "{{ container_engine == 'docker' }}" + when: container_engine == 'docker' - name: Ensure kolla-ansible virtualenv has podman SDK installed pip: @@ -66,7 +66,7 @@ virtualenv: "{{ kolla_ansible_target_venv }}" extra_args: "{% if kolla_upper_constraints_file %}-c {{ kolla_upper_constraints_file }}{% endif %}" become: True - when: "{{ container_engine == 'podman' }}" + when: container_engine == 'podman' - name: Ensure kolla-ansible virtualenv has SELinux bindings installed pip: diff 
--git a/ansible/logging.yml b/ansible/logging.yml index 801ee3a72..9008149f1 100644 --- a/ansible/logging.yml +++ b/ansible/logging.yml @@ -1,6 +1,6 @@ --- - name: Ensure Logging configuration is applied - hosts: seed-hypervisor:seed:overcloud:infra-vms + hosts: seed-hypervisor:seed:overcloud:infra-vms:ansible-control max_fail_percentage: >- {{ logging_max_fail_percentage | default(host_configure_max_fail_percentage) | diff --git a/ansible/luks.yml b/ansible/luks.yml index 57e4796f1..c47f19d70 100644 --- a/ansible/luks.yml +++ b/ansible/luks.yml @@ -1,6 +1,6 @@ --- - name: Ensure encryption configuration is applied - hosts: seed-hypervisor:seed:overcloud:infra-vms + hosts: seed-hypervisor:seed:overcloud:infra-vms:ansible-control max_fail_percentage: >- {{ luks_max_fail_percentage | default(host_configure_max_fail_percentage) | diff --git a/ansible/lvm.yml b/ansible/lvm.yml index 3d46edadd..39ce81f51 100644 --- a/ansible/lvm.yml +++ b/ansible/lvm.yml @@ -1,6 +1,6 @@ --- - name: Ensure LVM configuration is applied - hosts: seed-hypervisor:seed:overcloud:infra-vms + hosts: seed-hypervisor:seed:overcloud:infra-vms:ansible-control max_fail_percentage: >- {{ lvm_max_fail_percentage | default(host_configure_max_fail_percentage) | diff --git a/ansible/mdadm.yml b/ansible/mdadm.yml index 14338dd0a..e78f56ac3 100644 --- a/ansible/mdadm.yml +++ b/ansible/mdadm.yml @@ -1,6 +1,6 @@ --- - name: Ensure software RAID configuration is applied - hosts: seed-hypervisor:seed:overcloud:infra-vms + hosts: seed-hypervisor:seed:overcloud:infra-vms:ansible-control max_fail_percentage: >- {{ mdadm_max_fail_percentage | default(host_configure_max_fail_percentage) | @@ -9,7 +9,7 @@ tags: - mdadm roles: - - name: mrlesmithjr.mdadm + - role: mrlesmithjr.mdadm become: True when: - mdadm_arrays is defined diff --git a/ansible/network-connectivity.yml b/ansible/network-connectivity.yml index 745032aa4..8259f898a 100644 --- a/ansible/network-connectivity.yml +++ b/ansible/network-connectivity.yml 
@@ -1,11 +1,13 @@ --- - name: Check network connectivity between hosts - hosts: seed:seed-hypervisor:overcloud:infra-vms + hosts: seed:seed-hypervisor:overcloud:infra-vms:ansible-control max_fail_percentage: >- {{ network_connectivity_max_fail_percentage | default(kayobe_max_fail_percentage) | default(100) }} vars: + # Skip external connectivity check when behind a proxy. + nc_skip_external_net: "{{ http_proxy | truthy }}" # Set this to an external IP address to check. nc_external_ip: 8.8.8.8 # Set this to an external hostname to check. @@ -14,29 +16,31 @@ # (20 bytes) headers. icmp_overhead_bytes: 28 tasks: - - name: Display next action - debug: - msg: > - Checking whether hosts have access to an external IP address, - {{ nc_external_ip }}. - run_once: True + - block: + - name: "Display next action: external IP address check" + debug: + msg: > + Checking whether hosts have access to an external IP address, + {{ nc_external_ip }}. + run_once: True - - name: Ensure an external IP is reachable - command: ping -c1 {{ nc_external_ip }} - changed_when: False + - name: Ensure an external IP is reachable + command: ping -c1 {{ nc_external_ip }} + changed_when: False - - name: Display next action - debug: - msg: > - Checking whether hosts have access to an external hostname, - {{ nc_external_hostname }}. - run_once: True + - name: "Display next action: external hostname check" + debug: + msg: > + Checking whether hosts have access to an external hostname, + {{ nc_external_hostname }}. + run_once: True - - name: Ensure an external host is reachable - command: ping -c1 {{ nc_external_hostname }} - changed_when: False + - name: Ensure an external host is reachable + command: ping -c1 {{ nc_external_hostname }} + changed_when: False + when: not nc_skip_external_net - - name: Display next action + - name: "Display next action: gateway check" debug: msg: > Checking whether hosts have access to any configured gateways. @@ -57,7 +61,7 @@ # network and try to ping it. 
Set the packet size according to the # network's MTU. - - name: Display next action + - name: "Display next action: host connectivity check" debug: msg: > Checking whether hosts have access to other hosts on the same @@ -68,9 +72,12 @@ command: > ping {{ remote_ip }} -c1 -M do {% if mtu %} -s {{ mtu | int - icmp_overhead_bytes }}{% endif %} with_items: "{{ network_interfaces }}" + loop_control: + label: "{{ remote_host | default('none', true) }} on {{ item }}" when: - - item | net_ip - remote_hosts | length > 0 + - remote_ip | length > 0 + - item | net_ip changed_when: False vars: # Select other hosts targeted by this play which have this network @@ -83,6 +90,10 @@ rejectattr('inventory_hostname', 'equalto', inventory_hostname) | map(attribute='inventory_hostname') | list }} - remote_host: "{{ remote_hosts | random }}" - remote_ip: "{{ item | net_ip(remote_host) }}" + # NOTE(wszumski): Needed to fix random choice for the run otherwise the + # when check: remote_ip | length > 0, would pass, but remote_ip was '' + # in the command. Assumption was that this was being evaluated once + # for the when clause and then again for the command. Bug? 
+ remote_host: "{{ remote_hosts | random(seed=ansible_facts.date_time.iso8601) if remote_hosts | length > 0 else '' }}" + remote_ip: "{{ lookup('cached', 'vars', item ~ '_ips', default={})[remote_host] | default('', true) }}" mtu: "{{ item | net_mtu }}" diff --git a/ansible/network.yml b/ansible/network.yml index c5b99ad0c..e584b7fd8 100644 --- a/ansible/network.yml +++ b/ansible/network.yml @@ -1,6 +1,6 @@ --- - name: Ensure networking is configured - hosts: seed-hypervisor:seed:overcloud:infra-vms + hosts: seed-hypervisor:seed:overcloud:infra-vms:ansible-control max_fail_percentage: >- {{ network_max_fail_percentage | default(host_configure_max_fail_percentage) | diff --git a/ansible/opensm.yml b/ansible/opensm.yml index bee7e5175..19b94dd9d 100644 --- a/ansible/opensm.yml +++ b/ansible/opensm.yml @@ -12,4 +12,4 @@ - opensm roles: - role: opensm - opensm_action: "{{ kayobe_action }}" + opensm_action: "{{ kayobe_action | default('deploy') }}" diff --git a/ansible/overcloud-deprovision.yml b/ansible/overcloud-deprovision.yml index e7bf91448..c0d9d1bc5 100644 --- a/ansible/overcloud-deprovision.yml +++ b/ansible/overcloud-deprovision.yml @@ -43,6 +43,8 @@ {{ play_hosts | join(', ') }} If you want to proceed type: yes register: pause_prompt + delegate_to: localhost + run_once: true when: not confirm_deprovision - name: Fail if deprovision is not confirmed @@ -51,6 +53,8 @@ msg: > Deprovision has not been confirmed. You must either type 'yes' when prompted, or set ``confirm_deprovision=yes``. 
+ delegate_to: localhost + run_once: true - name: Get PXE MAC address command: > diff --git a/ansible/overcloud-extras.yml b/ansible/overcloud-extras.yml index ad16dc86b..0a5d4fa4e 100644 --- a/ansible/overcloud-extras.yml +++ b/ansible/overcloud-extras.yml @@ -8,5 +8,4 @@ # action: One of deploy, destroy, pull, reconfigure, upgrade - import_playbook: docker-registry.yml -- import_playbook: inspection-store.yml - import_playbook: opensm.yml diff --git a/ansible/overcloud-host-configure.yml b/ansible/overcloud-host-configure.yml index fff5bc398..a6cdf4d8b 100644 --- a/ansible/overcloud-host-configure.yml +++ b/ansible/overcloud-host-configure.yml @@ -13,6 +13,7 @@ - import_playbook: "selinux.yml" - import_playbook: "network.yml" - import_playbook: "firewall.yml" +- import_playbook: "fail2ban.yml" - import_playbook: "etc-hosts.yml" - import_playbook: "tuned.yml" - import_playbook: "sysctl.yml" diff --git a/ansible/overcloud-introspection-data-save.yml b/ansible/overcloud-introspection-data-save.yml index 236a06bbb..0e2b16111 100644 --- a/ansible/overcloud-introspection-data-save.yml +++ b/ansible/overcloud-introspection-data-save.yml @@ -25,7 +25,7 @@ -e @/etc/bifrost/dib.yml --limit {{ inventory_hostname }} -m shell - -a "env OS_CLOUD=bifrost baremetal introspection data save {% raw %}{{ inventory_hostname }}{% endraw %}"' + -a "env OS_CLOUD=bifrost baremetal node inventory save {% raw %}{{ inventory_hostname }}{% endraw %}"' register: save_result changed_when: False # Ignore errors, log a message later. 
@@ -38,14 +38,14 @@ become: "{{ container_engine == 'podman' }}" - name: Ensure introspection data output directory exists - local_action: - module: file + delegate_to: localhost + file: path: "{{ output_dir }}" state: directory - name: Ensure introspection data is saved locally - local_action: - module: copy + delegate_to: localhost + copy: content: "{{ introspection_data_map[output_format | lower] }}" dest: "{{ output_dir }}/{{ inventory_hostname }}.{{ output_format | lower }}" when: save_result.rc == 0 diff --git a/ansible/overcloud-introspection-rules.yml b/ansible/overcloud-introspection-rules.yml index 6f460968f..4c4f1126c 100644 --- a/ansible/overcloud-introspection-rules.yml +++ b/ansible/overcloud-introspection-rules.yml @@ -5,9 +5,9 @@ tags: - introspection-rules tasks: - - name: Create controllers group with ironic inspector enabled + - name: Create controllers group with ironic enabled group_by: - key: "controllers_for_introspection_rules_{{ kolla_enable_ironic_inspector | bool }}" + key: "controllers_for_introspection_rules_{{ kolla_enable_ironic | bool }}" changed_when: false - name: Ensure introspection rules are registered in Ironic Inspector @@ -63,7 +63,7 @@ ironic_inspector_venv: "{{ venv }}" ironic_inspector_upper_constraints_file: "{{ openstacksdk_upper_constraints_file }}" ironic_inspector_auth_type: "{{ openstack_auth_type }}" - ironic_inspector_auth: "{{ openstack_auth }}" + ironic_inspector_auth: "{{ openstack_auth_system_scope }}" ironic_inspector_cacert: "{{ openstack_cacert }}" ironic_inspector_interface: "{{ openstack_interface }}" ironic_inspector_rules: "{{ inspector_rules }}" diff --git a/ansible/overcloud-inventory-discover.yml b/ansible/overcloud-inventory-discover.yml index cabcaa5fd..c41466a53 100644 --- a/ansible/overcloud-inventory-discover.yml +++ b/ansible/overcloud-inventory-discover.yml @@ -23,8 +23,8 @@ ironic_inventory: "{{ inventory_result.stdout | from_json }}" - name: Ensure Kayobe overcloud inventory exists - 
local_action: - module: copy + delegate_to: localhost + copy: content: | # Managed by Ansible - do not edit. # This is the Kayobe overcloud inventory, autogenerated from the seed diff --git a/ansible/overcloud-provision.yml b/ansible/overcloud-provision.yml index 6b49a6fa0..424b45756 100644 --- a/ansible/overcloud-provision.yml +++ b/ansible/overcloud-provision.yml @@ -221,8 +221,8 @@ - final_provision_state != 'active' - name: Wait for SSH access to the nodes - local_action: - module: wait_for + delegate_to: localhost + wait_for: host: "{{ ansible_host }}" port: 22 state: started diff --git a/ansible/physical-network.yml b/ansible/physical-network.yml index f3bfaf00f..3361301c4 100644 --- a/ansible/physical-network.yml +++ b/ansible/physical-network.yml @@ -173,7 +173,6 @@ - role: junos-switch junos_switch_type: "{{ switch_type }}" - junos_switch_provider: "{{ switch_junos_provider }}" junos_switch_config_format: "{{ switch_junos_config_format }}" junos_switch_config: "{{ switch_config }}" junos_switch_interface_config: "{{ switch_interface_config }}" diff --git a/ansible/pip.yml b/ansible/pip.yml index 3bea4a70d..98e0473fa 100644 --- a/ansible/pip.yml +++ b/ansible/pip.yml @@ -1,6 +1,6 @@ --- - name: Configure local PyPi mirror - hosts: seed-hypervisor:seed:overcloud:infra-vms + hosts: seed-hypervisor:seed:overcloud:infra-vms:ansible-control max_fail_percentage: >- {{ pip_max_fail_percentage | default(host_configure_max_fail_percentage) | diff --git a/ansible/provision-net.yml b/ansible/provision-net.yml index 66d28adc7..c64a70867 100644 --- a/ansible/provision-net.yml +++ b/ansible/provision-net.yml @@ -5,17 +5,32 @@ tags: - provision-net - cleaning-net + - inspection-net tasks: - name: Create controllers group with ironic enabled group_by: key: "controllers_for_provision_net_{{ kolla_enable_ironic | bool }}" changed_when: false -- name: Ensure provisioning and cleaning networks and subnets are registered in neutron +- name: Ensure inspection, provisioning and 
cleaning networks and subnets are registered in neutron # Only required to run on a single host. hosts: controllers_for_provision_net_True[0] vars: venv: "{{ virtualenv_path }}/openstacksdk" + inspection_net: + name: "{{ kolla_ironic_inspection_network }}" + mtu: "{{ inspection_net_name | net_mtu | default(omit, True) }}" + provider_network_type: "{% if inspection_net_name | net_vlan %}vlan{% else %}flat{% endif %}" + provider_physical_network: "{{ inspection_net_name | net_physical_network | default('physnet1', True) }}" + provider_segmentation_id: "{{ inspection_net_name | net_vlan }}" + # Flat networks need to be shared to allow instances to use them. + shared: "{{ (inspection_net_name | net_vlan) is none }}" + subnets: + - name: "{{ kolla_ironic_inspection_network }}" + cidr: "{{ inspection_net_name | net_cidr }}" + gateway_ip: "{{ inspection_net_name | net_neutron_gateway or provision_wl_net_name | net_gateway | default(omit, True) }}" + allocation_pool_start: "{{ inspection_net_name | net_neutron_allocation_pool_start }}" + allocation_pool_end: "{{ inspection_net_name | net_neutron_allocation_pool_end }}" provision_net: name: "{{ kolla_ironic_provisioning_network }}" mtu: "{{ provision_wl_net_name | net_mtu | default(omit, True) }}" @@ -48,6 +63,7 @@ tags: - provision-net - cleaning-net + - inspection-net tasks: - name: Validate OpenStack password authentication parameters fail: diff --git a/ansible/proxy.yml b/ansible/proxy.yml index 1f39eb6d1..ffde65f3e 100644 --- a/ansible/proxy.yml +++ b/ansible/proxy.yml @@ -1,5 +1,6 @@ +--- - name: Configure HTTP(S) proxy settings - hosts: seed-hypervisor:seed:overcloud:infra-vms + hosts: seed-hypervisor:seed:overcloud:infra-vms:ansible-control max_fail_percentage: >- {{ proxy_max_fail_percentage | default(host_configure_max_fail_percentage) | diff --git a/ansible/roles/bootstrap/tasks/main.yml b/ansible/roles/bootstrap/tasks/main.yml index f5a1e1b4f..ee673a204 100644 --- a/ansible/roles/bootstrap/tasks/main.yml +++ 
b/ansible/roles/bootstrap/tasks/main.yml @@ -61,8 +61,10 @@ user: "{{ ansible_facts.user_id }}" key: "{{ lookup('file', bootstrap_ssh_private_key_path ~ '.pub') }}" +# NOTE(priteau): Exclude comments from ssh-keyscan output because they break +# known_hosts on centos/rocky 10. - name: Scan for SSH keys - command: ssh-keyscan {{ item }} + shell: ssh-keyscan {{ item }} | grep -v '^#' with_items: - localhost - 127.0.0.1 diff --git a/ansible/roles/console-allocation/tasks/main.yml b/ansible/roles/console-allocation/tasks/main.yml index 2445181b7..cb67e2f12 100644 --- a/ansible/roles/console-allocation/tasks/main.yml +++ b/ansible/roles/console-allocation/tasks/main.yml @@ -35,8 +35,8 @@ # NOTE(mgoddard): Use the Python interpreter used to run ansible-playbook, # since this has Python dependencies available to it (PyYAML). ansible_python_interpreter: "{{ ansible_playbook_python }}" - local_action: - module: console_allocation + delegate_to: localhost + console_allocation: allocation_file: "{{ console_allocation_filename }}" nodes: "{{ console_allocation_ironic_nodes }}" allocation_pool_start: "{{ console_allocation_pool_start }}" diff --git a/ansible/roles/dell-switch/tasks/main.yml b/ansible/roles/dell-switch/tasks/main.yml index fc3d6268e..958b04b71 100644 --- a/ansible/roles/dell-switch/tasks/main.yml +++ b/ansible/roles/dell-switch/tasks/main.yml @@ -1,23 +1,23 @@ --- - name: Ensure DellOS6 switches are configured - local_action: - module: dellos6_config + delegate_to: localhost + dellemc.os6.os6: provider: "{{ dell_switch_provider }}" src: "{{ lookup('template', 'dellos6-config.j2') }}" save: "{{ dell_switch_save | bool }}" when: dell_switch_type == 'dellos6' - name: Ensure DellOS9 switches are configured - local_action: - module: dellos9_config + delegate_to: localhost + dellemc.os9.os9: provider: "{{ dell_switch_provider }}" src: "{{ lookup('template', 'dellos9-config.j2') }}" save: "{{ dell_switch_save | bool }}" when: dell_switch_type == 'dellos9' - name: 
Ensure DellOS10 switches are configured - local_action: - module: dellos10_config + delegate_to: localhost + dellemc.os10.os10: provider: "{{ dell_switch_provider }}" src: "{{ lookup('template', 'dellos10-config.j2') }}" save: "{{ dell_switch_save | bool }}" diff --git a/ansible/roles/dev-tools/defaults/main.yml b/ansible/roles/dev-tools/defaults/main.yml index b2f843617..adeac39e1 100644 --- a/ansible/roles/dev-tools/defaults/main.yml +++ b/ansible/roles/dev-tools/defaults/main.yml @@ -15,4 +15,4 @@ dev_tools_packages_system: - acl # List of packages to install. -dev_tools_packages: "{{ dev_tools_packages_default + dev_tools_packages_extra + dev_tools_packages_system }}" +dev_tools_packages: "{{ (dev_tools_packages_default + dev_tools_packages_extra + dev_tools_packages_system) | select | list }}" diff --git a/ansible/roles/disable-cloud-init/handlers/main.yml b/ansible/roles/disable-cloud-init/handlers/main.yml index b30534747..8b7dc20ca 100644 --- a/ansible/roles/disable-cloud-init/handlers/main.yml +++ b/ansible/roles/disable-cloud-init/handlers/main.yml @@ -1,5 +1,5 @@ --- -- name: reload systemd daemon +- name: Reload systemd daemon systemd: name: cloud-init daemon_reload: yes diff --git a/ansible/roles/disable-cloud-init/tasks/main.yml b/ansible/roles/disable-cloud-init/tasks/main.yml index b17d59bb0..bc591a557 100644 --- a/ansible/roles/disable-cloud-init/tasks/main.yml +++ b/ansible/roles/disable-cloud-init/tasks/main.yml @@ -5,5 +5,5 @@ state: touch mode: "u=rw,g=r,o=r" notify: - - reload systemd daemon + - Reload systemd daemon become: True diff --git a/ansible/roles/dnf/tasks/custom-repo.yml b/ansible/roles/dnf/tasks/custom-repo.yml index b7e66de76..6a7e0434e 100644 --- a/ansible/roles/dnf/tasks/custom-repo.yml +++ b/ansible/roles/dnf/tasks/custom-repo.yml @@ -3,28 +3,28 @@ yum_repository: name: "{{ item.key }}" description: "{% if 'description' in item.value %}{{ item.value.description }}{% else %}{{ item.key }} repository{% endif %}" - baseurl: "{{ 
item.value.baseurl | default(omit)}}" - file: "{{ item.value.file | default(omit)}}" - gpgkey: "{{ item.value.gpgkey | default(omit)}}" - gpgcheck: "{{ item.value.gpgcheck | default(omit)}}" - cost: "{{ item.value.cost | default(omit)}}" - enabled: "{{ item.value.enabled | default(omit)}}" - exclude: "{{ item.value.exclude | default(omit)}}" - gpgcakey: "{{ item.value.gpgcakey | default(omit)}}" - includepkgs: "{{ item.value.includepkgs | default(omit)}}" - metadata_expire: "{{ item.value.metadata_expire | default(omit)}}" - metalink: "{{ item.value.metalink | default(omit)}}" - mirrorlist: "{{ item.value.mirrorlist | default(omit)}}" - mirrorlist_expire: "{{ item.value.mirrorlist_expire | default(omit)}}" + baseurl: "{{ item.value.baseurl | default(omit) }}" + file: "{{ item.value.file | default(omit) }}" + gpgkey: "{{ item.value.gpgkey | default(omit) }}" + gpgcheck: "{{ item.value.gpgcheck | default(omit) }}" + cost: "{{ item.value.cost | default(omit) }}" + enabled: "{{ item.value.enabled | default(omit) }}" + exclude: "{{ item.value.exclude | default(omit) }}" + gpgcakey: "{{ item.value.gpgcakey | default(omit) }}" + includepkgs: "{{ item.value.includepkgs | default(omit) }}" + metadata_expire: "{{ item.value.metadata_expire | default(omit) }}" + metalink: "{{ item.value.metalink | default(omit) }}" + mirrorlist: "{{ item.value.mirrorlist | default(omit) }}" + mirrorlist_expire: "{{ item.value.mirrorlist_expire | default(omit) }}" password: "{{ item.value.password | default(omit) }}" - priority: "{{ item.value.priority | default(omit)}}" - proxy: "{{ item.value.proxy | default(omit)}}" - proxy_password: "{{ item.value.proxy_password | default(omit)}}" - proxy_username: "{{ item.value.proxy_username | default(omit)}}" - repo_gpgcheck: "{{ item.value.repo_gpgcheck | default(omit)}}" - sslverify: "{{ item.value.sslverify | default(omit)}}" + priority: "{{ item.value.priority | default(omit) }}" + proxy: "{{ item.value.proxy | default(omit) }}" + proxy_password: 
"{{ item.value.proxy_password | default(omit) }}" + proxy_username: "{{ item.value.proxy_username | default(omit) }}" + repo_gpgcheck: "{{ item.value.repo_gpgcheck | default(omit) }}" + sslverify: "{{ item.value.sslverify | default(omit) }}" username: "{{ item.value.username | default(omit) }}" - state: "{{ item.value.state | default(omit)}}" + state: "{{ item.value.state | default(omit) }}" with_dict: "{{ dnf_custom_repos }}" loop_control: label: "{{ item.key }}" diff --git a/ansible/roles/dnf/tasks/main.yml b/ansible/roles/dnf/tasks/main.yml index 41feff3ff..d39b8a145 100644 --- a/ansible/roles/dnf/tasks/main.yml +++ b/ansible/roles/dnf/tasks/main.yml @@ -4,7 +4,7 @@ path: /etc/dnf/dnf.conf section: "main" option: "{{ item.key }}" - value: "{{ item.value }}" + value: "{{ item.value }}" loop: "{{ query('dict', dnf_config) }}" become: true diff --git a/ansible/roles/dnf/templates/epel.repo.j2 b/ansible/roles/dnf/templates/epel.repo.j2 index 0c924f239..ad5d8c774 100644 --- a/ansible/roles/dnf/templates/epel.repo.j2 +++ b/ansible/roles/dnf/templates/epel.repo.j2 @@ -3,14 +3,14 @@ name=Extra Packages for Enterprise Linux $releasever - $basearch baseurl=http://{{ dnf_epel_mirror_host }}/{{ dnf_epel_mirror_directory }}/$releasever/Everything/$basearch enabled=1 gpgcheck=1 -gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-8 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-{{ ansible_facts.distribution_major_version }} fastestmirror=0 [epel-debuginfo] name=Extra Packages for Enterprise Linux $releasever - $basearch - Debug baseurl=http://{{ dnf_epel_mirror_host }}/{{ dnf_epel_mirror_directory }}/$releasever/Everything/$basearch/debug enabled=0 -gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-8 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-{{ ansible_facts.distribution_major_version }} gpgcheck=1 fastestmirror=0 @@ -18,6 +18,6 @@ fastestmirror=0 name=Extra Packages for Enterprise Linux $releasever - $basearch - Source baseurl=http://{{ dnf_epel_mirror_host }}/{{ 
dnf_epel_mirror_directory }}/$releasever/Everything/SRPMS enabled=0 -gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-8 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-{{ ansible_facts.distribution_major_version }} gpgcheck=1 fastestmirror=0 diff --git a/ansible/roles/docker-registry/tasks/destroy.yml b/ansible/roles/docker-registry/tasks/destroy.yml index bead5b9a6..527118151 100644 --- a/ansible/roles/docker-registry/tasks/destroy.yml +++ b/ansible/roles/docker-registry/tasks/destroy.yml @@ -14,4 +14,4 @@ - volumes when: "'/' not in volume" vars: - volume: "{{ item.1.split(':')[0] }}" \ No newline at end of file + volume: "{{ item.1.split(':')[0] }}" diff --git a/ansible/roles/drac-pxe/tasks/main.yml b/ansible/roles/drac-pxe/tasks/main.yml index cf8d51382..aa97364ad 100644 --- a/ansible/roles/drac-pxe/tasks/main.yml +++ b/ansible/roles/drac-pxe/tasks/main.yml @@ -114,7 +114,7 @@ - name: Set a fact containing the UEFI PXE interface set_fact: - current_pxe_interface: "{{ result.stdout_lines[-1].partition('=')[2]}] }}" + current_pxe_interface: "{{ result.stdout_lines[-1].partition('=')[2] }}" required_pxe_interface: "{{ 'NIC.Integrated.1-' ~ drac_pxe_interface ~ '-1' }}" - name: Fail if there are pending UEFI PXE interface changes diff --git a/ansible/roles/firewalld/tasks/enabled.yml b/ansible/roles/firewalld/tasks/enabled.yml index 048645169..1ab86bcd6 100644 --- a/ansible/roles/firewalld/tasks/enabled.yml +++ b/ansible/roles/firewalld/tasks/enabled.yml @@ -47,7 +47,7 @@ zone: "{{ item | net_zone }}" become: true loop: "{{ network_interfaces }}" - when: item | net_zone + when: item | net_zone is truthy notify: Restart firewalld - name: Ensure firewalld rules are applied diff --git a/ansible/roles/gather-facts-delegated/defaults/main.yml b/ansible/roles/gather-facts-delegated/defaults/main.yml index ad6fe6444..2bba8cfc4 100644 --- a/ansible/roles/gather-facts-delegated/defaults/main.yml +++ b/ansible/roles/gather-facts-delegated/defaults/main.yml @@ -4,5 +4,4 @@ 
gather_facts_delegated_batch_index: "{{ ansible_play_batch.index(inventory_hostn gather_facts_delegated_batch_count: "{{ ansible_play_batch | length }}" # Use a python list slice to divide the group up. # Syntax: [::] -gather_facts_delegated_delegate_hosts: >- - {{ gather_facts_delegated_limit_hosts[gather_facts_delegated_batch_index | int::gather_facts_delegated_batch_count | int] }} +gather_facts_delegated_delegate_hosts: "{{ gather_facts_delegated_limit_hosts[gather_facts_delegated_batch_index | int::gather_facts_delegated_batch_count | int] }}" # noqa jinja[spacing] diff --git a/ansible/roles/infra-vms/tasks/main.yml b/ansible/roles/infra-vms/tasks/main.yml index a7306935e..fc367ccd4 100644 --- a/ansible/roles/infra-vms/tasks/main.yml +++ b/ansible/roles/infra-vms/tasks/main.yml @@ -1,7 +1,7 @@ --- - import_tasks: prerequisites.yml -- name: list all VMs on hypervisor +- name: List all VMs on hypervisor virt: command: list_vms register: all_vms diff --git a/ansible/roles/inspection-store/README.md b/ansible/roles/inspection-store/README.md deleted file mode 100644 index 4c9fb18e8..000000000 --- a/ansible/roles/inspection-store/README.md +++ /dev/null @@ -1,40 +0,0 @@ -Inspection Store -================ - -Ironic inspector can make use of Swift to store introspection data. Not all -OpenStack deployments feature Swift, so it may be useful to provide a minimal -HTTP interface that emulates Swift for storing ironic inspector's introspection -data. This role deploys such an interface using nginx. Note that no -authentication mechanism is provided. - -Requirements ------------- - -The host executing the role has the following requirements: - -* Docker engine -* Python ``docker >= 2.0.0`` - -Role Variables --------------- - -Dependencies ------------- - -None - -Example Playbook ----------------- - -The following playbook deploys an inspection store. 
- - --- - - hosts: all - - roles: - - role: inspection-store - -Author Information ------------------- - -- Mark Goddard () diff --git a/ansible/roles/inspection-store/defaults/main.yml b/ansible/roles/inspection-store/defaults/main.yml deleted file mode 100644 index 7d1997d53..000000000 --- a/ansible/roles/inspection-store/defaults/main.yml +++ /dev/null @@ -1,39 +0,0 @@ ---- -# Roughly follows kolla-ansible's service deployment patterns. - -# Action to perform. One of 'deploy', 'destroy', 'pull', 'reconfigure', -# 'stop', 'upgrade'. -inspection_store_action: deploy - -# Whether an inspection store is enabled. -inspection_store_enabled: true - -# Service deployment definition. -inspection_store_services: - inspection_store: - container_name: inspection_store - enabled: "{{ inspection_store_enabled }}" - image: "{{ inspection_store_image_full }}" - network_mode: host - volumes: - - "/etc/localtime:/etc/localtime:ro" - - "{{ inspection_store_config_path }}/nginx.conf:/etc/nginx/nginx.conf:ro" - - "inspection_store:/data" - -# The port on which the inspection store server should listen. -inspection_store_port: 8080 - -# Path in which to store inspection store server configuration. 
-inspection_store_config_path: "/etc/inspection-store" - -#################### -# Inspection Store -#################### -inspection_store_namespace: "library" -inspection_store: docker.io -inspection_store_image: "{{ inspection_store ~ '/' if inspection_store | default else '' }}{{ inspection_store_namespace ~ '/' if inspection_store_namespace else '' }}nginx" -inspection_store_tag: "stable" -inspection_store_image_full: "{{ inspection_store_image }}:{{ inspection_store_tag }}" - -inspection_store_restart_policy: "unless-stopped" -#inspection_store_restart_retries: diff --git a/ansible/roles/inspection-store/handlers/main.yml b/ansible/roles/inspection-store/handlers/main.yml deleted file mode 100644 index a75eed5c9..000000000 --- a/ansible/roles/inspection-store/handlers/main.yml +++ /dev/null @@ -1,21 +0,0 @@ ---- -- name: Restart inspection store container - kayobe_container: - name: "{{ item.value.container_name }}" - state: started - restart: True - # NOTE: The image argument shouldn't be required, but without it this - # handler fails on Ansible 2.3. Related bug: - # https://github.com/ansible/ansible/issues/21188. 
- image: "{{ item.value.image }}" - with_dict: "{{ inspection_store_services }}" - when: item.value.enabled - become: "{{ container_engine == 'podman' }}" - -- name: Ensure inspection store data directory exists - command: > - {{ container_engine }} exec {{ inspection_store_services.inspection_store.container_name }} - bash -c "mkdir -p /data/ironic-inspector && - chown nginx:nginx /data/ironic-inspector" - when: inspection_store_services.inspection_store.enabled - become: "{{ container_engine == 'podman' }}" diff --git a/ansible/roles/inspection-store/tasks/config.yml b/ansible/roles/inspection-store/tasks/config.yml deleted file mode 100644 index e798f0214..000000000 --- a/ansible/roles/inspection-store/tasks/config.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -- name: Ensure inspection store configuration path exists - file: - path: "{{ inspection_store_config_path }}" - state: directory - owner: "{{ ansible_facts.user_uid }}" - group: "{{ ansible_facts.user_gid }}" - mode: 0750 - become: True - -- name: Ensure inspection store server is configured - template: - src: nginx.conf - dest: "{{ inspection_store_config_path }}/nginx.conf" - become: True - notify: - - Restart inspection store container - - Ensure inspection store data directory exists diff --git a/ansible/roles/inspection-store/tasks/deploy.yml b/ansible/roles/inspection-store/tasks/deploy.yml deleted file mode 100644 index d1f8db5d8..000000000 --- a/ansible/roles/inspection-store/tasks/deploy.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -- include_tasks: config.yml -- include_tasks: start.yml diff --git a/ansible/roles/inspection-store/tasks/destroy.yml b/ansible/roles/inspection-store/tasks/destroy.yml deleted file mode 100644 index a311bf3e1..000000000 --- a/ansible/roles/inspection-store/tasks/destroy.yml +++ /dev/null @@ -1,22 +0,0 @@ ---- -- name: Ensure inspection store container is stopped - kayobe_container: - name: "{{ item.value.container_name }}" - state: "absent" - with_dict: "{{ 
inspection_store_services }}" - become: "{{ container_engine == 'podman' }}" - -- name: Ensure inspection store volumes are absent - kayobe_container_volume: - name: "{{ volume }}" - state: absent - with_subelements: - - "{{ inspection_store_services }}" - - volumes - when: "'/' not in volume" - failed_when: - - volume_result.rc != 0 - - "'no such volume' not in volume_result.stderr | lower" - vars: - volume: "{{ item.1.split(':')[0] }}" - become: "{{ container_engine == 'podman' }}" diff --git a/ansible/roles/inspection-store/tasks/main.yml b/ansible/roles/inspection-store/tasks/main.yml deleted file mode 100644 index 23541719d..000000000 --- a/ansible/roles/inspection-store/tasks/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -- include_tasks: "{{ inspection_store_action }}.yml" diff --git a/ansible/roles/inspection-store/tasks/pull.yml b/ansible/roles/inspection-store/tasks/pull.yml deleted file mode 100644 index 88068c305..000000000 --- a/ansible/roles/inspection-store/tasks/pull.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -- name: Pulling inspection store container image - kayobe_container_image: - name: "{{ item.value.image }}" - source: pull - state: present - with_dict: "{{ inspection_store_services }}" - when: - - item.value.enabled - - inspection_store_action != 'destroy' - become: "{{ container_engine == 'podman' }}" diff --git a/ansible/roles/inspection-store/tasks/reconfigure.yml b/ansible/roles/inspection-store/tasks/reconfigure.yml deleted file mode 100644 index f670a5b78..000000000 --- a/ansible/roles/inspection-store/tasks/reconfigure.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -- include_tasks: deploy.yml diff --git a/ansible/roles/inspection-store/tasks/start.yml b/ansible/roles/inspection-store/tasks/start.yml deleted file mode 100644 index a2735af38..000000000 --- a/ansible/roles/inspection-store/tasks/start.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -- name: Ensure inspection store container is running - kayobe_container: - image: "{{ item.value.image }}" - 
name: "{{ item.value.container_name }}" - ports: "{{ item.value.ports | default(omit) }}" - privileged: "{{ item.value.privileged | default(omit) }}" - read_only: "{{ item.value.read_only | default(omit) }}" - restart_policy: "{{ inspection_store_restart_policy }}" - restart_retries: "{{ inspection_store_restart_retries | default(omit) }}" - state: started - volumes: "{{ item.value.volumes }}" - network_mode: "{{ item.value.network_mode | default(omit) }}" - with_dict: "{{ inspection_store_services }}" - notify: - - Ensure inspection store data directory exists - become: "{{ container_engine == 'podman' }}" - when: item.value.enabled | bool diff --git a/ansible/roles/inspection-store/tasks/stop.yml b/ansible/roles/inspection-store/tasks/stop.yml deleted file mode 100644 index d01750c21..000000000 --- a/ansible/roles/inspection-store/tasks/stop.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -- name: Ensure inspection store container is stopped - kayobe_container: - image: "{{ item.value.image }}" - name: "{{ item.value.container_name }}" - state: "stopped" - with_dict: "{{ inspection_store_services }}" - when: - - item.value.enabled | bool - become: "{{ container_engine == 'podman' }}" diff --git a/ansible/roles/inspection-store/tasks/upgrade.yml b/ansible/roles/inspection-store/tasks/upgrade.yml deleted file mode 100644 index 99348ae91..000000000 --- a/ansible/roles/inspection-store/tasks/upgrade.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -- include_tasks: pull.yml -- include_tasks: deploy.yml diff --git a/ansible/roles/inspection-store/templates/nginx.conf b/ansible/roles/inspection-store/templates/nginx.conf deleted file mode 100644 index cea01e58d..000000000 --- a/ansible/roles/inspection-store/templates/nginx.conf +++ /dev/null @@ -1,40 +0,0 @@ -user nginx; -worker_processes 1; - -error_log /var/log/nginx/error.log warn; -pid /var/run/nginx.pid; - - -events { - worker_connections 1024; -} - - -http { - include /etc/nginx/mime.types; - default_type 
application/octet-stream; - - log_format main '$remote_addr - $remote_user [$time_local] "$request" ' - '$status $body_bytes_sent "$http_referer" ' - '"$http_user_agent" "$http_x_forwarded_for"'; - - access_log /var/log/nginx/access.log main; - - sendfile on; - #tcp_nopush on; - - keepalive_timeout 65; - - #gzip on; - - server { - listen {{ inspection_store_port }}; - root /data; - location /ironic-inspector { - return 200 ""; - } - location /ironic-inspector/ { - dav_methods PUT DELETE; - } - } -} diff --git a/ansible/roles/ip-allocation/tasks/main.yml b/ansible/roles/ip-allocation/tasks/main.yml index 93360a00c..a30457be5 100644 --- a/ansible/roles/ip-allocation/tasks/main.yml +++ b/ansible/roles/ip-allocation/tasks/main.yml @@ -4,8 +4,8 @@ # NOTE(mgoddard): Use the Python interpreter used to run ansible-playbook, # since this has Python dependencies available to it (PyYAML). ansible_python_interpreter: "{{ ansible_playbook_python }}" - local_action: - module: ip_allocation + delegate_to: localhost + ip_allocation: allocation_file: "{{ ip_allocation_filename }}" hostname: "{{ ip_allocation_hostname }}" net_name: "{{ item.net_name }}" diff --git a/ansible/roles/ipa-images/tasks/set-driver-info.yml b/ansible/roles/ipa-images/tasks/set-driver-info.yml index 4592da109..6ddcbc89e 100644 --- a/ansible/roles/ipa-images/tasks/set-driver-info.yml +++ b/ansible/roles/ipa-images/tasks/set-driver-info.yml @@ -23,9 +23,9 @@ - name: Change system scope to all for Ironic operations set_fact: ipa_images_ironic_openstack_auth_env: "{{ ipa_images_openstack_auth_env | - combine ({ 'OS_PROJECT_NAME': omit }) | - combine ({ 'OS_PROJECT_DOMAIN_NAME': omit }) | - combine ({ 'OS_SYSTEM_SCOPE': 'all' }) }}" + combine({'OS_PROJECT_NAME': omit}) | + combine({'OS_PROJECT_DOMAIN_NAME': omit}) | + combine({'OS_SYSTEM_SCOPE': 'all'}) }}" - name: Get a list of ironic nodes command: | @@ -36,11 +36,11 @@ - name: Make sure openstack nodes are in baremetal-compute group add_host: - name: "{{ 
item.Name }}" + name: "{{ item.name }}" groups: baremetal-compute when: - - item.Name is not none - - item.Name not in groups["baremetal-compute"] + - item.name is not none + - item.name not in groups["baremetal-compute"] with_items: "{{ ipa_images_ironic_node_list.stdout | from_json }}" - name: Set fact containing filtered list of nodes @@ -55,15 +55,15 @@ set_fact: ipa_images_ironic_nodes: "{{ ipa_images_ironic_nodes + [item] }}" with_items: "{{ ipa_images_ironic_node_list.stdout | from_json }}" - when: item['Name'] in ipa_images_compute_node_whitelist + when: item['name'] in ipa_images_compute_node_whitelist - name: Ensure ironic nodes use the new Ironic Python Agent (IPA) images command: > - {{ ipa_images_venv }}/bin/openstack baremetal node set {{ item.UUID }} + {{ ipa_images_venv }}/bin/openstack baremetal node set {{ item.uuid }} --driver-info deploy_kernel={{ ipa_images_kernel_uuid }} --driver-info deploy_ramdisk={{ ipa_images_ramdisk_uuid }} with_items: "{{ ipa_images_ironic_nodes }}" when: - item["Driver Info"].deploy_kernel != ipa_images_kernel_uuid or - item["Driver Info"].deploy_ramdisk != ipa_images_ramdisk_uuid + item["driver_info"].deploy_kernel != ipa_images_kernel_uuid or + item["driver_info"].deploy_ramdisk != ipa_images_ramdisk_uuid environment: "{{ ipa_images_ironic_openstack_auth_env }}" diff --git a/ansible/roles/ironic-inspector-rules/defaults/main.yml b/ansible/roles/ironic-inspector-rules/defaults/main.yml index ee38abae1..fd36cc0cf 100644 --- a/ansible/roles/ironic-inspector-rules/defaults/main.yml +++ b/ansible/roles/ironic-inspector-rules/defaults/main.yml @@ -2,7 +2,7 @@ # Path to a directory in which to create a virtualenv. ironic_inspector_venv: -# Upper constraints file for installation of python-ironic-inspector-client. +# Upper constraints file for installation of python-ironicclient. ironic_inspector_upper_constraints_file: # Authentication type. 
diff --git a/ansible/roles/ironic-inspector-rules/library/os_ironic_inspector_rule.py b/ansible/roles/ironic-inspector-rules/library/os_ironic_inspector_rule.py index 5db8f8088..c84743a51 100644 --- a/ansible/roles/ironic-inspector-rules/library/os_ironic_inspector_rule.py +++ b/ansible/roles/ironic-inspector-rules/library/os_ironic_inspector_rule.py @@ -53,6 +53,10 @@ description: - List of actions to be taken when the conditions are met. required: true + sensitive: + description: + - Whether to mark the rule as sensitive in Ironic + required: false """ EXAMPLES = """ @@ -74,13 +78,13 @@ def _get_client(module, cloud): """Return an Ironic inspector client.""" - return cloud.baremetal_introspection + return cloud.baremetal def _ensure_rule_present(module, client): """Ensure that an inspector rule is present.""" if module.params['uuid']: - response = client.get('/rules/{}'.format(module.params['uuid'])) + response = client.get('/inspection_rules/{}'.format(module.params['uuid']), headers={'X-OpenStack-Ironic-API-Version': '1.96'}) if not response.ok: if response.status_code != 404: module.fail_json(msg="Failed retrieving Inspector rule %s: %s" @@ -88,7 +92,7 @@ def _ensure_rule_present(module, client): else: rule = response.json() # Check whether the rule differs from the request. - keys = ('conditions', 'actions', 'description') + keys = ('conditions', 'actions', 'description', 'sensitive') for key in keys: expected = module.params[key] if key == 'conditions': @@ -96,9 +100,10 @@ def _ensure_rule_present(module, client): # conditions that may not be in the requested rule. Apply # defaults to allow the comparison to succeed. 
expected = copy.deepcopy(expected) + if key == 'actions': + expected = copy.deepcopy(expected) for condition in expected: - condition.setdefault('invert', False) - condition.setdefault('multiple', 'any') + condition.setdefault('loop', []) if rule[key] != expected: break else: @@ -111,9 +116,10 @@ def _ensure_rule_present(module, client): "conditions": module.params['conditions'], "actions": module.params['actions'], "description": module.params['description'], + "sensitive": module.params['sensitive'], "uuid": module.params['uuid'], } - response = client.post("/rules", json=rule) + response = client.post("/inspection_rules", json=rule, headers={'X-OpenStack-Ironic-API-Version': '1.96'}) if not response.ok: module.fail_json(msg="Failed creating Inspector rule %s: %s" % (module.params['uuid'], response.text)) @@ -124,7 +130,7 @@ def _ensure_rule_absent(module, client): """Ensure that an inspector rule is absent.""" if not module.params['uuid']: module.fail_json(msg="UUID is required to ensure rules are absent") - response = client.delete("/rules/{}".format(module.params['uuid'])) + response = client.delete("/inspection_rules/{}".format(module.params['uuid']), headers={'X-OpenStack-Ironic-API-Version': '1.96'}) if not response.ok: # If the rule does not exist, no problem and no change. 
if response.status_code == 404: @@ -140,6 +146,7 @@ def main(): actions=dict(type='list', required=True), description=dict(required=False), uuid=dict(required=False), + sensitive=dict(type='bool', required=False, default=False), state=dict(required=False, default='present', choices=['present', 'absent']), ) diff --git a/ansible/roles/ironic-inspector-rules/tasks/main.yml b/ansible/roles/ironic-inspector-rules/tasks/main.yml index 93fbe7fcb..41a1aab7e 100644 --- a/ansible/roles/ironic-inspector-rules/tasks/main.yml +++ b/ansible/roles/ironic-inspector-rules/tasks/main.yml @@ -8,9 +8,12 @@ cacert: "{{ ironic_inspector_cacert | default(omit, true) }}" cloud: "{{ ironic_inspector_cloud | default(omit, true) }}" interface: "{{ ironic_inspector_interface | default(omit, true) }}" - conditions: "{{ item.conditions }}" - actions: "{{ item.actions }}" - description: "{{ item.description | default(omit) }}" - uuid: "{{ item.uuid | default(item.description | to_uuid) | default(omit) }}" + conditions: "{{ ironic_inspector_rules[item].conditions }}" + actions: "{{ ironic_inspector_rules[item].actions }}" + description: "{{ ironic_inspector_rules[item].description | default(omit) }}" + uuid: "{{ ironic_inspector_rules[item].uuid | default(ironic_inspector_rules[item].description | to_uuid) | default(omit) }}" + sensitive: "{{ ironic_inspector_rules[item].sensitive | default(omit) }}" state: present - with_items: "{{ ironic_inspector_rules }}" + loop_control: + label: "{{ ironic_inspector_rules[item].description }}" + with_items: "{{ range(0, ironic_inspector_rules | length) | list }}" diff --git a/ansible/roles/junos-switch/README.md b/ansible/roles/junos-switch/README.md index ed0652dfc..fa7604883 100644 --- a/ansible/roles/junos-switch/README.md +++ b/ansible/roles/junos-switch/README.md @@ -1,10 +1,10 @@ JunOS Switch ============ -This role configures Juniper switches using the `junos` Ansible modules. 
It -provides a fairly minimal abstraction of the configuration interface provided -by the `junos` modules, allowing for application of arbitrary switch -configuration options. +This role configures Juniper switches using the `junipernetworks.junos` Ansible +collection. It provides a fairly minimal abstraction of the configuration +interface provided by the collection, allowing for application of arbitrary +switch configuration options. Requirements ------------ @@ -14,12 +14,6 @@ The switches should be configured to allow access to NETCONF via SSH. Role Variables -------------- -`junos_switch_delegate_to` is the host on which to execute the `junos` Ansible -modules. - -`junos_switch_provider` is authentication provider information passed as the -`provider` argument to the `junos` modules. - `junos_switch_config_format` is the format of configuration in `junos_switch_config` and `junos_switch_interface_config`. May be one of `set`, `text` or `json`. @@ -53,11 +47,6 @@ passwords. It applies global configuration for LLDP, and enables two gather_facts: no roles: - role: junos-switch - junos_switch_delegate_to: localhost - junos_switch_provider: - host: "{{ switch_host }}" - username: "{{ switch_user }}" - password: "{{ switch_password }}" junos_switch_config: - "protocols {" - " lldp {" diff --git a/ansible/roles/junos-switch/defaults/main.yml b/ansible/roles/junos-switch/defaults/main.yml index 09da1a600..8df0d41fb 100644 --- a/ansible/roles/junos-switch/defaults/main.yml +++ b/ansible/roles/junos-switch/defaults/main.yml @@ -1,7 +1,4 @@ --- -# Authentication provider information. -junos_switch_provider: - # Format of configuration in junos_switch_config and # junos_switch_interface_config. May be one of 'set', 'text' or 'json'. 
junos_switch_config_format: text diff --git a/ansible/roles/junos-switch/tasks/main.yml b/ansible/roles/junos-switch/tasks/main.yml index c06a377a0..c76f006f1 100644 --- a/ansible/roles/junos-switch/tasks/main.yml +++ b/ansible/roles/junos-switch/tasks/main.yml @@ -6,8 +6,8 @@ # NOTE(mgoddard): 0.6.7 includes a fix for host key checking: # https://github.com/ncclient/ncclient/issues/302. ncclient_version: ">=0.6.7,<0.7.0" - local_action: - module: pip + delegate_to: localhost + pip: name: "ncclient{{ ncclient_version }}" virtualenv: "{{ lookup('env', 'VIRTUAL_ENV') | default(omit, true) }}" become: "{{ lookup('env', 'VIRTUAL_ENV') == None }}" @@ -15,9 +15,7 @@ run_once: true - name: Ensure Juniper switches are configured - local_action: - module: junos_config - provider: "{{ junos_switch_provider }}" + junos_config: src: "{{ junos_switch_src }}" src_format: "{{ junos_switch_config_format }}" vars: diff --git a/ansible/roles/kolla-ansible/defaults/main.yml b/ansible/roles/kolla-ansible/defaults/main.yml index 7321c8182..6ab577683 100644 --- a/ansible/roles/kolla-ansible/defaults/main.yml +++ b/ansible/roles/kolla-ansible/defaults/main.yml @@ -26,7 +26,7 @@ kolla_ansible_venv_extra_requirements: [] # tested code. Changes to this limit should be tested. It is possible to only # install ansible-core by setting kolla_ansible_venv_ansible to None. kolla_ansible_venv_ansible: -kolla_ansible_venv_ansible_core: 'ansible-core>=2.17,<2.19' +kolla_ansible_venv_ansible_core: 'ansible-core>=2.19,<2.21' # Path to a requirements.yml file for Ansible collections. kolla_ansible_requirements_yml: "{{ kolla_ansible_venv }}/share/kolla-ansible/requirements.yml" @@ -200,6 +200,9 @@ kolla_inspector_netmask: # Default gateway to use for inspection network. kolla_inspector_default_gateway: +# DNS servers for inspection network. +kolla_inspector_dns_servers: [] + # List of extra kernel parameters for the inspector default PXE configuration. 
kolla_inspector_extra_kernel_options: [] @@ -320,3 +323,9 @@ kolla_https_proxy: # List of domains, hostnames, IP addresses and networks for which no proxy is # used. kolla_no_proxy: + +############################################################################### +# Bifrost configuration + +# Whether to install Bifrost. Default is true. +kolla_enable_bifrost: true diff --git a/ansible/roles/kolla-ansible/tasks/install.yml b/ansible/roles/kolla-ansible/tasks/install.yml index 2e0d4c8c4..d328ed4f2 100644 --- a/ansible/roles/kolla-ansible/tasks/install.yml +++ b/ansible/roles/kolla-ansible/tasks/install.yml @@ -141,7 +141,7 @@ - "{{ kolla_ansible_venv_ansible_core }}" - "{{ kolla_ansible_venv_ansible }}" pip: - name: "{{ (kolla_ansible_packages + kolla_ansible_venv_extra_requirements) | select | list }}" + name: "{{ (kolla_ansible_packages | default([]) + kolla_ansible_venv_extra_requirements | default([])) | select | list }}" state: latest extra_args: "{% if kolla_upper_constraints_file %}-c {{ kolla_upper_constraints_file }}{% endif %}" virtualenv: "{{ kolla_ansible_venv }}" @@ -159,4 +159,4 @@ # newer versions. 
ANSIBLE_COLLECTIONS_SCAN_SYS_PATH: "False" # NOTE(wszumski): Don't use path configured for kayobe - ANSIBLE_COLLECTIONS_PATH: + ANSIBLE_COLLECTIONS_PATH: '' diff --git a/ansible/roles/kolla-ansible/templates/kolla/globals.yml b/ansible/roles/kolla-ansible/templates/kolla/globals.yml index 4ac8a9c45..77135cd05 100644 --- a/ansible/roles/kolla-ansible/templates/kolla/globals.yml +++ b/ansible/roles/kolla-ansible/templates/kolla/globals.yml @@ -119,7 +119,7 @@ docker_registry_username: "{{ kolla_docker_registry_username }}" #bifrost_network_address_family: "{% raw %}{{ network_address_family }}{% endraw %}" #dns_address_family: "{% raw %}{{ network_address_family }}{% endraw %}" -# Valid options are [ openvswitch, linuxbridge ] +# Valid options are [ openvswitch, ovn ] neutron_plugin_agent: "{% if kolla_enable_ovn | default(False) | bool %}ovn{% else %}openvswitch{% endif %}" # Valid options are [ internal, infoblox ] @@ -176,7 +176,6 @@ neutron_tenant_network_types: {{ kolla_neutron_ml2_tenant_network_types | join(' # ulimits: - ############# # TLS options ############# @@ -402,9 +401,12 @@ ironic_dnsmasq_dhcp_ranges: {% if kolla_inspector_default_gateway %} routers: "{{ kolla_inspector_default_gateway }}" {% endif %} +{% if kolla_inspector_dns_servers %} + dns_servers: "{{ kolla_inspector_dns_servers | join(',') }}" +{% endif %} {% endif %} {% if kolla_inspector_extra_kernel_options %} -ironic_inspector_kernel_cmdline_extras: +ironic_kernel_cmdline_extras: {{ kolla_inspector_extra_kernel_options | to_nice_yaml }} {% endif %} # PXE bootloader file for Ironic Inspector, relative to /var/lib/ironic/tftpboot. @@ -505,6 +507,14 @@ ironic_inspector_kernel_cmdline_extras: grafana_admin_username: "{{ grafana_local_admin_user_name }}" {% endif %} +########################### +# Target python environment +########################### +{% if kolla_ansible_target_venv is not none %} +# Execute ansible modules on the remote target hosts using a virtualenv. 
+virtualenv: "{{ kolla_ansible_target_venv }}" +{% endif %} + {% if kolla_extra_globals %} ####################### # Extra configuration diff --git a/ansible/roles/kolla-ansible/templates/overcloud-components.j2 b/ansible/roles/kolla-ansible/templates/overcloud-components.j2 index c6c193f66..6734b619b 100644 --- a/ansible/roles/kolla-ansible/templates/overcloud-components.j2 +++ b/ansible/roles/kolla-ansible/templates/overcloud-components.j2 @@ -11,9 +11,11 @@ network compute storage monitoring +bifrost [tls-backend:children] control +network # You can explicitly specify which hosts run each project by updating the # groups in the sections below. Common services are grouped together. @@ -149,14 +151,11 @@ control [skyline:children] control -[redis:children] +[valkey:children] control [blazar:children] control -[venus:children] -monitoring - [letsencrypt:children] loadbalancer diff --git a/ansible/roles/kolla-ansible/templates/overcloud-services.j2 b/ansible/roles/kolla-ansible/templates/overcloud-services.j2 index 7d9777e88..086045cb1 100644 --- a/ansible/roles/kolla-ansible/templates/overcloud-services.j2 +++ b/ansible/roles/kolla-ansible/templates/overcloud-services.j2 @@ -88,38 +88,47 @@ nova {% endif %} # Neutron -[neutron-server:children] -control - -[neutron-dhcp-agent:children] +[ironic-neutron-agent:children] neutron -[neutron-l3-agent:children] +[neutron-bgp-dragent:children] neutron -[neutron-metadata-agent:children] +[neutron-dhcp-agent:children] neutron -[neutron-ovn-metadata-agent:children] -compute -network +[neutron-infoblox-ipam-agent:children] +neutron -[neutron-bgp-dragent:children] +[neutron-l3-agent:children] neutron -[neutron-infoblox-ipam-agent:children] +[neutron-metadata-agent:children] neutron [neutron-metering-agent:children] neutron -[ironic-neutron-agent:children] -neutron +[neutron-periodic-worker:children] +control + +[neutron-rpc-server:children] +control + +[neutron-server:children] +control [neutron-ovn-agent:children] compute 
network +[neutron-ovn-maintenance-worker:children] +control + +[neutron-ovn-metadata-agent:children] +compute +network + # Cinder [cinder-api:children] cinder @@ -189,13 +198,8 @@ ironic [ironic-conductor:children] ironic -{% if kolla_ironic_inspector_host %} -[ironic-inspector] -{{ kolla_ironic_inspector_host }} -{% else %} -[ironic-inspector:children] +[ironic-dnsmasq:children] ironic -{% endif %} [ironic-tftp:children] ironic @@ -443,12 +447,6 @@ ovn-database [ovn-sb-db-relay:children] ovn-database -[venus-api:children] -venus - -[venus-manager:children] -venus - [letsencrypt-webserver:children] letsencrypt diff --git a/ansible/roles/kolla-ansible/templates/overcloud-top-level.j2 b/ansible/roles/kolla-ansible/templates/overcloud-top-level.j2 index 26953081f..c0efff9e8 100644 --- a/ansible/roles/kolla-ansible/templates/overcloud-top-level.j2 +++ b/ansible/roles/kolla-ansible/templates/overcloud-top-level.j2 @@ -23,11 +23,6 @@ ansible_user={{ kolla_ansible_user }} {% if kolla_ansible_become | bool %} ansible_become=true {% endif %} -{% if kolla_ansible_target_venv is not none %} -# Execute ansible modules on the remote target hosts using a virtualenv. -ansible_python_interpreter={{ kolla_ansible_target_venv }}/bin/python -{% endif %} - {% for kolla_group, kolla_group_config in kolla_overcloud_inventory_top_level_group_map.items() %} {% if 'groups' in kolla_group_config %} diff --git a/ansible/roles/kolla-ansible/templates/seed.j2 b/ansible/roles/kolla-ansible/templates/seed.j2 index 18aa56610..e09562837 100644 --- a/ansible/roles/kolla-ansible/templates/seed.j2 +++ b/ansible/roles/kolla-ansible/templates/seed.j2 @@ -8,11 +8,13 @@ ansible_user={{ kolla_ansible_user }} {% if kolla_ansible_target_venv is not none %} # Execute ansible modules on the remote target hosts using a virtualenv. 
-ansible_python_interpreter={{ kolla_ansible_target_venv }}/bin/python +virtualenv={{ kolla_ansible_target_venv }} {% endif %} [baremetal:children] seed [bifrost:children] +{% if kolla_enable_bifrost | bool %} seed +{% endif %} diff --git a/ansible/roles/kolla-ansible/tests/test-defaults.yml b/ansible/roles/kolla-ansible/tests/test-defaults.yml index f42353a3a..f5832c0d1 100644 --- a/ansible/roles/kolla-ansible/tests/test-defaults.yml +++ b/ansible/roles/kolla-ansible/tests/test-defaults.yml @@ -11,7 +11,7 @@ - block: - name: Test the kolla-ansible role with default values include_role: - name: ../../kolla-ansible + name: "{{ playbook_dir }}/.." vars: kolla_ansible_source_path: "{{ temp_path }}/src" kolla_ansible_ctl_install_type: "source" @@ -137,7 +137,7 @@ - name: Check that no inventory overrides are configured assert: that: - - kolla_ansible_overcloud_inventory_overrides.matched == 0 + - kolla_ansible_overcloud_inventory_overrides.matched == 0 msg: > Overcloud group vars were found when they should not be set. 
diff --git a/ansible/roles/kolla-ansible/tests/test-extras.yml b/ansible/roles/kolla-ansible/tests/test-extras.yml index bef29ad3b..607d7945e 100644 --- a/ansible/roles/kolla-ansible/tests/test-extras.yml +++ b/ansible/roles/kolla-ansible/tests/test-extras.yml @@ -165,7 +165,6 @@ kolla_enable_horizon: True kolla_enable_influxdb: True kolla_enable_ironic: True - kolla_enable_ironic_inspector: True kolla_enable_ironic_neutron_agent: True kolla_enable_kuryr: True kolla_enable_magnum: True diff --git a/ansible/roles/kolla-ansible/vars/RedHat.yml b/ansible/roles/kolla-ansible/vars/RedHat.yml index 768efbfc0..dd9e59bc3 100644 --- a/ansible/roles/kolla-ansible/vars/RedHat.yml +++ b/ansible/roles/kolla-ansible/vars/RedHat.yml @@ -5,6 +5,6 @@ kolla_ansible_package_dependencies: - git - libffi-devel - openssl-devel - - python3.12 - - python3.12-devel + - python3 + - python3-devel - rsync diff --git a/ansible/roles/kolla-ansible/vars/main.yml b/ansible/roles/kolla-ansible/vars/main.yml index 389a6be03..6533227b0 100644 --- a/ansible/roles/kolla-ansible/vars/main.yml +++ b/ansible/roles/kolla-ansible/vars/main.yml @@ -136,15 +136,14 @@ kolla_feature_flags: - horizon_octavia - horizon_tacker - horizon_trove - - horizon_venus - horizon_watcher - horizon_zun - influxdb - ironic - ironic_dnsmasq - - ironic_inspector - ironic_neutron_agent - ironic_prometheus_exporter + - ironic_pxe_filter - iscsid - keepalived - keystone @@ -228,12 +227,11 @@ kolla_feature_flags: - prometheus_server - proxysql - rabbitmq - - redis - skyline - tacker - telegraf - trove - trove_singletenant - - venus + - valkey - watcher - zun diff --git a/ansible/roles/kolla-bifrost/defaults/main.yml b/ansible/roles/kolla-bifrost/defaults/main.yml index ae2e5fa47..2ab957d03 100644 --- a/ansible/roles/kolla-bifrost/defaults/main.yml +++ b/ansible/roles/kolla-bifrost/defaults/main.yml @@ -42,12 +42,15 @@ kolla_bifrost_dnsmasq_dns_servers: [] kolla_bifrost_domain: # List of of inspector processing plugins. 
-kolla_bifrost_inspector_processing_hooks: +kolla_bifrost_inspector_hooks: # Which MAC addresses to add as ports during introspection. One of 'all', # 'active' or 'pxe'. kolla_bifrost_inspector_port_addition: +# Which ports to keep after introspection. One of 'all', 'present', or 'added'. +kolla_bifrost_inspector_keep_ports: + # List of extra kernel parameters for the inspector default PXE configuration. kolla_bifrost_inspector_extra_kernel_options: diff --git a/ansible/roles/kolla-bifrost/templates/kolla/config/bifrost/bifrost.yml b/ansible/roles/kolla-bifrost/templates/kolla/config/bifrost/bifrost.yml index 5269328fe..712bed6de 100644 --- a/ansible/roles/kolla-bifrost/templates/kolla/config/bifrost/bifrost.yml +++ b/ansible/roles/kolla-bifrost/templates/kolla/config/bifrost/bifrost.yml @@ -28,9 +28,9 @@ dnsmasq_dns_servers: "{{ kolla_bifrost_dnsmasq_dns_servers | join(',') }}" domain: "{{ kolla_bifrost_domain }}" {% endif %} -{% if kolla_bifrost_inspector_processing_hooks %} +{% if kolla_bifrost_inspector_hooks %} # Comma-separated list of inspector processing plugins. -inspector_processing_hooks: "{{ kolla_bifrost_inspector_processing_hooks | join(',') }}" +inspector_hooks: "{{ kolla_bifrost_inspector_hooks | join(',') }}" {% endif %} {% if kolla_bifrost_inspector_port_addition %} @@ -39,6 +39,11 @@ inspector_processing_hooks: "{{ kolla_bifrost_inspector_processing_hooks | join( inspector_port_addition: "{{ kolla_bifrost_inspector_port_addition }}" {% endif %} +{% if kolla_bifrost_inspector_keep_ports %} +# Which ports to keep after introspection. One of 'all', 'present', or 'added'. +inspector_keep_ports: "{{ kolla_bifrost_inspector_keep_ports }}" +{% endif %} + {% if kolla_bifrost_inspector_extra_kernel_options %} # Extra kernel parameters for the inspector default PXE configuration. 
inspector_extra_kernel_options: "{{ kolla_bifrost_inspector_extra_kernel_options if kolla_bifrost_inspector_extra_kernel_options is string else kolla_bifrost_inspector_extra_kernel_options | join(' ') }}" diff --git a/ansible/roles/kolla-build/templates/kolla/kolla-build.conf b/ansible/roles/kolla-build/templates/kolla/kolla-build.conf index 4e84cec0d..78ccd60bf 100644 --- a/ansible/roles/kolla-build/templates/kolla/kolla-build.conf +++ b/ansible/roles/kolla-build/templates/kolla/kolla-build.conf @@ -40,6 +40,13 @@ location = {{ source_def.location }} # Reference of source for {{ source_name }}. reference = {{ source_def.reference }} {% endif %} +{% if source_def.sha256 is defined %} +{# generates sha256 = amd64:xxx,arm64:yyy #} +sha256 = {{ source_def.sha256.keys() | zip (source_def.sha256.values()) | map("join", ":") | join(",") }} +{% endif %} +{% if source_def.version is defined %} +version = {{ source_def.version }} +{% endif %} {% endfor %} diff --git a/ansible/roles/kolla-openstack/defaults/main.yml b/ansible/roles/kolla-openstack/defaults/main.yml index 63b7856c1..0fabde950 100644 --- a/ansible/roles/kolla-openstack/defaults/main.yml +++ b/ansible/roles/kolla-openstack/defaults/main.yml @@ -92,10 +92,6 @@ kolla_openstack_custom_config_include_globs_default: glob: horizon/** - enabled: '{{ kolla_enable_influxdb | bool }}' glob: influx* - - enabled: '{{ kolla_enable_ironic_inspector | bool }}' - glob: ironic-inspector.conf - - enabled: '{{ kolla_enable_ironic_inspector | bool }}' - glob: ironic-inspector/** - enabled: '{{ kolla_enable_ironic | bool }}' glob: ironic.conf - enabled: '{{ kolla_enable_ironic | bool }}' @@ -138,7 +134,7 @@ kolla_openstack_custom_config_include_globs_default: glob: nova/** - enabled: '{{ kolla_enable_nova | bool }}' glob: nova_compute/** - - enabled: '{{ kolla_enable_octavia | bool }}' + - enabled: '{{ kolla_enable_octavia | bool }}' glob: octavia.conf - enabled: '{{ kolla_enable_octavia | bool }}' glob: octavia/** @@ -551,10 
+547,16 @@ kolla_ironic_default_vendor_interface: # Name or UUID of the Neutron network to use for cleaning. kolla_ironic_cleaning_network: +# Name or UUID of the Neutron network to use for inspection. +kolla_ironic_inspection_network: + # Name or UUID of the Neutron network to use for provisioning. kolla_ironic_provisioning_network: -# List of additional append parameters for baremetal PXE boot. +# List of additional append parameters for baremetal boot. +kolla_ironic_kernel_append_params: [] + +# List of additional append parameters for baremetal PXE boot. kolla_ironic_pxe_append_params: [] # Deprecated: @@ -564,11 +566,10 @@ kolla_extra_ironic: ############################################################################### # Ironic inspector configuration. -# Whether to enable Ironic inspector. -kolla_enable_ironic_inspector: "{{ kolla_enable_ironic | bool }}" - # Comma-separated list of inspector processing plugins. -kolla_inspector_processing_hooks: + +# Comma-separated list of inspector processing plugins for built-in inspector. +kolla_inspector_hooks: # Which MAC addresses to add as ports during introspection. One of 'all', # 'active' or 'pxe'. diff --git a/ansible/roles/kolla-openstack/molecule/default/molecule.yml b/ansible/roles/kolla-openstack/molecule/default/molecule.yml index 241f5d400..6bef4caa3 100644 --- a/ansible/roles/kolla-openstack/molecule/default/molecule.yml +++ b/ansible/roles/kolla-openstack/molecule/default/molecule.yml @@ -8,8 +8,8 @@ driver: lint: | yamllint .
platforms: - - name: centos-stream9 - image: quay.io/centos/centos:stream9 + - name: ubuntu-noble + image: quay.io/opendevmirror/ubuntu:24.04 network_mode: host provisioner: name: ansible diff --git a/ansible/roles/kolla-openstack/molecule/enable-everything/molecule.yml b/ansible/roles/kolla-openstack/molecule/enable-everything/molecule.yml index 345cc4eef..035a4cb09 100644 --- a/ansible/roles/kolla-openstack/molecule/enable-everything/molecule.yml +++ b/ansible/roles/kolla-openstack/molecule/enable-everything/molecule.yml @@ -8,8 +8,8 @@ driver: lint: | yamllint . platforms: - - name: centos-stream9 - image: quay.io/centos/centos:stream9 + - name: ubuntu-noble + image: quay.io/opendevmirror/ubuntu:24.04 network_mode: host provisioner: name: ansible @@ -80,9 +80,6 @@ provisioner: kolla_extra_ironic: | [extra-ironic.conf] foo=bar - kolla_extra_inspector: | - [extra-ironic-inspector.conf] - foo=bar kolla_inspector_ipa_kernel_path: ${MOLECULE_TEMP_PATH:-/tmp/molecule}/ironic-agent.kernel kolla_inspector_ipa_ramdisk_path: ${MOLECULE_TEMP_PATH:-/tmp/molecule}/ironic-agent.initramfs kolla_enable_keepalived: true diff --git a/ansible/roles/kolla-openstack/molecule/enable-everything/prepare.yml b/ansible/roles/kolla-openstack/molecule/enable-everything/prepare.yml index 3c491fa1e..4e945b3aa 100644 --- a/ansible/roles/kolla-openstack/molecule/enable-everything/prepare.yml +++ b/ansible/roles/kolla-openstack/molecule/enable-everything/prepare.yml @@ -4,8 +4,8 @@ gather_facts: false tasks: - name: Ensure ironic inspector kernel and ramdisk image directory exists - local_action: - module: file + delegate_to: localhost + file: path: "{{ item | dirname }}" state: directory recurse: true @@ -18,8 +18,8 @@ # versions of docker. Using non-empty files seems to resolve the issue. # See https://github.com/ansible/ansible/issues/36725. 
- name: Ensure ironic inspector kernel and ramdisk images exist - local_action: - module: copy + delegate_to: localhost + copy: content: fake image dest: "{{ item }}" with_items: @@ -63,7 +63,6 @@ - grafana.ini - heat.conf - ironic.conf - - ironic-inspector.conf - keystone.conf - magnum.conf - manila.conf @@ -118,8 +117,8 @@ label: "{{ item.dest }}" - name: Ensure nova libvirt certificates directory exists - local_action: - module: file + delegate_to: localhost + file: path: "{{ kolla_nova_libvirt_certificates_src }}" state: directory @@ -128,8 +127,8 @@ # versions of docker. Using non-empty files seems to resolve the issue. # See https://github.com/ansible/ansible/issues/36725. - name: Ensure nova libvirt certificates exist - local_action: - module: copy + delegate_to: localhost + copy: content: fake cert dest: "{{ kolla_nova_libvirt_certificates_src }}/{{ item }}" with_items: diff --git a/ansible/roles/kolla-openstack/molecule/enable-everything/tests/test_default.py b/ansible/roles/kolla-openstack/molecule/enable-everything/tests/test_default.py index 4e9a40a22..72d74d0b0 100644 --- a/ansible/roles/kolla-openstack/molecule/enable-everything/tests/test_default.py +++ b/ansible/roles/kolla-openstack/molecule/enable-everything/tests/test_default.py @@ -37,7 +37,6 @@ 'grafana.ini', 'heat.conf', 'ironic.conf', - 'ironic-inspector.conf', 'keystone.conf', 'magnum.conf', 'manila.conf', @@ -70,7 +69,6 @@ def test_service_ini_file(host, path): 'grafana.ini', 'heat.conf', 'ironic.conf', - 'ironic-inspector.conf', 'keystone.conf', 'magnum.conf', 'manila.conf', diff --git a/ansible/roles/kolla-openstack/tasks/config.yml b/ansible/roles/kolla-openstack/tasks/config.yml index 2aa8948a3..ce318fbc6 100644 --- a/ansible/roles/kolla-openstack/tasks/config.yml +++ b/ansible/roles/kolla-openstack/tasks/config.yml @@ -119,11 +119,11 @@ params: content: | {%- for path in item.sources -%} - {{ lookup('template', path) }} + {{ lookup('file', path) }} {%- endfor -%} dest: "{{ item.dest 
}}" mode: 0640 - copy: "{{ params | combine(item.params) }}" + template_content: "{{ params | combine(item.params) }}" with_items: "{{ kolla_custom_config_info.concat }}" - name: Ensure unnecessary extra configuration files are absent diff --git a/ansible/roles/kolla-openstack/templates/kolla/config/ironic-inspector.conf b/ansible/roles/kolla-openstack/templates/kolla/config/ironic-inspector.conf deleted file mode 100644 index d085c2bec..000000000 --- a/ansible/roles/kolla-openstack/templates/kolla/config/ironic-inspector.conf +++ /dev/null @@ -1,50 +0,0 @@ -[DEFAULT] - -[processing] -{% if kolla_inspector_processing_hooks %} -# Comma-separated list of inspector processing plugins. -processing_hooks = {{ kolla_inspector_processing_hooks | join(',') }} -{% endif %} - -{% if kolla_inspector_add_ports %} -# Which MAC addresses to add as ports during introspection. One of 'all', -# 'active' or 'pxe'. -add_ports = {{ kolla_inspector_add_ports }} -{% endif %} - -{% if kolla_inspector_keep_ports %} -keep_ports = {{ kolla_inspector_keep_ports }} -{% endif %} - -# Store logs returned by the inspection ramdisk. -always_store_ramdisk_logs = True - -{% if kolla_inspector_enable_discovery %} -# Enable discovery when nodes do not exist in Ironic. -node_not_found_hook = enroll -{% endif %} - -{% if kolla_inspector_enable_swift %} -store_data = swift -{% endif %} - -{% if kolla_inspector_enable_swift %} -[swift] -{% for key, value in kolla_inspector_swift_auth.items() %} -{{ key }} = {{ value }} -{% endfor %} -{% endif %} - -{% if kolla_inspector_enable_discovery %} -[discovery] -# The driver with which to enroll newly discovered nodes in Ironic. 
-enroll_node_driver = {{ kolla_inspector_discovery_enroll_node_driver }} -{% endif %} - -{% if kolla_extra_inspector %} -####################### -# Extra configuration -####################### - -{{ kolla_extra_inspector }} -{% endif %} diff --git a/ansible/roles/kolla-openstack/templates/kolla/config/ironic.conf b/ansible/roles/kolla-openstack/templates/kolla/config/ironic.conf index f91aad0db..3961eb631 100644 --- a/ansible/roles/kolla-openstack/templates/kolla/config/ironic.conf +++ b/ansible/roles/kolla-openstack/templates/kolla/config/ironic.conf @@ -1,4 +1,11 @@ [DEFAULT] +enabled_inspect_interfaces = redfish,no-inspect,agent +{% if kolla_inspector_enable_discovery | bool %} +# Setting default_inspect_interface is required for the inspection flow to +# continue correctly after the node creation. See: +# https://docs.openstack.org/ironic/latest/admin/inspection/discovery.html +default_inspect_interface = agent +{% endif %} {% if kolla_ironic_enabled_hardware_types %} enabled_hardware_types: {{ kolla_ironic_enabled_hardware_types | join(',') }} {% endif %} @@ -18,8 +25,14 @@ enabled_hardware_types: {{ kolla_ironic_enabled_hardware_types | join(',') }} [agent] deploy_logs_local_path = /var/log/kolla/ironic/deploy +[redfish] +{% if kolla_ironic_kernel_append_params %} +kernel_append_params = {{ kolla_ironic_kernel_append_params | join(' ') }} +{% endif %} + [neutron] cleaning_network = {{ kolla_ironic_cleaning_network }} +inspection_network = {{ kolla_ironic_inspection_network }} provisioning_network = {{ kolla_ironic_provisioning_network }} [pxe] @@ -31,6 +44,31 @@ kernel_append_params = {{ kolla_ironic_pxe_append_params | join(' ') }} tftp_server = {{ hostvars[inventory_hostname].ansible_facts[api_interface | replace('-', '_')]['ipv4']['address'] }} {% endraw %} +[auto_discovery] +enabled = {{ kolla_inspector_enable_discovery }} +driver = {{ kolla_inspector_discovery_enroll_node_driver }} + +[inspector] +{% if kolla_inspector_enable_discovery | bool %} +# Under 
unmanaged inspection we understand in-band inspection where the boot +# configuration (iPXE scripts, DHCP options, etc) is not provided by the Bare +# Metal service. In this case, the node is simply set to boot from network and +# powered on. See: +# https://docs.openstack.org/ironic/latest/admin/inspection/managed.html#unmanaged-inspection +require_managed_boot = False +{% endif %} +{% if kolla_inspector_add_ports %} +add_ports = {{ kolla_inspector_add_ports }} +{% endif %} + +{% if kolla_inspector_keep_ports %} +keep_ports = {{ kolla_inspector_keep_ports }} +{% endif %} + +{% if kolla_inspector_hooks %} +hooks = {{ kolla_inspector_hooks | join(',') }} +{% endif %} + {% if kolla_extra_ironic %} ####################### # Extra configuration diff --git a/ansible/roles/manage-containers/tasks/deploy.yml b/ansible/roles/manage-containers/tasks/deploy.yml index d450ddd37..a735e9b26 100644 --- a/ansible/roles/manage-containers/tasks/deploy.yml +++ b/ansible/roles/manage-containers/tasks/deploy.yml @@ -10,8 +10,8 @@ become: "{{ container_engine == 'podman' }}" - name: Deploy containers (loop) - include_tasks: deploy.yml + include_tasks: deploy-container.yml vars: container_name: "{{ item.key }}" container_config: "{{ item.value }}" - with_dict: "{{ seed_containers }}" \ No newline at end of file + with_dict: "{{ seed_containers }}" diff --git a/ansible/roles/manage-containers/tasks/destroy.yml b/ansible/roles/manage-containers/tasks/destroy.yml index acf0d25bd..5059832fa 100644 --- a/ansible/roles/manage-containers/tasks/destroy.yml +++ b/ansible/roles/manage-containers/tasks/destroy.yml @@ -1,6 +1,7 @@ +--- - name: Destroy containers (loop) include_tasks: destroy-container.yml vars: container_name: "{{ item.key }}" container_config: "{{ item.value }}" - with_dict: "{{ seed_containers }}" \ No newline at end of file + with_dict: "{{ seed_containers }}" diff --git a/ansible/roles/network-debian/tasks/main.yml b/ansible/roles/network-debian/tasks/main.yml index 
27091a841..c1c75ea53 100644 --- a/ansible/roles/network-debian/tasks/main.yml +++ b/ansible/roles/network-debian/tasks/main.yml @@ -48,4 +48,4 @@ become: true command: "udevadm trigger --verbose --subsystem-match=net --action=add" changed_when: false - when: network_interfaces | networkd_links | length + when: network_interfaces | networkd_links | length > 0 diff --git a/ansible/roles/network-redhat/tasks/main.yml b/ansible/roles/network-redhat/tasks/main.yml index c73e31152..c8f07780e 100644 --- a/ansible/roles/network-redhat/tasks/main.yml +++ b/ansible/roles/network-redhat/tasks/main.yml @@ -18,7 +18,7 @@ - option: rc-manager value: unmanaged when: - - ansible_facts.os_family == "RedHat" and ansible_facts.distribution_major_version == "9" + - ansible_facts.os_family == "RedHat" and ansible_facts.distribution_major_version | int >= 9 register: dns_config_task - name: Reload NetworkManager with DNS config diff --git a/ansible/roles/public-openrc/templates/public-openrc.sh.j2 b/ansible/roles/public-openrc/templates/public-openrc.sh.j2 index d11c795dd..49225fc60 100644 --- a/ansible/roles/public-openrc/templates/public-openrc.sh.j2 +++ b/ansible/roles/public-openrc/templates/public-openrc.sh.j2 @@ -11,6 +11,8 @@ export OS_ENDPOINT_TYPE=publicURL export OS_MANILA_ENDPOINT_TYPE=publicURL {% elif "export OS_MISTRAL_ENDPOINT_TYPE" in line %} export OS_MISTRAL_ENDPOINT_TYPE=publicURL +{% elif "export OS_CACERT" in line %} +{# NOTE(bbezak): drop admin OS_CACERT; public-openrc sets its own. -#} {% else %} {{ line }} {% endif %} diff --git a/ansible/roles/selinux/defaults/main.yml b/ansible/roles/selinux/defaults/main.yml index 80481b39f..8966e67f3 100644 --- a/ansible/roles/selinux/defaults/main.yml +++ b/ansible/roles/selinux/defaults/main.yml @@ -11,3 +11,8 @@ selinux_do_reboot: false # Number of seconds to wait for hosts to become accessible via SSH after being # rebooted. selinux_reboot_timeout: + +# Whether or not to update the selinux kernel parameter. 
Can be useful if you +# have selinux= set on the kernel command line. Default is to use +# the module default. +selinux_update_kernel_param: "{{ omit }}" diff --git a/ansible/roles/selinux/tasks/main.yml b/ansible/roles/selinux/tasks/main.yml index 2e375d5a3..43551d998 100644 --- a/ansible/roles/selinux/tasks/main.yml +++ b/ansible/roles/selinux/tasks/main.yml @@ -16,6 +16,7 @@ selinux: policy: "{{ selinux_policy }}" state: "{{ selinux_state }}" + update_kernel_param: "{{ selinux_update_kernel_param }}" register: selinux_result vars: ansible_python_interpreter: /usr/bin/python3 diff --git a/ansible/roles/ssh-known-host/tasks/main.yml b/ansible/roles/ssh-known-host/tasks/main.yml index 2cb8cc49e..75257f71c 100644 --- a/ansible/roles/ssh-known-host/tasks/main.yml +++ b/ansible/roles/ssh-known-host/tasks/main.yml @@ -13,11 +13,13 @@ vm provision' and 'kayobe overcloud inventory discover'. when: not ansible_host | default(inventory_hostname) +# NOTE(priteau): Exclude comments from ssh-keyscan output because they break +# known_hosts on centos/rocky 10. - name: Scan for SSH keys - local_action: - module: command ssh-keyscan {{ item }} + delegate_to: localhost + shell: ssh-keyscan {{ item }} | grep -v '^#' with_items: - - "{{ ansible_host|default(inventory_hostname) }}" + - "{{ ansible_host | default(inventory_hostname) }}" register: keyscan_result changed_when: False @@ -25,8 +27,8 @@ # concurrently, and some keys can end up being dropped. 
For more details see # https://github.com/ansible/proposals/issues/113 - name: Ensure SSH keys are in known hosts - local_action: - module: known_hosts + delegate_to: localhost + known_hosts: host: "{{ item[0].item }}" key: "{{ item[1] }}" with_subelements: diff --git a/ansible/roles/swift-block-devices/tests/test-bootstrapped.yml b/ansible/roles/swift-block-devices/tests/test-bootstrapped.yml index 6917ca03f..577b19ab7 100644 --- a/ansible/roles/swift-block-devices/tests/test-bootstrapped.yml +++ b/ansible/roles/swift-block-devices/tests/test-bootstrapped.yml @@ -32,7 +32,7 @@ - block: - name: Test the swift-block-devices role include_role: - name: ../../swift-block-devices + name: ../../swift-block-devices vars: swift_block_devices: - device: "{{ loopback.stdout }}" diff --git a/ansible/roles/swift-block-devices/tests/test-invalid-format.yml b/ansible/roles/swift-block-devices/tests/test-invalid-format.yml index 66eea7521..22df7be5e 100644 --- a/ansible/roles/swift-block-devices/tests/test-invalid-format.yml +++ b/ansible/roles/swift-block-devices/tests/test-invalid-format.yml @@ -7,7 +7,7 @@ - block: - name: Test the swift-block-devices role include_role: - name: ../../swift-block-devices + name: ../../swift-block-devices vars: swift_block_devices: - /dev/fake diff --git a/ansible/roles/swift-block-devices/tests/test-mount.yml b/ansible/roles/swift-block-devices/tests/test-mount.yml index 88bdb8ef9..5489160d9 100644 --- a/ansible/roles/swift-block-devices/tests/test-mount.yml +++ b/ansible/roles/swift-block-devices/tests/test-mount.yml @@ -24,7 +24,7 @@ - block: - name: Test the swift-block-devices role include_role: - name: ../../swift-block-devices + name: ../../swift-block-devices vars: swift_block_devices: - device: "{{ loopback.stdout }}" diff --git a/ansible/seed-credentials.yml b/ansible/seed-credentials.yml index a5f6e393c..7d8cadf7e 100644 --- a/ansible/seed-credentials.yml +++ b/ansible/seed-credentials.yml @@ -9,39 +9,42 @@ vars: openstack_config_dir: 
"{{ ansible_facts.env.HOME }}/.config/openstack" tasks: - - name: Ensure OpenStack config directory exists - file: - path: "{{ openstack_config_dir }}" - state: directory - mode: 0700 + - name: Ensure credentials are available on the host + when: kolla_enable_bifrost | bool + block: + - name: Ensure OpenStack config directory exists + file: + path: "{{ openstack_config_dir }}" + state: directory + mode: 0700 - - name: Get clouds.yaml from Bifrost container - command: - cmd: "{{ container_engine }} exec bifrost_deploy cat /root/.config/openstack/clouds.yaml" - changed_when: false - register: clouds_yaml - no_log: true - become: "{{ container_engine == 'podman' }}" + - name: Get clouds.yaml from Bifrost container + command: + cmd: "{{ container_engine }} exec bifrost_deploy cat /root/.config/openstack/clouds.yaml" + changed_when: false + register: clouds_yaml + no_log: true + become: "{{ container_engine == 'podman' }}" - - name: Write clouds.yaml - copy: - content: | - {%- set clouds = clouds_yaml.stdout | from_yaml -%} - {%- for cloud in clouds.clouds.keys() | list -%} - {%- if 'cacert' in clouds.clouds[cloud] -%} - {%- set _ = clouds.clouds[cloud].update({'cacert': openstack_config_dir ~ '/bifrost.crt'}) -%} - {%- endif -%} - {%- endfor -%} - {{ clouds | to_nice_yaml }} - dest: "{{ openstack_config_dir }}/clouds.yaml" - mode: 0600 + - name: Write clouds.yaml + copy: + content: | + {%- set clouds = clouds_yaml.stdout | from_yaml -%} + {%- for cloud in clouds.clouds.keys() | list -%} + {%- if 'cacert' in clouds.clouds[cloud] -%} + {%- set _ = clouds.clouds[cloud].update({'cacert': openstack_config_dir ~ '/bifrost.crt'}) -%} + {%- endif -%} + {%- endfor -%} + {{ clouds | to_nice_yaml }} + dest: "{{ openstack_config_dir }}/clouds.yaml" + mode: 0600 - - name: Copy CA certificate from Bifrost container - vars: - clouds: "{{ clouds_yaml.stdout | from_yaml }}" - cacerts: "{{ clouds.clouds.values() | selectattr('cacert', 'defined') | map(attribute='cacert') | list }}" - 
command: - cmd: "{{ container_engine }} cp bifrost_deploy:{{ cacerts[0] }} {{ openstack_config_dir }}/bifrost.crt" - changed_when: false - when: cacerts | length > 0 - become: "{{ container_engine == 'podman' }}" + - name: Copy CA certificate from Bifrost container + vars: + clouds: "{{ clouds_yaml.stdout | from_yaml }}" + cacerts: "{{ clouds.clouds.values() | selectattr('cacert', 'defined') | map(attribute='cacert') | list }}" + command: + cmd: "{{ container_engine }} cp bifrost_deploy:{{ cacerts[0] }} {{ openstack_config_dir }}/bifrost.crt" + changed_when: false + when: cacerts | length > 0 + become: "{{ container_engine == 'podman' }}" diff --git a/ansible/seed-host-configure.yml b/ansible/seed-host-configure.yml index 64604d74e..25b0dcc16 100644 --- a/ansible/seed-host-configure.yml +++ b/ansible/seed-host-configure.yml @@ -13,6 +13,7 @@ - import_playbook: "selinux.yml" - import_playbook: "network.yml" - import_playbook: "firewall.yml" +- import_playbook: "fail2ban.yml" - import_playbook: "tuned.yml" - import_playbook: "sysctl.yml" - import_playbook: "ip-routing.yml" @@ -22,6 +23,7 @@ - import_playbook: "mdadm.yml" - import_playbook: "luks.yml" - import_playbook: "lvm.yml" +- import_playbook: "swap.yml" - import_playbook: "kolla-ansible-user.yml" - import_playbook: "kolla-pip.yml" - import_playbook: "kolla-target-venv.yml" diff --git a/ansible/seed-hypervisor-host-configure.yml b/ansible/seed-hypervisor-host-configure.yml index 9bcb1f50c..56f240848 100644 --- a/ansible/seed-hypervisor-host-configure.yml +++ b/ansible/seed-hypervisor-host-configure.yml @@ -13,6 +13,7 @@ - import_playbook: "selinux.yml" - import_playbook: "network.yml" - import_playbook: "firewall.yml" +- import_playbook: "fail2ban.yml" - import_playbook: "tuned.yml" - import_playbook: "sysctl.yml" - import_playbook: "ip-routing.yml" @@ -21,4 +22,5 @@ - import_playbook: "mdadm.yml" - import_playbook: "luks.yml" - import_playbook: "lvm.yml" +- import_playbook: "swap.yml" - import_playbook: 
"seed-hypervisor-libvirt-host.yml" diff --git a/ansible/seed-introspection-rules.yml b/ansible/seed-introspection-rules.yml index 2ceb050cb..edcb031bf 100644 --- a/ansible/seed-introspection-rules.yml +++ b/ansible/seed-introspection-rules.yml @@ -19,4 +19,4 @@ inspector_rule_var_lldp_switch_port_interface: "{{ kolla_bifrost_inspector_lldp_switch_port_interface }}" inspector_rule_var_deploy_kernel: "{{ kolla_bifrost_inspector_deploy_kernel }}" inspector_rule_var_deploy_ramdisk: "{{ kolla_bifrost_inspector_deploy_ramdisk }}" - inspector_rule_var_legacy_deploy_kernel: "{{ kolla_bifrost_inspector_legacy_deploy_kernel }}" + when: kolla_enable_bifrost | bool diff --git a/ansible/seed-manage-containers.yml b/ansible/seed-manage-containers.yml index dd81a079b..10cd6c0ab 100644 --- a/ansible/seed-manage-containers.yml +++ b/ansible/seed-manage-containers.yml @@ -1,10 +1,10 @@ --- -- name: "Ensure defined container images are {{ kayobe_action }}ed on seed node" +- name: "Ensure defined container images are {{ kayobe_action | default('deploy') }}ed on seed node" hosts: seed tags: - seed-deploy-containers - seed-manage-containers vars: - manage_containers_action: "{{ kayobe_action }}" + manage_containers_action: "{{ kayobe_action | default('deploy') }}" roles: - role: manage-containers diff --git a/ansible/seed-vm-provision.yml b/ansible/seed-vm-provision.yml index 8be7364ef..87a142121 100644 --- a/ansible/seed-vm-provision.yml +++ b/ansible/seed-vm-provision.yml @@ -124,11 +124,11 @@ console_log_enabled: true tasks: - name: Wait for SSH access to the seed VM - local_action: - module: wait_for + delegate_to: localhost + wait_for: host: "{{ hostvars[seed_host].ansible_host }}" port: 22 state: started # NOTE: Ensure we exceed the 5 minute DHCP timeout of the eth0 # interface if necessary. 
- timeout: 360 + timeout: "{{ seed_vm_provision_timeout | default(360) | int }}" diff --git a/ansible/selinux.yml b/ansible/selinux.yml index aa00d4ce4..489967c03 100644 --- a/ansible/selinux.yml +++ b/ansible/selinux.yml @@ -1,6 +1,6 @@ --- - name: Configure SELinux state and reboot if required - hosts: seed:seed-hypervisor:overcloud:infra-vms + hosts: seed:seed-hypervisor:overcloud:infra-vms:ansible-control max_fail_percentage: >- {{ selinux_max_fail_percentage | default(host_configure_max_fail_percentage) | diff --git a/ansible/ssh-known-host.yml b/ansible/ssh-known-host.yml index a13ffba3a..58d68bd8a 100644 --- a/ansible/ssh-known-host.yml +++ b/ansible/ssh-known-host.yml @@ -11,4 +11,3 @@ - ssh-known-host roles: - role: ssh-known-host - diff --git a/ansible/swap.yml b/ansible/swap.yml index 82ccbba85..c3c9d3d96 100644 --- a/ansible/swap.yml +++ b/ansible/swap.yml @@ -1,6 +1,6 @@ --- - name: Configure swap - hosts: seed-hypervisor:seed:overcloud:infra-vms + hosts: seed-hypervisor:seed:overcloud:infra-vms:ansible-control become: true max_fail_percentage: >- {{ swap_max_fail_percentage | diff --git a/ansible/sysctl.yml b/ansible/sysctl.yml index cf2a2793e..7565014c7 100644 --- a/ansible/sysctl.yml +++ b/ansible/sysctl.yml @@ -1,6 +1,6 @@ --- - name: Ensure sysctl parameters are configured - hosts: seed:seed-hypervisor:overcloud:infra-vms + hosts: seed:seed-hypervisor:overcloud:infra-vms:ansible-control max_fail_percentage: >- {{ sysctl_max_fail_percentage | default(host_configure_max_fail_percentage) | diff --git a/ansible/time.yml b/ansible/time.yml index 2c02e5bbe..8a5d1ecf0 100644 --- a/ansible/time.yml +++ b/ansible/time.yml @@ -1,6 +1,6 @@ --- - name: Ensure timezone is configured - hosts: seed-hypervisor:seed:overcloud:infra-vms + hosts: seed-hypervisor:seed:overcloud:infra-vms:ansible-control max_fail_percentage: >- {{ time_max_fail_percentage | default(host_configure_max_fail_percentage) | diff --git a/ansible/tuned.yml b/ansible/tuned.yml index 
69c5ba8d6..87d8268f6 100644 --- a/ansible/tuned.yml +++ b/ansible/tuned.yml @@ -1,6 +1,6 @@ --- - name: Configure tuned profile - hosts: seed:seed-hypervisor:overcloud:infra-vms + hosts: seed:seed-hypervisor:overcloud:infra-vms:ansible-control max_fail_percentage: >- {{ tuned_max_fail_percentage | default(host_configure_max_fail_percentage) | @@ -9,8 +9,7 @@ tags: - tuned roles: - - name: giovtorres.tuned + - role: giovtorres.tuned become: true when: - tuned_active_builtin_profile != "" - - ansible_facts.os_family == 'RedHat' diff --git a/ansible/users.yml b/ansible/users.yml index 6afc1fd53..67260616c 100644 --- a/ansible/users.yml +++ b/ansible/users.yml @@ -1,6 +1,6 @@ --- - name: Ensure users exist - hosts: seed:seed-hypervisor:overcloud:infra-vms + hosts: seed:seed-hypervisor:overcloud:infra-vms:ansible-control max_fail_percentage: >- {{ users_max_fail_percentage | default(host_configure_max_fail_percentage) | diff --git a/ansible/wipe-disks.yml b/ansible/wipe-disks.yml index 79fe2edf1..37caeb641 100644 --- a/ansible/wipe-disks.yml +++ b/ansible/wipe-disks.yml @@ -8,7 +8,7 @@ # also closed and removed from crypttab. 
- name: Ensure that all unmounted block devices are wiped - hosts: seed-hypervisor:seed:overcloud:infra-vms + hosts: seed-hypervisor:seed:overcloud:infra-vms:ansible-control max_fail_percentage: >- {{ wipe_disks_max_fail_percentage | default(host_configure_max_fail_percentage) | diff --git a/bindep.txt b/bindep.txt index e3d190257..d99bfd217 100644 --- a/bindep.txt +++ b/bindep.txt @@ -1,5 +1,6 @@ -libpcre3-dev [platform:dpkg test] -pcre-devel [platform:rpm test] +# whereto dependencies +libpcre2-dev [platform:dpkg test] +pcre2-devel [platform:rpm test] # PDF Docs package dependencies tex-gyre [platform:dpkg doc] diff --git a/dev/ansible-control-host-configure.sh b/dev/ansible-control-host-configure.sh new file mode 100755 index 000000000..af65250a4 --- /dev/null +++ b/dev/ansible-control-host-configure.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +set -eu +set -o pipefail + +# Simple script to configure a development environment as an Ansible control host. + +PARENT="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + +source "${PARENT}/functions" + + +function main { + config_init + ansible_control_host_configure +} + +main diff --git a/dev/dev-vagrant-network-allocation.yml b/dev/dev-vagrant-network-allocation.yml index 66bff42df..39e3fcf38 100644 --- a/dev/dev-vagrant-network-allocation.yml +++ b/dev/dev-vagrant-network-allocation.yml @@ -1,2 +1,3 @@ +--- aio_ips: controller1: 192.168.33.3 diff --git a/dev/functions b/dev/functions index d59c28ca6..64c335724 100644 --- a/dev/functions +++ b/dev/functions @@ -141,18 +141,10 @@ function is_yum { fi } -function get_python { - if is_dnf; then - echo python3.12 - else - echo python3 - fi -} - function install_dependencies { echo "Installing package dependencies for kayobe" if is_dnf; then - sudo dnf -y install gcc git vim python3-devel python3-pyyaml libffi-devel python3.12-devel python3.12 python3.12-pyyaml + sudo dnf -y install gcc git vim python3-devel python3-pyyaml libffi-devel elif is_yum; then echo "CentOS 7 is no longer 
supported" exit 1 @@ -174,7 +166,7 @@ function install_venv { fi if [[ ! -f "${venv_path}/bin/activate" ]]; then echo "Creating virtual environment in ${venv_path}" - $(get_python) -m venv "${venv_path}" + python3 -m venv "${venv_path}" # NOTE: Virtualenv's activate and deactivate scripts reference an # unbound variable. set +u @@ -200,9 +192,7 @@ function install_venv_system_site_packages { fi if [[ ! -f "${venv_path}/bin/activate" ]]; then echo "Creating virtual environment in ${venv_path}" - # NOTE(wszumski): tenks doesn't currently support not using the system python - # interpreter with: "Failed to detect selinux python bindings" - /usr/bin/python3 -m venv --system-site-packages "${venv_path}" + python3 -m venv --system-site-packages "${venv_path}" # NOTE: Virtualenv's activate and deactivate scripts reference an # unbound variable. set +u @@ -228,9 +218,7 @@ function install_kayobe_dev_venv { function upgrade_kayobe_venv { echo "Upgrading kayobe virtual environment in ${KAYOBE_VENV_PATH}" - # NOTE(wszumski): We need to recreate the old virtualenv to switch to python3.12 - rm -rf "${KAYOBE_VENV_PATH}" - $(get_python) -m venv "${KAYOBE_VENV_PATH}" + python3 -m venv "${KAYOBE_VENV_PATH}" # NOTE: Virtualenv's activate and deactivate scripts reference an # unbound variable. set +u @@ -243,6 +231,11 @@ function upgrade_kayobe_venv { # Deployment +function is_compute_libvirt_enabled { + compute_libvirt_enabled=$(kayobe configuration dump --host localhost --var-name compute_libvirt_enabled) + to_bool "$compute_libvirt_enabled" +} + function is_deploy_image_built_locally { ipa_build_images=$(kayobe configuration dump --host localhost --var-name ipa_build_images) to_bool "$ipa_build_images" @@ -327,6 +320,16 @@ function control_host_upgrade { echo "Upgraded control host after $i attempts" } +function ansible_control_host_configure { + # Deploy an Ansible control host. 
+ environment_setup + + control_host_bootstrap + + echo "Configuring the Ansible control host" + run_kayobe control host configure +} + function seed_hypervisor_deploy { # Deploy a seed hypervisor. environment_setup @@ -443,26 +446,23 @@ function overcloud_deploy { control_host_bootstrap + # NOTE(mgoddard): There is a chicken and egg when generating libvirt TLS + # certificates using the kolla-ansible certificates command, and host + # libvirt. The certificates command needs to be able to gather facts for + # all hosts, but if the host configure step hasn't been run, we don't have + # SSH or the kolla user configured yet. However, we can't run host + # configure without the libvirt TLS certificates. + # Workaround: skip libvirt-host tag until later + echo "Configuring the controller host" + run_kayobe overcloud host configure --skip-tags libvirt-host + if [[ ${KAYOBE_OVERCLOUD_GENERATE_CERTIFICATES} = 1 ]]; then echo "Generate TLS certificates" run_kayobe playbook run $KAYOBE_SOURCE_PATH/ansible/kolla-ansible.yml -t config - # NOTE(mgoddard): There is a chicken and egg when generating libvirt - # TLS certificates using the kolla-ansible certificates command, and - # host libvirt. The certificates command needs to be able to gather - # facts for all hosts, but since the host configure step hasn't been - # run, we don't have SSH or the kolla user configured yet. However, we - # can't run host configure without the libvirt TLS certificates. - # Workaround: add the host to SSH known hosts and SSH as $USER. - run_kayobe playbook run $KAYOBE_SOURCE_PATH/ansible/ssh-known-host.yml -l overcloud - - # Avoid populating the fact cache with this weird setup. 
- export ANSIBLE_CACHE_PLUGIN=memory + run_kayobe kolla ansible run certificates \ --kolla-extra kolla_certificates_dir=${KAYOBE_CONFIG_PATH}/kolla/certificates \ - --kolla-extra ansible_user=$USER \ - --kolla-extra ansible_python_interpreter=/usr/bin/python3 \ --skip-tags kolla-openstack - unset ANSIBLE_CACHE_PLUGIN # Add CA cert to trust store. ca_cert=${KAYOBE_CONFIG_PATH}/kolla/certificates/ca/root.crt @@ -477,8 +477,10 @@ function overcloud_deploy { fi fi - echo "Configuring the controller host" - run_kayobe overcloud host configure + if is_compute_libvirt_enabled; then + echo "Configuring libvirt daemon" + run_kayobe overcloud host configure --tags libvirt-host + fi # FIXME(mgoddard): Perform host upgrade workarounds to ensure hostname # resolves to IP address of API interface for RabbitMQ. This seems to be @@ -575,9 +577,9 @@ function overcloud_test_init { environment_setup if [[ ! -z "$UPPER_CONSTRAINTS_FILE" ]]; then - pip install python-openstackclient -c "$UPPER_CONSTRAINTS_FILE" + pip install python-openstackclient python-ironicclient -c "$UPPER_CONSTRAINTS_FILE" else - pip install python-openstackclient + pip install python-openstackclient python-ironicclient fi source "${KOLLA_CONFIG_PATH:-/etc/kolla}/admin-openrc.sh" @@ -630,6 +632,47 @@ function overcloud_test_bounce_interface { run_kayobe overcloud host configure -t network } +function overcloud_inspection_rule_dump { + echo "Listing inspection rules ..." + openstack baremetal inspection rule list + echo "Dumping inspection rules ..." 
+ openstack baremetal inspection rule list -c UUID -f value | xargs -L 1 openstack baremetal inspection rule show +} + +function overcloud_test_inspect { + set -eu + node=$1 + + environment_setup + + source "${KOLLA_CONFIG_PATH:-/etc/kolla}/admin-openrc-system.sh" + + overcloud_inspection_rule_dump + + echo "Baremetal node: $node before inspection" + openstack baremetal node show "$node" + + # NOTE(wszumski): Switch to using kayobe playbooks to manage and inspect + # when we switch to node registration + + if [ "$(openstack baremetal node show -c provision_state -f value $node)" != "manageable" ]; then + openstack baremetal node manage "$node" --wait + fi + + # Run inspection + openstack baremetal node inspect "$node" --wait + + echo "Baremetal node: $node after inspection" + openstack baremetal node show $node + openstack baremetal node inventory save $node + + # Use Kayobe to save introspection data + run_kayobe baremetal compute introspection data save --limit "baremetal-compute,controllers[0]" --output-dir /tmp/baremetal-compute-inspection-data + + # Move back to available + openstack baremetal node provide "$node" --wait +} + function overcloud_test { set -eu diff --git a/dev/overcloud-test-inspect.sh b/dev/overcloud-test-inspect.sh new file mode 100755 index 000000000..ea6aec667 --- /dev/null +++ b/dev/overcloud-test-inspect.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +set -eu +set -o pipefail + +PARENT="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + +source "${PARENT}/functions" + + +function main { + config_init + overcloud_test_inspect tk0 +} + +main diff --git a/dev/tenks-deploy-config-compute-libvirt-on-host.yml b/dev/tenks-deploy-config-compute-libvirt-on-host.yml index a6b80c104..53f53d325 100644 --- a/dev/tenks-deploy-config-compute-libvirt-on-host.yml +++ b/dev/tenks-deploy-config-compute-libvirt-on-host.yml @@ -4,7 +4,7 @@ node_types: type0: - memory_mb: 1024 + memory_mb: 3072 vcpus: 1 volumes: # There is a minimum disk space capacity requirement of 4GiB 
when using Ironic Python Agent: @@ -15,7 +15,10 @@ node_types: console_log_enabled: true # We seem to hit issues with missing cpu features in CI as a result of using host-model, e.g: # https://zuul.opendev.org/t/openstack/build/02c33ab51664419a88a5a54ad22852a9/log/primary/system_logs/libvirt/qemu/tk0.txt.gz#38 - cpu_mode: + # NOTE(bbezak): Force QEMU to expose x86-64‑v2 features, so the CentOS + # Stream IPA doesn't fail with "Fatal glibc error: CPU does not support + # x86-64-v2". + cpu_mode: maximum specs: - type: type0 diff --git a/dev/tenks-deploy-config-compute.yml b/dev/tenks-deploy-config-compute.yml index 984852bcb..feb46439a 100644 --- a/dev/tenks-deploy-config-compute.yml +++ b/dev/tenks-deploy-config-compute.yml @@ -4,7 +4,7 @@ node_types: type0: - memory_mb: 1024 + memory_mb: 3072 vcpus: 1 volumes: # There is a minimum disk space capacity requirement of 4GiB when using Ironic Python Agent: @@ -15,7 +15,10 @@ node_types: console_log_enabled: true # We seem to hit issues with missing cpu features in CI as a result of using host-model, e.g: # https://zuul.opendev.org/t/openstack/build/02c33ab51664419a88a5a54ad22852a9/log/primary/system_logs/libvirt/qemu/tk0.txt.gz#38 - cpu_mode: + # NOTE(bbezak): Force QEMU to expose x86-64‑v2 features, so the CentOS + # Stream IPA doesn't fail with "Fatal glibc error: CPU does not support + # x86-64-v2". 
+ cpu_mode: maximum specs: - type: type0 diff --git a/doc/requirements.txt b/doc/requirements.txt index 51a2c6498..604a1d205 100644 --- a/doc/requirements.txt +++ b/doc/requirements.txt @@ -6,4 +6,4 @@ openstackdocstheme>=2.2.1 # Apache-2.0 reno>=3.1.0 # Apache-2.0 sphinx>=2.0.0,!=2.1.0 # BSD sphinxcontrib-svg2pdfconverter>=0.1.0 # BSD -whereto>=0.3.0 # Apache-2.0 +whereto>=0.5.0 # Apache-2.0 diff --git a/doc/source/administration/ansible-control.rst b/doc/source/administration/ansible-control.rst new file mode 100644 index 000000000..21c3f0dbc --- /dev/null +++ b/doc/source/administration/ansible-control.rst @@ -0,0 +1,59 @@ +=================================== +Ansible Control Host Administration +=================================== + +Updating Packages +================= + +It is possible to update packages on the Ansible control host. + +Package Repositories +-------------------- + +If using custom DNF package repositories on CentOS or Rocky, it may be +necessary to update these prior to running a package update. To do this, update +the configuration in ``${KAYOBE_CONFIG_PATH}/dnf.yml`` and run the following +command:: + + (kayobe) $ kayobe control host configure --tags dnf + +Package Update +-------------- + +To update one or more packages:: + + (kayobe) $ kayobe control host package update --packages <package1>,<package2> + +To update all eligible packages, use ``*``, escaping if necessary:: + + (kayobe) $ kayobe control host package update --packages "*" + +To only install updates that have been marked security related:: + + (kayobe) $ kayobe control host package update --packages "*" --security + +Note that these commands do not affect packages installed in containers, only +those installed on the host. + +Kernel Updates +-------------- + +If the kernel has been updated, you will probably want to reboot the host +to boot into the new kernel.
This can be done using a command such as the +following:: + + (kayobe) $ kayobe control host command run --command "shutdown -r" --become + +Running Commands +================ + +It is possible to run a command on the host:: + + (kayobe) $ kayobe control host command run --command "<command>" + +For example:: + + (kayobe) $ kayobe control host command run --command "service docker restart" + +To execute the command with root privileges, add the ``--become`` argument. +Adding the ``--verbose`` argument allows the output of the command to be seen. diff --git a/doc/source/administration/index.rst b/doc/source/administration/index.rst index 79cee41ce..dbdef7672 100644 --- a/doc/source/administration/index.rst +++ b/doc/source/administration/index.rst @@ -9,6 +9,7 @@ administrative tasks. :maxdepth: 2 general + ansible-control seed infra-vms overcloud diff --git a/doc/source/administration/overcloud.rst b/doc/source/administration/overcloud.rst index 13080a33f..f27187d22 100644 --- a/doc/source/administration/overcloud.rst +++ b/doc/source/administration/overcloud.rst @@ -15,7 +15,7 @@ necessary to update these prior to running a package update. To do this, update the configuration in ``${KAYOBE_CONFIG_PATH}/dnf.yml`` and run the following command:: - (kayobe) $ kayobe overcloud host configure --tags dnf --kolla-tags none + (kayobe) $ kayobe overcloud host configure --tags dnf Package Update -------------- @@ -80,10 +80,9 @@ improved by specifying Ansible tags to limit the tasks run in kayobe and/or kolla-ansible's playbooks. This may require knowledge of the inner workings of these tools but in general, kolla-ansible tags the play used to configure each service by the name of that service. For example: ``nova``, ``neutron`` or -``ironic``. Use ``-t`` or ``--tags`` to specify kayobe tags and ``-kt`` or -``--kolla-tags`` to specify kolla-ansible tags. For example:: +``ironic``. Use ``-t`` or ``--tags`` to specify tags. 
For example:: - (kayobe) $ kayobe overcloud service reconfigure --tags config --kolla-tags nova,ironic + (kayobe) $ kayobe overcloud service reconfigure --tags nova,ironic Deploying Updated Container Images ================================== @@ -105,10 +104,9 @@ improved by specifying Ansible tags to limit the tasks run in kayobe and/or kolla-ansible's playbooks. This may require knowledge of the inner workings of these tools but in general, kolla-ansible tags the play used to configure each service by the name of that service. For example: ``nova``, ``neutron`` or -``ironic``. Use ``-t`` or ``--tags`` to specify kayobe tags and ``-kt`` or -``--kolla-tags`` to specify kolla-ansible tags. For example:: +``ironic``. Use ``-t`` or ``--tags`` to specify tags. For example:: - (kayobe) $ kayobe overcloud service deploy containers --kolla-tags nova,ironic + (kayobe) $ kayobe overcloud service deploy containers --tags nova,ironic Upgrading Containerised Services ================================ @@ -126,9 +124,9 @@ To upgrade the containerised control plane services:: (kayobe) $ kayobe overcloud service upgrade As for the reconfiguration command, it is possible to specify tags for Kayobe -and/or kolla-ansible:: +and kolla-ansible:: - (kayobe) $ kayobe overcloud service upgrade --tags config --kolla-tags keystone + (kayobe) $ kayobe overcloud service upgrade --tags keystone Running Prechecks ================= @@ -137,10 +135,10 @@ Sometimes it may be useful to run prechecks without deploying services:: (kayobe) $ kayobe overcloud service prechecks -As for other similar commands, it is possible to specify tags for Kayobe and/or +As for other similar commands, it is possible to specify tags for Kayobe and kolla-ansible:: - (kayobe) $ kayobe overcloud service upgrade --tags config --kolla-tags keystone + (kayobe) $ kayobe overcloud service upgrade --tags keystone Stopping the Overcloud Services =============================== @@ -156,12 +154,11 @@ To stop the overcloud 
services:: It should be noted that this state is persistent - containers will remain stopped after a reboot of the host on which they are running. -It is possible to limit the operation to particular hosts via -``--kolla-limit``, or to particular services via ``--kolla-tags``. It is also -possible to avoid stopping the common containers via ``--kolla-skip-tags -common``. For example: +It is possible to limit the operation to particular hosts via ``--limit``, or +to particular services via ``--tags``. It is also possible to avoid stopping +the common containers via ``--skip-tags common``. For example: - (kayobe) $ kayobe overcloud service stop --kolla-tags glance,nova --kolla-skip-tags common + (kayobe) $ kayobe overcloud service stop --tags glance,nova --skip-tags common Destroying the Overcloud Services ================================= diff --git a/doc/source/administration/seed.rst b/doc/source/administration/seed.rst index 4880e5cf5..0e2f8ba35 100644 --- a/doc/source/administration/seed.rst +++ b/doc/source/administration/seed.rst @@ -31,10 +31,7 @@ To destroy the seed services:: This can optionally be used with a tag:: - (kayobe) $ kayobe seed service destroy --yes-i-really-really-mean-it -kt none -t docker-registry - -Care must be taken to set both kayobe and kolla tags to avoid accidentally -destroying other services. + (kayobe) $ kayobe seed service destroy --yes-i-really-really-mean-it -t docker-registry Updating Packages ================= @@ -49,7 +46,7 @@ necessary to update these prior to running a package update. To do this, update the configuration in ``${KAYOBE_CONFIG_PATH}/dnf.yml`` and run the following command:: - (kayobe) $ kayobe seed host configure --tags dnf --kolla-tags none + (kayobe) $ kayobe seed host configure --tags dnf Package Update -------------- @@ -146,7 +143,7 @@ Backup Podman, simply change ``docker`` for ``sudo podman`` in the command. 
It should be safe to keep services running during the backup, but for maximum -safety they may optionally be stopped: +safety they may optionally be stopped:: docker exec -it bifrost_deploy systemctl stop ironic ironic-inspector diff --git a/doc/source/configuration/reference/bifrost.rst b/doc/source/configuration/reference/bifrost.rst index 90e270f75..62ab636be 100644 --- a/doc/source/configuration/reference/bifrost.rst +++ b/doc/source/configuration/reference/bifrost.rst @@ -77,7 +77,7 @@ information on building disk images. The default configuration builds a whole disk (partitioned) image using the selected :ref:`OS distribution ` with serial console enabled, -and SELinux disabled if CentOS Stream is used. Rocky Linux 9 users should use +and SELinux disabled if CentOS Stream is used. Rocky Linux 10 users should use the default method of building images with :ref:`Diskimage builder directly `. @@ -268,14 +268,17 @@ Ironic Inspector configuration The following options configure the Ironic Inspector service in the ``bifrost-deploy`` container. -``kolla_bifrost_inspector_processing_hooks`` +``kolla_bifrost_inspector_hooks`` List of of inspector processing plugins. Default is ``{{ - inspector_processing_hooks }}``, defined in + inspector_hooks }}``, defined in ``${KAYOBE_CONFIG_PATH}/inspector.yml``. ``kolla_bifrost_inspector_port_addition`` Which MAC addresses to add as ports during introspection. One of ``all``, ``active`` or ``pxe``. Default is ``{{ inspector_add_ports }}``, defined in ``${KAYOBE_CONFIG_PATH}/inspector.yml``. +``kolla_bifrost_inspector_keep_ports`` + Which ports to keep after introspection. One of ``all``, ``present`` or + ``added``. Default is ``present`` to align with Bifrost's defaults. ``kolla_bifrost_inspector_extra_kernel_options`` List of extra kernel parameters for the inspector default PXE configuration. 
Default is ``{{ inspector_extra_kernel_options }}``, defined diff --git a/doc/source/configuration/reference/hosts.rst b/doc/source/configuration/reference/hosts.rst index 9e06722e3..da2a1467f 100644 --- a/doc/source/configuration/reference/hosts.rst +++ b/doc/source/configuration/reference/hosts.rst @@ -7,6 +7,7 @@ Host Configuration This section covers configuration of hosts. It does not cover configuration or deployment of containers. Hosts that are configured by Kayobe include: +* Ansible control host (``kayobe control host configure``) * Seed hypervisor (``kayobe seed hypervisor host configure``) * Seed (``kayobe seed host configure``) * Infra VMs (``kayobe infra vm host configure``) @@ -26,6 +27,7 @@ Some host configuration options are set via global variables, and others have a variable for each type of host. The latter variables are included in the following files under ``${KAYOBE_CONFIG_PATH}``: +* ``ansible-control.yml`` * ``seed-hypervisor.yml`` * ``seed.yml`` * ``compute.yml`` @@ -79,9 +81,11 @@ is ``stack``. Typically, the image used to provision these hosts will not include this user account, so Kayobe performs a bootstrapping step to create it, as a different user. In cloud images, there is often a user named after the OS distro, e.g. -``centos``, ``rocky`` or ``ubuntu``. This user defaults to the -``os_distribution`` variable, but may be set via the following variables: +``rocky`` or ``ubuntu``. This user defaults to the ``os_distribution`` +variable, except for CentOS which uses ``cloud-user``, but may be set via the +following variables: +* ``ansible_control_bootstrap_user`` * ``seed_hypervisor_bootstrap_user`` * ``seed_bootstrap_user`` * ``infra_vm_bootstrap_user`` @@ -180,6 +184,7 @@ that used by the ``users`` variable of the `singleplatform-eng.users `__ role. 
The following variables can be used to set the users for specific types of hosts: +* ``ansible_control_users`` * ``seed_hypervisor_users`` * ``seed_users`` * ``infra_vm_users`` @@ -518,7 +523,7 @@ Development tools | ``dev-tools`` Development tools (additional OS packages) can be configured to be installed -on hosts. By default Ddvelopment tools are installed on all +on hosts. By default development tools are installed on all ``seed-hypervisor``, ``seed``, ``overcloud`` and ``infra-vms`` hosts. The following variables can be used to set which packages to install: @@ -560,6 +565,9 @@ the SELinux configuration. The timeout for waiting for systems to reboot is ``selinux_reboot_timeout``. Alternatively, the reboot may be avoided by setting ``selinux_do_reboot`` to ``false``. +The ``selinux_update_kernel_param`` variable can be used to change the selinux +state set on the kernel command line; it takes a boolean value. + Network Configuration ===================== *tags:* @@ -581,6 +589,7 @@ Ubuntu systems. The following variables can be used to set whether to enable firewalld: +* ``ansible_control_firewalld_enabled`` * ``seed_hypervisor_firewalld_enabled`` * ``seed_firewalld_enabled`` * ``infra_vm_firewalld_enabled`` @@ -592,6 +601,7 @@ The following variables can be used to set whether to enable firewalld: When firewalld is enabled, the following variables can be used to configure a list of zones to create. Each item is a dict containing a ``zone`` item: +* ``ansible_control_firewalld_zones`` * ``seed_hypervisor_firewalld_zones`` * ``seed_firewalld_zones`` * ``infra_vm_firewalld_zones`` @@ -603,6 +613,7 @@ list of zones to create. Each item is a dict containing a ``zone`` item: The following variables can be used to set a default zone. 
The default is unset, in which case the default zone will not be changed: +* ``ansible_control_firewalld_default_zone`` +* ``seed_hypervisor_firewalld_default_zone`` +* ``seed_firewalld_default_zone`` +* ``infra_vm_firewalld_default_zone`` @@ -617,6 +628,7 @@ are omitted if not provided, with the following exceptions: ``offline`` (default ``true``), ``permanent`` (default ``true``), ``state`` (default ``enabled``): +* ``ansible_control_firewalld_rules`` +* ``seed_hypervisor_firewalld_rules`` +* ``seed_firewalld_rules`` +* ``infra_vm_firewalld_rules`` @@ -679,6 +691,45 @@ follows: Note that despite the name, this will not actively enable UFW. It may do so in the future. +Fail2Ban +======== +*tags:* + | ``fail2ban`` + +Fail2Ban can be used to ban IP addresses that show malicious signs, such as +ones that conduct too many failed login attempts. Kayobe can install and configure +Fail2Ban on hosts. + +In order to use fail2ban, it is important to note that the user should enable +``dnf_install_epel`` in their configuration when using Rocky Linux or CentOS. + +The following variables can be used to set whether to enable fail2ban: + +* ``ansible_control_fail2ban_enabled`` +* ``seed_hypervisor_fail2ban_enabled`` +* ``seed_fail2ban_enabled`` +* ``infra_vm_fail2ban_enabled`` +* ``compute_fail2ban_enabled`` +* ``controller_fail2ban_enabled`` +* ``monitoring_fail2ban_enabled`` +* ``storage_fail2ban_enabled`` + +The following example demonstrates how to enable fail2ban on controllers. + +.. code-block:: yaml + + controller_fail2ban_enabled: true + +The following should be added in the configuration file to set the default +fail2ban sshd jail: + +.. code-block:: yaml + + fail2ban_jail_configuration: + - option: enabled + value: "true" + section: sshd + .. _configuration-hosts-tuned: Tuned @@ -686,11 +737,10 @@ Tuned *tags:* | ``tuned`` -.. note:: Tuned configuration only supports CentOS/Rocky systems for now. - Built-in ``tuned`` profiles can be applied to hosts. 
The following variables can be used to set a ``tuned`` profile to specific types of hosts: +* ``ansible_control_tuned_active_builtin_profile`` * ``seed_hypervisor_tuned_active_builtin_profile`` * ``seed_tuned_active_builtin_profile`` * ``compute_tuned_active_builtin_profile`` @@ -702,6 +752,7 @@ can be used to set a ``tuned`` profile to specific types of hosts: By default, Kayobe applies a ``tuned`` profile matching the role of each host in the system: +* Ansible control host: ``throughput-performance`` * seed hypervisor: ``virtual-host`` * seed: ``virtual-guest`` * infrastructure VM: ``virtual-guest`` @@ -727,6 +778,7 @@ Arbitrary ``sysctl`` configuration can be applied to hosts. The variable format is a dict/map, mapping parameter names to their required values. The following variables can be used to set ``sysctl`` configuration specific types of hosts: +* ``ansible_control_sysctl_parameters`` * ``seed_hypervisor_sysctl_parameters`` * ``seed_sysctl_parameters`` * ``infra_vm_sysctl_parameters`` @@ -826,6 +878,8 @@ Kayobe will configure `Chrony `__ on all hosts in seed seed-hypervisor overcloud + infra-vms + ansible-control This provides a flexible way to opt in or out of having kayobe manage the NTP service. @@ -868,6 +922,7 @@ arrays they want to manage with Kayobe. Software RAID arrays may be configured via the ``mdadm_arrays`` variable. For convenience, this is mapped to the following variables: +* ``ansible_control_mdadm_arrays`` * ``seed_hypervisor_mdadm_arrays`` * ``seed_mdadm_arrays`` * ``infra_vm_mdadm_arrays`` @@ -904,6 +959,7 @@ Encryption Encrypted block devices may be configured via the ``luks_devices`` variable. For convenience, this is mapped to the following variables: +* ``ansible_control_luks_devices`` * ``seed_hypervisor_luks_devices`` * ``seed_luks_devices`` * ``infra_vm_luks_devices`` @@ -941,6 +997,7 @@ Logical Volume Manager (LVM) physical volumes, volume groups, and logical volumes may be configured via the ``lvm_groups`` variable. 
For convenience, this is mapped to the following variables: +* ``ansible_control_lvm_groups`` * ``seed_hypervisor_lvm_groups`` * ``seed_lvm_groups`` * ``infra_vm_lvm_groups`` @@ -978,6 +1035,7 @@ can optionally be created. The logical volume is created in volume group called This configuration is enabled by the following variables, which default to ``false``: +* ``ansible_control_lvm_group_data_enabled`` * ``compute_lvm_group_data_enabled`` * ``controller_lvm_group_data_enabled`` * ``seed_lvm_group_data_enabled`` @@ -987,6 +1045,7 @@ This configuration is enabled by the following variables, which default to To use this configuration, a list of disks must be configured via the following variables: +* ``ansible_control_lvm_group_data_disks`` * ``seed_lvm_group_data_disks`` * ``infra_vm_lvm_group_data_disks`` * ``compute_lvm_group_data_disks`` @@ -1006,6 +1065,7 @@ For example, to configure two of the seed's disks for use by LVM: The Docker volumes LVM volume is assigned a size given by the following variables, with a default value of 75% (of the volume group's capacity): +* ``ansible_control_lvm_group_data_lv_docker_volumes_size`` * ``seed_lvm_group_data_lv_docker_volumes_size`` * ``infra_vm_lvm_group_data_lv_docker_volumes_size`` * ``compute_lvm_group_data_lv_docker_volumes_size`` @@ -1036,6 +1096,7 @@ Custom LVM To define additional logical logical volumes in the default ``data`` volume group, modify one of the following variables: +* ``ansible_control_lvm_group_data_lvs`` * ``seed_lvm_group_data_lvs`` * ``infra_vm_lvm_group_data_lvs`` * ``compute_lvm_group_data_lvs`` @@ -1061,6 +1122,7 @@ include the LVM volume for Docker volume data: It is possible to define additional LVM volume groups via the following variables: +* ``ansible_control_lvm_groups_extra`` * ``seed_lvm_groups_extra`` * ``infra_vm_lvm_groups_extra`` * ``compute_lvm_groups_extra`` @@ -1132,6 +1194,25 @@ example, to use podman: container_engine: podman +The container engine is deployed on hosts in the 
``container-engine`` group. By +default this includes the following groups: + +.. code-block:: ini + + [container-engine:children] + # Hosts in this group will have Docker/Podman installed. + seed + controllers + network + monitoring + storage + compute + ansible-control + +Note that deployment of a container engine is disabled by default on the +Ansible control host. This can be changed by setting +``ansible_control_container_engine_enabled`` to ``true``. + Podman ------ @@ -1236,7 +1317,7 @@ are relevant only when using the libvirt daemon rather than the Default is ``true``. ``compute_libvirt_ceph_repo_release`` Ceph package repository release to install on CentOS and Rocky hosts when - ``compute_libvirt_ceph_repo_install`` is ``true``. Default is ``pacific``. + ``compute_libvirt_ceph_repo_install`` is ``true``. Default is ``squid``. Example: custom libvirtd.conf ----------------------------- @@ -1358,6 +1439,7 @@ Swap Swap files and devices may be configured via the ``swap`` variable. For convenience, this is mapped to the following variables: +* ``ansible_control_swap`` * ``seed_swap`` * ``seed_hypervisor_swap`` * ``infra_vm_swap`` diff --git a/doc/source/configuration/reference/ironic-python-agent.rst b/doc/source/configuration/reference/ironic-python-agent.rst index 23f7f4f06..401fa2225 100644 --- a/doc/source/configuration/reference/ironic-python-agent.rst +++ b/doc/source/configuration/reference/ironic-python-agent.rst @@ -45,8 +45,7 @@ image build``. ``ipa_builder_source_version`` Version of IPA builder source repository. Default is ``master``. ``ipa_build_dib_host_packages_extra`` - List of additional build host packages to install. Default is an empty - list. + List of additional build host packages to install. Default is ``[ 'zstd' ]``. ``ipa_build_dib_elements_default`` List of default Diskimage Builder (DIB) elements to use when building IPA images. 
Default is ``["centos", "dynamic-login", "enable-serial-console", @@ -162,12 +161,12 @@ Bifrost can be configured to use ``dynamic-login`` with the The updated configuration is applied with ``kayobe seed service deploy``. Overcloud Ironic can be configured with the -``kolla_ironic_pxe_append_params_extra`` variable: +``kolla_ironic_kernel_append_params_extra`` variable: .. code-block:: yaml :caption: ``ironic.yml`` - kolla_ironic_pxe_append_params_extra: + kolla_ironic_kernel_append_params_extra: - sshkey="ssh-rsa BBA1..." The updated configuration is applied with ``kayobe overcloud service deploy``. diff --git a/doc/source/configuration/reference/kolla-ansible.rst b/doc/source/configuration/reference/kolla-ansible.rst index be35761a3..5fc6e7e45 100644 --- a/doc/source/configuration/reference/kolla-ansible.rst +++ b/doc/source/configuration/reference/kolla-ansible.rst @@ -721,8 +721,6 @@ which files are supported. ``heat/*`` Extended heat configuration. ``horizon/*`` Extended horizon configuration. ``influx*`` InfluxDB configuration. - ``ironic-inspector.conf`` Ironic inspector configuration. - ``ironic-inspector/*`` Extended Ironic inspector configuration ``ironic.conf`` Ironic configuration. ``ironic/*`` Extended ironic configuration. ``keepalived/*`` Extended keepalived configuration. diff --git a/doc/source/configuration/reference/kolla.rst b/doc/source/configuration/reference/kolla.rst index b920944eb..3a03dfb94 100644 --- a/doc/source/configuration/reference/kolla.rst +++ b/doc/source/configuration/reference/kolla.rst @@ -247,7 +247,7 @@ default is to specify the URL and version of Bifrost, as defined in ``${KAYOBE_CONFIG_PATH}/bifrost.yml``. For example, to specify a custom source location for the ``ironic-base`` -package: +package and a custom version of cadvisor: .. 
code-block:: yaml :caption: ``kolla.yml`` @@ -261,6 +261,11 @@ package: type: "git" location: https://git.example.com/ironic reference: downstream + prometheus-cadvisor: + version: "0.54.1" + sha256: + amd64: xxxx + arm64: yyyy This will result in Kayobe adding the following configuration to ``kolla-build.conf``: @@ -278,9 +283,17 @@ This will result in Kayobe adding the following configuration to location = https://git.example.com/ironic reference = downstream + [prometheus-cadvisor] + version = 0.54.1 + sha256 = amd64:xxxx,arm64:yyyy + Note that it is currently necessary to include the Bifrost source location if using a seed. +Note that it is not necessary to specify the prometheus-cadvisor ``type: url`` +and ``location: https://github.com/...`` because they are inherited from kolla +(``kolla/common/sources.py``). + Plugins & additions ------------------- diff --git a/doc/source/configuration/reference/network.rst b/doc/source/configuration/reference/network.rst index 595d1e110..9e6789006 100644 --- a/doc/source/configuration/reference/network.rst +++ b/doc/source/configuration/reference/network.rst @@ -39,6 +39,8 @@ supported: IP address of the gateway for the hardware introspection network. ``neutron_gateway`` IP address of the gateway for a neutron subnet based on this network. +``inspection_dns_servers`` + List of DNS servers used during hardware introspection. ``vlan`` VLAN ID. ``mtu`` @@ -302,8 +304,7 @@ String format rules (CentOS Stream/Rocky Linux only) The string format of a rule is the string which would be appended to ``ip rule `` to create or delete the rule. Note that when using NetworkManager -(the default since Zed and in Yoga when using Rocky Linux 9) the table must be -specified by ID. +(the default when using Rocky Linux 10) the table must be specified by ID. 
To configure a network called ``example`` with an IP routing policy rule to handle traffic from the subnet ``10.1.0.0/24`` using the routing table with ID @@ -396,9 +397,9 @@ The following attributes are supported: ``bridge_stp`` .. note:: - For Rocky Linux 9, the ``bridge_stp`` attribute is set to false to preserve - backwards compatibility with network scripts. This is because the Network - Manager sets STP to true by default on bridges. + For Rocky Linux 10, the ``bridge_stp`` attribute is set to false to + preserve backwards compatibility with network scripts. This is because + the Network Manager sets STP to true by default on bridges. Enable or disable the Spanning Tree Protocol (STP) on this bridge. Should be set to a boolean value. The default is not set on Ubuntu systems. @@ -828,19 +829,22 @@ If using the overcloud to inspect bare metal workload (compute) hosts, it is necessary to define a DHCP allocation pool for the overcloud's ironic inspector DHCP server using the ``inspection_allocation_pool_start`` and ``inspection_allocation_pool_end`` attributes of the workload provisioning -network. +network. If ``kolla_internal_fqdn`` is set, it is mandatory to also supply one +or more DNS servers using ``inspection_dns_servers``. .. note:: This example assumes that the ``example`` network is mapped to ``provision_wl_net_name``. -To configure a network called ``example`` with an inspection allocation pool: +To configure a network called ``example`` with an inspection allocation pool +and inspection DNS servers: .. code-block:: yaml example_inspection_allocation_pool_start: 10.0.1.196 example_inspection_allocation_pool_end: 10.0.1.254 + example_inspection_dns_servers: [10.0.1.10, 10.0.1.11] .. note:: @@ -876,6 +880,19 @@ Kayobe's playbook group variables define some sensible defaults for this variable for hosts in the top level standard groups. These defaults are set using the network roles typically required by the group. 
+Ansible Control Host +-------------------- + +By default, the Ansible control host is attached to the following network: + +* overcloud admin network + +This list may be extended by setting +``ansible_control_extra_network_interfaces`` to a list of names of additional +networks to attach. Alternatively, the list may be completely overridden by +setting ``ansible_control_network_interfaces``. These variables are found in +``${KAYOBE_CONFIG_PATH}/ansible-control.yml``. + Seed ---- diff --git a/doc/source/configuration/reference/os-distribution.rst b/doc/source/configuration/reference/os-distribution.rst index 1a3bfb3b1..5f29d95ec 100644 --- a/doc/source/configuration/reference/os-distribution.rst +++ b/doc/source/configuration/reference/os-distribution.rst @@ -15,10 +15,10 @@ or ``rocky`` or ``ubuntu``, and defaults to ``rocky``. The ``os_release`` variable in ``etc/kayobe/globals.yml`` can be used to set the release of the OS. When ``os_distribution`` is set to ``centos`` it may be -set to ``9-stream``, and this is its default value. When ``os_distribution`` is -set to ``ubuntu`` it may be set to ``noble``, and this is its default value. -When ``os_distribution`` is set to ``rocky`` it may be set to ``9``, and this -is its default value. +set to ``10-stream``, and this is its default value. When ``os_distribution`` +is set to ``rocky`` it may be set to ``10``, and this is its default value. +When ``os_distribution`` is set to ``ubuntu`` it may be set to ``noble``, and +this is its default value. These variables are used to set various defaults, including: diff --git a/doc/source/configuration/reference/overcloud-dib.rst b/doc/source/configuration/reference/overcloud-dib.rst index 1593e20b4..db537f9a6 100644 --- a/doc/source/configuration/reference/overcloud-dib.rst +++ b/doc/source/configuration/reference/overcloud-dib.rst @@ -34,7 +34,7 @@ how these images are built. Consult the information on building disk images. 
The default configuration builds a whole disk (partitioned) image using the -selected :ref:`OS distribution ` (Rocky Linux 9 by default) +selected :ref:`OS distribution ` (Rocky Linux 10 by default) with serial console enabled, and SELinux disabled if CentOS Stream or Rocky Linux is used. `Cloud-init `__ is used to process diff --git a/doc/source/configuration/reference/physical-network.rst b/doc/source/configuration/reference/physical-network.rst index e0e69cc18..d1bbf8999 100644 --- a/doc/source/configuration/reference/physical-network.rst +++ b/doc/source/configuration/reference/physical-network.rst @@ -293,24 +293,14 @@ module. configuration. The variable is passed as the ``src_format`` argument to the ``junos_config`` module. The default value is ``text``. -Provider -^^^^^^^^ - * ``ansible_host`` is the hostname or IP address. Optional. - * ``ansible_user`` is the SSH username. - * ``ansible_ssh_pass`` is the SSH password. Mutually exclusive with ``ansible_ssh_private_key_file``. - * ``ansible_ssh_private_key_file`` is the SSH private key file. Mutually exclusive with ``ansible_ssh_pass``. - -* ``switch_junos_timeout`` may be set to a timeout in seconds for communicating - with the device. - -Alternatively, set ``switch_junos_provider`` to the value to be passed as the -``provider`` argument to the ``junos_config`` module. +* ``ansible_connection`` should be ``ansible.netcommon.netconf``. +* ``ansible_network_os`` should be ``junipernetworks.junos.junos``. 
Mellanox MLNX OS ---------------- diff --git a/doc/source/configuration/reference/seed-custom-containers.rst b/doc/source/configuration/reference/seed-custom-containers.rst index a6f094c51..70cc748ca 100644 --- a/doc/source/configuration/reference/seed-custom-containers.rst +++ b/doc/source/configuration/reference/seed-custom-containers.rst @@ -17,11 +17,12 @@ For example, to deploy a squid container image: seed_containers: squid: - image: "stackhpc/squid:3.5.20-1" + image: "docker.io/stackhpc/squid" pre: "{{ kayobe_env_config_path }}/containers/squid/pre.yml" post: "{{ kayobe_env_config_path }}/containers/squid/post.yml" pre_destroy: "{{ kayobe_env_config_path }}/containers/squid/pre_destroy.yml" post_destroy: "{{ kayobe_env_config_path }}/containers/squid/post_destroy.yml" + tag: "3.5.20-1" Please notice the *optional* pre, post, pre_destroy, and post_destroy Ansible task files - those need to be created in ``kayobe-config`` path. The table below describes diff --git a/doc/source/configuration/reference/vgpu.rst b/doc/source/configuration/reference/vgpu.rst index 693c89d21..fcfe6cef9 100644 --- a/doc/source/configuration/reference/vgpu.rst +++ b/doc/source/configuration/reference/vgpu.rst @@ -226,7 +226,7 @@ To apply the configuration to Nova: .. code:: shell - (kayobe) $ kayobe overcloud service deploy -kt nova + (kayobe) $ kayobe overcloud service deploy -t nova OpenStack flavors ================= @@ -307,4 +307,4 @@ Reconfigure nova to match the change: .. 
code:: shell - (kayobe) $ kayobe overcloud service reconfigure -kt nova --kolla-limit computegpu000 --skip-prechecks + (kayobe) $ kayobe overcloud service reconfigure -t nova --limit computegpu000 --skip-prechecks diff --git a/doc/source/configuration/scenarios/all-in-one/index.rst b/doc/source/configuration/scenarios/all-in-one/index.rst index 750a2487d..1c7e33d71 100644 --- a/doc/source/configuration/scenarios/all-in-one/index.rst +++ b/doc/source/configuration/scenarios/all-in-one/index.rst @@ -30,7 +30,7 @@ It also requires a single host running a :ref:`supported operating system * at least one network interface that has Internet access You will need access to a user account with passwordless sudo. The default user -in a cloud image (e.g. ``centos`` or ``rocky`` or ``ubuntu``) is typically +in a cloud image (e.g. ``cloud-user`` or ``rocky`` or ``ubuntu``) is typically sufficient. This user will be used to run Kayobe commands. It will also be used by Kayobe to bootstrap other user accounts. diff --git a/doc/source/configuration/scenarios/all-in-one/overcloud.rst b/doc/source/configuration/scenarios/all-in-one/overcloud.rst index 2992877ab..2677df323 100644 --- a/doc/source/configuration/scenarios/all-in-one/overcloud.rst +++ b/doc/source/configuration/scenarios/all-in-one/overcloud.rst @@ -219,11 +219,11 @@ or ``rocky`` if using Rocky Linux.. os_distribution: "ubuntu" Kayobe uses a bootstrap user to create a ``stack`` user account. By default, -this user is ``centos`` on CentOS, ``rocky`` on Rocky and ``ubuntu`` on Ubuntu, -in line with the default user in the official cloud images. If you are using -a different bootstrap user, set the ``controller_bootstrap_user`` variable in -``etc/kayobe/controllers.yml``. For example, to set it to ``cloud-user`` (as -seen in MAAS): +this user is ``cloud-user`` on CentOS, ``rocky`` on Rocky and ``ubuntu`` on +Ubuntu, in line with the default user in the official cloud images. 
If you are +using a different bootstrap user, set the ``controller_bootstrap_user`` +variable in ``etc/kayobe/controllers.yml``. For example, to set it to +``cloud-user`` (as seen in MAAS): .. code-block:: yaml :caption: ``etc/kayobe/controllers.yml`` diff --git a/doc/source/contributor/automated.rst b/doc/source/contributor/automated.rst index 8db00d530..316588b4a 100644 --- a/doc/source/contributor/automated.rst +++ b/doc/source/contributor/automated.rst @@ -309,8 +309,8 @@ It is now possible to discover, inspect and provision the controller VM:: kayobe overcloud hardware inspect kayobe overcloud provision -The controller VM is now accessible via SSH as the bootstrap user (``centos`` -or ``ubuntu``) at ``192.168.33.3``. +The controller VM is now accessible via SSH as the bootstrap user +(``cloud-user``, ``rocky`` or ``ubuntu``) at ``192.168.33.3``. The machines and networking created by Tenks can be cleaned up via ``dev/tenks-teardown-overcloud.sh``:: diff --git a/doc/source/contributor/releases.rst b/doc/source/contributor/releases.rst index b0d17aba6..b54ee295a 100644 --- a/doc/source/contributor/releases.rst +++ b/doc/source/contributor/releases.rst @@ -242,3 +242,27 @@ Stable Releases Stable branch releases should be made periodically for each supported stable branch, no less than once every 45 days. + +Transitioning to Unmaintained +============================= + +When an OpenStack release transitions to `Unmaintained +`__, +all references to ``stable/`` need to be changed to +``unmaintained/``. This change needs to be made on the new +unmaintained branch. For example, see +https://review.opendev.org/c/openstack/kayobe/+/968298. + +More recent releases which include upgrade jobs from the unmaintained release +should update their CI configuration to use the +``unmaintained/`` branch. For example, see +https://review.opendev.org/c/openstack/kayobe/+/969411 and +https://review.opendev.org/c/openstack/kayobe/+/970016. 
+ +Transitioning to End of Life (EOL) +================================== + +When an OpenStack release transitions to `End of Life (EOL) +`__, +upgrade jobs in later releases need to be removed. For example, see +https://review.opendev.org/c/openstack/kayobe/+/968296. diff --git a/doc/source/contributor/testing.rst b/doc/source/contributor/testing.rst index adb7a64fa..c2ed7088a 100644 --- a/doc/source/contributor/testing.rst +++ b/doc/source/contributor/testing.rst @@ -18,7 +18,7 @@ running kayobe's tests. sudo apt-get install build-essential python3-dev libssl-dev python3-pip git -* Fedora or CentOS Stream 9/Rocky 9/RHEL 9:: +* Fedora or CentOS Stream 10/Rocky 10/RHEL 10:: sudo dnf install python3-devel openssl-devel python3-pip git gcc @@ -75,10 +75,10 @@ Environments The following tox environments are provided: -alint - Run Ansible linter. ansible Run Ansible tests for some ansible roles using Ansible playbooks. +ansible-lint + Run Ansible linter. ansible-syntax Run a syntax check for all Ansible files. docs diff --git a/doc/source/deployment.rst b/doc/source/deployment.rst index 5f653d5af..5148700a8 100644 --- a/doc/source/deployment.rst +++ b/doc/source/deployment.rst @@ -29,6 +29,35 @@ To bootstrap the Ansible control host:: (kayobe) $ kayobe control host bootstrap +Since the Gazpacho 20.0.0 release it is possible to manage the Ansible control +host's configuration in the same way as other hosts. If using this feature, the +Ansible control host should be added to the Kayobe inventory in the +``ansible-control`` group. Typically this host will be ``localhost``, although +it is also possible to manage an Ansible control host remotely. For example: + +.. code-block:: ini + :caption: ``${KAYOBE_CONFIG_PATH}/inventory/groups`` + + [ansible-control] + localhost + +To configure the Ansible control host OS:: + + (kayobe) $ kayobe control host configure + +.. 
note:: + + If the Ansible control host uses disks that have been in use in a previous + installation, it may be necessary to wipe partition and LVM data from those + disks. To wipe all disks that are not mounted during host configuration:: + + (kayobe) $ kayobe control host configure --wipe-disks + +.. seealso:: + + Information on configuration of hosts is available :ref:`here + `. + .. _physical-network: Physical Network @@ -188,9 +217,10 @@ After this command has completed the seed services will be active. .. note:: - Bifrost deployment behaviour is split between Kayobe and Kolla-Ansible. As - such, you should use both ``--tags kolla-bifrost`` and ``--kolla-tags - bifrost`` if you want to limit to Bifrost deployment. + You can use ``--tags bifrost`` if you want to limit to just the Bifrost + deployment. Note however that using tags is not tested in either Kayobe or + Kolla-Ansible CI, and as such should only be used if you know what you're + doing. Proceed with caution. .. seealso:: diff --git a/doc/source/installation.rst b/doc/source/installation.rst index 01fd80f2f..b301e8d18 100644 --- a/doc/source/installation.rst +++ b/doc/source/installation.rst @@ -15,8 +15,8 @@ Prerequisites Currently Kayobe supports the following Operating Systems on the Ansible control host: -- CentOS Stream 9 (since Zed 13.0.0 release) -- Rocky Linux 9 (since Zed 13.0.0 release) +- CentOS Stream 10 (since Flamingo 19.0.0 release) +- Rocky Linux 10 (since Flamingo 19.0.0 release) - Ubuntu Noble 24.04 (since Dalmatian 17.0.0 release) See the :doc:`support matrix ` for details of supported diff --git a/doc/source/resources.rst b/doc/source/resources.rst index 31b67e565..8874f7fc8 100644 --- a/doc/source/resources.rst +++ b/doc/source/resources.rst @@ -22,8 +22,9 @@ OpenStack using Kolla, Ansible and Kayobe. The guide makes use of baremetal environment running on a single hypervisor. 
To complete the walkthrough you will require a baremetal or VM hypervisor -running CentOS Stream 9, Rocky Linux 9 or Ubuntu Noble 24.04 (since Dalmatian -17.0.0) with at least 32GB RAM & 80GB disk space. Preparing the deployment can -take some time - where possible it is beneficial to snapshot the hypervisor. We -advise making a snapshot after creating the initial 'seed' VM as this will make -additional deployments significantly faster. +running CentOS Stream 10 (since Flamingo 19.0.0), Rocky Linux 10 (since +Flamingo 19.0.0) or Ubuntu Noble 24.04 (since Dalmatian 17.0.0) with at least +32GB RAM & 80GB disk space. Preparing the deployment can take some time - where +possible it is beneficial to snapshot the hypervisor. We advise making a +snapshot after creating the initial 'seed' VM as this will make additional +deployments significantly faster. diff --git a/doc/source/support-matrix.rst b/doc/source/support-matrix.rst index e52fa111c..a11503a91 100644 --- a/doc/source/support-matrix.rst +++ b/doc/source/support-matrix.rst @@ -9,22 +9,23 @@ Supported Operating Systems Kayobe supports the following host Operating Systems (OS): -* Rocky Linux 9 (since Zed 13.0.0 release) +* Rocky Linux 10 (since Flamingo 19.0.0 release) * Ubuntu Noble 24.04 (since Dalmatian 17.0.0 release) -In addition to that CentOS Stream 9 host OS is functional, but not officially -supported. Kolla does not publish CentOS Stream 9 images to Docker Hub/Quay.io, +In addition to that CentOS Stream 10 host OS is functional, but not officially +supported. Kolla does not publish CentOS Stream 10 images to Docker Hub/Quay.io, therefore users need to build them by themselves. .. note:: - CentOS Stream 8 is no longer supported as a host OS. The Yoga release - supports both CentOS Stream 8 and 9, and provides a route for migration. + CentOS Stream 9 is no longer supported as a host OS. The 2025.1 Epoxy + release supports both CentOS Stream 9 and 10 to provide a route for + migration. .. 
note:: - Rocky Linux 8 is no longer supported as a host OS. The Yoga release supports - both Rocky Linux 8 and 9, and provides a route for migration. + Rocky Linux 9 is no longer supported as a host OS. The 2025.1 Epoxy release + supports both Rocky Linux 9 and 10 to provide a route for migration. Supported container images ~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/upgrading.rst b/doc/source/upgrading.rst index bbe6b1d96..b0e221aca 100644 --- a/doc/source/upgrading.rst +++ b/doc/source/upgrading.rst @@ -450,7 +450,13 @@ To upgrade the containerised control plane services:: (kayobe) $ kayobe overcloud service upgrade -It is possible to specify tags for Kayobe and/or kolla-ansible to restrict the +It is possible to specify tags for Kayobe and kolla-ansible to restrict the scope of the upgrade:: - (kayobe) $ kayobe overcloud service upgrade --tags config --kolla-tags keystone + (kayobe) $ kayobe overcloud service upgrade --tags keystone + +.. note:: + + Using tags is not tested in either Kayobe or Kolla-Ansible CI, and as such + should only be used if you know what you're doing. Proceed with caution. + diff --git a/doc/source/usage.rst b/doc/source/usage.rst index a311490dc..94269f959 100644 --- a/doc/source/usage.rst +++ b/doc/source/usage.rst @@ -48,17 +48,26 @@ Environment variable: ``ANSIBLE_VAULT_PASSWORD_FILE`` password from a (plain text) file, with the path to that file being read from the environment. +Whilst the kolla passwords file ``kolla/passwords.yml`` should remain encrypted +at all times it can be useful to view the contents of this file to acquire a +password for a given service. +This can be done with ``ansible-vault view``; however, if an absolute path is not +provided it will cause the command to fail. 
+Therefore, to make reading the contents of this file easier for administrators +it is possible to use ``kayobe overcloud service passwords view`` which will +temporarily decrypt and display the contents of ``kolla/passwords.yml`` for the +active kayobe environment. + Limiting Hosts -------------- Sometimes it may be necessary to limit execution of kayobe or kolla-ansible plays to a subset of the hosts. The ``--limit `` argument allows the -kayobe ansible hosts to be limited. The ``--kolla-limit `` argument -allows the kolla-ansible hosts to be limited. These two options may be -combined in a single command. In both cases, the argument provided should be +kayobe and kolla-ansible hosts to be limited. The argument provided should be an `Ansible host pattern `_, and will -ultimately be passed to ``ansible-playbook`` as a ``--limit`` argument. +ultimately be passed to ``ansible-playbook`` for both kayobe and kolla-ansible +as a ``--limit`` argument. .. _usage-tags: @@ -67,12 +76,15 @@ Tags `Ansible tags `_ provide a useful mechanism for executing a subset of the plays or tasks in a -playbook. The ``--tags `` argument allows execution of kayobe ansible -playbooks to be limited to matching plays and tasks. The ``--kolla-tags -`` argument allows execution of kolla-ansible ansible playbooks to be -limited to matching plays and tasks. The ``--skip-tags `` and -``--kolla-skip-tags `` arguments allow for avoiding execution of matching -plays and tasks. +playbook. The ``--tags `` argument allows execution of kayobe and +kolla-ansible playbooks to be limited to matching plays and tasks. The +``--skip-tags `` argument allows for avoiding execution of matching plays +and tasks. + +.. note:: + + Using tags is not tested in either Kayobe or Kolla-Ansible CI, and as such + should only be used if you know what you're doing. Proceed with caution. 
Check and diff mode ------------------- diff --git a/etc/kayobe/ansible-control.yml b/etc/kayobe/ansible-control.yml new file mode 100644 index 000000000..5232d4470 --- /dev/null +++ b/etc/kayobe/ansible-control.yml @@ -0,0 +1,143 @@ +--- +############################################################################### +# Ansible control host configuration. + +# User with which to access the Ansible control host via SSH during bootstrap, +# in order to setup the Kayobe user account. Default is {{ os_distribution }}. +#ansible_control_bootstrap_user: + +############################################################################### +# Ansible control host network interface configuration. + +# List of networks to which the Ansible control host is attached. +#ansible_control_network_interfaces: + +# List of default networks to which the Ansible control host is attached. +#ansible_control_default_network_interfaces: + +# List of extra networks to which the Ansible control host is attached. +#ansible_control_extra_network_interfaces: + +############################################################################### +# Ansible control host software RAID configuration. + +# List of software RAID arrays. See mrlesmithjr.mdadm role for format. +#ansible_control_mdadm_arrays: + +############################################################################### +# Ansible control host encryption configuration. + +# List of block devices to encrypt. See stackhpc.luks role for format. +#ansible_control_luks_devices: + +############################################################################### +# Ansible control host LVM configuration. + +# List of Ansible control host volume groups. See mrlesmithjr.manage_lvm role +# for format. +#ansible_control_lvm_groups: + +# Default list of Ansible control host volume groups. See +# mrlesmithjr.manage_lvm role for format. +#ansible_control_lvm_groups_default: + +# Additional list of Ansible control host volume groups. 
See +# mrlesmithjr.manage_lvm role for format. +#ansible_control_lvm_groups_extra: + +# Whether a 'data' LVM volume group should exist on the Ansible control host. +# By default this contains a 'docker-volumes' logical volume for Docker volume +# storage. Default is false. +#ansible_control_lvm_group_data_enabled: + +# Ansible control host LVM volume group for data. See mrlesmithjr.manage_lvm +# role for format. +#ansible_control_lvm_group_data: + +# List of disks for use by Ansible control host LVM data volume group. Default +# to an invalid value to require configuration. +#ansible_control_lvm_group_data_disks: + +# List of LVM logical volumes for the data volume group. +#ansible_control_lvm_group_data_lvs: + +# Docker volumes LVM backing volume. +#ansible_control_lvm_group_data_lv_docker_volumes: + +# Size of docker volumes LVM backing volume. +#ansible_control_lvm_group_data_lv_docker_volumes_size: + +# Filesystem for docker volumes LVM backing volume. ext4 allows for shrinking. +#ansible_control_lvm_group_data_lv_docker_volumes_fs: + +############################################################################### +# Ansible control host sysctl configuration. + +# Dict of sysctl parameters to set. +#ansible_control_sysctl_parameters: + +############################################################################### +# Ansible control host tuned configuration. + +# Builtin tuned profile to use. Format is same as that used by giovtorres.tuned +# role. Default is throughput-performance. +#ansible_control_tuned_active_builtin_profile: + +############################################################################### +# Ansible control host user configuration. + +# List of users to create. This should be in a format accepted by the +# singleplatform-eng.users role. +#ansible_control_users: + +############################################################################### +# Ansible control host firewalld configuration. + +# Whether to install and enable firewalld. 
+#ansible_control_firewalld_enabled: + +# A list of zones to create. Each item is a dict containing a 'zone' item. +#ansible_control_firewalld_zones: + +# A firewalld zone to set as the default. Default is unset, in which case the +# default zone will not be changed. +#ansible_control_firewalld_default_zone: + +# A list of firewall rules to apply. Each item is a dict containing arguments +# to pass to the firewalld module. Arguments are omitted if not provided, with +# the following exceptions: +# - offline: true +# - permanent: true +# - state: enabled +#ansible_control_firewalld_rules: + +############################################################################### +# Ansible control host fail2ban configuration. + +# Whether to install and enable fail2ban. +#ansible_control_fail2ban_enabled: + +# List of fail2ban jails for the Ansible control host. +#ansible_control_fail2ban_jail_configuration: + +# List of default fail2ban jails for the Ansible control host. +#ansible_control_fail2ban_jail_configuration_default: + +# List of extra fail2ban jails for the Ansible control host. +#ansible_control_fail2ban_jail_configuration_extra: + +############################################################################### +# Ansible control host swap configuration. + +# List of swap devices. Each item is a dict containing a 'device' item. +#ansible_control_swap: + +############################################################################### +# Ansible control host container engine configuration. + +# Whether a container engine should be configured. Default is false. +#ansible_control_container_engine_enabled: + +############################################################################### +# Dummy variable to allow Ansible to accept this file. 
+workaround_ansible_issue_8743: yes diff --git a/etc/kayobe/bifrost.yml b/etc/kayobe/bifrost.yml index 741bbd8ca..d65c9d27d 100644 --- a/etc/kayobe/bifrost.yml +++ b/etc/kayobe/bifrost.yml @@ -4,6 +4,9 @@ ############################################################################### # Bifrost installation. +# Whether to install Bifrost. Default is true. +#kolla_enable_bifrost: + # URL of Bifrost source code repository. #kolla_bifrost_source_url: @@ -66,7 +69,7 @@ # UUID of the root filesystem contained within the deployment image. # See below URL for instructions on how to extract it: # https://docs.openstack.org/ironic/latest/admin/raid.html#image-requirements -# Default is none. +# Default is an empty string. #kolla_bifrost_deploy_image_rootfs: # Custom cloud-init user-data passed to deploy of the deployment image. @@ -87,12 +90,16 @@ # Ironic Inspector configuration. # List of of inspector processing plugins. -#kolla_bifrost_inspector_processing_hooks: +#kolla_bifrost_inspector_hooks: # Which MAC addresses to add as ports during introspection. One of 'all', # 'active' or 'pxe'. #kolla_bifrost_inspector_port_addition: +# Which ports to keep after introspection. One of 'all', 'present', or 'added'. +# Default follows Bifrost's default of 'present'. +#kolla_bifrost_inspector_keep_ports: + # List of extra kernel parameters for the inspector default PXE configuration. # Default is {{ inspector_extra_kernel_options }}, defined in inspector.yml. # When customising this variable, the default extra kernel parameters should be @@ -124,9 +131,6 @@ # Ironic inspector deployment ramdisk location. #kolla_bifrost_inspector_deploy_ramdisk: -# Ironic inspector legacy deployment kernel location. -#kolla_bifrost_inspector_legacy_deploy_kernel: - # Timeout of hardware inspection on overcloud nodes, in seconds. Default is # {{ inspector_inspection_timeout }}. 
#kolla_bifrost_inspection_timeout: diff --git a/etc/kayobe/compute.yml b/etc/kayobe/compute.yml index 5572bbe00..5240624f1 100644 --- a/etc/kayobe/compute.yml +++ b/etc/kayobe/compute.yml @@ -2,8 +2,9 @@ ############################################################################### # Compute node configuration. -# User with which to access the computes via SSH during bootstrap, in order -# to setup the Kayobe user account. Default is {{ os_distribution }}. +# User with which to access the compute nodes via SSH during bootstrap, in +# order to setup the Kayobe user account. Default is 'cloud-user' if +# os_distribution is set to centos, otherwise 'os_distribution'. #compute_bootstrap_user: ############################################################################### @@ -158,6 +159,21 @@ # - state: enabled #compute_firewalld_rules: +############################################################################### +# Compute node fail2ban configuration. + +# Whether to install and enable fail2ban. +#compute_fail2ban_enabled: + +# List of fail2ban jails for the compute node. +#compute_fail2ban_jail_configuration: + +# List of default fail2ban jails for the compute node. +#compute_fail2ban_jail_configuration_default: + +# List of extra fail2ban jails for the compute node. +#compute_fail2ban_jail_configuration_extra: + ############################################################################### # Compute node host libvirt configuration. @@ -208,9 +224,15 @@ #compute_libvirt_ceph_repo_install: # Ceph package repository release to install on CentOS and Rocky hosts when -# compute_libvirt_ceph_repo_install is true. Default is 'pacific'. +# compute_libvirt_ceph_repo_install is true. Default is 'squid'. #compute_libvirt_ceph_repo_release: +############################################################################### +# Compute node swap configuration. + +# List of swap devices. Each item is a dict containing a 'device' item. 
+#compute_swap: + ############################################################################### # Dummy variable to allow Ansible to accept this file. workaround_ansible_issue_8743: yes diff --git a/etc/kayobe/controllers.yml b/etc/kayobe/controllers.yml index d974cc6b1..7dd3199f2 100644 --- a/etc/kayobe/controllers.yml +++ b/etc/kayobe/controllers.yml @@ -3,7 +3,8 @@ # Controller node configuration. # User with which to access the controllers via SSH during bootstrap, in order -# to setup the Kayobe user account. Default is {{ os_distribution }}. +# to setup the Kayobe user account. Default is 'cloud-user' if os_distribution +# is set to centos, otherwise 'os_distribution'. #controller_bootstrap_user: ############################################################################### @@ -105,8 +106,7 @@ # Whether a 'data' LVM volume group should exist on controller hosts. By # default this contains a 'docker-volumes' logical volume for Docker volume -# storage. -# Default is false. +# storage. Default is false. #controller_lvm_group_data_enabled: # Controller LVM volume group for data. See mrlesmithjr.manage_lvm role for @@ -117,18 +117,33 @@ # invalid value to require configuration. #controller_lvm_group_data_disks: +# List of LVM logical volumes for the data volume group when using docker. +#controller_lvm_group_data_docker_lvs: + +# List of LVM logical volumes for the data volume group when using podman. +#controller_lvm_group_data_podman_lvs: + # List of LVM logical volumes for the data volume group. #controller_lvm_group_data_lvs: # Docker volumes LVM backing volume. #controller_lvm_group_data_lv_docker_volumes: +# Podman volumes LVM backing volume. +#controller_lvm_group_data_lv_podman_volumes: + # Size of docker volumes LVM backing volume. #controller_lvm_group_data_lv_docker_volumes_size: # Filesystem for docker volumes LVM backing volume. ext4 allows for shrinking. 
#controller_lvm_group_data_lv_docker_volumes_fs: +# Size of podman volumes LVM backing volume. +#controller_lvm_group_data_lv_podman_volumes_size: + +# Filesystem for podman volumes LVM backing volume. ext4 allows for shrinking. +#controller_lvm_group_data_lv_podman_volumes_fs: + ############################################################################### # Controller node sysctl configuration. @@ -170,6 +185,27 @@ # - state: enabled #controller_firewalld_rules: +############################################################################### +# Controller node fail2ban configuration. + +# Whether to install and enable fail2ban. +#controller_fail2ban_enabled: + +# List of fail2ban jails for the controller node. +#controller_fail2ban_jail_configuration: + +# List of default fail2ban jails for the controller node. +#controller_fail2ban_jail_configuration_default: + +# List of extra fail2ban jails for the controller node. +#controller_fail2ban_jail_configuration_extra: + +############################################################################### +# Controller node swap configuration. + +# List of swap devices. Each item is a dict containing a 'device' item. +#controller_swap: + ############################################################################### # Dummy variable to allow Ansible to accept this file. workaround_ansible_issue_8743: yes diff --git a/etc/kayobe/globals.yml b/etc/kayobe/globals.yml index ce4570f39..498b5886b 100644 --- a/etc/kayobe/globals.yml +++ b/etc/kayobe/globals.yml @@ -48,8 +48,8 @@ # is "rocky". #os_distribution: -# OS release. Valid options are "9-stream" when os_distribution is "centos", or -# "9" when os_distribution is "rocky", or "noble" when os_distribution is +# OS release. Valid options are "10-stream" when os_distribution is "centos", +# "10" when os_distribution is "rocky", or "noble" when os_distribution is # "ubuntu". 
#os_release: diff --git a/etc/kayobe/infra-vms.yml b/etc/kayobe/infra-vms.yml index cbfa34eae..50362e59d 100644 --- a/etc/kayobe/infra-vms.yml +++ b/etc/kayobe/infra-vms.yml @@ -31,11 +31,10 @@ # Base image for the infra VM root volume. Default is # "https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img" -# when os_distribution is "ubuntu", or -# https://dl.rockylinux.org/pub/rocky/9/images/x86_64/Rocky-9-GenericCloud.latest.x86_64.qcow2 -# when os_distribution is "rocky", -# or -# "https://cloud.centos.org/centos/9-stream/x86_64/images/CentOS-Stream-GenericCloud-9-20221206.0.x86_64.qcow2" +# when os_distribution is "ubuntu", +# "https://dl.rockylinux.org/pub/rocky/10/images/x86_64/Rocky-10-GenericCloud-Base.latest.x86_64.qcow2" +# when os_distribution is "rocky", or +# "https://cloud.centos.org/centos/10-stream/x86_64/images/CentOS-Stream-GenericCloud-x86_64-10-latest.x86_64.qcow2" # otherwise. #infra_vm_root_image: @@ -68,7 +67,8 @@ # Infrastructure VM node configuration. # User with which to access the infrastructure vm via SSH during bootstrap, in -# order to setup the Kayobe user account. +# order to setup the Kayobe user account. Default is 'cloud-user' if +# os_distribution is set to centos, otherwise 'os_distribution'. #infra_vm_bootstrap_user: ############################################################################### @@ -123,6 +123,12 @@ # an invalid value to require configuration. #infra_vm_lvm_group_data_disks: +# List of LVM logical volumes for the data volume group when using docker. +#infra_vm_lvm_group_data_docker_lvs: + +# List of LVM logical volumes for the data volume group when using podman. +#infra_vm_lvm_group_data_podman_lvs: + # List of LVM logical volumes for the data volume group. #infra_vm_lvm_group_data_lvs: @@ -135,6 +141,12 @@ # Filesystem for docker volumes LVM backing volume. ext4 allows for shrinking. #infra_vm_lvm_group_data_lv_docker_volumes_fs: +# Size of podman volumes LVM backing volume. 
+#infra_vm_lvm_group_data_lv_podman_volumes_size: + +# Filesystem for podman volumes LVM backing volume. ext4 allows for shrinking. +#infra_vm_lvm_group_data_lv_podman_volumes_fs: + ############################################################################### # Infrastructure VM node sysctl configuration. @@ -176,6 +188,27 @@ # - state: enabled #infra_vm_firewalld_rules: +############################################################################### +# Infrastructure VM node fail2ban configuration. + +# Whether to install and enable fail2ban. +#infra_vm_fail2ban_enabled: + +# List of fail2ban jails for the infrastructure VM node. +#infra_vm_fail2ban_jail_configuration: + +# List of default fail2ban jails for the infrastructure VM node. +#infra_vm_fail2ban_jail_configuration_default: + +# List of extra fail2ban jails for the infrastructure VM node. +#infra_vm_fail2ban_jail_configuration_extra: + +############################################################################### +# Infrastructure VM node swap configuration. + +# List of swap devices. Each item is a dict containing a 'device' item. +#infra_vm_swap: + ############################################################################### # Dummy variable to allow Ansible to accept this file. workaround_ansible_issue_8743: yes diff --git a/etc/kayobe/inspector.yml b/etc/kayobe/inspector.yml index 123481a5f..926316b71 100644 --- a/etc/kayobe/inspector.yml +++ b/etc/kayobe/inspector.yml @@ -31,32 +31,40 @@ #inspector_ipa_ramdisk_checksum_algorithm: ############################################################################### -# Ironic inspector processing configuration. +# Ironic inspector processing configuration for the inspector implementation +# built-in to Ironic. -# List of of default inspector processing plugins. -#inspector_processing_hooks_default: +# List of hooks to enable for inspection. Default is [$default_hooks, memory, +# boot-mode, cpu-capabilities, pci-devices, parse-lldp]. 
+#inspector_hooks_default: -# List of of additional inspector processing plugins. -#inspector_processing_hooks_extra: +# List of extra inspection hooks to enable. Default is an empty list. +#inspector_hooks_extra: -# List of of additional inspector processing plugins. -#inspector_processing_hooks: +# List of additional inspector hooks to enable. Default is +# {{ inspector_hooks_default + inspector_hooks_extra }}. +#inspector_hooks: + +############################################################################### +# Common Ironic Inspector processing configuration. # Which MAC addresses to add as ports during introspection. One of 'all', -# 'active' or 'pxe'. +# 'active' or 'pxe'. Default is 'pxe'. #inspector_add_ports: # Which ports to keep after introspection. One of 'all', 'present', or 'added'. +# Default is 'added'. #inspector_keep_ports: -# Whether to enable discovery of nodes not managed by Ironic. +# Whether to enable discovery of nodes not managed by Ironic. Default is true. #inspector_enable_discovery: -# The Ironic driver with which to register newly discovered nodes. +# The Ironic driver with which to register newly discovered nodes. Default is +# 'ipmi'. #inspector_discovery_enroll_node_driver: ############################################################################### -# Ironic inspector configuration. +# Ironic inspector introspection rules configuration. # Ironic inspector option to enable IPMI rules. Set to 'True' by default. #inspector_rules_ipmi_enabled: @@ -90,9 +98,6 @@ # Redfish CA setting. Set to 'True' by default #inspector_rule_var_redfish_verify_ca: -############################################################################### -# Ironic inspector introspection rules configuration. - # Ironic inspector rule to set IPMI credentials. #inspector_rule_ipmi_credentials: @@ -102,9 +107,6 @@ # Ironic inspector rule to set deployment ramdisk. #inspector_rule_deploy_ramdisk: -# Ironic inspector rule to initialise root device hints. 
-#inspector_rule_root_hint_init: - # Ironic inspector rule to set serial root device hint. #inspector_rule_root_hint_serial: @@ -143,17 +145,6 @@ # applied. #inspector_dell_switch_lldp_workaround_group: -############################################################################### -# Inspection store configuration. -# The inspection store provides a Swift-like service for storing inspection -# data which may be useful in environments without Swift. - -# Whether the inspection data store is enabled. -#inspector_store_enabled: - -# Port on which the inspection data store should listen. -#inspector_store_port: - ############################################################################### # Dummy variable to allow Ansible to accept this file. workaround_ansible_issue_8743: yes diff --git a/etc/kayobe/inventory/group_vars/ansible-control/ansible-python-interpreter b/etc/kayobe/inventory/group_vars/ansible-control/ansible-python-interpreter new file mode 100644 index 000000000..54abbf23c --- /dev/null +++ b/etc/kayobe/inventory/group_vars/ansible-control/ansible-python-interpreter @@ -0,0 +1,3 @@ +--- +# Use a virtual environment for remote operations. +ansible_python_interpreter: "{{ virtualenv_path }}/kayobe/bin/python" diff --git a/etc/kayobe/inventory/group_vars/ansible-control/network-interfaces b/etc/kayobe/inventory/group_vars/ansible-control/network-interfaces new file mode 100644 index 000000000..6880128da --- /dev/null +++ b/etc/kayobe/inventory/group_vars/ansible-control/network-interfaces @@ -0,0 +1,20 @@ +--- +############################################################################### +# Network interface definitions for the ansible-control group. + +# NOTE: The content of this section is very deployment-specific, since it +# depends on the names and types of networks in the deployment. It should +# define the group-specific attributes of networks. 
The following example shows +# a basic configuration for a network called "example": +# +# example_interface: eth0 +# +# Global network attributes such as subnet CIDRs are typically configured in +# etc/kayobe/networks.yml. +# +# Further information on the available network attributes is provided in the +# network configuration reference in the Kayobe documentation. + +############################################################################### +# Dummy variable to allow Ansible to accept this file. +workaround_ansible_issue_8743: yes diff --git a/etc/kayobe/inventory/groups b/etc/kayobe/inventory/groups index a870bc8b5..fee8c48e7 100644 --- a/etc/kayobe/inventory/groups +++ b/etc/kayobe/inventory/groups @@ -1,6 +1,12 @@ # Kayobe groups inventory file. This file should generally not be modified. # If declares the top-level groups and sub-groups. +############################################################################### +# Ansible control host groups. + +[ansible-control] +# Empty group to provide declaration of ansible-control group. + ############################################################################### # Seed groups. @@ -73,6 +79,7 @@ network monitoring storage compute +ansible-control [docker-registry:children] # Hosts in this group will have a Docker Registry deployed. This group should @@ -85,6 +92,8 @@ seed seed seed-hypervisor overcloud +infra-vms +ansible-control ############################################################################### # Baremetal compute node groups. diff --git a/etc/kayobe/ipa.yml b/etc/kayobe/ipa.yml index 00a9b9e97..bfe675d97 100644 --- a/etc/kayobe/ipa.yml +++ b/etc/kayobe/ipa.yml @@ -19,13 +19,13 @@ # Version of IPA builder source repository. Default is {{ openstack_branch }}. #ipa_builder_source_version: -# List of additional build host packages to install. Default is an empty list. +# List of additional build host packages to install. Default is [ 'zstd' ]. 
#ipa_build_dib_host_packages_extra: # List of default Diskimage Builder (DIB) elements to use when building IPA -# images. Default is ["centos", "dynamic-login", "enable-serial-console", -# "ironic-python-agent-ramdisk"] when os_distribution is "rocky", and -# ["ubuntu", "dynamic-login", "enable-serial-console", +# images. Default is ["rocky-container", "dynamic-login", +# "enable-serial-console", "ironic-python-agent-ramdisk"] when os_distribution +# is "rocky", and ["ubuntu", "dynamic-login", "enable-serial-console", # "ironic-python-agent-ramdisk"] otherwise. #ipa_build_dib_elements_default: @@ -64,7 +64,8 @@ # ipa_build_dib_git_elements_default and ipa_build_dib_git_elements_extra. #ipa_build_dib_git_elements: -# List of DIB packages to install. Default is none. +# List of DIB packages to install. Default is ["python3-yaml"] when +# os_distribution is "rocky", otherwise []. #ipa_build_dib_packages: # Upper constraints file for installing packages in the virtual environment diff --git a/etc/kayobe/ironic.yml b/etc/kayobe/ironic.yml index 1298fcb67..442c5411f 100644 --- a/etc/kayobe/ironic.yml +++ b/etc/kayobe/ironic.yml @@ -94,6 +94,9 @@ # Name of the Neutron network to use for cleaning. #kolla_ironic_cleaning_network: +# Name of the Neutron network to use for inspection. +#kolla_ironic_inspection_network: + # Name of the Neutron network to use for provisioning. #kolla_ironic_provisioning_network: @@ -106,6 +109,14 @@ # List of kernel parameters to append for baremetal PXE boot. #kolla_ironic_pxe_append_params: +# List of default kernel parameters to append for baremetal boot. +#kolla_ironic_kernel_append_params_default: + +# List of additional kernel parameters to append for baremetal boot. +#kolla_ironic_kernel_append_params_extra: + +# List of kernel parameters to append for baremetal boot. 
+#kolla_ironic_kernel_append_params: ############################################################################### # Ironic Node Configuration diff --git a/etc/kayobe/kolla.yml b/etc/kayobe/kolla.yml index 929fc6d31..6eb02a5ec 100644 --- a/etc/kayobe/kolla.yml +++ b/etc/kayobe/kolla.yml @@ -86,7 +86,7 @@ # case Quay.io will be used. #kolla_docker_registry: -# Docker namespace to use for Kolla images. Default is 'kolla'. +# Docker namespace to use for Kolla images. Default is 'openstack.kolla'. #kolla_docker_namespace: # Username to use to access a docker registry. Default is not set, in which @@ -343,15 +343,14 @@ #kolla_enable_horizon_octavia: #kolla_enable_horizon_tacker: #kolla_enable_horizon_trove: -#kolla_enable_horizon_venus: #kolla_enable_horizon_watcher: #kolla_enable_horizon_zun: #kolla_enable_influxdb: #kolla_enable_ironic: #kolla_enable_ironic_dnsmasq: -#kolla_enable_ironic_inspector: #kolla_enable_ironic_neutron_agent: #kolla_enable_ironic_prometheus_exporter: +#kolla_enable_ironic_pxe_filter: #kolla_enable_iscsid: #kolla_enable_keepalived: #kolla_enable_keystone: @@ -435,13 +434,12 @@ #kolla_enable_prometheus_server: #kolla_enable_proxysql: #kolla_enable_rabbitmq: -#kolla_enable_redis: #kolla_enable_skyline: #kolla_enable_tacker: #kolla_enable_telegraf: #kolla_enable_trove: #kolla_enable_trove_singletenant: -#kolla_enable_venus: +#kolla_enable_valkey: #kolla_enable_watcher: #kolla_enable_zun: diff --git a/etc/kayobe/monitoring.yml b/etc/kayobe/monitoring.yml index 5468936d3..463b3090b 100644 --- a/etc/kayobe/monitoring.yml +++ b/etc/kayobe/monitoring.yml @@ -3,7 +3,8 @@ # Monitoring node configuration. # User with which to access the monitoring nodes via SSH during bootstrap, in -# order to setup the Kayobe user account. +# order to setup the Kayobe user account. Default is 'cloud-user' if +# os_distribution is set to centos, otherwise 'os_distribution'. 
#monitoring_bootstrap_user: ############################################################################### @@ -116,6 +117,27 @@ # - state: enabled #monitoring_firewalld_rules: +############################################################################### +# Monitoring node fail2ban configuration. + +# Whether to install and enable fail2ban. +#monitoring_fail2ban_enabled: + +# List of fail2ban jails for the monitoring node. +#monitoring_fail2ban_jail_configuration: + +# List of default fail2ban jails for the monitoring node. +#monitoring_fail2ban_jail_configuration_default: + +# List of extra fail2ban jails for the monitoring node. +#monitoring_fail2ban_jail_configuration_extra: + +############################################################################### +# Monitoring node swap configuration. + +# List of swap devices. Each item is a dict containing a 'device' item. +#monitoring_swap: + ############################################################################### # Dummy variable to allow Ansible to accept this file. workaround_ansible_issue_8743: yes diff --git a/etc/kayobe/networks.yml b/etc/kayobe/networks.yml index 17c9028c4..2132fd179 100644 --- a/etc/kayobe/networks.yml +++ b/etc/kayobe/networks.yml @@ -106,6 +106,9 @@ ############################################################################### # Network connectivity check configuration. +# Whether to skip the external network connectivity check. Default is false. +#nc_skip_external_net: + # External IP address to check. Default is 8.8.8.8. #nc_external_ip: diff --git a/etc/kayobe/openstack.yml b/etc/kayobe/openstack.yml index 081e4be24..1acfd0756 100644 --- a/etc/kayobe/openstack.yml +++ b/etc/kayobe/openstack.yml @@ -2,10 +2,10 @@ ############################################################################### # OpenStack release configuration. -# Name of the current OpenStack release. Default is "2025.1". +# Name of the current OpenStack release. Default is "master". 
#openstack_release: -# Name of the current OpenStack branch. Default is "stable/2025.1". +# Name of the current OpenStack branch. Default is "master". #openstack_branch: ############################################################################### diff --git a/etc/kayobe/seed-hypervisor.yml b/etc/kayobe/seed-hypervisor.yml index dd8fbca23..8a063703b 100644 --- a/etc/kayobe/seed-hypervisor.yml +++ b/etc/kayobe/seed-hypervisor.yml @@ -3,7 +3,8 @@ # Seed hypervisor node configuration. # User with which to access the seed hypervisor via SSH during bootstrap, in -# order to setup the Kayobe user account. Default is {{ os_distribution }}. +# order to setup the Kayobe user account. Default is 'cloud-user' if +# os_distribution is set to centos, otherwise 'os_distribution'. #seed_hypervisor_bootstrap_user: ############################################################################### @@ -135,6 +136,27 @@ # - state: enabled #seed_hypervisor_firewalld_rules: +############################################################################### +# Seed hypervisor node fail2ban configuration. + +# Whether to install and enable fail2ban. +#seed_hypervisor_fail2ban_enabled: + +# List of fail2ban jails for the seed hypervisor node. +#seed_hypervisor_fail2ban_jail_configuration: + +# List of default fail2ban jails for the seed hypervisor node. +#seed_hypervisor_fail2ban_jail_configuration_default: + +# List of extra fail2ban jails for the seed hypervisor node. +#seed_hypervisor_fail2ban_jail_configuration_extra: + +############################################################################### +# Seed hypervisor node swap configuration. + +# List of swap devices. Each item is a dict containing a 'device' item. +#seed_hypervisor_swap: + ############################################################################### # Dummy variable to allow Ansible to accept this file. 
workaround_ansible_issue_8743: yes diff --git a/etc/kayobe/seed-vm.yml b/etc/kayobe/seed-vm.yml index aa805d847..86be2708f 100644 --- a/etc/kayobe/seed-vm.yml +++ b/etc/kayobe/seed-vm.yml @@ -32,10 +32,9 @@ # Base image for the seed VM root volume. Default is # "https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img" # when os_distribution is "ubuntu", -# https://dl.rockylinux.org/pub/rocky/9/images/x86_64/Rocky-9-GenericCloud.latest.x86_64.qcow2 -# when os_distribution is "rocky", -# or -# "https://cloud.centos.org/centos/9-stream/x86_64/images/CentOS-Stream-GenericCloud-9-20221206.0.x86_64.qcow2" +# "https://dl.rockylinux.org/pub/rocky/10/images/x86_64/Rocky-10-GenericCloud-Base.latest.x86_64.qcow2" +# when os_distribution is "rocky", or +# "https://cloud.centos.org/centos/10-stream/x86_64/images/CentOS-Stream-GenericCloud-x86_64-10-latest.x86_64.qcow2" # otherwise. #seed_vm_root_image: diff --git a/etc/kayobe/seed.yml b/etc/kayobe/seed.yml index 541c07808..46f1cddc6 100644 --- a/etc/kayobe/seed.yml +++ b/etc/kayobe/seed.yml @@ -3,7 +3,8 @@ # Seed node configuration. # User with which to access the seed via SSH during bootstrap, in order to -# setup the Kayobe user account. Default is {{ os_distribution }}. +# setup the Kayobe user account. Default is 'cloud-user' if os_distribution is +# set to centos, otherwise 'os_distribution'. #seed_bootstrap_user: ############################################################################### @@ -59,6 +60,12 @@ # value to require configuration. #seed_lvm_group_data_disks: +# List of LVM logical volumes for the data volume group when using docker. +#seed_lvm_group_data_docker_lvs: + +# List of LVM logical volumes for the data volume group when using podman. +#seed_lvm_group_data_podman_lvs: + # List of LVM logical volumes for the data volume group. #seed_lvm_group_data_lvs: @@ -71,6 +78,12 @@ # Filesystem for docker volumes LVM backing volume. ext4 allows for shrinking. 
#seed_lvm_group_data_lv_docker_volumes_fs: +# Size of podman volumes LVM backing volume. +#seed_lvm_group_data_lv_podman_volumes_size: + +# Filesystem for podman volumes LVM backing volume. ext4 allows for shrinking. +#seed_lvm_group_data_lv_podman_volumes_fs: + ############################################################################### # Seed node sysctl configuration. @@ -98,9 +111,10 @@ # Example: # seed_containers: # squid: -# image: "stackhpc/squid:3.5.20-1" +# image: "docker.io/stackhpc/squid" # pre: "{{ kayobe_env_config_path }}/containers/squid/pre.yml" # post: "{{ kayobe_env_config_path }}/containers/squid/post.yml" +# tag: "3.5.20-1" # #seed_containers: @@ -129,6 +143,27 @@ # - state: enabled #seed_firewalld_rules: +############################################################################### +# Seed node fail2ban configuration. + +# Whether to install and enable fail2ban. +#seed_fail2ban_enabled: + +# List of fail2ban jails for the seed node. +#seed_fail2ban_jail_configuration: + +# List of default fail2ban jails for the seed node. +#seed_fail2ban_jail_configuration_default: + +# List of extra fail2ban jails for the seed node. +#seed_fail2ban_jail_configuration_extra: + +############################################################################### +# Seed node swap configuration. + +# List of swap devices. Each item is a dict containing a 'device' item. +#seed_swap: + ############################################################################### # Dummy variable to allow Ansible to accept this file. workaround_ansible_issue_8743: yes diff --git a/etc/kayobe/storage.yml b/etc/kayobe/storage.yml index 2cdac5bed..53376b4ef 100644 --- a/etc/kayobe/storage.yml +++ b/etc/kayobe/storage.yml @@ -2,8 +2,9 @@ ############################################################################### # Storage node configuration. -# User with which to access the storages via SSH during bootstrap, in order -# to setup the Kayobe user account. 
Default is {{ os_distribution }}. +# User with which to access the storage nodes via SSH during bootstrap, in +# order to setup the Kayobe user account. Default is 'cloud-user' if +# os_distribution is set to centos, otherwise 'os_distribution'. #storage_bootstrap_user: ############################################################################### @@ -146,6 +147,27 @@ # - state: enabled #storage_firewalld_rules: +############################################################################### +# Storage node fail2ban configuration. + +# Whether to install and enable fail2ban. +#storage_fail2ban_enabled: + +# List of fail2ban jails for the storage node. +#storage_fail2ban_jail_configuration: + +# List of default fail2ban jails for the storage node. +#storage_fail2ban_jail_configuration_default: + +# List of extra fail2ban jails for the storage node. +#storage_fail2ban_jail_configuration_extra: + +############################################################################### +# Storage node swap configuration. + +# List of swap devices. Each item is a dict containing a 'device' item. +#storage_swap: + ############################################################################### # Dummy variable to allow Ansible to accept this file. workaround_ansible_issue_8743: yes diff --git a/kayobe/ansible.py b/kayobe/ansible.py index 882e2385f..6abd416a3 100644 --- a/kayobe/ansible.py +++ b/kayobe/ansible.py @@ -21,7 +21,7 @@ import sys import tempfile -from ansible.parsing.yaml.objects import AnsibleVaultEncryptedUnicode +from ansible.parsing.vault import EncryptedString from kayobe import exception from kayobe import utils @@ -213,6 +213,8 @@ def build_args(parsed_args, playbooks, cmd += ["--skip-tags", parsed_args.skip_tags] if parsed_args.tags or tags: all_tags = [t for t in [parsed_args.tags, tags] if t] + # Always run kayobe-generate-config (unless the tag is skipped). 
+ all_tags += ["kayobe-generate-config"] cmd += ["--tags", ",".join(all_tags)] cmd += playbooks return cmd @@ -222,6 +224,9 @@ def _get_environment(parsed_args, external_playbook=False): """Return an environment dict for executing an Ansible playbook.""" env = os.environ.copy() vault.update_environment(parsed_args, env) + # TODO(wszusmki): Kayobe still uses broken conditions. Work on fixing these + # and remove when that work is complete. + env.setdefault("ANSIBLE_ALLOW_BROKEN_CONDITIONALS", "true") # If the configuration path has been specified via --config-path, ensure # the environment variable is set, so that it can be referenced by # playbooks. @@ -340,7 +345,7 @@ def run_playbook(parsed_args, playbook, *args, **kwargs): def _sanitise_hostvar(var): """Sanitise a host variable.""" - if isinstance(var, AnsibleVaultEncryptedUnicode): + if isinstance(var, EncryptedString): return "******" # Recursively sanitise dicts and lists. if isinstance(var, dict): diff --git a/kayobe/cli/commands.py b/kayobe/cli/commands.py index be233af7f..a0213be70 100644 --- a/kayobe/cli/commands.py +++ b/kayobe/cli/commands.py @@ -14,6 +14,7 @@ import glob import json +import logging import os import re import sys @@ -30,6 +31,8 @@ # This is set to an arbitrary large number to simplify the sorting logic DEFAULT_SEQUENCE_NUMBER = sys.maxsize +LOG = logging.getLogger(__name__) + def _build_playbook_list(*playbooks): """Return a list of names of playbook files given their basenames.""" @@ -115,6 +118,31 @@ def generate_kolla_ansible_config(self, parsed_args, install=False, self.run_kayobe_playbooks(parsed_args, playbooks, ignore_limit=True, check=False) + def handle_kolla_tags_limits_deprecation(self, parsed_args): + if (parsed_args.kolla_limit or parsed_args.kolla_tags or + parsed_args.kolla_skip_tags): + self.app.LOG.warning("The use of --kolla-tags, --kolla-limit, and " + "--kolla-skip-tags is deprecated. 
Please " + "switch to just using --tags, --limit, or " + "--skip-tags, these are now passed into " + "kolla-ansible too. Kolla tags/limit will be " + "removed in the next release.") + if parsed_args.limit and parsed_args.kolla_limit: + self.app.LOG.error("You can no longer use both --limit and " + "--kolla-limit at the same time. Please switch " + "to just using --limit") + sys.exit(1) + if parsed_args.tags and parsed_args.kolla_tags: + self.app.LOG.error("You can no longer use both --tags and " + "--kolla-tags at the same time. Please switch " + "to just using --tags") + sys.exit(1) + if parsed_args.skip_tags and parsed_args.kolla_skip_tags: + self.app.LOG.error("You can no longer use both --skip-tags and " + "--kolla-skip-tags at the same time. Please " + "switch to just using --skip-tags") + sys.exit(1) + class KollaAnsibleMixin(object): """Mixin class for commands running Kolla Ansible.""" @@ -277,6 +305,7 @@ def get_parser(self, prog_name): def take_action(self, parsed_args): self.app.LOG.debug("Bootstrapping Kayobe Ansible control host") + self.handle_kolla_tags_limits_deprecation(parsed_args) ansible.install_galaxy_roles(parsed_args) ansible.install_galaxy_collections(parsed_args) playbooks = _build_playbook_list("bootstrap") @@ -310,6 +339,106 @@ def take_action(self, parsed_args): self.run_kayobe_playbooks(parsed_args, playbooks) +class ControlHostConfigure(KayobeAnsibleMixin, VaultMixin, Command): + """Configure the Ansible control host OS and services. + + * Allocate IP addresses for all configured networks. + * Add the host to SSH known hosts. + * Configure a user account for use by kayobe for SSH access. + * Configure proxy settings. + * Configure package repos. + * Configure a PyPI mirror. + * Optionally, create a virtualenv for remote target hosts. + * Optionally, wipe unmounted disk partitions (--wipe-disks). + * Configure user accounts, group associations, and authorised SSH keys. + * Configure SELinux. + * Configure the host's network interfaces. 
+ * Configure a firewall. + * Configure tuned profile. + * Set sysctl parameters. + * Configure timezone and ntp. + * Optionally, configure software RAID arrays. + * Optionally, configure encryption. + * Configure LVM volumes. + * Configure swap. + * Optionally, configure a container engine. + """ + + def get_parser(self, prog_name): + parser = super(ControlHostConfigure, self).get_parser(prog_name) + group = parser.add_argument_group("Host Configuration") + group.add_argument("--wipe-disks", action='store_true', + help="wipe partition and LVM data from all disks " + "that are not mounted. Warning: this can " + "result in the loss of data") + return parser + + def take_action(self, parsed_args): + self.app.LOG.debug("Configuring Ansible control host OS") + + # Allocate IP addresses. + playbooks = _build_playbook_list("ip-allocation") + self.run_kayobe_playbooks(parsed_args, playbooks, + limit="ansible-control") + + # Kayobe playbooks. + kwargs = {} + if parsed_args.wipe_disks: + kwargs["extra_vars"] = {"wipe_disks": True} + playbooks = _build_playbook_list("control-host-configure") + self.run_kayobe_playbooks(parsed_args, playbooks, + limit="ansible-control", **kwargs) + + +class ControlHostCommandRun(KayobeAnsibleMixin, VaultMixin, Command): + """Run command on the Ansible control host.""" + + def get_parser(self, prog_name): + parser = super(ControlHostCommandRun, self).get_parser(prog_name) + group = parser.add_argument_group("Host Command Run") + group.add_argument("--command", required=True, + help="Command to run (required).") + group.add_argument("--show-output", action='store_true', + help="Show command output") + return parser + + def take_action(self, parsed_args): + self.app.LOG.debug("Run command on Ansible control host") + extra_vars = { + "host_command_to_run": utils.escape_jinja(parsed_args.command), + "show_output": parsed_args.show_output} + playbooks = _build_playbook_list("host-command-run") + self.run_kayobe_playbooks(parsed_args, playbooks, + 
limit="ansible-control", + extra_vars=extra_vars) + + +class ControlHostPackageUpdate(KayobeAnsibleMixin, VaultMixin, Command): + """Update packages on the Ansible control host.""" + + def get_parser(self, prog_name): + parser = super(ControlHostPackageUpdate, self).get_parser(prog_name) + group = parser.add_argument_group("Host Package Updates") + group.add_argument("--packages", required=True, + help="List of packages to update. Use '*' to " + "update all packages.") + group.add_argument("--security", action='store_true', + help="Only install updates that have been marked " + "security related.") + return parser + + def take_action(self, parsed_args): + self.app.LOG.debug("Updating Ansible control host packages") + extra_vars = { + "host_package_update_packages": parsed_args.packages, + "host_package_update_security": parsed_args.security, + } + playbooks = _build_playbook_list("host-package-update") + self.run_kayobe_playbooks(parsed_args, playbooks, + limit="ansible-control", + extra_vars=extra_vars) + + class ControlHostUpgrade(KayobeAnsibleMixin, VaultMixin, Command): """Upgrade the Kayobe control environment. @@ -403,6 +532,8 @@ def add_kolla_ansible_args(self, group): def take_action(self, parsed_args): self.app.LOG.debug("Running Kolla Ansible command") + self.handle_kolla_tags_limits_deprecation(parsed_args) + # First prepare configuration. 
self.generate_kolla_ansible_config(parsed_args) @@ -583,6 +714,7 @@ class SeedVMProvision(KollaAnsibleMixin, KayobeAnsibleMixin, VaultMixin, def take_action(self, parsed_args): self.app.LOG.debug("Provisioning seed VM") + self.handle_kolla_tags_limits_deprecation(parsed_args) self.run_kayobe_playbook(parsed_args, _get_playbook_path("ip-allocation"), limit="seed") @@ -601,6 +733,7 @@ class SeedVMDeprovision(KollaAnsibleMixin, KayobeAnsibleMixin, VaultMixin, def take_action(self, parsed_args): self.app.LOG.debug("Deprovisioning seed VM") + self.handle_kolla_tags_limits_deprecation(parsed_args) self.run_kayobe_playbook(parsed_args, _get_playbook_path("seed-vm-deprovision")) @@ -736,6 +869,7 @@ class SeedServiceDeploy(KollaAnsibleMixin, KayobeAnsibleMixin, VaultMixin, def take_action(self, parsed_args): self.app.LOG.debug("Deploying seed services") + self.handle_kolla_tags_limits_deprecation(parsed_args) playbooks = _build_playbook_list( "seed-manage-containers") extra_vars = {"kayobe_action": "deploy"} @@ -770,6 +904,7 @@ def take_action(self, parsed_args): "you understand this.") sys.exit(1) self.app.LOG.debug("Destroying seed services") + self.handle_kolla_tags_limits_deprecation(parsed_args) self.generate_kolla_ansible_config(parsed_args, service_config=False, bifrost_config=False) extra_args = ["--yes-i-really-really-mean-it"] @@ -811,6 +946,7 @@ class SeedServiceUpgrade(KollaAnsibleMixin, KayobeAnsibleMixin, VaultMixin, def take_action(self, parsed_args): self.app.LOG.debug("Upgrading seed services") + self.handle_kolla_tags_limits_deprecation(parsed_args) playbooks = _build_playbook_list( "seed-manage-containers") extra_vars = {"kayobe_action": "deploy"} @@ -1178,6 +1314,8 @@ class OvercloudFactsGather(KollaAnsibleMixin, KayobeAnsibleMixin, VaultMixin, def take_action(self, parsed_args): self.app.LOG.debug("Gathering overcloud host facts") + self.handle_kolla_tags_limits_deprecation(parsed_args) + # Gather facts for Kayobe. 
playbooks = _build_playbook_list("overcloud-facts-gather") self.run_kayobe_playbooks(parsed_args, playbooks) @@ -1315,6 +1453,8 @@ def get_parser(self, prog_name): def take_action(self, parsed_args): self.app.LOG.debug("Performing overcloud database backup") + self.handle_kolla_tags_limits_deprecation(parsed_args) + # First prepare configuration. self.generate_kolla_ansible_config(parsed_args, service_config=False) @@ -1343,6 +1483,8 @@ def take_action(self, parsed_args): self.app.LOG.debug("Performing overcloud database recovery") extra_vars = {} + self.handle_kolla_tags_limits_deprecation(parsed_args) + # First prepare configuration. self.generate_kolla_ansible_config(parsed_args, service_config=True) @@ -1380,6 +1522,8 @@ def get_parser(self, prog_name): def take_action(self, parsed_args): self.app.LOG.debug("Generating overcloud service configuration") + self.handle_kolla_tags_limits_deprecation(parsed_args) + # First prepare configuration. self.generate_kolla_ansible_config(parsed_args) @@ -1411,6 +1555,7 @@ def get_parser(self, prog_name): def take_action(self, parsed_args): self.app.LOG.debug("Validating overcloud service configuration") + self.handle_kolla_tags_limits_deprecation(parsed_args) extra_vars = {} if parsed_args.output_dir: extra_vars[ @@ -1475,8 +1620,8 @@ class OvercloudServiceDeploy(KollaAnsibleMixin, KayobeAnsibleMixin, VaultMixin, * Configure and deploy kayobe extra services. * Generate openrc files for the admin user. - This can be used in conjunction with the --tags and --kolla-tags arguments - to deploy specific services. + This can be used in conjunction with the --tags argument to deploy specific + services. """ def get_parser(self, prog_name): @@ -1489,6 +1634,8 @@ def get_parser(self, prog_name): def take_action(self, parsed_args): self.app.LOG.debug("Deploying overcloud services") + self.handle_kolla_tags_limits_deprecation(parsed_args) + # First prepare configuration. 
self.generate_kolla_ansible_config(parsed_args) @@ -1524,8 +1671,8 @@ class OvercloudServiceDeployContainers(KollaAnsibleMixin, KayobeAnsibleMixin, * Perform a kolla-ansible deployment of the overcloud service containers. * Configure and deploy kayobe extra services. - This can be used in conjunction with the --tags and --kolla-tags arguments - to deploy specific services. + This can be used in conjunction with the --tags argument to deploy specific + services. """ def get_parser(self, prog_name): @@ -1539,6 +1686,8 @@ def get_parser(self, prog_name): def take_action(self, parsed_args): self.app.LOG.debug("Deploying overcloud services (containers only)") + self.handle_kolla_tags_limits_deprecation(parsed_args) + # First prepare configuration. self.generate_kolla_ansible_config(parsed_args) @@ -1565,13 +1714,15 @@ class OvercloudServicePrechecks(KollaAnsibleMixin, KayobeAnsibleMixin, * Perform kolla-ansible prechecks to verify the system state for deployment. - This can be used in conjunction with the --tags and --kolla-tags arguments - to check specific services. + This can be used in conjunction with the --tags argument to check specific + services. """ def take_action(self, parsed_args): self.app.LOG.debug("Running overcloud prechecks") + self.handle_kolla_tags_limits_deprecation(parsed_args) + # First prepare configuration. self.generate_kolla_ansible_config(parsed_args) @@ -1579,6 +1730,14 @@ def take_action(self, parsed_args): self.run_kolla_ansible_overcloud(parsed_args, "prechecks") +class OvercloudServicePasswordsView(KayobeAnsibleMixin, VaultMixin, Command): + """View Passwords.""" + + def take_action(self, parsed_args): + self.app.LOG.debug("Displaying Passwords") + vault.view_passwords(parsed_args) + + class OvercloudServiceReconfigure(KollaAnsibleMixin, KayobeAnsibleMixin, VaultMixin, Command): """Reconfigure the overcloud services. 
@@ -1591,8 +1750,8 @@ class OvercloudServiceReconfigure(KollaAnsibleMixin, KayobeAnsibleMixin, * Configure and deploy kayobe extra services. * Generate openrc files for the admin user. - This can be used in conjunction with the --tags and --kolla-tags arguments - to reconfigure specific services. + This can be used in conjunction with the --tags argument to reconfigure + specific services. """ def get_parser(self, prog_name): @@ -1605,6 +1764,8 @@ def get_parser(self, prog_name): def take_action(self, parsed_args): self.app.LOG.debug("Reconfiguring overcloud services") + self.handle_kolla_tags_limits_deprecation(parsed_args) + # First prepare configuration. self.generate_kolla_ansible_config(parsed_args) @@ -1638,8 +1799,8 @@ class OvercloudServiceStop(KollaAnsibleMixin, KayobeAnsibleMixin, VaultMixin, * Perform a kolla-ansible stop of the overcloud services. * Stop kayobe extra services. - This can be used in conjunction with the --tags and --kolla-tags arguments - to stop specific services. + This can be used in conjunction with the --tags argument to stop specific + services. """ def get_parser(self, prog_name): @@ -1660,6 +1821,8 @@ def take_action(self, parsed_args): self.app.LOG.debug("Stopping overcloud services") + self.handle_kolla_tags_limits_deprecation(parsed_args) + # First prepare configuration. self.generate_kolla_ansible_config(parsed_args) @@ -1687,8 +1850,8 @@ class OvercloudServiceUpgrade(KollaAnsibleMixin, KayobeAnsibleMixin, * Configure and upgrade kayobe extra services. * Regenerate openrc files for the admin user. - This can be used in conjunction with the --tags and --kolla-tags arguments - to upgrade specific services. + This can be used in conjunction with the --tags argument to upgrade + specific services. 
""" def get_parser(self, prog_name): @@ -1701,6 +1864,8 @@ def get_parser(self, prog_name): def take_action(self, parsed_args): self.app.LOG.debug("Upgrading overcloud services") + self.handle_kolla_tags_limits_deprecation(parsed_args) + # First prepare configuration. self.generate_kolla_ansible_config(parsed_args, install=True) @@ -1752,6 +1917,8 @@ def take_action(self, parsed_args): self.app.LOG.debug("Destroying overcloud services") + self.handle_kolla_tags_limits_deprecation(parsed_args) + # First prepare configuration. self.generate_kolla_ansible_config(parsed_args) @@ -1774,6 +1941,8 @@ class OvercloudContainerImagePull(KayobeAnsibleMixin, KollaAnsibleMixin, def take_action(self, parsed_args): self.app.LOG.debug("Pulling overcloud container images") + self.handle_kolla_tags_limits_deprecation(parsed_args) + # First prepare configuration. self.generate_kolla_ansible_config(parsed_args, service_config=False) diff --git a/kayobe/cmd/kayobe.py b/kayobe/cmd/kayobe.py index 5e3fadf15..25f5f44ec 100644 --- a/kayobe/cmd/kayobe.py +++ b/kayobe/cmd/kayobe.py @@ -18,6 +18,31 @@ from kayobe import version +import logging + + +class CustomFormatter(logging.Formatter): + + grey = "\x1b[38;20m" + yellow = "\x1b[33;20m" + red = "\x1b[31;20m" + bold_red = "\x1b[31;1m" + reset = "\x1b[0m" + format = "[%(levelname)s]: %(message)s" + + FORMATS = { + logging.DEBUG: grey + format + reset, + logging.INFO: grey + format + reset, + logging.WARNING: yellow + format + reset, + logging.ERROR: red + format + reset, + logging.CRITICAL: bold_red + format + reset + } + + def format(self, record): + log_fmt = self.FORMATS.get(record.levelno) + formatter = logging.Formatter(log_fmt) + return formatter.format(record) + class KayobeApp(App): @@ -33,6 +58,14 @@ def __init__(self): def initialize_app(self, argv): self.LOG.debug('initialize_app') + def configure_logging(self): + super().configure_logging() + root_logger = logging.getLogger('') + # Override log formatter + for handler in 
root_logger.handlers: + if isinstance(handler, logging.StreamHandler): + handler.setFormatter(CustomFormatter()) + def prepare_to_run_command(self, cmd): self.LOG.debug('prepare_to_run_command %s', cmd.__class__.__name__) diff --git a/kayobe/kolla_ansible.py b/kayobe/kolla_ansible.py index c06faafc3..7304fe82d 100644 --- a/kayobe/kolla_ansible.py +++ b/kayobe/kolla_ansible.py @@ -54,16 +54,20 @@ def add_args(parser): "Kolla Ansible" % (CONFIG_PATH_ENV, DEFAULT_CONFIG_PATH), action='append') + # TODO(mattcrees): Remove kl, kt, and kolla-skip-tags in 2026.2. parser.add_argument("-kl", "--kolla-limit", metavar="SUBSET", - help="further limit selected hosts to an additional " + help="[DEPRECATED: Please use -l or --limit instead] " + "further limit selected hosts to an additional " "pattern") parser.add_argument("-kp", "--kolla-playbook", metavar="PLAYBOOK", help="path to Ansible playbook file") parser.add_argument("--kolla-skip-tags", metavar="TAGS", - help="only run plays and tasks whose tags do not " - "match these values in Kolla Ansible" + help="[DEPRECATED: Please use --skip-tags instead] " + "only run plays and tasks whose tags " + "do not match these values in Kolla Ansible") parser.add_argument("-kt", "--kolla-tags", metavar="TAGS", - help="only run plays and tasks tagged with these " + help="[DEPRECATED: Please use -t or --tags instead] " + "only run plays and tasks tagged with these " "values in Kolla Ansible") parser.add_argument("--kolla-venv", metavar="VENV", default=default_venv, help="path to virtualenv where Kolla Ansible is " @@ -162,16 +166,24 @@ def build_args(parsed_args, command, inventory_filename, extra_vars=None, # Quote and escape variables originating within the python CLI.
extra_var_value = utils.quote_and_escape(extra_var_value) cmd += ["-e", "%s=%s" % (extra_var_name, extra_var_value)] - if parsed_args.kolla_limit or limit: - limit_arg = utils.intersect_limits(parsed_args.kolla_limit, limit) + if parsed_args.limit or parsed_args.kolla_limit or limit: + limit_arg = utils.intersect_limits(parsed_args.limit, limit) + limit_arg = utils.intersect_limits(parsed_args.kolla_limit, limit_arg) cmd += ["--limit", utils.quote_and_escape(limit_arg)] - if parsed_args.kolla_skip_tags: - cmd += ["--skip-tags", parsed_args.kolla_skip_tags] - if parsed_args.kolla_tags or tags: - all_tags = [t for t in [parsed_args.kolla_tags, tags] if t] + if parsed_args.skip_tags or parsed_args.kolla_skip_tags: + all_tags = [t for t in [parsed_args.skip_tags, + parsed_args.kolla_skip_tags] if t] + cmd += ["--skip-tags", ",".join(all_tags)] + if parsed_args.tags or parsed_args.kolla_tags or tags: + all_tags = [t for t in [parsed_args.tags, parsed_args.kolla_tags, + tags] if t] cmd += ["--tags", ",".join(all_tags)] if parsed_args.list_tasks: cmd += ["--list-tasks"] + if parsed_args.check: + cmd += ["--check"] + if parsed_args.diff: + cmd += ["--diff"] if extra_args: cmd += extra_args return cmd @@ -191,14 +203,6 @@ def _get_environment(parsed_args): ansible_cfg_path = os.path.join(parsed_args.config_path, "ansible.cfg") if utils.is_readable_file(ansible_cfg_path)["result"]: env.setdefault("ANSIBLE_CONFIG", ansible_cfg_path) - # kolla-ansible allows passing additional arguments to ansible-playbook via - # EXTRA_OPTS. 
- if parsed_args.check or parsed_args.diff: - extra_opts = env.setdefault("EXTRA_OPTS", "") - if parsed_args.check and "--check" not in extra_opts: - env["EXTRA_OPTS"] += " --check" - if parsed_args.diff and "--diff" not in extra_opts: - env["EXTRA_OPTS"] += " --diff" return env diff --git a/kayobe/plugins/action/kolla_ansible_host_vars.py b/kayobe/plugins/action/kolla_ansible_host_vars.py index b4ee02db8..085e60004 100644 --- a/kayobe/plugins/action/kolla_ansible_host_vars.py +++ b/kayobe/plugins/action/kolla_ansible_host_vars.py @@ -13,6 +13,7 @@ # under the License. from ansible.plugins.action import ActionBase +from ansible.template import trust_as_template class ConfigError(Exception): @@ -28,6 +29,11 @@ class ActionModule(ActionBase): TRANSFERS_FILES = False + def trusted_template(self, input): + # Mark all input as trusted. + trusted_input = trust_as_template(input) + return self._templar.template(trusted_input) + def run(self, tmp=None, task_vars=None): if task_vars is None: task_vars = dict() @@ -97,11 +103,11 @@ def _run(self, interfaces, external_networks): def _get_interface_fact(self, net_name, required, description): # Check whether the network is mapped to this host. condition = "{{ '%s' in network_interfaces }}" % net_name - condition = self._templar.template(condition) + condition = self.trusted_template(condition) if condition: # Get the network interface for this network. 
iface = ("{{ '%s' | net_interface }}" % net_name) - iface = self._templar.template(iface) + iface = self.trusted_template(iface) if required and not iface: msg = ("Required network '%s' (%s) does not have an interface " "configured for this host" % (net_name, description)) @@ -114,20 +120,20 @@ def _get_interface_fact(self, net_name, required, description): def _get_external_interface(self, net_name, required): condition = "{{ '%s' in network_interfaces }}" % net_name - condition = self._templar.template(condition) + condition = self.trusted_template(condition) if condition: - iface = self._templar.template("{{ '%s' | net_interface }}" % - net_name) + iface = self.trusted_template("{{ '%s' | net_interface }}" % + net_name) if iface: # When these networks are VLANs, we need to use the # underlying tagged bridge interface rather than the # untagged interface. We therefore strip the . suffix # of the interface name. We use a union here as a single # tagged interface may be shared between these networks. 
- vlan = self._templar.template("{{ '%s' | net_vlan }}" % - net_name) - parent = self._templar.template("{{ '%s' | net_parent }}" % - net_name) + vlan = self.trusted_template("{{ '%s' | net_vlan }}" % + net_name) + parent = self.trusted_template("{{ '%s' | net_parent }}" % + net_name) if vlan and parent: iface = parent elif vlan and iface.endswith(".%s" % vlan): @@ -146,15 +152,15 @@ def _get_external_interface_facts(self, external_interfaces): neutron_external_interfaces = [] neutron_physical_networks = [] missing_physical_networks = [] - bridge_suffix = self._templar.template( + bridge_suffix = self.trusted_template( "{{ network_bridge_suffix_ovs }}") - patch_prefix = self._templar.template("{{ network_patch_prefix }}") - patch_suffix = self._templar.template("{{ network_patch_suffix_ovs }}") + patch_prefix = self.trusted_template("{{ network_patch_prefix }}") + patch_suffix = self.trusted_template("{{ network_patch_suffix_ovs }}") for interface, iface_networks in external_interfaces.items(): is_bridge = ("{{ '%s' in (network_interfaces |" "net_select_bridges |" "map('net_interface')) }}" % interface) - is_bridge = self._templar.template(is_bridge) + is_bridge = self.trusted_template(is_bridge) neutron_bridge_names.append(interface + bridge_suffix) # For a bridge, use a veth pair connected to the bridge. Otherwise # use the interface directly. @@ -171,7 +177,7 @@ def _get_external_interface_facts(self, external_interfaces): # attribute set, and if so, whether they are consistent. 
iface_physical_networks = [] for iface_network in iface_networks: - physical_network = self._templar.template( + physical_network = self.trusted_template( "{{ '%s' | net_physical_network }}" % iface_network) if (physical_network and physical_network not in iface_physical_networks): diff --git a/kayobe/plugins/action/merge_configs.py b/kayobe/plugins/action/merge_configs.py index 3aff18d53..605414bfc 100644 --- a/kayobe/plugins/action/merge_configs.py +++ b/kayobe/plugins/action/merge_configs.py @@ -24,10 +24,13 @@ from ansible import constants from ansible.plugins import action +from ansible.template import trust_as_template + from io import StringIO from oslo_config import iniparser + _ORPHAN_SECTION = 'TEMPORARY_ORPHAN_VARIABLE_SECTION' DOCUMENTATION = ''' @@ -154,7 +157,7 @@ def read_config(self, source, config): # Only use config if present if os.access(source, os.R_OK): with open(source, 'r') as f: - template_data = f.read() + template_data = trust_as_template(f.read()) # set search path to mimic 'template' module behavior searchpath = [ diff --git a/kayobe/plugins/action/merge_yaml.py b/kayobe/plugins/action/merge_yaml.py index 73f954832..35d250371 100644 --- a/kayobe/plugins/action/merge_yaml.py +++ b/kayobe/plugins/action/merge_yaml.py @@ -27,6 +27,8 @@ from ansible import errors as ansible_errors from ansible.plugins import action +from ansible.template import trust_as_template + DOCUMENTATION = ''' --- module: merge_yaml @@ -95,7 +97,7 @@ def read_config(self, source): # Only use config if present if source and os.access(source, os.R_OK): with open(source, 'r') as f: - template_data = f.read() + template_data = trust_as_template(f.read()) # set search path to mimic 'template' module behavior searchpath = [ diff --git a/kayobe/plugins/action/template_content.py b/kayobe/plugins/action/template_content.py new file mode 100644 index 000000000..cd330bdca --- /dev/null +++ b/kayobe/plugins/action/template_content.py @@ -0,0 +1,47 @@ +# Copyright (c) 2025 
StackHPC Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os +import tempfile + +from ansible.module_utils.common.text.converters import to_bytes +from ansible.plugins.action.template import ActionModule as TemplateBase + +from ansible import errors as ansible_errors + + +class ActionModule(TemplateBase): + """Decorates template action to support using content parameter""" + + def run(self, *args, **kwargs): + module_args = self._task.args + if "src" in module_args and "content" in module_args: + raise ansible_errors.AnsibleActionFail( + "Invalid argument: content and src are mutually exclusive." 
+ ) + if "content" not in module_args and "src" not in module_args: + raise ansible_errors.AnsibleActionFail( + "Invalid argument: You must specify either content or src" + ) + + if "src" in module_args: + return super().run(*args, **kwargs) + + with tempfile.NamedTemporaryFile() as fp: + content = module_args.pop("content", "") + fp.write(to_bytes(content)) + fp.flush() + tempfile_path = os.path.join(tempfile.gettempdir(), str(fp.name)) + module_args["src"] = tempfile_path + return super().run(*args, **kwargs) diff --git a/kayobe/plugins/filter/networks.py b/kayobe/plugins/filter/networks.py index f949d7982..5a48dce70 100644 --- a/kayobe/plugins/filter/networks.py +++ b/kayobe/plugins/filter/networks.py @@ -246,6 +246,12 @@ def net_inspection_allocation_pool_end(context, name, inventory_hostname=None): net_inspection_gateway = _make_attr_filter('inspection_gateway') + + +@jinja2.pass_context +def net_inspection_dns_servers(context, name, inventory_hostname=None): + return net_attr(context, name, 'inspection_dns_servers', + inventory_hostname) + + @jinja2.pass_context def net_neutron_allocation_pool_start(context, name, inventory_hostname=None): return net_attr(context, name, 'neutron_allocation_pool_start', @@ -283,9 +289,9 @@ def net_macaddress(context, name, inventory_hostname=None): def net_bridge_stp(context, name, inventory_hostname=None): """Return the Spanning Tree Protocol (STP) state for a bridge. - On RL9 if STP is not defined, default it to 'false' to preserve - compatibility with network scripts. STP is 'true' in NetworkManager - by default, so we set it to 'false' here. + On RL10 if STP is not defined, default it to 'false' to preserve + compatibility with network scripts. STP is 'true' in NetworkManager by + default, so we set it to 'false' here. :param context: Jinja2 Context object. :param name: The name of the network.
@@ -791,6 +797,7 @@ def get_filters(): 'net_inspection_allocation_pool_end': ( net_inspection_allocation_pool_end), 'net_inspection_gateway': net_inspection_gateway, + 'net_inspection_dns_servers': net_inspection_dns_servers, 'net_neutron_allocation_pool_start': net_neutron_allocation_pool_start, 'net_neutron_allocation_pool_end': net_neutron_allocation_pool_end, 'net_neutron_gateway': net_neutron_gateway, diff --git a/kayobe/tests/unit/cli/test_commands.py b/kayobe/tests/unit/cli/test_commands.py index f50a6e1e7..a72d76ecd 100644 --- a/kayobe/tests/unit/cli/test_commands.py +++ b/kayobe/tests/unit/cli/test_commands.py @@ -117,6 +117,113 @@ def test_control_host_bootstrap_with_passwords( ] self.assertListEqual(expected_calls, mock_kolla_run.call_args_list) + @mock.patch.object(commands.KayobeAnsibleMixin, + "run_kayobe_playbooks") + def test_control_host_configure(self, mock_run): + command = commands.ControlHostConfigure(TestApp(), []) + parser = command.get_parser("test") + parsed_args = parser.parse_args([]) + + result = command.run(parsed_args) + self.assertEqual(0, result) + + expected_calls = [ + mock.call( + mock.ANY, + [utils.get_data_files_path("ansible", "ip-allocation.yml")], + limit="ansible-control", + ), + mock.call( + mock.ANY, + [ + utils.get_data_files_path( + "ansible", "control-host-configure.yml"), + ], + limit="ansible-control", + ), + ] + self.assertListEqual(expected_calls, mock_run.call_args_list) + + @mock.patch.object(commands.KayobeAnsibleMixin, + "run_kayobe_playbooks") + def test_control_host_configure_wipe_disks(self, mock_run): + command = commands.ControlHostConfigure(TestApp(), []) + parser = command.get_parser("test") + parsed_args = parser.parse_args(["--wipe-disks"]) + + result = command.run(parsed_args) + self.assertEqual(0, result) + + expected_calls = [ + mock.call( + mock.ANY, + [utils.get_data_files_path("ansible", "ip-allocation.yml")], + limit="ansible-control", + ), + mock.call( + mock.ANY, + [ + 
utils.get_data_files_path( + "ansible", "control-host-configure.yml"), + ], + limit="ansible-control", + extra_vars={"wipe_disks": True}, + ), + ] + self.assertListEqual(expected_calls, mock_run.call_args_list) + + @mock.patch.object(commands.KayobeAnsibleMixin, + "run_kayobe_playbooks") + def test_control_host_command_run(self, mock_run): + command = commands.ControlHostCommandRun(TestApp(), []) + parser = command.get_parser("test") + parsed_args = parser.parse_args(["--command", "ls -a", + "--show-output"]) + + result = command.run(parsed_args) + self.assertEqual(0, result) + + expected_calls = [ + mock.call( + mock.ANY, + [ + utils.get_data_files_path("ansible", + "host-command-run.yml"), + ], + limit="ansible-control", + extra_vars={ + "host_command_to_run": utils.escape_jinja("ls -a"), + "show_output": True} + ), + ] + self.assertListEqual(expected_calls, mock_run.call_args_list) + + @mock.patch.object(commands.KayobeAnsibleMixin, + "run_kayobe_playbooks") + def test_control_host_package_update_all(self, mock_run): + command = commands.ControlHostPackageUpdate(TestApp(), []) + parser = command.get_parser("test") + parsed_args = parser.parse_args(["--packages", "*"]) + + result = command.run(parsed_args) + self.assertEqual(0, result) + + expected_calls = [ + mock.call( + mock.ANY, + [ + utils.get_data_files_path( + "ansible", "host-package-update.yml"), + ], + limit="ansible-control", + extra_vars={ + "host_package_update_packages": "*", + "host_package_update_security": False, + }, + ), + ] + self.assertListEqual(expected_calls, mock_run.call_args_list) + @mock.patch.object(ansible, "install_galaxy_roles", autospec=True) @mock.patch.object(ansible, "install_galaxy_collections", autospec=True) @mock.patch.object(ansible, "prune_galaxy_roles", autospec=True) diff --git a/kayobe/tests/unit/plugins/action/test_kolla_ansible_host_vars.py b/kayobe/tests/unit/plugins/action/test_kolla_ansible_host_vars.py index 3ae00e1a9..480a5cf2a 100644 --- 
a/kayobe/tests/unit/plugins/action/test_kolla_ansible_host_vars.py +++ b/kayobe/tests/unit/plugins/action/test_kolla_ansible_host_vars.py @@ -50,7 +50,8 @@ class FakeTemplar(object): def __init__(self, variables): self.variables = variables - self.env = jinja2.Environment() + # Bandit complains about Jinja2 autoescaping without nosec. + self.env = jinja2.Environment() # nosec self.env.filters['net_interface'] = _net_interface self.env.filters['net_parent'] = _net_parent self.env.filters['net_vlan'] = _net_vlan diff --git a/kayobe/tests/unit/test_ansible.py b/kayobe/tests/unit/test_ansible.py index 458373f9a..10352dd25 100644 --- a/kayobe/tests/unit/test_ansible.py +++ b/kayobe/tests/unit/test_ansible.py @@ -28,6 +28,9 @@ from kayobe import utils from kayobe import vault +from ansible.parsing.vault import VaultSecret +from ansible.parsing.vault import VaultSecretsContext + @mock.patch.dict(os.environ, clear=True) class TestCase(unittest.TestCase): @@ -56,6 +59,7 @@ def test_run_playbooks(self, mock_validate, mock_vars, mock_run): ] expected_env = { + "ANSIBLE_ALLOW_BROKEN_CONDITIONALS": "true", "KAYOBE_CONFIG_PATH": "/etc/kayobe", "ANSIBLE_ROLES_PATH": ":".join([ "/etc/kayobe/ansible/roles", @@ -106,6 +110,7 @@ def test_run_playbooks_internal(self, mock_validate, mock_vars, mock_run): ] expected_env = { + "ANSIBLE_ALLOW_BROKEN_CONDITIONALS": "true", "KAYOBE_CONFIG_PATH": "/etc/kayobe", "ANSIBLE_ROLES_PATH": ":".join([ utils.get_data_files_path("ansible", "roles"), @@ -217,12 +222,13 @@ def test_run_playbooks_all_the_args(self, mock_validate, mock_vars, "--check", "--diff", "--limit", "group1:host", - "--tags", "tag1,tag2", + "--tags", "tag1,tag2,kayobe-generate-config", "playbook1.yml", "playbook2.yml", ] expected_env = { + "ANSIBLE_ALLOW_BROKEN_CONDITIONALS": "true", "KAYOBE_CONFIG_PATH": "/path/to/config", "KAYOBE_ENVIRONMENT": "test-env", "ANSIBLE_ROLES_PATH": ":".join([ @@ -294,11 +300,12 @@ def test_run_playbooks_all_the_long_args(self, mock_ask, mock_validate, 
"--diff", "--limit", "group1:host1", "--skip-tags", "tag3,tag4", - "--tags", "tag1,tag2", + "--tags", "tag1,tag2,kayobe-generate-config", "playbook1.yml", "playbook2.yml", ] expected_env = { + "ANSIBLE_ALLOW_BROKEN_CONDITIONALS": "true", "KAYOBE_CONFIG_PATH": "/path/to/config", "KAYOBE_ENVIRONMENT": "test-env", "KAYOBE_VAULT_PASSWORD": "test-pass", @@ -342,6 +349,7 @@ def test_run_playbooks_vault_password_file(self, mock_update, "playbook1.yml", ] expected_env = { + "ANSIBLE_ALLOW_BROKEN_CONDITIONALS": "true", "KAYOBE_CONFIG_PATH": "/etc/kayobe", "ANSIBLE_ROLES_PATH": mock.ANY, "ANSIBLE_COLLECTIONS_PATH": mock.ANY, @@ -379,6 +387,7 @@ def test_run_playbooks_vault_password_helper(self, mock_validate, "playbook1.yml", ] expected_env = { + "ANSIBLE_ALLOW_BROKEN_CONDITIONALS": "true", "KAYOBE_CONFIG_PATH": "/etc/kayobe", "KAYOBE_VAULT_PASSWORD": "test-pass", "ANSIBLE_ROLES_PATH": mock.ANY, @@ -441,11 +450,12 @@ def test_run_playbooks_func_args(self, mock_validate, mock_vars, mock_run): "--check", "--diff", "--limit", "group1:host1:&group2:host2", - "--tags", "tag1,tag2,tag3,tag4", + "--tags", "tag1,tag2,tag3,tag4,kayobe-generate-config", "playbook1.yml", "playbook2.yml", ] expected_env = { + "ANSIBLE_ALLOW_BROKEN_CONDITIONALS": "true", "KAYOBE_CONFIG_PATH": "/etc/kayobe", "ANSIBLE_ROLES_PATH": mock.ANY, "ANSIBLE_COLLECTIONS_PATH": mock.ANY, @@ -483,6 +493,7 @@ def test_run_playbooks_ignore_limit(self, mock_validate, mock_vars, "playbook2.yml", ] expected_env = { + "ANSIBLE_ALLOW_BROKEN_CONDITIONALS": "true", "KAYOBE_CONFIG_PATH": "/etc/kayobe", "ANSIBLE_ROLES_PATH": mock.ANY, "ANSIBLE_COLLECTIONS_PATH": mock.ANY, @@ -520,6 +531,7 @@ def test_run_playbooks_list_tasks_arg(self, mock_validate, mock_vars, "playbook2.yml", ] expected_env = { + "ANSIBLE_ALLOW_BROKEN_CONDITIONALS": "true", "KAYOBE_CONFIG_PATH": "/etc/kayobe", "ANSIBLE_ROLES_PATH": mock.ANY, "ANSIBLE_COLLECTIONS_PATH": mock.ANY, @@ -553,6 +565,7 @@ def test_run_playbooks_ansible_cfg(self, mock_validate, 
mock_vars, expected_env = { "ANSIBLE_CONFIG": "/etc/kayobe/ansible.cfg", "KAYOBE_CONFIG_PATH": "/etc/kayobe", + "ANSIBLE_ALLOW_BROKEN_CONDITIONALS": "true", "ANSIBLE_ROLES_PATH": mock.ANY, "ANSIBLE_COLLECTIONS_PATH": mock.ANY, "ANSIBLE_ACTION_PLUGINS": mock.ANY, @@ -585,6 +598,7 @@ def test_run_playbooks_ansible_cfg_env(self, mock_validate, mock_vars, "playbook1.yml", ] expected_env = { + "ANSIBLE_ALLOW_BROKEN_CONDITIONALS": "true", "ANSIBLE_CONFIG": "/path/to/ansible.cfg", "KAYOBE_CONFIG_PATH": "/etc/kayobe", "ANSIBLE_ROLES_PATH": mock.ANY, @@ -656,6 +670,10 @@ def test_config_dump(self, mock_mkdtemp, mock_run, mock_listdir, mock_read, @mock.patch.object(tempfile, 'mkdtemp') def test_config_dump_vaulted(self, mock_mkdtemp, mock_run, mock_listdir, mock_read, mock_rmtree): + + secret = VaultSecret(b'test-pass') + VaultSecretsContext.initialize( + VaultSecretsContext(secrets=[('default', secret)])) parser = argparse.ArgumentParser() parsed_args = parser.parse_args([]) dump_dir = "/path/to/dump" @@ -663,31 +681,31 @@ def test_config_dump_vaulted(self, mock_mkdtemp, mock_run, mock_listdir, mock_listdir.return_value = ["host1.yml", "host2.yml"] config = """--- key1: !vault | - $ANSIBLE_VAULT;1.1;AES256 - 633230623736383232323862393364323037343430393530316636363961626361393133646437 - 643438663261356433656365646138666133383032376532310a63323432306431303437623637 - 346236316161343635636230613838316566383933313338636237616338326439616536316639 - 6334343462333062363334300a3930313762313463613537626531313230303731343365643766 - 666436333037 + $ANSIBLE_VAULT;1.1;AES256 + 65393836643335346138373665636564643436353231623838636261373565633731303835653139 + 6335343464383063373734636161323236636431316532650a333366333366396262353635313531 + 64666236636262326662323931313065376533333961356239363637333363623464666636616233 + 6130373664393533350a663266613165646565346433313536313461653236303563643262323936 + 6262 key2: value2 key3: - !vault | $ANSIBLE_VAULT;1.1;AES256 - 
633230623736383232323862393364323037343430393530316636363961626361393133646437 - 643438663261356433656365646138666133383032376532310a63323432306431303437623637 - 346236316161343635636230613838316566383933313338636237616338326439616536316639 - 6334343462333062363334300a3930313762313463613537626531313230303731343365643766 - 666436333037 + 65393836643335346138373665636564643436353231623838636261373565633731303835653139 + 6335343464383063373734636161323236636431316532650a333366333366396262353635313531 + 64666236636262326662323931313065376533333961356239363637333363623464666636616233 + 6130373664393533350a663266613165646565346433313536313461653236303563643262323936 + 6262 """ config_nested = """--- key1: key2: !vault | $ANSIBLE_VAULT;1.1;AES256 - 633230623736383232323862393364323037343430393530316636363961626361393133646437 - 643438663261356433656365646138666133383032376532310a63323432306431303437623637 - 346236316161343635636230613838316566383933313338636237616338326439616536316639 - 6334343462333062363334300a3930313762313463613537626531313230303731343365643766 - 666436333037 + 65393836643335346138373665636564643436353231623838636261373565633731303835653139 + 6335343464383063373734636161323236636431316532650a333366333366396262353635313531 + 64666236636262326662323931313065376533333961356239363637333363623464666636616233 + 6130373664393533350a663266613165646565346433313536313461653236303563643262323936 + 6262 """ mock_read.side_effect = [config, config_nested] result = ansible.config_dump(parsed_args) @@ -951,6 +969,7 @@ def test_multiple_inventory_args(self, mock_validate, mock_vars, mock_run): "playbook2.yml", ] expected_env = { + "ANSIBLE_ALLOW_BROKEN_CONDITIONALS": "true", "KAYOBE_CONFIG_PATH": "/etc/kayobe", "ANSIBLE_ROLES_PATH": mock.ANY, "ANSIBLE_COLLECTIONS_PATH": mock.ANY, @@ -994,6 +1013,7 @@ def exists_replacement(path): "playbook2.yml", ] expected_env = { + "ANSIBLE_ALLOW_BROKEN_CONDITIONALS": "true", "KAYOBE_CONFIG_PATH": "/etc/kayobe", 
"KAYOBE_ENVIRONMENT": "test-env", "ANSIBLE_ROLES_PATH": mock.ANY, @@ -1036,6 +1056,7 @@ def exists_replacement(path): "playbook2.yml", ] expected_env = { + "ANSIBLE_ALLOW_BROKEN_CONDITIONALS": "true", "KAYOBE_CONFIG_PATH": "/etc/kayobe", "KAYOBE_ENVIRONMENT": "test-env", "ANSIBLE_ROLES_PATH": mock.ANY, @@ -1079,6 +1100,7 @@ def exists_replacement(path): "playbook2.yml", ] expected_env = { + "ANSIBLE_ALLOW_BROKEN_CONDITIONALS": "true", "KAYOBE_CONFIG_PATH": "/etc/kayobe", "KAYOBE_ENVIRONMENT": "test-env", "ANSIBLE_ROLES_PATH": mock.ANY, @@ -1127,6 +1149,7 @@ def exists_replacement(path): "playbook2.yml", ] expected_env = { + "ANSIBLE_ALLOW_BROKEN_CONDITIONALS": "true", "KAYOBE_CONFIG_PATH": "/etc/kayobe", "KAYOBE_ENVIRONMENT": "test-env", "ANSIBLE_ROLES_PATH": mock.ANY, @@ -1207,6 +1230,7 @@ def exists_replacement(path): "playbook2.yml", ] expected_env = { + "ANSIBLE_ALLOW_BROKEN_CONDITIONALS": "true", "KAYOBE_CONFIG_PATH": "/etc/kayobe", "KAYOBE_ENVIRONMENT": "test-env", "ANSIBLE_ROLES_PATH": mock.ANY, diff --git a/kayobe/tests/unit/test_kolla_ansible.py b/kayobe/tests/unit/test_kolla_ansible.py index 23b2eccf8..d74e753b8 100644 --- a/kayobe/tests/unit/test_kolla_ansible.py +++ b/kayobe/tests/unit/test_kolla_ansible.py @@ -61,8 +61,8 @@ def test_run_all_the_args(self, mock_validate, mock_run): "--kolla-config-path", "/path/to/config", "-ke", "ev_name1=ev_value1", "-ki", "/path/to/inventory", - "-kl", "host1:host2", - "-kt", "tag1,tag2", + "-l", "host1:host2", + "-t", "tag1,tag2", "-kp", "/path/to/playbook", ] parsed_args = parser.parse_args(args) @@ -77,9 +77,10 @@ def test_run_all_the_args(self, mock_validate, mock_run): "-e", "ev_name1=ev_value1", "--limit", "'host1:host2'", "--tags", "tag1,tag2", + "--check", "--diff" ] expected_cmd = " ".join(expected_cmd) - expected_env = {"EXTRA_OPTS": " --check --diff"} + expected_env = {} mock_run.assert_called_once_with(expected_cmd, shell=True, quiet=False, env=expected_env) @@ -99,9 +100,9 @@ def 
test_run_all_the_long_args(self, mock_ask, mock_validate, mock_run): "--kolla-config-path", "/path/to/config", "--kolla-extra-vars", "ev_name1=ev_value1", "--kolla-inventory", "/path/to/inventory", - "--kolla-limit", "host1:host2", - "--kolla-skip-tags", "tag3,tag4", - "--kolla-tags", "tag1,tag2", + "--limit", "host1:host2", + "--skip-tags", "tag3,tag4", + "--tags", "tag1,tag2", "--kolla-playbook", "/path/to/playbook", ] parsed_args = parser.parse_args(args) @@ -119,10 +120,10 @@ def test_run_all_the_long_args(self, mock_ask, mock_validate, mock_run): "--limit", "'host1:host2'", "--skip-tags", "tag3,tag4", "--tags", "tag1,tag2", + "--check", "--diff" ] expected_cmd = " ".join(expected_cmd) - expected_env = {"EXTRA_OPTS": " --check --diff", - "KAYOBE_VAULT_PASSWORD": "test-pass"} + expected_env = {"KAYOBE_VAULT_PASSWORD": "test-pass"} expected_calls = [ mock.call(["which", "kayobe-vault-password-helper"], check_output=True, universal_newlines=True), @@ -193,7 +194,7 @@ def test_run_func_args(self, mock_validate, mock_run): vault.add_args(parser) args = [ "--kolla-extra-vars", "ev_name1=ev_value1", - "--kolla-tags", "tag1,tag2", + "--tags", "tag1,tag2", ] parsed_args = parser.parse_args(args) kwargs = { diff --git a/kayobe/tests/unit/test_utils.py b/kayobe/tests/unit/test_utils.py index 91d076937..736678e9f 100644 --- a/kayobe/tests/unit/test_utils.py +++ b/kayobe/tests/unit/test_utils.py @@ -18,7 +18,7 @@ import unittest from unittest import mock -from ansible.parsing.yaml.objects import AnsibleVaultEncryptedUnicode +from ansible.parsing.vault import EncryptedString import yaml from kayobe import exception @@ -167,9 +167,9 @@ def test_read_config_dump_yaml_file_vaulted(self, mock_read): mock_read.return_value = config result = utils.read_config_dump_yaml_file("/path/to/file") # Can't read the value without an encryption key, so just check type. 
- self.assertIsInstance(result["key1"], AnsibleVaultEncryptedUnicode) + self.assertIsInstance(result["key1"], EncryptedString) self.assertEqual(result["key2"], "value2") - self.assertIsInstance(result["key3"][0], AnsibleVaultEncryptedUnicode) + self.assertIsInstance(result["key3"][0], EncryptedString) mock_read.assert_called_once_with("/path/to/file") @mock.patch.object(utils, "read_file") diff --git a/kayobe/utils.py b/kayobe/utils.py index b70cb1029..14cbf6637 100644 --- a/kayobe/utils.py +++ b/kayobe/utils.py @@ -24,6 +24,8 @@ import shutil import subprocess import sys +from urllib.parse import unquote +from urllib.parse import urlparse from ansible.parsing.yaml.loader import AnsibleLoader import yaml @@ -67,7 +69,7 @@ def _get_direct_url_if_editable(dist): url = direct_url_content['url'] prefix = 'file://' if url.startswith(prefix): - return url[len(prefix):] + return unquote(urlparse(url).path) return None @@ -187,7 +189,8 @@ def read_config_dump_yaml_file(path): sys.exit(1) try: # AnsibleLoader supports loading vault encrypted variables. - return AnsibleLoader(content).get_single_data() + data = AnsibleLoader(content).get_single_data() + return data except yaml.YAMLError as e: print("Failed to decode config dump YAML file %s: %s" % (path, repr(e))) diff --git a/kayobe/vault.py b/kayobe/vault.py index 82bc0f208..3a4d32b9b 100644 --- a/kayobe/vault.py +++ b/kayobe/vault.py @@ -172,3 +172,22 @@ def update_environment(parsed_args, env): if vault_password is not None: env[VAULT_PASSWORD_ENV] = vault_password + + +def view_passwords(parsed_args): + """View passwords stored in the Ansible Vault. + + :param parsed_args: Parsed command line arguments. 
+ """ + env_path = utils.get_kayobe_environment_path( + parsed_args.config_path, parsed_args.environment) + path = env_path if env_path else parsed_args.config_path + passwords_path = os.path.join(path, 'kolla', 'passwords.yml') + cmd = ["ansible-vault", "view", passwords_path] + cmd += ["--vault-password-file", _get_vault_password_helper()] + try: + utils.run_command(cmd) + except subprocess.CalledProcessError as e: + LOG.error("Failed to view passwords via ansible-vault " + "returncode %d", e.returncode) + sys.exit(e.returncode) diff --git a/playbooks/kayobe-base/post.yml b/playbooks/kayobe-base/post.yml index d2e5eb345..89314aee2 100644 --- a/playbooks/kayobe-base/post.yml +++ b/playbooks/kayobe-base/post.yml @@ -11,11 +11,15 @@ --- - hosts: localhost tasks: - - name: Testing become fails - command: "true" - become: true - register: result - failed_when: '"CONTROL_HOST_BECOME_VIOLATION" not in result.module_stderr' + - block: + - name: Testing become fails + command: "true" + become: true + register: result + rescue: + - name: Check for become failure + fail: + when: '"CONTROL_HOST_BECOME_VIOLATION" not in result.msg' dest: /tmp/test-control-host-become.yml - name: Check that that kayobe become validator was correctly configured diff --git a/playbooks/kayobe-base/pre.yml b/playbooks/kayobe-base/pre.yml index 34a317a0e..3cf2b598f 100644 --- a/playbooks/kayobe-base/pre.yml +++ b/playbooks/kayobe-base/pre.yml @@ -15,7 +15,7 @@ callbacks_enabled = ansible.posix.profile_tasks # Improve readability of ansible output. - stdout_callback = yaml + callback_result_format = yaml [ssh_connection] # NOTE(wszusmki): Disable pipelining due to: @@ -36,7 +36,7 @@ callbacks_enabled = ansible.posix.profile_tasks # Improve readability of ansible output. 
- stdout_callback = yaml + callback_result_format = yaml [ssh_connection] # NOTE(wszusmki): Disable pipelining due to: @@ -139,7 +139,7 @@ - name: Ensure previous kayobe repository is cloned command: >- - git clone {{ kayobe_src_dir }} {{ previous_kayobe_src_dir }} -b stable/{{ previous_release | lower }} + git clone {{ kayobe_src_dir }} {{ previous_kayobe_src_dir }} -b {{ branch_prefix }}/{{ previous_release | lower }} - name: Ensure previous kayobe-config directory exists file: @@ -148,7 +148,7 @@ - name: Ensure kayobe-config repository is cloned command: >- - git clone {{ kayobe_config_src_dir }} {{ previous_kayobe_config_src_dir }} -b stable/{{ previous_release | lower }} + git clone {{ kayobe_config_src_dir }} {{ previous_kayobe_config_src_dir }} -b {{ branch_prefix }}/{{ previous_release | lower }} - name: Ensure previous kolla-ansible directory exists file: @@ -157,7 +157,7 @@ - name: Ensure previous kolla-ansible repository is cloned command: >- - git clone {{ kolla_ansible_src_dir }} {{ previous_kolla_ansible_src_dir }} -b stable/{{ previous_release | lower }} + git clone {{ kolla_ansible_src_dir }} {{ previous_kolla_ansible_src_dir }} -b {{ branch_prefix }}/{{ previous_release | lower }} - name: Ensure previous kayobe is executed in verbose mode lineinfile: @@ -180,14 +180,14 @@ name: kayobe-galaxy-requirements vars: kayobe_galaxy_requirements_src_dir: "{{ previous_kayobe_src_dir }}" - kayobe_galaxy_requirements_branch: "stable/{{ previous_release | lower }}" + kayobe_galaxy_requirements_branch: "{{ branch_prefix }}/{{ previous_release | lower }}" - name: Update kolla-ansible requirements.yml include_role: name: kayobe-galaxy-requirements vars: kayobe_galaxy_requirements_src_dir: "{{ previous_kolla_ansible_src_dir }}" - kayobe_galaxy_requirements_branch: "stable/{{ previous_release | lower }}" + kayobe_galaxy_requirements_branch: "{{ branch_prefix }}/{{ previous_release | lower }}" kayobe_galaxy_requirements_dest_path: 
"/tmp/previous-kolla-ansible-requirements.yml" when: is_upgrade diff --git a/playbooks/kayobe-infra-vm-base/infra-vms-group-vars.j2 b/playbooks/kayobe-infra-vm-base/infra-vms-group-vars.j2 index 109707e11..1d30f2122 100644 --- a/playbooks/kayobe-infra-vm-base/infra-vms-group-vars.j2 +++ b/playbooks/kayobe-infra-vm-base/infra-vms-group-vars.j2 @@ -1,5 +1,10 @@ --- +{% if infra_vm_use_cirros | default(true) %} aio_interface: eth0 +{% else %} +# Required for official cloud images (CentOS Stream 10, Rocky Linux 10, Ubuntu) +aio_interface: ens2 +{% endif %} # Route via the seed-hypervisor to the outside world. aio_gateway: 192.168.33.4 diff --git a/playbooks/kayobe-infra-vm-base/overrides.yml.j2 b/playbooks/kayobe-infra-vm-base/overrides.yml.j2 index 0c3865b48..c6d5cc075 100644 --- a/playbooks/kayobe-infra-vm-base/overrides.yml.j2 +++ b/playbooks/kayobe-infra-vm-base/overrides.yml.j2 @@ -31,7 +31,8 @@ infra_vm_vcpus: 1 # Reduce the memory footprint of the infra VM. infra_vm_memory_mb: "{{ 1 * 1024 }}" -# Use cirros rather than CentOS for the VM. +{% if infra_vm_use_cirros | default(true) %} +# Use cirros rather than distribution cloud image for the VM. infra_vm_bootstrap_user: cirros infra_vm_root_image: /opt/cache/files/cirros-0.5.3-x86_64-disk.img @@ -42,6 +43,9 @@ vm_configdrive_device: disk # /etc/network/interfaces. configdrive_os_family: Debian configdrive_debian_network_interfaces_supports_glob: false +{% else %} +infra_vm_provision_timeout: 600 +{% endif %} # NOTE(mgoddard): CentOS 8 removes interfaces from their bridge during ifdown, # and removes the bridge if there are no interfaces left. 
When Kayobe bounces diff --git a/playbooks/kayobe-overcloud-base/baremetal.j2 b/playbooks/kayobe-overcloud-base/baremetal.j2 index 013468258..47920424d 100644 --- a/playbooks/kayobe-overcloud-base/baremetal.j2 +++ b/playbooks/kayobe-overcloud-base/baremetal.j2 @@ -1,2 +1,4 @@ [baremetal-compute] hv100 +tk0 +tk1 diff --git a/playbooks/kayobe-overcloud-base/overrides.yml.j2 b/playbooks/kayobe-overcloud-base/overrides.yml.j2 index c13f16f1a..16f94d9c2 100644 --- a/playbooks/kayobe-overcloud-base/overrides.yml.j2 +++ b/playbooks/kayobe-overcloud-base/overrides.yml.j2 @@ -4,9 +4,6 @@ docker_daemon_debug: true docker_registry_mirrors: - "http://{{ zuul_site_mirror_fqdn }}:8082/" -kolla_docker_namespace: "openstack.kolla" -# use the published images from a site mirror of quay.io -kolla_docker_registry: "{{ zuul_site_mirror_fqdn }}:4447" kolla_source_url: "{{ ansible_env.PWD ~ '/' ~ zuul.projects['opendev.org/openstack/kolla'].src_dir }}" kolla_source_version: "{{ zuul.projects['opendev.org/openstack/kolla'].checkout }}" kolla_ansible_source_url: "{{ ansible_env.PWD ~ '/' ~ zuul.projects['opendev.org/openstack/kolla-ansible'].src_dir }}" @@ -52,12 +49,12 @@ compute_libvirt_enable_tls: true kolla_enable_tls_external: "yes" kolla_enable_tls_internal: "yes" -kolla_ironic_pxe_append_params_extra: +kolla_ironic_kernel_append_params_extra: - ipa-insecure=1 {% endif %} -# NOTE(bbezak): Kolla does not build CentOS Stream 9 container images. -# Using Rocky Linux 9 images on CentOS Stream 9 in CI. +# NOTE(bbezak): Kolla does not build CentOS Stream 10 container images. +# Using Rocky Linux 10 images on CentOS Stream 10 in CI. 
kolla_base_distro: "{% raw %}{{ 'rocky' if os_distribution == 'centos' else os_distribution }}{% endraw %}" # Support overriding container_engine diff --git a/playbooks/kayobe-overcloud-base/run.yml b/playbooks/kayobe-overcloud-base/run.yml index 76ecd8af6..e5c617bc8 100644 --- a/playbooks/kayobe-overcloud-base/run.yml +++ b/playbooks/kayobe-overcloud-base/run.yml @@ -36,6 +36,14 @@ chdir: "{{ kayobe_src_dir }}" executable: /bin/bash + - name: Test inspection of the baremetal machines + shell: + cmd: dev/overcloud-test-inspect.sh &> {{ logs_dir }}/ansible/overcloud-test-inspect + chdir: "{{ kayobe_src_dir }}" + executable: /bin/bash + # TODO(priteau): Fix baremetal inspect issues with UEFI + when: ironic_boot_mode == 'bios' + - name: Perform testing of the baremetal machines shell: cmd: dev/overcloud-test-baremetal.sh &> {{ logs_dir }}/ansible/overcloud-test-baremetal diff --git a/playbooks/kayobe-overcloud-host-configure-base/overrides.yml.j2 b/playbooks/kayobe-overcloud-host-configure-base/overrides.yml.j2 index 91bb0a0db..06933a1d0 100644 --- a/playbooks/kayobe-overcloud-host-configure-base/overrides.yml.j2 +++ b/playbooks/kayobe-overcloud-host-configure-base/overrides.yml.j2 @@ -146,7 +146,7 @@ apt_repositories: suites: noble-security components: main universe # Treasuredata repository. - - url: http://packages.treasuredata.com/5/ubuntu/noble/ + - url: https://packages.treasuredata.com/lts/5/ubuntu/noble components: contrib signed_by: td-agent.asc apt_preferences: @@ -164,23 +164,11 @@ apt_auth: {% endif %} {% if ansible_facts.os_family == 'RedHat' %} -# Use a local DNF mirror. -dnf_use_local_mirror: true -{% if ansible_facts.distribution == 'CentOS' %} -# Mirror FQDN for DNF repos. -dnf_centos_mirror_host: "{{ zuul_site_mirror_fqdn }}" -# Mirror directory for DNF CentOS repos. -dnf_centos_mirror_directory: 'centos-stream' -{% endif %} -# Mirror FQDN for DNF EPEL repos. -dnf_epel_mirror_host: "{{ zuul_site_mirror_fqdn }}" -# Mirror directory for DNF EPEL repos. 
-dnf_epel_mirror_directory: 'epel' # Configure a custom DNF repository. dnf_custom_repos: - td-agent: - baseurl: http://packages.treasuredata.com/4/redhat/$releasever/$basearch - gpgkey: https://packages.treasuredata.com/GPG-KEY-td-agent + fluent-package: + baseurl: https://fluentd.cdn.cncf.io/lts/6/redhat/$releasever/$basearch + gpgkey: https://fluentd.cdn.cncf.io/GPG-KEY-fluent-package gpgcheck: yes # Install EPEL local mirror. dnf_install_epel: true @@ -224,3 +212,9 @@ controller_swap: # Generate a password for libvirt SASL authentication. compute_libvirt_sasl_password: "{% raw %}{{ lookup('password', '/tmp/libvirt-sasl-password') }}{% endraw %}" + +# Test fail2ban configuration +{% if fail2ban_enabled | bool %} +dnf_use_local_mirror: true +controller_fail2ban_enabled: true +{% endif %} diff --git a/playbooks/kayobe-overcloud-host-configure-base/pre.yml b/playbooks/kayobe-overcloud-host-configure-base/pre.yml index 65e0a66f0..d564c45a8 100644 --- a/playbooks/kayobe-overcloud-host-configure-base/pre.yml +++ b/playbooks/kayobe-overcloud-host-configure-base/pre.yml @@ -3,30 +3,21 @@ vars: testinfra_venv: ~/testinfra-venv tasks: - - name: Ensure python3 is installed + - name: Ensure python3 and setuptools are installed package: - name: python3 - become: true - - - name: Install Python3.12 on RHEL derivatives - dnf: name: - - python3.12 - - python3.12-devel - state: latest - when: ansible_facts.os_family == 'RedHat' + - python3 + - python3-setuptools become: true - name: Ensure testinfra is installed - vars: - cmd: "{{ 'python3.12' if ansible_facts.os_family == 'RedHat' else 'python3' }} -m venv" pip: name: - distro - pytest-testinfra - pytest-html virtualenv: "{{ testinfra_venv }}" - virtualenv_command: "{{ cmd }}" + virtualenv_command: python3 -m venv # NOTE(mgoddard): Use the name zz-30-overrides.yml to ensure this takes # precedence over the standard config files and zz-20-overrides.yml from @@ -52,3 +43,9 @@ become: true loop: "{{ range(2, 8) | list }}" when: 
ansible_facts.os_family == 'Debian' + + - name: Ensure firewalld is unmasked + ansible.builtin.systemd_service: + name: firewalld + masked: false + become: true diff --git a/playbooks/kayobe-overcloud-host-configure-base/tests/test_overcloud_host_configure.py b/playbooks/kayobe-overcloud-host-configure-base/tests/test_overcloud_host_configure.py index 6c10068fe..f8e394909 100644 --- a/playbooks/kayobe-overcloud-host-configure-base/tests/test_overcloud_host_configure.py +++ b/playbooks/kayobe-overcloud-host-configure-base/tests/test_overcloud_host_configure.py @@ -20,9 +20,11 @@ def _is_dnf(): return info in ['centos', 'rocky'] +# NOTE: There are OpenDev mirrors only for centos-stream/9-stream and epel/9. def _is_dnf_mirror(): info = distro.id() - return info == 'centos' + version = distro.version() + return info == 'centos' and version == '9' def _is_ubuntu_noble(): @@ -229,8 +231,8 @@ def test_apt_preferences(host): @pytest.mark.skipif(not _is_apt(), reason="Apt only supported on Ubuntu") def test_apt_custom_package_repository_is_available(host): with host.sudo(): - host.check_output("apt -y install td-agent") - assert host.package("td-agent").is_installed + host.check_output("apt -y install fluent-package") + assert host.package("fluent-package").is_installed @pytest.mark.skipif(not _is_apt(), reason="Apt only supported on Ubuntu") @@ -246,7 +248,7 @@ def test_apt_auth(host): @pytest.mark.parametrize('repo', ["appstream", "baseos", "extras", "epel"]) @pytest.mark.skipif(not _is_dnf_mirror(), - reason="DNF OpenDev mirror only for CentOS 8") + reason="DNF OpenDev mirror only for CentOS Stream 9") def test_dnf_local_package_mirrors(host, repo): # Depends on SITE_MIRROR_FQDN environment variable. 
assert os.getenv('SITE_MIRROR_FQDN') @@ -261,8 +263,8 @@ def test_dnf_local_package_mirrors(host, repo): @pytest.mark.skipif(not _is_dnf(), reason="DNF only supported on CentOS/Rocky") def test_dnf_custom_package_repository_is_available(host): with host.sudo(): - host.check_output("dnf -y install td-agent") - assert host.package("td-agent").is_installed + host.check_output("dnf -y install fluent-package") + assert host.package("fluent-package").is_installed @pytest.mark.skipif(not _is_dnf(), reason="DNF only supported on CentOS/Rocky") @@ -272,8 +274,6 @@ def test_dnf_automatic(host): assert host.service("dnf-automatic.timer").is_running -@pytest.mark.skipif(not _is_dnf(), - reason="tuned profiles only supported on CentOS/Rocky") def test_tuned_profile_is_active(host): tuned_output = host.check_output("tuned-adm active") assert "throughput-performance" in tuned_output @@ -344,6 +344,16 @@ def test_firewalld_rules(host): assert expected_line in info assert expected_line in perm_info +def test_fail2ban_running(host): + assert host.package("fail2ban").is_installed + assert host.service("fail2ban.service").is_enabled + assert host.service("fail2ban.service").is_running + +def test_fail2ban_default_jail_config(host): + # verify that sshd jail is enabled by default + status = host.check_output("sudo fail2ban-client status sshd") + status = status.splitlines() + assert "Status for the jail: sshd" in status @pytest.mark.skipif(not _is_dnf(), reason="SELinux only supported on CentOS/Rocky") diff --git a/playbooks/kayobe-overcloud-upgrade-base/overrides.yml.j2 b/playbooks/kayobe-overcloud-upgrade-base/overrides.yml.j2 index 4ac75a54c..dc8fdb04f 100644 --- a/playbooks/kayobe-overcloud-upgrade-base/overrides.yml.j2 +++ b/playbooks/kayobe-overcloud-upgrade-base/overrides.yml.j2 @@ -4,9 +4,6 @@ docker_daemon_debug: true docker_registry_mirrors: - "http://{{ zuul_site_mirror_fqdn }}:8082/" -kolla_docker_namespace: "openstack.kolla" -# use the published images from a site mirror 
of quay.io -kolla_docker_registry: "{{ zuul_site_mirror_fqdn }}:4447" {% if not is_previous_release | default(false) %} kolla_source_url: "{{ ansible_env.PWD ~ '/' ~ zuul.projects['opendev.org/openstack/kolla'].src_dir }}" kolla_source_version: "{{ zuul.projects['opendev.org/openstack/kolla'].checkout }}" @@ -41,17 +38,5 @@ compute_libvirt_sasl_password: "{% raw %}{{ lookup('password', '/tmp/libvirt-sas # Enable ironic for testing baremetal compute. kolla_enable_ironic: true -# TODO(mgoddard): Remove condition and contents when previous release is Epoxy. -# In Dalmatian we switched to the upstream ironic defaults for hardware -# interfaces, which does not enable the following interfaces. -{% if not is_previous_release | default(false) %} -kolla_ironic_enabled_console_interfaces: - - ipmitool-socat - - no-console -kolla_ironic_enabled_inspect_interfaces: - - inspector - - no-inspect -{% endif %} - # Disable heat to save disk space. kolla_enable_heat: false diff --git a/playbooks/kayobe-overcloud-upgrade-base/run.yml b/playbooks/kayobe-overcloud-upgrade-base/run.yml index fef50fbd6..820536979 100644 --- a/playbooks/kayobe-overcloud-upgrade-base/run.yml +++ b/playbooks/kayobe-overcloud-upgrade-base/run.yml @@ -83,17 +83,6 @@ chdir: "{{ previous_kayobe_src_dir }}" executable: /bin/bash - # Migrate RabbitMQ queues. - - - name: Ensure RabbitMQ queues are migrated - shell: - cmd: dev/rabbitmq-migrate-queues.sh ironic,keystone,neutron,nova &> {{ logs_dir }}/ansible/rabbitmq-migrate-queues - chdir: "{{ previous_kayobe_src_dir }}" - executable: /bin/bash - environment: - KAYOBE_CONFIG_SOURCE_PATH: "{{ previous_kayobe_config_src_dir }}" - KAYOBE_SOURCE_PATH: "{{ previous_kayobe_src_dir }}" - # Upgrade Kayobe, and use it to perform an upgrade of the control plane. 
- name: Ensure overcloud is upgraded diff --git a/playbooks/kayobe-seed-base/overrides.yml.j2 b/playbooks/kayobe-seed-base/overrides.yml.j2 index 889d82489..b56fda355 100644 --- a/playbooks/kayobe-seed-base/overrides.yml.j2 +++ b/playbooks/kayobe-seed-base/overrides.yml.j2 @@ -1,4 +1,13 @@ --- +{% if ansible_facts.os_family == 'RedHat' and ansible_facts.distribution_major_version == '10' %} +# Configure EPEL repository. Necessary for gdisk to be available. +# dnf_install_epel and dnf_use_local_mirror are both necessary to trigger +# templating of epel.repo. We use the official EPEL repository because there is +# no OpenDev mirror for EL10. +dnf_install_epel: true +dnf_use_local_mirror: true +{% endif %} + docker_daemon_debug: true # Use alternative registry image to avoid Docker Hub pull rate limit. docker_registry_image_full: "quay.io/libpod/registry:2.8.2" @@ -6,9 +15,6 @@ docker_registry_image_full: "quay.io/libpod/registry:2.8.2" docker_registry_mirrors: - "http://{{ zuul_site_mirror_fqdn }}:8082/" -kolla_docker_namespace: "openstack.kolla" -# use the published images from a site mirror of quay.io -kolla_docker_registry: "{{ zuul_site_mirror_fqdn }}:4447" # NOTE(mgoddard): The kolla repository is copied to /tmp/kolla and made # readable by the stack user. kolla_source_url: "/tmp/kolla" @@ -51,9 +57,15 @@ overcloud_dib_build_host_images: {{ build_images }} overcloud_dib_elements_extra: - "openstack-ci-mirrors" -# NOTE(bbezak): Kolla does not build CentOS Stream 9 container images. -# Using Rocky Linux 9 images on CentOS Stream 9 in CI. +# NOTE(bbezak): Kolla does not build CentOS Stream 10 container images. +# Using Rocky Linux 10 images on CentOS Stream 10 in CI. 
kolla_base_distro: "{% raw %}{{ 'rocky' if os_distribution == 'centos' else os_distribution }}{% endraw %}" # Support overriding container_engine container_engine: "{{ container_engine }}" + +# Test deployment of custom seed containers +seed_containers: + node_exporter: + image: "quay.io/prometheus/node-exporter" + tag: "latest" diff --git a/playbooks/kayobe-seed-upgrade-base/overrides.yml.j2 b/playbooks/kayobe-seed-upgrade-base/overrides.yml.j2 index 49d493862..11797e695 100644 --- a/playbooks/kayobe-seed-upgrade-base/overrides.yml.j2 +++ b/playbooks/kayobe-seed-upgrade-base/overrides.yml.j2 @@ -6,9 +6,6 @@ docker_registry_image_full: "quay.io/libpod/registry:2.8.2" docker_registry_mirrors: - "http://{{ zuul_site_mirror_fqdn }}:8082/" -kolla_docker_namespace: "openstack.kolla" -# use the published images from a site mirror of quay.io -kolla_docker_registry: "{{ zuul_site_mirror_fqdn }}:4447" {% if not is_previous_release | default(false) %} kolla_source_url: "{{ ansible_env.PWD ~ '/' ~ zuul.projects['opendev.org/openstack/kolla'].src_dir }}" kolla_source_version: "{{ zuul.projects['opendev.org/openstack/kolla'].checkout }}" diff --git a/playbooks/kayobe-seed-vm-base/overrides.yml.j2 b/playbooks/kayobe-seed-vm-base/overrides.yml.j2 index bf98d7a89..83be19105 100644 --- a/playbooks/kayobe-seed-vm-base/overrides.yml.j2 +++ b/playbooks/kayobe-seed-vm-base/overrides.yml.j2 @@ -34,17 +34,11 @@ seed_vm_vcpus: 1 # Reduce the memory footprint of the seed VM. seed_vm_memory_mb: "{{ 1 * 1024 }}" -# Use cirros rather than CentOS for the VM. +{% if seed_vm_use_cirros | default(true) %} +# Use cirros rather than distribution cloud image for the VM. 
seed_bootstrap_user: cirros seed_vm_root_image: /opt/cache/files/cirros-0.5.3-x86_64-disk.img -{% if seed_vm_boot_firmware is defined %} -seed_vm_boot_firmware: "{{ seed_vm_boot_firmware }}" -{% endif %} -{% if seed_vm_machine is defined %} -seed_vm_machine: "{{ seed_vm_machine }}" -{% endif %} - # Cirros doesn't load cdom drivers by default. seed_vm_configdrive_device: disk @@ -52,6 +46,16 @@ seed_vm_configdrive_device: disk # /etc/network/interfaces. configdrive_os_family: Debian configdrive_debian_network_interfaces_supports_glob: false +{% else %} +seed_vm_provision_timeout: 600 +{% endif %} + +{% if seed_vm_boot_firmware is defined %} +seed_vm_boot_firmware: "{{ seed_vm_boot_firmware }}" +{% endif %} +{% if seed_vm_machine is defined %} +seed_vm_machine: "{{ seed_vm_machine }}" +{% endif %} # NOTE(mgoddard): CentOS 8 removes interfaces from their bridge during ifdown, # and removes the bridge if there are no interfaces left. When Kayobe bounces diff --git a/playbooks/kayobe-seed-vm-base/seed-group-vars.j2 b/playbooks/kayobe-seed-vm-base/seed-group-vars.j2 index 109707e11..ccf57a4e8 100644 --- a/playbooks/kayobe-seed-vm-base/seed-group-vars.j2 +++ b/playbooks/kayobe-seed-vm-base/seed-group-vars.j2 @@ -1,5 +1,10 @@ --- +{% if seed_vm_use_cirros | default(true) %} aio_interface: eth0 +{% else %} +# Required for official cloud images (CentOS Stream 10, Rocky Linux 10, Ubuntu) +aio_interface: ens2 +{% endif %} # Route via the seed-hypervisor to the outside world. aio_gateway: 192.168.33.4 diff --git a/releasenotes/notes/add-passwords-view-command-2f55d83dca037e3d.yaml b/releasenotes/notes/add-passwords-view-command-2f55d83dca037e3d.yaml new file mode 100644 index 000000000..2739d0ba8 --- /dev/null +++ b/releasenotes/notes/add-passwords-view-command-2f55d83dca037e3d.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Add support for easily viewing the content of ``kolla/passwords.yml`` with + the new command ``kayobe overcloud service passwords view``. 
diff --git a/releasenotes/notes/add-redfish-pxe-args-for-virtual-media-1446188235feaaac.yaml b/releasenotes/notes/add-redfish-pxe-args-for-virtual-media-1446188235feaaac.yaml
new file mode 100644
index 000000000..1f167ece2
--- /dev/null
+++ b/releasenotes/notes/add-redfish-pxe-args-for-virtual-media-1446188235feaaac.yaml
@@ -0,0 +1,10 @@
+---
+features:
+  - |
+    Adds support for Redfish virtual media and PXE boot using a common set of
+    variables. Migration to using ``kolla_ironic_kernel_append_params`` is
+    advised. New boot variables are:
+
+    * ``kolla_ironic_kernel_append_params``
+    * ``kolla_ironic_kernel_append_params_default``
+    * ``kolla_ironic_kernel_append_params_extra``
diff --git a/releasenotes/notes/adds-selinux-update-kernel-param-0f162ec4d55566e8.yaml b/releasenotes/notes/adds-selinux-update-kernel-param-0f162ec4d55566e8.yaml
new file mode 100644
index 000000000..44bea9790
--- /dev/null
+++ b/releasenotes/notes/adds-selinux-update-kernel-param-0f162ec4d55566e8.yaml
@@ -0,0 +1,5 @@
+---
+features:
+  - |
+    Adds the ``selinux_update_kernel_param`` variable which controls whether
+    the selinux kernel command line option is updated.
diff --git a/releasenotes/notes/adds-support-for-configuring-builtin-inspector-04ab4ea4f1a1c3c8.yaml b/releasenotes/notes/adds-support-for-configuring-builtin-inspector-04ab4ea4f1a1c3c8.yaml
new file mode 100644
index 000000000..a721182c8
--- /dev/null
+++ b/releasenotes/notes/adds-support-for-configuring-builtin-inspector-04ab4ea4f1a1c3c8.yaml
@@ -0,0 +1,26 @@
+---
+upgrade:
+  - |
+    ``inspector_processing_hooks`` has been removed. A new variable named
+    ``inspector_hooks`` has been introduced to replace it since the names of
+    the hooks differ between standalone and built-in implementations. See
+    :ironic-doc:`Ironic documentation ` for
+    more details.
+  - |
+    Support for standalone inspector has been removed. All Ironic nodes will
+    need to be migrated from the ``inspector`` inspect-interface to ``agent``.
+ It is recommended that you do this after upgrading, but you will need to + ensure that you add ``inspector`` and ``agent`` to + ``kolla_ironic_enabled_inspect_interfaces`` for the upgrade. Check that + ``kolla_ironic_default_inspect_interface`` is not set to ``inspector``. + See :ironic-doc:`Ironic documentation ` for + more details. + - | + The format of inspection rules has changed. Any custom rules will need + to be updated to the new format. See + :ironic-doc:`Ironic documentation ` + for more details. + - | + The format of the data output from ``kayobe baremetal compute introspection + data save`` and ``kayobe overcloud introspection data save`` has changed. + You may need to update any tooling that is using this data. diff --git a/releasenotes/notes/bifrost-keep-ports-5f2e583a201645b9.yaml b/releasenotes/notes/bifrost-keep-ports-5f2e583a201645b9.yaml new file mode 100644 index 000000000..9041d79a4 --- /dev/null +++ b/releasenotes/notes/bifrost-keep-ports-5f2e583a201645b9.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Allows configuring ``inspector_keep_ports`` for Bifrost via + ``kolla_bifrost_inspector_keep_ports`` (defaults to ``present`` to match + Bifrost). diff --git a/releasenotes/notes/bootstrap-apt-proxy-bb121cf577eaeba4.yaml b/releasenotes/notes/bootstrap-apt-proxy-bb121cf577eaeba4.yaml new file mode 100644 index 000000000..5ed203141 --- /dev/null +++ b/releasenotes/notes/bootstrap-apt-proxy-bb121cf577eaeba4.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Adds support for bootstrapping Python on Ubuntu through a proxy. diff --git a/releasenotes/notes/bug-2111341-706fe5689414a9c1.yaml b/releasenotes/notes/bug-2111341-706fe5689414a9c1.yaml new file mode 100644 index 000000000..fee46a379 --- /dev/null +++ b/releasenotes/notes/bug-2111341-706fe5689414a9c1.yaml @@ -0,0 +1,15 @@ +--- +upgrade: + - | + Deployments using Juniper Junos OS switches are required to update their + configuration according to `Juniper Junos OS documentation + `_. 
+ This is due to the ``junos_config`` module dropping support for the + ``provider`` parameter. +fixes: + - | + Fixes physical network configuration for Juniper Junos OS switches. + Note that users are required to update their configuration according to + `Juniper Junos OS documentation + `_. + `LP#2111341 `__ diff --git a/releasenotes/notes/bug-2111594-656e035ece40fbf6.yaml b/releasenotes/notes/bug-2111594-656e035ece40fbf6.yaml new file mode 100644 index 000000000..f19ce5c55 --- /dev/null +++ b/releasenotes/notes/bug-2111594-656e035ece40fbf6.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + Fixes incorrect handling of ``ansible_python_interpreter`` when using + ``ansible-collection-kolla``. Kayobe now aligns its behavior with + ``kolla-ansible`` to ensure the correct interpreter is used. + `LP#2111594 `__ diff --git a/releasenotes/notes/bug-2115037-f2ed2a4994511832.yaml b/releasenotes/notes/bug-2115037-f2ed2a4994511832.yaml new file mode 100644 index 000000000..5031882db --- /dev/null +++ b/releasenotes/notes/bug-2115037-f2ed2a4994511832.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + Fixes the user prompt for deprovisioning multiple overcloud hosts at once. + Previously, the prompt would only confirm deprovisioning for one host. It + will now correctly apply to all hosts that have been targeted. + `LP#2115037 `__ diff --git a/releasenotes/notes/bug-2116318-44f0c022cde1e686.yaml b/releasenotes/notes/bug-2116318-44f0c022cde1e686.yaml new file mode 100644 index 000000000..8851717e4 --- /dev/null +++ b/releasenotes/notes/bug-2116318-44f0c022cde1e686.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fixes duplicate ``OS_CACERT`` lines in ``public-openrc.sh`` when both admin + and public cacert variables are set. 
+ `LP#2116318 `__ diff --git a/releasenotes/notes/bug-2117084-8d1eaa375df1d1f8.yaml b/releasenotes/notes/bug-2117084-8d1eaa375df1d1f8.yaml new file mode 100644 index 000000000..1e275e202 --- /dev/null +++ b/releasenotes/notes/bug-2117084-8d1eaa375df1d1f8.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fixes configuration of backend TLS when network nodes are separate from + controllers. + `LP#2117084 `__ diff --git a/releasenotes/notes/bug-2119921-b23dc13147e6bda4.yaml b/releasenotes/notes/bug-2119921-b23dc13147e6bda4.yaml new file mode 100644 index 000000000..cdc21b1cc --- /dev/null +++ b/releasenotes/notes/bug-2119921-b23dc13147e6bda4.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fixes wrong name of ``gpgkey`` used for EPEL repositories when + ``dnf_use_local_mirror`` is enabled. + `LP#2119921 `__ diff --git a/releasenotes/notes/bug-2121588-ad6d8b33e3fcaff9.yaml b/releasenotes/notes/bug-2121588-ad6d8b33e3fcaff9.yaml new file mode 100644 index 000000000..ff8b82e06 --- /dev/null +++ b/releasenotes/notes/bug-2121588-ad6d8b33e3fcaff9.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fixes CentOS Stream seed and infra VMs not booting by switching to an + EFI-compatible image. + `LP#2121588 `__ diff --git a/releasenotes/notes/bug-2133489-1b83b7e24655caff.yaml b/releasenotes/notes/bug-2133489-1b83b7e24655caff.yaml new file mode 100644 index 000000000..3173fb921 --- /dev/null +++ b/releasenotes/notes/bug-2133489-1b83b7e24655caff.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixes generation of Bifrost host variable files when ``ipv4_gateway`` is + undefined. 
`LP#2133489 `__ diff --git a/releasenotes/notes/build-neutron-bgp-dragent-a6b28ce7e22d8145.yaml b/releasenotes/notes/build-neutron-bgp-dragent-a6b28ce7e22d8145.yaml new file mode 100644 index 000000000..cfe93cbda --- /dev/null +++ b/releasenotes/notes/build-neutron-bgp-dragent-a6b28ce7e22d8145.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + The ``neutron-bgp-dragent`` container will now be built by default when + ``kolla_enable_neutron_bgp_dragent`` is true. diff --git a/releasenotes/notes/build-rocky-ipa-f59e45e6fea6a4c4.yaml b/releasenotes/notes/build-rocky-ipa-f59e45e6fea6a4c4.yaml new file mode 100644 index 000000000..6d165b117 --- /dev/null +++ b/releasenotes/notes/build-rocky-ipa-f59e45e6fea6a4c4.yaml @@ -0,0 +1,11 @@ +--- +upgrade: + - | + Rocky Linux based Ironic Python Agent images are now built in Rocky Linux + based deployments rather than CentOS Stream: + ``ipa_build_dib_elements_default`` uses ``rocky-container`` element; + ``ipa_build_dib_packages`` includes the ``python3-yaml`` package; + ``ipa_build_dib_env_default`` uses specified ``os_release``; + and container runtime is set to ``container_engine``. + - | + ``baremetal`` element now included in ``ipa_build_dib_elements_default`` diff --git a/releasenotes/notes/bump-ansible-12-536bc4a3ff55dc3b.yaml b/releasenotes/notes/bump-ansible-12-536bc4a3ff55dc3b.yaml new file mode 100644 index 000000000..90ee0665e --- /dev/null +++ b/releasenotes/notes/bump-ansible-12-536bc4a3ff55dc3b.yaml @@ -0,0 +1,6 @@ +--- +upgrade: + - | + Updates the maximum supported version of Ansible from 11 (ansible-core + 2.18) to 12 (ansible-core 2.19). The minimum supported version is updated + from 10.x to 11.x. This is true for both Kayobe and Kolla Ansible.
diff --git a/releasenotes/notes/bump-ansible-13-07ef8be6a3fa4529.yaml b/releasenotes/notes/bump-ansible-13-07ef8be6a3fa4529.yaml new file mode 100644 index 000000000..d7edd74e5 --- /dev/null +++ b/releasenotes/notes/bump-ansible-13-07ef8be6a3fa4529.yaml @@ -0,0 +1,6 @@ +--- +upgrade: + - | + Updates the maximum supported version of Ansible from 12 (ansible-core + 2.19) to 13 (ansible-core 2.20). The minimum supported version is updated + from 11.x to 12.x. This is true for both Kayobe and Kolla Ansible. diff --git a/releasenotes/notes/bump-dellemc-os10-1.2.7-0107ff39718372c5.yaml b/releasenotes/notes/bump-dellemc-os10-1.2.7-0107ff39718372c5.yaml new file mode 100644 index 000000000..2d3188783 --- /dev/null +++ b/releasenotes/notes/bump-dellemc-os10-1.2.7-0107ff39718372c5.yaml @@ -0,0 +1,6 @@ +--- +upgrade: + - | + Bumps the ``dellemc.os10`` collection to version 1.2.7. This resolves an + issue with applying some configuration blocks. + `LP#2115121 `__ diff --git a/releasenotes/notes/bump-jriguera-configdrive-e01c6f347507ee6f.yaml b/releasenotes/notes/bump-jriguera-configdrive-e01c6f347507ee6f.yaml new file mode 100644 index 000000000..599dab288 --- /dev/null +++ b/releasenotes/notes/bump-jriguera-configdrive-e01c6f347507ee6f.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + Fixes an issue where incorrect network-data.json would + be generated when interfaces without IP addresses are + attached to infrastructure VMs. + `LP#2118403 `__ diff --git a/releasenotes/notes/bump-michaelrigart.interfaces-1.15.6-c5a85320e5b3f4e3.yaml b/releasenotes/notes/bump-michaelrigart.interfaces-1.15.6-c5a85320e5b3f4e3.yaml new file mode 100644 index 000000000..7d2259c15 --- /dev/null +++ b/releasenotes/notes/bump-michaelrigart.interfaces-1.15.6-c5a85320e5b3f4e3.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Bumps the MichaelRigart.interfaces role to v1.15.6 to fix compatibility + with CentOS Stream 10 and Rocky Linux 10. 
diff --git a/releasenotes/notes/bump-stackhpc-libvirt-vm-1.16.3-48680a17eeb5f632.yaml b/releasenotes/notes/bump-stackhpc-libvirt-vm-1.16.3-48680a17eeb5f632.yaml new file mode 100644 index 000000000..fd50372fe --- /dev/null +++ b/releasenotes/notes/bump-stackhpc-libvirt-vm-1.16.3-48680a17eeb5f632.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Bumps the ``stackhpc.libvirt-vm`` role to v1.16.3 to fix seed and infra VM + provisioning failures on Rocky Linux 9.6. diff --git a/releasenotes/notes/bump-stackhpc-linux-1.3.4-0ae3e0ec9f4c25e2.yaml b/releasenotes/notes/bump-stackhpc-linux-1.3.4-0ae3e0ec9f4c25e2.yaml new file mode 100644 index 000000000..e300e9752 --- /dev/null +++ b/releasenotes/notes/bump-stackhpc-linux-1.3.4-0ae3e0ec9f4c25e2.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixes failure to activate SR-IOV on GPU devices by bumping the + ``stackhpc.linux`` collection to v1.3.4. diff --git a/releasenotes/notes/bump-stackhpc-linux-v1.5.1-c002b7c99921cd20.yaml b/releasenotes/notes/bump-stackhpc-linux-v1.5.1-c002b7c99921cd20.yaml new file mode 100644 index 000000000..a15eb2584 --- /dev/null +++ b/releasenotes/notes/bump-stackhpc-linux-v1.5.1-c002b7c99921cd20.yaml @@ -0,0 +1,9 @@ +--- +features: + - | + Bumps ``stackhpc.linux`` Ansible collection to v1.5.1. This adds + support for configuring MIG devices without creating vGPUs. +fixes: + - | + Bumps ``stackhpc.linux`` Ansible collection to v1.5.1. This fixes + race conditions in setup of vGPU SR-IOV devices. diff --git a/releasenotes/notes/centos-bootstrap-user-4ee02dee551c62a6.yaml b/releasenotes/notes/centos-bootstrap-user-4ee02dee551c62a6.yaml new file mode 100644 index 000000000..045a74b19 --- /dev/null +++ b/releasenotes/notes/centos-bootstrap-user-4ee02dee551c62a6.yaml @@ -0,0 +1,10 @@ +--- +upgrade: + - | + The default bootstrap user has been changed to ``cloud-user`` if + ``os_distribution`` is set to ``centos``. Set ``*_bootstrap_user`` + variables to ``centos`` to retain existing behaviour. 
+fixes: + - | + The default bootstrap user has been changed to ``cloud-user`` if + ``os_distribution`` is set to ``centos``, to match official cloud images. diff --git a/releasenotes/notes/change-IPA-compression-algorithm-to-zstd-19-b3860e0a24ca824e.yaml b/releasenotes/notes/change-IPA-compression-algorithm-to-zstd-19-b3860e0a24ca824e.yaml new file mode 100644 index 000000000..1942bd165 --- /dev/null +++ b/releasenotes/notes/change-IPA-compression-algorithm-to-zstd-19-b3860e0a24ca824e.yaml @@ -0,0 +1,14 @@ +--- +features: + - | + Changes the IPA (Ironic Python Agent) image compression algorithm from + the default ``gzip`` to ``zstd``. This improves provisioning + performance by reducing the size of the IPA boot ISO transferred from + the Ironic conductor to the bare metal nodes. +upgrade: + - | + Changes the IPA (Ironic Python Agent) image compression algorithm from + default ``gzip`` to ``zstd``. The ``ipa_build_dib_env_default`` dictionary + now includes ``DIB_IPA_COMPRESS_CMD`` set to ``zstd -19``. + The default ``ipa_build_dib_host_packages_extra`` has been changed from + none to ``['zstd']``. diff --git a/releasenotes/notes/control-host-configure-ca4bb8c4de59c370.yaml b/releasenotes/notes/control-host-configure-ca4bb8c4de59c370.yaml new file mode 100644 index 000000000..681a4ae08 --- /dev/null +++ b/releasenotes/notes/control-host-configure-ca4bb8c4de59c370.yaml @@ -0,0 +1,10 @@ +--- +features: + - | + Adds support for managing the Ansible control host configuration. This is + provided by the new ``kayobe control host configure`` command, and uses the + existing host configuration features in Kayobe. + + Also provided is a ``kayobe control host command run`` command for running + commands on the Ansible control host, and a ``kayobe control host package + update`` command for updating its OS packages. 
diff --git a/releasenotes/notes/dev-tools-packages-select-filter-17f432df42762625.yaml b/releasenotes/notes/dev-tools-packages-select-filter-17f432df42762625.yaml new file mode 100644 index 000000000..255cc8301 --- /dev/null +++ b/releasenotes/notes/dev-tools-packages-select-filter-17f432df42762625.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + Fixes support for empty strings in the ``dev-tools`` package lists. This + allows using expressions such as ``{{ 'foo' if os_distribution == 'rocky' + else '' }}``. + `LP#2115000 `__ diff --git a/releasenotes/notes/dns-config-ironic-dhcp-967a99a01005a6bc.yaml b/releasenotes/notes/dns-config-ironic-dhcp-967a99a01005a6bc.yaml new file mode 100644 index 000000000..5d234371e --- /dev/null +++ b/releasenotes/notes/dns-config-ironic-dhcp-967a99a01005a6bc.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Configure inspection network DNS servers to enable Ironic inspection + when ``kolla_internal_fqdn`` is set. diff --git a/releasenotes/notes/drop-ironic-inspector-ce12abb330f5398b.yaml b/releasenotes/notes/drop-ironic-inspector-ce12abb330f5398b.yaml new file mode 100644 index 000000000..bea9b3d7f --- /dev/null +++ b/releasenotes/notes/drop-ironic-inspector-ce12abb330f5398b.yaml @@ -0,0 +1,4 @@ +--- +upgrade: + - | + Support for deploying ironic-inspector has been dropped. diff --git a/releasenotes/notes/drop-kolla-tags-and-kolla-limits-254faef5584176e1.yaml b/releasenotes/notes/drop-kolla-tags-and-kolla-limits-254faef5584176e1.yaml new file mode 100644 index 000000000..854451fbd --- /dev/null +++ b/releasenotes/notes/drop-kolla-tags-and-kolla-limits-254faef5584176e1.yaml @@ -0,0 +1,22 @@ +--- +features: + - | + Added the tag ``bifrost`` to ``kolla-bifrost.yml`` so that we can easily + limit to Bifrost in ``kayobe seed service deploy``. + - | + Removed the options ``--kolla-tags`` and ``--kolla-limit`` from all commands. + Regular ``--tags`` and ``--limit`` will now be passed directly to the + Kolla-Ansible invocations.
Added the tag ``kayobe-generate-config`` to + ``kolla-ansible.yml`` and ``kolla-openstack.yml``. This tag is now always + called, to allow for limiting to OpenStack services with just one tag, e.g. + ``kayobe overcloud service deploy -t nova``. You can still skip this with + ``--skip-tags kayobe-generate-config``. +upgrade: + - | + Removed the options ``--kolla-tags`` and ``--kolla-limit`` from all commands. + Regular ``--tags`` and ``--limit`` will now be passed directly to the + Kolla-Ansible invocations. Added the tag ``kayobe-generate-config`` to + ``kolla-ansible.yml`` and ``kolla-openstack.yml``. This tag is now always + called, to allow for limiting to OpenStack services with just one tag, e.g. + ``kayobe overcloud service deploy -t nova``. You can still skip this with + ``--skip-tags kayobe-generate-config``. diff --git a/releasenotes/notes/drop-python310-and-311-b284d9a4d8d91324.yaml b/releasenotes/notes/drop-python310-and-311-b284d9a4d8d91324.yaml new file mode 100644 index 000000000..e72c8148a --- /dev/null +++ b/releasenotes/notes/drop-python310-and-311-b284d9a4d8d91324.yaml @@ -0,0 +1,5 @@ +--- +upgrade: + - | + Python 3.10 and 3.11 are no longer supported on the control host. Use + Python 3.12 as a minimum version for the Kayobe virtualenv. diff --git a/releasenotes/notes/fix-baremetal-serial-venv-946b4b7dd191662b.yaml b/releasenotes/notes/fix-baremetal-serial-venv-946b4b7dd191662b.yaml new file mode 100644 index 000000000..eedc72012 --- /dev/null +++ b/releasenotes/notes/fix-baremetal-serial-venv-946b4b7dd191662b.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixes ``executable not found`` errors on Rocky Linux by calling virtualenv + as a Python module during baremetal compute serial console setup.
diff --git a/releasenotes/notes/fix-bifrost-invenory-05f8a92915998f09.yaml b/releasenotes/notes/fix-bifrost-invenory-05f8a92915998f09.yaml new file mode 100644 index 000000000..c616a971a --- /dev/null +++ b/releasenotes/notes/fix-bifrost-invenory-05f8a92915998f09.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixes an issue where the Bifrost hostvars file was incorrectly generated by Kayobe. + `LP#2045927 `__ diff --git a/releasenotes/notes/fix-deploy-containers-24da0992b56a10db.yaml b/releasenotes/notes/fix-deploy-containers-24da0992b56a10db.yaml new file mode 100644 index 000000000..1421c2bfc --- /dev/null +++ b/releasenotes/notes/fix-deploy-containers-24da0992b56a10db.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixes an infinite loop when deploying seed containers. + `LP#2114845 `__ diff --git a/releasenotes/notes/fix-empty-registry-bug.yaml b/releasenotes/notes/fix-empty-registry-bug.yaml new file mode 100644 index 000000000..8efe04310 --- /dev/null +++ b/releasenotes/notes/fix-empty-registry-bug.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixes an issue building images with a regex when no image registry is set. + `LP#2112646 `__ diff --git a/releasenotes/notes/fix-external-connectivity-check-43d232b52f43ed93.yaml b/releasenotes/notes/fix-external-connectivity-check-43d232b52f43ed93.yaml new file mode 100644 index 000000000..3ee7d838f --- /dev/null +++ b/releasenotes/notes/fix-external-connectivity-check-43d232b52f43ed93.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Skip external connectivity check behind a proxy.
diff --git a/releasenotes/notes/fix-networking-connectivity-check-with-partial-no-ip-5efcc8ed6e76267e.yaml b/releasenotes/notes/fix-networking-connectivity-check-with-partial-no-ip-5efcc8ed6e76267e.yaml new file mode 100644 index 000000000..44ee8e262 --- /dev/null +++ b/releasenotes/notes/fix-networking-connectivity-check-with-partial-no-ip-5efcc8ed6e76267e.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fixes network connectivity check when a subset of hosts have the ``no_ip`` + property set via group or host variables. + `LP#2120918 `__ diff --git a/releasenotes/notes/fix-swap-support-35ccff76a9b46b4c.yaml b/releasenotes/notes/fix-swap-support-35ccff76a9b46b4c.yaml new file mode 100644 index 000000000..574bf101c --- /dev/null +++ b/releasenotes/notes/fix-swap-support-35ccff76a9b46b4c.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixes swap configuration for the seed hypervisor, seed and infra VM hosts. + `LP#2138800 `__ diff --git a/releasenotes/notes/fix-working-dir-url-encoding-5f50d66547858e37.yaml b/releasenotes/notes/fix-working-dir-url-encoding-5f50d66547858e37.yaml new file mode 100644 index 000000000..f5a4ebae0 --- /dev/null +++ b/releasenotes/notes/fix-working-dir-url-encoding-5f50d66547858e37.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fixes an issue where the working directory contains symbols such as ``@``. + The previous behaviour tries to load files with url encoded symbols. + `LP#2129687 `__ diff --git a/releasenotes/notes/fixes-cumulus-5.13-74e0d08675404f46.yaml b/releasenotes/notes/fixes-cumulus-5.13-74e0d08675404f46.yaml new file mode 100644 index 000000000..34401e5cf --- /dev/null +++ b/releasenotes/notes/fixes-cumulus-5.13-74e0d08675404f46.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + Bumps version of ``nvidia.nvue`` Ansible collection from ``1.2.6`` to + ``1.2.9``. This fixes an issue where switch configuration could not be + applied to switches running Cumulus Linux 5.13. See `LP#2131677 + `__ for more details. 
diff --git a/releasenotes/notes/fixes-dib-image-build-18f29d072b913669.yaml b/releasenotes/notes/fixes-dib-image-build-18f29d072b913669.yaml new file mode 100644 index 000000000..4639a4add --- /dev/null +++ b/releasenotes/notes/fixes-dib-image-build-18f29d072b913669.yaml @@ -0,0 +1,10 @@ +--- +fixes: + - | + Fixes an issue building diskimage-builder images when EPEL is disabled. + See `LP#2141684 `_ for more + details. + - | + Fixes an issue building diskimage-builder images when using the podman + container engine, See `LP#2142501 + `_ for more details. diff --git a/releasenotes/notes/fixes-regression-network-connectivity-no-ip-a37a2d97379a93e7.yaml b/releasenotes/notes/fixes-regression-network-connectivity-no-ip-a37a2d97379a93e7.yaml new file mode 100644 index 000000000..c75267ba7 --- /dev/null +++ b/releasenotes/notes/fixes-regression-network-connectivity-no-ip-a37a2d97379a93e7.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixes a regression in network connectivity check when using the ``no_ip`` + attribute. `LP#2125560 `__ diff --git a/releasenotes/notes/host-libvirt-ceph-squid-6c835edd7ac422c8.yaml b/releasenotes/notes/host-libvirt-ceph-squid-6c835edd7ac422c8.yaml new file mode 100644 index 000000000..afb2a819a --- /dev/null +++ b/releasenotes/notes/host-libvirt-ceph-squid-6c835edd7ac422c8.yaml @@ -0,0 +1,5 @@ +--- +upgrade: + - | + Bumps Ceph package repository to Squid. This repository is only configured + when using host libvirt. diff --git a/releasenotes/notes/infra-vms-ntp-group-a2bd39f7c659a8d3.yaml b/releasenotes/notes/infra-vms-ntp-group-a2bd39f7c659a8d3.yaml new file mode 100644 index 000000000..870d5ec28 --- /dev/null +++ b/releasenotes/notes/infra-vms-ntp-group-a2bd39f7c659a8d3.yaml @@ -0,0 +1,7 @@ +--- +issues: + - | + NTP configuration was missing from infrastructure VMs because ``infra-vms`` + was not present under the ``[ntp]`` group. 
Operators should ensure the most + recent upstream ``kayobe-config`` is merged into their local configuration + to resolve this issue. diff --git a/releasenotes/notes/kolla-check-diff--extra-opts-03bc183075f59ada.yaml b/releasenotes/notes/kolla-check-diff--extra-opts-03bc183075f59ada.yaml new file mode 100644 index 000000000..8c418f91a --- /dev/null +++ b/releasenotes/notes/kolla-check-diff--extra-opts-03bc183075f59ada.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fixes ``--check`` and ``--diff`` CLI arguments not being passed into + Kolla Ansible commands. + `LP#2123834 `__ diff --git a/releasenotes/notes/kolla-ironic-inspection-network-6cd3977447492236.yaml b/releasenotes/notes/kolla-ironic-inspection-network-6cd3977447492236.yaml new file mode 100644 index 000000000..605d473a8 --- /dev/null +++ b/releasenotes/notes/kolla-ironic-inspection-network-6cd3977447492236.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Adds support for ``kolla_ironic_inspection_network`` which will be created + in Neutron for Ironic Inspection purposes (defaults to using provisioning + network for backwards compatibility). diff --git a/releasenotes/notes/make-bifrost-optional-32d89388cb2e578e.yaml b/releasenotes/notes/make-bifrost-optional-32d89388cb2e578e.yaml new file mode 100644 index 000000000..5530896bb --- /dev/null +++ b/releasenotes/notes/make-bifrost-optional-32d89388cb2e578e.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Adds the ``kolla_enable_bifrost`` variable to make deploying Bifrost + optional. It defaults to ``true``, maintaining the current behavior. diff --git a/releasenotes/notes/override-kolla_sources-url-b268b5c87b051ea0.yaml b/releasenotes/notes/override-kolla_sources-url-b268b5c87b051ea0.yaml new file mode 100644 index 000000000..e3cd87cd7 --- /dev/null +++ b/releasenotes/notes/override-kolla_sources-url-b268b5c87b051ea0.yaml @@ -0,0 +1,7 @@ +--- +features: + - | + Adds support for ``url`` sources in ``kolla_sources``.
Previously the + ``version`` and ``sha256`` attributes would not be generated in + ``kolla-build.conf``, preventing the override for sources with ``url`` + type. Only ``git`` and ``local`` types worked. diff --git a/releasenotes/notes/remove-inspector-rule-legacy-deploy-kernel-1a4f1f5db58766ee.yaml b/releasenotes/notes/remove-inspector-rule-legacy-deploy-kernel-1a4f1f5db58766ee.yaml new file mode 100644 index 000000000..ff4850587 --- /dev/null +++ b/releasenotes/notes/remove-inspector-rule-legacy-deploy-kernel-1a4f1f5db58766ee.yaml @@ -0,0 +1,12 @@ +--- +upgrade: + - | + Removes the Bifrost introspection rule that was updating the deploy kernel + location from the legacy ``ipa.vmlinuz`` path to the new ``ipa.kernel`` + path. If this migration has not yet been applied, update the deployment + kernel location manually with: + + ``OS_CLOUD=bifrost baremetal node set --driver-info deploy_kernel= $NODE_UUID_OR_NAME`` + + This is only relevant for deployments with nodes discovered with the + OpenStack Train release or earlier. diff --git a/releasenotes/notes/remove-linuxbridge-0112ace7c0bff24b.yaml b/releasenotes/notes/remove-linuxbridge-0112ace7c0bff24b.yaml new file mode 100644 index 000000000..f5625e782 --- /dev/null +++ b/releasenotes/notes/remove-linuxbridge-0112ace7c0bff24b.yaml @@ -0,0 +1,5 @@ +--- +upgrade: + - | + Support for Linux Bridge mechanism driver has been removed. The driver was + already removed from neutron. diff --git a/releasenotes/notes/removes-inspection-store-7d969c6200787282.yaml b/releasenotes/notes/removes-inspection-store-7d969c6200787282.yaml new file mode 100644 index 000000000..f1634e8be --- /dev/null +++ b/releasenotes/notes/removes-inspection-store-7d969c6200787282.yaml @@ -0,0 +1,12 @@ +--- +upgrade: + - | + The inspection_store container has been removed since it was tied to the + standalone Ironic Inspector implementation and support for deploying that + service has been dropped. 
The new inspection implementation built into + Ironic stores a similar set of data in the database. Prior to upgrading, + you may wish to dump any data with the ``kayobe overcloud introspection data + save`` command. After the upgrade the container should be manually removed + from the first controller using either ``docker stop inspection_store && + docker rm inspection_store`` or ``sudo podman stop inspection_store && sudo + podman rm inspection_store``. diff --git a/releasenotes/notes/rocky-10-8d56e4bfffe39c08.yaml b/releasenotes/notes/rocky-10-8d56e4bfffe39c08.yaml new file mode 100644 index 000000000..eb5abc988 --- /dev/null +++ b/releasenotes/notes/rocky-10-8d56e4bfffe39c08.yaml @@ -0,0 +1,14 @@ +--- +features: + - | + Adds support for CentOS Stream 10 and Rocky Linux 10 as host operating + systems and base container images. These are the only major versions of + CentOS Stream and Rocky Linux supported from the 2025.2 Flamingo release. + The 2025.1 Epoxy release will support both Rocky Linux 9 and 10 hosts to + provide a route for migration. +upgrade: + - | + CentOS Stream 9 and Rocky Linux 9 are no longer supported as host operating + systems or base container images. Users should migrate to CentOS Stream 10 + or Rocky Linux 10. The 2025.1 Epoxy release will support both Rocky Linux 9 + and 10 hosts to provide a route for migration.
diff --git a/releasenotes/notes/support-fail2ban-b25a26d66cfbcaaf.yaml b/releasenotes/notes/support-fail2ban-b25a26d66cfbcaaf.yaml new file mode 100644 index 000000000..975239e50 --- /dev/null +++ b/releasenotes/notes/support-fail2ban-b25a26d66cfbcaaf.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Adds support for installing and configuring fail2ban. See the docs + http://docs.openstack.org/kayobe/latest/configuration/reference/hosts.html#fail2ban + for more information. diff --git a/releasenotes/notes/tuned-ubuntu-c25b484a19918ad9.yaml b/releasenotes/notes/tuned-ubuntu-c25b484a19918ad9.yaml new file mode 100644 index 000000000..cd66f2c0c --- /dev/null +++ b/releasenotes/notes/tuned-ubuntu-c25b484a19918ad9.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Adds support for configuring ``tuned`` on Ubuntu hosts. diff --git a/releasenotes/source/2024.1.rst b/releasenotes/source/2024.1.rst index 4977a4f1a..6896656be 100644 --- a/releasenotes/source/2024.1.rst +++ b/releasenotes/source/2024.1.rst @@ -3,4 +3,4 @@ =========================== .. release-notes:: - :branch: stable/2024.1 + :branch: unmaintained/2024.1 diff --git a/releasenotes/source/2025.1.rst b/releasenotes/source/2025.1.rst new file mode 100644 index 000000000..3add0e53a --- /dev/null +++ b/releasenotes/source/2025.1.rst @@ -0,0 +1,6 @@ +=========================== +2025.1 Series Release Notes +=========================== + +.. release-notes:: + :branch: stable/2025.1 diff --git a/releasenotes/source/2025.2.rst b/releasenotes/source/2025.2.rst new file mode 100644 index 000000000..4dae18d86 --- /dev/null +++ b/releasenotes/source/2025.2.rst @@ -0,0 +1,6 @@ +=========================== +2025.2 Series Release Notes +=========================== + +.. 
release-notes:: + :branch: stable/2025.2 diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst index 97ae9bdb6..6cd206129 100644 --- a/releasenotes/source/index.rst +++ b/releasenotes/source/index.rst @@ -6,6 +6,8 @@ Kayobe Release Notes :maxdepth: 1 unreleased + 2025.2 + 2025.1 2024.2 2024.1 2023.2 diff --git a/requirements.txt b/requirements.txt index c4141c12d..67091f5ae 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ pbr>=2.0 # Apache-2.0 Jinja2>3 # BSD -ansible>=10,<12 # GPLv3 +ansible>=12,<14 # GPLv3 cliff>=3.1.0 # Apache netaddr!=0.7.16,>=0.7.13 # BSD PyYAML>=3.10.0 # MIT diff --git a/requirements.yml b/requirements.yml index a6739af7a..5974a41a5 100644 --- a/requirements.yml +++ b/requirements.yml @@ -2,50 +2,58 @@ collections: - name: https://opendev.org/openstack/ansible-collection-kolla type: git - version: stable/2025.1 + version: master - name: community.docker - version: 3.11.0 + version: 5.0.5 + - name: community.network + version: 5.1.0 + - name: dellemc.os6 + version: 1.0.7 + - name: dellemc.os9 + version: 1.0.4 - name: dellemc.os10 - version: 1.1.1 + version: 1.2.7 - name: nvidia.nvue - version: 1.2.6 + version: 1.2.9 - name: openstack.cloud version: '<3' - name: stackhpc.linux - version: 1.3.1 + version: 1.5.2 - name: stackhpc.network version: 1.0.0 - name: stackhpc.openstack - version: 0.2.2 + version: 0.10.1 roles: - src: ahuffman.resolv - version: 1.3.1 + version: 1.3.2 - src: giovtorres.tuned - version: 1.1.1 - - src: jriguera.configdrive - # There are no versioned releases of this role. 
- version: acd08fd126d0e442ab8b3bc518e37761390d8c2f + version: 2.0.2 + - src: git+https://github.com/stackhpc/ansible-role-configdrive.git + name: jriguera.configdrive + version: fb199247333e72e38a9d414cf7b6144daa645477 - src: MichaelRigart.interfaces - version: v1.15.5 + version: v1.16.1 - src: mrlesmithjr.chrony version: v0.1.6 - src: mrlesmithjr.manage_lvm - version: v0.2.8 + version: v0.2.13 - src: mrlesmithjr.mdadm - version: v0.1.1 + version: v0.1.9 + - src: robertdebock.fail2ban + version: 5.0.6 - src: singleplatform-eng.users - version: v1.2.5 + version: v1.2.6 - src: stackhpc.drac version: 1.1.6 - src: stackhpc.drac-facts - version: 1.0.0 + version: v1.0.1 - src: stackhpc.libvirt-host - version: v1.12.1 + version: v1.15.0 - src: stackhpc.libvirt-vm - version: v1.16.2 + version: v1.16.3 - src: stackhpc.luks - version: 0.4.2 + version: 0.4.4 - src: stackhpc.os-ironic-state version: v1.3.1 - src: stackhpc.timezone diff --git a/roles/kayobe-diagnostics/files/get_logs.sh b/roles/kayobe-diagnostics/files/get_logs.sh index 2b2b54964..cc880b9d6 100644 --- a/roles/kayobe-diagnostics/files/get_logs.sh +++ b/roles/kayobe-diagnostics/files/get_logs.sh @@ -151,6 +151,11 @@ copy_logs() { cp /opt/kayobe/images/deployment_image/deployment_image.stderr /opt/kayobe/images/deployment_image/deployment_image.stdout ${LOG_DIR}/kayobe/ fi + # Baremetal inspection data + if [ -d "/tmp/baremetal-compute-inspection-data" ]; then + cp -rf /tmp/baremetal-compute-inspection-data ${LOG_DIR} + fi + # Rename files to .txt; this is so that when displayed via # logs.openstack.org clicking results in the browser shows the # files, rather than trying to send it to another app or make you diff --git a/setup.cfg b/setup.cfg index 46ea41577..81bc91a1d 100644 --- a/setup.cfg +++ b/setup.cfg @@ -6,7 +6,7 @@ description_file = author = OpenStack author_email = openstack-discuss@lists.openstack.org home_page = https://docs.openstack.org/kayobe/latest/ -python_requires = >=3.10 +python_requires = 
>=3.12 license = Apache License, Version 2.0 classifier = Environment :: OpenStack @@ -18,9 +18,8 @@ classifier = Programming Language :: Python :: Implementation :: CPython Programming Language :: Python :: 3 :: Only Programming Language :: Python :: 3 - Programming Language :: Python :: 3.10 - Programming Language :: Python :: 3.11 Programming Language :: Python :: 3.12 + Programming Language :: Python :: 3.13 [files] packages = @@ -48,6 +47,9 @@ kayobe.cli= baremetal_compute_serial_console_enable = kayobe.cli.commands:BaremetalComputeSerialConsoleEnable baremetal_compute_serial_console_disable = kayobe.cli.commands:BaremetalComputeSerialConsoleDisable control_host_bootstrap = kayobe.cli.commands:ControlHostBootstrap + control_host_command_run = kayobe.cli.commands:ControlHostCommandRun + control_host_configure = kayobe.cli.commands:ControlHostConfigure + control_host_package_update = kayobe.cli.commands:ControlHostPackageUpdate control_host_upgrade = kayobe.cli.commands:ControlHostUpgrade configuration_dump = kayobe.cli.commands:ConfigurationDump environment_create = kayobe.cli.commands:EnvironmentCreate @@ -77,6 +79,7 @@ kayobe.cli= overcloud_service_deploy = kayobe.cli.commands:OvercloudServiceDeploy overcloud_service_deploy_containers = kayobe.cli.commands:OvercloudServiceDeployContainers overcloud_service_destroy = kayobe.cli.commands:OvercloudServiceDestroy + overcloud_service_passwords_view = kayobe.cli.commands:OvercloudServicePasswordsView overcloud_service_prechecks = kayobe.cli.commands:OvercloudServicePrechecks overcloud_service_reconfigure = kayobe.cli.commands:OvercloudServiceReconfigure overcloud_service_stop = kayobe.cli.commands:OvercloudServiceStop @@ -127,6 +130,12 @@ kayobe.cli.baremetal_compute_serial_console_disable = hooks = kayobe.cli.commands:HookDispatcher kayobe.cli.control_host_bootstrap = hooks = kayobe.cli.commands:HookDispatcher +kayobe.cli.control_host_command_run = + hooks = kayobe.cli.commands:HookDispatcher 
+kayobe.cli.control_host_configure = + hooks = kayobe.cli.commands:HookDispatcher +kayobe.cli.control_host_package_update = + hooks = kayobe.cli.commands:HookDispatcher kayobe.cli.control_host_upgrade = hooks = kayobe.cli.commands:HookDispatcher kayobe.cli.configuration_dump = @@ -195,6 +204,8 @@ kayobe.cli.overcloud_service_upgrade = hooks = kayobe.cli.commands:HookDispatcher kayobe.cli.overcloud_swift_rings_generate = hooks = kayobe.cli.commands:HookDispatcher +kayobe.cli.overcloud_passwords_view = + hooks = kayobe.cli.commands:HookDispatcher kayobe.cli.physical_network_configure = hooks = kayobe.cli.commands:HookDispatcher kayobe.cli.playbook_run = diff --git a/test-requirements.txt b/test-requirements.txt index 9caa5ace2..0f57abc8e 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -2,7 +2,8 @@ # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. -ansible-lint>=3.0.0,!=4.3.0 # MIT +ansible-lint>=26.0.0,<27.0.0 # MIT +bandit>=1.1.0 # Apache-2.0 bashate>=0.2 # Apache-2.0 coverage>=4.0 # Apache-2.0 doc8 # Apache-2.0 diff --git a/tools/kolla-feature-flags.sh b/tools/kolla-feature-flags.sh index 8d4277a87..73a34f950 100755 --- a/tools/kolla-feature-flags.sh +++ b/tools/kolla-feature-flags.sh @@ -10,12 +10,12 @@ set -e set -o pipefail KOLLA_ANSIBLE_SRC=$1 -KOLLA_GROUP_VARS_ALL=${KOLLA_ANSIBLE_SRC}/ansible/group_vars/all.yml +KOLLA_GROUP_VARS_ALL=${KOLLA_ANSIBLE_SRC}/ansible/group_vars/all -if [[ ! -f $KOLLA_GROUP_VARS_ALL ]]; then +if [[ ! -d $KOLLA_GROUP_VARS_ALL ]]; then echo "Usage: $0 " exit 1 fi # Find all feature flags, strip the enable_ prefix and value, sort. 
-cat ${KOLLA_GROUP_VARS_ALL} | grep '^enable_'| sed -e 's/enable_\(.*\):.*/ - \1/' | sort +cat ${KOLLA_GROUP_VARS_ALL}/*.yml | grep '^enable_'| sed -e 's/enable_\(.*\):.*/ - \1/' | sort diff --git a/tools/run-bashate.sh b/tools/run-bashate.sh index 2b5b514cf..dd35ba737 100755 --- a/tools/run-bashate.sh +++ b/tools/run-bashate.sh @@ -6,4 +6,6 @@ ROOT=$(readlink -fn $(dirname $0)/.. ) # NOTE(priteau): ignore E010 because it fails on one-liner bash loops: # https://bugs.launchpad.net/bash8/+bug/1895102 find $ROOT -not -wholename \*.tox/\* -and -not -wholename \*.test/\* \ + -and -not -wholename \*.ansible/\* -and -not -wholename \*venv\* \ + -and -not -wholename \*/ansible/roles/\*.\*/\* \ -and -name \*.sh -print0 | xargs -0 bashate -v --ignore E006,E010 diff --git a/tox.ini b/tox.ini index 1d9a45ee9..ed74876cd 100644 --- a/tox.ini +++ b/tox.ini @@ -17,8 +17,9 @@ setenv = OS_STDOUT_CAPTURE=1 OS_STDERR_CAPTURE=1 OS_TEST_TIMEOUT=60 + ANSIBLE_VERBOSITY=3 deps = - -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/2025.1} + -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt commands = stestr run {posargs} @@ -26,7 +27,7 @@ commands = stestr run {posargs} [testenv:pep8] # sphinx8 needs the sphinx package which is required via doc/requirements.txt deps = - -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/2025.1} + -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} -r{toxinidir}/requirements.txt -r{toxinidir}/doc/requirements.txt -r{toxinidir}/test-requirements.txt @@ -38,10 +39,11 @@ commands = # directives. 
python3 {toxinidir}/tools/sphinx8 README.rst CONTRIBUTING.rst doc/source --ignore D001 yamllint etc/kayobe + bandit -r --severity-level=high ansible kayobe tools [testenv:venv] deps = - -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/2025.1} + -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} -r{toxinidir}/test-requirements.txt -r{toxinidir}/doc/requirements.txt commands = {posargs} @@ -59,7 +61,7 @@ commands = [testenv:molecule] deps = - -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/2025.1} + -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} -r{toxinidir}/requirements.txt -r{toxinidir}/molecule-requirements.txt commands = @@ -69,8 +71,28 @@ commands = -p {toxinidir}/ansible/roles bash -c "source {envdir}/bin/activate && {toxinidir}/tools/test-molecule.sh {posargs}" -[testenv:alint] -commands = /bin/bash -c "ansible-lint {toxinidir}/ansible/*.yml" +[testenv:linters] +# Env vars and deps need to be defined in top level tox env +setenv = + ANSIBLE_ACTION_PLUGINS = {toxinidir}/ansible/action_plugins + ANSIBLE_FILTER_PLUGINS = {toxinidir}/ansible/filter_plugins + ANSIBLE_ROLES_PATH = {toxinidir}/ansible/roles + +deps = + -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} + -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +commands = + {[testenv:ansible-lint]commands} + +[testenv:ansible-lint] +# Lint only code in ansible/* - ignore various folders used by CI +# TODO(priteau): Ignore YAML linting issues in plugins and figure out why +# idrac-bootstrap.yml fails. 
+setenv = {[testenv:linters]setenv} +deps = {[testenv:linters]deps} +commands = + ansible-lint -p --exclude etc --exclude kayobe/plugins --exclude playbooks --exclude releasenotes --exclude roles --exclude zuul.d --exclude ansible/idrac-bootstrap.yml --exclude .ansible --exclude ansible/roles/*.* --exclude ansible/collections [testenv:ansible-syntax] commands = @@ -110,7 +132,7 @@ commands = [testenv:docs] deps = - -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/2025.1} + -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} -r{toxinidir}/doc/requirements.txt commands = rm -rf doc/build/html diff --git a/zuul.d/jobs.yaml b/zuul.d/jobs.yaml index 3dee43bce..08a4328ee 100644 --- a/zuul.d/jobs.yaml +++ b/zuul.d/jobs.yaml @@ -71,19 +71,11 @@ required-projects: # Include kayobe to ensure other projects can use this job. - name: openstack/ansible-collection-kolla - # TODO(priteau): Remove when kayobe stable/2025.1 exists. - override-checkout: stable/2025.1 - name: openstack/kayobe - name: openstack/kayobe-config-dev - name: openstack/kolla - # TODO(priteau): Remove when kayobe stable/2025.1 exists. - override-checkout: stable/2025.1 - name: openstack/kolla-ansible - # TODO(priteau): Remove when kayobe stable/2025.1 exists. - override-checkout: stable/2025.1 - name: openstack/requirements - # TODO(priteau): Remove when kayobe stable/2025.1 exists. 
- override-checkout: stable/2025.1 - name: openstack/tenks irrelevant-files: - ^\..+ @@ -113,7 +105,7 @@ tenks_src_dir: "{{ ansible_env.PWD ~ '/' ~ zuul.projects['opendev.org/openstack/tenks'].src_dir }}" is_upgrade: "{{ 'upgrade' in zuul.job }}" is_slurp: "{{ 'slurp' in zuul.job }}" - previous_release: "{{ '2024.1' if is_slurp else '2024.2' }}" + previous_release: "{{ '2025.1' if is_slurp else '2025.2' }}" tls_enabled: false container_engine: 'docker' ironic_boot_mode: "bios" @@ -130,34 +122,34 @@ timeout: 7200 - job: - name: kayobe-overcloud-centos9s + name: kayobe-overcloud-centos10s parent: kayobe-overcloud-base - nodeset: kayobe-centos9s + nodeset: kayobe-centos10s-16GB voting: false - job: - name: kayobe-overcloud-rocky9 + name: kayobe-overcloud-rocky10 parent: kayobe-overcloud-base vars: kayobe_control_host_become: false - nodeset: kayobe-rocky9 + nodeset: kayobe-rocky10-16GB - job: - name: kayobe-overcloud-rocky9-podman + name: kayobe-overcloud-rocky10-podman parent: kayobe-overcloud-base - nodeset: kayobe-rocky9 + nodeset: kayobe-rocky10-16GB vars: container_engine: podman - job: name: kayobe-overcloud-ubuntu-noble parent: kayobe-overcloud-base - nodeset: kayobe-ubuntu-noble + nodeset: kayobe-ubuntu-noble-16GB - job: name: kayobe-overcloud-ubuntu-noble-podman parent: kayobe-overcloud-base - nodeset: kayobe-ubuntu-noble + nodeset: kayobe-ubuntu-noble-16GB vars: container_engine: podman kayobe_control_host_become: false @@ -170,15 +162,15 @@ ironic_boot_mode: "uefi" - job: - name: kayobe-overcloud-tls-centos9s + name: kayobe-overcloud-tls-centos10s parent: kayobe-overcloud-tls-base - nodeset: kayobe-centos9s + nodeset: kayobe-centos10s-16GB voting: false - job: - name: kayobe-overcloud-tls-rocky9 + name: kayobe-overcloud-tls-rocky10 parent: kayobe-overcloud-tls-base - nodeset: kayobe-rocky9 + nodeset: kayobe-rocky10-16GB - job: name: kayobe-overcloud-upgrade-base @@ -193,24 +185,24 @@ timeout: 10800 - job: - name: kayobe-overcloud-upgrade-rocky9 + name: 
kayobe-overcloud-upgrade-rocky10 parent: kayobe-overcloud-upgrade-base - nodeset: kayobe-rocky9 + nodeset: kayobe-rocky10-16GB - job: name: kayobe-overcloud-upgrade-ubuntu-noble parent: kayobe-overcloud-upgrade-base - nodeset: kayobe-ubuntu-noble + nodeset: kayobe-ubuntu-noble-16GB - job: - name: kayobe-overcloud-upgrade-slurp-rocky9 + name: kayobe-overcloud-upgrade-slurp-rocky10 parent: kayobe-overcloud-upgrade-base - nodeset: kayobe-rocky9 + nodeset: kayobe-rocky10-16GB - job: name: kayobe-overcloud-upgrade-slurp-ubuntu-noble parent: kayobe-overcloud-upgrade-base - nodeset: kayobe-ubuntu-noble + nodeset: kayobe-ubuntu-noble-16GB - job: name: kayobe-seed-base @@ -226,20 +218,20 @@ build_images: false - job: - name: kayobe-seed-centos9s + name: kayobe-seed-centos10s parent: kayobe-seed-base - nodeset: kayobe-centos9s + nodeset: kayobe-centos10s voting: false - job: - name: kayobe-seed-rocky9 + name: kayobe-seed-rocky10 parent: kayobe-seed-base - nodeset: kayobe-rocky9 + nodeset: kayobe-rocky10 - job: - name: kayobe-seed-rocky9-podman + name: kayobe-seed-rocky10-podman parent: kayobe-seed-base - nodeset: kayobe-rocky9 + nodeset: kayobe-rocky10 vars: container_engine: podman @@ -266,27 +258,22 @@ build_images: true - job: - name: kayobe-seed-images-centos9s + name: kayobe-seed-images-centos10s parent: kayobe-seed-images-base - nodeset: kayobe-centos9s + nodeset: kayobe-centos10s voting: false -# Build only the base container image in the kayobe-seed-images-rocky9 job -# which always runs. Use `check experimental` to run the other jobs which build -# more images. 
- job: - name: kayobe-seed-images-rocky9 + name: kayobe-seed-images-rocky10 parent: kayobe-seed-images-base - nodeset: kayobe-rocky9 + nodeset: kayobe-rocky10 vars: - overcloud_container_image_regex: "^base" - seed_container_image_regex: "^base" kayobe_control_host_become: false - job: - name: kayobe-seed-images-rocky9-podman + name: kayobe-seed-images-rocky10-podman parent: kayobe-seed-images-base - nodeset: kayobe-rocky9 + nodeset: kayobe-rocky10 vars: container_engine: podman @@ -317,20 +304,24 @@ timeout: 7200 - job: - name: kayobe-overcloud-host-configure-centos9s + name: kayobe-overcloud-host-configure-centos10s parent: kayobe-overcloud-host-configure-base - nodeset: kayobe-centos9s + nodeset: kayobe-centos10s voting: false - job: - name: kayobe-overcloud-host-configure-rocky9 + name: kayobe-overcloud-host-configure-rocky10 parent: kayobe-overcloud-host-configure-base - nodeset: kayobe-rocky9 + nodeset: kayobe-rocky10 + vars: + fail2ban_enabled: true - job: name: kayobe-overcloud-host-configure-ubuntu-noble parent: kayobe-overcloud-host-configure-base nodeset: kayobe-ubuntu-noble + vars: + fail2ban_enabled: true - job: name: kayobe-seed-upgrade-base @@ -345,9 +336,9 @@ timeout: 5400 - job: - name: kayobe-seed-upgrade-rocky9 + name: kayobe-seed-upgrade-rocky10 parent: kayobe-seed-upgrade-base - nodeset: kayobe-rocky9 + nodeset: kayobe-rocky10 - job: name: kayobe-seed-upgrade-ubuntu-noble @@ -355,9 +346,9 @@ nodeset: kayobe-ubuntu-noble - job: - name: kayobe-seed-upgrade-slurp-rocky9 + name: kayobe-seed-upgrade-slurp-rocky10 parent: kayobe-seed-upgrade-base - nodeset: kayobe-rocky9 + nodeset: kayobe-rocky10 - job: name: kayobe-seed-upgrade-slurp-ubuntu-noble @@ -377,15 +368,15 @@ timeout: 5400 - job: - name: kayobe-seed-vm-centos9s + name: kayobe-seed-vm-centos10s parent: kayobe-seed-vm-base - nodeset: kayobe-centos9s + nodeset: kayobe-centos10s voting: false - job: - name: kayobe-seed-vm-rocky9 + name: kayobe-seed-vm-rocky10 parent: kayobe-seed-vm-base - 
nodeset: kayobe-rocky9 + nodeset: kayobe-rocky10 - job: name: kayobe-seed-vm-ubuntu-noble @@ -395,30 +386,52 @@ kayobe_control_host_become: false - job: - name: kayobe-seed-vm-efi-base + name: kayobe-seed-vm-centos10s-cloud-image + parent: kayobe-seed-vm-base + nodeset: kayobe-centos10s + voting: false + vars: + seed_vm_use_cirros: false + +- job: + name: kayobe-seed-vm-rocky10-cloud-image + parent: kayobe-seed-vm-base + nodeset: kayobe-rocky10 + vars: + seed_vm_use_cirros: false + +- job: + name: kayobe-seed-vm-ubuntu-noble-cloud-image + parent: kayobe-seed-vm-base + nodeset: kayobe-ubuntu-noble + voting: false + vars: + seed_vm_use_cirros: false + +- job: + name: kayobe-seed-vm-q35-base parent: kayobe-seed-vm-base description: | - Base job for testing seed VM provisioning with EFI and q35 + Base job for testing seed VM provisioning with q35 vars: - seed_vm_boot_firmware: efi seed_vm_machine: q35 - job: - name: kayobe-seed-vm-centos9s-efi - parent: kayobe-seed-vm-efi-base - nodeset: kayobe-centos9s + name: kayobe-seed-vm-centos10s-q35 + parent: kayobe-seed-vm-q35-base + nodeset: kayobe-centos10s voting: false - job: - name: kayobe-seed-vm-rocky9-efi - parent: kayobe-seed-vm-efi-base - nodeset: kayobe-rocky9 + name: kayobe-seed-vm-rocky10-q35 + parent: kayobe-seed-vm-q35-base + nodeset: kayobe-rocky10 vars: kayobe_control_host_become: false - job: - name: kayobe-seed-vm-ubuntu-noble-efi - parent: kayobe-seed-vm-efi-base + name: kayobe-seed-vm-ubuntu-noble-q35 + parent: kayobe-seed-vm-q35-base nodeset: kayobe-ubuntu-noble - job: @@ -434,15 +447,15 @@ timeout: 5400 - job: - name: kayobe-infra-vm-centos9s + name: kayobe-infra-vm-centos10s parent: kayobe-infra-vm-base - nodeset: kayobe-centos9s + nodeset: kayobe-centos10s voting: false - job: - name: kayobe-infra-vm-rocky9 + name: kayobe-infra-vm-rocky10 parent: kayobe-infra-vm-base - nodeset: kayobe-rocky9 + nodeset: kayobe-rocky10 vars: kayobe_control_host_become: false @@ -450,3 +463,26 @@ name: 
kayobe-infra-vm-ubuntu-noble parent: kayobe-infra-vm-base nodeset: kayobe-ubuntu-noble + +- job: + name: kayobe-infra-vm-centos10s-cloud-image + parent: kayobe-infra-vm-base + nodeset: kayobe-centos10s + voting: false + vars: + infra_vm_use_cirros: false + +- job: + name: kayobe-infra-vm-rocky10-cloud-image + parent: kayobe-infra-vm-base + nodeset: kayobe-rocky10 + vars: + infra_vm_use_cirros: false + +- job: + name: kayobe-infra-vm-ubuntu-noble-cloud-image + parent: kayobe-infra-vm-base + nodeset: kayobe-ubuntu-noble + voting: false + vars: + infra_vm_use_cirros: false diff --git a/zuul.d/nodesets.yaml b/zuul.d/nodesets.yaml index 3dd99d404..32c72a536 100644 --- a/zuul.d/nodesets.yaml +++ b/zuul.d/nodesets.yaml @@ -1,18 +1,36 @@ --- - nodeset: - name: kayobe-centos9s + name: kayobe-centos10s nodes: - name: primary - label: centos-9-stream + label: centos-10-stream-8GB - nodeset: - name: kayobe-rocky9 + name: kayobe-centos10s-16GB nodes: - name: primary - label: rockylinux-9 + label: centos-10-stream-16GB + +- nodeset: + name: kayobe-rocky10 + nodes: + - name: primary + label: rockylinux-10-8GB + +- nodeset: + name: kayobe-rocky10-16GB + nodes: + - name: primary + label: rockylinux-10-16GB - nodeset: name: kayobe-ubuntu-noble nodes: - name: primary label: ubuntu-noble + +- nodeset: + name: kayobe-ubuntu-noble-16GB + nodes: + - name: primary + label: ubuntu-noble-16GB diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml index 76783e56e..37fe7338e 100644 --- a/zuul.d/project.yaml +++ b/zuul.d/project.yaml @@ -1,80 +1,94 @@ --- - project: - queue: kayobe + queue: kolla templates: + - ansible-role-jobs - openstack-cover-jobs - openstack-python3-jobs - publish-openstack-docs-pti - release-notes-jobs-python3 check: jobs: - - openstack-tox-py39: - #NOTE(wszumski): We have dropped python3.9 support, so disable this job. + - openstack-tox-py310: + #NOTE(wszumski): We have dropped python3.10 support, so disable this job. 
files: THIS-JOB-IS-DISABLED - kayobe-tox-ansible-syntax - kayobe-tox-ansible - kayobe-tox-molecule - - kayobe-overcloud-rocky9 - - kayobe-overcloud-rocky9-podman + - kayobe-infra-vm-rocky10 + - kayobe-infra-vm-ubuntu-noble + - kayobe-overcloud-host-configure-rocky10 + - kayobe-overcloud-host-configure-ubuntu-noble + - kayobe-overcloud-rocky10 + - kayobe-overcloud-rocky10-podman + - kayobe-overcloud-tls-rocky10 - kayobe-overcloud-ubuntu-noble - kayobe-overcloud-ubuntu-noble-podman - - kayobe-overcloud-tls-rocky9 - - kayobe-overcloud-host-configure-rocky9 - - kayobe-overcloud-host-configure-ubuntu-noble - - kayobe-overcloud-upgrade-rocky9 - - kayobe-overcloud-upgrade-ubuntu-noble - - kayobe-overcloud-upgrade-slurp-rocky9 + - kayobe-overcloud-upgrade-rocky10 + - kayobe-overcloud-upgrade-slurp-rocky10 - kayobe-overcloud-upgrade-slurp-ubuntu-noble - - kayobe-seed-rocky9 - - kayobe-seed-rocky9-podman + - kayobe-overcloud-upgrade-ubuntu-noble + - kayobe-seed-rocky10 + - kayobe-seed-rocky10-podman - kayobe-seed-ubuntu-noble - kayobe-seed-ubuntu-noble-podman - - kayobe-seed-images-rocky9 - - kayobe-seed-upgrade-rocky9 + - kayobe-seed-upgrade-rocky10 + - kayobe-seed-upgrade-slurp-rocky10 + - kayobe-seed-upgrade-slurp-ubuntu-noble - kayobe-seed-upgrade-ubuntu-noble - - kayobe-seed-upgrade-slurp-rocky9 - - kayobe-seed-vm-rocky9 - - kayobe-seed-vm-rocky9-efi + - kayobe-seed-vm-rocky10 - kayobe-seed-vm-ubuntu-noble - - kayobe-seed-vm-ubuntu-noble-efi - - kayobe-infra-vm-rocky9 - - kayobe-infra-vm-ubuntu-noble gate: jobs: - - openstack-tox-py39: - #NOTE(wszumski): We have dropped python3.9 support, so disable this job. + - openstack-tox-py310: + #NOTE(wszumski): We have dropped python3.10 support, so disable this job. 
files: THIS-JOB-IS-DISABLED - kayobe-tox-ansible-syntax - kayobe-tox-ansible - kayobe-tox-molecule - - kayobe-overcloud-rocky9 - - kayobe-overcloud-rocky9-podman + - kayobe-infra-vm-rocky10 + - kayobe-infra-vm-ubuntu-noble + - kayobe-overcloud-host-configure-rocky10 + - kayobe-overcloud-host-configure-ubuntu-noble + - kayobe-overcloud-rocky10 + - kayobe-overcloud-rocky10-podman + - kayobe-overcloud-tls-rocky10 - kayobe-overcloud-ubuntu-noble - kayobe-overcloud-ubuntu-noble-podman - - kayobe-overcloud-tls-rocky9 - - kayobe-overcloud-host-configure-rocky9 - - kayobe-overcloud-host-configure-ubuntu-noble - - kayobe-overcloud-upgrade-rocky9 - - kayobe-overcloud-upgrade-ubuntu-noble - - kayobe-overcloud-upgrade-slurp-rocky9 + - kayobe-overcloud-upgrade-rocky10 + - kayobe-overcloud-upgrade-slurp-rocky10 - kayobe-overcloud-upgrade-slurp-ubuntu-noble - - kayobe-seed-rocky9 - - kayobe-seed-rocky9-podman + - kayobe-overcloud-upgrade-ubuntu-noble + - kayobe-seed-rocky10 + - kayobe-seed-rocky10-podman - kayobe-seed-ubuntu-noble - kayobe-seed-ubuntu-noble-podman - - kayobe-seed-upgrade-rocky9 + - kayobe-seed-upgrade-rocky10 + - kayobe-seed-upgrade-slurp-rocky10 + - kayobe-seed-upgrade-slurp-ubuntu-noble - kayobe-seed-upgrade-ubuntu-noble - - kayobe-seed-upgrade-slurp-rocky9 - - kayobe-seed-vm-rocky9 + - kayobe-seed-vm-rocky10 - kayobe-seed-vm-ubuntu-noble - - kayobe-infra-vm-rocky9 - - kayobe-infra-vm-ubuntu-noble experimental: jobs: - - kayobe-overcloud-centos9s - - kayobe-overcloud-host-configure-centos9s - - kayobe-seed-images-centos9s - - kayobe-seed-images-rocky9-podman + - kayobe-infra-vm-centos10s + - kayobe-infra-vm-centos10s-cloud-image + - kayobe-infra-vm-rocky10-cloud-image + - kayobe-infra-vm-ubuntu-noble-cloud-image + - kayobe-overcloud-centos10s + - kayobe-overcloud-host-configure-centos10s + - kayobe-overcloud-tls-centos10s + - kayobe-seed-centos10s + - kayobe-seed-images-centos10s + - kayobe-seed-images-rocky10 + - kayobe-seed-images-rocky10-podman - 
kayobe-seed-images-ubuntu-noble - kayobe-seed-images-ubuntu-noble-podman + - kayobe-seed-vm-centos10s + - kayobe-seed-vm-centos10s-cloud-image + - kayobe-seed-vm-centos10s-q35 + - kayobe-seed-vm-rocky10-cloud-image + - kayobe-seed-vm-rocky10-q35 + - kayobe-seed-vm-ubuntu-noble-cloud-image + - kayobe-seed-vm-ubuntu-noble-q35