From e74bdae6e53e26c8634a59c7fcf44a51c93822d1 Mon Sep 17 00:00:00 2001 From: GuEe-GUI <2991707448@qq.com> Date: Sun, 1 Mar 2026 13:06:32 +0800 Subject: [PATCH 1/3] [dm][virtio] support virtio dm 1. version for v1.2 2. support packed queue 1. support MMIO/PCI over bus 2. new virtio queue api with dma/cpu sync 3. update for virtio device drivers: - net - block - console - input - gpu 4. new virtio device drivers: - 9p - crypto - rng - scmi - scsi Signed-off-by: GuEe-GUI <2991707448@qq.com> --- bsp/qemu-virt64-riscv/.config | 31 +- bsp/qemu-virt64-riscv/driver/Kconfig | 20 +- bsp/qemu-virt64-riscv/driver/SConscript | 2 +- bsp/qemu-virt64-riscv/driver/virtio/Kconfig | 44 + .../driver/virtio/SConscript | 11 + bsp/qemu-virt64-riscv/driver/virtio/virtio.c | 473 +++++++ .../qemu-virt64-riscv/driver}/virtio/virtio.h | 0 .../driver}/virtio/virtio_blk.c | 0 .../driver}/virtio/virtio_blk.h | 0 .../driver}/virtio/virtio_console.c | 0 .../driver}/virtio/virtio_console.h | 0 .../driver}/virtio/virtio_gpu.c | 0 .../driver}/virtio/virtio_gpu.h | 0 .../driver}/virtio/virtio_input.c | 0 .../driver}/virtio/virtio_input.h | 0 .../driver}/virtio/virtio_input_event_codes.h | 0 .../driver}/virtio/virtio_mmio.h | 0 .../driver}/virtio/virtio_net.c | 0 .../driver}/virtio/virtio_net.h | 0 .../driver}/virtio/virtio_queue.h | 0 bsp/qemu-virt64-riscv/link_cpus.lds | 2 +- bsp/qemu-virt64-riscv/rtconfig.h | 24 +- bsp/xuantie/virt64/c906/.config | 29 +- bsp/xuantie/virt64/c906/board/Kconfig | 8 +- bsp/xuantie/virt64/c906/board/SConscript | 1 + bsp/xuantie/virt64/c906/rtconfig.h | 14 +- components/drivers/include/drivers/virtio.h | 282 ++++ components/drivers/include/drivers/virtq.h | 198 +++ components/drivers/include/rtdevice.h | 5 + components/drivers/virtio/Kconfig | 134 +- components/drivers/virtio/SConscript | 52 +- components/drivers/virtio/virtio-9p.c | 188 +++ components/drivers/virtio/virtio-blk.c | 332 +++++ components/drivers/virtio/virtio-console.c | 732 ++++++++++ 
components/drivers/virtio/virtio-crypto.c | 666 +++++++++ components/drivers/virtio/virtio-gpu.c | 1021 ++++++++++++++ components/drivers/virtio/virtio-input.c | 300 ++++ components/drivers/virtio/virtio-net.c | 423 ++++++ components/drivers/virtio/virtio-rng.c | 191 +++ components/drivers/virtio/virtio-scmi.c | 293 ++++ components/drivers/virtio/virtio-scsi.c | 322 +++++ components/drivers/virtio/virtio.c | 564 +++----- .../drivers/virtio/virtio_config/virtio-9p.h | 26 + .../drivers/virtio/virtio_config/virtio-blk.h | 129 ++ .../virtio/virtio_config/virtio-console.h | 63 + .../virtio/virtio_config/virtio-crypto.h | 527 +++++++ .../drivers/virtio/virtio_config/virtio-gpu.h | 499 +++++++ .../virtio/virtio_config/virtio-input.h | 68 + .../drivers/virtio/virtio_config/virtio-net.h | 82 ++ .../virtio/virtio_config/virtio-scmi.h | 35 + .../virtio/virtio_config/virtio-scsi.h | 163 +++ components/drivers/virtio/virtio_ids.h | 77 ++ components/drivers/virtio/virtio_internal.h | 112 ++ components/drivers/virtio/virtio_mmio.c | 652 +++++++++ components/drivers/virtio/virtio_pci.c | 942 +++++++++++++ components/drivers/virtio/virtio_queue.c | 1219 +++++++++++++++++ 56 files changed, 10497 insertions(+), 459 deletions(-) create mode 100644 bsp/qemu-virt64-riscv/driver/virtio/Kconfig create mode 100644 bsp/qemu-virt64-riscv/driver/virtio/SConscript create mode 100644 bsp/qemu-virt64-riscv/driver/virtio/virtio.c rename {components/drivers => bsp/qemu-virt64-riscv/driver}/virtio/virtio.h (100%) rename {components/drivers => bsp/qemu-virt64-riscv/driver}/virtio/virtio_blk.c (100%) rename {components/drivers => bsp/qemu-virt64-riscv/driver}/virtio/virtio_blk.h (100%) rename {components/drivers => bsp/qemu-virt64-riscv/driver}/virtio/virtio_console.c (100%) rename {components/drivers => bsp/qemu-virt64-riscv/driver}/virtio/virtio_console.h (100%) rename {components/drivers => bsp/qemu-virt64-riscv/driver}/virtio/virtio_gpu.c (100%) rename {components/drivers => 
bsp/qemu-virt64-riscv/driver}/virtio/virtio_gpu.h (100%) rename {components/drivers => bsp/qemu-virt64-riscv/driver}/virtio/virtio_input.c (100%) rename {components/drivers => bsp/qemu-virt64-riscv/driver}/virtio/virtio_input.h (100%) rename {components/drivers => bsp/qemu-virt64-riscv/driver}/virtio/virtio_input_event_codes.h (100%) rename {components/drivers => bsp/qemu-virt64-riscv/driver}/virtio/virtio_mmio.h (100%) rename {components/drivers => bsp/qemu-virt64-riscv/driver}/virtio/virtio_net.c (100%) rename {components/drivers => bsp/qemu-virt64-riscv/driver}/virtio/virtio_net.h (100%) rename {components/drivers => bsp/qemu-virt64-riscv/driver}/virtio/virtio_queue.h (100%) create mode 100644 components/drivers/include/drivers/virtio.h create mode 100644 components/drivers/include/drivers/virtq.h create mode 100644 components/drivers/virtio/virtio-9p.c create mode 100644 components/drivers/virtio/virtio-blk.c create mode 100644 components/drivers/virtio/virtio-console.c create mode 100644 components/drivers/virtio/virtio-crypto.c create mode 100644 components/drivers/virtio/virtio-gpu.c create mode 100644 components/drivers/virtio/virtio-input.c create mode 100644 components/drivers/virtio/virtio-net.c create mode 100644 components/drivers/virtio/virtio-rng.c create mode 100644 components/drivers/virtio/virtio-scmi.c create mode 100644 components/drivers/virtio/virtio-scsi.c create mode 100644 components/drivers/virtio/virtio_config/virtio-9p.h create mode 100644 components/drivers/virtio/virtio_config/virtio-blk.h create mode 100644 components/drivers/virtio/virtio_config/virtio-console.h create mode 100644 components/drivers/virtio/virtio_config/virtio-crypto.h create mode 100644 components/drivers/virtio/virtio_config/virtio-gpu.h create mode 100644 components/drivers/virtio/virtio_config/virtio-input.h create mode 100644 components/drivers/virtio/virtio_config/virtio-net.h create mode 100644 components/drivers/virtio/virtio_config/virtio-scmi.h create mode 
100644 components/drivers/virtio/virtio_config/virtio-scsi.h create mode 100644 components/drivers/virtio/virtio_ids.h create mode 100644 components/drivers/virtio/virtio_internal.h create mode 100644 components/drivers/virtio/virtio_mmio.c create mode 100644 components/drivers/virtio/virtio_pci.c create mode 100644 components/drivers/virtio/virtio_queue.c diff --git a/bsp/qemu-virt64-riscv/.config b/bsp/qemu-virt64-riscv/.config index 2bca112cf86..5126334b577 100644 --- a/bsp/qemu-virt64-riscv/.config +++ b/bsp/qemu-virt64-riscv/.config @@ -191,7 +191,8 @@ CONFIG_RT_USING_DEVICE_OPS=y CONFIG_RT_USING_CONSOLE=y CONFIG_RT_CONSOLEBUF_SIZE=256 CONFIG_RT_CONSOLE_DEVICE_NAME="uart0" -CONFIG_RT_VER_NUM=0x50201 +CONFIG_RT_USING_CONSOLE_OUTPUT_CTL=y +CONFIG_RT_VER_NUM=0x50300 CONFIG_RT_USING_STDC_ATOMIC=y CONFIG_RT_BACKTRACE_LEVEL_MAX_NR=32 # end of RT-Thread Kernel @@ -294,9 +295,8 @@ CONFIG_RT_SERIAL_USING_DMA=y CONFIG_RT_SERIAL_RB_BUFSZ=64 # CONFIG_RT_USING_SERIAL_BYPASS is not set # CONFIG_RT_USING_CAN is not set -CONFIG_RT_USING_CPUTIME=y -CONFIG_RT_USING_CPUTIME_RISCV=y -CONFIG_CPUTIME_TIMER_FREQ=10000000 +CONFIG_RT_USING_CLOCK_TIME=y +CONFIG_CLOCK_TIMER_FREQ=0 # CONFIG_RT_USING_I2C is not set # CONFIG_RT_USING_PHY is not set # CONFIG_RT_USING_PHY_V2 is not set @@ -324,18 +324,7 @@ CONFIG_RT_USING_SOFT_RTC=y # CONFIG_RT_USING_HWCRYPTO is not set # CONFIG_RT_USING_WIFI is not set # CONFIG_RT_USING_BLK is not set -CONFIG_RT_USING_VIRTIO=y -CONFIG_RT_USING_VIRTIO10=y -# CONFIG_RT_USING_VIRTIO_MMIO_ALIGN is not set -CONFIG_RT_USING_VIRTIO_BLK=y -CONFIG_RT_USING_VIRTIO_NET=y -CONFIG_RT_USING_VIRTIO_CONSOLE=y -CONFIG_RT_USING_VIRTIO_CONSOLE_PORT_MAX_NR=4 -CONFIG_RT_USING_VIRTIO_GPU=y -CONFIG_RT_USING_VIRTIO_INPUT=y CONFIG_RT_USING_PIN=y -CONFIG_RT_USING_KTIME=y -# CONFIG_RT_USING_HWTIMER is not set # CONFIG_RT_USING_CHERRYUSB is not set # end of Device Drivers @@ -508,11 +497,10 @@ CONFIG_RT_USING_ADT_REF=y # CONFIG_RT_USING_RT_LINK is not set # end of Utilities -# 
CONFIG_RT_USING_VBUS is not set - # # Memory management # +# CONFIG_RT_PAGE_MPR_SIZE_DYNAMIC is not set CONFIG_RT_PAGE_AFFINITY_BLOCK_SIZE=0x1000 CONFIG_RT_PAGE_MAX_ORDER=11 # CONFIG_RT_USING_MEMBLOCK is not set @@ -534,6 +522,7 @@ CONFIG_RT_PAGE_MAX_ORDER=11 # end of Using USB legacy version # CONFIG_RT_USING_FDT is not set +# CONFIG_RT_USING_RUST is not set # end of RT-Thread Components # @@ -1572,6 +1561,14 @@ CONFIG_RT_PAGE_MAX_ORDER=11 # RISC-V QEMU virt64 configs # CONFIG_BSP_USING_VIRTIO=y +CONFIG_RT_USING_VIRTIO10=y +# CONFIG_RT_USING_VIRTIO_MMIO_ALIGN is not set +CONFIG_RT_USING_VIRTIO_BLK=y +CONFIG_RT_USING_VIRTIO_NET=y +CONFIG_RT_USING_VIRTIO_CONSOLE=y +CONFIG_RT_USING_VIRTIO_CONSOLE_PORT_MAX_NR=4 +CONFIG_RT_USING_VIRTIO_GPU=y +CONFIG_RT_USING_VIRTIO_INPUT=y CONFIG_BSP_USING_VIRTIO_BLK=y CONFIG_BSP_USING_VIRTIO_NET=y CONFIG_BSP_USING_VIRTIO_CONSOLE=y diff --git a/bsp/qemu-virt64-riscv/driver/Kconfig b/bsp/qemu-virt64-riscv/driver/Kconfig index ba9a2650e19..23da1fb582d 100644 --- a/bsp/qemu-virt64-riscv/driver/Kconfig +++ b/bsp/qemu-virt64-riscv/driver/Kconfig @@ -1,43 +1,35 @@ menu "RISC-V QEMU virt64 configs" -config BSP_USING_VIRTIO - bool "Using VirtIO" - default y - depends on RT_USING_DEVICE_OPS +rsource "virtio/Kconfig" config BSP_USING_VIRTIO_BLK bool "Using VirtIO BLK" - select RT_USING_VIRTIO + select BSP_USING_VIRTIO select RT_USING_VIRTIO_BLK default y - depends on BSP_USING_VIRTIO config BSP_USING_VIRTIO_NET bool "Using VirtIO NET" - select RT_USING_VIRTIO + select BSP_USING_VIRTIO select RT_USING_VIRTIO_NET default y - depends on BSP_USING_VIRTIO config BSP_USING_VIRTIO_CONSOLE bool "Using VirtIO Console" - select RT_USING_VIRTIO + select BSP_USING_VIRTIO select RT_USING_VIRTIO_CONSOLE default y - depends on BSP_USING_VIRTIO config BSP_USING_VIRTIO_GPU bool "Using VirtIO GPU" - select RT_USING_VIRTIO + select BSP_USING_VIRTIO select RT_USING_VIRTIO_GPU default y - depends on BSP_USING_VIRTIO config BSP_USING_VIRTIO_INPUT bool "Using VirtIO 
Input" - select RT_USING_VIRTIO + select BSP_USING_VIRTIO select RT_USING_VIRTIO_INPUT default y - depends on BSP_USING_VIRTIO endmenu diff --git a/bsp/qemu-virt64-riscv/driver/SConscript b/bsp/qemu-virt64-riscv/driver/SConscript index faea9c1bd9b..e4c3a1bff28 100644 --- a/bsp/qemu-virt64-riscv/driver/SConscript +++ b/bsp/qemu-virt64-riscv/driver/SConscript @@ -4,7 +4,7 @@ from building import * cwd = GetCurrentDir() src = Glob('*.c') -CPPPATH = [cwd] +CPPPATH = [cwd, cwd + "/virtio"] group = DefineGroup('Drivers', src, depend = [''], CPPPATH = CPPPATH) diff --git a/bsp/qemu-virt64-riscv/driver/virtio/Kconfig b/bsp/qemu-virt64-riscv/driver/virtio/Kconfig new file mode 100644 index 00000000000..119023239c5 --- /dev/null +++ b/bsp/qemu-virt64-riscv/driver/virtio/Kconfig @@ -0,0 +1,44 @@ +menuconfig BSP_USING_VIRTIO + bool "Using VirtIO device drivers" + default n + depends on RT_USING_DEVICE_OPS + + if BSP_USING_VIRTIO + choice RT_USING_VIRTIO_VERSION + prompt "VirtIO Version" + default RT_USING_VIRTIO10 + + config RT_USING_VIRTIO10 + bool "VirtIO v1.0" + endchoice + + config RT_USING_VIRTIO_MMIO_ALIGN + bool "Using VirtIO MMIO alignment" + default y + + config RT_USING_VIRTIO_BLK + bool "Using VirtIO BLK" + default y + + config RT_USING_VIRTIO_NET + bool "Using VirtIO NET" + default y + + menuconfig RT_USING_VIRTIO_CONSOLE + bool "Using VirtIO Console" + default y + + if RT_USING_VIRTIO_CONSOLE + config RT_USING_VIRTIO_CONSOLE_PORT_MAX_NR + int "Max number of port in VirtIO Console" + default 4 + endif + + config RT_USING_VIRTIO_GPU + bool "Using VirtIO GPU" + default y + + config RT_USING_VIRTIO_INPUT + bool "Using VirtIO Input" + default y + endif diff --git a/bsp/qemu-virt64-riscv/driver/virtio/SConscript b/bsp/qemu-virt64-riscv/driver/virtio/SConscript new file mode 100644 index 00000000000..b946feed923 --- /dev/null +++ b/bsp/qemu-virt64-riscv/driver/virtio/SConscript @@ -0,0 +1,11 @@ +# RT-Thread building script for component + +from building import * + +cwd = 
GetCurrentDir() +src = Glob('*.c') +CPPPATH = [cwd] + +group = DefineGroup('DeviceDrivers', src, depend = ['BSP_USING_VIRTIO'], CPPPATH = CPPPATH) + +Return('group') diff --git a/bsp/qemu-virt64-riscv/driver/virtio/virtio.c b/bsp/qemu-virt64-riscv/driver/virtio/virtio.c new file mode 100644 index 00000000000..a7937dfaeea --- /dev/null +++ b/bsp/qemu-virt64-riscv/driver/virtio/virtio.c @@ -0,0 +1,473 @@ +/* + * Copyright (c) 2006-2023, RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2021-11-11 GuEe-GUI the first version + * 2023-10-12 fangjianzhou support SDL2 + */ + +#include +#include + +#include + +rt_inline void _virtio_dev_check(struct virtio_device *dev) +{ + RT_ASSERT(dev != RT_NULL); + RT_ASSERT(dev->mmio_config != RT_NULL); +} + +void virtio_reset_device(struct virtio_device *dev) +{ + _virtio_dev_check(dev); + + dev->mmio_config->status = 0; +} + +void virtio_status_acknowledge_driver(struct virtio_device *dev) +{ + _virtio_dev_check(dev); + + dev->mmio_config->status |= VIRTIO_STATUS_ACKNOWLEDGE | VIRTIO_STATUS_DRIVER; +} + +void virtio_status_driver_ok(struct virtio_device *dev) +{ + _virtio_dev_check(dev); + + dev->mmio_config->status |= VIRTIO_STATUS_FEATURES_OK | VIRTIO_STATUS_DRIVER_OK; +} + +void virtio_interrupt_ack(struct virtio_device *dev) +{ + rt_uint32_t status; + + _virtio_dev_check(dev); + + status = dev->mmio_config->interrupt_status; + + if (status != 0) + { + dev->mmio_config->interrupt_ack = status; + } +} + +rt_bool_t virtio_has_feature(struct virtio_device *dev, rt_uint32_t feature_bit) +{ + _virtio_dev_check(dev); + + return !!(dev->mmio_config->device_features & (1UL << feature_bit)); +} + +rt_err_t virtio_queues_alloc(struct virtio_device *dev, rt_size_t queues_num) +{ + _virtio_dev_check(dev); + + dev->queues = rt_malloc(sizeof(struct virtq) * queues_num); + + if (dev->queues != RT_NULL) + { + dev->queues_num = queues_num; + + return RT_EOK; + } + + return 
-RT_ENOMEM; +} + +void virtio_queues_free(struct virtio_device *dev) +{ + if (dev->queues != RT_NULL) + { + dev->queues_num = 0; + rt_free(dev->queues); + } +} + +rt_err_t virtio_queue_init(struct virtio_device *dev, rt_uint32_t queue_index, rt_size_t ring_size) +{ + int i; + void *pages; + rt_size_t pages_total_size; + struct virtq *queue; + + _virtio_dev_check(dev); + + RT_ASSERT(queue_index < dev->queues_num); + /* ring_size is power of 2 */ + RT_ASSERT(ring_size > 0); + RT_ASSERT(((ring_size - 1) & ring_size) == 0); + + /* Select the queue first, then read queue_num_max */ + dev->mmio_config->queue_sel = queue_index; + RT_ASSERT(dev->mmio_config->queue_num_max > 0); + RT_ASSERT(ring_size <= dev->mmio_config->queue_num_max); + + queue = &dev->queues[queue_index]; + pages_total_size = VIRTIO_PAGE_ALIGN( + VIRTQ_DESC_TOTAL_SIZE(ring_size) + VIRTQ_AVAIL_TOTAL_SIZE(ring_size)) + VIRTQ_USED_TOTAL_SIZE(ring_size); + + pages = rt_malloc_align(pages_total_size, VIRTIO_PAGE_SIZE); + + if (pages == RT_NULL) + { + return -RT_ENOMEM; + } + + queue->free = rt_malloc(sizeof(rt_bool_t) * ring_size); + + if (queue->free == RT_NULL) + { + rt_free_align(pages); + return -RT_ENOMEM; + } + + rt_memset(pages, 0, pages_total_size); + + dev->mmio_config->guest_page_size = VIRTIO_PAGE_SIZE; + dev->mmio_config->queue_num = ring_size; + dev->mmio_config->queue_align = VIRTIO_PAGE_SIZE; + dev->mmio_config->queue_pfn = VIRTIO_VA2PA(pages) >> VIRTIO_PAGE_SHIFT; + + queue->num = ring_size; + queue->desc = (struct virtq_desc *)((rt_ubase_t)pages); + queue->avail = (struct virtq_avail *)(((rt_ubase_t)pages) + VIRTQ_DESC_TOTAL_SIZE(ring_size)); + queue->used = (struct virtq_used *)VIRTIO_PAGE_ALIGN( + (rt_ubase_t)&queue->avail->ring[ring_size] + VIRTQ_AVAIL_RES_SIZE); + + queue->used_idx = 0; + + /* All descriptors start out unused */ + for (i = 0; i < ring_size; ++i) + { + queue->free[i] = RT_TRUE; + } + + queue->free_count = ring_size; + + return RT_EOK; +} + +void virtio_queue_destroy(struct 
virtio_device *dev, rt_uint32_t queue_index) +{ + struct virtq *queue; + + _virtio_dev_check(dev); + + RT_ASSERT(queue_index < dev->queues_num); + + /* Select the queue first, then read queue_num_max */ + dev->mmio_config->queue_sel = queue_index; + RT_ASSERT(dev->mmio_config->queue_num_max > 0); + + queue = &dev->queues[queue_index]; + + RT_ASSERT(queue->num > 0); + + rt_free(queue->free); + rt_free_align((void *)queue->desc); + + dev->mmio_config->queue_pfn = RT_NULL; + + queue->num = 0; + queue->desc = RT_NULL; + queue->avail = RT_NULL; + queue->used = RT_NULL; +} + +void virtio_queue_notify(struct virtio_device *dev, rt_uint32_t queue_index) +{ + _virtio_dev_check(dev); + + dev->mmio_config->queue_notify = queue_index; +} + +void virtio_submit_chain(struct virtio_device *dev, rt_uint32_t queue_index, rt_uint16_t desc_index) +{ + rt_size_t ring_size; + struct virtq *queue; + + _virtio_dev_check(dev); + + queue = &dev->queues[queue_index]; + ring_size = queue->num; + + /* Tell the device the first index in our chain of descriptors */ + queue->avail->ring[queue->avail->idx % ring_size] = desc_index; + rt_hw_dsb(); + + /* Tell the device another avail ring entry is available */ + queue->avail->idx++; + rt_hw_dsb(); +} + +rt_uint16_t virtio_alloc_desc(struct virtio_device *dev, rt_uint32_t queue_index) +{ + int i; + struct virtq *queue; + + _virtio_dev_check(dev); + + RT_ASSERT(queue_index < dev->queues_num); + + queue = &dev->queues[queue_index]; + + if (queue->free_count > 0) + { + rt_size_t ring_size = queue->num; + + for (i = 0; i < ring_size; ++i) + { + if (queue->free[i]) + { + queue->free[i] = RT_FALSE; + queue->free_count--; + + return (rt_uint16_t)i; + } + } + } + + return VIRTQ_INVALID_DESC_ID; +} + +void virtio_free_desc(struct virtio_device *dev, rt_uint32_t queue_index, rt_uint16_t desc_index) +{ + struct virtq *queue; + + _virtio_dev_check(dev); + + queue = &dev->queues[queue_index]; + + RT_ASSERT(queue_index < dev->queues_num); + 
RT_ASSERT(!queue->free[desc_index]); + + queue->desc[desc_index].addr = 0; + queue->desc[desc_index].len = 0; + queue->desc[desc_index].flags = 0; + queue->desc[desc_index].next = 0; + + queue->free[desc_index] = RT_TRUE; + + queue->free_count++; +} + +rt_err_t virtio_alloc_desc_chain(struct virtio_device *dev, rt_uint32_t queue_index, rt_size_t count, + rt_uint16_t *indexs) +{ + int i, j; + + _virtio_dev_check(dev); + + RT_ASSERT(indexs != RT_NULL); + + if (dev->queues[queue_index].free_count < count) + { + return -RT_ERROR; + } + + for (i = 0; i < count; ++i) + { + indexs[i] = virtio_alloc_desc(dev, queue_index); + + if (indexs[i] == VIRTQ_INVALID_DESC_ID) + { + for (j = 0; j < i; ++j) + { + virtio_free_desc(dev, queue_index, indexs[j]); + } + + return -RT_ERROR; + } + } + + return RT_EOK; +} + +void virtio_free_desc_chain(struct virtio_device *dev, rt_uint32_t queue_index, rt_uint16_t desc_index) +{ + rt_uint16_t flags, next; + struct virtq_desc *desc; + + _virtio_dev_check(dev); + + desc = &dev->queues[queue_index].desc[0]; + + for (;;) + { + flags = desc[desc_index].flags; + next = desc[desc_index].next; + + virtio_free_desc(dev, queue_index, desc_index); + + if (flags & VIRTQ_DESC_F_NEXT) + { + desc_index = next; + } + else + { + break; + } + } +} + +void virtio_fill_desc(struct virtio_device *dev, rt_uint32_t queue_index, rt_uint16_t desc_index, + rt_uint64_t addr, rt_uint32_t len, rt_uint16_t flags, rt_uint16_t next) +{ + struct virtq_desc *desc; + + _virtio_dev_check(dev); + + desc = &dev->queues[queue_index].desc[desc_index]; + + desc->addr = addr; + desc->len = len; + desc->flags = flags; + desc->next = next; +} + +#ifdef RT_USING_SMART +#ifdef RT_USING_VIRTIO_GPU + +#include +#include "drivers/lcd.h" +#include +#include + +static struct rt_device_graphic_info _graphic_info; +static struct rt_device_rect_info _rect_info; +static struct rt_device _fb = {}; +static rt_device_t _gpu_dev = RT_NULL; + +static rt_err_t fb_open(rt_device_t dev, rt_uint16_t 
oflag) +{ + return RT_EOK; +} + +static rt_err_t fb_close(rt_device_t dev) +{ + return RT_EOK; +} + +static rt_err_t fb_control(rt_device_t dev, int cmd, void *args) +{ + switch(cmd) + { + case FBIOPAN_DISPLAY: + { + rt_hw_cpu_dcache_clean(_graphic_info.framebuffer, _graphic_info.smem_len); + rt_device_control(_gpu_dev, RTGRAPHIC_CTRL_RECT_UPDATE, &_rect_info); + break; + } + case FBIOGET_FSCREENINFO: + { + struct fb_fix_screeninfo *info = (struct fb_fix_screeninfo *)args; + strncpy(info->id, "lcd", sizeof(info->id)); + info->smem_len = _graphic_info.smem_len; + break; + } + case FBIOGET_VSCREENINFO: + { + struct fb_var_screeninfo *info = (struct fb_var_screeninfo *)args; + info->bits_per_pixel = _graphic_info.bits_per_pixel; + info->xres = _graphic_info.width; + info->yres = _graphic_info.height; + info->yres_virtual = _graphic_info.height; + info->xres_virtual = _graphic_info.width; + info->transp.offset = 24; + info->transp.length = 8; + info->red.offset = 0; + info->red.length = 8; + info->green.offset = 8; + info->green.length = 8; + info->blue.offset = 16; + info->blue.length = 8; + break; + } + case RT_FIOMMAP2: + { + struct dfs_mmap2_args *mmap2 = (struct dfs_mmap2_args *)args; + + if(mmap2) + { + mmap2->ret = lwp_map_user_phy(lwp_self(), RT_NULL, rt_kmem_v2p(_graphic_info.framebuffer), mmap2->length, 1); + } + else + { + return -EIO; + } + + break; + } + default: + break; + } + + return RT_EOK; +} + +#ifdef RT_USING_DEVICE_OPS +const static struct rt_device_ops fb_ops = +{ + RT_NULL, + fb_open, + fb_close, + RT_NULL, + RT_NULL, + fb_control +}; +#endif + +static int fb_init() +{ + _gpu_dev = rt_device_find("virtio-gpu0"); + + if(_gpu_dev == RT_NULL) + { + return -RT_ERROR; + } + + if(_gpu_dev != RT_NULL && rt_device_open(_gpu_dev, 0) == RT_EOK) + { + rt_memset(&_graphic_info, 0, sizeof(_graphic_info)); + rt_memset(&_rect_info, 0, sizeof(_rect_info)); + rt_device_control(_gpu_dev, VIRTIO_DEVICE_CTRL_GPU_SET_PRIMARY, RT_NULL); + rt_device_control(_gpu_dev, 
VIRTIO_DEVICE_CTRL_GPU_CREATE_2D, (void *)RTGRAPHIC_PIXEL_FORMAT_RGB888); + rt_device_control(_gpu_dev, RTGRAPHIC_CTRL_GET_INFO, &_graphic_info); + _rect_info.x = 0; + _rect_info.y = 0; + _rect_info.width = _graphic_info.width; + _rect_info.height = _graphic_info.height; + memset(_graphic_info.framebuffer, 0xff, _graphic_info.smem_len); + rt_device_control(_gpu_dev, RTGRAPHIC_CTRL_RECT_UPDATE, &_rect_info); + } + + if(rt_device_find("fb0") != RT_NULL) + { + rt_kprintf("a device named fb0 already exists\n"); + return -RT_ERROR; + } + + _fb.type = RT_Device_Class_Miscellaneous; + +#ifdef RT_USING_DEVICE_OPS + _fb.ops = &fb_ops; +#else + _fb.init = RT_NULL; + _fb.open = fb_open; + _fb.close = fb_close; + _fb.read = RT_NULL; + _fb.write = RT_NULL; + _fb.control = fb_control; + _fb.user_data = RT_NULL; +#endif + + rt_device_register(&_fb, "fb0", RT_DEVICE_FLAG_RDWR); + return RT_EOK; +} +INIT_COMPONENT_EXPORT(fb_init); +#endif +#endif diff --git a/components/drivers/virtio/virtio.h b/bsp/qemu-virt64-riscv/driver/virtio/virtio.h similarity index 100% rename from components/drivers/virtio/virtio.h rename to bsp/qemu-virt64-riscv/driver/virtio/virtio.h diff --git a/components/drivers/virtio/virtio_blk.c b/bsp/qemu-virt64-riscv/driver/virtio/virtio_blk.c similarity index 100% rename from components/drivers/virtio/virtio_blk.c rename to bsp/qemu-virt64-riscv/driver/virtio/virtio_blk.c diff --git a/components/drivers/virtio/virtio_blk.h b/bsp/qemu-virt64-riscv/driver/virtio/virtio_blk.h similarity index 100% rename from components/drivers/virtio/virtio_blk.h rename to bsp/qemu-virt64-riscv/driver/virtio/virtio_blk.h diff --git a/components/drivers/virtio/virtio_console.c b/bsp/qemu-virt64-riscv/driver/virtio/virtio_console.c similarity index 100% rename from components/drivers/virtio/virtio_console.c rename to bsp/qemu-virt64-riscv/driver/virtio/virtio_console.c diff --git a/components/drivers/virtio/virtio_console.h b/bsp/qemu-virt64-riscv/driver/virtio/virtio_console.h 
similarity index 100% rename from components/drivers/virtio/virtio_console.h rename to bsp/qemu-virt64-riscv/driver/virtio/virtio_console.h diff --git a/components/drivers/virtio/virtio_gpu.c b/bsp/qemu-virt64-riscv/driver/virtio/virtio_gpu.c similarity index 100% rename from components/drivers/virtio/virtio_gpu.c rename to bsp/qemu-virt64-riscv/driver/virtio/virtio_gpu.c diff --git a/components/drivers/virtio/virtio_gpu.h b/bsp/qemu-virt64-riscv/driver/virtio/virtio_gpu.h similarity index 100% rename from components/drivers/virtio/virtio_gpu.h rename to bsp/qemu-virt64-riscv/driver/virtio/virtio_gpu.h diff --git a/components/drivers/virtio/virtio_input.c b/bsp/qemu-virt64-riscv/driver/virtio/virtio_input.c similarity index 100% rename from components/drivers/virtio/virtio_input.c rename to bsp/qemu-virt64-riscv/driver/virtio/virtio_input.c diff --git a/components/drivers/virtio/virtio_input.h b/bsp/qemu-virt64-riscv/driver/virtio/virtio_input.h similarity index 100% rename from components/drivers/virtio/virtio_input.h rename to bsp/qemu-virt64-riscv/driver/virtio/virtio_input.h diff --git a/components/drivers/virtio/virtio_input_event_codes.h b/bsp/qemu-virt64-riscv/driver/virtio/virtio_input_event_codes.h similarity index 100% rename from components/drivers/virtio/virtio_input_event_codes.h rename to bsp/qemu-virt64-riscv/driver/virtio/virtio_input_event_codes.h diff --git a/components/drivers/virtio/virtio_mmio.h b/bsp/qemu-virt64-riscv/driver/virtio/virtio_mmio.h similarity index 100% rename from components/drivers/virtio/virtio_mmio.h rename to bsp/qemu-virt64-riscv/driver/virtio/virtio_mmio.h diff --git a/components/drivers/virtio/virtio_net.c b/bsp/qemu-virt64-riscv/driver/virtio/virtio_net.c similarity index 100% rename from components/drivers/virtio/virtio_net.c rename to bsp/qemu-virt64-riscv/driver/virtio/virtio_net.c diff --git a/components/drivers/virtio/virtio_net.h b/bsp/qemu-virt64-riscv/driver/virtio/virtio_net.h similarity index 100% rename from 
components/drivers/virtio/virtio_net.h rename to bsp/qemu-virt64-riscv/driver/virtio/virtio_net.h diff --git a/components/drivers/virtio/virtio_queue.h b/bsp/qemu-virt64-riscv/driver/virtio/virtio_queue.h similarity index 100% rename from components/drivers/virtio/virtio_queue.h rename to bsp/qemu-virt64-riscv/driver/virtio/virtio_queue.h diff --git a/bsp/qemu-virt64-riscv/link_cpus.lds b/bsp/qemu-virt64-riscv/link_cpus.lds index 2659b2befb4..e4cd5b88712 100644 --- a/bsp/qemu-virt64-riscv/link_cpus.lds +++ b/bsp/qemu-virt64-riscv/link_cpus.lds @@ -1 +1 @@ -RT_CPUS_NR = 8; +RT_CPUS_NR = 1; diff --git a/bsp/qemu-virt64-riscv/rtconfig.h b/bsp/qemu-virt64-riscv/rtconfig.h index 430730ff68c..8105dca7fb1 100644 --- a/bsp/qemu-virt64-riscv/rtconfig.h +++ b/bsp/qemu-virt64-riscv/rtconfig.h @@ -120,7 +120,8 @@ #define RT_USING_CONSOLE #define RT_CONSOLEBUF_SIZE 256 #define RT_CONSOLE_DEVICE_NAME "uart0" -#define RT_VER_NUM 0x50201 +#define RT_USING_CONSOLE_OUTPUT_CTL +#define RT_VER_NUM 0x50300 #define RT_USING_STDC_ATOMIC #define RT_BACKTRACE_LEVEL_MAX_NR 32 /* end of RT-Thread Kernel */ @@ -193,24 +194,14 @@ #define RT_USING_SERIAL_V1 #define RT_SERIAL_USING_DMA #define RT_SERIAL_RB_BUFSZ 64 -#define RT_USING_CPUTIME -#define RT_USING_CPUTIME_RISCV -#define CPUTIME_TIMER_FREQ 10000000 +#define RT_USING_CLOCK_TIME +#define CLOCK_TIMER_FREQ 0 #define RT_USING_NULL #define RT_USING_ZERO #define RT_USING_RANDOM #define RT_USING_RTC #define RT_USING_SOFT_RTC -#define RT_USING_VIRTIO -#define RT_USING_VIRTIO10 -#define RT_USING_VIRTIO_BLK -#define RT_USING_VIRTIO_NET -#define RT_USING_VIRTIO_CONSOLE -#define RT_USING_VIRTIO_CONSOLE_PORT_MAX_NR 4 -#define RT_USING_VIRTIO_GPU -#define RT_USING_VIRTIO_INPUT #define RT_USING_PIN -#define RT_USING_KTIME /* end of Device Drivers */ /* C/C++ and POSIX layer */ @@ -551,6 +542,13 @@ /* RISC-V QEMU virt64 configs */ #define BSP_USING_VIRTIO +#define RT_USING_VIRTIO10 +#define RT_USING_VIRTIO_BLK +#define RT_USING_VIRTIO_NET +#define 
RT_USING_VIRTIO_CONSOLE +#define RT_USING_VIRTIO_CONSOLE_PORT_MAX_NR 4 +#define RT_USING_VIRTIO_GPU +#define RT_USING_VIRTIO_INPUT #define BSP_USING_VIRTIO_BLK #define BSP_USING_VIRTIO_NET #define BSP_USING_VIRTIO_CONSOLE diff --git a/bsp/xuantie/virt64/c906/.config b/bsp/xuantie/virt64/c906/.config index 72eee0c6676..434ab428f22 100644 --- a/bsp/xuantie/virt64/c906/.config +++ b/bsp/xuantie/virt64/c906/.config @@ -191,7 +191,8 @@ CONFIG_RT_USING_DEVICE_OPS=y CONFIG_RT_USING_CONSOLE=y CONFIG_RT_CONSOLEBUF_SIZE=256 CONFIG_RT_CONSOLE_DEVICE_NAME="uart0" -CONFIG_RT_VER_NUM=0x50201 +CONFIG_RT_USING_CONSOLE_OUTPUT_CTL=y +CONFIG_RT_VER_NUM=0x50300 CONFIG_RT_USING_STDC_ATOMIC=y CONFIG_RT_BACKTRACE_LEVEL_MAX_NR=32 # end of RT-Thread Kernel @@ -308,9 +309,8 @@ CONFIG_RT_SERIAL_USING_DMA=y CONFIG_RT_SERIAL_RB_BUFSZ=64 CONFIG_RT_USING_SERIAL_BYPASS=y # CONFIG_RT_USING_CAN is not set -CONFIG_RT_USING_CPUTIME=y -CONFIG_RT_USING_CPUTIME_RISCV=y -CONFIG_CPUTIME_TIMER_FREQ=10000000 +CONFIG_RT_USING_CLOCK_TIME=y +CONFIG_CLOCK_TIMER_FREQ=0 # CONFIG_RT_USING_I2C is not set # CONFIG_RT_USING_PHY is not set # CONFIG_RT_USING_PHY_V2 is not set @@ -338,17 +338,7 @@ CONFIG_RT_USING_SOFT_RTC=y # CONFIG_RT_USING_HWCRYPTO is not set # CONFIG_RT_USING_WIFI is not set # CONFIG_RT_USING_BLK is not set -CONFIG_RT_USING_VIRTIO=y -CONFIG_RT_USING_VIRTIO10=y -# CONFIG_RT_USING_VIRTIO_MMIO_ALIGN is not set -CONFIG_RT_USING_VIRTIO_BLK=y -# CONFIG_RT_USING_VIRTIO_NET is not set -# CONFIG_RT_USING_VIRTIO_CONSOLE is not set -# CONFIG_RT_USING_VIRTIO_GPU is not set -# CONFIG_RT_USING_VIRTIO_INPUT is not set CONFIG_RT_USING_PIN=y -CONFIG_RT_USING_KTIME=y -# CONFIG_RT_USING_HWTIMER is not set # CONFIG_RT_USING_CHERRYUSB is not set # end of Device Drivers @@ -447,11 +437,10 @@ CONFIG_RT_USING_ADT_REF=y # CONFIG_RT_USING_RT_LINK is not set # end of Utilities -# CONFIG_RT_USING_VBUS is not set - # # Memory management # +# CONFIG_RT_PAGE_MPR_SIZE_DYNAMIC is not set CONFIG_RT_PAGE_AFFINITY_BLOCK_SIZE=0x1000 
CONFIG_RT_PAGE_MAX_ORDER=11 # CONFIG_RT_USING_MEMBLOCK is not set @@ -488,6 +477,7 @@ CONFIG_LWP_PTY_MAX_PARIS_LIMIT=64 # end of Using USB legacy version # CONFIG_RT_USING_FDT is not set +# CONFIG_RT_USING_RUST is not set # end of RT-Thread Components # @@ -1526,6 +1516,13 @@ CONFIG_LWP_PTY_MAX_PARIS_LIMIT=64 # RISC-V QEMU virt64 configs # CONFIG_BSP_USING_VIRTIO=y +CONFIG_RT_USING_VIRTIO10=y +# CONFIG_RT_USING_VIRTIO_MMIO_ALIGN is not set +CONFIG_RT_USING_VIRTIO_BLK=y +# CONFIG_RT_USING_VIRTIO_NET is not set +# CONFIG_RT_USING_VIRTIO_CONSOLE is not set +# CONFIG_RT_USING_VIRTIO_GPU is not set +# CONFIG_RT_USING_VIRTIO_INPUT is not set CONFIG_BSP_USING_VIRTIO_BLK=y # end of RISC-V QEMU virt64 configs diff --git a/bsp/xuantie/virt64/c906/board/Kconfig b/bsp/xuantie/virt64/c906/board/Kconfig index 6e529db46fb..84e51d8d6a8 100644 --- a/bsp/xuantie/virt64/c906/board/Kconfig +++ b/bsp/xuantie/virt64/c906/board/Kconfig @@ -1,15 +1,11 @@ menu "RISC-V QEMU virt64 configs" -config BSP_USING_VIRTIO - bool "Using VirtIO" - default y - depends on RT_USING_DEVICE_OPS +rsource "../../../../qemu-virt64-riscv/driver/virtio/Kconfig" config BSP_USING_VIRTIO_BLK bool "Using VirtIO BLK" - select RT_USING_VIRTIO + select BSP_USING_VIRTIO select RT_USING_VIRTIO_BLK default y - depends on BSP_USING_VIRTIO endmenu diff --git a/bsp/xuantie/virt64/c906/board/SConscript b/bsp/xuantie/virt64/c906/board/SConscript index caddaea921b..2aecdf2e6e1 100644 --- a/bsp/xuantie/virt64/c906/board/SConscript +++ b/bsp/xuantie/virt64/c906/board/SConscript @@ -12,4 +12,5 @@ src += ['drv_virtio.c'] path = [cwd] group = DefineGroup('Drivers', src, depend = [''], CPPPATH = path) +group = group + SConscript(os.path.join('../../../../../qemu-virt64-riscv/driver/virtio', 'SConscript')) Return('group') diff --git a/bsp/xuantie/virt64/c906/rtconfig.h b/bsp/xuantie/virt64/c906/rtconfig.h index 5fed78875ad..3f65e72f127 100644 --- a/bsp/xuantie/virt64/c906/rtconfig.h +++ b/bsp/xuantie/virt64/c906/rtconfig.h @@ -121,7 
+121,8 @@ #define RT_USING_CONSOLE #define RT_CONSOLEBUF_SIZE 256 #define RT_CONSOLE_DEVICE_NAME "uart0" -#define RT_VER_NUM 0x50201 +#define RT_USING_CONSOLE_OUTPUT_CTL +#define RT_VER_NUM 0x50300 #define RT_USING_STDC_ATOMIC #define RT_BACKTRACE_LEVEL_MAX_NR 32 /* end of RT-Thread Kernel */ @@ -210,19 +211,14 @@ #define RT_SERIAL_USING_DMA #define RT_SERIAL_RB_BUFSZ 64 #define RT_USING_SERIAL_BYPASS -#define RT_USING_CPUTIME -#define RT_USING_CPUTIME_RISCV -#define CPUTIME_TIMER_FREQ 10000000 +#define RT_USING_CLOCK_TIME +#define CLOCK_TIMER_FREQ 0 #define RT_USING_NULL #define RT_USING_ZERO #define RT_USING_RANDOM #define RT_USING_RTC #define RT_USING_SOFT_RTC -#define RT_USING_VIRTIO -#define RT_USING_VIRTIO10 -#define RT_USING_VIRTIO_BLK #define RT_USING_PIN -#define RT_USING_KTIME /* end of Device Drivers */ /* C/C++ and POSIX layer */ @@ -520,6 +516,8 @@ /* RISC-V QEMU virt64 configs */ #define BSP_USING_VIRTIO +#define RT_USING_VIRTIO10 +#define RT_USING_VIRTIO_BLK #define BSP_USING_VIRTIO_BLK /* end of RISC-V QEMU virt64 configs */ #define BOARD_QEMU_VIRT_RV64 diff --git a/components/drivers/include/drivers/virtio.h b/components/drivers/include/drivers/virtio.h new file mode 100644 index 00000000000..e38aed57930 --- /dev/null +++ b/components/drivers/include/drivers/virtio.h @@ -0,0 +1,282 @@ +/* + * Copyright (c) 2006-2023, RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2021-9-16 GuEe-GUI the first version + * 2021-11-11 GuEe-GUI modify to virtio common interface + * 2023-02-25 GuEe-GUI support DM + */ + +#ifndef __VIRTIO_H__ +#define __VIRTIO_H__ + +#include + +#include +#include +#include + +#include "../../virtio/virtio_ids.h" + +#define VIRTIO_STATUS_ACKNOWLEDGE RT_BIT(0) +#define VIRTIO_STATUS_DRIVER RT_BIT(1) +#define VIRTIO_STATUS_DRIVER_OK RT_BIT(2) +#define VIRTIO_STATUS_FEATURES_OK RT_BIT(3) +#define VIRTIO_STATUS_NEEDS_RESET RT_BIT(6) +#define VIRTIO_STATUS_FAILED RT_BIT(7) 
+ +#define VIRTIO_F_NOTIFY_ON_EMPTY 24 +#define VIRTIO_F_ANY_LAYOUT 27 +#define VIRTIO_F_RING_INDIRECT_DESC 28 +#define VIRTIO_F_RING_EVENT_IDX 29 +#define VIRTIO_F_VERSION_1 32 +#define VIRTIO_F_ACCESS_PLATFORM 33 +#define VIRTIO_F_RING_PACKED 34 +#define VIRTIO_F_IN_ORDER 35 +#define VIRTIO_F_ORDER_PLATFORM 36 +#define VIRTIO_F_SR_IOV 37 +#define VIRTIO_F_NOTIFICATION_DATA 38 +#define VIRTIO_F_NOTIF_CONFIG_DATA 39 +#define VIRTIO_F_RING_RESET 40 + +#define VIRTIO_TRANSPORT_F_START VIRTIO_F_RING_INDIRECT_DESC +#define VIRTIO_TRANSPORT_F_END VIRTIO_F_RING_RESET + +struct rt_virtio_device_id +{ +#define VIRTIO_DEVICE_ANY_ID 0xffffffff + rt_uint32_t device; + rt_uint32_t vendor; +}; + +struct rt_virtqueue; +struct rt_virtio_transport; + +typedef rt_bool_t (*rt_virtqueue_notifier)(struct rt_virtqueue *vq); +typedef void (*rt_virtqueue_callback)(struct rt_virtqueue *vq); + +struct rt_virtio_device +{ + struct rt_device parent; + + int idx; + struct rt_virtio_device_id id; + + rt_uint64_t features; + const struct rt_virtio_transport *trans; + + rt_list_t vq_node; + struct rt_spinlock vq_lock; + + rt_bool_t dma_dispatch; + void *priv; +}; + +struct virtqueue_split; +struct virtqueue_packed; + +struct rt_virtqueue_formula +{ + void *page; +}; + +struct rt_virtqueue +{ + rt_list_t list; + rt_list_t user_list; + + const char *name; + rt_uint32_t index; + rt_uint32_t num_max; + rt_uint32_t num_free; + + rt_virtqueue_callback callback; + + union + { + struct virtqueue_split *vq_split; + struct virtqueue_packed *vq_packed; + }; + rt_bool_t packed_ring; + + rt_virtqueue_notifier notify; + + rt_bool_t event; + rt_bool_t event_triggered; + rt_uint32_t free_head; + rt_uint32_t num_added; + rt_uint16_t last_used_idx; + + struct rt_virtio_device *vdev; + struct rt_virtqueue_formula formula; +}; + +struct rt_virtio_shm_region +{ + rt_uint8_t id; + + rt_uint64_t base; + rt_uint64_t size; +}; + +#define RT_VIRTQUEUE_CTL_IRQ_AFFINITY 0 +#define RT_VIRTQUEUE_CTL_READ_SHM_REGION 1 + 
+struct rt_virtio_transport +{ + rt_err_t (*get_status)(struct rt_virtio_device *vdev, rt_uint8_t *out_status); + rt_err_t (*set_status)(struct rt_virtio_device *vdev, rt_uint8_t status); + rt_err_t (*get_features)(struct rt_virtio_device *vdev, rt_uint64_t *out_features); + rt_err_t (*set_features)(struct rt_virtio_device *vdev); + rt_err_t (*get_config)(struct rt_virtio_device *vdev, rt_uint32_t offset, void *dst, int length); + rt_err_t (*set_config)(struct rt_virtio_device *vdev, rt_uint32_t offset, const void *src, int length); + rt_err_t (*install_vqs)(struct rt_virtio_device *vdev, int vqs_nr, + struct rt_virtqueue *vqs[], const char *names[], rt_virtqueue_callback cbs[]); + rt_err_t (*release_vqs)(struct rt_virtio_device *vdev); + rt_err_t (*control_vqs)(struct rt_virtio_device *vdev, rt_uint32_t cfg, void *data); + rt_err_t (*generation)(struct rt_virtio_device *vdev, rt_uint32_t *out_counter); + rt_err_t (*reset)(struct rt_virtio_device *vdev); +}; + +struct rt_virtio_driver +{ + struct rt_driver parent; + + const struct rt_virtio_device_id *ids; + + rt_uint64_t features; + + rt_err_t (*probe)(struct rt_virtio_device *vdev); + rt_err_t (*remove)(struct rt_virtio_device *vdev); + void (*config_changed)(struct rt_virtio_device *vdev); +}; + +struct rt_virtqueue *rt_virtqueue_create(struct rt_virtio_device *vdev, + const char *name, int index, int num, rt_uint32_t align, + rt_virtqueue_notifier notify, rt_virtqueue_callback callback, + struct rt_virtqueue_formula *formula); + +rt_err_t rt_virtqueue_delete(struct rt_virtio_device *vdev, struct rt_virtqueue *vq); + +rt_err_t rt_virtqueue_add_outbuf(struct rt_virtqueue *vq, void *dma_buf, rt_size_t size); +rt_err_t rt_virtqueue_add_inbuf(struct rt_virtqueue *vq, void *dma_buf, rt_size_t size); + +rt_bool_t rt_virtqueue_prepare(struct rt_virtqueue *vq, rt_uint32_t nr); +void rt_virtqueue_wait_prepare(struct rt_virtqueue *vq, rt_uint32_t nr); +rt_uint32_t rt_virtqueue_next_buf_index(struct rt_virtqueue *vq); + 
+rt_bool_t rt_virtqueue_submit(struct rt_virtqueue *vq); +rt_bool_t rt_virtqueue_notify(struct rt_virtqueue *vq); +rt_bool_t rt_virtqueue_kick(struct rt_virtqueue *vq); + +void rt_virtqueue_isr(int irq, struct rt_virtqueue *vq); +rt_bool_t rt_virtqueue_poll(struct rt_virtqueue *vq, rt_uint32_t last_used_idx); + +void rt_virtqueue_disable_callback(struct rt_virtqueue *vq); +rt_bool_t rt_virtqueue_enable_callback(struct rt_virtqueue *vq, + rt_uint32_t *out_last_used_idx); + +void *rt_virtqueue_read_buf(struct rt_virtqueue *vq, rt_size_t *out_len); + +rt_size_t rt_virtqueue_get_virtq_size(struct rt_virtqueue *vq); +rt_ubase_t rt_virtqueue_get_desc_addr(struct rt_virtqueue *vq); +rt_ubase_t rt_virtqueue_get_avail_addr(struct rt_virtqueue *vq); +rt_ubase_t rt_virtqueue_get_used_addr(struct rt_virtqueue *vq); + +void rt_virtio_device_ready(struct rt_virtio_device *vdev); +void rt_virtio_device_reset(struct rt_virtio_device *vdev); +void rt_virtio_device_config_changed(struct rt_virtio_device *vdev); + +void rt_virtio_add_status(struct rt_virtio_device *vdev, rt_uint32_t status); + +rt_bool_t rt_virtio_has_status(struct rt_virtio_device *vdev, rt_uint8_t status_bit); +rt_bool_t rt_virtio_has_feature(struct rt_virtio_device *vdev, rt_uint32_t feature_bit); + +rt_err_t rt_virtio_get_config(struct rt_virtio_device *vdev, + rt_uint32_t offset, void *dst, int length); +rt_err_t rt_virtio_set_config(struct rt_virtio_device *vdev, + rt_uint32_t offset, const void *src, int length); + +rt_inline void rt_virtio_read_bytes(struct rt_virtio_device *vdev, + rt_uint32_t offset, void *buf, rt_uint32_t bytes_size, rt_uint32_t count) +{ + rt_uint32_t old, gen; + + if (vdev->trans->generation) + { + vdev->trans->generation(vdev, &gen); + } + else + { + gen = 0; + } + + do { + old = gen; + + for (int i = 0; i < count; ++i) + { + vdev->trans->get_config(vdev, + offset + bytes_size * i, buf + i * bytes_size, bytes_size); + } + + if (vdev->trans->generation) + { + 
vdev->trans->generation(vdev, &gen); + } + else + { + gen = 0; + } + } while (gen != old); +} + +#define rt_virtio_le_to_cpu(value) \ + (sizeof(value) == 4 ? rt_le32_to_cpu(value) : \ + (sizeof(value) == 2 ? rt_le16_to_cpu(value) : \ + (sizeof(value) == 1 ? value : rt_le64_to_cpu(value)))) + +#define rt_virtio_cpu_to_le(value) \ + (sizeof(value) == 4 ? rt_cpu_to_le32(value) : \ + (sizeof(value) == 2 ? rt_cpu_to_le16(value) : \ + (sizeof(value) == 1 ? value : rt_cpu_to_le64(value)))) + +#define rt_virtio_read_config(vdev, type, member, out_value) \ +do { \ + typeof(((type *)0)->member) tmp; \ + rt_uint32_t offset = (rt_ubase_t)&((type *)0)->member; \ + switch (sizeof(tmp)) \ + { \ + case 1: case 2: case 4: \ + rt_virtio_get_config(vdev, offset, &tmp, sizeof(tmp)); \ + break; \ + default: \ + rt_virtio_read_bytes(vdev, offset, &tmp, sizeof(tmp), 1); \ + break; \ + } \ + *(out_value) = rt_virtio_le_to_cpu(tmp); \ +} while (0) + +#define rt_virtio_write_config(vdev, type, member, value) \ +do { \ + typeof(((type *)0)->member) tmp = rt_virtio_cpu_to_le(value); \ + rt_uint32_t offset = (rt_ubase_t)&((type *)0)->member; \ + rt_virtio_set_config(vdev, offset, &tmp, sizeof(tmp)); \ +} while (0) + +rt_err_t rt_virtio_virtqueue_install(struct rt_virtio_device *vdev, int vqs_nr, + struct rt_virtqueue *vqs[], const char *names[], rt_virtqueue_callback cbs[]); +rt_err_t rt_virtio_virtqueue_release(struct rt_virtio_device *vdev); +rt_err_t rt_virtio_virtqueue_control(struct rt_virtio_device *vdev, rt_uint32_t cfg, void *data); + +const char *rt_virtio_device_id_name(struct rt_virtio_device *vdev); + +rt_err_t rt_virtio_driver_register(struct rt_virtio_driver *vdrv); +rt_err_t rt_virtio_device_register(struct rt_virtio_device *vdev); + +#define RT_VIRTIO_DRIVER_EXPORT(driver) RT_DRIVER_EXPORT(driver, virtio, BUILIN) + +#endif /* __VIRTIO_H__ */ diff --git a/components/drivers/include/drivers/virtq.h b/components/drivers/include/drivers/virtq.h new file mode 100644 index 
00000000000..fa18ee82e38 --- /dev/null +++ b/components/drivers/include/drivers/virtq.h @@ -0,0 +1,198 @@ +/* + * Copyright (c) 2006-2023, RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2021-11-11 GuEe-GUI the first version + * 2023-02-25 GuEe-GUI support packed + */ + +#ifndef __VIRTQ_H__ +#define __VIRTQ_H__ + +#include +#include + +#define VIRTQ_DESC_F_NEXT RT_BIT(0) /* This marks a buffer as continuing via the next field. */ +#define VIRTQ_DESC_F_WRITE RT_BIT(1) /* This marks a buffer as write-only (otherwise read-only). */ +#define VIRTQ_DESC_F_INDIRECT RT_BIT(3) /* This means the buffer contains a list of buffer descriptors. */ +#define VIRTQ_DESC_F_AVAIL RT_BIT(7) +#define VIRTQ_DESC_F_USED RT_BIT(15) + +/* + * The device uses this in used->flags to advise the driver: don't kick me + * when you add a buffer. It's unreliable, so it's simply an optimization. + */ +#define VIRTQ_USED_F_NO_NOTIFY 1 + +/* + * The driver uses this in avail->flags to advise the device: don't + * interrupt me when you consume a buffer. It's unreliable, so it's + * simply an optimization. + */ +#define VIRTQ_AVAIL_F_NO_INTERRUPT 1 + +/* Virtqueue descriptors: 16 bytes. These can chain together via "next". */ +struct virtq_desc +{ + rt_uint64_t addr; /* Address (guest-physical). */ + rt_uint32_t len; /* Length. */ + rt_uint16_t flags; /* The flags as indicated above. */ + rt_uint16_t next; /* We chain unused descriptors via this, too */ +}; + +struct virtq_avail +{ + rt_uint16_t flags; /* Notifications */ + rt_uint16_t idx; /* Where the driver would put the next descriptor entry in the ring (modulo the queue size) */ + rt_uint16_t ring[]; + + /* + * Only if VIRTIO_F_RING_EVENT_IDX + * rt_uint16_t used_event; + */ +}; + +struct virtq_used_elem +{ + rt_uint32_t id; /* Index of start of used descriptor chain. */ + rt_uint32_t len; /* Total length of the descriptor chain which was written to. 
*/ +}; + +struct virtq_used +{ + rt_uint16_t flags; + rt_uint16_t idx; + struct virtq_used_elem ring[]; + + /* + * Only if VIRTIO_F_RING_EVENT_IDX + * rt_uint16_t avail_event; + */ +}; + +/* Enable events in packed ring. */ +#define VIRTQ_PACKED_EVENT_FLAG_ENABLE 0x0 +/* Disable events in packed ring. */ +#define VIRTQ_PACKED_EVENT_FLAG_DISABLE 0x1 +/* + * Enable events for a specific descriptor in packed ring. + * (as specified by Descriptor Ring Change Event Offset/Wrap Counter). + * Only valid if VIRTIO_F_RING_EVENT_IDX has been negotiated. + */ +#define VIRTQ_PACKED_EVENT_FLAG_DESC 0x2 + +/* Offset bit shift in event suppression structure of packed ring. */ +#define VIRTQ_PACKED_EVENT_OFF 0 +/* Wrap counter bit shift in event suppression structure of packed ring. */ +#define VIRTQ_PACKED_EVENT_WRAP_CTR 15 + +struct virtq_packed_desc +{ + rt_le64_t addr; /* Buffer Address. */ + rt_le32_t len; /* Buffer Length. */ + rt_le16_t id; /* Buffer ID (always at the last desc). */ + rt_le16_t flags; /* The flags depending on descriptor type. */ + /* + * For avail desc: + * When wrap counter is 1 => VIRTQ_DESC_F_AVAIL | ~VIRTQ_DESC_F_USED + * When wrap counter is 0 => ~VIRTQ_DESC_F_AVAIL | VIRTQ_DESC_F_USED + * + * For used desc: + * When wrap counter is 1 => VIRTQ_DESC_F_AVAIL | VIRTQ_DESC_F_USED + * When wrap counter is 0 => ~VIRTQ_DESC_F_AVAIL | ~VIRTQ_DESC_F_USED + */ +}; + +struct virtq_packed_desc_event +{ + rt_le16_t off_wrap; /* Descriptor ring change event offset/wrap counter. */ + rt_le16_t flags; /* Descriptor ring change event flags. 
*/ +}; + +struct virtq +{ + rt_uint32_t num; + rt_uint32_t align; + + union + { + struct virtq_desc *desc; + struct virtq_packed_desc *desc_packed; + }; + union + { + struct virtq_avail *avail; + struct virtq_packed_desc_event *driver_event; + }; + union + { + struct virtq_used *used; + struct virtq_packed_desc_event *device_event; + }; +}; + +rt_inline void virtq_init(struct virtq *virtq, rt_uint32_t num, + void *pages, rt_ubase_t align) +{ + /* Only split */ + virtq->num = num; + virtq->align = align; + + virtq->desc = (struct virtq_desc *)pages; + virtq->avail = (struct virtq_avail *)&virtq->desc[num]; + /* Keep sizeof(used_event) */ + virtq->used = (struct virtq_used *)RT_ALIGN((rt_ubase_t)&virtq->avail->ring[num] + sizeof(rt_uint16_t), align); +} + +rt_inline rt_size_t virtq_size(struct virtq *virtq, rt_uint32_t try_num, rt_uint32_t try_align) +{ + /* Only split */ + rt_uint32_t align; + rt_size_t size, num; + + if (virtq) + { + num = virtq->num; + align = virtq->align; + } + else + { + num = try_num; + align = try_align; + } + + size = sizeof(struct virtq_desc) * num; + size += sizeof(struct virtq_avail) + sizeof(rt_uint16_t) * num; + /* Keep sizeof(used_event) */ + size += sizeof(rt_uint16_t); + size = RT_ALIGN(size, align); + size += sizeof(struct virtq_used) + sizeof(struct virtq_used_elem) * num; + /* Keep sizeof(avail_event) */ + size += sizeof(rt_uint16_t); + + return size; +} + +rt_inline rt_bool_t virtq_need_event(rt_uint16_t event_idx, + rt_uint16_t new_idx, rt_uint16_t old_idx) +{ + return (rt_uint16_t)(new_idx - event_idx - 1) < (rt_uint16_t)(new_idx - old_idx); +} + +/* Get location of event indices (only with VIRTIO_F_EVENT_IDX) */ +rt_inline rt_uint16_t *virtq_used_event(struct virtq *virtq) +{ + /* For backwards compat, used event index is at *end* of avail ring. 
*/ + return &virtq->avail->ring[virtq->num]; +} + +rt_inline rt_uint16_t *virtq_avail_event(struct virtq *virtq) +{ + /* For backwards compat, avail event index is at *end* of used ring. */ + return (rt_uint16_t *)&virtq->used->ring[virtq->num]; +} + +#endif /* __VIRTQ_H__ */ diff --git a/components/drivers/include/rtdevice.h b/components/drivers/include/rtdevice.h index c3a1277449c..4a42a7e62a6 100644 --- a/components/drivers/include/rtdevice.h +++ b/components/drivers/include/rtdevice.h @@ -155,6 +155,11 @@ extern "C" { #ifdef RT_USING_NVMEM #include "drivers/nvmem.h" #endif /* RT_USING_NVMEM */ + +#ifdef RT_USING_VIRTIO +#include "drivers/virtio.h" +#include "drivers/virtq.h" +#endif #endif /* RT_USING_DM */ #ifdef RT_USING_RTC diff --git a/components/drivers/virtio/Kconfig b/components/drivers/virtio/Kconfig index 8298ff75beb..5d974c886d3 100644 --- a/components/drivers/virtio/Kconfig +++ b/components/drivers/virtio/Kconfig @@ -1,43 +1,99 @@ menuconfig RT_USING_VIRTIO bool "Using VirtIO device drivers" + depends on RT_USING_DM + depends on RT_USING_DMA default n - if RT_USING_VIRTIO - choice RT_USING_VIRTIO_VERSION - prompt "VirtIO Version" - default RT_USING_VIRTIO10 - - config RT_USING_VIRTIO10 - bool "VirtIO v1.0" - endchoice - - config RT_USING_VIRTIO_MMIO_ALIGN - bool "Using VirtIO MMIO alignment" - default y - - config RT_USING_VIRTIO_BLK - bool "Using VirtIO BLK" - default y - - config RT_USING_VIRTIO_NET - bool "Using VirtIO NET" - default y - - menuconfig RT_USING_VIRTIO_CONSOLE - bool "Using VirtIO Console" - default y - - if RT_USING_VIRTIO_CONSOLE - config RT_USING_VIRTIO_CONSOLE_PORT_MAX_NR - int "Max number of port in VirtIO Console" - default 4 - endif - - config RT_USING_VIRTIO_GPU - bool "Using VirtIO GPU" - default y - - config RT_USING_VIRTIO_INPUT - bool "Using VirtIO Input" - default y - endif +config RT_VIRTIO_TRANSPORT_MMIO + bool "Using VirtIO MMIO transport" + depends on RT_USING_VIRTIO + depends on RT_USING_OFW + default y + +config 
RT_VIRTIO_TRANSPORT_PCI + bool "Using VirtIO PCI transport" + depends on RT_USING_VIRTIO + depends on RT_USING_PCI + default y + help + We not support the version of 0.95 in PCI. The minimum version >= 1.0 + +config RT_VIRTIO_NET + bool "VirtIO Net" + depends on RT_USING_VIRTIO + depends on RT_USING_ETHERNET + default y + +config RT_VIRTIO_BLK + bool "VirtIO BLK" + depends on RT_USING_VIRTIO + depends on RT_USING_BLK + default y + +config RT_VIRTIO_CONSOLE + bool "VirtIO Console" + depends on RT_USING_VIRTIO + depends on RT_USING_SERIAL + select RT_USING_DEVICE_IPC + select RT_USING_SYSTEM_WORKQUEUE + default y + +config RT_VIRTIO_RNG + bool "VirtIO RNG" + depends on RT_USING_VIRTIO + depends on RT_USING_HWCRYPTO + depends on RT_HWCRYPTO_USING_RNG + default y + +config RT_VIRTIO_SCSI + bool "VirtIO SCSI" + depends on RT_USING_VIRTIO + depends on RT_SCSI_SD || RT_SCSI_CDROM + default y + +config RT_VIRTIO_9P + bool "VirtIO 9P" + depends on RT_USING_VIRTIO + select RT_USING_DFS_9PFS + default y + +config RT_VIRTIO_RPROC_SERIAL + bool "VirtIO Remoteproc Serial Link" + depends on RT_USING_VIRTIO + select RT_VIRTIO_CONSOLE + default y + +config RT_VIRTIO_GPU + bool "VirtIO GPU" + depends on RT_USING_VIRTIO + default y + +config RT_VIRTIO_INPUT + bool "VirtIO Input" + depends on RT_USING_VIRTIO + depends on RT_USING_INPUT + depends on RT_INPUT_TOUCHSCREEN + default y + +config RT_VIRTIO_CRYPTO + bool "VirtIO Crypto" + depends on RT_USING_VIRTIO + depends on RT_USING_HWCRYPTO + depends on RT_HWCRYPTO_USING_AES + depends on RT_HWCRYPTO_USING_AES_ECB + depends on RT_HWCRYPTO_USING_AES_CBC + depends on RT_HWCRYPTO_USING_AES_CTR + depends on RT_HWCRYPTO_USING_DES + depends on RT_HWCRYPTO_USING_DES_ECB + depends on RT_HWCRYPTO_USING_DES_CBC + depends on RT_HWCRYPTO_USING_3DES + depends on RT_HWCRYPTO_USING_3DES_ECB + depends on RT_HWCRYPTO_USING_3DES_CBC + depends on RT_HWCRYPTO_USING_RC4 + default y + +config RT_VIRTIO_SCMI + bool "VirtIO SCMI" + depends on RT_USING_VIRTIO + 
depends on RT_FIRMWARE_ARM_SCMI + default y diff --git a/components/drivers/virtio/SConscript b/components/drivers/virtio/SConscript index d38ec0e3561..a5d3817f0db 100644 --- a/components/drivers/virtio/SConscript +++ b/components/drivers/virtio/SConscript @@ -1,11 +1,51 @@ -# RT-Thread building script for component - from building import * -cwd = GetCurrentDir() -src = Glob('*.c') -CPPPATH = [cwd] +group = [] + +if not GetDepend(['RT_USING_VIRTIO']): + Return('group') + +cwd = GetCurrentDir() +CPPPATH = [cwd + '/../include'] + +src = ['virtio.c', 'virtio_queue.c'] + +if GetDepend(['RT_VIRTIO_TRANSPORT_MMIO']): + src += ['virtio_mmio.c'] + +if GetDepend(['RT_VIRTIO_TRANSPORT_PCI']): + src += ['virtio_pci.c'] + +if GetDepend(['RT_VIRTIO_NET']): + src += ['virtio-net.c'] + +if GetDepend(['RT_VIRTIO_BLK']): + src += ['virtio-blk.c'] + +if GetDepend(['RT_VIRTIO_CONSOLE']) or GetDepend(['RT_VIRTIO_RPROC_SERIAL']): + src += ['virtio-console.c'] + +if GetDepend(['RT_VIRTIO_RNG']): + src += ['virtio-rng.c'] + +if GetDepend(['RT_VIRTIO_SCSI']): + src += ['virtio-scsi.c'] + +if GetDepend(['RT_VIRTIO_9P']): + src += ['virtio-9p.c'] + +if GetDepend(['RT_VIRTIO_GPU']): + src += ['virtio-gpu.c'] + +if GetDepend(['RT_VIRTIO_INPUT']): + src += ['virtio-input.c'] + +if GetDepend(['RT_VIRTIO_CRYPTO']): + src += ['virtio-crypto.c'] + +if GetDepend(['RT_VIRTIO_SCMI']): + src += ['virtio-scmi.c'] -group = DefineGroup('DeviceDrivers', src, depend = ['RT_USING_VIRTIO'], CPPPATH = CPPPATH) +group = DefineGroup('DeviceDrivers', src, depend = [''], CPPPATH = CPPPATH) Return('group') diff --git a/components/drivers/virtio/virtio-9p.c b/components/drivers/virtio/virtio-9p.c new file mode 100644 index 00000000000..67ef8ca3e8f --- /dev/null +++ b/components/drivers/virtio/virtio-9p.c @@ -0,0 +1,188 @@ +/* + * Copyright (c) 2006-2023, RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2023-02-25 GuEe-GUI the first version + */ + 
+#include +#include +#include + +#define DBG_TAG "virtio.dev.9p" +#define DBG_LVL DBG_INFO +#include + +#include +#include "virtio_config/virtio-9p.h" + +struct virtio_9p +{ + struct p9_protocol parent; + struct rt_virtio_device *vdev; + + char *tag; + struct rt_virtqueue *vqs[1]; + struct rt_completion done; + struct rt_spinlock lock; +}; + +static rt_err_t virtio_9p_transport(struct p9_protocol *p9p, + rt_uint8_t *tx_data, rt_uint32_t tx_size, + rt_uint8_t *rx_data, rt_uint32_t *ref_rx_size) +{ + rt_err_t err; + rt_ubase_t level; + struct rt_virtqueue *vq; + struct virtio_9p *v9p = rt_container_of(p9p, struct virtio_9p, parent); + + vq = v9p->vqs[0]; + rt_virtqueue_wait_prepare(vq, 2); + + level = rt_spin_lock_irqsave(&v9p->lock); + + rt_virtqueue_add_outbuf(vq, tx_data, tx_size); + rt_virtqueue_add_inbuf(vq, rx_data, *ref_rx_size); + + rt_virtqueue_kick(vq); + + rt_spin_unlock_irqrestore(&v9p->lock, level); + + if (!(err = rt_completion_wait(&v9p->done, rt_tick_from_millisecond(1000)))) + { + *ref_rx_size = rt_min_t(rt_uint32_t, + *ref_rx_size, rt_le32_to_cpu(*(rt_uint32_t*)(&rx_data[0]))); + } + + return err; +} + +static void virtio_9p_done(struct rt_virtqueue *vq) +{ + rt_ubase_t level; + struct virtio_9p *v9p = vq->vdev->priv; + + level = rt_spin_lock_irqsave(&v9p->lock); + + rt_virtqueue_read_buf(vq, RT_NULL); + rt_completion_done(&v9p->done); + + rt_spin_unlock_irqrestore(&v9p->lock, level); +} + +static rt_err_t virtio_9p_vq_init(struct virtio_9p *v9p) +{ + const char *names[] = + { + "req", + }; + rt_virtqueue_callback cbs[] = + { + &virtio_9p_done, + }; + + return rt_virtio_virtqueue_install(v9p->vdev, RT_ARRAY_SIZE(v9p->vqs), + v9p->vqs, names, cbs); +} + +static void virtio_9p_vq_finit(struct virtio_9p *v9p) +{ + if (v9p->vqs[0]) + { + rt_virtio_virtqueue_release(v9p->vdev); + } + if (v9p->tag) + { + rt_free(v9p->tag); + } + + rt_free(v9p); +} + +static rt_err_t virtio_9p_probe(struct rt_virtio_device *vdev) +{ + rt_err_t err; + rt_uint16_t tag_len = 
0; + struct virtio_9p *v9p = rt_calloc(1, sizeof(*v9p)); + + if (!v9p) + { + return -RT_ENOMEM; + } + + vdev->priv = v9p; + v9p->vdev = vdev; + vdev->parent.user_data = v9p; + + if ((err = virtio_9p_vq_init(v9p))) + { + goto _fail; + } + + if (rt_virtio_has_feature(vdev, VIRTIO_9P_F_MOUNT_TAG)) + { + rt_virtio_read_config(vdev, struct virtio_9p_config, tag_len, &tag_len); + } + + v9p->tag = rt_malloc(tag_len + 1); + + if (!v9p->tag) + { + err = -RT_EINVAL; + goto _fail; + } + + rt_virtio_read_bytes(vdev, rt_offsetof(struct virtio_9p_config, tag), + v9p->tag, 1, tag_len); + v9p->tag[tag_len] = '\0'; + + rt_completion_init(&v9p->done); + rt_spin_lock_init(&v9p->lock); + + v9p->parent.name = "virtio"; + v9p->parent.tag = v9p->tag; + v9p->parent.transport = &virtio_9p_transport; + if ((err = dfs_9pfs_add_tag(&v9p->parent))) + { + goto _fail; + } + + return RT_EOK; + +_fail: + virtio_9p_vq_finit(v9p); + + return err; +} + +static rt_err_t virtio_9p_remove(struct rt_virtio_device *vdev) +{ + struct virtio_9p *v9p = vdev->parent.user_data; + + dfs_9pfs_del_tag(&v9p->parent); + + virtio_9p_vq_finit(v9p); + + return RT_EOK; +} + +static const struct rt_virtio_device_id virtio_9p_ids[] = +{ + { VIRTIO_DEVICE_ID_9P, VIRTIO_DEVICE_ANY_ID }, + { /* sentinel */ } +}; + +static struct rt_virtio_driver virtio_9p_driver = +{ + .ids = virtio_9p_ids, + .features = + RT_BIT(VIRTIO_9P_F_MOUNT_TAG) + | RT_BIT(VIRTIO_F_ANY_LAYOUT), + + .probe = virtio_9p_probe, + .remove = virtio_9p_remove, +}; +RT_VIRTIO_DRIVER_EXPORT(virtio_9p_driver); diff --git a/components/drivers/virtio/virtio-blk.c b/components/drivers/virtio/virtio-blk.c new file mode 100644 index 00000000000..32267ad0e03 --- /dev/null +++ b/components/drivers/virtio/virtio-blk.c @@ -0,0 +1,332 @@ +/* + * Copyright (c) 2006-2023, RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2021-9-16 GuEe-GUI the first version + * 2021-11-11 GuEe-GUI using virtio common 
interface + * 2023-02-25 GuEe-GUI using virtio dm + */ + +#include +#include +#include + +#define DBG_TAG "virtio.dev.blk" +#define DBG_LVL DBG_INFO +#include + +#include "../block/blk_dev.h" +#include "virtio_internal.h" +#include "virtio_config/virtio-blk.h" + +struct virtio_blk +{ + struct rt_blk_disk parent; + struct rt_virtio_device *vdev; + + int vblk_id; + rt_le32_t blk_size; + + int vqs_nr; + struct rt_virtqueue *vqs[RT_CPUS_NR]; + struct rt_spinlock *lock; + struct rt_completion *done; +}; + +#define raw_to_virtio_blk(raw) rt_container_of(raw, struct virtio_blk, parent) + +static struct rt_dm_ida virtio_blk_ida = RT_DM_IDA_INIT(CUSTOM); +static struct rt_dm_ida vd_ida = RT_DM_IDA_INIT(VIRTUAL_BLOCK); + +static rt_err_t virtio_blk_request(struct virtio_blk *vblk, + rt_off_t sector, void *buffer, rt_size_t sector_count, int type) +{ + rt_base_t level; + rt_uint8_t status; + struct rt_virtqueue *vq; + struct virtio_blk_req req; + struct rt_virtio_device *vdev = vblk->vdev; + + vq = vblk->vqs[rt_hw_cpu_id() % vblk->vqs_nr]; + if (type == VIRTIO_BLK_T_OUT || type == VIRTIO_BLK_T_IN) + { + rt_virtqueue_wait_prepare(vq, 3); + } + else + { + rt_virtqueue_wait_prepare(vq, 2); + } + + level = rt_spin_lock_irqsave(&vblk->lock[vq->index]); + + req.type = cpu_to_virtio32(vdev, type); + req.ioprio = cpu_to_virtio32(vdev, blk_request_ioprio()); + req.sector = cpu_to_virtio64(vdev, sector * (vblk->blk_size / 512)); + rt_virtqueue_add_outbuf(vq, &req, sizeof(req)); + + if (type == VIRTIO_BLK_T_OUT) + { + rt_virtqueue_add_outbuf(vq, buffer, sector_count * vblk->blk_size); + } + else if (type == VIRTIO_BLK_T_IN) + { + rt_virtqueue_add_inbuf(vq, buffer, sector_count * vblk->blk_size); + } + + status = RT_UINT8_MAX; + rt_virtqueue_add_inbuf(vq, &status, sizeof(status)); + + rt_virtqueue_kick(vq); + + rt_spin_unlock_irqrestore(&vblk->lock[vq->index], level); + + rt_completion_wait(&vblk->done[vq->index], RT_WAITING_FOREVER); + + switch (status) + { + case VIRTIO_BLK_S_OK: + 
return RT_EOK; + + case VIRTIO_BLK_S_UNSUPP: + return -RT_ENOSYS; + + case VIRTIO_BLK_S_ZONE_OPEN_RESOURCE: + return 1; + + case VIRTIO_BLK_S_ZONE_ACTIVE_RESOURCE: + return 2; + + case VIRTIO_BLK_S_IOERR: + case VIRTIO_BLK_S_ZONE_UNALIGNED_WP: + default: + return -RT_EIO; + } +} + +static rt_ssize_t virtio_blk_read(struct rt_blk_disk *disk, rt_off_t sector, + void *buffer, rt_size_t sector_count) +{ + rt_ssize_t res; + struct virtio_blk *vblk = raw_to_virtio_blk(disk); + + res = virtio_blk_request(vblk, sector, buffer, sector_count, VIRTIO_BLK_T_IN); + + return res >= 0 ? sector_count : res; +} + +static rt_ssize_t virtio_blk_write(struct rt_blk_disk *disk, rt_off_t sector, + const void *buffer, rt_size_t sector_count) +{ + rt_ssize_t res; + struct virtio_blk *vblk = raw_to_virtio_blk(disk); + + res = virtio_blk_request(vblk, sector, (void *)buffer, sector_count, VIRTIO_BLK_T_OUT); + + return res >= 0 ? sector_count : res; +} + +static rt_err_t virtio_blk_getgeome(struct rt_blk_disk *disk, + struct rt_device_blk_geometry *geometry) +{ + rt_le64_t capacity; + struct virtio_blk *vblk = raw_to_virtio_blk(disk); + + rt_virtio_read_config(vblk->vdev, struct virtio_blk_config, capacity, &capacity); + + geometry->bytes_per_sector = 512; + geometry->block_size = vblk->blk_size; + geometry->sector_count = rt_le64_to_cpu(capacity); + + return RT_EOK; +} + +static rt_err_t virtio_blk_sync(struct rt_blk_disk *disk) +{ + struct virtio_blk *vblk = raw_to_virtio_blk(disk); + + return virtio_blk_request(vblk, 0, RT_NULL, 0, VIRTIO_BLK_T_FLUSH); +} + +static rt_err_t virtio_blk_erase(struct rt_blk_disk *disk) +{ + struct virtio_blk *vblk = raw_to_virtio_blk(disk); + + return virtio_blk_request(vblk, 0, RT_NULL, 0, VIRTIO_BLK_T_SECURE_ERASE); +} + +static rt_err_t virtio_blk_autorefresh(struct rt_blk_disk *disk, rt_bool_t is_auto) +{ + rt_uint8_t writeback = !is_auto; + struct virtio_blk *vblk = raw_to_virtio_blk(disk); + + /* + * 0: write through + * 1: write back + */ + 
rt_virtio_write_config(vblk->vdev, struct virtio_blk_config, writeback, writeback); + + return RT_EOK; +} + +static const struct rt_blk_disk_ops virtio_blk_ops = +{ + .read = virtio_blk_read, + .write = virtio_blk_write, + .getgeome = virtio_blk_getgeome, + .sync = virtio_blk_sync, + .erase = virtio_blk_erase, + .autorefresh = virtio_blk_autorefresh, +}; + +static void virtio_blk_done(struct rt_virtqueue *vq) +{ + rt_ubase_t level; + struct virtio_blk *vblk = vq->vdev->priv; + + level = rt_spin_lock_irqsave(&vblk->lock[vq->index]); + + rt_virtqueue_read_buf(vq, RT_NULL); + rt_completion_done(&vblk->done[vq->index]); + + rt_spin_unlock_irqrestore(&vblk->lock[vq->index], level); +} + +static rt_err_t virtio_blk_vq_init(struct virtio_blk *vblk) +{ + rt_uint16_t vqs_nr = 1; + const char *names[RT_ARRAY_SIZE(vblk->vqs)]; + rt_virtqueue_callback cbs[RT_ARRAY_SIZE(vblk->vqs)]; + struct rt_virtio_device *vdev = vblk->vdev; + + if (rt_virtio_has_feature(vdev, VIRTIO_BLK_F_MQ)) + { + rt_virtio_read_config(vdev, struct virtio_blk_config, num_queues, &vqs_nr); + } + + if (!(vblk->lock = rt_malloc(sizeof(vblk->lock[0]) * vqs_nr))) + { + return -RT_ENOMEM; + } + + if (!(vblk->done = rt_malloc(sizeof(vblk->done[0]) * vqs_nr))) + { + return -RT_ENOMEM; + } + + for (int i = 0; i < vqs_nr; ++i) + { + names[i] = "req"; + cbs[i] = &virtio_blk_done; + + rt_spin_lock_init(&vblk->lock[i]); + rt_completion_init(&vblk->done[i]); + } + + vblk->vqs_nr = vqs_nr; + return rt_virtio_virtqueue_install(vdev, vqs_nr, vblk->vqs, names, cbs); +} + +static void virtio_blk_vq_finit(struct virtio_blk *vblk) +{ + if (vblk->vqs[0]) + { + rt_virtio_virtqueue_release(vblk->vdev); + } + if (vblk->lock) + { + rt_free(vblk->lock); + } + if (vblk->done) + { + rt_free(vblk->done); + } +} + +static rt_err_t virtio_blk_probe(struct rt_virtio_device *vdev) +{ + rt_err_t err; + struct virtio_blk *vblk = rt_calloc(1, sizeof(*vblk)); + + if (!vblk) + { + return -RT_ENOMEM; + } + + if ((vblk->vblk_id = 
rt_dm_ida_alloc(&virtio_blk_ida)) < 0) + { + return -RT_EFULL; + } + + vdev->priv = vblk; + vblk->vdev = vdev; + vdev->parent.user_data = vblk; + vblk->parent.ida = &vd_ida; + vblk->parent.parallel_io = RT_TRUE; + vblk->parent.ops = &virtio_blk_ops; + vblk->parent.max_partitions = RT_BLK_PARTITION_MAX; + + if ((err = virtio_blk_vq_init(vblk))) + { + goto _fail; + } + + rt_virtio_read_config(vdev, struct virtio_blk_config, blk_size, &vblk->blk_size); + + rt_dm_dev_set_name(&vblk->parent.parent, "vd%c%c", letter_name(vblk->vblk_id)); + + if ((err = rt_hw_blk_disk_register(&vblk->parent))) + { + goto _fail; + } + + return RT_EOK; + +_fail: + rt_dm_ida_free(&virtio_blk_ida, vblk->vblk_id); + virtio_blk_vq_finit(vblk); + rt_free(vblk); + + return err; +} + +static rt_err_t virtio_blk_remove(struct rt_virtio_device *vdev) +{ + struct virtio_blk *vblk = vdev->parent.user_data; + + rt_hw_blk_disk_unregister(&vblk->parent); + rt_dm_ida_free(&virtio_blk_ida, vblk->vblk_id); + + virtio_blk_vq_finit(vblk); + rt_free(vblk); + + return RT_EOK; +} + +static const struct rt_virtio_device_id virtio_blk_ids[] = +{ + { VIRTIO_DEVICE_ID_BLOCK, VIRTIO_DEVICE_ANY_ID }, + { /* sentinel */ } +}; + +static struct rt_virtio_driver virtio_blk_driver = +{ + .ids = virtio_blk_ids, + .features = + RT_BIT(VIRTIO_BLK_F_SIZE_MAX) + | RT_BIT(VIRTIO_BLK_F_GEOMETRY) + | RT_BIT(VIRTIO_BLK_F_BLK_SIZE) + | RT_BIT(VIRTIO_BLK_F_FLUSH) + | RT_BIT(VIRTIO_BLK_F_TOPOLOGY) + | RT_BIT(VIRTIO_BLK_F_CONFIG_WCE) + | RT_BIT(VIRTIO_BLK_F_MQ) + | RT_BIT(VIRTIO_BLK_F_DISCARD) + | RT_BIT(VIRTIO_BLK_F_WRITE_ZEROES) + | RT_BIT(VIRTIO_BLK_F_SECURE_ERASE), + + .probe = virtio_blk_probe, + .remove = virtio_blk_remove, +}; +RT_VIRTIO_DRIVER_EXPORT(virtio_blk_driver); diff --git a/components/drivers/virtio/virtio-console.c b/components/drivers/virtio/virtio-console.c new file mode 100644 index 00000000000..b49391409ed --- /dev/null +++ b/components/drivers/virtio/virtio-console.c @@ -0,0 +1,732 @@ +/* + * Copyright (c) 
2006-2023, RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2021-11-11 GuEe-GUI the first version + * 2023-02-25 GuEe-GUI using virtio dm + */ + +#include +#include +#include + +#define DBG_TAG "virtio.dev.console" +#define DBG_LVL DBG_INFO +#include + +#include + +#include "virtio_internal.h" +#include "virtio_config/virtio-console.h" + +#define QUEUE_PORT_RX 0 +#define QUEUE_PORT_TX 1 +#define QUEUE_CTRL_RX 2 +#define QUEUE_CTRL_TX 3 + +struct virtio_console; + +struct console_port +{ + struct rt_serial_device parent; + char name[64]; + + char *stream; + char *rx_stream; + char *tx_stream; + rt_bool_t irq_enabled; + rt_bool_t host_connected; + struct rt_spinlock lock; + struct virtio_console *vconsole; + + struct rt_virtqueue *rx_vq; + struct rt_virtqueue *tx_vq; +}; + +#define raw_to_console_port(raw) rt_container_of(raw, struct console_port, parent) + +struct virtio_console +{ + struct rt_virtio_device *vdev; + + rt_uint32_t uid; + rt_size_t stream_rx_size; + rt_size_t stream_tx_size; + + struct rt_spinlock lock; + struct rt_virtqueue *ctrl_rx_vq; + struct rt_virtqueue *ctrl_tx_vq; + + rt_size_t ctrl_packet_size; + struct virtio_console_control_ext *ctrl_packet; + + struct rt_work ctrl_work; + struct rt_work config_work; + + rt_size_t max_port_nr; + struct rt_virtqueue **vqs; + struct console_port *ports; +}; + +static struct rt_dm_ida virtio_console_ida = RT_DM_IDA_INIT(CUSTOM); + +rt_inline rt_bool_t console_multiport(struct virtio_console *vconsole) +{ + return rt_virtio_has_feature(vconsole->vdev, VIRTIO_CONSOLE_F_MULTIPORT); +} + +static rt_ssize_t console_send_ctrl_msg(struct virtio_console *vconsole, + rt_uint32_t port_id, rt_uint16_t event, rt_uint16_t value) +{ + struct rt_virtqueue *vq; + struct virtio_console_control ctrl_packet; + + if (!vconsole || vconsole->max_port_nr <= 1) + { + return RT_EOK; + } + + vq = vconsole->ctrl_tx_vq; + rt_spin_lock(&vconsole->lock); + + 
ctrl_packet.id = cpu_to_virtio32(vconsole->vdev, port_id); + ctrl_packet.event = cpu_to_virtio16(vconsole->vdev, event); + ctrl_packet.value = cpu_to_virtio16(vconsole->vdev, value); + + rt_virtqueue_add_outbuf(vq, &ctrl_packet, sizeof(ctrl_packet)); + rt_virtqueue_kick(vq); + + while (!rt_virtqueue_read_buf(vq, RT_NULL)) + { + rt_hw_cpu_relax(); + } + + rt_spin_unlock(&vconsole->lock); + + return RT_EOK; +} + +static void console_emergency_write(struct virtio_console *vconsole, + rt_uint32_t emerg_wr) +{ + rt_virtio_write_config(vconsole->vdev, struct virtio_console_config, + emerg_wr, emerg_wr); +} + +static rt_err_t console_port_configure(struct rt_serial_device *serial, + struct serial_configure *cfg) +{ + return RT_EOK; +} + +static rt_err_t console_port_control(struct rt_serial_device *serial, int cmd, void *arg) +{ + struct console_port *port = raw_to_console_port(serial); + + switch (cmd) + { + case RT_DEVICE_CTRL_CLR_INT: + port->irq_enabled = RT_FALSE; + break; + + case RT_DEVICE_CTRL_SET_INT: + port->irq_enabled = RT_TRUE; + break; + } + + return RT_EOK; +} + +static int console_port_putc(struct rt_serial_device *serial, char c) +{ + rt_base_t level; + char *tx_stream; + struct rt_virtqueue *vq; + struct console_port *port = raw_to_console_port(serial); + + if (!port->host_connected) + { + if (rt_virtio_has_feature(port->vconsole->vdev, VIRTIO_CONSOLE_F_EMERG_WRITE)) + { + console_emergency_write(port->vconsole, c); + + if (port->parent.parent.open_flag & RT_DEVICE_FLAG_INT_TX) + { + rt_hw_serial_isr(&port->parent, RT_SERIAL_EVENT_TX_DONE); + } + } + + return 1; + } + + vq = port->tx_vq; + rt_virtqueue_wait_prepare(vq, 1); + + level = rt_spin_lock_irqsave(&port->lock); + + tx_stream = &port->tx_stream[rt_virtqueue_next_buf_index(vq)]; + *tx_stream = c; + rt_virtqueue_add_outbuf(vq, tx_stream, sizeof(*tx_stream)); + rt_virtqueue_kick(vq); + + if (port->parent.parent.open_flag & ~RT_DEVICE_FLAG_INT_TX) + { + while (!rt_virtqueue_read_buf(vq, RT_NULL)) + { 
+ rt_hw_cpu_relax(); + } + } + + rt_spin_unlock_irqrestore(&port->lock, level); + + return 1; +} + +static int console_port_getc(struct rt_serial_device *serial) +{ + rt_base_t level; + int ch = -1; + char *rx_stream; + struct rt_virtqueue *vq; + struct console_port *port = raw_to_console_port(serial); + + level = rt_spin_lock_irqsave(&port->lock); + + vq = port->rx_vq; + if ((rx_stream = rt_virtqueue_read_buf(vq, RT_NULL))) + { + ch = *rx_stream; + + rt_virtqueue_add_inbuf(vq, rx_stream, sizeof(char)); + + rt_virtqueue_kick(vq); + } + + rt_spin_unlock_irqrestore(&port->lock, level); + + return ch; +} + +static const struct rt_uart_ops console_port_ops = +{ + .configure = console_port_configure, + .control = console_port_control, + .putc = console_port_putc, + .getc = console_port_getc, +}; + +static rt_err_t console_port_add(struct virtio_console *vconsole, + struct console_port *port) +{ + char *stream; + const char *name; + rt_uint32_t id = port - vconsole->ports; + + rt_memset(port, 0, sizeof(*port)); + + if (!(stream = rt_malloc(sizeof(char) * (vconsole->stream_rx_size + vconsole->stream_tx_size)))) + { + return -RT_ENOMEM; + } + + port->stream = stream; + port->tx_stream = stream; + port->rx_stream = stream + vconsole->stream_tx_size; + port->vconsole = vconsole; + rt_spin_lock_init(&port->lock); + + if (!port->rx_vq || !port->tx_vq) + { + if (id != 0) + { + port->rx_vq = vconsole->vqs[4 + (id - 1) * 2 + QUEUE_PORT_RX]; + port->tx_vq = vconsole->vqs[4 + (id - 1) * 2 + QUEUE_PORT_TX]; + } + else + { + port->rx_vq = vconsole->vqs[QUEUE_PORT_RX]; + port->tx_vq = vconsole->vqs[QUEUE_PORT_TX]; + } + + for (int idx = 0; idx < vconsole->stream_rx_size; ++idx) + { + rt_virtqueue_add_inbuf(port->rx_vq, &port->rx_stream[idx], sizeof(char)); + + rt_virtqueue_submit(port->rx_vq); + } + + rt_virtqueue_notify(port->rx_vq); + } + + console_send_ctrl_msg(vconsole, VIRTIO_CONSOLE_BAD_ID, + VIRTIO_CONSOLE_DEVICE_READY, 1); + + if (!console_multiport(port->vconsole)) + { + 
port->host_connected = RT_TRUE; + } + + port->parent.ops = &console_port_ops; + port->parent.config = (struct serial_configure)RT_SERIAL_CONFIG_DEFAULT; + rt_dm_dev_set_name(&port->parent.parent, "vport%up%u", vconsole->uid, id); + name = rt_dm_dev_get_name(&port->parent.parent); + + return rt_hw_serial_register(&port->parent, name, + RT_DEVICE_FLAG_RDWR | RT_DEVICE_FLAG_REMOVABLE | + RT_DEVICE_FLAG_INT_TX | RT_DEVICE_FLAG_INT_RX, vconsole); +} + +static void console_port_remove(struct console_port *port, rt_bool_t sync) +{ + rt_uint32_t port_id; + + if (!port->vconsole) + { + return; + } + + /* Make sure pending buffer is empty */ + while (rt_virtqueue_read_buf(port->tx_vq, RT_NULL) || + rt_virtqueue_read_buf(port->rx_vq, RT_NULL)) + { + rt_thread_yield(); + } + + port_id = port - port->vconsole->ports; + + if (sync && port_id != 0) + { + console_send_ctrl_msg(port->vconsole, port_id, VIRTIO_CONSOLE_PORT_OPEN, 0); + } + + rt_device_unregister(&port->parent.parent); + + rt_free(port->stream); + port->vconsole = RT_NULL; +} + +static struct console_port *console_find_port_by_vq(struct virtio_console *vconsole, + struct rt_virtqueue *vq) +{ + for (int i = 0; i < vconsole->max_port_nr; ++i) + { + struct console_port *port = &vconsole->ports[i]; + + if (port->tx_vq == vq || port->rx_vq == vq) + { + return port; + } + } + + return RT_NULL; +} + +static struct console_port *console_find_port_by_id(struct virtio_console *vconsole, + rt_uint32_t id) +{ + return id < vconsole->max_port_nr ? 
&vconsole->ports[id] : RT_NULL; +} + +static void console_resize(struct virtio_console *vconsole, + rt_uint16_t cols, rt_uint16_t rows) +{ + LOG_D("%s: [cols, rows] resize [%u, %u]", + rt_dm_dev_get_name(&vconsole->vdev.parent), cols, rows); + + /* Port to TTY */ +} + +static void console_ctrl_work(struct rt_work *work, void *work_data) +{ + rt_uint32_t port_id; + struct console_port *port; + struct virtio_console_control *ctrl_packet; + struct virtio_console_control_ext *ctrl_packet_ext; + struct virtio_console *vconsole = work_data; + struct rt_virtio_device *vdev = vconsole->vdev; + +_next_ctrl: + if (!(ctrl_packet_ext = rt_virtqueue_read_buf(vconsole->ctrl_rx_vq, RT_NULL))) + { + return; + } + + ctrl_packet = &ctrl_packet_ext->ctrl; + port = console_find_port_by_id(vconsole, virtio32_to_cpu(vdev, ctrl_packet->id)); + port_id = virtio32_to_cpu(vdev, ctrl_packet->id); + + if (!port->vconsole && + cpu_to_virtio16(vdev, ctrl_packet->event) != VIRTIO_CONSOLE_PORT_ADD) + { + LOG_D("%s: Port%d in control is invalid", rt_dm_dev_get_name(&vdev->parent), + port_id); + + goto _prepare_next_ctrl; + } + + switch (virtio16_to_cpu(vdev, ctrl_packet->event)) + { + case VIRTIO_CONSOLE_PORT_ADD: + if (port->vconsole) + { + LOG_D("%s: Port%u already added", rt_dm_dev_get_name(&vdev->parent), + port_id); + + console_send_ctrl_msg(vconsole, port_id, VIRTIO_CONSOLE_PORT_READY, 1); + break; + } + if (port_id >= vconsole->max_port_nr) + { + LOG_W("%s: Port%u is out of %u max supported", + rt_dm_dev_get_name(&vdev->parent), port_id, vconsole->max_port_nr); + break; + } + console_port_add(vconsole, port); + break; + + case VIRTIO_CONSOLE_PORT_REMOVE: + console_port_remove(port, RT_FALSE); + break; + + case VIRTIO_CONSOLE_CONSOLE_PORT: + console_send_ctrl_msg(vconsole, port_id, VIRTIO_CONSOLE_PORT_OPEN, 1); + break; + + case VIRTIO_CONSOLE_RESIZE: + console_resize(vconsole, + virtio16_to_cpu(vdev, ctrl_packet_ext->resize.cols), + virtio16_to_cpu(vdev, ctrl_packet_ext->resize.rows)); + 
break; + + case VIRTIO_CONSOLE_PORT_OPEN: + port->host_connected = !!virtio16_to_cpu(vdev, ctrl_packet->value); + console_send_ctrl_msg(vconsole, port_id, VIRTIO_CONSOLE_PORT_OPEN, 1); + break; + + case VIRTIO_CONSOLE_PORT_NAME: + rt_strncpy(port->name, (const char *)ctrl_packet_ext->data, sizeof(port->name)); + break; + + default: + break; + } + +_prepare_next_ctrl: + rt_virtqueue_add_inbuf(vconsole->ctrl_rx_vq, + ctrl_packet_ext, sizeof(*ctrl_packet_ext)); + + rt_virtqueue_kick(vconsole->ctrl_rx_vq); + + goto _next_ctrl; +} + +static void console_config_work(struct rt_work *work, void *work_data) +{ + rt_uint16_t cols, rows; + struct virtio_console *vconsole = work_data; + struct rt_virtio_device *vdev = vconsole->vdev; + + rt_virtio_read_config(vdev, struct virtio_console_config, cols, &cols); + rt_virtio_read_config(vdev, struct virtio_console_config, rows, &rows); + + console_resize(vconsole, cols, rows); +} + +static void virtio_console_rx_done(struct rt_virtqueue *vq) +{ + struct virtio_console *vconsole = vq->vdev->priv; + struct console_port *port = console_find_port_by_vq(vconsole, vq); + + if (!port) + { + return; + } + + if (port->irq_enabled) + { + rt_hw_serial_isr(&port->parent, RT_SERIAL_EVENT_RX_IND); + } + else + { + /* Give up */ + console_port_getc(&port->parent); + } +} + +static void virtio_console_tx_done(struct rt_virtqueue *vq) +{ + struct virtio_console *vconsole = vq->vdev->priv; + struct console_port *port = console_find_port_by_vq(vconsole, vq); + + if (!port) + { + return; + } + + if (port->parent.parent.open_flag & RT_DEVICE_FLAG_INT_TX) + { + if (rt_virtqueue_read_buf(vq, RT_NULL)) + { + rt_hw_serial_isr(&port->parent, RT_SERIAL_EVENT_TX_DONE); + } + } +} + +static void virtio_console_ctrl_done(struct rt_virtqueue *vq) +{ + struct virtio_console *vconsole = vq->vdev->priv; + + rt_work_submit(&vconsole->ctrl_work, 0); +} + +static void virtio_console_config_changed(struct rt_virtio_device *vdev) +{ + struct virtio_console *vconsole = 
vdev->priv; + + if (!console_multiport(vconsole)) + { + rt_work_submit(&vconsole->config_work, 0); + } +} + +static rt_err_t virtio_console_vq_init(struct virtio_console *vconsole) +{ + rt_err_t err; + const char **names; + rt_size_t vqs_nr = 2; + rt_virtqueue_callback *cbs; + + if (vconsole->max_port_nr > 1) + { + vqs_nr = 2 * vconsole->max_port_nr + 2; + } + + vconsole->vqs = rt_calloc(vqs_nr, sizeof(*vconsole->vqs)); + + if (!vconsole->vqs) + { + return -RT_ENOMEM; + } + + if (!(names = rt_malloc(sizeof(*names) * vqs_nr))) + { + return -RT_ENOMEM; + } + + if (!(cbs = rt_malloc(sizeof(*cbs) * vqs_nr))) + { + rt_free(names); + + return -RT_ENOMEM; + } + + names[QUEUE_PORT_RX] = "ch-rx"; + names[QUEUE_PORT_TX] = "ch-tx"; + cbs[QUEUE_PORT_RX] = &virtio_console_rx_done; + cbs[QUEUE_PORT_TX] = &virtio_console_tx_done; + + if (vqs_nr > 2) + { + names[QUEUE_CTRL_RX] = "ctl-rx"; + names[QUEUE_CTRL_TX] = "ctl-tx"; + cbs[QUEUE_CTRL_RX] = &virtio_console_ctrl_done; + cbs[QUEUE_CTRL_TX] = RT_NULL; + } + + for (int i = 4; i < vqs_nr; i += 2) + { + names[i + QUEUE_PORT_RX] = "ch-rx"; + names[i + QUEUE_PORT_TX] = "ch-tx"; + cbs[i + QUEUE_PORT_RX] = &virtio_console_rx_done; + cbs[i + QUEUE_PORT_TX] = &virtio_console_tx_done; + } + + if ((err = rt_virtio_virtqueue_install(vconsole->vdev, vqs_nr, + vconsole->vqs, names, cbs))) + { + goto _out_free; + } + + vconsole->stream_rx_size = rt_virtqueue_get_virtq_size(vconsole->vqs[QUEUE_PORT_RX]); + vconsole->stream_tx_size = rt_virtqueue_get_virtq_size(vconsole->vqs[QUEUE_PORT_TX]); + + if (vqs_nr > 2) + { + struct rt_virtqueue *ctrl_rx_vq, *ctrl_tx_vq; + + ctrl_rx_vq = vconsole->vqs[QUEUE_CTRL_RX]; + ctrl_tx_vq = vconsole->vqs[QUEUE_CTRL_TX]; + + vconsole->ctrl_packet_size = rt_virtqueue_get_virtq_size(ctrl_rx_vq); + vconsole->ctrl_packet = rt_malloc(sizeof(*vconsole->ctrl_packet) * + vconsole->ctrl_packet_size); + + if (!vconsole->ctrl_packet) + { + goto _out_free; + } + + for (int idx = 0; idx < vconsole->ctrl_packet_size; ++idx) + { 
+ rt_virtqueue_add_inbuf(ctrl_rx_vq, &vconsole->ctrl_packet[idx], + sizeof(vconsole->ctrl_packet[idx])); + + rt_virtqueue_submit(ctrl_rx_vq); + } + + rt_virtqueue_notify(ctrl_rx_vq); + + vconsole->ctrl_rx_vq = ctrl_rx_vq; + vconsole->ctrl_tx_vq = ctrl_tx_vq; + + rt_virtqueue_disable_callback(ctrl_tx_vq); + } + +_out_free: + rt_free(names); + rt_free(cbs); + + return err; +} + +static void virtio_console_vq_finit(struct virtio_console *vconsole) +{ + if (vconsole->ports) + { + rt_free(vconsole->ports); + } + + if (vconsole->vqs) + { + if (vconsole->vqs[0]) + { + rt_virtio_virtqueue_release(vconsole->vdev); + } + + rt_free(vconsole->vqs); + } + + if (vconsole->ctrl_packet) + { + rt_free(vconsole->ctrl_packet); + } +} + +static rt_err_t virtio_console_probe(struct rt_virtio_device *vdev) +{ + rt_err_t err; + struct virtio_console *vconsole = rt_calloc(1, sizeof(*vconsole)); + + if (!vconsole) + { + return -RT_ENOMEM; + } + + vdev->priv = vconsole; + vconsole->vdev = vdev; + vdev->parent.user_data = vconsole; + + rt_virtio_read_config(vdev, struct virtio_console_config, + max_nr_ports, &vconsole->max_port_nr); + vconsole->max_port_nr = rt_max_t(rt_size_t, 1, vconsole->max_port_nr); + + vconsole->ports = rt_malloc(sizeof(*vconsole->ports) * vconsole->max_port_nr); + + if (!vconsole->ports) + { + err = -RT_ENOMEM; + goto _fail; + } + + if ((err = virtio_console_vq_init(vconsole))) + { + goto _fail; + } + + if ((vconsole->uid = rt_dm_ida_alloc(&virtio_console_ida)) < 0) + { + return -RT_EFULL; + } + + if (vconsole->ctrl_rx_vq && vconsole->ctrl_tx_vq) + { + rt_work_init(&vconsole->ctrl_work, console_ctrl_work, vconsole); + } + + rt_work_init(&vconsole->config_work, console_config_work, vconsole); + + for (int i = 0; i < vconsole->max_port_nr; ++i) + { + vconsole->ports[i].vconsole = RT_NULL; + } + + if ((err = console_port_add(vconsole, &vconsole->ports[0]))) + { + goto _fail; + } + + return RT_EOK; + +_fail: + rt_dm_ida_free(&virtio_console_ida, vconsole->uid); + 
virtio_console_vq_finit(vconsole); + rt_free(vconsole); + + return err; +} + +static rt_err_t virtio_console_remove(struct rt_virtio_device *vdev) +{ + struct virtio_console *vconsole = vdev->parent.user_data; + + for (int i = 0; i < vconsole->max_port_nr; ++i) + { + console_port_remove(&vconsole->ports[i], RT_TRUE); + } + + rt_dm_ida_free(&virtio_console_ida, vconsole->uid); + + virtio_console_vq_finit(vconsole); + rt_free(vconsole); + + return RT_EOK; +} + +static const struct rt_virtio_device_id virtio_console_ids[] = +{ + { VIRTIO_DEVICE_ID_CONSOLE, VIRTIO_DEVICE_ANY_ID }, + { /* sentinel */ } +}; + +static struct rt_virtio_driver virtio_console_driver = +{ + .ids = virtio_console_ids, + .features = + RT_BIT(VIRTIO_CONSOLE_F_SIZE) + | RT_BIT(VIRTIO_CONSOLE_F_MULTIPORT) + | RT_BIT(VIRTIO_CONSOLE_F_EMERG_WRITE), + + .probe = virtio_console_probe, + .remove = virtio_console_remove, + .config_changed = virtio_console_config_changed, +}; +RT_VIRTIO_DRIVER_EXPORT(virtio_console_driver); + +static const struct rt_virtio_device_id virtio_rproc_serial_ids[] = +{ +#ifdef RT_VIRTIO_RPROC_SERIAL + { VIRTIO_DEVICE_ID_RPROC_SERIAL, VIRTIO_DEVICE_ANY_ID }, +#endif + { /* sentinel */ } +}; + +static struct rt_virtio_driver virtio_rproc_serial_driver = +{ + .ids = virtio_rproc_serial_ids, + .probe = virtio_console_probe, + .remove = virtio_console_remove, +}; +RT_VIRTIO_DRIVER_EXPORT(virtio_rproc_serial_driver); diff --git a/components/drivers/virtio/virtio-crypto.c b/components/drivers/virtio/virtio-crypto.c new file mode 100644 index 00000000000..1f4d80abdff --- /dev/null +++ b/components/drivers/virtio/virtio-crypto.c @@ -0,0 +1,666 @@ +/* + * Copyright (c) 2006-2023, RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2023-02-25 GuEe-GUI the first version + */ + +#include +#include +#include + +#define DBG_TAG "virtio.dev.crypto" +#define DBG_LVL DBG_INFO +#include + +#include "virtio_config/virtio-crypto.h" 
+ +struct virtio_crypto_symmetric_contex +{ + rt_uint64_t session_id; + struct rt_virtqueue *data_vq; +}; + +struct virtio_crypto +{ + struct rt_hwcrypto_device parent; + struct rt_virtio_device *vdev; + + rt_uint32_t status; + rt_uint32_t services; + rt_uint32_t max_data_queues; + rt_uint64_t max_req_size; + rt_uint64_t max_cipher_key_len; + rt_uint64_t max_auth_key_len; + + struct rt_virtqueue **vqs; + struct rt_virtqueue *ctrl_vq; + struct rt_completion *data_done; + struct rt_completion ctrl_done; + struct rt_spinlock *data_lock; + struct rt_spinlock ctrl_lock; +}; + +#define raw_to_virtio_crypto(raw) rt_container_of((raw)->parent.device, struct virtio_crypto, parent) + +rt_inline struct rt_virtqueue *virtio_crypto_data_vq(struct virtio_crypto *vcrypto) +{ + return vcrypto->vqs[rt_hw_cpu_id() % vcrypto->max_data_queues]; +} + +static rt_err_t virtio_crypto_symmetric_create_session( + struct hwcrypto_symmetric *symmetric_ctx, rt_bool_t encrypt) +{ + rt_ubase_t level; + struct rt_virtqueue *ctrl_vq, *data_vq; + struct virtio_crypto_op_ctrl_req ctrl; + struct virtio_crypto_session_input input; + struct virtio_crypto_sym_create_session_req *sym_create_session; + struct virtio_crypto *vcrypto = raw_to_virtio_crypto(symmetric_ctx); + struct virtio_crypto_symmetric_contex *vctx = symmetric_ctx->parent.contex; + + ctrl_vq = vcrypto->ctrl_vq; + data_vq = virtio_crypto_data_vq(vcrypto); + + /* Pad ctrl header */ + ctrl.header.opcode = rt_cpu_to_le32(VIRTIO_CRYPTO_CIPHER_CREATE_SESSION); + switch (symmetric_ctx->parent.type & HWCRYPTO_MAIN_TYPE_MASK) + { + case HWCRYPTO_TYPE_AES: + switch (symmetric_ctx->parent.type) + { + case HWCRYPTO_TYPE_AES_ECB: + ctrl.header.algo = rt_cpu_to_le32(VIRTIO_CRYPTO_CIPHER_AES_ECB); + break; + + case HWCRYPTO_TYPE_AES_CBC: + ctrl.header.algo = rt_cpu_to_le32(VIRTIO_CRYPTO_CIPHER_AES_CBC); + break; + + case HWCRYPTO_TYPE_AES_CTR: + ctrl.header.algo = rt_cpu_to_le32(VIRTIO_CRYPTO_CIPHER_AES_CTR); + break; + + default: + return -RT_ENOSYS; + 
} + break; + + case HWCRYPTO_TYPE_DES: + switch (symmetric_ctx->parent.type) + { + case HWCRYPTO_TYPE_DES_ECB: + ctrl.header.algo = rt_cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DES_ECB); + break; + + case HWCRYPTO_TYPE_DES_CBC: + ctrl.header.algo = rt_cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DES_CBC); + break; + + default: + return -RT_ENOSYS; + } + break; + + case HWCRYPTO_TYPE_3DES: + switch (symmetric_ctx->parent.type) + { + case HWCRYPTO_TYPE_3DES_ECB: + ctrl.header.algo = rt_cpu_to_le32(VIRTIO_CRYPTO_CIPHER_3DES_ECB); + break; + + case HWCRYPTO_TYPE_3DES_CBC: + ctrl.header.algo = rt_cpu_to_le32(VIRTIO_CRYPTO_CIPHER_3DES_CBC); + break; + + default: + return -RT_ENOSYS; + } + break; + + case HWCRYPTO_TYPE_RC4: + ctrl.header.algo = rt_cpu_to_le32(VIRTIO_CRYPTO_CIPHER_ARC4); + break; + } + ctrl.header.queue_id = data_vq->index; + + input.status = rt_cpu_to_le32(VIRTIO_CRYPTO_ERR); + + /* Pad cipher's parameters */ + sym_create_session = &ctrl.sym_create_session; + sym_create_session->op_type = rt_cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER); + sym_create_session->cipher.para.algo = ctrl.header.algo; + sym_create_session->cipher.para.keylen = rt_cpu_to_le32(symmetric_ctx->key_bitlen / 8); + sym_create_session->cipher.para.op = rt_cpu_to_le32(encrypt ? 
+ VIRTIO_CRYPTO_OP_ENCRYPT : VIRTIO_CRYPTO_OP_DECRYPT); + + rt_virtqueue_wait_prepare(ctrl_vq, 3); + + level = rt_spin_lock_irqsave(&vcrypto->ctrl_lock); + + rt_virtqueue_add_outbuf(ctrl_vq, &ctrl, sizeof(ctrl)); + rt_virtqueue_add_outbuf(ctrl_vq, symmetric_ctx->key, symmetric_ctx->key_bitlen / 8); + rt_virtqueue_add_inbuf(ctrl_vq, &input, sizeof(input)); + + rt_virtqueue_kick(ctrl_vq); + + rt_spin_unlock_irqrestore(&vcrypto->ctrl_lock, level); + + rt_completion_wait(&vcrypto->ctrl_done, RT_WAITING_FOREVER); + + switch (input.status) + { + case VIRTIO_CRYPTO_OK: + break; + + case VIRTIO_CRYPTO_INVSESS: + RT_ASSERT(0); + case VIRTIO_CRYPTO_ERR: + case VIRTIO_CRYPTO_KEY_REJECTED: + return -RT_ERROR; + + case VIRTIO_CRYPTO_BADMSG: + return -RT_EINVAL; + + case VIRTIO_CRYPTO_NOTSUPP: + return -RT_ENOSYS; + + case VIRTIO_CRYPTO_NOSPC: + return -RT_EEMPTY; + } + + vctx->session_id = input.session_id; + vctx->data_vq = data_vq; + + return RT_EOK; +} + +static void virtio_crypto_symmetric_destroy_session( + struct hwcrypto_symmetric *symmetric_ctx) +{ + rt_ubase_t level; + struct rt_virtqueue *ctrl_vq; + struct virtio_crypto_inhdr inhdr; + struct virtio_crypto_op_ctrl_req ctrl; + struct virtio_crypto_destroy_session_req *destroy_session; + struct virtio_crypto *vcrypto = raw_to_virtio_crypto(symmetric_ctx); + struct virtio_crypto_symmetric_contex *vctx = symmetric_ctx->parent.contex; + + ctrl_vq = vcrypto->ctrl_vq; + inhdr.status = VIRTIO_CRYPTO_ERR; + + /* Pad ctrl header */ + ctrl.header.opcode = rt_cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION); + ctrl.header.queue_id = vctx->data_vq->index; + + destroy_session = &ctrl.destroy_session; + destroy_session->session_id = rt_cpu_to_le64(vctx->session_id); + + rt_virtqueue_wait_prepare(ctrl_vq, 2); + + level = rt_spin_lock_irqsave(&vcrypto->ctrl_lock); + + rt_virtqueue_add_outbuf(ctrl_vq, &ctrl, sizeof(ctrl)); + rt_virtqueue_add_inbuf(ctrl_vq, &inhdr.status, sizeof(inhdr.status)); + + rt_virtqueue_kick(ctrl_vq); + + 
rt_spin_unlock_irqrestore(&vcrypto->ctrl_lock, level); + + rt_completion_wait(&vcrypto->ctrl_done, RT_WAITING_FOREVER); + + if (inhdr.status != VIRTIO_CRYPTO_OK) + { + LOG_E("%s fail status = %d", __func__, inhdr.status); + } +} + +static rt_err_t virtio_crypto_symmetric_crypt( + struct hwcrypto_symmetric *symmetric_ctx, + struct hwcrypto_symmetric_info *symmetric_info) +{ + rt_err_t err; + rt_ubase_t level; + struct rt_virtqueue *data_vq; + struct virtio_crypto_inhdr inhdr; + struct virtio_crypto_op_data_req data; + struct virtio_crypto *vcrypto = raw_to_virtio_crypto(symmetric_ctx); + struct virtio_crypto_symmetric_contex *vctx = symmetric_ctx->parent.contex; + + if (!(vcrypto->status & VIRTIO_CRYPTO_S_HW_READY)) + { + LOG_D("%s: to %s %s is not ready", + rt_dm_dev_get_name(&vcrypto->parent.parent), "symmetric", "crypt"); + + return -RT_EIO; + } + + if (symmetric_info->mode == HWCRYPTO_MODE_ENCRYPT) + { + err = virtio_crypto_symmetric_create_session(symmetric_ctx, RT_TRUE); + } + else if (symmetric_info->mode == HWCRYPTO_MODE_DECRYPT) + { + err = virtio_crypto_symmetric_create_session(symmetric_ctx, RT_FALSE); + } + else + { + err = -RT_ENOSYS; + } + + if (err) + { + return err; + } + + vctx = symmetric_ctx->parent.contex; + data_vq = vctx->data_vq; + + data.header.session_id = rt_cpu_to_le64(vctx->session_id); + if (symmetric_info->mode == HWCRYPTO_MODE_ENCRYPT) + { + data.header.opcode = rt_cpu_to_le32(VIRTIO_CRYPTO_CIPHER_ENCRYPT); + } + else if (symmetric_info->mode == HWCRYPTO_MODE_DECRYPT) + { + data.header.opcode = rt_cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DECRYPT); + } + + if (!symmetric_ctx->iv_len) + { + symmetric_ctx->iv_len = sizeof(symmetric_ctx->iv); + rt_memset(symmetric_ctx->iv, 0, symmetric_ctx->iv_len); + } + + data.sym_req.op_type = rt_cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER); + data.sym_req.cipher.para.iv_len = rt_cpu_to_le32(symmetric_ctx->iv_len); + data.sym_req.cipher.para.src_data_len = rt_cpu_to_le32(symmetric_info->length); + 
data.sym_req.cipher.para.dst_data_len = rt_cpu_to_le32(symmetric_info->length); + + inhdr.status = VIRTIO_CRYPTO_ERR; + + rt_virtqueue_wait_prepare(data_vq, 5); + + level = rt_spin_lock_irqsave(&vcrypto->data_lock[data_vq->index]); + + /* Outhdr */ + rt_virtqueue_add_outbuf(data_vq, &data, sizeof(data)); + + rt_virtqueue_add_outbuf(data_vq, + &symmetric_ctx->iv[symmetric_ctx->iv_off], symmetric_ctx->iv_len); + + /* Source data */ + rt_virtqueue_add_outbuf(data_vq, (void *)symmetric_info->in, symmetric_info->length); + + /* Destination data */ + rt_virtqueue_add_inbuf(data_vq, symmetric_info->out, symmetric_info->length); + + rt_virtqueue_add_inbuf(data_vq, &inhdr.status, sizeof(inhdr.status)); + + rt_virtqueue_kick(data_vq); + + rt_spin_unlock_irqrestore(&vcrypto->data_lock[data_vq->index], level); + + rt_completion_wait(&vcrypto->data_done[data_vq->index], RT_WAITING_FOREVER); + + switch (inhdr.status) + { + case VIRTIO_CRYPTO_OK: + err = RT_EOK; + break; + + case VIRTIO_CRYPTO_INVSESS: + err = -RT_EINVAL; + break; + + case VIRTIO_CRYPTO_ERR: + case VIRTIO_CRYPTO_KEY_REJECTED: + err = -RT_ERROR; + break; + + case VIRTIO_CRYPTO_BADMSG: + err = -RT_EINVAL; + break; + + case VIRTIO_CRYPTO_NOTSUPP: + err = -RT_ENOSYS; + break; + + case VIRTIO_CRYPTO_NOSPC: + err = -RT_EEMPTY; + break; + } + + virtio_crypto_symmetric_destroy_session(symmetric_ctx); + + return err; +} + +static const struct hwcrypto_symmetric_ops virtio_crypto_symmetric_ops = +{ + .crypt = virtio_crypto_symmetric_crypt, +}; + +static rt_err_t virtio_crypto_create(struct rt_hwcrypto_ctx *ctx) +{ + struct hwcrypto_symmetric *crypto_symmetric; + + switch (ctx->type & HWCRYPTO_MAIN_TYPE_MASK) + { + case HWCRYPTO_TYPE_AES: + case HWCRYPTO_TYPE_DES: + case HWCRYPTO_TYPE_3DES: + case HWCRYPTO_TYPE_RC4: + ctx->contex = rt_calloc(1, sizeof(struct virtio_crypto_symmetric_contex)); + + if (!ctx->contex) + { + return -RT_ENOMEM; + } + + crypto_symmetric = rt_container_of(ctx, struct hwcrypto_symmetric, parent); + 
crypto_symmetric->ops = &virtio_crypto_symmetric_ops; + break; + + default: + return -RT_ENOSYS; + } + + return RT_EOK; +} + +static void virtio_crypto_destroy(struct rt_hwcrypto_ctx *ctx) +{ + if (!ctx->contex) + { + return; + } + + switch (ctx->type & HWCRYPTO_MAIN_TYPE_MASK) + { + case HWCRYPTO_TYPE_AES: + case HWCRYPTO_TYPE_DES: + case HWCRYPTO_TYPE_3DES: + case HWCRYPTO_TYPE_RC4: + rt_free(ctx->contex); + break; + } +} + +static rt_err_t virtio_crypto_copy(struct rt_hwcrypto_ctx *des, + const struct rt_hwcrypto_ctx *src) +{ + rt_err_t err = RT_EOK; + + switch (src->type & HWCRYPTO_MAIN_TYPE_MASK) + { + case HWCRYPTO_TYPE_AES: + case HWCRYPTO_TYPE_DES: + case HWCRYPTO_TYPE_3DES: + case HWCRYPTO_TYPE_RC4: + break; + + default: + err = -RT_ENOSYS; + break; + } + + return err; +} + +static void virtio_crypto_reset(struct rt_hwcrypto_ctx *ctx) +{ +} + +static const struct rt_hwcrypto_ops virtio_crypto_ops = +{ + .create = virtio_crypto_create, + .destroy = virtio_crypto_destroy, + .copy = virtio_crypto_copy, + .reset = virtio_crypto_reset, +}; + +static void virtio_crypto_data_done(struct rt_virtqueue *vq) +{ + rt_ubase_t level; + struct virtio_crypto *vcrypto = vq->vdev->priv; + + level = rt_spin_lock_irqsave(&vcrypto->data_lock[vq->index]); + + rt_virtqueue_read_buf(vq, RT_NULL); + rt_completion_done(&vcrypto->data_done[vq->index]); + + rt_spin_unlock_irqrestore(&vcrypto->data_lock[vq->index], level); +} + +static void virtio_crypto_ctrl_done(struct rt_virtqueue *vq) +{ + rt_ubase_t level; + struct virtio_crypto *vcrypto = vq->vdev->priv; + + level = rt_spin_lock_irqsave(&vcrypto->ctrl_lock); + + rt_virtqueue_read_buf(vq, RT_NULL); + rt_completion_done(&vcrypto->ctrl_done); + + rt_spin_unlock_irqrestore(&vcrypto->ctrl_lock, level); +} + +static void virtio_crypto_config_changed(struct rt_virtio_device *vdev) +{ + rt_uint32_t status; + struct virtio_crypto *vcrypto = vdev->parent.user_data; + + rt_virtio_read_config(vdev, struct virtio_crypto_config, status, 
&status); + + if (status & (~VIRTIO_CRYPTO_S_HW_READY)) + { + LOG_E("%s: Unknown status bits = 0x%x", + rt_dm_dev_get_name(&vdev->parent), status); + + RT_ASSERT(0); + return; + } + + if (vcrypto->status != status) + { + /* Change status */ + vcrypto->status = status; + } +} + +static rt_err_t virtio_crypto_vq_init(struct virtio_crypto *vcrypto) +{ + rt_err_t err; + rt_size_t vqs_nr; + const char **names = RT_NULL; + rt_virtqueue_callback *cbs = RT_NULL; + + vqs_nr = vcrypto->max_data_queues + 1; + + vcrypto->vqs = rt_calloc(vqs_nr, sizeof(*vcrypto->vqs)); + + if (!vcrypto->vqs) + { + return -RT_ENOMEM; + } + + if (!(names = rt_malloc(sizeof(*names) * vqs_nr))) + { + return -RT_ENOMEM; + } + + if (!(cbs = rt_malloc(sizeof(*cbs) * vqs_nr))) + { + err = -RT_ENOMEM; + goto _out_free; + } + + vcrypto->data_lock = rt_malloc(sizeof(vcrypto->data_lock[0]) * + vcrypto->max_data_queues); + + if (!vcrypto->data_lock) + { + err = -RT_ENOMEM; + goto _out_free; + } + + vcrypto->data_done = rt_malloc(sizeof(vcrypto->data_done[0]) * + vcrypto->max_data_queues); + + if (!vcrypto->data_done) + { + err = -RT_ENOMEM; + goto _out_free; + } + + for (int i = 0; i < vcrypto->max_data_queues; ++i) + { + names[i] = "data"; + cbs[i] = &virtio_crypto_data_done; + + rt_spin_lock_init(&vcrypto->data_lock[i]); + rt_completion_init(&vcrypto->data_done[i]); + } + + names[vqs_nr - 1] = "ctrl"; + cbs[vqs_nr - 1] = &virtio_crypto_ctrl_done; + + if ((err = rt_virtio_virtqueue_install(vcrypto->vdev, vqs_nr, + vcrypto->vqs, names, cbs))) + { + goto _out_free; + } + + vcrypto->ctrl_vq = vcrypto->vqs[vcrypto->max_data_queues]; + +_out_free: + if (names) + { + rt_free(names); + } + if (cbs) + { + rt_free(cbs); + } + + return err; +} + +static void virtio_crypto_vq_finit(struct virtio_crypto *vcrypto) +{ + if (vcrypto->vqs) + { + if (vcrypto->vqs[0]) + { + rt_virtio_virtqueue_release(vcrypto->vdev); + } + + rt_free(vcrypto->vqs); + } + + if (vcrypto->data_lock) + { + rt_free(vcrypto->data_lock); + } + + if 
(vcrypto->data_done) + { + rt_free(vcrypto->data_done); + } +} + +static rt_err_t virtio_crypto_probe(struct rt_virtio_device *vdev) +{ + rt_err_t err; + struct virtio_crypto *vcrypto = rt_calloc(1, sizeof(*vcrypto)); + + if (!vcrypto) + { + return -RT_ENOMEM; + } + + vdev->priv = vcrypto; + vcrypto->vdev = vdev; + vdev->parent.user_data = vcrypto; + + rt_virtio_read_config(vdev, struct virtio_crypto_config, max_dataqueues, + &vcrypto->max_data_queues); + + if (vcrypto->max_data_queues > RT_CPUS_NR) + { + vcrypto->max_data_queues = RT_CPUS_NR; + } + else + { + vcrypto->max_data_queues = 1; + } + + rt_virtio_read_config(vdev, struct virtio_crypto_config, crypto_services, + &vcrypto->services); + rt_virtio_read_config(vdev, struct virtio_crypto_config, max_size, + &vcrypto->max_req_size); + rt_virtio_read_config(vdev, struct virtio_crypto_config, max_cipher_key_len, + &vcrypto->max_cipher_key_len); + rt_virtio_read_config(vdev, struct virtio_crypto_config, max_auth_key_len, + &vcrypto->max_auth_key_len); + + if ((err = virtio_crypto_vq_init(vcrypto))) + { + goto _fail; + } + + rt_spin_lock_init(&vcrypto->ctrl_lock); + rt_completion_init(&vcrypto->ctrl_done); + + vcrypto->parent.ops = &virtio_crypto_ops; + vcrypto->parent.id = ((rt_uint64_t)vdev->id.vendor << 32) | vdev->id.device; + + virtio_crypto_config_changed(vdev); + + if ((err = rt_hwcrypto_register(&vcrypto->parent, RT_HWCRYPTO_DEFAULT_NAME))) + { + goto _fail; + } + + return RT_EOK; + +_fail: + virtio_crypto_vq_finit(vcrypto); + rt_free(vcrypto); + + return err; +} + +static rt_err_t virtio_crypto_remove(struct rt_virtio_device *vdev) +{ + struct virtio_crypto *vcrypto = vdev->parent.user_data; + + rt_device_unregister(&vcrypto->parent.parent); + + virtio_crypto_vq_finit(vcrypto); + rt_free(vcrypto); + + return RT_EOK; +} + +static const struct rt_virtio_device_id virtio_crypto_ids[] = +{ + { VIRTIO_DEVICE_ID_CRYPTO, VIRTIO_DEVICE_ANY_ID }, + { /* sentinel */ } +}; + +static struct rt_virtio_driver 
virtio_crypto_driver = +{ + .ids = virtio_crypto_ids, + .features = + RT_BIT(VIRTIO_F_ANY_LAYOUT), + + .probe = virtio_crypto_probe, + .remove = virtio_crypto_remove, + .config_changed = virtio_crypto_config_changed, +}; +RT_VIRTIO_DRIVER_EXPORT(virtio_crypto_driver); diff --git a/components/drivers/virtio/virtio-gpu.c b/components/drivers/virtio/virtio-gpu.c new file mode 100644 index 00000000000..e090730f49f --- /dev/null +++ b/components/drivers/virtio/virtio-gpu.c @@ -0,0 +1,1021 @@ +/* + * Copyright (c) 2006-2023, RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2021-11-11 GuEe-GUI the first version + * 2023-02-25 GuEe-GUI using virtio dm + */ + +#include +#include +#include + +#define DBG_TAG "virtio.dev.gpu" +#define DBG_LVL DBG_INFO +#include + +#include +#include +#include "virtio_internal.h" +#include "virtio_config/virtio-gpu.h" + +#define CTRL_QUEUE 0 +#define CURSOR_QUEUE 1 + +struct virtio_gpu +{ + struct rt_graphic_device parent; + struct rt_virtio_device *vdev; + + struct rt_virtqueue *vqs[2]; + + rt_bool_t vsync; + rt_uint32_t width; + rt_uint32_t height; + rt_uint32_t pmode_id; + rt_uint32_t num_scanouts; + rt_uint32_t resource_id; +#define PRIMARY_PLANE_RESOURCE_ID 1 +#define CURSOR_PLANE_RESOURCE_ID 2 + + struct rt_work event_work; + struct rt_completion done; + +#define CURSOR_WIDTH 64 +#define CURSOR_HEIGHT 64 + rt_uint8_t *cursor; +}; + +#define raw_to_virtio_gpu(raw) rt_container_of(raw, struct virtio_gpu, parent) + +static void virtio_gpu_ctrl_send_command(struct virtio_gpu *vgpu, + const void *cmd, rt_size_t cmd_len, void *res, rt_size_t res_len) +{ + struct rt_virtqueue *vq; + + vq = vgpu->vqs[CTRL_QUEUE]; + + rt_virtqueue_wait_prepare(vq, 2); + + rt_virtqueue_add_outbuf(vq, (void *)cmd, cmd_len); + rt_virtqueue_add_inbuf(vq, res, res_len); + + rt_virtqueue_kick(vq); + + while (!rt_virtqueue_read_buf(vq, RT_NULL)) + { + rt_hw_cpu_relax(); + } +} + +static void 
virtio_gpu_cursor_send_command(struct virtio_gpu *vgpu, + const void *cmd, rt_size_t cmd_len) +{ + struct rt_virtqueue *vq; + + vq = vgpu->vqs[CURSOR_QUEUE]; + + rt_virtqueue_wait_prepare(vq, 1); + + rt_virtqueue_add_outbuf(vq, (void *)cmd, cmd_len); + + rt_virtqueue_kick(vq); + + while (!rt_virtqueue_read_buf(vq, RT_NULL)) + { + rt_hw_cpu_relax(); + } +} + +static rt_uint32_t virtio_gpu_next_resource_id(struct virtio_gpu *vgpu) +{ + rt_uint32_t last_resource_id = vgpu->resource_id++; + + while (last_resource_id != vgpu->resource_id) + { + if (vgpu->resource_id && + vgpu->resource_id == PRIMARY_PLANE_RESOURCE_ID && + vgpu->resource_id == CURSOR_PLANE_RESOURCE_ID) + { + return vgpu->resource_id; + } + + ++vgpu->resource_id; + } + + return 0; +} + +static rt_err_t virtio_gpu_create_2d_resource(struct virtio_gpu *vgpu, + enum virtio_gpu_formats format, rt_uint32_t resource_id, + rt_uint32_t width, rt_uint32_t height) +{ + struct virtio_gpu_ctrl_hdr res; + struct virtio_gpu_resource_create_2d req; + + req.hdr.type = rt_cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D); + req.resource_id = rt_cpu_to_le32(resource_id); + req.format = rt_cpu_to_le32(format); + req.width = rt_cpu_to_le32(width); + req.height = rt_cpu_to_le32(height); + + virtio_gpu_ctrl_send_command(vgpu, &req, sizeof(req), &res, sizeof(res)); + + return res.type == rt_cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA) ? RT_EOK : -RT_ERROR; +} + +static rt_err_t virtio_gpu_unref_resource(struct virtio_gpu *vgpu, + rt_uint32_t resource_id) +{ + struct virtio_gpu_ctrl_hdr res; + struct virtio_gpu_resource_unref req; + + rt_memset(&req, 0, sizeof(req)); + + req.hdr.type = rt_cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF); + req.resource_id = rt_cpu_to_le32(resource_id); + + virtio_gpu_ctrl_send_command(vgpu, &req, sizeof(req), &res, sizeof(res)); + + return res.type == rt_cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA) ? 
RT_EOK : -RT_ERROR; +} + +static rt_err_t virtio_gpu_attach_backing_resource(struct virtio_gpu *vgpu, + rt_uint32_t resource_id, void *buffer, rt_size_t size) +{ + struct virtio_gpu_ctrl_hdr res; + struct + { + struct virtio_gpu_resource_attach_backing req; + struct virtio_gpu_mem_entry mem; + } req; + + rt_memset(&req, 0, sizeof(req)); + + req.req.hdr.type = rt_cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING); + req.req.resource_id = rt_cpu_to_le32(resource_id); + req.req.nr_entries = rt_cpu_to_le32(1); + + req.mem.addr = rt_cpu_to_le64((rt_ubase_t)rt_kmem_v2p(buffer)); + req.mem.length = rt_cpu_to_le32(size); + + virtio_gpu_ctrl_send_command(vgpu, &req, sizeof(req), &res, sizeof(res)); + + return res.type == rt_cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA) ? RT_EOK : -RT_ERROR; +} + +static rt_err_t virtio_gpu_detach_backing_resource(struct virtio_gpu *vgpu, + rt_uint32_t resource_id) +{ + struct virtio_gpu_ctrl_hdr res; + struct virtio_gpu_resource_detach_backing req; + + rt_memset(&req, 0, sizeof(req)); + + req.hdr.type = rt_cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING); + req.resource_id = rt_cpu_to_le32(resource_id); + + virtio_gpu_ctrl_send_command(vgpu, &req, sizeof(req), &res, sizeof(res)); + + return res.type == rt_cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA) ? RT_EOK : -RT_ERROR; +} + +static rt_err_t virtio_gpu_set_scanout(struct virtio_gpu *vgpu, + rt_uint32_t scanout_id, rt_uint32_t resource_id, + rt_uint32_t width, rt_uint32_t height) +{ + struct virtio_gpu_ctrl_hdr res; + struct virtio_gpu_set_scanout req; + + rt_memset(&req, 0, sizeof(req)); + + req.hdr.type = rt_cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT); + req.r.width = rt_cpu_to_le32(width); + req.r.height = rt_cpu_to_le32(height); + req.scanout_id = rt_cpu_to_le32(scanout_id); + req.resource_id = rt_cpu_to_le32(resource_id); + + virtio_gpu_ctrl_send_command(vgpu, &req, sizeof(req), &res, sizeof(res)); + + return res.type == rt_cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA) ? 
RT_EOK : -RT_ERROR; +} + +static rt_err_t virtio_gpu_flush_resource(struct virtio_gpu *vgpu, + rt_uint32_t resource_id, + rt_uint32_t x, rt_uint32_t y, rt_uint32_t width, rt_uint32_t height) +{ + struct virtio_gpu_ctrl_hdr res; + struct virtio_gpu_resource_flush req; + + rt_memset(&req, 0, sizeof(req)); + + req.hdr.type = rt_cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH); + req.r.x = rt_cpu_to_le32(x); + req.r.y = rt_cpu_to_le32(y); + req.r.width = rt_cpu_to_le32(width); + req.r.height = rt_cpu_to_le32(height); + req.resource_id = rt_cpu_to_le32(resource_id); + + virtio_gpu_ctrl_send_command(vgpu, &req, sizeof(req), &res, sizeof(res)); + + return res.type == rt_cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA) ? RT_EOK : -RT_ERROR; +} + +static rt_err_t virtio_gpu_transfer_to_host_2d(struct virtio_gpu *vgpu, + rt_uint32_t resource_id, + rt_uint32_t x, rt_uint32_t y, rt_uint32_t width, rt_uint32_t height, + rt_uint64_t offset) +{ + struct virtio_gpu_ctrl_hdr res; + struct virtio_gpu_transfer_to_host_2d req; + + rt_memset(&req, 0, sizeof(req)); + + req.hdr.type = rt_cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D); + req.r.x = rt_cpu_to_le32(x); + req.r.y = rt_cpu_to_le32(y); + req.r.width = rt_cpu_to_le32(width); + req.r.height = rt_cpu_to_le32(height); + req.offset = rt_cpu_to_le64(offset); + req.resource_id = rt_cpu_to_le32(resource_id); + + virtio_gpu_ctrl_send_command(vgpu, &req, sizeof(req), &res, sizeof(res)); + + return res.type == rt_cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA) ? 
RT_EOK : -RT_ERROR; +} + +static rt_err_t virtio_gpu_update_cursor(struct virtio_gpu *vgpu, + rt_uint32_t scanout_id, rt_uint32_t resource_id, + rt_uint32_t hot_x, rt_uint32_t hot_y) +{ + struct virtio_gpu_update_cursor req; + + rt_memset(&req, 0, sizeof(req)); + + req.hdr.type = rt_cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR); + req.pos.scanout_id = rt_cpu_to_le32(scanout_id); + req.resource_id = rt_cpu_to_le32(resource_id); + req.hot_x = rt_cpu_to_le32(hot_x); + req.hot_y = rt_cpu_to_le32(hot_y); + + virtio_gpu_cursor_send_command(vgpu, &req, sizeof(req)); + + return RT_EOK; +} + +static rt_err_t virtio_gpu_cursor_move(struct virtio_gpu *vgpu, + rt_uint32_t scanout_id, rt_uint32_t resource_id, + rt_uint32_t x, rt_uint32_t y) +{ + struct virtio_gpu_update_cursor req; + + rt_memset(&req, 0, sizeof(req)); + + req.hdr.type = rt_cpu_to_le32(VIRTIO_GPU_CMD_MOVE_CURSOR); + req.pos.scanout_id = rt_cpu_to_le32(scanout_id); + req.pos.x = rt_cpu_to_le32(x); + req.pos.y = rt_cpu_to_le32(y); + req.resource_id = rt_cpu_to_le32(resource_id); + + virtio_gpu_cursor_send_command(vgpu, &req, sizeof(req)); + + return RT_EOK; +} + +static rt_err_t virtio_gpu_get_edid(struct virtio_gpu *vgpu) +{ + rt_err_t err; + struct virtio_gpu_cmd_get_edid req; + struct virtio_gpu_resp_edid *res = rt_malloc(sizeof(*res)); + + if (!res) + { + return -RT_ENOMEM; + } + + rt_memset(&req, 0, sizeof(req)); + req.hdr.type = rt_cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID); + req.scanout = rt_cpu_to_le32(vgpu->pmode_id); + + virtio_gpu_ctrl_send_command(vgpu, &req, sizeof(req), res, sizeof(*res)); + + if (res->hdr.type == rt_cpu_to_le32(VIRTIO_GPU_RESP_OK_EDID)) + { + rt_memcpy(&vgpu->parent.edid, res->edid, + rt_min_t(rt_size_t, sizeof(vgpu->parent.edid), res->size)); + err = RT_EOK; + } + else + { + err = -RT_EIO; + } + + rt_free(res); + + return err; +} + +static rt_err_t virtio_gpu_get_display_info(struct virtio_gpu *vgpu) +{ + struct virtio_gpu_ctrl_hdr req; + struct virtio_gpu_resp_display_info info; + + 
rt_memset(&req, 0, sizeof(req)); + req.type = rt_cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO); + + virtio_gpu_ctrl_send_command(vgpu, &req, sizeof(req), &info, sizeof(info)); + + if (info.hdr.type != rt_cpu_to_le32(VIRTIO_GPU_RESP_OK_DISPLAY_INFO)) + { + return -RT_EIO; + } + + for (int i = 0; i < vgpu->num_scanouts; ++i) + { + if (info.pmodes[i].enabled) + { + struct virtio_gpu_display_one *pmode = &info.pmodes[i]; + + vgpu->pmode_id = i; + vgpu->width = pmode->r.width; + vgpu->height = pmode->r.height; + break; + } + } + + return RT_EOK; +} + +static rt_err_t virtio_gpu_resize(struct virtio_gpu *vgpu) +{ + rt_err_t err = RT_EOK; + + if (rt_virtio_has_feature(vgpu->vdev, VIRTIO_GPU_F_EDID)) + { + err = virtio_gpu_get_edid(vgpu); + } + + if (!err) + { + err = virtio_gpu_get_display_info(vgpu); + } + + return err; +} + +static enum virtio_gpu_formats virtio_gpu_raw_modes[] = +{ + VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM, + VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM, +}; + +static rt_uint32_t virtio_gpu_modes[] = +{ + RTGRAPHIC_PIXEL_FORMAT_ARGB888, + RTGRAPHIC_PIXEL_FORMAT_ABGR888, +}; + +typedef char __assert_virtio_gpu_modes[-(RT_ARRAY_SIZE(virtio_gpu_raw_modes) % RT_ARRAY_SIZE(virtio_gpu_modes))]; + +static enum virtio_gpu_formats virtio_gpu_mode(rt_uint32_t mode) +{ + for (int i = 0; i < RT_ARRAY_SIZE(virtio_gpu_modes); ++i) + { + if (virtio_gpu_modes[i] == mode) + { + return virtio_gpu_raw_modes[i]; + } + } + + LOG_E("BUG of the graphic framework"); + RT_ASSERT(0); + return -1; +} + +static rt_err_t virtio_gpu_cursor_plane_fb_update(struct rt_graphic_plane *plane, + struct rt_device_rect_info *rect) +{ + rt_err_t err; + rt_uint32_t resource_id; + struct virtio_gpu *vgpu = raw_to_virtio_gpu(plane->graphic); + + if (!rect->width && !rect->height) + { + return RT_EOK; + } + + if (rect->width > CURSOR_WIDTH || rect->height > CURSOR_HEIGHT) + { + return -RT_EINVAL; + } + + resource_id = CURSOR_PLANE_RESOURCE_ID; + + err = virtio_gpu_transfer_to_host_2d(vgpu, resource_id, + rect->x, 
rect->y, rect->width, rect->height, 0); + + if (!err) + { + err = virtio_gpu_flush_resource(vgpu, resource_id, + rect->x, rect->y, rect->width, rect->height); + } + + virtio_gpu_update_cursor(vgpu, vgpu->pmode_id, resource_id, rect->x, rect->y); + virtio_gpu_cursor_move(vgpu, vgpu->pmode_id, resource_id, rect->x, rect->y); + + return err; +} + +static rt_err_t virtio_gpu_cursor_plane_fb_remap(struct rt_graphic_plane *plane, + rt_uint32_t mode, struct rt_device_rect_info *rect) +{ + rt_err_t err; + rt_uint32_t framebuffer_len, bits_per_pixel, line_length, resource_id; + struct virtio_gpu *vgpu = raw_to_virtio_gpu(plane->graphic); + + if (rect->width > CURSOR_WIDTH || rect->height > CURSOR_HEIGHT) + { + return -RT_EINVAL; + } + + resource_id = CURSOR_PLANE_RESOURCE_ID; + + err = virtio_gpu_create_2d_resource(vgpu, virtio_gpu_mode(mode), + resource_id, rect->width, rect->height); + + if (err) + { + return err; + } + + bits_per_pixel = rt_graphic_mode_bpp(mode); + line_length = rect->width * (bits_per_pixel / 8); + + framebuffer_len = line_length * rect->height; + + err = virtio_gpu_attach_backing_resource(vgpu, resource_id, vgpu->cursor, framebuffer_len); + + if (err) + { + goto _unref_resource; + } + + plane->line_length = line_length; + plane->bits_per_pixel = bits_per_pixel; + + plane->framebuffer = vgpu->cursor; + plane->screen_len = framebuffer_len; + plane->framebuffer_len = framebuffer_len; + + return RT_EOK; + +_unref_resource: + virtio_gpu_unref_resource(vgpu, resource_id); + + return err; +} + +static rt_err_t virtio_gpu_cursor_plane_fb_cleanup(struct rt_graphic_plane *plane) +{ + rt_uint32_t resource_id; + struct virtio_gpu *vgpu = raw_to_virtio_gpu(plane->graphic); + + if (!plane->framebuffer) + { + return RT_EOK; + } + + resource_id = CURSOR_PLANE_RESOURCE_ID; + + virtio_gpu_detach_backing_resource(vgpu, resource_id); + virtio_gpu_unref_resource(vgpu, resource_id); + + plane->framebuffer = RT_NULL; + + return RT_EOK; +} + +static const struct 
rt_graphic_plane_ops virtio_gpu_cursor_plane_ops = +{ + .update = virtio_gpu_cursor_plane_fb_update, + .fb_remap = virtio_gpu_cursor_plane_fb_remap, + .fb_cleanup = virtio_gpu_cursor_plane_fb_cleanup, +}; + +static rt_err_t virtio_gpu_primary_plane_fb_update(struct rt_graphic_plane *plane, + struct rt_device_rect_info *rect) +{ + rt_err_t err; + rt_uint32_t resource_id; + struct virtio_gpu *vgpu = raw_to_virtio_gpu(plane->graphic); + + vgpu->vsync = RT_FALSE; + resource_id = PRIMARY_PLANE_RESOURCE_ID; + + err = virtio_gpu_transfer_to_host_2d(vgpu, resource_id, + rect->x, rect->y, rect->width, rect->height, 0); + + if (!err) + { + err = virtio_gpu_flush_resource(vgpu, resource_id, + rect->x, rect->y, rect->width, rect->height); + } + vgpu->vsync = RT_TRUE; + + return err; +} + +static rt_err_t virtio_gpu_primary_plane_fb_remap(struct rt_graphic_plane *plane, + rt_uint32_t mode, struct rt_device_rect_info *rect) +{ + rt_err_t err; + void *framebuffer; + rt_uint32_t framebuffer_len, bits_per_pixel, line_length, resource_id; + struct virtio_gpu *vgpu = raw_to_virtio_gpu(plane->graphic); + + resource_id = PRIMARY_PLANE_RESOURCE_ID; + err = virtio_gpu_create_2d_resource(vgpu, virtio_gpu_mode(mode), + resource_id, rect->width, rect->height); + + if (err) + { + return err; + } + + bits_per_pixel = rt_graphic_mode_bpp(mode); + line_length = rect->width * (bits_per_pixel / 8); + + framebuffer_len = line_length * rect->height; + framebuffer = rt_malloc_align(framebuffer_len, ARCH_PAGE_SIZE); + + if (!framebuffer) + { + err = -RT_ENOMEM; + goto _unref_resource; + } + + err = virtio_gpu_attach_backing_resource(vgpu, resource_id, framebuffer, framebuffer_len); + + if (err) + { + goto _free_framebuffer; + } + + err = virtio_gpu_set_scanout(vgpu, vgpu->pmode_id, resource_id, + rect->width, rect->height); + + if (err) + { + goto _detach_backing; + } + + plane->line_length = line_length; + plane->bits_per_pixel = bits_per_pixel; + + plane->framebuffer = framebuffer; + 
plane->screen_len = framebuffer_len; + plane->framebuffer_len = framebuffer_len; + + return RT_EOK; + +_detach_backing: + virtio_gpu_detach_backing_resource(vgpu, resource_id); + +_free_framebuffer: + rt_free_align(framebuffer); + +_unref_resource: + virtio_gpu_unref_resource(vgpu, resource_id); + + return err; +} + +static rt_err_t virtio_gpu_primary_plane_fb_cleanup(struct rt_graphic_plane *plane) +{ + rt_uint32_t resource_id; + struct virtio_gpu *vgpu = raw_to_virtio_gpu(plane->graphic); + + if (!plane->framebuffer) + { + return RT_EOK; + } + + resource_id = PRIMARY_PLANE_RESOURCE_ID; + + virtio_gpu_detach_backing_resource(vgpu, resource_id); + virtio_gpu_unref_resource(vgpu, resource_id); + rt_free_align(plane->framebuffer); + + plane->framebuffer = RT_NULL; + + return RT_EOK; +} + +static const struct rt_graphic_plane_ops virtio_gpu_primary_plane_ops = +{ + .update = virtio_gpu_primary_plane_fb_update, + .fb_remap = virtio_gpu_primary_plane_fb_remap, + .fb_cleanup = virtio_gpu_primary_plane_fb_cleanup, +}; + +static rt_err_t virtio_gpu_dpms_switch(struct rt_graphic_device *gdev, rt_uint32_t dpms) +{ + rt_uint32_t update_ms = 0; + struct virtio_gpu *vgpu = raw_to_virtio_gpu(gdev); + + if (dpms == RT_GRAPHIC_DPMS_ON) + { + update_ms = RT_GRAPHIC_UPDATE_MS; + } + + rt_graphic_device_update_auto(&vgpu->parent, update_ms); + + return RT_EOK; +} + +static rt_err_t virtio_gpu_wait_vsync(struct rt_graphic_device *gdev) +{ + struct virtio_gpu *vgpu = raw_to_virtio_gpu(gdev); + + while (!vgpu->vsync) + { + rt_hw_cpu_relax(); + } + + return RT_EOK; +} + +static rt_err_t virtio_gpu_virgl_control(struct rt_graphic_device *gdev, int cmd, void *args) +{ + rt_err_t err; + struct virtio_gpu *vgpu = raw_to_virtio_gpu(gdev); + + switch (cmd) + { + case VIRTIO_GPU_CMD_GET_CAPSET_INFO: + { + struct virtio_gpu_get_capset_info req; + struct virtio_gpu_resp_capset_info *res = args; + + rt_memcpy(&req, args, sizeof(req)); + + virtio_gpu_ctrl_send_command(vgpu, &req, sizeof(req), res, 
sizeof(*res)); + + err = res->hdr.type == rt_cpu_to_le32(VIRTIO_GPU_RESP_OK_CAPSET_INFO) ? RT_EOK : -RT_ERROR; + break; + } + + case VIRTIO_GPU_CMD_GET_CAPSET: + { + struct virtio_gpu_get_capset req; + struct virtio_gpu_resp_capset *res = args; + + rt_memcpy(&req, args, sizeof(req)); + + virtio_gpu_ctrl_send_command(vgpu, &req, sizeof(req), res, sizeof(*res)); + + err = res->hdr.type == rt_cpu_to_le32(VIRTIO_GPU_RESP_OK_CAPSET) ? RT_EOK : -RT_ERROR; + break; + } + + case VIRTIO_GPU_CMD_CTX_CREATE: + { + struct virtio_gpu_ctrl_hdr res; + struct virtio_gpu_ctx_create *req = args; + + virtio_gpu_ctrl_send_command(vgpu, req, sizeof(*req), &res, sizeof(res)); + + rt_memcpy(args, &res, sizeof(res)); + err = res.type == rt_cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA) ? RT_EOK : -RT_ERROR; + break; + } + + case VIRTIO_GPU_CMD_CTX_DESTROY: + { + struct virtio_gpu_ctrl_hdr res; + struct virtio_gpu_ctx_destroy *req = args; + + virtio_gpu_ctrl_send_command(vgpu, req, sizeof(*req), &res, sizeof(res)); + + rt_memcpy(args, &res, sizeof(res)); + err = res.type == rt_cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA) ? RT_EOK : -RT_ERROR; + break; + } + + case VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE: + { + struct virtio_gpu_ctrl_hdr res; + struct virtio_gpu_ctx_resource *req = args; + + virtio_gpu_ctrl_send_command(vgpu, req, sizeof(*req), &res, sizeof(res)); + + rt_memcpy(args, &res, sizeof(res)); + err = res.type == rt_cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA) ? RT_EOK : -RT_ERROR; + break; + } + + case VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE: + { + struct virtio_gpu_ctrl_hdr res; + struct virtio_gpu_ctx_resource *req = args; + + virtio_gpu_ctrl_send_command(vgpu, req, sizeof(*req), &res, sizeof(res)); + + rt_memcpy(args, &res, sizeof(res)); + err = res.type == rt_cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA) ? 
RT_EOK : -RT_ERROR; + break; + } + + case VIRTIO_GPU_CMD_RESOURCE_CREATE_3D: + { + struct virtio_gpu_ctrl_hdr res; + struct virtio_gpu_resource_create_3d *req = args; + + /* + * Create a new resource id instead of user's. Because the + * `virtio_gpu_resource_create_3d` is base on the `virtio_gpu_ctrl_hdr`, + * user should pick the resource_id by res after request. + */ + req->resource_id = virtio_gpu_next_resource_id(vgpu); + + virtio_gpu_ctrl_send_command(vgpu, req, sizeof(*req), &res, sizeof(res)); + + rt_memcpy(args, &res, sizeof(res)); + err = res.type == rt_cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA) ? RT_EOK : -RT_ERROR; + break; + } + + case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D: + { + struct virtio_gpu_ctrl_hdr res; + struct virtio_gpu_transfer_host_3d *req = args; + + virtio_gpu_ctrl_send_command(vgpu, req, sizeof(*req), &res, sizeof(res)); + + rt_memcpy(args, &res, sizeof(res)); + err = res.type == rt_cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA) ? RT_EOK : -RT_ERROR; + break; + } + + case VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D: + { + struct virtio_gpu_ctrl_hdr res; + struct virtio_gpu_transfer_host_3d *req = args; + + virtio_gpu_ctrl_send_command(vgpu, req, sizeof(*req), &res, sizeof(res)); + + rt_memcpy(args, &res, sizeof(res)); + err = res.type == rt_cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA) ? RT_EOK : -RT_ERROR; + break; + } + + case VIRTIO_GPU_CMD_SUBMIT_3D: + { + struct virtio_gpu_ctrl_hdr res; + struct virtio_gpu_cmd_submit *req = args; + + virtio_gpu_ctrl_send_command(vgpu, req, req->size, &res, sizeof(res)); + + rt_memcpy(args, &res, sizeof(res)); + err = res.type == rt_cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA) ? 
RT_EOK : -RT_ERROR; + break; + } + + default: + err = -RT_ENOSYS; + break; + } + + return err; +} + +static const struct rt_graphic_device_ops virtio_gpu_virgl_ops = +{ + .dpms_switch = virtio_gpu_dpms_switch, + .wait_vsync = virtio_gpu_wait_vsync, + .control = virtio_gpu_virgl_control, +}; + +static const struct rt_graphic_device_ops virtio_gpu_ops = +{ + .dpms_switch = virtio_gpu_dpms_switch, + .wait_vsync = virtio_gpu_wait_vsync, +}; + +static void virtio_gpu_event_work(struct rt_work *work, void *work_data) +{ + rt_uint32_t events_read, events_clear = 0; + struct virtio_gpu *vgpu = work_data; + struct rt_virtio_device *vdev = vgpu->vdev; + + rt_virtio_read_config(vdev, struct virtio_gpu_config, events_read, &events_read); + + if (events_read & VIRTIO_GPU_EVENT_DISPLAY) + { + rt_graphic_device_enter(&vgpu->parent); + virtio_gpu_resize(vgpu); + rt_graphic_device_leave(&vgpu->parent); + + rt_graphic_device_hotplug_event(&vgpu->parent); + + events_clear |= VIRTIO_GPU_EVENT_DISPLAY; + } + + rt_virtio_write_config(vdev, struct virtio_gpu_config, events_clear, events_clear); +} + +static void virtio_gpu_config_changed(struct rt_virtio_device *vdev) +{ + struct virtio_gpu *vgpu = vdev->priv; + + rt_work_submit(&vgpu->event_work, 0); +} + +static rt_err_t virtio_gpu_vq_init(struct virtio_gpu *vgpu) +{ + const char *names[] = + { + "ctrl", + "cursor" + }; + rt_virtqueue_callback cbs[] = + { + RT_NULL, /* Just poll */ + RT_NULL, /* Just poll */ + }; + + return rt_virtio_virtqueue_install(vgpu->vdev, + RT_ARRAY_SIZE(names), vgpu->vqs, names, cbs); +} + +static void virtio_gpu_vq_finit(struct virtio_gpu *vgpu) +{ + if (vgpu->vqs[0]) + { + rt_virtio_virtqueue_release(vgpu->vdev); + } +} + +static void virtio_gpu_unregister(struct virtio_gpu *vgpu) +{ + if (rt_virtio_has_feature(vgpu->vdev, VIRTIO_GPU_F_EDID)) + { + rt_graphic_device_unregister(&vgpu->parent); + } + else + { + rt_graphic_device_simple_unregister(&vgpu->parent); + } +} + +static rt_err_t 
virtio_gpu_probe(struct rt_virtio_device *vdev) +{ + rt_err_t err; + struct rt_graphic_plane *primary_plane, *cursor_plane; + struct virtio_gpu *vgpu = rt_calloc(1, sizeof(*vgpu)); + + if (!vgpu) + { + return -RT_ENOMEM; + } + + vdev->priv = vgpu; + vgpu->vdev = vdev; + + vgpu->cursor = rt_malloc_align( + CURSOR_WIDTH * CURSOR_HEIGHT * sizeof(rt_uint32_t), ARCH_PAGE_SIZE); + + if (!vgpu->cursor) + { + err = -RT_ENOMEM; + goto _fail; + } + + if ((err = virtio_gpu_vq_init(vgpu))) + { + goto _fail; + } + + vgpu->vsync = RT_TRUE; + + rt_virtio_read_config(vdev, struct virtio_gpu_config, num_scanouts, &vgpu->num_scanouts); + + rt_work_init(&vgpu->event_work, virtio_gpu_event_work, vgpu); + + virtio_gpu_resize(vgpu); + + if (rt_virtio_has_feature(vdev, VIRTIO_GPU_F_VIRGL)) + { + vgpu->parent.ops = &virtio_gpu_virgl_ops; + } + else + { + vgpu->parent.ops = &virtio_gpu_ops; + } + + if (rt_virtio_has_feature(vdev, VIRTIO_GPU_F_EDID)) + { + primary_plane = rt_graphic_device_alloc_plane(&vgpu->parent, 0, + &virtio_gpu_primary_plane_ops, + virtio_gpu_modes, RT_ARRAY_SIZE(virtio_gpu_modes), + RT_GRAPHIC_PLANE_TYPE_PRIMARY); + + if (!primary_plane) + { + err = -RT_EIO; + goto _fail; + } + + if ((err = rt_graphic_device_add_plane(&vgpu->parent, primary_plane))) + { + goto _free_primary_plane; + } + + err = rt_graphic_device_register(&vgpu->parent); + } + else + { + err = rt_graphic_device_simple_register(&vgpu->parent, + vgpu->width, vgpu->height, 0, &virtio_gpu_primary_plane_ops, + virtio_gpu_modes, RT_ARRAY_SIZE(virtio_gpu_modes)); + } + + if (err) + { + goto _fail; + } + + cursor_plane = rt_graphic_device_alloc_plane(&vgpu->parent, 0, + &virtio_gpu_cursor_plane_ops, + virtio_gpu_modes, RT_ARRAY_SIZE(virtio_gpu_modes), + RT_GRAPHIC_PLANE_TYPE_CURSOR); + + if (!cursor_plane) + { + err = -RT_EIO; + goto _fail; + } + + if ((err = rt_graphic_device_add_plane(&vgpu->parent, cursor_plane))) + { + goto _free_cursor_plane; + } + + cursor_plane->width = CURSOR_WIDTH; + 
cursor_plane->height = CURSOR_HEIGHT; + + return RT_EOK; + +_free_cursor_plane: + virtio_gpu_unregister(vgpu); + rt_graphic_device_free_plane(cursor_plane); + +_free_primary_plane: + rt_graphic_device_free_plane(primary_plane); + +_fail: + virtio_gpu_vq_finit(vgpu); + + if (vgpu->cursor) + { + rt_free_align(vgpu->cursor); + } + rt_free(vgpu); + + return err; +} + +static rt_err_t virtio_gpu_remove(struct rt_virtio_device *vdev) +{ + struct virtio_gpu *vgpu = vdev->parent.user_data; + + virtio_gpu_unregister(vgpu); + virtio_gpu_vq_finit(vgpu); + rt_free_align(vgpu->cursor); + rt_free(vgpu); + + return RT_EOK; +} + +static const struct rt_virtio_device_id virtio_gpu_ids[] = +{ + { VIRTIO_DEVICE_ID_GPU, VIRTIO_DEVICE_ANY_ID }, + { /* sentinel */ } +}; + +static struct rt_virtio_driver virtio_gpu_driver = +{ + .ids = virtio_gpu_ids, + .features = + RT_BIT(VIRTIO_GPU_F_VIRGL) + | RT_BIT(VIRTIO_GPU_F_EDID) + | RT_BIT(VIRTIO_F_ANY_LAYOUT), + + .probe = virtio_gpu_probe, + .remove = virtio_gpu_remove, + .config_changed = virtio_gpu_config_changed, +}; +RT_VIRTIO_DRIVER_EXPORT(virtio_gpu_driver); diff --git a/components/drivers/virtio/virtio-input.c b/components/drivers/virtio/virtio-input.c new file mode 100644 index 00000000000..1b705b17d5f --- /dev/null +++ b/components/drivers/virtio/virtio-input.c @@ -0,0 +1,300 @@ +/* + * Copyright (c) 2006-2023, RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2021-11-11 GuEe-GUI the first version + */ + +#include +#include +#include + +#define DBG_TAG "virtio.dev.input" +#define DBG_LVL DBG_INFO +#include + +#include "virtio_config/virtio-input.h" + +#define QUEUE_EVENTS 0 +#define QUEUE_STATUS 1 + +struct virtio_input +{ + struct rt_input_device parent; + struct rt_virtio_device *vdev; + + struct rt_virtqueue *vqs[2]; + + rt_size_t events_nr; + struct rt_spinlock lock; + struct virtio_input_event *events; +}; + +#define raw_to_virtio_input(raw) 
rt_container_of(raw, struct virtio_input, parent) + +static rt_err_t virtio_input_trigger(struct rt_input_device *idev, + rt_uint16_t type, rt_uint16_t code, rt_int32_t value) +{ + struct rt_virtqueue *vq; + struct virtio_input_event status; + struct virtio_input *vinput = raw_to_virtio_input(idev); + + vq = vinput->vqs[QUEUE_STATUS]; + + rt_virtqueue_wait_prepare(vq, 1); + + rt_spin_lock(&vinput->lock); + + status.type = rt_cpu_to_le16(type); + status.code = rt_cpu_to_le16(code); + status.value = rt_cpu_to_le32(value); + rt_virtqueue_add_outbuf(vq, &status, sizeof(status)); + + rt_virtqueue_kick(vq); + + while (!rt_virtqueue_read_buf(vq, RT_NULL)) + { + rt_hw_cpu_relax(); + } + + rt_spin_unlock(&vinput->lock); + + return RT_EOK; +} + +static void virtio_input_events_done(struct rt_virtqueue *vq) +{ + rt_ubase_t level; + struct virtio_input_event *event; + struct virtio_input *vinput = vq->vdev->priv; + + level = rt_spin_lock_irqsave(&vinput->lock); + + while ((event = rt_virtqueue_read_buf(vq, RT_NULL))) + { + rt_input_event(&vinput->parent, + rt_le16_to_cpu(event->type), + rt_le16_to_cpu(event->code), + rt_le32_to_cpu(event->value)); + + LOG_D("%s: Event (type: %d, code: %d, value: %d)", + rt_dm_dev_get_name(&vinput->parent), + rt_le16_to_cpu(event->type), + rt_le16_to_cpu(event->code), + rt_le32_to_cpu(event->value)); + + rt_virtqueue_add_inbuf(vq, event, sizeof(*event)); + + rt_virtqueue_kick(vq); + } + + rt_spin_unlock_irqrestore(&vinput->lock, level); +} + +static void virtio_input_status_done(struct rt_virtqueue *vq) +{ +} + +static rt_uint8_t virtio_input_cfg_select(struct virtio_input *vinput, + rt_uint8_t select, rt_uint8_t subsel) +{ + rt_uint8_t size = 0; + struct rt_virtio_device *vdev = vinput->vdev; + + rt_virtio_write_config(vdev, struct virtio_input_config, select, select); + rt_virtio_write_config(vdev, struct virtio_input_config, subsel, subsel); + rt_virtio_read_config(vdev, struct virtio_input_config, size, &size); + + return size; +} + +static 
void virtio_input_cfg_bits(struct virtio_input *vinput, + rt_uint8_t select, rt_uint8_t subsel, rt_bitmap_t *map, rt_size_t bitcount) +{ + rt_uint8_t size = virtio_input_cfg_select(vinput, select, subsel); + + if (!size) + { + return; + } + + bitcount = rt_min_t(rt_size_t, bitcount, size * RT_BITS_PER_BYTE); + + rt_virtio_read_bytes(vinput->vdev, + rt_offsetof(struct virtio_input_config, bitmap), map, 1, size); + + rt_bitmap_set_bit(vinput->parent.cap, subsel); +} + +static void virtio_input_cfg_abs(struct virtio_input *vinput, int abs) +{ + rt_uint32_t min, max, res, fuzz, flat; + struct rt_virtio_device *vdev = vinput->vdev; + + virtio_input_cfg_select(vinput, VIRTIO_INPUT_CFG_ABS_INFO, abs); + rt_virtio_read_config(vdev, struct virtio_input_config, abs.min, &min); + rt_virtio_read_config(vdev, struct virtio_input_config, abs.max, &max); + rt_virtio_read_config(vdev, struct virtio_input_config, abs.res, &res); + rt_virtio_read_config(vdev, struct virtio_input_config, abs.fuzz, &fuzz); + rt_virtio_read_config(vdev, struct virtio_input_config, abs.flat, &flat); + + rt_input_set_capability(&vinput->parent, EV_ABS, abs); + rt_input_set_absinfo(&vinput->parent, abs, min, max, fuzz, flat); + vinput->parent.absinfo->resolution = res; +} + +static rt_err_t virtio_input_vq_init(struct virtio_input *vinput) +{ + rt_err_t err; + const char *names[] = + { + "events", + "status" + }; + rt_virtqueue_callback cbs[] = + { + &virtio_input_events_done, + &virtio_input_status_done, + }; + struct rt_virtqueue *vq; + + err = rt_virtio_virtqueue_install(vinput->vdev, + RT_ARRAY_SIZE(names), vinput->vqs, names, cbs); + + if (err) + { + return err; + } + + vinput->events_nr = rt_virtqueue_get_virtq_size(vinput->vqs[QUEUE_EVENTS]); + + vinput->events = rt_malloc(sizeof(*vinput->events) * vinput->events_nr); + + if (!vinput->events) + { + return -RT_ENOMEM; + } + + vq = vinput->vqs[QUEUE_EVENTS]; + + for (int i = 0; i < vinput->events_nr; ++i) + { + rt_virtqueue_add_inbuf(vq, 
&vinput->events[i], sizeof(*vinput->events)); + + rt_virtqueue_submit(vq); + } + + rt_virtqueue_notify(vq); + + return RT_EOK; +} + +static void virtio_input_vq_finit(struct virtio_input *vinput) +{ + if (vinput->vqs[0]) + { + rt_virtio_virtqueue_release(vinput->vdev); + } + + if (vinput->events) + { + rt_free(vinput->events); + } +} + +static rt_err_t virtio_input_probe(struct rt_virtio_device *vdev) +{ + rt_err_t err; + struct virtio_input *vinput = rt_calloc(1, sizeof(*vinput)); + + if (!vinput) + { + return -RT_ENOMEM; + } + + vdev->priv = vinput; + vinput->vdev = vdev; + vdev->parent.user_data = vinput; + + if ((err = virtio_input_vq_init(vinput)) < 0) + { + goto _fail; + } + + rt_spin_lock_init(&vinput->lock); + + virtio_input_cfg_bits(vinput, VIRTIO_INPUT_CFG_EV_BITS, EV_KEY, + vinput->parent.key_map, KEY_CNT); + virtio_input_cfg_bits(vinput, VIRTIO_INPUT_CFG_EV_BITS, EV_REL, + vinput->parent.rel_map, REL_CNT); + virtio_input_cfg_bits(vinput, VIRTIO_INPUT_CFG_EV_BITS, EV_ABS, + vinput->parent.abs_map, ABS_CNT); + + if (rt_bitmap_test_bit(vinput->parent.cap, EV_ABS)) + { + for (int abs = 0; abs < ABS_CNT; ++abs) + { + if (!rt_bitmap_test_bit(vinput->parent.abs_map, abs)) + { + continue; + } + + virtio_input_cfg_abs(vinput, abs); + } + + if (rt_bitmap_test_bit(vinput->parent.abs_map, ABS_MT_SLOT)) + { + int nslots = vinput->parent.absinfo[ABS_MT_SLOT].maximum + 1; + + if ((err = rt_input_setup_touch(&vinput->parent, nslots, RT_NULL))) + { + goto _fail; + } + } + } + + vinput->parent.trigger = &virtio_input_trigger; + + if ((err = rt_input_device_register(&vinput->parent))) + { + goto _fail; + } + + return RT_EOK; + +_fail: + virtio_input_vq_finit(vinput); + rt_input_remove_config(&vinput->parent); + rt_free(vinput); + + return err; +} + +static rt_err_t virtio_input_remove(struct rt_virtio_device *vdev) +{ + struct virtio_input *vinput = vdev->parent.user_data; + + rt_input_device_unregister(&vinput->parent); + + virtio_input_vq_finit(vinput); + 
rt_free(vinput); + + return RT_EOK; +} + +static const struct rt_virtio_device_id virtio_input_ids[] = +{ + { VIRTIO_DEVICE_ID_INPUT, VIRTIO_DEVICE_ANY_ID }, + { /* sentinel */ } +}; + +static struct rt_virtio_driver virtio_input_driver = +{ + .ids = virtio_input_ids, + .probe = virtio_input_probe, + .remove = virtio_input_remove, +}; +RT_VIRTIO_DRIVER_EXPORT(virtio_input_driver); diff --git a/components/drivers/virtio/virtio-net.c b/components/drivers/virtio/virtio-net.c new file mode 100644 index 00000000000..e2e1b559a66 --- /dev/null +++ b/components/drivers/virtio/virtio-net.c @@ -0,0 +1,423 @@ +/* + * Copyright (c) 2006-2023, RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2021-11-11 GuEe-GUI the first version + * 2023-02-25 GuEe-GUI using virtio dm + */ + +#include +#include +#include + +#define DBG_TAG "virtio.dev.net" +#define DBG_LVL DBG_INFO +#include + +#include +#include + +#include "virtio_config/virtio-net.h" + +#define QUEUE_RX(n) (n) +#define QUEUE_TX(n) ((n) + 1) + +rt_packed(struct net_packet +{ + struct virtio_net_hdr hdr; + + rt_uint8_t mss[VIRTIO_NET_MSS]; +}); + +struct virtio_net +{ + struct eth_device parent; + struct rt_virtio_device *vdev; + + rt_uint16_t status; + + int max_pairs; + int request_num; + void (*virtqueue_add_net_packet)(struct rt_virtqueue *, struct net_packet *, rt_size_t); + struct rt_virtqueue *vqs[RT_CPUS_NR * 2]; + + rt_list_t rx_vq_nodes; + struct rt_spinlock rx_lock; + + struct net_packet *tx_packet; + struct net_packet *rx_packet; +}; + +#define raw_to_virtio_net(raw) \ + rt_container_of(rt_container_of(raw, struct eth_device, parent), struct virtio_net, parent) + +static void virtqueue_add_net_packet_split(struct rt_virtqueue *vq, + struct net_packet *packet, rt_size_t len) +{ + rt_virtqueue_add_outbuf(vq, packet, sizeof(packet->hdr) + len); +} + +static void virtqueue_add_net_packet_packed(struct rt_virtqueue *vq, + struct net_packet *packet, 
rt_size_t len) +{ + rt_virtqueue_add_outbuf(vq, &packet->hdr, sizeof(packet->hdr)); + rt_virtqueue_add_outbuf(vq, &packet->mss, len); +} + +static rt_err_t virtio_net_tx(rt_device_t dev, struct pbuf *p) +{ + struct rt_virtqueue *vq; + struct net_packet *packet; + struct virtio_net *vnet = raw_to_virtio_net(dev); + + vq = vnet->vqs[QUEUE_TX(rt_hw_cpu_id() % vnet->max_pairs)]; + rt_virtqueue_wait_prepare(vq, vnet->request_num); + + packet = &vnet->tx_packet[vq->index * rt_virtqueue_next_buf_index(vq)]; + packet->hdr.flags = 0; + packet->hdr.gso_type = 0; + packet->hdr.hdr_len = 0; + packet->hdr.gso_size = 0; + packet->hdr.csum_start = 0; + packet->hdr.csum_offset = 0; + packet->hdr.num_buffers = 0; + + RT_ASSERT(p->tot_len <= sizeof(packet->mss)); + + pbuf_copy_partial(p, &packet->mss, p->tot_len, 0); + + vnet->virtqueue_add_net_packet(vq, packet, p->tot_len); + + rt_virtqueue_kick(vq); + + return RT_EOK; +} + +static struct pbuf *virtio_net_rx(rt_device_t dev) +{ + rt_size_t size; + rt_ubase_t level; + struct pbuf *p = RT_NULL; + struct net_packet *packet; + struct rt_virtqueue *vq; + struct virtio_net *vnet = raw_to_virtio_net(dev); + + level = rt_spin_lock_irqsave(&vnet->rx_lock); + + rt_list_for_each_entry(vq, &vnet->rx_vq_nodes, user_list) + { + packet = rt_virtqueue_read_buf(vq, &size); + + if (!packet) + { + rt_list_remove(&vq->user_list); + break; + } + size -= sizeof(packet->hdr); + + rt_spin_unlock_irqrestore(&vnet->rx_lock, level); + + p = pbuf_alloc(PBUF_RAW, size, PBUF_RAM); + + if (!p) + { + return RT_NULL; + } + + rt_memcpy(p->payload, &packet->mss, size); + + level = rt_spin_lock_irqsave(&vnet->rx_lock); + + if (!rt_virtqueue_poll(vq, packet - vnet->rx_packet)) + { + rt_list_remove(&vq->user_list); + } + + rt_virtqueue_add_inbuf(vq, packet, sizeof(*packet)); + + rt_virtqueue_kick(vq); + + break; + } + + rt_spin_unlock_irqrestore(&vnet->rx_lock, level); + + return p; +} + +static rt_err_t virtio_net_control(rt_device_t dev, int cmd, void *args) +{ + 
rt_err_t err = RT_EOK; + rt_uint8_t *mac; + struct virtio_net *vnet = raw_to_virtio_net(dev); + + switch (cmd) + { + case NIOCTL_GADDR: + mac = args; + + if (!mac) + { + err = -RT_EINVAL; + break; + } + + for (int i = 0; i < 6; ++i) + { + rt_virtio_read_config(vnet->vdev, struct virtio_net_config, mac[i], &mac[i]); + } + break; + + default: + err = -RT_EINVAL; + break; + } + + return err; +} + +#ifdef RT_USING_DEVICE_OPS +const static struct rt_device_ops virtio_net_ops = +{ + .control = virtio_net_control, +}; +#endif + +static void virtio_net_rx_done(struct rt_virtqueue *vq) +{ + rt_ubase_t level; + struct virtio_net *vnet = vq->vdev->priv; + + level = rt_spin_lock_irqsave(&vnet->rx_lock); + + rt_list_insert_before(&vnet->rx_vq_nodes, &vq->user_list); + + rt_spin_unlock_irqrestore(&vnet->rx_lock, level); + + eth_device_ready(&vnet->parent); +} + +static void virtio_net_tx_done(struct rt_virtqueue *vq) +{ + rt_virtqueue_read_buf(vq, RT_NULL); +} + +static void virtio_net_config_changed(struct rt_virtio_device *vdev) +{ + rt_uint16_t status; + rt_bool_t link_up; + struct virtio_net *vnet = vdev->priv; + + if (!rt_virtio_has_feature(vdev, VIRTIO_NET_F_STATUS)) + { + return; + } + + rt_virtio_read_config(vdev, struct virtio_net_config, status, &status); + + /* Remove other status bit */ + status &= VIRTIO_NET_S_LINK_UP; + + if (vnet->status == status) + { + /* Status no change */ + return; + } + + link_up = !!((vnet->status = status) & VIRTIO_NET_S_LINK_UP); + + LOG_D("%s linkchange to %s", rt_dm_dev_get_name(&vdev->parent), link_up ? 
"up" : "down"); + + eth_device_linkchange(&vnet->parent, link_up); +} + +static rt_err_t virtio_net_vq_init(struct virtio_net *vnet) +{ + rt_err_t err; + rt_size_t vqs_nr = 2, qsize; + const char *names[RT_ARRAY_SIZE(vnet->vqs)]; + rt_virtqueue_callback cbs[RT_ARRAY_SIZE(vnet->vqs)]; + struct rt_virtio_device *vdev = vnet->vdev; + + if (rt_virtio_has_feature(vdev, VIRTIO_NET_F_MQ) || + rt_virtio_has_feature(vdev, VIRTIO_NET_F_RSS)) + { + rt_virtio_read_config(vdev, struct virtio_net_config, max_virtqueue_pairs, &vqs_nr); + vqs_nr *= 2; + } + + for (int i = 0; i < vqs_nr; i += 2) + { + int rx = QUEUE_RX(i), tx = QUEUE_TX(i); + + names[rx] = "rx"; + names[tx] = "tx"; + + cbs[rx] = &virtio_net_rx_done; + cbs[tx] = &virtio_net_tx_done; + } + + vnet->max_pairs = vqs_nr / 2; + err = rt_virtio_virtqueue_install(vdev, vqs_nr, vnet->vqs, names, cbs); + + if (err) + { + return err; + } + + qsize = rt_virtqueue_get_virtq_size(vnet->vqs[QUEUE_TX(0)]); + + if (!(vnet->tx_packet = rt_malloc(sizeof(vnet->tx_packet[0]) * vqs_nr * qsize))) + { + return -RT_ENOMEM; + } + + qsize = rt_virtqueue_get_virtq_size(vnet->vqs[QUEUE_RX(0)]); + + if (!(vnet->rx_packet = rt_malloc(sizeof(vnet->rx_packet[0]) * vqs_nr * qsize))) + { + return -RT_ENOMEM; + } + + for (int i = 0; i < vqs_nr; i += 2) + { + int rx = QUEUE_RX(i); + struct net_packet *packet; + struct rt_virtqueue *vq = vnet->vqs[rx]; + + packet = &vnet->rx_packet[rx * qsize]; + + for (int idx = 0; idx < qsize; ++idx, ++packet) + { + rt_virtqueue_add_inbuf(vq, packet, sizeof(*packet)); + + rt_virtqueue_submit(vq); + } + + rt_virtqueue_notify(vq); + } + + return RT_EOK; +} + +static void virtio_net_vq_finit(struct virtio_net *vnet) +{ + if (vnet->vqs[0]) + { + rt_virtio_virtqueue_release(vnet->vdev); + } + if (vnet->tx_packet) + { + rt_free(vnet->tx_packet); + } + if (vnet->rx_packet) + { + rt_free(vnet->rx_packet); + } +} + +static rt_err_t virtio_net_probe(struct rt_virtio_device *vdev) +{ + rt_err_t err = RT_EOK; + struct virtio_net 
*vnet = rt_calloc(1, sizeof(*vnet)); + + if (!vnet) + { + return -RT_ENOMEM; + } + + vdev->priv = vnet; + vnet->vdev = vdev; + vdev->parent.user_data = vnet; +#ifdef RT_USING_DEVICE_OPS + vnet->parent.parent.ops = &virtio_net_ops; +#else + vnet->parent.parent.control = virtio_net_control; +#endif + vnet->parent.eth_tx = virtio_net_tx; + vnet->parent.eth_rx = virtio_net_rx; + + if (rt_virtio_has_feature(vdev, VIRTIO_F_RING_PACKED)) + { + vnet->request_num = 2; + vnet->virtqueue_add_net_packet = &virtqueue_add_net_packet_packed; + } + else + { + vnet->request_num = 1; + vnet->virtqueue_add_net_packet = &virtqueue_add_net_packet_split; + } + + if ((err = virtio_net_vq_init(vnet))) + { + goto _fail; + } + + rt_list_init(&vnet->rx_vq_nodes); + rt_spin_lock_init(&vnet->rx_lock); + + if ((err = rt_dm_dev_set_name_auto(&vnet->parent.parent, "e")) < 0) + { + goto _fail; + } + + if ((err = eth_device_init(&vnet->parent, rt_dm_dev_get_name(&vnet->parent.parent)))) + { + goto _fail; + } + + eth_device_linkchange(&vnet->parent, RT_TRUE); + + return RT_EOK; + +_fail: + virtio_net_vq_finit(vnet); + rt_free(vnet); + + return err; +} + +static rt_err_t virtio_net_remove(struct rt_virtio_device *vdev) +{ + struct virtio_net *vnet = vdev->parent.user_data; + + eth_device_linkchange(&vnet->parent, RT_FALSE); + eth_device_deinit(&vnet->parent); + + virtio_net_vq_finit(vnet); + rt_free(vnet); + + return RT_EOK; +} + +static const struct rt_virtio_device_id virtio_net_ids[] = +{ + { VIRTIO_DEVICE_ID_NET, VIRTIO_DEVICE_ANY_ID }, + { /* sentinel */ } +}; + +static struct rt_virtio_driver virtio_net_driver = +{ + .ids = virtio_net_ids, + .features = + RT_BIT(VIRTIO_NET_F_MTU) + | RT_BIT(VIRTIO_NET_F_MAC) + | RT_BIT(VIRTIO_NET_F_MRG_RXBUF) + | RT_BIT(VIRTIO_NET_F_STATUS) + | RT_BIT(VIRTIO_NET_F_CTRL_RX) + | RT_BIT(VIRTIO_NET_F_CTRL_VLAN) + | RT_BIT(VIRTIO_NET_F_CTRL_RX_EXTRA) + | RT_BIT(VIRTIO_NET_F_GUEST_ANNOUNCE) + | RT_BIT(VIRTIO_NET_F_MQ) + | RT_BIT(VIRTIO_NET_F_CTRL_MAC_ADDR) + | 
RT_BIT(VIRTIO_NET_F_SPEED_DUPLEX) + | RT_BIT(VIRTIO_F_ANY_LAYOUT), + + .probe = virtio_net_probe, + .remove = virtio_net_remove, + .config_changed = virtio_net_config_changed, +}; +RT_VIRTIO_DRIVER_EXPORT(virtio_net_driver); diff --git a/components/drivers/virtio/virtio-rng.c b/components/drivers/virtio/virtio-rng.c new file mode 100644 index 00000000000..99d27819603 --- /dev/null +++ b/components/drivers/virtio/virtio-rng.c @@ -0,0 +1,191 @@ +/* + * Copyright (c) 2006-2023, RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2023-02-25 GuEe-GUI the first version + */ + +#include +#include +#include + +#define DBG_TAG "virtio.dev.rng" +#define DBG_LVL DBG_INFO +#include + +#include + +struct virtio_rng +{ + struct rt_hwcrypto_device parent; + struct rt_virtio_device *vdev; + + struct rt_virtqueue *vqs[1]; + struct rt_spinlock lock; +}; + +static rt_uint32_t virtio_rng_rand(struct hwcrypto_rng *ctx) +{ + rt_uint32_t rand; + struct rt_virtqueue *vq; + struct virtio_rng *vrng = rt_container_of(ctx->parent.device, struct virtio_rng, parent); + + vq = vrng->vqs[0]; + rt_virtqueue_wait_prepare(vq, 1); + + rt_spin_lock(&vrng->lock); + + rt_virtqueue_add_inbuf(vq, &rand, sizeof(rand)); + rt_virtqueue_kick(vq); + + while (!rt_virtqueue_read_buf(vq, RT_NULL)) + { + rt_hw_cpu_relax(); + } + + rt_spin_unlock(&vrng->lock); + + return rand; +} + +static const struct hwcrypto_rng_ops rng_ops = +{ + .update = virtio_rng_rand, +}; + +static rt_err_t virtio_rng_create(struct rt_hwcrypto_ctx *ctx) +{ + if ((ctx->type & HWCRYPTO_MAIN_TYPE_MASK) == HWCRYPTO_TYPE_RNG) + { + struct hwcrypto_rng *rng; + + ctx->contex = RT_NULL; + + rng = rt_container_of(ctx, struct hwcrypto_rng, parent); + rng->ops = &rng_ops; + + return RT_EOK; + } + + return -RT_ENOSYS; +} + +static void virtio_rng_destroy(struct rt_hwcrypto_ctx *ctx) +{ +} + +static rt_err_t virtio_rng_copy(struct rt_hwcrypto_ctx *des, + const struct rt_hwcrypto_ctx 
*src) +{ + if ((src->type & HWCRYPTO_MAIN_TYPE_MASK) == HWCRYPTO_TYPE_RNG) + { + return RT_EOK; + } + + return -RT_ENOSYS; +} + +static void virtio_rng_reset(struct rt_hwcrypto_ctx *ctx) +{ +} + +static const struct rt_hwcrypto_ops virtio_rng_ops = +{ + .create = virtio_rng_create, + .destroy = virtio_rng_destroy, + .copy = virtio_rng_copy, + .reset = virtio_rng_reset, +}; + +static void virtio_rng_done(struct rt_virtqueue *vq) +{ +} + +static rt_err_t virtio_rng_vq_init(struct virtio_rng *vrng) +{ + const char *names[] = + { + "req", + }; + rt_virtqueue_callback cbs[] = + { + &virtio_rng_done, + }; + + return rt_virtio_virtqueue_install(vrng->vdev, RT_ARRAY_SIZE(vrng->vqs), + vrng->vqs, names, cbs); +} + +static void virtio_rng_vq_finit(struct virtio_rng *vrng) +{ + if (vrng->vqs[0]) + { + rt_virtio_virtqueue_release(vrng->vdev); + } +} + +static rt_err_t virtio_rng_probe(struct rt_virtio_device *vdev) +{ + rt_err_t err; + struct virtio_rng *vrng = rt_calloc(1, sizeof(*vrng)); + + if (!vrng) + { + return -RT_ENOMEM; + } + + vdev->priv = vrng; + vrng->vdev = vdev; + vdev->parent.user_data = vrng; + + if ((err = virtio_rng_vq_init(vrng))) + { + goto _fail; + } + + rt_spin_lock_init(&vrng->lock); + vrng->parent.ops = &virtio_rng_ops; + vrng->parent.id = vdev->id.vendor; + + if ((err = rt_hwcrypto_register(&vrng->parent, "hwrng"))) + { + goto _fail; + } + + return RT_EOK; + +_fail: + virtio_rng_vq_finit(vrng); + rt_free(vrng); + + return err; +} + +static rt_err_t virtio_rng_remove(struct rt_virtio_device *vdev) +{ + struct virtio_rng *vrng = vdev->parent.user_data; + + rt_device_unregister(&vrng->parent.parent); + + virtio_rng_vq_finit(vrng); + rt_free(vrng); + + return RT_EOK; +} + +static const struct rt_virtio_device_id virtio_rng_ids[] = +{ + { VIRTIO_DEVICE_ID_RNG, VIRTIO_DEVICE_ANY_ID }, + { /* sentinel */ } +}; + +static struct rt_virtio_driver virtio_rng_driver = +{ + .ids = virtio_rng_ids, + .probe = virtio_rng_probe, + .remove = virtio_rng_remove, +}; 
+RT_VIRTIO_DRIVER_EXPORT(virtio_rng_driver); diff --git a/components/drivers/virtio/virtio-scmi.c b/components/drivers/virtio/virtio-scmi.c new file mode 100644 index 00000000000..92343e4f7e9 --- /dev/null +++ b/components/drivers/virtio/virtio-scmi.c @@ -0,0 +1,293 @@ +/* + * Copyright (c) 2006-2023, RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2023-02-25 GuEe-GUI the first version + */ + +#include +#include +#include + +#define DBG_TAG "virtio.dev.scmi" +#define DBG_LVL DBG_INFO +#include + +#include "virtio_internal.h" +#include "virtio_config/virtio-scmi.h" +#include "../firmware/arm_scmi/agent.h" + +#define QUEUE_CMDQ 0 +#define QUEUE_EVENTQ 1 + +struct virtio_scmi_event_msg_ext +{ + rt_list_t list; + + struct rt_scmi_device *sdev; + rt_scmi_msg_callback rx_callback; + + rt_size_t msg_size; + struct virtio_scmi_event_msg event_msg; +}; + +struct virtio_scmi +{ + struct rt_virtio_device *vdev; + + rt_list_t event_nodes; + struct rt_work event_work; + + struct rt_virtqueue *vqs[2]; + struct rt_completion done; + struct rt_spinlock lock; +}; + +static struct virtio_scmi *system_virtio_scmi = RT_NULL; + +static rt_err_t scmi_agent_virtio_setup(struct scmi_agent *agent, + struct rt_device *dev) +{ + RT_ASSERT(system_virtio_scmi != RT_NULL); + + agent->priv = system_virtio_scmi; + + return RT_EOK; +} + +static rt_err_t scmi_agent_virtio_process_msg(struct scmi_agent *agent, + struct rt_scmi_msg *msg) +{ + rt_base_t level; + struct rt_virtqueue *vq; + struct virtio_scmi *vscmi = agent->priv; + + if (!msg->rx_callback || + !rt_virtio_has_feature(vscmi->vdev, VIRTIO_SCMI_F_P2A_CHANNELS)) + { + void *buffer; + rt_size_t request_size, response_size; + struct virtio_scmi_request *request; + struct virtio_scmi_response *response; + + request_size = sizeof(*request) + msg->in_msg_size; + response_size = sizeof(*response) + msg->out_msg_size; + buffer = rt_malloc(request_size + response_size); + + if 
(!buffer) + { + return -RT_ENOMEM; + } + + request = buffer; + response = buffer + request_size; + + request->hdr = cpu_to_virtio32(vscmi->vdev, scmi_header( + msg->message_id, 0, msg->sdev->protocol_id, 0)); + rt_memcpy(request->params, msg->in_msg, msg->in_msg_size); + + vq = vscmi->vqs[QUEUE_CMDQ]; + rt_virtqueue_wait_prepare(vq, 2); + + level = rt_spin_lock_irqsave(&vscmi->lock); + + rt_virtqueue_add_outbuf(vq, request, request_size); + rt_virtqueue_add_inbuf(vq, response, response_size); + rt_virtqueue_kick(vq); + + rt_spin_unlock_irqrestore(&vscmi->lock, level); + + rt_completion_wait(&vscmi->done, RT_WAITING_FOREVER); + + if (msg->out_msg) + { + if (request->hdr != response->hdr) + { + LOG_W("WTF? request header(%x) != response header(%x)", + rt_le32_to_cpu(request->hdr), rt_le32_to_cpu(response->hdr)); + } + + rt_memcpy(msg->out_msg, response->ret_values, msg->out_msg_size); + } + + /* Fixup for device */ + if (msg->rx_callback) + { + msg->rx_callback(msg->sdev, response->ret_values, msg->out_msg_size); + } + + rt_free(buffer); + } + else + { + struct virtio_scmi_event_msg_ext *msge = rt_malloc(sizeof(*msge) + + rt_max(msg->in_msg_size, msg->out_msg_size)); + + if (!msge) + { + return -RT_ENOMEM; + } + + msge->sdev = msg->sdev; + msge->rx_callback = msg->rx_callback; + msge->msg_size = msg->out_msg_size; + msge->event_msg.hdr = cpu_to_virtio32(vscmi->vdev, scmi_header( + msg->message_id, 0, msg->sdev->protocol_id, 0)); + rt_memcpy(msge->event_msg.payload, msg->in_msg, msg->in_msg_size); + + vq = vscmi->vqs[QUEUE_EVENTQ]; + rt_virtqueue_wait_prepare(vq, 1); + + level = rt_spin_lock_irqsave(&vscmi->lock); + + rt_virtqueue_add_outbuf(vq, &msge->event_msg, + sizeof(msge->event_msg) + msg->in_msg_size); + rt_virtqueue_kick(vq); + + rt_spin_unlock_irqrestore(&vscmi->lock, level); + } + + return RT_EOK; +} + +struct scmi_agent_ops scmi_agent_virtio_ops = +{ + .name = "virtio", + .setup = scmi_agent_virtio_setup, + .process_msg = scmi_agent_virtio_process_msg, +}; 
+ +static void virtio_scmi_work(struct rt_work *work, void *work_data) +{ + rt_ubase_t level; + struct virtio_scmi *vscmi = work_data; + struct virtio_scmi_event_msg_ext *msge, *msge_next; + + level = rt_spin_lock_irqsave(&vscmi->lock); + + rt_list_for_each_entry_safe(msge, msge_next, &vscmi->event_nodes, list) + { + rt_list_remove(&msge->list); + + rt_spin_unlock_irqrestore(&vscmi->lock, level); + + msge->rx_callback(msge->sdev, msge->event_msg.payload, msge->msg_size); + rt_free(msge); + + level = rt_spin_lock_irqsave(&vscmi->lock); + } + + rt_spin_unlock_irqrestore(&vscmi->lock, level); +} + +static void virtio_scmi_cmd_done(struct rt_virtqueue *vq) +{ + rt_ubase_t level; + struct virtio_scmi *vscmi = vq->vdev->priv; + + level = rt_spin_lock_irqsave(&vscmi->lock); + + rt_virtqueue_read_buf(vq, RT_NULL); + rt_completion_done(&vscmi->done); + + rt_spin_unlock_irqrestore(&vscmi->lock, level); +} + +static void virtio_scmi_event_done(struct rt_virtqueue *vq) +{ + rt_ubase_t level; + void *raw_buffer; + struct virtio_scmi *vscmi = vq->vdev->priv; + + level = rt_spin_lock_irqsave(&vscmi->lock); + + if ((raw_buffer = rt_virtqueue_read_buf(vq, RT_NULL))) + { + struct virtio_scmi_event_msg_ext *msge; + + msge = rt_container_of(raw_buffer, struct virtio_scmi_event_msg_ext, event_msg); + + rt_list_init(&msge->list); + rt_list_insert_before(&vscmi->event_nodes, &msge->list); + + rt_work_submit(&vscmi->event_work, 0); + } + + rt_spin_unlock_irqrestore(&vscmi->lock, level); +} + +static rt_err_t virtio_scmi_vq_init(struct virtio_scmi *vscmi) +{ + rt_size_t vqs_nr; + const char *names[] = + { + "cmd", + "event" + }; + rt_virtqueue_callback cbs[] = + { + &virtio_scmi_cmd_done, + &virtio_scmi_event_done, + }; + + vqs_nr = rt_virtio_has_feature(vscmi->vdev, VIRTIO_SCMI_F_P2A_CHANNELS) ? 
+ RT_ARRAY_SIZE(names) : 1; + + return rt_virtio_virtqueue_install(vscmi->vdev, vqs_nr, vscmi->vqs, names, cbs); +} + +static rt_err_t virtio_scmi_probe(struct rt_virtio_device *vdev) +{ + rt_err_t err; + struct virtio_scmi *vscmi = rt_calloc(1, sizeof(*vscmi)); + + if (!vscmi) + { + return -RT_ENOMEM; + } + + vdev->priv = vscmi; + vscmi->vdev = vdev; + vdev->parent.user_data = vscmi; + + if ((err = virtio_scmi_vq_init(vscmi)) < 0) + { + rt_free(vscmi); + + return err; + } + + if (rt_virtio_has_feature(vscmi->vdev, VIRTIO_SCMI_F_P2A_CHANNELS)) + { + rt_list_init(&vscmi->event_nodes); + rt_work_init(&vscmi->event_work, virtio_scmi_work, vscmi); + } + + rt_spin_lock_init(&vscmi->lock); + rt_completion_init(&vscmi->done); + + system_virtio_scmi = vscmi; + + return RT_EOK; +} + +static const struct rt_virtio_device_id virtio_scmi_ids[] = +{ + { VIRTIO_DEVICE_ID_SCMI, VIRTIO_DEVICE_ANY_ID }, + { /* sentinel */ } +}; + +static struct rt_virtio_driver virtio_scmi_driver = +{ + .ids = virtio_scmi_ids, + .features = + RT_BIT(VIRTIO_SCMI_F_P2A_CHANNELS) + | RT_BIT(VIRTIO_SCMI_F_SHARED_MEMORY) + | RT_BIT(VIRTIO_F_ANY_LAYOUT), + + .probe = virtio_scmi_probe, +}; +RT_VIRTIO_DRIVER_EXPORT(virtio_scmi_driver); diff --git a/components/drivers/virtio/virtio-scsi.c b/components/drivers/virtio/virtio-scsi.c new file mode 100644 index 00000000000..7080f1fd167 --- /dev/null +++ b/components/drivers/virtio/virtio-scsi.c @@ -0,0 +1,322 @@ +/* + * Copyright (c) 2006-2023, RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2023-02-25 GuEe-GUI the first version + */ + +#include +#include +#include + +#define DBG_TAG "virtio.dev.scsi" +#define DBG_LVL DBG_INFO +#include + +#include "virtio_config/virtio-scsi.h" +#include "virtio_internal.h" + +#define QUEUE_CONTROL 0 +#define QUEUE_EVENT 1 +#define QUEUE_REQUEST 2 + +struct virtio_scsi +{ + struct rt_scsi_host parent; + struct rt_virtio_device *vdev; + + struct rt_virtqueue 
*vqs[3]; + struct rt_completion done; + struct rt_spinlock lock; + + rt_uint16_t max_target; + rt_uint32_t max_lun; + struct rt_work hotplug_work; +}; + +static void virtio_scsi_init_cmd_req(struct virtio_scsi *vscsi, + struct rt_scsi_device *sdev, struct rt_scsi_cmd *cmd, + struct virtio_scsi_cmd_req *req) +{ + req->lun[0] = 1; + req->lun[1] = sdev->id; + req->lun[2] = (sdev->lun >> 8) | 0x40; + req->lun[3] = sdev->lun & 0xff; + req->tag = cpu_to_virtio64(vscsi->vdev, (rt_ubase_t)cmd); + req->task_attr = VIRTIO_SCSI_S_SIMPLE; + req->prio = 0; + req->crn = 0; +} + +static rt_err_t virtio_scsi_transfer(struct rt_scsi_device *sdev, + struct rt_scsi_cmd *cmd) +{ + rt_ubase_t level; + struct rt_virtqueue *vq; + struct virtio_scsi_cmd_req req; + struct virtio_scsi_cmd_resp resp; + struct virtio_scsi *vscsi = rt_container_of(sdev->host, struct virtio_scsi, parent); + + virtio_scsi_init_cmd_req(vscsi, sdev, cmd, &req); + + RT_ASSERT(cmd->op_size < sizeof(req.cdb)); + rt_memcpy(&req.cdb, &cmd->op, cmd->op_size); + + vq = vscsi->vqs[QUEUE_REQUEST]; + if (!cmd->data.ptr || !cmd->data.size) + { + rt_virtqueue_wait_prepare(vq, 2); + } + else + { + rt_virtqueue_wait_prepare(vq, 3); + } + + level = rt_spin_lock_irqsave(&vscsi->lock); + + rt_virtqueue_add_outbuf(vq, &req, sizeof(req)); + + if (!cmd->data.ptr || !cmd->data.size) + { + rt_virtqueue_add_inbuf(vq, &resp, sizeof(resp)); + } + else if (!rt_scsi_cmd_is_write(cmd)) + { + rt_virtqueue_add_inbuf(vq, &resp, sizeof(resp)); + rt_virtqueue_add_inbuf(vq, cmd->data.ptr, cmd->data.size); + } + else + { + rt_virtqueue_add_outbuf(vq, cmd->data.ptr, cmd->data.size); + rt_virtqueue_add_inbuf(vq, &resp, sizeof(resp)); + } + + rt_virtqueue_kick(vq); + + rt_spin_unlock_irqrestore(&vscsi->lock, level); + + rt_completion_wait(&vscsi->done, RT_WAITING_FOREVER); + + switch (resp.response) + { + case VIRTIO_SCSI_S_OK: + if (resp.sense_len) + { + rt_memcpy(&cmd->data.request_sense, &resp.sense, + rt_min_t(rt_uint32_t, + 
virtio32_to_cpu(vscsi->vdev, resp.sense_len), + VIRTIO_SCSI_SENSE_SIZE)); + } + return RT_EOK; + + case VIRTIO_SCSI_S_OVERRUN: + LOG_E("%s resp = %s", rt_dm_dev_get_name(sdev->host->dev), "OVERRUN"); + return -RT_ERROR; + + case VIRTIO_SCSI_S_ABORTED: + LOG_E("%s resp = %s", rt_dm_dev_get_name(sdev->host->dev), "ABORTED"); + return -RT_EIO; + + case VIRTIO_SCSI_S_BAD_TARGET: + LOG_E("%s resp = %s", rt_dm_dev_get_name(sdev->host->dev), "BAD_TARGET"); + return -RT_EINVAL; + + case VIRTIO_SCSI_S_RESET: + LOG_E("%s resp = %s", rt_dm_dev_get_name(sdev->host->dev), "RESET"); + return -RT_EIO; + + case VIRTIO_SCSI_S_BUSY: + LOG_E("%s resp = %s", rt_dm_dev_get_name(sdev->host->dev), "BUSY"); + return -RT_EBUSY; + + case VIRTIO_SCSI_S_TRANSPORT_FAILURE: + LOG_E("%s resp = %s", rt_dm_dev_get_name(sdev->host->dev), "TRANSPORT_FAILURE"); + return -RT_EIO; + + case VIRTIO_SCSI_S_TARGET_FAILURE: + LOG_E("%s resp = %s", rt_dm_dev_get_name(sdev->host->dev), "TARGET_FAILURE"); + return -RT_EIO; + + case VIRTIO_SCSI_S_NEXUS_FAILURE: + LOG_E("%s resp = %s", rt_dm_dev_get_name(sdev->host->dev), "NEXUS_FAILURE"); + return -RT_EIO; + + case VIRTIO_SCSI_S_FAILURE: + LOG_E("%s resp = %s", rt_dm_dev_get_name(sdev->host->dev), "FAILURE"); + return -RT_EIO; + + case VIRTIO_SCSI_S_FUNCTION_SUCCEEDED: + return RT_EOK; + + case VIRTIO_SCSI_S_FUNCTION_REJECTED: + LOG_E("%s resp = %s", rt_dm_dev_get_name(sdev->host->dev), "FUNCTION_REJECTED"); + return -RT_EINVAL; + + case VIRTIO_SCSI_S_INCORRECT_LUN: + LOG_E("%s resp = %s", rt_dm_dev_get_name(sdev->host->dev), "INCORRECT_LUN"); + return -RT_EINVAL; + + default: + LOG_E("%s resp = UNKNOWN(%d)", rt_dm_dev_get_name(sdev->host->dev), resp.response); + return -RT_EIO; + } +} + +static struct rt_scsi_ops virtio_scsi_ops = +{ + .transfer = virtio_scsi_transfer, +}; + +static void virtio_scsi_hotplug_work(struct rt_work *work, void *work_data) +{ + struct virtio_scsi *vscsi = work_data; + + if (!rt_scsi_host_unregister(&vscsi->parent)) + { + 
vscsi->parent.max_id = vscsi->max_target; + vscsi->parent.max_lun = vscsi->max_lun; + + rt_scsi_host_register(&vscsi->parent); + } +} + +static void virtio_scsi_request_done(struct rt_virtqueue *vq) +{ + rt_ubase_t level; + struct virtio_scsi *vscsi = vq->vdev->priv; + + level = rt_spin_lock_irqsave(&vscsi->lock); + + rt_virtqueue_read_buf(vq, RT_NULL); + rt_completion_done(&vscsi->done); + + rt_spin_unlock_irqrestore(&vscsi->lock, level); +} + +static void virtio_scsi_config_changed(struct rt_virtio_device *vdev) +{ + struct virtio_scsi *vscsi = vdev->priv; + + rt_virtio_read_config(vdev, struct virtio_scsi_config, + max_target, &vscsi->max_target); + rt_virtio_read_config(vdev, struct virtio_scsi_config, + max_lun, &vscsi->max_lun); + + if (vscsi->parent.max_id != vscsi->max_target || + vscsi->parent.max_lun != vscsi->max_lun) + { + rt_work_submit(&vscsi->hotplug_work, 0); + } +} + +static rt_err_t virtio_scsi_vq_init(struct virtio_scsi *vscsi) +{ + const char *names[] = + { + RT_NULL, /* ctrl */ + RT_NULL, /* event */ + "req", + }; + rt_virtqueue_callback cbs[] = + { + RT_NULL, + RT_NULL, + &virtio_scsi_request_done, + }; + + return rt_virtio_virtqueue_install(vscsi->vdev, RT_ARRAY_SIZE(vscsi->vqs), + vscsi->vqs, names, cbs); +} + +static void virtio_scsi_vq_finit(struct virtio_scsi *vscsi) +{ + if (vscsi->vqs[0]) + { + rt_virtio_virtqueue_release(vscsi->vdev); + } +} + +static rt_err_t virtio_scsi_probe(struct rt_virtio_device *vdev) +{ + rt_err_t err; + rt_uint32_t cdb_size; + struct virtio_scsi *vscsi = rt_calloc(1, sizeof(*vscsi)); + + if (!vscsi) + { + return -RT_ENOMEM; + } + + vdev->priv = vscsi; + vscsi->vdev = vdev; + vdev->parent.user_data = vscsi; + vscsi->parent.dev = &vdev->parent; + vscsi->parent.ops = &virtio_scsi_ops; + + rt_virtio_read_config(vdev, struct virtio_scsi_config, + max_target, &vscsi->max_target); + rt_virtio_read_config(vdev, struct virtio_scsi_config, + max_lun, &vscsi->max_lun); + + vscsi->parent.max_id = vscsi->max_target; + 
vscsi->parent.max_lun = vscsi->max_lun; + + if ((err = virtio_scsi_vq_init(vscsi))) + { + goto _fail; + } + + cdb_size = RT_ALIGN(sizeof(((struct rt_scsi_cmd *)RT_NULL)->op), 32); + rt_virtio_write_config(vdev, struct virtio_scsi_config, cdb_size, cdb_size); + + rt_completion_init(&vscsi->done); + rt_spin_lock_init(&vscsi->lock); + + rt_work_init(&vscsi->hotplug_work, virtio_scsi_hotplug_work, vscsi); + + if ((err = rt_scsi_host_register(&vscsi->parent))) + { + goto _fail; + } + + return RT_EOK; + +_fail: + virtio_scsi_vq_finit(vscsi); + rt_free(vscsi); + + return err; +} + +static rt_err_t virtio_scsi_remove(struct rt_virtio_device *vdev) +{ + struct virtio_scsi *vscsi = vdev->parent.user_data; + + rt_scsi_host_unregister(&vscsi->parent); + + virtio_scsi_vq_finit(vscsi); + rt_free(vscsi); + + return RT_EOK; +} + +static const struct rt_virtio_device_id virtio_scsi_ids[] = +{ + { VIRTIO_DEVICE_ID_SCSI, VIRTIO_DEVICE_ANY_ID }, + { /* sentinel */ } +}; + +static struct rt_virtio_driver virtio_scsi_driver = +{ + .ids = virtio_scsi_ids, + .features = + RT_BIT(VIRTIO_F_ANY_LAYOUT), + + .probe = virtio_scsi_probe, + .remove = virtio_scsi_remove, + .config_changed = virtio_scsi_config_changed, +}; +RT_VIRTIO_DRIVER_EXPORT(virtio_scsi_driver); diff --git a/components/drivers/virtio/virtio.c b/components/drivers/virtio/virtio.c index a7937dfaeea..083922eaf44 100644 --- a/components/drivers/virtio/virtio.c +++ b/components/drivers/virtio/virtio.c @@ -6,468 +6,356 @@ * Change Logs: * Date Author Notes * 2021-11-11 GuEe-GUI the first version - * 2023-10-12 fangjianzhou support SDL2 + * 2023-04-13 ErikChan add virtio bus */ +#include #include -#include -#include +#define DBG_TAG "rtdm.virtio" +#define DBG_LVL DBG_INFO +#include -rt_inline void _virtio_dev_check(struct virtio_device *dev) -{ - RT_ASSERT(dev != RT_NULL); - RT_ASSERT(dev->mmio_config != RT_NULL); -} +#include -void virtio_reset_device(struct virtio_device *dev) +#include +#include + +void 
rt_virtio_device_ready(struct rt_virtio_device *vdev) { - _virtio_dev_check(dev); + rt_uint8_t status; - dev->mmio_config->status = 0; -} + RT_ASSERT(vdev != RT_NULL); + RT_ASSERT(vdev->trans != RT_NULL); -void virtio_status_acknowledge_driver(struct virtio_device *dev) -{ - _virtio_dev_check(dev); + if (!vdev->trans->get_status(vdev, &status)) + { + if (status & VIRTIO_STATUS_DRIVER_OK) + { + LOG_W("%s device driver is OK yet", rt_dm_dev_get_name(&vdev->parent)); + } - dev->mmio_config->status |= VIRTIO_STATUS_ACKNOWLEDGE | VIRTIO_STATUS_DRIVER; + vdev->trans->set_status(vdev, status | VIRTIO_STATUS_DRIVER_OK); + } } -void virtio_status_driver_ok(struct virtio_device *dev) +void rt_virtio_device_reset(struct rt_virtio_device *vdev) { - _virtio_dev_check(dev); + RT_ASSERT(vdev != RT_NULL); + RT_ASSERT(vdev->trans != RT_NULL); - dev->mmio_config->status |= VIRTIO_STATUS_FEATURES_OK | VIRTIO_STATUS_DRIVER_OK; + vdev->trans->reset(vdev); } -void virtio_interrupt_ack(struct virtio_device *dev) +void rt_virtio_device_config_changed(struct rt_virtio_device *vdev) { - rt_uint32_t status; + struct rt_virtio_driver *vdrv; - _virtio_dev_check(dev); + RT_ASSERT(vdev != RT_NULL); + RT_ASSERT(vdev->parent.drv != RT_NULL); - status = dev->mmio_config->interrupt_status; + vdrv = rt_container_of(vdev->parent.drv, struct rt_virtio_driver, parent); - if (status != 0) + if (vdrv->config_changed) { - dev->mmio_config->interrupt_ack = status; + vdrv->config_changed(vdev); } } -rt_bool_t virtio_has_feature(struct virtio_device *dev, rt_uint32_t feature_bit) +void rt_virtio_add_status(struct rt_virtio_device *vdev, rt_uint32_t status) { - _virtio_dev_check(dev); + rt_uint8_t cur_status; + + RT_ASSERT(vdev != RT_NULL); + RT_ASSERT(vdev->trans != RT_NULL); - return !!(dev->mmio_config->device_features & (1UL << feature_bit)); + if (!vdev->trans->get_status(vdev, &cur_status)) + { + vdev->trans->set_status(vdev, cur_status | status); + } } -rt_err_t virtio_queues_alloc(struct virtio_device *dev, rt_size_t queues_num)
+rt_bool_t rt_virtio_has_status(struct rt_virtio_device *vdev, rt_uint8_t status_bit) { - _virtio_dev_check(dev); + rt_uint8_t status; - dev->queues = rt_malloc(sizeof(struct virtq) * queues_num); + RT_ASSERT(vdev != RT_NULL); + RT_ASSERT(vdev->trans != RT_NULL); - if (dev->queues != RT_NULL) + if (!vdev->trans->get_status(vdev, &status)) { - dev->queues_num = queues_num; - - return RT_EOK; + return (status & RT_BIT(status_bit)) != 0; } - return -RT_ENOMEM; + return RT_FALSE; } -void virtio_queues_free(struct virtio_device *dev) +rt_bool_t rt_virtio_has_feature(struct rt_virtio_device *vdev, rt_uint32_t feature_bit) { - if (dev->queues != RT_NULL) - { - dev->queues_num = 0; - rt_free(dev->queues); - } + RT_ASSERT(vdev != RT_NULL); + + return (vdev->features & RT_BIT(feature_bit)) != 0; } -rt_err_t virtio_queue_init(struct virtio_device *dev, rt_uint32_t queue_index, rt_size_t ring_size) +rt_err_t rt_virtio_get_config(struct rt_virtio_device *vdev, rt_uint32_t offset, void *dst, int length) { - int i; - void *pages; - rt_size_t pages_total_size; - struct virtq *queue; + RT_ASSERT(vdev != RT_NULL); + RT_ASSERT(vdev->trans != RT_NULL); - _virtio_dev_check(dev); + return vdev->trans->get_config(vdev, offset, dst, length); +} - RT_ASSERT(queue_index < dev->queues_num); - /* ring_size is power of 2 */ - RT_ASSERT(ring_size > 0); - RT_ASSERT(((ring_size - 1) & ring_size) == 0); +rt_err_t rt_virtio_set_config(struct rt_virtio_device *vdev, rt_uint32_t offset, const void *src, int length) +{ + RT_ASSERT(vdev != RT_NULL); + RT_ASSERT(vdev->trans != RT_NULL); - /* Select the queue first, then read queue_num_max */ - dev->mmio_config->queue_sel = queue_index; - RT_ASSERT(dev->mmio_config->queue_num_max > 0); - RT_ASSERT(ring_size <= dev->mmio_config->queue_num_max); + return vdev->trans->set_config(vdev, offset, src, length); +} - queue = &dev->queues[queue_index]; - pages_total_size = VIRTIO_PAGE_ALIGN( - VIRTQ_DESC_TOTAL_SIZE(ring_size) + VIRTQ_AVAIL_TOTAL_SIZE(ring_size)) + 
VIRTQ_USED_TOTAL_SIZE(ring_size); +rt_err_t rt_virtio_virtqueue_install(struct rt_virtio_device *vdev, int vqs_nr, + struct rt_virtqueue *vqs[], const char *names[], rt_virtqueue_callback cbs[]) +{ + RT_ASSERT(vdev != RT_NULL); + RT_ASSERT(vdev->trans != RT_NULL); + RT_ASSERT(vqs_nr >= 0); + RT_ASSERT(vqs != RT_NULL); + RT_ASSERT(names != RT_NULL); + RT_ASSERT(cbs != RT_NULL); + + return vdev->trans->install_vqs(vdev, vqs_nr, vqs, names, cbs); +} - pages = rt_malloc_align(pages_total_size, VIRTIO_PAGE_SIZE); +rt_err_t rt_virtio_virtqueue_release(struct rt_virtio_device *vdev) +{ + RT_ASSERT(vdev != RT_NULL); + RT_ASSERT(vdev->trans != RT_NULL); - if (pages == RT_NULL) - { - return -RT_ENOMEM; - } + return vdev->trans->release_vqs(vdev); +} - queue->free = rt_malloc(sizeof(rt_bool_t) * ring_size); +rt_err_t rt_virtio_virtqueue_control(struct rt_virtio_device *vdev, rt_uint32_t cfg, void *data) +{ + RT_ASSERT(vdev != RT_NULL); + RT_ASSERT(vdev->trans != RT_NULL); - if (queue->free == RT_NULL) - { - rt_free_align(pages); - return -RT_ENOMEM; - } + return vdev->trans->control_vqs(vdev, cfg, data); +} - rt_memset(pages, 0, pages_total_size); +static const char *const virtio_device_id_name[] = +{ + [VIRTIO_DEVICE_ID_INVALID] = "invalid", + [VIRTIO_DEVICE_ID_NET] = "net", + [VIRTIO_DEVICE_ID_BLOCK] = "blk", + [VIRTIO_DEVICE_ID_CONSOLE] = "console", + [VIRTIO_DEVICE_ID_RNG] = "rng", + [VIRTIO_DEVICE_ID_RPMSG] = "rpmsg", + [VIRTIO_DEVICE_ID_SCSI] = "scsi", + [VIRTIO_DEVICE_ID_9P] = "9p", + [VIRTIO_DEVICE_ID_RPROC_SERIAL] = "rproc-serial", + [VIRTIO_DEVICE_ID_GPU] = "gpu", + [VIRTIO_DEVICE_ID_INPUT] = "input", + [VIRTIO_DEVICE_ID_CRYPTO] = "crypto", + [VIRTIO_DEVICE_ID_IOMMU] = "iommu", + [VIRTIO_DEVICE_ID_AUDIO] = "sound", + [VIRTIO_DEVICE_ID_SCMI] = "scmi", + + [VIRTIO_DEVICE_ID_MAX] = RT_NULL, +}; - dev->mmio_config->guest_page_size = VIRTIO_PAGE_SIZE; - dev->mmio_config->queue_num = ring_size; - dev->mmio_config->queue_align = VIRTIO_PAGE_SIZE; - 
dev->mmio_config->queue_pfn = VIRTIO_VA2PA(pages) >> VIRTIO_PAGE_SHIFT; +const char *rt_virtio_device_id_name(struct rt_virtio_device *vdev) +{ + int device; + const char *name = RT_NULL; - queue->num = ring_size; - queue->desc = (struct virtq_desc *)((rt_ubase_t)pages); - queue->avail = (struct virtq_avail *)(((rt_ubase_t)pages) + VIRTQ_DESC_TOTAL_SIZE(ring_size)); - queue->used = (struct virtq_used *)VIRTIO_PAGE_ALIGN( - (rt_ubase_t)&queue->avail->ring[ring_size] + VIRTQ_AVAIL_RES_SIZE); + RT_ASSERT(vdev != RT_NULL); - queue->used_idx = 0; + device = vdev->id.device; - /* All descriptors start out unused */ - for (i = 0; i < ring_size; ++i) + if (device < RT_ARRAY_SIZE(virtio_device_id_name)) { - queue->free[i] = RT_TRUE; - } - - queue->free_count = ring_size; - - return RT_EOK; -} + name = virtio_device_id_name[device]; -void virtio_queue_destroy(struct virtio_device *dev, rt_uint32_t queue_index) -{ - struct virtq *queue; + if (!name) + { + name = "nosys"; + } - _virtio_dev_check(dev); + return name; + } - RT_ASSERT(queue_index < dev->queues_num); + return name; +} - /* Select the queue first, then read queue_num_max */ - dev->mmio_config->queue_sel = queue_index; - RT_ASSERT(dev->mmio_config->queue_num_max > 0); +static struct rt_bus virtio_bus; - queue = &dev->queues[queue_index]; +rt_err_t rt_virtio_driver_register(struct rt_virtio_driver *vdrv) +{ + const struct rt_virtio_device_id *id; - RT_ASSERT(queue->num > 0); + RT_ASSERT(vdrv != RT_NULL); - rt_free(queue->free); - rt_free_align((void *)queue->desc); + id = vdrv->ids; + while (id->device && (id + 1)->device) + { + ++id; + } - dev->mmio_config->queue_pfn = RT_NULL; + vdrv->parent.bus = &virtio_bus; +#if RT_NAME_MAX > 0 + rt_strcpy(vdrv->parent.parent.name, virtio_device_id_name[id->device]); +#else + vdrv->parent.parent.name = virtio_device_id_name[id->device]; +#endif - queue->num = 0; - queue->desc = RT_NULL; - queue->avail = RT_NULL; - queue->used = RT_NULL; + return 
rt_driver_register(&vdrv->parent); } -void virtio_queue_notify(struct virtio_device *dev, rt_uint32_t queue_index) +static struct rt_dm_ida virtio_ida = RT_DM_IDA_INIT(CUSTOM); + +rt_err_t rt_virtio_device_register(struct rt_virtio_device *vdev) { - _virtio_dev_check(dev); + RT_ASSERT(vdev != RT_NULL); + RT_ASSERT(vdev->trans != RT_NULL); - dev->mmio_config->queue_notify = queue_index; -} + if ((vdev->idx = rt_dm_ida_alloc(&virtio_ida)) < 0) + { + return -RT_EFULL; + } -void virtio_submit_chain(struct virtio_device *dev, rt_uint32_t queue_index, rt_uint16_t desc_index) -{ - rt_size_t ring_size; - struct virtq *queue; + rt_dm_dev_set_name(&vdev->parent, "virtio%u-%s", vdev->idx, + virtio_device_id_name[vdev->id.device]); - _virtio_dev_check(dev); + rt_list_init(&vdev->vq_node); - queue = &dev->queues[queue_index]; - ring_size = queue->num; + rt_spin_lock_init(&vdev->vq_lock); - /* Tell the device the first index in our chain of descriptors */ - queue->avail->ring[queue->avail->idx % ring_size] = desc_index; - rt_hw_dsb(); + rt_virtio_device_reset(vdev); + rt_virtio_add_status(vdev, VIRTIO_STATUS_ACKNOWLEDGE); - /* Tell the device another avail ring entry is available */ - queue->avail->idx++; - rt_hw_dsb(); + return rt_bus_add_device(&virtio_bus, &vdev->parent); } -rt_uint16_t virtio_alloc_desc(struct virtio_device *dev, rt_uint32_t queue_index) +static rt_bool_t virtio_match(rt_driver_t drv, rt_device_t dev) { - int i; - struct virtq *queue; + const struct rt_virtio_device_id *id; + struct rt_virtio_driver *vdrv = rt_container_of(drv, struct rt_virtio_driver, parent); + struct rt_virtio_device *vdev = rt_container_of(dev, struct rt_virtio_device, parent); - _virtio_dev_check(dev); - - RT_ASSERT(queue_index < dev->queues_num); - - queue = &dev->queues[queue_index]; - - if (queue->free_count > 0) + for (id = vdrv->ids; id->device; ++id) { - rt_size_t ring_size = queue->num; - - for (i = 0; i < ring_size; ++i) + if (id->device != vdev->id.device && id->device != 
VIRTIO_DEVICE_ANY_ID) { - if (queue->free[i]) - { - queue->free[i] = RT_FALSE; - queue->free_count--; + continue; + } - return (rt_uint16_t)i; - } + if (id->vendor == VIRTIO_DEVICE_ANY_ID || id->vendor == vdev->id.vendor) + { + return RT_TRUE; } } - return VIRTQ_INVALID_DESC_ID; + return RT_FALSE; } -void virtio_free_desc(struct virtio_device *dev, rt_uint32_t queue_index, rt_uint16_t desc_index) +static rt_err_t virtio_probe(rt_device_t dev) { - struct virtq *queue; - - _virtio_dev_check(dev); + rt_err_t err; + rt_uint8_t status; + rt_uint64_t device_features, driver_features; + struct rt_virtio_driver *vdrv = rt_container_of(dev->drv, struct rt_virtio_driver, parent); + struct rt_virtio_device *vdev = rt_container_of(dev, struct rt_virtio_device, parent); - queue = &dev->queues[queue_index]; + rt_virtio_add_status(vdev, VIRTIO_STATUS_DRIVER); - RT_ASSERT(queue_index < dev->queues_num); - RT_ASSERT(!queue->free[desc_index]); + vdev->trans->get_features(vdev, &device_features); - queue->desc[desc_index].addr = 0; - queue->desc[desc_index].len = 0; - queue->desc[desc_index].flags = 0; - queue->desc[desc_index].next = 0; + driver_features = vdrv->features; - queue->free[desc_index] = RT_TRUE; - - queue->free_count++; -} + for (int i = VIRTIO_TRANSPORT_F_START; i <= VIRTIO_TRANSPORT_F_END; ++i) + { + rt_uint64_t flag = RT_BIT_ULL(i); -rt_err_t virtio_alloc_desc_chain(struct virtio_device *dev, rt_uint32_t queue_index, rt_size_t count, - rt_uint16_t *indexs) -{ - int i, j; + if (device_features & flag) + { + driver_features |= flag; + } + } - _virtio_dev_check(dev); + vdev->features = driver_features & device_features; - RT_ASSERT(indexs != RT_NULL); + err = vdev->trans->set_features(vdev); - if (dev->queues[queue_index].free_count < count) + if (err) { - return -RT_ERROR; + goto _err; } - for (i = 0; i < count; ++i) + if (rt_virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) { - indexs[i] = virtio_alloc_desc(dev, queue_index); + rt_virtio_add_status(vdev, 
VIRTIO_STATUS_FEATURES_OK); + vdev->trans->get_status(vdev, &status); - if (indexs[i] == VIRTQ_INVALID_DESC_ID) + if (!(status & VIRTIO_STATUS_FEATURES_OK)) { - for (j = 0; j < i; ++j) - { - virtio_free_desc(dev, queue_index, indexs[j]); - } + LOG_E("%s device refuses features: %x", rt_dm_dev_get_name(dev), status); - return -RT_ERROR; + err = -RT_EIO; + goto _err; } } - return RT_EOK; -} - -void virtio_free_desc_chain(struct virtio_device *dev, rt_uint32_t queue_index, rt_uint16_t desc_index) -{ - rt_uint16_t flags, next; - struct virtq_desc *desc; - - _virtio_dev_check(dev); + err = vdrv->probe(vdev); - desc = &dev->queues[queue_index].desc[0]; - - for (;;) + if (err) { - flags = desc[desc_index].flags; - next = desc[desc_index].next; - - virtio_free_desc(dev, queue_index, desc_index); - - if (flags & VIRTQ_DESC_F_NEXT) - { - desc_index = next; - } - else - { - break; - } + goto _err; } -} - -void virtio_fill_desc(struct virtio_device *dev, rt_uint32_t queue_index, rt_uint16_t desc_index, - rt_uint64_t addr, rt_uint32_t len, rt_uint16_t flags, rt_uint16_t next) -{ - struct virtq_desc *desc; - - _virtio_dev_check(dev); - desc = &dev->queues[queue_index].desc[desc_index]; + vdev->trans->get_status(vdev, &status); - desc->addr = addr; - desc->len = len; - desc->flags = flags; - desc->next = next; -} - -#ifdef RT_USING_SMART -#ifdef RT_USING_VIRTIO_GPU + if (!(status & VIRTIO_STATUS_DRIVER_OK)) + { + rt_virtio_device_ready(vdev); + } -#include -#include "drivers/lcd.h" -#include -#include + return RT_EOK; -static struct rt_device_graphic_info _graphic_info; -static struct rt_device_rect_info _rect_info; -static struct rt_device _fb = {}; -static rt_device_t _gpu_dev = RT_NULL; +_err: + rt_virtio_add_status(vdev, VIRTIO_STATUS_FAILED); -static rt_err_t fb_open(rt_device_t dev, rt_uint16_t oflag) -{ - return RT_EOK; + return err; } -static rt_err_t fb_close(rt_device_t dev) +static rt_err_t virtio_remove(rt_device_t dev) { - return RT_EOK; -} + struct rt_virtio_driver 
*vdrv = rt_container_of(dev->drv, struct rt_virtio_driver, parent); + struct rt_virtio_device *vdev = rt_container_of(dev, struct rt_virtio_device, parent); -static rt_err_t fb_control(rt_device_t dev, int cmd, void *args) -{ - switch(cmd) + if (vdrv && vdrv->remove) { - case FBIOPAN_DISPLAY: - { - rt_hw_cpu_dcache_clean(_graphic_info.framebuffer, _graphic_info.smem_len); - rt_device_control(_gpu_dev, RTGRAPHIC_CTRL_RECT_UPDATE, &_rect_info); - break; - } - case FBIOGET_FSCREENINFO: - { - struct fb_fix_screeninfo *info = (struct fb_fix_screeninfo *)args; - strncpy(info->id, "lcd", sizeof(info->id)); - info->smem_len = _graphic_info.smem_len; - break; - } - case FBIOGET_VSCREENINFO: - { - struct fb_var_screeninfo *info = (struct fb_var_screeninfo *)args; - info->bits_per_pixel = _graphic_info.bits_per_pixel; - info->xres = _graphic_info.width; - info->yres = _graphic_info.height; - info->yres_virtual = _graphic_info.height; - info->xres_virtual = _graphic_info.width; - info->transp.offset = 24; - info->transp.length = 8; - info->red.offset = 0; - info->red.length = 8; - info->green.offset = 8; - info->green.length = 8; - info->blue.offset = 16; - info->blue.length = 8; - break; - } - case RT_FIOMMAP2: - { - struct dfs_mmap2_args *mmap2 = (struct dfs_mmap2_args *)args; - - if(mmap2) - { - mmap2->ret = lwp_map_user_phy(lwp_self(), RT_NULL, rt_kmem_v2p(_graphic_info.framebuffer), mmap2->length, 1); - } - else - { - return -EIO; - } - - break; - } - default: - break; + vdrv->remove(vdev); } + rt_virtio_add_status(vdev, VIRTIO_STATUS_ACKNOWLEDGE); + + rt_dm_ida_free(&virtio_ida, vdev->idx); + return RT_EOK; } -#ifdef RT_USING_DEVICE_OPS -const static struct rt_device_ops fb_ops = +static struct rt_bus virtio_bus = { - RT_NULL, - fb_open, - fb_close, - RT_NULL, - RT_NULL, - fb_control + .name = "virtio", + .match = virtio_match, + .probe = virtio_probe, + .remove = virtio_remove, }; -#endif -static int fb_init() +static int virtio_bus_init(void) { - _gpu_dev = 
rt_device_find("virtio-gpu0"); - - if(_gpu_dev == RT_NULL) - { - return -RT_ERROR; - } - - if(_gpu_dev != RT_NULL && rt_device_open(_gpu_dev, 0) == RT_EOK) - { - rt_memset(&_graphic_info, 0, sizeof(_graphic_info)); - rt_memset(&_rect_info, 0, sizeof(_rect_info)); - rt_device_control(_gpu_dev, VIRTIO_DEVICE_CTRL_GPU_SET_PRIMARY, RT_NULL); - rt_device_control(_gpu_dev, VIRTIO_DEVICE_CTRL_GPU_CREATE_2D, (void *)RTGRAPHIC_PIXEL_FORMAT_RGB888); - rt_device_control(_gpu_dev, RTGRAPHIC_CTRL_GET_INFO, &_graphic_info); - _rect_info.x = 0; - _rect_info.y = 0; - _rect_info.width = _graphic_info.width; - _rect_info.height = _graphic_info.height; - memset(_graphic_info.framebuffer, 0xff, _graphic_info.smem_len); - rt_device_control(_gpu_dev, RTGRAPHIC_CTRL_RECT_UPDATE, &_rect_info); - } - - if(rt_device_find("fb0") != RT_NULL) - { - rt_kprintf("a device named fb0 already exists\n"); - return -RT_ERROR; - } + rt_bus_register(&virtio_bus); - _fb.type = RT_Device_Class_Miscellaneous; - -#ifdef RT_USING_DEVICE_OPS - _fb.ops = &fb_ops; -#else - _fb.init = RT_NULL; - _fb.open = fb_open; - _fb.close = fb_close; - _fb.read = RT_NULL; - _fb.write = RT_NULL; - _fb.control = fb_control; - _fb.user_data = RT_NULL; -#endif - - rt_device_register(&_fb, "fb0", RT_DEVICE_FLAG_RDWR); - return RT_EOK; + return 0; } -INIT_COMPONENT_EXPORT(fb_init); -#endif -#endif +INIT_CORE_EXPORT(virtio_bus_init); diff --git a/components/drivers/virtio/virtio_config/virtio-9p.h b/components/drivers/virtio/virtio_config/virtio-9p.h new file mode 100644 index 00000000000..be870d9984b --- /dev/null +++ b/components/drivers/virtio/virtio_config/virtio-9p.h @@ -0,0 +1,26 @@ +/* + * Copyright (c) 2006-2023, RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2023-02-25 GuEe-GUI the first version + */ + +#ifndef __VIRTIO_9P_H__ +#define __VIRTIO_9P_H__ + +#include + +#define VIRTIO_9P_F_MOUNT_TAG 0 + +rt_packed(struct virtio_9p_config +{ + /* length of 
the tag name */ + rt_le16_t tag_len; + /* non-NULL terminated tag name */ + rt_uint8_t tag[]; +}); + +#endif /* __VIRTIO_9P_H__ */ diff --git a/components/drivers/virtio/virtio_config/virtio-blk.h b/components/drivers/virtio/virtio_config/virtio-blk.h new file mode 100644 index 00000000000..045aa780874 --- /dev/null +++ b/components/drivers/virtio/virtio_config/virtio-blk.h @@ -0,0 +1,129 @@ +/* + * Copyright (c) 2006-2023, RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2021-9-16 GuEe-GUI the first version + * 2021-11-11 GuEe-GUI using virtio common interface + * 2023-02-25 GuEe-GUI using virtio dm + */ + +#ifndef __VIRTIO_BLK_H__ +#define __VIRTIO_BLK_H__ + +#include + +#define VIRTIO_BLK_F_SIZE_MAX 1 /* Indicates maximum segment size */ +#define VIRTIO_BLK_F_SEG_MAX 2 /* Indicates maximum # of segments */ +#define VIRTIO_BLK_F_GEOMETRY 4 /* Legacy geometry available */ +#define VIRTIO_BLK_F_RO 5 /* Disk is read-only */ +#define VIRTIO_BLK_F_BLK_SIZE 6 /* Block size of disk is available*/ +#define VIRTIO_BLK_F_SCSI 7 /* Supports scsi command passthru */ +#define VIRTIO_BLK_F_FLUSH 9 /* Flush command supported */ +#define VIRTIO_BLK_F_TOPOLOGY 10 /* Topology information is available */ +#define VIRTIO_BLK_F_CONFIG_WCE 11 /* Writeback mode available in config */ +#define VIRTIO_BLK_F_MQ 12 /* Support more than one vq */ +#define VIRTIO_BLK_F_DISCARD 13 /* DISCARD is supported */ +#define VIRTIO_BLK_F_WRITE_ZEROES 14 /* WRITE ZEROES is supported */ +#define VIRTIO_BLK_F_SECURE_ERASE 16 /* Secure Erase is supported */ + +#define VIRTIO_BLK_T_IN 0 /* Read the blk */ +#define VIRTIO_BLK_T_OUT 1 /* Write the blk */ +#define VIRTIO_BLK_T_SCSI_CMD 2 +#define VIRTIO_BLK_T_SCSI_CMD_OUT 3 +#define VIRTIO_BLK_T_FLUSH 4 +#define VIRTIO_BLK_T_FLUSH_OUT 5 +#define VIRTIO_BLK_T_GET_ID 8 /* Get device ID command */ +#define VIRTIO_BLK_T_DISCARD 11 /* Discard command */ +#define VIRTIO_BLK_T_WRITE_ZEROES 13 /* 
Write zeroes command */ +#define VIRTIO_BLK_T_SECURE_ERASE 14 /* Secure erase command */ +#define VIRTIO_BLK_T_ZONE_APPEND 15 /* Zone append command */ +#define VIRTIO_BLK_T_ZONE_REPORT 16 /* Report zones command */ +#define VIRTIO_BLK_T_ZONE_OPEN 18 /* Open zone command */ +#define VIRTIO_BLK_T_ZONE_CLOSE 20 /* Close zone command */ +#define VIRTIO_BLK_T_ZONE_FINISH 22 /* Finish zone command */ +#define VIRTIO_BLK_T_ZONE_RESET 24 /* Reset zone command */ +#define VIRTIO_BLK_T_ZONE_RESET_ALL 26 /* Reset All zones command */ + +struct virtio_blk_req +{ + rt_le32_t type; + rt_le32_t ioprio; + rt_le64_t sector; + +#define VIRTIO_BLK_S_OK 0 +#define VIRTIO_BLK_S_IOERR 1 +#define VIRTIO_BLK_S_UNSUPP 2 +#define VIRTIO_BLK_S_ZONE_INVALID_CMD 3 +#define VIRTIO_BLK_S_ZONE_UNALIGNED_WP 4 +#define VIRTIO_BLK_S_ZONE_OPEN_RESOURCE 5 +#define VIRTIO_BLK_S_ZONE_ACTIVE_RESOURCE 6 + /* + * next: + * rt_uint8_t data[]; + * rt_uint8_t status; + */ +}; + +struct virtio_blk_discard_write_zeroes +{ + rt_le64_t sector; + rt_le32_t num_sectors; + + /* + * unmap:1 + * reserved:31 + */ + rt_le32_t flags; +}; + +rt_packed(struct virtio_blk_config +{ + /* The capacity (in 512-byte sectors). 
*/ + rt_le64_t capacity; + /* The maximum segment size (if VIRTIO_BLK_F_SIZE_MAX) */ + rt_le32_t size_max; + /* The maximum number of segments (if VIRTIO_BLK_F_SEG_MAX) */ + rt_le32_t seg_max; + + /* Geometry of the device (if VIRTIO_BLK_F_GEOMETRY) */ + struct virtio_blk_geometry + { + rt_le16_t cylinders; + rt_uint8_t heads; + rt_uint8_t sectors; + } geometry; + + /* Block size of device (if VIRTIO_BLK_F_BLK_SIZE) */ + rt_le32_t blk_size; + + struct virtio_blk_topology + { + /* # Of logical blocks per physical block (log2) */ + rt_uint8_t physical_block_exp; + /* Offset of first aligned logical block */ + rt_uint8_t alignment_offset; + /* Suggested minimum I/O size in blocks */ + rt_le16_t min_io_size; + /* Optimal (suggested maximum) I/O size in blocks */ + rt_le32_t opt_io_size; + } topology; + + rt_uint8_t writeback; + rt_uint8_t unused0; + rt_uint16_t num_queues; + rt_le32_t max_discard_sectors; + rt_le32_t max_discard_seg; + rt_le32_t discard_sector_alignment; + rt_le32_t max_write_zeroes_sectors; + rt_le32_t max_write_zeroes_seg; + rt_uint8_t write_zeroes_may_unmap; + rt_uint8_t unused1[3]; + rt_le32_t max_secure_erase_sectors; + rt_le32_t max_secure_erase_seg; + rt_le32_t secure_erase_sector_alignment; +}); + +#endif /* __VIRTIO_BLK_H__ */ diff --git a/components/drivers/virtio/virtio_config/virtio-console.h b/components/drivers/virtio/virtio_config/virtio-console.h new file mode 100644 index 00000000000..cf51ab41a45 --- /dev/null +++ b/components/drivers/virtio/virtio_config/virtio-console.h @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2006-2023, RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2021-11-11 GuEe-GUI the first version + * 2023-02-25 GuEe-GUI using virtio dm + */ + +#ifndef __VIRTIO_CONSOLE_H__ +#define __VIRTIO_CONSOLE_H__ + +#include + +#define VIRTIO_CONSOLE_F_SIZE 0 /* Does host provide console size? */ +#define VIRTIO_CONSOLE_F_MULTIPORT 1 /* Does host provide multiple ports? 
*/ +#define VIRTIO_CONSOLE_F_EMERG_WRITE 2 /* Does host support emergency write? */ + +rt_packed(struct virtio_console_config +{ + rt_le16_t cols; + rt_le16_t rows; + rt_le32_t max_nr_ports; + rt_le32_t emerg_wr; +}); + +rt_packed(struct virtio_console_resize +{ + rt_le16_t cols; + rt_le16_t rows; +}); + +rt_packed(struct virtio_console_control +{ +#define VIRTIO_CONSOLE_BAD_ID (~(rt_uint32_t)0) + rt_le32_t id; /* Port number */ + +#define VIRTIO_CONSOLE_DEVICE_READY 0 +#define VIRTIO_CONSOLE_PORT_ADD 1 +#define VIRTIO_CONSOLE_PORT_REMOVE 2 +#define VIRTIO_CONSOLE_PORT_READY 3 +#define VIRTIO_CONSOLE_CONSOLE_PORT 4 +#define VIRTIO_CONSOLE_RESIZE 5 +#define VIRTIO_CONSOLE_PORT_OPEN 6 +#define VIRTIO_CONSOLE_PORT_NAME 7 + rt_le16_t event; /* The kind of control event */ + rt_le16_t value; /* Extra information for the event */ +}); + +rt_packed(struct virtio_console_control_ext +{ + struct virtio_console_control ctrl; + + union + { + struct virtio_console_resize resize; + rt_uint8_t data[64]; + }; +}); + +#endif /* __VIRTIO_CONSOLE_H__ */ diff --git a/components/drivers/virtio/virtio_config/virtio-crypto.h b/components/drivers/virtio/virtio_config/virtio-crypto.h new file mode 100644 index 00000000000..49df7ef96c5 --- /dev/null +++ b/components/drivers/virtio/virtio_config/virtio-crypto.h @@ -0,0 +1,527 @@ +/* + * Copyright (c) 2006-2023, RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2023-02-25 GuEe-GUI the first version + */ + +#ifndef __VIRTIO_CRYPTO_H__ +#define __VIRTIO_CRYPTO_H__ + +#include + +#define VIRTIO_CRYPTO_SERVICE_CIPHER 0 +#define VIRTIO_CRYPTO_SERVICE_HASH 1 +#define VIRTIO_CRYPTO_SERVICE_MAC 2 +#define VIRTIO_CRYPTO_SERVICE_AEAD 3 +#define VIRTIO_CRYPTO_SERVICE_AKCIPHER 4 + +#define VIRTIO_CRYPTO_OPCODE(service, op) (((VIRTIO_CRYPTO_SERVICE_##service) << 8) | (op)) + +struct virtio_crypto_ctrl_header +{ +#define VIRTIO_CRYPTO_CIPHER_CREATE_SESSION VIRTIO_CRYPTO_OPCODE(CIPHER, 
0x02) +#define VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION VIRTIO_CRYPTO_OPCODE(CIPHER, 0x03) +#define VIRTIO_CRYPTO_HASH_CREATE_SESSION VIRTIO_CRYPTO_OPCODE(HASH, 0x02) +#define VIRTIO_CRYPTO_HASH_DESTROY_SESSION VIRTIO_CRYPTO_OPCODE(HASH, 0x03) +#define VIRTIO_CRYPTO_MAC_CREATE_SESSION VIRTIO_CRYPTO_OPCODE(MAC, 0x02) +#define VIRTIO_CRYPTO_MAC_DESTROY_SESSION VIRTIO_CRYPTO_OPCODE(MAC, 0x03) +#define VIRTIO_CRYPTO_AEAD_CREATE_SESSION VIRTIO_CRYPTO_OPCODE(AEAD, 0x02) +#define VIRTIO_CRYPTO_AEAD_DESTROY_SESSION VIRTIO_CRYPTO_OPCODE(AEAD, 0x03) +#define VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION VIRTIO_CRYPTO_OPCODE(AKCIPHER, 0x04) +#define VIRTIO_CRYPTO_AKCIPHER_DESTROY_SESSION VIRTIO_CRYPTO_OPCODE(AKCIPHER, 0x05) + rt_le32_t opcode; + rt_le32_t algo; + rt_le32_t flag; + /* data virtqueue id */ + rt_le32_t queue_id; +}; + +struct virtio_crypto_cipher_session_para +{ +#define VIRTIO_CRYPTO_NO_CIPHER 0 +#define VIRTIO_CRYPTO_CIPHER_ARC4 1 +#define VIRTIO_CRYPTO_CIPHER_AES_ECB 2 +#define VIRTIO_CRYPTO_CIPHER_AES_CBC 3 +#define VIRTIO_CRYPTO_CIPHER_AES_CTR 4 +#define VIRTIO_CRYPTO_CIPHER_DES_ECB 5 +#define VIRTIO_CRYPTO_CIPHER_DES_CBC 6 +#define VIRTIO_CRYPTO_CIPHER_3DES_ECB 7 +#define VIRTIO_CRYPTO_CIPHER_3DES_CBC 8 +#define VIRTIO_CRYPTO_CIPHER_3DES_CTR 9 +#define VIRTIO_CRYPTO_CIPHER_KASUMI_F8 10 +#define VIRTIO_CRYPTO_CIPHER_SNOW3G_UEA2 11 +#define VIRTIO_CRYPTO_CIPHER_AES_F8 12 +#define VIRTIO_CRYPTO_CIPHER_AES_XTS 13 +#define VIRTIO_CRYPTO_CIPHER_ZUC_EEA3 14 + rt_le32_t algo; + /* length of key */ + rt_le32_t keylen; + +#define VIRTIO_CRYPTO_OP_ENCRYPT 1 +#define VIRTIO_CRYPTO_OP_DECRYPT 2 + /* encrypt or decrypt */ + rt_le32_t op; + rt_le32_t padding; +}; + +struct virtio_crypto_session_input +{ + /* Device-writable part */ + rt_le64_t session_id; + rt_le32_t status; + rt_le32_t padding; +}; + +struct virtio_crypto_cipher_session_req +{ + struct virtio_crypto_cipher_session_para para; + rt_uint8_t padding[32]; +}; + +struct virtio_crypto_hash_session_para +{ +#define 
VIRTIO_CRYPTO_NO_HASH 0 +#define VIRTIO_CRYPTO_HASH_MD5 1 +#define VIRTIO_CRYPTO_HASH_SHA1 2 +#define VIRTIO_CRYPTO_HASH_SHA_224 3 +#define VIRTIO_CRYPTO_HASH_SHA_256 4 +#define VIRTIO_CRYPTO_HASH_SHA_384 5 +#define VIRTIO_CRYPTO_HASH_SHA_512 6 +#define VIRTIO_CRYPTO_HASH_SHA3_224 7 +#define VIRTIO_CRYPTO_HASH_SHA3_256 8 +#define VIRTIO_CRYPTO_HASH_SHA3_384 9 +#define VIRTIO_CRYPTO_HASH_SHA3_512 10 +#define VIRTIO_CRYPTO_HASH_SHA3_SHAKE128 11 +#define VIRTIO_CRYPTO_HASH_SHA3_SHAKE256 12 + rt_le32_t algo; + /* hash result length */ + rt_le32_t hash_result_len; + rt_uint8_t padding[8]; +}; + +struct virtio_crypto_hash_create_session_req +{ + struct virtio_crypto_hash_session_para para; + rt_uint8_t padding[40]; +}; + +struct virtio_crypto_mac_session_para +{ +#define VIRTIO_CRYPTO_NO_MAC 0 +#define VIRTIO_CRYPTO_MAC_HMAC_MD5 1 +#define VIRTIO_CRYPTO_MAC_HMAC_SHA1 2 +#define VIRTIO_CRYPTO_MAC_HMAC_SHA_224 3 +#define VIRTIO_CRYPTO_MAC_HMAC_SHA_256 4 +#define VIRTIO_CRYPTO_MAC_HMAC_SHA_384 5 +#define VIRTIO_CRYPTO_MAC_HMAC_SHA_512 6 +#define VIRTIO_CRYPTO_MAC_CMAC_3DES 25 +#define VIRTIO_CRYPTO_MAC_CMAC_AES 26 +#define VIRTIO_CRYPTO_MAC_KASUMI_F9 27 +#define VIRTIO_CRYPTO_MAC_SNOW3G_UIA2 28 +#define VIRTIO_CRYPTO_MAC_GMAC_AES 41 +#define VIRTIO_CRYPTO_MAC_GMAC_TWOFISH 42 +#define VIRTIO_CRYPTO_MAC_CBCMAC_AES 49 +#define VIRTIO_CRYPTO_MAC_CBCMAC_KASUMI_F9 50 +#define VIRTIO_CRYPTO_MAC_XCBC_AES 53 + rt_le32_t algo; + /* hash result length */ + rt_le32_t hash_result_len; + /* length of authenticated key */ + rt_le32_t auth_key_len; + rt_le32_t padding; +}; + +struct virtio_crypto_mac_create_session_req +{ + struct virtio_crypto_mac_session_para para; + rt_uint8_t padding[40]; +}; + +struct virtio_crypto_aead_session_para +{ +#define VIRTIO_CRYPTO_NO_AEAD 0 +#define VIRTIO_CRYPTO_AEAD_GCM 1 +#define VIRTIO_CRYPTO_AEAD_CCM 2 +#define VIRTIO_CRYPTO_AEAD_CHACHA20_POLY1305 3 + rt_le32_t algo; + /* length of key */ + rt_le32_t key_len; + /* hash result length */ + rt_le32_t 
hash_result_len; + /* length of the additional authenticated data (AAD) in bytes */ + rt_le32_t aad_len; + /* encrypt or decrypt, See above VIRTIO_CRYPTO_OP_* */ + rt_le32_t op; + rt_le32_t padding; +}; + +struct virtio_crypto_aead_create_session_req +{ + struct virtio_crypto_aead_session_para para; + rt_uint8_t padding[32]; +}; + +struct virtio_crypto_rsa_session_para +{ +#define VIRTIO_CRYPTO_RSA_RAW_PADDING 0 +#define VIRTIO_CRYPTO_RSA_PKCS1_PADDING 1 + rt_le32_t padding_algo; + +#define VIRTIO_CRYPTO_RSA_NO_HASH 0 +#define VIRTIO_CRYPTO_RSA_MD2 1 +#define VIRTIO_CRYPTO_RSA_MD3 2 +#define VIRTIO_CRYPTO_RSA_MD4 3 +#define VIRTIO_CRYPTO_RSA_MD5 4 +#define VIRTIO_CRYPTO_RSA_SHA1 5 +#define VIRTIO_CRYPTO_RSA_SHA256 6 +#define VIRTIO_CRYPTO_RSA_SHA384 7 +#define VIRTIO_CRYPTO_RSA_SHA512 8 +#define VIRTIO_CRYPTO_RSA_SHA224 9 + rt_le32_t hash_algo; +}; + +struct virtio_crypto_ecdsa_session_para +{ +#define VIRTIO_CRYPTO_CURVE_UNKNOWN 0 +#define VIRTIO_CRYPTO_CURVE_NIST_P192 1 +#define VIRTIO_CRYPTO_CURVE_NIST_P224 2 +#define VIRTIO_CRYPTO_CURVE_NIST_P256 3 +#define VIRTIO_CRYPTO_CURVE_NIST_P384 4 +#define VIRTIO_CRYPTO_CURVE_NIST_P521 5 + rt_le32_t curve_id; + rt_le32_t padding; +}; + +struct virtio_crypto_akcipher_session_para +{ +#define VIRTIO_CRYPTO_NO_AKCIPHER 0 +#define VIRTIO_CRYPTO_AKCIPHER_RSA 1 +#define VIRTIO_CRYPTO_AKCIPHER_DSA 2 +#define VIRTIO_CRYPTO_AKCIPHER_ECDSA 3 + rt_le32_t algo; + +#define VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PUBLIC 1 +#define VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PRIVATE 2 + rt_le32_t keytype; + rt_le32_t keylen; + + union + { + struct virtio_crypto_rsa_session_para rsa; + struct virtio_crypto_ecdsa_session_para ecdsa; + }; +}; + +struct virtio_crypto_akcipher_create_session_req +{ + struct virtio_crypto_akcipher_session_para para; + rt_uint8_t padding[36]; +}; + +struct virtio_crypto_alg_chain_session_para +{ +#define VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER 1 +#define VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH 2 + 
rt_le32_t alg_chain_order; +/* Plain hash */ +#define VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN 1 +/* Authenticated hash (mac) */ +#define VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH 2 +/* Nested hash */ +#define VIRTIO_CRYPTO_SYM_HASH_MODE_NESTED 3 + rt_le32_t hash_mode; + struct virtio_crypto_cipher_session_para cipher_param; + union + { + struct virtio_crypto_hash_session_para hash_param; + struct virtio_crypto_mac_session_para mac_param; + rt_uint8_t padding_union[16]; + }; + /* length of the additional authenticated data (AAD) in bytes */ + rt_le32_t aad_len; + rt_le32_t padding; +}; + +struct virtio_crypto_alg_chain_session_req +{ + struct virtio_crypto_alg_chain_session_para para; +}; + +struct virtio_crypto_sym_create_session_req +{ + union + { + struct virtio_crypto_cipher_session_req cipher; + struct virtio_crypto_alg_chain_session_req chain; + rt_uint8_t padding_union[48]; + }; + + /* Device-readable part */ + +/* No operation */ +#define VIRTIO_CRYPTO_SYM_OP_NONE 0 +/* Cipher only operation on the data */ +#define VIRTIO_CRYPTO_SYM_OP_CIPHER 1 +/* + * Chain any cipher with any hash or mac operation. 
The order + * depends on the value of alg_chain_order param + */ +#define VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING 2 + rt_le32_t op_type; + rt_le32_t padding; +}; + +struct virtio_crypto_destroy_session_req +{ + /* Device-readable part */ + rt_le64_t session_id; + rt_uint8_t padding[48]; +}; + +/* The request of the control virtqueue's packet */ +struct virtio_crypto_op_ctrl_req +{ + struct virtio_crypto_ctrl_header header; + + union + { + struct virtio_crypto_sym_create_session_req sym_create_session; + struct virtio_crypto_hash_create_session_req hash_create_session; + struct virtio_crypto_mac_create_session_req mac_create_session; + struct virtio_crypto_aead_create_session_req aead_create_session; + struct virtio_crypto_akcipher_create_session_req akcipher_create_session; + struct virtio_crypto_destroy_session_req destroy_session; + rt_uint8_t padding[56]; + }; +}; + +struct virtio_crypto_op_header +{ +#define VIRTIO_CRYPTO_CIPHER_ENCRYPT VIRTIO_CRYPTO_OPCODE(CIPHER, 0x00) +#define VIRTIO_CRYPTO_CIPHER_DECRYPT VIRTIO_CRYPTO_OPCODE(CIPHER, 0x01) +#define VIRTIO_CRYPTO_HASH VIRTIO_CRYPTO_OPCODE(HASH, 0x00) +#define VIRTIO_CRYPTO_MAC VIRTIO_CRYPTO_OPCODE(MAC, 0x00) +#define VIRTIO_CRYPTO_AEAD_ENCRYPT VIRTIO_CRYPTO_OPCODE(AEAD, 0x00) +#define VIRTIO_CRYPTO_AEAD_DECRYPT VIRTIO_CRYPTO_OPCODE(AEAD, 0x01) +#define VIRTIO_CRYPTO_AKCIPHER_ENCRYPT VIRTIO_CRYPTO_OPCODE(AKCIPHER, 0x00) +#define VIRTIO_CRYPTO_AKCIPHER_DECRYPT VIRTIO_CRYPTO_OPCODE(AKCIPHER, 0x01) +#define VIRTIO_CRYPTO_AKCIPHER_SIGN VIRTIO_CRYPTO_OPCODE(AKCIPHER, 0x02) +#define VIRTIO_CRYPTO_AKCIPHER_VERIFY VIRTIO_CRYPTO_OPCODE(AKCIPHER, 0x03) + rt_le32_t opcode; + /* algo should be service-specific algorithms */ + rt_le32_t algo; + /* session_id should be service-specific algorithms */ + rt_le64_t session_id; + /* control flag to control the request */ + rt_le32_t flag; + rt_le32_t padding; +}; + +struct virtio_crypto_cipher_para +{ + /* + * Byte Length of valid IV/Counter + * + * For block ciphers in CBC or 
F8 mode, or for Kasumi in F8 mode, or for + * SNOW3G in UEA2 mode, this is the length of the IV (which + * must be the same as the block length of the cipher). + * For block ciphers in CTR mode, this is the length of the counter + * (which must be the same as the block length of the cipher). + * For AES-XTS, this is the 128bit tweak, i, from IEEE Std 1619-2007. + * + * The IV/Counter will be updated after every partial cryptographic + * operation. + */ + rt_le32_t iv_len; + /* length of source data */ + rt_le32_t src_data_len; + /* length of dst data */ + rt_le32_t dst_data_len; + rt_le32_t padding; +}; + +struct virtio_crypto_hash_para +{ + /* length of source data */ + rt_le32_t src_data_len; + /* hash result length */ + rt_le32_t hash_result_len; +}; + +struct virtio_crypto_mac_para +{ + struct virtio_crypto_hash_para hash; +}; + +struct virtio_crypto_aead_para +{ + /* + * Byte Length of valid IV data pointed to by the below iv_addr + * parameter. + * + * For GCM mode, this is either 12 (for 96-bit IVs) or 16, in which + * case iv_addr points to J0. + * For CCM mode, this is the length of the nonce, which can be in the + * range 7 to 13 inclusive. 
+ */ + rt_le32_t iv_len; + /* length of additional auth data */ + rt_le32_t aad_len; + /* length of source data */ + rt_le32_t src_data_len; + /* length of dst data */ + rt_le32_t dst_data_len; +}; + +struct virtio_crypto_cipher_data_req +{ + /* Device-readable part */ + struct virtio_crypto_cipher_para para; + rt_uint8_t padding[24]; +}; + +struct virtio_crypto_hash_data_req +{ + /* Device-readable part */ + struct virtio_crypto_hash_para para; + rt_uint8_t padding[40]; +}; + +struct virtio_crypto_mac_data_req +{ + /* Device-readable part */ + struct virtio_crypto_mac_para para; + rt_uint8_t padding[40]; +}; + +struct virtio_crypto_alg_chain_data_para +{ + rt_le32_t iv_len; + /* Length of source data */ + rt_le32_t src_data_len; + /* Length of destination data */ + rt_le32_t dst_data_len; + /* Starting point for cipher processing in source data */ + rt_le32_t cipher_start_src_offset; + /* Length of the source data that the cipher will be computed on */ + rt_le32_t len_to_cipher; + /* Starting point for hash processing in source data */ + rt_le32_t hash_start_src_offset; + /* Length of the source data that the hash will be computed on */ + rt_le32_t len_to_hash; + /* Length of the additional auth data */ + rt_le32_t aad_len; + /* Length of the hash result */ + rt_le32_t hash_result_len; + rt_le32_t reserved; +}; + +struct virtio_crypto_alg_chain_data_req +{ + /* Device-readable part */ + struct virtio_crypto_alg_chain_data_para para; +}; + +struct virtio_crypto_sym_data_req +{ + union + { + struct virtio_crypto_cipher_data_req cipher; + struct virtio_crypto_alg_chain_data_req chain; + rt_uint8_t padding_union[40]; + }; + + /* See above VIRTIO_CRYPTO_SYM_OP_* */ + rt_le32_t op_type; + rt_le32_t padding; +}; + +struct virtio_crypto_aead_data_req +{ + /* Device-readable part */ + struct virtio_crypto_aead_para para; + rt_uint8_t padding[32]; +}; + +struct virtio_crypto_akcipher_para +{ + rt_le32_t src_data_len; + rt_le32_t dst_data_len; +}; + +struct 
virtio_crypto_akcipher_data_req +{ + struct virtio_crypto_akcipher_para para; + rt_uint8_t padding[40]; +}; + +/* The request of the data virtqueue's packet */ +struct virtio_crypto_op_data_req +{ + struct virtio_crypto_op_header header; + + union + { + struct virtio_crypto_sym_data_req sym_req; + struct virtio_crypto_hash_data_req hash_req; + struct virtio_crypto_mac_data_req mac_req; + struct virtio_crypto_aead_data_req aead_req; + struct virtio_crypto_akcipher_data_req akcipher_req; + rt_uint8_t padding[48]; + }; +}; + +#define VIRTIO_CRYPTO_OK 0 +#define VIRTIO_CRYPTO_ERR 1 +#define VIRTIO_CRYPTO_BADMSG 2 +#define VIRTIO_CRYPTO_NOTSUPP 3 +#define VIRTIO_CRYPTO_INVSESS 4 /* Invalid session id */ +#define VIRTIO_CRYPTO_NOSPC 5 /* no free session ID */ +#define VIRTIO_CRYPTO_KEY_REJECTED 6 /* Signature verification failed */ + +/* The accelerator hardware is ready */ +#define VIRTIO_CRYPTO_S_HW_READY (1 << 0) + +struct virtio_crypto_config +{ + /* See VIRTIO_CRYPTO_OP_* above */ + rt_le32_t status; + + /* + * Maximum number of data queue + */ + rt_le32_t max_dataqueues; + + /* + * Specifies the services mask which the device support, + * see VIRTIO_CRYPTO_SERVICE_* above + */ + rt_le32_t crypto_services; + + /* Detailed algorithms mask */ + rt_le32_t cipher_algo_l; + rt_le32_t cipher_algo_h; + rt_le32_t hash_algo; + rt_le32_t mac_algo_l; + rt_le32_t mac_algo_h; + rt_le32_t aead_algo; + /* Maximum length of cipher key */ + rt_le32_t max_cipher_key_len; + /* Maximum length of authenticated key */ + rt_le32_t max_auth_key_len; + rt_le32_t akcipher_algo; + /* Maximum size of each crypto request's content */ + rt_le64_t max_size; +}; + +struct virtio_crypto_inhdr +{ + /* See VIRTIO_CRYPTO_* above */ + rt_uint8_t status; +}; + +#endif /* __VIRTIO_CRYPTO_H__ */ diff --git a/components/drivers/virtio/virtio_config/virtio-gpu.h b/components/drivers/virtio/virtio_config/virtio-gpu.h new file mode 100644 index 00000000000..20e2ca6d2ed --- /dev/null +++ 
b/components/drivers/virtio/virtio_config/virtio-gpu.h @@ -0,0 +1,499 @@ +/* + * Copyright (c) 2006-2023, RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2021-11-11 GuEe-GUI the first version + * 2023-02-25 GuEe-GUI using virtio dm + */ + +#ifndef __VIRTIO_GPU_H__ +#define __VIRTIO_GPU_H__ + +#include + +#define VIRTIO_GPU_F_VIRGL 0 /* VIRTIO_GPU_CMD_CTX_*, VIRTIO_GPU_CMD_*_3D */ +#define VIRTIO_GPU_F_EDID 1 /* VIRTIO_GPU_CMD_GET_EDID */ +#define VIRTIO_GPU_F_RESOURCE_UUID 2 /* VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID */ +#define VIRTIO_GPU_F_RESOURCE_BLOB 3 /* VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB */ +#define VIRTIO_GPU_F_CONTEXT_INIT 4 /* VIRTIO_GPU_CMD_CREATE_CONTEXT with context_init and multiple timelines */ + +enum virtio_gpu_ctrl_type +{ + VIRTIO_GPU_UNDEFINED = 0, + + /* 2d commands */ + VIRTIO_GPU_CMD_GET_DISPLAY_INFO = 0x0100, + VIRTIO_GPU_CMD_RESOURCE_CREATE_2D, + VIRTIO_GPU_CMD_RESOURCE_UNREF, + VIRTIO_GPU_CMD_SET_SCANOUT, + VIRTIO_GPU_CMD_RESOURCE_FLUSH, + VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D, + VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING, + VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING, + VIRTIO_GPU_CMD_GET_CAPSET_INFO, + VIRTIO_GPU_CMD_GET_CAPSET, + VIRTIO_GPU_CMD_GET_EDID, + VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID, + VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB, + VIRTIO_GPU_CMD_SET_SCANOUT_BLOB, + + /* 3d commands */ + VIRTIO_GPU_CMD_CTX_CREATE = 0x0200, + VIRTIO_GPU_CMD_CTX_DESTROY, + VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE, + VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE, + VIRTIO_GPU_CMD_RESOURCE_CREATE_3D, + VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D, + VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D, + VIRTIO_GPU_CMD_SUBMIT_3D, + VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB, + VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB, + + /* cursor commands */ + VIRTIO_GPU_CMD_UPDATE_CURSOR = 0x0300, + VIRTIO_GPU_CMD_MOVE_CURSOR, + + /* success responses */ + VIRTIO_GPU_RESP_OK_NODATA = 0x1100, + VIRTIO_GPU_RESP_OK_DISPLAY_INFO, + VIRTIO_GPU_RESP_OK_CAPSET_INFO, + 
VIRTIO_GPU_RESP_OK_CAPSET, + VIRTIO_GPU_RESP_OK_EDID, + VIRTIO_GPU_RESP_OK_RESOURCE_UUID, + VIRTIO_GPU_RESP_OK_MAP_INFO, + + /* error responses */ + VIRTIO_GPU_RESP_ERR_UNSPEC = 0x1200, + VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY, + VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID, + VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID, + VIRTIO_GPU_RESP_ERR_INVALID_CONTEXT_ID, + VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER, +}; + +enum virtio_gpu_shm_id +{ + VIRTIO_GPU_SHM_ID_UNDEFINED = 0, + /* + * VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB + * VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB + */ + VIRTIO_GPU_SHM_ID_HOST_VISIBLE = 1 +}; + +#define VIRTIO_GPU_FLAG_FENCE (1 << 0) +/* + * If the following flag is set, then ring_idx contains the index + * of the command ring that needs to used when creating the fence + */ +#define VIRTIO_GPU_FLAG_INFO_RING_IDX (1 << 1) + +struct virtio_gpu_ctrl_hdr +{ + /* + * specifies the type of the driver request (VIRTIO_GPU_CMD_*) or + * device response (VIRTIO_GPU_RESP_*). + */ + rt_le32_t type; + /* + * request / response flags. + */ + rt_le32_t flags; + /* + * If the driver sets the VIRTIO_GPU_FLAG_FENCE bit + * in the request flags field the device MUST: + * set VIRTIO_GPU_FLAG_FENCE bit in the response, + * copy the content of the fence_id field from the request to the response, + * and send the response only after command processing is complete. + */ + rt_le64_t fence_id; + /* + * Rendering context (used in 3D mode only). + */ + rt_le32_t ctx_id; + /* + * If VIRTIO_GPU_F_CONTEXT_INIT is supported, + * then the driver MAY set VIRTIO_GPU_FLAG_INFO_RING_IDX bit + * in the request flags. In that case: + * ring_idx indicates the value of a context-specific ring index. + * The minimum value is 0 and maximum value is 63 (inclusive). + * If VIRTIO_GPU_FLAG_FENCE is set, fence_id acts as a sequence number + * on the synchronization timeline defined by ctx_idx and the ring index. 
+ * If VIRTIO_GPU_FLAG_FENCE is set and when the command associated + * with fence_id is complete, the device MUST send a response for + * all outstanding commands with a sequence number less than or + * equal to fence_id on the same synchronization timeline. + */ + rt_uint8_t ring_idx; + rt_uint8_t padding[3]; +}; + +/* data passed in the cursor vq */ + +struct virtio_gpu_cursor_pos +{ + rt_le32_t scanout_id; + rt_le32_t x; + rt_le32_t y; + rt_le32_t padding; +}; + +/* VIRTIO_GPU_CMD_UPDATE_CURSOR, VIRTIO_GPU_CMD_MOVE_CURSOR */ +struct virtio_gpu_update_cursor +{ + struct virtio_gpu_ctrl_hdr hdr; + struct virtio_gpu_cursor_pos pos; /* update & move */ + rt_le32_t resource_id; /* update only */ + rt_le32_t hot_x; /* update only */ + rt_le32_t hot_y; /* update only */ + rt_le32_t padding; +}; + +/* data passed in the control vq, 2d related */ + +struct virtio_gpu_rect +{ + rt_le32_t x; + rt_le32_t y; + rt_le32_t width; + rt_le32_t height; +}; + +/* VIRTIO_GPU_CMD_RESOURCE_UNREF */ +struct virtio_gpu_resource_unref +{ + struct virtio_gpu_ctrl_hdr hdr; + rt_le32_t resource_id; + rt_le32_t padding; +}; + +/* VIRTIO_GPU_CMD_RESOURCE_CREATE_2D: create a 2d resource with a format */ +struct virtio_gpu_resource_create_2d +{ + struct virtio_gpu_ctrl_hdr hdr; + rt_le32_t resource_id; + rt_le32_t format; + rt_le32_t width; + rt_le32_t height; +}; + +/* VIRTIO_GPU_CMD_SET_SCANOUT */ +struct virtio_gpu_set_scanout +{ + struct virtio_gpu_ctrl_hdr hdr; + struct virtio_gpu_rect r; + rt_le32_t scanout_id; + rt_le32_t resource_id; +}; + +/* VIRTIO_GPU_CMD_RESOURCE_FLUSH */ +struct virtio_gpu_resource_flush +{ + struct virtio_gpu_ctrl_hdr hdr; + struct virtio_gpu_rect r; + rt_le32_t resource_id; + rt_le32_t padding; +}; + +/* VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D: simple transfer to_host */ +struct virtio_gpu_transfer_to_host_2d +{ + struct virtio_gpu_ctrl_hdr hdr; + struct virtio_gpu_rect r; + rt_le64_t offset; + rt_le32_t resource_id; + rt_le32_t padding; +}; + +struct 
virtio_gpu_mem_entry +{ + rt_le64_t addr; + rt_le32_t length; + rt_le32_t padding; +}; + +/* VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING */ +struct virtio_gpu_resource_attach_backing +{ + struct virtio_gpu_ctrl_hdr hdr; + rt_le32_t resource_id; + rt_le32_t nr_entries; +}; + +/* VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING */ +struct virtio_gpu_resource_detach_backing +{ + struct virtio_gpu_ctrl_hdr hdr; + rt_le32_t resource_id; + rt_le32_t padding; +}; + +/* VIRTIO_GPU_RESP_OK_DISPLAY_INFO */ +#define VIRTIO_GPU_MAX_SCANOUTS 16 +struct virtio_gpu_resp_display_info +{ + struct virtio_gpu_ctrl_hdr hdr; + struct virtio_gpu_display_one + { + struct virtio_gpu_rect r; + rt_le32_t enabled; + rt_le32_t flags; + } pmodes[VIRTIO_GPU_MAX_SCANOUTS]; +}; + +/* data passed in the control vq, 3d related */ + +struct virtio_gpu_box +{ + rt_le32_t x, y, z; + rt_le32_t w, h, d; +}; + +/* VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D, VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D */ +struct virtio_gpu_transfer_host_3d +{ + struct virtio_gpu_ctrl_hdr hdr; + struct virtio_gpu_box box; + rt_le64_t offset; + rt_le32_t resource_id; + rt_le32_t level; + rt_le32_t stride; + rt_le32_t layer_stride; +}; + +/* VIRTIO_GPU_CMD_RESOURCE_CREATE_3D */ +#define VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP (1 << 0) +struct virtio_gpu_resource_create_3d +{ + struct virtio_gpu_ctrl_hdr hdr; + rt_le32_t resource_id; + rt_le32_t target; + rt_le32_t format; + rt_le32_t bind; + rt_le32_t width; + rt_le32_t height; + rt_le32_t depth; + rt_le32_t array_size; + rt_le32_t last_level; + rt_le32_t nr_samples; + rt_le32_t flags; + rt_le32_t padding; +}; + +/* VIRTIO_GPU_CMD_CTX_CREATE */ +#define VIRTIO_GPU_CONTEXT_INIT_CAPSET_ID_MASK 0x000000ff +struct virtio_gpu_ctx_create +{ + struct virtio_gpu_ctrl_hdr hdr; + rt_le32_t nlen; + rt_le32_t context_init; + char debug_name[64]; +}; + +/* VIRTIO_GPU_CMD_CTX_DESTROY */ +struct virtio_gpu_ctx_destroy +{ + struct virtio_gpu_ctrl_hdr hdr; +}; + +/* VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE, 
VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE */ +struct virtio_gpu_ctx_resource +{ + struct virtio_gpu_ctrl_hdr hdr; + rt_le32_t resource_id; + rt_le32_t padding; +}; + +/* VIRTIO_GPU_CMD_SUBMIT_3D */ +struct virtio_gpu_cmd_submit +{ + struct virtio_gpu_ctrl_hdr hdr; + rt_le32_t size; + rt_le32_t padding; +}; + +#define VIRTIO_GPU_CAPSET_VIRGL 1 +#define VIRTIO_GPU_CAPSET_VIRGL2 2 +#define VIRTIO_GPU_CAPSET_GFXSTREAM 3 +#define VIRTIO_GPU_CAPSET_VENUS 4 +#define VIRTIO_GPU_CAPSET_CROSS_DOMAIN 5 + +/* VIRTIO_GPU_CMD_GET_CAPSET_INFO */ +struct virtio_gpu_get_capset_info +{ + struct virtio_gpu_ctrl_hdr hdr; + rt_le32_t capset_index; + rt_le32_t padding; +}; + +/* VIRTIO_GPU_RESP_OK_CAPSET_INFO */ +struct virtio_gpu_resp_capset_info +{ + struct virtio_gpu_ctrl_hdr hdr; + rt_le32_t capset_id; + rt_le32_t capset_max_version; + rt_le32_t capset_max_size; + rt_le32_t padding; +}; + +/* VIRTIO_GPU_CMD_GET_CAPSET */ +struct virtio_gpu_get_capset +{ + struct virtio_gpu_ctrl_hdr hdr; + rt_le32_t capset_id; + rt_le32_t capset_version; +}; + +/* VIRTIO_GPU_RESP_OK_CAPSET */ +struct virtio_gpu_resp_capset +{ + struct virtio_gpu_ctrl_hdr hdr; + rt_uint8_t capset_data[]; +}; + +/* VIRTIO_GPU_CMD_GET_EDID */ +struct virtio_gpu_cmd_get_edid +{ + struct virtio_gpu_ctrl_hdr hdr; + rt_le32_t scanout; + rt_le32_t padding; +}; + +/* VIRTIO_GPU_RESP_OK_EDID */ +struct virtio_gpu_resp_edid +{ + struct virtio_gpu_ctrl_hdr hdr; + rt_le32_t size; + rt_le32_t padding; + rt_uint8_t edid[1024]; +}; + +#define VIRTIO_GPU_EVENT_DISPLAY (1 << 0) + +struct virtio_gpu_config +{ + /* + * signals pending events to the driver. + * The driver MUST NOT write to this field. + */ + rt_le32_t events_read; + /* + * clears pending events in the device. + * Writing a '1' into a bit will clear the corresponding bit in events_read, + * mimicking write-to-clear behavior. + */ + rt_le32_t events_clear; + /* + * specifies the maximum number of scanouts supported by the device. + * Minimum value is 1, maximum value is 16. 
+ */ + rt_le32_t num_scanouts; + /* + * specifies the maximum number of capability sets supported by the device. + * The minimum value is zero. + */ + rt_le32_t num_capsets; +}; + +/* simple formats for fbcon/X use */ +enum virtio_gpu_formats +{ + VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM = 1, + VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM = 2, + VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM = 3, + VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM = 4, + + VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM = 67, + VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM = 68, + + VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM = 121, + VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM = 134, +}; + +/* VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID */ +struct virtio_gpu_resource_assign_uuid +{ + struct virtio_gpu_ctrl_hdr hdr; + rt_le32_t resource_id; + rt_le32_t padding; +}; + +/* VIRTIO_GPU_RESP_OK_RESOURCE_UUID */ +struct virtio_gpu_resp_resource_uuid +{ + struct virtio_gpu_ctrl_hdr hdr; + rt_uint8_t uuid[16]; +}; + +/* VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB */ +struct virtio_gpu_resource_create_blob +{ + struct virtio_gpu_ctrl_hdr hdr; + rt_le32_t resource_id; +#define VIRTIO_GPU_BLOB_MEM_GUEST 0x0001 +#define VIRTIO_GPU_BLOB_MEM_HOST3D 0x0002 +#define VIRTIO_GPU_BLOB_MEM_HOST3D_GUEST 0x0003 + +#define VIRTIO_GPU_BLOB_FLAG_USE_MAPPABLE 0x0001 +#define VIRTIO_GPU_BLOB_FLAG_USE_SHAREABLE 0x0002 +#define VIRTIO_GPU_BLOB_FLAG_USE_CROSS_DEVICE 0x0004 + /* zero is invalid blob mem */ + rt_le32_t blob_mem; + rt_le32_t blob_flags; + rt_le32_t nr_entries; + rt_le64_t blob_id; + rt_le64_t size; + /* + * sizeof(nr_entries * virtio_gpu_mem_entry) bytes follow + */ +}; + +/* VIRTIO_GPU_CMD_SET_SCANOUT_BLOB */ +struct virtio_gpu_set_scanout_blob +{ + struct virtio_gpu_ctrl_hdr hdr; + struct virtio_gpu_rect r; + rt_le32_t scanout_id; + rt_le32_t resource_id; + rt_le32_t width; + rt_le32_t height; + rt_le32_t format; + rt_le32_t padding; + rt_le32_t strides[4]; + rt_le32_t offsets[4]; +}; + +/* VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB */ +struct virtio_gpu_resource_map_blob +{ + struct virtio_gpu_ctrl_hdr hdr; + rt_le32_t 
resource_id; + rt_le32_t padding; + rt_le64_t offset; +}; + +/* VIRTIO_GPU_RESP_OK_MAP_INFO */ +#define VIRTIO_GPU_MAP_CACHE_MASK 0x0f +#define VIRTIO_GPU_MAP_CACHE_NONE 0x00 +#define VIRTIO_GPU_MAP_CACHE_CACHED 0x01 +#define VIRTIO_GPU_MAP_CACHE_UNCACHED 0x02 +#define VIRTIO_GPU_MAP_CACHE_WC 0x03 +struct virtio_gpu_resp_map_info +{ + struct virtio_gpu_ctrl_hdr hdr; + rt_uint32_t map_info; + rt_uint32_t padding; +}; + +/* VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB */ +struct virtio_gpu_resource_unmap_blob +{ + struct virtio_gpu_ctrl_hdr hdr; + rt_le32_t resource_id; + rt_le32_t padding; +}; + +#endif /* __VIRTIO_GPU_H__ */ diff --git a/components/drivers/virtio/virtio_config/virtio-input.h b/components/drivers/virtio/virtio_config/virtio-input.h new file mode 100644 index 00000000000..62c7c9953a0 --- /dev/null +++ b/components/drivers/virtio/virtio_config/virtio-input.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2006-2023, RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2021-11-11 GuEe-GUI the first version + * 2023-02-25 GuEe-GUI using virtio dm + */ + +#ifndef __VIRTIO_INPUT_H__ +#define __VIRTIO_INPUT_H__ + +#include + +enum virtio_input_config_select +{ + VIRTIO_INPUT_CFG_UNSET = 0x00, + VIRTIO_INPUT_CFG_ID_NAME = 0x01, + VIRTIO_INPUT_CFG_ID_SERIAL = 0x02, + VIRTIO_INPUT_CFG_ID_DEVIDS = 0x03, + VIRTIO_INPUT_CFG_PROP_BITS = 0x10, + VIRTIO_INPUT_CFG_EV_BITS = 0x11, + VIRTIO_INPUT_CFG_ABS_INFO = 0x12, +}; + +struct virtio_input_absinfo +{ + rt_le32_t min; /* Minimum value for the axis */ + rt_le32_t max; /* Maximum value for the axis */ + rt_le32_t fuzz; /* Fuzz value that is used to filter noise from the event stream */ + rt_le32_t flat; /* Within this value will be discarded by joydev interface and reported as 0 instead */ + rt_le32_t res; /* Resolution for the values reported for the axis */ +}; + +struct virtio_input_devids +{ + rt_le16_t bustype; + rt_le16_t vendor; + rt_le16_t product; + rt_le16_t 
version; +}; + +rt_packed(struct virtio_input_config +{ + rt_uint8_t select; + rt_uint8_t subsel; + rt_uint8_t size; + rt_uint8_t reserved[5]; + + union + { + char string[128]; + rt_uint8_t bitmap[128]; + struct virtio_input_absinfo abs; + struct virtio_input_devids ids; + }; +}); + +struct virtio_input_event +{ + rt_le16_t type; + rt_le16_t code; + rt_le32_t value; +}; + +#endif /* __VIRTIO_INPUT_H__ */ diff --git a/components/drivers/virtio/virtio_config/virtio-net.h b/components/drivers/virtio/virtio_config/virtio-net.h new file mode 100644 index 00000000000..1c5d0a29321 --- /dev/null +++ b/components/drivers/virtio/virtio_config/virtio-net.h @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2006-2023, RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2021-11-11 GuEe-GUI the first version + * 2023-02-25 GuEe-GUI using virtio dm + */ + +#ifndef __VIRTIO_NET_H__ +#define __VIRTIO_NET_H__ + +#include + +#define VIRTIO_NET_F_CSUM 0 /* Host handles pkts w/ partial csum */ +#define VIRTIO_NET_F_GUEST_CSUM 1 /* Guest handles pkts w/ partial csum */ +#define VIRTIO_NET_F_CTRL_GUEST_OFFLOADS 2 /* Dynamic offload configuration */ +#define VIRTIO_NET_F_MTU 3 /* Initial MTU advice */ +#define VIRTIO_NET_F_MAC 5 /* Host has given MAC address */ +#define VIRTIO_NET_F_GUEST_TSO4 7 /* Guest can handle TSOv4 in */ +#define VIRTIO_NET_F_GUEST_TSO6 8 /* Guest can handle TSOv6 in */ +#define VIRTIO_NET_F_GUEST_ECN 9 /* Guest can handle TSO[6] w/ ECN in */ +#define VIRTIO_NET_F_GUEST_UFO 10 /* Guest can handle UFO in */ +#define VIRTIO_NET_F_HOST_TSO4 11 /* Host can handle TSOv4 in */ +#define VIRTIO_NET_F_HOST_TSO6 12 /* Host can handle TSOv6 in */ +#define VIRTIO_NET_F_HOST_ECN 13 /* Host can handle TSO[6] w/ ECN in */ +#define VIRTIO_NET_F_HOST_UFO 14 /* Host can handle UFO in */ +#define VIRTIO_NET_F_MRG_RXBUF 15 /* Host can merge receive buffers. 
*/ +#define VIRTIO_NET_F_STATUS 16 /* virtio_net_config.status available */ +#define VIRTIO_NET_F_CTRL_VQ 17 /* Control channel available */ +#define VIRTIO_NET_F_CTRL_RX 18 /* Control channel RX mode support */ +#define VIRTIO_NET_F_CTRL_VLAN 19 /* Control channel VLAN filtering */ +#define VIRTIO_NET_F_CTRL_RX_EXTRA 20 /* Extra RX mode control support */ +#define VIRTIO_NET_F_GUEST_ANNOUNCE 21 /* Guest can announce device on the network */ +#define VIRTIO_NET_F_MQ 22 /* Device supports Receive Flow Steering */ +#define VIRTIO_NET_F_CTRL_MAC_ADDR 23 /* Set MAC address */ + +#define VIRTIO_NET_F_HASH_REPORT 57 /* Supports hash report */ +#define VIRTIO_NET_F_RSS 60 /* Supports RSS RX steering */ +#define VIRTIO_NET_F_RSC_EXT 61 /* Extended coalescing info */ +#define VIRTIO_NET_F_STANDBY 62 /* Act as standby for another device with the same MAC */ +#define VIRTIO_NET_F_SPEED_DUPLEX 63 /* Device set linkspeed and duplex */ + +rt_packed(struct virtio_net_hdr +{ +#define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 +#define VIRTIO_NET_HDR_F_DATA_VALID 2 +#define VIRTIO_NET_HDR_F_RSC_INFO 4 + rt_uint8_t flags; +#define VIRTIO_NET_HDR_GSO_NONE 0 +#define VIRTIO_NET_HDR_GSO_TCPV4 1 +#define VIRTIO_NET_HDR_GSO_UDP 3 +#define VIRTIO_NET_HDR_GSO_TCPV6 4 +#define VIRTIO_NET_HDR_GSO_ECN 0x80 + rt_uint8_t gso_type; + rt_uint16_t hdr_len; + rt_uint16_t gso_size; + rt_uint16_t csum_start; + rt_uint16_t csum_offset; + rt_uint16_t num_buffers; +}); + +#define VIRTIO_NET_MSS 1514 + +rt_packed(struct virtio_net_config +{ + rt_uint8_t mac[6]; +#define VIRTIO_NET_S_LINK_UP (1 << 0) +#define VIRTIO_NET_S_ANNOUNCE (1 << 1) + rt_uint16_t status; + rt_uint16_t max_virtqueue_pairs; + rt_uint16_t mtu; + rt_uint32_t speed; + rt_uint8_t duplex; + rt_uint8_t rss_max_key_size; + rt_uint16_t rss_max_indirection_table_length; + rt_uint32_t supported_hash_types; +}); + +#endif /* __VIRTIO_NET_H__ */ diff --git a/components/drivers/virtio/virtio_config/virtio-scmi.h 
b/components/drivers/virtio/virtio_config/virtio-scmi.h new file mode 100644 index 00000000000..bd080067161 --- /dev/null +++ b/components/drivers/virtio/virtio_config/virtio-scmi.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2006-2023, RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2023-02-25 GuEe-GUI the first version + */ + +#ifndef __VIRTIO_SCMI_H__ +#define __VIRTIO_SCMI_H__ + +#define VIRTIO_SCMI_F_P2A_CHANNELS 0 /* Device implements some SCMI notifications, or delayed responses. */ +#define VIRTIO_SCMI_F_SHARED_MEMORY 1 /* Device implements any SCMI statistics shared memory region */ + +rt_packed(struct virtio_scmi_request +{ + rt_le32_t hdr; + rt_uint8_t params[]; +}); + +rt_packed(struct virtio_scmi_response +{ + rt_le32_t hdr; + rt_uint8_t ret_values[]; +}); + +rt_packed(struct virtio_scmi_event_msg +{ + rt_le32_t hdr; + rt_uint8_t payload[]; +}); + +#endif /* __VIRTIO_SCMI_H__ */ diff --git a/components/drivers/virtio/virtio_config/virtio-scsi.h b/components/drivers/virtio/virtio_config/virtio-scsi.h new file mode 100644 index 00000000000..f0e5320adc3 --- /dev/null +++ b/components/drivers/virtio/virtio_config/virtio-scsi.h @@ -0,0 +1,163 @@ +/* + * Copyright (c) 2006-2023, RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2023-02-25 GuEe-GUI the first version + */ + +#ifndef __VIRTIO_SCSI_H__ +#define __VIRTIO_SCSI_H__ + +#include + +#define VIRTIO_SCSI_CDB_DEFAULT_SIZE 32 +#define VIRTIO_SCSI_SENSE_DEFAULT_SIZE 96 + +#ifndef VIRTIO_SCSI_CDB_SIZE +#define VIRTIO_SCSI_CDB_SIZE VIRTIO_SCSI_CDB_DEFAULT_SIZE +#endif +#ifndef VIRTIO_SCSI_SENSE_SIZE +#define VIRTIO_SCSI_SENSE_SIZE VIRTIO_SCSI_SENSE_DEFAULT_SIZE +#endif + +/* SCSI command request, followed by data-out */ +rt_packed(struct virtio_scsi_cmd_req +{ + rt_uint8_t lun[8]; /* Logical Unit Number */ + rt_le64_t tag; /* Command identifier */ + rt_uint8_t task_attr; /* 
Task attribute */ + rt_uint8_t prio; /* SAM command priority field */ + rt_uint8_t crn; + rt_uint8_t cdb[VIRTIO_SCSI_CDB_SIZE]; +}); + +/* SCSI command request, followed by protection information */ +rt_packed(struct virtio_scsi_cmd_req_pi +{ + rt_uint8_t lun[8]; /* Logical Unit Number */ + rt_le64_t tag; /* Command identifier */ + rt_uint8_t task_attr; /* Task attribute */ + rt_uint8_t prio; /* SAM command priority field */ + rt_uint8_t crn; + rt_le32_t pi_bytesout; /* DataOUT PI Number of bytes */ + rt_le32_t pi_bytesin; /* DataIN PI Number of bytes */ + rt_uint8_t cdb[VIRTIO_SCSI_CDB_SIZE]; +}); + +/* Response, followed by sense data and data-in */ +rt_packed(struct virtio_scsi_cmd_resp +{ + rt_le32_t sense_len; /* Sense data length */ + rt_le32_t resid; /* Residual bytes in data buffer */ + rt_le16_t status_qualifier; /* Status qualifier */ + rt_uint8_t status; /* Command completion status */ + rt_uint8_t response; /* Response values */ + rt_uint8_t sense[VIRTIO_SCSI_SENSE_SIZE]; +}); + +/* Task Management Request */ +rt_packed(struct virtio_scsi_ctrl_tmf_req +{ + rt_le32_t type; + rt_le32_t subtype; + rt_uint8_t lun[8]; + rt_le64_t tag; +}); + +rt_packed(struct virtio_scsi_ctrl_tmf_resp +{ + rt_uint8_t response; +}); + +/* Asynchronous notification query/subscription */ +rt_packed(struct virtio_scsi_ctrl_an_req +{ + rt_le32_t type; + rt_uint8_t lun[8]; + rt_le32_t event_requested; +}); + +rt_packed(struct virtio_scsi_ctrl_an_resp +{ + rt_le32_t event_actual; + rt_uint8_t response; +}); + +rt_packed(struct virtio_scsi_event +{ + rt_le32_t event; + rt_uint8_t lun[8]; + rt_le32_t reason; +}); + +rt_packed(struct virtio_scsi_config +{ + rt_le32_t num_queues; + rt_le32_t seg_max; + rt_le32_t max_sectors; + rt_le32_t cmd_per_lun; + rt_le32_t event_info_size; + rt_le32_t sense_size; + rt_le32_t cdb_size; + rt_le16_t max_channel; + rt_le16_t max_target; + rt_le32_t max_lun; +}); + +/* Feature Bits */ +#define VIRTIO_SCSI_F_INOUT 0 +#define VIRTIO_SCSI_F_HOTPLUG 1 
+#define VIRTIO_SCSI_F_CHANGE 2 +#define VIRTIO_SCSI_F_T10_PI 3 + +/* Response codes */ +#define VIRTIO_SCSI_S_OK 0 +#define VIRTIO_SCSI_S_OVERRUN 1 +#define VIRTIO_SCSI_S_ABORTED 2 +#define VIRTIO_SCSI_S_BAD_TARGET 3 +#define VIRTIO_SCSI_S_RESET 4 +#define VIRTIO_SCSI_S_BUSY 5 +#define VIRTIO_SCSI_S_TRANSPORT_FAILURE 6 +#define VIRTIO_SCSI_S_TARGET_FAILURE 7 +#define VIRTIO_SCSI_S_NEXUS_FAILURE 8 +#define VIRTIO_SCSI_S_FAILURE 9 +#define VIRTIO_SCSI_S_FUNCTION_SUCCEEDED 10 +#define VIRTIO_SCSI_S_FUNCTION_REJECTED 11 +#define VIRTIO_SCSI_S_INCORRECT_LUN 12 + +/* Controlq type codes */ +#define VIRTIO_SCSI_T_TMF 0 +#define VIRTIO_SCSI_T_AN_QUERY 1 +#define VIRTIO_SCSI_T_AN_SUBSCRIBE 2 + +/* Valid TMF subtypes */ +#define VIRTIO_SCSI_T_TMF_ABORT_TASK 0 +#define VIRTIO_SCSI_T_TMF_ABORT_TASK_SET 1 +#define VIRTIO_SCSI_T_TMF_CLEAR_ACA 2 +#define VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET 3 +#define VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET 4 +#define VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET 5 +#define VIRTIO_SCSI_T_TMF_QUERY_TASK 6 +#define VIRTIO_SCSI_T_TMF_QUERY_TASK_SET 7 + +/* Events */ +#define VIRTIO_SCSI_T_EVENTS_MISSED 0x80000000 +#define VIRTIO_SCSI_T_NO_EVENT 0 +#define VIRTIO_SCSI_T_TRANSPORT_RESET 1 +#define VIRTIO_SCSI_T_ASYNC_NOTIFY 2 +#define VIRTIO_SCSI_T_PARAM_CHANGE 3 + +/* Reasons of transport reset event */ +#define VIRTIO_SCSI_EVT_RESET_HARD 0 +#define VIRTIO_SCSI_EVT_RESET_RESCAN 1 +#define VIRTIO_SCSI_EVT_RESET_REMOVED 2 + +#define VIRTIO_SCSI_S_SIMPLE 0 +#define VIRTIO_SCSI_S_ORDERED 1 +#define VIRTIO_SCSI_S_HEAD 2 +#define VIRTIO_SCSI_S_ACA 3 + +#endif /* __VIRTIO_SCSI_H__ */ diff --git a/components/drivers/virtio/virtio_ids.h b/components/drivers/virtio/virtio_ids.h new file mode 100644 index 00000000000..f3452e4f6d0 --- /dev/null +++ b/components/drivers/virtio/virtio_ids.h @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2006-2023, RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef __VIRTIO_IDS_H__ +#define __VIRTIO_IDS_H__ + +enum +{ + 
/* virtio 1.0 */ + VIRTIO_DEVICE_ID_INVALID = 0, /* Invalid device */ + VIRTIO_DEVICE_ID_NET = 1, /* Net */ + VIRTIO_DEVICE_ID_BLOCK = 2, /* Block */ + VIRTIO_DEVICE_ID_CONSOLE = 3, /* Console */ + VIRTIO_DEVICE_ID_RNG = 4, /* Rng */ + VIRTIO_DEVICE_ID_BALLOON = 5, /* Balloon */ + VIRTIO_DEVICE_ID_IOMEM = 6, /* IO memory */ + VIRTIO_DEVICE_ID_RPMSG = 7, /* Remote processor messaging */ + VIRTIO_DEVICE_ID_SCSI = 8, /* SCSI */ + VIRTIO_DEVICE_ID_9P = 9, /* 9p console */ + VIRTIO_DEVICE_ID_MAC80211_WLAN = 10, /* Mac80211 wlan */ + VIRTIO_DEVICE_ID_RPROC_SERIAL = 11, /* Remoteproc serial link */ + VIRTIO_DEVICE_ID_CAIF = 12, /* CAIF */ + VIRTIO_DEVICE_ID_MEM_BALLOON = 13, /* Memory balloon */ + VIRTIO_DEVICE_ID_GPU = 16, /* GPU */ + VIRTIO_DEVICE_ID_TIME = 17, /* Timer/clock device */ + VIRTIO_DEVICE_ID_INPUT = 18, /* Input */ + /* virtio 1.1 */ + VIRTIO_DEVICE_ID_SOCKET = 19, /* Socket device */ + VIRTIO_DEVICE_ID_CRYPTO = 20, /* Crypto device */ + VIRTIO_DEVICE_ID_SIG_DIS_MOD = 21, /* Signal Distribution Module */ + VIRTIO_DEVICE_ID_PSTORE = 22, /* Pstore device */ + VIRTIO_DEVICE_ID_IOMMU = 23, /* IOMMU device */ + VIRTIO_DEVICE_ID_MEM = 24, /* Memory device */ + /* virtio 1.2 */ + VIRTIO_DEVICE_ID_AUDIO = 25, /* Audio device */ + VIRTIO_DEVICE_ID_FS = 26, /* File system device */ + VIRTIO_DEVICE_ID_PMEM = 27, /* PMEM device */ + VIRTIO_DEVICE_ID_RPMB = 28, /* Replay protected memory block device */ + VIRTIO_DEVICE_ID_MAC80211_HWSIM = 29, /* Mac80211 hwsim wireless simulation device */ + VIRTIO_DEVICE_ID_VIDEO_ENCODER = 30, /* Video encoder device */ + VIRTIO_DEVICE_ID_VIDEO_DECODER = 31, /* Video decoder device */ + VIRTIO_DEVICE_ID_SCMI = 32, /* SCMI device */ + VIRTIO_DEVICE_ID_NITRO_SEC_MOD = 33, /* NitroSecureModule */ + VIRTIO_DEVICE_ID_I2C_ADAPTER = 34, /* I2C adapter */ + VIRTIO_DEVICE_ID_WATCHDOG = 35, /* Watchdog */ + VIRTIO_DEVICE_ID_CAN = 36, /* CAN device */ + VIRTIO_DEVICE_ID_DMABUF = 37, /* Virtio dmabuf */ + VIRTIO_DEVICE_ID_PARAM_SERV = 38, /* 
Parameter Server */ + VIRTIO_DEVICE_ID_AUDIO_POLICY = 39, /* Audio policy device */ + VIRTIO_DEVICE_ID_BT = 40, /* Bluetooth device */ + VIRTIO_DEVICE_ID_GPIO = 41, /* GPIO device */ + VIRTIO_DEVICE_ID_RDMA = 42, /* RDMA device */ + /* virtio 1.3 */ + VIRTIO_DEVICE_ID_CAMERA = 43, /* Camera device */ + VIRTIO_DEVICE_ID_ISM = 44, /* Internal shared memory device */ + VIRTIO_DEVICE_ID_SPI_MASTER = 45, /* SPI master */ + + VIRTIO_DEVICE_ID_MAX +}; + +enum +{ + VIRTIO_PCI_SUBSYSTEM_DEVICE_ID_NET = 0x1000, /* Network card */ + VIRTIO_PCI_SUBSYSTEM_DEVICE_ID_BLOCK = 0x1001, /* Block device */ + VIRTIO_PCI_SUBSYSTEM_DEVICE_ID_BALLOON = 0x1002, /* Memory ballooning (traditional) */ + VIRTIO_PCI_SUBSYSTEM_DEVICE_ID_CONSOLE = 0x1003, /* Console */ + VIRTIO_PCI_SUBSYSTEM_DEVICE_ID_SCSI = 0x1004, /* SCSI host */ + VIRTIO_PCI_SUBSYSTEM_DEVICE_ID_RNG = 0x1005, /* Entropy source */ + VIRTIO_PCI_SUBSYSTEM_DEVICE_ID_9P = 0x1009, /* 9P transport */ + + VIRTIO_PCI_SUBSYSTEM_DEVICE_ID_MAX +}; + +#endif /* __VIRTIO_IDS_H__ */ diff --git a/components/drivers/virtio/virtio_internal.h b/components/drivers/virtio/virtio_internal.h new file mode 100644 index 00000000000..b86ae224b12 --- /dev/null +++ b/components/drivers/virtio/virtio_internal.h @@ -0,0 +1,112 @@ +/* + * Copyright (c) 2006-2023, RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2023-02-25 GuEe-GUI the first version + */ + +#ifndef __VIRTIO_INTERNAL_H__ +#define __VIRTIO_INTERNAL_H__ + +#include +#include + +rt_inline rt_bool_t virtio_legacy_is_little_endian(void) +{ +#ifdef ARCH_CPU_BIG_ENDIAN + return RT_FALSE; +#else + return RT_TRUE; +#endif +} + +rt_inline rt_bool_t virtio_is_little_endian(struct rt_virtio_device *vdev) +{ + return rt_virtio_has_feature(vdev, VIRTIO_F_VERSION_1) || + virtio_legacy_is_little_endian(); +} + +#ifdef __CHECKER__ +#define FORCE __attribute__((force)) +#else +#define FORCE +#endif + +rt_inline rt_uint16_t 
virtio16_to_cpu(struct rt_virtio_device *vdev, rt_uint16_t val) +{ + if (virtio_is_little_endian(vdev)) + { + return rt_le16_to_cpu((FORCE rt_le16_t)val); + } + else + { + return rt_be16_to_cpu((FORCE rt_be16_t)val); + } +} + +rt_inline rt_uint16_t cpu_to_virtio16(struct rt_virtio_device *vdev, rt_uint16_t val) +{ + if (virtio_is_little_endian(vdev)) + { + return (FORCE rt_le16_t)rt_cpu_to_le16(val); + } + else + { + return (FORCE rt_be16_t)rt_cpu_to_be16(val); + } +} + +rt_inline rt_uint32_t virtio32_to_cpu(struct rt_virtio_device *vdev, rt_uint32_t val) +{ + if (virtio_is_little_endian(vdev)) + { + return rt_le32_to_cpu((FORCE rt_le32_t)val); + } + else + { + return rt_be32_to_cpu((FORCE rt_be32_t)val); + } +} + +rt_inline rt_uint32_t cpu_to_virtio32(struct rt_virtio_device *vdev, rt_uint32_t val) +{ + if (virtio_is_little_endian(vdev)) + { + return (FORCE rt_le32_t)rt_cpu_to_le32(val); + } + else + { + return (FORCE rt_be32_t)rt_cpu_to_be32(val); + } +} + +rt_inline rt_uint64_t virtio64_to_cpu(struct rt_virtio_device *vdev, rt_uint64_t val) +{ + if (virtio_is_little_endian(vdev)) + { + return rt_le64_to_cpu((FORCE rt_le64_t)val); + } + else + { + return rt_be64_to_cpu((FORCE rt_be64_t)val); + } +} + +rt_inline rt_uint64_t cpu_to_virtio64(struct rt_virtio_device *vdev, rt_uint64_t val) +{ + if (virtio_is_little_endian(vdev)) + { + return (FORCE rt_le64_t)rt_cpu_to_le64(val); + } + else + { + return (FORCE rt_be64_t)rt_cpu_to_be64(val); + } +} + +#undef FORCE + +#endif /* __VIRTIO_INTERNAL_H__ */ diff --git a/components/drivers/virtio/virtio_mmio.c b/components/drivers/virtio/virtio_mmio.c new file mode 100644 index 00000000000..12f2f93dcfe --- /dev/null +++ b/components/drivers/virtio/virtio_mmio.c @@ -0,0 +1,652 @@ +/* + * Copyright (c) 2006-2023, RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2023-02-25 GuEe-GUI the first version + */ + +#include + +#define DBG_TAG "virtio.mmio" +#define 
DBG_LVL DBG_INFO +#include + +#include + +#include +#include +#include +#include +#include + +#define VIRTIO_MMIO_MAGIC 0x000 /* Magic value */ +#define VIRTIO_MMIO_VERSION 0x004 /* Device version number */ +#define VIRTIO_MMIO_DEVICE_ID 0x008 /* Virtio Subsystem Device ID */ +#define VIRTIO_MMIO_VENDOR_ID 0x00c /* Virtio Subsystem Vendor ID */ +#define VIRTIO_MMIO_DEVICE_FEATURES 0x010 /* Flags representing features the device supports */ +#define VIRTIO_MMIO_DEVICE_FEATURES_SEL 0x014 /* Device (host) features word selection. */ +#define VIRTIO_MMIO_DRIVER_FEATURES 0x020 /* Device features understood and activated by the driver */ +#define VIRTIO_MMIO_DRIVER_FEATURES_SEL 0x024 /* Activated (guest) features word selection */ +#define VIRTIO_MMIO_GUEST_PAGE_SIZE 0x028 /* Guest page size, this value should be a power of 2 */ +#define VIRTIO_MMIO_QUEUE_SEL 0x030 /* Virtual queue index */ +#define VIRTIO_MMIO_QUEUE_NUM_MAX 0x034 /* Maximum virtual queue size */ +#define VIRTIO_MMIO_QUEUE_NUM 0x038 /* Virtual queue size */ +#define VIRTIO_MMIO_QUEUE_ALIGN 0x03c /* Used Ring alignment in the virtual queue */ +#define VIRTIO_MMIO_QUEUE_PFN 0x040 /* Guest physical page number of the virtual queue */ +#define VIRTIO_MMIO_QUEUE_READY 0x044 /* Virtual queue ready bit */ +#define VIRTIO_MMIO_QUEUE_NOTIFY 0x050 /* Queue notifier */ +#define VIRTIO_MMIO_INTERRUPT_STATUS 0x060 /* Interrupt status */ +#define VIRTIO_MMIO_INTERRUPT_ACK 0x064 /* Interrupt acknowledge */ +#define VIRTIO_MMIO_STATUS 0x070 /* Device status */ +#define VIRTIO_MMIO_QUEUE_DESC_LOW 0x080 /* Virtual queue's descriptor Area 64 bit long physical address */ +#define VIRTIO_MMIO_QUEUE_DESC_HIGH 0x084 /* */ +#define VIRTIO_MMIO_QUEUE_AVAIL_LOW 0x090 /* Virtual queue's available Ring (Driver Area) 64 bit long physical address */ +#define VIRTIO_MMIO_QUEUE_AVAIL_HIGH 0x094 /* */ +#define VIRTIO_MMIO_QUEUE_USED_LOW 0x0a0 /* Virtual queue's used Ring (Device Area) 64 bit long physical address */ +#define 
VIRTIO_MMIO_QUEUE_USED_HIGH 0x0a4 /* */ +#define VIRTIO_MMIO_SHM_SEL 0x0ac /* Shared memory region id */ +#define VIRTIO_MMIO_SHM_LEN_LOW 0x0b0 /* Shared memory region length, 64 bits in two halves */ +#define VIRTIO_MMIO_SHM_LEN_HIGH 0x0b4 /* */ +#define VIRTIO_MMIO_SHM_BASE_LOW 0x0b8 /* Shared memory region base address, 64 bits in two halves */ +#define VIRTIO_MMIO_SHM_BASE_HIGH 0x0bc /* */ +#define VIRTIO_MMIO_CONFIG_GENERATION 0x0fc /* Configuration atomicity value */ +#define VIRTIO_MMIO_CONFIG 0x100 /* Configuration space */ + +#define VIRTIO_MMIO_INT_VIRTQ (1 << 0) /* The device has used a buffer in at least one of the active virtual queues */ +#define VIRTIO_MMIO_INT_CONFIG (1 << 1) /* The configuration of the device has changed */ + +#define VIRTIO_MMIO_VIRTQ_PAGE_SHIFT ARCH_PAGE_SHIFT +#define VIRTIO_MMIO_VIRTQ_PAGE_SZIE (1 << VIRTIO_MMIO_VIRTQ_PAGE_SHIFT) +#define VIRTIO_MMIO_VIRTQ_ALIGN_SHIFT VIRTIO_MMIO_VIRTQ_PAGE_SHIFT +#define VIRTIO_MMIO_VIRTQ_ALIGN (1 << VIRTIO_MMIO_VIRTQ_ALIGN_SHIFT) + +struct virtio_mmio +{ + struct rt_virtio_device parent; + + void *base; + int irq; + rt_ubase_t version; + + struct rt_spinlock spinlock; +}; + +#define raw_to_virtio_mmio(raw) rt_container_of(raw, struct virtio_mmio, parent) + +rt_inline rt_uint64_t virtio_mmio_read64(struct virtio_mmio *vio, int offset) +{ + return rt_cpu_to_le64(HWREG64(vio->base + offset)); +} + +rt_inline void virtio_mmio_write64(struct virtio_mmio *vio, int offset, rt_uint64_t value) +{ + HWREG64(vio->base + offset) = rt_le64_to_cpu(value); +} + +rt_inline rt_uint32_t virtio_mmio_read32(struct virtio_mmio *vio, int offset) +{ + return rt_cpu_to_le32(HWREG32(vio->base + offset)); +} + +rt_inline void virtio_mmio_write32(struct virtio_mmio *vio, int offset, rt_uint32_t value) +{ + HWREG32(vio->base + offset) = rt_le32_to_cpu(value); +} + +rt_inline rt_uint16_t virtio_mmio_read16(struct virtio_mmio *vio, int offset) +{ + return rt_cpu_to_le16(HWREG16(vio->base + offset)); +} + +rt_inline void 
virtio_mmio_write16(struct virtio_mmio *vio, int offset, rt_uint16_t value) +{ + HWREG16(vio->base + offset) = rt_le16_to_cpu(value); +} + +rt_inline rt_uint8_t virtio_mmio_read8(struct virtio_mmio *vio, int offset) +{ + return HWREG8(vio->base + offset); +} + +rt_inline void virtio_mmio_write8(struct virtio_mmio *vio, int offset, rt_uint8_t value) +{ + HWREG8(vio->base + offset) = value; +} + +static void virtio_mmio_isr(int irqno, void *param) +{ + rt_uint32_t status; + struct virtio_mmio *vio = param; + + /* Read interrupts */ + status = virtio_mmio_read32(vio, VIRTIO_MMIO_INTERRUPT_STATUS); + + /* Acknowledge interrupts */ + virtio_mmio_write32(vio, VIRTIO_MMIO_INTERRUPT_ACK, status); + + if (status & VIRTIO_MMIO_INT_CONFIG) + { + rt_virtio_device_config_changed(&vio->parent); + } + + if (status & VIRTIO_MMIO_INT_VIRTQ) + { + struct rt_virtqueue *vq; + rt_ubase_t level = rt_spin_lock_irqsave(&vio->spinlock); + + rt_list_for_each_entry(vq, &vio->parent.vq_node, list) + { + rt_virtqueue_isr(irqno, vq); + } + + rt_spin_unlock_irqrestore(&vio->spinlock, level); + } +} + +static rt_bool_t virtio_mmio_notify(struct rt_virtqueue *vq) +{ + struct virtio_mmio *vio = raw_to_virtio_mmio(vq->vdev); + + virtio_mmio_write32(vio, VIRTIO_MMIO_QUEUE_NOTIFY, vq->index); + + return RT_TRUE; +} + +static rt_err_t virtio_mmio_read_shm_region(struct virtio_mmio *vio, + struct rt_virtio_shm_region *shm) +{ + virtio_mmio_write32(vio, VIRTIO_MMIO_SHM_SEL, shm->id); + + shm->size = virtio_mmio_read32(vio, VIRTIO_MMIO_SHM_LEN_HIGH); + shm->size <<= 32; + shm->size |= virtio_mmio_read32(vio, VIRTIO_MMIO_SHM_LEN_LOW); + + if (shm->size == ~(rt_uint64_t)0UL) + { + return -RT_EEMPTY; + } + + shm->base = virtio_mmio_read32(vio, VIRTIO_MMIO_SHM_BASE_HIGH); + shm->base <<= 32; + shm->base |= virtio_mmio_read32(vio, VIRTIO_MMIO_SHM_BASE_LOW); + + return RT_EOK; +} + +static rt_err_t virtio_mmio_get_status(struct rt_virtio_device *vdev, + rt_uint8_t *out_status) +{ + struct virtio_mmio *vio = 
raw_to_virtio_mmio(vdev); + + *out_status = virtio_mmio_read32(vio, VIRTIO_MMIO_STATUS); + + return RT_EOK; +} + +static rt_err_t virtio_mmio_set_status(struct rt_virtio_device *vdev, + rt_uint8_t status) +{ + struct virtio_mmio *vio = raw_to_virtio_mmio(vdev); + + RT_ASSERT(status != 0); + + virtio_mmio_write32(vio, VIRTIO_MMIO_STATUS, status); + + return RT_EOK; +} + +static rt_err_t virtio_mmio_get_features(struct rt_virtio_device *vdev, + rt_uint64_t *out_features) +{ + rt_uint64_t features; + struct virtio_mmio *vio = raw_to_virtio_mmio(vdev); + + virtio_mmio_write32(vio, VIRTIO_MMIO_DEVICE_FEATURES_SEL, 1); + features = virtio_mmio_read32(vio, VIRTIO_MMIO_DEVICE_FEATURES); + features <<= 32; + + virtio_mmio_write32(vio, VIRTIO_MMIO_DEVICE_FEATURES_SEL, 0); + features |= virtio_mmio_read32(vio, VIRTIO_MMIO_DEVICE_FEATURES); + + *out_features = features; + + return RT_EOK; +} + +static rt_err_t virtio_mmio_set_features(struct rt_virtio_device *vdev) +{ + struct virtio_mmio *vio = raw_to_virtio_mmio(vdev); + + /* Make sure there are no mixed devices */ + if (vio->version == 2 && !(vdev->features & RT_BIT(VIRTIO_F_VERSION_1))) + { + LOG_E("%s devices (version 2) must provide VIRTIO_F_VERSION_1 feature", + rt_dm_dev_get_name(&vdev->parent)); + + return -RT_EINVAL; + } + + virtio_mmio_write32(vio, VIRTIO_MMIO_DRIVER_FEATURES_SEL, 1); + virtio_mmio_write32(vio, VIRTIO_MMIO_DRIVER_FEATURES, + (rt_uint32_t)(vdev->features >> 32)); + + virtio_mmio_write32(vio, VIRTIO_MMIO_DRIVER_FEATURES_SEL, 0); + virtio_mmio_write32(vio, VIRTIO_MMIO_DRIVER_FEATURES, + (rt_uint32_t)vdev->features); + + return RT_EOK; +} + +static rt_err_t virtio_mmio_get_config(struct rt_virtio_device *vdev, + rt_uint32_t offset, void *dst, int length) +{ + struct virtio_mmio *vio = raw_to_virtio_mmio(vdev); + + offset += VIRTIO_MMIO_CONFIG; + + if (vio->version == 1) + { + for (int i = 0; i < length; ++i) + { + ((rt_uint8_t *)dst)[i] = virtio_mmio_read8(vio, offset + i); + } + + return RT_EOK; + } + 
+ switch (length) + { + case 4: + *(rt_uint32_t *)dst = virtio_mmio_read32(vio, offset); + break; + case 1: + *(rt_uint8_t *)dst = virtio_mmio_read8(vio, offset); + break; + case 2: + *(rt_uint16_t *)dst = virtio_mmio_read16(vio, offset); + break; + case 8: + *(rt_uint64_t *)dst = virtio_mmio_read64(vio, offset); + break; + default: + return -RT_EINVAL; + } + + return RT_EOK; +} + +static rt_err_t virtio_mmio_set_config(struct rt_virtio_device *vdev, + rt_uint32_t offset, const void *src, int length) +{ + struct virtio_mmio *vio = raw_to_virtio_mmio(vdev); + + offset += VIRTIO_MMIO_CONFIG; + + if (vio->version == 1) + { + for (int i = 0; i < length; ++i) + { + virtio_mmio_write8(vio, offset + i, ((rt_uint8_t *)src)[i]); + } + + return RT_EOK; + } + + switch (length) + { + case 4: + virtio_mmio_write32(vio, offset, *(rt_uint32_t *)src); + break; + case 1: + virtio_mmio_write8(vio, offset, *(rt_uint8_t *)src); + break; + case 2: + virtio_mmio_write16(vio, offset, *(rt_uint16_t *)src); + break; + case 8: + virtio_mmio_write64(vio, offset, *(rt_uint64_t *)src); + break; + default: + return -RT_EINVAL; + } + + return RT_EOK; +} + +static struct rt_virtqueue *virtio_mmio_install_vq(struct rt_virtio_device *vdev, + int index, const char *name, rt_virtqueue_callback cb, + struct rt_virtqueue_formula *formula) +{ + rt_uint32_t num_max; + struct rt_virtqueue *vq; + struct virtio_mmio *vio = raw_to_virtio_mmio(vdev); + + virtio_mmio_write32(vio, VIRTIO_MMIO_QUEUE_SEL, index); + + if (virtio_mmio_read32(vio, vio->version == 1 ? 
+ VIRTIO_MMIO_QUEUE_PFN : VIRTIO_MMIO_QUEUE_READY)) + { + return RT_NULL; + } + + num_max = virtio_mmio_read32(vio, VIRTIO_MMIO_QUEUE_NUM_MAX); + + if (num_max == 0) + { + LOG_E("%s.virtqueue[%d](%s) num_max is zero", + rt_dm_dev_get_name(&vdev->parent), index, name); + + return RT_NULL; + } + + vq = rt_virtqueue_create(vdev, name, index, num_max, VIRTIO_MMIO_VIRTQ_ALIGN, + virtio_mmio_notify, cb, formula); + + if (!vq) + { + return vq; + } + + vq->num_max = num_max; + + /* Activate the queue */ + virtio_mmio_write32(vio, VIRTIO_MMIO_QUEUE_NUM, rt_virtqueue_get_virtq_size(vq)); + + if (vio->version == 1) + { + rt_uint32_t pfn = (rt_ubase_t)rt_kmem_v2p((void *)rt_virtqueue_get_desc_addr(vq)); + + pfn >>= VIRTIO_MMIO_VIRTQ_ALIGN_SHIFT; + + virtio_mmio_write32(vio, VIRTIO_MMIO_QUEUE_ALIGN, VIRTIO_MMIO_VIRTQ_ALIGN); + virtio_mmio_write32(vio, VIRTIO_MMIO_QUEUE_PFN, pfn); + } + else + { + rt_uint64_t page; + + page = (rt_ubase_t)rt_kmem_v2p((void *)rt_virtqueue_get_desc_addr(vq)); + virtio_mmio_write32(vio, VIRTIO_MMIO_QUEUE_DESC_LOW, (rt_uint32_t)page); + virtio_mmio_write32(vio, VIRTIO_MMIO_QUEUE_DESC_HIGH, (rt_uint32_t)(page >> 32)); + + page = (rt_ubase_t)rt_kmem_v2p((void *)rt_virtqueue_get_avail_addr(vq)); + virtio_mmio_write32(vio, VIRTIO_MMIO_QUEUE_AVAIL_LOW, (rt_uint32_t)page); + virtio_mmio_write32(vio, VIRTIO_MMIO_QUEUE_AVAIL_HIGH, (rt_uint32_t)(page >> 32)); + + page = (rt_ubase_t)rt_kmem_v2p((void *)rt_virtqueue_get_used_addr(vq)); + virtio_mmio_write32(vio, VIRTIO_MMIO_QUEUE_USED_LOW, (rt_uint32_t)page); + virtio_mmio_write32(vio, VIRTIO_MMIO_QUEUE_USED_HIGH, (rt_uint32_t)(page >> 32)); + + virtio_mmio_write32(vio, VIRTIO_MMIO_QUEUE_READY, 1); + } + + return vq; +} + +static rt_err_t virtio_mmio_install_vqs(struct rt_virtio_device *vdev, int vqs_nr, + struct rt_virtqueue *vqs[], const char *names[], rt_virtqueue_callback cbs[]) +{ + struct virtio_mmio *vio = raw_to_virtio_mmio(vdev); + + for (int i = 0, vq_idx = 0; i < vqs_nr; ++i) + { + if (!names[i]) + { 
+ vqs[i] = RT_NULL; + + continue; + } + + vqs[i] = virtio_mmio_install_vq(vdev, vq_idx++, names[i], cbs[i], RT_NULL); + + if (!vqs[i]) + { + rt_virtio_virtqueue_release(vdev); + + return -RT_ERROR; + } + } + + rt_hw_interrupt_install(vio->irq, virtio_mmio_isr, vio, + rt_dm_dev_get_name(&vdev->parent)); + rt_hw_interrupt_umask(vio->irq); + + return RT_EOK; +} + +static rt_err_t virtio_mmio_release_vqs(struct rt_virtio_device *vdev) +{ + struct rt_virtqueue *vq, *vq_next; + struct virtio_mmio *vio = raw_to_virtio_mmio(vdev); + + rt_hw_interrupt_mask(vio->irq); + rt_pic_detach_irq(vio->irq, vio); + + rt_list_for_each_entry_safe(vq, vq_next, &vio->parent.vq_node, list) + { + virtio_mmio_write32(vio, VIRTIO_MMIO_QUEUE_SEL, vq->index); + + if (vio->version == 1) + { + virtio_mmio_write32(vio, VIRTIO_MMIO_QUEUE_PFN, RT_NULL); + } + else + { + rt_uint32_t status; + + virtio_mmio_write32(vio, VIRTIO_MMIO_QUEUE_READY, RT_NULL); + status = virtio_mmio_read32(vio, VIRTIO_MMIO_QUEUE_READY); + + if (!status) + { + LOG_W("%s.virtqueue[%s] VIRTIO_MMIO_QUEUE_READY = %d", + rt_dm_dev_get_name(&vq->vdev->parent), vq->name, status); + } + } + + rt_virtqueue_delete(&vio->parent, vq); + } + + return RT_EOK; +} + +static rt_err_t virtio_mmio_control_vqs(struct rt_virtio_device *vdev, + rt_uint32_t cfg, void *data) +{ + rt_err_t err = RT_EOK; + struct virtio_mmio *vio = raw_to_virtio_mmio(vdev); + + switch (cfg) + { + case RT_VIRTQUEUE_CTL_IRQ_AFFINITY: + err = rt_pic_irq_set_affinity(vio->irq, (rt_bitmap_t *)data); + break; + + case RT_VIRTQUEUE_CTL_READ_SHM_REGION: + if (!data) + { + err = -RT_EINVAL; + break; + } + + err = virtio_mmio_read_shm_region(vio, data); + break; + + default: + err = -RT_ENOSYS; + break; + } + + return err; +} + +static rt_err_t virtio_mmio_generation(struct rt_virtio_device *vdev, + rt_uint32_t *out_counter) +{ + struct virtio_mmio *vio = raw_to_virtio_mmio(vdev); + + if (vio->version == 1) + { + *out_counter = 0; + } + else + { + *out_counter = 
virtio_mmio_read32(vio, VIRTIO_MMIO_CONFIG_GENERATION); + } + + return RT_EOK; +} + +static rt_err_t virtio_mmio_reset(struct rt_virtio_device *vdev) +{ + struct virtio_mmio *vio = raw_to_virtio_mmio(vdev); + + virtio_mmio_write32(vio, VIRTIO_MMIO_STATUS, 0); + + return RT_EOK; +} + +static const struct rt_virtio_transport virtio_mmio_trans = +{ + .get_status = virtio_mmio_get_status, + .set_status = virtio_mmio_set_status, + .get_features = virtio_mmio_get_features, + .set_features = virtio_mmio_set_features, + .get_config = virtio_mmio_get_config, + .set_config = virtio_mmio_set_config, + .install_vqs = virtio_mmio_install_vqs, + .release_vqs = virtio_mmio_release_vqs, + .control_vqs = virtio_mmio_control_vqs, + .generation = virtio_mmio_generation, + .reset = virtio_mmio_reset, +}; + +static rt_err_t virtio_mmio_probe(struct rt_platform_device *pdev) +{ + rt_err_t err = RT_EOK; + rt_uint32_t magic; + struct rt_virtio_device *vdev; + struct rt_device *dev = &pdev->parent; + struct virtio_mmio *vio = rt_calloc(1, sizeof(*vio)); + + if (!vio) + { + return -RT_ENOMEM; + } + + vio->base = rt_dm_dev_iomap(dev, 0); + + if (!vio->base) + { + err = -RT_EIO; + + goto _fail; + } + + vio->irq = rt_dm_dev_get_irq(dev, 0); + + if (vio->irq < 0) + { + err = vio->irq; + + goto _fail; + } + + magic = virtio_mmio_read32(vio, VIRTIO_MMIO_MAGIC); + + /* 0x74726976 (a Little Endian equivalent of the "virt" string). 
*/ + if (magic != ('v' | 'i' << 8 | 'r' << 16 | 't' << 24)) + { + err = -RT_EINVAL; + + LOG_E("Invalid magic: %x", magic); + + goto _fail; + } + + vio->version = virtio_mmio_read32(vio, VIRTIO_MMIO_VERSION); + + /* Only support version 1~2 */ + if (vio->version < 1 || vio->version > 2) + { + err = -RT_ENOSYS; + + LOG_E("Not support version: %d", vio->version); + + goto _fail; + } + + vdev = &vio->parent; + + vdev->id.device = virtio_mmio_read32(vio, VIRTIO_MMIO_DEVICE_ID); + + if (vdev->id.device == VIRTIO_DEVICE_ID_INVALID) + { + err = -RT_EEMPTY; + goto _fail; + } + + vdev->id.vendor = virtio_mmio_read32(vio, VIRTIO_MMIO_VENDOR_ID); + + if (vio->version == 1) + { + virtio_mmio_write32(vio, VIRTIO_MMIO_GUEST_PAGE_SIZE, VIRTIO_MMIO_VIRTQ_PAGE_SZIE); + } + + rt_spin_lock_init(&vio->spinlock); + + vdev->trans = &virtio_mmio_trans; + vdev->parent.ofw_node = dev->ofw_node; + + if ((err = rt_virtio_device_register(vdev))) + { + goto _fail; + } + + dev->user_data = vio; + + return RT_EOK; + +_fail: + if (vio->base) + { + rt_iounmap(vio->base); + } + rt_free(vio); + + return err; +} + +static rt_err_t virtio_mmio_remove(struct rt_platform_device *pdev) +{ + struct virtio_mmio *vio = pdev->parent.user_data; + + rt_hw_interrupt_mask(vio->irq); + rt_pic_detach_irq(vio->irq, vio); + + rt_bus_remove_device(&vio->parent.parent); + + rt_iounmap(vio->base); + rt_free(vio); + + return RT_EOK; +} + +static const struct rt_ofw_node_id virtio_mmio_ofw_ids[] = +{ + { .compatible = "virtio,mmio" }, + { /* sentinel */ } +}; + +static struct rt_platform_driver virtio_mmio_driver = +{ + .name = "virtio-mmio", + .ids = virtio_mmio_ofw_ids, + + .probe = virtio_mmio_probe, + .remove = virtio_mmio_remove, +}; +RT_PLATFORM_DRIVER_EXPORT(virtio_mmio_driver); diff --git a/components/drivers/virtio/virtio_pci.c b/components/drivers/virtio/virtio_pci.c new file mode 100644 index 00000000000..d17b6ee97a9 --- /dev/null +++ b/components/drivers/virtio/virtio_pci.c @@ -0,0 +1,942 @@ +/* + * Copyright 
(c) 2006-2022, RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2022-11-07 GuEe-GUI first version + */ + +#include + +#define DBG_TAG "virtio.pci" +#define DBG_LVL DBG_INFO +#include + +#include + +#include +#include +#include +#include +#include + +#define VIRTIO_PCI_CAP_COMMON_CFG 1 /* Common configuration */ +#define VIRTIO_PCI_CAP_NOTIFY_CFG 2 /* Notifications */ +#define VIRTIO_PCI_CAP_ISR_CFG 3 /* ISR Status */ +#define VIRTIO_PCI_CAP_DEVICE_CFG 4 /* Device specific configuration */ +#define VIRTIO_PCI_CAP_PCI_CFG 5 /* PCI configuration access */ +#define VIRTIO_PCI_CAP_SHARED_MEMORY_CFG 8 /* Shared memory region */ +#define VIRTIO_PCI_CAP_VENDOR_CFG 9 /* Vendor-specific data */ + +struct virtio_pci_cap +{ + rt_uint8_t cap_vndr; /* Generic PCI field: PCI_CAP_ID_VNDR */ + rt_uint8_t cap_next; /* Generic PCI field: next ptr. */ + rt_uint8_t cap_len; /* Generic PCI field: capability length */ + rt_uint8_t cfg_type; /* Identifies the structure. */ + rt_uint8_t bar; /* Where to find it. */ + rt_uint8_t id; /* Multiple capabilities of the same type */ + rt_uint8_t padding[2]; /* Pad to full dword. */ + rt_le32_t offset; /* Offset within bar. */ + rt_le32_t length; /* Length of the structure, in bytes. */ +}; + +struct virtio_pci_cap64 +{ + struct virtio_pci_cap cap; + rt_le32_t offset_hi; /* Most sig 32 bits of offset */ + rt_le32_t length_hi; /* Most sig 32 bits of length */ +}; + +struct virtio_pci_common_cfg +{ + /* About the whole device. */ + rt_le32_t device_feature_select; /* Read-write */ + rt_le32_t device_feature; /* Read-only for driver */ + rt_le32_t driver_feature_select; /* Read-write */ + rt_le32_t driver_feature; /* Read-write */ + rt_le16_t config_msix_vector; /* Read-write */ + rt_le16_t num_queues; /* Read-only for driver */ + rt_uint8_t device_status; /* Read-write */ + rt_uint8_t config_generation; /* Read-only for driver */ + + /* About a specific virtqueue. 
*/ + rt_le16_t queue_select; /* Read-write */ + rt_le16_t queue_size; /* Read-write */ + rt_le16_t queue_msix_vector; /* Read-write */ + rt_le16_t queue_enable; /* Read-write */ + rt_le16_t queue_notify_off; /* Read-only for driver */ + rt_le32_t queue_desc_low; /* read-write */ + rt_le32_t queue_desc_high; /* read-write */ + rt_le32_t queue_avail_low; /* read-write */ + rt_le32_t queue_avail_high; /* read-write */ + rt_le32_t queue_used_low; /* read-write */ + rt_le32_t queue_used_high; /* read-write */ +}; + +struct virtio_pci_notify_cap +{ + struct virtio_pci_cap cap; + rt_le32_t notify_off_multiplier; /* Multiplier for queue_notify_off. */ +}; + +struct virtio_pci_cfg_cap +{ + struct virtio_pci_cap cap; + rt_uint8_t pci_cfg_data[4]; /* Data for BAR access. */ +}; + +#define VIRTIO_PCI_VIRTQ_ALIGN 4096 +#define VIRTIO_PCI_ISR_QUEUE RT_BIT(0) +#define VIRTIO_PCI_ISR_CONFIG RT_BIT(1) +#define VIRTIO_MSI_NO_VECTOR 0xffff + +#define VIRTIO_PCI_DEVICE_START 0x1040 +#define VIRTIO_PCI_DEVICE_END 0x107f + +struct virtio_pci +{ + struct rt_virtio_device parent; + struct rt_pci_device *pdev; + + void *dev_regs; + void *isr_regs; + void *notify_regs; + struct virtio_pci_common_cfg *common; + + rt_size_t dev_regs_offset; + rt_size_t isr_regs_offset; + rt_size_t notify_regs_offset; + rt_size_t common_offset; + + rt_size_t notify_len; + rt_uint32_t notify_offset_multiplier; + + int nvectors; + struct rt_pci_msix_entry *msix_entries; +}; + +#define raw_to_virtio_pci(raw) rt_container_of(raw, struct virtio_pci, parent) + +rt_inline rt_uint32_t virtio_pci_read32(void *addr) +{ + return rt_cpu_to_le32(HWREG32(addr)); +} + +rt_inline void virtio_pci_write32(void *addr, rt_uint32_t value) +{ + HWREG32(addr) = rt_le32_to_cpu(value); +} + +rt_inline rt_uint16_t virtio_pci_read16(void *addr) +{ + return rt_cpu_to_le16(HWREG16(addr)); +} + +rt_inline void virtio_pci_write16(void *addr, rt_uint16_t value) +{ + HWREG16(addr) = rt_le16_to_cpu(value); +} + +rt_inline rt_uint8_t 
virtio_pci_read8(void *addr) +{ + return HWREG8(addr); +} + +rt_inline void virtio_pci_write8(void *addr, rt_uint8_t value) +{ + HWREG8(addr) = value; +} + +static void virtio_pci_vq_isr(int irqno, void *param) +{ + struct rt_virtqueue *vq = param; + + rt_virtqueue_isr(irqno, vq); +} + +static void virtio_pci_config_isr(int irqno, void *param) +{ + struct virtio_pci *vp = param; + + rt_virtio_device_config_changed(&vp->parent); +} + +static void virtio_pci_isr(int irqno, void *param) +{ + rt_uint8_t isr; + struct rt_virtqueue *vq; + struct virtio_pci *vp = param; + + isr = virtio_pci_read8(vp->isr_regs); + + if (!isr) + { + return; + } + + if (isr & VIRTIO_PCI_ISR_CONFIG) + { + virtio_pci_config_isr(irqno, param); + } + + if (isr & VIRTIO_PCI_ISR_QUEUE) + { + rt_list_for_each_entry(vq, &vp->parent.vq_node, list) + { + virtio_pci_vq_isr(irqno, vq); + } + } +} + +static rt_bool_t virtio_pci_notify(struct rt_virtqueue *vq) +{ + rt_uint16_t offset; + struct virtio_pci *vp = raw_to_virtio_pci(vq->vdev); + + virtio_pci_write16(&vp->common->queue_select, vq->index); + + offset = virtio_pci_read16(&vp->common->queue_notify_off); + offset *= vp->notify_offset_multiplier; + + /* We no config VIRTIO_F_NOTIFICATION_DATA, so it is only 16 bits */ + if (offset > vp->notify_len - sizeof(rt_uint16_t)) + { + return RT_FALSE; + } + + virtio_pci_write16(vp->notify_regs + offset, vq->index); + + return RT_TRUE; +} + +static rt_uint16_t virtio_pci_queue_vector(struct virtio_pci *vp, + rt_uint16_t index, rt_uint16_t vector) +{ + struct virtio_pci_common_cfg *cfg = vp->common; + + virtio_pci_write16(&cfg->queue_select, index); + virtio_pci_write16(&cfg->queue_msix_vector, vector); + + /* Flush the write out to device */ + return virtio_pci_read16(&cfg->queue_msix_vector); +} + +static rt_uint16_t virtio_pci_config_vector(struct virtio_pci *vp, + rt_uint16_t vector) +{ + struct virtio_pci_common_cfg *cfg = vp->common; + + virtio_pci_write16(&cfg->config_msix_vector, vector); + + /* Flush 
the write out to device */ + return virtio_pci_read16(&cfg->config_msix_vector); +} + +static rt_err_t virtio_pci_read_shm_region(struct virtio_pci *vp, + struct rt_virtio_shm_region *shm) +{ + int pos; + rt_uint8_t bar; + rt_uint64_t offset, len; + struct rt_pci_device *pdev = vp->pdev; + + for (pos = rt_pci_find_capability(pdev, PCIY_VENDOR); pos > 0; + pos = rt_pci_find_next_capability(pdev, pos, PCIY_VENDOR)) + { + rt_uint32_t value32; + rt_uint8_t type, cap_len, id; + + rt_pci_read_config_u8(pdev, + pos + rt_offsetof(struct virtio_pci_cap, cfg_type), &type); + + if (type != VIRTIO_PCI_CAP_SHARED_MEMORY_CFG) + { + continue; + } + + rt_pci_read_config_u8(pdev, + pos + rt_offsetof(struct virtio_pci_cap, cap_len), &cap_len); + + if (cap_len != sizeof(struct virtio_pci_cap64)) + { + LOG_E("%s: shm cap with bad size offset: %d size: %d", + rt_dm_dev_get_name(&pdev->parent), pos, cap_len); + + continue; + } + + rt_pci_read_config_u8(pdev, + pos + rt_offsetof(struct virtio_pci_cap, id), &id); + + if (id != shm->id) + { + continue; + } + + rt_pci_read_config_u8(pdev, + pos + rt_offsetof(struct virtio_pci_cap, bar), &bar); + + if (bar >= PCI_STD_NUM_BARS) + { + continue; + } + + rt_pci_read_config_u32(pdev, + pos + rt_offsetof(struct virtio_pci_cap, offset), &value32); + offset = value32; + + rt_pci_read_config_u32(pdev, + pos + rt_offsetof(struct virtio_pci_cap, length), &value32); + len = value32; + + rt_pci_read_config_u32(pdev, + pos + rt_offsetof(struct virtio_pci_cap64, offset_hi), &value32); + offset |= ((rt_uint64_t)value32) << 32; + + rt_pci_read_config_u32(pdev, + pos + rt_offsetof(struct virtio_pci_cap64, length_hi), &value32); + len |= ((rt_uint64_t)value32) << 32; + + break; + } + + if (!pos) + { + return -RT_EEMPTY; + } + + if (offset + len < offset) + { + LOG_E("%s: cap offset + len overflow detected", + rt_dm_dev_get_name(&pdev->parent)); + + return -RT_EIO; + } + + if (offset + len > pdev->resource[bar].size) + { + LOG_E("%s: bar shorter than cap offset 
+ len", + rt_dm_dev_get_name(&pdev->parent)); + + return -RT_EIO; + } + + shm->base = pdev->resource[bar].base + offset; + shm->size = len; + + return RT_EOK; +} + +static rt_err_t virtio_pci_get_status(struct rt_virtio_device *vdev, + rt_uint8_t *out_status) +{ + struct virtio_pci *vp = raw_to_virtio_pci(vdev); + + *out_status = virtio_pci_read32(&vp->common->device_status); + + return RT_EOK; +} + +static rt_err_t virtio_pci_set_status(struct rt_virtio_device *vdev, + rt_uint8_t status) +{ + struct virtio_pci *vp = raw_to_virtio_pci(vdev); + + virtio_pci_write32(&vp->common->device_status, status); + + return RT_EOK; +} + +static rt_err_t virtio_pci_get_features(struct rt_virtio_device *vdev, + rt_uint64_t *out_features) +{ + rt_uint64_t features; + struct virtio_pci *vp = raw_to_virtio_pci(vdev); + + virtio_pci_write32(&vp->common->device_feature_select, 1); + features = virtio_pci_read32(&vp->common->device_feature); + features <<= 32; + + virtio_pci_write32(&vp->common->device_feature_select, 0); + features |= virtio_pci_read32(&vp->common->device_feature); + + *out_features = features; + + return RT_EOK; +} + +static rt_err_t virtio_pci_set_features(struct rt_virtio_device *vdev) +{ + struct virtio_pci *vp = raw_to_virtio_pci(vdev); + + virtio_pci_write32(&vp->common->driver_feature_select, 1); + virtio_pci_write32(&vp->common->driver_feature, (rt_uint32_t)(vdev->features >> 32)); + + virtio_pci_write32(&vp->common->driver_feature_select, 0); + virtio_pci_write32(&vp->common->driver_feature, (rt_uint32_t)vdev->features); + + return RT_EOK; +} + +static rt_err_t virtio_pci_get_config(struct rt_virtio_device *vdev, + rt_uint32_t offset, void *dst, int length) +{ + rt_uint8_t byte; + rt_le16_t word; + rt_le32_t dword; + struct virtio_pci *vp = raw_to_virtio_pci(vdev); + + switch (length) + { + case 4: + dword = virtio_pci_read32(vp->dev_regs + offset); + rt_memcpy(dst, &dword, sizeof(dword)); + break; + case 1: + byte = virtio_pci_read8(vp->dev_regs + offset); + 
rt_memcpy(dst, &byte, sizeof(byte)); + break; + case 2: + word = virtio_pci_read16(vp->dev_regs + offset); + rt_memcpy(dst, &word, sizeof(word)); + break; + case 8: + dword = virtio_pci_read32(vp->dev_regs + offset); + rt_memcpy(dst, &dword, sizeof(dword)); + dword = virtio_pci_read32(vp->dev_regs + offset + sizeof(dword)); + rt_memcpy(dst + sizeof(dword), &dword, sizeof(dword)); + break; + default: + return -RT_EINVAL; + } + + return RT_EOK; +} + +static rt_err_t virtio_pci_set_config(struct rt_virtio_device *vdev, + rt_uint32_t offset, const void *src, int length) +{ + rt_uint8_t byte; + rt_le16_t word; + rt_le32_t dword; + struct virtio_pci *vp = raw_to_virtio_pci(vdev); + + switch (length) + { + case 4: + rt_memcpy(&dword, src, sizeof(dword)); + virtio_pci_write32(vp->dev_regs + offset, dword); + break; + case 1: + rt_memcpy(&byte, src, sizeof(byte)); + virtio_pci_write8(vp->dev_regs + offset, byte); + break; + case 2: + rt_memcpy(&word, src, sizeof(word)); + virtio_pci_write16(vp->dev_regs + offset, word); + break; + case 8: + rt_memcpy(&dword, src, sizeof(dword)); + virtio_pci_write32(vp->dev_regs + offset, dword); + rt_memcpy(&dword, src + sizeof(dword), sizeof(dword)); + virtio_pci_write32(vp->dev_regs + offset + sizeof(dword), dword); + break; + default: + return -RT_EINVAL; + } + + return RT_EOK; +} + +static struct rt_virtqueue *virtio_pci_install_vq(struct rt_virtio_device *vdev, + int index, const char *name, rt_virtqueue_callback cb, + struct rt_virtqueue_formula *formula) +{ + rt_uint64_t page; + rt_uint32_t num_max; + struct rt_virtqueue *vq; + struct virtio_pci *vp = raw_to_virtio_pci(vdev); + struct virtio_pci_common_cfg *cfg = vp->common; + + if (index >= virtio_pci_read16(&cfg->num_queues)) + { + return RT_NULL; + } + + virtio_pci_write16(&cfg->queue_select, index); + + num_max = virtio_pci_read16(&cfg->queue_size); + + if (!num_max || virtio_pci_read16(&cfg->queue_enable)) + { + return RT_NULL; + } + + vq = rt_virtqueue_create(vdev, name, 
index, num_max, VIRTIO_PCI_VIRTQ_ALIGN, + virtio_pci_notify, cb, formula); + + if (!vq) + { + return vq; + } + + vq->num_max = num_max; + + virtio_pci_write16(&cfg->queue_size, rt_virtqueue_get_virtq_size(vq)); + + page = (rt_ubase_t)rt_kmem_v2p((void *)rt_virtqueue_get_desc_addr(vq)); + virtio_pci_write32(&cfg->queue_desc_low, (rt_uint32_t)page); + virtio_pci_write32(&cfg->queue_desc_high, (rt_uint32_t)(page >> 32)); + + page = (rt_ubase_t)rt_kmem_v2p((void *)rt_virtqueue_get_avail_addr(vq)); + virtio_pci_write32(&cfg->queue_avail_low, (rt_uint32_t)page); + virtio_pci_write32(&cfg->queue_avail_high, (rt_uint32_t)(page >> 32)); + + page = (rt_ubase_t)rt_kmem_v2p((void *)rt_virtqueue_get_used_addr(vq)); + virtio_pci_write32(&cfg->queue_used_low, (rt_uint32_t)page); + virtio_pci_write32(&cfg->queue_used_high, (rt_uint32_t)(page >> 32)); + + virtio_pci_write16(&cfg->queue_enable, 1); + + return vq; +} + +static rt_err_t virtio_pci_install_vqs(struct rt_virtio_device *vdev, int vqs_nr, + struct rt_virtqueue *vqs[], const char *names[], rt_virtqueue_callback cbs[]) +{ + void *isr; + rt_err_t err; + char name[RT_NAME_MAX]; + int nvectors = vqs_nr + 1, irq, cpuid = 0; + RT_IRQ_AFFINITY_DECLARE(affinity) = { 0 }; + struct virtio_pci *vp = raw_to_virtio_pci(vdev); + + vp->msix_entries = rt_malloc(nvectors * sizeof(*vp->msix_entries)); + + if (!vp->msix_entries) + { + return -RT_ENOMEM; + } + + rt_pci_msix_entry_index_linear(vp->msix_entries, nvectors); + + /* Try MSI-X with one vector per queue */ + if (rt_pci_msix_enable(vp->pdev, vp->msix_entries, nvectors) < 0) + { + nvectors = 2; + + /* Try MSI-X with one shared for queues, one vector for config */ + if (rt_pci_msix_enable(vp->pdev, vp->msix_entries, nvectors) < 0) + { + /* Only supported INTx */ + nvectors = 1; + + rt_free(vp->msix_entries); + vp->msix_entries = RT_NULL; + } + } + + for (int i = 0, vq_idx = 0; i < vqs_nr; ++i, ++vq_idx) + { + if (!names[i]) + { + vqs[i] = RT_NULL; + + continue; + } + + vqs[i] = 
virtio_pci_install_vq(vdev, vq_idx, names[i], cbs[i], RT_NULL); + + if (!vqs[i]) + { + err = -RT_ERROR; + goto _fail; + } + + if (nvectors >= 2 || (nvectors == 2 && vqs_nr == 1)) + { + int vec_idx = nvectors != 2 ? vq_idx : 0; + int msi_idx = nvectors != 2 ? i : 0; + + if (!cbs[i]) + { + virtio_pci_queue_vector(vp, vq_idx, VIRTIO_MSI_NO_VECTOR); + + continue; + } + + if (virtio_pci_queue_vector(vp, vq_idx, vec_idx) == VIRTIO_MSI_NO_VECTOR) + { + err = -RT_EBUSY; + goto _fail; + } + + irq = vp->msix_entries[msi_idx].irq; + + rt_snprintf(name, RT_NAME_MAX, "%s-%s-%d", + rt_dm_dev_get_name(&vdev->parent), names[i], vq_idx); + + RT_IRQ_AFFINITY_SET(affinity, cpuid % RT_CPUS_NR); + rt_pic_irq_set_affinity(irq, affinity); + RT_IRQ_AFFINITY_CLEAR(affinity, cpuid++ % RT_CPUS_NR); + + rt_hw_interrupt_install(irq, virtio_pci_vq_isr, vqs[i], name); + rt_hw_interrupt_umask(irq); + } + } + + if (nvectors >= 2) + { + rt_snprintf(name, RT_NAME_MAX, "%s-config", + rt_dm_dev_get_name(&vdev->parent)); + + isr = virtio_pci_config_isr; + irq = vp->msix_entries[nvectors - 1].irq; + + if (virtio_pci_config_vector(vp, nvectors - 1) == VIRTIO_MSI_NO_VECTOR) + { + err = -RT_EBUSY; + goto _fail; + } + + RT_IRQ_AFFINITY_SET(affinity, cpuid % RT_CPUS_NR); + rt_pic_irq_set_affinity(irq, affinity); + RT_IRQ_AFFINITY_CLEAR(affinity, cpuid++ % RT_CPUS_NR); + } + else + { + rt_snprintf(name, RT_NAME_MAX, "%s", rt_dm_dev_get_name(&vdev->parent)); + + isr = virtio_pci_isr; + irq = vp->pdev->irq; + + virtio_pci_config_vector(vp, VIRTIO_MSI_NO_VECTOR); + } + + rt_hw_interrupt_install(irq, isr, vp, name); + rt_hw_interrupt_umask(irq); + + vp->nvectors = nvectors; + + return RT_EOK; + +_fail: + rt_virtio_virtqueue_release(vdev); + + return err; +} + +static rt_err_t virtio_pci_release_vqs(struct rt_virtio_device *vdev) +{ + int irq; + struct rt_virtqueue *vq, *vq_next; + struct virtio_pci *vp = raw_to_virtio_pci(vdev); + + rt_list_for_each_entry_safe(vq, vq_next, &vp->parent.vq_node, list) + { + 
virtio_pci_write16(&vp->common->queue_select, vq->index); + + if (vp->nvectors >= 2) + { + virtio_pci_queue_vector(vp, vq->index, VIRTIO_MSI_NO_VECTOR); + + irq = vp->msix_entries[vq->index].irq; + + rt_hw_interrupt_mask(irq); + rt_pic_detach_irq(irq, vq); + } + + /* Select and deactivate the queue */ + virtio_pci_write16(&vp->common->queue_enable, 0); + + rt_virtqueue_delete(&vp->parent, vq); + } + + if (vp->nvectors >= 2) + { + irq = vp->msix_entries[vp->nvectors - 1].irq; + } + else + { + irq = vp->pdev->irq; + } + + rt_hw_interrupt_mask(irq); + rt_pic_detach_irq(irq, vp); + + return RT_EOK; +} + +static rt_err_t virtio_pci_control_vqs(struct rt_virtio_device *vdev, + rt_uint32_t cfg, void *data) +{ + rt_err_t err = RT_EOK; + struct virtio_pci *vp = raw_to_virtio_pci(vdev); + + switch (cfg) + { + case RT_VIRTQUEUE_CTL_IRQ_AFFINITY: + if (vp->nvectors == 1) + { + err = rt_pic_irq_set_affinity(vp->pdev->irq, (rt_bitmap_t *)data); + } + else + { + err = -RT_ENOSYS; + } + break; + + case RT_VIRTQUEUE_CTL_READ_SHM_REGION: + if (!data) + { + err = -RT_EINVAL; + break; + } + + err = virtio_pci_read_shm_region(vp, data); + break; + + default: + err = -RT_ENOSYS; + break; + } + + return err; +} + +static rt_err_t virtio_pci_generation(struct rt_virtio_device *vdev, + rt_uint32_t *out_counter) +{ + struct virtio_pci *vp = raw_to_virtio_pci(vdev); + + *out_counter = virtio_pci_read8(&vp->common->config_generation); + + return RT_EOK; +} + +static rt_err_t virtio_pci_reset(struct rt_virtio_device *vdev) +{ + struct virtio_pci *vp = raw_to_virtio_pci(vdev); + + virtio_pci_write8(&vp->common->device_status, 0); + + while (virtio_pci_read8(&vp->common->device_status)) + { + rt_thread_mdelay(1); + } + + return RT_EOK; +} + +static const struct rt_virtio_transport virtio_pci_trans = +{ + .get_status = virtio_pci_get_status, + .set_status = virtio_pci_set_status, + .get_features = virtio_pci_get_features, + .set_features = virtio_pci_set_features, + .get_config = 
virtio_pci_get_config, + .set_config = virtio_pci_set_config, + .install_vqs = virtio_pci_install_vqs, + .release_vqs = virtio_pci_release_vqs, + .control_vqs = virtio_pci_control_vqs, + .generation = virtio_pci_generation, + .reset = virtio_pci_reset, +}; + +static rt_uint32_t virtio_pci_find_capability(struct rt_pci_device *pdev, + rt_uint8_t cfg_type, rt_size_t cap_size, struct virtio_pci_cap *out_cap) +{ + int offset; + + for (rt_uint8_t pos = rt_pci_find_capability(pdev, PCIY_VENDOR); + pos > 0 && (PCI_REGMAX + 1) - cap_size >= pos; + pos = rt_pci_find_next_capability(pdev, pos, PCIY_VENDOR)) + { + offset = pos + rt_offsetof(struct virtio_pci_cap, cap_vndr); + rt_pci_read_config_u8(pdev, offset, &out_cap->cap_vndr); + offset = pos + rt_offsetof(struct virtio_pci_cap, cap_next); + rt_pci_read_config_u8(pdev, offset, &out_cap->cap_next); + offset = pos + rt_offsetof(struct virtio_pci_cap, cap_len); + rt_pci_read_config_u8(pdev, offset, &out_cap->cap_len); + offset = pos + rt_offsetof(struct virtio_pci_cap, cfg_type); + rt_pci_read_config_u8(pdev, offset, &out_cap->cfg_type); + offset = pos + rt_offsetof(struct virtio_pci_cap, bar); + rt_pci_read_config_u8(pdev, offset, &out_cap->bar); + offset = pos + rt_offsetof(struct virtio_pci_cap, offset); + rt_pci_read_config_u32(pdev, offset, &out_cap->offset); + offset = pos + rt_offsetof(struct virtio_pci_cap, length); + rt_pci_read_config_u32(pdev, offset, &out_cap->length); + + /* Ignore structures with reserved BAR values */ + if (out_cap->bar >= RT_PCI_BAR_NR_MAX) + { + continue; + } + + if (out_cap->cfg_type == cfg_type) + { + return pos; + } + } + + return 0; +} + +static void virtio_pci_free(struct virtio_pci *vp) +{ + if (vp->common) + { + rt_iounmap(vp->common - vp->common_offset); + } + if (vp->dev_regs) + { + rt_iounmap(vp->dev_regs - vp->dev_regs_offset); + } + if (vp->notify_regs) + { + rt_iounmap(vp->notify_regs - vp->notify_regs_offset); + } + if (vp->isr_regs) + { + rt_iounmap(vp->isr_regs - 
vp->isr_regs_offset); + } + + rt_free(vp); +} + +static rt_err_t virtio_pci_probe(struct rt_pci_device *pdev) +{ + rt_err_t err = RT_EOK; + rt_uint8_t common, notify, device, isr; + struct virtio_pci_cap common_cap, notify_cap, device_cap, isr_cap; + struct rt_virtio_device *vdev; + struct virtio_pci *vp; + + if (pdev->device < VIRTIO_PCI_DEVICE_START || + pdev->device > VIRTIO_PCI_DEVICE_END) + { + return -RT_EINVAL; + } + + if (!(vp = rt_calloc(1, sizeof(*vp)))) + { + return -RT_ENOMEM; + } + vp->pdev = pdev; + + if (!(common = virtio_pci_find_capability(pdev, VIRTIO_PCI_CAP_COMMON_CFG, + sizeof(struct virtio_pci_cap), &common_cap))) + { + err = -RT_EINVAL; + goto _fail; + } + + if (!(device = virtio_pci_find_capability(pdev, VIRTIO_PCI_CAP_DEVICE_CFG, + sizeof(struct virtio_pci_cap), &device_cap))) + { + err = -RT_EINVAL; + goto _fail; + } + + if (!(notify = virtio_pci_find_capability(pdev, VIRTIO_PCI_CAP_NOTIFY_CFG, + sizeof(struct virtio_pci_notify_cap), ¬ify_cap))) + { + err = -RT_EINVAL; + goto _fail; + } + + if (!(isr = virtio_pci_find_capability(pdev, VIRTIO_PCI_CAP_ISR_CFG, + sizeof(struct virtio_pci_cap), &isr_cap))) + { + err = -RT_EINVAL; + goto _fail; + } + + if (!(vp->common = rt_pci_iomap(pdev, common_cap.bar))) + { + err = -RT_EIO; + goto _fail; + } + vp->common_offset = common_cap.offset; + vp->common += vp->common_offset; + + if (!(vp->dev_regs = rt_pci_iomap(pdev, device_cap.bar))) + { + err = -RT_EIO; + goto _fail; + } + vp->dev_regs_offset = device_cap.offset; + vp->dev_regs += vp->dev_regs_offset; + + if (!(vp->notify_regs = rt_pci_iomap(pdev, notify_cap.bar))) + { + err = -RT_EIO; + goto _fail; + } + vp->notify_regs_offset = notify_cap.offset; + vp->notify_regs += vp->notify_regs_offset; + vp->notify_len = notify_cap.length; + + if (!(vp->isr_regs = rt_pci_iomap(pdev, isr_cap.bar))) + { + err = -RT_EIO; + goto _fail; + } + vp->isr_regs_offset = isr_cap.offset; + vp->isr_regs += vp->isr_regs_offset; + + rt_pci_read_config_u32(pdev, + notify + 
rt_offsetof(struct virtio_pci_notify_cap, notify_off_multiplier), + &vp->notify_offset_multiplier); + + vdev = &vp->parent; + vdev->id.device = pdev->device - VIRTIO_PCI_DEVICE_START; + rt_pci_read_config_u16(pdev, PCIR_SUBVEND_0, (rt_uint16_t *)&vdev->id.vendor); + + vdev->trans = &virtio_pci_trans; + vdev->parent.ofw_node = pdev->parent.ofw_node; + + if ((err = rt_virtio_device_register(vdev))) + { + goto _fail; + } + + pdev->parent.user_data = vp; + + return err; + +_fail: + virtio_pci_free(vp); + + return err; +} + +static rt_err_t virtio_pci_remove(struct rt_pci_device *pdev) +{ + struct virtio_pci *vp = pdev->parent.user_data; + + rt_bus_remove_device(&vp->parent.parent); + + virtio_pci_free(vp); + + return RT_EOK; +} + +static struct rt_pci_device_id virtio_pci_ids[] = +{ + { RT_PCI_DEVICE_ID(PCI_VENDOR_ID_REDHAT_QUMRANET, PCI_ANY_ID), }, + { /* sentinel */ } +}; + +static struct rt_pci_driver virtio_pci_driver = +{ + .name = "virtio-pci", + + .ids = virtio_pci_ids, + .probe = virtio_pci_probe, + .remove = virtio_pci_remove, +}; +RT_PCI_DRIVER_EXPORT(virtio_pci_driver); diff --git a/components/drivers/virtio/virtio_queue.c b/components/drivers/virtio/virtio_queue.c new file mode 100644 index 00000000000..1244fefb1fc --- /dev/null +++ b/components/drivers/virtio/virtio_queue.c @@ -0,0 +1,1219 @@ +/* + * Copyright (c) 2006-2023, RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2023-02-25 GuEe-GUI the first version + */ + +#include +#include + +#define DBG_TAG "virtio.queue" +#define DBG_LVL DBG_INFO +#include + +#include + +#include +#include +#include + +#include "virtio_internal.h" + +struct virtq_buffer_shadow +{ + rt_ubase_t dma_handle; + void *dma_buf; + + rt_size_t size; + rt_size_t chain_num; +}; + +struct virtqueue_split +{ + struct virtq virtq; + rt_ubase_t virtq_dma; + + rt_uint16_t avail_idx_shadow; + rt_uint16_t avail_flags_shadow; + struct virtq_buffer_shadow buffer_shadow[0]; 
+}; + +struct virtqueue_packed +{ + struct virtq virtq; + rt_size_t virtq_size; + rt_ubase_t virtq_dma; + rt_ubase_t driver_event_dma; + rt_ubase_t device_event_dma; + + rt_uint16_t last_used_idx; + rt_uint16_t avail_used_flags; + rt_uint16_t avail_wrap_counter; + rt_uint16_t avail_flags_shadow; + rt_uint16_t event_flags_shadow; + struct virtq_buffer_shadow buffer_shadow[0]; +}; + +static struct rt_virtqueue *vq_split_create(struct rt_virtio_device *vdev, + const char *name, int index, int num, rt_uint32_t align, + rt_virtqueue_notifier notify, rt_virtqueue_callback callback, + struct rt_virtqueue_formula *formula) +{ + void *pages; + rt_size_t size; + struct virtqueue_split *vq_split; + struct rt_virtqueue *vq = RT_NULL; + struct virtq_buffer_shadow *buffer_shadow; + + /* + * Layout: + * +-----------------+ + * | rt_virtqueue | + * +-----------------+ + * | virtqueue_split | + * +-----------------+ + * | buffer_shadow | + * +-----------------+ + */ + vq = rt_malloc(sizeof(*vq) + + sizeof(struct virtqueue_split) + + sizeof(struct virtq_buffer_shadow) * num); + + if (!vq) + { + return RT_NULL; + } + + vq_split = (void *)vq + sizeof(*vq); + buffer_shadow = (void *)vq_split + sizeof(*vq_split); + rt_memset(buffer_shadow, 0, sizeof(*buffer_shadow) * num); + + /* + * +------------------+-----------+----------------------+ + * | Virtqueue Part | Alignment | Size | + * +------------------+-----------+----------------------+ + * | Descriptor Table | 16 | 16 * (Queue Size) | + * +------------------+-----------+----------------------+ + * | Available Ring | 2 | 6 + 2 * (Queue Size) | + * +------------------+-----------+----------------------+ + * | Used Ring | 4 | 6 + 8 * (Queue Size) | + * +------------------+-----------+----------------------+ + */ + size = virtq_size(RT_NULL, num, align); + + if (formula->page) + { + pages = formula->page; + } + else + { + pages = rt_dma_alloc(&vdev->parent, size, &vq_split->virtq_dma, RT_DMA_F_LINEAR); + + if (!pages) + { + rt_free(vq); 
+ + return RT_NULL; + } + } + + rt_memset(pages, 0, size); + virtq_init(&vq_split->virtq, num, pages, align); + + vq_split->avail_idx_shadow = 0; + vq_split->avail_flags_shadow = 0; + + vq->name = name; + vq->index = index; + vq->num_free = num; + + vq->callback = callback; + + vq->vq_split = vq_split; + vq->packed_ring = RT_FALSE; + + vq->notify = notify; + vq->event = rt_virtio_has_feature(vdev, VIRTIO_F_RING_EVENT_IDX); + vq->event_triggered = RT_FALSE; + vq->free_head = 0; + vq->num_added = 0; + vq->last_used_idx = 0; + + vq->vdev = vdev; + + if (!vq->callback) + { + vq_split->avail_flags_shadow |= VIRTQ_AVAIL_F_NO_INTERRUPT; + + if (!vq->event) + { + vq_split->virtq.avail->flags = cpu_to_virtio16(vdev, + vq_split->avail_flags_shadow); + } + } + + rt_memcpy(&vq->formula, formula, sizeof(*formula)); + + return vq; +} + +static rt_err_t vq_split_delete(struct rt_virtqueue *vq) +{ + rt_err_t err = RT_EOK; + struct virtq *virtq; + struct virtqueue_split *vq_split = vq->vq_split; + struct rt_virtqueue_formula *formula = &vq->formula; + + if (!formula->page) + { + virtq = &vq_split->virtq; + rt_dma_free(&vq->vdev->parent, virtq_size(virtq, 0, 0), + (void *)virtq->desc, vq_split->virtq_dma, RT_DMA_F_LINEAR); + } + + rt_free(vq); + + return err; +} + +static rt_err_t vq_split_add_buf(struct rt_virtqueue *vq, + void *dma_buf, rt_size_t size, rt_bool_t is_out) +{ + rt_err_t err; + rt_uint16_t buf_id; + rt_ubase_t dma_addr; + rt_size_t virtq_num; + int head, next, flags; + struct virtq *virtq; + struct virtq_desc *desc; + struct virtqueue_split *vq_split; + struct virtq_buffer_shadow *buffer_shadow; + struct rt_virtio_device *vdev = vq->vdev; + + if (!vq->num_free) + { + LOG_D("%s.virtqueue[%s(%d)] add buffer.len = %d fail", + rt_dm_dev_get_name(&vdev->parent), vq->name, vq->index, size); + + /* Recvice buffer NOW! 
*/ + if (is_out) + { + vq->notify(vq); + } + + return -RT_EFULL; + } + + err = rt_dma_sync_out_data(&vdev->parent, dma_buf, size, &dma_addr, RT_DMA_F_LINEAR); + + if (err) + { + return err; + } + + vq_split = vq->vq_split; + virtq = &vq_split->virtq; + virtq_num = virtq->num; + + head = vq->free_head++ & (virtq_num - 1); + rt_hw_dsb(); + + next = vq->free_head & (virtq_num - 1); + desc = &virtq->desc[head]; + buffer_shadow = &vq_split->buffer_shadow[head]; + + buffer_shadow->dma_handle = dma_addr; + buffer_shadow->dma_buf = dma_buf; + buffer_shadow->size = size; + + buf_id = (head - vq->num_added) & (virtq_num - 1); + vq_split->buffer_shadow[buf_id].chain_num = vq->num_added + 1; + + flags = VIRTQ_DESC_F_NEXT; + if (!is_out) + { + flags |= VIRTQ_DESC_F_WRITE; + } + + desc->addr = cpu_to_virtio64(vdev, dma_addr); + desc->len = cpu_to_virtio32(vdev, size); + desc->flags = cpu_to_virtio16(vdev, flags); + desc->next = cpu_to_virtio16(vdev, next); + + /* Update index */ + ++vq->num_added; + --vq->num_free; + + LOG_D("%s.virtqueue[%s(%d)] add buffer(%p, size = %d) head = %d", + rt_dm_dev_get_name(&vdev->parent), vq->name, vq->index, dma_buf, size, head); + + return RT_EOK; +} + +static rt_bool_t vq_split_submit(struct rt_virtqueue *vq) +{ + int head, prev; + rt_uint16_t avail, num_added; + struct rt_virtio_device *vdev = vq->vdev; + struct virtqueue_split *vq_split = vq->vq_split; + struct virtq *virtq = &vq_split->virtq; + rt_size_t virtq_num = virtq->num; + + num_added = vq->num_added; + head = vq->free_head; + prev = (head - 1) & (virtq_num - 1); + head = head - num_added; + + LOG_D("%s.virtqueue[%s(%d)] submit head = %d, num_added = %d, idx = %d", + rt_dm_dev_get_name(&vdev->parent), vq->name, vq->index, + head & (virtq_num - 1), num_added, virtq->avail->idx); + + /* Reset list info */ + vq->num_added = 0; + + /* Clear last "next" flags */ + virtq->desc[prev].flags &= ~cpu_to_virtio16(vdev, VIRTQ_DESC_F_NEXT); + virtq->desc[prev].next = 0; + + /* Tell the device the 
first index in our chain of descriptors */ + avail = vq_split->avail_idx_shadow & (virtq_num - 1); + virtq->avail->ring[avail] = cpu_to_virtio32(vdev, head & (virtq_num - 1)); + + /* Tell the device another avail ring entry is available */ + rt_hw_wmb(); + ++vq_split->avail_idx_shadow; + virtq->avail->idx = cpu_to_virtio16(vdev, vq_split->avail_idx_shadow); + + if (vq->event) + { + rt_uint16_t old = vq_split->avail_idx_shadow - num_added; + rt_uint16_t new = vq_split->avail_idx_shadow; + + return virtq_need_event(virtio16_to_cpu(vdev, + *virtq_avail_event(virtq)), new, old); + } + + return !(virtq->used->flags & cpu_to_virtio16(vdev, VIRTQ_USED_F_NO_NOTIFY)); +} + +static rt_bool_t vq_split_poll(struct rt_virtqueue *vq, rt_uint32_t last_used_idx) +{ + return (rt_uint16_t)last_used_idx != virtio16_to_cpu(vq->vdev, vq->vq_split->virtq.used->idx); +} + +rt_inline rt_bool_t vq_split_pending(struct rt_virtqueue *vq) +{ + return vq->last_used_idx != virtio16_to_cpu(vq->vdev, vq->vq_split->virtq.used->idx); +} + +static void vq_split_disable_callback(struct rt_virtqueue *vq) +{ + struct virtqueue_split *vq_split = vq->vq_split; + struct virtq *virtq = &vq_split->virtq; + + if (!(vq_split->avail_flags_shadow & VIRTQ_AVAIL_F_NO_INTERRUPT)) + { + vq_split->avail_flags_shadow |= VIRTQ_AVAIL_F_NO_INTERRUPT; + + if (vq->event) + { + *virtq_used_event(virtq) = 0; + } + else + { + virtq->avail->flags = cpu_to_virtio16(vq->vdev, vq_split->avail_flags_shadow); + } + } +} + +static rt_uint32_t vq_split_enable_callback(struct rt_virtqueue *vq) +{ + struct virtqueue_split *vq_split = vq->vq_split; + struct virtq *virtq = &vq_split->virtq; + rt_uint16_t last_used_idx = vq->last_used_idx; + + if (vq_split->avail_flags_shadow & VIRTQ_AVAIL_F_NO_INTERRUPT) + { + vq_split->avail_flags_shadow &= ~VIRTQ_AVAIL_F_NO_INTERRUPT; + + if (!vq->event) + { + virtq->avail->flags = cpu_to_virtio16(vq->vdev, vq_split->avail_flags_shadow); + } + } + + *virtq_used_event(virtq) = cpu_to_virtio16(vq->vdev, 
last_used_idx); + + return last_used_idx; +} + +static void *vq_split_read_buf(struct rt_virtqueue *vq, rt_size_t *out_len) +{ + void *buf; + rt_err_t err; + rt_uint32_t idx; + rt_uint16_t last_used, next; + rt_size_t chain_num; + struct virtq *virtq; + struct virtqueue_split *vq_split; + struct virtq_buffer_shadow *buffer_shadow; + struct rt_virtio_device *vdev = vq->vdev; + + if (!vq_split_pending(vq)) + { + LOG_D("%s.virtqueue[%s(%d)] read buffer empty", + rt_dm_dev_get_name(&vdev->parent), vq->name, vq->index); + + return RT_NULL; + } + + vq_split = vq->vq_split; + virtq = &vq_split->virtq; + + last_used = vq->last_used_idx & (virtq->num - 1); + next = idx = virtio32_to_cpu(vdev, virtq->used->ring[last_used].id); + *out_len = virtio32_to_cpu(vdev, virtq->used->ring[last_used].len); + rt_hw_dsb(); + + buffer_shadow = &vq_split->buffer_shadow[idx]; + buf = buffer_shadow->dma_buf; + chain_num = buffer_shadow->chain_num; + + LOG_D("%s.virtqueue[%s(%d)] read head = %d, buffer(%p, size = %d)", + rt_dm_dev_get_name(&vdev->parent), vq->name, vq->index, idx, buf, *out_len); + + for (int i = 0; i < chain_num; ++i) + { + idx = next; + next = virtq->desc[idx].next; + + if (virtq->desc[idx].flags & VIRTQ_DESC_F_WRITE || + vdev->dma_dispatch) + { + buffer_shadow = &vq_split->buffer_shadow[idx]; + err = rt_dma_sync_in_data(&vdev->parent, + buffer_shadow->dma_buf, + buffer_shadow->size, + buffer_shadow->dma_handle, RT_DMA_F_LINEAR); + + if (err) + { + LOG_E("%s.virtqueue[%s(%d)] read head = %d sync error = %s", + rt_dm_dev_get_name(&vdev->parent), + vq->name, vq->index, idx, rt_strerror(err)); + } + } + } + + vq->num_free += chain_num; + ++vq->last_used_idx; + + if (!(vq_split->avail_flags_shadow & VIRTQ_AVAIL_F_NO_INTERRUPT)) + { + HWREG16(virtq_used_event(virtq)) = cpu_to_virtio16(vq->vdev, vq->last_used_idx); + rt_hw_dmb(); + } + + rt_hw_dsb(); + + return buf; +} + +static struct rt_virtqueue *vq_packed_create(struct rt_virtio_device *vdev, + const char *name, int index, 
int num, int align, + rt_virtqueue_notifier notify, rt_virtqueue_callback callback, + struct rt_virtqueue_formula *formula) +{ + rt_size_t size, event_size; + struct virtq *virtq; + struct virtqueue_packed *vq_packed; + struct rt_virtqueue *vq = RT_NULL; + struct virtq_buffer_shadow *buffer_shadow; + + /* + * Layout: + * +------------------------+ + * | rt_virtqueue | + * +------------------------+ + * | virtqueue_packed | + * +------------------------+ + * | buffer_shadow | + * +------------------------+ + */ + vq = rt_malloc(sizeof(*vq) + + sizeof(struct virtqueue_packed) + + sizeof(struct virtq_buffer_shadow) * num); + + if (!vq) + { + return RT_NULL; + } + + vq_packed = (void *)vq + sizeof(*vq); + buffer_shadow = (void *)vq_packed + sizeof(*vq_packed); + rt_memset(buffer_shadow, 0, sizeof(*buffer_shadow) * num); + + /* + * +--------------------------+-----------+-------------------+ + * | Virtqueue Part | Alignment | Size | + * +--------------------------+-----------+-------------------+ + * | Descriptor Ring | 16 | 16 * (Queue Size) | + * +--------------------------+-----------+-------------------+ + * | Device Event Suppression | 4 | 4 | + * +--------------------------+-----------+-------------------+ + * | Driver Event Suppression | 4 | 4 | + * +--------------------------+-----------+-------------------+ + */ + virtq = &vq_packed->virtq; + + size = num * sizeof(struct virtq_packed_desc); + size = RT_ALIGN(size, 4); + event_size = sizeof(struct virtq_packed_desc_event); + vq_packed->virtq_size = size + event_size * 2; + + if (formula->page) + { + virtq->desc_packed = formula->page; + } + else + { + virtq->desc_packed = rt_dma_alloc(&vdev->parent, + vq_packed->virtq_size, &vq_packed->virtq_dma, RT_DMA_F_LINEAR); + + if (!virtq->desc_packed) + { + rt_free(vq); + + return RT_NULL; + } + } + + virtq->driver_event = (void *)virtq->desc_packed + size; + vq_packed->driver_event_dma = vq_packed->virtq_dma + size; + + virtq->device_event = (void *)virtq->driver_event 
+ event_size; + vq_packed->device_event_dma = vq_packed->driver_event_dma + event_size; + + rt_memset(virtq->desc_packed, 0, size); + rt_memset(virtq->driver_event, 0, event_size); + rt_memset(virtq->device_event, 0, event_size); + virtq->num = num; + virtq->align = align; + + vq_packed->avail_wrap_counter = 1; + vq_packed->event_flags_shadow = 0; + vq_packed->avail_used_flags = VIRTQ_DESC_F_AVAIL; + + vq->name = name; + vq->index = index; + vq->num_free = num; + + vq->callback = callback; + + vq->vq_packed = vq_packed; + vq->packed_ring = RT_TRUE; + + vq->notify = notify; + vq->event = rt_virtio_has_feature(vdev, VIRTIO_F_RING_EVENT_IDX); + vq->event_triggered = RT_FALSE; + vq->free_head = 0; + vq->num_added = 0; + vq->last_used_idx = 0 | RT_BIT(VIRTQ_PACKED_EVENT_WRAP_CTR); + + vq->vdev = vdev; + + if (!vq->callback) + { + vq_packed->avail_flags_shadow = VIRTQ_PACKED_EVENT_FLAG_DISABLE; + vq_packed->virtq.avail->flags = rt_cpu_to_le16(vq_packed->avail_flags_shadow); + } + + rt_memcpy(&vq->formula, formula, sizeof(*formula)); + + return vq; +} + +static rt_err_t vq_packed_delete(struct rt_virtqueue *vq) +{ + rt_err_t err = RT_EOK; + struct virtq *virtq; + struct virtqueue_packed *vq_packed = vq->vq_packed; + struct rt_virtqueue_formula *formula = &vq->formula; + + virtq = &vq_packed->virtq; + + if (virtq->desc_packed && !formula->page) + { + rt_dma_free(&vq->vdev->parent, vq_packed->virtq_size, + (void *)virtq->desc_packed, vq_packed->virtq_dma, RT_DMA_F_LINEAR); + } + + rt_free(vq); + + return err; +} + +static rt_err_t vq_packed_add_buf(struct rt_virtqueue *vq, + void *dma_buf, rt_size_t size, rt_bool_t is_out) +{ + rt_err_t err; + rt_uint16_t buf_id; + rt_ubase_t dma_addr; + rt_size_t virtq_num; + int head, flags; + struct virtq *virtq; + struct virtqueue_packed *vq_packed; + struct virtq_packed_desc *desc_packed; + struct virtq_buffer_shadow *buffer_shadow; + struct rt_virtio_device *vdev = vq->vdev; + + if (!vq->num_free) + { + LOG_D("%s.virtqueue[%s(%d)] add 
buffer.len = %d fail", + rt_dm_dev_get_name(&vdev->parent), vq->name, vq->index, size); + + /* Recvice buffer NOW! */ + if (is_out) + { + vq->notify(vq); + } + + return -RT_EFULL; + } + + err = rt_dma_sync_out_data(&vdev->parent, dma_buf, size, &dma_addr, RT_DMA_F_LINEAR); + + if (err) + { + return err; + } + + vq_packed = vq->vq_packed; + virtq = &vq_packed->virtq; + virtq_num = virtq->num; + + head = vq->free_head++ & (virtq_num - 1); + rt_hw_dsb(); + + desc_packed = &virtq->desc_packed[head]; + buffer_shadow = &vq_packed->buffer_shadow[head]; + + buffer_shadow->dma_handle = dma_addr; + buffer_shadow->dma_buf = dma_buf; + buffer_shadow->size = size; + + buf_id = (head - vq->num_added) & (virtq_num - 1); + vq_packed->buffer_shadow[buf_id].chain_num = vq->num_added + 1; + + flags = VIRTQ_DESC_F_NEXT; + if (!is_out) + { + flags |= VIRTQ_DESC_F_WRITE; + } + + desc_packed->addr = rt_cpu_to_le64(dma_addr); + desc_packed->len = rt_cpu_to_le32(size); + desc_packed->id = rt_cpu_to_le16(buf_id); + desc_packed->flags = rt_cpu_to_le16(vq_packed->avail_used_flags | flags); + + /* Ready fot next */ + if (head + 1 >= virtq->num) + { + vq_packed->avail_used_flags ^= VIRTQ_DESC_F_AVAIL | VIRTQ_DESC_F_USED; + /* Toggle the wrap counter */ + vq_packed->avail_wrap_counter ^= 1; + } + + rt_hw_wmb(); + + /* Update index */ + ++vq->num_added; + --vq->num_free; + + LOG_D("%s.virtqueue[%s(%d)] add buffer(%p, size = %d) head = %d", + rt_dm_dev_get_name(&vdev->parent), vq->name, vq->index, dma_buf, size, head); + + return RT_EOK; +} + +static rt_bool_t vq_packed_submit(struct rt_virtqueue *vq) +{ + int head, prev; + rt_uint16_t off_wrap, flags, wrap_counter, event_idx; + struct virtq_packed_desc_event device_event; + struct virtqueue_packed *vq_packed = vq->vq_packed; + struct virtq *virtq = &vq_packed->virtq; + rt_size_t virtq_num = virtq->num; + + rt_hw_dmb(); + rt_memcpy(&device_event, virtq->device_event, sizeof(device_event)); + + head = vq->free_head; + prev = (head - 1) & (virtq_num 
- 1); + head = head - vq->num_added; + + LOG_D("%s.virtqueue[%s(%d)] submit head = %d, num_added = %d, idx = %d", + rt_dm_dev_get_name(&vq->vdev->parent), vq->name, vq->index, + head & (virtq_num - 1), vq->num_added, rt_le16_to_cpu(device_event.off_wrap) & + ~RT_BIT(VIRTQ_PACKED_EVENT_WRAP_CTR)); + + /* Reset list info */ + vq->num_added = 0; + + /* Clear last "next" flags */ + virtq->desc_packed[prev].flags &= ~rt_cpu_to_le16(VIRTQ_DESC_F_NEXT); + + flags = rt_le16_to_cpu(device_event.flags); + + if (flags != VIRTQ_PACKED_EVENT_FLAG_DESC) + { + return (flags != VIRTQ_PACKED_EVENT_FLAG_DISABLE); + } + + off_wrap = rt_le16_to_cpu(device_event.off_wrap); + + wrap_counter = off_wrap >> VIRTQ_PACKED_EVENT_WRAP_CTR; + event_idx = off_wrap & ~RT_BIT(VIRTQ_PACKED_EVENT_WRAP_CTR); + if (wrap_counter != vq_packed->avail_wrap_counter) + { + event_idx -= virtq->num; + } + + return virtq_need_event(event_idx, prev, head); +} + +rt_inline rt_bool_t is_used_desc_packed(const struct virtq *virtq, + rt_uint16_t idx, rt_bool_t used_wrap_counter) +{ + rt_uint16_t flags; + rt_bool_t avail, used; + + flags = rt_le16_to_cpu(virtq->desc_packed[idx].flags); + avail = !!(flags & VIRTQ_DESC_F_AVAIL); + used = !!(flags & VIRTQ_DESC_F_USED); + + return avail == used && used == used_wrap_counter; +} + +static rt_bool_t packed_used_wrap_counter(rt_uint16_t last_used_idx) +{ + return !!(last_used_idx & RT_BIT(VIRTQ_PACKED_EVENT_WRAP_CTR)); +} + +static rt_uint16_t packed_last_used(rt_uint16_t last_used_idx) +{ + return last_used_idx & ~(-RT_BIT(VIRTQ_PACKED_EVENT_WRAP_CTR)); +} + +static rt_bool_t vq_packed_poll(struct rt_virtqueue *vq, rt_uint32_t last_used_idx) +{ + rt_bool_t wrap_counter; + rt_uint16_t off_wrap = last_used_idx, used_idx; + + wrap_counter = off_wrap >> VIRTQ_PACKED_EVENT_WRAP_CTR; + used_idx = off_wrap & ~RT_BIT(VIRTQ_PACKED_EVENT_WRAP_CTR); + + return is_used_desc_packed(&vq->vq_packed->virtq, used_idx, wrap_counter); +} + +rt_inline rt_bool_t vq_packed_pending(struct 
rt_virtqueue *vq) +{ + rt_bool_t wrap_counter; + rt_uint16_t last_used, last_used_idx; + + last_used_idx = HWREG16(&vq->last_used_idx); + last_used = packed_last_used(last_used_idx); + wrap_counter = packed_used_wrap_counter(last_used_idx); + return is_used_desc_packed(&vq->vq_packed->virtq, last_used, wrap_counter); +} + +static void vq_packed_disable_callback(struct rt_virtqueue *vq) +{ + struct virtqueue_packed *vq_packed = vq->vq_packed; + + if (vq_packed->event_flags_shadow != VIRTQ_PACKED_EVENT_FLAG_DISABLE) + { + vq_packed->event_flags_shadow = VIRTQ_PACKED_EVENT_FLAG_DISABLE; + + /* + * If device triggered an event already it won't trigger one again: + * no need to disable. + */ + if (vq->event_triggered) + { + return; + } + + vq_packed->virtq.driver_event->flags = rt_cpu_to_le16(vq_packed->event_flags_shadow); + } +} + +static rt_uint32_t vq_packed_enable_callback(struct rt_virtqueue *vq) +{ + struct virtqueue_packed *vq_packed = vq->vq_packed; + + if (vq->event) + { + vq_packed->virtq.driver_event->off_wrap = rt_cpu_to_le16(vq->last_used_idx); + + /* Update event offset and event wrap counter first before updating event flags. */ + rt_hw_wmb(); + } + + if (vq_packed->event_flags_shadow == VIRTQ_PACKED_EVENT_FLAG_DISABLE) + { + vq_packed->event_flags_shadow = vq->event ? 
+ VIRTQ_PACKED_EVENT_FLAG_DESC : VIRTQ_PACKED_EVENT_FLAG_ENABLE; + vq_packed->virtq.driver_event->flags = + rt_cpu_to_le16(vq_packed->event_flags_shadow); + } + + return vq->last_used_idx; +} + +static void *vq_packed_read_buf(struct rt_virtqueue *vq, rt_size_t *out_len) +{ + void *buf; + rt_err_t err; + rt_bool_t wrap_counter; + rt_uint16_t last_used, last_used_idx, id; + rt_size_t chain_num; + struct virtq *virtq; + struct virtqueue_packed *vq_packed; + struct virtq_buffer_shadow *buffer_shadow; + struct rt_virtio_device *vdev = vq->vdev; + + if (!vq_packed_pending(vq)) + { + LOG_D("%s.virtqueue[%s(%d)] read buffer empty", + rt_dm_dev_get_name(&vdev->parent), vq->name, vq->index); + + return RT_NULL; + } + + vq_packed = vq->vq_packed; + virtq = &vq_packed->virtq; + + rt_hw_rmb(); + + last_used_idx = HWREG16(&vq->last_used_idx); + wrap_counter = packed_used_wrap_counter(last_used_idx); + last_used = packed_last_used(last_used_idx); + id = rt_le16_to_cpu(virtq->desc_packed[last_used].id); + *out_len = rt_le32_to_cpu(virtq->desc_packed[last_used].len); + + buffer_shadow = &vq->vq_packed->buffer_shadow[id]; + buf = buffer_shadow->dma_buf; + chain_num = buffer_shadow->chain_num; + + LOG_D("%s.virtqueue[%s(%d)] read head = %d, buffer(%p, size = %d)", + rt_dm_dev_get_name(&vdev->parent), vq->name, vq->index, id, buf, *out_len); + + for (int i = 0; i < chain_num; ++i) + { + if (virtq->desc_packed[id].flags & VIRTQ_DESC_F_WRITE || + vdev->dma_dispatch) + { + buffer_shadow = &vq->vq_packed->buffer_shadow[id]; + err = rt_dma_sync_in_data(&vdev->parent, + buffer_shadow->dma_buf, + buffer_shadow->size, + buffer_shadow->dma_handle, RT_DMA_F_LINEAR); + + if (err) + { + LOG_E("%s.virtqueue[%s(%d)] read head = %d sync error = %s", + rt_dm_dev_get_name(&vdev->parent), + vq->name, vq->index, id, rt_strerror(err)); + } + } + + ++id; + } + + vq->num_free += chain_num; + last_used += chain_num; + + if (last_used >= virtq->num) + { + last_used -= virtq->num; + wrap_counter ^= 1; + } + 
+ last_used |= (wrap_counter << VIRTQ_PACKED_EVENT_WRAP_CTR); + HWREG16(&vq->last_used_idx) = last_used; + + if (vq_packed->event_flags_shadow == VIRTQ_PACKED_EVENT_FLAG_DESC) + { + HWREG16(&virtq->driver_event->off_wrap) = rt_cpu_to_le16(vq->last_used_idx); + rt_hw_dmb(); + } + + return buf; +} + +rt_inline rt_bool_t virtqueue_pending(struct rt_virtqueue *vq) +{ + rt_bool_t res = RT_FALSE; + + if (vq->packed_ring) + { + res = vq_packed_pending(vq); + } + else + { + res = vq_split_pending(vq); + } + + return res; +} + +struct rt_virtqueue *rt_virtqueue_create(struct rt_virtio_device *vdev, + const char *name, int index, int num, rt_uint32_t align, + rt_virtqueue_notifier notify, rt_virtqueue_callback callback, + struct rt_virtqueue_formula *formula) +{ + struct rt_virtqueue *vq = RT_NULL; + struct rt_virtqueue_formula default_formula = {}; + + RT_ASSERT(vdev != RT_NULL); + RT_ASSERT(num != 0); + RT_ASSERT((num & (num - 1)) == 0); + RT_ASSERT(notify != RT_NULL); + RT_ASSERT(name != RT_NULL); + + formula = formula ? 
: &default_formula; + + if (rt_virtio_has_feature(vdev, VIRTIO_F_RING_PACKED)) + { + vq = vq_packed_create(vdev, name, index, num, align, notify, callback, formula); + } + else + { + vq = vq_split_create(vdev, name, index, num, align, notify, callback, formula); + } + + if (vq) + { + if (!callback) + { + rt_virtqueue_disable_callback(vq); + } + + rt_base_t level = rt_spin_lock_irqsave(&vdev->vq_lock); + + rt_list_init(&vq->list); + rt_list_init(&vq->user_list); + rt_list_insert_before(&vdev->vq_node, &vq->list); + + rt_spin_unlock_irqrestore(&vdev->vq_lock, level); + } + + return vq; +} + +rt_err_t rt_virtqueue_delete(struct rt_virtio_device *vdev, struct rt_virtqueue *vq) +{ + rt_ubase_t level; + rt_err_t err = RT_EOK; + + RT_ASSERT(vdev != RT_NULL); + RT_ASSERT(vq != RT_NULL); + + level = rt_spin_lock_irqsave(&vdev->vq_lock); + + while (virtqueue_pending(vq)) + { + rt_spin_unlock_irqrestore(&vdev->vq_lock, level); + + rt_thread_yield(); + + level = rt_spin_lock_irqsave(&vdev->vq_lock); + } + + rt_list_remove(&vq->list); + + rt_spin_unlock_irqrestore(&vdev->vq_lock, level); + + if (vq->packed_ring) + { + vq_packed_delete(vq); + } + else + { + vq_split_delete(vq); + } + + return err; +} + +rt_err_t rt_virtqueue_add_outbuf(struct rt_virtqueue *vq, void *dma_buf, rt_size_t size) +{ + rt_err_t err = -RT_ENOSYS; + + RT_ASSERT(vq != RT_NULL); + RT_ASSERT(dma_buf != RT_NULL); + RT_ASSERT(size != 0); + + if (vq->packed_ring) + { + err = vq_packed_add_buf(vq, dma_buf, size, RT_TRUE); + } + else + { + err = vq_split_add_buf(vq, dma_buf, size, RT_TRUE); + } + + return err; +} + +rt_err_t rt_virtqueue_add_inbuf(struct rt_virtqueue *vq, void *dma_buf, rt_size_t size) +{ + rt_err_t err = -RT_ENOSYS; + + RT_ASSERT(vq != RT_NULL); + RT_ASSERT(dma_buf != RT_NULL); + RT_ASSERT(size != 0); + + if (vq->packed_ring) + { + err = vq_packed_add_buf(vq, dma_buf, size, RT_FALSE); + } + else + { + err = vq_split_add_buf(vq, dma_buf, size, RT_FALSE); + } + + return err; +} + +rt_bool_t 
rt_virtqueue_prepare(struct rt_virtqueue *vq, rt_uint32_t nr) +{ + return vq->num_free >= nr; +} + +void rt_virtqueue_wait_prepare(struct rt_virtqueue *vq, rt_uint32_t nr) +{ + while (!rt_virtqueue_prepare(vq, nr)) + { + rt_thread_yield(); + rt_hw_cpu_relax(); + } +} + +rt_uint32_t rt_virtqueue_next_buf_index(struct rt_virtqueue *vq) +{ + RT_ASSERT(vq != RT_NULL); + + return vq->free_head % (vq->packed_ring ? + vq->vq_packed->virtq.num : vq->vq_split->virtq.num); +} + +rt_bool_t rt_virtqueue_submit(struct rt_virtqueue *vq) +{ + rt_bool_t res = RT_FALSE; + + RT_ASSERT(vq != RT_NULL); + + if (vq->packed_ring) + { + res = vq_packed_submit(vq); + } + else + { + res = vq_split_submit(vq); + } + + return res; +} + +rt_bool_t rt_virtqueue_notify(struct rt_virtqueue *vq) +{ + RT_ASSERT(vq != RT_NULL); + + if (!vq->notify(vq)) + { + return RT_FALSE; + } + + return RT_TRUE; +} + +rt_bool_t rt_virtqueue_kick(struct rt_virtqueue *vq) +{ + RT_ASSERT(vq != RT_NULL); + + if (rt_virtqueue_submit(vq)) + { + return rt_virtqueue_notify(vq); + } + + return RT_TRUE; +} + +void rt_virtqueue_isr(int irq, struct rt_virtqueue *vq) +{ + if (!virtqueue_pending(vq)) + { + LOG_D("%s.virtqueue[%s(%d)] no buffer pending in %s", + rt_dm_dev_get_name(&vq->vdev->parent), vq->name, vq->index, "isr"); + + return; + } + + if (vq->event) + { + vq->event_triggered = RT_TRUE; + } + + if (vq->callback) + { + vq->callback(vq); + } +} + +rt_bool_t rt_virtqueue_poll(struct rt_virtqueue *vq, rt_uint32_t last_used_idx) +{ + rt_bool_t res = RT_FALSE; + + RT_ASSERT(vq != RT_NULL); + + if (vq->packed_ring) + { + res = vq_packed_poll(vq, last_used_idx); + } + else + { + res = vq_split_poll(vq, last_used_idx); + } + + return res; +} + +void rt_virtqueue_disable_callback(struct rt_virtqueue *vq) +{ + if (vq->event_triggered) + { + return; + } + + if (vq->packed_ring) + { + vq_packed_disable_callback(vq); + } + else + { + vq_split_disable_callback(vq); + } +} + +rt_bool_t rt_virtqueue_enable_callback(struct 
rt_virtqueue *vq, + rt_uint32_t *out_last_used_idx) +{ + rt_uint32_t last_used_idx; + + if (vq->event_triggered) + { + vq->event_triggered = RT_FALSE; + } + + if (vq->packed_ring) + { + last_used_idx = vq_packed_enable_callback(vq); + } + else + { + last_used_idx = vq_split_enable_callback(vq); + } + + if (out_last_used_idx) + { + *out_last_used_idx = last_used_idx; + } + + return !rt_virtqueue_poll(vq, last_used_idx); +} + +void *rt_virtqueue_read_buf(struct rt_virtqueue *vq, rt_size_t *out_len) +{ + void *buf = RT_NULL; + rt_size_t len = 0; + + RT_ASSERT(vq != RT_NULL); + + if (vq->packed_ring) + { + buf = vq_packed_read_buf(vq, &len); + } + else + { + buf = vq_split_read_buf(vq, &len); + } + + if (len && out_len) + { + *out_len = len; + } + + return buf; +} + +rt_size_t rt_virtqueue_get_virtq_size(struct rt_virtqueue *vq) +{ + RT_ASSERT(vq != RT_NULL); + + if (vq->packed_ring) + { + return (rt_ubase_t)vq->vq_packed->virtq.num; + } + else + { + return (rt_ubase_t)vq->vq_split->virtq.num; + } +} + +rt_ubase_t rt_virtqueue_get_desc_addr(struct rt_virtqueue *vq) +{ + RT_ASSERT(vq != RT_NULL); + + if (vq->packed_ring) + { + return (rt_ubase_t)vq->vq_packed->virtq.desc; + } + else + { + return (rt_ubase_t)vq->vq_split->virtq.desc; + } +} + +rt_ubase_t rt_virtqueue_get_avail_addr(struct rt_virtqueue *vq) +{ + RT_ASSERT(vq != RT_NULL); + + if (vq->packed_ring) + { + return (rt_ubase_t)vq->vq_packed->virtq.driver_event; + } + else + { + return (rt_ubase_t)vq->vq_split->virtq.avail; + } +} + +rt_ubase_t rt_virtqueue_get_used_addr(struct rt_virtqueue *vq) +{ + RT_ASSERT(vq != RT_NULL); + + if (vq->packed_ring) + { + return (rt_ubase_t)vq->vq_packed->virtq.device_event; + } + else + { + return (rt_ubase_t)vq->vq_split->virtq.used; + } +} From 088bb877b4e57ab811848ab179c869861cbe94b7 Mon Sep 17 00:00:00 2001 From: GuEe-GUI <2991707448@qq.com> Date: Sun, 1 Mar 2026 13:10:54 +0800 Subject: [PATCH 2/3] [dm][firmware] support QEMU fw config Signed-off-by: GuEe-GUI 
<2991707448@qq.com> --- components/drivers/firmware/Kconfig | 5 + components/drivers/firmware/qemu/SConscript | 15 + components/drivers/firmware/qemu/fw_cfg.c | 518 ++++++++++++++++++++ components/drivers/firmware/qemu/fw_cfg.h | 109 ++++ 4 files changed, 647 insertions(+) create mode 100644 components/drivers/firmware/qemu/SConscript create mode 100644 components/drivers/firmware/qemu/fw_cfg.c create mode 100644 components/drivers/firmware/qemu/fw_cfg.h diff --git a/components/drivers/firmware/Kconfig b/components/drivers/firmware/Kconfig index 4261e6157ec..6aa179dde56 100755 --- a/components/drivers/firmware/Kconfig +++ b/components/drivers/firmware/Kconfig @@ -3,6 +3,11 @@ menuconfig RT_USING_FIRMWARE depends on RT_USING_DM default n +config RT_FIRMWARE_QEMU_FW_CFG + bool "QEMU Firmware Configuration" + depends on RT_USING_FIRMWARE + default n + if RT_USING_FIRMWARE rsource "arm_scmi/Kconfig" osource "$(SOC_DM_FIRMWARE_DIR)/Kconfig" diff --git a/components/drivers/firmware/qemu/SConscript b/components/drivers/firmware/qemu/SConscript new file mode 100644 index 00000000000..1f5108aa31e --- /dev/null +++ b/components/drivers/firmware/qemu/SConscript @@ -0,0 +1,15 @@ +from building import * + +group = [] + +if not GetDepend(['RT_FIRMWARE_QEMU_FW_CFG']): + Return('group') + +cwd = GetCurrentDir() +CPPPATH = [cwd + '/../../include'] + +src = Glob('*.c') + +group = DefineGroup('DeviceDrivers', src, depend = [''], CPPPATH = CPPPATH) + +Return('group') diff --git a/components/drivers/firmware/qemu/fw_cfg.c b/components/drivers/firmware/qemu/fw_cfg.c new file mode 100644 index 00000000000..20a535866ae --- /dev/null +++ b/components/drivers/firmware/qemu/fw_cfg.c @@ -0,0 +1,518 @@ +/* + * Copyright (c) 2006-2023, RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2023-02-25 GuEe-GUI the first version + */ + +#include +#include +#include +#include + +#define DBG_TAG "fw.qemu" +#define DBG_LVL DBG_INFO 
+#include + +#include +#include + +#include "fw_cfg.h" + +/* arch-specific ctrl & data register offsets are not available in ACPI, DT */ +#if !(defined(FW_CFG_CTRL_OFF) && defined(FW_CFG_DATA_OFF)) +# if (defined(ARCH_ARM) || defined(ARCH_ARMV8) || defined(ARCH_LOONGARCH) || defined(ARCH_RISCV)) +# define FW_CFG_CTRL_OFF 0x08 +# define FW_CFG_DATA_OFF 0x00 +# define FW_CFG_DMA_OFF 0x10 +# elif defined(ARCH_PARISC) /* parisc */ +# define FW_CFG_CTRL_OFF 0x00 +# define FW_CFG_DATA_OFF 0x04 +# elif (defined(ARCH_PPC) || defined(ARCH_SPARC32)) /* ppc/mac,sun4m */ +# define FW_CFG_CTRL_OFF 0x00 +# define FW_CFG_DATA_OFF 0x02 +# elif (defined(ARCH_IA32) || defined(ARCH_SPARC64)) /* x86, sun4u */ +# define FW_CFG_CTRL_OFF 0x00 +# define FW_CFG_DATA_OFF 0x01 +# define FW_CFG_DMA_OFF 0x04 +# else +# error "QEMU FW_CFG not available on this architecture!" +# endif +#endif + +struct fw_cfg_device +{ + struct rt_device parent; + + rt_uint32_t size; + rt_uint16_t select; + char name[FW_CFG_MAX_FILE_PATH]; +}; +#define raw_to_fw_cfg_device(raw) rt_container_of(raw, struct fw_cfg_device, parent) + +static void *_fw_cfg_dev_base; +static void *_fw_cfg_reg_ctrl; +static void *_fw_cfg_reg_data; +static void *_fw_cfg_reg_dma; +static rt_bool_t _fw_cfg_is_mmio; +static rt_uint32_t _fw_cfg_rev = 0; + +static RT_DEFINE_SPINLOCK(_fw_cfg_dev_lock); + +static void fw_cfg_sel_endianness(rt_uint16_t key) +{ + if (_fw_cfg_is_mmio) + { + HWREG16(_fw_cfg_reg_ctrl) = rt_cpu_to_be16(key); + } + else + { + HWREG16(_fw_cfg_reg_ctrl) = key; + } +} + +static rt_base_t fw_cfg_read_blob(rt_uint16_t key, void *buf, rt_off_t pos, rt_size_t count) +{ + rt_uint8_t tmp; + + rt_spin_lock(&_fw_cfg_dev_lock); + fw_cfg_sel_endianness(key); + + while (pos-- > 0) + { + tmp = HWREG8(_fw_cfg_reg_data); + } + + if (count) + { + int loop = count; + rt_uint8_t *buffer = buf; + + do { + tmp = HWREG8(_fw_cfg_reg_data); + *buffer++ = tmp; + } while (--loop); + } + + rt_spin_unlock(&_fw_cfg_dev_lock); + + return count; +} 
+ +rt_inline rt_bool_t fw_cfg_dma_enabled(void) +{ + return (_fw_cfg_rev & FW_CFG_VERSION_DMA) && _fw_cfg_reg_dma; +} + +/* qemu fw_cfg device is sync today, but spec says it may become async */ +rt_inline void fw_cfg_wait_for_control(struct fw_cfg_dma_access *dma) +{ + for (;;) + { + rt_uint32_t ctrl = rt_be32_to_cpu(HWREG32(&dma->control)); + + /* Do not reorder the read to dma->control */ + rt_hw_rmb(); + + if ((ctrl & ~FW_CFG_DMA_CTL_ERROR) == 0) + { + break; + } + + rt_hw_cpu_relax(); + } +} + +rt_inline rt_base_t fw_cfg_dma_transfer(void *address, rt_uint32_t length, rt_uint32_t control) +{ + rt_ubase_t dma_pa; + rt_base_t res = length; + struct fw_cfg_dma_access dma = + { + .address = rt_cpu_to_be64((rt_uint64_t)(address ? rt_kmem_v2p(address) : 0)), + .length = rt_cpu_to_be32(length), + .control = rt_cpu_to_be32(control), + }; + + dma_pa = (rt_ubase_t)rt_kmem_v2p(&dma); + + HWREG32(_fw_cfg_reg_dma) = rt_cpu_to_be32((rt_uint64_t)dma_pa >> 32); + /* Force memory to sync before notifying device via MMIO */ + rt_hw_wmb(); + HWREG32(_fw_cfg_reg_dma + 4) = rt_cpu_to_be32(dma_pa); + + fw_cfg_wait_for_control(&dma); + + if ((rt_be32_to_cpu(HWREG32(&dma.control)) & FW_CFG_DMA_CTL_ERROR)) + { + res = -RT_EIO; + } + + return res; +} + +rt_inline rt_base_t fw_cfg_write_blob(rt_uint16_t key, void *buf, rt_off_t pos, rt_size_t count) +{ + rt_base_t res = count; + + rt_spin_lock(&_fw_cfg_dev_lock); + + if (pos == 0) + { + res = fw_cfg_dma_transfer(buf, count, key << 16 | FW_CFG_DMA_CTL_SELECT | FW_CFG_DMA_CTL_WRITE); + } + else + { + fw_cfg_sel_endianness(key); + res = fw_cfg_dma_transfer(RT_NULL, pos, FW_CFG_DMA_CTL_SKIP); + + if (res >= 0) + { + res = fw_cfg_dma_transfer(buf, count, FW_CFG_DMA_CTL_WRITE); + } + } + + rt_spin_unlock(&_fw_cfg_dev_lock); + + return res; +} + +#ifdef RT_GRAPHIC_FB +struct ramfb_device +{ + struct rt_graphic_device parent; + + const struct fw_cfg_file *file; +}; + +extern void platform_get_ramfb_params(rt_uint32_t *width, rt_uint32_t 
*height); + +static rt_uint32_t ramfb_formats[] = +{ +#define fourcc_code(a, b, c, d) \ + ((rt_uint32_t)(a) | \ + ((rt_uint32_t)(b) << 8) | \ + ((rt_uint32_t)(c) << 16) | \ + ((rt_uint32_t)(d) << 24)) + + fourcc_code('A', 'R', '2', '4'), + fourcc_code('A', 'B', '2', '4'), + fourcc_code('R', 'G', '2', '4'), +}; + +static rt_uint32_t ramfb_modes[] = +{ + RTGRAPHIC_PIXEL_FORMAT_ARGB888, + RTGRAPHIC_PIXEL_FORMAT_ABGR888, + RTGRAPHIC_PIXEL_FORMAT_RGB888, +}; + +static rt_err_t ramfb_plane_fb_remap(struct rt_graphic_plane *plane, + rt_uint32_t mode, struct rt_device_rect_info *rect) +{ + void *framebuffer; + rt_size_t framebuffer_size; + rt_uint32_t stride, format, bpp; + struct fw_cfg_ram_fb ram_fb; + struct ramfb_device *ramfb = rt_container_of(plane->graphic, struct ramfb_device, parent); + + bpp = rt_graphic_mode_bpp(mode); + stride = RT_ALIGN(rect->width * (bpp / 8), sizeof(rt_uint32_t)); + framebuffer_size = rect->height * stride; + framebuffer = rt_malloc_align(framebuffer_size, ARCH_PAGE_SIZE); + + if (!framebuffer) + { + return -RT_ENOMEM; + } + + for (int i = 0; i < RT_ARRAY_SIZE(ramfb_modes); ++i) + { + if (mode == ramfb_modes[i]) + { + format = ramfb_formats[i]; + break; + } + } + + ram_fb.addr = rt_cpu_to_be64((rt_ubase_t)rt_kmem_v2p(framebuffer)); + ram_fb.fourcc = rt_cpu_to_be32(format); + ram_fb.flags = rt_cpu_to_be32(0); + ram_fb.width = rt_cpu_to_be32(rect->width); + ram_fb.height = rt_cpu_to_be32(rect->height); + ram_fb.stride = rt_cpu_to_be32(stride); + + if (fw_cfg_write_blob(rt_be16_to_cpu(ramfb->file->select), + &ram_fb, 0, sizeof(struct fw_cfg_ram_fb)) < 0) + { + rt_free_align(framebuffer); + return -RT_ERROR; + } + + if (plane->framebuffer) + { + rt_free_align(plane->framebuffer); + } + + plane->bits_per_pixel = bpp; + plane->line_length = stride; + + plane->framebuffer = framebuffer; + plane->screen_len = framebuffer_size; + plane->framebuffer_len = framebuffer_size; + + return RT_EOK; +} + +const struct rt_graphic_plane_ops ramfb_plane_ops = +{ 
+ .fb_remap = ramfb_plane_fb_remap, +}; + +static rt_err_t fw_cfg_setup_ramfb(const struct fw_cfg_file *file) +{ + rt_err_t err; + rt_uint32_t width, height; + struct ramfb_device *ramfb = rt_calloc(1, sizeof(*ramfb)); + + if (!ramfb) + { + return -RT_ENOMEM; + } + + ramfb->file = file; + platform_get_ramfb_params(&width, &height); + + if ((err = rt_graphic_device_simple_register(&ramfb->parent, + width, height, 0, &ramfb_plane_ops, + ramfb_modes, RT_ARRAY_SIZE(ramfb_modes)))) + { + rt_free(ramfb); + } + + return err; +} +#endif /* RT_GRAPHIC_FB */ + +static rt_ssize_t fw_cfg_read(rt_device_t dev, rt_off_t pos, void *buffer, rt_size_t size) +{ + rt_ssize_t res; + struct fw_cfg_device *fw = raw_to_fw_cfg_device(dev); + + if (pos <= fw->size) + { + if (size > fw->size - pos) + { + size = fw->size - pos; + } + + res = fw_cfg_read_blob(fw->select, buffer, pos, size); + } + else + { + res = 0; + } + + return res; +} + +static struct rt_device_ops fw_cfg_ops = +{ + .read = fw_cfg_read, +}; + +static const char * const fw_export[] = +{ + "etc/acpi/rsdp", "acpi_rsdp", + "etc/acpi/tables", "acpi_tables", +}; + +static rt_err_t fw_cfg_register_file(const struct fw_cfg_file *file) +{ + rt_err_t err; + const char *name = RT_NULL; + struct fw_cfg_device *fw; + +#ifdef RT_GRAPHIC_FB + if (fw_cfg_dma_enabled() && !rt_strcmp(file->name, FW_CFG_RAMFB_FILENAME)) + { + if (fw_cfg_setup_ramfb(file) < 0) + { + LOG_W("failed to setup ramfb"); + } + + return RT_EOK; + } +#endif /* RT_GRAPHIC_FB */ + + for (int i = 0; i < RT_ARRAY_SIZE(fw_export); i += 2) + { + if (!rt_strcmp(fw_export[i], file->name)) + { + name = fw_export[i + 1]; + break; + } + } + + if (!name) + { + return RT_EOK; + } + + if (!(fw = rt_malloc(sizeof(*fw)))) + { + return -RT_ENOMEM; + } + + fw->size = rt_be32_to_cpu(file->size); + fw->select = rt_be16_to_cpu(file->select); + rt_strncpy(fw->name, file->name, FW_CFG_MAX_FILE_PATH); + + fw->parent.type = RT_Device_Class_Char; +#ifdef RT_USING_DEVICE_OPS + fw->parent.ops = 
&fw_cfg_ops; +#else + fw->parent.read = fw_cfg_read, +#endif + + if ((err = rt_device_register(&fw->parent, name, RT_DEVICE_FLAG_RDONLY))) + { + rt_free(fw); + return err; + } + + return RT_EOK; +} + +static rt_err_t fw_cfg_register_dir_entries(void) +{ + rt_err_t err = 0; + rt_uint32_t count; + rt_size_t dir_size; + rt_be32_t files_count; + struct fw_cfg_file *dir; + + err = fw_cfg_read_blob(FW_CFG_FILE_DIR, &files_count, 0, sizeof(files_count)); + + if (err < 0) + { + return err; + } + + count = rt_be32_to_cpu(files_count); + dir_size = count * sizeof(struct fw_cfg_file); + + dir = rt_malloc(dir_size); + + if (!dir) + { + return -RT_ENOMEM; + } + + err = fw_cfg_read_blob(FW_CFG_FILE_DIR, dir, sizeof(files_count), dir_size); + + if (err < 0) + { + return err; + } + + for (int i = 0; i < count; ++i) + { + if ((err = fw_cfg_register_file(&dir[i]))) + { + break; + } + } + + rt_free(dir); + + return err; +} + +static rt_err_t qemu_fw_cfg_probe(struct rt_platform_device *pdev) +{ + rt_le32_t rev; + rt_err_t err = RT_EOK; + char sig[FW_CFG_SIG_SIZE]; + rt_uint32_t ctrl = FW_CFG_CTRL_OFF, data = FW_CFG_DATA_OFF, dma; + struct rt_device *dev = &pdev->parent; + +#ifdef FW_CFG_DMA_OFF + dma = FW_CFG_DMA_OFF; +#else + dma = RT_UINT32_MAX; +#endif + + rt_dm_dev_prop_read_u32(dev, "ctrl", &ctrl); + rt_dm_dev_prop_read_u32(dev, "data", &data); + rt_dm_dev_prop_read_u32(dev, "dma", &dma); + + if (!(_fw_cfg_dev_base = rt_dm_dev_iomap(dev, 0))) + { + err = -RT_EIO; + goto _fail; + } + +#ifdef ARCH_SUPPORT_PIO + _fw_cfg_is_mmio = RT_FALSE; +#else + _fw_cfg_is_mmio = RT_TRUE; +#endif + + _fw_cfg_reg_ctrl = _fw_cfg_dev_base + ctrl; + _fw_cfg_reg_data = _fw_cfg_dev_base + data; + + if (dma != RT_UINT32_MAX) + { + _fw_cfg_reg_dma = _fw_cfg_dev_base + dma; + } + else + { + _fw_cfg_reg_dma = RT_NULL; + } + + if (fw_cfg_read_blob(FW_CFG_SIGNATURE, sig, 0, FW_CFG_SIG_SIZE) < 0 || + rt_memcmp(sig, "QEMU", FW_CFG_SIG_SIZE)) + { + err = -RT_ENOSYS; + goto _fail; + } + + if 
(fw_cfg_read_blob(FW_CFG_ID, &rev, 0, sizeof(rev)) < 0) + { + err = -RT_ENOSYS; + goto _fail; + } + + _fw_cfg_rev = rt_le32_to_cpu(rev); + + fw_cfg_register_dir_entries(); + +_fail: + return err; +} + +static const struct rt_ofw_node_id qemu_fw_cfg_ofw_ids[] = +{ + { .compatible = "qemu,fw-cfg-mmio", }, + { /* sentinel */ } +}; + +static struct rt_platform_driver qemu_fw_cfg_driver = +{ + .name = "qemu-fw-cfg", + .ids = qemu_fw_cfg_ofw_ids, + + .probe = qemu_fw_cfg_probe, +}; + +static int qemu_fw_cfg_drv_register(void) +{ + rt_platform_driver_register(&qemu_fw_cfg_driver); + + return 0; +} +INIT_SUBSYS_EXPORT(qemu_fw_cfg_drv_register); diff --git a/components/drivers/firmware/qemu/fw_cfg.h b/components/drivers/firmware/qemu/fw_cfg.h new file mode 100644 index 00000000000..b0719de77f7 --- /dev/null +++ b/components/drivers/firmware/qemu/fw_cfg.h @@ -0,0 +1,109 @@ +/* + * Copyright (c) 2006-2023, RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2023-02-25 GuEe-GUI the first version + */ + +#ifndef __QEMU_FW_CFG_H__ +#define __QEMU_FW_CFG_H__ + +#include +#include + +#define FW_CFG_ACPI_DEVICE_ID "QEMU0002" + +/* selector key values for "well-known" fw_cfg entries */ +#define FW_CFG_SIGNATURE 0x00 +#define FW_CFG_ID 0x01 +#define FW_CFG_UUID 0x02 +#define FW_CFG_RAM_SIZE 0x03 +#define FW_CFG_NOGRAPHIC 0x04 +#define FW_CFG_NB_CPUS 0x05 +#define FW_CFG_MACHINE_ID 0x06 +#define FW_CFG_KERNEL_ADDR 0x07 +#define FW_CFG_KERNEL_SIZE 0x08 +#define FW_CFG_KERNEL_CMDLINE 0x09 +#define FW_CFG_INITRD_ADDR 0x0a +#define FW_CFG_INITRD_SIZE 0x0b +#define FW_CFG_BOOT_DEVICE 0x0c +#define FW_CFG_NUMA 0x0d +#define FW_CFG_BOOT_MENU 0x0e +#define FW_CFG_MAX_CPUS 0x0f +#define FW_CFG_KERNEL_ENTRY 0x10 +#define FW_CFG_KERNEL_DATA 0x11 +#define FW_CFG_INITRD_DATA 0x12 +#define FW_CFG_CMDLINE_ADDR 0x13 +#define FW_CFG_CMDLINE_SIZE 0x14 +#define FW_CFG_CMDLINE_DATA 0x15 +#define FW_CFG_SETUP_ADDR 0x16 +#define 
FW_CFG_SETUP_SIZE 0x17 +#define FW_CFG_SETUP_DATA 0x18 +#define FW_CFG_FILE_DIR 0x19 + +#define FW_CFG_FILE_FIRST 0x20 +#define FW_CFG_FILE_SLOTS_MIN 0x10 + +#define FW_CFG_WRITE_CHANNEL 0x4000 +#define FW_CFG_ARCH_LOCAL 0x8000 +#define FW_CFG_ENTRY_MASK (~(FW_CFG_WRITE_CHANNEL | FW_CFG_ARCH_LOCAL)) + +#define FW_CFG_INVALID 0xffff + +/* width in bytes of fw_cfg control register */ +#define FW_CFG_CTL_SIZE 0x02 + +/* fw_cfg "file name" is up to 56 characters (including terminating nul) */ +#define FW_CFG_MAX_FILE_PATH 56 + +/* size in bytes of fw_cfg signature */ +#define FW_CFG_SIG_SIZE 4 + +/* FW_CFG_ID bits */ +#define FW_CFG_VERSION 0x01 +#define FW_CFG_VERSION_DMA 0x02 + +/* fw_cfg file directory entry type */ +struct fw_cfg_file +{ + rt_be32_t size; + rt_be16_t select; + rt_le16_t reserved; + char name[FW_CFG_MAX_FILE_PATH]; +}; + +/* FW_CFG_DMA_CONTROL bits */ +#define FW_CFG_DMA_CTL_ERROR 0x01 +#define FW_CFG_DMA_CTL_READ 0x02 +#define FW_CFG_DMA_CTL_SKIP 0x04 +#define FW_CFG_DMA_CTL_SELECT 0x08 +#define FW_CFG_DMA_CTL_WRITE 0x10 + +#define FW_CFG_DMA_SIGNATURE 0x51454d5520434647ULL /* "QEMU CFG" */ + +/* Control as first field allows for different structures selected by this + * field, which might be useful in the future + */ +rt_packed(struct fw_cfg_dma_access +{ + rt_be32_t control; + rt_be32_t length; + rt_be64_t address; +}); + +#define FW_CFG_RAMFB_FILENAME "etc/ramfb" + +rt_packed(struct fw_cfg_ram_fb +{ + rt_be64_t addr; + rt_be32_t fourcc; + rt_be32_t flags; + rt_be32_t width; + rt_be32_t height; + rt_be32_t stride; +}); + +#endif /* __QEMU_FW_CFG_H__ */ From 63693d06712d24d114b574b014a155054dd3f623 Mon Sep 17 00:00:00 2001 From: GuEe-GUI <2991707448@qq.com> Date: Sun, 1 Mar 2026 13:11:32 +0800 Subject: [PATCH 3/3] [bsp][qemu-virt64-aarch64] support full dm Signed-off-by: GuEe-GUI <2991707448@qq.com> --- bsp/qemu-virt64-aarch64/.config | 204 +++++++---- bsp/qemu-virt64-aarch64/.gitignore | 2 + bsp/qemu-virt64-aarch64/Kconfig | 11 +- 
.../applications/console.c | 33 -- .../applications/graphic.c | 224 ------------ bsp/qemu-virt64-aarch64/applications/mnt.c | 41 --- bsp/qemu-virt64-aarch64/applications/pin.c | 33 -- bsp/qemu-virt64-aarch64/drivers/Kconfig | 80 ----- bsp/qemu-virt64-aarch64/drivers/SConscript | 0 bsp/qemu-virt64-aarch64/drivers/board.c | 6 - bsp/qemu-virt64-aarch64/drivers/board.h | 9 +- bsp/qemu-virt64-aarch64/drivers/drv_gpio.c | 318 ------------------ bsp/qemu-virt64-aarch64/drivers/drv_gpio.h | 16 - bsp/qemu-virt64-aarch64/drivers/drv_romfb.c | 21 ++ bsp/qemu-virt64-aarch64/drivers/drv_rtc.c | 121 ------- bsp/qemu-virt64-aarch64/drivers/drv_rtc.h | 25 -- bsp/qemu-virt64-aarch64/drivers/drv_uart.c | 146 -------- bsp/qemu-virt64-aarch64/drivers/drv_uart.h | 16 - bsp/qemu-virt64-aarch64/drivers/drv_virtio.c | 93 ----- bsp/qemu-virt64-aarch64/drivers/drv_virtio.h | 16 - bsp/qemu-virt64-aarch64/drivers/virt.h | 96 ------ bsp/qemu-virt64-aarch64/qemu-debug.bat | 9 - bsp/qemu-virt64-aarch64/qemu-debug.sh | 8 - bsp/qemu-virt64-aarch64/qemu-graphic.bat | 13 - bsp/qemu-virt64-aarch64/qemu-graphic.sh | 12 - bsp/qemu-virt64-aarch64/qemu.bat | 9 - bsp/qemu-virt64-aarch64/qemu.py | 288 ++++++++++++++++ bsp/qemu-virt64-aarch64/qemu.sh | 8 - bsp/qemu-virt64-aarch64/rtconfig.h | 134 ++++++-- libcpu/aarch64/common/trap.c | 3 + src/utest/smp/smp_interrupt_pri_tc.c | 7 + 31 files changed, 564 insertions(+), 1438 deletions(-) create mode 100755 bsp/qemu-virt64-aarch64/.gitignore delete mode 100644 bsp/qemu-virt64-aarch64/applications/console.c delete mode 100644 bsp/qemu-virt64-aarch64/applications/graphic.c delete mode 100644 bsp/qemu-virt64-aarch64/applications/mnt.c delete mode 100644 bsp/qemu-virt64-aarch64/applications/pin.c delete mode 100644 bsp/qemu-virt64-aarch64/drivers/Kconfig mode change 100644 => 100755 bsp/qemu-virt64-aarch64/drivers/SConscript delete mode 100644 bsp/qemu-virt64-aarch64/drivers/drv_gpio.c delete mode 100644 bsp/qemu-virt64-aarch64/drivers/drv_gpio.h create mode 
100755 bsp/qemu-virt64-aarch64/drivers/drv_romfb.c delete mode 100644 bsp/qemu-virt64-aarch64/drivers/drv_rtc.c delete mode 100644 bsp/qemu-virt64-aarch64/drivers/drv_rtc.h delete mode 100644 bsp/qemu-virt64-aarch64/drivers/drv_uart.c delete mode 100644 bsp/qemu-virt64-aarch64/drivers/drv_uart.h delete mode 100644 bsp/qemu-virt64-aarch64/drivers/drv_virtio.c delete mode 100644 bsp/qemu-virt64-aarch64/drivers/drv_virtio.h delete mode 100644 bsp/qemu-virt64-aarch64/drivers/virt.h delete mode 100644 bsp/qemu-virt64-aarch64/qemu-debug.bat delete mode 100644 bsp/qemu-virt64-aarch64/qemu-debug.sh delete mode 100644 bsp/qemu-virt64-aarch64/qemu-graphic.bat delete mode 100644 bsp/qemu-virt64-aarch64/qemu-graphic.sh delete mode 100644 bsp/qemu-virt64-aarch64/qemu.bat create mode 100755 bsp/qemu-virt64-aarch64/qemu.py delete mode 100755 bsp/qemu-virt64-aarch64/qemu.sh diff --git a/bsp/qemu-virt64-aarch64/.config b/bsp/qemu-virt64-aarch64/.config index 62c0d402b65..78ad43a6963 100644 --- a/bsp/qemu-virt64-aarch64/.config +++ b/bsp/qemu-virt64-aarch64/.config @@ -11,18 +11,8 @@ # rt_vsnprintf options # # CONFIG_RT_KLIBC_USING_LIBC_VSNPRINTF is not set -CONFIG_RT_KLIBC_USING_VSNPRINTF_LONGLONG=y -CONFIG_RT_KLIBC_USING_VSNPRINTF_STANDARD=y -CONFIG_RT_KLIBC_USING_VSNPRINTF_DECIMAL_SPECIFIERS=y -CONFIG_RT_KLIBC_USING_VSNPRINTF_EXPONENTIAL_SPECIFIERS=y -CONFIG_RT_KLIBC_USING_VSNPRINTF_WRITEBACK_SPECIFIER=y -CONFIG_RT_KLIBC_USING_VSNPRINTF_CHECK_NUL_IN_FORMAT_SPECIFIER=y -# CONFIG_RT_KLIBC_USING_VSNPRINTF_MSVC_STYLE_INTEGER_SPECIFIERS is not set -CONFIG_RT_KLIBC_USING_VSNPRINTF_INTEGER_BUFFER_SIZE=32 -CONFIG_RT_KLIBC_USING_VSNPRINTF_DECIMAL_BUFFER_SIZE=32 -CONFIG_RT_KLIBC_USING_VSNPRINTF_FLOAT_PRECISION=6 -CONFIG_RT_KLIBC_USING_VSNPRINTF_MAX_INTEGRAL_DIGITS_FOR_DECIMAL=9 -CONFIG_RT_KLIBC_USING_VSNPRINTF_LOG10_TAYLOR_TERMS=4 +# CONFIG_RT_KLIBC_USING_VSNPRINTF_LONGLONG is not set +# CONFIG_RT_KLIBC_USING_VSNPRINTF_STANDARD is not set # end of rt_vsnprintf options # @@ -197,7 +187,8 @@ 
CONFIG_RT_USING_INTERRUPT_INFO=y CONFIG_RT_USING_CONSOLE=y CONFIG_RT_CONSOLEBUF_SIZE=256 CONFIG_RT_CONSOLE_DEVICE_NAME="uart0" -CONFIG_RT_VER_NUM=0x50201 +CONFIG_RT_USING_CONSOLE_OUTPUT_CTL=y +CONFIG_RT_VER_NUM=0x50300 CONFIG_RT_USING_STDC_ATOMIC=y CONFIG_RT_BACKTRACE_LEVEL_MAX_NR=32 # end of RT-Thread Kernel @@ -291,6 +282,7 @@ CONFIG_RT_DFS_ELM_MUTEX_TIMEOUT=3000 # end of elm-chan's FatFs, Generic FAT Filesystem Module CONFIG_RT_USING_DFS_DEVFS=y +CONFIG_RT_USING_DFS_9PFS=y # CONFIG_RT_USING_DFS_ISO9660 is not set CONFIG_RT_USING_DFS_ROMFS=y # CONFIG_RT_USING_DFS_ROMFS_USER_ROOT is not set @@ -318,8 +310,15 @@ CONFIG_RT_USING_SERIAL_V1=y CONFIG_RT_SERIAL_USING_DMA=y CONFIG_RT_SERIAL_RB_BUFSZ=256 # CONFIG_RT_USING_SERIAL_BYPASS is not set +# CONFIG_RT_SERIAL_EARLY_HVC is not set +CONFIG_RT_SERIAL_PL011=y +CONFIG_RT_SERIAL_8250=y +# CONFIG_RT_SERIAL_8250_DW is not set +CONFIG_RT_SERIAL_8250_PCI=y +# CONFIG_RT_SERIAL_VIRTUAL is not set # CONFIG_RT_USING_CAN is not set -# CONFIG_RT_USING_CPUTIME is not set +CONFIG_RT_USING_CLOCK_TIME=y +CONFIG_RT_CLOCK_TIME_ARM_ARCH=y # CONFIG_RT_USING_I2C is not set # CONFIG_RT_USING_PHY is not set # CONFIG_RT_USING_PHY_V2 is not set @@ -331,56 +330,156 @@ CONFIG_RT_USING_RANDOM=y # CONFIG_RT_USING_PWM is not set # CONFIG_RT_USING_PULSE_ENCODER is not set # CONFIG_RT_USING_INPUT_CAPTURE is not set -# CONFIG_RT_USING_MTD_NOR is not set +CONFIG_RT_USING_MTD_NOR=y +CONFIG_RT_USING_MTD_NOR_CFI=y # CONFIG_RT_USING_MTD_NAND is not set -CONFIG_RT_USING_PM=y -CONFIG_PM_TICKLESS_THRESHOLD_TIME=2 -# CONFIG_PM_USING_CUSTOM_CONFIG is not set -# CONFIG_PM_ENABLE_DEBUG is not set -# CONFIG_PM_ENABLE_SUSPEND_SLEEP_MODE is not set -# CONFIG_PM_ENABLE_THRESHOLD_SLEEP_MODE is not set +# CONFIG_RT_USING_PM is not set CONFIG_RT_USING_RTC=y -# CONFIG_RT_USING_ALARM is not set +CONFIG_RT_USING_ALARM=y +CONFIG_RT_ALARM_STACK_SIZE=8192 +CONFIG_RT_ALARM_TIMESLICE=5 +CONFIG_RT_ALARM_PRIORITY=10 +# CONFIG_RT_ALARM_USING_LOCAL_TIME is not set 
CONFIG_RT_USING_SOFT_RTC=y +# CONFIG_RT_RTC_GOLDFISH is not set +CONFIG_RT_RTC_PL031=y # CONFIG_RT_USING_SDIO is not set # CONFIG_RT_USING_SPI is not set -# CONFIG_RT_USING_WDT is not set +CONFIG_RT_USING_WDT=y +CONFIG_RT_WDT_I6300ESB=y # CONFIG_RT_USING_AUDIO is not set # CONFIG_RT_USING_SENSOR is not set # CONFIG_RT_USING_TOUCH is not set -# CONFIG_RT_USING_LCD is not set -# CONFIG_RT_USING_HWCRYPTO is not set +CONFIG_RT_USING_LCD=y +CONFIG_RT_USING_GRAPHIC=y +# CONFIG_RT_GRAPHIC_BACKLIGHT is not set +CONFIG_RT_GRAPHIC_FB=y +# CONFIG_RT_GRAPHIC_FB_SIMPLE is not set +CONFIG_RT_GRAPHIC_LOGO=y +# CONFIG_RT_GRAPHIC_LOGO_NONE is not set +CONFIG_RT_GRAPHIC_LOGO_RT_THREAD_CLUT224=y +# CONFIG_RT_GRAPHIC_LOGO_RT_THREAD_WHITE_CLUT224 is not set +CONFIG_RT_USING_HWCRYPTO=y +CONFIG_RT_HWCRYPTO_DEFAULT_NAME="hwcryto" +CONFIG_RT_HWCRYPTO_IV_MAX_SIZE=16 +CONFIG_RT_HWCRYPTO_KEYBIT_MAX_SIZE=256 +# CONFIG_RT_HWCRYPTO_USING_GCM is not set +CONFIG_RT_HWCRYPTO_USING_AES=y +CONFIG_RT_HWCRYPTO_USING_AES_ECB=y +CONFIG_RT_HWCRYPTO_USING_AES_CBC=y +# CONFIG_RT_HWCRYPTO_USING_AES_CFB is not set +CONFIG_RT_HWCRYPTO_USING_AES_CTR=y +# CONFIG_RT_HWCRYPTO_USING_AES_OFB is not set +CONFIG_RT_HWCRYPTO_USING_DES=y +CONFIG_RT_HWCRYPTO_USING_DES_ECB=y +CONFIG_RT_HWCRYPTO_USING_DES_CBC=y +CONFIG_RT_HWCRYPTO_USING_3DES=y +CONFIG_RT_HWCRYPTO_USING_3DES_ECB=y +CONFIG_RT_HWCRYPTO_USING_3DES_CBC=y +CONFIG_RT_HWCRYPTO_USING_RC4=y +# CONFIG_RT_HWCRYPTO_USING_MD5 is not set +# CONFIG_RT_HWCRYPTO_USING_SHA1 is not set +# CONFIG_RT_HWCRYPTO_USING_SHA2 is not set +CONFIG_RT_HWCRYPTO_USING_RNG=y +# CONFIG_RT_HWCRYPTO_USING_CRC is not set +# CONFIG_RT_HWCRYPTO_USING_BIGNUM is not set # CONFIG_RT_USING_WIFI is not set # CONFIG_RT_USING_LED is not set +CONFIG_RT_USING_INPUT=y +CONFIG_RT_INPUT_POWER=y +# CONFIG_RT_INPUT_UAPI is not set +# CONFIG_RT_INPUT_JOYSTICK is not set +CONFIG_RT_INPUT_KEYBOARD=y +CONFIG_RT_INPUT_KEYBOARD_GPIO=y +# CONFIG_RT_INPUT_MISC is not set +# CONFIG_RT_INPUT_TOUCHSCREEN is not set # 
CONFIG_RT_USING_MBOX is not set +# CONFIG_RT_USING_HWSPINLOCK is not set # CONFIG_RT_USING_PHYE is not set -# CONFIG_RT_USING_BLK is not set -# CONFIG_RT_USING_SCSI is not set -# CONFIG_RT_USING_REGULATOR is not set +CONFIG_RT_USING_ATA=y +CONFIG_RT_ATA_AHCI=y +CONFIG_RT_ATA_AHCI_PCI=y +# CONFIG_RT_USING_NVME is not set +CONFIG_RT_USING_BLK=y + +# +# Partition Types +# +CONFIG_RT_BLK_PARTITION_DFS=y +CONFIG_RT_BLK_PARTITION_EFI=y +# end of Partition Types + +CONFIG_RT_USING_SCSI=y +CONFIG_RT_SCSI_SD=y +CONFIG_RT_SCSI_CDROM=y +CONFIG_RT_USING_FIRMWARE=y +CONFIG_RT_FIRMWARE_QEMU_FW_CFG=y +# CONFIG_RT_FIRMWARE_ARM_SCMI is not set +# CONFIG_RT_USING_HWCACHE is not set +CONFIG_RT_USING_REGULATOR=y +# CONFIG_RT_REGULATOR_GPIO is not set # CONFIG_RT_USING_RESET is not set + +# +# Power Management (PM) Domains device drivers +# +# end of Power Management (PM) Domains device drivers + +# CONFIG_RT_USING_POWER_RESET is not set +# CONFIG_RT_USING_POWER_SUPPLY is not set # CONFIG_RT_USING_THERMAL is not set CONFIG_RT_USING_VIRTIO=y -CONFIG_RT_USING_VIRTIO10=y -CONFIG_RT_USING_VIRTIO_MMIO_ALIGN=y -CONFIG_RT_USING_VIRTIO_BLK=y -# CONFIG_RT_USING_VIRTIO_NET is not set -CONFIG_RT_USING_VIRTIO_CONSOLE=y -CONFIG_RT_USING_VIRTIO_CONSOLE_PORT_MAX_NR=4 -CONFIG_RT_USING_VIRTIO_GPU=y -CONFIG_RT_USING_VIRTIO_INPUT=y -# CONFIG_RT_USING_DMA is not set -# CONFIG_RT_USING_MFD is not set +CONFIG_RT_VIRTIO_TRANSPORT_MMIO=y +CONFIG_RT_VIRTIO_TRANSPORT_PCI=y +CONFIG_RT_VIRTIO_BLK=y +CONFIG_RT_VIRTIO_CONSOLE=y +CONFIG_RT_VIRTIO_RNG=y +CONFIG_RT_VIRTIO_SCSI=y +# CONFIG_RT_VIRTIO_9P is not set +CONFIG_RT_VIRTIO_RPROC_SERIAL=y +CONFIG_RT_VIRTIO_GPU=y +CONFIG_RT_VIRTIO_CRYPTO=y +# CONFIG_RT_USING_NVMEM is not set +CONFIG_RT_USING_DMA=y +CONFIG_RT_USING_MFD=y +CONFIG_RT_MFD_EDU=y +CONFIG_RT_MFD_SYSCON=y CONFIG_RT_USING_OFW=y # CONFIG_RT_USING_BUILTIN_FDT is not set CONFIG_RT_FDT_EARLYCON_MSG_SIZE=128 CONFIG_RT_USING_OFW_BUS_RANGES_NUMBER=8 -# CONFIG_RT_USING_PIC is not set +CONFIG_RT_USING_PCI=y 
+CONFIG_RT_PCI_MSI=y +# CONFIG_RT_PCI_ENDPOINT is not set +CONFIG_RT_PCI_SYS_64BIT=y +CONFIG_RT_PCI_CACHE_LINE_SIZE=8 +# CONFIG_RT_PCI_LOCKLESS is not set + +# +# PCI Device Drivers +# +CONFIG_RT_PCI_ECAM=y +CONFIG_RT_PCI_HOST_COMMON=y +CONFIG_RT_PCI_HOST_GENERIC=y +# CONFIG_RT_PCI_DW is not set +CONFIG_RT_USING_PIC=y +# CONFIG_RT_USING_PIC_STATISTICS is not set +CONFIG_MAX_HANDLERS=512 +CONFIG_RT_PIC_ARM_GIC=y +CONFIG_RT_PIC_ARM_GIC_V2M=y +CONFIG_RT_PIC_ARM_GIC_V3=y +CONFIG_RT_PIC_ARM_GIC_V3_ITS=y +CONFIG_RT_PIC_ARM_GIC_V3_ITS_IRQ_MAX=127 +CONFIG_RT_PIC_ARM_GIC_MAX_NR=1 CONFIG_RT_USING_PIN=y +CONFIG_RT_PIN_PL061=y # CONFIG_RT_USING_PINCTRL is not set -CONFIG_RT_USING_KTIME=y CONFIG_RT_USING_CLK=y -# CONFIG_RT_USING_HWTIMER is not set # CONFIG_RT_USING_CHERRYUSB is not set + +# +# SoC (System on Chip) Drivers +# # end of Device Drivers # @@ -471,11 +570,10 @@ CONFIG_RT_USING_ADT_REF=y # CONFIG_RT_USING_RT_LINK is not set # end of Utilities -# CONFIG_RT_USING_VBUS is not set - # # Memory management # +# CONFIG_RT_PAGE_MPR_SIZE_DYNAMIC is not set CONFIG_RT_PAGE_AFFINITY_BLOCK_SIZE=0x1000 CONFIG_RT_PAGE_MAX_ORDER=11 CONFIG_RT_USING_MEMBLOCK=y @@ -498,6 +596,7 @@ CONFIG_RT_INIT_MEMORY_REGIONS=128 # end of Using USB legacy version # CONFIG_RT_USING_FDT is not set +# CONFIG_RT_USING_RUST is not set # end of RT-Thread Components # @@ -1533,22 +1632,3 @@ CONFIG_RT_INIT_MEMORY_REGIONS=128 # end of RT-Thread online packages CONFIG_SOC_VIRT64_AARCH64=y - -# -# AARCH64 qemu virt64 configs -# -CONFIG_BSP_SUPPORT_FPU=y -CONFIG_BSP_USING_UART=y -CONFIG_RT_USING_UART0=y -CONFIG_BSP_USING_RTC=y -# CONFIG_BSP_USING_ALARM is not set -CONFIG_BSP_USING_PIN=y -CONFIG_BSP_USING_VIRTIO_BLK=y -# CONFIG_BSP_USING_VIRTIO_NET is not set -CONFIG_BSP_USING_VIRTIO_CONSOLE=y -CONFIG_BSP_USING_VIRTIO_GPU=y -CONFIG_BSP_USING_VIRTIO_INPUT=y -CONFIG_BSP_USING_GIC=y -CONFIG_BSP_USING_GICV2=y -# CONFIG_BSP_USING_GICV3 is not set -# end of AARCH64 qemu virt64 configs diff --git 
a/bsp/qemu-virt64-aarch64/.gitignore b/bsp/qemu-virt64-aarch64/.gitignore new file mode 100755 index 00000000000..eb6cf13ddeb --- /dev/null +++ b/bsp/qemu-virt64-aarch64/.gitignore @@ -0,0 +1,2 @@ +*.qcow2 +*.dtb diff --git a/bsp/qemu-virt64-aarch64/Kconfig b/bsp/qemu-virt64-aarch64/Kconfig index 9a2d65614e4..bc363becd56 100644 --- a/bsp/qemu-virt64-aarch64/Kconfig +++ b/bsp/qemu-virt64-aarch64/Kconfig @@ -12,14 +12,9 @@ osource "$PKGS_DIR/Kconfig" config SOC_VIRT64_AARCH64 bool select ARCH_ARMV8 - select ARCH_CPU_64BIT - select ARCH_ARM_MMU - select RT_USING_CACHE select RT_USING_COMPONENTS_INIT select RT_USING_USER_MAIN - select RT_USING_GIC - select BSP_USING_GIC - select ARCH_MM_MMU + select ARCH_CPU_64BIT + select RT_USING_CACHE + select RT_USING_STDC_ATOMIC default y - -source "$(BSP_DIR)/drivers/Kconfig" diff --git a/bsp/qemu-virt64-aarch64/applications/console.c b/bsp/qemu-virt64-aarch64/applications/console.c deleted file mode 100644 index 3f8be481891..00000000000 --- a/bsp/qemu-virt64-aarch64/applications/console.c +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright (c) 2006-2021, RT-Thread Development Team - * - * SPDX-License-Identifier: Apache-2.0 - * - * Change Logs: - * Date Author Notes - * 2021-11-11 GuEe-GUI the first version - */ - -#include - -#include - -static int console_init() -{ - rt_err_t status = RT_EOK; - rt_device_t device = rt_device_find("virtio-console0"); - - if (device != RT_NULL && rt_device_open(device, 0) == RT_EOK) - { - /* Create vport0p1 */ - status = rt_device_control(device, VIRTIO_DEVICE_CTRL_CONSOLE_PORT_CREATE, RT_NULL); - } - - if (device != RT_NULL) - { - rt_device_close(device); - } - - return status; -} -INIT_ENV_EXPORT(console_init); diff --git a/bsp/qemu-virt64-aarch64/applications/graphic.c b/bsp/qemu-virt64-aarch64/applications/graphic.c deleted file mode 100644 index c54132bb739..00000000000 --- a/bsp/qemu-virt64-aarch64/applications/graphic.c +++ /dev/null @@ -1,224 +0,0 @@ -/* - * Copyright (c) 2006-2021, 
RT-Thread Development Team - * - * SPDX-License-Identifier: Apache-2.0 - * - * Change Logs: - * Date Author Notes - * 2021-11-11 GuEe-GUI the first version - */ - -#include -#include - -#include -#include - -#define GRAPHIC_THREAD_PRIORITY 25 -#define GRAPHIC_THREAD_STACK_SIZE 4096 -#define GRAPHIC_THREAD_TIMESLICE 5 - -static rt_uint32_t cur_min[2]; -static rt_uint32_t cur_max[2]; -static rt_uint32_t cur_range[2]; -static rt_uint32_t cur_points[2]; -static rt_uint32_t cur_last_points[2]; -static rt_bool_t cur_event_sync; -static rt_uint32_t color[2] = { 0xff0000, 0x0000ff }; -static rt_uint8_t cursor[VIRTIO_GPU_CURSOR_IMG_SIZE] rt_align(VIRTIO_PAGE_SIZE); - -void tablet_event_handler(struct virtio_input_event event) -{ - static rt_bool_t cur_btn_down = RT_FALSE; - - if (event.type == EV_ABS) - { - if (event.code == 0) - { - cur_points[0] = (cur_max[0] * (event.value - cur_min[0]) + cur_range[0] / 2) / cur_range[0]; - } - else if (event.code == 1) - { - cur_points[1] = (cur_max[1] * (event.value - cur_min[1]) + cur_range[1] / 2) / cur_range[1]; - } - } - else if (event.type == EV_KEY) - { - if (event.code == BTN_LEFT) - { - if (cur_btn_down && event.value == 0) - { - color[0] ^= color[1]; - color[1] ^= color[0]; - color[0] ^= color[1]; - cur_btn_down = RT_FALSE; - cur_event_sync = RT_TRUE; - } - else - { - cur_btn_down = RT_TRUE; - } - } - } - else if (event.type == EV_SYN) - { - cur_event_sync = RT_TRUE; - } -} - -void graphic_thread(void *param) -{ - int i; - char dev_name[RT_NAME_MAX]; - rt_device_t device = RT_NULL; - - rt_device_t tablet_dev = RT_NULL; - struct virtio_input_config tablet_config; - - rt_uint32_t white = 0xffffff; - rt_device_t gpu_dev = RT_NULL; - struct rt_device_rect_info rect_info; - struct rt_device_graphic_info graphic_info; - struct rt_device_graphic_ops *virtio_gpu_graphic_ops; - - /* GPU */ - device = rt_device_find("virtio-gpu0"); - - if (device != RT_NULL && rt_device_open(device, 0) == RT_EOK) - { - virtio_gpu_graphic_ops = 
rt_graphix_ops(device); - - rt_memset(&rect_info, 0, sizeof(rect_info)); - rt_memset(&graphic_info, 0, sizeof(graphic_info)); - - rt_device_control(device, VIRTIO_DEVICE_CTRL_GPU_SET_PRIMARY, RT_NULL); - rt_device_control(device, VIRTIO_DEVICE_CTRL_GPU_CREATE_2D, (void *)RTGRAPHIC_PIXEL_FORMAT_RGB888); - rt_device_control(device, RTGRAPHIC_CTRL_GET_INFO, &graphic_info); - - rect_info.x = 0; - rect_info.y = 0; - rect_info.width = graphic_info.width; - rect_info.height = graphic_info.height; - - if (graphic_info.framebuffer != RT_NULL) - { - int i = 0; - - rt_memset(graphic_info.framebuffer, 0xff, graphic_info.pitch * graphic_info.height); - - cur_last_points[0] = graphic_info.width / 2; - cur_last_points[1] = graphic_info.height / 2; - - virtio_gpu_graphic_ops->draw_hline((char *)&color[0], 0, graphic_info.width, cur_last_points[1]); - virtio_gpu_graphic_ops->draw_vline((char *)&color[1], cur_last_points[0], 0, graphic_info.height); - - rt_device_control(device, RTGRAPHIC_CTRL_RECT_UPDATE, &rect_info); - - while (i < sizeof(cursor) / 4) - { - /* R: 0x4c G: 0xaf B: 0x50 A: 0.8 */ - ((rt_uint32_t *)cursor)[i] = 0xcc4caf50; - ++i; - } - - rt_device_control(device, VIRTIO_DEVICE_CTRL_CURSOR_SETUP, cursor); - rt_device_control(device, VIRTIO_DEVICE_CTRL_CURSOR_MOVE, (rt_uint32_t[]){0, 0}); - - gpu_dev = device; - } - } - - /* Keyboard, Mouse, Tablet */ - for (i = 0; i < 3; ++i) - { - rt_snprintf(dev_name, RT_NAME_MAX, "virtio-input%d", i); - - device = rt_device_find(dev_name); - - if (device != RT_NULL && rt_device_open(device, 0) == RT_EOK) - { - enum virtio_input_type type; - rt_device_control(device, VIRTIO_DEVICE_CTRL_INPUT_GET_TYPE, &type); - - if (type == VIRTIO_INPUT_TYPE_TABLET) - { - tablet_dev = device; - } - else - { - rt_device_close(device); - } - } - } - - if (tablet_dev == RT_NULL || gpu_dev == RT_NULL) - { - goto _graphic_fail; - } - - cur_max[0] = graphic_info.width; - cur_max[1] = graphic_info.height; - - rt_device_control(tablet_dev, 
VIRTIO_DEVICE_CTRL_INPUT_GET_ABS_X_INFO, &tablet_config); - - cur_min[0] = tablet_config.abs.min; - cur_range[0] = tablet_config.abs.max - cur_min[0]; - - rt_device_control(tablet_dev, VIRTIO_DEVICE_CTRL_INPUT_GET_ABS_Y_INFO, &tablet_config); - - cur_min[1] = tablet_config.abs.min; - cur_range[1] = tablet_config.abs.max - cur_min[1]; - - cur_event_sync = RT_FALSE; - - rt_device_control(tablet_dev, VIRTIO_DEVICE_CTRL_INPUT_BIND_BSCT_HANDLER, tablet_event_handler); - - for (;;) - { - while (cur_event_sync) - { - virtio_gpu_graphic_ops->draw_hline((char *)&white, 0, graphic_info.width, cur_last_points[1]); - virtio_gpu_graphic_ops->draw_vline((char *)&white, cur_last_points[0], 0, graphic_info.height); - - cur_last_points[0] = cur_points[0]; - cur_last_points[1] = cur_points[1]; - - virtio_gpu_graphic_ops->draw_hline((char *)&color[0], 0, graphic_info.width, cur_last_points[1]); - virtio_gpu_graphic_ops->draw_vline((char *)&color[1], cur_last_points[0], 0, graphic_info.height); - - rt_device_control(gpu_dev, RTGRAPHIC_CTRL_RECT_UPDATE, &rect_info); - - cur_event_sync = RT_FALSE; - - rt_thread_mdelay(1); - } - } - -_graphic_fail: - - if (gpu_dev != RT_NULL) - { - rt_device_close(gpu_dev); - } - - if (tablet_dev != RT_NULL) - { - rt_device_close(tablet_dev); - } -} - -int graphic_test(void) -{ - rt_thread_t graphic_tid = rt_thread_create("graphic work", graphic_thread, RT_NULL, - GRAPHIC_THREAD_STACK_SIZE, GRAPHIC_THREAD_PRIORITY, GRAPHIC_THREAD_TIMESLICE); - - if (graphic_tid != RT_NULL) - { - rt_thread_startup(graphic_tid); - - return RT_EOK; - } - - return -RT_ERROR; -} -MSH_CMD_EXPORT(graphic_test, Graphic test); diff --git a/bsp/qemu-virt64-aarch64/applications/mnt.c b/bsp/qemu-virt64-aarch64/applications/mnt.c deleted file mode 100644 index 262c19c4f75..00000000000 --- a/bsp/qemu-virt64-aarch64/applications/mnt.c +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright (c) 2006-2021, RT-Thread Development Team - * - * SPDX-License-Identifier: Apache-2.0 - * - * Change 
Logs: - * Date Author Notes - * 2021/08/19 bernard the first version - */ - -#include - -#ifdef RT_USING_DFS -#include - -int mnt_init(void) -{ - if (rt_device_find("virtio-blk0")) - { - /* mount virtio-blk as root directory */ - if (dfs_mount("virtio-blk0", "/", "elm", 0, RT_NULL) == 0) - { - rt_kprintf("file system initialization done!\n"); - } - else - { - if (dfs_mount("virtio-blk0", "/", "ext", 0, RT_NULL) == 0) - { - rt_kprintf("file system initialization done!\n"); - } - else - { - rt_kprintf("file system initialization fail!\n"); - } - } - } - - return 0; -} -INIT_ENV_EXPORT(mnt_init); -#endif diff --git a/bsp/qemu-virt64-aarch64/applications/pin.c b/bsp/qemu-virt64-aarch64/applications/pin.c deleted file mode 100644 index 37d1927b003..00000000000 --- a/bsp/qemu-virt64-aarch64/applications/pin.c +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright (c) 2006-2022, RT-Thread Development Team - * - * SPDX-License-Identifier: Apache-2.0 - * - * Change Logs: - * Date Author Notes - * 2022-6-30 GuEe-GUI first version - */ - -#include -#include -#include - -#ifdef RT_USING_PIN - -void qemu_gpio3_key_poweroff(void *args) -{ - rt_kprintf("\nYou power off the machine.\n"); - - rt_hw_cpu_shutdown(); -} - -static int pin_init() -{ - rt_pin_attach_irq(3, PIN_IRQ_MODE_FALLING, qemu_gpio3_key_poweroff, RT_NULL); - rt_pin_irq_enable(3, RT_TRUE); - - return 0; -} -INIT_ENV_EXPORT(pin_init); - -#endif /* RT_USING_PIN */ diff --git a/bsp/qemu-virt64-aarch64/drivers/Kconfig b/bsp/qemu-virt64-aarch64/drivers/Kconfig deleted file mode 100644 index f14fc73fc18..00000000000 --- a/bsp/qemu-virt64-aarch64/drivers/Kconfig +++ /dev/null @@ -1,80 +0,0 @@ - -menu "AARCH64 qemu virt64 configs" - menuconfig BSP_SUPPORT_FPU - bool "Using Float" - default y - - menuconfig BSP_USING_UART - bool "Using UART" - select RT_USING_SERIAL - default y - - if BSP_USING_UART - config RT_USING_UART0 - bool "Enabel UART 0" - default y - endif - - menuconfig BSP_USING_RTC - bool "Using RTC" - select 
RT_USING_RTC - default y - - if BSP_USING_RTC - config BSP_USING_ALARM - bool "Enable Alarm" - select RT_USING_ALARM - default n - endif - - config BSP_USING_PIN - bool "Using PIN" - select RT_USING_PIN - default y - - config BSP_USING_VIRTIO_BLK - bool "Using VirtIO BLK" - select RT_USING_VIRTIO - select RT_USING_VIRTIO_BLK - default y - - config BSP_USING_VIRTIO_NET - bool "Using VirtIO NET" - select RT_USING_VIRTIO - select RT_USING_VIRTIO_NET - default y - - config BSP_USING_VIRTIO_CONSOLE - bool "Using VirtIO Console" - select RT_USING_VIRTIO - select RT_USING_VIRTIO_CONSOLE - default y - - config BSP_USING_VIRTIO_GPU - bool "Using VirtIO GPU" - select RT_USING_VIRTIO - select RT_USING_VIRTIO_GPU - default y - - config BSP_USING_VIRTIO_INPUT - bool "Using VirtIO Input" - select RT_USING_VIRTIO - select RT_USING_VIRTIO_INPUT - default y - - config BSP_USING_GIC - bool - default y - - choice - prompt "GIC Version" - default BSP_USING_GICV2 - - config BSP_USING_GICV2 - bool "GICv2" - - config BSP_USING_GICV3 - bool "GICv3" - endchoice - -endmenu diff --git a/bsp/qemu-virt64-aarch64/drivers/SConscript b/bsp/qemu-virt64-aarch64/drivers/SConscript old mode 100644 new mode 100755 diff --git a/bsp/qemu-virt64-aarch64/drivers/board.c b/bsp/qemu-virt64-aarch64/drivers/board.c index 3bccfad1950..489eb0b99ef 100644 --- a/bsp/qemu-virt64-aarch64/drivers/board.c +++ b/bsp/qemu-virt64-aarch64/drivers/board.c @@ -11,14 +11,8 @@ * add smp ipi init */ -#include -#include -#include -#include #include -extern size_t MMUTable[]; - void rt_hw_board_init(void) { rt_hw_common_setup(); diff --git a/bsp/qemu-virt64-aarch64/drivers/board.h b/bsp/qemu-virt64-aarch64/drivers/board.h index e9fc17a283b..23148a28a5e 100644 --- a/bsp/qemu-virt64-aarch64/drivers/board.h +++ b/bsp/qemu-virt64-aarch64/drivers/board.h @@ -12,16 +12,9 @@ #ifndef __BOARD_H__ #define __BOARD_H__ -#include - extern unsigned char __bss_start; extern unsigned char __bss_end; -#define HEAP_BEGIN (void *)&__bss_end 
-#define HEAP_END ((void *)HEAP_BEGIN + 64 * 1024 * 1024) - void rt_hw_board_init(void); -int rt_hw_uart_init(void); - -#endif +#endif /* __BOARD_H__ */ diff --git a/bsp/qemu-virt64-aarch64/drivers/drv_gpio.c b/bsp/qemu-virt64-aarch64/drivers/drv_gpio.c deleted file mode 100644 index bcf6677d2ca..00000000000 --- a/bsp/qemu-virt64-aarch64/drivers/drv_gpio.c +++ /dev/null @@ -1,318 +0,0 @@ -/* - * Copyright (c) 2006-2022, RT-Thread Development Team - * - * SPDX-License-Identifier: Apache-2.0 - * - * Change Logs: - * Date Author Notes - * 2022-6-30 GuEe-GUI first version - */ - -#include -#include -#include -#include - -#include "drv_gpio.h" - -#ifdef BSP_USING_PIN - -#define GPIODIR 0x400 -#define GPIOIS 0x404 -#define GPIOIBE 0x408 -#define GPIOIEV 0x40c -#define GPIOIE 0x410 -#define GPIORIS 0x414 -#define GPIOMIS 0x418 -#define GPIOIC 0x41c - -#define BIT(x) (1UL << (x)) - -#define PL061_GPIO_NR 8 - -static struct pl061 -{ -#ifdef RT_USING_SMP - struct rt_spinlock spinlock; -#endif - void (*(hdr[PL061_GPIO_NR]))(void *args); - void *args[PL061_GPIO_NR]; -} _pl061; - -static rt_ubase_t pl061_gpio_base = PL061_GPIO_BASE; - -rt_inline rt_uint8_t pl061_read8(rt_ubase_t offset) -{ - return HWREG8(pl061_gpio_base + offset); -} - -rt_inline void pl061_write8(rt_ubase_t offset, rt_uint8_t value) -{ - HWREG8(pl061_gpio_base + offset) = value; -} - -static void pl061_pin_mode(struct rt_device *device, rt_base_t pin, rt_uint8_t mode) -{ - int value; - rt_uint8_t gpiodir; - -#ifdef RT_USING_SMP - rt_base_t level; -#endif - - if (pin < 0 || pin >= PL061_GPIO_NR) - { - return; - } - -#ifdef RT_USING_SMP - level = rt_spin_lock_irqsave(&_pl061.spinlock); -#endif - - switch (mode) - { - case PIN_MODE_OUTPUT: - - value = !!pl061_read8((BIT(pin + 2))); - - pl061_write8(BIT(pin + 2), 0 << pin); - gpiodir = pl061_read8(GPIODIR); - gpiodir |= BIT(pin); - pl061_write8(GPIODIR, gpiodir); - - /* - * gpio value is set again, because pl061 doesn't allow to set value of - * a gpio pin before 
configuring it in OUT mode. - */ - pl061_write8((BIT(pin + 2)), value << pin); - - break; - case PIN_MODE_INPUT: - - gpiodir = pl061_read8(GPIODIR); - gpiodir &= ~(BIT(pin)); - pl061_write8(GPIODIR, gpiodir); - - break; - } - -#ifdef RT_USING_SMP - rt_spin_unlock_irqrestore(&_pl061.spinlock, level); -#endif -} - -static void pl061_pin_write(struct rt_device *device, rt_base_t pin, rt_uint8_t value) -{ - pl061_write8(BIT(pin + 2), !!value << pin); -} - -static rt_ssize_t pl061_pin_read(struct rt_device *device, rt_base_t pin) -{ - return !!pl061_read8((BIT(pin + 2))); -} - -static rt_err_t pl061_pin_attach_irq(struct rt_device *device, rt_base_t pin, rt_uint8_t mode, void (*hdr)(void *args), void *args) -{ - rt_uint8_t gpiois, gpioibe, gpioiev; - rt_uint8_t bit = BIT(mode); -#ifdef RT_USING_SMP - rt_base_t level; -#endif - - if (pin < 0 || pin >= PL061_GPIO_NR) - { - return -RT_EINVAL; - } - -#ifdef RT_USING_SMP - level = rt_spin_lock_irqsave(&_pl061.spinlock); -#endif - - gpioiev = pl061_read8(GPIOIEV); - gpiois = pl061_read8(GPIOIS); - gpioibe = pl061_read8(GPIOIBE); - - if (mode == PIN_IRQ_MODE_HIGH_LEVEL || pin == PIN_IRQ_MODE_LOW_LEVEL) - { - rt_bool_t polarity = (mode == PIN_IRQ_MODE_HIGH_LEVEL); - - /* Disable edge detection */ - gpioibe &= ~bit; - /* Enable level detection */ - gpiois |= bit; - - /* Select polarity */ - if (polarity) - { - gpioiev |= bit; - } - else - { - gpioiev &= ~bit; - } - } - else if (mode == PIN_IRQ_MODE_RISING_FALLING) - { - /* Disable level detection */ - gpiois &= ~bit; - /* Select both edges, setting this makes GPIOEV be ignored */ - gpioibe |= bit; - } - else if (mode == PIN_IRQ_MODE_RISING || mode == PIN_IRQ_MODE_FALLING) - { - rt_bool_t rising = (mode == PIN_IRQ_MODE_RISING); - - /* Disable level detection */ - gpiois &= ~bit; - /* Clear detection on both edges */ - gpioibe &= ~bit; - - /* Select edge */ - if (rising) - { - gpioiev |= bit; - } - else - { - gpioiev &= ~bit; - } - } - else - { - /* No trigger: disable everything 
*/ - gpiois &= ~bit; - gpioibe &= ~bit; - gpioiev &= ~bit; - } - - pl061_write8(GPIOIS, gpiois); - pl061_write8(GPIOIBE, gpioibe); - pl061_write8(GPIOIEV, gpioiev); - - _pl061.hdr[pin] = hdr; - _pl061.args[pin] = args; - -#ifdef RT_USING_SMP - rt_spin_unlock_irqrestore(&_pl061.spinlock, level); -#endif - - return RT_EOK; -} - -static rt_err_t pl061_pin_detach_irq(struct rt_device *device, rt_base_t pin) -{ - if (pin < 0 || pin >= PL061_GPIO_NR) - { - return -RT_EINVAL; - } - - _pl061.hdr[pin] = RT_NULL; - _pl061.args[pin] = RT_NULL; - - return RT_EOK; -} - -static rt_err_t pl061_pin_irq_enable(struct rt_device *device, rt_base_t pin, rt_uint8_t enabled) -{ - rt_uint8_t mask = BIT(pin); - rt_uint8_t gpioie; - -#ifdef RT_USING_SMP - rt_base_t level; -#endif - - if (pin < 0 || pin >= PL061_GPIO_NR) - { - return -RT_EINVAL; - } - -#ifdef RT_USING_SMP - level = rt_spin_lock_irqsave(&_pl061.spinlock); -#endif - - if (enabled) - { - gpioie = pl061_read8(GPIOIE) | mask; - } - else - { - gpioie = pl061_read8(GPIOIE) & ~mask; - } - - pl061_write8(GPIOIE, gpioie); - -#ifdef RT_USING_SMP - rt_spin_unlock_irqrestore(&_pl061.spinlock, level); -#endif - - return RT_EOK; -} - -static const struct rt_pin_ops ops = -{ - pl061_pin_mode, - pl061_pin_write, - pl061_pin_read, - pl061_pin_attach_irq, - pl061_pin_detach_irq, - pl061_pin_irq_enable, - RT_NULL, -}; - -static void rt_hw_gpio_isr(int irqno, void *param) -{ - rt_uint8_t mask; - unsigned long pending; - -#ifdef RT_USING_SMP - rt_base_t level; -#endif - - pending = pl061_read8(GPIOMIS); - - if (pending) - { - rt_base_t pin; - - for (pin = 0; pin < PL061_GPIO_NR; ++pin) - { - if (pending & BIT(pin)) - { - mask |= BIT(pin); - - if (_pl061.hdr[pin] != RT_NULL) - { - _pl061.hdr[pin](_pl061.args[pin]); - } - } - } - } - -#ifdef RT_USING_SMP - level = rt_spin_lock_irqsave(&_pl061.spinlock); -#endif - - pl061_write8(GPIOIC, mask); - -#ifdef RT_USING_SMP - rt_spin_unlock_irqrestore(&_pl061.spinlock, level); -#endif -} - -int 
rt_hw_gpio_init(void) -{ -#ifdef RT_USING_SMP - rt_spin_lock_init(&_pl061.spinlock); -#endif - - pl061_gpio_base = (rt_size_t)rt_ioremap((void *)pl061_gpio_base, PL061_GPIO_SIZE); - - rt_device_pin_register("gpio", &ops, RT_NULL); - rt_hw_interrupt_install(PL061_GPIO_IRQNUM, rt_hw_gpio_isr, RT_NULL, "gpio"); - rt_hw_interrupt_umask(PL061_GPIO_IRQNUM); - - return 0; -} -INIT_DEVICE_EXPORT(rt_hw_gpio_init); - -#endif /* BSP_USING_PIN */ diff --git a/bsp/qemu-virt64-aarch64/drivers/drv_gpio.h b/bsp/qemu-virt64-aarch64/drivers/drv_gpio.h deleted file mode 100644 index 3f3e014f11d..00000000000 --- a/bsp/qemu-virt64-aarch64/drivers/drv_gpio.h +++ /dev/null @@ -1,16 +0,0 @@ -/* - * Copyright (c) 2006-2021, RT-Thread Development Team - * - * SPDX-License-Identifier: Apache-2.0 - * - * Change Logs: - * Date Author Notes - * 2022-6-30 GuEe-GUI first version - */ - -#ifndef DRV_GPIO_H__ -#define DRV_GPIO_H__ - -int rt_hw_gpio_init(void); - -#endif diff --git a/bsp/qemu-virt64-aarch64/drivers/drv_romfb.c b/bsp/qemu-virt64-aarch64/drivers/drv_romfb.c new file mode 100755 index 00000000000..9667b99bdfc --- /dev/null +++ b/bsp/qemu-virt64-aarch64/drivers/drv_romfb.c @@ -0,0 +1,21 @@ +/* + * Copyright (c) 2006-2023, RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2023-02-25 GuEe-GUI the first version + */ + +#include +#include + +void platform_get_ramfb_params(rt_uint32_t *width, rt_uint32_t *height) +{ + *width = 800; + *height = 600; +} + +#include "../../../examples/test/dm_graphic_test.c" +#include "../../../examples/test/dm_hmi_test.c" diff --git a/bsp/qemu-virt64-aarch64/drivers/drv_rtc.c b/bsp/qemu-virt64-aarch64/drivers/drv_rtc.c deleted file mode 100644 index 8612b626411..00000000000 --- a/bsp/qemu-virt64-aarch64/drivers/drv_rtc.c +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Copyright (c) 2006-2021, RT-Thread Development Team - * - * SPDX-License-Identifier: Apache-2.0 - * - * Change Logs: - * Date Author 
Notes - * 2021-11-4 GuEe-GUI first version - */ - -#include -#include -#include -#include - -#include "drv_rtc.h" - -#ifdef BSP_USING_RTC - -#define RTC_DR 0x00 /* data read register */ -#define RTC_MR 0x04 /* match register */ -#define RTC_LR 0x08 /* data load register */ -#define RTC_CR 0x0c /* control register */ -#define RTC_IMSC 0x10 /* interrupt mask and set register */ -#define RTC_RIS 0x14 /* raw interrupt status register */ -#define RTC_MIS 0x18 /* masked interrupt status register */ -#define RTC_ICR 0x1c /* interrupt clear register */ - -#define RTC_CR_OPEN 1 -#define RTC_CR_CLOSE 0 - -static struct hw_rtc_device rtc_device; -static rt_ubase_t pl031_rtc_base = PL031_RTC_BASE; - -rt_inline rt_uint32_t pl031_read32(rt_ubase_t offset) -{ - return (*((volatile unsigned int *)(pl031_rtc_base + offset))); -} - -rt_inline void pl031_write32(rt_ubase_t offset, rt_uint32_t value) -{ - (*((volatile unsigned int *)(pl031_rtc_base + offset))) = value; -} - -static rt_err_t pl031_rtc_init(rt_device_t dev) -{ - return RT_EOK; -} - -static rt_err_t pl031_rtc_open(rt_device_t dev, rt_uint16_t oflag) -{ - pl031_write32(RTC_CR, RTC_CR_OPEN); - return RT_EOK; -} - -static rt_err_t pl031_rtc_close(rt_device_t dev) -{ - pl031_write32(RTC_CR, RTC_CR_CLOSE); - return RT_EOK; -} - -static rt_err_t pl031_rtc_control(rt_device_t dev, int cmd, void *args) -{ - - RT_ASSERT(dev != RT_NULL); - - switch (cmd) - { - case RT_DEVICE_CTRL_RTC_GET_TIME: - *(time_t *)args = pl031_read32(RTC_DR); - break; - case RT_DEVICE_CTRL_RTC_SET_TIME: - pl031_write32(RTC_LR, *(time_t *)args); - break; - default: - return -RT_EINVAL; - } - return RT_EOK; -} - -static rt_ssize_t pl031_rtc_read(rt_device_t dev, rt_off_t pos, void *buffer, rt_size_t size) -{ - pl031_rtc_control(dev, RT_DEVICE_CTRL_RTC_GET_TIME, buffer); - return size; -} - -static rt_ssize_t pl031_rtc_write(rt_device_t dev, rt_off_t pos, const void *buffer, rt_size_t size) -{ - pl031_rtc_control(dev, RT_DEVICE_CTRL_RTC_SET_TIME, (void 
*)buffer); - return size; -} - -const static struct rt_device_ops pl031_rtc_ops = -{ - .init = pl031_rtc_init, - .open = pl031_rtc_open, - .close = pl031_rtc_close, - .read = pl031_rtc_read, - .write = pl031_rtc_write, - .control = pl031_rtc_control -}; - -int rt_hw_rtc_init(void) -{ - pl031_rtc_base = (rt_size_t)rt_ioremap((void *)pl031_rtc_base, PL031_RTC_SIZE); - - rt_memset(&rtc_device, 0, sizeof(rtc_device)); - - rtc_device.device.type = RT_Device_Class_RTC; - rtc_device.device.rx_indicate = RT_NULL; - rtc_device.device.tx_complete = RT_NULL; - rtc_device.device.ops = &pl031_rtc_ops; - rtc_device.device.user_data = RT_NULL; - - /* register a rtc device */ - rt_device_register(&rtc_device.device, "rtc0", RT_DEVICE_FLAG_RDWR); - rt_soft_rtc_set_source("rtc0"); - return 0; -} -INIT_DEVICE_EXPORT(rt_hw_rtc_init); -#endif /* BSP_USING_RTC */ diff --git a/bsp/qemu-virt64-aarch64/drivers/drv_rtc.h b/bsp/qemu-virt64-aarch64/drivers/drv_rtc.h deleted file mode 100644 index 15d4f01a9c7..00000000000 --- a/bsp/qemu-virt64-aarch64/drivers/drv_rtc.h +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2006-2021, RT-Thread Development Team - * - * SPDX-License-Identifier: Apache-2.0 - * - * Change Logs: - * Date Author Notes - * 2021-11-4 GuEe-GUI first version - */ - -#ifndef DRV_RTC_H__ -#define DRV_RTC_H__ - -#include -#include -#include - -struct hw_rtc_device -{ - struct rt_device device; -}; - -int rt_hw_rtc_init(void); - -#endif diff --git a/bsp/qemu-virt64-aarch64/drivers/drv_uart.c b/bsp/qemu-virt64-aarch64/drivers/drv_uart.c deleted file mode 100644 index 12005d297d7..00000000000 --- a/bsp/qemu-virt64-aarch64/drivers/drv_uart.c +++ /dev/null @@ -1,146 +0,0 @@ -/* - * serial.c UART driver - * Copyright (c) 2006-2021, RT-Thread Development Team - * - * SPDX-License-Identifier: Apache-2.0 - * - * Change Logs: - * Date Author Notes - * 2013-03-30 Bernard the first verion - */ - -#include -#include - -#include "board.h" -#include "mmu.h" - -struct hw_uart_device -{ - 
rt_size_t hw_base; - rt_size_t irqno; -}; - -#define UART_DR(base) __REG32(base + 0x00) -#define UART_FR(base) __REG32(base + 0x18) -#define UART_CR(base) __REG32(base + 0x30) -#define UART_IMSC(base) __REG32(base + 0x38) -#define UART_ICR(base) __REG32(base + 0x44) - -#define UARTFR_RXFE 0x10 -#define UARTFR_TXFF 0x20 -#define UARTIMSC_RXIM 0x10 -#define UARTIMSC_TXIM 0x20 -#define UARTICR_RXIC 0x10 -#define UARTICR_TXIC 0x20 - -static void rt_hw_uart_isr(int irqno, void *param) -{ - struct rt_serial_device *serial = (struct rt_serial_device *)param; - - rt_hw_serial_isr(serial, RT_SERIAL_EVENT_RX_IND); -} - -static rt_err_t uart_configure(struct rt_serial_device *serial, struct serial_configure *cfg) -{ - return RT_EOK; -} - -static rt_err_t uart_control(struct rt_serial_device *serial, int cmd, void *arg) -{ - struct hw_uart_device *uart; - - RT_ASSERT(serial != RT_NULL); - uart = (struct hw_uart_device *)serial->parent.user_data; - - switch (cmd) - { - case RT_DEVICE_CTRL_CLR_INT: - /* disable rx irq */ - UART_IMSC(uart->hw_base) &= ~UARTIMSC_RXIM; - break; - - case RT_DEVICE_CTRL_SET_INT: - /* enable rx irq */ - UART_IMSC(uart->hw_base) |= UARTIMSC_RXIM; - rt_hw_interrupt_umask(uart->irqno); - break; - - default: - return -1; - } - - return RT_EOK; -} - -static int uart_putc(struct rt_serial_device *serial, char c) -{ - struct hw_uart_device *uart; - - RT_ASSERT(serial != RT_NULL); - uart = (struct hw_uart_device *)serial->parent.user_data; - - while (UART_FR(uart->hw_base) & UARTFR_TXFF); - UART_DR(uart->hw_base) = c; - - return 1; -} - -static int uart_getc(struct rt_serial_device *serial) -{ - int ch; - struct hw_uart_device *uart; - - RT_ASSERT(serial != RT_NULL); - uart = (struct hw_uart_device *)serial->parent.user_data; - - ch = -1; - if (!(UART_FR(uart->hw_base) & UARTFR_RXFE)) - { - ch = UART_DR(uart->hw_base) & 0xff; - } - - return ch; -} - -static const struct rt_uart_ops _uart_ops = -{ - uart_configure, - uart_control, - uart_putc, - uart_getc, -}; 
- -#ifdef RT_USING_UART0 -/* UART device driver structure */ -static struct hw_uart_device _uart0_device = -{ - PL011_UART0_BASE, - PL011_UART0_IRQNUM, -}; -static struct rt_serial_device _serial0; -#endif - -int rt_hw_uart_init(void) -{ - struct hw_uart_device *uart; - struct serial_configure config = RT_SERIAL_CONFIG_DEFAULT; - -#ifdef RT_USING_UART0 - _uart0_device.hw_base = (rt_size_t)rt_ioremap((void*)_uart0_device.hw_base, PL011_UART0_SIZE); - uart = &_uart0_device; - - _serial0.ops = &_uart_ops; - _serial0.config = config; - - /* register UART1 device */ - rt_hw_serial_register(&_serial0, "uart0", - RT_DEVICE_FLAG_RDWR | RT_DEVICE_FLAG_INT_RX, - uart); - rt_hw_interrupt_install(uart->irqno, rt_hw_uart_isr, &_serial0, "uart0"); - /* enable Rx and Tx of UART */ - UART_CR(uart->hw_base) = (1 << 0) | (1 << 8) | (1 << 9); -#endif - - return 0; -} diff --git a/bsp/qemu-virt64-aarch64/drivers/drv_uart.h b/bsp/qemu-virt64-aarch64/drivers/drv_uart.h deleted file mode 100644 index 1cd7a583ff4..00000000000 --- a/bsp/qemu-virt64-aarch64/drivers/drv_uart.h +++ /dev/null @@ -1,16 +0,0 @@ -/* - * Copyright (c) 2006-2021, RT-Thread Development Team - * - * SPDX-License-Identifier: Apache-2.0 - * - * Change Logs: - * Date Author Notes - * 2013-03-30 Bernard the first verion - */ - -#ifndef __DRV_UART_H__ -#define __DRV_UART_H__ - -int rt_hw_uart_init(void); - -#endif /* __DRV_UART_H__ */ diff --git a/bsp/qemu-virt64-aarch64/drivers/drv_virtio.c b/bsp/qemu-virt64-aarch64/drivers/drv_virtio.c deleted file mode 100644 index 2634da67732..00000000000 --- a/bsp/qemu-virt64-aarch64/drivers/drv_virtio.c +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Copyright (c) 2006-2021, RT-Thread Development Team - * - * SPDX-License-Identifier: Apache-2.0 - * - * Change Logs: - * Date Author Notes - * 2021-11-11 GuEe-GUI the first version - */ - -#include - -#include -#ifdef BSP_USING_VIRTIO_BLK -#include -#endif -#ifdef BSP_USING_VIRTIO_NET -#include -#endif -#ifdef BSP_USING_VIRTIO_CONSOLE -#include 
-#endif -#ifdef BSP_USING_VIRTIO_GPU -#include -#endif -#ifdef BSP_USING_VIRTIO_INPUT -#include -#endif - -#include - -static virtio_device_init_handler virtio_device_init_handlers[] = -{ -#ifdef BSP_USING_VIRTIO_BLK - [VIRTIO_DEVICE_ID_BLOCK] = rt_virtio_blk_init, -#endif -#ifdef BSP_USING_VIRTIO_NET - [VIRTIO_DEVICE_ID_NET] = rt_virtio_net_init, -#endif -#ifdef BSP_USING_VIRTIO_CONSOLE - [VIRTIO_DEVICE_ID_CONSOLE] = rt_virtio_console_init, -#endif -#ifdef BSP_USING_VIRTIO_GPU - [VIRTIO_DEVICE_ID_GPU] = rt_virtio_gpu_init, -#endif -#ifdef BSP_USING_VIRTIO_INPUT - [VIRTIO_DEVICE_ID_INPUT] = rt_virtio_input_init, -#endif -}; - -int rt_virtio_devices_init(void) -{ - int i; - rt_uint32_t irq = VIRTIO_IRQ_BASE; - rt_ubase_t mmio_base = VIRTIO_MMIO_BASE; - struct virtio_mmio_config *mmio_config; - virtio_device_init_handler init_handler; - - if (sizeof(virtio_device_init_handlers) == 0) - { - /* The compiler will optimize the codes after here. */ - return 0; - } - - mmio_base = (rt_ubase_t)rt_ioremap((void *)mmio_base, VIRTIO_MMIO_SIZE * VIRTIO_MAX_NR); - - if (mmio_base == RT_NULL) - { - return -RT_ERROR; - } - - for (i = 0; i < VIRTIO_MAX_NR; ++i, ++irq, mmio_base += VIRTIO_MMIO_SIZE) - { - mmio_config = (struct virtio_mmio_config *)mmio_base; - - if (mmio_config->magic != VIRTIO_MAGIC_VALUE || - mmio_config->version != RT_USING_VIRTIO_VERSION || - mmio_config->vendor_id != VIRTIO_VENDOR_ID) - { - continue; - } - - init_handler = virtio_device_init_handlers[mmio_config->device_id]; - - if (init_handler != RT_NULL) - { - init_handler((rt_ubase_t *)mmio_base, irq); - } - } - - return 0; -} -INIT_DEVICE_EXPORT(rt_virtio_devices_init); diff --git a/bsp/qemu-virt64-aarch64/drivers/drv_virtio.h b/bsp/qemu-virt64-aarch64/drivers/drv_virtio.h deleted file mode 100644 index 954338a3864..00000000000 --- a/bsp/qemu-virt64-aarch64/drivers/drv_virtio.h +++ /dev/null @@ -1,16 +0,0 @@ -/* - * Copyright (c) 2006-2021, RT-Thread Development Team - * - * SPDX-License-Identifier: 
Apache-2.0 - * - * Change Logs: - * Date Author Notes - * 2021-11-11 GuEe-GUI the first version - */ - -#ifndef __DRV_VIRTIO_H__ -#define __DRV_VIRTIO_H__ - -int rt_virtio_devices_init(void); - -#endif /* __DRV_VIRTIO_H__ */ diff --git a/bsp/qemu-virt64-aarch64/drivers/virt.h b/bsp/qemu-virt64-aarch64/drivers/virt.h deleted file mode 100644 index 801e889bff0..00000000000 --- a/bsp/qemu-virt64-aarch64/drivers/virt.h +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Copyright (c) 2006-2021, RT-Thread Development Team - * - * SPDX-License-Identifier: Apache-2.0 - * - * Change Logs: - * Date Author Notes - * 2021-02-17 GuEe-GUI the first version - */ - -#ifndef VIRT_H__ -#define VIRT_H__ - -#include -#include - -#ifdef RT_USING_SMART -#include -#endif - -#define __REG32(x) (*((volatile unsigned int *)(x))) -#define __REG16(x) (*((volatile unsigned short *)(x))) - -/* UART */ -#define PL011_UART0_BASE 0x09000000 -#define PL011_UART0_SIZE 0x00001000 -#define PL011_UART0_IRQNUM (32 + 1) - -/* RTC */ -#define PL031_RTC_BASE 0x9010000 -#define PL031_RTC_SIZE 0x00001000 -#define PL031_RTC_IRQNUM (32 + 2) - -/* GPIO */ -#define PL061_GPIO_BASE 0x09030000 -#define PL061_GPIO_SIZE 0x00001000 -#define PL061_GPIO_IRQNUM (32 + 7) - -/* VirtIO */ -#define VIRTIO_MMIO_BASE 0x0a000000 -#define VIRTIO_MMIO_SIZE 0x00000200 -#define VIRTIO_MAX_NR 32 -#define VIRTIO_IRQ_BASE (32 + 16) -#define VIRTIO_VENDOR_ID 0x554d4551 /* "QEMU" */ - -/* GIC */ -#define MAX_HANDLERS 96 -#define GIC_IRQ_START 0 -#define ARM_GIC_NR_IRQS 96 -#define ARM_GIC_MAX_NR 1 - -#define IRQ_ARM_IPI_KICK 0 -#define IRQ_ARM_IPI_CALL 1 - -/* GICv2 */ -#define GIC_PL390_DISTRIBUTOR_PPTR 0x08000000 -#define GIC_PL390_CONTROLLER_PPTR 0x08010000 -#define GIC_PL390_HYPERVISOR_BASE 0x08030000 -#define GIC_PL390_VIRTUAL_CPU_BASE 0x08040000 - -/* GICv3 */ -#define GIC_PL500_DISTRIBUTOR_PPTR GIC_PL390_DISTRIBUTOR_PPTR -#define GIC_PL500_REDISTRIBUTOR_PPTR 0x080a0000 -#define GIC_PL500_CONTROLLER_PPTR GIC_PL390_CONTROLLER_PPTR -#define 
GIC_PL500_ITS_PPTR 0x08080000 - -/* the basic constants and interfaces needed by gic */ -rt_inline rt_ubase_t platform_get_gic_dist_base(void) -{ -#ifdef BSP_USING_GICV2 - return GIC_PL390_DISTRIBUTOR_PPTR; -#else - return GIC_PL500_DISTRIBUTOR_PPTR; -#endif -} - -rt_inline rt_ubase_t platform_get_gic_redist_base(void) -{ - return GIC_PL500_REDISTRIBUTOR_PPTR; -} - -rt_inline rt_ubase_t platform_get_gic_cpu_base(void) -{ -#ifdef BSP_USING_GICV2 - return GIC_PL390_CONTROLLER_PPTR; -#else - return GIC_PL500_CONTROLLER_PPTR; -#endif -} - -rt_inline rt_ubase_t platform_get_gic_its_base(void) -{ - return GIC_PL500_ITS_PPTR; -} - -#endif diff --git a/bsp/qemu-virt64-aarch64/qemu-debug.bat b/bsp/qemu-virt64-aarch64/qemu-debug.bat deleted file mode 100644 index 8d47286bd23..00000000000 --- a/bsp/qemu-virt64-aarch64/qemu-debug.bat +++ /dev/null @@ -1,9 +0,0 @@ -@echo off -if exist sd.bin goto run -qemu-img create -f raw sd.bin 64M - -:run -qemu-system-aarch64 -M virt,gic-version=2 -cpu cortex-a53 -smp 4 -kernel rtthread.bin -nographic ^ --drive if=none,file=sd.bin,format=raw,id=blk0 -device virtio-blk-device,drive=blk0,bus=virtio-mmio-bus.0 ^ --netdev user,id=net0 -device virtio-net-device,netdev=net0,bus=virtio-mmio-bus.1 -s -S ^ --device virtio-serial-device -chardev socket,host=127.0.0.1,port=4321,server=on,wait=off,telnet=on,id=console0 -device virtserialport,chardev=console0 diff --git a/bsp/qemu-virt64-aarch64/qemu-debug.sh b/bsp/qemu-virt64-aarch64/qemu-debug.sh deleted file mode 100644 index 6e60986085e..00000000000 --- a/bsp/qemu-virt64-aarch64/qemu-debug.sh +++ /dev/null @@ -1,8 +0,0 @@ -if [ ! 
-f "sd.bin" ]; then -dd if=/dev/zero of=sd.bin bs=1024 count=65536 -mkfs.fat sd.bin -fi -qemu-system-aarch64 -M virt,gic-version=2 -cpu cortex-a53 -m 128M -smp 4 -kernel rtthread.bin -nographic \ --drive if=none,file=sd.bin,format=raw,id=blk0 -device virtio-blk-device,drive=blk0,bus=virtio-mmio-bus.0 \ --netdev user,id=net0 -device virtio-net-device,netdev=net0,bus=virtio-mmio-bus.1 -s -S \ --device virtio-serial-device -chardev socket,host=127.0.0.1,port=4321,server=on,wait=off,telnet=on,id=console0 -device virtserialport,chardev=console0 diff --git a/bsp/qemu-virt64-aarch64/qemu-graphic.bat b/bsp/qemu-virt64-aarch64/qemu-graphic.bat deleted file mode 100644 index 74332bb5eea..00000000000 --- a/bsp/qemu-virt64-aarch64/qemu-graphic.bat +++ /dev/null @@ -1,13 +0,0 @@ -@echo off -if exist sd.bin goto run -qemu-img create -f raw sd.bin 64M - -:run -qemu-system-aarch64 -M virt,gic-version=2 -cpu cortex-a53 -smp 4 -kernel rtthread.bin -serial stdio ^ --drive if=none,file=sd.bin,format=raw,id=blk0 -device virtio-blk-device,drive=blk0,bus=virtio-mmio-bus.0 ^ --netdev user,id=net0 -device virtio-net-device,netdev=net0,bus=virtio-mmio-bus.1 ^ --device virtio-gpu-device,xres=800,yres=600,bus=virtio-mmio-bus.2 ^ --device virtio-keyboard-device,bus=virtio-mmio-bus.3 ^ --device virtio-mouse-device,bus=virtio-mmio-bus.4 ^ --device virtio-tablet-device,bus=virtio-mmio-bus.5 ^ --device virtio-serial-device -chardev socket,host=127.0.0.1,port=4321,server=on,wait=off,telnet=on,id=console0 -device virtserialport,chardev=console0 diff --git a/bsp/qemu-virt64-aarch64/qemu-graphic.sh b/bsp/qemu-virt64-aarch64/qemu-graphic.sh deleted file mode 100644 index 52e78ba1770..00000000000 --- a/bsp/qemu-virt64-aarch64/qemu-graphic.sh +++ /dev/null @@ -1,12 +0,0 @@ -if [ ! 
-f "sd.bin" ]; then -dd if=/dev/zero of=sd.bin bs=1024 count=65536 -mkfs.fat sd.bin -fi -qemu-system-aarch64 -M virt,gic-version=2 -cpu cortex-a53 -smp 4 -kernel rtthread.bin -serial stdio \ --drive if=none,file=sd.bin,format=raw,id=blk0 -device virtio-blk-device,drive=blk0,bus=virtio-mmio-bus.0 \ --netdev user,id=net0 -device virtio-net-device,netdev=net0,bus=virtio-mmio-bus.1 \ --device virtio-gpu-device,xres=800,yres=600,bus=virtio-mmio-bus.2 \ --device virtio-keyboard-device,bus=virtio-mmio-bus.3 \ --device virtio-mouse-device,bus=virtio-mmio-bus.4 \ --device virtio-tablet-device,bus=virtio-mmio-bus.5 \ --device virtio-serial-device -chardev socket,host=127.0.0.1,port=4321,server=on,wait=off,telnet=on,id=console0 -device virtserialport,chardev=console0 diff --git a/bsp/qemu-virt64-aarch64/qemu.bat b/bsp/qemu-virt64-aarch64/qemu.bat deleted file mode 100644 index de958844932..00000000000 --- a/bsp/qemu-virt64-aarch64/qemu.bat +++ /dev/null @@ -1,9 +0,0 @@ -@echo off -if exist sd.bin goto run -qemu-img create -f raw sd.bin 64M - -:run -qemu-system-aarch64 -M virt,gic-version=2 -cpu cortex-a53 -smp 4 -kernel rtthread.bin -nographic ^ --drive if=none,file=sd.bin,format=raw,id=blk0 -device virtio-blk-device,drive=blk0,bus=virtio-mmio-bus.0 ^ --netdev user,id=net0 -device virtio-net-device,netdev=net0,bus=virtio-mmio-bus.1 ^ --device virtio-serial-device -chardev socket,host=127.0.0.1,port=4321,server=on,wait=off,telnet=on,id=console0 -device virtserialport,chardev=console0 diff --git a/bsp/qemu-virt64-aarch64/qemu.py b/bsp/qemu-virt64-aarch64/qemu.py new file mode 100755 index 00000000000..775a605ba2d --- /dev/null +++ b/bsp/qemu-virt64-aarch64/qemu.py @@ -0,0 +1,288 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +import os, sys, re, shutil + +with open('rtconfig.h', 'r') as file: + rtconfig_header = file.read() + +if os.getenv('RTT_QEMU_ROOT'): + RTT_QEMU_ROOT = os.getenv('RTT_QEMU_ROOT') +else: + RTT_QEMU_ROOT = "" + +if os.getenv('RTT_ROOT'): + RTT_ROOT = 
os.getenv('RTT_ROOT') +else: + RTT_ROOT = os.path.join(os.getcwd(), '..', '..') + +sys.path.append(RTT_ROOT + '/tools') + +from building import * +import dtc + +# WSL? +is_windows = sys.platform.startswith('win') or \ + os.popen('which qemu-system-aarch64 | xargs file').read().find('PE') >= 0 or \ + (os.system("readlink `which qemu-system-aarch64` > /dev/null") == 0 and \ + os.popen('readlink `which qemu-system-aarch64` | xargs file').read().find('PE') >= 0) + +class QEMU_VERSION: + def __init__(self): + cmd = os.popen(RTT_QEMU_ROOT + "qemu-system-aarch64 --version").readlines()[0] + version = cmd[cmd.find("version ") + 8: -1].split('.') + + self.major = version[0] + self.minor = version[1] + self.revision = version[2] + # == + def __eq__(self, version_in): + version = version_in.split('.') + return self.major == version[0] and self.minor == version[1] and self.revision == version[2] + # >= + def __ge__(self, version_in): + return self.__gt__(version_in) or self.__eq__(version_in) + # > + def __gt__(self, version_in): + version = version_in.split('.') + return int(self.major) > int(version[0]) or \ + (int(self.major) == int(version[0]) and int(self.minor) > int(version[1])) or \ + (int(self.major) == int(version[0]) and int(self.minor) == int(version[1]) and int(self.revision) > int(version[2])) + # <= + def __le__(self, version_in): + return self.__lt__(version_in) or self.__eq__(version_in) + # < + def __lt__(self, version_in): + return not self.__ge__(version_in) + # != + def __ne__(self, version_in): + return not self.__eq__(version_in) + + def toString(self): + return self.major + '.' + self.minor + '.' 
+ self.revision + +qemu_version = QEMU_VERSION() + +opt = sys.argv + +graphic_cfg = """ \ + -device ramfb \ + -device virtio-keyboard-device \ + -device virtio-tablet-device \ +""" + +smmu_cfg = "" +iommu_cfg = "" +cdrom_cfg = "" +p9_cfg = "" +ufs_cfg = "" +amp_cfg = "" + +q_gic = 2 +q_dumpdtb = "" +q_el = 1 +q_smp = int(re.findall(r'#define\s+RT_CPUS_NR\s+(\d+)', rtconfig_header)[0]) +q_mem = 128 +q_graphic = "-nographic" +q_debug = "" +q_bootargs = "console=ttyAMA0 earlycon cma=8M coherent_pool=2M root=vda0 rootfstype=elm rootwait rw" +q_initrd = "" +q_block = "virtio" +q_net = "user" +q_ssh = 12055 +q_scsi = "scsi" +q_cdrom = "" +q_flash = "flash" +q_emmc = "emmc" +q_nvme = "nvme" +q_ahci = "ahci" +q_gl = None +q_9p = "" + +def is_opt(key, inkey): + if str("-" + key) == inkey: + return True + return False + +for i in range(len(opt)): + if i == 0: + continue + inkey = opt[i] + + if is_opt("gic", inkey): q_gic = int(opt[i + 1]) + if is_opt("dumpdtb", inkey): q_dumpdtb = str(",dumpdtb=" + opt[i + 1]) + if is_opt("el", inkey): q_el = int(opt[i + 1]) + if is_opt("smp", inkey): q_smp = int(opt[i + 1]) + if is_opt("mem", inkey): q_mem = int(opt[i + 1]) + if is_opt("debug", inkey): q_debug = "-S -s" + if is_opt("bootargs", inkey): q_bootargs = opt[i + 1] + if is_opt("initrd", inkey): q_initrd = str("-initrd " + opt[i + 1]) + if is_opt("graphic", inkey): q_graphic = graphic_cfg + if is_opt("virtio-block", inkey): q_block = opt[i + 1] + if is_opt("tap", inkey): q_net = "tap,ifname=tap0" + if is_opt("ssh", inkey): q_ssh = int(opt[i + 1]) + if is_opt("cdrom", inkey): q_cdrom = opt[i + 1] + if is_opt("flash", inkey): q_flash = opt[i + 1] + if is_opt("emmc", inkey): q_emmc = opt[i + 1] + if is_opt("nvme", inkey): q_nvme = opt[i + 1] + if is_opt("gl", inkey): q_gl = "-device virtio-gpu-gl-pci -display {},gl=on ".format(opt[i + 1]) + if is_opt("9p", inkey): q_9p = opt[i + 1] + +# SMP +if q_smp > 8: + q_gic = 3 + +# Exception Level +if q_el == 1: + q_el = "" +elif q_el == 2: + 
q_el = ",virtualization=on" + if q_gic == 3: + q_gic = "max" +elif q_el == 3: + q_el = ",secure=on" +else: + print("Error: Invalid -el {}".format(q_el)) + exit(-1) + +# Display +# --enable-opengl --enable-virglrenderer +if q_graphic != "-nographic": + if q_gl: + q_graphic += q_gl + else: + q_graphic += "-device virtio-gpu-device " +elif q_gl != None: + print("Error: GL should in graphic mode") + exit(-1) + +# Net +# --enable-slirp +# Enable TAP in example (not support in Windows, Maybe WSL2): +# ETH0=`/sbin/route -n | awk '$1 == "0.0.0.0" {print $NF}'` +# ip link add br0 type bridge +# ip link set br0 up +# echo 1 | tee /proc/sys/net/ipv4/ip_forward > /dev/null +# ip link set ${ETH0} up +# ip link set ${ETH0} master br0 +# dhclient br0 +# ip tuntap add dev tap0 mode tap +# ip link set dev tap0 up +# ip link set tap0 master br0 +# +# Disable TAP in example (not support in Windows, Maybe WSL2): +# ip link set tap0 down +# ip tuntap del dev tap0 mod tap +# echo 0 | tee /proc/sys/net/ipv4/ip_forward > /dev/null +# ip link set br0 down +# ip link del br0 type bridge +if q_net.find("user") >= 0: + q_net += ",hostfwd=tcp::{}-:22".format(q_ssh) +else: + if not is_windows: + q_net += ",script=no,downscript=no" + print("Warning: SSH not set in TAP") + +# Storage +# pflash have pflash0 and pflash1, pflash0 is used for BootROMs such as UEFI +# if we load file to pflash0, QEMU will boot from it, so we only use pflash1. +# Well, we can R/W in pflash0 by CFI driver, but the data will lost after QEMU exits. +# +# partitions (not support in Windows, Maybe WSL2): +# modprobe nbd max_part=12 +# qemu-nbd --connect=/dev/nbdX ABC.qcow2 +# fdisk /dev/nbdX +# ... 
+# qemu-nbd --disconnect /dev/nbdX +disk_list = [q_block, q_scsi, q_flash, q_emmc, q_nvme, q_ahci] + +for disk in disk_list: + disk += ".qcow2" + if not os.path.exists(disk): + os.system("qemu-img create -f qcow2 {} 64M".format(disk)) + +if len(q_cdrom) > 0: + cdrom_cfg = """ \ + -device scsi-cd,channel=0,scsi-id=0,lun=1,drive=scsi1 \ + -drive file={},format=raw,if=none,id=scsi1 \ + """.format(q_cdrom) + +# Share File System +# --enable-virtfs +if len(q_9p) > 0: + p9_tag = "hostshare" + p9_cfg = """ \ + -fsdev local,security_model=passthrough,id=fsdev0,path={} \ + -device virtio-9p-device,fsdev=fsdev0,mount_tag={} \ + """.format(q_9p, p9_tag) + q_bootargs += " 9p.tag={} 9p.mount=host".format(p9_tag) + +# Note +# Hot-plug / Hot-unplug in QEMU monitor: +# (qemu) chardev-add socket,host=127.0.0.1,port=4323,server=on,wait=off,telnet=on,id=console2 +# (qemu) device_add virtserialport,chardev=console2,name=org.rt-thread.port,id=port2 +# (qemu) device_del port2 +# (qemu) chardev-remove console2 +# +# VirtIO version disable legacy to set version >= 1.0: +# -global virtio-mmio.force-legacy=false +# +# VirtIO used virtqueue packed (version >= 1.0) +# -device virtio-XYZ-device,packed=on +# +# VirtIO used PCI/PCIe bus (version >= 1.0) +# -device virtio-XYZ-pci,disable-legacy=on + +cmd_base = """ +{}qemu-system-aarch64 \ + -M virt,acpi=on,its=on,gic-version={}{}{} \ + -cpu max \ + -smp {} \ + -m {} \ + -kernel rtthread.bin \ + -append "{}" \ + {} \ + -serial mon:stdio \ + {} \ + {} \ + -drive if=none,file={}.qcow2,format=qcow2,id=blk0 \ + -device virtio-blk-device,drive=blk0 \ + -netdev {},id=net0 \ + -device virtio-net-device,netdev=net0,speed=800000 \ + -device virtio-rng-device \ + -device virtio-scsi-pci,disable-legacy=on \ + -device scsi-hd,channel=0,scsi-id=0,lun=0,drive=scsi0 \ + -drive file={}.qcow2,format=qcow2,if=none,id=scsi0 \ + {} \ + {} \ + -device virtio-crypto-device,cryptodev=vcrypto0 \ + -object cryptodev-backend-builtin,id=vcrypto0 \ + -device 
virtio-serial-device \ + -chardev socket,host=127.0.0.1,port=4321,server=on,wait=off,telnet=on,id=console0 \ + -device virtserialport,chardev=console0,name=org.rt-thread.console \ + -drive if=pflash,file={}.qcow2,format=qcow2,index=1 \ + -device pci-serial,chardev=console1 \ + -chardev socket,host=127.0.0.1,port=4322,server=on,wait=off,telnet=on,id=console1 \ + -device sdhci-pci -device sd-card,drive=emmc0 \ + -drive if=none,file={}.qcow2,format=qcow2,id=emmc0 \ + -device nvme,id=nvme-ctrl-0,serial=deadbeef \ + -drive if=none,file={}.qcow2,format=qcow2,id=nvme0 \ + -device nvme-ns,drive=nvme0 \ + -device ahci,id=ahci-ctrl-0 \ + -drive if=none,file={}.qcow2,format=qcow2,id=ahci0 \ + -device ide-hd,drive=ahci0 \ + -device i6300esb -watchdog-action reset \ + -device edu,dma_mask=0xffffffff +""" +def cmd(): + return cmd_base.format(RTT_QEMU_ROOT, q_gic, q_dumpdtb, q_el, q_smp, q_mem, q_bootargs, q_initrd, + q_graphic, q_debug, q_block, q_net, q_scsi, cdrom_cfg, p9_cfg, + q_flash, q_emmc, q_nvme, q_ahci) + +def dumpdtb(): + dtb = q_dumpdtb.split('=')[-1] + dtc.dtb_to_dts(RTT_ROOT, dtb) + +os.system(cmd()) + +if len(q_dumpdtb) != 0: + dumpdtb() diff --git a/bsp/qemu-virt64-aarch64/qemu.sh b/bsp/qemu-virt64-aarch64/qemu.sh deleted file mode 100755 index 4f07acaad44..00000000000 --- a/bsp/qemu-virt64-aarch64/qemu.sh +++ /dev/null @@ -1,8 +0,0 @@ -if [ ! 
-f "sd.bin" ]; then -dd if=/dev/zero of=sd.bin bs=1024 count=65536 -mkfs.fat sd.bin -fi -qemu-system-aarch64 -M virt,gic-version=2 -cpu cortex-a53 -m 128M -smp 4 -kernel rtthread.bin -nographic \ --drive if=none,file=sd.bin,format=raw,id=blk0 -device virtio-blk-device,drive=blk0,bus=virtio-mmio-bus.0 \ --netdev user,id=net0 -device virtio-net-device,netdev=net0,bus=virtio-mmio-bus.1 \ --device virtio-serial-device -chardev socket,host=127.0.0.1,port=4321,server=on,wait=off,telnet=on,id=console0 -device virtserialport,chardev=console0 diff --git a/bsp/qemu-virt64-aarch64/rtconfig.h b/bsp/qemu-virt64-aarch64/rtconfig.h index 7a4f71034ba..0c1439f5461 100644 --- a/bsp/qemu-virt64-aarch64/rtconfig.h +++ b/bsp/qemu-virt64-aarch64/rtconfig.h @@ -7,17 +7,6 @@ /* rt_vsnprintf options */ -#define RT_KLIBC_USING_VSNPRINTF_LONGLONG -#define RT_KLIBC_USING_VSNPRINTF_STANDARD -#define RT_KLIBC_USING_VSNPRINTF_DECIMAL_SPECIFIERS -#define RT_KLIBC_USING_VSNPRINTF_EXPONENTIAL_SPECIFIERS -#define RT_KLIBC_USING_VSNPRINTF_WRITEBACK_SPECIFIER -#define RT_KLIBC_USING_VSNPRINTF_CHECK_NUL_IN_FORMAT_SPECIFIER -#define RT_KLIBC_USING_VSNPRINTF_INTEGER_BUFFER_SIZE 32 -#define RT_KLIBC_USING_VSNPRINTF_DECIMAL_BUFFER_SIZE 32 -#define RT_KLIBC_USING_VSNPRINTF_FLOAT_PRECISION 6 -#define RT_KLIBC_USING_VSNPRINTF_MAX_INTEGRAL_DIGITS_FOR_DECIMAL 9 -#define RT_KLIBC_USING_VSNPRINTF_LOG10_TAYLOR_TERMS 4 /* end of rt_vsnprintf options */ /* rt_vsscanf options */ @@ -124,7 +113,8 @@ #define RT_USING_CONSOLE #define RT_CONSOLEBUF_SIZE 256 #define RT_CONSOLE_DEVICE_NAME "uart0" -#define RT_VER_NUM 0x50201 +#define RT_USING_CONSOLE_OUTPUT_CTL +#define RT_VER_NUM 0x50300 #define RT_USING_STDC_ATOMIC #define RT_BACKTRACE_LEVEL_MAX_NR 32 /* end of RT-Thread Kernel */ @@ -197,6 +187,7 @@ #define RT_DFS_ELM_MUTEX_TIMEOUT 3000 /* end of elm-chan's FatFs, Generic FAT Filesystem Module */ #define RT_USING_DFS_DEVFS +#define RT_USING_DFS_9PFS #define RT_USING_DFS_ROMFS /* end of DFS: device virtual file system */ 
@@ -213,27 +204,111 @@ #define RT_USING_SERIAL_V1 #define RT_SERIAL_USING_DMA #define RT_SERIAL_RB_BUFSZ 256 +#define RT_SERIAL_PL011 +#define RT_SERIAL_8250 +#define RT_SERIAL_8250_PCI +#define RT_USING_CLOCK_TIME +#define RT_CLOCK_TIME_ARM_ARCH #define RT_USING_NULL #define RT_USING_ZERO #define RT_USING_RANDOM -#define RT_USING_PM -#define PM_TICKLESS_THRESHOLD_TIME 2 +#define RT_USING_MTD_NOR +#define RT_USING_MTD_NOR_CFI #define RT_USING_RTC +#define RT_USING_ALARM +#define RT_ALARM_STACK_SIZE 8192 +#define RT_ALARM_TIMESLICE 5 +#define RT_ALARM_PRIORITY 10 #define RT_USING_SOFT_RTC +#define RT_RTC_PL031 +#define RT_USING_WDT +#define RT_WDT_I6300ESB +#define RT_USING_LCD +#define RT_USING_GRAPHIC +#define RT_GRAPHIC_FB +#define RT_GRAPHIC_LOGO +#define RT_GRAPHIC_LOGO_RT_THREAD_CLUT224 +#define RT_USING_HWCRYPTO +#define RT_HWCRYPTO_DEFAULT_NAME "hwcryto" +#define RT_HWCRYPTO_IV_MAX_SIZE 16 +#define RT_HWCRYPTO_KEYBIT_MAX_SIZE 256 +#define RT_HWCRYPTO_USING_AES +#define RT_HWCRYPTO_USING_AES_ECB +#define RT_HWCRYPTO_USING_AES_CBC +#define RT_HWCRYPTO_USING_AES_CTR +#define RT_HWCRYPTO_USING_DES +#define RT_HWCRYPTO_USING_DES_ECB +#define RT_HWCRYPTO_USING_DES_CBC +#define RT_HWCRYPTO_USING_3DES +#define RT_HWCRYPTO_USING_3DES_ECB +#define RT_HWCRYPTO_USING_3DES_CBC +#define RT_HWCRYPTO_USING_RC4 +#define RT_HWCRYPTO_USING_RNG +#define RT_USING_INPUT +#define RT_INPUT_POWER +#define RT_INPUT_KEYBOARD +#define RT_INPUT_KEYBOARD_GPIO +#define RT_USING_ATA +#define RT_ATA_AHCI +#define RT_ATA_AHCI_PCI +#define RT_USING_BLK + +/* Partition Types */ + +#define RT_BLK_PARTITION_DFS +#define RT_BLK_PARTITION_EFI +/* end of Partition Types */ +#define RT_USING_SCSI +#define RT_SCSI_SD +#define RT_SCSI_CDROM +#define RT_USING_FIRMWARE +#define RT_FIRMWARE_QEMU_FW_CFG +#define RT_USING_REGULATOR + +/* Power Management (PM) Domains device drivers */ + +/* end of Power Management (PM) Domains device drivers */ #define RT_USING_VIRTIO -#define RT_USING_VIRTIO10 -#define 
RT_USING_VIRTIO_MMIO_ALIGN -#define RT_USING_VIRTIO_BLK -#define RT_USING_VIRTIO_CONSOLE -#define RT_USING_VIRTIO_CONSOLE_PORT_MAX_NR 4 -#define RT_USING_VIRTIO_GPU -#define RT_USING_VIRTIO_INPUT +#define RT_VIRTIO_TRANSPORT_MMIO +#define RT_VIRTIO_TRANSPORT_PCI +#define RT_VIRTIO_BLK +#define RT_VIRTIO_CONSOLE +#define RT_VIRTIO_RNG +#define RT_VIRTIO_SCSI +#define RT_VIRTIO_RPROC_SERIAL +#define RT_VIRTIO_GPU +#define RT_VIRTIO_CRYPTO +#define RT_USING_DMA +#define RT_USING_MFD +#define RT_MFD_EDU +#define RT_MFD_SYSCON #define RT_USING_OFW #define RT_FDT_EARLYCON_MSG_SIZE 128 #define RT_USING_OFW_BUS_RANGES_NUMBER 8 +#define RT_USING_PCI +#define RT_PCI_MSI +#define RT_PCI_SYS_64BIT +#define RT_PCI_CACHE_LINE_SIZE 8 + +/* PCI Device Drivers */ + +#define RT_PCI_ECAM +#define RT_PCI_HOST_COMMON +#define RT_PCI_HOST_GENERIC +#define RT_USING_PIC +#define MAX_HANDLERS 512 +#define RT_PIC_ARM_GIC +#define RT_PIC_ARM_GIC_V2M +#define RT_PIC_ARM_GIC_V3 +#define RT_PIC_ARM_GIC_V3_ITS +#define RT_PIC_ARM_GIC_V3_ITS_IRQ_MAX 127 +#define RT_PIC_ARM_GIC_MAX_NR 1 #define RT_USING_PIN -#define RT_USING_KTIME +#define RT_PIN_PL061 #define RT_USING_CLK + +/* SoC (System on Chip) Drivers */ + /* end of Device Drivers */ /* C/C++ and POSIX layer */ @@ -512,19 +587,4 @@ /* end of RT-Thread online packages */ #define SOC_VIRT64_AARCH64 -/* AARCH64 qemu virt64 configs */ - -#define BSP_SUPPORT_FPU -#define BSP_USING_UART -#define RT_USING_UART0 -#define BSP_USING_RTC -#define BSP_USING_PIN -#define BSP_USING_VIRTIO_BLK -#define BSP_USING_VIRTIO_CONSOLE -#define BSP_USING_VIRTIO_GPU -#define BSP_USING_VIRTIO_INPUT -#define BSP_USING_GIC -#define BSP_USING_GICV2 -/* end of AARCH64 qemu virt64 configs */ - #endif diff --git a/libcpu/aarch64/common/trap.c b/libcpu/aarch64/common/trap.c index 424e29aacfe..0ac2dca8bab 100644 --- a/libcpu/aarch64/common/trap.c +++ b/libcpu/aarch64/common/trap.c @@ -15,6 +15,9 @@ #include #include "interrupt.h" #include "mm_aspace.h" +#ifdef RT_USING_PIC 
+#include +#endif #define DBG_TAG "libcpu.trap" #define DBG_LVL DBG_LOG diff --git a/src/utest/smp/smp_interrupt_pri_tc.c b/src/utest/smp/smp_interrupt_pri_tc.c index edb54703209..5293ab652a6 100644 --- a/src/utest/smp/smp_interrupt_pri_tc.c +++ b/src/utest/smp/smp_interrupt_pri_tc.c @@ -12,6 +12,13 @@ #include "utest.h" #include +#ifdef RT_USING_PIC +#include + +#define rt_hw_interrupt_get_priority rt_pic_irq_get_priority +#define rt_hw_interrupt_set_priority rt_pic_irq_set_priority +#endif + /** * @brief Setting the Interrupt Priority Test. *