diff --git a/arch/riscv/configs/starfive_visionfive2_defconfig b/arch/riscv/configs/starfive_visionfive2_defconfig new file mode 100644 index 0000000000000..61c0780901c13 --- /dev/null +++ b/arch/riscv/configs/starfive_visionfive2_defconfig @@ -0,0 +1,567 @@ +CONFIG_COMPILE_TEST=y +# CONFIG_WERROR is not set +CONFIG_DEFAULT_HOSTNAME="StarFive" +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_USELIB=y +CONFIG_NO_HZ_IDLE=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_BPF_SYSCALL=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_CGROUPS=y +CONFIG_CGROUP_SCHED=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_CGROUP_BPF=y +CONFIG_NAMESPACES=y +CONFIG_USER_NS=y +CONFIG_CHECKPOINT_RESTORE=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_EXPERT=y +CONFIG_PERF_EVENTS=y +CONFIG_SOC_STARFIVE=y +CONFIG_SOC_STARFIVE_JH7110=y +CONFIG_SMP=y +CONFIG_HZ_100=y +CONFIG_HIBERNATION=y +CONFIG_PM_STD_PARTITION="PARTLABEL=hibernation" +CONFIG_PM_DEBUG=y +CONFIG_PM_ADVANCED_DEBUG=y +CONFIG_PM_TEST_SUSPEND=y +CONFIG_CPU_IDLE=y +CONFIG_RISCV_SBI_CPUIDLE=y +# CONFIG_SECCOMP is not set +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_BINFMT_MISC=y +CONFIG_PAGE_REPORTING=y +CONFIG_CMA=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_IP_PNP_RARP=y +CONFIG_NETFILTER=y +CONFIG_NETFILTER_NETLINK_ACCT=y +CONFIG_NETFILTER_NETLINK_QUEUE=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_TABLES=y +CONFIG_NFT_CT=y +CONFIG_NFT_COMPAT=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +CONFIG_NETFILTER_XT_MATCH_IPCOMP=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +CONFIG_NETFILTER_XT_MATCH_U32=y +CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_DUP_IPV4=y +CONFIG_NFT_FIB_IPV4=y +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_FILTER=y 
+CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_NAT=y +CONFIG_IP_NF_TARGET_MASQUERADE=y +CONFIG_IP_NF_TARGET_NETMAP=y +CONFIG_IP_NF_TARGET_REDIRECT=y +CONFIG_NETLINK_DIAG=y +CONFIG_CAN=y +CONFIG_IPMS_CAN=y +CONFIG_BT=y +CONFIG_BT_RFCOMM=y +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=y +CONFIG_BT_BNEP_MC_FILTER=y +CONFIG_BT_BNEP_PROTO_FILTER=y +CONFIG_BT_HCIBTUSB=m +# CONFIG_BT_HCIBTUSB_BCM is not set +# CONFIG_BT_HCIBTUSB_RTL is not set +CONFIG_BT_AICUSB=y +CONFIG_CFG80211=y +CONFIG_MAC80211=y +CONFIG_RFKILL=y +CONFIG_NET_9P=y +CONFIG_NET_9P_VIRTIO=y +CONFIG_PCI=y +# CONFIG_PCIEASPM is not set +CONFIG_PCIE_PLDA=y +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_MTD=y +CONFIG_MTD_BLOCK=y +CONFIG_MTD_SPI_NOR=y +CONFIG_OF_CONFIGFS=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_VIRTIO_BLK=y +CONFIG_BLK_DEV_NVME=y +CONFIG_EEPROM_AT24=y +CONFIG_BLK_DEV_SD=y +CONFIG_BLK_DEV_SR=y +CONFIG_SCSI_VIRTIO=y +CONFIG_ATA=y +CONFIG_SATA_AHCI=y +CONFIG_MD=y +CONFIG_BLK_DEV_DM=m +CONFIG_NETDEVICES=y +CONFIG_TUN=m +CONFIG_VIRTIO_NET=y +# CONFIG_NET_VENDOR_ALACRITECH is not set +# CONFIG_NET_VENDOR_AMAZON is not set +# CONFIG_NET_VENDOR_AQUANTIA is not set +# CONFIG_NET_VENDOR_ARC is not set +# CONFIG_NET_VENDOR_BROADCOM is not set +# CONFIG_NET_VENDOR_CADENCE is not set +# CONFIG_NET_VENDOR_CAVIUM is not set +# CONFIG_NET_VENDOR_CORTINA is not set +# CONFIG_NET_VENDOR_EZCHIP is not set +# CONFIG_NET_VENDOR_GOOGLE is not set +# CONFIG_NET_VENDOR_HUAWEI is not set +# CONFIG_NET_VENDOR_INTEL is not set +# CONFIG_NET_VENDOR_MARVELL is not set +# CONFIG_NET_VENDOR_MELLANOX is not set +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MICROCHIP is not set +# CONFIG_NET_VENDOR_MICROSEMI is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NETRONOME is not set +# CONFIG_NET_VENDOR_NI is not set +# CONFIG_NET_VENDOR_PENSANDO is not set +# CONFIG_NET_VENDOR_QUALCOMM is not set +CONFIG_R8169=y +# CONFIG_NET_VENDOR_RENESAS is not set +# CONFIG_NET_VENDOR_ROCKER is not set +# 
CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SOLARFLARE is not set +# CONFIG_NET_VENDOR_SOCIONEXT is not set +CONFIG_STMMAC_ETH=y +CONFIG_STMMAC_SELFTESTS=y +CONFIG_DWMAC_DWC_QOS_ETH=y +CONFIG_DWMAC_STARFIVE_PLAT=y +# CONFIG_NET_VENDOR_SYNOPSYS is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_NET_VENDOR_WIZNET is not set +# CONFIG_NET_VENDOR_XILINX is not set +CONFIG_MARVELL_PHY=y +CONFIG_MICREL_PHY=y +CONFIG_MOTORCOMM_PHY=y +CONFIG_IWLWIFI=y +CONFIG_IWLDVM=y +CONFIG_IWLMVM=y +CONFIG_HOSTAP=y +# CONFIG_RTL_CARDS is not set +CONFIG_USB_WIFI_ECR6600U=y +CONFIG_AIC_WLAN_SUPPORT=y +CONFIG_AIC8800_WLAN_SUPPORT=m +CONFIG_AIC_LOADFW_SUPPORT=m +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_TOUCHSCREEN_GOODIX=y +CONFIG_TOUCHSCREEN_TINKER_FT5406=y +CONFIG_SERIO_LIBPS2=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_NR_UARTS=6 +CONFIG_SERIAL_8250_RUNTIME_UARTS=6 +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_MANY_PORTS=y +CONFIG_SERIAL_8250_DW=y +CONFIG_SERIAL_OF_PLATFORM=y +CONFIG_SERIAL_EARLYCON_RISCV_SBI=y +CONFIG_HVC_RISCV_SBI=y +CONFIG_TTY_PRINTK=y +CONFIG_VIRTIO_CONSOLE=y +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_STARFIVE=y +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_DESIGNWARE_PLATFORM=y +CONFIG_SPI=y +CONFIG_SPI_CADENCE_QUADSPI=y +CONFIG_SPI_PL022_STARFIVE=y +CONFIG_SPI_SIFIVE=y +CONFIG_SPI_SPIDEV=y +# CONFIG_PTP_1588_CLOCK is not set +CONFIG_PINCTRL=y +CONFIG_PINCTRL_STARFIVE=y +CONFIG_PINCTRL_STARFIVE_JH7110=y +CONFIG_GPIO_SYSFS=y +CONFIG_POWER_RESET=y +CONFIG_POWER_RESET_GPIO_RESTART=y +CONFIG_POWER_RESET_SYSCON=y +CONFIG_POWER_RESET_SYSCON_POWEROFF=y +CONFIG_SENSORS_SFCTEMP=y +CONFIG_THERMAL=y +CONFIG_THERMAL_WRITABLE_TRIPS=y +CONFIG_CPU_THERMAL=y +CONFIG_THERMAL_EMULATION=y +CONFIG_WATCHDOG=y +CONFIG_WATCHDOG_SYSFS=y +CONFIG_STARFIVE_WATCHDOG=y +CONFIG_REGULATOR=y +CONFIG_REGULATOR_AXP15060=y +# 
CONFIG_MEDIA_CEC_SUPPORT is not set +CONFIG_MEDIA_SUPPORT=y +CONFIG_MEDIA_USB_SUPPORT=y +CONFIG_USB_VIDEO_CLASS=y +CONFIG_V4L_PLATFORM_DRIVERS=y +CONFIG_VIDEO_STF_VIN=y +CONFIG_VIN_SENSOR_OV4689=y +CONFIG_VIN_SENSOR_IMX219=y +CONFIG_V4L_MEM2MEM_DRIVERS=y +CONFIG_VIDEO_WAVE_VPU=m +CONFIG_VIDEO_IMX708=y +# CONFIG_CXD2880_SPI_DRV is not set +# CONFIG_MEDIA_TUNER_SIMPLE is not set +# CONFIG_MEDIA_TUNER_TDA18250 is not set +# CONFIG_MEDIA_TUNER_TDA8290 is not set +# CONFIG_MEDIA_TUNER_TDA827X is not set +# CONFIG_MEDIA_TUNER_TDA18271 is not set +# CONFIG_MEDIA_TUNER_TDA9887 is not set +# CONFIG_MEDIA_TUNER_TEA5761 is not set +# CONFIG_MEDIA_TUNER_TEA5767 is not set +# CONFIG_MEDIA_TUNER_MSI001 is not set +# CONFIG_MEDIA_TUNER_MT20XX is not set +# CONFIG_MEDIA_TUNER_MT2060 is not set +# CONFIG_MEDIA_TUNER_MT2063 is not set +# CONFIG_MEDIA_TUNER_MT2266 is not set +# CONFIG_MEDIA_TUNER_MT2131 is not set +# CONFIG_MEDIA_TUNER_QT1010 is not set +# CONFIG_MEDIA_TUNER_XC2028 is not set +# CONFIG_MEDIA_TUNER_XC5000 is not set +# CONFIG_MEDIA_TUNER_XC4000 is not set +# CONFIG_MEDIA_TUNER_MXL5005S is not set +# CONFIG_MEDIA_TUNER_MXL5007T is not set +# CONFIG_MEDIA_TUNER_MC44S803 is not set +# CONFIG_MEDIA_TUNER_MAX2165 is not set +# CONFIG_MEDIA_TUNER_TDA18218 is not set +# CONFIG_MEDIA_TUNER_FC0011 is not set +# CONFIG_MEDIA_TUNER_FC0012 is not set +# CONFIG_MEDIA_TUNER_FC0013 is not set +# CONFIG_MEDIA_TUNER_TDA18212 is not set +# CONFIG_MEDIA_TUNER_E4000 is not set +# CONFIG_MEDIA_TUNER_FC2580 is not set +# CONFIG_MEDIA_TUNER_M88RS6000T is not set +# CONFIG_MEDIA_TUNER_TUA9001 is not set +# CONFIG_MEDIA_TUNER_SI2157 is not set +# CONFIG_MEDIA_TUNER_IT913X is not set +# CONFIG_MEDIA_TUNER_R820T is not set +# CONFIG_MEDIA_TUNER_MXL301RF is not set +# CONFIG_MEDIA_TUNER_QM1D1C0042 is not set +# CONFIG_MEDIA_TUNER_QM1D1B0004 is not set +# CONFIG_DVB_STB0899 is not set +# CONFIG_DVB_STB6100 is not set +# CONFIG_DVB_STV090x is not set +# CONFIG_DVB_STV0910 is not set +# 
CONFIG_DVB_STV6110x is not set +# CONFIG_DVB_STV6111 is not set +# CONFIG_DVB_MXL5XX is not set +# CONFIG_DVB_DRXK is not set +# CONFIG_DVB_TDA18271C2DD is not set +# CONFIG_DVB_SI2165 is not set +# CONFIG_DVB_MN88472 is not set +# CONFIG_DVB_MN88473 is not set +# CONFIG_DVB_CX24110 is not set +# CONFIG_DVB_CX24123 is not set +# CONFIG_DVB_MT312 is not set +# CONFIG_DVB_ZL10036 is not set +# CONFIG_DVB_ZL10039 is not set +# CONFIG_DVB_S5H1420 is not set +# CONFIG_DVB_STV0288 is not set +# CONFIG_DVB_STB6000 is not set +# CONFIG_DVB_STV0299 is not set +# CONFIG_DVB_STV6110 is not set +# CONFIG_DVB_STV0900 is not set +# CONFIG_DVB_TDA8083 is not set +# CONFIG_DVB_TDA10086 is not set +# CONFIG_DVB_TDA8261 is not set +# CONFIG_DVB_VES1X93 is not set +# CONFIG_DVB_TUNER_ITD1000 is not set +# CONFIG_DVB_TUNER_CX24113 is not set +# CONFIG_DVB_TDA826X is not set +# CONFIG_DVB_TUA6100 is not set +# CONFIG_DVB_CX24116 is not set +# CONFIG_DVB_CX24117 is not set +# CONFIG_DVB_CX24120 is not set +# CONFIG_DVB_SI21XX is not set +# CONFIG_DVB_TS2020 is not set +# CONFIG_DVB_DS3000 is not set +# CONFIG_DVB_MB86A16 is not set +# CONFIG_DVB_TDA10071 is not set +# CONFIG_DVB_SP887X is not set +# CONFIG_DVB_CX22700 is not set +# CONFIG_DVB_CX22702 is not set +# CONFIG_DVB_S5H1432 is not set +# CONFIG_DVB_DRXD is not set +# CONFIG_DVB_L64781 is not set +# CONFIG_DVB_TDA1004X is not set +# CONFIG_DVB_NXT6000 is not set +# CONFIG_DVB_MT352 is not set +# CONFIG_DVB_ZL10353 is not set +# CONFIG_DVB_DIB3000MB is not set +# CONFIG_DVB_DIB3000MC is not set +# CONFIG_DVB_DIB7000M is not set +# CONFIG_DVB_DIB7000P is not set +# CONFIG_DVB_DIB9000 is not set +# CONFIG_DVB_TDA10048 is not set +# CONFIG_DVB_EC100 is not set +# CONFIG_DVB_STV0367 is not set +# CONFIG_DVB_CXD2820R is not set +# CONFIG_DVB_CXD2841ER is not set +# CONFIG_DVB_ZD1301_DEMOD is not set +# CONFIG_DVB_CXD2880 is not set +# CONFIG_DVB_VES1820 is not set +# CONFIG_DVB_TDA10021 is not set +# CONFIG_DVB_TDA10023 is not set +# 
CONFIG_DVB_STV0297 is not set +# CONFIG_DVB_NXT200X is not set +# CONFIG_DVB_OR51211 is not set +# CONFIG_DVB_OR51132 is not set +# CONFIG_DVB_BCM3510 is not set +# CONFIG_DVB_LGDT330X is not set +# CONFIG_DVB_LGDT3305 is not set +# CONFIG_DVB_LG2160 is not set +# CONFIG_DVB_S5H1409 is not set +# CONFIG_DVB_AU8522_DTV is not set +# CONFIG_DVB_AU8522_V4L is not set +# CONFIG_DVB_S5H1411 is not set +# CONFIG_DVB_MXL692 is not set +# CONFIG_DVB_S921 is not set +# CONFIG_DVB_DIB8000 is not set +# CONFIG_DVB_MB86A20S is not set +# CONFIG_DVB_TC90522 is not set +# CONFIG_DVB_MN88443X is not set +# CONFIG_DVB_PLL is not set +# CONFIG_DVB_TUNER_DIB0070 is not set +# CONFIG_DVB_TUNER_DIB0090 is not set +# CONFIG_DVB_DRX39XYJ is not set +# CONFIG_DVB_LNBH25 is not set +# CONFIG_DVB_LNBH29 is not set +# CONFIG_DVB_LNBP21 is not set +# CONFIG_DVB_LNBP22 is not set +# CONFIG_DVB_ISL6405 is not set +# CONFIG_DVB_ISL6421 is not set +# CONFIG_DVB_ISL6423 is not set +# CONFIG_DVB_A8293 is not set +# CONFIG_DVB_LGS8GL5 is not set +# CONFIG_DVB_LGS8GXX is not set +# CONFIG_DVB_ATBM8830 is not set +# CONFIG_DVB_TDA665x is not set +# CONFIG_DVB_IX2505V is not set +# CONFIG_DVB_M88RS2000 is not set +# CONFIG_DVB_AF9033 is not set +# CONFIG_DVB_HORUS3A is not set +# CONFIG_DVB_ASCOT2E is not set +# CONFIG_DVB_HELENE is not set +# CONFIG_DVB_CXD2099 is not set +# CONFIG_DVB_SP2 is not set +CONFIG_DRM_PANEL_JADARD_JD9365DA_H3=y +CONFIG_DRM_PANEL_STARFIVE_10INCH=y +CONFIG_DRM_VERISILICON=y +CONFIG_STARFIVE_INNO_HDMI=y +CONFIG_STARFIVE_DSI=y +CONFIG_DRM_IMG_ROGUE=y +CONFIG_DRM_LEGACY=y +CONFIG_FB=y +CONFIG_BACKLIGHT_CLASS_DEVICE=y +CONFIG_BACKLIGHT_PWM=y +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_SND_USB_AUDIO=y +CONFIG_SND_SOC=y +CONFIG_SND_DESIGNWARE_I2S=y +CONFIG_SND_SOC_SOF_TOPLEVEL=y +CONFIG_SND_SOC_SOF_OF=y +CONFIG_SND_SOC_SOF_STARFIVE_TOPLEVEL=y +CONFIG_SND_SOC_SOF_STARFIVE_SUPPORT=y +CONFIG_STARFIVE_DSP=y +CONFIG_SND_SOC_STARFIVE=y 
+CONFIG_SND_SOC_STARFIVE_PWMDAC=y +CONFIG_SND_SOC_STARFIVE_I2S=y +CONFIG_SND_SOC_STARFIVE_SOF_TDM_DAI=y +CONFIG_SND_SOC_AC108=y +CONFIG_SND_SOC_WM8960=y +CONFIG_SND_SIMPLE_CARD=y +CONFIG_UHID=y +CONFIG_USB=y +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_STORAGE=y +CONFIG_USB_UAS=y +CONFIG_USB_CDNS_SUPPORT=y +CONFIG_USB_CDNS3=y +CONFIG_USB_CDNS3_GADGET=y +CONFIG_USB_CDNS3_HOST=y +CONFIG_USB_CDNS3_STARFIVE=y +CONFIG_USB_SERIAL=m +CONFIG_USB_SERIAL_GENERIC=y +CONFIG_USB_SERIAL_AIRCABLE=m +CONFIG_USB_SERIAL_ARK3116=m +CONFIG_USB_SERIAL_BELKIN=m +CONFIG_USB_SERIAL_CH341=m +CONFIG_USB_SERIAL_WHITEHEAT=m +CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m +CONFIG_USB_SERIAL_CP210X=m +CONFIG_USB_SERIAL_CYPRESS_M8=m +CONFIG_USB_SERIAL_EMPEG=m +CONFIG_USB_SERIAL_FTDI_SIO=m +CONFIG_USB_SERIAL_VISOR=m +CONFIG_USB_SERIAL_IPAQ=m +CONFIG_USB_SERIAL_IR=m +CONFIG_USB_SERIAL_EDGEPORT=m +CONFIG_USB_SERIAL_EDGEPORT_TI=m +CONFIG_USB_SERIAL_F81232=m +CONFIG_USB_SERIAL_GARMIN=m +CONFIG_USB_SERIAL_IPW=m +CONFIG_USB_SERIAL_IUU=m +CONFIG_USB_SERIAL_KEYSPAN_PDA=m +CONFIG_USB_SERIAL_KEYSPAN=m +CONFIG_USB_SERIAL_KLSI=m +CONFIG_USB_SERIAL_KOBIL_SCT=m +CONFIG_USB_SERIAL_MCT_U232=m +CONFIG_USB_SERIAL_METRO=m +CONFIG_USB_SERIAL_MOS7720=m +CONFIG_USB_SERIAL_MOS7840=m +CONFIG_USB_SERIAL_NAVMAN=m +CONFIG_USB_SERIAL_PL2303=m +CONFIG_USB_SERIAL_OTI6858=m +CONFIG_USB_SERIAL_QCAUX=m +CONFIG_USB_SERIAL_QUALCOMM=m +CONFIG_USB_SERIAL_SPCP8X5=m +CONFIG_USB_SERIAL_SAFE=m +CONFIG_USB_SERIAL_SIERRAWIRELESS=m +CONFIG_USB_SERIAL_SYMBOL=m +CONFIG_USB_SERIAL_TI=m +CONFIG_USB_SERIAL_CYBERJACK=m +CONFIG_USB_SERIAL_OMNINET=m +CONFIG_USB_SERIAL_OPTICON=m +CONFIG_USB_SERIAL_XSENS_MT=m +CONFIG_USB_SERIAL_WISHBONE=m +CONFIG_USB_SERIAL_SSU100=m +CONFIG_USB_SERIAL_DEBUG=m +CONFIG_USB_GADGET=y +CONFIG_USB_CONFIGFS=y +CONFIG_USB_CONFIGFS_MASS_STORAGE=y +CONFIG_USB_CONFIGFS_F_FS=y +CONFIG_MMC=y +CONFIG_MMC_DEBUG=y +CONFIG_MMC_SDHCI=y +CONFIG_MMC_SDHCI_PLTFM=y +CONFIG_MMC_SDHCI_OF_DWCMSHC=y +CONFIG_MMC_SPI=y +CONFIG_MMC_DW=y +CONFIG_MMC_DW_STARFIVE=y 
+CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_GPIO=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=y +CONFIG_LEDS_TRIGGER_GPIO=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_STARFIVE=y +CONFIG_RTC_DRV_GOLDFISH=y +CONFIG_DMADEVICES=y +CONFIG_DW_AXI_DMAC=y +CONFIG_DMATEST=y +# CONFIG_VIRTIO_MENU is not set +# CONFIG_VHOST_MENU is not set +CONFIG_GOLDFISH=y +CONFIG_STARFIVE_TIMER=y +CONFIG_MAILBOX=y +CONFIG_STARFIVE_MBOX=y +CONFIG_STARFIVE_MBOX_TEST=m +# CONFIG_IOMMU_SUPPORT is not set +CONFIG_RPMSG_CHAR=y +CONFIG_RPMSG_VIRTIO=y +CONFIG_SIFIVE_L2_FLUSH_START=0x40000000 +CONFIG_SIFIVE_L2_FLUSH_SIZE=0x400000000 +CONFIG_STARFIVE_PMU=y +CONFIG_IIO=y +CONFIG_IIO_ST_ACCEL_3AXIS=y +CONFIG_PWM=y +CONFIG_PWM_STARFIVE_PTC=y +CONFIG_PHY_M31_DPHY_RX0=y +CONFIG_RAS=y +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y +CONFIG_CPUFREQ_DT=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_AUTOFS4_FS=y +CONFIG_FUSE_FS=y +CONFIG_CUSE=y +CONFIG_VIRTIO_FS=y +CONFIG_OVERLAY_FS=y +CONFIG_OVERLAY_FS_INDEX=y +CONFIG_OVERLAY_FS_XINO_AUTO=y +CONFIG_OVERLAY_FS_METACOPY=y +CONFIG_FSCACHE=y +CONFIG_FSCACHE_STATS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_UTF8=y +CONFIG_EXFAT_FS=y +CONFIG_NTFS_FS=y +CONFIG_NTFS_RW=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_HUGETLBFS=y +CONFIG_JFFS2_FS=y +CONFIG_NFS_FS=y +CONFIG_NFS_V4=y +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_ROOT_NFS=y +CONFIG_CIFS=m +# CONFIG_CIFS_STATS2 is not set +CONFIG_CIFS_UPCALL=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +# CONFIG_CIFS_DEBUG is not set +CONFIG_CIFS_DFS_UPCALL=y +CONFIG_CIFS_FSCACHE=y +CONFIG_SMB_SERVER=m +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ISO8859_1=y +CONFIG_CRYPTO_USER=y +CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_SHA512=y +CONFIG_CRYPTO_USER_API_HASH=y +CONFIG_CRYPTO_USER_API_SKCIPHER=y +CONFIG_CRYPTO_USER_API_RNG=y 
+CONFIG_CRYPTO_USER_API_AEAD=y +CONFIG_CRYPTO_USER_API_AKCIPHER=y +CONFIG_CRYPTO_DEV_VIRTIO=y +CONFIG_CRYPTO_DEV_JH7110_ENCRYPT=y +CONFIG_DMA_CMA=y +CONFIG_PRINTK_TIME=y +CONFIG_DEBUG_FS=y +CONFIG_SOFTLOCKUP_DETECTOR=y +CONFIG_WQ_WATCHDOG=y +CONFIG_DEBUG_TIMEKEEPING=y +CONFIG_DEBUG_RT_MUTEXES=y +CONFIG_DEBUG_SPINLOCK=y +CONFIG_DEBUG_RWSEMS=y +CONFIG_STACKTRACE=y +CONFIG_DEBUG_LIST=y +CONFIG_DEBUG_PLIST=y +CONFIG_DEBUG_SG=y +# CONFIG_RCU_TRACE is not set +CONFIG_RCU_EQS_DEBUG=y +# CONFIG_FTRACE is not set +# CONFIG_RUNTIME_TESTING_MENU is not set +CONFIG_MEMTEST=y diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index d9ec1e69e4283..3fe638afc98f7 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -190,6 +190,13 @@ config DW_AXI_DMAC NOTE: This driver wasn't tested on 64 bit platform because of lack 64 bit platform with Synopsys DW AXI DMAC. +config DW_AXI_DMAC_OOB + bool "Out-of-band support for DW AXI DMA" + depends on DW_AXI_DMAC && DOVETAIL + select DMA_VIRTUAL_CHANNELS_OOB + help + Enable out-of-band requests to DW AXI DMA. + config EP93XX_DMA bool "Cirrus Logic EP93xx DMA support" depends on ARCH_EP93XX || COMPILE_TEST diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index 38cdbca59485c..dcfd548f9cb96 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -851,6 +851,13 @@ static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x, ch->serving = NULL; } +static inline bool pl08_dma_oob_capable(void) +{ + // return IS_ENABLED(CONFIG_PL08_DMAC_OOB); + //Kconfig is not modified for now + return true; +} + /* * Try to allocate a physical channel. When successful, assign it to * this virtual channel, and initiate the next descriptor. 
The @@ -860,6 +867,7 @@ static void pl08x_phy_alloc_and_start(struct pl08x_dma_chan *plchan) { struct pl08x_driver_data *pl08x = plchan->host; struct pl08x_phy_chan *ch; + struct virt_dma_desc *vd; ch = pl08x_get_phy_channel(pl08x, plchan); if (!ch) { @@ -874,7 +882,10 @@ static void pl08x_phy_alloc_and_start(struct pl08x_dma_chan *plchan) plchan->phychan = ch; plchan->state = PL08X_CHAN_RUNNING; - pl08x_start_next_txd(plchan); + vd = vchan_next_desc(&plchan->vc); + // pl08x_start_next_txd(plchan); + if(!pl08_dma_oob_capable() || !vchan_oob_pulsed(vd))//when oob is enable and desc is oob,desc is not triggered here + pl08x_start_next_txd(plchan); } static void pl08x_phy_reassign_start(struct pl08x_phy_chan *ch, @@ -940,12 +951,14 @@ static void pl08x_phy_free(struct pl08x_dma_chan *plchan) * Eww. We know this isn't going to deadlock * but lockdep probably doesn't. */ - spin_lock(&next->vc.lock); + // spin_lock(&next->vc.lock); + vchan_lock(&next->vc); /* Re-check the state now that we have the lock */ success = next->state == PL08X_CHAN_WAITING; if (success) pl08x_phy_reassign_start(plchan->phychan, next); - spin_unlock(&next->vc.lock); + // spin_unlock(&next->vc.lock); + vchan_unlock(&next->vc); /* If the state changed, try to find another channel */ if (!success) @@ -1563,7 +1576,8 @@ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan, return ret; } - spin_lock_irqsave(&plchan->vc.lock, flags); + // spin_lock_irqsave(&plchan->vc.lock, flags); + vchan_lock_irqsave(&plchan->vc,flags); ret = dma_cookie_status(chan, cookie, txstate); if (ret != DMA_COMPLETE) { vd = vchan_find_desc(&plchan->vc, cookie); @@ -1578,8 +1592,8 @@ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan, bytes = pl08x_getbytes_chan(plchan); } } - spin_unlock_irqrestore(&plchan->vc.lock, flags); - + // spin_unlock_irqrestore(&plchan->vc.lock, flags); + vchan_unlock_irqrestore(&plchan->vc,flags); /* * This cookie not complete yet * Get number of bytes left in the active 
transactions and queue @@ -1733,12 +1747,32 @@ static void pl08x_issue_pending(struct dma_chan *chan) struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); unsigned long flags; - spin_lock_irqsave(&plchan->vc.lock, flags); + // spin_lock_irqsave(&plchan->vc.lock, flags); + vchan_lock_irqsave(&plchan->vc,flags); if (vchan_issue_pending(&plchan->vc)) { if (!plchan->phychan && plchan->state != PL08X_CHAN_WAITING) pl08x_phy_alloc_and_start(plchan); } - spin_unlock_irqrestore(&plchan->vc.lock, flags); + // spin_unlock_irqrestore(&plchan->vc.lock, flags); + vchan_unlock_irqrestore(&plchan->vc,flags); +} + +static int pl08x_dma_pulse_oob(struct dma_chan *chan) +{ + struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); + unsigned long flags; + struct virt_dma_desc *vd; + int ret = -EIO; + + vchan_lock_irqsave(&plchan->vc, flags); + vd = vchan_next_desc(&plchan->vc); + if (vd!=NULL && vchan_oob_pulsed(vd)) { + pl08x_start_next_txd(plchan); + ret = 0; + } + vchan_unlock_irqrestore(&plchan->vc, flags); + + return ret; } static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan) @@ -2050,6 +2084,14 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( int ret, tmp; dma_addr_t slave_addr; + if(!pl08_dma_oob_capable()) { + if(flags & (DMA_OOB_INTERRUPT|DMA_OOB_PULSE)) { + dev_err(&pl08x->adev->dev, + "%s: out-of-band slave transfers disabled\n", + __func__); + return NULL; + } + } dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n", __func__, sg_dma_len(sgl), plchan->name); @@ -2091,6 +2133,20 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_cyclic( int ret, tmp; dma_addr_t slave_addr; + //check flags + if(!pl08_dma_oob_capable()) { + if (flags & DMA_OOB_INTERRUPT) { + dev_err(&pl08x->adev->dev, + "%s: out-of-band cyclic transfers disabled\n", + __func__); + return NULL; + } + } else if(flags & DMA_OOB_PULSE) { + dev_err(&pl08x->adev->dev, + "%s: no pulse mode with out-of-band cyclic transfers\n", + __func__); + return NULL; + } 
dev_dbg(&pl08x->adev->dev, "%s prepare cyclic transaction of %zd/%zd bytes %s %s\n", __func__, period_len, buf_len, @@ -2155,9 +2211,11 @@ static int pl08x_terminate_all(struct dma_chan *chan) struct pl08x_driver_data *pl08x = plchan->host; unsigned long flags; - spin_lock_irqsave(&plchan->vc.lock, flags); + // spin_lock_irqsave(&plchan->vc.lock, flags); + vchan_lock_irqsave(&plchan->vc,flags); if (!plchan->phychan && !plchan->at) { - spin_unlock_irqrestore(&plchan->vc.lock, flags); + // spin_unlock_irqrestore(&plchan->vc.lock, flags); + vchan_unlock_irqrestore(&plchan->vc,flags); return 0; } @@ -2178,8 +2236,8 @@ static int pl08x_terminate_all(struct dma_chan *chan) /* Dequeue jobs not yet fired as well */ pl08x_free_txd_list(pl08x, plchan); - spin_unlock_irqrestore(&plchan->vc.lock, flags); - + // spin_unlock_irqrestore(&plchan->vc.lock, flags); + vchan_unlock_irqrestore(&plchan->vc,flags); return 0; } @@ -2199,16 +2257,19 @@ static int pl08x_pause(struct dma_chan *chan) * Anything succeeds on channels with no physical allocation and * no queued transfers. */ - spin_lock_irqsave(&plchan->vc.lock, flags); + // spin_lock_irqsave(&plchan->vc.lock, flags); + vchan_lock_irqsave(&plchan->vc,flags); if (!plchan->phychan && !plchan->at) { - spin_unlock_irqrestore(&plchan->vc.lock, flags); + // spin_unlock_irqrestore(&plchan->vc.lock, flags); + vchan_unlock_irqrestore(&plchan->vc,flags); return 0; } pl08x_pause_phy_chan(plchan->phychan); plchan->state = PL08X_CHAN_PAUSED; - spin_unlock_irqrestore(&plchan->vc.lock, flags); + // spin_unlock_irqrestore(&plchan->vc.lock, flags); + vchan_unlock_irqrestore(&plchan->vc,flags); return 0; } @@ -2222,17 +2283,19 @@ static int pl08x_resume(struct dma_chan *chan) * Anything succeeds on channels with no physical allocation and * no queued transfers. 
*/ - spin_lock_irqsave(&plchan->vc.lock, flags); + // spin_lock_irqsave(&plchan->vc.lock, flags); + vchan_lock_irqsave(&plchan->vc,flags); if (!plchan->phychan && !plchan->at) { - spin_unlock_irqrestore(&plchan->vc.lock, flags); + // spin_unlock_irqrestore(&plchan->vc.lock, flags); + vchan_unlock_irqrestore(&plchan->vc,flags); return 0; } pl08x_resume_phy_chan(plchan->phychan); plchan->state = PL08X_CHAN_RUNNING; - spin_unlock_irqrestore(&plchan->vc.lock, flags); - + // spin_unlock_irqrestore(&plchan->vc.lock, flags); + vchan_unlock_irqrestore(&plchan->vc,flags); return 0; } @@ -2281,20 +2344,60 @@ static void pl08x_ensure_on(struct pl08x_driver_data *pl08x) writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG); } +static bool do_channel(struct pl08x_txd *tx,struct pl08x_dma_chan *plchan) +{ + struct dmaengine_desc_callback cb; + //oob process + if(running_oob()){ + if(!vchan_oob_handled(&tx->vd)) + return false;//ib desc need to be forward + dmaengine_desc_get_callback(&tx->vd.tx,&cb); + if (dmaengine_desc_callback_valid(&cb)) { + vchan_unlock(&plchan->vc); + dmaengine_desc_callback_invoke(&cb, NULL); + vchan_lock(&plchan->vc); + } + return true; + } + //ib process + if (tx && tx->cyclic) { + vchan_cyclic_callback(&tx->vd); + } else if (tx) { + plchan->at = NULL; + /* + * This descriptor is done, release its mux + * reservation. + */ + pl08x_release_mux(plchan); + tx->done = true; + vchan_cookie_complete(&tx->vd); + + /* + * And start the next descriptor (if any), + * otherwise free this channel. 
+ */ + if (vchan_next_desc(&plchan->vc)) + pl08x_start_next_txd(plchan); + else + pl08x_phy_free(plchan); + } + return true; +} + static irqreturn_t pl08x_irq(int irq, void *dev) { struct pl08x_driver_data *pl08x = dev; u32 mask = 0, err, tc, i; - + bool oob_need_forward = false; /* check & clear - ERR & TC interrupts */ err = readl(pl08x->base + PL080_ERR_STATUS); - if (err) { + if (err && !running_oob()) { dev_err(&pl08x->adev->dev, "%s error interrupt, register value 0x%08x\n", __func__, err); writel(err, pl08x->base + PL080_ERR_CLEAR); } tc = readl(pl08x->base + PL080_TC_STATUS); - if (tc) + if (tc && !running_oob()) writel(tc, pl08x->base + PL080_TC_CLEAR); if (!err && !tc) @@ -2306,44 +2409,47 @@ static irqreturn_t pl08x_irq(int irq, void *dev) struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i]; struct pl08x_dma_chan *plchan = phychan->serving; struct pl08x_txd *tx; + //oob process if (!plchan) { - dev_err(&pl08x->adev->dev, + if(!running_oob()) { + dev_err(&pl08x->adev->dev, "%s Error TC interrupt on unused channel: 0x%08x\n", __func__, i); + } continue; } - spin_lock(&plchan->vc.lock); + // spin_lock(&plchan->vc.lock); + vchan_lock(&plchan->vc); tx = plchan->at; - if (tx && tx->cyclic) { - vchan_cyclic_callback(&tx->vd); - } else if (tx) { - plchan->at = NULL; - /* - * This descriptor is done, release its mux - * reservation. - */ - pl08x_release_mux(plchan); - tx->done = true; - vchan_cookie_complete(&tx->vd); - /* - * And start the next descriptor (if any), - * otherwise free this channel. 
- */ - if (vchan_next_desc(&plchan->vc)) - pl08x_start_next_txd(plchan); - else - pl08x_phy_free(plchan); + if(pl08_dma_oob_capable() && running_oob()) { + if(!do_channel(tx,plchan)) { + oob_need_forward = true; + } else { + //oob processing succeeded: clear the corresponding tc and err bits + if((BIT(i) & err)) + writel(BIT(i), pl08x->base + PL080_ERR_CLEAR); + if((BIT(i) & tc)) + writel(BIT(i), pl08x->base + PL080_TC_CLEAR); + } + } else { + do_channel(tx,plchan); } - spin_unlock(&plchan->vc.lock); - - mask |= BIT(i); + // spin_unlock(&plchan->vc.lock); + vchan_unlock(&plchan->vc); + if(!running_oob()) + mask |= BIT(i); } } - - return mask ? IRQ_HANDLED : IRQ_NONE; + //oob stage: walk all pending channels; per do_channel, if any in-band descriptor cannot be handled here, return IRQ_FORWARD, otherwise IRQ_HANDLED + //ib stage: return IRQ_HANDLED if mask is set, IRQ_NONE otherwise + if(running_oob()) { + return oob_need_forward ? IRQ_FORWARD : IRQ_HANDLED; + } else { + return mask ? IRQ_HANDLED : IRQ_NONE; + } } static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan) @@ -2782,6 +2888,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) pl08x->slave.device_issue_pending = pl08x_issue_pending; pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg; pl08x->slave.device_prep_dma_cyclic = pl08x_prep_dma_cyclic; + pl08x->slave.device_pulse_oob = pl08x_dma_pulse_oob; pl08x->slave.device_config = pl08x_config; pl08x->slave.device_pause = pl08x_pause; pl08x->slave.device_resume = pl08x_resume; @@ -2908,6 +3015,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) } /* Register as many memcpy channels as there are physical channels */ + // dma_cap_set(DMA_OOB,pl08x->slave.cap_mask);//no oob for memcpy virt channels ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->memcpy, pl08x->vd->channels, false); if (ret <= 0) { @@ -2918,6 +3026,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) } /* Register slave channels */ + dma_cap_set(DMA_OOB,pl08x->slave.cap_mask); if (pl08x->has_slave) { ret = 
pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave, pl08x->pd->num_slave_channels, true); @@ -2952,7 +3061,8 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) dev_info(&pl08x->adev->dev, "DMA: PL%03x%s rev%u at 0x%08llx irq %d\n", amba_part(adev), pl08x->vd->pl080s ? "s" : "", amba_rev(adev), (unsigned long long)adev->res.start, adev->irq[0]); - + + dev_info(&pl08x->adev->dev, "probe modify success\n"); return 0; out_no_slave_reg: diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c index fffafa86d964e..320fd1820693e 100644 --- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c +++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -28,6 +29,7 @@ #include #include #include +#include #include "dw-axi-dmac.h" #include "../dmaengine.h" @@ -369,8 +371,7 @@ dma_chan_tx_status(struct dma_chan *dchan, dma_cookie_t cookie, if (status == DMA_COMPLETE || !txstate) return status; - spin_lock_irqsave(&chan->vc.lock, flags); - + vchan_lock_irqsave(&chan->vc,flags); vdesc = vchan_find_desc(&chan->vc, cookie); if (vdesc) { length = vd_to_axi_desc(vdesc)->length; @@ -380,7 +381,7 @@ dma_chan_tx_status(struct dma_chan *dchan, dma_cookie_t cookie, bytes = length - completed_length; } - spin_unlock_irqrestore(&chan->vc.lock, flags); + vchan_unlock_irqrestore(&chan->vc,flags); dma_set_residue(txstate, bytes); return status; @@ -480,6 +481,11 @@ static void axi_chan_block_xfer_start(struct axi_dma_chan *chan, axi_chan_enable(chan); } +static inline bool dw_axi_dma_oob_capable(void) +{ + return IS_ENABLED(CONFIG_DW_AXI_DMAC_OOB); +} + static void axi_chan_start_first_queued(struct axi_dma_chan *chan) { struct axi_dma_desc *desc; @@ -492,7 +498,11 @@ static void axi_chan_start_first_queued(struct axi_dma_chan *chan) desc = vd_to_axi_desc(vd); dev_vdbg(chan2dev(chan), "%s: started %u\n", axi_chan_name(chan), vd->tx.cookie); 
- axi_chan_block_xfer_start(chan, desc); + // axi_chan_block_xfer_start(chan, desc); + //filter the desc:ib context OR non-oob desc + if(!dw_axi_dma_oob_capable() || !vchan_oob_pulsed(vd)) + axi_chan_block_xfer_start(chan, desc); + } static void dma_chan_issue_pending(struct dma_chan *dchan) @@ -500,11 +510,40 @@ static void dma_chan_issue_pending(struct dma_chan *dchan) struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan); unsigned long flags; - spin_lock_irqsave(&chan->vc.lock, flags); - if (vchan_issue_pending(&chan->vc)) + vchan_lock_irqsave(&chan->vc,flags); + if (vchan_issue_pending_mix(&chan->vc)) axi_chan_start_first_queued(chan); - spin_unlock_irqrestore(&chan->vc.lock, flags); + vchan_unlock_irqrestore(&chan->vc,flags); +} + +#ifdef CONFIG_DW_AXI_DMAC_OOB +static int dw_axi_dma_pulse_oob(struct dma_chan *dchan) +{ + struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan); + struct virt_dma_desc *vd; + struct axi_dma_desc *desc; + unsigned long flags; + int ret = -EIO; + + pr_info("pulse oob called\n"); + vchan_lock_irqsave(&chan->vc, flags); + vd = vchan_next_desc(&chan->vc); + desc = (vd==NULL)?(NULL):(vd_to_axi_desc(vd)); + if (desc && vchan_oob_pulsed(&desc->vd)) { + pr_info("pulse:1\n"); + axi_chan_block_xfer_start(chan,desc); + ret = 0; + } + vchan_unlock_irqrestore(&chan->vc, flags); + + return ret; +} +#else +static int dw_axi_dma_pulse_oob(struct dma_chan *dchan) +{ + return -ENOTSUPP; } +#endif static void dw_axi_dma_synchronize(struct dma_chan *dchan) { @@ -779,6 +818,22 @@ dw_axi_dma_chan_prep_cyclic(struct dma_chan *dchan, dma_addr_t dma_addr, u64 llp = 0; u8 lms = 0; /* Select AXI0 master for LLI fetching */ + //check flags + if(!dw_axi_dma_oob_capable()) { + if (flags & DMA_OOB_INTERRUPT) { + dev_err(dchan2dev(dchan), + "%s: out-of-band cyclic transfers disabled\n", + __func__); + return NULL; + } + } else if(flags & DMA_OOB_PULSE) { + dev_err(dchan2dev(dchan), + "%s: no pulse mode with out-of-band cyclic transfers\n", + __func__); + 
return NULL; + } + + num_periods = buf_len / period_len; axi_block_len = calculate_block_len(chan, dma_addr, buf_len, direction); @@ -790,6 +845,11 @@ dw_axi_dma_chan_prep_cyclic(struct dma_chan *dchan, dma_addr_t dma_addr, total_segments = num_periods * num_segments; + pr_info("prep_cyclic: buf_len=%zu, period_len=%zu\n",buf_len, period_len); + pr_info("prep_cyclic: num_periods=%u, axi_block_len=%zu, num_segments=%u, segment_len=%u, total_segments=%u\n", + num_periods, axi_block_len, num_segments, segment_len, total_segments); + + desc = axi_desc_alloc(total_segments); if (unlikely(!desc)) goto err_desc_get; @@ -857,6 +917,15 @@ dw_axi_dma_chan_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, u64 llp = 0; u8 lms = 0; /* Select AXI0 master for LLI fetching */ + if(!dw_axi_dma_oob_capable()) { + if(flags & (DMA_OOB_INTERRUPT|DMA_OOB_PULSE)) { + dev_err(dchan2dev(dchan), + "%s: out-of-band slave transfers disabled\n", + __func__); + return NULL; + } + } + if (unlikely(!is_slave_direction(direction) || !sg_len)) return NULL; @@ -1063,8 +1132,7 @@ static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status) struct virt_dma_desc *vd; unsigned long flags; - spin_lock_irqsave(&chan->vc.lock, flags); - + vchan_lock_irqsave(&chan->vc, flags); axi_chan_disable(chan); /* The bad descriptor currently is in the head of vc list */ @@ -1089,20 +1157,92 @@ static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status) axi_chan_start_first_queued(chan); out: - spin_unlock_irqrestore(&chan->vc.lock, flags); + vchan_unlock_irqrestore(&chan->vc, flags); } -static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan) +static bool axi_chan_block_xfer_complete(struct axi_dma_chan *chan) { int count = atomic_read(&chan->descs_allocated); - struct axi_dma_hw_desc *hw_desc; + struct axi_dma_hw_desc *hw_desc;// struct axi_dma_desc *desc; struct virt_dma_desc *vd; unsigned long flags; u64 llp; int i; + struct dmaengine_desc_callback cb; 
+ bool ret = true; + + vchan_lock_irqsave(&chan->vc, flags); + // pr_info("AXI_DMA:descs_allocated =%d\n",count); + if(running_oob()) {//oob + if (unlikely(axi_chan_is_hw_enable(chan))) { + ret = false;//caught bug,no operation in oob,forward to inband + } + vd = vchan_next_desc(&chan->vc); + if (!vd) { + ret = false; + goto out;//caught bug,no operation in oob,forward to inband + } + if(!vchan_oob_handled(vd)) { + ret = false; + goto out; + } - spin_lock_irqsave(&chan->vc.lock, flags); + if (chan->cyclic) { + desc = vd_to_axi_desc(vd); + if (desc) { + llp = lo_hi_readq(chan->chan_regs + CH_LLP); + for (i = 0; i < count; i++) { + hw_desc = &desc->hw_desc[i]; + if (hw_desc->llp == llp) { + axi_chan_irq_clear(chan, hw_desc->lli->status_lo); + hw_desc->lli->ctl_hi |= CH_CTL_H_LLI_VALID; + desc->completed_blocks = i; + + if (((hw_desc->len * (i + 1)) % desc->period_len) == 0) { + //1 cyclic period is over,time to call callback + dmaengine_desc_get_callback(&vd->tx,&cb); + if(dmaengine_desc_callback_valid(&cb)) { + vchan_unlock_irqrestore(&chan->vc, flags); + dmaengine_desc_callback_invoke(&cb,NULL); + vchan_lock_irqsave(&chan->vc, flags); + } + } + break; + } + } + axi_chan_enable(chan); + ret = true; + goto out; + } + } else { + /* Remove the completed descriptor from issued list before completing */ + list_del(&vd->node); + //complete the desc cookie manually + dma_cookie_complete(&vd->tx); + list_add_tail(&vd->node, &(chan->vc.desc_completed)); + //vd is in completed list right now + dmaengine_desc_get_callback(&vd->tx,&cb);//get callback + if(dmaengine_desc_callback_valid(&cb)) { + vchan_unlock_irqrestore(&chan->vc, flags); + dmaengine_desc_callback_invoke(&cb,NULL); + vchan_lock_irqsave(&chan->vc, flags); + } + //free the vd in completed list + list_del(&vd->node); + vchan_vdesc_fini(vd); + //if there is vd,continue to execute + vd = vchan_next_desc(&chan->vc); + desc = (vd==NULL)?(NULL):(vd_to_axi_desc(vd)); + if(vd) { + axi_chan_block_xfer_start(chan,desc); + } 
+ ret = true; + goto out; + } + } + + //ib:we don't care about ret in ib context,for we won't forward the irq again if (unlikely(axi_chan_is_hw_enable(chan))) { dev_err(chan2dev(chan), "BUG: %s caught DWAXIDMAC_IRQ_DMA_TRF, but channel not idle!\n", axi_chan_name(chan)); @@ -1140,10 +1280,13 @@ static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan) /* Remove the completed descriptor from issued list before completing */ list_del(&vd->node); vchan_cookie_complete(vd); + /* Submit queued descriptors after processing the completed ones */ + axi_chan_start_first_queued(chan); } out: - spin_unlock_irqrestore(&chan->vc.lock, flags); + vchan_unlock_irqrestore(&chan->vc, flags); + return ret; } static irqreturn_t dw_axi_dma_interrupt(int irq, void *dev_id) @@ -1153,29 +1296,64 @@ static irqreturn_t dw_axi_dma_interrupt(int irq, void *dev_id) struct axi_dma_chan *chan; u32 status, i; + irqreturn_t ret = IRQ_HANDLED; /* Disable DMAC interrupts. We'll enable them after processing channels */ axi_dma_irq_disable(chip); + // if(dw_axi_dma_oob_capable()){ + // pr_info("AXI_DMA:oob capable\n"); + // } else { + // pr_info("AXI_DMA:oob disable\n"); + // } + // if(running_oob()) { + // pr_info("AXI_DMA:oob\n"); + // } else { + // pr_info("AXI_DMA:ib\n"); + // } + /* Poll, clear and process every channel interrupt status */ for (i = 0; i < dw->hdata->nr_channels; i++) { chan = &dw->chan[i]; status = axi_chan_irq_read(chan); - axi_chan_irq_clear(chan, status); - - dev_vdbg(chip->dev, "%s %u IRQ status: 0x%08x\n", - axi_chan_name(chan), i, status); - if (status & DWAXIDMAC_IRQ_ALL_ERR) - axi_chan_handle_err(chan, status); - else if (status & DWAXIDMAC_IRQ_DMA_TRF) - axi_chan_block_xfer_complete(chan); + if(dw_axi_dma_oob_capable() && running_oob()) { + // pr_info("AXI_DMA:1\n"); + if(status & DWAXIDMAC_IRQ_ALL_ERR) { + // pr_info("AXI_DMA:2\n"); + ret = IRQ_FORWARD; + } else if(status & DWAXIDMAC_IRQ_DMA_TRF) { + // pr_info("AXI_DMA:3\n"); + 
if(!axi_chan_block_xfer_complete(chan)) { + ret = IRQ_FORWARD; + // pr_info("AXI_DMA:4\n"); + } else {//only clear irq when process success + axi_chan_irq_clear(chan, status); + // pr_info("AXI_DMA:5\n"); + } + } + } else { + // pr_info("AXI_DMA:11\n"); + axi_chan_irq_clear(chan, status); + dev_vdbg(chip->dev, "%s %u IRQ status: 0x%08x\n", + axi_chan_name(chan), i, status); + + if (status & DWAXIDMAC_IRQ_ALL_ERR) { + axi_chan_handle_err(chan, status); + // pr_info("AXI_DMA:12\n"); + } + + else if (status & DWAXIDMAC_IRQ_DMA_TRF) { + axi_chan_block_xfer_complete(chan); + // pr_info("AXI_DMA:13\n"); + } + + } } /* Re-enable interrupts */ axi_dma_irq_enable(chip); - - return IRQ_HANDLED; + return ret; } static int dma_chan_terminate_all(struct dma_chan *dchan) @@ -1188,7 +1366,7 @@ static int dma_chan_terminate_all(struct dma_chan *dchan) LIST_HEAD(head); axi_chan_disable(chan); - + pr_info("dma_chan_terminate_all is called\n"); ret = readl_poll_timeout_atomic(chan->chip->regs + DMAC_CHEN, val, !(val & chan_active), 1000, 50000); if (ret == -ETIMEDOUT) @@ -1200,13 +1378,11 @@ static int dma_chan_terminate_all(struct dma_chan *dchan) if (chan->direction == DMA_MEM_TO_DEV) dw_axi_dma_set_byte_halfword(chan, false); - spin_lock_irqsave(&chan->vc.lock, flags); - + vchan_lock_irqsave(&chan->vc, flags); vchan_get_all_descriptors(&chan->vc, &head); chan->cyclic = false; - spin_unlock_irqrestore(&chan->vc.lock, flags); - + vchan_unlock_irqrestore(&chan->vc, flags); vchan_dma_desc_free_list(&chan->vc, &head); dev_vdbg(dchan2dev(dchan), "terminated: %s\n", axi_chan_name(chan)); @@ -1221,7 +1397,7 @@ static int dma_chan_pause(struct dma_chan *dchan) unsigned int timeout = 20; /* timeout iterations */ u64 val; - spin_lock_irqsave(&chan->vc.lock, flags); + vchan_lock_irqsave(&chan->vc, flags); if (chan->chip->dw->hdata->nr_channels >= DMAC_CHAN_16) { val = axi_dma_ioread64(chan->chip, DMAC_CHSUSPREG); @@ -1260,8 +1436,7 @@ static int dma_chan_pause(struct dma_chan *dchan) 
chan->is_paused = true; - spin_unlock_irqrestore(&chan->vc.lock, flags); - + vchan_unlock_irqrestore(&chan->vc, flags); return timeout ? 0 : -EAGAIN; } @@ -1304,12 +1479,12 @@ static int dma_chan_resume(struct dma_chan *dchan) { struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan); unsigned long flags; - spin_lock_irqsave(&chan->vc.lock, flags); + vchan_lock_irqsave(&chan->vc,flags); if (chan->is_paused) axi_chan_resume(chan); - spin_unlock_irqrestore(&chan->vc.lock, flags); + vchan_unlock_irqrestore(&chan->vc,flags); return 0; } @@ -1452,8 +1627,9 @@ static int axi_req_irqs(struct platform_device *pdev, struct axi_dma_chip *chip) chip->irq[i] = platform_get_irq(pdev, i); if (chip->irq[i] < 0) return chip->irq[i]; - ret = devm_request_irq(chip->dev, chip->irq[i], dw_axi_dma_interrupt, - IRQF_SHARED, KBUILD_MODNAME, chip); + // ret = devm_request_irq(chip->dev, chip->irq[i], dw_axi_dma_interrupt, - // IRQF_SHARED, KBUILD_MODNAME, chip); + ret = request_irq(chip->irq[i], dw_axi_dma_interrupt,IRQF_SHARED | IRQF_OOB, KBUILD_MODNAME, chip); if (ret < 0) return ret; } @@ -1532,6 +1708,7 @@ static int dw_probe(struct platform_device *pdev) return ret; INIT_LIST_HEAD(&dw->dma.channels); + dma_cap_set(DMA_OOB,dw->dma.cap_mask); for (i = 0; i < hdata->nr_channels; i++) { struct axi_dma_chan *chan = &dw->chan[i]; @@ -1572,6 +1749,7 @@ static int dw_probe(struct platform_device *pdev) dw->dma.device_config = dw_axi_dma_chan_slave_config; dw->dma.device_prep_slave_sg = dw_axi_dma_chan_prep_slave_sg; dw->dma.device_prep_dma_cyclic = dw_axi_dma_chan_prep_cyclic; + dw->dma.device_pulse_oob = dw_axi_dma_pulse_oob; /* * Synopsis DesignWare AxiDMA datasheet mentioned Maximum @@ -1611,7 +1789,9 @@ static int dw_probe(struct platform_device *pdev) dev_info(chip->dev, "DesignWare AXI DMA Controller, %d channels\n", dw->hdata->nr_channels); - + if(dw_axi_dma_oob_capable()) { + dev_info(chip->dev,"this driver is oob capable\n"); + } return 0; err_pm_disable: diff --git 
a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c index 7961172a780dd..cc2c77c5b7758 100644 --- a/drivers/dma/virt-dma.c +++ b/drivers/dma/virt-dma.c @@ -36,6 +36,28 @@ dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx) } EXPORT_SYMBOL_GPL(vchan_tx_submit); +dma_cookie_t vchan_tx_submit_mix(struct dma_async_tx_descriptor *tx) +{ + struct virt_dma_chan *vc = to_virt_chan(tx->chan); + struct virt_dma_desc *vd = to_virt_desc(tx); + unsigned long flags; + dma_cookie_t cookie; + + vchan_lock_irqsave(vc, flags); + cookie = dma_cookie_assign(tx); + if(tx->flags & DMA_OOB_PULSE) + list_move_tail(&vd->node, &vc->desc_submitted_oob); + else + list_move_tail(&vd->node, &vc->desc_submitted); + vchan_unlock_irqrestore(vc, flags); + + dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n", + vc, vd, cookie); + + return cookie; +} +EXPORT_SYMBOL_GPL(vchan_tx_submit_mix); + /** * vchan_tx_desc_free - free a reusable descriptor * @tx: the transfer @@ -127,6 +149,7 @@ void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev) spin_lock_init(&vc->lock); INIT_LIST_HEAD(&vc->desc_allocated); INIT_LIST_HEAD(&vc->desc_submitted); + INIT_LIST_HEAD(&vc->desc_submitted_oob); INIT_LIST_HEAD(&vc->desc_issued); INIT_LIST_HEAD(&vc->desc_completed); INIT_LIST_HEAD(&vc->desc_terminated); diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h index 59d9eabc8b674..0dc1c989f4daa 100644 --- a/drivers/dma/virt-dma.h +++ b/drivers/dma/virt-dma.h @@ -29,6 +29,7 @@ struct virt_dma_chan { /* protected by vc.lock */ struct list_head desc_allocated; struct list_head desc_submitted; + struct list_head desc_submitted_oob; struct list_head desc_issued; struct list_head desc_completed; struct list_head desc_terminated; @@ -45,6 +46,7 @@ void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head); void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev); struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *, dma_cookie_t); extern 
dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *); +extern dma_cookie_t vchan_tx_submit_mix(struct dma_async_tx_descriptor *); extern int vchan_tx_desc_free(struct dma_async_tx_descriptor *); /** @@ -60,7 +62,7 @@ static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan dma_async_tx_descriptor_init(&vd->tx, &vc->chan); vd->tx.flags = tx_flags; - vd->tx.tx_submit = vchan_tx_submit; + vd->tx.tx_submit = vchan_tx_submit_mix; vd->tx.desc_free = vchan_tx_desc_free; vd->tx_result.result = DMA_TRANS_NOERROR; @@ -73,6 +75,37 @@ static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan return &vd->tx; } +/** + * vchan_issue_pending_mix - move submitted ib and oob descriptors to issued list + * @vc: virtual channel to update + * + * vc.lock must be held by caller + */ +static inline bool vchan_issue_pending_mix(struct virt_dma_chan *vc) +{ + lockdep_assert_held(&vc->lock); + struct virt_dma_desc *vd; + bool oob_found = false; + //insert oob descs + if(!list_empty(&vc->desc_submitted_oob)) { + list_for_each_entry_reverse(vd, &vc->desc_issued, node) { + if (vchan_oob_pulsed(vd)) { + // splice desc_submitted_oob in right after this OOB descriptor + oob_found = true; + list_splice_init(&vc->desc_submitted_oob, &vd->node); + break; + } + } + if(!oob_found) + list_splice_init(&vc->desc_submitted_oob, &vc->desc_issued); + } + //add ib descs to tail + list_splice_tail_init(&vc->desc_submitted, &vc->desc_issued); + + + return !list_empty(&vc->desc_issued); +} + /** * vchan_issue_pending - move submitted descriptors to issued list * @vc: virtual channel to update @@ -189,6 +222,7 @@ static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc, list_splice_tail_init(&vc->desc_allocated, head); list_splice_tail_init(&vc->desc_submitted, head); + list_splice_tail_init(&vc->desc_submitted_oob, head); list_splice_tail_init(&vc->desc_issued, head); list_splice_tail_init(&vc->desc_completed, head); list_splice_tail_init(&vc->desc_terminated, 
head); diff --git a/llvm.sh b/llvm.sh new file mode 100755 index 0000000000000..4f9f0a098ff24 --- /dev/null +++ b/llvm.sh @@ -0,0 +1,232 @@ +#!/bin/bash +################################################################################ +# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +# See https://llvm.org/LICENSE.txt for license information. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +################################################################################ +# +# This script will install the llvm toolchain on the different +# Debian and Ubuntu versions + +set -eux + +usage() { + set +x + echo "Usage: $0 [llvm_major_version] [all] [OPTIONS]" 1>&2 + echo -e "all\t\t\tInstall all packages." 1>&2 + echo -e "-n=code_name\t\tSpecifies the distro codename, for example bionic" 1>&2 + echo -e "-h\t\t\tPrints this help." 1>&2 + echo -e "-m=repo_base_url\tSpecifies the base URL from which to download." 1>&2 + exit 1; +} + +CURRENT_LLVM_STABLE=19 +BASE_URL="http://apt.llvm.org" + +NEW_DEBIAN_DISTROS=("trixie" "unstable") +# Set default values for commandline arguments +# We default to the current stable branch of LLVM +LLVM_VERSION=$CURRENT_LLVM_STABLE +ALL=0 +DISTRO=$(lsb_release -is) +VERSION_CODENAME=$(lsb_release -cs) +VERSION=$(lsb_release -sr) +UBUNTU_CODENAME="" +CODENAME_FROM_ARGUMENTS="" +# Obtain VERSION_CODENAME and UBUNTU_CODENAME (for Ubuntu and its derivatives) +source /etc/os-release +DISTRO=${DISTRO,,} + +# Check for required tools + +# Check if this is a new Debian distro +is_new_debian=0 +if [[ "${DISTRO}" == "debian" ]]; then + for new_distro in "${NEW_DEBIAN_DISTROS[@]}"; do + if [[ "${VERSION_CODENAME}" == "${new_distro}" ]]; then + is_new_debian=1 + break + fi + done +fi + +# Check for required tools +needed_binaries=(lsb_release wget gpg) +# add-apt-repository is not needed for newer Debian distros +if [[ $is_new_debian -eq 0 ]]; then + needed_binaries+=(add-apt-repository) +fi + 
+missing_binaries=() +using_curl= +for binary in "${needed_binaries[@]}"; do + if ! command -v $binary &>/dev/null ; then + if [[ "$binary" == "wget" ]] && command -v curl &>/dev/null; then + using_curl=1 + continue + fi + missing_binaries+=($binary) + fi +done + +if [[ ${#missing_binaries[@]} -gt 0 ]] ; then + echo "You are missing some tools this script requires: ${missing_binaries[@]}" + echo "(hint: apt install lsb-release wget software-properties-common gnupg)" + echo "curl is also supported" + exit 4 +fi + +case ${DISTRO} in + debian) + # Debian Trixie has a workaround because of + # https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1038383 + if [[ "${VERSION}" == "unstable" ]] || [[ "${VERSION}" == "testing" ]] || [[ "${VERSION_CODENAME}" == "trixie" ]]; then + CODENAME=unstable + LINKNAME= + else + # "stable" Debian release + CODENAME=${VERSION_CODENAME} + LINKNAME=-${CODENAME} + fi + ;; + *) + # ubuntu and its derivatives + if [[ -n "${UBUNTU_CODENAME}" ]]; then + CODENAME=${UBUNTU_CODENAME} + if [[ -n "${CODENAME}" ]]; then + LINKNAME=-${CODENAME} + fi + fi + ;; +esac + +# read optional command line arguments +if [ "$#" -ge 1 ] && [ "${1::1}" != "-" ]; then + if [ "$1" != "all" ]; then + LLVM_VERSION=$1 + else + # special case for ./llvm.sh all + ALL=1 + fi + OPTIND=2 + if [ "$#" -ge 2 ]; then + if [ "$2" == "all" ]; then + # Install all packages + ALL=1 + OPTIND=3 + fi + fi +fi + +while getopts ":hm:n:" arg; do + case $arg in + h) + usage + ;; + m) + BASE_URL=${OPTARG} + ;; + n) + CODENAME=${OPTARG} + if [[ "${CODENAME}" == "unstable" ]]; then + # link name does not apply to unstable repository + LINKNAME= + else + LINKNAME=-${CODENAME} + fi + CODENAME_FROM_ARGUMENTS="true" + ;; + esac +done + +if [[ $EUID -ne 0 ]]; then + echo "This script must be run as root!" 
+ exit 1 +fi + +declare -A LLVM_VERSION_PATTERNS +LLVM_VERSION_PATTERNS[9]="-9" +LLVM_VERSION_PATTERNS[10]="-10" +LLVM_VERSION_PATTERNS[11]="-11" +LLVM_VERSION_PATTERNS[12]="-12" +LLVM_VERSION_PATTERNS[13]="-13" +LLVM_VERSION_PATTERNS[14]="-14" +LLVM_VERSION_PATTERNS[15]="-15" +LLVM_VERSION_PATTERNS[16]="-16" +LLVM_VERSION_PATTERNS[17]="-17" +LLVM_VERSION_PATTERNS[18]="-18" +LLVM_VERSION_PATTERNS[19]="-19" +LLVM_VERSION_PATTERNS[20]="-20" +LLVM_VERSION_PATTERNS[21]="" + +if [ ! ${LLVM_VERSION_PATTERNS[$LLVM_VERSION]+_} ]; then + echo "This script does not support LLVM version $LLVM_VERSION" + exit 3 +fi + +LLVM_VERSION_STRING=${LLVM_VERSION_PATTERNS[$LLVM_VERSION]} + +# join the repository name +if [[ -n "${CODENAME}" ]]; then + REPO_NAME="deb ${BASE_URL}/${CODENAME}/ llvm-toolchain${LINKNAME}${LLVM_VERSION_STRING} main" + # check if the repository exists for the distro and version + if ! wget -q --method=HEAD ${BASE_URL}/${CODENAME} &> /dev/null && \ + ! curl -sSLI -XHEAD ${BASE_URL}/${CODENAME} &> /dev/null; then + if [[ -n "${CODENAME_FROM_ARGUMENTS}" ]]; then + echo "Specified codename '${CODENAME}' is not supported by this script." + else + echo "Distribution '${DISTRO}' in version '${VERSION}' is not supported by this script." + fi + exit 2 + fi +fi + + +# install everything + +if [[ ! 
-f /etc/apt/trusted.gpg.d/apt.llvm.org.asc ]]; then + # download GPG key once + if [[ -z "$using_curl" ]]; then + wget -qO- https://apt.llvm.org/llvm-snapshot.gpg.key | tee /etc/apt/trusted.gpg.d/apt.llvm.org.asc + else + curl -sSL https://apt.llvm.org/llvm-snapshot.gpg.key | tee /etc/apt/trusted.gpg.d/apt.llvm.org.asc + fi +fi + +if [[ -z "`apt-key list 2> /dev/null | grep -i llvm`" ]]; then + # Delete the key in the old format + apt-key del AF4F7421 || true +fi + + +# Add repository based on distribution +if [[ "${VERSION_CODENAME}" == "bookworm" ]]; then + # add it twice to workaround: + # https://github.com/llvm/llvm-project/issues/62475 + add-apt-repository -y "${REPO_NAME}" + add-apt-repository -y "${REPO_NAME}" +elif [[ $is_new_debian -eq 1 ]]; then + # workaround missing add-apt-repository in newer Debian and use new source.list format + SOURCES_FILE="/etc/apt/sources.list.d/http_apt_llvm_org_${CODENAME}_-${VERSION_CODENAME}.sources" + TEXT_TO_ADD="Types: deb +Architectures: amd64 arm64 +Signed-By: /etc/apt/trusted.gpg.d/apt.llvm.org.asc +URIs: ${BASE_URL}/${CODENAME}/ +Suites: llvm-toolchain${LINKNAME}${LLVM_VERSION_STRING} +Components: main" + echo "$TEXT_TO_ADD" | tee -a "$SOURCES_FILE" > /dev/null +else + add-apt-repository -y "${REPO_NAME}" +fi + +apt-get update +PKG="clang-$LLVM_VERSION lldb-$LLVM_VERSION lld-$LLVM_VERSION clangd-$LLVM_VERSION" +if [[ $ALL -eq 1 ]]; then + # same as in test-install.sh + # No worries if we have dups + PKG="$PKG clang-tidy-$LLVM_VERSION clang-format-$LLVM_VERSION clang-tools-$LLVM_VERSION llvm-$LLVM_VERSION-dev lld-$LLVM_VERSION lldb-$LLVM_VERSION llvm-$LLVM_VERSION-tools libomp-$LLVM_VERSION-dev libc++-$LLVM_VERSION-dev libc++abi-$LLVM_VERSION-dev libclang-common-$LLVM_VERSION-dev libclang-$LLVM_VERSION-dev libclang-cpp$LLVM_VERSION-dev liblldb-$LLVM_VERSION-dev libunwind-$LLVM_VERSION-dev" + if test $LLVM_VERSION -gt 14; then + PKG="$PKG libclang-rt-$LLVM_VERSION-dev libpolly-$LLVM_VERSION-dev" + fi +fi +apt-get 
install -y $PKG diff --git a/my_driver/Makefile b/my_driver/Makefile new file mode 100644 index 0000000000000..713d6bc9ab2ed --- /dev/null +++ b/my_driver/Makefile @@ -0,0 +1,12 @@ +obj-m := user_dma.o +KDIR := /home/han/Desktop/evl_riscv/linux +PWD := $(shell pwd) + +CROSS_COMPILE := riscv64-linux-gnu- +ARCH := riscv + +all: + $(MAKE) -C $(KDIR) M=$(PWD) ARCH=$(ARCH) CROSS_COMPILE=$(CROSS_COMPILE) modules + +clean: + $(MAKE) -C $(KDIR) M=$(PWD) ARCH=$(ARCH) CROSS_COMPILE=$(CROSS_COMPILE) clean diff --git a/my_driver/update.sh b/my_driver/update.sh new file mode 100755 index 0000000000000..a4d70973e07f0 --- /dev/null +++ b/my_driver/update.sh @@ -0,0 +1,3 @@ +make && \ +md5sum user_dma.ko && \ +scp user_dma.ko root@192.168.138.2:/root diff --git a/my_driver/user_dma.c b/my_driver/user_dma.c new file mode 100644 index 0000000000000..2d8947ce211ca --- /dev/null +++ b/my_driver/user_dma.c @@ -0,0 +1,1631 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +extern unsigned long loops_per_jiffy; +//mocking PL022 spi driver + +#define GEN_MASK_BITS(val, mask, sb) \ + (((val)<<(sb)) & (mask)) + +/* + * Macros to access SSP Registers with their offsets + */ +#define SSP_CR0(r) (r + 0x000) +#define SSP_CR1(r) (r + 0x004) +#define SSP_DR(r) (r + 0x008) +#define SSP_SR(r) (r + 0x00C) +#define SSP_CPSR(r) (r + 0x010) +#define SSP_IMSC(r) (r + 0x014) +#define SSP_ICR(r) (r + 0x020) +#define SSP_DMACR(r) (r + 0x024) + +//spi control:masks and flag + +//cr0 +#define SSP_CR0_MASK_DSS (0x0FUL << 0) +#define SSP_CR0_MASK_FRF (0x3UL << 4) +#define SSP_CR0_MASK_SPO (0x1UL << 6) +#define SSP_CR0_MASK_SPH (0x1UL << 7) +#define SSP_CR0_MASK_SCR (0xFFUL << 8) + +//cr1 +#define SSP_CR1_MASK_LBM (0x1UL << 0) +#define SSP_CR1_MASK_SSE (0x1UL << 1) +#define SSP_CR1_MASK_MS (0x1UL << 2) +#define SSP_CR1_MASK_SOD (0x1UL << 
3) + +#define SSP_DISABLED (0) +#define SSP_ENABLED (1) +#define DRIVE_TX 0 +#define DO_NOT_DRIVE_TX 1 + +//dmacr +#define SSP_DMACR_MASK_RXDMAE (0x1UL << 0)/* Receive DMA Enable bit */ +#define SSP_DMACR_MASK_TXDMAE (0x1UL << 1)/* Transmit DMA Enable bit */ + +#define SSP_DMA_ENABLED (1) + +//sr +#define SSP_SR_MASK_RNE (0x1UL << 2) /* Receive FIFO not empty */ +#define SSP_SR_MASK_BSY (0x1UL << 4) /* Busy Flag */ + +//cpsr +#define SSP_CPSR_MASK_CPSDVSR (0xFFUL << 0) + +#define SSP_DEFAULT_PRESCALE 0x2 +#define SSP_DEFAULT_CLKRATE 0x3d + +//imsc +#define DEFAULT_SSP_REG_IMSC 0x0UL + +#define DEFAULT_SSP_REG_CR0 ( \ + GEN_MASK_BITS(SSP_DATA_BITS_8, SSP_CR0_MASK_DSS, 0) | \ + GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF, 4) | \ + GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \ + GEN_MASK_BITS(SSP_CLK_FIRST_EDGE, SSP_CR0_MASK_SPH, 7) | \ + GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) \ +) + +#define DEFAULT_SSP_REG_CR1 ( \ + GEN_MASK_BITS(LOOPBACK_DISABLED, SSP_CR1_MASK_LBM, 0) | \ + GEN_MASK_BITS(SSP_DISABLED, SSP_CR1_MASK_SSE, 1) | \ + GEN_MASK_BITS(SSP_MASTER, SSP_CR1_MASK_MS, 2) | \ + GEN_MASK_BITS(DO_NOT_DRIVE_TX, SSP_CR1_MASK_SOD, 3) \ +) + +#define DEFAULT_SSP_REG_DMACR (\ + GEN_MASK_BITS(SSP_DMA_ENABLED, SSP_DMACR_MASK_RXDMAE, 0) | \ + GEN_MASK_BITS(SSP_DMA_ENABLED, SSP_DMACR_MASK_TXDMAE, 1) \ +) + +#define DEFAULT_SSP_REG_CPSR ( \ + GEN_MASK_BITS(SSP_DEFAULT_PRESCALE, SSP_CPSR_MASK_CPSDVSR, 0) \ +) + +#define DISABLE_ALL_INTERRUPTS DEFAULT_SSP_REG_IMSC +#define CLEAR_ALL_INTERRUPTS 0x3 + +//pl022 functions +static void flush_spi_fifo(void __iomem *spi_base_addr_virt) +{ + unsigned long limit = loops_per_jiffy << 1; + + do { + while (readw(SSP_SR(spi_base_addr_virt)) & SSP_SR_MASK_RNE) + readw(SSP_DR(spi_base_addr_virt)); + } while ((readw(SSP_SR(spi_base_addr_virt)) & SSP_SR_MASK_BSY) && limit--); + + return; +} +static void spi_enable(void __iomem *spi_base_addr_virt) +{ + //enable spi + 
writew((readw(SSP_CR1(spi_base_addr_virt)) | SSP_CR1_MASK_SSE), + SSP_CR1(spi_base_addr_virt)); +} +static void spi_dma_config(void __iomem *spi_base_addr_virt) +{ + //1-the DMA burstsize should equal the FIFO trigger levels + //2-the addr width should be the same + //3-config fifo trigger level:the pl022 on vf2 will trigger interrupt when fifo is not empty + + writew(DEFAULT_SSP_REG_CR0, SSP_CR0(spi_base_addr_virt)); + writew(DEFAULT_SSP_REG_CR1, SSP_CR1(spi_base_addr_virt)); + writew(DEFAULT_SSP_REG_DMACR, SSP_DMACR(spi_base_addr_virt));//enable fifo for dma + writew(DEFAULT_SSP_REG_CPSR, SSP_CPSR(spi_base_addr_virt)); + writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(spi_base_addr_virt));//Disable interrupts in DMA mode, IRQ from DMA controller + writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(spi_base_addr_virt)); + + flush_spi_fifo(spi_base_addr_virt); + pr_info("\nspi fifo flushed\n"); +} + +static void print_spi_regs(void __iomem *spi_base_addr_virt) +{ + u32 read_cr0; + u16 read_cr1, read_dmacr, read_sr, read_cpsr ,read_imsc; + + read_cr0 = readw(SSP_CR0(spi_base_addr_virt)); + read_cr1 = readw(SSP_CR1(spi_base_addr_virt)); + read_dmacr = readw(SSP_DMACR(spi_base_addr_virt)); + read_sr = readw(SSP_SR(spi_base_addr_virt)); + read_cpsr = readw(SSP_CPSR(spi_base_addr_virt)); + read_imsc = readw(SSP_IMSC(spi_base_addr_virt)); + + pr_info("SPI CR0=0x%x\n",read_cr0); + + pr_info("SPI CR1=0x%x, SPI enable=%d\n", + read_cr1, + !!(read_cr1 & SSP_CR1_MASK_SSE)); + + pr_info("SPI SR=0x%x\n",read_sr); + + pr_info("SPI CPSR=0x%x\n",read_cpsr); + + pr_info("SPI IMSC=0x%x\n",read_imsc); + + pr_info("SPI DMACR=0x%x, SPI DMA rx dmaenable=%d, SPI DMA tx dmaenable=%d\n", + read_dmacr, + !!(read_dmacr & SSP_DMACR_MASK_RXDMAE), + !!(read_dmacr & SSP_DMACR_MASK_TXDMAE)); + return; +} + +//end of PL022 content + + +//start of pwmdac + +#define JH7110_PWMDAC_WDATA 0x00 +#define JH7110_PWMDAC_CTRL 0x04 +#define JH7110_PWMDAC_ENABLE BIT(0) +#define JH7110_PWMDAC_SHIFT BIT(1) +#define 
JH7110_PWMDAC_DUTY_CYCLE_SHIFT 2 +#define JH7110_PWMDAC_DUTY_CYCLE_MASK GENMASK(3, 2) +#define JH7110_PWMDAC_CNT_N_SHIFT 4 +#define JH7110_PWMDAC_CNT_N_MASK GENMASK(12, 4) +#define JH7110_PWMDAC_DATA_CHANGE BIT(13) +#define JH7110_PWMDAC_DATA_MODE BIT(14) +#define JH7110_PWMDAC_DATA_SHIFT_SHIFT 15 +#define JH7110_PWMDAC_DATA_SHIFT_MASK GENMASK(17, 15) + +#define PWMDAC_CLKRATE 12288000 + +enum JH7110_PWMDAC_SHIFT_VAL { + PWMDAC_SHIFT_8 = 0, + PWMDAC_SHIFT_10, +}; + +enum JH7110_PWMDAC_DUTY_CYCLE_VAL { + PWMDAC_CYCLE_LEFT = 0, + PWMDAC_CYCLE_RIGHT, + PWMDAC_CYCLE_CENTER, +}; + +enum JH7110_PWMDAC_CNT_N_VAL { + PWMDAC_SAMPLE_CNT_1 = 1, + PWMDAC_SAMPLE_CNT_2, + PWMDAC_SAMPLE_CNT_3, + PWMDAC_SAMPLE_CNT_512 = 512, /* max */ +}; + +enum JH7110_PWMDAC_DATA_CHANGE_VAL { + NO_CHANGE = 0, + CHANGE, +}; + +enum JH7110_PWMDAC_DATA_MODE_VAL { + UNSIGNED_DATA = 0, + INVERTER_DATA_MSB, +}; + +enum JH7110_PWMDAC_DATA_SHIFT_VAL { + PWMDAC_DATA_LEFT_SHIFT_BIT_0 = 0, + PWMDAC_DATA_LEFT_SHIFT_BIT_1, + PWMDAC_DATA_LEFT_SHIFT_BIT_2, + PWMDAC_DATA_LEFT_SHIFT_BIT_3, + PWMDAC_DATA_LEFT_SHIFT_BIT_4, + PWMDAC_DATA_LEFT_SHIFT_BIT_5, + PWMDAC_DATA_LEFT_SHIFT_BIT_6, + PWMDAC_DATA_LEFT_SHIFT_BIT_7, +}; + +struct jh7110_pwmdac_cfg { + enum JH7110_PWMDAC_SHIFT_VAL shift; + enum JH7110_PWMDAC_DUTY_CYCLE_VAL duty_cycle; + u16 cnt_n; + enum JH7110_PWMDAC_DATA_CHANGE_VAL data_change; + enum JH7110_PWMDAC_DATA_MODE_VAL data_mode; + enum JH7110_PWMDAC_DATA_SHIFT_VAL data_shift; +}; + + +struct jh7110_pwmdac_dev { + void __iomem *base; + resource_size_t mapbase; + struct jh7110_pwmdac_cfg cfg; + + struct clk_bulk_data clks[2]; + struct reset_control *rst_apb; + struct device *dev; + struct snd_dmaengine_dai_dma_data play_dma_data; + u32 saved_ctrl; +}; + + +static void jh7110_pwmdac_dump_cfg(struct jh7110_pwmdac_dev *pwmdac) +{ + struct jh7110_pwmdac_cfg *cfg = &pwmdac->cfg; + + if (!pwmdac || !pwmdac->dev) + return; + + dev_info(pwmdac->dev, "----- JH7110 PWMDAC CFG DUMP -----\n"); + 
dev_info(pwmdac->dev, "shift : %d\n", cfg->shift); + dev_info(pwmdac->dev, "duty_cycle : %d\n", cfg->duty_cycle); + dev_info(pwmdac->dev, "cnt_n : %u\n", cfg->cnt_n); + dev_info(pwmdac->dev, "data_change : %d\n", cfg->data_change); + dev_info(pwmdac->dev, "data_mode : %d\n", cfg->data_mode); + dev_info(pwmdac->dev, "data_shift : %d\n", cfg->data_shift); + dev_info(pwmdac->dev, "----------------------------------\n"); +} +static inline void jh7110_pwmdac_write_reg(void __iomem *io_base, int reg, u32 val) +{ + writel(val, io_base + reg); +} + +static inline u32 jh7110_pwmdac_read_reg(void __iomem *io_base, int reg) +{ + return readl(io_base + reg); +} + +static void dump_pwmdac_regs(void __iomem *base_addr_virt) +{ + u32 read_ctrl; + read_ctrl = jh7110_pwmdac_read_reg(base_addr_virt, JH7110_PWMDAC_CTRL); + pr_info("ctrl value=0x%x\n",read_ctrl); + return; +} + +static void jh7110_pwmdac_init_params(struct jh7110_pwmdac_dev *dev) +{ + dev->cfg.shift = PWMDAC_SHIFT_8; + dev->cfg.duty_cycle = PWMDAC_CYCLE_CENTER; + dev->cfg.cnt_n = PWMDAC_SAMPLE_CNT_1; + dev->cfg.data_change = NO_CHANGE; + dev->cfg.data_mode = INVERTER_DATA_MSB; + dev->cfg.data_shift = PWMDAC_DATA_LEFT_SHIFT_BIT_0; + + dev->play_dma_data.addr = dev->mapbase + JH7110_PWMDAC_WDATA; + dev->play_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + dev->play_dma_data.fifo_size = 1; + dev->play_dma_data.maxburst = 16; +} + +static int jh7110_pwmdac_crg_enable(struct jh7110_pwmdac_dev *dev, bool enable) +{ + int ret; + + if (enable) { + ret = clk_bulk_prepare_enable(ARRAY_SIZE(dev->clks), dev->clks); + if (ret) + return dev_err_probe(dev->dev, ret, + "failed to enable pwmdac clocks\n"); + + ret = reset_control_deassert(dev->rst_apb); + if (ret) { + dev_err(dev->dev, "failed to deassert pwmdac apb reset\n"); + goto err_rst_apb; + } + } else { + clk_bulk_disable_unprepare(ARRAY_SIZE(dev->clks), dev->clks); + } + + return 0; + +err_rst_apb: + clk_bulk_disable_unprepare(ARRAY_SIZE(dev->clks), dev->clks); + + return 
ret; +} +static int jh7110_pwmdac_runtime_resume(struct device *dev) +{ + struct jh7110_pwmdac_dev *pwmdac = dev_get_drvdata(dev); + + return jh7110_pwmdac_crg_enable(pwmdac, true); +} + +static int jh7110_pwmdac_probe(struct platform_device *pdev) +{ + struct jh7110_pwmdac_dev *dev; + struct resource *res; + int ret; + + dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL); + if (!dev) + return -ENOMEM; + + dev->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); + if (IS_ERR(dev->base)) + return PTR_ERR(dev->base); + + dev->mapbase = res->start; + + dev->clks[0].id = "apb"; + dev->clks[1].id = "core"; + + ret = devm_clk_bulk_get(&pdev->dev, ARRAY_SIZE(dev->clks), dev->clks); + if (ret) + return dev_err_probe(&pdev->dev, ret, + "failed to get pwmdac clocks\n"); + + dev->rst_apb = devm_reset_control_get_exclusive(&pdev->dev, NULL); + if (IS_ERR(dev->rst_apb)) + return dev_err_probe(&pdev->dev, PTR_ERR(dev->rst_apb), + "failed to get pwmdac apb reset\n"); + + jh7110_pwmdac_init_params(dev); + + dev->dev = &pdev->dev; + dev_set_drvdata(&pdev->dev, dev); + + dev_info(&pdev->dev,"use this pwmdac to test dmac\n"); + pm_runtime_enable(dev->dev); + if (!pm_runtime_enabled(&pdev->dev)) { + ret = jh7110_pwmdac_runtime_resume(&pdev->dev); + if (ret) + goto err_pm_disable; + } + + return 0; + +err_pm_disable: + pm_runtime_disable(&pdev->dev); + + return ret; +} + + +static void jh7110_pwmdac_set_enable(struct jh7110_pwmdac_dev *dev, bool enable) +{ + u32 value; + + value = jh7110_pwmdac_read_reg(dev->base, JH7110_PWMDAC_CTRL); + if (enable) + value |= JH7110_PWMDAC_ENABLE; + else + value &= ~JH7110_PWMDAC_ENABLE; + + jh7110_pwmdac_write_reg(dev->base, JH7110_PWMDAC_CTRL, value); +} + +static void jh7110_pwmdac_set_shift(struct jh7110_pwmdac_dev *dev) +{ + u32 value; + + value = jh7110_pwmdac_read_reg(dev->base, JH7110_PWMDAC_CTRL); + if (dev->cfg.shift == PWMDAC_SHIFT_8) + value &= ~JH7110_PWMDAC_SHIFT; + else if (dev->cfg.shift == PWMDAC_SHIFT_10) + value |= 
JH7110_PWMDAC_SHIFT;
+
+	jh7110_pwmdac_write_reg(dev->base, JH7110_PWMDAC_CTRL, value);
+}
+
+/* Program the duty-cycle mode field from dev->cfg.duty_cycle. */
+static void jh7110_pwmdac_set_duty_cycle(struct jh7110_pwmdac_dev *dev)
+{
+	u32 ctrl = jh7110_pwmdac_read_reg(dev->base, JH7110_PWMDAC_CTRL);
+
+	ctrl &= ~JH7110_PWMDAC_DUTY_CYCLE_MASK;
+	ctrl |= (dev->cfg.duty_cycle & 0x3) << JH7110_PWMDAC_DUTY_CYCLE_SHIFT;
+
+	jh7110_pwmdac_write_reg(dev->base, JH7110_PWMDAC_CTRL, ctrl);
+}
+
+/* Program the sample-count field (hardware stores cnt_n - 1). */
+static void jh7110_pwmdac_set_cnt_n(struct jh7110_pwmdac_dev *dev)
+{
+	u32 ctrl = jh7110_pwmdac_read_reg(dev->base, JH7110_PWMDAC_CTRL);
+
+	ctrl &= ~JH7110_PWMDAC_CNT_N_MASK;
+	ctrl |= ((dev->cfg.cnt_n - 1) & 0x1ff) << JH7110_PWMDAC_CNT_N_SHIFT;
+
+	jh7110_pwmdac_write_reg(dev->base, JH7110_PWMDAC_CTRL, ctrl);
+}
+
+/* Set or clear the data-change bit per dev->cfg.data_change. */
+static void jh7110_pwmdac_set_data_change(struct jh7110_pwmdac_dev *dev)
+{
+	u32 ctrl = jh7110_pwmdac_read_reg(dev->base, JH7110_PWMDAC_CTRL);
+
+	switch (dev->cfg.data_change) {
+	case NO_CHANGE:
+		ctrl &= ~JH7110_PWMDAC_DATA_CHANGE;
+		break;
+	case CHANGE:
+		ctrl |= JH7110_PWMDAC_DATA_CHANGE;
+		break;
+	default:
+		/* any other value leaves the bit untouched, as before */
+		break;
+	}
+
+	jh7110_pwmdac_write_reg(dev->base, JH7110_PWMDAC_CTRL, ctrl);
+}
+
+/* Select unsigned vs. inverted-MSB data mode per dev->cfg.data_mode. */
+static void jh7110_pwmdac_set_data_mode(struct jh7110_pwmdac_dev *dev)
+{
+	u32 ctrl = jh7110_pwmdac_read_reg(dev->base, JH7110_PWMDAC_CTRL);
+
+	if (dev->cfg.data_mode == UNSIGNED_DATA)
+		ctrl &= ~JH7110_PWMDAC_DATA_MODE;
+	else if (dev->cfg.data_mode == INVERTER_DATA_MSB)
+		ctrl |= JH7110_PWMDAC_DATA_MODE;
+
+	jh7110_pwmdac_write_reg(dev->base, JH7110_PWMDAC_CTRL, ctrl);
+}
+
+/* Program the left-shift applied to each data sample. */
+static void jh7110_pwmdac_set_data_shift(struct jh7110_pwmdac_dev *dev)
+{
+	u32 ctrl = jh7110_pwmdac_read_reg(dev->base, JH7110_PWMDAC_CTRL);
+
+	ctrl &= ~JH7110_PWMDAC_DATA_SHIFT_MASK;
+	ctrl |= (dev->cfg.data_shift & 0x7) << JH7110_PWMDAC_DATA_SHIFT_SHIFT;
+
+	jh7110_pwmdac_write_reg(dev->base, JH7110_PWMDAC_CTRL, ctrl);
+}
+
+/* Apply the entire cached configuration and enable the PWMDAC. */
+static void jh7110_pwmdac_set(struct jh7110_pwmdac_dev *dev)
+{
+	jh7110_pwmdac_set_shift(dev);
+	
jh7110_pwmdac_set_duty_cycle(dev); + jh7110_pwmdac_set_cnt_n(dev); + jh7110_pwmdac_set_enable(dev, true); + + jh7110_pwmdac_set_data_change(dev); + jh7110_pwmdac_set_data_mode(dev); + jh7110_pwmdac_set_data_shift(dev); +} + +static void jh7110_pwmdac_stop(struct jh7110_pwmdac_dev *dev) +{ + jh7110_pwmdac_set_enable(dev, false); +} + +static void mocking_jh7110_pwmdac_hw_params(struct jh7110_pwmdac_dev *dev) +{ + int ret; + jh7110_pwmdac_set(dev); + ret = clk_set_rate(dev->clks[1].clk, PWMDAC_CLKRATE+64); + if (ret) + pr_err("failed to set rate %u for core clock\n",PWMDAC_CLKRATE+64); + return; +} + +//end of pwmdac + +#define DEVICE_NAME "user_dma" +#define BUF_LEN 4096*3 +#define DW_AXI_DMAC_NAME "dma-controller@16050000" +#define PL08_DMAC_NAME "dma-controller@16008000" + +#define SPI_PATH "/soc/spi@10060000" +#define PWMDAC_PATH "/soc/pwmdac@100b0000" + +#define PWMDAC_ADDR 0x100b0000 + +#define MIX_TEST_DESC_LEN 4 + +//USER_DMA_IOCTL___ +#define USER_DMA_IOCTL_MEM_CPY_IB _IOR('M', 1, int)//test inband memcpy by dw-axi-dmac +#define USER_DMA_IOCTL_SPI_TXRX_IB _IOR('M', 2, int)//test inband spi dma loopback +#define USER_DMA_IOCTL_DACcy_TX_IB _IOR('M', 3, int)//test inband dac cyclic dma-tx +#define USER_DMA_IOCTL_DACcy_TX_OOB _IOR('M', 4, int)//test oob dac cyclic dma tx +#define USER_DMA_IOCTL_DACsg_TX_IB _IOR('M', 5, int)//test inband dac dma sg tx +#define USER_DMA_IOCTL_DACsg_TX_OOB _IOR('M', 6, int)//test oob dac dma sg tx +#define USER_DMA_IOCTL_DACsg_TX_MIX _IOR('M', 7, int)//test oob dac dma sg tx +struct transfer_config { + //bus addr that dmac can use + dma_addr_t dma_buf_src; + dma_addr_t dma_buf_des; + //virt addr that driver can use + void *buf_src; + void *buf_des; +}; + +struct my_cb_param { + const char *log; + int submit_index; + int target_cnt; + int cnt; + struct completion done; // 用于同步的 completion + struct dma_chan *dchan; +}; + +struct spi_ctrl { + void __iomem *virt_addr; + dma_addr_t phy_addr; /* phy addr for dmac */ + struct clk *clk; + 
struct reset_control *rst;
+};
+
+//char device
+static dev_t dev_num;
+static struct cdev user_dma_cdev;
+static struct class *user_dma_class;
+
+//pwmdac device found via the device tree (see fetch_dac_addr())
+struct jh7110_pwmdac_dev *pwmdac;
+struct platform_device *pdev_dac;
+
+//spi
+static struct spi_ctrl spi;
+
+//callbacks
+/* In-band completion callback: count completions, signal when the target
+ * count is reached, and log every invocation. */
+static void cb_ib(void *param)
+{
+	struct my_cb_param *params = param;
+
+	params->cnt = params->cnt + 1;
+	if (params->cnt == params->target_cnt)
+		complete(&params->done);
+	pr_info("--------------------\n");
+	pr_info("%s\n", params->log);
+	pr_info("submit index = %d\n", params->submit_index);
+	pr_info("cnt = %d,target_cnt = %d\n", params->cnt, params->target_cnt);
+	pr_info("--------------------\n");
+}
+
+/* Out-of-band completion callback: as cb_ib, but only logs while the
+ * caller is running in the OOB stage. */
+static void cb_oob(void *param)
+{
+	struct my_cb_param *params = param;
+
+	params->cnt = params->cnt + 1;
+	if (params->cnt == params->target_cnt)
+		complete(&params->done);
+	if (running_oob()) {
+		pr_info("--------------------\n");
+		pr_info("%s\n", params->log);
+		pr_info("running oob\n");
+		pr_info("submit index = %d\n", params->submit_index);
+		pr_info("cnt = %d,target_cnt = %d\n", params->cnt, params->target_cnt);
+		pr_info("--------------------\n");
+	}
+}
+
+/*
+ * Fill the source buffer with an incrementing 0x00..0xff byte pattern.
+ * NOTE(review): the loop header was destroyed in this hunk; reconstructed
+ * to match the identical pattern generator in the userspace tool (tt.c) —
+ * confirm against the original source.
+ */
+static void fill_source_buffer(struct transfer_config *trans_config)
+{
+	int num = 0;
+	unsigned char *src_buf = trans_config->buf_src;
+
+	for (int i = 0; i < BUF_LEN; i++) {
+		src_buf[i] = (unsigned char)(0xff & num);
+		num++;
+	}
+}
+
+/*
+ * Compare source and destination buffers byte-for-byte.
+ * Returns true when they match.
+ * NOTE(review): loop body reconstructed for the same reason as above.
+ */
+static bool check_dest_buffer(struct transfer_config *trans_config)
+{
+	unsigned char *src_buf = trans_config->buf_src;
+	unsigned char *des_buf = trans_config->buf_des;
+
+	for (int i = 0; i < BUF_LEN; i++) {
+		if (src_buf[i] != des_buf[i])
+			return false;
+	}
+	return true;
+}
+
+/* Enable the PWMDAC clocks and release its APB reset. */
+static bool enable_dac(void)
+{
+	int ret;
+
+	ret = clk_bulk_prepare_enable(ARRAY_SIZE(pwmdac->clks), pwmdac->clks);
+	if (ret) {
+		pr_err("clk enable error\n");
+		return false;
+	}
+	ret = reset_control_deassert(pwmdac->rst_apb);
+	if (ret) {
+		pr_err("failed to deassert pwmdac apb reset\n");
+		return false;
+	}
+	
pr_info("pwmdac enabled\n");
+	return true;
+}
+
+/* Assert the PWMDAC APB reset and gate its clocks. */
+static void disable_dac(void)
+{
+	reset_control_assert(pwmdac->rst_apb);
+
+	clk_bulk_disable_unprepare(ARRAY_SIZE(pwmdac->clks), pwmdac->clks);
+
+	pr_info("pwmdac disabled\n");
+}
+
+/*
+ * Locate the PWMDAC platform device from the device tree, probe it and
+ * enable the hardware.  Returns true on success.
+ */
+static bool fetch_dac_addr(void)
+{
+	struct device_node *np;
+	int ret;
+
+	/* Find the DAC node; the path must match the DTS. */
+	np = of_find_node_by_path(PWMDAC_PATH);
+	if (!np) {
+		pr_err("cannot find pwmdac node\n");
+		return false;
+	}
+	pdev_dac = of_find_device_by_node(np);
+	/* Drop the node reference immediately; the original code leaked it
+	 * on every error path. */
+	of_node_put(np);
+	if (!pdev_dac) {
+		pr_err("cannot find platform_device for DAC\n");
+		return false;
+	}
+
+	ret = jh7110_pwmdac_probe(pdev_dac);
+	if (ret) {
+		pr_err("probe fails\n");
+		return false;
+	}
+	pwmdac = dev_get_drvdata(&pdev_dac->dev);
+	jh7110_pwmdac_dump_cfg(pwmdac);
+	pr_info("dac base (virt addr) : %p\n", pwmdac->base);
+	pr_info("dac mapbase (phys) : %pa\n", &pwmdac->mapbase);
+	//enable the device
+	if (!enable_dac())
+		return false;
+
+	dump_pwmdac_regs(pwmdac->base);
+	return true;
+}
+
+//filter dmac by device name,target for dw-axi-dmac
+/* dmaengine filter: match channels whose controller OF node name equals
+ * the string passed as @param. */
+static bool my_dma_filter(struct dma_chan *chan, void *param)
+{
+	const char *target_name = param;
+	struct device *dev;
+	const char *node_name;
+
+	if (!chan || !chan->device)
+		return false;
+
+	dev = chan->device->dev;
+	if (!dev || !dev->of_node)
+		return false;
+
+	node_name = of_node_full_name(dev->of_node);
+	if (!node_name)
+		return false;
+
+	return strcmp(node_name, target_name) == 0;
+}
+
+/*
+ * Allocate the coherent source/destination DMA buffers.
+ * Returns true only when BOTH allocations succeeded; on partial failure
+ * the surviving buffer is freed (the original '&&' test only detected
+ * the case where both allocations failed, and leaked the other buffer).
+ */
+static bool allocate_trans_config(struct transfer_config *trans_config, struct device *dev)
+{
+	trans_config->buf_src = dma_alloc_coherent(dev, BUF_LEN,
+						   &trans_config->dma_buf_src, GFP_KERNEL);
+	trans_config->buf_des = dma_alloc_coherent(dev, BUF_LEN,
+						   &trans_config->dma_buf_des, GFP_KERNEL);
+	if (!trans_config->buf_src || !trans_config->buf_des) {
+		if (trans_config->buf_src)
+			dma_free_coherent(dev, BUF_LEN, trans_config->buf_src,
+					  trans_config->dma_buf_src);
+		if (trans_config->buf_des)
+			dma_free_coherent(dev, BUF_LEN, trans_config->buf_des,
+					  trans_config->dma_buf_des);
+		pr_err("allocate DMAC reachable buffer error\n");
+		return false;
+	}
+	pr_info("allocate DMAC reachable buffer success\n");
+	return true;
+}
+
+static void
release_trans_config(struct transfer_config *trans_config, struct device *dev)
+{
+	dma_free_coherent(dev, BUF_LEN, trans_config->buf_src, trans_config->dma_buf_src);
+	dma_free_coherent(dev, BUF_LEN, trans_config->buf_des, trans_config->dma_buf_des);
+}
+
+//do_____test
+/*
+ * In-band DMA_MEMCPY test on the DW AXI DMAC: DMA-copy a byte pattern
+ * from the source buffer to the destination buffer and verify it.
+ * Returns 0 on success, 1 on failure.
+ */
+static int do_dw_mem_cp_ib_test(void)
+{
+	int ret = 0;
+	struct transfer_config trans_config;
+	struct my_cb_param params;
+
+	dma_cap_mask_t mask;
+	struct dma_chan *dchan;
+	struct dma_async_tx_descriptor *tx;
+	dma_cookie_t cookie_tx;
+
+	//get channel
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_MEMCPY, mask);
+	dchan = dma_request_channel(mask, my_dma_filter, DW_AXI_DMAC_NAME);
+	/*
+	 * dma_request_channel() returns NULL on failure (unlike
+	 * dma_request_chan(), which returns an ERR_PTR); the original
+	 * IS_ERR() test could never trigger.
+	 */
+	if (!dchan) {
+		pr_err("Failed to request DMA channel\n");
+		ret = 1;
+		goto chan_exit;
+	}
+	if (dchan->device && dchan->device->dev)
+		pr_info("Got dw memcpy channel success: %s\n", dma_chan_name(dchan));
+
+	//allocate dma reachable buffer
+	if (!allocate_trans_config(&trans_config, dchan->device->dev)) {
+		ret = 1;
+		goto trans_config_exit;
+	}
+	fill_source_buffer(&trans_config);
+	memset(trans_config.buf_des, 0, BUF_LEN);
+
+	init_completion(&params.done);
+	params.log = "MEM:inband memcpy by dw-axi-dmac cb is called";
+	params.submit_index = 0;
+	params.cnt = 0;
+	params.target_cnt = 1;
+	//prepare
+	/*
+	 * dmaengine_prep_dma_memcpy(chan, dest, src, len, flags) takes the
+	 * destination FIRST.  The original call passed the source buffer as
+	 * dest, so the zeroed destination was copied over the pattern and
+	 * the final check compared two zeroed buffers (vacuous pass).
+	 */
+	tx = dmaengine_prep_dma_memcpy(dchan,
+				       trans_config.dma_buf_des,
+				       trans_config.dma_buf_src,
+				       BUF_LEN,
+				       DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+	if (!tx) {
+		/* prep can fail; the original dereferenced tx unchecked */
+		pr_err("failed to prepare memcpy descriptor\n");
+		release_trans_config(&trans_config, dchan->device->dev);
+		dma_release_channel(dchan);
+		return 1;
+	}
+	tx->callback = cb_ib;
+	tx->callback_param = &params;
+	pr_info("dma prepare success\n");
+	//submit
+	cookie_tx = dmaengine_submit(tx);
+	pr_info("dma submit success\n");
+	//issue pending
+	dma_async_issue_pending(dchan);
+	pr_info("dma issue pending success\n");
+	//wait for dma to comp
+	pr_info("wait for dma to comp\n");
+	wait_for_completion(&params.done);
+	pr_info("wait success\n");
+	//check the pattern in dest buffer
+	ret = (check_dest_buffer(&trans_config)) ? (0) : (1);
+
+	//release trans_config
+	
release_trans_config(&trans_config,dchan->device->dev); +trans_config_exit: + dma_release_channel(dchan); +chan_exit: + return ret; +} + +static int do_pl08_spi_sg_ib_test(void) +{ + int ret = 0; + struct transfer_config trans_config; + struct my_cb_param params; + + dma_cap_mask_t mask; + struct dma_chan *dchan_tx; + struct dma_chan *dchan_rx; + + struct dma_slave_config chan_tx_config = {0}; + struct dma_slave_config chan_rx_config = {0}; + struct scatterlist sg_tx[1], sg_rx[1]; + struct dma_async_tx_descriptor *rx; + struct dma_async_tx_descriptor *tx; + dma_cookie_t cookie_rx; + dma_cookie_t cookie_tx; + int xfer_len = 8; + + //get channel + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + dchan_tx = dma_request_channel(mask, my_dma_filter, PL08_DMAC_NAME); + dchan_rx = dma_request_channel(mask, my_dma_filter, PL08_DMAC_NAME); + if(IS_ERR(dchan_tx) || IS_ERR(dchan_rx)) { + pr_err("Failed to request PL08 DMA channel\n"); + ret = 1; + goto chan_exit; + // return false; + } else { + if(dchan_tx && dchan_rx && dchan_tx->device && dchan_tx->device->dev) { + pr_info("Got PL08 tx channel: %s\n", dma_chan_name(dchan_tx)); + pr_info("Got PL08 rx channel: %s\n", dma_chan_name(dchan_rx)); + } + } + + //allocate dma reachable buffer + if(!allocate_trans_config(&trans_config,dchan_tx->device->dev)) { + ret = 1; + goto trans_config_exit; + } + + fill_source_buffer(&trans_config); + memset(trans_config.buf_des,0,BUF_LEN); + spi_dma_config(spi.virt_addr); + print_spi_regs(spi.virt_addr); + pr_info("\nPL022 spi config and enable sucess\n"); + + // ini params + init_completion(¶ms.done); + params.log = "SPI:inband spi-rx dma callback by PL08-dmac is called"; + params.submit_index = 0; + params.cnt = 0; + params.target_cnt = 1; + + //config dma channel + //tx + chan_tx_config.direction = DMA_MEM_TO_DEV; + chan_tx_config.dst_addr = SSP_DR(spi.phy_addr); + chan_tx_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;// + chan_tx_config.dst_maxburst = 1;// + 
chan_tx_config.device_fc = false; + + dmaengine_slave_config(dchan_tx, &chan_tx_config); + //rx + chan_rx_config.direction = DMA_DEV_TO_MEM; + chan_rx_config.src_addr = SSP_DR(spi.phy_addr); + chan_rx_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;// + chan_rx_config.src_maxburst = 1;// + chan_rx_config.device_fc = false; + + dmaengine_slave_config(dchan_rx, &chan_rx_config); + pr_info("dma slave config success\n"); + //init scatter list + sg_init_table(sg_tx, 1); + sg_init_table(sg_rx, 1); + sg_init_one(&sg_tx[0], trans_config.buf_src, xfer_len); + sg_init_one(&sg_rx[0], trans_config.buf_des, xfer_len); + + sg_dma_address(&sg_tx[0]) = trans_config.dma_buf_src; + sg_dma_len(&sg_tx[0]) = xfer_len; + + sg_dma_address(&sg_rx[0]) = trans_config.dma_buf_des; + sg_dma_len(&sg_rx[0]) = xfer_len; + + //prepare dma desc + tx = dmaengine_prep_slave_sg(dchan_tx, + sg_tx, + 1, + DMA_MEM_TO_DEV, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK + ); + rx = dmaengine_prep_slave_sg(dchan_rx, + sg_rx, + 1, + DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK + ); + rx->callback = cb_ib; + rx->callback_param = ¶ms; + pr_info("dma prepare success\n"); + //submit + cookie_rx = dmaengine_submit(rx); + cookie_tx = dmaengine_submit(tx); + pr_info("TX cookie=%u RX cookie=%u\n", cookie_tx, cookie_rx); + //issue pending + dma_async_issue_pending(dchan_rx); + dma_async_issue_pending(dchan_tx); + spi_enable(spi.virt_addr); + print_spi_regs(spi.virt_addr); + pr_info("dma issue pending success\n"); + pr_info("wait for dma to comp\n"); + + if (!wait_for_completion_timeout(¶ms.done, msecs_to_jiffies(5000))) { + pr_err("rx DMA timeout error!\n"); + dmaengine_terminate_sync(dchan_tx); + dmaengine_terminate_sync(dchan_rx); + pr_info("pl dma channels terminated\n"); + ret = 1; + } else { + pr_info("wait success\n"); + } + //release trans_config + release_trans_config(&trans_config,dchan_tx->device->dev); +trans_config_exit: + dma_release_channel(dchan_tx); + dma_release_channel(dchan_rx); +chan_exit: + return 
ret; +} + +static int do_dw_dac_cy_ib_test(void) +{ + int ret = 0; + struct transfer_config trans_config; + struct my_cb_param params; + + dma_cap_mask_t mask; + struct dma_chan *dchan; + struct dma_slave_config chan_tx_config = {0}; + struct dma_async_tx_descriptor *tx; + size_t xfer_len = 1024; + dma_cookie_t cookie_tx; + struct dma_tx_state state; + enum dma_status status; + //get channel + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + dchan = dma_request_chan(&pdev_dac->dev, "tx"); + if (IS_ERR(dchan)) { + pr_err("Failed to request DMA channel\n"); + ret = 1; + goto chan_exit; + } else { + if(dchan && dchan->device && dchan->device->dev) + pr_info("Got dw m2d channel success: %s\n", dma_chan_name(dchan)); + } + //allocate dma reachable buffer + if(!allocate_trans_config(&trans_config,dchan->device->dev)) { + ret = 1; + goto trans_config_exit; + } + fill_source_buffer(&trans_config); + + init_completion(¶ms.done); + params.log = "DAC:inband m2d dma by dw-axi-dmac cb is called"; + params.submit_index = 0; + params.cnt = 0; + params.target_cnt = 2*BUF_LEN/xfer_len; + + //mocking jh7110_pwmdac_hw_params + enable_dac(); + mocking_jh7110_pwmdac_hw_params(pwmdac); + //slave config + memset(&chan_tx_config, 0, sizeof(chan_tx_config)); + chan_tx_config.direction = DMA_MEM_TO_DEV; + chan_tx_config.dst_addr = PWMDAC_ADDR; + chan_tx_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;// + chan_tx_config.dst_maxburst = 16;// + chan_tx_config.device_fc = false; + dmaengine_slave_config(dchan, &chan_tx_config); + + //prepare + tx = dmaengine_prep_dma_cyclic(dchan, + trans_config.dma_buf_src, + BUF_LEN, + xfer_len, + DMA_MEM_TO_DEV, + DMA_CTRL_ACK | DMA_PREP_INTERRUPT + ); + tx->callback = cb_ib; + tx->callback_param = ¶ms; + + pr_info("dma prepare success\n"); + //submit + cookie_tx = dmaengine_submit(tx); + + dma_async_issue_pending(dchan); + + // jh7110_pwmdac_set(pwmdac); + dump_pwmdac_regs(pwmdac->base); + if (!wait_for_completion_timeout(¶ms.done, 
msecs_to_jiffies(10000))) { + pr_err("tx DMA timeout error!\n"); + dump_pwmdac_regs(pwmdac->base); + } else { + + pr_info("wait success\n"); + } + jh7110_pwmdac_stop(pwmdac); + dmaengine_pause(dchan); + disable_dac(); + + release_trans_config(&trans_config,dchan->device->dev); +trans_config_exit: + status = dmaengine_tx_status(dchan,cookie_tx, &state); + + pr_info("DMA status raw: %d\n", status); + + if (status & DMA_COMPLETE) + pr_info(" DMA_COMPLETE\n"); + if (status & DMA_PAUSED) + pr_info(" DMA_PAUSED\n"); + if (status & DMA_ERROR) + pr_info(" DMA_ERROR\n"); + if (status & DMA_IN_PROGRESS) + pr_info(" DMA_IN_PROGRESS\n"); + + pr_info("DMA residue: %u\n", state.residue); + + if (status == DMA_PAUSED) { + pr_info("dchan status paused\n"); + + } + dmaengine_terminate_async(dchan); + dma_release_channel(dchan); + pr_info("cyclic occupied dma-channel may not be released successfully,must reboot system to test other functions\n"); +chan_exit: + return ret; +} + +static int do_dw_dac_cy_oob_test(void) +{ + int ret = 0; + struct transfer_config trans_config; + struct my_cb_param params; + + dma_cap_mask_t mask; + struct dma_chan *dchan; + struct dma_slave_config chan_tx_config = {0}; + struct dma_async_tx_descriptor *tx; + size_t xfer_len = 1024; + dma_cookie_t cookie_tx; + + //get channel + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + dchan = dma_request_chan(&pdev_dac->dev, "tx"); + if (IS_ERR(dchan)) { + pr_err("Failed to request DMA channel\n"); + ret = 1; + goto chan_exit; + } else { + if(dchan && dchan->device && dchan->device->dev) + pr_info("Got dw m2d channel success: %s\n", dma_chan_name(dchan)); + } + + //allocate dma reachable buffer + if(!allocate_trans_config(&trans_config,dchan->device->dev)) { + ret = 1; + goto trans_config_exit; + } + fill_source_buffer(&trans_config); + + //setting params + init_completion(¶ms.done); + params.log = "DAC:oob m2d dma by dw-axi-dmac cb is called"; + params.submit_index = 0; + params.cnt = 0; + params.target_cnt 
= 2*BUF_LEN/xfer_len; + pr_info("target_cnt:%d\n",params.target_cnt); + //mocking jh7110_pwmdac_hw_params + enable_dac(); + mocking_jh7110_pwmdac_hw_params(pwmdac); + + //slave config + memset(&chan_tx_config, 0, sizeof(chan_tx_config)); + chan_tx_config.direction = DMA_MEM_TO_DEV; + chan_tx_config.dst_addr = PWMDAC_ADDR; + chan_tx_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;// + chan_tx_config.dst_maxburst = 16;// + chan_tx_config.device_fc = false; + dmaengine_slave_config(dchan, &chan_tx_config); + + //prepare + tx = dmaengine_prep_dma_cyclic(dchan, + trans_config.dma_buf_src, + BUF_LEN, + xfer_len, + DMA_MEM_TO_DEV, + DMA_CTRL_ACK | DMA_PREP_INTERRUPT | DMA_OOB_INTERRUPT + ); + tx->callback = cb_oob; + tx->callback_param = ¶ms; + //submit + cookie_tx = dmaengine_submit(tx); + + dma_async_issue_pending(dchan); + + jh7110_pwmdac_set(pwmdac); + dump_pwmdac_regs(pwmdac->base); + if (!wait_for_completion_timeout(¶ms.done, msecs_to_jiffies(10000))) { + pr_err("tx DMA timeout error!\n"); + dump_pwmdac_regs(pwmdac->base); + } else { + pr_info("wait success\n"); + } + + jh7110_pwmdac_stop(pwmdac); + disable_dac(); + + dmaengine_pause(dchan); + msleep(1); + dmaengine_terminate_sync(dchan); + //release trans_config + release_trans_config(&trans_config,dchan->device->dev); +trans_config_exit: + + dma_release_channel(dchan); + pr_info("cyclic occupied dma-channel may not be released successfully,must reboot system to test other functions\n"); +chan_exit: + return ret; +} + +static int do_dw_dac_sg_ib_test(void) +{ + int ret = 0; + struct transfer_config trans_config; + struct scatterlist sg_tx[1]; + struct my_cb_param params; + + dma_cap_mask_t mask; + struct dma_chan *dchan; + struct dma_slave_config chan_tx_config = {0}; + struct dma_async_tx_descriptor *tx; + size_t xfer_len = 4096; + dma_cookie_t cookie_tx; + + //get channel + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + dchan = dma_request_chan(&pdev_dac->dev, "tx"); + if (IS_ERR(dchan)) { + 
pr_err("Failed to request DMA channel\n"); + ret = 1; + goto chan_exit; + } else { + if(dchan && dchan->device && dchan->device->dev) + pr_info("Got dw m2d channel success: %s\n", dma_chan_name(dchan)); + } + + //allocate dma reachable buffer + if(!allocate_trans_config(&trans_config,dchan->device->dev)) { + ret = 1; + goto trans_config_exit; + } + + fill_source_buffer(&trans_config); + //init scatter list + sg_init_table(sg_tx, 1); + sg_init_one(&sg_tx[0], trans_config.buf_src, xfer_len); + + sg_dma_address(&sg_tx[0]) = trans_config.dma_buf_src; + sg_dma_len(&sg_tx[0]) = xfer_len; + + //setting params + init_completion(¶ms.done); + params.log = "DAC:ib m2d dma by dw-axi-dmac cb is called"; + params.submit_index = 0; + params.cnt = 0; + params.target_cnt = 1; + pr_info("target_cnt:%d\n",params.target_cnt); + //mocking jh7110_pwmdac_hw_params + enable_dac(); + mocking_jh7110_pwmdac_hw_params(pwmdac); + + //slave config + memset(&chan_tx_config, 0, sizeof(chan_tx_config)); + chan_tx_config.direction = DMA_MEM_TO_DEV; + chan_tx_config.dst_addr = PWMDAC_ADDR; + chan_tx_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;// + chan_tx_config.dst_maxburst = 16;// + chan_tx_config.device_fc = false; + dmaengine_slave_config(dchan, &chan_tx_config); + + //prepare dma desc + tx = dmaengine_prep_slave_sg(dchan, + sg_tx, + 1, + DMA_MEM_TO_DEV, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK + ); + tx->callback = cb_ib; + tx->callback_param = ¶ms; + //submit + cookie_tx = dmaengine_submit(tx); + pr_info("TX cookie=%u\n", cookie_tx); + + dma_async_issue_pending(dchan); + + jh7110_pwmdac_set(pwmdac); + dump_pwmdac_regs(pwmdac->base); + if (!wait_for_completion_timeout(¶ms.done, msecs_to_jiffies(10000))) { + pr_err("tx DMA timeout error!\n"); + dump_pwmdac_regs(pwmdac->base); + } else { + pr_info("wait success\n"); + } + jh7110_pwmdac_stop(pwmdac); + disable_dac(); + + dmaengine_pause(dchan); + msleep(1); + dmaengine_terminate_sync(dchan); + //release trans_config + 
release_trans_config(&trans_config,dchan->device->dev); +trans_config_exit: + + dma_release_channel(dchan); +chan_exit: + return ret; +} + +static int do_dw_dac_sg_oob_test(void) +{ + int ret = 0; + struct transfer_config trans_config; + struct scatterlist sg_tx[1]; + struct my_cb_param params; + + dma_cap_mask_t mask; + struct dma_chan *dchan; + struct dma_slave_config chan_tx_config = {0}; + struct dma_async_tx_descriptor *tx; + size_t xfer_len = 4096; + dma_cookie_t cookie_tx; + + //get channel + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + dchan = dma_request_chan(&pdev_dac->dev, "tx"); + if (IS_ERR(dchan)) { + pr_err("Failed to request DMA channel\n"); + ret = 1; + goto chan_exit; + } else { + if(dchan && dchan->device && dchan->device->dev) + pr_info("Got dw m2d channel success: %s\n", dma_chan_name(dchan)); + } + + //allocate dma reachable buffer + if(!allocate_trans_config(&trans_config,dchan->device->dev)) { + ret = 1; + goto trans_config_exit; + } + fill_source_buffer(&trans_config); + //init scatter list + sg_init_table(sg_tx, 1); + sg_init_one(&sg_tx[0], trans_config.buf_src, xfer_len); + + sg_dma_address(&sg_tx[0]) = trans_config.dma_buf_src; + sg_dma_len(&sg_tx[0]) = xfer_len; + + //setting params + init_completion(¶ms.done); + params.log = "DAC:oob m2d dma by dw-axi-dmac cb is called"; + params.submit_index = 0; + params.cnt = 0; + params.target_cnt = 1; + + //mocking jh7110_pwmdac_hw_params + enable_dac(); + mocking_jh7110_pwmdac_hw_params(pwmdac); + + //slave config + memset(&chan_tx_config, 0, sizeof(chan_tx_config)); + chan_tx_config.direction = DMA_MEM_TO_DEV; + chan_tx_config.dst_addr = PWMDAC_ADDR; + chan_tx_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;// + chan_tx_config.dst_maxburst = 16;// + chan_tx_config.device_fc = false; + dmaengine_slave_config(dchan, &chan_tx_config); + + //prepare dma desc + tx = dmaengine_prep_slave_sg(dchan, + sg_tx, + 1, + DMA_MEM_TO_DEV, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK | DMA_OOB_INTERRUPT | 
DMA_OOB_PULSE + ); + tx->callback = cb_oob; + tx->callback_param = ¶ms; + //submit + cookie_tx = dmaengine_submit(tx); + pr_info("TX cookie=%u\n", cookie_tx); + + //trigger the tranfer by pulse_oob + dma_async_issue_pending(dchan); + dma_pulse_oob(dchan);//real trigger + + jh7110_pwmdac_set(pwmdac); + dump_pwmdac_regs(pwmdac->base); + + if (!wait_for_completion_timeout(¶ms.done, msecs_to_jiffies(10000))) { + pr_err("tx DMA timeout error!\n"); + dump_pwmdac_regs(pwmdac->base); + } else { + + pr_info("wait success\n"); + } + jh7110_pwmdac_stop(pwmdac); + disable_dac(); + + dmaengine_pause(dchan); + msleep(1); + dmaengine_terminate_sync(dchan); + //release trans_config + release_trans_config(&trans_config,dchan->device->dev); +trans_config_exit: + + dma_release_channel(dchan); +chan_exit: + return ret; +} + +static int do_dw_dac_sg_mix_test(void) +{ + int ret = 0; + struct transfer_config trans_config; + struct scatterlist sg_tx[1]; + struct my_cb_param params[MIX_TEST_DESC_LEN]; + bool oob_desc_sel[MIX_TEST_DESC_LEN] = {false,false,true,true}; + bool wait_success = true; + + dma_cap_mask_t mask; + struct dma_chan *dchan; + struct dma_slave_config chan_tx_config = {0}; + + struct dma_async_tx_descriptor *tx[MIX_TEST_DESC_LEN]; + size_t xfer_len = 4096; + dma_cookie_t cookie_tx[MIX_TEST_DESC_LEN]; + + //get channel + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + dchan = dma_request_chan(&pdev_dac->dev, "tx"); + if (IS_ERR(dchan)) { + pr_err("Failed to request DMA channel\n"); + ret = 1; + goto chan_exit; + } else { + if(dchan && dchan->device && dchan->device->dev) + pr_info("Got dw m2d channel success: %s\n", dma_chan_name(dchan)); + } + + //allocate dma reachable buffer + if(!allocate_trans_config(&trans_config,dchan->device->dev)) { + ret = 1; + goto trans_config_exit; + } + fill_source_buffer(&trans_config); + + //init scatter list + sg_init_table(sg_tx, 1); + sg_init_one(&sg_tx[0], trans_config.buf_src, xfer_len); + + sg_dma_address(&sg_tx[0]) = 
trans_config.dma_buf_src; + sg_dma_len(&sg_tx[0]) = xfer_len; + + //setting params + for(int i=0;icallback = cb_oob; + else + tx[i]->callback = cb_ib; + tx[i]->callback_param = params+i; + } + //submit + for(int i = 0 ; ibase); + //wait for all completion + for(int i = 0;idevice->dev); +trans_config_exit: + + dma_release_channel(dchan); +chan_exit: + return ret; +} + +static long user_dma_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + int output; + switch (cmd) + { + case USER_DMA_IOCTL_MEM_CPY_IB: + output = do_dw_mem_cp_ib_test(); + if (copy_to_user((int __user *)arg, &output, sizeof(output))) + return -EFAULT; + break; + case USER_DMA_IOCTL_SPI_TXRX_IB:{ + output = do_pl08_spi_sg_ib_test(); + if (copy_to_user((int __user *)arg, &output, sizeof(output))) + return -EFAULT; + break; + } + case USER_DMA_IOCTL_DACcy_TX_IB:{ + output = do_dw_dac_cy_ib_test(); + if (copy_to_user((int __user *)arg, &output, sizeof(output))) + return -EFAULT; + break; + } + case USER_DMA_IOCTL_DACcy_TX_OOB:{ + output = do_dw_dac_cy_oob_test(); + if (copy_to_user((int __user *)arg, &output, sizeof(output))) + return -EFAULT; + break; + } + case USER_DMA_IOCTL_DACsg_TX_IB:{ + output = do_dw_dac_sg_ib_test(); + if (copy_to_user((int __user *)arg, &output, sizeof(output))) + return -EFAULT; + break; + } + case USER_DMA_IOCTL_DACsg_TX_OOB:{ + output = do_dw_dac_sg_oob_test(); + if (copy_to_user((int __user *)arg, &output, sizeof(output))) + return -EFAULT; + break; + } + case USER_DMA_IOCTL_DACsg_TX_MIX:{ + output = do_dw_dac_sg_mix_test(); + if (copy_to_user((int __user *)arg, &output, sizeof(output))) + return -EFAULT; + break; + } + + default: + return -ENOTTY; + return 0; + } + return 0; +} + +static int user_dma_open(struct inode *inode, struct file *file) +{ + pr_info("user_dma open success\n"); + + return 0; +} + +static int user_dma_release(struct inode *inode, struct file *file) +{ + pr_info("user_dma released\n"); + + return 0; +} + +static const struct 
file_operations user_dma_fops = { + .owner = THIS_MODULE, + .open = user_dma_open, + .release = user_dma_release, + .unlocked_ioctl = user_dma_ioctl, +}; + +static int __init user_dma_init(void) +{ + int ret; + + if(!fetch_spi_addr()) { + pr_err("fetch spi error\n"); + } else { + pr_info("spi success\n"); + } + + if(!fetch_dac_addr()) { + pr_err("fetch dac error\n"); + } else { + pr_info("dac success\n"); + } + + // register char device + ret = alloc_chrdev_region(&dev_num, 0, 1, DEVICE_NAME); + if (ret < 0) { + pr_err("Failed to alloc chrdev\n"); + return ret; + } + + cdev_init(&user_dma_cdev, &user_dma_fops); + cdev_add(&user_dma_cdev, dev_num, 1); + + user_dma_class = class_create(DEVICE_NAME); + device_create(user_dma_class, NULL, dev_num, NULL, DEVICE_NAME); + + pr_info("user_dma module loaded\n"); + return 0; +} + +static void __exit user_dma_exit(void) +{ + + device_destroy(user_dma_class, dev_num); + class_destroy(user_dma_class); + + cdev_del(&user_dma_cdev); + unregister_chrdev_region(dev_num, 1); + + pr_info("user_dma module unloaded\n"); +} + +module_init(user_dma_init); +module_exit(user_dma_exit); + +MODULE_LICENSE("GPL"); diff --git a/user_scripts/spi_test/Makefile b/user_scripts/spi_test/Makefile new file mode 100644 index 0000000000000..03d26d4927dda --- /dev/null +++ b/user_scripts/spi_test/Makefile @@ -0,0 +1,16 @@ +# 交叉编译工具链前缀 +CROSS_COMPILE = riscv64-linux-gnu- +CC = $(CROSS_COMPILE)gcc + +# the exec finally generated +TARGET = spi_loopback +# source code +SRC = spi_loopback.c + +all: $(TARGET) + +$(TARGET): $(SRC) + $(CC) -Wall -O2 -o $@ $^ + +clean: + rm -f $(TARGET) diff --git a/user_scripts/spi_test/spi_loopback b/user_scripts/spi_test/spi_loopback new file mode 100755 index 0000000000000..90f8ebbc4b26c Binary files /dev/null and b/user_scripts/spi_test/spi_loopback differ diff --git a/user_scripts/spi_test/spi_loopback.c b/user_scripts/spi_test/spi_loopback.c new file mode 100644 index 0000000000000..64add670780d1 --- /dev/null +++ 
b/user_scripts/spi_test/spi_loopback.c
@@ -0,0 +1,71 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <linux/types.h>
+#include <linux/spi/spidev.h>
+
+int main() {
+    // open char device
+    const char *device = "/dev/spidev1.0";
+    int fd = open(device, O_RDWR);
+    if (fd < 0) {
+        perror("open");
+        return 1;
+    }
+
+    // setting mode, bit width, frequency
+    uint8_t mode = SPI_MODE_0;
+    if (ioctl(fd, SPI_IOC_WR_MODE, &mode) == -1) {
+        perror("SPI_IOC_WR_MODE");
+        close(fd);
+        return 1;
+    }
+
+    uint8_t bits = 8;
+    if (ioctl(fd, SPI_IOC_WR_BITS_PER_WORD, &bits) == -1) {
+        perror("SPI_IOC_WR_BITS_PER_WORD");
+        close(fd);
+        return 1;
+    }
+
+    uint32_t speed = 399193; // 399 kHz
+    if (ioctl(fd, SPI_IOC_WR_MAX_SPEED_HZ, &speed) == -1) {
+        perror("SPI_IOC_WR_MAX_SPEED_HZ");
+        close(fd);
+        return 1;
+    }
+
+    // prepare data
+    uint8_t tx[5] = { 'h', 'e', 'l', 'l', 'o' };
+    uint8_t rx[5] = {0};
+
+    struct spi_ioc_transfer tr = {
+        .tx_buf = (unsigned long)tx,
+        .rx_buf = (unsigned long)rx,
+        .len = sizeof(tx),
+        .speed_hz = speed,
+        .bits_per_word = bits,
+        .delay_usecs = 0,
+    };
+
+    // trigger send and recv
+    if (ioctl(fd, SPI_IOC_MESSAGE(1), &tr) < 1) {
+        perror("SPI_IOC_MESSAGE");
+        close(fd);
+        return 1;
+    }
+
+    // print the received data
+    printf("Received: ");
+    for (int i = 0; i < sizeof(rx); i++) {
+        printf("%02x ", rx[i]);
+    }
+    printf("\n");
+
+    close(fd);
+    return 0;
+}
diff --git a/user_scripts/testtool/Makefile b/user_scripts/testtool/Makefile
new file mode 100644
index 0000000000000..29bdf1f437832
--- /dev/null
+++ b/user_scripts/testtool/Makefile
@@ -0,0 +1,16 @@
+# Cross-compile toolchain prefix
+CROSS_COMPILE = riscv64-linux-gnu-
+CC = $(CROSS_COMPILE)gcc
+
+# the exec finally generated
+TARGET = tt
+# source code
+SRC = tt.c
+
+all: $(TARGET)
+
+$(TARGET): $(SRC)
+	$(CC) -Wall -O2 -o $@ $^
+
+clean:
+	rm -f $(TARGET)
diff --git a/user_scripts/testtool/tt b/user_scripts/testtool/tt
new file mode 100755
index 0000000000000..83f4bc9f124f8
Binary files /dev/null and b/user_scripts/testtool/tt differ
diff --git a/user_scripts/testtool/tt.c b/user_scripts/testtool/tt.c
new file mode 100644
index 0000000000000..efc1b07ffb56d
--- /dev/null
+++ b/user_scripts/testtool/tt.c
@@ -0,0 +1,365 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <linux/types.h>
+#include <linux/spi/spidev.h>
+
+// Define here the ioctl commands, consistent with the driver
+// e.g. assume the driver uses _IO('M', 1)
+// #define MY_IOCTL_CMD _IO('M', 1)
+
+enum Command {
+    CMD_UNKNOWN,
+
+    CMD_DW_MEMCPY_IB_TEST, //USER_DMA: test inband memcpy by dw-axi-dmac
+    CMD_SPILOOP_TEST,      //PL08: test dma by PL08 driver,using spi loopback
+    CMD_SPI_USERDMA_IB,    //USER_DMA: test spi loopback,using duplex dma in user_dma
+    CMD_DAC_CY_IB,         //USER_DMA: test dac tx,using dma in user_dma
+    CMD_DAC_CY_OOB,        //USER_DMA: test dac tx,using dma in user_dma
+    CMD_DAC_SG_IB,
+    CMD_DAC_SG_OOB,
+    CMD_DAC_SG_MIX
+};
+
+struct Cmd_element {
+    const char * cmd_input;
+    enum Command cmd;
+    int (*cmd_callback)(void);
+    const char * cmd_desc;
+};
+
+//ioctls
+#define USER_DMA_IOCTL_MEM_CPY_IB   _IOR('M', 1, int)
+#define USER_DMA_IOCTL_SPI_TXRX_IB  _IOR('M', 2, int) //test inband spi dma loopback
+#define USER_DMA_IOCTL_DACcy_TX_IB  _IOR('M', 3, int) //test inband dac dma-tx
+#define USER_DMA_IOCTL_DACcy_TX_OOB _IOR('M', 4, int) //test oob dac dma-tx
+#define USER_DMA_IOCTL_DACsg_TX_IB  _IOR('M', 5, int) //test inband dac dma sg tx
+#define USER_DMA_IOCTL_DACsg_TX_OOB _IOR('M', 6, int) //test oob dac dma sg tx
+#define USER_DMA_IOCTL_DACsg_TX_MIX _IOR('M', 7, int) //test oob dac dma sg tx
+
+int do_dw_memcpy_ib_test(void)
+{
+    const char *dev = "/dev/user_dma";
+    int fd;
+    int value;
+
+    fd = open(dev, O_RDWR);
+    if (fd < 0) {
+        perror("open");
+        return EXIT_FAILURE;
+    }
+    int ret = ioctl(fd, USER_DMA_IOCTL_MEM_CPY_IB,&value);
+    if (ret < 0) {
+        perror("ioctl");
+        close(fd);
+        return EXIT_FAILURE;
+    }
+    printf("inband memcpy executed finish\n");
+    if(value==0) {
+        printf("inband memcpy success!\n");
+    } else {
+        printf("inband memcpy error!\n");
+    }
+    close(fd);
+    return 0;
+}
+
+int do_spi_loop_test(void)
+{
+    bool test_success = true;
+
+    //open char device
+    const char *device = "/dev/spidev1.0";
+    int fd = open(device, O_RDWR);
+    if (fd < 0) {
+        perror("open");
+        return 1;
+    }
+
+    // setting mode, bit width, frequency
+    uint8_t mode = SPI_MODE_0;
+    if (ioctl(fd, SPI_IOC_WR_MODE, &mode) == -1) {
+        perror("SPI_IOC_WR_MODE");
+        close(fd);
+        return 1;
+    }
+
+    uint8_t bits = 8;
+    if (ioctl(fd, SPI_IOC_WR_BITS_PER_WORD, &bits) == -1) {
+        perror("SPI_IOC_WR_BITS_PER_WORD");
+        close(fd);
+        return 1;
+    }
+
+    uint32_t speed = 399193; // 399 kHz
+    if (ioctl(fd, SPI_IOC_WR_MAX_SPEED_HZ, &speed) == -1) {
+        perror("SPI_IOC_WR_MAX_SPEED_HZ");
+        close(fd);
+        return 1;
+    }
+
+    // prepare data
+    uint8_t tx[1024] = {0};
+    uint8_t rx[1024] = {0};
+    int num = 0;
+    for(int i=0;i<1024;i++)
+    {
+        tx[i] = (unsigned char)(0xff & num);
+        num++;
+    }
+    memset(rx,0,1024);
+
+    struct spi_ioc_transfer tr = {
+        .tx_buf = (unsigned long)tx,
+        .rx_buf = (unsigned long)rx,
+        .len = sizeof(tx),
+        .speed_hz = speed,
+        .bits_per_word = bits,
+        .delay_usecs = 0,
+    };
+
+    // trigger send and recv
+    if (ioctl(fd, SPI_IOC_MESSAGE(1), &tr) < 1) {
+        perror("SPI_IOC_MESSAGE");
+        close(fd);
+        return 1;
+    }
+
+    // check data
+    for(int i=0;i<1024;i++)
+    {
+        if(tx[i]!=rx[i])
+        {
+            test_success = false;
+            break;
+        }
+    }
+
+    if(test_success)
+    {
+        printf("spi loopback success!\n");
+    }
+    else
+    {
+        printf("spi loopback error!\n");
+    }
+
+    close(fd);
+    return 0;
+}
+
+int do_spi_userdma_ib_test(void)
+{
+    const char *dev = "/dev/user_dma";
+    int fd;
+    int value;
+
+    fd = open(dev, O_RDWR);
+    if (fd < 0) {
+        perror("open");
+        return EXIT_FAILURE;
+    }
+    int ret = ioctl(fd, USER_DMA_IOCTL_SPI_TXRX_IB,&value);
+    if (ret < 0) {
+        perror("ioctl");
+        close(fd);
+        return EXIT_FAILURE;
+    }
+    printf("inband spi loopback by user_dma executed finish\n");
+    if(value==0) {
+        printf("inband spi loopback success!\n");
+    } else {
+        printf("inband spi loopback error!\n");
+    }
+    close(fd);
+    return 0;
+}
+
+int do_dac_dma_tx_ib(void)
+{
+    const char *dev = "/dev/user_dma";
+    int fd;
+    int value;
+
+    fd = open(dev, O_RDWR);
+    if (fd < 0) {
+        perror("open");
+        return EXIT_FAILURE;
+    }
+    int ret = ioctl(fd, USER_DMA_IOCTL_DACcy_TX_IB,&value);
+    if (ret < 0) {
+        perror("ioctl");
+        close(fd);
+        return EXIT_FAILURE;
+    }
+    printf("inband dac dma-tx by user_dma executed finish\n");
+    if(value==0) {
+        printf("inband dac dma-tx success!\n");
+    } else {
+        printf("inband dac dma-tx error!\n");
+    }
+    close(fd);
+    return 0;
+}
+
+int do_dac_dma_tx_oob(void)
+{
+    const char *dev = "/dev/user_dma";
+    int fd;
+    int value;
+
+    fd = open(dev, O_RDWR);
+    if (fd < 0) {
+        perror("open");
+        return EXIT_FAILURE;
+    }
+    int ret = ioctl(fd, USER_DMA_IOCTL_DACcy_TX_OOB,&value);
+    if (ret < 0) {
+        perror("ioctl");
+        close(fd);
+        return EXIT_FAILURE;
+    }
+    printf("oob dac dma-tx by user_dma executed finish\n");
+    if(value==0) {
+        printf("oob dac dma-tx success!\n");
+    } else {
+        printf("oob dac dma-tx error!\n");
+    }
+    close(fd);
+    return 0;
+}
+
+int do_dac_dma_sg_ib(void)
+{
+    const char *dev = "/dev/user_dma";
+    int fd;
+    int value;
+
+    fd = open(dev, O_RDWR);
+    if (fd < 0) {
+        perror("open");
+        return EXIT_FAILURE;
+    }
+    int ret = ioctl(fd, USER_DMA_IOCTL_DACsg_TX_IB,&value);
+    if (ret < 0) {
+        perror("ioctl");
+        close(fd);
+        return EXIT_FAILURE;
+    }
+    printf("ib dac dma-sg by user_dma executed finish\n");
+    if(value==0) {
+        printf("ib dac dma-sg success!\n");
+    } else {
+        printf("ib dac dma-sg error!\n");
+    }
+    close(fd);
+    return 0;
+}
+
+int do_dac_dma_sg_oob(void)
+{
+    const char *dev = "/dev/user_dma";
+    int fd;
+    int value;
+
+    fd = open(dev, O_RDWR);
+    if (fd < 0) {
+        perror("open");
+        return EXIT_FAILURE;
+    }
+    int ret = ioctl(fd, USER_DMA_IOCTL_DACsg_TX_OOB,&value);
+    if (ret < 0) {
+        perror("ioctl");
+        close(fd);
+        return EXIT_FAILURE;
+    }
+    printf("oob dac dma-sg by user_dma executed finish\n");
+    if(value==0) {
+        printf("oob dac dma-sg success!\n");
+    } else {
+        printf("oob dac dma-sg error!\n");
+    }
+    close(fd);
+    return 0;
+}
+
+int do_dac_dma_sg_mix(void)
+{
+    const char *dev = "/dev/user_dma";
+    int fd;
+    int value;
+
+    fd = open(dev, O_RDWR);
+    if (fd < 0) {
+        perror("open");
+        return EXIT_FAILURE;
+    }
+    int ret = ioctl(fd, USER_DMA_IOCTL_DACsg_TX_MIX,&value);
+    if (ret < 0) {
+        perror("ioctl");
+        close(fd);
+        return EXIT_FAILURE;
+    }
+    printf("oob dac dma-sg by user_dma executed finish\n");
+    if(value==0) {
+        printf("oob dac dma-sg success!\n");
+    } else {
+        printf("oob dac dma-sg error!\n");
+    }
+    close(fd);
+    return 0;
+}
+
+
+const struct Cmd_element CMD_TABLE[] ={
+    {"dw_mem_ib"      , CMD_DW_MEMCPY_IB_TEST, do_dw_memcpy_ib_test  , "test inband memcpy by dw-dmac\n"},
+    {"spi_driver_loop", CMD_SPILOOP_TEST     , do_spi_loop_test      , "test inband spi loopback using pl022-spi driver\n"},
+    {"spi_userdma_ib" , CMD_SPI_USERDMA_IB   , do_spi_userdma_ib_test, "test inband spi loopback using user_dma\n"},
+    {"dac_cy_ib"      , CMD_DAC_CY_IB        , do_dac_dma_tx_ib      , "test inband cyclic mem2dev dma using dw-dmac and dac device\n"},
+    {"dac_cy_oob"     , CMD_DAC_CY_OOB       , do_dac_dma_tx_oob     , "test oob cyclic mem2dev dma using dw-dmac and dac device\n"},
+    {"dac_sg_ib"      , CMD_DAC_SG_IB        , do_dac_dma_sg_ib      , "test inband sg mem2dev dma using dw-dmac and dac device\n"},
+    {"dac_sg_oob"     , CMD_DAC_SG_OOB       , do_dac_dma_sg_oob     , "test oob sg mem2dev dma using dw-dmac and dac device\n"},
+    {"dac_sg_mix"     , CMD_DAC_SG_MIX       , do_dac_dma_sg_mix     , "test the trigger sequence of mixed ib/oob descs\n"}
+};
+
+void print_help(void)
+{
+    printf("|%-20s| %s","INSTRUCTIONS","DESCRIBES\n");
+    for(int i=0;i