diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 2b5f90913d26..51f4ff3a4174 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -503,6 +503,8 @@ config ARM64_ERRATUM_1024718
 config ARM64_ERRATUM_1188873
     bool "Cortex-A76: MRC read following MRRC read of specific Generic Timer in AArch32 might give incorrect result"
     default y
+    select ARM_ARCH_TIMER_OOL_WORKAROUND
+    depends on ARM_ARCH_TIMER
     help
       This option adds work arounds for ARM Cortex-A76 erratum 1188873
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index bc6e3ea8fb1e..751f687510e8 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -53,6 +53,9 @@ ifeq ($(cc-name),clang)
 KBUILD_CFLAGS += -mno-implicit-float
 else
 KBUILD_CFLAGS += -mgeneral-regs-only
+ifeq ($(CONFIG_ARCH_LITO), y)
+KBUILD_CFLAGS += -mcpu=cortex-a76.cortex-a55
+endif
 endif
 KBUILD_CFLAGS += $(lseinstr) $(brokengasinst) $(compat_vdso)
 KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
diff --git a/arch/arm64/boot/dts/google/lito-redbull-memory.dtsi b/arch/arm64/boot/dts/google/lito-redbull-memory.dtsi
index 35d35d3b17f1..ed153f2e8fed 100644
--- a/arch/arm64/boot/dts/google/lito-redbull-memory.dtsi
+++ b/arch/arm64/boot/dts/google/lito-redbull-memory.dtsi
@@ -33,7 +33,6 @@
  * pil_camera_mem:  0x86000000 0x500000
  * pil_npu_mem:     0x86500000 0x500000
  * pil_video_mem:   0x86a00000 0x500000
- * pil_cvp_mem:     0x86f00000 0x500000
  * pil_cdsp_mem:    0x87400000 0x1e00000
  * pil_adsp_mem:    0x89200000 0x2800000
  * pil_wlan_fw_mem: 0x8ba00000 0x200000
diff --git a/arch/arm64/boot/dts/vendor/qcom/lito-gpu.dtsi b/arch/arm64/boot/dts/vendor/qcom/lito-gpu.dtsi
index 77d0be6a0fcf..ba9a7fcdd554 100644
--- a/arch/arm64/boot/dts/vendor/qcom/lito-gpu.dtsi
+++ b/arch/arm64/boot/dts/vendor/qcom/lito-gpu.dtsi
@@ -80,7 +80,7 @@
     qcom,initial-pwrlevel = <3>;
 
-    qcom,idle-timeout = <80>; /* msecs */
+    qcom,idle-timeout = <58>; /* msecs */
 
     qcom,highest-bank-bit = <14>;
diff --git a/arch/arm64/boot/dts/vendor/qcom/lito-sde.dtsi b/arch/arm64/boot/dts/vendor/qcom/lito-sde.dtsi
index 7e761d30b9e6..0f7cca3a1f21 100644
--- a/arch/arm64/boot/dts/vendor/qcom/lito-sde.dtsi
+++ b/arch/arm64/boot/dts/vendor/qcom/lito-sde.dtsi
@@ -658,6 +658,7 @@
         00 00 0a 0a 00 00 8a 8a];
     qcom,platform-regulator-settings = [1d 1d 1d 1d 1d];
+    qcom,panel-allow-phy-poweroff;
     qcom,phy-supply-entries {
         #address-cells = <1>;
         #size-cells = <0>;
diff --git a/arch/arm64/boot/dts/vendor/qcom/lito.dtsi b/arch/arm64/boot/dts/vendor/qcom/lito.dtsi
index 24b013a4b166..01f9cb323bae 100644
--- a/arch/arm64/boot/dts/vendor/qcom/lito.dtsi
+++ b/arch/arm64/boot/dts/vendor/qcom/lito.dtsi
@@ -487,18 +487,6 @@
             label = "dfps_data_region";
         };
 
-        disp_rdump_memory: disp_rdump_region@0xa0000000 {
-            reg = <0x0 0xA0000000 0x0 0x02300000>;
-            label = "disp_rdump_region";
-        };
-
-        dump_mem: mem_dump_region {
-            compatible = "shared-dma-pool";
-            alloc-ranges = <0x0 0x00000000 0x0 0xffffffff>;
-            reusable;
-            size = <0 0x2800000>;
-        };
-
         /* global autoconfigured region for contiguous allocations */
         linux,cma {
             compatible = "shared-dma-pool";
@@ -512,7 +500,7 @@
     };
 
     chosen {
-        bootargs = "rcupdate.rcu_expedited=1 rcu_nocbs=0-7";
+        bootargs = "rcu_nocbs=0-7 noirqdebug";
     };
 
     soc: soc { };
@@ -2142,7 +2130,7 @@
     mem_dump {
         compatible = "qcom,mem-dump";
-        memory-region = <&dump_mem>;
+        status = "disabled";
 
         c0_context {
             qcom,dump-size = <0x800>;
diff --git a/arch/arm64/configs/redbull_defconfig b/arch/arm64/configs/redbull_defconfig
index c77075b2ca56..d63f0f7cbd93 100644
--- a/arch/arm64/configs/redbull_defconfig
+++ b/arch/arm64/configs/redbull_defconfig
@@ -1,3 +1,5 @@
+CONFIG_LOCALVERSION="-minimalistic_red"
+CONFIG_LOCALVERSION_AUTO=y
 CONFIG_KERNEL_LZ4=y
 CONFIG_AUDIT=y
 CONFIG_NO_HZ=y
@@ -45,15 +47,18 @@ CONFIG_SLAB_FREELIST_HARDENED=y
 CONFIG_PROFILING=y
 # CONFIG_ZONE_DMA32 is not set
 CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_LITO=y
 CONFIG_BOARD_BRAMBLE=y
 CONFIG_BOARD_BARBET=y
 CONFIG_BOARD_REDFIN=y
 CONFIG_PCI=y
 CONFIG_SCHED_MC=y
 CONFIG_NR_CPUS=32
-CONFIG_HZ_300=y
+CONFIG_HZ_100=y
+CONFIG_HZ=100
+# CONFIG_HZ_300 is not set
 CONFIG_SECCOMP=y
-CONFIG_PARAVIRT=y
+# CONFIG_PARAVIRT is not set
 # CONFIG_UNMAP_KERNEL_AT_EL0 is not set
 CONFIG_ARM64_SSBD=y
 CONFIG_PRINT_VMEMLAYOUT=y
@@ -68,11 +73,13 @@ CONFIG_RANDOMIZE_BASE=y
 CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
 CONFIG_BUILD_ARM64_DT_OVERLAY=y
 CONFIG_COMPAT=y
+CONFIG_PM=y
 CONFIG_PM_WAKELOCKS=y
 CONFIG_PM_WAKELOCKS_LIMIT=0
 # CONFIG_PM_WAKELOCKS_GC is not set
-CONFIG_PM_DEBUG=y
+# CONFIG_PM_DEBUG is not set
 CONFIG_PM_SLEEP_MONITOR=y
+CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y
 CONFIG_ENERGY_MODEL=y
 CONFIG_CPU_IDLE=y
 CONFIG_ARM_CPUIDLE=y
@@ -92,8 +99,8 @@ CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
 CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
 CONFIG_JUMP_LABEL=y
 # CONFIG_THINLTO is not set
-CONFIG_LTO_CLANG=y
-CONFIG_CFI_CLANG=y
+# CONFIG_LTO_CLANG is not set
+# CONFIG_CFI_CLANG is not set
 CONFIG_SHADOW_CALL_STACK=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -279,9 +286,9 @@ CONFIG_REGMAP_WCD_IRQ=y
 CONFIG_DMA_CMA=y
 CONFIG_OF_BATTERYDATA=m
 CONFIG_ZRAM=y
-CONFIG_ZRAM_WRITEBACK=y
+# CONFIG_ZRAM_WRITEBACK is not set
 CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_LOOP_MIN_COUNT=16
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=0
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=8192
 CONFIG_HDCP_QSEECOM=m
@@ -341,12 +348,7 @@ CONFIG_INPUT_JOYSTICK=y
 CONFIG_JOYSTICK_XPAD=y
 CONFIG_JOYSTICK_XPAD_FF=y
 CONFIG_JOYSTICK_XPAD_LEDS=y
-CONFIG_INPUT_TABLET=y
-CONFIG_TABLET_USB_ACECAD=y
-CONFIG_TABLET_USB_AIPTEK=y
-CONFIG_TABLET_USB_GTCO=y
-CONFIG_TABLET_USB_HANWANG=y
-CONFIG_TABLET_USB_KBTAB=y
+# CONFIG_INPUT_TABLET is not set
 CONFIG_INPUT_TOUCHSCREEN=y
 CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v27=y
 CONFIG_TOUCHSCREEN_TBN=m
@@ -387,8 +389,8 @@ CONFIG_PINCTRL_QCOM_SPMI_PMIC=m
 CONFIG_PINCTRL_LITO=m
 CONFIG_GPIO_SYSFS=y
 CONFIG_POWER_RESET_QCOM=m
-CONFIG_POWER_RESET_XGENE=y
-CONFIG_POWER_RESET_SYSCON=y
+# CONFIG_POWER_RESET_XGENE is not set
+# CONFIG_POWER_RESET_SYSCON is not set
 CONFIG_DEBUG_REBOOT=y
 CONFIG_QPNP_PMIC_VOTER=m
 CONFIG_QPNP_SMB5=m
@@ -464,12 +466,12 @@ CONFIG_SND_SOC_RT5514_SPI=m
 CONFIG_SND_SOC_CODEC_DETECT=m
 CONFIG_UHID=y
 CONFIG_HID_APPLE=y
-CONFIG_HID_ELECOM=y
-CONFIG_HID_MAGICMOUSE=y
+# CONFIG_HID_ELECOM is not set
+# CONFIG_HID_MAGICMOUSE is not set
 CONFIG_HID_MICROSOFT=y
-CONFIG_HID_MULTITOUCH=y
+# CONFIG_HID_MULTITOUCH is not set
 CONFIG_HID_NINTENDO=y
-CONFIG_HID_PLANTRONICS=y
+# CONFIG_HID_PLANTRONICS is not set
 CONFIG_HID_PLAYSTATION=y
 CONFIG_PLAYSTATION_FF=y
 CONFIG_HID_SONY=y
@@ -487,8 +489,8 @@ CONFIG_USB_DWC3=m
 CONFIG_USB_DWC3_MSM=m
 CONFIG_USB_ISP1760=y
 CONFIG_USB_ISP1760_HOST_ROLE=y
-CONFIG_USB_EHSET_TEST_FIXTURE=y
-CONFIG_USB_LINK_LAYER_TEST=y
+# CONFIG_USB_EHSET_TEST_FIXTURE is not set
+# CONFIG_USB_LINK_LAYER_TEST is not set
 CONFIG_NOP_USB_XCEIV=m
 CONFIG_USB_MSM_SSPHY_QMP=m
 CONFIG_MSM_HSUSB_PHY=m
@@ -511,6 +513,7 @@ CONFIG_USB_CONFIGFS_F_QDSS=m
 CONFIG_USB_CONFIGFS_F_GSI=m
 CONFIG_USB_CONFIGFS_F_MTP=m
 CONFIG_USB_CONFIGFS_F_PTP=m
+CONFIG_USB_CONFIGFS_F_UVC=y
 CONFIG_TYPEC=y
 CONFIG_TYPEC_TCPM=m
 CONFIG_USB_PD_ENGINE=m
@@ -577,7 +580,7 @@ CONFIG_QCOM_QMI_POWER_COLLAPSE=m
 CONFIG_QCOM_RPMH=m
 CONFIG_QCOM_SMEM=m
 CONFIG_QCOM_EARLY_RANDOM=m
-CONFIG_QCOM_MEMORY_DUMP_V2=m
+# CONFIG_QCOM_MEMORY_DUMP_V2 is not set
 CONFIG_QCOM_SMP2P=m
 CONFIG_SETUP_SSR_NOTIF_TIMEOUTS=y
 CONFIG_SSR_SYSMON_NOTIF_TIMEOUT=20000
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index 79a9aa27957e..2d3bddcaf47c 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -101,14 +101,7 @@ extern void secondary_entry(void);
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 
-#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
 extern void arch_send_wakeup_ipi_mask(const struct cpumask *mask);
-#else
-static inline void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
-{
-    BUILD_BUG();
-}
-#endif
 
 extern int __cpu_disable(void);
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 601c89363ac0..8ff61f50f191 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -333,7 +333,7 @@ static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info)
         set_bit(ICACHEF_ALIASING, &__icache_flags);
     }
 
-    pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip], cpu);
+    pr_debug("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip], cpu);
 }
 
 static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index cd0638ed1113..2474328b9098 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -98,7 +98,7 @@ static int cpu_psci_cpu_kill(unsigned int cpu)
     do {
         err = psci_ops.affinity_info(cpu_logical_map(cpu), 0);
         if (err == PSCI_0_2_AFFINITY_LEVEL_OFF) {
-            pr_info("CPU%d killed (polled %d ms)\n", cpu,
+            pr_debug("CPU%d killed (polled %d ms)\n", cpu,
                 jiffies_to_msecs(jiffies - start));
             return 0;
         }
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index d2075926edd2..b638de6e31d8 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -245,7 +245,7 @@ asmlinkage notrace void secondary_start_kernel(void)
      * the CPU migration code to notice that the CPU is online
      * before we continue.
      */
-    pr_info("CPU%u: Booted secondary processor 0x%010lx [0x%08x]\n",
+    pr_debug("CPU%u: Booted secondary processor 0x%010lx [0x%08x]\n",
         cpu, (unsigned long)mpidr, read_cpuid_id());
     set_cpu_psci_function_id(cpu, 0);
@@ -335,7 +335,7 @@ void __cpu_die(unsigned int cpu)
         pr_crit("CPU%u: cpu didn't die\n", cpu);
         return;
     }
-    pr_info("CPU%u: shutdown\n", cpu);
+    pr_debug("CPU%u: shutdown\n", cpu);
 
     /*
     * Now that the dying CPU is beyond the point of no return w.r.t.
@@ -833,12 +833,10 @@ void arch_send_call_function_single_ipi(int cpu)
     smp_cross_call_common(cpumask_of(cpu), IPI_CALL_FUNC);
 }
 
-#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
 void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
 {
     smp_cross_call_common(mask, IPI_WAKEUP);
 }
-#endif
 
 #ifdef CONFIG_IRQ_WORK
 void arch_irq_work_raise(void)
@@ -959,13 +957,8 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
         break;
 #endif
 
-#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
     case IPI_WAKEUP:
-        WARN_ONCE(!acpi_parking_protocol_valid(cpu),
-              "CPU%u: Wake-up IPI outside the ACPI parking protocol\n",
-              cpu);
         break;
-#endif
 
     default:
         pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
diff --git a/block/blk.h b/block/blk.h
index 1a5b67b57e6b..348cb240c0ee 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -308,9 +308,7 @@ extern int blk_update_nr_requests(struct request_queue *, unsigned int);
  */
 static inline bool blk_do_io_stat(struct request *rq)
 {
-    return rq->rq_disk &&
-           (rq->rq_flags & RQF_IO_STAT) &&
-           !blk_rq_is_passthrough(rq);
+    return false;
 }
 
 static inline void req_set_nomerge(struct request_queue *q, struct request *req)
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 6c2392fadf57..155931376117 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -128,8 +128,7 @@ enum {
     BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
     BINDER_DEBUG_SPINLOCKS = 1U << 14,
 };
-static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
-    BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
+static uint32_t binder_debug_mask = 0;
 module_param_named(debug_mask, binder_debug_mask, uint, 0644);
 
 char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
@@ -5407,7 +5406,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
     thread->looper_need_return = false;
     wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
     if (ret && ret != -EINTR)
-        pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
+        pr_debug("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
 err_unlocked:
     trace_binder_ioctl_done(ret);
     return ret;
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 3dcf55786cdf..77bad82d9d8c 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -44,7 +44,7 @@ enum {
     BINDER_DEBUG_BUFFER_ALLOC = 1U << 2,
     BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 3,
 };
-static uint32_t binder_alloc_debug_mask = BINDER_DEBUG_USER_ERROR;
+static uint32_t binder_alloc_debug_mask = 0;
 
 module_param_named(debug_mask, binder_alloc_debug_mask, uint, 0644);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index ad27a5964dd2..8c6e8a44c125 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -459,7 +459,7 @@ struct device_link *device_link_add(struct device *consumer,
      */
     device_reorder_to_tail(consumer, NULL);
 
-    dev_info(consumer, "Linked as a consumer to %s\n", dev_name(supplier));
+    dev_dbg(consumer, "Linked as a consumer to %s\n", dev_name(supplier));
 
 out:
     device_pm_unlock();
@@ -560,7 +560,7 @@ static void __device_link_del(struct kref *kref)
 {
     struct device_link *link = container_of(kref, struct device_link, kref);
 
-    dev_info(link->consumer, "Dropping the link to %s\n",
+    dev_dbg(link->consumer, "Dropping the link to %s\n",
          dev_name(link->supplier));
 
     if (link->flags & DL_FLAG_PM_RUNTIME)
@@ -575,7 +575,7 @@ static void __device_link_del(struct kref *kref)
 {
     struct device_link *link = container_of(kref, struct device_link,
                        kref);
 
-    dev_info(link->consumer, "Dropping the link to %s\n",
+    dev_dbg(link->consumer, "Dropping the link to %s\n",
          dev_name(link->supplier));
 
     if (link->flags & DL_FLAG_PM_RUNTIME)
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 95d39d1414de..a528a42cc97a 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -31,7 +31,6 @@
 #include
 #include
 #include
-#include <linux/cpuidle.h>
 #include
 #include
@@ -798,7 +797,6 @@ void dpm_noirq_end(void)
 {
     resume_device_irqs();
     device_wakeup_disarm_wake_irqs();
-    cpuidle_resume();
 }
 
 /**
@@ -1420,7 +1418,6 @@ static int device_suspend_noirq(struct device *dev)
 
 void dpm_noirq_begin(void)
 {
-    cpuidle_pause();
     device_wakeup_arm_wake_irqs();
     suspend_device_irqs();
 }
diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
index 4ed0a78fdc09..4447860aee50 100644
--- a/drivers/block/zram/zcomp.c
+++ b/drivers/block/zram/zcomp.c
@@ -40,19 +40,16 @@ static void zcomp_strm_free(struct zcomp_strm *zstrm)
     if (!IS_ERR_OR_NULL(zstrm->tfm))
         crypto_free_comp(zstrm->tfm);
     free_pages((unsigned long)zstrm->buffer, 1);
-    kfree(zstrm);
+    zstrm->tfm = NULL;
+    zstrm->buffer = NULL;
 }
 
 /*
- * allocate new zcomp_strm structure with ->tfm initialized by
- * backend, return NULL on error
+ * Initialize zcomp_strm structure with ->tfm initialized by backend, and
+ * ->buffer. Return a negative value on error.
  */
-static struct zcomp_strm *zcomp_strm_alloc(struct zcomp *comp)
+static int zcomp_strm_init(struct zcomp_strm *zstrm, struct zcomp *comp)
 {
-    struct zcomp_strm *zstrm = kmalloc(sizeof(*zstrm), GFP_KERNEL);
-
-    if (!zstrm)
-        return NULL;
-
     zstrm->tfm = crypto_alloc_comp(comp->name, 0, 0);
     /*
     * allocate 2 pages. 1 for compressed data, plus 1 extra for the
@@ -61,9 +58,9 @@ static struct zcomp_strm *zcomp_strm_alloc(struct zcomp *comp)
     zstrm->buffer = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
     if (IS_ERR_OR_NULL(zstrm->tfm) || !zstrm->buffer) {
         zcomp_strm_free(zstrm);
-        zstrm = NULL;
+        return -ENOMEM;
     }
-    return zstrm;
+    return 0;
 }
 
 bool zcomp_available_algorithm(const char *comp)
@@ -116,7 +113,7 @@ ssize_t zcomp_available_show(const char *comp, char *buf)
 
 struct zcomp_strm *zcomp_stream_get(struct zcomp *comp)
 {
-    return *get_cpu_ptr(comp->stream);
+    return get_cpu_ptr(comp->stream);
 }
 
 void zcomp_stream_put(struct zcomp *comp)
@@ -162,17 +159,13 @@ int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
 {
     struct zcomp *comp = hlist_entry(node, struct zcomp, node);
     struct zcomp_strm *zstrm;
+    int ret;
 
-    if (WARN_ON(*per_cpu_ptr(comp->stream, cpu)))
-        return 0;
-
-    zstrm = zcomp_strm_alloc(comp);
-    if (IS_ERR_OR_NULL(zstrm)) {
+    zstrm = per_cpu_ptr(comp->stream, cpu);
+    ret = zcomp_strm_init(zstrm, comp);
+    if (ret)
         pr_err("Can't allocate a compression stream\n");
-        return -ENOMEM;
-    }
-    *per_cpu_ptr(comp->stream, cpu) = zstrm;
-    return 0;
+    return ret;
 }
 
 int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node)
@@ -180,10 +173,8 @@ int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node)
     struct zcomp *comp = hlist_entry(node, struct zcomp, node);
     struct zcomp_strm *zstrm;
 
-    zstrm = *per_cpu_ptr(comp->stream, cpu);
-    if (!IS_ERR_OR_NULL(zstrm))
-        zcomp_strm_free(zstrm);
-    *per_cpu_ptr(comp->stream, cpu) = NULL;
+    zstrm = per_cpu_ptr(comp->stream, cpu);
+    zcomp_strm_free(zstrm);
     return 0;
 }
 
@@ -191,7 +182,7 @@ static int zcomp_init(struct zcomp *comp)
 {
     int ret;
 
-    comp->stream = alloc_percpu(struct zcomp_strm *);
+    comp->stream = alloc_percpu(struct zcomp_strm);
     if (!comp->stream)
         return -ENOMEM;
diff --git a/drivers/block/zram/zcomp.h b/drivers/block/zram/zcomp.h
index 41c1002a7d7d..9e94095ce000 100644
--- a/drivers/block/zram/zcomp.h
+++ b/drivers/block/zram/zcomp.h
@@ -18,7 +18,7 @@ struct zcomp_strm {
 
 /* dynamic per-device compression frontend */
 struct zcomp {
-    struct zcomp_strm * __percpu *stream;
+    struct zcomp_strm __percpu *stream;
     const char *name;
     struct hlist_node node;
 };
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 520305a68d8a..44bd65e1d5a2 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -207,14 +207,17 @@ static inline void zram_fill_page(void *ptr, unsigned long len,
 
 static bool page_same_filled(void *ptr, unsigned long *element)
 {
-    unsigned int pos;
     unsigned long *page;
     unsigned long val;
+    unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1;
 
     page = (unsigned long *)ptr;
     val = page[0];
 
-    for (pos = 1; pos < PAGE_SIZE / sizeof(*page); pos++) {
+    if (val != page[last_pos])
+        return false;
+
+    for (pos = 1; pos < last_pos; pos++) {
         if (val != page[pos])
             return false;
     }
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 219704cd87eb..db91c97437b9 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -38,6 +38,27 @@ static int enabled_devices;
 static int off __read_mostly;
 static int initialized __read_mostly;
 
+#ifdef CONFIG_SMP
+static atomic_t idled = ATOMIC_INIT(0);
+
+#if NR_CPUS > 32
+#error idled CPU mask not big enough for NR_CPUS
+#endif
+
+static void cpuidle_set_idle_cpu(unsigned int cpu)
+{
+    atomic_or(BIT(cpu), &idled);
+}
+
+static void cpuidle_clear_idle_cpu(unsigned int cpu)
+{
+    atomic_andnot(BIT(cpu), &idled);
+}
+#else
+static inline void cpuidle_set_idle_cpu(unsigned int cpu) { }
+static inline void cpuidle_clear_idle_cpu(unsigned int cpu) { }
+#endif
+
 int cpuidle_disabled(void)
 {
     return off;
@@ -230,7 +251,9 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
     time_start = ns_to_ktime(local_clock());
 
     stop_critical_timings();
+    cpuidle_set_idle_cpu(dev->cpu);
     entered_state = target_state->enter(dev, drv, index);
+    cpuidle_clear_idle_cpu(dev->cpu);
     start_critical_timings();
 
     sched_clock_idle_wakeup_event();
@@ -656,27 +679,6 @@ int cpuidle_register(struct cpuidle_driver *drv,
 EXPORT_SYMBOL_GPL(cpuidle_register);
 
 #ifdef CONFIG_SMP
-
-static void wake_up_idle_cpus(void *v)
-{
-    int cpu;
-    struct cpumask cpus;
-
-    preempt_disable();
-    if (v) {
-        cpumask_andnot(&cpus, v, cpu_isolated_mask);
-        cpumask_and(&cpus, &cpus, cpu_online_mask);
-    } else
-        cpumask_andnot(&cpus, cpu_online_mask, cpu_isolated_mask);
-
-    for_each_cpu(cpu, &cpus) {
-        if (cpu == smp_processor_id())
-            continue;
-        wake_up_if_idle(cpu);
-    }
-    preempt_enable();
-}
-
 /*
  * This function gets called when a part of the kernel has a new latency
  * requirement. This means we need to get only those processors out of their
@@ -686,7 +688,13 @@ static void wake_up_idle_cpus(void *v)
 static int cpuidle_latency_notify(struct notifier_block *b,
         unsigned long l, void *v)
 {
-    wake_up_idle_cpus(v);
+    unsigned long cpus = atomic_read(&idled) & *cpumask_bits(to_cpumask(v));
+
+    /* Use READ_ONCE to get the isolated mask outside cpu_add_remove_lock */
+    cpus &= ~READ_ONCE(*cpumask_bits(cpu_isolated_mask));
+    if (cpus)
+        arch_send_wakeup_ipi_mask(to_cpumask(&cpus));
+
     return NOTIFY_OK;
 }
diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c
index b28f0922280b..95ff56273934 100644
--- a/drivers/cpuidle/lpm-levels.c
+++ b/drivers/cpuidle/lpm-levels.c
@@ -34,7 +34,6 @@
 #include
 #include
 #include
-#include <soc/qcom/minidump.h>
 #include
 #include
 #include
@@ -49,30 +48,6 @@
 #define PSCI_POWER_STATE(reset) (reset << 30)
 #define PSCI_AFFINITY_LEVEL(lvl) ((lvl & 0x3) << 24)
 
-enum {
-    MSM_LPM_LVL_DBG_SUSPEND_LIMITS = BIT(0),
-    MSM_LPM_LVL_DBG_IDLE_LIMITS = BIT(1),
-};
-
-enum debug_event {
-    CPU_ENTER,
-    CPU_EXIT,
-    CLUSTER_ENTER,
-    CLUSTER_EXIT,
-    CPU_HP_STARTING,
-    CPU_HP_DYING,
-};
-
-struct lpm_debug {
-    u64 time;
-    enum debug_event evt;
-    int cpu;
-    uint32_t arg1;
-    uint32_t arg2;
-    uint32_t arg3;
-    uint32_t arg4;
-};
-
 static struct system_pm_ops *sys_pm_ops;
 
@@ -109,9 +84,6 @@ static bool suspend_in_progress;
 static struct hrtimer lpm_hrtimer;
 static DEFINE_PER_CPU(struct hrtimer, histtimer);
 static DEFINE_PER_CPU(struct hrtimer, biastimer);
-static struct lpm_debug *lpm_debug;
-static phys_addr_t lpm_debug_phys;
-static const int num_dbg_elements = 0x100;
 
 static void cluster_unprepare(struct lpm_cluster *cluster,
         const struct cpumask *cpu, int child_idx, bool from_idle,
@@ -284,38 +256,10 @@ int lpm_get_latency(struct latency_level *level, uint32_t *latency)
 }
 EXPORT_SYMBOL(lpm_get_latency);
 
-static void update_debug_pc_event(enum debug_event event, uint32_t arg1,
-        uint32_t arg2, uint32_t arg3, uint32_t arg4)
-{
-    struct lpm_debug *dbg;
-    int idx;
-    static DEFINE_SPINLOCK(debug_lock);
-    static int pc_event_index;
-
-    if (!lpm_debug)
-        return;
-
-    spin_lock(&debug_lock);
-    idx = pc_event_index++;
-    dbg = &lpm_debug[idx & (num_dbg_elements - 1)];
-
-    dbg->evt = event;
-    dbg->time = arch_counter_get_cntvct();
-    dbg->cpu = raw_smp_processor_id();
-    dbg->arg1 = arg1;
-    dbg->arg2 = arg2;
-    dbg->arg3 = arg3;
-    dbg->arg4 = arg4;
-    spin_unlock(&debug_lock);
-}
-
 static int lpm_dying_cpu(unsigned int cpu)
 {
     struct lpm_cluster *cluster = per_cpu(cpu_lpm, cpu)->parent;
 
-    update_debug_pc_event(CPU_HP_DYING, cpu,
-            cluster->num_children_in_sync.bits[0],
-            cluster->child_cpus.bits[0], false);
     cluster_prepare(cluster, get_cpu_mask(cpu), NR_LPM_LEVELS, false, 0);
     return 0;
 }
@@ -324,9 +268,6 @@ static int lpm_starting_cpu(unsigned int cpu)
 {
     struct lpm_cluster *cluster = per_cpu(cpu_lpm, cpu)->parent;
 
-    update_debug_pc_event(CPU_HP_STARTING, cpu,
-            cluster->num_children_in_sync.bits[0],
-            cluster->child_cpus.bits[0], false);
     cluster_unprepare(cluster, get_cpu_mask(cpu), NR_LPM_LEVELS,
             false, 0, true);
     return 0;
@@ -721,7 +662,7 @@ static int cpu_power_select(struct cpuidle_device *dev,
         min_residency = pwr_params->min_residency;
         max_residency = pwr_params->max_residency;
 
-        if (latency_us < lvl_latency_us)
+        if (latency_us <= lvl_latency_us)
             break;
 
         calculate_next_wakeup(&next_wakeup_us, next_event_us,
@@ -1058,7 +999,7 @@ static int cluster_select(struct lpm_cluster *cluster, bool from_idle,
                 &level->num_cpu_votes))
             continue;
 
-        if (from_idle && latency_us < pwr_params->exit_latency)
+        if (from_idle && latency_us <= pwr_params->exit_latency)
             break;
 
         if (sleep_us < (pwr_params->exit_latency +
@@ -1109,9 +1050,6 @@ static int cluster_configure(struct lpm_cluster *cluster, int idx,
         return -EPERM;
 
     if (idx != cluster->default_level) {
-        update_debug_pc_event(CLUSTER_ENTER, idx,
-            cluster->num_children_in_sync.bits[0],
-            cluster->child_cpus.bits[0], from_idle);
         trace_cluster_enter(cluster->cluster_name, idx,
             cluster->num_children_in_sync.bits[0],
             cluster->child_cpus.bits[0], from_idle);
@@ -1265,9 +1203,6 @@ static void cluster_unprepare(struct lpm_cluster *cluster,
     if (sys_pm_ops && sys_pm_ops->exit)
         sys_pm_ops->exit(success);
 
-    update_debug_pc_event(CLUSTER_EXIT, cluster->last_level,
-            cluster->num_children_in_sync.bits[0],
-            cluster->child_cpus.bits[0], from_idle);
     trace_cluster_exit(cluster->cluster_name, cluster->last_level,
         cluster->num_children_in_sync.bits[0],
         cluster->child_cpus.bits[0], from_idle);
@@ -1379,8 +1314,6 @@ static bool psci_enter_sleep(struct lpm_cpu *cpu, int idx, bool from_idle)
     affinity_level = PSCI_AFFINITY_LEVEL(affinity_level);
     state_id += power_state + affinity_level + cpu->levels[idx].psci_id;
 
-    update_debug_pc_event(CPU_ENTER, state_id,
-            0xdeaffeed, 0xdeaffeed, from_idle);
     stop_critical_timings();
 
 #ifdef CONFIG_DEBUG_FS
@@ -1393,8 +1326,6 @@ static bool psci_enter_sleep(struct lpm_cpu *cpu, int idx, bool from_idle)
     success = !arm_cpuidle_suspend(state_id);
     start_critical_timings();
 
-    update_debug_pc_event(CPU_EXIT, state_id,
-            success, 0xdeaffeed, from_idle);
     if (from_idle && cpu->levels[idx].use_bc_timer)
         tick_broadcast_exit();
@@ -1782,11 +1713,9 @@ static const struct platform_s2idle_ops lpm_s2idle_ops = {
 static int lpm_probe(struct platform_device *pdev)
 {
     int ret;
-    int size;
     unsigned int cpu;
     struct hrtimer *cpu_histtimer;
     struct kobject *module_kobj = NULL;
-    struct md_region md_entry;
 
     get_online_cpus();
     lpm_root_node = lpm_of_parse_cluster(pdev);
@@ -1818,10 +1747,6 @@ static int lpm_probe(struct platform_device *pdev)
 
     cluster_timer_init(lpm_root_node);
 
-    size = num_dbg_elements * sizeof(struct lpm_debug);
-    lpm_debug = dma_alloc_coherent(&pdev->dev, size,
-            &lpm_debug_phys, GFP_KERNEL);
-
     register_cluster_lpm_stats(lpm_root_node, NULL);
 
     ret = cluster_cpuidle_register(lpm_root_node);
@@ -1852,15 +1777,6 @@ static int lpm_probe(struct platform_device *pdev)
 
     set_update_ipi_history_callback(update_ipi_history);
 
-    /* Add lpm_debug to Minidump*/
-    strlcpy(md_entry.name, "KLPMDEBUG", sizeof(md_entry.name));
-    md_entry.virt_addr = (uintptr_t)lpm_debug;
-    md_entry.phys_addr = lpm_debug_phys;
-    md_entry.size = size;
-    md_entry.id = MINIDUMP_DEFAULT_ID;
-    if (msm_minidump_add_region(&md_entry) < 0)
-        pr_info("Failed to add lpm_debug in Minidump\n");
-
     return 0;
 failed:
     free_cluster_node(lpm_root_node);
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 1b76beecf0e2..cb24bf34e6ae 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -642,7 +642,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
         mutex_unlock(&devfreq->lock);
         err = set_freq_table(devfreq);
         if (err < 0)
-            goto err_out;
+            goto err_dev;
         mutex_lock(&devfreq->lock);
     }
diff --git a/drivers/devfreq/governor_msm_adreno_tz.c b/drivers/devfreq/governor_msm_adreno_tz.c
index 4028bcf2d540..15eaf4988294 100644
--- a/drivers/devfreq/governor_msm_adreno_tz.c
+++ b/drivers/devfreq/governor_msm_adreno_tz.c
@@ -14,6 +14,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -418,10 +419,14 @@ static int tz_get_target_freq(struct devfreq *devfreq, unsigned long *freq)
         priv->bin.busy_time > CEILING) {
         val = -1 * level;
     } else {
+        unsigned int refresh_rate = dsi_panel_get_refresh_rate();
         scm_data[0] = level;
         scm_data[1] = priv->bin.total_time;
-        scm_data[2] = priv->bin.busy_time;
+        if (refresh_rate > 60)
+            scm_data[2] = priv->bin.busy_time * refresh_rate / 60;
+        else
+            scm_data[2] = priv->bin.busy_time;
         scm_data[3] = context_count;
         __secure_tz_update_entry3(scm_data, sizeof(scm_data),
                     &val, sizeof(val), priv);
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index b0d2563cde5d..aa670ad2e9fb 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -124,36 +124,6 @@ struct dma_fence *sync_file_get_fence(int fd)
 }
 EXPORT_SYMBOL(sync_file_get_fence);
 
-/**
- * sync_file_get_name - get the name of the sync_file
- * @sync_file: sync_file to get the fence from
- * @buf: destination buffer to copy sync_file name into
- * @len: available size of destination buffer.
- *
- * Each sync_file may have a name assigned either by the user (when merging
- * sync_files together) or created from the fence it contains. In the latter
- * case construction of the name is deferred until use, and so requires
- * sync_file_get_name().
- *
- * Returns: a string representing the name.
- */
-char *sync_file_get_name(struct sync_file *sync_file, char *buf, int len)
-{
-    if (sync_file->user_name[0]) {
-        strlcpy(buf, sync_file->user_name, len);
-    } else {
-        struct dma_fence *fence = sync_file->fence;
-
-        snprintf(buf, len, "%s-%s%llu-%d",
-             fence->ops->get_driver_name(fence),
-             fence->ops->get_timeline_name(fence),
-             fence->context,
-             fence->seqno);
-    }
-
-    return buf;
-}
-
 static int sync_file_set_fence(struct sync_file *sync_file,
                    struct dma_fence **fences, int num_fences)
 {
@@ -216,7 +186,7 @@ static void add_fence(struct dma_fence **fences,
  * @a and @b. @a and @b remain valid, independent sync_file. Returns the
  * new merged sync_file or NULL in case of error.
  */
-static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
+static struct sync_file *sync_file_merge(struct sync_file *a,
                      struct sync_file *b)
 {
     struct sync_file *sync_file;
@@ -289,7 +259,6 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
     if (sync_file_set_fence(sync_file, fences, i) < 0)
         goto err;
 
-    strlcpy(sync_file->user_name, name, sizeof(sync_file->user_name));
     return sync_file;
 
 err:
@@ -336,11 +305,14 @@ static long sync_file_ioctl_merge(struct sync_file *sync_file,
     int err;
     struct sync_file *fence2, *fence3;
     struct sync_merge_data data;
+    size_t len;
 
     if (fd < 0)
         return fd;
 
-    if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
+    arg += offsetof(typeof(data), fd2);
+    len = sizeof(data) - offsetof(typeof(data), fd2);
+    if (copy_from_user(&data.fd2, (void __user *)arg, len)) {
         err = -EFAULT;
         goto err_put_fd;
     }
@@ -356,15 +328,14 @@ static long sync_file_ioctl_merge(struct sync_file *sync_file,
         goto err_put_fd;
     }
 
-    data.name[sizeof(data.name) - 1] = '\0';
-    fence3 = sync_file_merge(data.name, sync_file, fence2);
+    fence3 = sync_file_merge(sync_file, fence2);
     if (!fence3) {
         err = -ENOMEM;
         goto err_put_fence2;
     }
 
     data.fence = fd;
-    if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
+    if (copy_to_user((void __user *)arg, &data.fd2, len)) {
         err = -EFAULT;
         goto err_put_fence3;
     }
@@ -387,11 +358,6 @@ static long sync_file_ioctl_merge(struct sync_file *sync_file,
 static int sync_fill_fence_info(struct dma_fence *fence,
                 struct sync_fence_info *info)
 {
-    strlcpy(info->obj_name, fence->ops->get_timeline_name(fence),
-        sizeof(info->obj_name));
-    strlcpy(info->driver_name, fence->ops->get_driver_name(fence),
-        sizeof(info->driver_name));
-
     info->status = dma_fence_get_status(fence);
     while (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
            !test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags))
@@ -408,12 +374,13 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
                     unsigned long arg)
 {
     struct sync_file_info info;
-    struct sync_fence_info *fence_info = NULL;
     struct dma_fence **fences;
-    __u32 size;
-    int num_fences, ret, i;
+    size_t len, offset;
+    int num_fences, i;
 
-    if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
+    arg += offsetof(typeof(info), status);
+    len = sizeof(info) - offsetof(typeof(info), status);
+    if (copy_from_user(&info.status, (void __user *)arg, len))
         return -EFAULT;
 
     if (info.flags || info.pad)
@@ -437,35 +404,31 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
     if (info.num_fences < num_fences)
         return -EINVAL;
 
-    size = num_fences * sizeof(*fence_info);
-    fence_info = kzalloc(size, GFP_KERNEL);
-    if (!fence_info)
-        return -ENOMEM;
-
+    offset = offsetof(struct sync_fence_info, status);
     for (i = 0; i < num_fences; i++) {
-        int status = sync_fill_fence_info(fences[i], &fence_info[i]);
+        struct {
+            __s32 status;
+            __u32 flags;
+            __u64 timestamp_ns;
+        } fence_info;
+        struct sync_fence_info *finfo = (void *)&fence_info - offset;
+        int status = sync_fill_fence_info(fences[i], finfo);
+        u64 dest;
+
+        /* Don't leak kernel memory to userspace via finfo->flags */
+        finfo->flags = 0;
         info.status = info.status <= 0 ?
                  info.status : status;
-    }
-
-    if (copy_to_user(u64_to_user_ptr(info.sync_fence_info), fence_info,
-             size)) {
-        ret = -EFAULT;
-        goto out;
+        dest = info.sync_fence_info + i * sizeof(*finfo) + offset;
+        if (copy_to_user(u64_to_user_ptr(dest), &fence_info,
+                 sizeof(fence_info)))
+            return -EFAULT;
     }
 
 no_fences:
-    sync_file_get_name(sync_file, info.name, sizeof(info.name));
     info.num_fences = num_fences;
-
-    if (copy_to_user((void __user *)arg, &info, sizeof(info)))
-        ret = -EFAULT;
-    else
-        ret = 0;
-
-out:
-    kfree(fence_info);
-
-    return ret;
+    if (copy_to_user((void __user *)arg, &info.status, len))
+        return -EFAULT;
+    return 0;
 }
 
 static long sync_file_ioctl(struct file *file, unsigned int cmd,
diff --git a/drivers/gpu/msm/Kconfig b/drivers/gpu/msm/Kconfig
index 1ef49c245072..d5f1095099c0 100644
--- a/drivers/gpu/msm/Kconfig
+++ b/drivers/gpu/msm/Kconfig
@@ -11,7 +11,6 @@ config QCOM_KGSL
     select DEVFREQ_GOV_PERFORMANCE
     select DEVFREQ_GOV_QCOM_ADRENO_TZ
     select DEVFREQ_GOV_QCOM_GPUBW_MON
-    select TRACE_GPU_MEM
     help
       3D graphics driver for the Adreno family of GPUs from QTI.
       Required to use hardware accelerated OpenGL, compute and Vulkan
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 454cf4779263..8d8da5c9a41c 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -4,7 +4,6 @@
  * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 #include
-#include <linux/input.h>
 #include
 #include
 #include
@@ -29,7 +28,7 @@
 /* Include the master list of GPU cores that are supported */
 #include "adreno-gpulist.h"
 
-static void adreno_input_work(struct work_struct *work);
+static void adreno_pwr_on_work(struct work_struct *work);
 static unsigned int counter_delta(struct kgsl_device *device,
             unsigned int reg, unsigned int *counter);
 
@@ -58,8 +57,6 @@ static struct adreno_device device_3d0 = {
     .ft_policy = KGSL_FT_DEFAULT_POLICY,
     .ft_pf_policy = KGSL_FT_PAGEFAULT_DEFAULT_POLICY,
     .long_ib_detect = 1,
-    .input_work = __WORK_INITIALIZER(device_3d0.input_work,
-        adreno_input_work),
     .pwrctrl_flag = BIT(ADRENO_THROTTLING_CTRL) | BIT(ADRENO_HWCG_CTRL),
     .profile.enabled = false,
     .active_list = LIST_HEAD_INIT(device_3d0.active_list),
@@ -71,6 +68,8 @@ static struct adreno_device device_3d0 = {
         .skipsaverestore = 1,
         .usesgmem = 1,
     },
+    .pwr_on_work = __WORK_INITIALIZER(device_3d0.pwr_on_work,
+        adreno_pwr_on_work),
 };
 
 /* Ptr to array for the current set of fault detect registers */
@@ -92,9 +91,6 @@ static unsigned int adreno_ft_regs_default[] = {
 /* Nice level for the higher priority GPU start thread */
 int adreno_wake_nice = -7;
 
-/* Number of milliseconds to stay active active after a wake on touch */
-unsigned int adreno_wake_timeout = 100;
-
 void adreno_reglist_write(struct adreno_device *adreno_dev,
         const struct adreno_reglist *list, u32 count)
 {
@@ -408,152 +404,17 @@ void adreno_gmu_send_nmi(struct adreno_device *adreno_dev)
     wmb();
 }
 
-/*
- * A workqueue callback responsible for actually turning on the GPU after a
- * touch event. kgsl_pwrctrl_change_state(ACTIVE) is used without any
- * active_count protection to avoid the need to maintain state. Either
- * somebody will start using the GPU or the idle timer will fire and put the
- * GPU back into slumber.
- */
-static void adreno_input_work(struct work_struct *work)
+static void adreno_pwr_on_work(struct work_struct *work)
 {
-    struct adreno_device *adreno_dev = container_of(work,
-            struct adreno_device, input_work);
+    struct adreno_device *adreno_dev =
+        container_of(work, typeof(*adreno_dev), pwr_on_work);
     struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 
     mutex_lock(&device->mutex);
-
-    device->flags |= KGSL_FLAG_WAKE_ON_TOUCH;
-
-    /*
-     * Don't schedule adreno_start in a high priority workqueue, we are
-     * already in a workqueue which should be sufficient
-     */
     kgsl_pwrctrl_change_state(device, KGSL_STATE_ACTIVE);
-
-    /*
-     * When waking up from a touch event we want to stay active long enough
-     * for the user to send a draw command. The default idle timer timeout
-     * is shorter than we want so go ahead and push the idle timer out
-     * further for this special case
-     */
-    mod_timer(&device->idle_timer,
-        jiffies + msecs_to_jiffies(adreno_wake_timeout));
     mutex_unlock(&device->mutex);
 }
 
-/*
- * Process input events and schedule work if needed. At this point we are only
- * interested in groking EV_ABS touchscreen events
- */
-static void adreno_input_event(struct input_handle *handle, unsigned int type,
-        unsigned int code, int value)
-{
-    struct kgsl_device *device = handle->handler->private;
-    struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-
-    /* Only consider EV_ABS (touch) events */
-    if (type != EV_ABS)
-        return;
-
-    /*
-     * Don't do anything if anything hasn't been rendered since we've been
-     * here before
-     */
-    if (device->flags & KGSL_FLAG_WAKE_ON_TOUCH)
-        return;
-
-    /*
-     * If the device is in nap, kick the idle timer to make sure that we
-     * don't go into slumber before the first render. If the device is
-     * already in slumber schedule the wake.
-     */
-
-    if (device->state == KGSL_STATE_NAP) {
-        /*
-         * Set the wake on touch bit to keep from coming back here and
-         * keeping the device in nap without rendering
-         */
-        device->flags |= KGSL_FLAG_WAKE_ON_TOUCH;
-
-        mod_timer(&device->idle_timer,
-            jiffies + device->pwrctrl.interval_timeout);
-    } else if (device->state == KGSL_STATE_SLUMBER) {
-        schedule_work(&adreno_dev->input_work);
-    }
-}
-
-#ifdef CONFIG_INPUT
-static int adreno_input_connect(struct input_handler *handler,
-        struct input_dev *dev, const struct input_device_id *id)
-{
-    struct input_handle *handle;
-    int ret;
-
-    handle = kzalloc(sizeof(*handle), GFP_KERNEL);
-    if (handle == NULL)
-        return -ENOMEM;
-
-    handle->dev = dev;
-    handle->handler = handler;
-    handle->name = handler->name;
-
-    ret = input_register_handle(handle);
-    if (ret) {
-        kfree(handle);
-        return ret;
-    }
-
-    ret = input_open_device(handle);
-    if (ret) {
-        input_unregister_handle(handle);
-        kfree(handle);
-    }
-
-    return ret;
-}
-
-static void adreno_input_disconnect(struct input_handle *handle)
-{
-    input_close_device(handle);
-    input_unregister_handle(handle);
-    kfree(handle);
-}
-#else
-static int adreno_input_connect(struct input_handler *handler,
-        struct input_dev *dev, const struct input_device_id *id)
-{
-    return 0;
-}
-static void adreno_input_disconnect(struct input_handle *handle) {}
-#endif
-
-/*
- * We are only interested in EV_ABS events so only register handlers for those
- * input devices that have EV_ABS events
- */
-static const struct input_device_id adreno_input_ids[] = {
-    {
-        .flags = INPUT_DEVICE_ID_MATCH_EVBIT,
-        .evbit = { BIT_MASK(EV_ABS) },
-        /* assumption: MT_.._X & MT_.._Y are in the same long */
-        .absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
-                BIT_MASK(ABS_MT_POSITION_X) |
-                BIT_MASK(ABS_MT_POSITION_Y) },
-    },
-    { },
-};
-
-static struct input_handler adreno_input_handler = {
-    .event = adreno_input_event,
-    .connect = adreno_input_connect,
-    .disconnect = adreno_input_disconnect,
-    .name = "kgsl",
-    .id_table = adreno_input_ids,
-};
-
 /*
  * _soft_reset() - Soft reset GPU
  * @adreno_dev: Pointer to adreno device
@@ -1079,7 +940,6 @@ static void adreno_of_get_initial_pwrlevel(struct adreno_device *adreno_dev,
         init_level = 1;
 
     pwr->active_pwrlevel = init_level;
-    pwr->default_pwrlevel = init_level;
 }
 
 static void adreno_of_get_limits(struct adreno_device *adreno_dev,
@@ -1250,29 +1110,26 @@ static int adreno_of_get_power(struct adreno_device *adreno_dev,
 
     /* get pm-qos-active-latency, set it to default if not found */
     if (of_property_read_u32(node, "qcom,pm-qos-active-latency",
        &device->pwrctrl.pm_qos_active_latency))
-        device->pwrctrl.pm_qos_active_latency = 501;
+        device->pwrctrl.pm_qos_active_latency = 1000;
 
     /* get pm-qos-cpu-mask-latency, set it to default if not found */
     if (of_property_read_u32(node, "qcom,l2pc-cpu-mask-latency",
        &device->pwrctrl.pm_qos_cpu_mask_latency))
-        device->pwrctrl.pm_qos_cpu_mask_latency = 501;
+        device->pwrctrl.pm_qos_cpu_mask_latency = 1000;
 
     /* get pm-qos-wakeup-latency, set it to default if not found */
     if (of_property_read_u32(node, "qcom,pm-qos-wakeup-latency",
        &device->pwrctrl.pm_qos_wakeup_latency))
-        device->pwrctrl.pm_qos_wakeup_latency = 101;
+        device->pwrctrl.pm_qos_wakeup_latency = 100;
 
     if (of_property_read_u32(node, "qcom,idle-timeout", &timeout))
-        timeout = 80;
+        timeout = 58;
 
     device->pwrctrl.interval_timeout = msecs_to_jiffies(timeout);
 
     device->pwrctrl.bus_control = of_property_read_bool(node,
        "qcom,bus-control");
 
-    device->pwrctrl.input_disable = of_property_read_bool(node,
"qcom,disable-wake-on-touch"); - return 0; } @@ -1606,21 +1463,6 @@ static int adreno_probe(struct platform_device *pdev) dev_warn(device->dev, "Failed to get gpuhtw LLC slice descriptor %ld\n", PTR_ERR(adreno_dev->gpuhtw_llc_slice)); - -#ifdef CONFIG_INPUT - if (!device->pwrctrl.input_disable) { - adreno_input_handler.private = device; - /* - * It isn't fatal if we cannot register the input handler. Sad, - * perhaps, but not fatal - */ - if (input_register_handler(&adreno_input_handler)) { - adreno_input_handler.private = NULL; - dev_err(device->dev, - "Unable to register the input handler\n"); - } - } -#endif out: if (status) { adreno_ringbuffer_close(adreno_dev); @@ -1676,10 +1518,6 @@ static int adreno_remove(struct platform_device *pdev) /* The memory is fading */ _adreno_free_memories(adreno_dev); -#ifdef CONFIG_INPUT - if (adreno_input_handler.private) - input_unregister_handler(&adreno_input_handler); -#endif adreno_sysfs_close(adreno_dev); adreno_coresight_remove(adreno_dev); diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h index 87ac337caf12..406aa68f8fdd 100644 --- a/drivers/gpu/msm/adreno.h +++ b/drivers/gpu/msm/adreno.h @@ -421,7 +421,7 @@ enum gpu_coresight_sources { * @dispatcher: Container for adreno GPU dispatcher * @pwron_fixup: Command buffer to run a post-power collapse shader workaround * @pwron_fixup_dwords: Number of dwords in the command buffer - * @input_work: Work struct for turning on the GPU after a touch event + * @pwr_on_work: Work struct for turning on the GPU * @busy_data: Struct holding GPU VBIF busy stats * @ram_cycles_lo: Number of DDR clock cycles for the monitor session (Only * DDR channel 0 read cycles in case of GBIF) @@ -501,7 +501,7 @@ struct adreno_device { struct adreno_dispatcher dispatcher; struct kgsl_memdesc pwron_fixup; unsigned int pwron_fixup_dwords; - struct work_struct input_work; + struct work_struct pwr_on_work; struct adreno_busy_data busy_data; unsigned int ram_cycles_lo; unsigned int ram_cycles_lo_ch1_read; @@ -1048,7 +1048,6 @@ extern struct adreno_gpudev adreno_a5xx_gpudev; extern struct adreno_gpudev adreno_a6xx_gpudev; extern int adreno_wake_nice; -extern unsigned int adreno_wake_timeout; int adreno_start(struct kgsl_device *device, int priority); int adreno_soft_reset(struct kgsl_device *device); diff --git a/drivers/gpu/msm/adreno_a6xx_gmu.c b/drivers/gpu/msm/adreno_a6xx_gmu.c index 157a0682c7b8..781aebaae9d8 100644 --- a/drivers/gpu/msm/adreno_a6xx_gmu.c +++ b/drivers/gpu/msm/adreno_a6xx_gmu.c @@ -876,7 +876,7 @@ static int a6xx_gmu_gfx_rail_on(struct kgsl_device *device) { struct kgsl_pwrctrl *pwr = &device->pwrctrl; struct gmu_device *gmu = KGSL_GMU_DEVICE(device); - unsigned int perf_idx = pwr->num_pwrlevels - pwr->default_pwrlevel - 1; + unsigned int perf_idx = pwr->num_pwrlevels - 1; uint32_t default_opp = gmu->rpmh_votes.gx_votes[perf_idx]; gmu_core_regwrite(device, A6XX_GMU_BOOT_SLUMBER_OPTION, @@ -1297,8 +1297,8 @@ static int a6xx_gmu_notify_slumber(struct kgsl_device *device) struct adreno_device *adreno_dev = ADRENO_DEVICE(device); struct kgsl_pwrctrl *pwr = &device->pwrctrl; struct gmu_device *gmu = KGSL_GMU_DEVICE(device); - int bus_level = pwr->pwrlevels[pwr->default_pwrlevel].bus_freq; - int perf_idx = gmu->num_gpupwrlevels - pwr->default_pwrlevel - 1; + int bus_level = pwr->pwrlevels[pwr->num_pwrlevels - 1].bus_freq; + int perf_idx = gmu->num_gpupwrlevels - 1; int ret, state; /* Disable the power counter so that the GMU is not busy */ diff --git a/drivers/gpu/msm/adreno_debugfs.c 
diff --git a/drivers/gpu/msm/adreno_debugfs.c b/drivers/gpu/msm/adreno_debugfs.c
index 7c5c6b976dd8..7d0ddeca9452 100644
--- a/drivers/gpu/msm/adreno_debugfs.c
+++ b/drivers/gpu/msm/adreno_debugfs.c
@@ -146,15 +146,6 @@ static void sync_event_print(struct seq_file *s,
             sync_event->context->id, sync_event->timestamp);
         break;
     }
-    case KGSL_CMD_SYNCPOINT_TYPE_FENCE: {
-        struct event_fence_info *info = sync_event->priv;
-        int i;
-
-        for (i = 0; info && i < info->num_fences; i++)
-            seq_printf(s, "sync: %s", info->fences[i].name);
-        break;
-    }
     case KGSL_CMD_SYNCPOINT_TYPE_TIMELINE: {
         struct event_timeline_info *info = sync_event->priv;
         int j;
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index b6db7f848305..e59f56748f2e 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -549,12 +549,8 @@ static int sendcmd(struct adreno_device *adreno_dev,
     struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
     struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
     struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
-    struct adreno_context *drawctxt = ADRENO_CONTEXT(drawobj->context);
     struct adreno_dispatcher_drawqueue *dispatch_q =
                 ADRENO_DRAWOBJ_DISPATCH_DRAWQUEUE(drawobj);
-    struct adreno_submit_time time;
-    uint64_t secs = 0;
-    unsigned long nsecs = 0;
     int ret;
 
     mutex_lock(&device->mutex);
@@ -563,8 +559,6 @@ static int sendcmd(struct adreno_device *adreno_dev,
         return -EBUSY;
     }
 
-    memset(&time, 0x0, sizeof(time));
-
     dispatcher->inflight++;
     dispatch_q->inflight++;
 
@@ -590,7 +584,7 @@ static int sendcmd(struct adreno_device *adreno_dev,
             ADRENO_DRAWOBJ_PROFILE_COUNT;
     }
 
-    ret = adreno_ringbuffer_submitcmd(adreno_dev, cmdobj, &time);
+    ret = adreno_ringbuffer_submitcmd(adreno_dev, cmdobj, NULL);
 
     /*
     * On the first command, if the submission was successful, then read the
@@ -650,9 +644,6 @@ static int sendcmd(struct adreno_device *adreno_dev,
         return ret;
     }
 
-    secs = time.ktime;
-    nsecs = do_div(secs, 1000000000);
-
     /*
     * For the first submission in any given command queue update the
    * expected expire time - this won't actually be used / updated until
@@ -664,14 +655,8 @@ static int sendcmd(struct adreno_device *adreno_dev,
         dispatch_q->expires = jiffies +
             msecs_to_jiffies(adreno_drawobj_timeout);
 
-    trace_adreno_cmdbatch_submitted(drawobj, (int) dispatcher->inflight,
-        time.ticks, (unsigned long) secs, nsecs / 1000, drawctxt->rb,
-        adreno_get_rptr(drawctxt->rb));
-
     mutex_unlock(&device->mutex);
 
-    cmdobj->submit_ticks = time.ticks;
-
     dispatch_q->cmd_q[dispatch_q->tail] = cmdobj;
     dispatch_q->tail = (dispatch_q->tail + 1) %
         ADRENO_DISPATCH_DRAWQUEUE_SIZE;
@@ -1180,12 +1165,6 @@ static inline int _verify_cmdobj(struct kgsl_device_private *dev_priv,
             if (!_verify_ib(dev_priv,
                 &ADRENO_CONTEXT(context)->base, ib))
                 return -EINVAL;
-
-            /*
-             * Clear the wake on touch bit to indicate an IB has
-             * been submitted since the last time we set it.
-             * But only clear it when we have rendering commands.
-             */
-            device->flags &= ~KGSL_FLAG_WAKE_ON_TOUCH;
         }
 
         /* A3XX does not have support for drawobj profiling */
@@ -2407,12 +2386,6 @@ static void retire_cmdobj(struct adreno_device *adreno_dev,
             ADRENO_DRAWOBJ_RB(drawobj),
             adreno_get_rptr(drawctxt->rb),
             cmdobj->fault_recovery);
 
-    drawctxt->submit_retire_ticks[drawctxt->ticks_index] =
-        end - cmdobj->submit_ticks;
-
-    drawctxt->ticks_index = (drawctxt->ticks_index + 1) %
-        SUBMIT_RETIRE_TICKS_SIZE;
-
     kgsl_drawobj_destroy(drawobj);
 }
diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c
index c6c3a33a6745..9f6270881d85 100644
--- a/drivers/gpu/msm/adreno_drawctxt.c
+++ b/drivers/gpu/msm/adreno_drawctxt.c
@@ -40,8 +40,6 @@ void adreno_drawctxt_dump(struct kgsl_device *device,
 {
     unsigned int queue, start, retire;
     struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
-    int index, pos;
-    char buf[120];
 
     kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_QUEUED, &queue);
     kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_CONSUMED, &start);
@@ -102,25 +100,6 @@ void adreno_drawctxt_dump(struct kgsl_device *device,
     }
 
stats:
-    memset(buf, 0, sizeof(buf));
-
-    pos = 0;
-
-    for (index = 0; index < SUBMIT_RETIRE_TICKS_SIZE; index++) {
-        uint64_t msecs;
-        unsigned int usecs;
-
-        if (!drawctxt->submit_retire_ticks[index])
-            continue;
-        msecs = drawctxt->submit_retire_ticks[index] * 10;
-        usecs = do_div(msecs, 192);
-        usecs = do_div(msecs, 1000);
-        pos += scnprintf(buf + pos, sizeof(buf) - pos, "%u.%0u ",
-            (unsigned int)msecs, usecs);
-    }
-
-    dev_err(device->dev, " context[%u]: submit times: %s\n",
-        context->id, buf);
-
     spin_unlock_bh(&drawctxt->lock);
 }
diff --git a/drivers/gpu/msm/adreno_drawctxt.h b/drivers/gpu/msm/adreno_drawctxt.h
index 436c5a27b935..f2e6865629cc 100644
--- a/drivers/gpu/msm/adreno_drawctxt.h
+++ b/drivers/gpu/msm/adreno_drawctxt.h
@@ -15,7 +15,6 @@ struct adreno_context_type {
 };
 
 #define ADRENO_CONTEXT_DRAWQUEUE_SIZE 128
-#define SUBMIT_RETIRE_TICKS_SIZE 7
 
 struct kgsl_device;
 struct adreno_device;
@@ -42,10 +41,6 @@ struct kgsl_device_private;
  * @queued_timestamp: The last timestamp that was queued on this context
  * @rb: The ringbuffer in which this context submits commands.
  * @submitted_timestamp: The last timestamp that was submitted for this context
- * @submit_retire_ticks: Array to hold command obj execution times from submit
- *        to retire
- * @ticks_index: The index into submit_retire_ticks[] where the new delta will
- *        be written.
  * @active_node: Linkage for nodes in active_list
  * @active_time: Time when this context last seen
  */
@@ -72,8 +67,6 @@ struct adreno_context {
     unsigned int queued_timestamp;
     struct adreno_ringbuffer *rb;
     unsigned int submitted_timestamp;
-    uint64_t submit_retire_ticks[SUBMIT_RETIRE_TICKS_SIZE];
-    int ticks_index;
     struct list_head active_node;
     unsigned long active_time;
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index fe87e268580d..6e3cd25cc4e3 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -946,6 +946,7 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
     struct kgsl_memobj_node *ib;
     unsigned int numibs = 0;
     unsigned int *link;
+    unsigned int link_onstack[SZ_256] __aligned(sizeof(long));
     unsigned int *cmds;
     struct kgsl_context *context;
     struct adreno_context *drawctxt;
@@ -1073,10 +1074,14 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
     if (gpudev->ccu_invalidate)
         dwords += 4;
 
-    link = kvcalloc(dwords, sizeof(unsigned int), GFP_KERNEL);
-    if (!link) {
-        ret = -ENOMEM;
-        goto done;
+    if (dwords <= ARRAY_SIZE(link_onstack)) {
+        link = link_onstack;
+    } else {
+        link = kvcalloc(dwords, sizeof(unsigned int), GFP_KERNEL);
+        if (!link) {
+            ret = -ENOMEM;
+            goto done;
+        }
     }
 
     cmds = link;
@@ -1206,7 +1211,8 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
     trace_kgsl_issueibcmds(device, context->id, numibs, drawobj->timestamp,
             drawobj->flags, ret, drawctxt->type);
 
-    kvfree(link);
+    if (link != link_onstack)
+        kvfree(link);
     return ret;
 }
diff --git a/drivers/gpu/msm/adreno_sysfs.c b/drivers/gpu/msm/adreno_sysfs.c
index b701e401f73d..816537eb0c23 100644
--- a/drivers/gpu/msm/adreno_sysfs.c
+++ b/drivers/gpu/msm/adreno_sysfs.c
@@ -432,7 +432,6 @@ static ADRENO_SYSFS_BOOL(gpu_llc_slice_enable);
 static ADRENO_SYSFS_BOOL(gpuhtw_llc_slice_enable);
 
 static DEVICE_INT_ATTR(wake_nice, 0644, adreno_wake_nice);
-static DEVICE_INT_ATTR(wake_timeout, 0644, adreno_wake_timeout);
 
 static ADRENO_SYSFS_BOOL(sptp_pc);
 static ADRENO_SYSFS_BOOL(lm);
@@ -451,7 +450,6 @@ static const struct attribute *_attr_list[] = {
     &adreno_attr_ft_long_ib_detect.attr.attr,
     &adreno_attr_ft_hang_intr_status.attr.attr,
     &dev_attr_wake_nice.attr.attr,
-    &dev_attr_wake_timeout.attr.attr,
     &adreno_attr_sptp_pc.attr.attr,
     &adreno_attr_lm.attr.attr,
     &adreno_attr_preemption.attr.attr,
diff --git a/drivers/gpu/msm/adreno_trace.h b/drivers/gpu/msm/adreno_trace.h
index 07cc11b2b7d4..76a3625ce415 100644
--- a/drivers/gpu/msm/adreno_trace.h
+++ b/drivers/gpu/msm/adreno_trace.h
@@ -43,51 +43,6 @@ TRACE_EVENT(adreno_cmdbatch_queued,
     )
 );
 
-TRACE_EVENT(adreno_cmdbatch_submitted,
-    TP_PROTO(struct kgsl_drawobj *drawobj, int inflight, uint64_t ticks,
-        unsigned long secs, unsigned long usecs,
-        struct adreno_ringbuffer *rb, unsigned int rptr),
-    TP_ARGS(drawobj, inflight, ticks, secs, usecs, rb, rptr),
-    TP_STRUCT__entry(
-        __field(unsigned int, id)
-        __field(unsigned int, timestamp)
-        __field(int, inflight)
-        __field(unsigned int, flags)
-        __field(uint64_t, ticks)
-        __field(unsigned long, secs)
-        __field(unsigned long, usecs)
-        __field(int, prio)
-        __field(int, rb_id)
-        __field(unsigned int, rptr)
-        __field(unsigned int, wptr)
-        __field(int, q_inflight)
-    ),
-    TP_fast_assign(
-        __entry->id = drawobj->context->id;
-        __entry->timestamp = drawobj->timestamp;
-        __entry->inflight = inflight;
-        __entry->flags = drawobj->flags;
-        __entry->ticks = ticks;
-        __entry->secs = secs;
-        __entry->usecs = usecs;
-        __entry->prio = drawobj->context->priority;
-        __entry->rb_id = rb->id;
-        __entry->rptr = rptr;
-        __entry->wptr = rb->wptr;
-        __entry->q_inflight = rb->dispatch_q.inflight;
-    ),
-    TP_printk(
-        "ctx=%u ctx_prio=%d ts=%u inflight=%d flags=%s ticks=%lld time=%lu.%0lu rb_id=%d r/w=%x/%x, q_inflight=%d",
-        __entry->id, __entry->prio, __entry->timestamp,
-        __entry->inflight,
-        __entry->flags ? __print_flags(__entry->flags, "|",
-            KGSL_DRAWOBJ_FLAGS) : "none",
-        __entry->ticks, __entry->secs, __entry->usecs,
-        __entry->rb_id, __entry->rptr, __entry->wptr,
-        __entry->q_inflight
-    )
-);
-
 TRACE_EVENT(adreno_cmdbatch_retired,
     TP_PROTO(struct kgsl_drawobj *drawobj, int inflight,
         uint64_t start, uint64_t retire,
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index f04539f9f33e..655a135a167c 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -2524,7 +2524,7 @@ static long gpuobj_free_on_fence(struct kgsl_device_private *dev_priv,
     }
 
     handle = kgsl_sync_fence_async_wait(event.fd,
-        gpuobj_free_fence_func, entry, NULL);
+        gpuobj_free_fence_func, entry);
 
     if (IS_ERR(handle)) {
         kgsl_mem_entry_unset_pend(entry);
@@ -5570,7 +5570,7 @@ void kgsl_core_exit(void)
 int __init kgsl_core_init(void)
 {
     int result = 0;
-    struct sched_param param = { .sched_priority = 2 };
+    struct sched_param param = { .sched_priority = 16 };
 
     /* alloc major and minor device numbers */
     result = alloc_chrdev_region(&kgsl_driver.major, 0,
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index caaf3810f779..9a1fa726a1d8 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -51,7 +51,6 @@ enum kgsl_event_results {
     KGSL_EVENT_CANCELLED = 2,
 };
 
-#define KGSL_FLAG_WAKE_ON_TOUCH BIT(0)
 #define KGSL_FLAG_SPARSE BIT(1)
 
 /*
diff --git a/drivers/gpu/msm/kgsl_drawobj.c b/drivers/gpu/msm/kgsl_drawobj.c
index 63fefc5b98f3..f25f4afee687 100644
--- a/drivers/gpu/msm/kgsl_drawobj.c
+++ b/drivers/gpu/msm/kgsl_drawobj.c
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
- * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 /*
@@ -45,14 +45,7 @@ static void syncobj_destroy_object(struct kgsl_drawobj *drawobj)
     for (i = 0; i < syncobj->numsyncs; i++) {
         struct kgsl_drawobj_sync_event *event = &syncobj->synclist[i];
 
-        if (event->type == KGSL_CMD_SYNCPOINT_TYPE_FENCE) {
-            struct event_fence_info *priv = event->priv;
-
-            if (priv) {
-                kfree(priv->fences);
-                kfree(priv);
-            }
-        } else if (event->type == KGSL_CMD_SYNCPOINT_TYPE_TIMELINE) {
+        if (event->type == KGSL_CMD_SYNCPOINT_TYPE_TIMELINE) {
             kfree(event->priv);
         }
     }
@@ -111,15 +104,6 @@ void kgsl_dump_syncpoints(struct kgsl_device *device,
                 retired);
             break;
         }
-        case KGSL_CMD_SYNCPOINT_TYPE_FENCE: {
-            int j;
-            struct event_fence_info *info = event->priv;
-
-            for (j = 0; info && j < info->num_fences; j++)
-                dev_err(device->dev, "[%d] fence: %s\n",
-                    i, info->fences[j].name);
-            break;
-        }
         case KGSL_CMD_SYNCPOINT_TYPE_TIMELINE: {
             int j;
             struct event_timeline_info *info = event->priv;
@@ -177,15 +161,6 @@ static void syncobj_timer(struct timer_list *t)
                 dev_err(device->dev, " [%u] TIMESTAMP %u:%u\n",
                     i, event->context->id, event->timestamp);
                 break;
-            case KGSL_CMD_SYNCPOINT_TYPE_FENCE: {
-                int j;
-                struct event_fence_info *info = event->priv;
-
-                for (j = 0; info && j < info->num_fences; j++)
-                    dev_err(device->dev, " [%u] FENCE %s\n",
-                        i, info->fences[j].name);
-                break;
-            }
            case KGSL_CMD_SYNCPOINT_TYPE_TIMELINE: {
                 int j;
                 struct event_timeline_info *info = event->priv;
@@ -419,12 +394,6 @@ EXPORT_SYMBOL(kgsl_drawobj_destroy);
 static bool drawobj_sync_fence_func(void *priv)
 {
     struct kgsl_drawobj_sync_event *event = priv;
-    struct event_fence_info *info = event->priv;
-    int i;
-
-    for (i = 0; info && i < info->num_fences; i++)
-        trace_syncpoint_fence_expire(event->syncobj,
-            info->fences[i].name);
 
     /*
     * Only call kgsl_drawobj_put() if it's not marked for cancellation
@@ -508,6 +477,8 @@ static int drawobj_add_sync_timeline(struct kgsl_device *device,
     /* Set pending flag before adding callback to avoid race */
     set_bit(event->id, &syncobj->pending);
 
+    /* Get a dma_fence refcount to hand over to the callback */
+    dma_fence_get(event->fence);
     ret = dma_fence_add_callback(event->fence, &event->cb,
         drawobj_sync_timeline_fence_callback);
 
@@ -520,10 +491,15 @@ static int drawobj_add_sync_timeline(struct kgsl_device *device,
             ret = 0;
         }
 
+        /* Put the refcount from fence creation */
+        dma_fence_put(event->fence);
         kgsl_drawobj_put(drawobj);
+        return ret;
     }
 
-    return ret;
+    /* Put the refcount from fence creation */
+    dma_fence_put(event->fence);
+    return 0;
 }
 
 static int drawobj_add_sync_fence(struct kgsl_device *device,
@@ -533,8 +509,7 @@ static int drawobj_add_sync_fence(struct kgsl_device *device,
     struct kgsl_cmd_syncpoint_fence sync;
     struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj);
     struct kgsl_drawobj_sync_event *event;
-    struct event_fence_info *priv;
-    unsigned int id, i;
+    unsigned int id;
 
     if (copy_struct_from_user(&sync, sizeof(sync), data, datasize))
         return -EFAULT;
@@ -551,14 +526,10 @@ static int drawobj_add_sync_fence(struct kgsl_device *device,
     event->device = device;
     event->context = NULL;
 
-    priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-
     set_bit(event->id, &syncobj->pending);
 
     event->handle = kgsl_sync_fence_async_wait(sync.fd,
-        drawobj_sync_fence_func, event, priv);
-
-    event->priv = priv;
+        drawobj_sync_fence_func, event);
 
     if (IS_ERR_OR_NULL(event->handle)) {
         int ret = PTR_ERR(event->handle);
@@ -578,9 +549,6 @@ static int drawobj_add_sync_fence(struct kgsl_device *device,
         return ret;
     }
 
-    for (i = 0; priv && i < priv->num_fences; i++)
-        trace_syncpoint_fence(syncobj,
priv->fences[i].name); - return 0; } diff --git a/drivers/gpu/msm/kgsl_drawobj.h b/drivers/gpu/msm/kgsl_drawobj.h index cd905a6c1102..acd88df87939 100644 --- a/drivers/gpu/msm/kgsl_drawobj.h +++ b/drivers/gpu/msm/kgsl_drawobj.h @@ -71,8 +71,6 @@ struct kgsl_drawobj { * for easy access * @profile_index: Index to store the start/stop ticks in the kernel profiling * buffer - * @submit_ticks: Variable to hold ticks at the time of - * command obj submit. */ struct kgsl_drawobj_cmd { @@ -87,7 +85,6 @@ struct kgsl_drawobj_cmd { struct kgsl_mem_entry *profiling_buf_entry; uint64_t profiling_buffer_gpuaddr; unsigned int profile_index; - uint64_t submit_ticks; }; /** @@ -126,17 +123,6 @@ struct kgsl_drawobj_timeline { int count; }; -#define KGSL_FENCE_NAME_LEN 74 - -struct fence_info { - char name[KGSL_FENCE_NAME_LEN]; -}; - -struct event_fence_info { - struct fence_info *fences; - int num_fences; -}; - struct event_timeline_info { u64 seqno; u32 timeline; diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c index a9fe84909352..22207f326284 100644 --- a/drivers/gpu/msm/kgsl_gmu.c +++ b/drivers/gpu/msm/kgsl_gmu.c @@ -1612,7 +1612,7 @@ static int gmu_start(struct kgsl_device *device) /* Vote for minimal DDR BW for GMU to init */ ret = msm_bus_scale_client_update_request(gmu->pcl, - pwr->pwrlevels[pwr->default_pwrlevel].bus_min); + pwr->pwrlevels[pwr->num_pwrlevels - 1].bus_min); if (ret) dev_err(&gmu->pdev->dev, "Failed to allocate gmu b/w: %d\n", ret); diff --git a/drivers/gpu/msm/kgsl_ioctl.c b/drivers/gpu/msm/kgsl_ioctl.c index 5492f046e626..30bc77dadb68 100644 --- a/drivers/gpu/msm/kgsl_ioctl.c +++ b/drivers/gpu/msm/kgsl_ioctl.c @@ -5,6 +5,7 @@ #include "kgsl_device.h" #include "kgsl_sync.h" +#include "adreno.h" static const struct kgsl_ioctl kgsl_ioctl_funcs[] = { KGSL_IOCTL_FUNC(IOCTL_KGSL_DEVICE_GETPROPERTY, @@ -166,8 +167,13 @@ long kgsl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) { struct kgsl_device_private *dev_priv = filep->private_data; struct kgsl_device *device = dev_priv->device; + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); long ret; + if (cmd == IOCTL_KGSL_GPU_COMMAND && + READ_ONCE(device->state) != KGSL_STATE_ACTIVE) + kgsl_schedule_work(&adreno_dev->pwr_on_work); + ret = kgsl_ioctl_helper(filep, cmd, arg, kgsl_ioctl_funcs, ARRAY_SIZE(kgsl_ioctl_funcs)); diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c index 5f03f9a6ce24..6e663e15713f 100644 --- a/drivers/gpu/msm/kgsl_pwrctrl.c +++ b/drivers/gpu/msm/kgsl_pwrctrl.c @@ -167,8 +167,7 @@ static void _ab_buslevel_update(struct kgsl_pwrctrl *pwr, * constraint if one exists. 
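
The kgsl_ioctl() hunk above peeks at device->state with READ_ONCE() and, for a submission ioctl on a non-ACTIVE GPU, schedules the power-up worker before the ioctl path takes any locks, so GPU bring-up overlaps with argument copy-in. Roughly this shape, as a sketch (READ_ONCE() and schedule_work() are the stock kernel primitives; all my_* identifiers are hypothetical):

	#include <linux/compiler.h>
	#include <linux/workqueue.h>

	#define MY_IOCTL_SUBMIT	0x40	/* placeholder ioctl number */
	enum { MY_STATE_ACTIVE = 1 };

	struct my_device {
		int state;			/* MY_STATE_* */
		struct work_struct pwr_on_work;	/* powers the GPU up */
	};

	static long my_dispatch(struct my_device *dev, unsigned int cmd,
				unsigned long arg)
	{
		return 0;	/* ioctl table walk elided in this sketch */
	}

	static long my_ioctl(struct my_device *dev, unsigned int cmd,
			     unsigned long arg)
	{
		/* Lockless peek: a stale value only costs the head start;
		 * the worker re-checks the state under the device mutex. */
		if (cmd == MY_IOCTL_SUBMIT &&
		    READ_ONCE(dev->state) != MY_STATE_ACTIVE)
			schedule_work(&dev->pwr_on_work);

		return my_dispatch(dev, cmd, arg);
	}
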
*/ static unsigned int _adjust_pwrlevel(struct kgsl_pwrctrl *pwr, int level, - struct kgsl_pwr_constraint *pwrc, - int popp) + struct kgsl_pwr_constraint *pwrc) { unsigned int max_pwrlevel = max_t(unsigned int, pwr->thermal_pwrlevel, pwr->max_pwrlevel); @@ -196,9 +195,6 @@ static unsigned int _adjust_pwrlevel(struct kgsl_pwrctrl *pwr, int level, break; } - if (popp && (max_pwrlevel < pwr->active_pwrlevel)) - max_pwrlevel = pwr->active_pwrlevel; - if (level < max_pwrlevel) return max_pwrlevel; if (level > min_pwrlevel) @@ -579,8 +575,7 @@ unsigned int kgsl_pwrctrl_adjust_pwrlevel(struct kgsl_device *device, * Adjust the power level if required by thermal, max/min, * constraints, etc */ - return _adjust_pwrlevel(pwr, new_level, &pwr->constraint, - device->pwrscale.popp_level); + return _adjust_pwrlevel(pwr, new_level, &pwr->constraint); } /** @@ -723,7 +718,7 @@ void kgsl_pwrctrl_set_constraint(struct kgsl_device *device, if (device == NULL || pwrc == NULL) return; constraint = _adjust_pwrlevel(&device->pwrctrl, - device->pwrctrl.active_pwrlevel, pwrc, 0); + device->pwrctrl.active_pwrlevel, pwrc); pwrc_old = &device->pwrctrl.constraint; /* @@ -1063,6 +1058,8 @@ static ssize_t __timer_store(struct device *dev, struct device_attribute *attr, struct kgsl_device *device = dev_get_drvdata(dev); int ret; + /* Writes are deliberately ignored: keep the idle timer at its default */ + return count; ret = kgsl_sysfs_store(buf, &val); if (ret) return ret; @@ -1330,75 +1326,6 @@ static ssize_t bus_split_store(struct device *dev, return count; } -static ssize_t default_pwrlevel_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct kgsl_device *device = dev_get_drvdata(dev); - - return scnprintf(buf, PAGE_SIZE, "%d\n", - device->pwrctrl.default_pwrlevel); -} - -static ssize_t default_pwrlevel_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct kgsl_device *device = dev_get_drvdata(dev); - struct kgsl_pwrctrl *pwr = &device->pwrctrl; - struct kgsl_pwrscale *pwrscale = &device->pwrscale; - int ret; - unsigned int level = 0; - - ret = kgsl_sysfs_store(buf, &level); - if (ret) - return ret; - - if (level > pwr->num_pwrlevels - 2) - goto done; - - mutex_lock(&device->mutex); - pwr->default_pwrlevel = level; - pwrscale->gpu_profile.profile.initial_freq - = pwr->pwrlevels[level].gpu_freq; - - mutex_unlock(&device->mutex); -done: - return count; -} - - -static ssize_t popp_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - unsigned int val = 0; - struct kgsl_device *device = dev_get_drvdata(dev); - int ret; - - ret = kgsl_sysfs_store(buf, &val); - if (ret) - return ret; - - mutex_lock(&device->mutex); - if (val) - set_bit(POPP_ON, &device->pwrscale.popp_state); - else - clear_bit(POPP_ON, &device->pwrscale.popp_state); - mutex_unlock(&device->mutex); - - return count; -} - -static ssize_t popp_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct kgsl_device *device = dev_get_drvdata(dev); - - return scnprintf(buf, PAGE_SIZE, "%d\n", - test_bit(POPP_ON, &device->pwrscale.popp_state)); -} - static ssize_t gpu_model_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -1603,8 +1530,6 @@ static DEVICE_ATTR_RW(force_clk_on); static DEVICE_ATTR_RW(force_bus_on); static DEVICE_ATTR_RW(force_rail_on); static DEVICE_ATTR_RW(bus_split); -static DEVICE_ATTR_RW(default_pwrlevel); -static DEVICE_ATTR_RW(popp); static DEVICE_ATTR_RW(force_no_nap); static DEVICE_ATTR_RO(gpu_model); static DEVICE_ATTR_RO(gpu_busy_percentage); @@
-1632,8 +1557,6 @@ static const struct attribute *pwrctrl_attr_list[] = { &dev_attr_force_rail_on.attr, &dev_attr_force_no_nap.attr, &dev_attr_bus_split.attr, - &dev_attr_default_pwrlevel.attr, - &dev_attr_popp.attr, &dev_attr_gpu_model.attr, &dev_attr_gpu_busy_percentage.attr, &dev_attr_min_clock_mhz.attr, @@ -2621,10 +2544,8 @@ static int kgsl_pwrctrl_enable(struct kgsl_device *device) if (pwr->wakeup_maxpwrlevel) { level = pwr->max_pwrlevel; pwr->wakeup_maxpwrlevel = 0; - } else if (kgsl_popp_check(device)) { - level = pwr->active_pwrlevel; } else { - level = pwr->default_pwrlevel; + level = pwr->num_pwrlevels - 1; } kgsl_pwrctrl_pwrlevel_change(device, level); @@ -3424,7 +3345,7 @@ EXPORT_SYMBOL(kgsl_pwr_limits_get_freq); int kgsl_pwrctrl_set_default_gpu_pwrlevel(struct kgsl_device *device) { struct kgsl_pwrctrl *pwr = &device->pwrctrl; - unsigned int new_level = pwr->default_pwrlevel; + unsigned int new_level = pwr->num_pwrlevels - 1; unsigned int old_level = pwr->active_pwrlevel; /* diff --git a/drivers/gpu/msm/kgsl_pwrctrl.h b/drivers/gpu/msm/kgsl_pwrctrl.h index 406486a8f065..620cbfbaa33d 100644 --- a/drivers/gpu/msm/kgsl_pwrctrl.h +++ b/drivers/gpu/msm/kgsl_pwrctrl.h @@ -52,9 +52,9 @@ /* * The effective duration of qos request in usecs at queue time. * After timeout, qos request is cancelled automatically. - * Kept 80ms default, inline with default GPU idle time. + * Kept 58ms default, in line with the default GPU idle time. */ -#define KGSL_L2PC_QUEUE_TIMEOUT (80 * 1000) +#define KGSL_L2PC_QUEUE_TIMEOUT (58 * 1000) /* * The effective duration of qos request in usecs at wakeup time. @@ -140,7 +140,6 @@ struct gpu_cx_ipeak_client { * @previous_pwrlevel - The power level before transition * @thermal_pwrlevel - maximum powerlevel constraint from thermal * @thermal_pwrlevel_floor - minimum powerlevel constraint from thermal - * @default_pwrlevel - device wake up power level * @max_pwrlevel - maximum allowable powerlevel per the user * @min_pwrlevel - minimum allowable powerlevel per the user * @num_pwrlevels - number of available power levels @@ -158,7 +157,6 @@ struct gpu_cx_ipeak_client { * @pm_qos_req_dma - the power management quality of service structure * @pm_qos_active_latency - allowed CPU latency in microseconds when active * @pm_qos_cpu_mask_latency - allowed CPU mask latency in microseconds - * @input_disable - To disable GPU wakeup on touch input event * @pm_qos_wakeup_latency - allowed CPU latency in microseconds during wakeup * @bus_control - true if the bus calculation is independent * @bus_mod - modifier from the current power level for the bus vote @@ -199,7 +197,6 @@ struct kgsl_pwrctrl { unsigned int previous_pwrlevel; unsigned int thermal_pwrlevel; unsigned int thermal_pwrlevel_floor; - unsigned int default_pwrlevel; unsigned int wakeup_maxpwrlevel; unsigned int max_pwrlevel; unsigned int min_pwrlevel; @@ -219,7 +216,6 @@ struct kgsl_pwrctrl { unsigned int pm_qos_active_latency; unsigned int pm_qos_cpu_mask_latency; unsigned int pm_qos_wakeup_latency; - bool input_disable; bool bus_control; int bus_mod; unsigned int bus_percent_ab; diff --git a/drivers/gpu/msm/kgsl_pwrscale.c b/drivers/gpu/msm/kgsl_pwrscale.c index 892e9e69f1d0..3949ca6cf32b 100644 --- a/drivers/gpu/msm/kgsl_pwrscale.c +++ b/drivers/gpu/msm/kgsl_pwrscale.c @@ -12,24 +12,6 @@ #include "kgsl_pwrscale.h" #include "kgsl_trace.h" -/* - * "SLEEP" is generic counting both NAP & SLUMBER - * PERIODS generally won't exceed 9 for the relavent 150msec - * window, but can be significantly smaller and still POPP -
pushable in cases where SLUMBER is involved. Hence the - * additional reliance on PERCENT to make sure a reasonable - * amount of down-time actually exists. - */ -#define MIN_SLEEP_PERIODS 3 -#define MIN_SLEEP_PERCENT 5 - -static struct kgsl_popp popp_param[POPP_MAX] = { - {0, 0}, - {-5, 20}, - {-5, 0}, - {0, 0}, -}; - /** * struct kgsl_midframe_info - midframe power stats sampling info * @timer - midframe sampling timer @@ -61,15 +43,10 @@ static struct devfreq_dev_status last_status = { .private_data = &last_xstats }; */ void kgsl_pwrscale_sleep(struct kgsl_device *device) { - struct kgsl_pwrscale *psc = &device->pwrscale; - if (!device->pwrscale.enabled) return; device->pwrscale.on_time = 0; - psc->popp_level = 0; - clear_bit(POPP_PUSH, &device->pwrscale.popp_state); - /* to call devfreq_suspend_device() from a kernel thread */ queue_work(device->pwrscale.devfreq_wq, &device->pwrscale.devfreq_suspend_ws); @@ -143,18 +120,6 @@ void kgsl_pwrscale_update_stats(struct kgsl_device *device) struct kgsl_power_stats stats; device->ftbl->power_stats(device, &stats); - if (psc->popp_level) { - u64 x = stats.busy_time; - u64 y = stats.ram_time; - - do_div(x, 100); - do_div(y, 100); - x *= popp_param[psc->popp_level].gpu_x; - y *= popp_param[psc->popp_level].ddr_y; - trace_kgsl_popp_mod(device, x, y); - stats.busy_time += x; - stats.ram_time += y; - } device->pwrscale.accum_stats.busy_time += stats.busy_time; device->pwrscale.accum_stats.ram_time += stats.ram_time; device->pwrscale.accum_stats.ram_wait += stats.ram_wait; @@ -288,7 +253,7 @@ void kgsl_pwrscale_enable(struct kgsl_device *device) * run at default level; */ kgsl_pwrctrl_pwrlevel_change(device, - device->pwrctrl.default_pwrlevel); + device->pwrctrl.num_pwrlevels - 1); device->pwrscale.enabled = false; } } @@ -309,194 +274,6 @@ static int _thermal_adjust(struct kgsl_pwrctrl *pwr, int level) return level; } -/* - * Use various metrics including level stability, NAP intervals, and - * overall GPU freq / DDR freq combination to decide if POPP should - * be activated. - */ -static bool popp_stable(struct kgsl_device *device) -{ - s64 t; - s64 nap_time = 0; - s64 go_time = 0; - int i, index; - int nap = 0; - s64 percent_nap = 0; - struct kgsl_pwr_event *e; - struct kgsl_pwrctrl *pwr = &device->pwrctrl; - struct kgsl_pwrscale *psc = &device->pwrscale; - - if (!test_bit(POPP_ON, &psc->popp_state)) - return false; - - /* If already pushed or running naturally at min don't push further */ - if (test_bit(POPP_PUSH, &psc->popp_state)) - return false; - if (!psc->popp_level && - (pwr->active_pwrlevel == pwr->min_pwrlevel)) - return false; - if (psc->history[KGSL_PWREVENT_STATE].events == NULL) - return false; - - t = ktime_to_ms(ktime_get()); - /* Check for recent NAP statistics: NAPping regularly and well? */ - if (pwr->active_pwrlevel == 0) { - index = psc->history[KGSL_PWREVENT_STATE].index; - i = index > 0 ? 
(index - 1) : - (psc->history[KGSL_PWREVENT_STATE].size - 1); - while (i != index) { - e = &psc->history[KGSL_PWREVENT_STATE].events[i]; - if (e->data == KGSL_STATE_NAP || - e->data == KGSL_STATE_SLUMBER) { - if (ktime_to_ms(e->start) + STABLE_TIME > t) { - nap++; - nap_time += e->duration; - } - } else if (e->data == KGSL_STATE_ACTIVE) { - if (ktime_to_ms(e->start) + STABLE_TIME > t) - go_time += e->duration; - } - if (i == 0) - i = psc->history[KGSL_PWREVENT_STATE].size - 1; - else - i--; - } - if (nap_time && go_time) { - percent_nap = 100 * nap_time; - div64_s64(percent_nap, nap_time + go_time); - } - trace_kgsl_popp_nap(device, (int)nap_time / 1000, nap, - percent_nap); - /* If running high at turbo, don't push */ - if (nap < MIN_SLEEP_PERIODS || percent_nap < MIN_SLEEP_PERCENT) - return false; - } - - /* Finally check that there hasn't been a recent change */ - if ((device->pwrscale.freq_change_time + STABLE_TIME) < t) { - device->pwrscale.freq_change_time = t; - return true; - } - return false; -} - -bool kgsl_popp_check(struct kgsl_device *device) -{ - int i; - unsigned int index; - struct kgsl_pwrscale *psc = &device->pwrscale; - struct kgsl_pwr_event *e; - - if (!test_bit(POPP_ON, &psc->popp_state)) - return false; - if (!test_bit(POPP_PUSH, &psc->popp_state)) - return false; - if (psc->history[KGSL_PWREVENT_STATE].events == NULL) { - clear_bit(POPP_PUSH, &psc->popp_state); - return false; - } - index = psc->history[KGSL_PWREVENT_STATE].index; - - e = &psc->history[KGSL_PWREVENT_STATE].events[index]; - if (e->data == KGSL_STATE_SLUMBER) - e->duration = ktime_us_delta(ktime_get(), e->start); - - /* If there's been a long SLUMBER in recent history, clear the _PUSH */ - for (i = 0; i < psc->history[KGSL_PWREVENT_STATE].size; i++) { - e = &psc->history[KGSL_PWREVENT_STATE].events[i]; - if ((e->data == KGSL_STATE_SLUMBER) && - (e->duration > POPP_RESET_TIME)) { - clear_bit(POPP_PUSH, &psc->popp_state); - return false; - } - } - return true; -} - -/* - * The GPU has been running at the current frequency for a while. Attempt - * to lower the frequency for boarderline cases. - */ -static void popp_trans1(struct kgsl_device *device) -{ - struct kgsl_pwrctrl *pwr = &device->pwrctrl; - struct kgsl_pwrlevel *pl = &pwr->pwrlevels[pwr->active_pwrlevel]; - struct kgsl_pwrscale *psc = &device->pwrscale; - int old_level = psc->popp_level; - - switch (old_level) { - case 0: - psc->popp_level = 2; - /* If the current level has a high default bus don't push it */ - if (pl->bus_freq == pl->bus_max) - pwr->bus_mod = 1; - kgsl_pwrctrl_pwrlevel_change(device, pwr->active_pwrlevel + 1); - break; - case 1: - case 2: - psc->popp_level++; - break; - case 3: - set_bit(POPP_PUSH, &psc->popp_state); - psc->popp_level = 0; - break; - case POPP_MAX: - default: - psc->popp_level = 0; - break; - } - - trace_kgsl_popp_level(device, old_level, psc->popp_level); -} - -/* - * The GPU DCVS algorithm recommends a level change. Apply any - * POPP restrictions and update the level accordingly - */ -static int popp_trans2(struct kgsl_device *device, int level) -{ - struct kgsl_pwrctrl *pwr = &device->pwrctrl; - struct kgsl_pwrscale *psc = &device->pwrscale; - int old_level = psc->popp_level; - - if (!test_bit(POPP_ON, &psc->popp_state)) - return level; - - clear_bit(POPP_PUSH, &psc->popp_state); - /* If the governor recommends going down, do it! 
*/ - if (pwr->active_pwrlevel < level) { - psc->popp_level = 0; - trace_kgsl_popp_level(device, old_level, psc->popp_level); - return level; - } - - switch (psc->popp_level) { - case 0: - /* If the feature isn't engaged, go up immediately */ - break; - case 1: - /* Turn off mitigation, and go up a level */ - psc->popp_level = 0; - break; - case 2: - case 3: - /* Try a more aggressive mitigation */ - psc->popp_level--; - level++; - /* Update the stable timestamp */ - device->pwrscale.freq_change_time = ktime_to_ms(ktime_get()); - break; - case POPP_MAX: - default: - psc->popp_level = 0; - break; - } - - trace_kgsl_popp_level(device, old_level, psc->popp_level); - - return level; -} - #ifdef DEVFREQ_FLAG_WAKEUP_MAXFREQ static inline bool _check_maxfreq(u32 flags) { @@ -563,13 +340,11 @@ int kgsl_devfreq_target(struct device *dev, unsigned long *freq, u32 flags) if (pwr->thermal_cycle == CYCLE_ACTIVE) level = _thermal_adjust(pwr, i); else - level = popp_trans2(device, i); + level = i; break; } if (level != pwr->active_pwrlevel) kgsl_pwrctrl_pwrlevel_change(device, level); - } else if (popp_stable(device)) { - popp_trans1(device); } *freq = kgsl_pwrctrl_active_freq(pwr); @@ -957,7 +732,7 @@ int kgsl_pwrscale_init(struct device *dev, const char *governor) srcu_init_notifier_head(&pwrscale->nh); profile->initial_freq = - pwr->pwrlevels[pwr->default_pwrlevel].gpu_freq; + pwr->pwrlevels[pwr->num_pwrlevels - 1].gpu_freq; /* Let's start with 10 ms and tune in later */ profile->polling_ms = 10; diff --git a/drivers/gpu/msm/kgsl_pwrscale.h b/drivers/gpu/msm/kgsl_pwrscale.h index af07d5b85a08..8072f6c0abd4 100644 --- a/drivers/gpu/msm/kgsl_pwrscale.h +++ b/drivers/gpu/msm/kgsl_pwrscale.h @@ -16,8 +16,7 @@ #define KGSL_PWREVENT_STATE 0 #define KGSL_PWREVENT_GPU_FREQ 1 #define KGSL_PWREVENT_BUS_FREQ 2 -#define KGSL_PWREVENT_POPP 3 -#define KGSL_PWREVENT_MAX 4 +#define KGSL_PWREVENT_MAX 3 /** * Amount of time running at a level to be considered @@ -25,21 +24,6 @@ */ #define STABLE_TIME 150 -/* Amount of idle time needed to re-set stability in usec */ -#define POPP_RESET_TIME 1000000 - -/* Number of POPP levels */ -#define POPP_MAX 4 - -/* POPP state bits */ -#define POPP_ON BIT(0) -#define POPP_PUSH BIT(1) - -struct kgsl_popp { - int gpu_x; - int ddr_y; -}; - struct kgsl_power_stats { u64 busy_time; u64 ram_time; @@ -70,7 +54,7 @@ struct kgsl_pwr_history { * @enabled - Whether or not power scaling is enabled * @time - Last submitted sample timestamp * @on_time - Timestamp when gpu busy begins - * @freq_change_time - Timestamp of last freq change or popp update + * @freq_change_time - Timestamp of last freq change * @nh - Notifier for the partner devfreq bus device * @devfreq_wq - Main devfreq workqueue * @devfreq_suspend_ws - Pass device suspension to devfreq @@ -79,8 +63,6 @@ struct kgsl_pwr_history { * @next_governor_call - Timestamp after which the governor may be notified of * a new sample * @history - History of power events with timestamps and durations - * @popp_level - Current level of POPP mitigation - * @popp_state - Control state for POPP, on/off, recently pushed, etc * @cooling_dev - Thermal cooling device handle * @ctxt_aware_enable - Whether or not ctxt aware DCVS feature is enabled * @ctxt_aware_busy_penalty - The time in microseconds required to trigger @@ -106,8 +88,6 @@ struct kgsl_pwrscale { struct work_struct devfreq_notify_ws; ktime_t next_governor_call; struct kgsl_pwr_history history[KGSL_PWREVENT_MAX]; - int popp_level; - unsigned long popp_state; struct thermal_cooling_device 
*cooling_dev; bool ctxt_aware_enable; unsigned int ctxt_aware_target_pwrlevel; @@ -139,9 +119,6 @@ int kgsl_busmon_get_dev_status(struct device *dev, struct devfreq_dev_status *stat); int kgsl_busmon_get_cur_freq(struct device *dev, unsigned long *freq); -bool kgsl_popp_check(struct kgsl_device *device); - - #define KGSL_PWRSCALE_INIT(_priv_data) { \ .enabled = true, \ .gpu_profile = { \ @@ -161,6 +138,5 @@ bool kgsl_popp_check(struct kgsl_device *device); .history[KGSL_PWREVENT_STATE].size = 20, \ .history[KGSL_PWREVENT_GPU_FREQ].size = 3, \ .history[KGSL_PWREVENT_BUS_FREQ].size = 5, \ - .history[KGSL_PWREVENT_POPP].size = 5, \ } #endif diff --git a/drivers/gpu/msm/kgsl_rgmu.c b/drivers/gpu/msm/kgsl_rgmu.c index ff89170831be..d449dfa6f392 100644 --- a/drivers/gpu/msm/kgsl_rgmu.c +++ b/drivers/gpu/msm/kgsl_rgmu.c @@ -174,7 +174,7 @@ static int rgmu_enable_clks(struct kgsl_device *device) /* Let us set gpu clk to default power level */ ret = rgmu_clk_set_rate(rgmu->gpu_clk, - rgmu->gpu_freqs[pwr->default_pwrlevel]); + rgmu->gpu_freqs[pwr->num_pwrlevels - 1]); if (ret) return ret; diff --git a/drivers/gpu/msm/kgsl_sync.c b/drivers/gpu/msm/kgsl_sync.c index 229f8baa7822..1d6d1f438208 100644 --- a/drivers/gpu/msm/kgsl_sync.c +++ b/drivers/gpu/msm/kgsl_sync.c @@ -424,54 +424,8 @@ static void kgsl_sync_fence_callback(struct dma_fence *fence, } } -static void kgsl_get_fence_names(struct dma_fence *fence, - struct event_fence_info *info_ptr) -{ - unsigned int num_fences; - struct dma_fence **fences; - struct dma_fence_array *array; - int i; - - if (!info_ptr) - return; - - array = to_dma_fence_array(fence); - - if (array != NULL) { - num_fences = array->num_fences; - fences = array->fences; - } else { - num_fences = 1; - fences = &fence; - } - - info_ptr->fences = kcalloc(num_fences, sizeof(struct fence_info), - GFP_ATOMIC); - if (info_ptr->fences == NULL) - return; - - info_ptr->num_fences = num_fences; - - for (i = 0; i < num_fences; i++) { - struct dma_fence *f = fences[i]; - struct fence_info *fi = &info_ptr->fences[i]; - int len; - - len = scnprintf(fi->name, sizeof(fi->name), "%s %s", - f->ops->get_driver_name(f), - f->ops->get_timeline_name(f)); - - if (f->ops->fence_value_str) { - len += scnprintf(fi->name + len, sizeof(fi->name) - len, - ": "); - f->ops->fence_value_str(f, fi->name + len, - sizeof(fi->name) - len); - } - } -} - struct kgsl_sync_fence_cb *kgsl_sync_fence_async_wait(int fd, - bool (*func)(void *priv), void *priv, struct event_fence_info *info_ptr) + bool (*func)(void *priv), void *priv) { struct kgsl_sync_fence_cb *kcb; struct dma_fence *fence; @@ -481,6 +435,11 @@ struct kgsl_sync_fence_cb *kgsl_sync_fence_async_wait(int fd, if (fence == NULL) return ERR_PTR(-EINVAL); + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { + dma_fence_put(fence); + return NULL; + } + /* create the callback */ kcb = kzalloc(sizeof(*kcb), GFP_ATOMIC); if (kcb == NULL) { @@ -492,8 +451,6 @@ struct kgsl_sync_fence_cb *kgsl_sync_fence_async_wait(int fd, kcb->priv = priv; kcb->func = func; - kgsl_get_fence_names(fence, info_ptr); - /* if status then error or signaled */ status = dma_fence_add_callback(fence, &kcb->fence_cb, kgsl_sync_fence_callback); diff --git a/drivers/gpu/msm/kgsl_sync.h b/drivers/gpu/msm/kgsl_sync.h index f49f9e04f255..fa0c9e3145f4 100644 --- a/drivers/gpu/msm/kgsl_sync.h +++ b/drivers/gpu/msm/kgsl_sync.h @@ -70,7 +70,6 @@ struct kgsl_sync_fence_cb { struct kgsl_device_private; struct kgsl_drawobj_sync_event; -struct event_fence_info; struct kgsl_process_private; 
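
The new early-out in kgsl_sync_fence_async_wait() above is worth calling out: an already-signaled fence is now detected up front and never allocates a callback at all. The patch tests DMA_FENCE_FLAG_SIGNALED_BIT directly; the standalone sketch below uses dma_fence_is_signaled(), the slightly heavier stock wrapper that also polls ops->signaled. sync_file_get_fence() is the real helper for turning a sync-file fd into a fence; my_get_unsignaled_fence() is hypothetical:

	#include <linux/dma-fence.h>
	#include <linux/err.h>
	#include <linux/sync_file.h>

	/* NULL means "nothing to wait for"; callers skip the callback. */
	static struct dma_fence *my_get_unsignaled_fence(int fd)
	{
		struct dma_fence *fence = sync_file_get_fence(fd);

		if (!fence)
			return ERR_PTR(-EINVAL);

		if (dma_fence_is_signaled(fence)) {
			/* Fast path: drop the sync_file reference and bail. */
			dma_fence_put(fence);
			return NULL;
		}

		return fence;
	}
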
struct kgsl_syncsource; @@ -86,8 +85,7 @@ void kgsl_sync_timeline_detach(struct kgsl_sync_timeline *ktimeline); void kgsl_sync_timeline_put(struct kgsl_sync_timeline *ktimeline); struct kgsl_sync_fence_cb *kgsl_sync_fence_async_wait(int fd, - bool (*func)(void *priv), void *priv, - struct event_fence_info *info_ptr); + bool (*func)(void *priv), void *priv); void kgsl_sync_fence_async_cancel(struct kgsl_sync_fence_cb *kcb); @@ -130,8 +128,7 @@ static inline void kgsl_sync_timeline_put(struct kgsl_sync_timeline *ktimeline) static inline struct kgsl_sync_fence_cb *kgsl_sync_fence_async_wait(int fd, - bool (*func)(void *priv), void *priv, - struct event_fence_info *info_ptr) + bool (*func)(void *priv), void *priv) { return NULL; } diff --git a/drivers/gpu/msm/kgsl_trace.h b/drivers/gpu/msm/kgsl_trace.h index 60fbe5b03348..2fab867b7626 100644 --- a/drivers/gpu/msm/kgsl_trace.h +++ b/drivers/gpu/msm/kgsl_trace.h @@ -866,78 +866,6 @@ TRACE_EVENT(kgsl_regwrite, ) ); -TRACE_EVENT(kgsl_popp_level, - - TP_PROTO(struct kgsl_device *device, int level1, int level2), - - TP_ARGS(device, level1, level2), - - TP_STRUCT__entry( - __string(device_name, device->name) - __field(int, level1) - __field(int, level2) - ), - - TP_fast_assign( - __assign_str(device_name, device->name); - __entry->level1 = level1; - __entry->level2 = level2; - ), - - TP_printk( - "d_name=%s old level=%d new level=%d", - __get_str(device_name), __entry->level1, __entry->level2) -); - -TRACE_EVENT(kgsl_popp_mod, - - TP_PROTO(struct kgsl_device *device, int x, int y), - - TP_ARGS(device, x, y), - - TP_STRUCT__entry( - __string(device_name, device->name) - __field(int, x) - __field(int, y) - ), - - TP_fast_assign( - __assign_str(device_name, device->name); - __entry->x = x; - __entry->y = y; - ), - - TP_printk( - "d_name=%s GPU busy mod=%d bus busy mod=%d", - __get_str(device_name), __entry->x, __entry->y) -); - -TRACE_EVENT(kgsl_popp_nap, - - TP_PROTO(struct kgsl_device *device, int t, int nap, int percent), - - TP_ARGS(device, t, nap, percent), - - TP_STRUCT__entry( - __string(device_name, device->name) - __field(int, t) - __field(int, nap) - __field(int, percent) - ), - - TP_fast_assign( - __assign_str(device_name, device->name); - __entry->t = t; - __entry->nap = nap; - __entry->percent = percent; - ), - - TP_printk( - "d_name=%s nap time=%d number of naps=%d percentage=%d", - __get_str(device_name), __entry->t, __entry->nap, - __entry->percent) -); - TRACE_EVENT(kgsl_register_event, TP_PROTO(unsigned int id, unsigned int timestamp, void *func), TP_ARGS(id, timestamp, func), diff --git a/drivers/input/touchscreen/sec_touch/sec_ts.c b/drivers/input/touchscreen/sec_touch/sec_ts.c index a8b125028556..6dc570f1f8ae 100644 --- a/drivers/input/touchscreen/sec_touch/sec_ts.c +++ b/drivers/input/touchscreen/sec_touch/sec_ts.c @@ -1064,7 +1064,7 @@ int sec_ts_wait_for_ready_with_count(struct sec_ts_data *ts, unsigned int ack, input_err(true, &ts->client->dev, "%s: Time Over\n", __func__); - input_info(true, &ts->client->dev, + input_dbg(true, &ts->client->dev, "%s: %02X, %02X, %02X, %02X, %02X, %02X, %02X, %02X [%d]\n", __func__, tBuff[0], tBuff[1], tBuff[2], tBuff[3], tBuff[4], tBuff[5], tBuff[6], tBuff[7], retry); @@ -2868,7 +2868,7 @@ void sec_ts_set_grip_type(struct sec_ts_data *ts, u8 set_type) { u8 mode = G_NONE; - input_info(true, &ts->client->dev, + input_dbg(true, &ts->client->dev, "%s: re-init grip(%d), edh:%d, edg:%d, lan:%d\n", __func__, set_type, ts->grip_edgehandler_direction, ts->grip_edge_range, ts->grip_landscape_mode); @@ 
-4825,7 +4825,7 @@ static void sec_set_switch_gpio(struct sec_ts_data *ts, int gpio_value) if (!gpio_is_valid(gpio)) return; - input_info(true, &ts->client->dev, "%s: toggling switch to %s\n", + input_dbg(true, &ts->client->dev, "%s: toggling switch to %s\n", __func__, gpio_value == SEC_SWITCH_GPIO_VALUE_AP_MASTER ? "AP" : "SLPI"); @@ -4842,7 +4842,7 @@ static void sec_ts_suspend_work(struct work_struct *work) suspend_work); int ret = 0; - input_info(true, &ts->client->dev, "%s\n", __func__); + input_dbg(true, &ts->client->dev, "%s\n", __func__); mutex_lock(&ts->device_mutex); @@ -4890,7 +4890,7 @@ static void sec_ts_resume_work(struct work_struct *work) resume_work); int ret = 0; - input_info(true, &ts->client->dev, "%s\n", __func__); + input_dbg(true, &ts->client->dev, "%s\n", __func__); mutex_lock(&ts->device_mutex); @@ -4978,7 +4978,7 @@ static void sec_ts_resume_work(struct work_struct *work) __func__, SET_TS_CMD_SET_CHARGER_MODE, ts->charger_mode, ret); else - input_info(true, &ts->client->dev, "%s: set charger mode %#x\n", + input_dbg(true, &ts->client->dev, "%s: set charger mode %#x\n", __func__, ts->charger_mode); queue_work(ts->event_wq, &ts->charger_work); @@ -5048,12 +5048,12 @@ static void sec_ts_charger_work(struct work_struct *work) /* keep wlc mode if usb plug in w/ wlc off case */ if (ts->keep_wlc_mode) { - input_info(true, &ts->client->dev, + input_dbg(true, &ts->client->dev, "keep wlc mode after usb plug in during wlc online"); charger_mode = SEC_TS_BIT_CHARGER_MODE_WIRELESS_CHARGER; } - input_info(true, &ts->client->dev, + input_dbg(true, &ts->client->dev, "%s: keep_wlc_mode %d, USB(%d->%d), WLC(%d->%d), charger_mode(%#x->%#x)", __func__, ts->keep_wlc_mode, @@ -5073,11 +5073,11 @@ static void sec_ts_charger_work(struct work_struct *work) return; } - input_info(true, &ts->client->dev, + input_dbg(true, &ts->client->dev, "%s: charger_mode change from %#x to %#x\n", __func__, ts->charger_mode, charger_mode); } else { - input_info(true, &ts->client->dev, + input_dbg(true, &ts->client->dev, "%s: ONLY update charger_mode status from %#x to %#x, then will apply during resume\n", __func__, ts->charger_mode, charger_mode); } diff --git a/drivers/input/touchscreen/sec_touch/sec_ts_fn.c b/drivers/input/touchscreen/sec_touch/sec_ts_fn.c index ad9ba7177be8..7f97b41f0cba 100644 --- a/drivers/input/touchscreen/sec_touch/sec_ts_fn.c +++ b/drivers/input/touchscreen/sec_touch/sec_ts_fn.c @@ -1267,7 +1267,7 @@ int sec_ts_fix_tmode(struct sec_ts_data *ts, u8 mode, u8 state) u8 onoff[1] = {STATE_MANAGE_OFF}; u8 tBuff[2] = { mode, state }; - input_info(true, &ts->client->dev, "%s: mode %d state %d\n", + input_dbg(true, &ts->client->dev, "%s: mode %d state %d\n", __func__, mode, state); ret = ts->sec_ts_write(ts, SEC_TS_CMD_STATEMANAGE_ON, onoff, 1); @@ -1622,7 +1622,7 @@ static void sec_ts_print_frame(struct sec_ts_data *ts, short *min, short *max) unsigned int buff_len = 0; unsigned char *pStr = NULL; - input_info(true, &ts->client->dev, "%s\n", __func__); + input_dbg(true, &ts->client->dev, "%s\n", __func__); pStr = kzalloc(buff_size, GFP_KERNEL); if (pStr == NULL) @@ -1635,7 +1635,7 @@ static void sec_ts_print_frame(struct sec_ts_data *ts, short *min, short *max) buff_len += scnprintf(pStr + buff_len, buff_size - buff_len, " %02d ", i); - input_info(true, &ts->client->dev, "%s\n", pStr); + input_dbg(true, &ts->client->dev, "%s\n", pStr); buff_len = 0; memset(pStr, 0x0, buff_size); buff_len += scnprintf(pStr + buff_len, buff_size - buff_len, " +"); @@ -1644,7 +1644,7 @@ static void 
sec_ts_print_frame(struct sec_ts_data *ts, short *min, short *max) buff_len += scnprintf(pStr + buff_len, buff_size - buff_len, "----"); - input_info(true, &ts->client->dev, "%s\n", pStr); + input_dbg(true, &ts->client->dev, "%s\n", pStr); for (i = 0; i < ts->rx_count; i++) { buff_len = 0; @@ -1667,7 +1667,7 @@ static void sec_ts_print_frame(struct sec_ts_data *ts, short *min, short *max) i]; } } - input_info(true, &ts->client->dev, "%s\n", pStr); + input_dbg(true, &ts->client->dev, "%s\n", pStr); } kfree(pStr); } diff --git a/drivers/input/touchscreen/sec_touch/sec_ts_fw.c b/drivers/input/touchscreen/sec_touch/sec_ts_fw.c index b02ab3841fbb..e8f404db8c39 100644 --- a/drivers/input/touchscreen/sec_touch/sec_ts_fw.c +++ b/drivers/input/touchscreen/sec_touch/sec_ts_fw.c @@ -189,7 +189,7 @@ int sec_ts_system_reset(struct sec_ts_data *ts) input_err(true, &ts->client->dev, "%s: sw_reset time out!\n", __func__); else - input_info(true, + input_dbg(true, &ts->client->dev, "%s: sw_reset done\n", __func__); } diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 3afd2056e9de..911a50d29a87 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -688,8 +688,6 @@ int iommu_group_add_device(struct iommu_group *group, struct device *dev) trace_add_device_to_group(group->id, dev); - pr_info("Adding device %s to group %d\n", dev_name(dev), group->id); - return 0; err_put_group: diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index fd8d7879f8d0..a05590bd4dda 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -826,7 +826,6 @@ static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq) MPIDR_TO_SGI_RS(cluster_id) | tlist << ICC_SGI1R_TARGET_LIST_SHIFT); - pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val); gic_write_sgi1r(val); } diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c index 138bdfa359b7..3be0f58dbe87 100644 --- a/drivers/media/usb/uvc/uvc_ctrl.c +++ b/drivers/media/usb/uvc/uvc_ctrl.c @@ -24,6 +24,7 @@ #include #include #include +#include #include "uvcvideo.h" diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c index 62d43ae7a1b7..2ed1757fc77e 100644 --- a/drivers/media/usb/uvc/uvc_driver.c +++ b/drivers/media/usb/uvc/uvc_driver.c @@ -25,6 +25,7 @@ #include #include +#include #include "uvcvideo.h" @@ -39,183 +40,6 @@ static unsigned int uvc_quirks_param = -1; unsigned int uvc_trace_param; unsigned int uvc_timeout_param = UVC_CTRL_STREAMING_TIMEOUT; -/* ------------------------------------------------------------------------ - * Video formats - */ - -static struct uvc_format_desc uvc_fmts[] = { - { - .name = "YUV 4:2:2 (YUYV)", - .guid = UVC_GUID_FORMAT_YUY2, - .fcc = V4L2_PIX_FMT_YUYV, - }, - { - .name = "YUV 4:2:2 (YUYV)", - .guid = UVC_GUID_FORMAT_YUY2_ISIGHT, - .fcc = V4L2_PIX_FMT_YUYV, - }, - { - .name = "YUV 4:2:0 (NV12)", - .guid = UVC_GUID_FORMAT_NV12, - .fcc = V4L2_PIX_FMT_NV12, - }, - { - .name = "MJPEG", - .guid = UVC_GUID_FORMAT_MJPEG, - .fcc = V4L2_PIX_FMT_MJPEG, - }, - { - .name = "YVU 4:2:0 (YV12)", - .guid = UVC_GUID_FORMAT_YV12, - .fcc = V4L2_PIX_FMT_YVU420, - }, - { - .name = "YUV 4:2:0 (I420)", - .guid = UVC_GUID_FORMAT_I420, - .fcc = V4L2_PIX_FMT_YUV420, - }, - { - .name = "YUV 4:2:0 (M420)", - .guid = UVC_GUID_FORMAT_M420, - .fcc = V4L2_PIX_FMT_M420, - }, - { - .name = "YUV 4:2:2 (UYVY)", - .guid = UVC_GUID_FORMAT_UYVY, - .fcc = V4L2_PIX_FMT_UYVY, - }, - { - .name = "Greyscale 8-bit (Y800)", - .guid = 
UVC_GUID_FORMAT_Y800, - .fcc = V4L2_PIX_FMT_GREY, - }, - { - .name = "Greyscale 8-bit (Y8 )", - .guid = UVC_GUID_FORMAT_Y8, - .fcc = V4L2_PIX_FMT_GREY, - }, - { - .name = "Greyscale 8-bit (D3DFMT_L8)", - .guid = UVC_GUID_FORMAT_D3DFMT_L8, - .fcc = V4L2_PIX_FMT_GREY, - }, - { - .name = "IR 8-bit (L8_IR)", - .guid = UVC_GUID_FORMAT_KSMEDIA_L8_IR, - .fcc = V4L2_PIX_FMT_GREY, - }, - { - .name = "Greyscale 10-bit (Y10 )", - .guid = UVC_GUID_FORMAT_Y10, - .fcc = V4L2_PIX_FMT_Y10, - }, - { - .name = "Greyscale 12-bit (Y12 )", - .guid = UVC_GUID_FORMAT_Y12, - .fcc = V4L2_PIX_FMT_Y12, - }, - { - .name = "Greyscale 16-bit (Y16 )", - .guid = UVC_GUID_FORMAT_Y16, - .fcc = V4L2_PIX_FMT_Y16, - }, - { - .name = "BGGR Bayer (BY8 )", - .guid = UVC_GUID_FORMAT_BY8, - .fcc = V4L2_PIX_FMT_SBGGR8, - }, - { - .name = "BGGR Bayer (BA81)", - .guid = UVC_GUID_FORMAT_BA81, - .fcc = V4L2_PIX_FMT_SBGGR8, - }, - { - .name = "GBRG Bayer (GBRG)", - .guid = UVC_GUID_FORMAT_GBRG, - .fcc = V4L2_PIX_FMT_SGBRG8, - }, - { - .name = "GRBG Bayer (GRBG)", - .guid = UVC_GUID_FORMAT_GRBG, - .fcc = V4L2_PIX_FMT_SGRBG8, - }, - { - .name = "RGGB Bayer (RGGB)", - .guid = UVC_GUID_FORMAT_RGGB, - .fcc = V4L2_PIX_FMT_SRGGB8, - }, - { - .name = "RGB565", - .guid = UVC_GUID_FORMAT_RGBP, - .fcc = V4L2_PIX_FMT_RGB565, - }, - { - .name = "BGR 8:8:8 (BGR3)", - .guid = UVC_GUID_FORMAT_BGR3, - .fcc = V4L2_PIX_FMT_BGR24, - }, - { - .name = "H.264", - .guid = UVC_GUID_FORMAT_H264, - .fcc = V4L2_PIX_FMT_H264, - }, - { - .name = "Greyscale 8 L/R (Y8I)", - .guid = UVC_GUID_FORMAT_Y8I, - .fcc = V4L2_PIX_FMT_Y8I, - }, - { - .name = "Greyscale 12 L/R (Y12I)", - .guid = UVC_GUID_FORMAT_Y12I, - .fcc = V4L2_PIX_FMT_Y12I, - }, - { - .name = "Depth data 16-bit (Z16)", - .guid = UVC_GUID_FORMAT_Z16, - .fcc = V4L2_PIX_FMT_Z16, - }, - { - .name = "Bayer 10-bit (SRGGB10P)", - .guid = UVC_GUID_FORMAT_RW10, - .fcc = V4L2_PIX_FMT_SRGGB10P, - }, - { - .name = "Bayer 16-bit (SBGGR16)", - .guid = UVC_GUID_FORMAT_BG16, - .fcc = V4L2_PIX_FMT_SBGGR16, - }, - { - .name = "Bayer 16-bit (SGBRG16)", - .guid = UVC_GUID_FORMAT_GB16, - .fcc = V4L2_PIX_FMT_SGBRG16, - }, - { - .name = "Bayer 16-bit (SRGGB16)", - .guid = UVC_GUID_FORMAT_RG16, - .fcc = V4L2_PIX_FMT_SRGGB16, - }, - { - .name = "Bayer 16-bit (SGRBG16)", - .guid = UVC_GUID_FORMAT_GR16, - .fcc = V4L2_PIX_FMT_SGRBG16, - }, - { - .name = "Depth data 16-bit (Z16)", - .guid = UVC_GUID_FORMAT_INVZ, - .fcc = V4L2_PIX_FMT_Z16, - }, - { - .name = "Greyscale 10-bit (Y10 )", - .guid = UVC_GUID_FORMAT_INVI, - .fcc = V4L2_PIX_FMT_Y10, - }, - { - .name = "IR:Depth 26-bit (INZI)", - .guid = UVC_GUID_FORMAT_INZI, - .fcc = V4L2_PIX_FMT_INZI, - }, -}; - /* ------------------------------------------------------------------------ * Utility functions */ @@ -235,19 +59,6 @@ struct usb_host_endpoint *uvc_find_endpoint(struct usb_host_interface *alts, return NULL; } -static struct uvc_format_desc *uvc_format_by_guid(const u8 guid[16]) -{ - unsigned int len = ARRAY_SIZE(uvc_fmts); - unsigned int i; - - for (i = 0; i < len; ++i) { - if (memcmp(guid, uvc_fmts[i].guid, 16) == 0) - return &uvc_fmts[i]; - } - - return NULL; -} - static u32 uvc_colorspace(const u8 primaries) { static const u8 colorprimaries[] = { @@ -265,86 +76,6 @@ static u32 uvc_colorspace(const u8 primaries) return 0; } -/* Simplify a fraction using a simple continued fraction decomposition. The - * idea here is to convert fractions such as 333333/10000000 to 1/30 using - * 32 bit arithmetic only. 
The algorithm is not perfect and relies upon two - * arbitrary parameters to remove non-significative terms from the simple - * continued fraction decomposition. Using 8 and 333 for n_terms and threshold - * respectively seems to give nice results. - */ -void uvc_simplify_fraction(u32 *numerator, u32 *denominator, - unsigned int n_terms, unsigned int threshold) -{ - u32 *an; - u32 x, y, r; - unsigned int i, n; - - an = kmalloc_array(n_terms, sizeof(*an), GFP_KERNEL); - if (an == NULL) - return; - - /* Convert the fraction to a simple continued fraction. See - * http://mathforum.org/dr.math/faq/faq.fractions.html - * Stop if the current term is bigger than or equal to the given - * threshold. - */ - x = *numerator; - y = *denominator; - - for (n = 0; n < n_terms && y != 0; ++n) { - an[n] = x / y; - if (an[n] >= threshold) { - if (n < 2) - n++; - break; - } - - r = x - an[n] * y; - x = y; - y = r; - } - - /* Expand the simple continued fraction back to an integer fraction. */ - x = 0; - y = 1; - - for (i = n; i > 0; --i) { - r = y; - y = an[i-1] * y + x; - x = r; - } - - *numerator = y; - *denominator = x; - kfree(an); -} - -/* Convert a fraction to a frame interval in 100ns multiples. The idea here is - * to compute numerator / denominator * 10000000 using 32 bit fixed point - * arithmetic only. - */ -u32 uvc_fraction_to_interval(u32 numerator, u32 denominator) -{ - u32 multiplier; - - /* Saturate the result if the operation would overflow. */ - if (denominator == 0 || - numerator/denominator >= ((u32)-1)/10000000) - return (u32)-1; - - /* Divide both the denominator and the multiplier by two until - * numerator * multiplier doesn't overflow. If anyone knows a better - * algorithm please let me know. - */ - multiplier = 10000000; - while (numerator > ((u32)-1)/multiplier) { - multiplier /= 2; - denominator /= 2; - } - - return denominator ? numerator * multiplier / denominator : 0; -} - /* ------------------------------------------------------------------------ * Terminal and unit management */ diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c index 0371a4a1cd12..2ea93c2db045 100644 --- a/drivers/media/usb/uvc/uvc_v4l2.c +++ b/drivers/media/usb/uvc/uvc_v4l2.c @@ -376,7 +376,7 @@ static int uvc_v4l2_get_streamparm(struct uvc_streaming *stream, mutex_unlock(&stream->mutex); denominator = 10000000; - uvc_simplify_fraction(&numerator, &denominator, 8, 333); + v4l2_simplify_fraction(&numerator, &denominator, 8, 333); memset(parm, 0, sizeof(*parm)); parm->type = stream->type; @@ -417,7 +417,7 @@ static int uvc_v4l2_set_streamparm(struct uvc_streaming *stream, else timeperframe = parm->parm.output.timeperframe; - interval = uvc_fraction_to_interval(timeperframe.numerator, + interval = v4l2_fraction_to_interval(timeperframe.numerator, timeperframe.denominator); uvc_trace(UVC_TRACE_FORMAT, "Setting frame interval to %u/%u (%u).\n", timeperframe.numerator, timeperframe.denominator, interval); @@ -471,7 +471,7 @@ static int uvc_v4l2_set_streamparm(struct uvc_streaming *stream, /* Return the actual frame period. 
*/ timeperframe.numerator = probe.dwFrameInterval; timeperframe.denominator = 10000000; - uvc_simplify_fraction(&timeperframe.numerator, + v4l2_simplify_fraction(&timeperframe.numerator, &timeperframe.denominator, 8, 333); if (parm->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) { @@ -1294,7 +1294,7 @@ static int uvc_ioctl_enum_frameintervals(struct file *file, void *fh, fival->discrete.numerator = frame->dwFrameInterval[index]; fival->discrete.denominator = 10000000; - uvc_simplify_fraction(&fival->discrete.numerator, + v4l2_simplify_fraction(&fival->discrete.numerator, &fival->discrete.denominator, 8, 333); } else { fival->type = V4L2_FRMIVAL_TYPE_STEPWISE; @@ -1304,11 +1304,11 @@ static int uvc_ioctl_enum_frameintervals(struct file *file, void *fh, fival->stepwise.max.denominator = 10000000; fival->stepwise.step.numerator = frame->dwFrameInterval[2]; fival->stepwise.step.denominator = 10000000; - uvc_simplify_fraction(&fival->stepwise.min.numerator, + v4l2_simplify_fraction(&fival->stepwise.min.numerator, &fival->stepwise.min.denominator, 8, 333); - uvc_simplify_fraction(&fival->stepwise.max.numerator, + v4l2_simplify_fraction(&fival->stepwise.max.numerator, &fival->stepwise.max.denominator, 8, 333); - uvc_simplify_fraction(&fival->stepwise.step.numerator, + v4l2_simplify_fraction(&fival->stepwise.step.numerator, &fival->stepwise.step.denominator, 8, 333); } diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h index f335249c93da..8419582ac665 100644 --- a/drivers/media/usb/uvc/uvcvideo.h +++ b/drivers/media/usb/uvc/uvcvideo.h @@ -38,131 +38,6 @@ ((entity)->type & 0x8000) == UVC_TERM_OUTPUT) -/* ------------------------------------------------------------------------ - * GUIDs - */ -#define UVC_GUID_UVC_CAMERA \ - {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01} -#define UVC_GUID_UVC_OUTPUT \ - {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02} -#define UVC_GUID_UVC_MEDIA_TRANSPORT_INPUT \ - {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03} -#define UVC_GUID_UVC_PROCESSING \ - {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01} -#define UVC_GUID_UVC_SELECTOR \ - {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02} - -#define UVC_GUID_FORMAT_MJPEG \ - { 'M', 'J', 'P', 'G', 0x00, 0x00, 0x10, 0x00, \ - 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} -#define UVC_GUID_FORMAT_YUY2 \ - { 'Y', 'U', 'Y', '2', 0x00, 0x00, 0x10, 0x00, \ - 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} -#define UVC_GUID_FORMAT_YUY2_ISIGHT \ - { 'Y', 'U', 'Y', '2', 0x00, 0x00, 0x10, 0x00, \ - 0x80, 0x00, 0x00, 0x00, 0x00, 0x38, 0x9b, 0x71} -#define UVC_GUID_FORMAT_NV12 \ - { 'N', 'V', '1', '2', 0x00, 0x00, 0x10, 0x00, \ - 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} -#define UVC_GUID_FORMAT_YV12 \ - { 'Y', 'V', '1', '2', 0x00, 0x00, 0x10, 0x00, \ - 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} -#define UVC_GUID_FORMAT_I420 \ - { 'I', '4', '2', '0', 0x00, 0x00, 0x10, 0x00, \ - 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} -#define UVC_GUID_FORMAT_UYVY \ - { 'U', 'Y', 'V', 'Y', 0x00, 0x00, 0x10, 0x00, \ - 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} -#define UVC_GUID_FORMAT_Y800 \ - { 'Y', '8', '0', '0', 0x00, 0x00, 0x10, 0x00, \ - 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} -#define UVC_GUID_FORMAT_Y8 \ - { 'Y', '8', ' ', ' ', 0x00, 0x00, 
0x10, 0x00, \ - 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} -#define UVC_GUID_FORMAT_Y10 \ - { 'Y', '1', '0', ' ', 0x00, 0x00, 0x10, 0x00, \ - 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} -#define UVC_GUID_FORMAT_Y12 \ - { 'Y', '1', '2', ' ', 0x00, 0x00, 0x10, 0x00, \ - 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} -#define UVC_GUID_FORMAT_Y16 \ - { 'Y', '1', '6', ' ', 0x00, 0x00, 0x10, 0x00, \ - 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} -#define UVC_GUID_FORMAT_BY8 \ - { 'B', 'Y', '8', ' ', 0x00, 0x00, 0x10, 0x00, \ - 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} -#define UVC_GUID_FORMAT_BA81 \ - { 'B', 'A', '8', '1', 0x00, 0x00, 0x10, 0x00, \ - 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} -#define UVC_GUID_FORMAT_GBRG \ - { 'G', 'B', 'R', 'G', 0x00, 0x00, 0x10, 0x00, \ - 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} -#define UVC_GUID_FORMAT_GRBG \ - { 'G', 'R', 'B', 'G', 0x00, 0x00, 0x10, 0x00, \ - 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} -#define UVC_GUID_FORMAT_RGGB \ - { 'R', 'G', 'G', 'B', 0x00, 0x00, 0x10, 0x00, \ - 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} -#define UVC_GUID_FORMAT_BG16 \ - { 'B', 'G', '1', '6', 0x00, 0x00, 0x10, 0x00, \ - 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} -#define UVC_GUID_FORMAT_GB16 \ - { 'G', 'B', '1', '6', 0x00, 0x00, 0x10, 0x00, \ - 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} -#define UVC_GUID_FORMAT_RG16 \ - { 'R', 'G', '1', '6', 0x00, 0x00, 0x10, 0x00, \ - 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} -#define UVC_GUID_FORMAT_GR16 \ - { 'G', 'R', '1', '6', 0x00, 0x00, 0x10, 0x00, \ - 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} -#define UVC_GUID_FORMAT_RGBP \ - { 'R', 'G', 'B', 'P', 0x00, 0x00, 0x10, 0x00, \ - 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} -#define UVC_GUID_FORMAT_BGR3 \ - { 0x7d, 0xeb, 0x36, 0xe4, 0x4f, 0x52, 0xce, 0x11, \ - 0x9f, 0x53, 0x00, 0x20, 0xaf, 0x0b, 0xa7, 0x70} -#define UVC_GUID_FORMAT_M420 \ - { 'M', '4', '2', '0', 0x00, 0x00, 0x10, 0x00, \ - 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} - -#define UVC_GUID_FORMAT_H264 \ - { 'H', '2', '6', '4', 0x00, 0x00, 0x10, 0x00, \ - 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} -#define UVC_GUID_FORMAT_Y8I \ - { 'Y', '8', 'I', ' ', 0x00, 0x00, 0x10, 0x00, \ - 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} -#define UVC_GUID_FORMAT_Y12I \ - { 'Y', '1', '2', 'I', 0x00, 0x00, 0x10, 0x00, \ - 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} -#define UVC_GUID_FORMAT_Z16 \ - { 'Z', '1', '6', ' ', 0x00, 0x00, 0x10, 0x00, \ - 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} -#define UVC_GUID_FORMAT_RW10 \ - { 'R', 'W', '1', '0', 0x00, 0x00, 0x10, 0x00, \ - 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} -#define UVC_GUID_FORMAT_INVZ \ - { 'I', 'N', 'V', 'Z', 0x90, 0x2d, 0x58, 0x4a, \ - 0x92, 0x0b, 0x77, 0x3f, 0x1f, 0x2c, 0x55, 0x6b} -#define UVC_GUID_FORMAT_INZI \ - { 'I', 'N', 'Z', 'I', 0x66, 0x1a, 0x42, 0xa2, \ - 0x90, 0x65, 0xd0, 0x18, 0x14, 0xa8, 0xef, 0x8a} -#define UVC_GUID_FORMAT_INVI \ - { 'I', 'N', 'V', 'I', 0xdb, 0x57, 0x49, 0x5e, \ - 0x8e, 0x3f, 0xf4, 0x79, 0x53, 0x2b, 0x94, 0x6f} - -#define UVC_GUID_FORMAT_D3DFMT_L8 \ - {0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, \ - 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} -#define UVC_GUID_FORMAT_KSMEDIA_L8_IR \ - {0x32, 0x00, 0x00, 0x00, 0x02, 0x00, 0x10, 0x00, \ - 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} - - /* ------------------------------------------------------------------------ * Driver specific constants. 
*/ @@ -264,13 +139,8 @@ struct uvc_control { struct uvc_fh *handle; /* File handle that last changed the control. */ }; -struct uvc_format_desc { - char *name; - u8 guid[16]; - u32 fcc; -}; - -/* The term 'entity' refers to both UVC units and UVC terminals. +/* + * The term 'entity' refers to both UVC units and UVC terminals. * * The type field is either the terminal type (wTerminalType in the terminal * descriptor), or the unit type (bDescriptorSubtype in the unit descriptor). @@ -805,9 +675,6 @@ int uvc_xu_ctrl_query(struct uvc_video_chain *chain, struct uvc_xu_control_query *xqry); /* Utility functions */ -void uvc_simplify_fraction(u32 *numerator, u32 *denominator, - unsigned int n_terms, unsigned int threshold); -u32 uvc_fraction_to_interval(u32 numerator, u32 denominator); struct usb_host_endpoint *uvc_find_endpoint(struct usb_host_interface *alts, u8 epaddr); diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c index b518b92d6d96..94ad051b7628 100644 --- a/drivers/media/v4l2-core/v4l2-common.c +++ b/drivers/media/v4l2-core/v4l2-common.c @@ -444,3 +444,127 @@ int v4l2_s_parm_cap(struct video_device *vdev, return ret; } EXPORT_SYMBOL_GPL(v4l2_s_parm_cap); + +s64 v4l2_get_link_freq(struct v4l2_ctrl_handler *handler, unsigned int mul, + unsigned int div) +{ + struct v4l2_ctrl *ctrl; + s64 freq; + + ctrl = v4l2_ctrl_find(handler, V4L2_CID_LINK_FREQ); + if (ctrl) { + struct v4l2_querymenu qm = { .id = V4L2_CID_LINK_FREQ }; + int ret; + + qm.index = v4l2_ctrl_g_ctrl(ctrl); + + ret = v4l2_querymenu(handler, &qm); + if (ret) + return -ENOENT; + + freq = qm.value; + } else { + if (!mul || !div) + return -ENOENT; + + ctrl = v4l2_ctrl_find(handler, V4L2_CID_PIXEL_RATE); + if (!ctrl) + return -ENOENT; + + freq = div_u64(v4l2_ctrl_g_ctrl_int64(ctrl) * mul, div); + + pr_warn("%s: Link frequency estimated using pixel rate: result might be inaccurate\n", + __func__); + pr_warn("%s: Consider implementing support for V4L2_CID_LINK_FREQ in the transmitter driver\n", + __func__); + } + + return freq > 0 ? freq : -EINVAL; +} +EXPORT_SYMBOL_GPL(v4l2_get_link_freq); + +/* + * Simplify a fraction using a simple continued fraction decomposition. The + * idea here is to convert fractions such as 333333/10000000 to 1/30 using + * 32 bit arithmetic only. The algorithm is not perfect and relies upon two + * arbitrary parameters to remove non-significative terms from the simple + * continued fraction decomposition. Using 8 and 333 for n_terms and threshold + * respectively seems to give nice results. + */ +void v4l2_simplify_fraction(u32 *numerator, u32 *denominator, + unsigned int n_terms, unsigned int threshold) +{ + u32 *an; + u32 x, y, r; + unsigned int i, n; + + an = kmalloc_array(n_terms, sizeof(*an), GFP_KERNEL); + if (an == NULL) + return; + + /* + * Convert the fraction to a simple continued fraction. See + * https://en.wikipedia.org/wiki/Continued_fraction + * Stop if the current term is bigger than or equal to the given + * threshold. + */ + x = *numerator; + y = *denominator; + + for (n = 0; n < n_terms && y != 0; ++n) { + an[n] = x / y; + if (an[n] >= threshold) { + if (n < 2) + n++; + break; + } + + r = x - an[n] * y; + x = y; + y = r; + } + + /* Expand the simple continued fraction back to an integer fraction. 
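
To make the v4l2_simplify_fraction() comment concrete with a worked example: 333333/10000000 decomposes into the continued fraction 0 + 1/(30 + 1/(33333 + ...)). The 33333 term is at or above the threshold of 333, so the tail is cut there, and expanding what remains, 0 + 1/30, yields 1/30. A userspace re-implementation of the same two loops, for experimenting outside the kernel build (the fixed-size term buffer replaces the kmalloc_array() the kernel version uses):

	#include <stdint.h>
	#include <stdio.h>

	static void simplify(uint32_t *num, uint32_t *den,
			     unsigned int n_terms, unsigned int threshold)
	{
		uint32_t an[16], x = *num, y = *den, r;
		unsigned int i, n;

		if (n_terms > 16)
			n_terms = 16;

		/* Forward pass: continued-fraction terms, cut at threshold. */
		for (n = 0; n < n_terms && y != 0; ++n) {
			an[n] = x / y;
			if (an[n] >= threshold) {
				if (n < 2)
					n++;
				break;
			}
			r = x - an[n] * y;
			x = y;
			y = r;
		}

		/* Backward pass: expand the kept terms into one fraction. */
		x = 0;
		y = 1;
		for (i = n; i > 0; --i) {
			r = y;
			y = an[i - 1] * y + x;
			x = r;
		}
		*num = y;
		*den = x;
	}

	int main(void)
	{
		uint32_t n = 333333, d = 10000000;

		simplify(&n, &d, 8, 333);
		printf("%u/%u\n", n, d);	/* prints 1/30 */
		return 0;
	}
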
*/ + x = 0; + y = 1; + + for (i = n; i > 0; --i) { + r = y; + y = an[i-1] * y + x; + x = r; + } + + *numerator = y; + *denominator = x; + kfree(an); +} +EXPORT_SYMBOL_GPL(v4l2_simplify_fraction); + +/* + * Convert a fraction to a frame interval in 100ns multiples. The idea here is + * to compute numerator / denominator * 10000000 using 32 bit fixed point + * arithmetic only. + */ +u32 v4l2_fraction_to_interval(u32 numerator, u32 denominator) +{ + u32 multiplier; + + /* Saturate the result if the operation would overflow. */ + if (denominator == 0 || + numerator/denominator >= ((u32)-1)/10000000) + return (u32)-1; + + /* + * Divide both the denominator and the multiplier by two until + * numerator * multiplier doesn't overflow. If anyone knows a better + * algorithm please let me know. + */ + multiplier = 10000000; + while (numerator > ((u32)-1)/multiplier) { + multiplier /= 2; + denominator /= 2; + } + + return denominator ? numerator * multiplier / denominator : 0; +} +EXPORT_SYMBOL_GPL(v4l2_fraction_to_interval); diff --git a/drivers/of/base.c b/drivers/of/base.c index b28dee2d2cc6..9b96788b8324 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c @@ -145,22 +145,6 @@ static void __of_free_phandle_cache(void) phandle_cache = NULL; } -int of_free_phandle_cache(void) -{ - unsigned long flags; - - raw_spin_lock_irqsave(&devtree_lock, flags); - - __of_free_phandle_cache(); - - raw_spin_unlock_irqrestore(&devtree_lock, flags); - - return 0; -} -#if !defined(CONFIG_MODULES) -late_initcall_sync(of_free_phandle_cache); -#endif - /* * Caller must hold devtree_lock. */ diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c index 5dc0cf229f9f..6f54f54ba53c 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm.c +++ b/drivers/pinctrl/qcom/pinctrl-msm.c @@ -178,14 +178,14 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev, raw_spin_lock_irqsave(&pctrl->lock, flags); - val = readl(pctrl->regs + g->ctl_reg); + val = readl_relaxed(pctrl->regs + g->ctl_reg); val &= ~mask; val |= i << g->mux_bit; /* Check if egpio present and enable that feature */ if (val & BIT(g->egpio_present)) val |= BIT(g->egpio_enable); - writel(val, pctrl->regs + g->ctl_reg); + writel_relaxed(val, pctrl->regs + g->ctl_reg); raw_spin_unlock_irqrestore(&pctrl->lock, flags); @@ -260,7 +260,7 @@ static int msm_config_group_get(struct pinctrl_dev *pctldev, if (ret < 0) return ret; - val = readl(pctrl->regs + g->ctl_reg); + val = readl_relaxed(pctrl->regs + g->ctl_reg); arg = (val >> bit) & mask; /* Convert register value to pinconf value */ @@ -299,7 +299,7 @@ static int msm_config_group_get(struct pinctrl_dev *pctldev, if (!arg) return -EINVAL; - val = readl(pctrl->regs + g->io_reg); + val = readl_relaxed(pctrl->regs + g->io_reg); arg = !!(val & BIT(g->in_bit)); break; case PIN_CONFIG_INPUT_ENABLE: @@ -373,12 +373,12 @@ static int msm_config_group_set(struct pinctrl_dev *pctldev, case PIN_CONFIG_OUTPUT: /* set output value */ raw_spin_lock_irqsave(&pctrl->lock, flags); - val = readl(pctrl->regs + g->io_reg); + val = readl_relaxed(pctrl->regs + g->io_reg); if (arg) val |= BIT(g->out_bit); else val &= ~BIT(g->out_bit); - writel(val, pctrl->regs + g->io_reg); + writel_relaxed(val, pctrl->regs + g->io_reg); raw_spin_unlock_irqrestore(&pctrl->lock, flags); /* enable output */ @@ -401,10 +401,10 @@ static int msm_config_group_set(struct pinctrl_dev *pctldev, } raw_spin_lock_irqsave(&pctrl->lock, flags); - val = readl(pctrl->regs + g->ctl_reg); + val = readl_relaxed(pctrl->regs + g->ctl_reg); val &= ~(mask << 
bit); val |= arg << bit; - writel(val, pctrl->regs + g->ctl_reg); + writel_relaxed(val, pctrl->regs + g->ctl_reg); raw_spin_unlock_irqrestore(&pctrl->lock, flags); } @@ -428,9 +428,9 @@ static int msm_gpio_direction_input(struct gpio_chip *chip, unsigned offset) raw_spin_lock_irqsave(&pctrl->lock, flags); - val = readl(pctrl->regs + g->ctl_reg); + val = readl_relaxed(pctrl->regs + g->ctl_reg); val &= ~BIT(g->oe_bit); - writel(val, pctrl->regs + g->ctl_reg); + writel_relaxed(val, pctrl->regs + g->ctl_reg); raw_spin_unlock_irqrestore(&pctrl->lock, flags); @@ -448,16 +448,16 @@ static int msm_gpio_direction_output(struct gpio_chip *chip, unsigned offset, in raw_spin_lock_irqsave(&pctrl->lock, flags); - val = readl(pctrl->regs + g->io_reg); + val = readl_relaxed(pctrl->regs + g->io_reg); if (value) val |= BIT(g->out_bit); else val &= ~BIT(g->out_bit); - writel(val, pctrl->regs + g->io_reg); + writel_relaxed(val, pctrl->regs + g->io_reg); - val = readl(pctrl->regs + g->ctl_reg); + val = readl_relaxed(pctrl->regs + g->ctl_reg); val |= BIT(g->oe_bit); - writel(val, pctrl->regs + g->ctl_reg); + writel_relaxed(val, pctrl->regs + g->ctl_reg); raw_spin_unlock_irqrestore(&pctrl->lock, flags); @@ -501,12 +501,12 @@ static void msm_gpio_set(struct gpio_chip *chip, unsigned offset, int value) raw_spin_lock_irqsave(&pctrl->lock, flags); - val = readl(pctrl->regs + g->io_reg); + val = readl_relaxed(pctrl->regs + g->io_reg); if (value) val |= BIT(g->out_bit); else val &= ~BIT(g->out_bit); - writel(val, pctrl->regs + g->io_reg); + writel_relaxed(val, pctrl->regs + g->io_reg); raw_spin_unlock_irqrestore(&pctrl->lock, flags); } @@ -692,14 +692,14 @@ static void msm_gpio_update_dual_edge_pos(struct msm_pinctrl *pctrl, unsigned pol; do { - val = readl(pctrl->regs + g->io_reg) & BIT(g->in_bit); + val = readl_relaxed(pctrl->regs + g->io_reg) & BIT(g->in_bit); - pol = readl(pctrl->regs + g->intr_cfg_reg); + pol = readl_relaxed(pctrl->regs + g->intr_cfg_reg); pol ^= BIT(g->intr_polarity_bit); - writel(pol, pctrl->regs + g->intr_cfg_reg); + writel_relaxed(pol, pctrl->regs + g->intr_cfg_reg); - val2 = readl(pctrl->regs + g->io_reg) & BIT(g->in_bit); - intstat = readl(pctrl->regs + g->intr_status_reg); + val2 = readl_relaxed(pctrl->regs + g->io_reg) & BIT(g->in_bit); + intstat = readl_relaxed(pctrl->regs + g->intr_status_reg); if (intstat || (val == val2)) return; } while (loop_limit-- > 0); @@ -738,7 +738,7 @@ static void _msm_gpio_irq_mask(struct irq_data *d) raw_spin_lock_irqsave(&pctrl->lock, flags); - val = readl(pctrl->regs + g->intr_cfg_reg); + val = readl_relaxed(pctrl->regs + g->intr_cfg_reg); /* * There are two bits that control interrupt forwarding to the CPU. The * RAW_STATUS_EN bit causes the level or edge sensed on the line to be @@ -763,7 +763,7 @@ static void _msm_gpio_irq_mask(struct irq_data *d) val &= ~BIT(g->intr_raw_status_bit); val &= ~BIT(g->intr_enable_bit); - writel(val, pctrl->regs + g->intr_cfg_reg); + writel_relaxed(val, pctrl->regs + g->intr_cfg_reg); clear_bit(d->hwirq, pctrl->enabled_irqs); @@ -788,15 +788,15 @@ static void _msm_gpio_irq_unmask(struct irq_data *d, bool status_clear) * any erroneous interrupts that would have got latched * when the interrupt is not in use. 
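
On the readl()/writel() to readl_relaxed()/writel_relaxed() sweep running through this file: the relaxed accessors drop the memory barriers the non-relaxed versions imply, shaving a barrier off every TLMM register access on the GPIO and interrupt hot paths. That is presumably safe here because each access is a register read-modify-write under pctrl->lock with no DMA buffer handoff involved, though that reasoning is the patch author's to defend. The recurring shape, as a sketch (my_rmw() is hypothetical; the accessors and lock API are the real ones):

	#include <linux/io.h>
	#include <linux/spinlock.h>
	#include <linux/types.h>

	/* Relaxed I/O suffices for a register RMW: the raw spinlock orders
	 * the critical sections and no DMA agent snoops this register. */
	static void my_rmw(void __iomem *reg, u32 clear, u32 set,
			   raw_spinlock_t *lock)
	{
		unsigned long flags;
		u32 val;

		raw_spin_lock_irqsave(lock, flags);
		val = readl_relaxed(reg);
		val &= ~clear;
		val |= set;
		writel_relaxed(val, reg);
		raw_spin_unlock_irqrestore(lock, flags);
	}
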
*/ - val = readl(pctrl->regs + g->intr_status_reg); + val = readl_relaxed(pctrl->regs + g->intr_status_reg); val &= ~BIT(g->intr_status_bit); - writel(val, pctrl->regs + g->intr_status_reg); + writel_relaxed(val, pctrl->regs + g->intr_status_reg); } - val = readl(pctrl->regs + g->intr_cfg_reg); + val = readl_relaxed(pctrl->regs + g->intr_cfg_reg); val |= BIT(g->intr_raw_status_bit); val |= BIT(g->intr_enable_bit); - writel(val, pctrl->regs + g->intr_cfg_reg); + writel_relaxed(val, pctrl->regs + g->intr_cfg_reg); set_bit(d->hwirq, pctrl->enabled_irqs); @@ -933,12 +933,12 @@ static void msm_gpio_irq_ack(struct irq_data *d) raw_spin_lock_irqsave(&pctrl->lock, flags); - val = readl(pctrl->regs + g->intr_status_reg); + val = readl_relaxed(pctrl->regs + g->intr_status_reg); if (g->intr_ack_high) val |= BIT(g->intr_status_bit); else val &= ~BIT(g->intr_status_bit); - writel(val, pctrl->regs + g->intr_status_reg); + writel_relaxed(val, pctrl->regs + g->intr_status_reg); if (test_bit(d->hwirq, pctrl->dual_edge_irqs)) msm_gpio_update_dual_edge_pos(pctrl, g, d); @@ -1104,17 +1104,17 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type) clear_bit(d->hwirq, pctrl->dual_edge_irqs); /* Route interrupts to application cpu */ - val = readl(pctrl->regs + g->intr_target_reg); + val = readl_relaxed(pctrl->regs + g->intr_target_reg); val &= ~(7 << g->intr_target_bit); val |= g->intr_target_kpss_val << g->intr_target_bit; - writel(val, pctrl->regs + g->intr_target_reg); + writel_relaxed(val, pctrl->regs + g->intr_target_reg); /* Update configuration for gpio. * RAW_STATUS_EN is left on for all gpio irqs. Due to the * internal circuitry of TLMM, toggling the RAW_STATUS * could cause the INTR_STATUS to be set for EDGE interrupts. */ - val = readl(pctrl->regs + g->intr_cfg_reg); + val = readl_relaxed(pctrl->regs + g->intr_cfg_reg); val |= BIT(g->intr_raw_status_bit); if (g->intr_detection_width == 2) { val &= ~(3 << g->intr_detection_bit); @@ -1162,7 +1162,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type) } else { BUG(); } - writel(val, pctrl->regs + g->intr_cfg_reg); + writel_relaxed(val, pctrl->regs + g->intr_cfg_reg); if (test_bit(d->hwirq, pctrl->dual_edge_irqs)) msm_gpio_update_dual_edge_pos(pctrl, g, d); @@ -1237,7 +1237,7 @@ static void msm_gpio_irq_handler(struct irq_desc *desc) */ for_each_set_bit(i, pctrl->enabled_irqs, pctrl->chip.ngpio) { g = &pctrl->soc->groups[i]; - val = readl(pctrl->regs + g->intr_status_reg); + val = readl_relaxed(pctrl->regs + g->intr_status_reg); if (val & BIT(g->intr_status_bit)) { irq_pin = irq_find_mapping(gc->irq.domain, i); generic_handle_irq(irq_pin); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c index 21ce25ebbda9..493938cf8b9c 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c @@ -811,7 +811,7 @@ static int ipa3_uc_send_cmd_64b_param(u32 cmd_lo, u32 cmd_hi, u32 opcode, } } usleep_range(IPA_UC_POLL_SLEEP_USEC, - IPA_UC_POLL_SLEEP_USEC); + IPA_UC_POLL_SLEEP_USEC * 1.2); } if (index == IPA_UC_POLL_MAX_RETRY) { diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c index 84b94e028707..816a49c65e92 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c @@ -2300,7 +2300,7 @@ int ipa3_disable_gsi_wdi_pipe(u32 clnt_hdl) } } usleep_range(IPA_UC_POLL_SLEEP_USEC * IPA_UC_POLL_SLEEP_USEC, - IPA_UC_POLL_SLEEP_USEC * 
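
Note on the pinctrl-msm hunks above: readl()/writel() include barriers that order MMIO against DMA-visible memory; the _relaxed variants do not. The conversion looks safe here because every access is a read-modify-write of the TLMM's own registers under a raw spinlock, and accesses to the same device stay ordered with respect to each other; it would not be safe for a register write that must be observed before or after a DMA buffer. The pattern, as a minimal sketch with a hypothetical struct and names:

	#include <linux/bits.h>
	#include <linux/io.h>
	#include <linux/spinlock.h>

	struct my_ctrl {			/* hypothetical device state */
		void __iomem *regs;
		raw_spinlock_t lock;
	};

	/* Relaxed accessors skip the rmb()/wmb() of readl()/writel(); reads and
	 * writes to the same endpoint remain ordered, so this RMW is safe. */
	static void toggle_bit_relaxed(struct my_ctrl *ctrl, u32 off, u32 bit)
	{
		unsigned long flags;
		u32 val;

		raw_spin_lock_irqsave(&ctrl->lock, flags);
		val = readl_relaxed(ctrl->regs + off);
		val ^= BIT(bit);
		writel_relaxed(val, ctrl->regs + off);
		raw_spin_unlock_irqrestore(&ctrl->lock, flags);
	}
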
IPA_UC_POLL_SLEEP_USEC); + IPA_UC_POLL_SLEEP_USEC * IPA_UC_POLL_SLEEP_USEC * 1.2); } @@ -2457,7 +2457,7 @@ int ipa3_disable_wdi_pipe(u32 clnt_hdl) } } usleep_range(IPA_UC_POLL_SLEEP_USEC * IPA_UC_POLL_SLEEP_USEC, - IPA_UC_POLL_SLEEP_USEC * IPA_UC_POLL_SLEEP_USEC); + IPA_UC_POLL_SLEEP_USEC * IPA_UC_POLL_SLEEP_USEC * 1.2); } diff --git a/drivers/power/supply/google/google_battery.c b/drivers/power/supply/google/google_battery.c index 1b9ac12ca7e8..4c47e24d8299 100644 --- a/drivers/power/supply/google/google_battery.c +++ b/drivers/power/supply/google/google_battery.c @@ -1899,7 +1899,7 @@ static int batt_chg_stats_cstr(char *buff, int size, static void batt_res_dump_logs(struct batt_res *rstate) { - pr_info("RES: req:%d, sample:%d[%d], filt_cnt:%d, res_avg:%d\n", + pr_debug("RES: req:%d, sample:%d[%d], filt_cnt:%d, res_avg:%d\n", rstate->estimate_requested, rstate->sample_accumulator, rstate->sample_count, rstate->filter_count, rstate->resistance_avg); @@ -2186,7 +2186,7 @@ static int msc_logic_irdrop(struct batt_drv *batt_drv, if (batt_drv->checked_cv_cnt == 0) batt_drv->checked_cv_cnt = 1; - pr_info("MSC_FAST vt=%d vb=%d fv_uv=%d->%d vchrg=%d cv_cnt=%d\n", + pr_debug("MSC_FAST vt=%d vb=%d fv_uv=%d->%d vchrg=%d cv_cnt=%d\n", vtier, vbatt, batt_drv->fv_uv, *fv_uv, batt_drv->chg_state.f.vchrg, batt_drv->checked_cv_cnt); @@ -2216,14 +2216,14 @@ static int msc_logic_irdrop(struct batt_drv *batt_drv, *update_interval = profile->cv_update_interval; batt_drv->checked_cv_cnt = 0; - pr_info("MSC_TYPE vt=%d vb=%d fv_uv=%d chg_type=%d\n", + pr_debug("MSC_TYPE vt=%d vb=%d fv_uv=%d chg_type=%d\n", vtier, vbatt, *fv_uv, chg_type); } else if (batt_drv->checked_ov_cnt) { /* TAPER_DLY: countdown to raise fv_uv and/or check * for tier switch, will keep steady... */ - pr_info("MSC_DLY vt=%d vb=%d fv_uv=%d margin=%d cv_cnt=%d, ov_cnt=%d\n", + pr_debug("MSC_DLY vt=%d vb=%d fv_uv=%d margin=%d cv_cnt=%d, ov_cnt=%d\n", vtier, vbatt, *fv_uv, profile->cv_range_accuracy, batt_drv->checked_cv_cnt, batt_drv->checked_ov_cnt); @@ -2238,7 +2238,7 @@ static int msc_logic_irdrop(struct batt_drv *batt_drv, msc_state = MSC_STEADY; *update_interval = profile->cv_update_interval; - pr_info("MSC_STEADY vt=%d vb=%d fv_uv=%d margin=%d\n", + pr_debug("MSC_STEADY vt=%d vb=%d fv_uv=%d margin=%d\n", vtier, vbatt, *fv_uv, profile->cv_range_accuracy); } else if (batt_drv->checked_tier_switch_cnt >= (switch_cnt - 1)) { @@ -2249,7 +2249,7 @@ static int msc_logic_irdrop(struct batt_drv *batt_drv, msc_state = MSC_TIERCNTING; *update_interval = profile->cv_update_interval; - pr_info("MSC_TIERCNTING vt=%d vb=%d fv_uv=%d margin=%d\n", + pr_debug("MSC_TIERCNTING vt=%d vb=%d fv_uv=%d margin=%d\n", vtier, vbatt, *fv_uv, profile->cv_range_accuracy); } else { @@ -2264,7 +2264,7 @@ static int msc_logic_irdrop(struct batt_drv *batt_drv, /* debounce next taper voltage adjustment */ batt_drv->checked_cv_cnt = profile->cv_debounce_cnt; - pr_info("MSC_RAISE vt=%d vb=%d fv_uv=%d->%d\n", + pr_debug("MSC_RAISE vt=%d vb=%d fv_uv=%d->%d\n", vtier, vbatt, batt_drv->fv_uv, *fv_uv); } @@ -2572,7 +2572,7 @@ static int msc_pm_hold(int msc_state) pm_state = 0; /* pm_relax */ break; default: - pr_info("hold not defined for msc_state=%d\n", msc_state); + pr_debug("hold not defined for msc_state=%d\n", msc_state); pm_state = 0; /* pm_relax */ break; } @@ -2743,7 +2743,7 @@ static int msc_logic(struct batt_drv *batt_drv) return 0; } else if (batt_drv->jeita_stop_charging) { - pr_info("MSC_JEITA temp=%d ok, enabling charging\n", temp); + pr_debug("MSC_JEITA temp=%d ok, 
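
Caveat on the IPA usleep_range() hunks above: "IPA_UC_POLL_SLEEP_USEC * 1.2" is a floating-point expression. Assuming the macro expands to an integer literal, the compiler folds it at build time and no FPU code is emitted, but kernel style avoids FP literals entirely and checkpatch will typically flag them; the same 20% slack is expressible in integer arithmetic, sketched below with an assumed macro value (not taken from the driver). The pre-existing squaring of the constant in the two WDI call sites is untouched by this patch.

	#include <linux/delay.h>

	#define IPA_UC_POLL_SLEEP_USEC	200	/* assumed value, for illustration */

	/* Same 20% slack as "* 1.2", in integer arithmetic.  usleep_range()
	 * takes a min/max window so the scheduler can coalesce wakeups. */
	static inline void ipa_uc_poll_sleep(void)
	{
		usleep_range(IPA_UC_POLL_SLEEP_USEC,
			     IPA_UC_POLL_SLEEP_USEC * 6 / 5);
	}
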
enabling charging\n", temp); batt_drv->jeita_stop_charging = 0; } @@ -2771,7 +2771,7 @@ static int msc_logic(struct batt_drv *batt_drv) if (batt_drv->vbatt_idx == -1) vbatt_idx = gbms_msc_voltage_idx(profile, vbatt); - pr_info("MSC_SEED temp=%d vbatt=%d temp_idx:%d->%d, vbatt_idx:%d->%d\n", + pr_debug("MSC_SEED temp=%d vbatt=%d temp_idx:%d->%d, vbatt_idx:%d->%d\n", temp, vbatt, batt_drv->temp_idx, temp_idx, batt_drv->vbatt_idx, vbatt_idx); @@ -2789,7 +2789,7 @@ static int msc_logic(struct batt_drv *batt_drv) msc_state = MSC_DSG; vbatt_idx = gbms_msc_voltage_idx(profile, vbatt); - pr_info("MSC_DSG vbatt_idx:%d->%d vbatt=%d ibatt=%d fv_uv=%d cv_cnt=%d ov_cnt=%d\n", + pr_debug("MSC_DSG vbatt_idx:%d->%d vbatt=%d ibatt=%d fv_uv=%d cv_cnt=%d ov_cnt=%d\n", batt_drv->vbatt_idx, vbatt_idx, vbatt, ibatt, fv_uv, batt_drv->checked_cv_cnt, @@ -2813,7 +2813,7 @@ static int msc_logic(struct batt_drv *batt_drv) ramp_cc_max = msc_logic_ramp_cc_max(batt_drv, vbatt); } - pr_info("MSC_LAST vbatt=%d ibatt=%d fv_uv=%d\n", + pr_debug("MSC_LAST vbatt=%d ibatt=%d fv_uv=%d\n", vbatt, ibatt, fv_uv); } else { @@ -2848,7 +2848,7 @@ static int msc_logic(struct batt_drv *batt_drv) msc_state = MSC_WAIT; batt_drv->checked_cv_cnt -= 1; - pr_info("MSC_WAIT vt=%d vb=%d fv_uv=%d ibatt=%d cv_cnt=%d ov_cnt=%d t_cnt=%d\n", + pr_debug("MSC_WAIT vt=%d vb=%d fv_uv=%d ibatt=%d cv_cnt=%d ov_cnt=%d t_cnt=%d\n", vtier, vbatt, fv_uv, ibatt, batt_drv->checked_cv_cnt, batt_drv->checked_ov_cnt, @@ -2862,7 +2862,7 @@ static int msc_logic(struct batt_drv *batt_drv) msc_state = MSC_RSTC; batt_drv->checked_tier_switch_cnt = 0; - pr_info("MSC_RSTC vt=%d vb=%d fv_uv=%d ibatt=%d cc_next_max=%d t_cnt=%d\n", + pr_debug("MSC_RSTC vt=%d vb=%d fv_uv=%d ibatt=%d cc_next_max=%d t_cnt=%d\n", vtier, vbatt, fv_uv, ibatt, cc_next_max, batt_drv->checked_tier_switch_cnt); } else if (batt_drv->checked_tier_switch_cnt >= switch_cnt) { @@ -2870,14 +2870,14 @@ static int msc_logic(struct batt_drv *batt_drv) msc_state = MSC_NEXT; vbatt_idx = batt_drv->vbatt_idx + 1; - pr_info("MSC_NEXT tier vb=%d ibatt=%d vbatt_idx=%d->%d\n", + pr_debug("MSC_NEXT tier vb=%d ibatt=%d vbatt_idx=%d->%d\n", vbatt, ibatt, batt_drv->vbatt_idx, vbatt_idx); } else { /* current under next tier, +1 on tier switch count */ msc_state = MSC_NYET; batt_drv->checked_tier_switch_cnt++; - pr_info("MSC_NYET ibatt=%d cc_next_max=%d t_cnt=%d\n", + pr_debug("MSC_NYET ibatt=%d cc_next_max=%d t_cnt=%d\n", ibatt, cc_next_max, batt_drv->checked_tier_switch_cnt); } @@ -3009,7 +3009,7 @@ static int batt_chg_logic(struct batt_drv *batt_drv) __pm_stay_awake(batt_drv->msc_ws); - pr_info("MSC_DIN chg_state=%lx f=0x%x chg_s=%s chg_t=%s vchg=%d icl=%d\n", + pr_debug("MSC_DIN chg_state=%lx f=0x%x chg_s=%s chg_t=%s vchg=%d icl=%d\n", (unsigned long)chg_state->v, chg_state->f.flags, gbms_chg_status_s(chg_state->f.chg_status), @@ -3150,7 +3150,7 @@ static int batt_chg_logic(struct batt_drv *batt_drv) if (batt_drv->jeita_stop_charging) batt_drv->cc_max = 0; - pr_info("%s msc_state=%d cv_cnt=%d ov_cnt=%d temp_idx:%d, vbatt_idx:%d fv_uv=%d cc_max=%d update_interval=%d\n", + pr_debug("%s msc_state=%d cv_cnt=%d ov_cnt=%d temp_idx:%d, vbatt_idx:%d fv_uv=%d cc_max=%d update_interval=%d\n", (disable_votes) ? 
"MSC_DOUT" : "MSC_VOTE", batt_drv->msc_state, batt_drv->checked_cv_cnt, batt_drv->checked_ov_cnt, diff --git a/drivers/power/supply/google/google_charger.c b/drivers/power/supply/google/google_charger.c index b45aaa2c8b14..8cd23554721f 100644 --- a/drivers/power/supply/google/google_charger.c +++ b/drivers/power/supply/google/google_charger.c @@ -489,7 +489,7 @@ static int info_usb_state(union gbms_ce_adapter_details *ad, voltage_max = GPSY_GET_PROP(usb_psy, POWER_SUPPLY_PROP_VOLTAGE_MAX); amperage_max = GPSY_GET_PROP(usb_psy, POWER_SUPPLY_PROP_CURRENT_MAX); - pr_info("usbchg=%s typec=%s usbv=%d usbc=%d usbMv=%d usbMc=%d\n", + pr_debug("usbchg=%s typec=%s usbv=%d usbc=%d usbMv=%d usbMc=%d\n", psy_usb_type_str[usb_type], tcpm_psy ? psy_usbc_type_str[usbc_type] : "null", GPSY_GET_PROP(usb_psy, POWER_SUPPLY_PROP_VOLTAGE_NOW) / 1000, @@ -588,7 +588,7 @@ static int chg_update_charger(struct chg_drv *chg_drv, int fv_uv, int cc_max) */ rc = chg_set_charger(chg_psy, fv_uv, fcc); if (rc == 0) { - pr_info("MSC_CHG fv_uv=%d->%d cc_max=%d->%d rc=%d\n", + pr_debug("MSC_CHG fv_uv=%d->%d cc_max=%d->%d rc=%d\n", chg_drv->fv_uv, fv_uv, chg_drv->cc_max, cc_max, rc); @@ -1245,7 +1245,7 @@ static void chg_update_charging_state(struct chg_drv *chg_drv, { /* disable charging is set in retail mode */ if (disable_charging != chg_drv->disable_charging) { - pr_info("MSC_CHG disable_charging %d -> %d", + pr_debug("MSC_CHG disable_charging %d -> %d", chg_drv->disable_charging, disable_charging); GPSY_SET_PROP(chg_drv->chg_psy, @@ -3505,7 +3505,7 @@ static int msc_update_charger_cb(struct votable *votable, alarm_start_relative(&chg_drv->chg_wakeup_alarm, ms_to_ktime(update_interval)); - pr_info("MSC_CHG fv_uv=%d, cc_max=%d, rerun in %d ms (%d)\n", + pr_debug("MSC_CHG fv_uv=%d, cc_max=%d, rerun in %d ms (%d)\n", fv_uv, cc_max, update_interval, rc); msc_done: diff --git a/drivers/power/supply/google/logbuffer.c b/drivers/power/supply/google/logbuffer.c index df0920dfc5a6..4dbc77396106 100644 --- a/drivers/power/supply/google/logbuffer.c +++ b/drivers/power/supply/google/logbuffer.c @@ -24,6 +24,7 @@ #include #include #include +#include #define LOG_BUFFER_ENTRIES 1024 #define LOG_BUFFER_ENTRY_SIZE 256 diff --git a/drivers/power/supply/google/overheat_mitigation.c b/drivers/power/supply/google/overheat_mitigation.c index 4d5a859501f3..9eb27375acdb 100644 --- a/drivers/power/supply/google/overheat_mitigation.c +++ b/drivers/power/supply/google/overheat_mitigation.c @@ -363,7 +363,7 @@ static inline int get_usb_port_temp(struct overheat_info *ovh_info) if (temp == -EINVAL || temp == -ENODATA) return temp; - dev_info(ovh_info->dev, "Update USB port temp:%d\n", temp); + dev_dbg(ovh_info->dev, "Update USB port temp:%d\n", temp); if (temp > ovh_info->max_temp) ovh_info->max_temp = temp; diff --git a/drivers/power/supply/google/sm7250_bms.c b/drivers/power/supply/google/sm7250_bms.c index 3bf9a5db111a..30a2dc41d3b9 100644 --- a/drivers/power/supply/google/sm7250_bms.c +++ b/drivers/power/supply/google/sm7250_bms.c @@ -721,7 +721,7 @@ static int sm7250_get_chg_chgr_state(const struct bms_dev *bms, } chg_state->f.icl = (icl * 50); - pr_info("MSC_PCS chg_state=%lx [0x%x:%d:%d:%d:%d] chg=%c\n", + pr_debug("MSC_PCS chg_state=%lx [0x%x:%d:%d:%d:%d] chg=%c\n", (unsigned long)chg_state->v, chg_state->f.flags, chg_state->f.chg_type, @@ -815,7 +815,7 @@ static int sm7250_get_batt_iterm(struct bms_dev *bms) rc); return rc; } - pr_info("CHGR_ENG_CHARGING_CFG_REG = 0x%02x\n", stat); + pr_debug("CHGR_ENG_CHARGING_CFG_REG = 0x%02x\n", stat); 
if (stat & CHGR_ITERM_USE_ANALOG_BIT) { return -EINVAL; @@ -1007,7 +1007,7 @@ static int sm7250_charge_disable(struct bms_dev *bms, bool disable) CHGR_CHARGING_ENABLE_CMD, CHARGING_ENABLE_CMD_BIT, val); - pr_info("CHARGE_DISABLE : disable=%d -> val=%d (%d)\n", + pr_debug("CHARGE_DISABLE : disable=%d -> val=%d (%d)\n", disable, val, rc); return rc; @@ -1022,7 +1022,7 @@ static int sm7250_charge_pause(struct bms_dev *bms, bool pause) CHGR_CHARGING_PAUSE_CMD, CHARGING_PAUSE_CMD_BIT, val); - pr_info("CHARGE_PAUSE : pause=%d -> val=%d (%d)\n", + pr_debug("CHARGE_PAUSE : pause=%d -> val=%d (%d)\n", pause, val, rc); return rc; @@ -1078,7 +1078,7 @@ static int sm7250_psy_set_property(struct power_supply *psy, } } - pr_info("CONSTANT_CHARGE_CURRENT_MAX : ivalue=%d, val=%d pause=%d (%d)\n", + pr_debug("CONSTANT_CHARGE_CURRENT_MAX : ivalue=%d, val=%d pause=%d (%d)\n", ivalue, val, ivalue == 0, rc); break; case POWER_SUPPLY_PROP_VOLTAGE_MAX: @@ -1097,7 +1097,7 @@ static int sm7250_psy_set_property(struct power_supply *psy, rc = sm7250_write(bms->pmic_regmap, CHGR_FLOAT_VOLTAGE_SETTING, &val, 1); - pr_info("CONSTANT_CHARGE_VOLTAGE_MAX : ivalue=%d, val=%d (%d)\n", + pr_debug("CONSTANT_CHARGE_VOLTAGE_MAX : ivalue=%d, val=%d (%d)\n", ivalue, val, rc); break; case POWER_SUPPLY_PROP_CHARGE_DISABLE: diff --git a/drivers/scsi/ufs/ufs-qcom-debugfs.c b/drivers/scsi/ufs/ufs-qcom-debugfs.c index 677bc8eb84aa..ac5eb1035ada 100644 --- a/drivers/scsi/ufs/ufs-qcom-debugfs.c +++ b/drivers/scsi/ufs/ufs-qcom-debugfs.c @@ -246,40 +246,6 @@ static const struct file_operations ufs_qcom_dbg_dbg_regs_desc = { .release = single_release, }; -static int ufs_qcom_dbg_pm_qos_show(struct seq_file *file, void *data) -{ - struct ufs_qcom_host *host = (struct ufs_qcom_host *)file->private; - unsigned long flags; - int i; - - spin_lock_irqsave(host->hba->host->host_lock, flags); - - seq_printf(file, "enabled: %d\n", host->pm_qos.is_enabled); - for (i = 0; i < host->pm_qos.num_groups && host->pm_qos.groups; i++) - seq_printf(file, - "CPU Group #%d(mask=0x%lx): active_reqs=%d, state=%d, latency=%d\n", - i, host->pm_qos.groups[i].mask.bits[0], - host->pm_qos.groups[i].active_reqs, - host->pm_qos.groups[i].state, - host->pm_qos.groups[i].latency_us); - - spin_unlock_irqrestore(host->hba->host->host_lock, flags); - - return 0; -} - -static int ufs_qcom_dbg_pm_qos_open(struct inode *inode, - struct file *file) -{ - return single_open(file, ufs_qcom_dbg_pm_qos_show, inode->i_private); -} - -static const struct file_operations ufs_qcom_dbg_pm_qos_desc = { - .open = ufs_qcom_dbg_pm_qos_open, - .read = seq_read, - .release = single_release, -}; - void ufs_qcom_dbg_add_debugfs(struct ufs_hba *hba, struct dentry *root) { struct ufs_qcom_host *host; @@ -368,17 +334,6 @@ void ufs_qcom_dbg_add_debugfs(struct ufs_hba *hba, struct dentry *root) goto err; } - host->debugfs_files.pm_qos = - debugfs_create_file("pm_qos", 0400, - host->debugfs_files.debugfs_root, host, - &ufs_qcom_dbg_pm_qos_desc); - if (!host->debugfs_files.dbg_regs) { - dev_err(host->hba->dev, - "%s: failed create dbg_regs debugfs entry\n", - __func__); - goto err; - } - return; err: diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c index 4ed1cb2aa767..f00d58b5b0a6 100644 --- a/drivers/scsi/ufs/ufs-qcom.c +++ b/drivers/scsi/ufs/ufs-qcom.c @@ -34,8 +34,6 @@ #define MAX_PROP_SIZE 32 #define VDDP_REF_CLK_MIN_UV 1200000 #define VDDP_REF_CLK_MAX_UV 1200000 -/* TODO: further tuning for this parameter may be required */ -#define UFS_QCOM_PM_QOS_UNVOTE_TIMEOUT_US (10000) /* 
microseconds */ #define UFS_QCOM_DEFAULT_DBG_PRINT_EN \ (UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_TEST_BUS_EN) @@ -63,7 +61,6 @@ static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host); static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba, u32 clk_1us_cycles, u32 clk_40ns_cycles); -static void ufs_qcom_pm_qos_suspend(struct ufs_qcom_host *host); static void ufs_qcom_dump_regs(struct ufs_hba *hba, int offset, int len, char *prefix) @@ -863,8 +860,6 @@ static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) goto out; } } - /* Unvote PM QoS */ - ufs_qcom_pm_qos_suspend(host); out: return ret; @@ -1599,398 +1594,6 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on, return err; } -#ifdef CONFIG_SMP /* CONFIG_SMP */ -static int ufs_qcom_cpu_to_group(struct ufs_qcom_host *host, int cpu) -{ - int i; - - if (cpu >= 0 && cpu < num_possible_cpus()) - for (i = 0; i < host->pm_qos.num_groups; i++) - if (cpumask_test_cpu(cpu, &host->pm_qos.groups[i].mask)) - return i; - - return host->pm_qos.default_cpu; -} - -static void ufs_qcom_pm_qos_req_start(struct ufs_hba *hba, struct request *req) -{ - unsigned long flags; - struct ufs_qcom_host *host; - struct ufs_qcom_pm_qos_cpu_group *group; - - if (!hba || !req) - return; - - host = ufshcd_get_variant(hba); - if (!host->pm_qos.groups) - return; - - group = &host->pm_qos.groups[ufs_qcom_cpu_to_group(host, req->cpu)]; - - spin_lock_irqsave(hba->host->host_lock, flags); - if (!host->pm_qos.is_enabled) - goto out; - - group->active_reqs++; - if (group->state != PM_QOS_REQ_VOTE && - group->state != PM_QOS_VOTED) { - group->state = PM_QOS_REQ_VOTE; - queue_work(host->pm_qos.workq, &group->vote_work); - } -out: - spin_unlock_irqrestore(hba->host->host_lock, flags); -} - -/* hba->host->host_lock is assumed to be held by caller */ -static void __ufs_qcom_pm_qos_req_end(struct ufs_qcom_host *host, int req_cpu) -{ - struct ufs_qcom_pm_qos_cpu_group *group; - - if (!host->pm_qos.groups || !host->pm_qos.is_enabled) - return; - - group = &host->pm_qos.groups[ufs_qcom_cpu_to_group(host, req_cpu)]; - - if (group->active_reqs <= 0) - pr_err_ratelimited("ufshcd-qcom: active req coount is negative: %d\n", - group->active_reqs); - if (--group->active_reqs) - return; - group->state = PM_QOS_REQ_UNVOTE; - queue_work(host->pm_qos.workq, &group->unvote_work); -} - -static void ufs_qcom_pm_qos_req_end(struct ufs_hba *hba, struct request *req, - bool should_lock) -{ - unsigned long flags = 0; - - if (!hba || !req) - return; - - if (should_lock) - spin_lock_irqsave(hba->host->host_lock, flags); - __ufs_qcom_pm_qos_req_end(ufshcd_get_variant(hba), req->cpu); - if (should_lock) - spin_unlock_irqrestore(hba->host->host_lock, flags); -} - -static void ufs_qcom_pm_qos_vote_work(struct work_struct *work) -{ - struct ufs_qcom_pm_qos_cpu_group *group = - container_of(work, struct ufs_qcom_pm_qos_cpu_group, vote_work); - struct ufs_qcom_host *host = group->host; - unsigned long flags; - - spin_lock_irqsave(host->hba->host->host_lock, flags); - - if (!host->pm_qos.is_enabled || !group->active_reqs) { - spin_unlock_irqrestore(host->hba->host->host_lock, flags); - return; - } - - group->state = PM_QOS_VOTED; - spin_unlock_irqrestore(host->hba->host->host_lock, flags); - - pm_qos_update_request(&group->req, group->latency_us); -} - -static void ufs_qcom_pm_qos_unvote_work(struct work_struct *work) -{ - struct ufs_qcom_pm_qos_cpu_group *group = container_of(work, - struct ufs_qcom_pm_qos_cpu_group, unvote_work); - struct 
ufs_qcom_host *host = group->host; - unsigned long flags; - - /* - * Check if new requests were submitted in the meantime and do not - * unvote if so. - */ - spin_lock_irqsave(host->hba->host->host_lock, flags); - - if (!host->pm_qos.is_enabled || group->active_reqs) { - spin_unlock_irqrestore(host->hba->host->host_lock, flags); - return; - } - - group->state = PM_QOS_UNVOTED; - spin_unlock_irqrestore(host->hba->host->host_lock, flags); - - pm_qos_update_request_timeout(&group->req, - group->latency_us, UFS_QCOM_PM_QOS_UNVOTE_TIMEOUT_US); -} - -static ssize_t ufs_qcom_pm_qos_enable_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct ufs_hba *hba = dev_get_drvdata(dev->parent); - struct ufs_qcom_host *host = ufshcd_get_variant(hba); - - return snprintf(buf, PAGE_SIZE, "%d\n", host->pm_qos.is_enabled); -} - -static ssize_t ufs_qcom_pm_qos_enable_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - struct ufs_hba *hba = dev_get_drvdata(dev->parent); - struct ufs_qcom_host *host = ufshcd_get_variant(hba); - unsigned long value; - unsigned long flags; - bool enable; - int i; - - if (kstrtoul(buf, 0, &value)) - return -EINVAL; - - enable = !!value; - - /* - * Must take the spinlock and save irqs before changing the enabled - * flag in order to keep correctness of PM QoS release. - */ - spin_lock_irqsave(hba->host->host_lock, flags); - if (enable == host->pm_qos.is_enabled) { - spin_unlock_irqrestore(hba->host->host_lock, flags); - return count; - } - host->pm_qos.is_enabled = enable; - spin_unlock_irqrestore(hba->host->host_lock, flags); - - if (!enable) - for (i = 0; i < host->pm_qos.num_groups; i++) { - cancel_work_sync(&host->pm_qos.groups[i].vote_work); - cancel_work_sync(&host->pm_qos.groups[i].unvote_work); - spin_lock_irqsave(hba->host->host_lock, flags); - host->pm_qos.groups[i].state = PM_QOS_UNVOTED; - host->pm_qos.groups[i].active_reqs = 0; - spin_unlock_irqrestore(hba->host->host_lock, flags); - pm_qos_update_request(&host->pm_qos.groups[i].req, - PM_QOS_DEFAULT_VALUE); - } - - return count; -} - -static ssize_t ufs_qcom_pm_qos_latency_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct ufs_hba *hba = dev_get_drvdata(dev->parent); - struct ufs_qcom_host *host = ufshcd_get_variant(hba); - int ret; - int i; - int offset = 0; - - for (i = 0; i < host->pm_qos.num_groups; i++) { - ret = snprintf(&buf[offset], PAGE_SIZE, - "cpu group #%d(mask=0x%lx): %d\n", i, - host->pm_qos.groups[i].mask.bits[0], - host->pm_qos.groups[i].latency_us); - if (ret > 0) - offset += ret; - else - break; - } - - return offset; -} - -static ssize_t ufs_qcom_pm_qos_latency_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - struct ufs_hba *hba = dev_get_drvdata(dev->parent); - struct ufs_qcom_host *host = ufshcd_get_variant(hba); - unsigned long value; - unsigned long flags; - char *strbuf; - char *strbuf_copy; - char *token; - int i; - int ret; - - /* reserve one byte for null termination */ - strbuf = kmalloc(count + 1, GFP_KERNEL); - if (!strbuf) - return -ENOMEM; - strbuf_copy = strbuf; - strlcpy(strbuf, buf, count + 1); - - for (i = 0; i < host->pm_qos.num_groups; i++) { - token = strsep(&strbuf, ","); - if (!token) - break; - - ret = kstrtoul(token, 0, &value); - if (ret) - break; - - spin_lock_irqsave(hba->host->host_lock, flags); - host->pm_qos.groups[i].latency_us = value; - spin_unlock_irqrestore(hba->host->host_lock, flags); - } - - kfree(strbuf_copy); - return 
count; -} - -static int ufs_qcom_pm_qos_init(struct ufs_qcom_host *host) -{ - struct device_node *node = host->hba->dev->of_node; - struct device_attribute *attr; - int ret = 0; - int num_groups; - int num_values; - char wq_name[sizeof("ufs_pm_qos_00")]; - int i; - - num_groups = of_property_count_u32_elems(node, - "qcom,pm-qos-cpu-groups"); - if (num_groups <= 0) - goto no_pm_qos; - - num_values = of_property_count_u32_elems(node, - "qcom,pm-qos-cpu-group-latency-us"); - if (num_values <= 0) - goto no_pm_qos; - - if (num_values != num_groups || num_groups > num_possible_cpus()) { - dev_err(host->hba->dev, "%s: invalid count: num_groups=%d, num_values=%d, num_possible_cpus=%d\n", - __func__, num_groups, num_values, num_possible_cpus()); - goto no_pm_qos; - } - - host->pm_qos.num_groups = num_groups; - host->pm_qos.groups = kcalloc(host->pm_qos.num_groups, - sizeof(struct ufs_qcom_pm_qos_cpu_group), GFP_KERNEL); - if (!host->pm_qos.groups) - return -ENOMEM; - - for (i = 0; i < host->pm_qos.num_groups; i++) { - u32 mask; - - ret = of_property_read_u32_index(node, "qcom,pm-qos-cpu-groups", - i, &mask); - if (ret) - goto free_groups; - host->pm_qos.groups[i].mask.bits[0] = mask; - if (!cpumask_subset(&host->pm_qos.groups[i].mask, - cpu_possible_mask)) { - dev_err(host->hba->dev, "%s: invalid mask 0x%x for cpu group\n", - __func__, mask); - goto free_groups; - } - - ret = of_property_read_u32_index(node, - "qcom,pm-qos-cpu-group-latency-us", i, - &host->pm_qos.groups[i].latency_us); - if (ret) - goto free_groups; - - host->pm_qos.groups[i].req.type = PM_QOS_REQ_AFFINE_CORES; - host->pm_qos.groups[i].req.cpus_affine = - host->pm_qos.groups[i].mask; - host->pm_qos.groups[i].state = PM_QOS_UNVOTED; - host->pm_qos.groups[i].active_reqs = 0; - host->pm_qos.groups[i].host = host; - - INIT_WORK(&host->pm_qos.groups[i].vote_work, - ufs_qcom_pm_qos_vote_work); - INIT_WORK(&host->pm_qos.groups[i].unvote_work, - ufs_qcom_pm_qos_unvote_work); - } - - ret = of_property_read_u32(node, "qcom,pm-qos-default-cpu", - &host->pm_qos.default_cpu); - if (ret || host->pm_qos.default_cpu > num_possible_cpus()) - host->pm_qos.default_cpu = 0; - - /* - * Use a single-threaded workqueue to assure work submitted to the queue - * is performed in order. Consider the following 2 possible cases: - * - * 1. A new request arrives and voting work is scheduled for it. Before - * the voting work is performed the request is finished and unvote - * work is also scheduled. - * 2. A request is finished and unvote work is scheduled. Before the - * work is performed a new request arrives and voting work is also - * scheduled. - * - * In both cases a vote work and unvote work wait to be performed. - * If ordering is not guaranteed, then the end state might be the - * opposite of the desired state. 
- */ - snprintf(wq_name, ARRAY_SIZE(wq_name), "%s_%d", "ufs_pm_qos", - host->hba->host->host_no); - host->pm_qos.workq = create_singlethread_workqueue(wq_name); - if (!host->pm_qos.workq) { - dev_err(host->hba->dev, "%s: failed to create the workqueue\n", - __func__); - ret = -ENOMEM; - goto free_groups; - } - - /* Initialization was ok, add all PM QoS requests */ - for (i = 0; i < host->pm_qos.num_groups; i++) - pm_qos_add_request(&host->pm_qos.groups[i].req, - PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); - - /* PM QoS latency sys-fs attribute */ - attr = &host->pm_qos.latency_attr; - attr->show = ufs_qcom_pm_qos_latency_show; - attr->store = ufs_qcom_pm_qos_latency_store; - sysfs_attr_init(&attr->attr); - attr->attr.name = "pm_qos_latency_us"; - attr->attr.mode = 0644; - if (device_create_file(host->hba->var->dev, attr)) - dev_dbg(host->hba->dev, "Failed to create sysfs for pm_qos_latency_us\n"); - - /* PM QoS enable sys-fs attribute */ - attr = &host->pm_qos.enable_attr; - attr->show = ufs_qcom_pm_qos_enable_show; - attr->store = ufs_qcom_pm_qos_enable_store; - sysfs_attr_init(&attr->attr); - attr->attr.name = "pm_qos_enable"; - attr->attr.mode = 0644; - if (device_create_file(host->hba->var->dev, attr)) - dev_dbg(host->hba->dev, "Failed to create sysfs for pm_qos enable\n"); - - host->pm_qos.is_enabled = true; - - return 0; - -free_groups: - kfree(host->pm_qos.groups); -no_pm_qos: - host->pm_qos.groups = NULL; - return ret ? ret : -ENOTSUPP; -} - -static void ufs_qcom_pm_qos_suspend(struct ufs_qcom_host *host) -{ - int i; - - if (!host->pm_qos.groups) - return; - - for (i = 0; i < host->pm_qos.num_groups; i++) - flush_work(&host->pm_qos.groups[i].unvote_work); -} - -static void ufs_qcom_pm_qos_remove(struct ufs_qcom_host *host) -{ - int i; - - if (!host->pm_qos.groups) - return; - - for (i = 0; i < host->pm_qos.num_groups; i++) - pm_qos_remove_request(&host->pm_qos.groups[i].req); - destroy_workqueue(host->pm_qos.workq); - - kfree(host->pm_qos.groups); - host->pm_qos.groups = NULL; -} -#endif /* CONFIG_SMP */ - #define ANDROID_BOOT_DEV_MAX 30 static char android_boot_dev[ANDROID_BOOT_DEV_MAX]; @@ -2156,10 +1759,6 @@ static int ufs_qcom_init(struct ufs_hba *hba) goto out_variant_clear; } - err = ufs_qcom_pm_qos_init(host); - if (err) - dev_info(dev, "%s: PM QoS will be disabled\n", __func__); - /* restore the secure configuration */ ufs_qcom_update_sec_cfg(hba, true); @@ -2284,7 +1883,6 @@ static void ufs_qcom_exit(struct ufs_hba *hba) host->is_phy_pwr_on = false; } phy_exit(host->generic_phy); - ufs_qcom_pm_qos_remove(host); } static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba, @@ -2758,15 +2356,9 @@ static struct ufs_hba_variant_ops ufs_hba_qcom_vops = { .program_key = ufs_qcom_ice_program_key, }; -static struct ufs_hba_pm_qos_variant_ops ufs_hba_pm_qos_variant_ops = { - .req_start = ufs_qcom_pm_qos_req_start, - .req_end = ufs_qcom_pm_qos_req_end, -}; - static struct ufs_hba_variant ufs_hba_qcom_variant = { .name = "qcom", .vops = &ufs_hba_qcom_vops, - .pm_qos_vops = &ufs_hba_pm_qos_variant_ops, }; /** diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h index 3267574098c0..2d156a69dbc6 100644 --- a/drivers/scsi/ufs/ufs-qcom.h +++ b/drivers/scsi/ufs/ufs-qcom.h @@ -16,7 +16,6 @@ #define UFS_QCOM_H_ #include -#include #include "ufshcd.h" #define MAX_UFS_QCOM_HOSTS 2 @@ -254,62 +253,9 @@ struct qcom_debugfs_files { struct dentry *testbus_cfg; struct dentry *testbus_bus; struct dentry *dbg_regs; - struct dentry *pm_qos; }; #endif -/* PM QoS 
voting state */ -enum ufs_qcom_pm_qos_state { - PM_QOS_UNVOTED, - PM_QOS_VOTED, - PM_QOS_REQ_VOTE, - PM_QOS_REQ_UNVOTE, -}; - -/** - * struct ufs_qcom_pm_qos_cpu_group - data related to cluster PM QoS voting - * logic - * @req: request object for PM QoS - * @vote_work: work object for voting procedure - * @unvote_work: work object for un-voting procedure - * @host: back pointer to the main structure - * @state: voting state machine current state - * @latency_us: requested latency value used for cluster voting, in - * microseconds - * @mask: cpu mask defined for this cluster - * @active_reqs: number of active requests on this cluster - */ -struct ufs_qcom_pm_qos_cpu_group { - struct pm_qos_request req; - struct work_struct vote_work; - struct work_struct unvote_work; - struct ufs_qcom_host *host; - enum ufs_qcom_pm_qos_state state; - s32 latency_us; - cpumask_t mask; - int active_reqs; -}; - -/** - * struct ufs_qcom_pm_qos - data related to PM QoS voting logic - * @groups: PM QoS cpu group state array - * @enable_attr: sysfs attribute to enable/disable PM QoS voting logic - * @latency_attr: sysfs attribute to set latency value - * @workq: single threaded workqueue to run PM QoS voting/unvoting - * @num_clusters: number of clusters defined - * @default_cpu: cpu to use for voting for request not specifying a cpu - * @is_enabled: flag specifying whether voting logic is enabled - */ -struct ufs_qcom_pm_qos { - struct ufs_qcom_pm_qos_cpu_group *groups; - struct device_attribute enable_attr; - struct device_attribute latency_attr; - struct workqueue_struct *workq; - int num_groups; - int default_cpu; - bool is_enabled; -}; - struct ufs_qcom_host { /* * Set this capability if host controller supports the QUniPro mode @@ -347,9 +293,6 @@ struct ufs_qcom_host { struct clk *rx_l1_sync_clk; struct clk *tx_l1_sync_clk; - /* PM Quality-of-Service (QoS) data */ - struct ufs_qcom_pm_qos pm_qos; - bool disable_lpm; bool is_lane_clks_enabled; bool sec_cfg_updated; diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 9948a8077197..75c4a4a91654 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -3883,6 +3883,48 @@ static inline void ufshcd_put_read_lock(struct ufs_hba *hba) up_read(&hba->lock); } +static void ufshcd_pm_qos_get_worker(struct work_struct *work) +{ + struct ufs_hba *hba = container_of(work, typeof(*hba), pm_qos.get_work); + + if (!atomic_read(&hba->pm_qos.count)) + return; + + mutex_lock(&hba->pm_qos.lock); + if (atomic_read(&hba->pm_qos.count) && !hba->pm_qos.active) { + pm_qos_update_request(&hba->pm_qos.req, 100); + hba->pm_qos.active = true; + } + mutex_unlock(&hba->pm_qos.lock); +} + +static void ufshcd_pm_qos_put_worker(struct work_struct *work) +{ + struct ufs_hba *hba = container_of(work, typeof(*hba), pm_qos.put_work); + + if (atomic_read(&hba->pm_qos.count)) + return; + + mutex_lock(&hba->pm_qos.lock); + if (!atomic_read(&hba->pm_qos.count) && hba->pm_qos.active) { + pm_qos_update_request(&hba->pm_qos.req, PM_QOS_DEFAULT_VALUE); + hba->pm_qos.active = false; + } + mutex_unlock(&hba->pm_qos.lock); +} + +static void ufshcd_pm_qos_get(struct ufs_hba *hba) +{ + if (atomic_inc_return(&hba->pm_qos.count) == 1) + queue_work(system_unbound_wq, &hba->pm_qos.get_work); +} + +static void ufshcd_pm_qos_put(struct ufs_hba *hba) +{ + if (atomic_dec_return(&hba->pm_qos.count) == 0) + queue_work(system_unbound_wq, &hba->pm_qos.put_work); +} + /** * ufshcd_queuecommand - main entry point for SCSI requests * @host: SCSI host pointer @@ -3898,12 +3940,16 @@ 
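
The ufshcd_pm_qos_* helpers added above are the replacement for the per-CPU-group voting this patch strips out of ufs-qcom: one host-wide request, reference-counted. The submission path only touches an atomic counter, since pm_qos_update_request() can sleep and cannot safely be called there; only the 0->1 and 1->0 transitions queue work, and each worker re-checks the counter under the mutex so a get/put pair racing with a queued worker leaves the vote matching the final count. The idiom, condensed with hypothetical names:

	#include <linux/atomic.h>
	#include <linux/kernel.h>
	#include <linux/mutex.h>
	#include <linux/pm_qos.h>
	#include <linux/workqueue.h>

	struct qos_ref {
		struct pm_qos_request req;
		struct work_struct get_work, put_work;
		struct mutex lock;
		atomic_t count;
		bool active;
	};

	static void qos_ref_put(struct qos_ref *q)
	{
		if (atomic_dec_return(&q->count) == 0)	/* last user: drop the vote */
			queue_work(system_unbound_wq, &q->put_work);
	}

	static void qos_put_worker(struct work_struct *work)
	{
		struct qos_ref *q = container_of(work, struct qos_ref, put_work);

		if (atomic_read(&q->count))	/* a new user raced in; keep the vote */
			return;

		mutex_lock(&q->lock);
		if (!atomic_read(&q->count) && q->active) {
			pm_qos_update_request(&q->req, PM_QOS_DEFAULT_VALUE);
			q->active = false;
		}
		mutex_unlock(&q->lock);
	}
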
static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) int tag; int err = 0; bool has_read_lock = false; + bool cmd_sent = false; hba = shost_priv(host); if (!cmd || !cmd->request || !hba) return -EINVAL; + /* Wake the CPU managing the IRQ as soon as possible */ + ufshcd_pm_qos_get(hba); + tag = cmd->request->tag; if (!ufshcd_valid_tag(hba, tag)) { dev_err(hba->dev, @@ -3917,12 +3963,12 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) if (err == -EPERM) { set_host_byte(cmd, DID_ERROR); cmd->scsi_done(cmd); - return 0; + err = 0; + goto out_pm_qos; } if (err == -EAGAIN) { - hba->ufs_stats.scsi_blk_reqs.ts = ktime_get(); - hba->ufs_stats.scsi_blk_reqs.busy_ctx = SCALING_BUSY; - return SCSI_MLQUEUE_HOST_BUSY; + err = SCSI_MLQUEUE_HOST_BUSY; + goto out_pm_qos; } } else if (err == 1) { has_read_lock = true; @@ -4009,9 +4055,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) if (ufshcd_is_hibern8_on_idle_allowed(hba)) WARN_ON(hba->hibern8_on_idle.state != HIBERN8_EXITED); - /* Vote PM QoS for the request */ - ufshcd_vops_pm_qos_req_start(hba, cmd->request); - WARN_ON(hba->clk_gating.state != CLKS_ON); lrbp = &hba->lrb[tag]; @@ -4029,7 +4072,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) lrbp->cmd = NULL; clear_bit_unlock(tag, &hba->lrb_in_use); ufshcd_release_all(hba); - ufshcd_vops_pm_qos_req_end(hba, cmd->request, true); goto out; } lrbp->req_abort_skip = false; @@ -4041,7 +4083,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) lrbp->cmd = NULL; clear_bit_unlock(tag, &hba->lrb_in_use); ufshcd_release_all(hba); - ufshcd_vops_pm_qos_req_end(hba, cmd->request, true); goto out; } @@ -4059,7 +4100,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) lrbp->cmd = NULL; clear_bit_unlock(tag, &hba->lrb_in_use); ufshcd_release_all(hba); - ufshcd_vops_pm_qos_req_end(hba, cmd->request, true); dev_err(hba->dev, "%s: failed sending command, %d\n", __func__, err); if (err == -EINVAL) { @@ -4067,16 +4107,22 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) if (has_read_lock) ufshcd_put_read_lock(hba); cmd->scsi_done(cmd); - return 0; + err = 0; + goto out_pm_qos; } goto out; } + cmd_sent = true; + out_unlock: spin_unlock_irqrestore(hba->host->host_lock, flags); out: if (has_read_lock) ufshcd_put_read_lock(hba); +out_pm_qos: + if (!cmd_sent) + ufshcd_pm_qos_put(hba); return err; } @@ -6784,15 +6830,8 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, /* Mark completed command as NULL in LRB */ lrbp->cmd = NULL; hba->ufs_stats.clk_rel.ctx = XFR_REQ_COMPL; - if (cmd->request) { - /* - * As we are accessing the "request" structure, - * this must be called before calling - * ->scsi_done() callback. - */ - ufshcd_vops_pm_qos_req_end(hba, cmd->request, - false); - } + if (cmd->request) + ufshcd_pm_qos_put(hba); clear_bit_unlock(index, &hba->lrb_in_use); __ufshcd_release(hba, false); @@ -6852,15 +6891,8 @@ void ufshcd_abort_outstanding_transfer_requests(struct ufs_hba *hba, int result) update_req_stats(hba, lrbp); /* Mark completed command as NULL in LRB */ lrbp->cmd = NULL; - if (cmd->request) { - /* - * As we are accessing the "request" structure, - * this must be called before calling - * ->scsi_done() callback. 
- */ - ufshcd_vops_pm_qos_req_end(hba, cmd->request, - true); - } + if (cmd->request) + ufshcd_pm_qos_put(hba); clear_bit_unlock(index, &hba->lrb_in_use); ufshcd_release_all(hba); @@ -11253,6 +11285,9 @@ void ufshcd_remove(struct ufs_hba *hba) /* disable interrupts */ ufshcd_disable_intr(hba, hba->intr_mask); ufshcd_hba_stop(hba, true); + cancel_work_sync(&hba->pm_qos.put_work); + cancel_work_sync(&hba->pm_qos.get_work); + pm_qos_remove_request(&hba->pm_qos.req); ufshcd_exit_manual_gc(hba); ufshcd_exit_clk_scaling(hba); @@ -11490,6 +11525,14 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) */ mb(); + mutex_init(&hba->pm_qos.lock); + INIT_WORK(&hba->pm_qos.get_work, ufshcd_pm_qos_get_worker); + INIT_WORK(&hba->pm_qos.put_work, ufshcd_pm_qos_put_worker); + hba->pm_qos.req.type = PM_QOS_REQ_AFFINE_IRQ; + hba->pm_qos.req.irq = irq; + pm_qos_add_request(&hba->pm_qos.req, PM_QOS_CPU_DMA_LATENCY, + PM_QOS_DEFAULT_VALUE); + /* IRQ registration */ err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, dev_name(dev), hba); @@ -11576,6 +11619,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) out_remove_scsi_host: scsi_remove_host(hba->host); exit_gating: + pm_qos_remove_request(&hba->pm_qos.req); ufshcd_exit_manual_gc(hba); ufshcd_exit_clk_scaling(hba); ufshcd_exit_clk_gating(hba); diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 4b5322a6713f..25c1f89295ae 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -60,6 +60,7 @@ #include #include #include +#include #include "unipro.h" #include @@ -389,15 +390,6 @@ struct ufs_hba_variant_ops { ANDROID_KABI_RESERVE(4); }; -/** - * struct ufs_hba_pm_qos_variant_ops - variant specific PM QoS callbacks - */ -struct ufs_hba_pm_qos_variant_ops { - void (*req_start)(struct ufs_hba *hba, struct request *req); - void (*req_end)(struct ufs_hba *hba, struct request *req, - bool should_lock); -}; - /** * struct ufs_hba_variant - variant specific parameters * @name: variant name @@ -406,7 +398,6 @@ struct ufs_hba_variant { struct device *dev; const char *name; struct ufs_hba_variant_ops *vops; - struct ufs_hba_pm_qos_variant_ops *pm_qos_vops; }; /* for manual gc */ @@ -1237,6 +1228,15 @@ struct ufs_hba { ANDROID_KABI_RESERVE(2); ANDROID_KABI_RESERVE(3); ANDROID_KABI_RESERVE(4); + + struct { + struct pm_qos_request req; + struct work_struct get_work; + struct work_struct put_work; + struct mutex lock; + atomic_t count; + bool active; + } pm_qos; }; static inline void ufshcd_mark_shutdown_ongoing(struct ufs_hba *hba) @@ -1704,21 +1704,6 @@ static inline void ufshcd_vops_remove_debugfs(struct ufs_hba *hba) } #endif -static inline void ufshcd_vops_pm_qos_req_start(struct ufs_hba *hba, - struct request *req) -{ - if (hba->var && hba->var->pm_qos_vops && - hba->var->pm_qos_vops->req_start) - hba->var->pm_qos_vops->req_start(hba, req); -} - -static inline void ufshcd_vops_pm_qos_req_end(struct ufs_hba *hba, - struct request *req, bool lock) -{ - if (hba->var && hba->var->pm_qos_vops && hba->var->pm_qos_vops->req_end) - hba->var->pm_qos_vops->req_end(hba, req, lock); -} - extern struct ufs_pm_lvl_states ufs_pm_lvl_states[]; /* diff --git a/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c index df90e08a8abb..a457e7e9d8ef 100644 --- a/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c +++ b/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c @@ -1310,8 +1310,7 @@ static uint32_t register_client_adhoc(struct msm_bus_scale_pdata *pdata) 
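
On the init side, the request is registered with type PM_QOS_REQ_AFFINE_IRQ and the controller's IRQ number, so only the CPU servicing UFS interrupts is held out of deep idle while I/O is outstanding (the 100 us vote from the get worker), not every CPU. PM_QOS_REQ_AFFINE_IRQ is part of this Android/MSM pm_qos implementation, not mainline. Note also the teardown ordering in ufshcd_remove(): both workers are cancelled before pm_qos_remove_request(), so no stale worker can touch a removed request. The setup reduces to the following sketch (field names as in the hunk above; the helper name is hypothetical):

	#include <linux/pm_qos.h>
	#include <linux/string.h>

	/* Affine a CPU-DMA-latency vote to the CPU that services one IRQ. */
	static void init_irq_affine_qos(struct pm_qos_request *req, int irq)
	{
		memset(req, 0, sizeof(*req));
		req->type = PM_QOS_REQ_AFFINE_IRQ;
		req->irq = irq;
		pm_qos_add_request(req, PM_QOS_CPU_DMA_LATENCY,
				   PM_QOS_DEFAULT_VALUE);
	}
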
return handle; } -static int update_client_paths(struct msm_bus_client *client, bool log_trns, - unsigned int idx) +static int update_client_paths(struct msm_bus_client *client, unsigned int idx) { int lnode, src, dest, cur_idx; uint64_t req_clk, req_bw, curr_clk, curr_bw, slp_clk, slp_bw; @@ -1372,16 +1371,13 @@ static int update_client_paths(struct msm_bus_client *client, bool log_trns, goto exit_update_client_paths; } - if (log_trns) - getpath_debug(src, lnode, pdata->active_only); } commit_data(); exit_update_client_paths: return ret; } -static int update_client_alc(struct msm_bus_client *client, bool log_trns, - unsigned int idx) +static int update_client_alc(struct msm_bus_client *client, unsigned int idx) { int lnode, cur_idx; uint64_t req_idle_time, req_fal, dual_idle_time, dual_fal, @@ -1560,7 +1556,7 @@ static int update_context(uint32_t cl, bool active_only, pdata->active_only = active_only; msm_bus_dbg_client_data(client->pdata, ctx_idx, cl); - ret = update_client_paths(client, false, ctx_idx); + ret = update_client_paths(client, ctx_idx); if (ret) { pr_err("%s: Err updating path\n", __func__); goto exit_update_context; @@ -1578,8 +1574,6 @@ static int update_request_adhoc(uint32_t cl, unsigned int index) int ret = 0; struct msm_bus_scale_pdata *pdata; struct msm_bus_client *client; - const char *test_cl = "Null"; - bool log_transaction = false; rt_mutex_lock(&msm_bus_adhoc_lock); @@ -1617,17 +1611,14 @@ static int update_request_adhoc(uint32_t cl, unsigned int index) goto exit_update_request; } - if (!strcmp(test_cl, pdata->name)) - log_transaction = true; - MSM_BUS_DBG("%s: cl: %u index: %d curr: %d num_paths: %d\n", __func__, cl, index, client->curr, client->pdata->usecase->num_paths); if (pdata->alc) - ret = update_client_alc(client, log_transaction, index); + ret = update_client_alc(client, index); else { msm_bus_dbg_client_data(client->pdata, index, cl); - ret = update_client_paths(client, log_transaction, index); + ret = update_client_paths(client, index); } if (ret) { pr_err("%s: Err updating path\n", __func__); diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c index cda873bb6cc3..7d03bde9fd76 100644 --- a/drivers/soc/qcom/rpmh-rsc.c +++ b/drivers/soc/qcom/rpmh-rsc.c @@ -463,7 +463,7 @@ int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg) do { ret = tcs_write(drv, msg); if (ret == -EBUSY) { - pr_info_ratelimited("DRV:%s TCS Busy, retrying RPMH message send: addr=%#x\n", + pr_debug("DRV:%s TCS Busy, retrying RPMH message send: addr=%#x\n", drv->name, msg->cmds[0].addr); udelay(10); } diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c index a97bbd89fae2..693bc1e7af7f 100644 --- a/drivers/staging/android/ashmem.c +++ b/drivers/staging/android/ashmem.c @@ -1,241 +1,40 @@ // SPDX-License-Identifier: GPL-2.0 -/* mm/ashmem.c - * - * Anonymous Shared Memory Subsystem, ashmem - * +/* * Copyright (C) 2008 Google, Inc. - * * Robert Love + * Copyright (C) 2021 Sultan Alsawaf . 
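
The msm_bus_arb_rpmh.c hunks above are dead-code removal rather than a behavior change: test_cl was initialized to "Null" and never reassigned, so log_transaction could only become true for a client literally named "Null". Dropping the flag, the getpath_debug() call it guarded, and the extra parameter simplifies update_client_paths()/update_client_alc() without affecting any real client. The rpmh-rsc.c change just above continues the logging theme: the TCS-busy retry message, which can fire on every 10 us retry of a contended RPMH send, moves from ratelimited pr_info() to pr_debug().
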
*/ #define pr_fmt(fmt) "ashmem: " fmt -#include -#include -#include -#include -#include #include -#include -#include #include -#include -#include -#include -#include #include #include "ashmem.h" -#define ASHMEM_NAME_PREFIX "dev/ashmem/" -#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1) -#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN) - /** * struct ashmem_area - The anonymous shared memory area - * @name: The optional name in /proc/pid/maps - * @unpinned_list: The list of all ashmem areas + * @mmap_lock: The mmap mutex lock * @file: The shmem-based backing file * @size: The size of the mapping, in bytes * @prot_mask: The allowed protection bits, as vm_flags * * The lifecycle of this structure is from our parent file's open() until - * its release(). It is also protected by 'ashmem_mutex' + * its release(). * * Warning: Mappings do NOT pin this structure; It dies on close() */ struct ashmem_area { - char name[ASHMEM_FULL_NAME_LEN]; - struct list_head unpinned_list; + struct mutex mmap_lock; struct file *file; size_t size; unsigned long prot_mask; }; -/** - * struct ashmem_range - A range of unpinned/evictable pages - * @lru: The entry in the LRU list - * @unpinned: The entry in its area's unpinned list - * @asma: The associated anonymous shared memory area. - * @pgstart: The starting page (inclusive) - * @pgend: The ending page (inclusive) - * @purged: The purge status (ASHMEM_NOT or ASHMEM_WAS_PURGED) - * - * The lifecycle of this structure is from unpin to pin. - * It is protected by 'ashmem_mutex' - */ -struct ashmem_range { - struct list_head lru; - struct list_head unpinned; - struct ashmem_area *asma; - size_t pgstart; - size_t pgend; - unsigned int purged; -}; - -/* LRU list of unpinned pages, protected by ashmem_mutex */ -static LIST_HEAD(ashmem_lru_list); - -static atomic_t ashmem_shrink_inflight = ATOMIC_INIT(0); -static DECLARE_WAIT_QUEUE_HEAD(ashmem_shrink_wait); - -/* - * long lru_count - The count of pages on our LRU list. - * - * This is protected by ashmem_mutex. - */ -static unsigned long lru_count; - -/* - * ashmem_mutex - protects the list of and each individual ashmem_area - * - * Lock Ordering: ashmex_mutex -> i_mutex -> i_alloc_sem - */ -static DEFINE_MUTEX(ashmem_mutex); - static struct kmem_cache *ashmem_area_cachep __read_mostly; -static struct kmem_cache *ashmem_range_cachep __read_mostly; - -/* - * A separate lockdep class for the backing shmem inodes to resolve the lockdep - * warning about the race between kswapd taking fs_reclaim before inode_lock - * and write syscall taking inode_lock and then fs_reclaim. - * Note that such race is impossible because ashmem does not support write - * syscalls operating on the backing shmem. 
- */ -static struct lock_class_key backing_shmem_inode_class; - -static inline unsigned long range_size(struct ashmem_range *range) -{ - return range->pgend - range->pgstart + 1; -} - -static inline bool range_on_lru(struct ashmem_range *range) -{ - return range->purged == ASHMEM_NOT_PURGED; -} - -static inline bool page_range_subsumes_range(struct ashmem_range *range, - size_t start, size_t end) -{ - return (range->pgstart >= start) && (range->pgend <= end); -} - -static inline bool page_range_subsumed_by_range(struct ashmem_range *range, - size_t start, size_t end) -{ - return (range->pgstart <= start) && (range->pgend >= end); -} - -static inline bool page_in_range(struct ashmem_range *range, size_t page) -{ - return (range->pgstart <= page) && (range->pgend >= page); -} - -static inline bool page_range_in_range(struct ashmem_range *range, - size_t start, size_t end) -{ - return page_in_range(range, start) || page_in_range(range, end) || - page_range_subsumes_range(range, start, end); -} - -static inline bool range_before_page(struct ashmem_range *range, size_t page) -{ - return range->pgend < page; -} #define PROT_MASK (PROT_EXEC | PROT_READ | PROT_WRITE) -/** - * lru_add() - Adds a range of memory to the LRU list - * @range: The memory range being added. - * - * The range is first added to the end (tail) of the LRU list. - * After this, the size of the range is added to @lru_count - */ -static inline void lru_add(struct ashmem_range *range) -{ - list_add_tail(&range->lru, &ashmem_lru_list); - lru_count += range_size(range); -} - -/** - * lru_del() - Removes a range of memory from the LRU list - * @range: The memory range being removed - * - * The range is first deleted from the LRU list. - * After this, the size of the range is removed from @lru_count - */ -static inline void lru_del(struct ashmem_range *range) -{ - list_del(&range->lru); - lru_count -= range_size(range); -} - -/** - * range_alloc() - Allocates and initializes a new ashmem_range structure - * @asma: The associated ashmem_area - * @prev_range: The previous ashmem_range in the sorted asma->unpinned list - * @purged: Initial purge status (ASMEM_NOT_PURGED or ASHMEM_WAS_PURGED) - * @start: The starting page (inclusive) - * @end: The ending page (inclusive) - * - * This function is protected by ashmem_mutex. - */ -static void range_alloc(struct ashmem_area *asma, - struct ashmem_range *prev_range, unsigned int purged, - size_t start, size_t end, - struct ashmem_range **new_range) -{ - struct ashmem_range *range = *new_range; - - *new_range = NULL; - range->asma = asma; - range->pgstart = start; - range->pgend = end; - range->purged = purged; - - list_add_tail(&range->unpinned, &prev_range->unpinned); - - if (range_on_lru(range)) - lru_add(range); -} - -/** - * range_del() - Deletes and dealloctes an ashmem_range structure - * @range: The associated ashmem_range that has previously been allocated - */ -static void range_del(struct ashmem_range *range) -{ - list_del(&range->unpinned); - if (range_on_lru(range)) - lru_del(range); - kmem_cache_free(ashmem_range_cachep, range); -} - -/** - * range_shrink() - Shrinks an ashmem_range - * @range: The associated ashmem_range being shrunk - * @start: The starting byte of the new range - * @end: The ending byte of the new range - * - * This does not modify the data inside the existing range in any way - It - * simply shrinks the boundaries of the range. 
- * - * Theoretically, with a little tweaking, this could eventually be changed - * to range_resize, and expand the lru_count if the new range is larger. - */ -static inline void range_shrink(struct ashmem_range *range, - size_t start, size_t end) -{ - size_t pre = range_size(range); - - range->pgstart = start; - range->pgend = end; - - if (range_on_lru(range)) - lru_count -= pre - range_size(range); -} - /** * ashmem_open() - Opens an Anonymous Shared Memory structure * @inode: The backing file's index node(?) @@ -252,16 +51,18 @@ static int ashmem_open(struct inode *inode, struct file *file) int ret; ret = generic_file_open(inode, file); - if (ret) + if (unlikely(ret)) return ret; - asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL); - if (!asma) + asma = kmem_cache_alloc(ashmem_area_cachep, GFP_KERNEL); + if (unlikely(!asma)) return -ENOMEM; - INIT_LIST_HEAD(&asma->unpinned_list); - memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN); - asma->prot_mask = PROT_MASK; + *asma = (typeof(*asma)){ + .mmap_lock = __MUTEX_INITIALIZER(asma->mmap_lock), + .prot_mask = PROT_MASK + }; + file->private_data = asma; return 0; @@ -278,12 +79,6 @@ static int ashmem_open(struct inode *inode, struct file *file) static int ashmem_release(struct inode *ignored, struct file *file) { struct ashmem_area *asma = file->private_data; - struct ashmem_range *range, *next; - - mutex_lock(&ashmem_mutex); - list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) - range_del(range); - mutex_unlock(&ashmem_mutex); if (asma->file) fput(asma->file); @@ -295,18 +90,16 @@ static int ashmem_release(struct inode *ignored, struct file *file) static ssize_t ashmem_read_iter(struct kiocb *iocb, struct iov_iter *iter) { struct ashmem_area *asma = iocb->ki_filp->private_data; - int ret = 0; - - mutex_lock(&ashmem_mutex); + struct file *vmfile; + ssize_t ret; /* If size is not set, or set to 0, always return EOF. */ - if (asma->size == 0) - goto out_unlock; + if (!READ_ONCE(asma->size)) + return 0; - if (!asma->file) { - ret = -EBADF; - goto out_unlock; - } + vmfile = READ_ONCE(asma->file); + if (!vmfile) + return -EBADF; /* * asma and asma->file are used outside the lock here. We assume @@ -314,41 +107,31 @@ static ssize_t ashmem_read_iter(struct kiocb *iocb, struct iov_iter *iter) * be destroyed until all references to the file are dropped and * ashmem_release is called. 
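
Two details of the ashmem rewrite above are easy to miss. First, kmem_cache_zalloc() can become kmem_cache_alloc() because the compound-literal assignment *asma = (typeof(*asma)){ ... } zero-initializes every member not named in the initializer (C11 6.7.9), so asma->file and asma->size still start out NULL/0. Second, with the global ashmem_mutex gone, readers fetch asma->size and asma->file with READ_ONCE(); this relies on each field being written at most once before it is consumed (size presumably via ASHMEM_SET_SIZE before any mapping exists, file published once under the new per-area mmap_lock). A standalone userspace demonstration of the zero-initialization point:

	#include <stdio.h>
	#include <string.h>

	struct area {			/* stand-in for struct ashmem_area */
		void *file;
		size_t size;
		unsigned long prot_mask;
	};

	int main(void)
	{
		struct area a;

		memset(&a, 0xff, sizeof(a));		/* garbage, like kmem_cache_alloc() */
		a = (struct area){ .prot_mask = 0x7 };	/* unnamed members become zero */
		printf("file=%p size=%zu prot=%#lx\n", a.file, a.size, a.prot_mask);
		return 0;
	}
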
*/ - mutex_unlock(&ashmem_mutex); - ret = vfs_iter_read(asma->file, iter, &iocb->ki_pos, 0); - mutex_lock(&ashmem_mutex); + ret = vfs_iter_read(vmfile, iter, &iocb->ki_pos, 0); if (ret > 0) - asma->file->f_pos = iocb->ki_pos; -out_unlock: - mutex_unlock(&ashmem_mutex); + vmfile->f_pos = iocb->ki_pos; return ret; } static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin) { struct ashmem_area *asma = file->private_data; + struct file *vmfile; loff_t ret; - mutex_lock(&ashmem_mutex); - - if (asma->size == 0) { - mutex_unlock(&ashmem_mutex); + if (!READ_ONCE(asma->size)) return -EINVAL; - } - if (!asma->file) { - mutex_unlock(&ashmem_mutex); + vmfile = READ_ONCE(asma->file); + if (!vmfile) return -EBADF; - } - mutex_unlock(&ashmem_mutex); - - ret = vfs_llseek(asma->file, offset, origin); + ret = vfs_llseek(vmfile, offset, origin); if (ret < 0) return ret; /** Copy f_pos from backing file, since f_ops->llseek() sets it */ - file->f_pos = asma->file->f_pos; + file->f_pos = vmfile->f_pos; return ret; } @@ -373,77 +156,81 @@ ashmem_vmfile_get_unmapped_area(struct file *file, unsigned long addr, return current->mm->get_unmapped_area(file, addr, len, pgoff, flags); } -static int ashmem_mmap(struct file *file, struct vm_area_struct *vma) +static int ashmem_file_setup(struct ashmem_area *asma, size_t size, + struct vm_area_struct *vma) { static struct file_operations vmfile_fops; + static DEFINE_SPINLOCK(vmfile_fops_lock); + struct file *vmfile; + + vmfile = shmem_file_setup(ASHMEM_NAME_DEF, size, vma->vm_flags); + if (IS_ERR(vmfile)) + return PTR_ERR(vmfile); + + /* + * override mmap operation of the vmfile so that it can't be + * remapped which would lead to creation of a new vma with no + * asma permission checks. Have to override get_unmapped_area + * as well to prevent VM_BUG_ON check for f_ops modification. 
+ */ + if (!READ_ONCE(vmfile_fops.mmap)) { + spin_lock(&vmfile_fops_lock); + if (!vmfile_fops.mmap) { + vmfile_fops = *vmfile->f_op; + vmfile_fops.get_unmapped_area = + ashmem_vmfile_get_unmapped_area; + WRITE_ONCE(vmfile_fops.mmap, ashmem_vmfile_mmap); + } + spin_unlock(&vmfile_fops_lock); + } + vmfile->f_op = &vmfile_fops; + vmfile->f_mode |= FMODE_LSEEK; + + WRITE_ONCE(asma->file, vmfile); + return 0; +} + +static int ashmem_mmap(struct file *file, struct vm_area_struct *vma) +{ struct ashmem_area *asma = file->private_data; + unsigned long prot_mask; + size_t size; int ret = 0; - mutex_lock(&ashmem_mutex); - /* user needs to SET_SIZE before mapping */ - if (!asma->size) { - ret = -EINVAL; - goto out; - } + size = READ_ONCE(asma->size); + if (unlikely(!size)) + return -EINVAL; /* requested mapping size larger than object size */ - if (vma->vm_end - vma->vm_start > PAGE_ALIGN(asma->size)) { - ret = -EINVAL; - goto out; - } + if (vma->vm_end - vma->vm_start > PAGE_ALIGN(size)) + return -EINVAL; /* requested protection bits must match our allowed protection mask */ - if ((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask, 0)) & - calc_vm_prot_bits(PROT_MASK, 0)) { - ret = -EPERM; - goto out; - } - vma->vm_flags &= ~calc_vm_may_flags(~asma->prot_mask); + prot_mask = READ_ONCE(asma->prot_mask); + if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(prot_mask, 0)) & + calc_vm_prot_bits(PROT_MASK, 0))) + return -EPERM; - if (!asma->file) { - char *name = ASHMEM_NAME_DEF; - struct file *vmfile; - struct inode *inode; + vma->vm_flags &= ~calc_vm_may_flags(~prot_mask); - if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') - name = asma->name; + if (!READ_ONCE(asma->file)) { + mutex_lock(&asma->mmap_lock); + if (!asma->file) + ret = ashmem_file_setup(asma, size, vma); + mutex_unlock(&asma->mmap_lock); - /* ... and allocate the backing shmem file */ - vmfile = shmem_file_setup(name, asma->size, vma->vm_flags); - if (IS_ERR(vmfile)) { - ret = PTR_ERR(vmfile); - goto out; - } - vmfile->f_mode |= FMODE_LSEEK; - inode = file_inode(vmfile); - lockdep_set_class(&inode->i_rwsem, &backing_shmem_inode_class); - asma->file = vmfile; - /* - * override mmap operation of the vmfile so that it can't be - * remapped which would lead to creation of a new vma with no - * asma permission checks. Have to override get_unmapped_area - * as well to prevent VM_BUG_ON check for f_ops modification. - */ - if (!vmfile_fops.mmap) { - vmfile_fops = *vmfile->f_op; - vmfile_fops.mmap = ashmem_vmfile_mmap; - vmfile_fops.get_unmapped_area = - ashmem_vmfile_get_unmapped_area; - } - vmfile->f_op = &vmfile_fops; + if (ret) + return ret; } + get_file(asma->file); - /* - * XXX - Reworked to use shmem_zero_setup() instead of - * shmem_set_file while we're in staging. -jstultz - */ if (vma->vm_flags & VM_SHARED) { ret = shmem_zero_setup(vma); if (ret) { fput(asma->file); - goto out; + return ret; } } else { vma_set_anonymous(vma); @@ -453,414 +240,55 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma) fput(vma->vm_file); vma->vm_file = asma->file; -out: - mutex_unlock(&ashmem_mutex); - return ret; -} - -/* - * ashmem_shrink - our cache shrinker, called from mm/vmscan.c - * - * 'nr_to_scan' is the number of objects to scan for freeing. - * - * 'gfp_mask' is the mask of the allocation that got us into this mess. - * - * Return value is the number of objects freed or -1 if we cannot - * proceed without risk of deadlock (due to gfp_mask). 
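
ashmem_file_setup() above installs a process-wide override of the shmem file_operations exactly once, via double-checked locking: a READ_ONCE() fast path, then a spinlock-protected re-check before copying the ops table and publishing .mmap with WRITE_ONCE(). One subtlety: the struct copy vmfile_fops = *vmfile->f_op itself stores shmem's non-NULL .mmap before the final WRITE_ONCE(), and WRITE_ONCE() is not a release store, so on weakly ordered CPUs a fast-path reader could in principle observe a partially written table. A stricter variant keeps the flag field NULL throughout the copy and pairs smp_store_release() with smp_load_acquire(); a sketch with hypothetical names:

	#include <linux/atomic.h>
	#include <linux/fs.h>
	#include <linux/spinlock.h>

	static struct file_operations override_fops;
	static DEFINE_SPINLOCK(override_lock);

	/* One-time lazy f_op override; .mmap doubles as the "initialized" flag
	 * and stays NULL until the rest of the table is fully written. */
	static void install_override(struct file *f,
				     int (*mmap_fn)(struct file *, struct vm_area_struct *))
	{
		if (!smp_load_acquire(&override_fops.mmap)) {
			spin_lock(&override_lock);
			if (!override_fops.mmap) {
				struct file_operations tmp = *f->f_op;

				tmp.mmap = NULL;	/* keep flag clear while copying */
				override_fops = tmp;
				smp_store_release(&override_fops.mmap, mmap_fn);
			}
			spin_unlock(&override_lock);
		}
		f->f_op = &override_fops;
	}
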
- * - * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial - * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan' - * pages freed. - */ -static unsigned long -ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) -{ - unsigned long freed = 0; - - /* We might recurse into filesystem code, so bail out if necessary */ - if (!(sc->gfp_mask & __GFP_FS)) - return SHRINK_STOP; - - if (!mutex_trylock(&ashmem_mutex)) - return -1; - - while (!list_empty(&ashmem_lru_list)) { - struct ashmem_range *range = - list_first_entry(&ashmem_lru_list, typeof(*range), lru); - loff_t start = range->pgstart * PAGE_SIZE; - loff_t end = (range->pgend + 1) * PAGE_SIZE; - struct file *f = range->asma->file; - - get_file(f); - atomic_inc(&ashmem_shrink_inflight); - range->purged = ASHMEM_WAS_PURGED; - lru_del(range); - - freed += range_size(range); - mutex_unlock(&ashmem_mutex); - f->f_op->fallocate(f, - FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, - start, end - start); - fput(f); - if (atomic_dec_and_test(&ashmem_shrink_inflight)) - wake_up_all(&ashmem_shrink_wait); - if (!mutex_trylock(&ashmem_mutex)) - goto out; - if (--sc->nr_to_scan <= 0) - break; - } - mutex_unlock(&ashmem_mutex); -out: - return freed; -} - -static unsigned long -ashmem_shrink_count(struct shrinker *shrink, struct shrink_control *sc) -{ - /* - * note that lru_count is count of pages on the lru, not a count of - * objects on the list. This means the scan function needs to return the - * number of pages freed, not the number of objects scanned. - */ - return lru_count; + return 0; } -static struct shrinker ashmem_shrinker = { - .count_objects = ashmem_shrink_count, - .scan_objects = ashmem_shrink_scan, - /* - * XXX (dchinner): I wish people would comment on why they need on - * significant changes to the default value here - */ - .seeks = DEFAULT_SEEKS * 4, -}; - static int set_prot_mask(struct ashmem_area *asma, unsigned long prot) { - int ret = 0; - - mutex_lock(&ashmem_mutex); - /* the user can only remove, not add, protection bits */ - if ((asma->prot_mask & prot) != prot) { - ret = -EINVAL; - goto out; - } + if (unlikely((READ_ONCE(asma->prot_mask) & prot) != prot)) + return -EINVAL; /* does the application expect PROT_READ to imply PROT_EXEC? */ if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC)) prot |= PROT_EXEC; - asma->prot_mask = prot; - -out: - mutex_unlock(&ashmem_mutex); - return ret; -} - -static int set_name(struct ashmem_area *asma, void __user *name) -{ - int len; - int ret = 0; - char local_name[ASHMEM_NAME_LEN]; - - /* - * Holding the ashmem_mutex while doing a copy_from_user might cause - * an data abort which would try to access mmap_sem. If another - * thread has invoked ashmem_mmap then it will be holding the - * semaphore and will be waiting for ashmem_mutex, there by leading to - * deadlock. We'll release the mutex and take the name to a local - * variable that does not need protection and later copy the local - * variable to the structure member with lock held. 
- */ - len = strncpy_from_user(local_name, name, ASHMEM_NAME_LEN); - if (len < 0) - return len; - if (len == ASHMEM_NAME_LEN) - local_name[ASHMEM_NAME_LEN - 1] = '\0'; - mutex_lock(&ashmem_mutex); - /* cannot change an existing mapping's name */ - if (asma->file) - ret = -EINVAL; - else - strcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name); - - mutex_unlock(&ashmem_mutex); - return ret; -} - -static int get_name(struct ashmem_area *asma, void __user *name) -{ - int ret = 0; - size_t len; - /* - * Have a local variable to which we'll copy the content - * from asma with the lock held. Later we can copy this to the user - * space safely without holding any locks. So even if we proceed to - * wait for mmap_sem, it won't lead to deadlock. - */ - char local_name[ASHMEM_NAME_LEN]; - - mutex_lock(&ashmem_mutex); - if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') { - /* - * Copying only `len', instead of ASHMEM_NAME_LEN, bytes - * prevents us from revealing one user's stack to another. - */ - len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1; - memcpy(local_name, asma->name + ASHMEM_NAME_PREFIX_LEN, len); - } else { - len = sizeof(ASHMEM_NAME_DEF); - memcpy(local_name, ASHMEM_NAME_DEF, len); - } - mutex_unlock(&ashmem_mutex); - - /* - * Now we are just copying from the stack variable to userland - * No lock held - */ - if (copy_to_user(name, local_name, len)) - ret = -EFAULT; - return ret; -} - -/* - * ashmem_pin - pin the given ashmem region, returning whether it was - * previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED). - * - * Caller must hold ashmem_mutex. - */ -static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend, - struct ashmem_range **new_range) -{ - struct ashmem_range *range, *next; - int ret = ASHMEM_NOT_PURGED; - - list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) { - /* moved past last applicable page; we can short circuit */ - if (range_before_page(range, pgstart)) - break; - - /* - * The user can ask us to pin pages that span multiple ranges, - * or to pin pages that aren't even unpinned, so this is messy. - * - * Four cases: - * 1. The requested range subsumes an existing range, so we - * just remove the entire matching range. - * 2. The requested range overlaps the start of an existing - * range, so we just update that range. - * 3. The requested range overlaps the end of an existing - * range, so we just update that range. - * 4. The requested range punches a hole in an existing range, - * so we have to update one side of the range and then - * create a new range for the other side. - */ - if (page_range_in_range(range, pgstart, pgend)) { - ret |= range->purged; - - /* Case #1: Easy. Just nuke the whole thing. */ - if (page_range_subsumes_range(range, pgstart, pgend)) { - range_del(range); - continue; - } - - /* Case #2: We overlap from the start, so adjust it */ - if (range->pgstart >= pgstart) { - range_shrink(range, pgend + 1, range->pgend); - continue; - } - - /* Case #3: We overlap from the rear, so adjust it */ - if (range->pgend <= pgend) { - range_shrink(range, range->pgstart, - pgstart - 1); - continue; - } - - /* - * Case #4: We eat a chunk out of the middle. A bit - * more complicated, we allocate a new range for the - * second half and adjust the first chunk's endpoint. - */ - range_alloc(asma, range, range->purged, - pgend + 1, range->pgend, new_range); - range_shrink(range, range->pgstart, pgstart - 1); - break; - } - } - - return ret; -} - -/* - * ashmem_unpin - unpin the given range of pages. 
Returns zero on success. - * - * Caller must hold ashmem_mutex. - */ -static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend, - struct ashmem_range **new_range) -{ - struct ashmem_range *range, *next; - unsigned int purged = ASHMEM_NOT_PURGED; - -restart: - list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) { - /* short circuit: this is our insertion point */ - if (range_before_page(range, pgstart)) - break; - - /* - * The user can ask us to unpin pages that are already entirely - * or partially pinned. We handle those two cases here. - */ - if (page_range_subsumed_by_range(range, pgstart, pgend)) - return 0; - if (page_range_in_range(range, pgstart, pgend)) { - pgstart = min(range->pgstart, pgstart); - pgend = max(range->pgend, pgend); - purged |= range->purged; - range_del(range); - goto restart; - } - } - - range_alloc(asma, range, purged, pgstart, pgend, new_range); + WRITE_ONCE(asma->prot_mask, prot); return 0; } -/* - * ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the - * given interval are unpinned and ASHMEM_IS_PINNED otherwise. - * - * Caller must hold ashmem_mutex. - */ -static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart, - size_t pgend) -{ - struct ashmem_range *range; - int ret = ASHMEM_IS_PINNED; - - list_for_each_entry(range, &asma->unpinned_list, unpinned) { - if (range_before_page(range, pgstart)) - break; - if (page_range_in_range(range, pgstart, pgend)) { - ret = ASHMEM_IS_UNPINNED; - break; - } - } - - return ret; -} - -static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd, - void __user *p) -{ - struct ashmem_pin pin; - size_t pgstart, pgend; - int ret = -EINVAL; - struct ashmem_range *range = NULL; - - if (copy_from_user(&pin, p, sizeof(pin))) - return -EFAULT; - - if (cmd == ASHMEM_PIN || cmd == ASHMEM_UNPIN) { - range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL); - if (!range) - return -ENOMEM; - } - - mutex_lock(&ashmem_mutex); - wait_event(ashmem_shrink_wait, !atomic_read(&ashmem_shrink_inflight)); - - if (!asma->file) - goto out_unlock; - - /* per custom, you can pass zero for len to mean "everything onward" */ - if (!pin.len) - pin.len = PAGE_ALIGN(asma->size) - pin.offset; - - if ((pin.offset | pin.len) & ~PAGE_MASK) - goto out_unlock; - - if (((__u32)-1) - pin.offset < pin.len) - goto out_unlock; - - if (PAGE_ALIGN(asma->size) < pin.offset + pin.len) - goto out_unlock; - - pgstart = pin.offset / PAGE_SIZE; - pgend = pgstart + (pin.len / PAGE_SIZE) - 1; - - switch (cmd) { - case ASHMEM_PIN: - ret = ashmem_pin(asma, pgstart, pgend, &range); - break; - case ASHMEM_UNPIN: - ret = ashmem_unpin(asma, pgstart, pgend, &range); - break; - case ASHMEM_GET_PIN_STATUS: - ret = ashmem_get_pin_status(asma, pgstart, pgend); - break; - } - -out_unlock: - mutex_unlock(&ashmem_mutex); - if (range) - kmem_cache_free(ashmem_range_cachep, range); - - return ret; -} - static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct ashmem_area *asma = file->private_data; - long ret = -ENOTTY; switch (cmd) { case ASHMEM_SET_NAME: - ret = set_name(asma, (void __user *)arg); - break; + return 0; case ASHMEM_GET_NAME: - ret = get_name(asma, (void __user *)arg); - break; + return 0; case ASHMEM_SET_SIZE: - ret = -EINVAL; - mutex_lock(&ashmem_mutex); - if (!asma->file) { - ret = 0; - asma->size = (size_t)arg; - } - mutex_unlock(&ashmem_mutex); - break; + if (READ_ONCE(asma->file)) + return -EINVAL; + + WRITE_ONCE(asma->size, (size_t)arg); + return 0; 
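+ + /* + * The getters below are deliberately lock-free: each one is a single + * word-sized load, and the READ_ONCE() calls pair with the + * WRITE_ONCE() stores in ASHMEM_SET_SIZE, ashmem_file_setup() and + * set_prot_mask(). + */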
case ASHMEM_GET_SIZE: - ret = asma->size; - break; + return READ_ONCE(asma->size); case ASHMEM_SET_PROT_MASK: - ret = set_prot_mask(asma, arg); - break; + return set_prot_mask(asma, arg); case ASHMEM_GET_PROT_MASK: - ret = asma->prot_mask; - break; + return READ_ONCE(asma->prot_mask); case ASHMEM_PIN: + return 0; case ASHMEM_UNPIN: + return 0; case ASHMEM_GET_PIN_STATUS: - ret = ashmem_pin_unpin(asma, cmd, (void __user *)arg); - break; + return ASHMEM_IS_PINNED; case ASHMEM_PURGE_ALL_CACHES: - ret = -EPERM; - if (capable(CAP_SYS_ADMIN)) { - struct shrink_control sc = { - .gfp_mask = GFP_KERNEL, - .nr_to_scan = LONG_MAX, - }; - ret = ashmem_shrink_count(&ashmem_shrinker, &sc); - ashmem_shrink_scan(&ashmem_shrinker, &sc); - } - break; + return capable(CAP_SYS_ADMIN) ? 0 : -EPERM; } - return ret; + return -ENOTTY; } /* support of 32bit userspace on 64bit platforms */ @@ -879,23 +307,7 @@ static long compat_ashmem_ioctl(struct file *file, unsigned int cmd, return ashmem_ioctl(file, cmd, arg); } #endif -#ifdef CONFIG_PROC_FS -static void ashmem_show_fdinfo(struct seq_file *m, struct file *file) -{ - struct ashmem_area *asma = file->private_data; - - mutex_lock(&ashmem_mutex); - - if (asma->file) - seq_printf(m, "inode:\t%ld\n", file_inode(asma->file)->i_ino); - if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') - seq_printf(m, "name:\t%s\n", - asma->name + ASHMEM_NAME_PREFIX_LEN); - - mutex_unlock(&ashmem_mutex); -} -#endif static const struct file_operations ashmem_fops = { .owner = THIS_MODULE, .open = ashmem_open, @@ -907,9 +319,6 @@ static const struct file_operations ashmem_fops = { #ifdef CONFIG_COMPAT .compat_ioctl = compat_ashmem_ioctl, #endif -#ifdef CONFIG_PROC_FS - .show_fdinfo = ashmem_show_fdinfo, -#endif }; static struct miscdevice ashmem_misc = { @@ -920,44 +329,27 @@ static struct miscdevice ashmem_misc = { static int __init ashmem_init(void) { - int ret = -ENOMEM; + int ret; ashmem_area_cachep = kmem_cache_create("ashmem_area_cache", sizeof(struct ashmem_area), 0, 0, NULL); - if (!ashmem_area_cachep) { + if (unlikely(!ashmem_area_cachep)) { pr_err("failed to create slab cache\n"); + ret = -ENOMEM; goto out; } - ashmem_range_cachep = kmem_cache_create("ashmem_range_cache", - sizeof(struct ashmem_range), - 0, 0, NULL); - if (!ashmem_range_cachep) { - pr_err("failed to create slab cache\n"); - goto out_free1; - } - ret = misc_register(&ashmem_misc); - if (ret) { + if (unlikely(ret)) { pr_err("failed to register misc device!\n"); - goto out_free2; - } - - ret = register_shrinker(&ashmem_shrinker); - if (ret) { - pr_err("failed to register shrinker!\n"); - goto out_demisc; + goto out_free1; } pr_info("initialized\n"); return 0; -out_demisc: - misc_deregister(&ashmem_misc); -out_free2: - kmem_cache_destroy(ashmem_range_cachep); out_free1: kmem_cache_destroy(ashmem_area_cachep); out: diff --git a/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/src/wlan_scan_utils_api.c b/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/src/wlan_scan_utils_api.c index 62e8e00e3c10..9001581b0f87 100644 --- a/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/src/wlan_scan_utils_api.c +++ b/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/src/wlan_scan_utils_api.c @@ -798,7 +798,8 @@ static void util_scan_update_esp_data(struct wlan_esp_ie *esp_information, esp_ie = (struct wlan_esp_ie *) util_scan_entry_esp_info(scan_entry); - total_elements = esp_ie->esp_len; + /* Ignore ESP_ID_EXTN element */ + total_elements = esp_ie->esp_len - 1; data = (uint8_t *)esp_ie + 3; do_div(total_elements,
ESP_INFORMATION_LIST_LENGTH); @@ -808,7 +809,7 @@ static void util_scan_update_esp_data(struct wlan_esp_ie *esp_information, } for (i = 0; i < total_elements && - data < ((uint8_t *)esp_ie + esp_ie->esp_len + 3); i++) { + data < ((uint8_t *)esp_ie + esp_ie->esp_len); i++) { esp_info = (struct wlan_esp_info *)data; if (esp_info->access_category == ESP_AC_BK) { qdf_mem_copy(&esp_information->esp_info_AC_BK, @@ -1484,6 +1485,15 @@ static QDF_STATUS util_scan_parse_mbssid(struct wlan_objmgr_pdev *pdev, if (!tmp) break; + /* + * The max_bssid_indicator field is mandatory, therefore the + * length of the MBSSID element should be at least 1. + */ + if (!tmp[1]) { + scm_debug_rl("MBSSID IE is of length zero"); + break; + } + mbssid_info.profile_count = 1 << tmp[2]; mbssid_end_pos = tmp + tmp[1] + 2; /* Skip Element ID, Len, MaxBSSID Indicator */ diff --git a/drivers/staging/qcacld-3.0/configs/default_defconfig b/drivers/staging/qcacld-3.0/configs/default_defconfig index d2d06c89a229..ebf510baa482 100644 --- a/drivers/staging/qcacld-3.0/configs/default_defconfig +++ b/drivers/staging/qcacld-3.0/configs/default_defconfig @@ -68,7 +68,6 @@ ifeq (y,$(findstring y,$(CONFIG_ICNSS) $(CONFIG_ICNSS_MODULE))) CONFIG_HELIUMPLUS := y CONFIG_64BIT_PADDR := y CONFIG_FEATURE_TSO := y - CONFIG_FEATURE_TSO_DEBUG := y ifeq ($(CONFIG_INET_LRO), y) CONFIG_WLAN_LRO := y else @@ -361,7 +360,6 @@ CONFIG_DP_INTR_POLL_BASED := y CONFIG_TX_PER_PDEV_DESC_POOL := y CONFIG_DP_TRACE := y CONFIG_FEATURE_TSO := y -CONFIG_TSO_DEBUG_LOG_ENABLE := y CONFIG_DP_LFR := y CONFIG_DUP_RX_DESC_WAR := y CONFIG_HTT_PADDR64 := y @@ -431,9 +429,6 @@ CONFIG_WLAN_LOG_FATAL := y CONFIG_WLAN_LOG_ERROR := y CONFIG_WLAN_LOG_WARN := y CONFIG_WLAN_LOG_INFO := y -CONFIG_WLAN_LOG_DEBUG := y -CONFIG_WLAN_LOG_ENTER := y -CONFIG_WLAN_LOG_EXIT := y #Enable OL debug and wmi unified functions CONFIG_ATH_PERF_PWR_OFFLOAD := y @@ -684,21 +679,12 @@ CONFIG_FEATURE_HTC_CREDIT_HISTORY := y #Flag to enable MTRACE feature CONFIG_TRACE_RECORD_FEATURE := y -#Flag to enable p2p debug feature -CONFIG_WLAN_FEATURE_P2P_DEBUG := y - -#Flag to enable roam debug log -CONFIG_FEATURE_ROAM_DEBUG := y - #Flag to enable DFS Master feature CONFIG_WLAN_DFS_MASTER_ENABLE := y #Flag to enable WEXT support for STA/AP/P2P interfaces CONFIG_WLAN_WEXT_SUPPORT_ENABLE := y -#Flag to enable/disable MTRACE feature -CONFIG_ENABLE_MTRACE_LOG := y - #Flag to enable nud tracking feature CONFIG_WLAN_NUD_TRACKING := y @@ -741,9 +727,6 @@ CONFIG_FEATURE_WLAN_WAPI := y CONFIG_AGEIE_ON_SCAN_RESULTS := y -#Flag to enable FW log parsing support feature -CONFIG_FEATURE_FW_LOG_PARSING := y - CONFIG_PTT_SOCK_SVC_ENABLE := y CONFIG_SOFTAP_CHANNEL_RANGE := y CONFIG_FEATURE_WLAN_SCAN_PNO := y @@ -842,9 +825,6 @@ ifeq ($(CONFIG_LITHIUM), y) CONFIG_WLAN_DP_PENDING_MEM_FLUSH := y endif -#Flag to enable hdd memory dump feature -CONFIG_FEATURE_MEMDUMP_ENABLE := y - #Flag to enable/disable WLAN D0-WOW ifeq ($(CONFIG_PCI_MSM), y) ifeq ($(CONFIG_HIF_PCI), y) diff --git a/drivers/staging/qcacld-3.0/core/hdd/inc/hdd_dp_cfg.h b/drivers/staging/qcacld-3.0/core/hdd/inc/hdd_dp_cfg.h index bfef109fac5a..427aa0be1bb5 100644 --- a/drivers/staging/qcacld-3.0/core/hdd/inc/hdd_dp_cfg.h +++ b/drivers/staging/qcacld-3.0/core/hdd/inc/hdd_dp_cfg.h @@ -951,30 +951,6 @@ CFG_INI_BOOL("enable_multicast_replay_filter", \ true, "Enable filtering of replayed multicast packets") -/* - * - * rx_wakelock_timeout - Amount of time to hold wakelock for RX unicast packets - * @Min: 0 - * @Max: 100 - * @Default: 50 - * - * This ini item
configures the amount of time, in milliseconds, that the driver - * should prevent system power collapse after receiving an RX unicast packet. - * A conigured value of 0 disables the RX Wakelock feature completely. - * - * Related: None. - * - * Supported Feature: RX Wakelock - * - * Usage: Internal/External - * - * - */ -#define CFG_DP_RX_WAKELOCK_TIMEOUT \ - CFG_INI_UINT("rx_wakelock_timeout", \ - 0, 100, 50, CFG_VALUE_OR_DEFAULT, \ - "Amount of time to hold wakelock for RX unicast packets") - /* * * num_dp_rx_threads - Control to set the number of dp rx threads @@ -1229,7 +1205,6 @@ CFG(CFG_DP_CE_SERVICE_MAX_YIELD_TIME) \ CFG(CFG_DP_ENABLE_TCP_PARAM_UPDATE) \ CFG(CFG_DP_FILTER_MULTICAST_REPLAY) \ - CFG(CFG_DP_RX_WAKELOCK_TIMEOUT) \ CFG(CFG_DP_NUM_DP_RX_THREADS) \ CFG(CFG_DP_HTC_WMI_CREDIT_CNT) \ CFG_DP_ENABLE_FASTPATH_ALL \ diff --git a/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_cfg.h b/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_cfg.h index eb844a5332d9..00658744e402 100644 --- a/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_cfg.h +++ b/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_cfg.h @@ -195,7 +195,6 @@ struct hdd_config { uint32_t rx_thread_affinity_mask; uint8_t cpu_map_list[CFG_DP_RPS_RX_QUEUE_CPU_MAP_LIST_LEN]; bool multicast_replay_filter; - uint32_t rx_wakelock_timeout; uint8_t num_dp_rx_threads; #ifdef CONFIG_DP_TRACE bool enable_dp_trace; diff --git a/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_main.h b/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_main.h index 7fcfd7983ce2..879f664a229b 100644 --- a/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_main.h +++ b/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_main.h @@ -1782,7 +1782,6 @@ struct hdd_context { /** P2P Device MAC Address for the adapter */ struct qdf_mac_addr p2p_device_address; - qdf_wake_lock_t rx_wake_lock; qdf_wake_lock_t sap_wake_lock; /* Flag keeps track of wiphy suspend/resume */ diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_assoc.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_assoc.c index c09ae27e213c..918c381afec2 100644 --- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_assoc.c +++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_assoc.c @@ -5588,9 +5588,7 @@ int hdd_set_genie_to_csr(struct hdd_adapter *adapter, (security_ie[1] + 2))) hdd_err("Failed to set the crypto params from IE"); #endif - qdf_trace_hex_dump(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_DEBUG, - roam_profile->pRSNReqIE, - roam_profile->nRSNReqIELength); + hdd_ctx = WLAN_HDD_GET_CTX(adapter); if (hdd_ctx->force_rsne_override && (security_ie[0] == DOT11F_EID_RSN)) { diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_main.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_main.c index bf44251b7383..b402376c184e 100644 --- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_main.c +++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_main.c @@ -212,30 +212,7 @@ static struct cdev wlan_hdd_state_cdev; static struct class *class; static dev_t device; #ifndef MODULE -static struct gwlan_loader *wlan_loader; -static ssize_t wlan_boot_cb(struct kobject *kobj, - struct kobj_attribute *attr, - const char *buf, size_t count); -struct gwlan_loader { - bool loaded_state; - struct kobject *boot_wlan_obj; - struct attribute_group *attr_group; -}; - -static struct kobj_attribute wlan_boot_attribute = - __ATTR(boot_wlan, 0220, NULL, wlan_boot_cb); - -static struct attribute *attrs[] = { - &wlan_boot_attribute.attr, - NULL, -}; -#define MODULE_INITIALIZED 1 - -#ifdef MULTI_IF_NAME -#define 
WLAN_LOADER_NAME "boot_" MULTI_IF_NAME -#else -#define WLAN_LOADER_NAME "boot_wlan" -#endif +static struct work_struct boot_work; #endif /* the Android framework expects this param even though we don't use it */ @@ -8220,32 +8197,6 @@ static int hdd_init_netlink_services(struct hdd_context *hdd_ctx) return ret; } -/** - * hdd_rx_wake_lock_destroy() - Destroy RX wakelock - * @hdd_ctx: HDD context. - * - * Destroy RX wakelock. - * - * Return: None. - */ -static void hdd_rx_wake_lock_destroy(struct hdd_context *hdd_ctx) -{ - qdf_wake_lock_destroy(&hdd_ctx->rx_wake_lock); -} - -/** - * hdd_rx_wake_lock_create() - Create RX wakelock - * @hdd_ctx: HDD context. - * - * Create RX wakelock. - * - * Return: None. - */ -static void hdd_rx_wake_lock_create(struct hdd_context *hdd_ctx) -{ - qdf_wake_lock_create(&hdd_ctx->rx_wake_lock, "qcom_rx_wakelock"); -} - /** * hdd_context_deinit() - Deinitialize HDD context * @hdd_ctx: HDD context. @@ -8264,8 +8215,6 @@ static int hdd_context_deinit(struct hdd_context *hdd_ctx) hdd_sap_context_destroy(hdd_ctx); - hdd_rx_wake_lock_destroy(hdd_ctx); - hdd_scan_context_destroy(hdd_ctx); qdf_list_destroy(&hdd_ctx->hdd_adapters); @@ -10405,8 +10354,6 @@ static int hdd_context_init(struct hdd_context *hdd_ctx) if (ret) goto list_destroy; - hdd_rx_wake_lock_create(hdd_ctx); - ret = hdd_sap_context_init(hdd_ctx); if (ret) goto scan_destroy; @@ -10430,7 +10377,6 @@ static int hdd_context_init(struct hdd_context *hdd_ctx) scan_destroy: hdd_scan_context_destroy(hdd_ctx); - hdd_rx_wake_lock_destroy(hdd_ctx); list_destroy: qdf_list_destroy(&hdd_ctx->hdd_adapters); @@ -15311,6 +15257,7 @@ static int hdd_driver_load(void) return errno; } +#ifdef MODULE /** * hdd_driver_unload() - Performs the driver-level unload operation * @@ -15397,131 +15344,6 @@ static void hdd_driver_unload(void) hdd_qdf_deinit(); } - -#ifndef MODULE -/** - * wlan_boot_cb() - Wlan boot callback - * @kobj: object whose directory we're creating the link in. - * @attr: attribute the user is interacting with - * @buff: the buffer containing the user data - * @count: number of bytes in the buffer - * - * This callback is invoked when the fs is ready to start the - * wlan driver initialization. - * - * Return: 'count' on success or a negative error code in case of failure - */ -static ssize_t wlan_boot_cb(struct kobject *kobj, - struct kobj_attribute *attr, - const char *buf, - size_t count) -{ - - if (wlan_loader->loaded_state) { - hdd_err("wlan driver already initialized"); - return -EALREADY; - } - - if (hdd_driver_load()) - return -EIO; - - wlan_loader->loaded_state = MODULE_INITIALIZED; - - return count; -} - -/** - * hdd_sysfs_cleanup() - cleanup sysfs - * - * Return: None - * - */ -static void hdd_sysfs_cleanup(void) -{ - /* remove from group */ - if (wlan_loader->boot_wlan_obj && wlan_loader->attr_group) - sysfs_remove_group(wlan_loader->boot_wlan_obj, - wlan_loader->attr_group); - - /* unlink the object from parent */ - kobject_del(wlan_loader->boot_wlan_obj); - - /* free the object */ - kobject_put(wlan_loader->boot_wlan_obj); - - kfree(wlan_loader->attr_group); - kfree(wlan_loader); - - wlan_loader = NULL; -} - -/** - * wlan_init_sysfs() - Creates the sysfs to be invoked when the fs is - * ready - * - * This is creates the syfs entry boot_wlan. Which shall be invoked - * when the filesystem is ready. - * - * QDF API cannot be used here since this function is called even before - * initializing WLAN driver. 
- * - * Return: 0 for success, errno on failure - */ -static int wlan_init_sysfs(void) -{ - int ret = -ENOMEM; - - wlan_loader = kzalloc(sizeof(*wlan_loader), GFP_KERNEL); - if (!wlan_loader) - return -ENOMEM; - - wlan_loader->boot_wlan_obj = NULL; - wlan_loader->attr_group = kzalloc(sizeof(*(wlan_loader->attr_group)), - GFP_KERNEL); - if (!wlan_loader->attr_group) - goto error_return; - - wlan_loader->loaded_state = 0; - wlan_loader->attr_group->attrs = attrs; - - wlan_loader->boot_wlan_obj = kobject_create_and_add(WLAN_LOADER_NAME, - kernel_kobj); - if (!wlan_loader->boot_wlan_obj) { - hdd_err("sysfs create and add failed"); - goto error_return; - } - - ret = sysfs_create_group(wlan_loader->boot_wlan_obj, - wlan_loader->attr_group); - if (ret) { - hdd_err("sysfs create group failed; errno:%d", ret); - goto error_return; - } - - return 0; - -error_return: - hdd_sysfs_cleanup(); - - return ret; -} - -/** - * wlan_deinit_sysfs() - Removes the sysfs created to initialize the wlan - * - * Return: 0 on success or errno on failure - */ -static int wlan_deinit_sysfs(void) -{ - if (!wlan_loader) { - hdd_err("wlan_loader is null"); - return -EINVAL; - } - - hdd_sysfs_cleanup(); - return 0; -} - #endif /* MODULE */ #ifdef MODULE @@ -15540,15 +15362,17 @@ static int hdd_module_init(void) return 0; } #else -static int __init hdd_module_init(void) +static void wlan_hdd_boot_fn(struct work_struct *work) { - int ret = -EINVAL; + hdd_driver_load(); +} - ret = wlan_init_sysfs(); - if (ret) - hdd_err("Failed to create sysfs entry"); +static int __init hdd_module_init(void) +{ + INIT_WORK(&boot_work, wlan_hdd_boot_fn); + schedule_work(&boot_work); - return ret; + return 0; } #endif @@ -15565,12 +15389,6 @@ static void __exit hdd_module_exit(void) { hdd_driver_unload(); } -#else -static void __exit hdd_module_exit(void) -{ - hdd_driver_unload(); - wlan_deinit_sysfs(); -} #endif static int fwpath_changed_handler(const char *kmessage, @@ -16952,8 +16770,12 @@ wlan_hdd_add_monitor_check(struct hdd_context *hdd_ctx, #endif /* WLAN_FEATURE_PKT_CAPTURE */ /* Register the module init/exit functions */ +#ifdef MODULE module_init(hdd_module_init); module_exit(hdd_module_exit); +#else +late_initcall(hdd_module_init); +#endif MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Qualcomm Atheros, Inc."); diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_softap_tx_rx.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_softap_tx_rx.c index 0e3d78652336..53630c6eded5 100644 --- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_softap_tx_rx.c +++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_softap_tx_rx.c @@ -950,19 +950,6 @@ QDF_STATUS hdd_softap_rx_packet_cbk(void *adapter_context, qdf_nbuf_t rx_buf) skb->protocol = eth_type_trans(skb, skb->dev); - /* hold configurable wakelock for unicast traffic */ - if (!hdd_is_current_high_throughput(hdd_ctx) && - hdd_ctx->config->rx_wakelock_timeout && - skb->pkt_type != PACKET_BROADCAST && - skb->pkt_type != PACKET_MULTICAST) { - cds_host_diag_log_work(&hdd_ctx->rx_wake_lock, - hdd_ctx->config->rx_wakelock_timeout, - WIFI_POWER_EVENT_WAKELOCK_HOLD_RX); - qdf_wake_lock_timeout_acquire(&hdd_ctx->rx_wake_lock, - hdd_ctx->config-> - rx_wakelock_timeout); - } - /* Remove SKB from internal tracking table before submitting * it to stack */ diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_tx_rx.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_tx_rx.c index 5823039e9e0d..5a6019dce409 100644 --- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_tx_rx.c +++ 
b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_tx_rx.c @@ -1466,66 +1466,6 @@ static bool hdd_is_mcast_replay(struct sk_buff *skb) return false; } -/** - * hdd_is_arp_local() - check if local or non local arp - * @skb: pointer to sk_buff - * - * Return: true if local arp or false otherwise. - */ -static bool hdd_is_arp_local(struct sk_buff *skb) -{ - struct arphdr *arp; - struct in_ifaddr **ifap = NULL; - struct in_ifaddr *ifa = NULL; - struct in_device *in_dev; - unsigned char *arp_ptr; - __be32 tip; - - arp = (struct arphdr *)skb->data; - if (arp->ar_op == htons(ARPOP_REQUEST)) { - in_dev = __in_dev_get_rtnl(skb->dev); - if (in_dev) { - for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL; - ifap = &ifa->ifa_next) { - if (!strcmp(skb->dev->name, ifa->ifa_label)) - break; - } - } - - if (ifa && ifa->ifa_local) { - arp_ptr = (unsigned char *)(arp + 1); - arp_ptr += (skb->dev->addr_len + 4 + - skb->dev->addr_len); - memcpy(&tip, arp_ptr, 4); - hdd_debug("ARP packet: local IP: %x dest IP: %x", - ifa->ifa_local, tip); - if (ifa->ifa_local == tip) - return true; - } - } - - return false; -} - -/** - * hdd_is_rx_wake_lock_needed() - check if wake lock is needed - * @skb: pointer to sk_buff - * - * RX wake lock is needed for: - * 1) Unicast data packet OR - * 2) Local ARP data packet - * - * Return: true if wake lock is needed or false otherwise. - */ -static bool hdd_is_rx_wake_lock_needed(struct sk_buff *skb) -{ - if ((skb->pkt_type != PACKET_BROADCAST && - skb->pkt_type != PACKET_MULTICAST) || hdd_is_arp_local(skb)) - return true; - - return false; -} - #ifdef RECEIVE_OFFLOAD /** * hdd_resolve_rx_ol_mode() - Resolve Rx offload method, LRO or GRO @@ -2066,7 +2006,6 @@ QDF_STATUS hdd_rx_packet_cbk(void *adapter_context, struct hdd_station_ctx *sta_ctx = NULL; unsigned int cpu_index; struct qdf_mac_addr *mac_addr, *dest_mac_addr; - bool wake_lock = false; uint8_t pkt_type = 0; bool track_arp = false; struct wlan_objmgr_vdev *vdev; @@ -2176,21 +2115,6 @@ QDF_STATUS hdd_rx_packet_cbk(void *adapter_context, continue; } - /* hold configurable wakelock for unicast traffic */ - if (!hdd_is_current_high_throughput(hdd_ctx) && - hdd_ctx->config->rx_wakelock_timeout && - sta_ctx->conn_info.is_authenticated) - wake_lock = hdd_is_rx_wake_lock_needed(skb); - - if (wake_lock) { - cds_host_diag_log_work(&hdd_ctx->rx_wake_lock, - hdd_ctx->config->rx_wakelock_timeout, - WIFI_POWER_EVENT_WAKELOCK_HOLD_RX); - qdf_wake_lock_timeout_acquire(&hdd_ctx->rx_wake_lock, - hdd_ctx->config-> - rx_wakelock_timeout); - } - /* Remove SKB from internal tracking table before submitting * it to stack */ @@ -3157,8 +3081,6 @@ void hdd_dp_cfg_update(struct wlan_objmgr_psoc *psoc, hdd_set_rx_mode_value(hdd_ctx); config->multicast_replay_filter = cfg_get(psoc, CFG_DP_FILTER_MULTICAST_REPLAY); - config->rx_wakelock_timeout = - cfg_get(psoc, CFG_DP_RX_WAKELOCK_TIMEOUT); config->num_dp_rx_threads = cfg_get(psoc, CFG_DP_NUM_DP_RX_THREADS); config->cfg_wmi_credit_cnt = cfg_get(psoc, CFG_DP_HTC_WMI_CREDIT_CNT); hdd_dp_dp_trace_cfg_update(config, psoc); diff --git a/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_api_roam.c b/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_api_roam.c index b63b2190de97..9b60209d6313 100644 --- a/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_api_roam.c +++ b/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_api_roam.c @@ -1,6 +1,5 @@ /* * Copyright (c) 2012-2020 The Linux Foundation. All rights reserved. - * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
* * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the @@ -15431,7 +15430,7 @@ static void csr_set_mgmt_enc_type(struct csr_roam_profile *profile, tDot11fBeaconIEs *ies, struct join_req *csr_join_req) { - if (profile->MFPEnabled || profile->MFPCapable) + if (profile->MFPEnabled) csr_join_req->MgmtEncryptionType = profile->mgmt_encryption_type; else @@ -15441,8 +15440,6 @@ static void csr_set_mgmt_enc_type(struct csr_roam_profile *profile, !(profile->MFPRequired) && !csr_is_mfpc_capable(&ies->RSN)) csr_join_req->MgmtEncryptionType = eSIR_ED_NONE; - - sme_debug("Mgmt encryption type %x", csr_join_req->MgmtEncryptionType); } #else static inline void csr_set_mgmt_enc_type(struct csr_roam_profile *profile, diff --git a/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_util.c b/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_util.c index f69721de8e27..8cc13a916b0b 100644 --- a/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_util.c +++ b/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_util.c @@ -1,6 +1,5 @@ /* * Copyright (c) 2011-2020 The Linux Foundation. All rights reserved. - * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the @@ -4056,8 +4055,6 @@ uint8_t csr_construct_rsn_ie(struct mac_context *mac, uint32_t sessionId, pIesLocal->RSN.RSN_Cap[1] & rsn_ie.RSN_Cap[1]; } - sme_debug("RSN CAP: %x %x", pIesLocal->RSN.RSN_Cap[0], - pIesLocal->RSN.RSN_Cap[1]); } /* See if the cyphers in the Bss description match with the * settings in the profile. @@ -4152,7 +4149,8 @@ uint8_t csr_construct_rsn_ie(struct mac_context *mac, uint32_t sessionId, /* Advertise BIP in group cipher key management only if PMF is * enabled and AP is capable. 
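+ * MFPCapable alone is not sufficient: when PMF is administratively + * disabled (MFPEnabled is false), the BIP group management cipher and + * the trailing RSN capability bytes must not be emitted, since the + * connection will never negotiate them.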
*/ - if ((RSNCapabilities.MFPCapable && pProfile->MFPCapable)) { + if (pProfile->MFPEnabled && + (RSNCapabilities.MFPCapable && pProfile->MFPCapable)) { pGroupMgmtCipherSuite = (uint8_t *) pPMK + sizeof(uint16_t) + (pPMK->cPMKIDs * PMKID_LEN); @@ -4178,7 +4176,8 @@ uint8_t csr_construct_rsn_ie(struct mac_context *mac, uint32_t sessionId, (pPMK->cPMKIDs * PMKID_LEN)); #ifdef WLAN_FEATURE_11W - if ((RSNCapabilities.MFPCapable && pProfile->MFPCapable)) { + if (pProfile->MFPEnabled && + (RSNCapabilities.MFPCapable && pProfile->MFPCapable)) { if (0 == pPMK->cPMKIDs) pRSNIe->IeHeader.Length += sizeof(uint16_t); pRSNIe->IeHeader.Length += CSR_WPA_OUI_SIZE; diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 5897c38235fe..db9d7318ee77 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -1723,8 +1723,7 @@ static int __init thermal_init(void) mutex_init(&poweroff_lock); thermal_passive_wq = alloc_workqueue("thermal_passive_wq", - WQ_HIGHPRI | WQ_UNBOUND - | WQ_FREEZABLE, + WQ_UNBOUND | WQ_FREEZABLE, THERMAL_MAX_ACTIVE); if (!thermal_passive_wq) { result = -ENOMEM; diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig index b1a42d749803..926d6f5018e5 100644 --- a/drivers/usb/gadget/Kconfig +++ b/drivers/usb/gadget/Kconfig @@ -535,6 +535,7 @@ config USB_CONFIGFS_F_UVC depends on USB_CONFIGFS depends on VIDEO_V4L2 depends on VIDEO_DEV + select VIDEOBUF2_DMA_SG select VIDEOBUF2_VMALLOC select USB_F_UVC help diff --git a/drivers/usb/gadget/function/Makefile b/drivers/usb/gadget/function/Makefile index f27f707088f5..d65314a09c44 100644 --- a/drivers/usb/gadget/function/Makefile +++ b/drivers/usb/gadget/function/Makefile @@ -41,8 +41,9 @@ usb_f_uac1_legacy-y := f_uac1_legacy.o u_uac1_legacy.o obj-$(CONFIG_USB_F_UAC1_LEGACY) += usb_f_uac1_legacy.o usb_f_uac2-y := f_uac2.o obj-$(CONFIG_USB_F_UAC2) += usb_f_uac2.o -usb_f_uvc-y := f_uvc.o uvc_queue.o uvc_v4l2.o uvc_video.o uvc_configfs.o -obj-$(CONFIG_USB_F_UVC) += usb_f_uvc.o +#usb_f_uvc-y := f_uvc.o uvc_queue.o uvc_v4l2.o uvc_video.o uvc_configfs.o +#obj-$(CONFIG_USB_F_UVC) += usb_f_uvc.o +obj-$(CONFIG_USB_F_UVC) += uvc-new/ usb_f_midi-y := f_midi.o obj-$(CONFIG_USB_F_MIDI) += usb_f_midi.o usb_f_hid-y := f_hid.o diff --git a/drivers/usb/gadget/function/f_mtp.c b/drivers/usb/gadget/function/f_mtp.c index 78fb6c8f2822..96e8d3381907 100644 --- a/drivers/usb/gadget/function/f_mtp.c +++ b/drivers/usb/gadget/function/f_mtp.c @@ -55,8 +55,6 @@ ipc_log_string(_mtp_ipc_log, "%s: " fmt, __func__, ##__VA_ARGS__) #endif -#define MTP_RX_BUFFER_INIT_SIZE 1048576 -#define MTP_TX_BUFFER_INIT_SIZE 1048576 #define MTP_BULK_BUFFER_SIZE 16384 #define INTR_BUFFER_SIZE 28 #define MAX_INST_NAME_LEN 40 @@ -93,10 +91,10 @@ #define MAX_ITERATION 100 -unsigned int mtp_rx_req_len = MTP_RX_BUFFER_INIT_SIZE; +unsigned int mtp_rx_req_len = MTP_BULK_BUFFER_SIZE; module_param(mtp_rx_req_len, uint, 0644); -unsigned int mtp_tx_req_len = MTP_TX_BUFFER_INIT_SIZE; +unsigned int mtp_tx_req_len = MTP_BULK_BUFFER_SIZE; module_param(mtp_tx_req_len, uint, 0644); unsigned int mtp_tx_reqs = MTP_TX_REQ_MAX; @@ -530,6 +528,9 @@ static int mtp_create_bulk_endpoints(struct mtp_dev *dev, dev->ep_intr = ep; retry_tx_alloc: + if (mtp_tx_req_len > MTP_BULK_BUFFER_SIZE) + mtp_tx_reqs = 4; + /* now allocate requests for our endpoints */ for (i = 0; i < mtp_tx_reqs; i++) { req = mtp_request_new(dev->ep_in, mtp_tx_req_len); @@ -752,8 +753,8 @@ static ssize_t mtp_write(struct file *fp, const char __user *buf, break; } - if (count > 
mtp_tx_req_len) - xfer = mtp_tx_req_len; + if (count > MTP_BULK_BUFFER_SIZE) + xfer = MTP_BULK_BUFFER_SIZE; else xfer = count; if (xfer && copy_from_user(req->buf, buf, xfer)) { @@ -852,8 +853,8 @@ static void send_file_work(struct work_struct *data) break; } - if (count > mtp_tx_req_len) - xfer = mtp_tx_req_len; + if (count > MTP_BULK_BUFFER_SIZE) + xfer = MTP_BULK_BUFFER_SIZE; else xfer = count; diff --git a/drivers/usb/gadget/function/uvc-new/Makefile b/drivers/usb/gadget/function/uvc-new/Makefile new file mode 100644 index 000000000000..f5608faffe68 --- /dev/null +++ b/drivers/usb/gadget/function/uvc-new/Makefile @@ -0,0 +1,2 @@ +usb_f_uvc-y := f_uvc.o uvc_queue.o uvc_v4l2.o uvc_video.o uvc_configfs.o +obj-$(CONFIG_USB_F_UVC) += usb_f_uvc.o diff --git a/drivers/usb/gadget/function/uvc-new/f_uvc.c b/drivers/usb/gadget/function/uvc-new/f_uvc.c new file mode 100644 index 000000000000..c7a12d6a3d60 --- /dev/null +++ b/drivers/usb/gadget/function/uvc-new/f_uvc.c @@ -0,0 +1,1038 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * uvc_gadget.c -- USB Video Class Gadget driver + * + * Copyright (C) 2009-2010 + * Laurent Pinchart (laurent.pinchart@ideasonboard.com) + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/errno.h> +#include <linux/fs.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/string.h> +#include <linux/usb/ch9.h> +#include <linux/usb/gadget.h> +#include <linux/usb/g_uvc.h> +#include <linux/usb/video.h> +#include <linux/vmalloc.h> +#include <linux/wait.h> + +#include <media/v4l2-dev.h> +#include <media/v4l2-event.h> + +#include "uvc.h" +#include "uvc_configfs.h" +#include "uvc_v4l2.h" +#include "uvc_video.h" + +unsigned int uvc_gadget_trace_param; +module_param_named(trace, uvc_gadget_trace_param, uint, 0644); +MODULE_PARM_DESC(trace, "Trace level bitmask"); + +/* -------------------------------------------------------------------------- + * Function descriptors + */ + +/* string IDs are assigned dynamically */ + +static struct usb_string uvc_en_us_strings[] = { + /* [UVC_STRING_CONTROL_IDX].s = DYNAMIC, */ + [UVC_STRING_STREAMING_IDX].s = "Video Streaming", + { } +}; + +static struct usb_gadget_strings uvc_stringtab = { + .language = 0x0409, /* en-us */ + .strings = uvc_en_us_strings, +}; + +static struct usb_gadget_strings *uvc_function_strings[] = { + &uvc_stringtab, + NULL, +}; + +#define UVC_INTF_VIDEO_CONTROL 0 +#define UVC_INTF_VIDEO_STREAMING 1 + +#define UVC_STATUS_MAX_PACKET_SIZE 16 /* 16 bytes status */ + +static struct usb_interface_assoc_descriptor uvc_iad = { + .bLength = sizeof(uvc_iad), + .bDescriptorType = USB_DT_INTERFACE_ASSOCIATION, + .bFirstInterface = 0, + .bInterfaceCount = 2, + .bFunctionClass = USB_CLASS_VIDEO, + .bFunctionSubClass = UVC_SC_VIDEO_INTERFACE_COLLECTION, + .bFunctionProtocol = 0x00, + .iFunction = 0, +}; + +static struct usb_interface_descriptor uvc_control_intf = { + .bLength = USB_DT_INTERFACE_SIZE, + .bDescriptorType = USB_DT_INTERFACE, + .bInterfaceNumber = UVC_INTF_VIDEO_CONTROL, + .bAlternateSetting = 0, + .bNumEndpoints = 1, + .bInterfaceClass = USB_CLASS_VIDEO, + .bInterfaceSubClass = UVC_SC_VIDEOCONTROL, + .bInterfaceProtocol = 0x00, + .iInterface = 0, +}; + +static struct usb_endpoint_descriptor uvc_control_ep = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_INT, + .wMaxPacketSize = cpu_to_le16(UVC_STATUS_MAX_PACKET_SIZE), + .bInterval = 8, +}; + +static struct usb_ss_ep_comp_descriptor uvc_ss_control_comp = { + .bLength = sizeof(uvc_ss_control_comp), + .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, + /* The following 3 values can be tweaked if necessary.
*/ + .bMaxBurst = 0, + .bmAttributes = 0, + .wBytesPerInterval = cpu_to_le16(UVC_STATUS_MAX_PACKET_SIZE), +}; + +static struct uvc_control_endpoint_descriptor uvc_control_cs_ep = { + .bLength = UVC_DT_CONTROL_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_CS_ENDPOINT, + .bDescriptorSubType = UVC_EP_INTERRUPT, + .wMaxTransferSize = cpu_to_le16(UVC_STATUS_MAX_PACKET_SIZE), +}; + +static struct usb_interface_descriptor uvc_streaming_intf_alt0 = { + .bLength = USB_DT_INTERFACE_SIZE, + .bDescriptorType = USB_DT_INTERFACE, + .bInterfaceNumber = UVC_INTF_VIDEO_STREAMING, + .bAlternateSetting = 0, + .bNumEndpoints = 0, + .bInterfaceClass = USB_CLASS_VIDEO, + .bInterfaceSubClass = UVC_SC_VIDEOSTREAMING, + .bInterfaceProtocol = 0x00, + .iInterface = 0, +}; + +static struct usb_interface_descriptor uvc_streaming_intf_alt1 = { + .bLength = USB_DT_INTERFACE_SIZE, + .bDescriptorType = USB_DT_INTERFACE, + .bInterfaceNumber = UVC_INTF_VIDEO_STREAMING, + .bAlternateSetting = 1, + .bNumEndpoints = 1, + .bInterfaceClass = USB_CLASS_VIDEO, + .bInterfaceSubClass = UVC_SC_VIDEOSTREAMING, + .bInterfaceProtocol = 0x00, + .iInterface = 0, +}; + +static struct usb_endpoint_descriptor uvc_fs_streaming_ep = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_SYNC_ASYNC + | USB_ENDPOINT_XFER_ISOC, + /* + * The wMaxPacketSize and bInterval values will be initialized from + * module parameters. + */ +}; + +static struct usb_endpoint_descriptor uvc_hs_streaming_ep = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_SYNC_ASYNC + | USB_ENDPOINT_XFER_ISOC, + /* + * The wMaxPacketSize and bInterval values will be initialized from + * module parameters. + */ +}; + +static struct usb_endpoint_descriptor uvc_ss_streaming_ep = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_SYNC_ASYNC + | USB_ENDPOINT_XFER_ISOC, + /* + * The wMaxPacketSize and bInterval values will be initialized from + * module parameters. + */ +}; + +static struct usb_ss_ep_comp_descriptor uvc_ss_streaming_comp = { + .bLength = sizeof(uvc_ss_streaming_comp), + .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, + /* + * The bMaxBurst, bmAttributes and wBytesPerInterval values will be + * initialized from module parameters. 
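+ * uvc_function_bind() derives them from the streaming_maxpacket and + * streaming_maxburst options: bmAttributes = max_packet_mult - 1, + * bMaxBurst = streaming_maxburst and wBytesPerInterval = + * max_packet_size * max_packet_mult * (streaming_maxburst + 1).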
+ */ +}; + +static const struct usb_descriptor_header * const uvc_fs_streaming[] = { + (struct usb_descriptor_header *) &uvc_streaming_intf_alt1, + (struct usb_descriptor_header *) &uvc_fs_streaming_ep, + NULL, +}; + +static const struct usb_descriptor_header * const uvc_hs_streaming[] = { + (struct usb_descriptor_header *) &uvc_streaming_intf_alt1, + (struct usb_descriptor_header *) &uvc_hs_streaming_ep, + NULL, +}; + +static const struct usb_descriptor_header * const uvc_ss_streaming[] = { + (struct usb_descriptor_header *) &uvc_streaming_intf_alt1, + (struct usb_descriptor_header *) &uvc_ss_streaming_ep, + (struct usb_descriptor_header *) &uvc_ss_streaming_comp, + NULL, +}; + +/* -------------------------------------------------------------------------- + * Control requests + */ + +static void +uvc_function_ep0_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct uvc_device *uvc = req->context; + struct v4l2_event v4l2_event; + struct uvc_event *uvc_event = (void *)&v4l2_event.u.data; + + if (uvc->event_setup_out) { + uvc->event_setup_out = 0; + + memset(&v4l2_event, 0, sizeof(v4l2_event)); + v4l2_event.type = UVC_EVENT_DATA; + uvc_event->data.length = min_t(unsigned int, req->actual, + sizeof(uvc_event->data.data)); + memcpy(&uvc_event->data.data, req->buf, uvc_event->data.length); + v4l2_event_queue(&uvc->vdev, &v4l2_event); + } +} + +static int +uvc_function_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) +{ + struct uvc_device *uvc = to_uvc(f); + struct v4l2_event v4l2_event; + struct uvc_event *uvc_event = (void *)&v4l2_event.u.data; + unsigned int interface = le16_to_cpu(ctrl->wIndex) & 0xff; + struct usb_ctrlrequest *mctrl; + + if ((ctrl->bRequestType & USB_TYPE_MASK) != USB_TYPE_CLASS) { + uvcg_info(f, "invalid request type\n"); + return -EINVAL; + } + + /* Stall too big requests. */ + if (le16_to_cpu(ctrl->wLength) > UVC_MAX_REQUEST_SIZE) + return -EINVAL; + + /* + * Tell the complete callback to generate an event for the next request + * that will be enqueued by UVCIOC_SEND_RESPONSE. + */ + uvc->event_setup_out = !(ctrl->bRequestType & USB_DIR_IN); + uvc->event_length = le16_to_cpu(ctrl->wLength); + + memset(&v4l2_event, 0, sizeof(v4l2_event)); + v4l2_event.type = UVC_EVENT_SETUP; + memcpy(&uvc_event->req, ctrl, sizeof(uvc_event->req)); + + /* check for the interface number, fixup the interface number in + * the ctrl request so the userspace doesn't have to bother with + * offset and configfs parsing + */ + mctrl = &uvc_event->req; + mctrl->wIndex &= ~cpu_to_le16(0xff); + if (interface == uvc->streaming_intf) + mctrl->wIndex = cpu_to_le16(UVC_STRING_STREAMING_IDX); + + v4l2_event_queue(&uvc->vdev, &v4l2_event); + + return 0; +} + +void uvc_function_setup_continue(struct uvc_device *uvc) +{ + struct usb_composite_dev *cdev = uvc->func.config->cdev; + + usb_composite_setup_continue(cdev); +} + +static int +uvc_function_get_alt(struct usb_function *f, unsigned interface) +{ + struct uvc_device *uvc = to_uvc(f); + + uvcg_info(f, "%s(%u)\n", __func__, interface); + + if (interface == uvc->control_intf) + return 0; + else if (interface != uvc->streaming_intf) + return -EINVAL; + else + return uvc->video.ep->enabled ? 
1 : 0; +} + +static int +uvc_function_set_alt(struct usb_function *f, unsigned interface, unsigned alt) +{ + struct uvc_device *uvc = to_uvc(f); + struct usb_composite_dev *cdev = f->config->cdev; + struct v4l2_event v4l2_event; + struct uvc_event *uvc_event = (void *)&v4l2_event.u.data; + int ret; + + uvcg_info(f, "%s(%u, %u)\n", __func__, interface, alt); + + if (interface == uvc->control_intf) { + if (alt) + return -EINVAL; + + uvcg_info(f, "reset UVC Control\n"); + usb_ep_disable(uvc->control_ep); + + if (!uvc->control_ep->desc) + if (config_ep_by_speed(cdev->gadget, f, uvc->control_ep)) + return -EINVAL; + + usb_ep_enable(uvc->control_ep); + + if (uvc->state == UVC_STATE_DISCONNECTED) { + memset(&v4l2_event, 0, sizeof(v4l2_event)); + v4l2_event.type = UVC_EVENT_CONNECT; + uvc_event->speed = cdev->gadget->speed; + v4l2_event_queue(&uvc->vdev, &v4l2_event); + + uvc->state = UVC_STATE_CONNECTED; + } + + return 0; + } + + if (interface != uvc->streaming_intf) + return -EINVAL; + + /* TODO + if (usb_endpoint_xfer_bulk(&uvc->desc.vs_ep)) + return alt ? -EINVAL : 0; + */ + + switch (alt) { + case 0: + if (uvc->state != UVC_STATE_STREAMING) + return 0; + + if (uvc->video.ep) + usb_ep_disable(uvc->video.ep); + + memset(&v4l2_event, 0, sizeof(v4l2_event)); + v4l2_event.type = UVC_EVENT_STREAMOFF; + v4l2_event_queue(&uvc->vdev, &v4l2_event); + + uvc->state = UVC_STATE_CONNECTED; + return 0; + + case 1: + if (uvc->state != UVC_STATE_CONNECTED) + return 0; + + if (!uvc->video.ep) + return -EINVAL; + + uvcg_info(f, "reset UVC\n"); + usb_ep_disable(uvc->video.ep); + + ret = config_ep_by_speed(f->config->cdev->gadget, + &(uvc->func), uvc->video.ep); + if (ret) + return ret; + usb_ep_enable(uvc->video.ep); + + memset(&v4l2_event, 0, sizeof(v4l2_event)); + v4l2_event.type = UVC_EVENT_STREAMON; + v4l2_event_queue(&uvc->vdev, &v4l2_event); + return USB_GADGET_DELAYED_STATUS; + + default: + return -EINVAL; + } +} + +static void +uvc_function_disable(struct usb_function *f) +{ + struct uvc_device *uvc = to_uvc(f); + struct v4l2_event v4l2_event; + + uvcg_info(f, "%s()\n", __func__); + + memset(&v4l2_event, 0, sizeof(v4l2_event)); + v4l2_event.type = UVC_EVENT_DISCONNECT; + v4l2_event_queue(&uvc->vdev, &v4l2_event); + + uvc->state = UVC_STATE_DISCONNECTED; + + usb_ep_disable(uvc->video.ep); + usb_ep_disable(uvc->control_ep); +} + +/* -------------------------------------------------------------------------- + * Connection / disconnection + */ + +void +uvc_function_connect(struct uvc_device *uvc) +{ + int ret; + + if ((ret = usb_function_activate(&uvc->func)) < 0) + uvcg_info(&uvc->func, "UVC connect failed with %d\n", ret); +} + +void +uvc_function_disconnect(struct uvc_device *uvc) +{ + int ret; + + if ((ret = usb_function_deactivate(&uvc->func)) < 0) + uvcg_info(&uvc->func, "UVC disconnect failed with %d\n", ret); +} + +/* -------------------------------------------------------------------------- + * USB probe and disconnect + */ + +static ssize_t function_name_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct uvc_device *uvc = dev_get_drvdata(dev); + + return sprintf(buf, "%s\n", uvc->func.fi->group.cg_item.ci_name); +} + +static DEVICE_ATTR_RO(function_name); + +static int +uvc_register_video(struct uvc_device *uvc) +{ + struct usb_composite_dev *cdev = uvc->func.config->cdev; + int ret; + + /* TODO reference counting. 
*/ + memset(&uvc->vdev, 0, sizeof(uvc->vdev)); + uvc->vdev.v4l2_dev = &uvc->v4l2_dev; + uvc->vdev.v4l2_dev->dev = &cdev->gadget->dev; + uvc->vdev.fops = &uvc_v4l2_fops; + uvc->vdev.ioctl_ops = &uvc_v4l2_ioctl_ops; + uvc->vdev.release = video_device_release_empty; + uvc->vdev.vfl_dir = VFL_DIR_TX; + uvc->vdev.lock = &uvc->video.mutex; + uvc->vdev.device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING; + strscpy(uvc->vdev.name, cdev->gadget->name, sizeof(uvc->vdev.name)); + + video_set_drvdata(&uvc->vdev, uvc); + + ret = video_register_device(&uvc->vdev, VFL_TYPE_GRABBER, -1); + if (ret < 0) + return ret; + + ret = device_create_file(&uvc->vdev.dev, &dev_attr_function_name); + if (ret < 0) { + video_unregister_device(&uvc->vdev); + return ret; + } + + return 0; +} + +#define UVC_COPY_DESCRIPTOR(mem, dst, desc) \ + do { \ + memcpy(mem, desc, (desc)->bLength); \ + *(dst)++ = mem; \ + mem += (desc)->bLength; \ + } while (0); + +#define UVC_COPY_DESCRIPTORS(mem, dst, src) \ + do { \ + const struct usb_descriptor_header * const *__src; \ + for (__src = src; *__src; ++__src) { \ + memcpy(mem, *__src, (*__src)->bLength); \ + *dst++ = mem; \ + mem += (*__src)->bLength; \ + } \ + } while (0) + +static struct usb_descriptor_header ** +uvc_copy_descriptors(struct uvc_device *uvc, enum usb_device_speed speed) +{ + struct uvc_input_header_descriptor *uvc_streaming_header; + struct uvc_header_descriptor *uvc_control_header; + const struct uvc_descriptor_header * const *uvc_control_desc; + const struct uvc_descriptor_header * const *uvc_streaming_cls; + const struct usb_descriptor_header * const *uvc_streaming_std; + const struct usb_descriptor_header * const *src; + struct usb_descriptor_header **dst; + struct usb_descriptor_header **hdr; + unsigned int control_size; + unsigned int streaming_size; + unsigned int n_desc; + unsigned int bytes; + void *mem; + + switch (speed) { + case USB_SPEED_SUPER: + uvc_control_desc = uvc->desc.ss_control; + uvc_streaming_cls = uvc->desc.ss_streaming; + uvc_streaming_std = uvc_ss_streaming; + break; + + case USB_SPEED_HIGH: + uvc_control_desc = uvc->desc.fs_control; + uvc_streaming_cls = uvc->desc.hs_streaming; + uvc_streaming_std = uvc_hs_streaming; + break; + + case USB_SPEED_FULL: + default: + uvc_control_desc = uvc->desc.fs_control; + uvc_streaming_cls = uvc->desc.fs_streaming; + uvc_streaming_std = uvc_fs_streaming; + break; + } + + if (!uvc_control_desc || !uvc_streaming_cls) + return ERR_PTR(-ENODEV); + + /* + * Descriptors layout + * + * uvc_iad + * uvc_control_intf + * Class-specific UVC control descriptors + * uvc_control_ep + * uvc_control_cs_ep + * uvc_ss_control_comp (for SS only) + * uvc_streaming_intf_alt0 + * Class-specific UVC streaming descriptors + * uvc_{fs|hs}_streaming + */ + + /* Count descriptors and compute their size. 
*/ + control_size = 0; + streaming_size = 0; + bytes = uvc_iad.bLength + uvc_control_intf.bLength + + uvc_control_ep.bLength + uvc_control_cs_ep.bLength + + uvc_streaming_intf_alt0.bLength; + + if (speed == USB_SPEED_SUPER) { + bytes += uvc_ss_control_comp.bLength; + n_desc = 6; + } else { + n_desc = 5; + } + + for (src = (const struct usb_descriptor_header **)uvc_control_desc; + *src; ++src) { + control_size += (*src)->bLength; + bytes += (*src)->bLength; + n_desc++; + } + for (src = (const struct usb_descriptor_header **)uvc_streaming_cls; + *src; ++src) { + streaming_size += (*src)->bLength; + bytes += (*src)->bLength; + n_desc++; + } + for (src = uvc_streaming_std; *src; ++src) { + bytes += (*src)->bLength; + n_desc++; + } + + mem = kmalloc((n_desc + 1) * sizeof(*src) + bytes, GFP_KERNEL); + if (mem == NULL) + return NULL; + + hdr = mem; + dst = mem; + mem += (n_desc + 1) * sizeof(*src); + + /* Copy the descriptors. */ + UVC_COPY_DESCRIPTOR(mem, dst, &uvc_iad); + UVC_COPY_DESCRIPTOR(mem, dst, &uvc_control_intf); + + uvc_control_header = mem; + UVC_COPY_DESCRIPTORS(mem, dst, + (const struct usb_descriptor_header **)uvc_control_desc); + uvc_control_header->wTotalLength = cpu_to_le16(control_size); + uvc_control_header->bInCollection = 1; + uvc_control_header->baInterfaceNr[0] = uvc->streaming_intf; + + UVC_COPY_DESCRIPTOR(mem, dst, &uvc_control_ep); + if (speed == USB_SPEED_SUPER) + UVC_COPY_DESCRIPTOR(mem, dst, &uvc_ss_control_comp); + + UVC_COPY_DESCRIPTOR(mem, dst, &uvc_control_cs_ep); + UVC_COPY_DESCRIPTOR(mem, dst, &uvc_streaming_intf_alt0); + + uvc_streaming_header = mem; + UVC_COPY_DESCRIPTORS(mem, dst, + (const struct usb_descriptor_header**)uvc_streaming_cls); + uvc_streaming_header->wTotalLength = cpu_to_le16(streaming_size); + uvc_streaming_header->bEndpointAddress = uvc->video.ep->address; + + UVC_COPY_DESCRIPTORS(mem, dst, uvc_streaming_std); + + *dst = NULL; + return hdr; +} + +static int +uvc_function_bind(struct usb_configuration *c, struct usb_function *f) +{ + struct usb_composite_dev *cdev = c->cdev; + struct uvc_device *uvc = to_uvc(f); + struct usb_string *us; + unsigned int max_packet_mult; + unsigned int max_packet_size; + struct usb_ep *ep; + struct f_uvc_opts *opts; + int ret = -EINVAL; + + uvcg_info(f, "%s()\n", __func__); + + opts = fi_to_f_uvc_opts(f->fi); + /* Sanity check the streaming endpoint module parameters. */ + opts->streaming_interval = clamp(opts->streaming_interval, 1U, 16U); + opts->streaming_maxpacket = clamp(opts->streaming_maxpacket, 1U, 3072U); + opts->streaming_maxburst = min(opts->streaming_maxburst, 15U); + + /* For SS, wMaxPacketSize has to be 1024 if bMaxBurst is not 0 */ + if (opts->streaming_maxburst && + (opts->streaming_maxpacket % 1024) != 0) { + opts->streaming_maxpacket = roundup(opts->streaming_maxpacket, 1024); + uvcg_info(f, "overriding streaming_maxpacket to %d\n", + opts->streaming_maxpacket); + } + + /* + * Fill in the FS/HS/SS Video Streaming specific descriptors from the + * module parameters. + * + * NOTE: We assume that the user knows what they are doing and won't + * give parameters that their UDC doesn't support. 
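+ * + * streaming_maxpacket is split into a per-packet size and a + * high-bandwidth multiplier: values up to 1024 keep max_packet_mult + * at 1, values up to 2048 use 2, and anything larger uses 3. For + * example, streaming_maxpacket = 3072 yields three 1024-byte packets + * per (micro)frame.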
+ */ + if (opts->streaming_maxpacket <= 1024) { + max_packet_mult = 1; + max_packet_size = opts->streaming_maxpacket; + } else if (opts->streaming_maxpacket <= 2048) { + max_packet_mult = 2; + max_packet_size = opts->streaming_maxpacket / 2; + } else { + max_packet_mult = 3; + max_packet_size = opts->streaming_maxpacket / 3; + } + + uvc_fs_streaming_ep.wMaxPacketSize = + cpu_to_le16(min(opts->streaming_maxpacket, 1023U)); + uvc_fs_streaming_ep.bInterval = opts->streaming_interval; + + uvc_hs_streaming_ep.wMaxPacketSize = + cpu_to_le16(max_packet_size | ((max_packet_mult - 1) << 11)); + + /* A high-bandwidth endpoint must specify a bInterval value of 1 */ + if (max_packet_mult > 1) + uvc_hs_streaming_ep.bInterval = 1; + else + uvc_hs_streaming_ep.bInterval = opts->streaming_interval; + + uvc_ss_streaming_ep.wMaxPacketSize = cpu_to_le16(max_packet_size); + uvc_ss_streaming_ep.bInterval = opts->streaming_interval; + uvc_ss_streaming_comp.bmAttributes = max_packet_mult - 1; + uvc_ss_streaming_comp.bMaxBurst = opts->streaming_maxburst; + uvc_ss_streaming_comp.wBytesPerInterval = + cpu_to_le16(max_packet_size * max_packet_mult * + (opts->streaming_maxburst + 1)); + + /* Allocate endpoints. */ + ep = usb_ep_autoconfig(cdev->gadget, &uvc_control_ep); + if (!ep) { + uvcg_info(f, "Unable to allocate control EP\n"); + goto error; + } + uvc->control_ep = ep; + + if (gadget_is_superspeed(c->cdev->gadget)) + ep = usb_ep_autoconfig_ss(cdev->gadget, &uvc_ss_streaming_ep, + &uvc_ss_streaming_comp); + else if (gadget_is_dualspeed(cdev->gadget)) + ep = usb_ep_autoconfig(cdev->gadget, &uvc_hs_streaming_ep); + else + ep = usb_ep_autoconfig(cdev->gadget, &uvc_fs_streaming_ep); + + if (!ep) { + uvcg_info(f, "Unable to allocate streaming EP\n"); + goto error; + } + uvc->video.ep = ep; + + uvc_fs_streaming_ep.bEndpointAddress = uvc->video.ep->address; + uvc_hs_streaming_ep.bEndpointAddress = uvc->video.ep->address; + uvc_ss_streaming_ep.bEndpointAddress = uvc->video.ep->address; + + uvc_en_us_strings[UVC_STRING_CONTROL_IDX].s = opts->function_name; + us = usb_gstrings_attach(cdev, uvc_function_strings, + ARRAY_SIZE(uvc_en_us_strings)); + if (IS_ERR(us)) { + ret = PTR_ERR(us); + goto error; + } + uvc_iad.iFunction = us[UVC_STRING_CONTROL_IDX].id; + uvc_control_intf.iInterface = us[UVC_STRING_CONTROL_IDX].id; + ret = us[UVC_STRING_STREAMING_IDX].id; + uvc_streaming_intf_alt0.iInterface = ret; + uvc_streaming_intf_alt1.iInterface = ret; + + /* Allocate interface IDs. 
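+ * usb_interface_id() hands out the next unused interface number in + * this configuration; the results are also cached in f_uvc_opts as + * control_interface and streaming_interface so the configfs layer can + * report the numbers that were actually assigned.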
*/ + if ((ret = usb_interface_id(c, f)) < 0) + goto error; + uvc_iad.bFirstInterface = ret; + uvc_control_intf.bInterfaceNumber = ret; + uvc->control_intf = ret; + opts->control_interface = ret; + + if ((ret = usb_interface_id(c, f)) < 0) + goto error; + uvc_streaming_intf_alt0.bInterfaceNumber = ret; + uvc_streaming_intf_alt1.bInterfaceNumber = ret; + uvc->streaming_intf = ret; + opts->streaming_interface = ret; + + /* Copy descriptors */ + f->fs_descriptors = uvc_copy_descriptors(uvc, USB_SPEED_FULL); + if (IS_ERR(f->fs_descriptors)) { + ret = PTR_ERR(f->fs_descriptors); + f->fs_descriptors = NULL; + goto error; + } + if (gadget_is_dualspeed(cdev->gadget)) { + f->hs_descriptors = uvc_copy_descriptors(uvc, USB_SPEED_HIGH); + if (IS_ERR(f->hs_descriptors)) { + ret = PTR_ERR(f->hs_descriptors); + f->hs_descriptors = NULL; + goto error; + } + } + if (gadget_is_superspeed(c->cdev->gadget)) { + f->ss_descriptors = uvc_copy_descriptors(uvc, USB_SPEED_SUPER); + if (IS_ERR(f->ss_descriptors)) { + ret = PTR_ERR(f->ss_descriptors); + f->ss_descriptors = NULL; + goto error; + } + } + + /* Preallocate control endpoint request. */ + uvc->control_req = usb_ep_alloc_request(cdev->gadget->ep0, GFP_KERNEL); + uvc->control_buf = kmalloc(UVC_MAX_REQUEST_SIZE, GFP_KERNEL); + if (uvc->control_req == NULL || uvc->control_buf == NULL) { + ret = -ENOMEM; + goto error; + } + + uvc->control_req->buf = uvc->control_buf; + uvc->control_req->complete = uvc_function_ep0_complete; + uvc->control_req->context = uvc; + + ret = v4l2_device_register(&cdev->gadget->dev, &uvc->v4l2_dev); + if (ret < 0) { + uvcg_err(f, "failed to register V4L2 device\n"); + goto error; + } + + /* Initialise video. */ + ret = uvcg_video_init(&uvc->video, uvc); + if (ret < 0) + goto v4l2_error; + + /* Register a V4L2 device. 
*/ + ret = uvc_register_video(uvc); + if (ret < 0) { + uvcg_err(f, "failed to register video device\n"); + goto v4l2_error; + } + + return 0; + +v4l2_error: + v4l2_device_unregister(&uvc->v4l2_dev); +error: + if (uvc->control_req) + usb_ep_free_request(cdev->gadget->ep0, uvc->control_req); + kfree(uvc->control_buf); + + usb_free_all_descriptors(f); + return ret; +} + +/* -------------------------------------------------------------------------- + * USB gadget function + */ + +static void uvc_free_inst(struct usb_function_instance *f) +{ + struct f_uvc_opts *opts = fi_to_f_uvc_opts(f); + + mutex_destroy(&opts->lock); + kfree(opts); +} + +static struct usb_function_instance *uvc_alloc_inst(void) +{ + struct f_uvc_opts *opts; + struct uvc_camera_terminal_descriptor *cd; + struct uvc_processing_unit_descriptor *pd; + struct uvc_output_terminal_descriptor *od; + struct uvc_color_matching_descriptor *md; + struct uvc_descriptor_header **ctl_cls; + int ret; + + opts = kzalloc(sizeof(*opts), GFP_KERNEL); + if (!opts) + return ERR_PTR(-ENOMEM); + opts->func_inst.free_func_inst = uvc_free_inst; + mutex_init(&opts->lock); + + cd = &opts->uvc_camera_terminal; + cd->bLength = UVC_DT_CAMERA_TERMINAL_SIZE(3); + cd->bDescriptorType = USB_DT_CS_INTERFACE; + cd->bDescriptorSubType = UVC_VC_INPUT_TERMINAL; + cd->bTerminalID = 1; + cd->wTerminalType = cpu_to_le16(0x0201); + cd->bAssocTerminal = 0; + cd->iTerminal = 0; + cd->wObjectiveFocalLengthMin = cpu_to_le16(0); + cd->wObjectiveFocalLengthMax = cpu_to_le16(0); + cd->wOcularFocalLength = cpu_to_le16(0); + cd->bControlSize = 3; + cd->bmControls[0] = 2; + cd->bmControls[1] = 0; + cd->bmControls[2] = 0; + + pd = &opts->uvc_processing; + pd->bLength = UVC_DT_PROCESSING_UNIT_SIZE(2); + pd->bDescriptorType = USB_DT_CS_INTERFACE; + pd->bDescriptorSubType = UVC_VC_PROCESSING_UNIT; + pd->bUnitID = 2; + pd->bSourceID = 1; + pd->wMaxMultiplier = cpu_to_le16(16*1024); + pd->bControlSize = 2; + pd->bmControls[0] = 1; + pd->bmControls[1] = 0; + pd->iProcessing = 0; + pd->bmVideoStandards = 0; + + od = &opts->uvc_output_terminal; + od->bLength = UVC_DT_OUTPUT_TERMINAL_SIZE; + od->bDescriptorType = USB_DT_CS_INTERFACE; + od->bDescriptorSubType = UVC_VC_OUTPUT_TERMINAL; + od->bTerminalID = 3; + od->wTerminalType = cpu_to_le16(0x0101); + od->bAssocTerminal = 0; + od->bSourceID = 2; + od->iTerminal = 0; + + md = &opts->uvc_color_matching; + md->bLength = UVC_DT_COLOR_MATCHING_SIZE; + md->bDescriptorType = USB_DT_CS_INTERFACE; + md->bDescriptorSubType = UVC_VS_COLORFORMAT; + md->bColorPrimaries = 1; + md->bTransferCharacteristics = 1; + md->bMatrixCoefficients = 4; + + /* Prepare fs control class descriptors for configfs-based gadgets */ + ctl_cls = opts->uvc_fs_control_cls; + ctl_cls[0] = NULL; /* assigned elsewhere by configfs */ + ctl_cls[1] = (struct uvc_descriptor_header *)cd; + ctl_cls[2] = (struct uvc_descriptor_header *)pd; + ctl_cls[3] = (struct uvc_descriptor_header *)od; + ctl_cls[4] = NULL; /* NULL-terminate */ + opts->fs_control = + (const struct uvc_descriptor_header * const *)ctl_cls; + + /* Prepare ss control class descriptors for configfs-based gadgets */ + ctl_cls = opts->uvc_ss_control_cls; + ctl_cls[0] = NULL; /* assigned elsewhere by configfs */ + ctl_cls[1] = (struct uvc_descriptor_header *)cd; + ctl_cls[2] = (struct uvc_descriptor_header *)pd; + ctl_cls[3] = (struct uvc_descriptor_header *)od; + ctl_cls[4] = NULL; /* NULL-terminate */ + opts->ss_control = + (const struct uvc_descriptor_header * const *)ctl_cls; + + opts->streaming_interval = 1; + 
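/* + * Conservative defaults: one packet per service interval at the shortest + * interval; uvc_function_bind() clamps both values and derives the + * per-speed endpoint descriptors from them. + */ + 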
opts->streaming_maxpacket = 1024; + snprintf(opts->function_name, sizeof(opts->function_name), "UVC Camera"); + + ret = uvcg_attach_configfs(opts); + if (ret < 0) { + kfree(opts); + return ERR_PTR(ret); + } + + return &opts->func_inst; +} + +static void uvc_free(struct usb_function *f) +{ + struct uvc_device *uvc = to_uvc(f); + struct f_uvc_opts *opts = container_of(f->fi, struct f_uvc_opts, + func_inst); + config_item_put(&uvc->header->item); + --opts->refcnt; + kfree(uvc); +} + +static void uvc_function_unbind(struct usb_configuration *c, + struct usb_function *f) +{ + struct usb_composite_dev *cdev = c->cdev; + struct uvc_device *uvc = to_uvc(f); + struct uvc_video *video = &uvc->video; + long wait_ret = 1; + + uvcg_info(f, "%s()\n", __func__); + + if (video->async_wq) + destroy_workqueue(video->async_wq); + + /* + * If we know we're connected via v4l2, then there should be a cleanup + * of the device from userspace either via UVC_EVENT_DISCONNECT or + * through the video device removal uevent. Allow some time for the + * application to close out before things get deleted. + */ + if (uvc->func_connected) { + uvcg_dbg(f, "waiting for clean disconnect\n"); + wait_ret = wait_event_interruptible_timeout(uvc->func_connected_queue, + uvc->func_connected == false, msecs_to_jiffies(500)); + uvcg_dbg(f, "done waiting with ret: %ld\n", wait_ret); + } + + device_remove_file(&uvc->vdev.dev, &dev_attr_function_name); + video_unregister_device(&uvc->vdev); + v4l2_device_unregister(&uvc->v4l2_dev); + + if (uvc->func_connected) { + /* + * Wait for the release to occur to ensure there are no longer any + * pending operations that may cause panics when resources are cleaned + * up. + */ + uvcg_warn(f, "%s no clean disconnect, wait for release\n", __func__); + wait_ret = wait_event_interruptible_timeout(uvc->func_connected_queue, + uvc->func_connected == false, msecs_to_jiffies(1000)); + uvcg_dbg(f, "done waiting for release with ret: %ld\n", wait_ret); + } + + usb_ep_free_request(cdev->gadget->ep0, uvc->control_req); + kfree(uvc->control_buf); + + usb_free_all_descriptors(f); +} + +static struct usb_function *uvc_alloc(struct usb_function_instance *fi) +{ + struct uvc_device *uvc; + struct f_uvc_opts *opts; + struct uvc_descriptor_header **strm_cls; + struct config_item *streaming, *header, *h; + + uvc = kzalloc(sizeof(*uvc), GFP_KERNEL); + if (uvc == NULL) + return ERR_PTR(-ENOMEM); + + mutex_init(&uvc->video.mutex); + uvc->state = UVC_STATE_DISCONNECTED; + init_waitqueue_head(&uvc->func_connected_queue); + opts = fi_to_f_uvc_opts(fi); + + mutex_lock(&opts->lock); + if (opts->uvc_fs_streaming_cls) { + strm_cls = opts->uvc_fs_streaming_cls; + opts->fs_streaming = + (const struct uvc_descriptor_header * const *)strm_cls; + } + if (opts->uvc_hs_streaming_cls) { + strm_cls = opts->uvc_hs_streaming_cls; + opts->hs_streaming = + (const struct uvc_descriptor_header * const *)strm_cls; + } + if (opts->uvc_ss_streaming_cls) { + strm_cls = opts->uvc_ss_streaming_cls; + opts->ss_streaming = + (const struct uvc_descriptor_header * const *)strm_cls; + } + + uvc->desc.fs_control = opts->fs_control; + uvc->desc.ss_control = opts->ss_control; + uvc->desc.fs_streaming = opts->fs_streaming; + uvc->desc.hs_streaming = opts->hs_streaming; + uvc->desc.ss_streaming = opts->ss_streaming; + + streaming = config_group_find_item(&opts->func_inst.group, "streaming"); + if (!streaming) + goto err_config; + + header = config_group_find_item(to_config_group(streaming), "header"); + config_item_put(streaming); + if (!header) + goto 
err_config; + + h = config_group_find_item(to_config_group(header), "h"); + config_item_put(header); + if (!h) + goto err_config; + + uvc->header = to_uvcg_streaming_header(h); + if (!uvc->header->linked) { + mutex_unlock(&opts->lock); + kfree(uvc); + return ERR_PTR(-EBUSY); + } + + ++opts->refcnt; + mutex_unlock(&opts->lock); + + /* Register the function. */ + uvc->func.name = "uvc"; + uvc->func.bind = uvc_function_bind; + uvc->func.unbind = uvc_function_unbind; + uvc->func.get_alt = uvc_function_get_alt; + uvc->func.set_alt = uvc_function_set_alt; + uvc->func.disable = uvc_function_disable; + uvc->func.setup = uvc_function_setup; + uvc->func.free_func = uvc_free; + uvc->func.bind_deactivated = true; + + return &uvc->func; + +err_config: + mutex_unlock(&opts->lock); + kfree(uvc); + return ERR_PTR(-ENOENT); +} + +DECLARE_USB_FUNCTION_INIT(uvc, uvc_alloc_inst, uvc_alloc); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Laurent Pinchart"); diff --git a/drivers/usb/gadget/function/uvc-new/f_uvc.h b/drivers/usb/gadget/function/uvc-new/f_uvc.h new file mode 100644 index 000000000000..1db972d4beeb --- /dev/null +++ b/drivers/usb/gadget/function/uvc-new/f_uvc.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * f_uvc.h -- USB Video Class Gadget driver + * + * Copyright (C) 2009-2010 + * Laurent Pinchart (laurent.pinchart@ideasonboard.com) + */ + +#ifndef _F_UVC_H_ +#define _F_UVC_H_ + +struct uvc_device; + +void uvc_function_setup_continue(struct uvc_device *uvc); + +void uvc_function_connect(struct uvc_device *uvc); + +void uvc_function_disconnect(struct uvc_device *uvc); + +#endif /* _F_UVC_H_ */ diff --git a/drivers/usb/gadget/function/uvc-new/u_uvc.h b/drivers/usb/gadget/function/uvc-new/u_uvc.h new file mode 100644 index 000000000000..24b8681b0d6f --- /dev/null +++ b/drivers/usb/gadget/function/uvc-new/u_uvc.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * u_uvc.h + * + * Utility definitions for the uvc function + * + * Copyright (c) 2013-2014 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Author: Andrzej Pietrasiewicz + */ + +#ifndef U_UVC_H +#define U_UVC_H + +#include <linux/mutex.h> +#include <linux/usb/composite.h> +#include <linux/usb/video.h> + +#define fi_to_f_uvc_opts(f) container_of(f, struct f_uvc_opts, func_inst) + +struct f_uvc_opts { + struct usb_function_instance func_inst; + unsigned int streaming_interval; + unsigned int streaming_maxpacket; + unsigned int streaming_maxburst; + + unsigned int control_interface; + unsigned int streaming_interface; + char function_name[32]; + + /* + * Control descriptors array pointers for full-/high-speed and + * super-speed. They point by default to the uvc_fs_control_cls and + * uvc_ss_control_cls arrays respectively. Legacy gadgets must + * override them in their gadget bind callback. + */ + const struct uvc_descriptor_header * const *fs_control; + const struct uvc_descriptor_header * const *ss_control; + + /* + * Streaming descriptors array pointers for full-speed, high-speed and + * super-speed. They will point to the uvc_[fhs]s_streaming_cls arrays + * for configfs-based gadgets. Legacy gadgets must initialize them in + * their gadget bind callback. + */ + const struct uvc_descriptor_header * const *fs_streaming; + const struct uvc_descriptor_header * const *hs_streaming; + const struct uvc_descriptor_header * const *ss_streaming; + + /* Default control descriptors for configfs-based gadgets. 
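+ * They are initialised with fixed defaults in uvc_alloc_inst() and exposed + * through the control/terminal and control/processing configfs groups. 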
*/ + struct uvc_camera_terminal_descriptor uvc_camera_terminal; + struct uvc_processing_unit_descriptor uvc_processing; + struct uvc_output_terminal_descriptor uvc_output_terminal; + struct uvc_color_matching_descriptor uvc_color_matching; + + /* + * Control descriptors pointers arrays for full-/high-speed and + * super-speed. The first element is a configurable control header + * descriptor, the other elements point to the fixed default control + * descriptors. Used by configfs only, must not be touched by legacy + * gadgets. + */ + struct uvc_descriptor_header *uvc_fs_control_cls[5]; + struct uvc_descriptor_header *uvc_ss_control_cls[5]; + + /* + * Streaming descriptors for full-speed, high-speed and super-speed. + * Used by configfs only, must not be touched by legacy gadgets. The + * arrays are allocated at runtime as the number of descriptors isn't + * known in advance. + */ + struct uvc_descriptor_header **uvc_fs_streaming_cls; + struct uvc_descriptor_header **uvc_hs_streaming_cls; + struct uvc_descriptor_header **uvc_ss_streaming_cls; + + /* + * Read/write access to configfs attributes is handled by configfs. + * + * This lock protects the descriptors from concurrent access by + * read/write and symlink creation/removal. + */ + struct mutex lock; + int refcnt; +}; + +#endif /* U_UVC_H */ diff --git a/drivers/usb/gadget/function/uvc-new/uvc.h b/drivers/usb/gadget/function/uvc-new/uvc.h new file mode 100644 index 000000000000..40226b1f7e14 --- /dev/null +++ b/drivers/usb/gadget/function/uvc-new/uvc.h @@ -0,0 +1,184 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * uvc_gadget.h -- USB Video Class Gadget driver + * + * Copyright (C) 2009-2010 + * Laurent Pinchart (laurent.pinchart@ideasonboard.com) + */ + +#ifndef _UVC_GADGET_H_ +#define _UVC_GADGET_H_ + +#include <linux/mutex.h> +#include <linux/spinlock.h> +#include <linux/usb/composite.h> +#include <linux/videodev2.h> +#include <linux/wait.h> +#include <linux/workqueue.h> + +#include <media/v4l2-device.h> +#include <media/v4l2-dev.h> +#include <media/v4l2-fh.h> + +#include "uvc_queue.h" + +struct usb_ep; +struct usb_request; +struct uvc_descriptor_header; +struct uvc_device; + +/* ------------------------------------------------------------------------ + * Debugging, printing and logging + */ + +#define UVC_TRACE_PROBE (1 << 0) +#define UVC_TRACE_DESCR (1 << 1) +#define UVC_TRACE_CONTROL (1 << 2) +#define UVC_TRACE_FORMAT (1 << 3) +#define UVC_TRACE_CAPTURE (1 << 4) +#define UVC_TRACE_CALLS (1 << 5) +#define UVC_TRACE_IOCTL (1 << 6) +#define UVC_TRACE_FRAME (1 << 7) +#define UVC_TRACE_SUSPEND (1 << 8) +#define UVC_TRACE_STATUS (1 << 9) + +#define UVC_WARN_MINMAX 0 +#define UVC_WARN_PROBE_DEF 1 + +extern unsigned int uvc_gadget_trace_param; + +#define uvc_trace(flag, msg...) \ + do { \ + if (uvc_gadget_trace_param & flag) \ + printk(KERN_DEBUG "uvcvideo: " msg); \ + } while (0) + +#define uvcg_dbg(f, fmt, args...) \ + dev_dbg(&(f)->config->cdev->gadget->dev, "%s: " fmt, (f)->name, ##args) +#define uvcg_info(f, fmt, args...) \ + dev_info(&(f)->config->cdev->gadget->dev, "%s: " fmt, (f)->name, ##args) +#define uvcg_warn(f, fmt, args...) \ + dev_warn(&(f)->config->cdev->gadget->dev, "%s: " fmt, (f)->name, ##args) +#define uvcg_err(f, fmt, args...) 
\ + dev_err(&(f)->config->cdev->gadget->dev, "%s: " fmt, (f)->name, ##args) + +/* ------------------------------------------------------------------------ + * Driver specific constants + */ + +#define UVC_MAX_REQUEST_SIZE 64 +#define UVC_MAX_EVENTS 4 + +#define UVCG_REQUEST_HEADER_LEN 12 + +/* ------------------------------------------------------------------------ + * Structures + */ +struct uvc_request { + struct usb_request *req; + u8 *req_buffer; + struct uvc_video *video; + struct sg_table sgt; + u8 header[UVCG_REQUEST_HEADER_LEN]; + struct uvc_buffer *last_buf; +}; + +struct uvc_video { + struct uvc_device *uvc; + struct usb_ep *ep; + + struct work_struct pump; + struct workqueue_struct *async_wq; + + /* Frame parameters */ + u8 bpp; + u32 fcc; + unsigned int width; + unsigned int height; + unsigned int imagesize; + struct mutex mutex; /* protects frame parameters */ + + unsigned int uvc_num_requests; + + /* Requests */ + unsigned int req_size; + struct uvc_request *ureq; + struct list_head req_free; + spinlock_t req_lock; + + unsigned int req_int_count; + + void (*encode) (struct usb_request *req, struct uvc_video *video, + struct uvc_buffer *buf); + + /* Context data used by the completion handler */ + __u32 payload_size; + __u32 max_payload_size; + + struct uvc_video_queue queue; + unsigned int fid; +}; + +enum uvc_state { + UVC_STATE_DISCONNECTED, + UVC_STATE_CONNECTED, + UVC_STATE_STREAMING, +}; + +struct uvc_device { + struct video_device vdev; + struct v4l2_device v4l2_dev; + enum uvc_state state; + struct usb_function func; + struct uvc_video video; + bool func_connected; + wait_queue_head_t func_connected_queue; + + struct uvcg_streaming_header *header; + + /* Descriptors */ + struct { + const struct uvc_descriptor_header * const *fs_control; + const struct uvc_descriptor_header * const *ss_control; + const struct uvc_descriptor_header * const *fs_streaming; + const struct uvc_descriptor_header * const *hs_streaming; + const struct uvc_descriptor_header * const *ss_streaming; + } desc; + + unsigned int control_intf; + struct usb_ep *control_ep; + struct usb_request *control_req; + void *control_buf; + + unsigned int streaming_intf; + + /* Events */ + unsigned int event_length; + unsigned int event_setup_out : 1; +}; + +static inline struct uvc_device *to_uvc(struct usb_function *f) +{ + return container_of(f, struct uvc_device, func); +} + +struct uvc_file_handle { + struct v4l2_fh vfh; + struct uvc_video *device; + bool is_uvc_app_handle; +}; + +#define to_uvc_file_handle(handle) \ + container_of(handle, struct uvc_file_handle, vfh) + +/* ------------------------------------------------------------------------ + * Functions + */ + +extern void uvc_function_setup_continue(struct uvc_device *uvc); +extern void uvc_endpoint_stream(struct uvc_device *dev); + +extern void uvc_function_connect(struct uvc_device *uvc); +extern void uvc_function_disconnect(struct uvc_device *uvc); + +#endif /* _UVC_GADGET_H_ */ diff --git a/drivers/usb/gadget/function/uvc-new/uvc_configfs.c b/drivers/usb/gadget/function/uvc-new/uvc_configfs.c new file mode 100644 index 000000000000..c9228181925d --- /dev/null +++ b/drivers/usb/gadget/function/uvc-new/uvc_configfs.c @@ -0,0 +1,2495 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * uvc_configfs.c + * + * Configfs support for the uvc function. + * + * Copyright (c) 2014 Samsung Electronics Co., Ltd. 
+ * http://www.samsung.com + * + * Author: Andrzej Pietrasiewicz + */ + +#include "uvc_configfs.h" + +#include <linux/sort.h> + +/* ----------------------------------------------------------------------------- + * Global Utility Structures and Macros + */ + +#define UVC_ATTR(prefix, cname, aname) \ +static struct configfs_attribute prefix##attr_##cname = { \ + .ca_name = __stringify(aname), \ + .ca_mode = S_IRUGO | S_IWUGO, \ + .ca_owner = THIS_MODULE, \ + .show = prefix##cname##_show, \ + .store = prefix##cname##_store, \ +} + +#define UVC_ATTR_RO(prefix, cname, aname) \ +static struct configfs_attribute prefix##attr_##cname = { \ + .ca_name = __stringify(aname), \ + .ca_mode = S_IRUGO, \ + .ca_owner = THIS_MODULE, \ + .show = prefix##cname##_show, \ +} + +#define le8_to_cpu(x) (x) +#define cpu_to_le8(x) (x) + +static int uvcg_config_compare_u32(const void *l, const void *r) +{ + u32 li = *(const u32 *)l; + u32 ri = *(const u32 *)r; + + return li < ri ? -1 : li == ri ? 0 : 1; +} + +struct uvcg_config_group_type { + struct config_item_type type; + const char *name; + const struct uvcg_config_group_type **children; + int (*create_children)(struct config_group *group); +}; + +static void uvcg_config_item_release(struct config_item *item) +{ + struct config_group *group = to_config_group(item); + + kfree(group); +} + +static struct configfs_item_operations uvcg_config_item_ops = { + .release = uvcg_config_item_release, +}; + +static int uvcg_config_create_group(struct config_group *parent, + const struct uvcg_config_group_type *type); + +static int uvcg_config_create_children(struct config_group *group, + const struct uvcg_config_group_type *type) +{ + const struct uvcg_config_group_type **child; + int ret; + + if (type->create_children) + return type->create_children(group); + + for (child = type->children; child && *child; ++child) { + ret = uvcg_config_create_group(group, *child); + if (ret < 0) + return ret; + } + + return 0; +} + +static int uvcg_config_create_group(struct config_group *parent, + const struct uvcg_config_group_type *type) +{ + struct config_group *group; + + group = kzalloc(sizeof(*group), GFP_KERNEL); + if (!group) + return -ENOMEM; + + config_group_init_type_name(group, type->name, &type->type); + configfs_add_default_group(group, parent); + + return uvcg_config_create_children(group, type); +} + +static void uvcg_config_remove_children(struct config_group *group) +{ + struct config_group *child, *n; + + list_for_each_entry_safe(child, n, &group->default_groups, group_entry) { + list_del(&child->group_entry); + uvcg_config_remove_children(child); + config_item_put(&child->cg_item); + } +} + +/* ----------------------------------------------------------------------------- + * control/header/<NAME> + * control/header + */ + +#define UVCG_CTRL_HDR_ATTR(cname, aname, bits, limit) \ +static ssize_t uvcg_control_header_##cname##_show( \ + struct config_item *item, char *page) \ +{ \ + struct uvcg_control_header *ch = to_uvcg_control_header(item); \ + struct f_uvc_opts *opts; \ + struct config_item *opts_item; \ + struct mutex *su_mutex = &ch->item.ci_group->cg_subsys->su_mutex;\ + int result; \ + \ + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \ + \ + opts_item = ch->item.ci_parent->ci_parent->ci_parent; \ + opts = to_f_uvc_opts(opts_item); \ + \ + mutex_lock(&opts->lock); \ + result = sprintf(page, "%u\n", le##bits##_to_cpu(ch->desc.aname));\ + mutex_unlock(&opts->lock); \ + \ + mutex_unlock(su_mutex); \ + return result; \ +} \ + \ +static ssize_t \ 
+uvcg_control_header_##cname##_store(struct config_item *item, \ + const char *page, size_t len) \ +{ \ + struct uvcg_control_header *ch = to_uvcg_control_header(item); \ + struct f_uvc_opts *opts; \ + struct config_item *opts_item; \ + struct mutex *su_mutex = &ch->item.ci_group->cg_subsys->su_mutex;\ + int ret; \ + u##bits num; \ + \ + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \ + \ + opts_item = ch->item.ci_parent->ci_parent->ci_parent; \ + opts = to_f_uvc_opts(opts_item); \ + \ + mutex_lock(&opts->lock); \ + if (ch->linked || opts->refcnt) { \ + ret = -EBUSY; \ + goto end; \ + } \ + \ + ret = kstrtou##bits(page, 0, &num); \ + if (ret) \ + goto end; \ + \ + if (num > limit) { \ + ret = -EINVAL; \ + goto end; \ + } \ + ch->desc.aname = cpu_to_le##bits(num); \ + ret = len; \ +end: \ + mutex_unlock(&opts->lock); \ + mutex_unlock(su_mutex); \ + return ret; \ +} \ + \ +UVC_ATTR(uvcg_control_header_, cname, aname) + +UVCG_CTRL_HDR_ATTR(bcd_uvc, bcdUVC, 16, 0xffff); + +UVCG_CTRL_HDR_ATTR(dw_clock_frequency, dwClockFrequency, 32, 0x7fffffff); + +#undef UVCG_CTRL_HDR_ATTR + +static struct configfs_attribute *uvcg_control_header_attrs[] = { + &uvcg_control_header_attr_bcd_uvc, + &uvcg_control_header_attr_dw_clock_frequency, + NULL, +}; + +static const struct config_item_type uvcg_control_header_type = { + .ct_item_ops = &uvcg_config_item_ops, + .ct_attrs = uvcg_control_header_attrs, + .ct_owner = THIS_MODULE, +}; + +static struct config_item *uvcg_control_header_make(struct config_group *group, + const char *name) +{ + struct uvcg_control_header *h; + + h = kzalloc(sizeof(*h), GFP_KERNEL); + if (!h) + return ERR_PTR(-ENOMEM); + + h->desc.bLength = UVC_DT_HEADER_SIZE(1); + h->desc.bDescriptorType = USB_DT_CS_INTERFACE; + h->desc.bDescriptorSubType = UVC_VC_HEADER; + h->desc.bcdUVC = cpu_to_le16(0x0100); + h->desc.dwClockFrequency = cpu_to_le32(48000000); + + config_item_init_type_name(&h->item, name, &uvcg_control_header_type); + + return &h->item; +} + +static struct configfs_group_operations uvcg_control_header_grp_ops = { + .make_item = uvcg_control_header_make, +}; + +static const struct uvcg_config_group_type uvcg_control_header_grp_type = { + .type = { + .ct_item_ops = &uvcg_config_item_ops, + .ct_group_ops = &uvcg_control_header_grp_ops, + .ct_owner = THIS_MODULE, + }, + .name = "header", +}; + +/* ----------------------------------------------------------------------------- + * control/processing/default + */ + +#define UVCG_DEFAULT_PROCESSING_ATTR(cname, aname, bits) \ +static ssize_t uvcg_default_processing_##cname##_show( \ + struct config_item *item, char *page) \ +{ \ + struct config_group *group = to_config_group(item); \ + struct f_uvc_opts *opts; \ + struct config_item *opts_item; \ + struct mutex *su_mutex = &group->cg_subsys->su_mutex; \ + struct uvc_processing_unit_descriptor *pd; \ + int result; \ + \ + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \ + \ + opts_item = group->cg_item.ci_parent->ci_parent->ci_parent; \ + opts = to_f_uvc_opts(opts_item); \ + pd = &opts->uvc_processing; \ + \ + mutex_lock(&opts->lock); \ + result = sprintf(page, "%u\n", le##bits##_to_cpu(pd->aname)); \ + mutex_unlock(&opts->lock); \ + \ + mutex_unlock(su_mutex); \ + return result; \ +} \ + \ +UVC_ATTR_RO(uvcg_default_processing_, cname, aname) + +UVCG_DEFAULT_PROCESSING_ATTR(b_unit_id, bUnitID, 8); +UVCG_DEFAULT_PROCESSING_ATTR(b_source_id, bSourceID, 8); +UVCG_DEFAULT_PROCESSING_ATTR(w_max_multiplier, wMaxMultiplier, 16); 
+UVCG_DEFAULT_PROCESSING_ATTR(i_processing, iProcessing, 8); + +#undef UVCG_DEFAULT_PROCESSING_ATTR + +static ssize_t uvcg_default_processing_bm_controls_show( + struct config_item *item, char *page) +{ + struct config_group *group = to_config_group(item); + struct f_uvc_opts *opts; + struct config_item *opts_item; + struct mutex *su_mutex = &group->cg_subsys->su_mutex; + struct uvc_processing_unit_descriptor *pd; + int result, i; + char *pg = page; + + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ + + opts_item = group->cg_item.ci_parent->ci_parent->ci_parent; + opts = to_f_uvc_opts(opts_item); + pd = &opts->uvc_processing; + + mutex_lock(&opts->lock); + for (result = 0, i = 0; i < pd->bControlSize; ++i) { + result += sprintf(pg, "%u\n", pd->bmControls[i]); + pg = page + result; + } + mutex_unlock(&opts->lock); + + mutex_unlock(su_mutex); + + return result; +} + +UVC_ATTR_RO(uvcg_default_processing_, bm_controls, bmControls); + +static struct configfs_attribute *uvcg_default_processing_attrs[] = { + &uvcg_default_processing_attr_b_unit_id, + &uvcg_default_processing_attr_b_source_id, + &uvcg_default_processing_attr_w_max_multiplier, + &uvcg_default_processing_attr_bm_controls, + &uvcg_default_processing_attr_i_processing, + NULL, +}; + +static const struct uvcg_config_group_type uvcg_default_processing_type = { + .type = { + .ct_item_ops = &uvcg_config_item_ops, + .ct_attrs = uvcg_default_processing_attrs, + .ct_owner = THIS_MODULE, + }, + .name = "default", +}; + +/* ----------------------------------------------------------------------------- + * control/processing + */ + +static const struct uvcg_config_group_type uvcg_processing_grp_type = { + .type = { + .ct_item_ops = &uvcg_config_item_ops, + .ct_owner = THIS_MODULE, + }, + .name = "processing", + .children = (const struct uvcg_config_group_type*[]) { + &uvcg_default_processing_type, + NULL, + }, +}; + +/* ----------------------------------------------------------------------------- + * control/terminal/camera/default + */ + +#define UVCG_DEFAULT_CAMERA_ATTR(cname, aname, bits) \ +static ssize_t uvcg_default_camera_##cname##_show( \ + struct config_item *item, char *page) \ +{ \ + struct config_group *group = to_config_group(item); \ + struct f_uvc_opts *opts; \ + struct config_item *opts_item; \ + struct mutex *su_mutex = &group->cg_subsys->su_mutex; \ + struct uvc_camera_terminal_descriptor *cd; \ + int result; \ + \ + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \ + \ + opts_item = group->cg_item.ci_parent->ci_parent->ci_parent-> \ + ci_parent; \ + opts = to_f_uvc_opts(opts_item); \ + cd = &opts->uvc_camera_terminal; \ + \ + mutex_lock(&opts->lock); \ + result = sprintf(page, "%u\n", le##bits##_to_cpu(cd->aname)); \ + mutex_unlock(&opts->lock); \ + \ + mutex_unlock(su_mutex); \ + \ + return result; \ +} \ + \ +UVC_ATTR_RO(uvcg_default_camera_, cname, aname) + +UVCG_DEFAULT_CAMERA_ATTR(b_terminal_id, bTerminalID, 8); +UVCG_DEFAULT_CAMERA_ATTR(w_terminal_type, wTerminalType, 16); +UVCG_DEFAULT_CAMERA_ATTR(b_assoc_terminal, bAssocTerminal, 8); +UVCG_DEFAULT_CAMERA_ATTR(i_terminal, iTerminal, 8); +UVCG_DEFAULT_CAMERA_ATTR(w_objective_focal_length_min, wObjectiveFocalLengthMin, + 16); +UVCG_DEFAULT_CAMERA_ATTR(w_objective_focal_length_max, wObjectiveFocalLengthMax, + 16); +UVCG_DEFAULT_CAMERA_ATTR(w_ocular_focal_length, wOcularFocalLength, + 16); + +#undef UVCG_DEFAULT_CAMERA_ATTR + +static ssize_t uvcg_default_camera_bm_controls_show( + struct config_item *item, char *page) +{ + struct config_group 
*group = to_config_group(item); + struct f_uvc_opts *opts; + struct config_item *opts_item; + struct mutex *su_mutex = &group->cg_subsys->su_mutex; + struct uvc_camera_terminal_descriptor *cd; + int result, i; + char *pg = page; + + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ + + opts_item = group->cg_item.ci_parent->ci_parent->ci_parent-> + ci_parent; + opts = to_f_uvc_opts(opts_item); + cd = &opts->uvc_camera_terminal; + + mutex_lock(&opts->lock); + for (result = 0, i = 0; i < cd->bControlSize; ++i) { + result += sprintf(pg, "%u\n", cd->bmControls[i]); + pg = page + result; + } + mutex_unlock(&opts->lock); + + mutex_unlock(su_mutex); + return result; +} + +UVC_ATTR_RO(uvcg_default_camera_, bm_controls, bmControls); + +static struct configfs_attribute *uvcg_default_camera_attrs[] = { + &uvcg_default_camera_attr_b_terminal_id, + &uvcg_default_camera_attr_w_terminal_type, + &uvcg_default_camera_attr_b_assoc_terminal, + &uvcg_default_camera_attr_i_terminal, + &uvcg_default_camera_attr_w_objective_focal_length_min, + &uvcg_default_camera_attr_w_objective_focal_length_max, + &uvcg_default_camera_attr_w_ocular_focal_length, + &uvcg_default_camera_attr_bm_controls, + NULL, +}; + +static const struct uvcg_config_group_type uvcg_default_camera_type = { + .type = { + .ct_item_ops = &uvcg_config_item_ops, + .ct_attrs = uvcg_default_camera_attrs, + .ct_owner = THIS_MODULE, + }, + .name = "default", +}; + +/* ----------------------------------------------------------------------------- + * control/terminal/camera + */ + +static const struct uvcg_config_group_type uvcg_camera_grp_type = { + .type = { + .ct_item_ops = &uvcg_config_item_ops, + .ct_owner = THIS_MODULE, + }, + .name = "camera", + .children = (const struct uvcg_config_group_type*[]) { + &uvcg_default_camera_type, + NULL, + }, +}; + +/* ----------------------------------------------------------------------------- + * control/terminal/output/default + */ + +#define UVCG_DEFAULT_OUTPUT_ATTR(cname, aname, bits) \ +static ssize_t uvcg_default_output_##cname##_show( \ + struct config_item *item, char *page) \ +{ \ + struct config_group *group = to_config_group(item); \ + struct f_uvc_opts *opts; \ + struct config_item *opts_item; \ + struct mutex *su_mutex = &group->cg_subsys->su_mutex; \ + struct uvc_output_terminal_descriptor *cd; \ + int result; \ + \ + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \ + \ + opts_item = group->cg_item.ci_parent->ci_parent-> \ + ci_parent->ci_parent; \ + opts = to_f_uvc_opts(opts_item); \ + cd = &opts->uvc_output_terminal; \ + \ + mutex_lock(&opts->lock); \ + result = sprintf(page, "%u\n", le##bits##_to_cpu(cd->aname)); \ + mutex_unlock(&opts->lock); \ + \ + mutex_unlock(su_mutex); \ + \ + return result; \ +} \ + \ +UVC_ATTR_RO(uvcg_default_output_, cname, aname) + +UVCG_DEFAULT_OUTPUT_ATTR(b_terminal_id, bTerminalID, 8); +UVCG_DEFAULT_OUTPUT_ATTR(w_terminal_type, wTerminalType, 16); +UVCG_DEFAULT_OUTPUT_ATTR(b_assoc_terminal, bAssocTerminal, 8); +UVCG_DEFAULT_OUTPUT_ATTR(i_terminal, iTerminal, 8); + +#undef UVCG_DEFAULT_OUTPUT_ATTR + +static ssize_t uvcg_default_output_b_source_id_show(struct config_item *item, + char *page) +{ + struct config_group *group = to_config_group(item); + struct f_uvc_opts *opts; + struct config_item *opts_item; + struct mutex *su_mutex = &group->cg_subsys->su_mutex; + struct uvc_output_terminal_descriptor *cd; + int result; + + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ + + opts_item = group->cg_item.ci_parent->ci_parent-> + 
ci_parent->ci_parent; + opts = to_f_uvc_opts(opts_item); + cd = &opts->uvc_output_terminal; + + mutex_lock(&opts->lock); + result = sprintf(page, "%u\n", le8_to_cpu(cd->bSourceID)); + mutex_unlock(&opts->lock); + + mutex_unlock(su_mutex); + + return result; +} + +static ssize_t uvcg_default_output_b_source_id_store(struct config_item *item, + const char *page, size_t len) +{ + struct config_group *group = to_config_group(item); + struct f_uvc_opts *opts; + struct config_item *opts_item; + struct mutex *su_mutex = &group->cg_subsys->su_mutex; + struct uvc_output_terminal_descriptor *cd; + int result; + u8 num; + + result = kstrtou8(page, 0, &num); + if (result) + return result; + + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ + + opts_item = group->cg_item.ci_parent->ci_parent-> + ci_parent->ci_parent; + opts = to_f_uvc_opts(opts_item); + cd = &opts->uvc_output_terminal; + + mutex_lock(&opts->lock); + cd->bSourceID = num; + mutex_unlock(&opts->lock); + + mutex_unlock(su_mutex); + + return len; +} +UVC_ATTR(uvcg_default_output_, b_source_id, bSourceID); + +static struct configfs_attribute *uvcg_default_output_attrs[] = { + &uvcg_default_output_attr_b_terminal_id, + &uvcg_default_output_attr_w_terminal_type, + &uvcg_default_output_attr_b_assoc_terminal, + &uvcg_default_output_attr_b_source_id, + &uvcg_default_output_attr_i_terminal, + NULL, +}; + +static const struct uvcg_config_group_type uvcg_default_output_type = { + .type = { + .ct_item_ops = &uvcg_config_item_ops, + .ct_attrs = uvcg_default_output_attrs, + .ct_owner = THIS_MODULE, + }, + .name = "default", +}; + +/* ----------------------------------------------------------------------------- + * control/terminal/output + */ + +static const struct uvcg_config_group_type uvcg_output_grp_type = { + .type = { + .ct_item_ops = &uvcg_config_item_ops, + .ct_owner = THIS_MODULE, + }, + .name = "output", + .children = (const struct uvcg_config_group_type*[]) { + &uvcg_default_output_type, + NULL, + }, +}; + +/* ----------------------------------------------------------------------------- + * control/terminal + */ + +static const struct uvcg_config_group_type uvcg_terminal_grp_type = { + .type = { + .ct_item_ops = &uvcg_config_item_ops, + .ct_owner = THIS_MODULE, + }, + .name = "terminal", + .children = (const struct uvcg_config_group_type*[]) { + &uvcg_camera_grp_type, + &uvcg_output_grp_type, + NULL, + }, +}; + +/* ----------------------------------------------------------------------------- + * control/class/{fs|ss} + */ + +struct uvcg_control_class_group { + struct config_group group; + const char *name; +}; + +static inline struct uvc_descriptor_header +**uvcg_get_ctl_class_arr(struct config_item *i, struct f_uvc_opts *o) +{ + struct uvcg_control_class_group *group = + container_of(i, struct uvcg_control_class_group, + group.cg_item); + + if (!strcmp(group->name, "fs")) + return o->uvc_fs_control_cls; + + if (!strcmp(group->name, "ss")) + return o->uvc_ss_control_cls; + + return NULL; +} + +static int uvcg_control_class_allow_link(struct config_item *src, + struct config_item *target) +{ + struct config_item *control, *header; + struct f_uvc_opts *opts; + struct mutex *su_mutex = &src->ci_group->cg_subsys->su_mutex; + struct uvc_descriptor_header **class_array; + struct uvcg_control_header *target_hdr; + int ret = -EINVAL; + + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ + + control = src->ci_parent->ci_parent; + header = config_group_find_item(to_config_group(control), "header"); + if (!header || 
target->ci_parent != header) + goto out; + + opts = to_f_uvc_opts(control->ci_parent); + + mutex_lock(&opts->lock); + + class_array = uvcg_get_ctl_class_arr(src, opts); + if (!class_array) + goto unlock; + if (opts->refcnt || class_array[0]) { + ret = -EBUSY; + goto unlock; + } + + target_hdr = to_uvcg_control_header(target); + ++target_hdr->linked; + class_array[0] = (struct uvc_descriptor_header *)&target_hdr->desc; + ret = 0; + +unlock: + mutex_unlock(&opts->lock); +out: + config_item_put(header); + mutex_unlock(su_mutex); + return ret; +} + +static void uvcg_control_class_drop_link(struct config_item *src, + struct config_item *target) +{ + struct config_item *control, *header; + struct f_uvc_opts *opts; + struct mutex *su_mutex = &src->ci_group->cg_subsys->su_mutex; + struct uvc_descriptor_header **class_array; + struct uvcg_control_header *target_hdr; + + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ + + control = src->ci_parent->ci_parent; + header = config_group_find_item(to_config_group(control), "header"); + if (!header || target->ci_parent != header) + goto out; + + opts = to_f_uvc_opts(control->ci_parent); + + mutex_lock(&opts->lock); + + class_array = uvcg_get_ctl_class_arr(src, opts); + if (!class_array || opts->refcnt) + goto unlock; + + target_hdr = to_uvcg_control_header(target); + --target_hdr->linked; + class_array[0] = NULL; + +unlock: + mutex_unlock(&opts->lock); +out: + config_item_put(header); + mutex_unlock(su_mutex); +} + +static struct configfs_item_operations uvcg_control_class_item_ops = { + .release = uvcg_config_item_release, + .allow_link = uvcg_control_class_allow_link, + .drop_link = uvcg_control_class_drop_link, +}; + +static const struct config_item_type uvcg_control_class_type = { + .ct_item_ops = &uvcg_control_class_item_ops, + .ct_owner = THIS_MODULE, +}; + +/* ----------------------------------------------------------------------------- + * control/class + */ + +static int uvcg_control_class_create_children(struct config_group *parent) +{ + static const char * const names[] = { "fs", "ss" }; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(names); ++i) { + struct uvcg_control_class_group *group; + + group = kzalloc(sizeof(*group), GFP_KERNEL); + if (!group) + return -ENOMEM; + + group->name = names[i]; + + config_group_init_type_name(&group->group, group->name, + &uvcg_control_class_type); + configfs_add_default_group(&group->group, parent); + } + + return 0; +} + +static const struct uvcg_config_group_type uvcg_control_class_grp_type = { + .type = { + .ct_item_ops = &uvcg_config_item_ops, + .ct_owner = THIS_MODULE, + }, + .name = "class", + .create_children = uvcg_control_class_create_children, +}; + +/* ----------------------------------------------------------------------------- + * control + */ + +static ssize_t uvcg_default_control_b_interface_number_show( + struct config_item *item, char *page) +{ + struct config_group *group = to_config_group(item); + struct mutex *su_mutex = &group->cg_subsys->su_mutex; + struct config_item *opts_item; + struct f_uvc_opts *opts; + int result = 0; + + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ + + opts_item = item->ci_parent; + opts = to_f_uvc_opts(opts_item); + + mutex_lock(&opts->lock); + result += sprintf(page, "%u\n", opts->control_interface); + mutex_unlock(&opts->lock); + + mutex_unlock(su_mutex); + + return result; +} + +UVC_ATTR_RO(uvcg_default_control_, b_interface_number, bInterfaceNumber); + +static struct configfs_attribute *uvcg_default_control_attrs[] = { + 
&uvcg_default_control_attr_b_interface_number, + NULL, +}; + +static const struct uvcg_config_group_type uvcg_control_grp_type = { + .type = { + .ct_item_ops = &uvcg_config_item_ops, + .ct_attrs = uvcg_default_control_attrs, + .ct_owner = THIS_MODULE, + }, + .name = "control", + .children = (const struct uvcg_config_group_type*[]) { + &uvcg_control_header_grp_type, + &uvcg_processing_grp_type, + &uvcg_terminal_grp_type, + &uvcg_control_class_grp_type, + NULL, + }, +}; + +/* ----------------------------------------------------------------------------- + * streaming/uncompressed + * streaming/mjpeg + */ + +static const char * const uvcg_format_names[] = { + "uncompressed", + "mjpeg", +}; + +static ssize_t uvcg_format_bma_controls_show(struct uvcg_format *f, char *page) +{ + struct f_uvc_opts *opts; + struct config_item *opts_item; + struct mutex *su_mutex = &f->group.cg_subsys->su_mutex; + int result, i; + char *pg = page; + + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ + + opts_item = f->group.cg_item.ci_parent->ci_parent->ci_parent; + opts = to_f_uvc_opts(opts_item); + + mutex_lock(&opts->lock); + result = sprintf(pg, "0x"); + pg += result; + for (i = 0; i < UVCG_STREAMING_CONTROL_SIZE; ++i) { + result += sprintf(pg, "%x\n", f->bmaControls[i]); + pg = page + result; + } + mutex_unlock(&opts->lock); + + mutex_unlock(su_mutex); + return result; +} + +static ssize_t uvcg_format_bma_controls_store(struct uvcg_format *ch, + const char *page, size_t len) +{ + struct f_uvc_opts *opts; + struct config_item *opts_item; + struct mutex *su_mutex = &ch->group.cg_subsys->su_mutex; + int ret = -EINVAL; + + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ + + opts_item = ch->group.cg_item.ci_parent->ci_parent->ci_parent; + opts = to_f_uvc_opts(opts_item); + + mutex_lock(&opts->lock); + if (ch->linked || opts->refcnt) { + ret = -EBUSY; + goto end; + } + + if (len < 4 || *page != '0' || + (*(page + 1) != 'x' && *(page + 1) != 'X')) + goto end; + ret = hex2bin(ch->bmaControls, page + 2, 1); + if (ret < 0) + goto end; + ret = len; +end: + mutex_unlock(&opts->lock); + mutex_unlock(su_mutex); + return ret; +} + +/* ----------------------------------------------------------------------------- + * streaming/header/<NAME> + * streaming/header + */ + +static void uvcg_format_set_indices(struct config_group *fmt); + +static int uvcg_streaming_header_allow_link(struct config_item *src, + struct config_item *target) +{ + struct mutex *su_mutex = &src->ci_group->cg_subsys->su_mutex; + struct config_item *opts_item; + struct f_uvc_opts *opts; + struct uvcg_streaming_header *src_hdr; + struct uvcg_format *target_fmt = NULL; + struct uvcg_format_ptr *format_ptr; + int i, ret = -EINVAL; + + src_hdr = to_uvcg_streaming_header(src); + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ + + opts_item = src->ci_parent->ci_parent->ci_parent; + opts = to_f_uvc_opts(opts_item); + + mutex_lock(&opts->lock); + + if (src_hdr->linked) { + ret = -EBUSY; + goto out; + } + + /* + * Linking is only allowed to direct children of the format nodes + * (streaming/uncompressed or streaming/mjpeg nodes). First check that + * the grand-parent of the target matches the grand-parent of the source + * (the streaming node), and then verify that the target parent is a + * format node. 
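+ * + * E.g., for a gadget instance named uvc.0 (the name is only illustrative), + * the link validated below is created with: + * + * ln -s functions/uvc.0/streaming/uncompressed/u functions/uvc.0/streaming/header/h 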
+ */ + if (src->ci_parent->ci_parent != target->ci_parent->ci_parent) + goto out; + + for (i = 0; i < ARRAY_SIZE(uvcg_format_names); ++i) { + if (!strcmp(target->ci_parent->ci_name, uvcg_format_names[i])) + break; + } + + if (i == ARRAY_SIZE(uvcg_format_names)) + goto out; + + target_fmt = container_of(to_config_group(target), struct uvcg_format, + group); + if (!target_fmt) + goto out; + + uvcg_format_set_indices(to_config_group(target)); + + format_ptr = kzalloc(sizeof(*format_ptr), GFP_KERNEL); + if (!format_ptr) { + ret = -ENOMEM; + goto out; + } + ret = 0; + format_ptr->fmt = target_fmt; + list_add_tail(&format_ptr->entry, &src_hdr->formats); + ++src_hdr->num_fmt; + ++target_fmt->linked; + +out: + mutex_unlock(&opts->lock); + mutex_unlock(su_mutex); + return ret; +} + +static void uvcg_streaming_header_drop_link(struct config_item *src, + struct config_item *target) +{ + struct mutex *su_mutex = &src->ci_group->cg_subsys->su_mutex; + struct config_item *opts_item; + struct f_uvc_opts *opts; + struct uvcg_streaming_header *src_hdr; + struct uvcg_format *target_fmt = NULL; + struct uvcg_format_ptr *format_ptr, *tmp; + + src_hdr = to_uvcg_streaming_header(src); + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ + + opts_item = src->ci_parent->ci_parent->ci_parent; + opts = to_f_uvc_opts(opts_item); + + mutex_lock(&opts->lock); + target_fmt = container_of(to_config_group(target), struct uvcg_format, + group); + if (!target_fmt) + goto out; + + list_for_each_entry_safe(format_ptr, tmp, &src_hdr->formats, entry) + if (format_ptr->fmt == target_fmt) { + list_del(&format_ptr->entry); + kfree(format_ptr); + --src_hdr->num_fmt; + break; + } + + --target_fmt->linked; + +out: + mutex_unlock(&opts->lock); + mutex_unlock(su_mutex); +} + +static struct configfs_item_operations uvcg_streaming_header_item_ops = { + .release = uvcg_config_item_release, + .allow_link = uvcg_streaming_header_allow_link, + .drop_link = uvcg_streaming_header_drop_link, +}; + +#define UVCG_STREAMING_HEADER_ATTR(cname, aname, bits) \ +static ssize_t uvcg_streaming_header_##cname##_show( \ + struct config_item *item, char *page) \ +{ \ + struct uvcg_streaming_header *sh = to_uvcg_streaming_header(item); \ + struct f_uvc_opts *opts; \ + struct config_item *opts_item; \ + struct mutex *su_mutex = &sh->item.ci_group->cg_subsys->su_mutex;\ + int result; \ + \ + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \ + \ + opts_item = sh->item.ci_parent->ci_parent->ci_parent; \ + opts = to_f_uvc_opts(opts_item); \ + \ + mutex_lock(&opts->lock); \ + result = sprintf(page, "%u\n", le##bits##_to_cpu(sh->desc.aname));\ + mutex_unlock(&opts->lock); \ + \ + mutex_unlock(su_mutex); \ + return result; \ +} \ + \ +UVC_ATTR_RO(uvcg_streaming_header_, cname, aname) + +UVCG_STREAMING_HEADER_ATTR(bm_info, bmInfo, 8); +UVCG_STREAMING_HEADER_ATTR(b_terminal_link, bTerminalLink, 8); +UVCG_STREAMING_HEADER_ATTR(b_still_capture_method, bStillCaptureMethod, 8); +UVCG_STREAMING_HEADER_ATTR(b_trigger_support, bTriggerSupport, 8); +UVCG_STREAMING_HEADER_ATTR(b_trigger_usage, bTriggerUsage, 8); + +#undef UVCG_STREAMING_HEADER_ATTR + +static struct configfs_attribute *uvcg_streaming_header_attrs[] = { + &uvcg_streaming_header_attr_bm_info, + &uvcg_streaming_header_attr_b_terminal_link, + &uvcg_streaming_header_attr_b_still_capture_method, + &uvcg_streaming_header_attr_b_trigger_support, + &uvcg_streaming_header_attr_b_trigger_usage, + NULL, +}; + +static const struct config_item_type uvcg_streaming_header_type = { + .ct_item_ops = 
&uvcg_streaming_header_item_ops, + .ct_attrs = uvcg_streaming_header_attrs, + .ct_owner = THIS_MODULE, +}; + +static struct config_item +*uvcg_streaming_header_make(struct config_group *group, const char *name) +{ + struct uvcg_streaming_header *h; + + h = kzalloc(sizeof(*h), GFP_KERNEL); + if (!h) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD(&h->formats); + h->desc.bDescriptorType = USB_DT_CS_INTERFACE; + h->desc.bDescriptorSubType = UVC_VS_INPUT_HEADER; + h->desc.bTerminalLink = 3; + h->desc.bControlSize = UVCG_STREAMING_CONTROL_SIZE; + + config_item_init_type_name(&h->item, name, &uvcg_streaming_header_type); + + return &h->item; +} + +static struct configfs_group_operations uvcg_streaming_header_grp_ops = { + .make_item = uvcg_streaming_header_make, +}; + +static const struct uvcg_config_group_type uvcg_streaming_header_grp_type = { + .type = { + .ct_item_ops = &uvcg_config_item_ops, + .ct_group_ops = &uvcg_streaming_header_grp_ops, + .ct_owner = THIS_MODULE, + }, + .name = "header", +}; + +/* ----------------------------------------------------------------------------- + * streaming/<mode>/<format>/<NAME> + */ + +#define UVCG_FRAME_ATTR(cname, aname, bits) \ +static ssize_t uvcg_frame_##cname##_show(struct config_item *item, char *page)\ +{ \ + struct uvcg_frame *f = to_uvcg_frame(item); \ + struct f_uvc_opts *opts; \ + struct config_item *opts_item; \ + struct mutex *su_mutex = &f->item.ci_group->cg_subsys->su_mutex;\ + int result; \ + \ + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \ + \ + opts_item = f->item.ci_parent->ci_parent->ci_parent->ci_parent; \ + opts = to_f_uvc_opts(opts_item); \ + \ + mutex_lock(&opts->lock); \ + result = sprintf(page, "%u\n", f->frame.cname); \ + mutex_unlock(&opts->lock); \ + \ + mutex_unlock(su_mutex); \ + return result; \ +} \ + \ +static ssize_t uvcg_frame_##cname##_store(struct config_item *item, \ + const char *page, size_t len)\ +{ \ + struct uvcg_frame *f = to_uvcg_frame(item); \ + struct f_uvc_opts *opts; \ + struct config_item *opts_item; \ + struct uvcg_format *fmt; \ + struct mutex *su_mutex = &f->item.ci_group->cg_subsys->su_mutex;\ + typeof(f->frame.cname) num; \ + int ret; \ + \ + ret = kstrtou##bits(page, 0, &num); \ + if (ret) \ + return ret; \ + \ + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \ + \ + opts_item = f->item.ci_parent->ci_parent->ci_parent->ci_parent; \ + opts = to_f_uvc_opts(opts_item); \ + fmt = to_uvcg_format(f->item.ci_parent); \ + \ + mutex_lock(&opts->lock); \ + if (fmt->linked || opts->refcnt) { \ + ret = -EBUSY; \ + goto end; \ + } \ + \ + f->frame.cname = num; \ + ret = len; \ +end: \ + mutex_unlock(&opts->lock); \ + mutex_unlock(su_mutex); \ + return ret; \ +} \ + \ +UVC_ATTR(uvcg_frame_, cname, aname); + +static ssize_t uvcg_frame_b_frame_index_show(struct config_item *item, + char *page) +{ + struct uvcg_frame *f = to_uvcg_frame(item); + struct uvcg_format *fmt; + struct f_uvc_opts *opts; + struct config_item *opts_item; + struct config_item *fmt_item; + struct mutex *su_mutex = &f->item.ci_group->cg_subsys->su_mutex; + int result; + + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ + + fmt_item = f->item.ci_parent; + fmt = to_uvcg_format(fmt_item); + + if (!fmt->linked) { + result = -EBUSY; + goto out; + } + + opts_item = fmt_item->ci_parent->ci_parent->ci_parent; + opts = to_f_uvc_opts(opts_item); + + mutex_lock(&opts->lock); + result = sprintf(page, "%u\n", f->frame.b_frame_index); + mutex_unlock(&opts->lock); + +out: + mutex_unlock(su_mutex); + return result; +} + 
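/* + * bFrameIndex is not writable: uvcg_format_set_indices() renumbers all + * frames when the parent format is linked into a streaming header, and the + * show above returns -EBUSY until that link exists. + */ +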
+UVC_ATTR_RO(uvcg_frame_, b_frame_index, bFrameIndex); + +UVCG_FRAME_ATTR(bm_capabilities, bmCapabilities, 8); +UVCG_FRAME_ATTR(w_width, wWidth, 16); +UVCG_FRAME_ATTR(w_height, wHeight, 16); +UVCG_FRAME_ATTR(dw_min_bit_rate, dwMinBitRate, 32); +UVCG_FRAME_ATTR(dw_max_bit_rate, dwMaxBitRate, 32); +UVCG_FRAME_ATTR(dw_max_video_frame_buffer_size, dwMaxVideoFrameBufferSize, 32); +UVCG_FRAME_ATTR(dw_default_frame_interval, dwDefaultFrameInterval, 32); + +#undef UVCG_FRAME_ATTR + +static ssize_t uvcg_frame_dw_frame_interval_show(struct config_item *item, + char *page) +{ + struct uvcg_frame *frm = to_uvcg_frame(item); + struct f_uvc_opts *opts; + struct config_item *opts_item; + struct mutex *su_mutex = &frm->item.ci_group->cg_subsys->su_mutex; + int result, i; + char *pg = page; + + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ + + opts_item = frm->item.ci_parent->ci_parent->ci_parent->ci_parent; + opts = to_f_uvc_opts(opts_item); + + mutex_lock(&opts->lock); + for (result = 0, i = 0; i < frm->frame.b_frame_interval_type; ++i) { + result += sprintf(pg, "%u\n", frm->dw_frame_interval[i]); + pg = page + result; + } + mutex_unlock(&opts->lock); + + mutex_unlock(su_mutex); + return result; +} + +static inline int __uvcg_count_frm_intrv(char *buf, void *priv) +{ + ++*((int *)priv); + return 0; +} + +static inline int __uvcg_fill_frm_intrv(char *buf, void *priv) +{ + u32 num, **interv; + int ret; + + ret = kstrtou32(buf, 0, &num); + if (ret) + return ret; + + interv = priv; + **interv = num; + ++*interv; + + return 0; +} + +static int __uvcg_iter_frm_intrv(const char *page, size_t len, + int (*fun)(char *, void *), void *priv) +{ + /* sign, base 2 representation, newline, terminator */ + char buf[1 + sizeof(u32) * 8 + 1 + 1]; + const char *pg = page; + int i, ret; + + if (!fun) + return -EINVAL; + + while (pg - page < len) { + i = 0; + while (i < sizeof(buf) && (pg - page < len) && + *pg != '\0' && *pg != '\n') + buf[i++] = *pg++; + if (i == sizeof(buf)) + return -EINVAL; + while ((pg - page < len) && (*pg == '\0' || *pg == '\n')) + ++pg; + buf[i] = '\0'; + ret = fun(buf, priv); + if (ret) + return ret; + } + + return 0; +} + +static ssize_t uvcg_frame_dw_frame_interval_store(struct config_item *item, + const char *page, size_t len) +{ + struct uvcg_frame *ch = to_uvcg_frame(item); + struct f_uvc_opts *opts; + struct config_item *opts_item; + struct uvcg_format *fmt; + struct mutex *su_mutex = &ch->item.ci_group->cg_subsys->su_mutex; + int ret = 0, n = 0; + u32 *frm_intrv, *tmp; + + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ + + opts_item = ch->item.ci_parent->ci_parent->ci_parent->ci_parent; + opts = to_f_uvc_opts(opts_item); + fmt = to_uvcg_format(ch->item.ci_parent); + + mutex_lock(&opts->lock); + if (fmt->linked || opts->refcnt) { + ret = -EBUSY; + goto end; + } + + ret = __uvcg_iter_frm_intrv(page, len, __uvcg_count_frm_intrv, &n); + if (ret) + goto end; + + tmp = frm_intrv = kcalloc(n, sizeof(u32), GFP_KERNEL); + if (!frm_intrv) { + ret = -ENOMEM; + goto end; + } + + ret = __uvcg_iter_frm_intrv(page, len, __uvcg_fill_frm_intrv, &tmp); + if (ret) { + kfree(frm_intrv); + goto end; + } + + kfree(ch->dw_frame_interval); + ch->dw_frame_interval = frm_intrv; + ch->frame.b_frame_interval_type = n; + sort(ch->dw_frame_interval, n, sizeof(*ch->dw_frame_interval), + uvcg_config_compare_u32, NULL); + ret = len; + +end: + mutex_unlock(&opts->lock); + mutex_unlock(su_mutex); + return ret; +} + +UVC_ATTR(uvcg_frame_, dw_frame_interval, dwFrameInterval); + +static struct 
configfs_attribute *uvcg_frame_attrs[] = { + &uvcg_frame_attr_b_frame_index, + &uvcg_frame_attr_bm_capabilities, + &uvcg_frame_attr_w_width, + &uvcg_frame_attr_w_height, + &uvcg_frame_attr_dw_min_bit_rate, + &uvcg_frame_attr_dw_max_bit_rate, + &uvcg_frame_attr_dw_max_video_frame_buffer_size, + &uvcg_frame_attr_dw_default_frame_interval, + &uvcg_frame_attr_dw_frame_interval, + NULL, +}; + +static const struct config_item_type uvcg_frame_type = { + .ct_item_ops = &uvcg_config_item_ops, + .ct_attrs = uvcg_frame_attrs, + .ct_owner = THIS_MODULE, +}; + +static struct config_item *uvcg_frame_make(struct config_group *group, + const char *name) +{ + struct uvcg_frame *h; + struct uvcg_format *fmt; + struct f_uvc_opts *opts; + struct config_item *opts_item; + struct uvcg_frame_ptr *frame_ptr; + + h = kzalloc(sizeof(*h), GFP_KERNEL); + if (!h) + return ERR_PTR(-ENOMEM); + + h->frame.b_descriptor_type = USB_DT_CS_INTERFACE; + h->frame.b_frame_index = 1; + h->frame.w_width = 640; + h->frame.w_height = 360; + h->frame.dw_min_bit_rate = 18432000; + h->frame.dw_max_bit_rate = 55296000; + h->frame.dw_max_video_frame_buffer_size = 460800; + h->frame.dw_default_frame_interval = 666666; + + opts_item = group->cg_item.ci_parent->ci_parent->ci_parent; + opts = to_f_uvc_opts(opts_item); + + mutex_lock(&opts->lock); + fmt = to_uvcg_format(&group->cg_item); + if (fmt->type == UVCG_UNCOMPRESSED) { + h->frame.b_descriptor_subtype = UVC_VS_FRAME_UNCOMPRESSED; + h->fmt_type = UVCG_UNCOMPRESSED; + } else if (fmt->type == UVCG_MJPEG) { + h->frame.b_descriptor_subtype = UVC_VS_FRAME_MJPEG; + h->fmt_type = UVCG_MJPEG; + } else { + mutex_unlock(&opts->lock); + kfree(h); + return ERR_PTR(-EINVAL); + } + + frame_ptr = kzalloc(sizeof(*frame_ptr), GFP_KERNEL); + if (!frame_ptr) { + mutex_unlock(&opts->lock); + kfree(h); + return ERR_PTR(-ENOMEM); + } + + frame_ptr->frm = h; + list_add_tail(&frame_ptr->entry, &fmt->frames); + ++fmt->num_frames; + mutex_unlock(&opts->lock); + + config_item_init_type_name(&h->item, name, &uvcg_frame_type); + + return &h->item; +} + +static void uvcg_frame_drop(struct config_group *group, struct config_item *item) +{ + struct uvcg_format *fmt; + struct f_uvc_opts *opts; + struct config_item *opts_item; + struct uvcg_frame *target_frm = NULL; + struct uvcg_frame_ptr *frame_ptr, *tmp; + + opts_item = group->cg_item.ci_parent->ci_parent->ci_parent; + opts = to_f_uvc_opts(opts_item); + + mutex_lock(&opts->lock); + target_frm = container_of(item, struct uvcg_frame, item); + fmt = to_uvcg_format(&group->cg_item); + + list_for_each_entry_safe(frame_ptr, tmp, &fmt->frames, entry) + if (frame_ptr->frm == target_frm) { + list_del(&frame_ptr->entry); + kfree(frame_ptr); + --fmt->num_frames; + break; + } + mutex_unlock(&opts->lock); + + config_item_put(item); +} + +static void uvcg_format_set_indices(struct config_group *fmt) +{ + struct config_item *ci; + unsigned int i = 1; + + list_for_each_entry(ci, &fmt->cg_children, ci_entry) { + struct uvcg_frame *frm; + + if (ci->ci_type != &uvcg_frame_type) + continue; + + frm = to_uvcg_frame(ci); + frm->frame.b_frame_index = i++; + } +} + +/* ----------------------------------------------------------------------------- + * streaming/uncompressed/<NAME> + */ + +static struct configfs_group_operations uvcg_uncompressed_group_ops = { + .make_item = uvcg_frame_make, + .drop_item = uvcg_frame_drop, +}; + +static ssize_t uvcg_uncompressed_guid_format_show(struct config_item *item, + char *page) +{ + struct uvcg_uncompressed *ch = to_uvcg_uncompressed(item); + struct 
f_uvc_opts *opts; + struct config_item *opts_item; + struct mutex *su_mutex = &ch->fmt.group.cg_subsys->su_mutex; + + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ + + opts_item = ch->fmt.group.cg_item.ci_parent->ci_parent->ci_parent; + opts = to_f_uvc_opts(opts_item); + + mutex_lock(&opts->lock); + memcpy(page, ch->desc.guidFormat, sizeof(ch->desc.guidFormat)); + mutex_unlock(&opts->lock); + + mutex_unlock(su_mutex); + + return sizeof(ch->desc.guidFormat); +} + +static ssize_t uvcg_uncompressed_guid_format_store(struct config_item *item, + const char *page, size_t len) +{ + struct uvcg_uncompressed *ch = to_uvcg_uncompressed(item); + struct f_uvc_opts *opts; + struct config_item *opts_item; + struct mutex *su_mutex = &ch->fmt.group.cg_subsys->su_mutex; + int ret; + + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ + + opts_item = ch->fmt.group.cg_item.ci_parent->ci_parent->ci_parent; + opts = to_f_uvc_opts(opts_item); + + mutex_lock(&opts->lock); + if (ch->fmt.linked || opts->refcnt) { + ret = -EBUSY; + goto end; + } + + memcpy(ch->desc.guidFormat, page, + min(sizeof(ch->desc.guidFormat), len)); + ret = sizeof(ch->desc.guidFormat); + +end: + mutex_unlock(&opts->lock); + mutex_unlock(su_mutex); + return ret; +} + +UVC_ATTR(uvcg_uncompressed_, guid_format, guidFormat); + +#define UVCG_UNCOMPRESSED_ATTR_RO(cname, aname, bits) \ +static ssize_t uvcg_uncompressed_##cname##_show( \ + struct config_item *item, char *page) \ +{ \ + struct uvcg_uncompressed *u = to_uvcg_uncompressed(item); \ + struct f_uvc_opts *opts; \ + struct config_item *opts_item; \ + struct mutex *su_mutex = &u->fmt.group.cg_subsys->su_mutex; \ + int result; \ + \ + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \ + \ + opts_item = u->fmt.group.cg_item.ci_parent->ci_parent->ci_parent;\ + opts = to_f_uvc_opts(opts_item); \ + \ + mutex_lock(&opts->lock); \ + result = sprintf(page, "%u\n", le##bits##_to_cpu(u->desc.aname));\ + mutex_unlock(&opts->lock); \ + \ + mutex_unlock(su_mutex); \ + return result; \ +} \ + \ +UVC_ATTR_RO(uvcg_uncompressed_, cname, aname); + +#define UVCG_UNCOMPRESSED_ATTR(cname, aname, bits) \ +static ssize_t uvcg_uncompressed_##cname##_show( \ + struct config_item *item, char *page) \ +{ \ + struct uvcg_uncompressed *u = to_uvcg_uncompressed(item); \ + struct f_uvc_opts *opts; \ + struct config_item *opts_item; \ + struct mutex *su_mutex = &u->fmt.group.cg_subsys->su_mutex; \ + int result; \ + \ + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \ + \ + opts_item = u->fmt.group.cg_item.ci_parent->ci_parent->ci_parent;\ + opts = to_f_uvc_opts(opts_item); \ + \ + mutex_lock(&opts->lock); \ + result = sprintf(page, "%u\n", le##bits##_to_cpu(u->desc.aname));\ + mutex_unlock(&opts->lock); \ + \ + mutex_unlock(su_mutex); \ + return result; \ +} \ + \ +static ssize_t \ +uvcg_uncompressed_##cname##_store(struct config_item *item, \ + const char *page, size_t len) \ +{ \ + struct uvcg_uncompressed *u = to_uvcg_uncompressed(item); \ + struct f_uvc_opts *opts; \ + struct config_item *opts_item; \ + struct mutex *su_mutex = &u->fmt.group.cg_subsys->su_mutex; \ + int ret; \ + u8 num; \ + \ + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \ + \ + opts_item = u->fmt.group.cg_item.ci_parent->ci_parent->ci_parent;\ + opts = to_f_uvc_opts(opts_item); \ + \ + mutex_lock(&opts->lock); \ + if (u->fmt.linked || opts->refcnt) { \ + ret = -EBUSY; \ + goto end; \ + } \ + \ + ret = kstrtou8(page, 0, &num); \ + if (ret) \ + goto end; \ + \ + /* index values 
in uvc are never 0 */ \ + if (!num) { \ + ret = -EINVAL; \ + goto end; \ + } \ + \ + u->desc.aname = num; \ + ret = len; \ +end: \ + mutex_unlock(&opts->lock); \ + mutex_unlock(su_mutex); \ + return ret; \ +} \ + \ +UVC_ATTR(uvcg_uncompressed_, cname, aname); + +UVCG_UNCOMPRESSED_ATTR_RO(b_format_index, bFormatIndex, 8); +UVCG_UNCOMPRESSED_ATTR(b_bits_per_pixel, bBitsPerPixel, 8); +UVCG_UNCOMPRESSED_ATTR(b_default_frame_index, bDefaultFrameIndex, 8); +UVCG_UNCOMPRESSED_ATTR_RO(b_aspect_ratio_x, bAspectRatioX, 8); +UVCG_UNCOMPRESSED_ATTR_RO(b_aspect_ratio_y, bAspectRatioY, 8); +UVCG_UNCOMPRESSED_ATTR_RO(bm_interlace_flags, bmInterfaceFlags, 8); + +#undef UVCG_UNCOMPRESSED_ATTR +#undef UVCG_UNCOMPRESSED_ATTR_RO + +static inline ssize_t +uvcg_uncompressed_bma_controls_show(struct config_item *item, char *page) +{ + struct uvcg_uncompressed *unc = to_uvcg_uncompressed(item); + return uvcg_format_bma_controls_show(&unc->fmt, page); +} + +static inline ssize_t +uvcg_uncompressed_bma_controls_store(struct config_item *item, + const char *page, size_t len) +{ + struct uvcg_uncompressed *unc = to_uvcg_uncompressed(item); + return uvcg_format_bma_controls_store(&unc->fmt, page, len); +} + +UVC_ATTR(uvcg_uncompressed_, bma_controls, bmaControls); + +static struct configfs_attribute *uvcg_uncompressed_attrs[] = { + &uvcg_uncompressed_attr_b_format_index, + &uvcg_uncompressed_attr_guid_format, + &uvcg_uncompressed_attr_b_bits_per_pixel, + &uvcg_uncompressed_attr_b_default_frame_index, + &uvcg_uncompressed_attr_b_aspect_ratio_x, + &uvcg_uncompressed_attr_b_aspect_ratio_y, + &uvcg_uncompressed_attr_bm_interlace_flags, + &uvcg_uncompressed_attr_bma_controls, + NULL, +}; + +static const struct config_item_type uvcg_uncompressed_type = { + .ct_item_ops = &uvcg_config_item_ops, + .ct_group_ops = &uvcg_uncompressed_group_ops, + .ct_attrs = uvcg_uncompressed_attrs, + .ct_owner = THIS_MODULE, +}; + +static struct config_group *uvcg_uncompressed_make(struct config_group *group, + const char *name) +{ + static char guid[] = { + 'Y', 'U', 'Y', '2', 0x00, 0x00, 0x10, 0x00, + 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71 + }; + struct uvcg_uncompressed *h; + + h = kzalloc(sizeof(*h), GFP_KERNEL); + if (!h) + return ERR_PTR(-ENOMEM); + + h->desc.bLength = UVC_DT_FORMAT_UNCOMPRESSED_SIZE; + h->desc.bDescriptorType = USB_DT_CS_INTERFACE; + h->desc.bDescriptorSubType = UVC_VS_FORMAT_UNCOMPRESSED; + memcpy(h->desc.guidFormat, guid, sizeof(guid)); + h->desc.bBitsPerPixel = 16; + h->desc.bDefaultFrameIndex = 1; + h->desc.bAspectRatioX = 0; + h->desc.bAspectRatioY = 0; + h->desc.bmInterfaceFlags = 0; + h->desc.bCopyProtect = 0; + + INIT_LIST_HEAD(&h->fmt.frames); + h->fmt.type = UVCG_UNCOMPRESSED; + config_group_init_type_name(&h->fmt.group, name, + &uvcg_uncompressed_type); + + return &h->fmt.group; +} + +static struct configfs_group_operations uvcg_uncompressed_grp_ops = { + .make_group = uvcg_uncompressed_make, +}; + +static const struct uvcg_config_group_type uvcg_uncompressed_grp_type = { + .type = { + .ct_item_ops = &uvcg_config_item_ops, + .ct_group_ops = &uvcg_uncompressed_grp_ops, + .ct_owner = THIS_MODULE, + }, + .name = "uncompressed", +}; + +/* ----------------------------------------------------------------------------- + * streaming/mjpeg/ + */ + +static struct configfs_group_operations uvcg_mjpeg_group_ops = { + .make_item = uvcg_frame_make, + .drop_item = uvcg_frame_drop, +}; + +#define UVCG_MJPEG_ATTR_RO(cname, aname, bits) \ +static ssize_t uvcg_mjpeg_##cname##_show(struct config_item *item, char *page)\ 
+{ \ + struct uvcg_mjpeg *u = to_uvcg_mjpeg(item); \ + struct f_uvc_opts *opts; \ + struct config_item *opts_item; \ + struct mutex *su_mutex = &u->fmt.group.cg_subsys->su_mutex; \ + int result; \ + \ + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \ + \ + opts_item = u->fmt.group.cg_item.ci_parent->ci_parent->ci_parent;\ + opts = to_f_uvc_opts(opts_item); \ + \ + mutex_lock(&opts->lock); \ + result = sprintf(page, "%u\n", le##bits##_to_cpu(u->desc.aname));\ + mutex_unlock(&opts->lock); \ + \ + mutex_unlock(su_mutex); \ + return result; \ +} \ + \ +UVC_ATTR_RO(uvcg_mjpeg_, cname, aname) + +#define UVCG_MJPEG_ATTR(cname, aname, bits) \ +static ssize_t uvcg_mjpeg_##cname##_show(struct config_item *item, char *page)\ +{ \ + struct uvcg_mjpeg *u = to_uvcg_mjpeg(item); \ + struct f_uvc_opts *opts; \ + struct config_item *opts_item; \ + struct mutex *su_mutex = &u->fmt.group.cg_subsys->su_mutex; \ + int result; \ + \ + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \ + \ + opts_item = u->fmt.group.cg_item.ci_parent->ci_parent->ci_parent;\ + opts = to_f_uvc_opts(opts_item); \ + \ + mutex_lock(&opts->lock); \ + result = sprintf(page, "%u\n", le##bits##_to_cpu(u->desc.aname));\ + mutex_unlock(&opts->lock); \ + \ + mutex_unlock(su_mutex); \ + return result; \ +} \ + \ +static ssize_t \ +uvcg_mjpeg_##cname##_store(struct config_item *item, \ + const char *page, size_t len) \ +{ \ + struct uvcg_mjpeg *u = to_uvcg_mjpeg(item); \ + struct f_uvc_opts *opts; \ + struct config_item *opts_item; \ + struct mutex *su_mutex = &u->fmt.group.cg_subsys->su_mutex; \ + int ret; \ + u8 num; \ + \ + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \ + \ + opts_item = u->fmt.group.cg_item.ci_parent->ci_parent->ci_parent;\ + opts = to_f_uvc_opts(opts_item); \ + \ + mutex_lock(&opts->lock); \ + if (u->fmt.linked || opts->refcnt) { \ + ret = -EBUSY; \ + goto end; \ + } \ + \ + ret = kstrtou8(page, 0, &num); \ + if (ret) \ + goto end; \ + \ + /* index values in uvc are never 0 */ \ + if (!num) { \ + ret = -EINVAL; \ + goto end; \ + } \ + \ + u->desc.aname = num; \ + ret = len; \ +end: \ + mutex_unlock(&opts->lock); \ + mutex_unlock(su_mutex); \ + return ret; \ +} \ + \ +UVC_ATTR(uvcg_mjpeg_, cname, aname) + +UVCG_MJPEG_ATTR_RO(b_format_index, bFormatIndex, 8); +UVCG_MJPEG_ATTR(b_default_frame_index, bDefaultFrameIndex, 8); +UVCG_MJPEG_ATTR_RO(bm_flags, bmFlags, 8); +UVCG_MJPEG_ATTR_RO(b_aspect_ratio_x, bAspectRatioX, 8); +UVCG_MJPEG_ATTR_RO(b_aspect_ratio_y, bAspectRatioY, 8); +UVCG_MJPEG_ATTR_RO(bm_interlace_flags, bmInterfaceFlags, 8); + +#undef UVCG_MJPEG_ATTR +#undef UVCG_MJPEG_ATTR_RO + +static inline ssize_t +uvcg_mjpeg_bma_controls_show(struct config_item *item, char *page) +{ + struct uvcg_mjpeg *u = to_uvcg_mjpeg(item); + return uvcg_format_bma_controls_show(&u->fmt, page); +} + +static inline ssize_t +uvcg_mjpeg_bma_controls_store(struct config_item *item, + const char *page, size_t len) +{ + struct uvcg_mjpeg *u = to_uvcg_mjpeg(item); + return uvcg_format_bma_controls_store(&u->fmt, page, len); +} + +UVC_ATTR(uvcg_mjpeg_, bma_controls, bmaControls); + +static struct configfs_attribute *uvcg_mjpeg_attrs[] = { + &uvcg_mjpeg_attr_b_format_index, + &uvcg_mjpeg_attr_b_default_frame_index, + &uvcg_mjpeg_attr_bm_flags, + &uvcg_mjpeg_attr_b_aspect_ratio_x, + &uvcg_mjpeg_attr_b_aspect_ratio_y, + &uvcg_mjpeg_attr_bm_interlace_flags, + &uvcg_mjpeg_attr_bma_controls, + NULL, +}; + +static const struct config_item_type uvcg_mjpeg_type = { + .ct_item_ops = &uvcg_config_item_ops, + 
.ct_group_ops = &uvcg_mjpeg_group_ops, + .ct_attrs = uvcg_mjpeg_attrs, + .ct_owner = THIS_MODULE, +}; + +static struct config_group *uvcg_mjpeg_make(struct config_group *group, + const char *name) +{ + struct uvcg_mjpeg *h; + + h = kzalloc(sizeof(*h), GFP_KERNEL); + if (!h) + return ERR_PTR(-ENOMEM); + + h->desc.bLength = UVC_DT_FORMAT_MJPEG_SIZE; + h->desc.bDescriptorType = USB_DT_CS_INTERFACE; + h->desc.bDescriptorSubType = UVC_VS_FORMAT_MJPEG; + h->desc.bDefaultFrameIndex = 1; + h->desc.bAspectRatioX = 0; + h->desc.bAspectRatioY = 0; + h->desc.bmInterfaceFlags = 0; + h->desc.bCopyProtect = 0; + + INIT_LIST_HEAD(&h->fmt.frames); + h->fmt.type = UVCG_MJPEG; + config_group_init_type_name(&h->fmt.group, name, + &uvcg_mjpeg_type); + + return &h->fmt.group; +} + +static struct configfs_group_operations uvcg_mjpeg_grp_ops = { + .make_group = uvcg_mjpeg_make, +}; + +static const struct uvcg_config_group_type uvcg_mjpeg_grp_type = { + .type = { + .ct_item_ops = &uvcg_config_item_ops, + .ct_group_ops = &uvcg_mjpeg_grp_ops, + .ct_owner = THIS_MODULE, + }, + .name = "mjpeg", +}; + +/* ----------------------------------------------------------------------------- + * streaming/color_matching/default + */ + +#define UVCG_DEFAULT_COLOR_MATCHING_ATTR(cname, aname, bits) \ +static ssize_t uvcg_default_color_matching_##cname##_show( \ + struct config_item *item, char *page) \ +{ \ + struct config_group *group = to_config_group(item); \ + struct f_uvc_opts *opts; \ + struct config_item *opts_item; \ + struct mutex *su_mutex = &group->cg_subsys->su_mutex; \ + struct uvc_color_matching_descriptor *cd; \ + int result; \ + \ + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \ + \ + opts_item = group->cg_item.ci_parent->ci_parent->ci_parent; \ + opts = to_f_uvc_opts(opts_item); \ + cd = &opts->uvc_color_matching; \ + \ + mutex_lock(&opts->lock); \ + result = sprintf(page, "%u\n", le##bits##_to_cpu(cd->aname)); \ + mutex_unlock(&opts->lock); \ + \ + mutex_unlock(su_mutex); \ + return result; \ +} \ + \ +UVC_ATTR_RO(uvcg_default_color_matching_, cname, aname) + +UVCG_DEFAULT_COLOR_MATCHING_ATTR(b_color_primaries, bColorPrimaries, 8); +UVCG_DEFAULT_COLOR_MATCHING_ATTR(b_transfer_characteristics, + bTransferCharacteristics, 8); +UVCG_DEFAULT_COLOR_MATCHING_ATTR(b_matrix_coefficients, bMatrixCoefficients, 8); + +#undef UVCG_DEFAULT_COLOR_MATCHING_ATTR + +static struct configfs_attribute *uvcg_default_color_matching_attrs[] = { + &uvcg_default_color_matching_attr_b_color_primaries, + &uvcg_default_color_matching_attr_b_transfer_characteristics, + &uvcg_default_color_matching_attr_b_matrix_coefficients, + NULL, +}; + +static const struct uvcg_config_group_type uvcg_default_color_matching_type = { + .type = { + .ct_item_ops = &uvcg_config_item_ops, + .ct_attrs = uvcg_default_color_matching_attrs, + .ct_owner = THIS_MODULE, + }, + .name = "default", +}; + +/* ----------------------------------------------------------------------------- + * streaming/color_matching + */ + +static const struct uvcg_config_group_type uvcg_color_matching_grp_type = { + .type = { + .ct_item_ops = &uvcg_config_item_ops, + .ct_owner = THIS_MODULE, + }, + .name = "color_matching", + .children = (const struct uvcg_config_group_type*[]) { + &uvcg_default_color_matching_type, + NULL, + }, +}; + +/* ----------------------------------------------------------------------------- + * streaming/class/{fs|hs|ss} + */ + +struct uvcg_streaming_class_group { + struct config_group group; + const char *name; +}; + +static inline struct 
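+/*
+ * The streaming half of the resulting configfs tree looks roughly like
+ * this (names below the fixed levels are user-chosen; "h", "u", "m",
+ * "360p" and "720p" are only examples):
+ *
+ *	uvc.0/streaming/
+ *	    header/h/
+ *	    uncompressed/u/360p/
+ *	    mjpeg/m/720p/
+ *	    color_matching/default/
+ *	    class/fs  class/hs  class/ss
+ *
+ * where fs/hs/ss are the per-speed class groups created below.
+ */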
uvc_descriptor_header +***__uvcg_get_stream_class_arr(struct config_item *i, struct f_uvc_opts *o) +{ + struct uvcg_streaming_class_group *group = + container_of(i, struct uvcg_streaming_class_group, + group.cg_item); + + if (!strcmp(group->name, "fs")) + return &o->uvc_fs_streaming_cls; + + if (!strcmp(group->name, "hs")) + return &o->uvc_hs_streaming_cls; + + if (!strcmp(group->name, "ss")) + return &o->uvc_ss_streaming_cls; + + return NULL; +} + +enum uvcg_strm_type { + UVCG_HEADER = 0, + UVCG_FORMAT, + UVCG_FRAME +}; + +/* + * Iterate over a hierarchy of streaming descriptors' config items. + * The items are created by the user with configfs. + * + * It "processes" the header pointed to by @h, then for each format + * that follows the header "processes" the format itself and then for + * each frame inside a format "processes" the frame. + * + * The @fun callback is used as the "processing" function. + * + * __uvcg_iter_strm_cls() is used in two contexts: first, to calculate + * the amount of memory needed for an array of streaming descriptors + * and second, to actually fill the array. + * + * @h: streaming header pointer + * @priv2: an "inout" parameter (the caller might want to see the changes to it) + * @priv3: an "inout" parameter (the caller might want to see the changes to it) + * @fun: callback function for processing each level of the hierarchy + */ +static int __uvcg_iter_strm_cls(struct uvcg_streaming_header *h, + void *priv2, void *priv3, + int (*fun)(void *, void *, void *, int, enum uvcg_strm_type type)) +{ + struct uvcg_format_ptr *f; + struct config_group *grp; + struct config_item *item; + struct uvcg_frame *frm; + int ret, i, j; + + if (!fun) + return -EINVAL; + + i = j = 0; + ret = fun(h, priv2, priv3, 0, UVCG_HEADER); + if (ret) + return ret; + list_for_each_entry(f, &h->formats, entry) { + ret = fun(f->fmt, priv2, priv3, i++, UVCG_FORMAT); + if (ret) + return ret; + grp = &f->fmt->group; + list_for_each_entry(item, &grp->cg_children, ci_entry) { + frm = to_uvcg_frame(item); + ret = fun(frm, priv2, priv3, j++, UVCG_FRAME); + if (ret) + return ret; + } + } + + return ret; +} + +/* + * Count how many bytes are needed for an array of streaming descriptors. + * + * @priv1: pointer to a header, format or frame + * @priv2: inout parameter, accumulated size of the array + * @priv3: inout parameter, accumulated number of the array elements + * @n: unused, this function's prototype must match @fun in __uvcg_iter_strm_cls + */ +static int __uvcg_cnt_strm(void *priv1, void *priv2, void *priv3, int n, + enum uvcg_strm_type type) +{ + size_t *size = priv2; + size_t *count = priv3; + + switch (type) { + case UVCG_HEADER: { + struct uvcg_streaming_header *h = priv1; + + *size += sizeof(h->desc); + /* bmaControls */ + *size += h->num_fmt * UVCG_STREAMING_CONTROL_SIZE; + } + break; + case UVCG_FORMAT: { + struct uvcg_format *fmt = priv1; + + if (fmt->type == UVCG_UNCOMPRESSED) { + struct uvcg_uncompressed *u = + container_of(fmt, struct uvcg_uncompressed, + fmt); + + *size += sizeof(u->desc); + } else if (fmt->type == UVCG_MJPEG) { + struct uvcg_mjpeg *m = + container_of(fmt, struct uvcg_mjpeg, fmt); + + *size += sizeof(m->desc); + } else { + return -EINVAL; + } + } + break; + case UVCG_FRAME: { + struct uvcg_frame *frm = priv1; + int sz = sizeof(frm->dw_frame_interval); + + *size += sizeof(frm->frame); + *size += frm->frame.b_frame_interval_type * sz; + } + break; + } + + ++*count; + + return 0; +} + +/* + * Fill an array of streaming descriptors.
+ * + * @priv1: pointer to a header, format or frame + * @priv2: inout parameter, pointer into a block of memory + * @priv3: inout parameter, pointer to a 2-dimensional array + */ +static int __uvcg_fill_strm(void *priv1, void *priv2, void *priv3, int n, + enum uvcg_strm_type type) +{ + void **dest = priv2; + struct uvc_descriptor_header ***array = priv3; + size_t sz; + + **array = *dest; + ++*array; + + switch (type) { + case UVCG_HEADER: { + struct uvc_input_header_descriptor *ihdr = *dest; + struct uvcg_streaming_header *h = priv1; + struct uvcg_format_ptr *f; + + memcpy(*dest, &h->desc, sizeof(h->desc)); + *dest += sizeof(h->desc); + sz = UVCG_STREAMING_CONTROL_SIZE; + list_for_each_entry(f, &h->formats, entry) { + memcpy(*dest, f->fmt->bmaControls, sz); + *dest += sz; + } + ihdr->bLength = sizeof(h->desc) + h->num_fmt * sz; + ihdr->bNumFormats = h->num_fmt; + } + break; + case UVCG_FORMAT: { + struct uvcg_format *fmt = priv1; + + if (fmt->type == UVCG_UNCOMPRESSED) { + struct uvcg_uncompressed *u = + container_of(fmt, struct uvcg_uncompressed, + fmt); + + u->desc.bFormatIndex = n + 1; + u->desc.bNumFrameDescriptors = fmt->num_frames; + memcpy(*dest, &u->desc, sizeof(u->desc)); + *dest += sizeof(u->desc); + } else if (fmt->type == UVCG_MJPEG) { + struct uvcg_mjpeg *m = + container_of(fmt, struct uvcg_mjpeg, fmt); + + m->desc.bFormatIndex = n + 1; + m->desc.bNumFrameDescriptors = fmt->num_frames; + memcpy(*dest, &m->desc, sizeof(m->desc)); + *dest += sizeof(m->desc); + } else { + return -EINVAL; + } + } + break; + case UVCG_FRAME: { + struct uvcg_frame *frm = priv1; + struct uvc_descriptor_header *h = *dest; + + sz = sizeof(frm->frame); + memcpy(*dest, &frm->frame, sz); + *dest += sz; + sz = frm->frame.b_frame_interval_type * + sizeof(*frm->dw_frame_interval); + memcpy(*dest, frm->dw_frame_interval, sz); + *dest += sz; + if (frm->fmt_type == UVCG_UNCOMPRESSED) + h->bLength = UVC_DT_FRAME_UNCOMPRESSED_SIZE( + frm->frame.b_frame_interval_type); + else if (frm->fmt_type == UVCG_MJPEG) + h->bLength = UVC_DT_FRAME_MJPEG_SIZE( + frm->frame.b_frame_interval_type); + } + break; + } + + return 0; +} + +static int uvcg_streaming_class_allow_link(struct config_item *src, + struct config_item *target) +{ + struct config_item *streaming, *header; + struct f_uvc_opts *opts; + struct mutex *su_mutex = &src->ci_group->cg_subsys->su_mutex; + struct uvc_descriptor_header ***class_array, **cl_arr; + struct uvcg_streaming_header *target_hdr; + void *data, *data_save; + size_t size = 0, count = 0; + int ret = -EINVAL; + + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ + + streaming = src->ci_parent->ci_parent; + header = config_group_find_item(to_config_group(streaming), "header"); + if (!header || target->ci_parent != header) + goto out; + + opts = to_f_uvc_opts(streaming->ci_parent); + + mutex_lock(&opts->lock); + + class_array = __uvcg_get_stream_class_arr(src, opts); + if (!class_array || *class_array || opts->refcnt) { + ret = -EBUSY; + goto unlock; + } + + target_hdr = to_uvcg_streaming_header(target); + ret = __uvcg_iter_strm_cls(target_hdr, &size, &count, __uvcg_cnt_strm); + if (ret) + goto unlock; + + count += 2; /* color_matching, NULL */ + *class_array = kcalloc(count, sizeof(void *), GFP_KERNEL); + if (!*class_array) { + ret = -ENOMEM; + goto unlock; + } + + data = data_save = kzalloc(size, GFP_KERNEL); + if (!data) { + kfree(*class_array); + *class_array = NULL; + ret = -ENOMEM; + goto unlock; + } + cl_arr = *class_array; + ret = __uvcg_iter_strm_cls(target_hdr, &data, &cl_arr, + 
__uvcg_fill_strm); + if (ret) { + kfree(*class_array); + *class_array = NULL; + /* + * __uvcg_fill_strm() called from __uvcg_iter_strm_cls() + * might have advanced the "data", so use a backup copy + */ + kfree(data_save); + goto unlock; + } + *cl_arr = (struct uvc_descriptor_header *)&opts->uvc_color_matching; + + ++target_hdr->linked; + ret = 0; + +unlock: + mutex_unlock(&opts->lock); +out: + config_item_put(header); + mutex_unlock(su_mutex); + return ret; +} + +static void uvcg_streaming_class_drop_link(struct config_item *src, + struct config_item *target) +{ + struct config_item *streaming, *header; + struct f_uvc_opts *opts; + struct mutex *su_mutex = &src->ci_group->cg_subsys->su_mutex; + struct uvc_descriptor_header ***class_array; + struct uvcg_streaming_header *target_hdr; + + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ + + streaming = src->ci_parent->ci_parent; + header = config_group_find_item(to_config_group(streaming), "header"); + if (!header || target->ci_parent != header) + goto out; + + opts = to_f_uvc_opts(streaming->ci_parent); + + mutex_lock(&opts->lock); + + class_array = __uvcg_get_stream_class_arr(src, opts); + if (!class_array || !*class_array) + goto unlock; + + if (opts->refcnt) + goto unlock; + + target_hdr = to_uvcg_streaming_header(target); + --target_hdr->linked; + kfree(**class_array); + kfree(*class_array); + *class_array = NULL; + +unlock: + mutex_unlock(&opts->lock); +out: + config_item_put(header); + mutex_unlock(su_mutex); +} + +static struct configfs_item_operations uvcg_streaming_class_item_ops = { + .release = uvcg_config_item_release, + .allow_link = uvcg_streaming_class_allow_link, + .drop_link = uvcg_streaming_class_drop_link, +}; + +static const struct config_item_type uvcg_streaming_class_type = { + .ct_item_ops = &uvcg_streaming_class_item_ops, + .ct_owner = THIS_MODULE, +}; + +/* ----------------------------------------------------------------------------- + * streaming/class + */ + +static int uvcg_streaming_class_create_children(struct config_group *parent) +{ + static const char * const names[] = { "fs", "hs", "ss" }; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(names); ++i) { + struct uvcg_streaming_class_group *group; + + group = kzalloc(sizeof(*group), GFP_KERNEL); + if (!group) + return -ENOMEM; + + group->name = names[i]; + + config_group_init_type_name(&group->group, group->name, + &uvcg_streaming_class_type); + configfs_add_default_group(&group->group, parent); + } + + return 0; +} + +static const struct uvcg_config_group_type uvcg_streaming_class_grp_type = { + .type = { + .ct_item_ops = &uvcg_config_item_ops, + .ct_owner = THIS_MODULE, + }, + .name = "class", + .create_children = uvcg_streaming_class_create_children, +}; + +/* ----------------------------------------------------------------------------- + * streaming + */ + +static ssize_t uvcg_default_streaming_b_interface_number_show( + struct config_item *item, char *page) +{ + struct config_group *group = to_config_group(item); + struct mutex *su_mutex = &group->cg_subsys->su_mutex; + struct config_item *opts_item; + struct f_uvc_opts *opts; + int result = 0; + + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ + + opts_item = item->ci_parent; + opts = to_f_uvc_opts(opts_item); + + mutex_lock(&opts->lock); + result += sprintf(page, "%u\n", opts->streaming_interface); + mutex_unlock(&opts->lock); + + mutex_unlock(su_mutex); + + return result; +} + +UVC_ATTR_RO(uvcg_default_streaming_, b_interface_number, bInterfaceNumber); + +static struct
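+/*
+ * Layout sketch of what uvcg_streaming_class_allow_link() builds for one
+ * speed (actual sizes depend on the user's configuration): class_array is
+ * a NULL-terminated vector of pointers into one contiguous data block:
+ *
+ *	class_array[0]   -> input header (followed by per-format bmaControls)
+ *	class_array[1]   -> format descriptor (bFormatIndex = 1)
+ *	class_array[2]   -> frame descriptor (followed by dwFrameInterval)
+ *	...
+ *	class_array[n-2] -> &opts->uvc_color_matching
+ *	class_array[n-1] =  NULL
+ *
+ * which is why __uvcg_cnt_strm() accumulates both bytes and element count,
+ * and the element count gets "+ 2" for color_matching and the terminator.
+ */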
configfs_attribute *uvcg_default_streaming_attrs[] = { + &uvcg_default_streaming_attr_b_interface_number, + NULL, +}; + +static const struct uvcg_config_group_type uvcg_streaming_grp_type = { + .type = { + .ct_item_ops = &uvcg_config_item_ops, + .ct_attrs = uvcg_default_streaming_attrs, + .ct_owner = THIS_MODULE, + }, + .name = "streaming", + .children = (const struct uvcg_config_group_type*[]) { + &uvcg_streaming_header_grp_type, + &uvcg_uncompressed_grp_type, + &uvcg_mjpeg_grp_type, + &uvcg_color_matching_grp_type, + &uvcg_streaming_class_grp_type, + NULL, + }, +}; + +/* ----------------------------------------------------------------------------- + * UVC function + */ + +static void uvc_func_item_release(struct config_item *item) +{ + struct f_uvc_opts *opts = to_f_uvc_opts(item); + + uvcg_config_remove_children(to_config_group(item)); + usb_put_function_instance(&opts->func_inst); +} + +static struct configfs_item_operations uvc_func_item_ops = { + .release = uvc_func_item_release, +}; + +#define UVCG_OPTS_ATTR(cname, aname, limit) \ +static ssize_t f_uvc_opts_##cname##_show( \ + struct config_item *item, char *page) \ +{ \ + struct f_uvc_opts *opts = to_f_uvc_opts(item); \ + int result; \ + \ + mutex_lock(&opts->lock); \ + result = sprintf(page, "%u\n", opts->cname); \ + mutex_unlock(&opts->lock); \ + \ + return result; \ +} \ + \ +static ssize_t \ +f_uvc_opts_##cname##_store(struct config_item *item, \ + const char *page, size_t len) \ +{ \ + struct f_uvc_opts *opts = to_f_uvc_opts(item); \ + unsigned int num; \ + int ret; \ + \ + mutex_lock(&opts->lock); \ + if (opts->refcnt) { \ + ret = -EBUSY; \ + goto end; \ + } \ + \ + ret = kstrtouint(page, 0, &num); \ + if (ret) \ + goto end; \ + \ + if (num > limit) { \ + ret = -EINVAL; \ + goto end; \ + } \ + opts->cname = num; \ + ret = len; \ +end: \ + mutex_unlock(&opts->lock); \ + return ret; \ +} \ + \ +UVC_ATTR(f_uvc_opts_, cname, cname) + +UVCG_OPTS_ATTR(streaming_interval, streaming_interval, 16); +UVCG_OPTS_ATTR(streaming_maxpacket, streaming_maxpacket, 3072); +UVCG_OPTS_ATTR(streaming_maxburst, streaming_maxburst, 15); + +#undef UVCG_OPTS_ATTR + +#define UVCG_OPTS_STRING_ATTR(cname, aname) \ +static ssize_t f_uvc_opts_string_##cname##_show(struct config_item *item,\ + char *page) \ +{ \ + struct f_uvc_opts *opts = to_f_uvc_opts(item); \ + int result; \ + \ + mutex_lock(&opts->lock); \ + result = snprintf(page, sizeof(opts->aname), "%s", opts->aname);\ + mutex_unlock(&opts->lock); \ + \ + return result; \ +} \ + \ +static ssize_t f_uvc_opts_string_##cname##_store(struct config_item *item,\ + const char *page, size_t len) \ +{ \ + struct f_uvc_opts *opts = to_f_uvc_opts(item); \ + int size = min(sizeof(opts->aname), len + 1); \ + int ret = 0; \ + \ + mutex_lock(&opts->lock); \ + if (opts->refcnt) { \ + ret = -EBUSY; \ + goto end; \ + } \ + \ + ret = strscpy(opts->aname, page, size); \ + if (ret == -E2BIG) \ + ret = size - 1; \ + \ +end: \ + mutex_unlock(&opts->lock); \ + return ret; \ +} \ + \ +UVC_ATTR(f_uvc_opts_string_, cname, aname) + +UVCG_OPTS_STRING_ATTR(function_name, function_name); + +#undef UVCG_OPTS_STRING_ATTR + +static struct configfs_attribute *uvc_attrs[] = { + &f_uvc_opts_attr_streaming_interval, + &f_uvc_opts_attr_streaming_maxpacket, + &f_uvc_opts_attr_streaming_maxburst, + &f_uvc_opts_string_attr_function_name, + NULL, +}; + +static const struct uvcg_config_group_type uvc_func_type = { + .type = { + .ct_item_ops = &uvc_func_item_ops, + .ct_attrs = uvc_attrs, + .ct_owner = THIS_MODULE, + }, + .name = "", + 
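+/*
+ * Example limits enforced by the UVCG_OPTS_ATTR() instances above (upper
+ * bounds only; the parse is kstrtouint()): streaming_interval accepts up
+ * to 16, streaming_maxpacket up to 3072 and streaming_maxburst up to 15,
+ * so writing "3072" to streaming_maxpacket succeeds while "4096" fails
+ * with -EINVAL.
+ */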
.children = (const struct uvcg_config_group_type*[]) { + &uvcg_control_grp_type, + &uvcg_streaming_grp_type, + NULL, + }, +}; + +int uvcg_attach_configfs(struct f_uvc_opts *opts) +{ + int ret; + + config_group_init_type_name(&opts->func_inst.group, uvc_func_type.name, + &uvc_func_type.type); + + ret = uvcg_config_create_children(&opts->func_inst.group, + &uvc_func_type); + if (ret < 0) + config_group_put(&opts->func_inst.group); + + return ret; +} diff --git a/drivers/usb/gadget/function/uvc-new/uvc_configfs.h b/drivers/usb/gadget/function/uvc-new/uvc_configfs.h new file mode 100644 index 000000000000..ad2ec8c4c78c --- /dev/null +++ b/drivers/usb/gadget/function/uvc-new/uvc_configfs.h @@ -0,0 +1,137 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * uvc_configfs.h + * + * Configfs support for the uvc function. + * + * Copyright (c) 2014 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Author: Andrzej Pietrasiewicz + */ +#ifndef UVC_CONFIGFS_H +#define UVC_CONFIGFS_H + +#include <linux/configfs.h> + +#include "u_uvc.h" + +static inline struct f_uvc_opts *to_f_uvc_opts(struct config_item *item) +{ + return container_of(to_config_group(item), struct f_uvc_opts, + func_inst.group); +} + +#define UVCG_STREAMING_CONTROL_SIZE 1 + +DECLARE_UVC_HEADER_DESCRIPTOR(1); + +struct uvcg_control_header { + struct config_item item; + struct UVC_HEADER_DESCRIPTOR(1) desc; + unsigned linked; +}; + +static inline struct uvcg_control_header *to_uvcg_control_header(struct config_item *item) +{ + return container_of(item, struct uvcg_control_header, item); +} + +enum uvcg_format_type { + UVCG_UNCOMPRESSED = 0, + UVCG_MJPEG, +}; + +struct uvcg_format { + struct config_group group; + enum uvcg_format_type type; + unsigned linked; + struct list_head frames; + unsigned num_frames; + __u8 bmaControls[UVCG_STREAMING_CONTROL_SIZE]; +}; + +struct uvcg_format_ptr { + struct uvcg_format *fmt; + struct list_head entry; +}; + +static inline struct uvcg_format *to_uvcg_format(struct config_item *item) +{ + return container_of(to_config_group(item), struct uvcg_format, group); +} + +struct uvcg_streaming_header { + struct config_item item; + struct uvc_input_header_descriptor desc; + unsigned linked; + struct list_head formats; + unsigned num_fmt; +}; + +static inline struct uvcg_streaming_header *to_uvcg_streaming_header(struct config_item *item) +{ + return container_of(item, struct uvcg_streaming_header, item); +} + +struct uvcg_frame_ptr { + struct uvcg_frame *frm; + struct list_head entry; +}; + +struct uvcg_frame { + struct config_item item; + enum uvcg_format_type fmt_type; + struct { + u8 b_length; + u8 b_descriptor_type; + u8 b_descriptor_subtype; + u8 b_frame_index; + u8 bm_capabilities; + u16 w_width; + u16 w_height; + u32 dw_min_bit_rate; + u32 dw_max_bit_rate; + u32 dw_max_video_frame_buffer_size; + u32 dw_default_frame_interval; + u8 b_frame_interval_type; + } __attribute__((packed)) frame; + u32 *dw_frame_interval; +}; + +static inline struct uvcg_frame *to_uvcg_frame(struct config_item *item) +{ + return container_of(item, struct uvcg_frame, item); +} + +/* ----------------------------------------------------------------------------- + * streaming/uncompressed/ + */ + +struct uvcg_uncompressed { + struct uvcg_format fmt; + struct uvc_format_uncompressed desc; +}; + +static inline struct uvcg_uncompressed *to_uvcg_uncompressed(struct config_item *item) +{ + return container_of(to_uvcg_format(item), struct uvcg_uncompressed, fmt); +} + +/*
----------------------------------------------------------------------------- + * streaming/mjpeg/ + */ + +struct uvcg_mjpeg { + struct uvcg_format fmt; + struct uvc_format_mjpeg desc; +}; + +static inline struct uvcg_mjpeg *to_uvcg_mjpeg(struct config_item *item) +{ + return container_of(to_uvcg_format(item), struct uvcg_mjpeg, fmt); +} + +int uvcg_attach_configfs(struct f_uvc_opts *opts); + +#endif /* UVC_CONFIGFS_H */ diff --git a/drivers/usb/gadget/function/uvc-new/uvc_queue.c b/drivers/usb/gadget/function/uvc-new/uvc_queue.c new file mode 100644 index 000000000000..debe9c2727e4 --- /dev/null +++ b/drivers/usb/gadget/function/uvc-new/uvc_queue.c @@ -0,0 +1,358 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * uvc_queue.c -- USB Video Class driver - Buffers management + * + * Copyright (C) 2005-2010 + * Laurent Pinchart (laurent.pinchart@ideasonboard.com) + */ + +#include <linux/atomic.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/usb.h> +#include <linux/videodev2.h> +#include <linux/vmalloc.h> +#include <linux/wait.h> + +#include <media/v4l2-common.h> +#include <media/videobuf2-dma-sg.h> +#include <media/videobuf2-vmalloc.h> + +#include "uvc.h" + +/* ------------------------------------------------------------------------ + * Video buffers queue management. + * + * The video buffers queue is initialized by uvcg_queue_init(), which performs + * basic initialization of the uvc_video_queue struct and never fails. + * + * Video buffers are managed by videobuf2. The driver uses a mutex to protect + * the videobuf2 queue operations by serializing calls to videobuf2 and a + * spinlock to protect the IRQ queue that holds the buffers to be processed by + * the driver. + */ + +/* ----------------------------------------------------------------------------- + * videobuf2 queue operations + */ + +static int uvc_queue_setup(struct vb2_queue *vq, + unsigned int *nbuffers, unsigned int *nplanes, + unsigned int sizes[], struct device *alloc_devs[]) +{ + struct uvc_video_queue *queue = vb2_get_drv_priv(vq); + struct uvc_video *video = container_of(queue, struct uvc_video, queue); + unsigned int req_size; + unsigned int nreq; + + if (*nbuffers > UVC_MAX_VIDEO_BUFFERS) + *nbuffers = UVC_MAX_VIDEO_BUFFERS; + + *nplanes = 1; + + sizes[0] = video->imagesize; + + req_size = video->ep->maxpacket + * max_t(unsigned int, video->ep->maxburst, 1) + * (video->ep->mult); + + /* We divide by two to increase the chance of using fewer + * requests for smaller frame sizes.
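+ * For example (endpoint numbers illustrative): with maxpacket 1024,
+ * maxburst 15 and mult 1, req_size is 15360 bytes, so a 460800-byte
+ * 640x360 YUYV frame yields DIV_ROUND_UP(230400, 15360) = 15 requests,
+ * which the clamp below keeps within 4..64.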
+ */ + nreq = DIV_ROUND_UP(DIV_ROUND_UP(sizes[0], 2), req_size); + nreq = clamp(nreq, 4U, 64U); + video->uvc_num_requests = nreq; + + return 0; +} + +static int uvc_buffer_prepare(struct vb2_buffer *vb) +{ + struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue); + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + struct uvc_buffer *buf = container_of(vbuf, struct uvc_buffer, buf); + + if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT && + vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0)) { + uvc_trace(UVC_TRACE_CAPTURE, "[E] Bytes used out of bounds.\n"); + return -EINVAL; + } + + if (unlikely(queue->flags & UVC_QUEUE_DISCONNECTED)) + return -ENODEV; + + buf->state = UVC_BUF_STATE_QUEUED; + if (queue->use_sg) { + buf->sgt = vb2_dma_sg_plane_desc(vb, 0); + buf->sg = buf->sgt->sgl; + } else { + buf->mem = vb2_plane_vaddr(vb, 0); + } + buf->length = vb2_plane_size(vb, 0); + if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) + buf->bytesused = 0; + else + buf->bytesused = vb2_get_plane_payload(vb, 0); + + return 0; +} + +static void uvc_buffer_queue(struct vb2_buffer *vb) +{ + struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue); + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + struct uvc_buffer *buf = container_of(vbuf, struct uvc_buffer, buf); + unsigned long flags; + + spin_lock_irqsave(&queue->irqlock, flags); + + if (likely(!(queue->flags & UVC_QUEUE_DISCONNECTED))) { + list_add_tail(&buf->queue, &queue->irqqueue); + } else { + /* + * If the device is disconnected return the buffer to userspace + * directly. The next QBUF call will fail with -ENODEV. + */ + buf->state = UVC_BUF_STATE_ERROR; + vb2_buffer_done(vb, VB2_BUF_STATE_ERROR); + } + + spin_unlock_irqrestore(&queue->irqlock, flags); +} + +static const struct vb2_ops uvc_queue_qops = { + .queue_setup = uvc_queue_setup, + .buf_prepare = uvc_buffer_prepare, + .buf_queue = uvc_buffer_queue, + .wait_prepare = vb2_ops_wait_prepare, + .wait_finish = vb2_ops_wait_finish, +}; + +int uvcg_queue_init(struct uvc_video_queue *queue, struct device *dev, enum v4l2_buf_type type, + struct mutex *lock) +{ + struct uvc_video *video = container_of(queue, struct uvc_video, queue); + struct usb_composite_dev *cdev = video->uvc->func.config->cdev; + int ret; + + queue->queue.type = type; + queue->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF; + queue->queue.drv_priv = queue; + queue->queue.buf_struct_size = sizeof(struct uvc_buffer); + queue->queue.ops = &uvc_queue_qops; + queue->queue.lock = lock; + if (cdev->gadget->sg_supported) { + pr_warn("%s: cdev->gadget->sg_supported == true, but force disabled\n", __func__); + /*queue->queue.mem_ops = &vb2_dma_sg_memops; + queue->use_sg = 1;*/ + } + queue->queue.mem_ops = &vb2_vmalloc_memops; + + queue->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY + | V4L2_BUF_FLAG_TSTAMP_SRC_EOF; + queue->queue.dev = dev; + + ret = vb2_queue_init(&queue->queue); + if (ret) + return ret; + + spin_lock_init(&queue->irqlock); + INIT_LIST_HEAD(&queue->irqqueue); + queue->flags = 0; + + return 0; +} + +/* + * Free the video buffers. + */ +void uvcg_free_buffers(struct uvc_video_queue *queue) +{ + vb2_queue_release(&queue->queue); +} + +/* + * Allocate the video buffers. + */ +int uvcg_alloc_buffers(struct uvc_video_queue *queue, + struct v4l2_requestbuffers *rb) +{ + int ret; + + ret = vb2_reqbufs(&queue->queue, rb); + + return ret ? 
ret : rb->count; +} + +int uvcg_query_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf) +{ + return vb2_querybuf(&queue->queue, buf); +} + +int uvcg_queue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf) +{ + return vb2_qbuf(&queue->queue, buf); +} + +/* + * Dequeue a video buffer. If nonblocking is false, block until a buffer is + * available. + */ +int uvcg_dequeue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf, + int nonblocking) +{ + return vb2_dqbuf(&queue->queue, buf, nonblocking); +} + +/* + * Poll the video queue. + * + * This function implements video queue polling and is intended to be used by + * the device poll handler. + */ +__poll_t uvcg_queue_poll(struct uvc_video_queue *queue, struct file *file, + poll_table *wait) +{ + return vb2_poll(&queue->queue, file, wait); +} + +int uvcg_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma) +{ + return vb2_mmap(&queue->queue, vma); +} + +#ifndef CONFIG_MMU +/* + * Get unmapped area. + * + * No-MMU architectures need this function to make mmap() work correctly. + */ +unsigned long uvcg_queue_get_unmapped_area(struct uvc_video_queue *queue, + unsigned long pgoff) +{ + return vb2_get_unmapped_area(&queue->queue, 0, 0, pgoff, 0); +} +#endif + +/* + * Cancel the video buffers queue. + * + * Cancelling the queue marks all buffers on the irq queue as erroneous, + * wakes them up and removes them from the queue. + * + * If the disconnect parameter is set, further calls to uvc_queue_buffer will + * fail with -ENODEV. + * + * This function acquires the irq spinlock and can be called from interrupt + * context. + */ +void uvcg_queue_cancel(struct uvc_video_queue *queue, int disconnect) +{ + struct uvc_buffer *buf; + unsigned long flags; + + spin_lock_irqsave(&queue->irqlock, flags); + while (!list_empty(&queue->irqqueue)) { + buf = list_first_entry(&queue->irqqueue, struct uvc_buffer, + queue); + list_del(&buf->queue); + buf->state = UVC_BUF_STATE_ERROR; + vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR); + } + queue->buf_used = 0; + + /* + * This must be protected by the irqlock spinlock to avoid race + * conditions between uvc_queue_buffer and the disconnection event that + * could result in an interruptible wait in uvc_dequeue_buffer. Do not + * blindly replace this logic by checking for the UVC_DEV_DISCONNECTED + * state outside the queue code. + */ + if (disconnect) + queue->flags |= UVC_QUEUE_DISCONNECTED; + spin_unlock_irqrestore(&queue->irqlock, flags); +} + +/* + * Enable or disable the video buffers queue. + * + * The queue must be enabled before starting video acquisition and must be + * disabled after stopping it. This ensures that the video buffers queue + * state can be properly initialized before buffers are accessed from the + * interrupt handler. + * + * Enabling the video queue initializes parameters (such as sequence number, + * sync pattern, ...). If the queue is already enabled, return -EBUSY. + * + * Disabling the video queue cancels the queue and removes all buffers from + * the main queue. + * + * This function can't be called from interrupt context. Use + * uvcg_queue_cancel() instead.
+ */ +int uvcg_queue_enable(struct uvc_video_queue *queue, int enable) +{ + unsigned long flags; + int ret = 0; + + if (enable) { + ret = vb2_streamon(&queue->queue, queue->queue.type); + if (ret < 0) + return ret; + + queue->sequence = 0; + queue->buf_used = 0; + queue->flags &= ~UVC_QUEUE_DROP_INCOMPLETE; + } else { + ret = vb2_streamoff(&queue->queue, queue->queue.type); + if (ret < 0) + return ret; + + spin_lock_irqsave(&queue->irqlock, flags); + INIT_LIST_HEAD(&queue->irqqueue); + + /* + * FIXME: We need to clear the DISCONNECTED flag to ensure that + * applications will be able to queue buffers for the next + * streaming run. However, clearing it here doesn't guarantee + * that the device will be reconnected in the meantime. + */ + queue->flags &= ~UVC_QUEUE_DISCONNECTED; + spin_unlock_irqrestore(&queue->irqlock, flags); + } + + return ret; +} + +/* Called with queue->irqlock held. */ +void uvcg_complete_buffer(struct uvc_video_queue *queue, + struct uvc_buffer *buf) +{ + if (queue->flags & UVC_QUEUE_DROP_INCOMPLETE) { + queue->flags &= ~UVC_QUEUE_DROP_INCOMPLETE; + buf->state = UVC_BUF_STATE_ERROR; + vb2_set_plane_payload(&buf->buf.vb2_buf, 0, 0); + vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR); + return; + } + + buf->buf.field = V4L2_FIELD_NONE; + buf->buf.sequence = queue->sequence++; + buf->buf.vb2_buf.timestamp = ktime_get_ns(); + + vb2_set_plane_payload(&buf->buf.vb2_buf, 0, buf->bytesused); + vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE); +} + +struct uvc_buffer *uvcg_queue_head(struct uvc_video_queue *queue) +{ + struct uvc_buffer *buf = NULL; + + if (!list_empty(&queue->irqqueue)) + buf = list_first_entry(&queue->irqqueue, struct uvc_buffer, + queue); + + return buf; +} diff --git a/drivers/usb/gadget/function/uvc-new/uvc_queue.h b/drivers/usb/gadget/function/uvc-new/uvc_queue.h new file mode 100644 index 000000000000..3b4767c30d4b --- /dev/null +++ b/drivers/usb/gadget/function/uvc-new/uvc_queue.h @@ -0,0 +1,100 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _UVC_QUEUE_H_ +#define _UVC_QUEUE_H_ + +#include <linux/kernel.h> +#include <linux/poll.h> +#include <linux/videodev2.h> + +#include <media/videobuf2-v4l2.h> + +struct file; +struct mutex; + +/* Maximum frame size in bytes, for sanity checking. */ +#define UVC_MAX_FRAME_SIZE (16*1024*1024) +/* Maximum number of video buffers. */ +#define UVC_MAX_VIDEO_BUFFERS 32 + +/* ------------------------------------------------------------------------ + * Structures.
+ */ + +enum uvc_buffer_state { + UVC_BUF_STATE_IDLE = 0, + UVC_BUF_STATE_QUEUED = 1, + UVC_BUF_STATE_ACTIVE = 2, + UVC_BUF_STATE_DONE = 3, + UVC_BUF_STATE_ERROR = 4, +}; + +struct uvc_buffer { + struct vb2_v4l2_buffer buf; + struct list_head queue; + + enum uvc_buffer_state state; + void *mem; + struct sg_table *sgt; + struct scatterlist *sg; + unsigned int offset; + unsigned int length; + unsigned int bytesused; +}; + +#define UVC_QUEUE_DISCONNECTED (1 << 0) +#define UVC_QUEUE_DROP_INCOMPLETE (1 << 1) + +struct uvc_video_queue { + struct vb2_queue queue; + + unsigned int flags; + __u32 sequence; + + unsigned int buf_used; + + bool use_sg; + + spinlock_t irqlock; /* Protects flags and irqqueue */ + struct list_head irqqueue; +}; + +static inline int uvc_queue_streaming(struct uvc_video_queue *queue) +{ + return vb2_is_streaming(&queue->queue); +} + +int uvcg_queue_init(struct uvc_video_queue *queue, struct device *dev, enum v4l2_buf_type type, + struct mutex *lock); + +void uvcg_free_buffers(struct uvc_video_queue *queue); + +int uvcg_alloc_buffers(struct uvc_video_queue *queue, + struct v4l2_requestbuffers *rb); + +int uvcg_query_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf); + +int uvcg_queue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf); + +int uvcg_dequeue_buffer(struct uvc_video_queue *queue, + struct v4l2_buffer *buf, int nonblocking); + +__poll_t uvcg_queue_poll(struct uvc_video_queue *queue, + struct file *file, poll_table *wait); + +int uvcg_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma); + +#ifndef CONFIG_MMU +unsigned long uvcg_queue_get_unmapped_area(struct uvc_video_queue *queue, + unsigned long pgoff); +#endif /* CONFIG_MMU */ + +void uvcg_queue_cancel(struct uvc_video_queue *queue, int disconnect); + +int uvcg_queue_enable(struct uvc_video_queue *queue, int enable); + +void uvcg_complete_buffer(struct uvc_video_queue *queue, + struct uvc_buffer *buf); + +struct uvc_buffer *uvcg_queue_head(struct uvc_video_queue *queue); + +#endif /* _UVC_QUEUE_H_ */ diff --git a/drivers/usb/gadget/function/uvc-new/uvc_v4l2.c b/drivers/usb/gadget/function/uvc-new/uvc_v4l2.c new file mode 100644 index 000000000000..e64cd797b0e7 --- /dev/null +++ b/drivers/usb/gadget/function/uvc-new/uvc_v4l2.c @@ -0,0 +1,655 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * uvc_v4l2.c -- USB Video Class Gadget driver + * + * Copyright (C) 2009-2010 + * Laurent Pinchart (laurent.pinchart@ideasonboard.com) + */ + +#include <linux/device.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/usb/g_uvc.h> +#include <linux/usb/uvc.h> +#include <linux/videodev2.h> +#include <linux/wait.h> + +#include <media/v4l2-dev.h> +#include <media/v4l2-event.h> +#include <media/v4l2-ioctl.h> +#include <media/v4l2-uvc.h> + +#include "f_uvc.h" +#include "uvc.h" +#include "uvc_queue.h" +#include "uvc_video.h" +#include "uvc_v4l2.h" +#include "uvc_configfs.h" + +static struct uvc_format_desc *to_uvc_format(struct uvcg_format *uformat) +{ + char guid[16] = UVC_GUID_FORMAT_MJPEG; + struct uvc_format_desc *format; + struct uvcg_uncompressed *unc; + + if (uformat->type == UVCG_UNCOMPRESSED) { + unc = to_uvcg_uncompressed(&uformat->group.cg_item); + if (!unc) + return ERR_PTR(-EINVAL); + + memcpy(guid, unc->desc.guidFormat, sizeof(guid)); + } + + format = uvc_format_by_guid(guid); + if (!format) + return ERR_PTR(-EINVAL); + + return format; +} + +static int uvc_v4l2_get_bytesperline(struct uvcg_format *uformat, + struct uvcg_frame *uframe) +{ + struct uvcg_uncompressed *u; + + if (uformat->type == UVCG_UNCOMPRESSED) { + u = to_uvcg_uncompressed(&uformat->group.cg_item); + if (!u) + return 0; + + return u->desc.bBitsPerPixel * uframe->frame.w_width / 8;
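+/*
+ * Worked example (numbers match the uvcg_frame_make() defaults in
+ * uvc_configfs.c): YUYV has bBitsPerPixel = 16, so a 640x360 frame gives
+ * 16 * 640 / 8 = 1280 bytes per line and 1280 * 360 = 460800 bytes per
+ * image, the same value seeded for dw_max_video_frame_buffer_size.
+ */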
+ } + + return 0; +} + +static int uvc_get_frame_size(struct uvcg_format *uformat, + struct uvcg_frame *uframe) +{ + unsigned int bpl = uvc_v4l2_get_bytesperline(uformat, uframe); + + return bpl ? bpl * uframe->frame.w_height : + uframe->frame.dw_max_video_frame_buffer_size; +} + +static struct uvcg_format *find_format_by_index(struct uvc_device *uvc, int index) +{ + struct uvcg_format_ptr *format; + struct uvcg_format *uformat = NULL; + int i = 1; + + list_for_each_entry(format, &uvc->header->formats, entry) { + if (index == i) { + uformat = format->fmt; + break; + } + i++; + } + + return uformat; +} + +static struct uvcg_frame *find_frame_by_index(struct uvc_device *uvc, + struct uvcg_format *uformat, + int index) +{ + struct uvcg_format_ptr *format; + struct uvcg_frame_ptr *frame; + struct uvcg_frame *uframe = NULL; + + list_for_each_entry(format, &uvc->header->formats, entry) { + if (format->fmt->type != uformat->type) + continue; + list_for_each_entry(frame, &format->fmt->frames, entry) { + if (index == frame->frm->frame.b_frame_index) { + uframe = frame->frm; + break; + } + } + } + + return uframe; +} + +static struct uvcg_format *find_format_by_pix(struct uvc_device *uvc, + u32 pixelformat) +{ + struct uvcg_format_ptr *format; + struct uvcg_format *uformat = NULL; + + list_for_each_entry(format, &uvc->header->formats, entry) { + struct uvc_format_desc *fmtdesc = to_uvc_format(format->fmt); + + if (fmtdesc->fcc == pixelformat) { + uformat = format->fmt; + break; + } + } + + return uformat; +} + +static struct uvcg_frame *find_closest_frame_by_size(struct uvc_device *uvc, + struct uvcg_format *uformat, + u16 rw, u16 rh) +{ + struct uvc_video *video = &uvc->video; + struct uvcg_format_ptr *format; + struct uvcg_frame_ptr *frame; + struct uvcg_frame *uframe = NULL; + unsigned int d, maxd; + + /* Find the closest image size. The distance between image sizes is + * the size in pixels of the non-overlapping regions between the + * requested size and the frame-specified size. 
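+ * For example (frame sizes illustrative), with
+ * d = w*h + rw*rh - 2*min(w, rw)*min(h, rh): for a requested 800x600,
+ * a 640x360 frame scores 230400 + 480000 - 460800 = 249600 while a
+ * 1280x720 frame scores 921600 + 480000 - 960000 = 441600, so the
+ * 640x360 frame is chosen.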
+ */ + maxd = (unsigned int)-1; + + list_for_each_entry(format, &uvc->header->formats, entry) { + if (format->fmt->type != uformat->type) + continue; + + list_for_each_entry(frame, &format->fmt->frames, entry) { + u16 w, h; + + w = frame->frm->frame.w_width; + h = frame->frm->frame.w_height; + + d = min(w, rw) * min(h, rh); + d = w*h + rw*rh - 2*d; + if (d < maxd) { + maxd = d; + uframe = frame->frm; + } + + if (maxd == 0) + break; + } + } + + if (!uframe) + uvcg_dbg(&video->uvc->func, "Unsupported size %ux%u\n", rw, rh); + + return uframe; +} + +/* -------------------------------------------------------------------------- + * Requests handling + */ + +static int +uvc_send_response(struct uvc_device *uvc, struct uvc_request_data *data) +{ + struct usb_composite_dev *cdev = uvc->func.config->cdev; + struct usb_request *req = uvc->control_req; + + if (data->length < 0) + return usb_ep_set_halt(cdev->gadget->ep0); + + req->length = min_t(unsigned int, uvc->event_length, data->length); + req->zero = data->length < uvc->event_length; + + memcpy(req->buf, data->data, req->length); + + return usb_ep_queue(cdev->gadget->ep0, req, GFP_KERNEL); +} + +/* -------------------------------------------------------------------------- + * V4L2 ioctls + */ + +static int +uvc_v4l2_querycap(struct file *file, void *fh, struct v4l2_capability *cap) +{ + struct video_device *vdev = video_devdata(file); + struct uvc_device *uvc = video_get_drvdata(vdev); + struct usb_composite_dev *cdev = uvc->func.config->cdev; + + strscpy(cap->driver, "g_uvc", sizeof(cap->driver)); + strscpy(cap->card, cdev->gadget->name, sizeof(cap->card)); + strscpy(cap->bus_info, dev_name(&cdev->gadget->dev), + sizeof(cap->bus_info)); + return 0; +} + +static int +uvc_v4l2_get_format(struct file *file, void *fh, struct v4l2_format *fmt) +{ + struct video_device *vdev = video_devdata(file); + struct uvc_device *uvc = video_get_drvdata(vdev); + struct uvc_video *video = &uvc->video; + + fmt->fmt.pix.pixelformat = video->fcc; + fmt->fmt.pix.width = video->width; + fmt->fmt.pix.height = video->height; + fmt->fmt.pix.field = V4L2_FIELD_NONE; + fmt->fmt.pix.bytesperline = video->bpp * video->width / 8; + fmt->fmt.pix.sizeimage = video->imagesize; + fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB; + fmt->fmt.pix.priv = 0; + + return 0; +} + +static int +uvc_v4l2_try_format(struct file *file, void *fh, struct v4l2_format *fmt) +{ + struct video_device *vdev = video_devdata(file); + struct uvc_device *uvc = video_get_drvdata(vdev); + struct uvc_video *video = &uvc->video; + struct uvcg_format *uformat; + struct uvcg_frame *uframe; + u8 *fcc; + + if (fmt->type != video->queue.queue.type) + return -EINVAL; + + fcc = (u8 *)&fmt->fmt.pix.pixelformat; + uvcg_dbg(&uvc->func, "Trying format 0x%08x (%c%c%c%c): %ux%u\n", + fmt->fmt.pix.pixelformat, + fcc[0], fcc[1], fcc[2], fcc[3], + fmt->fmt.pix.width, fmt->fmt.pix.height); + + uformat = find_format_by_pix(uvc, fmt->fmt.pix.pixelformat); + if (!uformat) + return -EINVAL; + + uframe = find_closest_frame_by_size(uvc, uformat, + fmt->fmt.pix.width, fmt->fmt.pix.height); + if (!uframe) + return -EINVAL; + + fmt->fmt.pix.width = uframe->frame.w_width; + fmt->fmt.pix.height = uframe->frame.w_height; + fmt->fmt.pix.field = V4L2_FIELD_NONE; + fmt->fmt.pix.bytesperline = uvc_v4l2_get_bytesperline(uformat, uframe); + fmt->fmt.pix.sizeimage = uvc_get_frame_size(uformat, uframe); + fmt->fmt.pix.pixelformat = to_uvc_format(uformat)->fcc; + fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB; + fmt->fmt.pix.priv = 0; + + return 
0; +} + +static int +uvc_v4l2_set_format(struct file *file, void *fh, struct v4l2_format *fmt) +{ + struct video_device *vdev = video_devdata(file); + struct uvc_device *uvc = video_get_drvdata(vdev); + struct uvc_video *video = &uvc->video; + int ret; + + ret = uvc_v4l2_try_format(file, fh, fmt); + if (ret) + return ret; + + video->fcc = fmt->fmt.pix.pixelformat; + video->bpp = fmt->fmt.pix.bytesperline * 8 / video->width; + video->width = fmt->fmt.pix.width; + video->height = fmt->fmt.pix.height; + video->imagesize = fmt->fmt.pix.sizeimage; + + return ret; +} + +static int +uvc_v4l2_enum_frameintervals(struct file *file, void *fh, + struct v4l2_frmivalenum *fival) +{ + struct video_device *vdev = video_devdata(file); + struct uvc_device *uvc = video_get_drvdata(vdev); + struct uvcg_format *uformat = NULL; + struct uvcg_frame *uframe = NULL; + struct uvcg_frame_ptr *frame; + + uformat = find_format_by_pix(uvc, fival->pixel_format); + if (!uformat) + return -EINVAL; + + list_for_each_entry(frame, &uformat->frames, entry) { + if (frame->frm->frame.w_width == fival->width && + frame->frm->frame.w_height == fival->height) { + uframe = frame->frm; + break; + } + } + if (!uframe) + return -EINVAL; + + if (fival->index >= uframe->frame.b_frame_interval_type) + return -EINVAL; + + fival->discrete.numerator = + uframe->dw_frame_interval[fival->index]; + + /* TODO: handle V4L2_FRMIVAL_TYPE_STEPWISE */ + fival->type = V4L2_FRMIVAL_TYPE_DISCRETE; + fival->discrete.denominator = 10000000; + v4l2_simplify_fraction(&fival->discrete.numerator, + &fival->discrete.denominator, 8, 333); + + return 0; +} + +static int +uvc_v4l2_enum_framesizes(struct file *file, void *fh, + struct v4l2_frmsizeenum *fsize) +{ + struct video_device *vdev = video_devdata(file); + struct uvc_device *uvc = video_get_drvdata(vdev); + struct uvcg_format *uformat = NULL; + struct uvcg_frame *uframe = NULL; + + uformat = find_format_by_pix(uvc, fsize->pixel_format); + if (!uformat) + return -EINVAL; + + if (fsize->index >= uformat->num_frames) + return -EINVAL; + + uframe = find_frame_by_index(uvc, uformat, fsize->index + 1); + if (!uframe) + return -EINVAL; + + fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE; + fsize->discrete.width = uframe->frame.w_width; + fsize->discrete.height = uframe->frame.w_height; + + return 0; +} + +static int +uvc_v4l2_enum_format(struct file *file, void *fh, struct v4l2_fmtdesc *f) +{ + struct video_device *vdev = video_devdata(file); + struct uvc_device *uvc = video_get_drvdata(vdev); + struct uvc_format_desc *fmtdesc; + struct uvcg_format *uformat; + + if (f->index >= uvc->header->num_fmt) + return -EINVAL; + + uformat = find_format_by_index(uvc, f->index + 1); + if (!uformat) + return -EINVAL; + + if (uformat->type != UVCG_UNCOMPRESSED) + f->flags |= V4L2_FMT_FLAG_COMPRESSED; + + fmtdesc = to_uvc_format(uformat); + f->pixelformat = fmtdesc->fcc; + + strscpy(f->description, fmtdesc->name, sizeof(f->description)); + f->description[strlen(fmtdesc->name) - 1] = 0; + + return 0; +} + +static int +uvc_v4l2_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *b) +{ + struct video_device *vdev = video_devdata(file); + struct uvc_device *uvc = video_get_drvdata(vdev); + struct uvc_video *video = &uvc->video; + + if (b->type != video->queue.queue.type) + return -EINVAL; + + return uvcg_alloc_buffers(&video->queue, b); +} + +static int +uvc_v4l2_querybuf(struct file *file, void *fh, struct v4l2_buffer *b) +{ + struct video_device *vdev = video_devdata(file); + struct uvc_device *uvc = 
video_get_drvdata(vdev); + struct uvc_video *video = &uvc->video; + + return uvcg_query_buffer(&video->queue, b); +} + +static int +uvc_v4l2_qbuf(struct file *file, void *fh, struct v4l2_buffer *b) +{ + struct video_device *vdev = video_devdata(file); + struct uvc_device *uvc = video_get_drvdata(vdev); + struct uvc_video *video = &uvc->video; + int ret; + + ret = uvcg_queue_buffer(&video->queue, b); + if (ret < 0) + return ret; + + if (uvc->state == UVC_STATE_STREAMING) + queue_work(video->async_wq, &video->pump); + + return ret; +} + +static int +uvc_v4l2_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b) +{ + struct video_device *vdev = video_devdata(file); + struct uvc_device *uvc = video_get_drvdata(vdev); + struct uvc_video *video = &uvc->video; + + return uvcg_dequeue_buffer(&video->queue, b, file->f_flags & O_NONBLOCK); +} + +static int +uvc_v4l2_streamon(struct file *file, void *fh, enum v4l2_buf_type type) +{ + struct video_device *vdev = video_devdata(file); + struct uvc_device *uvc = video_get_drvdata(vdev); + struct uvc_video *video = &uvc->video; + int ret; + + if (type != video->queue.queue.type) + return -EINVAL; + + /* Enable UVC video. */ + ret = uvcg_video_enable(video, 1); + if (ret < 0) + return ret; + + /* + * Complete the alternate setting selection setup phase now that + * userspace is ready to provide video frames. + */ + uvc_function_setup_continue(uvc); + uvc->state = UVC_STATE_STREAMING; + + return 0; +} + +static int +uvc_v4l2_streamoff(struct file *file, void *fh, enum v4l2_buf_type type) +{ + struct video_device *vdev = video_devdata(file); + struct uvc_device *uvc = video_get_drvdata(vdev); + struct uvc_video *video = &uvc->video; + + if (type != video->queue.queue.type) + return -EINVAL; + + return uvcg_video_enable(video, 0); +} + +static int +uvc_v4l2_subscribe_event(struct v4l2_fh *fh, + const struct v4l2_event_subscription *sub) +{ + struct uvc_device *uvc = video_get_drvdata(fh->vdev); + struct uvc_file_handle *handle = to_uvc_file_handle(fh); + int ret; + + if (sub->type < UVC_EVENT_FIRST || sub->type > UVC_EVENT_LAST) + return -EINVAL; + + if (sub->type == UVC_EVENT_SETUP && uvc->func_connected) + return -EBUSY; + + ret = v4l2_event_subscribe(fh, sub, 2, NULL); + if (ret < 0) + return ret; + + if (sub->type == UVC_EVENT_SETUP) { + uvc->func_connected = true; + handle->is_uvc_app_handle = true; + uvc_function_connect(uvc); + } + + return 0; +} + +static void uvc_v4l2_disable(struct uvc_device *uvc) +{ + uvc_function_disconnect(uvc); + uvcg_video_enable(&uvc->video, 0); + uvcg_free_buffers(&uvc->video.queue); + uvc->func_connected = false; + wake_up_interruptible(&uvc->func_connected_queue); +} + +static int +uvc_v4l2_unsubscribe_event(struct v4l2_fh *fh, + const struct v4l2_event_subscription *sub) +{ + struct uvc_device *uvc = video_get_drvdata(fh->vdev); + struct uvc_file_handle *handle = to_uvc_file_handle(fh); + int ret; + + ret = v4l2_event_unsubscribe(fh, sub); + if (ret < 0) + return ret; + + if (sub->type == UVC_EVENT_SETUP && handle->is_uvc_app_handle) { + uvc_v4l2_disable(uvc); + handle->is_uvc_app_handle = false; + } + + return 0; +} + +static long +uvc_v4l2_ioctl_default(struct file *file, void *fh, bool valid_prio, + unsigned int cmd, void *arg) +{ + struct video_device *vdev = video_devdata(file); + struct uvc_device *uvc = video_get_drvdata(vdev); + + switch (cmd) { + case UVCIOC_SEND_RESPONSE: + return uvc_send_response(uvc, arg); + + default: + return -ENOIOCTLCMD; + } +} + +const struct v4l2_ioctl_ops uvc_v4l2_ioctl_ops = { + 
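+/*
+ * Rough userspace counterpart to the event plumbing above (sketch only,
+ * error handling omitted; uvc_request_data, UVCIOC_SEND_RESPONSE and the
+ * UVC_EVENT_* codes come from the g_uvc UAPI header):
+ *
+ *	struct v4l2_event_subscription sub = { .type = UVC_EVENT_SETUP };
+ *	ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub);
+ *	for (;;) {
+ *		struct v4l2_event ev;
+ *		struct uvc_request_data resp = { .length = -EL2HLT };
+ *		ioctl(fd, VIDIOC_DQEVENT, &ev);
+ *		decode ev and fill resp; any negative resp.length makes
+ *		uvc_send_response() stall ep0
+ *		ioctl(fd, UVCIOC_SEND_RESPONSE, &resp);
+ *	}
+ */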
.vidioc_querycap = uvc_v4l2_querycap, + .vidioc_try_fmt_vid_out = uvc_v4l2_try_format, + .vidioc_g_fmt_vid_out = uvc_v4l2_get_format, + .vidioc_s_fmt_vid_out = uvc_v4l2_set_format, + .vidioc_enum_frameintervals = uvc_v4l2_enum_frameintervals, + .vidioc_enum_framesizes = uvc_v4l2_enum_framesizes, + .vidioc_enum_fmt_vid_out = uvc_v4l2_enum_format, + .vidioc_reqbufs = uvc_v4l2_reqbufs, + .vidioc_querybuf = uvc_v4l2_querybuf, + .vidioc_qbuf = uvc_v4l2_qbuf, + .vidioc_dqbuf = uvc_v4l2_dqbuf, + .vidioc_streamon = uvc_v4l2_streamon, + .vidioc_streamoff = uvc_v4l2_streamoff, + .vidioc_subscribe_event = uvc_v4l2_subscribe_event, + .vidioc_unsubscribe_event = uvc_v4l2_unsubscribe_event, + .vidioc_default = uvc_v4l2_ioctl_default, +}; + +/* -------------------------------------------------------------------------- + * V4L2 + */ + +static int +uvc_v4l2_open(struct file *file) +{ + struct video_device *vdev = video_devdata(file); + struct uvc_device *uvc = video_get_drvdata(vdev); + struct uvc_file_handle *handle; + + handle = kzalloc(sizeof(*handle), GFP_KERNEL); + if (handle == NULL) + return -ENOMEM; + + v4l2_fh_init(&handle->vfh, vdev); + v4l2_fh_add(&handle->vfh); + + handle->device = &uvc->video; + file->private_data = &handle->vfh; + + return 0; +} + +static int +uvc_v4l2_release(struct file *file) +{ + struct video_device *vdev = video_devdata(file); + struct uvc_device *uvc = video_get_drvdata(vdev); + struct uvc_file_handle *handle = to_uvc_file_handle(file->private_data); + struct uvc_video *video = handle->device; + + mutex_lock(&video->mutex); + if (handle->is_uvc_app_handle) + uvc_v4l2_disable(uvc); + mutex_unlock(&video->mutex); + + file->private_data = NULL; + v4l2_fh_del(&handle->vfh); + v4l2_fh_exit(&handle->vfh); + kfree(handle); + + return 0; +} + +static int +uvc_v4l2_mmap(struct file *file, struct vm_area_struct *vma) +{ + struct video_device *vdev = video_devdata(file); + struct uvc_device *uvc = video_get_drvdata(vdev); + + return uvcg_queue_mmap(&uvc->video.queue, vma); +} + +static __poll_t +uvc_v4l2_poll(struct file *file, poll_table *wait) +{ + struct video_device *vdev = video_devdata(file); + struct uvc_device *uvc = video_get_drvdata(vdev); + + return uvcg_queue_poll(&uvc->video.queue, file, wait); +} + +#ifndef CONFIG_MMU +static unsigned long uvcg_v4l2_get_unmapped_area(struct file *file, + unsigned long addr, unsigned long len, unsigned long pgoff, + unsigned long flags) +{ + struct video_device *vdev = video_devdata(file); + struct uvc_device *uvc = video_get_drvdata(vdev); + + return uvcg_queue_get_unmapped_area(&uvc->video.queue, pgoff); +} +#endif + +const struct v4l2_file_operations uvc_v4l2_fops = { + .owner = THIS_MODULE, + .open = uvc_v4l2_open, + .release = uvc_v4l2_release, + .unlocked_ioctl = video_ioctl2, + .mmap = uvc_v4l2_mmap, + .poll = uvc_v4l2_poll, +#ifndef CONFIG_MMU + .get_unmapped_area = uvcg_v4l2_get_unmapped_area, +#endif +}; diff --git a/drivers/usb/gadget/function/uvc-new/uvc_v4l2.h b/drivers/usb/gadget/function/uvc-new/uvc_v4l2.h new file mode 100644 index 000000000000..1576005b61fd --- /dev/null +++ b/drivers/usb/gadget/function/uvc-new/uvc_v4l2.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * uvc_v4l2.h -- USB Video Class Gadget driver + * + * Copyright (C) 2009-2010 + * Laurent Pinchart (laurent.pinchart@ideasonboard.com) + * + * Copyright (c) 2013 Samsung Electronics Co., Ltd. 
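
To make the ioctl surface above concrete, here is a minimal user-space sketch of how a gadget-side daemon typically drives this video node. The device path, buffer count, and the event value are illustrative assumptions, not taken from this patch (UVC_EVENT_SETUP is defined as V4L2_EVENT_PRIVATE_START + 4 in the UAPI header added later in this series); the VIDIOC_DQEVENT/UVCIOC_SEND_RESPONSE event loop is omitted.

/* Hypothetical user-space sketch (not part of the patch): bring up the
 * gadget's video node using the ioctls wired up above. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int start_uvc_stream(const char *path /* e.g. "/dev/video0", an assumption */)
{
	/* UVC_EVENT_SETUP; marks this fd as the UVC application handle. */
	struct v4l2_event_subscription sub = {
		.type	= V4L2_EVENT_PRIVATE_START + 4,
	};
	struct v4l2_requestbuffers reqbuf = {
		.count	= 4,				/* arbitrary */
		.type	= V4L2_BUF_TYPE_VIDEO_OUTPUT,	/* gadget side is an output */
		.memory	= V4L2_MEMORY_MMAP,
	};
	int type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	int fd = open(path, O_RDWR);

	if (fd < 0)
		return -1;
	if (ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub) < 0 ||
	    ioctl(fd, VIDIOC_REQBUFS, &reqbuf) < 0 ||
	    ioctl(fd, VIDIOC_STREAMON, &type) < 0)
		return -1;
	/* ... VIDIOC_QBUF filled frames, reclaim them with VIDIOC_DQBUF ... */
	return fd;
}
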
+ * http://www.samsung.com + * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com> + */ + +#ifndef __UVC_V4L2_H__ +#define __UVC_V4L2_H__ + +extern const struct v4l2_ioctl_ops uvc_v4l2_ioctl_ops; +extern const struct v4l2_file_operations uvc_v4l2_fops; + +#endif /* __UVC_V4L2_H__ */ diff --git a/drivers/usb/gadget/function/uvc-new/uvc_video.c b/drivers/usb/gadget/function/uvc-new/uvc_video.c new file mode 100644 index 000000000000..e81865978299 --- /dev/null +++ b/drivers/usb/gadget/function/uvc-new/uvc_video.c @@ -0,0 +1,546 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * uvc_video.c -- USB Video Class Gadget driver + * + * Copyright (C) 2009-2010 + * Laurent Pinchart (laurent.pinchart@ideasonboard.com) + */ + +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/errno.h> +#include <linux/usb/ch9.h> +#include <linux/usb/gadget.h> +#include <linux/usb/video.h> +#include <asm/unaligned.h> + +#include <media/v4l2-dev.h> + +#include "uvc.h" +#include "uvc_queue.h" +#include "uvc_video.h" + +/* -------------------------------------------------------------------------- + * Video codecs + */ + +static int +uvc_video_encode_header(struct uvc_video *video, struct uvc_buffer *buf, + u8 *data, int len) +{ + struct uvc_device *uvc = container_of(video, struct uvc_device, video); + struct usb_composite_dev *cdev = uvc->func.config->cdev; + struct timespec64 ts = ns_to_timespec64(buf->buf.vb2_buf.timestamp); + int pos = 2; + + data[1] = UVC_STREAM_EOH | video->fid; + + if (video->queue.buf_used == 0 && ts.tv_sec) { + /* dwClockFrequency is 48 MHz */ + u32 pts = ((u64)ts.tv_sec * USEC_PER_SEC + ts.tv_nsec / NSEC_PER_USEC) * 48; + + data[1] |= UVC_STREAM_PTS; + put_unaligned_le32(pts, &data[pos]); + pos += 4; + } + + if (cdev->gadget->ops->get_frame) { + u32 sof, stc; + + sof = usb_gadget_frame_number(cdev->gadget); + ktime_get_ts64(&ts); + stc = ((u64)ts.tv_sec * USEC_PER_SEC + ts.tv_nsec / NSEC_PER_USEC) * 48; + + data[1] |= UVC_STREAM_SCR; + put_unaligned_le32(stc, &data[pos]); + put_unaligned_le16(sof, &data[pos+4]); + pos += 6; + } + + data[0] = pos; + + if (buf->bytesused - video->queue.buf_used <= len - pos) + data[1] |= UVC_STREAM_EOF; + + return pos; +} + +static int +uvc_video_encode_data(struct uvc_video *video, struct uvc_buffer *buf, + u8 *data, int len) +{ + struct uvc_video_queue *queue = &video->queue; + unsigned int nbytes; + void *mem; + + /* Copy video data to the USB buffer. */ + mem = buf->mem + queue->buf_used; + nbytes = min((unsigned int)len, buf->bytesused - queue->buf_used); + + memcpy(data, mem, nbytes); + queue->buf_used += nbytes; + + return nbytes; +} + +static void +uvc_video_encode_bulk(struct usb_request *req, struct uvc_video *video, + struct uvc_buffer *buf) +{ + void *mem = req->buf; + struct uvc_request *ureq = req->context; + int len = video->req_size; + int ret; + + /* Add a header at the beginning of the payload. */ + if (video->payload_size == 0) { + ret = uvc_video_encode_header(video, buf, mem, len); + video->payload_size += ret; + mem += ret; + len -= ret; + } + + /* Process video data. 
*/ + len = min((int)(video->max_payload_size - video->payload_size), len); + ret = uvc_video_encode_data(video, buf, mem, len); + + video->payload_size += ret; + len -= ret; + + req->length = video->req_size - len; + req->zero = video->payload_size == video->max_payload_size; + + if (buf->bytesused == video->queue.buf_used) { + video->queue.buf_used = 0; + buf->state = UVC_BUF_STATE_DONE; + list_del(&buf->queue); + video->fid ^= UVC_STREAM_FID; + ureq->last_buf = buf; + + video->payload_size = 0; + } + + if (video->payload_size == video->max_payload_size || + video->queue.flags & UVC_QUEUE_DROP_INCOMPLETE || + buf->bytesused == video->queue.buf_used) + video->payload_size = 0; +} + +static void +uvc_video_encode_isoc_sg(struct usb_request *req, struct uvc_video *video, + struct uvc_buffer *buf) +{ + unsigned int pending = buf->bytesused - video->queue.buf_used; + struct uvc_request *ureq = req->context; + struct scatterlist *sg, *iter; + unsigned int len = video->req_size; + unsigned int sg_left, part = 0; + unsigned int i; + int header_len; + + sg = ureq->sgt.sgl; + sg_init_table(sg, ureq->sgt.nents); + + /* Init the header. */ + header_len = uvc_video_encode_header(video, buf, ureq->header, + video->req_size); + sg_set_buf(sg, ureq->header, header_len); + len -= header_len; + + if (pending <= len) + len = pending; + + req->length = (len == pending) ? + len + header_len : video->req_size; + + /* Init the pending sgs with payload */ + sg = sg_next(sg); + + for_each_sg(sg, iter, ureq->sgt.nents - 1, i) { + if (!len || !buf->sg || !buf->sg->length) + break; + + sg_left = buf->sg->length - buf->offset; + part = min_t(unsigned int, len, sg_left); + + sg_set_page(iter, sg_page(buf->sg), part, buf->offset); + + if (part == sg_left) { + buf->offset = 0; + buf->sg = sg_next(buf->sg); + } else { + buf->offset += part; + } + len -= part; + } + + /* Assign the video data with header. */ + req->buf = NULL; + req->sg = ureq->sgt.sgl; + req->num_sgs = i + 1; + + req->length -= len; + video->queue.buf_used += req->length - header_len; + + if (buf->bytesused == video->queue.buf_used || !buf->sg || + video->queue.flags & UVC_QUEUE_DROP_INCOMPLETE) { + video->queue.buf_used = 0; + buf->state = UVC_BUF_STATE_DONE; + buf->offset = 0; + list_del(&buf->queue); + video->fid ^= UVC_STREAM_FID; + ureq->last_buf = buf; + } +} + +static void +uvc_video_encode_isoc(struct usb_request *req, struct uvc_video *video, + struct uvc_buffer *buf) +{ + void *mem = req->buf; + struct uvc_request *ureq = req->context; + int len = video->req_size; + int ret; + + /* Add the header. */ + ret = uvc_video_encode_header(video, buf, mem, len); + mem += ret; + len -= ret; + + /* Process video data. */ + ret = uvc_video_encode_data(video, buf, mem, len); + len -= ret; + + req->length = video->req_size - len; + + if (buf->bytesused == video->queue.buf_used || + video->queue.flags & UVC_QUEUE_DROP_INCOMPLETE) { + video->queue.buf_used = 0; + buf->state = UVC_BUF_STATE_DONE; + list_del(&buf->queue); + video->fid ^= UVC_STREAM_FID; + ureq->last_buf = buf; + } +} + +/* -------------------------------------------------------------------------- + * Request handling + */ + +static int uvcg_video_ep_queue(struct uvc_video *video, struct usb_request *req) +{ + int ret; + + ret = usb_ep_queue(video->ep, req, GFP_ATOMIC); + if (ret < 0) { + uvcg_err(&video->uvc->func, "Failed to queue request (%d).\n", + ret); + + /* If the endpoint is disabled the descriptor may be NULL. */ + if (video->ep->desc) { + /* Isochronous endpoints can't be halted. 
*/ + if (usb_endpoint_xfer_bulk(video->ep->desc)) + usb_ep_set_halt(video->ep); + } + } + + return ret; +} + +static void +uvc_video_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct uvc_request *ureq = req->context; + struct uvc_video *video = ureq->video; + struct uvc_video_queue *queue = &video->queue; + struct uvc_device *uvc = video->uvc; + unsigned long flags; + + switch (req->status) { + case 0: + break; + + case -EXDEV: + uvcg_dbg(&video->uvc->func, "VS request missed xfer.\n"); + queue->flags |= UVC_QUEUE_DROP_INCOMPLETE; + break; + + case -ESHUTDOWN: /* disconnect from host. */ + uvcg_dbg(&video->uvc->func, "VS request cancelled.\n"); + uvcg_queue_cancel(queue, 1); + break; + + default: + uvcg_warn(&video->uvc->func, + "VS request completed with status %d.\n", + req->status); + uvcg_queue_cancel(queue, 0); + } + + if (ureq->last_buf) { + uvcg_complete_buffer(&video->queue, ureq->last_buf); + ureq->last_buf = NULL; + } + + spin_lock_irqsave(&video->req_lock, flags); + list_add_tail(&req->list, &video->req_free); + spin_unlock_irqrestore(&video->req_lock, flags); + + if (uvc->state == UVC_STATE_STREAMING) + queue_work(video->async_wq, &video->pump); +} + +static int +uvc_video_free_requests(struct uvc_video *video) +{ + unsigned int i; + + if (video->ureq) { + for (i = 0; i < video->uvc_num_requests; ++i) { + sg_free_table(&video->ureq[i].sgt); + + if (video->ureq[i].req) { + usb_ep_free_request(video->ep, video->ureq[i].req); + video->ureq[i].req = NULL; + } + + if (video->ureq[i].req_buffer) { + kfree(video->ureq[i].req_buffer); + video->ureq[i].req_buffer = NULL; + } + } + + kfree(video->ureq); + video->ureq = NULL; + } + + INIT_LIST_HEAD(&video->req_free); + video->req_size = 0; + return 0; +} + +static int +uvc_video_alloc_requests(struct uvc_video *video) +{ + unsigned int req_size; + unsigned int i; + int ret = -ENOMEM; + + BUG_ON(video->req_size); + + req_size = video->ep->maxpacket + * max_t(unsigned int, video->ep->maxburst, 1) + * (video->ep->mult); + + video->ureq = kcalloc(video->uvc_num_requests, sizeof(struct uvc_request), GFP_KERNEL); + if (video->ureq == NULL) + return -ENOMEM; + + for (i = 0; i < video->uvc_num_requests; ++i) { + video->ureq[i].req_buffer = kmalloc(req_size, GFP_KERNEL); + if (video->ureq[i].req_buffer == NULL) + goto error; + + video->ureq[i].req = usb_ep_alloc_request(video->ep, GFP_KERNEL); + if (video->ureq[i].req == NULL) + goto error; + + video->ureq[i].req->buf = video->ureq[i].req_buffer; + video->ureq[i].req->length = 0; + video->ureq[i].req->complete = uvc_video_complete; + video->ureq[i].req->context = &video->ureq[i]; + video->ureq[i].video = video; + video->ureq[i].last_buf = NULL; + + list_add_tail(&video->ureq[i].req->list, &video->req_free); + /* req_size/PAGE_SIZE + 1 for overruns and + 1 for header */ + sg_alloc_table(&video->ureq[i].sgt, + DIV_ROUND_UP(req_size - UVCG_REQUEST_HEADER_LEN, + PAGE_SIZE) + 2, GFP_KERNEL); + } + + video->req_size = req_size; + + return 0; + +error: + uvc_video_free_requests(video); + return ret; +} + +/* -------------------------------------------------------------------------- + * Video streaming + */ + +/* + * uvcg_video_pump - Pump video data into the USB requests + * + * This function fills the available USB requests (listed in req_free) with + * video data from the queued buffers. 
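
As a quick sanity check of the request sizing computed in uvc_video_alloc_requests() above, the same arithmetic in stand-alone form; the endpoint attribute values below are invented for illustration and depend on what the UDC actually negotiates.

/* Illustrative only: per-request payload capacity for an isochronous
 * endpoint, mirroring the req_size computation above. */
#include <stdio.h>

int main(void)
{
	unsigned int maxpacket = 1024;	/* example SuperSpeed isoc packet size */
	unsigned int maxburst = 15;	/* example burst count */
	unsigned int mult = 3;		/* example isoc mult */
	/* max_t(unsigned int, maxburst, 1) from the kernel code: */
	unsigned int req_size = maxpacket * (maxburst > 1 ? maxburst : 1) * mult;

	/* 1024 * 15 * 3 = 46080 bytes per USB request */
	printf("req_size = %u bytes\n", req_size);
	return 0;
}
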
+ */ +static void uvcg_video_pump(struct work_struct *work) +{ + struct uvc_video *video = container_of(work, struct uvc_video, pump); + struct uvc_video_queue *queue = &video->queue; + struct usb_request *req = NULL; + struct uvc_buffer *buf; + unsigned long flags; + int ret; + bool buf_int; + /* video->max_payload_size is only set when using bulk transfer */ + bool is_bulk = video->max_payload_size; + + while (video->ep->enabled) { + /* + * Retrieve the first available USB request, protected by the + * request lock. + */ + spin_lock_irqsave(&video->req_lock, flags); + if (list_empty(&video->req_free)) { + spin_unlock_irqrestore(&video->req_lock, flags); + return; + } + req = list_first_entry(&video->req_free, struct usb_request, + list); + list_del(&req->list); + spin_unlock_irqrestore(&video->req_lock, flags); + + /* + * Retrieve the first available video buffer and fill the + * request, protected by the video queue irqlock. + */ + spin_lock_irqsave(&queue->irqlock, flags); + buf = uvcg_queue_head(queue); + + if (buf != NULL) { + video->encode(req, video, buf); + /* Always interrupt for the last request of a video buffer */ + buf_int = buf->state == UVC_BUF_STATE_DONE; + } else if (!(queue->flags & UVC_QUEUE_DISCONNECTED) && !is_bulk) { + /* + * No video buffer available; the queue is still connected and + * we're transferring over ISOC. Queue a 0 length request to + * prevent missed ISOC transfers. + */ + req->length = 0; + buf_int = false; + } else { + /* + * Either the queue has been disconnected or no video buffer + * is available for bulk transfer. Either way, stop processing + * further. + */ + spin_unlock_irqrestore(&queue->irqlock, flags); + break; + } + + /* + * With usb3 we have more requests. This will decrease the + * interrupt load to a quarter but also catches the corner + * cases, which need to be handled. + */ + if (list_empty(&video->req_free) || buf_int || + !(video->req_int_count % + DIV_ROUND_UP(video->uvc_num_requests, 4))) { + video->req_int_count = 0; + req->no_interrupt = 0; + } else { + req->no_interrupt = 1; + } + + /* Queue the USB request */ + ret = uvcg_video_ep_queue(video, req); + spin_unlock_irqrestore(&queue->irqlock, flags); + + if (ret < 0) { + uvcg_queue_cancel(queue, 0); + break; + } + + /* Endpoint now owns the request */ + req = NULL; + video->req_int_count++; + } + + if (!req) + return; + + spin_lock_irqsave(&video->req_lock, flags); + list_add_tail(&req->list, &video->req_free); + spin_unlock_irqrestore(&video->req_lock, flags); + return; +} + +/* + * Enable or disable the video stream. + */ +int uvcg_video_enable(struct uvc_video *video, int enable) +{ + unsigned int i; + int ret; + + if (video->ep == NULL) { + uvcg_info(&video->uvc->func, + "Video enable failed, device is uninitialized.\n"); + return -ENODEV; + } + + if (!enable) { + cancel_work_sync(&video->pump); + uvcg_queue_cancel(&video->queue, 0); + + for (i = 0; i < video->uvc_num_requests; ++i) + if (video->ureq && video->ureq[i].req) + usb_ep_dequeue(video->ep, video->ureq[i].req); + + uvc_video_free_requests(video); + uvcg_queue_enable(&video->queue, 0); + return 0; + } + + if ((ret = uvcg_queue_enable(&video->queue, 1)) < 0) + return ret; + + if ((ret = uvc_video_alloc_requests(video)) < 0) + return ret; + + if (video->max_payload_size) { + video->encode = uvc_video_encode_bulk; + video->payload_size = 0; + } else + video->encode = video->queue.use_sg ? 
+ uvc_video_encode_isoc_sg : uvc_video_encode_isoc; + + video->req_int_count = 0; + + queue_work(video->async_wq, &video->pump); + + return ret; +} + +/* + * Initialize the UVC video stream. + */ +int uvcg_video_init(struct uvc_video *video, struct uvc_device *uvc) +{ + INIT_LIST_HEAD(&video->req_free); + spin_lock_init(&video->req_lock); + INIT_WORK(&video->pump, uvcg_video_pump); + + /* Allocate a work queue for asynchronous video pump handler. */ + video->async_wq = alloc_workqueue("uvcgadget", WQ_UNBOUND | WQ_HIGHPRI, 0); + if (!video->async_wq) + return -EINVAL; + + video->uvc = uvc; + video->fcc = V4L2_PIX_FMT_YUYV; + video->bpp = 16; + video->width = 320; + video->height = 240; + video->imagesize = 320 * 240 * 2; + + /* Initialize the video buffers queue. */ + uvcg_queue_init(&video->queue, uvc->v4l2_dev.dev->parent, + V4L2_BUF_TYPE_VIDEO_OUTPUT, &video->mutex); + return 0; +} diff --git a/drivers/usb/gadget/function/uvc-new/uvc_video.h b/drivers/usb/gadget/function/uvc-new/uvc_video.h new file mode 100644 index 000000000000..03adeefa343b --- /dev/null +++ b/drivers/usb/gadget/function/uvc-new/uvc_video.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * uvc_video.h -- USB Video Class Gadget driver + * + * Copyright (C) 2009-2010 + * Laurent Pinchart (laurent.pinchart@ideasonboard.com) + * + * Copyright (c) 2013 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com> + */ +#ifndef __UVC_VIDEO_H__ +#define __UVC_VIDEO_H__ + +struct uvc_video; + +int uvcg_video_enable(struct uvc_video *video, int enable); + +int uvcg_video_init(struct uvc_video *video, struct uvc_device *uvc); + +#endif /* __UVC_VIDEO_H__ */ diff --git a/fs/Makefile b/fs/Makefile index e4a54c7b71c7..d5ed911b0a2f 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -121,7 +121,11 @@ obj-$(CONFIG_NILFS2_FS) += nilfs2/ obj-$(CONFIG_BEFS_FS) += befs/ obj-$(CONFIG_HOSTFS) += hostfs/ obj-$(CONFIG_CACHEFILES) += cachefiles/ -obj-$(CONFIG_DEBUG_FS) += debugfs/ +ifeq ($(CONFIG_DEBUG_FS),y) +obj-y += debugfs/ +else +obj-$(CONFIG_DEBUG_KMEMLEAK) += debugfs/ +endif obj-$(CONFIG_TRACING) += tracefs/ obj-$(CONFIG_OCFS2_FS) += ocfs2/ obj-$(CONFIG_BTRFS_FS) += btrfs/ diff --git a/fs/dcache.c b/fs/dcache.c index 98363cb3d4b1..c978c8b6e5b7 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -70,7 +70,7 @@ * If no ancestor relationship: * arbitrary, since it's serialized on rename_lock */ -int sysctl_vfs_cache_pressure __read_mostly = 100; +int sysctl_vfs_cache_pressure __read_mostly = 75; EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure); __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock); diff --git a/fs/debugfs/Makefile b/fs/debugfs/Makefile index 840c45696668..a1f5982cd771 100644 --- a/fs/debugfs/Makefile +++ b/fs/debugfs/Makefile @@ -1,4 +1,7 @@ debugfs-objs := inode.o file.o -obj-$(CONFIG_DEBUG_FS) += debugfs.o +ifeq ($(CONFIG_DEBUG_KMEMLEAK),y) +ccflags-y := -DCONFIG_DEBUG_FS +endif +obj-y += debugfs.o diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index e210c7f0cb63..1a0e71cfc3a1 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -197,6 +197,9 @@ int f2fs_start_gc_thread(struct f2fs_sb_info *sbi) kfree(gc_th); sbi->gc_thread = NULL; } + /* gc_thread is NULL on the error path above; only set the io priority + * when the thread was actually created. */ + if (sbi->gc_thread) + set_task_ioprio(sbi->gc_thread->f2fs_gc_task, + IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0)); out: return err; } diff --git a/fs/namespace.c b/fs/namespace.c index 0e25713d09fa..d0cf0e37c232 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -2832,9 +2832,10 @@ long do_mount(const char *dev_name, const char __user *dir_name, if (retval) goto dput_out; - /* Default to 
relatime unless overriden */ - if (!(flags & MS_NOATIME)) - mnt_flags |= MNT_RELATIME; + /* Default to NOATIME and NODIRATIME */ + mnt_flags |= MNT_NOATIME; + mnt_flags |= MNT_NODIRATIME; + /* Separate the per-mountpoint flags */ if (flags & MS_NOSUID) diff --git a/include/drm/drm_refresh_rate.h b/include/drm/drm_refresh_rate.h new file mode 100644 index 000000000000..b0c6cfe634e2 --- /dev/null +++ b/include/drm/drm_refresh_rate.h @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020 Sultan Alsawaf . + */ + +unsigned int dsi_panel_get_refresh_rate(void); diff --git a/include/linux/android_kabi.h b/include/linux/android_kabi.h index 9c7b6c035ad3..d1ad017acf3b 100644 --- a/include/linux/android_kabi.h +++ b/include/linux/android_kabi.h @@ -59,17 +59,11 @@ #else #define _ANDROID_KABI_REPLACE(_orig, _new) \ - union { \ - _new; \ - struct { \ - _orig; \ - }; \ - __ANDROID_KABI_CHECK_SIZE_ALIGN(_orig, _new); \ - } + _new #endif /* __GENKSYMS__ */ -#define _ANDROID_KABI_RESERVE(n) u64 android_kabi_reserved##n +#define _ANDROID_KABI_RESERVE(n) /* diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 5e5f600ba5d0..8c5053940784 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -718,12 +718,10 @@ struct request_queue { #define QUEUE_FLAG_SCSI_PASSTHROUGH 27 /* queue supports SCSI commands */ #define QUEUE_FLAG_QUIESCED 28 /* queue has been quiesced */ -#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ - (1 << QUEUE_FLAG_SAME_COMP) | \ +#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_SAME_COMP) | \ (1 << QUEUE_FLAG_ADD_RANDOM)) -#define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ - (1 << QUEUE_FLAG_SAME_COMP) | \ +#define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_SAME_COMP) | \ (1 << QUEUE_FLAG_POLL)) void blk_queue_flag_set(unsigned int flag, struct request_queue *q); diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h index e5af028c08b4..80014831b48c 100644 --- a/include/linux/sched/rt.h +++ b/include/linux/sched/rt.h @@ -59,9 +59,9 @@ extern void normalize_rt_tasks(void); /* - * default timeslice is 100 msecs (used only for SCHED_RR tasks). + * default timeslice is 1 jiffy (used only for SCHED_RR tasks). * Timeslices get refilled after they expire. */ -#define RR_TIMESLICE (100 * HZ / 1000) +#define RR_TIMESLICE (1) #endif /* _LINUX_SCHED_RT_H */ diff --git a/include/linux/sync_file.h b/include/linux/sync_file.h index 790ca021203a..a022809009ac 100644 --- a/include/linux/sync_file.h +++ b/include/linux/sync_file.h @@ -34,14 +34,6 @@ */ struct sync_file { struct file *file; - /** - * @user_name: - * - * Name of the sync file provided by userspace, for merged fences. - * Otherwise generated through driver callbacks (in which case the - * entire array is 0). 
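
Since RR_TIMESLICE is expressed in jiffies, the wall-clock effect of the change above depends on HZ. A small stand-alone illustration of the resulting SCHED_RR slice lengths:

/* Illustrative only: wall-clock length of the SCHED_RR timeslice for a
 * few common HZ values, with RR_TIMESLICE fixed at one jiffy as above. */
#include <stdio.h>

#define RR_TIMESLICE 1	/* jiffies, per the patched definition */

int main(void)
{
	int hz[] = { 100, 250, 300, 1000 };

	for (unsigned int i = 0; i < sizeof(hz) / sizeof(hz[0]); i++)
		printf("HZ=%4d -> timeslice = %.2f ms\n",
		       hz[i], 1000.0 * RR_TIMESLICE / hz[i]);
	return 0;
}
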
- */ - char user_name[32]; #ifdef CONFIG_DEBUG_FS struct list_head sync_file_list; #endif @@ -57,6 +49,5 @@ struct sync_file { struct sync_file *sync_file_create(struct dma_fence *fence); struct dma_fence *sync_file_get_fence(int fd); -char *sync_file_get_name(struct sync_file *sync_file, char *buf, int len); #endif /* _LINUX_SYNC_H */ diff --git a/include/linux/thermal.h b/include/linux/thermal.h index dfdb10eceeea..d00aa70f5b5d 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -18,7 +18,7 @@ #include #define THERMAL_TRIPS_NONE -1 -#define THERMAL_MAX_TRIPS 12 +#define THERMAL_MAX_TRIPS 16 /* invalid cooling state */ #define THERMAL_CSTATE_INVALID -1UL diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 0e5b5ee47fef..2b6b7c90d3dc 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -45,12 +45,16 @@ struct vm_struct { struct vmap_area { unsigned long va_start; unsigned long va_end; + + /* + * Largest available free size in subtree. + */ + unsigned long subtree_max_size; unsigned long flags; struct rb_node rb_node; /* address sorted rbtree */ struct list_head list; /* address sorted list */ struct llist_node purge_list; /* "lazy purge" list */ struct vm_struct *vm; - struct rcu_head rcu_head; }; /* diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h index cdc87ec61e54..5816b5442a80 100644 --- a/include/media/v4l2-common.h +++ b/include/media/v4l2-common.h @@ -384,4 +384,29 @@ int v4l2_g_parm_cap(struct video_device *vdev, int v4l2_s_parm_cap(struct video_device *vdev, struct v4l2_subdev *sd, struct v4l2_streamparm *a); +/** + * v4l2_get_link_freq - Get link rate from transmitter + * + * @handler: The transmitter's control handler + * @mul: The multiplier between pixel rate and link frequency. Bits per pixel on + * D-PHY, samples per clock on parallel. 0 otherwise. + * @div: The divisor between pixel rate and link frequency. Number of data lanes + * times two on D-PHY, 1 on parallel. 0 otherwise. + * + * This function is intended for obtaining the link frequency from the + * transmitter sub-devices. It returns the link rate, either from the + * V4L2_CID_LINK_FREQ control implemented by the transmitter, or value + * calculated based on the V4L2_CID_PIXEL_RATE implemented by the transmitter. 
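
A hypothetical worked example of the pixel-rate fallback described in this kernel-doc; the sensor numbers are invented (a 10-bit Bayer stream on a 2-lane D-PHY link):

/* Illustrative only: the pixel-rate fallback arithmetic described in the
 * v4l2_get_link_freq() kernel-doc. The values below are assumptions. */
#include <stdio.h>

int main(void)
{
	unsigned long long pixel_rate = 280000000ULL;	/* V4L2_CID_PIXEL_RATE, Hz */
	unsigned int mul = 10;	/* bits per pixel on D-PHY */
	unsigned int div = 4;	/* 2 data lanes times two */

	/* 280 MHz * 10 / 4 = 700 MHz link frequency */
	printf("link_freq = %llu Hz\n", pixel_rate * mul / div);
	return 0;
}
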
+ * + * Returns link frequency on success, otherwise a negative error code: + * -ENOENT: Link frequency or pixel rate control not found + * -EINVAL: Invalid link frequency value + */ +s64 v4l2_get_link_freq(struct v4l2_ctrl_handler *handler, unsigned int mul, + unsigned int div); + +void v4l2_simplify_fraction(u32 *numerator, u32 *denominator, + unsigned int n_terms, unsigned int threshold); +u32 v4l2_fraction_to_interval(u32 numerator, u32 denominator); + #endif /* V4L2_COMMON_H_ */ diff --git a/include/media/v4l2-uvc.h b/include/media/v4l2-uvc.h new file mode 100644 index 000000000000..ead61667883f --- /dev/null +++ b/include/media/v4l2-uvc.h @@ -0,0 +1,331 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * v4l2 uvc internal API header + * + * Some commonly needed functions for uvc drivers + */ + +#ifndef __LINUX_V4L2_UVC_H +#define __LINUX_V4L2_UVC_H + +/* ------------------------------------------------------------------------ + * GUIDs + */ +#define UVC_GUID_UVC_CAMERA \ + {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01} +#define UVC_GUID_UVC_OUTPUT \ + {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02} +#define UVC_GUID_UVC_MEDIA_TRANSPORT_INPUT \ + {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03} +#define UVC_GUID_UVC_PROCESSING \ + {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01} +#define UVC_GUID_UVC_SELECTOR \ + {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02} + +#define UVC_GUID_FORMAT_MJPEG \ + { 'M', 'J', 'P', 'G', 0x00, 0x00, 0x10, 0x00, \ + 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} +#define UVC_GUID_FORMAT_YUY2 \ + { 'Y', 'U', 'Y', '2', 0x00, 0x00, 0x10, 0x00, \ + 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} +#define UVC_GUID_FORMAT_YUY2_ISIGHT \ + { 'Y', 'U', 'Y', '2', 0x00, 0x00, 0x10, 0x00, \ + 0x80, 0x00, 0x00, 0x00, 0x00, 0x38, 0x9b, 0x71} +#define UVC_GUID_FORMAT_NV12 \ + { 'N', 'V', '1', '2', 0x00, 0x00, 0x10, 0x00, \ + 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} +#define UVC_GUID_FORMAT_YV12 \ + { 'Y', 'V', '1', '2', 0x00, 0x00, 0x10, 0x00, \ + 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} +#define UVC_GUID_FORMAT_I420 \ + { 'I', '4', '2', '0', 0x00, 0x00, 0x10, 0x00, \ + 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} +#define UVC_GUID_FORMAT_UYVY \ + { 'U', 'Y', 'V', 'Y', 0x00, 0x00, 0x10, 0x00, \ + 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} +#define UVC_GUID_FORMAT_Y800 \ + { 'Y', '8', '0', '0', 0x00, 0x00, 0x10, 0x00, \ + 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} +#define UVC_GUID_FORMAT_Y8 \ + { 'Y', '8', ' ', ' ', 0x00, 0x00, 0x10, 0x00, \ + 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} +#define UVC_GUID_FORMAT_Y10 \ + { 'Y', '1', '0', ' ', 0x00, 0x00, 0x10, 0x00, \ + 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} +#define UVC_GUID_FORMAT_Y12 \ + { 'Y', '1', '2', ' ', 0x00, 0x00, 0x10, 0x00, \ + 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} +#define UVC_GUID_FORMAT_Y16 \ + { 'Y', '1', '6', ' ', 0x00, 0x00, 0x10, 0x00, \ + 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} +#define UVC_GUID_FORMAT_BY8 \ + { 'B', 'Y', '8', ' ', 0x00, 0x00, 0x10, 0x00, \ + 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} +#define UVC_GUID_FORMAT_BA81 \ + { 'B', 'A', '8', '1', 0x00, 0x00, 0x10, 0x00, \ + 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} +#define UVC_GUID_FORMAT_GBRG \ + { 'G', 
'B', 'R', 'G', 0x00, 0x00, 0x10, 0x00, \ + 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} +#define UVC_GUID_FORMAT_GRBG \ + { 'G', 'R', 'B', 'G', 0x00, 0x00, 0x10, 0x00, \ + 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} +#define UVC_GUID_FORMAT_RGGB \ + { 'R', 'G', 'G', 'B', 0x00, 0x00, 0x10, 0x00, \ + 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} +#define UVC_GUID_FORMAT_BG16 \ + { 'B', 'G', '1', '6', 0x00, 0x00, 0x10, 0x00, \ + 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} +#define UVC_GUID_FORMAT_GB16 \ + { 'G', 'B', '1', '6', 0x00, 0x00, 0x10, 0x00, \ + 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} +#define UVC_GUID_FORMAT_RG16 \ + { 'R', 'G', '1', '6', 0x00, 0x00, 0x10, 0x00, \ + 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} +#define UVC_GUID_FORMAT_GR16 \ + { 'G', 'R', '1', '6', 0x00, 0x00, 0x10, 0x00, \ + 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} +#define UVC_GUID_FORMAT_RGBP \ + { 'R', 'G', 'B', 'P', 0x00, 0x00, 0x10, 0x00, \ + 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} +#define UVC_GUID_FORMAT_BGR3 \ + { 0x7d, 0xeb, 0x36, 0xe4, 0x4f, 0x52, 0xce, 0x11, \ + 0x9f, 0x53, 0x00, 0x20, 0xaf, 0x0b, 0xa7, 0x70} +#define UVC_GUID_FORMAT_M420 \ + { 'M', '4', '2', '0', 0x00, 0x00, 0x10, 0x00, \ + 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} + +#define UVC_GUID_FORMAT_H264 \ + { 'H', '2', '6', '4', 0x00, 0x00, 0x10, 0x00, \ + 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} +#define UVC_GUID_FORMAT_Y8I \ + { 'Y', '8', 'I', ' ', 0x00, 0x00, 0x10, 0x00, \ + 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} +#define UVC_GUID_FORMAT_Y12I \ + { 'Y', '1', '2', 'I', 0x00, 0x00, 0x10, 0x00, \ + 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} +#define UVC_GUID_FORMAT_Z16 \ + { 'Z', '1', '6', ' ', 0x00, 0x00, 0x10, 0x00, \ + 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} +#define UVC_GUID_FORMAT_RW10 \ + { 'R', 'W', '1', '0', 0x00, 0x00, 0x10, 0x00, \ + 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} +#define UVC_GUID_FORMAT_INVZ \ + { 'I', 'N', 'V', 'Z', 0x90, 0x2d, 0x58, 0x4a, \ + 0x92, 0x0b, 0x77, 0x3f, 0x1f, 0x2c, 0x55, 0x6b} +#define UVC_GUID_FORMAT_INZI \ + { 'I', 'N', 'Z', 'I', 0x66, 0x1a, 0x42, 0xa2, \ + 0x90, 0x65, 0xd0, 0x18, 0x14, 0xa8, 0xef, 0x8a} +#define UVC_GUID_FORMAT_INVI \ + { 'I', 'N', 'V', 'I', 0xdb, 0x57, 0x49, 0x5e, \ + 0x8e, 0x3f, 0xf4, 0x79, 0x53, 0x2b, 0x94, 0x6f} + +#define UVC_GUID_FORMAT_D3DFMT_L8 \ + {0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, \ + 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} +#define UVC_GUID_FORMAT_KSMEDIA_L8_IR \ + {0x32, 0x00, 0x00, 0x00, 0x02, 0x00, 0x10, 0x00, \ + 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} + +/* ------------------------------------------------------------------------ + * Video formats + */ + +struct uvc_format_desc { + char *name; + u8 guid[16]; + u32 fcc; +}; + +static struct uvc_format_desc uvc_fmts[] = { + { + .name = "YUV 4:2:2 (YUYV)", + .guid = UVC_GUID_FORMAT_YUY2, + .fcc = V4L2_PIX_FMT_YUYV, + }, + { + .name = "YUV 4:2:2 (YUYV)", + .guid = UVC_GUID_FORMAT_YUY2_ISIGHT, + .fcc = V4L2_PIX_FMT_YUYV, + }, + { + .name = "YUV 4:2:0 (NV12)", + .guid = UVC_GUID_FORMAT_NV12, + .fcc = V4L2_PIX_FMT_NV12, + }, + { + .name = "MJPEG", + .guid = UVC_GUID_FORMAT_MJPEG, + .fcc = V4L2_PIX_FMT_MJPEG, + }, + { + .name = "YVU 4:2:0 (YV12)", + .guid = UVC_GUID_FORMAT_YV12, + .fcc = V4L2_PIX_FMT_YVU420, + }, + { + .name = "YUV 4:2:0 (I420)", + .guid = UVC_GUID_FORMAT_I420, + .fcc = V4L2_PIX_FMT_YUV420, + }, + { + .name = "YUV 4:2:0 (M420)", + .guid = UVC_GUID_FORMAT_M420, + .fcc = V4L2_PIX_FMT_M420, + }, + { + .name 
= "YUV 4:2:2 (UYVY)", + .guid = UVC_GUID_FORMAT_UYVY, + .fcc = V4L2_PIX_FMT_UYVY, + }, + { + .name = "Greyscale 8-bit (Y800)", + .guid = UVC_GUID_FORMAT_Y800, + .fcc = V4L2_PIX_FMT_GREY, + }, + { + .name = "Greyscale 8-bit (Y8 )", + .guid = UVC_GUID_FORMAT_Y8, + .fcc = V4L2_PIX_FMT_GREY, + }, + { + .name = "Greyscale 8-bit (D3DFMT_L8)", + .guid = UVC_GUID_FORMAT_D3DFMT_L8, + .fcc = V4L2_PIX_FMT_GREY, + }, + { + .name = "IR 8-bit (L8_IR)", + .guid = UVC_GUID_FORMAT_KSMEDIA_L8_IR, + .fcc = V4L2_PIX_FMT_GREY, + }, + { + .name = "Greyscale 10-bit (Y10 )", + .guid = UVC_GUID_FORMAT_Y10, + .fcc = V4L2_PIX_FMT_Y10, + }, + { + .name = "Greyscale 12-bit (Y12 )", + .guid = UVC_GUID_FORMAT_Y12, + .fcc = V4L2_PIX_FMT_Y12, + }, + { + .name = "Greyscale 16-bit (Y16 )", + .guid = UVC_GUID_FORMAT_Y16, + .fcc = V4L2_PIX_FMT_Y16, + }, + { + .name = "BGGR Bayer (BY8 )", + .guid = UVC_GUID_FORMAT_BY8, + .fcc = V4L2_PIX_FMT_SBGGR8, + }, + { + .name = "BGGR Bayer (BA81)", + .guid = UVC_GUID_FORMAT_BA81, + .fcc = V4L2_PIX_FMT_SBGGR8, + }, + { + .name = "GBRG Bayer (GBRG)", + .guid = UVC_GUID_FORMAT_GBRG, + .fcc = V4L2_PIX_FMT_SGBRG8, + }, + { + .name = "GRBG Bayer (GRBG)", + .guid = UVC_GUID_FORMAT_GRBG, + .fcc = V4L2_PIX_FMT_SGRBG8, + }, + { + .name = "RGGB Bayer (RGGB)", + .guid = UVC_GUID_FORMAT_RGGB, + .fcc = V4L2_PIX_FMT_SRGGB8, + }, + { + .name = "RGB565", + .guid = UVC_GUID_FORMAT_RGBP, + .fcc = V4L2_PIX_FMT_RGB565, + }, + { + .name = "BGR 8:8:8 (BGR3)", + .guid = UVC_GUID_FORMAT_BGR3, + .fcc = V4L2_PIX_FMT_BGR24, + }, + { + .name = "H.264", + .guid = UVC_GUID_FORMAT_H264, + .fcc = V4L2_PIX_FMT_H264, + }, + { + .name = "Greyscale 8 L/R (Y8I)", + .guid = UVC_GUID_FORMAT_Y8I, + .fcc = V4L2_PIX_FMT_Y8I, + }, + { + .name = "Greyscale 12 L/R (Y12I)", + .guid = UVC_GUID_FORMAT_Y12I, + .fcc = V4L2_PIX_FMT_Y12I, + }, + { + .name = "Depth data 16-bit (Z16)", + .guid = UVC_GUID_FORMAT_Z16, + .fcc = V4L2_PIX_FMT_Z16, + }, + { + .name = "Bayer 10-bit (SRGGB10P)", + .guid = UVC_GUID_FORMAT_RW10, + .fcc = V4L2_PIX_FMT_SRGGB10P, + }, + { + .name = "Bayer 16-bit (SBGGR16)", + .guid = UVC_GUID_FORMAT_BG16, + .fcc = V4L2_PIX_FMT_SBGGR16, + }, + { + .name = "Bayer 16-bit (SGBRG16)", + .guid = UVC_GUID_FORMAT_GB16, + .fcc = V4L2_PIX_FMT_SGBRG16, + }, + { + .name = "Bayer 16-bit (SRGGB16)", + .guid = UVC_GUID_FORMAT_RG16, + .fcc = V4L2_PIX_FMT_SRGGB16, + }, + { + .name = "Bayer 16-bit (SGRBG16)", + .guid = UVC_GUID_FORMAT_GR16, + .fcc = V4L2_PIX_FMT_SGRBG16, + }, + { + .name = "Depth data 16-bit (Z16)", + .guid = UVC_GUID_FORMAT_INVZ, + .fcc = V4L2_PIX_FMT_Z16, + }, + { + .name = "Greyscale 10-bit (Y10 )", + .guid = UVC_GUID_FORMAT_INVI, + .fcc = V4L2_PIX_FMT_Y10, + }, + { + .name = "IR:Depth 26-bit (INZI)", + .guid = UVC_GUID_FORMAT_INZI, + .fcc = V4L2_PIX_FMT_INZI, + }, +}; + +static inline struct uvc_format_desc *uvc_format_by_guid(const u8 guid[16]) +{ + unsigned int len = ARRAY_SIZE(uvc_fmts); + unsigned int i; + + for (i = 0; i < len; ++i) { + if (memcmp(guid, uvc_fmts[i].guid, 16) == 0) + return &uvc_fmts[i]; + } + + return NULL; +} + +#endif /* __LINUX_V4L2_UVC_H */ diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h index 443863c7b8da..b93799fe46e8 100644 --- a/include/net/dst_ops.h +++ b/include/net/dst_ops.h @@ -24,7 +24,7 @@ struct dst_ops { void (*destroy)(struct dst_entry *); void (*ifdown)(struct dst_entry *, struct net_device *dev, int how); - struct dst_entry * (*negative_advice)(struct dst_entry *); + void (*negative_advice)(struct sock *sk, struct dst_entry *); void (*link_failure)(struct sk_buff *); 
void (*update_pmtu)(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb, u32 mtu, diff --git a/include/net/sock.h b/include/net/sock.h index 0f8a1803d09e..231f9a21a18f 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1900,19 +1900,12 @@ sk_dst_get(struct sock *sk) static inline void dst_negative_advice(struct sock *sk) { - struct dst_entry *ndst, *dst = __sk_dst_get(sk); + struct dst_entry *dst = __sk_dst_get(sk); sk_rethink_txhash(sk); - if (dst && dst->ops->negative_advice) { - ndst = dst->ops->negative_advice(dst); - - if (ndst != dst) { - rcu_assign_pointer(sk->sk_dst_cache, ndst); - sk_tx_queue_clear(sk); - sk->sk_dst_pending_confirm = 0; - } - } + if (dst && dst->ops->negative_advice) + dst->ops->negative_advice(sk, dst); } static inline void diff --git a/include/uapi/linux/usb/g_uvc-new.h b/include/uapi/linux/usb/g_uvc-new.h new file mode 100644 index 000000000000..8d7824dde1b2 --- /dev/null +++ b/include/uapi/linux/usb/g_uvc-new.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ +/* + * g_uvc.h -- USB Video Class Gadget driver API + * + * Copyright (C) 2009-2010 Laurent Pinchart <laurent.pinchart@ideasonboard.com> + */ + +#ifndef __LINUX_USB_G_UVC_H +#define __LINUX_USB_G_UVC_H + +#include <linux/ioctl.h> +#include <linux/types.h> +#include <linux/usb/ch9.h> + +#define UVC_EVENT_FIRST (V4L2_EVENT_PRIVATE_START + 0) +#define UVC_EVENT_CONNECT (V4L2_EVENT_PRIVATE_START + 0) +#define UVC_EVENT_DISCONNECT (V4L2_EVENT_PRIVATE_START + 1) +#define UVC_EVENT_STREAMON (V4L2_EVENT_PRIVATE_START + 2) +#define UVC_EVENT_STREAMOFF (V4L2_EVENT_PRIVATE_START + 3) +#define UVC_EVENT_SETUP (V4L2_EVENT_PRIVATE_START + 4) +#define UVC_EVENT_DATA (V4L2_EVENT_PRIVATE_START + 5) +#define UVC_EVENT_LAST (V4L2_EVENT_PRIVATE_START + 5) + +#define UVC_STRING_CONTROL_IDX 0 +#define UVC_STRING_STREAMING_IDX 1 + +struct uvc_request_data { + __s32 length; + __u8 data[60]; +}; + +struct uvc_event { + union { + enum usb_device_speed speed; + struct usb_ctrlrequest req; + struct uvc_request_data data; + }; +}; + +#define UVCIOC_SEND_RESPONSE _IOW('U', 1, struct uvc_request_data) + +#endif /* __LINUX_USB_G_UVC_H */ diff --git a/kernel/cpu.c b/kernel/cpu.c index c21cce755943..3288c7d7980d 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -1416,7 +1416,7 @@ void enable_nonboot_cpus(void) error = _cpu_up(cpu, 1, CPUHP_ONLINE); trace_suspend_resume(TPS("CPU_ON"), cpu, false); if (!error) { - pr_info("CPU%d is up\n", cpu); + pr_debug("CPU%d is up\n", cpu); cpu_device = get_cpu_device(cpu); if (!cpu_device) pr_err("%s: failed to get cpu%d device\n", diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c index c457d77c86a5..4a055872178d 100644 --- a/kernel/locking/mutex.c +++ b/kernel/locking/mutex.c @@ -28,7 +28,6 @@ #include <linux/interrupt.h> #include <linux/debug_locks.h> #include <linux/osq_lock.h> -#include <linux/delay.h> #ifdef CONFIG_DEBUG_MUTEXES # include "mutex-debug.h" @@ -536,21 +535,31 @@ bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner, { bool ret = true; - rcu_read_lock(); - while (__mutex_owner(lock) == owner) { + for (;;) { + unsigned int cpu; + bool same_owner; + /* - * Ensure we emit the owner->on_cpu, dereference _after_ - * checking lock->owner still matches owner. If that fails, + * Ensure lock->owner still matches owner. If that fails, * owner might point to freed memory. If it still matches, * the rcu_read_lock() ensures the memory stays valid. 
*/ - barrier(); + rcu_read_lock(); + same_owner = __mutex_owner(lock) == owner; + if (same_owner) { + ret = owner->on_cpu; + if (ret) + cpu = task_cpu(owner); + } + rcu_read_unlock(); + + if (!ret || !same_owner) + break; /* * Use vcpu_is_preempted to detect lock holder preemption issue. */ - if (!owner->on_cpu || need_resched() || - vcpu_is_preempted(task_cpu(owner))) { + if (need_resched() || vcpu_is_preempted(cpu)) { ret = false; break; } @@ -562,7 +571,6 @@ bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner, cpu_relax(); } - rcu_read_unlock(); return ret; } @@ -664,17 +672,6 @@ mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx, * values at the cost of a few extra spins. */ cpu_relax(); - - /* - * On arm systems, we must slow down the waiter's repeated - * aquisition of spin_mlock and atomics on the lock count, or - * we risk starving out a thread attempting to release the - * mutex. The mutex slowpath release must take spin lock - * wait_lock. This spin lock can share a monitor with the - * other waiter atomics in the mutex data structure, so must - * take care to rate limit the waiters. - */ - udelay(1); } if (!waiter) diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c index ddc9cbd7700b..06c4627df5a7 100644 --- a/kernel/locking/rwsem-xadd.c +++ b/kernel/locking/rwsem-xadd.c @@ -402,31 +402,35 @@ static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem) { struct task_struct *owner = READ_ONCE(sem->owner); - if (!is_rwsem_owner_spinnable(owner)) + if (!owner || !is_rwsem_owner_spinnable(owner)) return false; - rcu_read_lock(); - while (owner && (READ_ONCE(sem->owner) == owner)) { + while (true) { + bool on_cpu, same_owner; + /* - * Ensure we emit the owner->on_cpu, dereference _after_ - * checking sem->owner still matches owner, if that fails, + * Ensure sem->owner still matches owner. If that fails, * owner might point to free()d memory, if it still matches, * the rcu_read_lock() ensures the memory stays valid. */ - barrier(); + rcu_read_lock(); + same_owner = sem->owner == owner; + if (same_owner) + on_cpu = owner_on_cpu(owner); + rcu_read_unlock(); + + if (!same_owner) + break; /* * abort spinning when need_resched or owner is not running or * owner's cpu is preempted. */ - if (need_resched() || !owner_on_cpu(owner)) { - rcu_read_unlock(); + if (!on_cpu || need_resched()) return false; - } cpu_relax(); } - rcu_read_unlock(); /* * If there is a new owner or the owner is not set, we continue diff --git a/kernel/power/qos.c b/kernel/power/qos.c index b016a67b91d7..17a1a6b940ed 100644 --- a/kernel/power/qos.c +++ b/kernel/power/qos.c @@ -54,7 +54,7 @@ /* * locking rule: all changes to constraints or notifiers lists * or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock - * held, taken with _irqsave. One lock to rule them all + * held. 
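
Dropping _irqsave across this file (below) is only safe if pm_qos_lock is never taken from hard-IRQ context; the patch relies on that silently. A hypothetical guard one could add to document the assumption (not part of the patch):

/* Hypothetical debugging aid: assert the new pm_qos locking assumption
 * that the lock is only ever taken with IRQs enabled, which is what
 * makes the plain spin_lock() conversion safe. */
static inline void pm_qos_assert_lock_context(void)
{
	lockdep_assert_irqs_enabled();
}
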
One lock to rule them all */ struct pm_qos_object { struct pm_qos_constraints *constraints; @@ -198,7 +198,6 @@ static int pm_qos_dbg_show_requests(struct seq_file *s, void *unused) struct pm_qos_constraints *c; struct pm_qos_request *req; char *type; - unsigned long flags; int tot_reqs = 0; int active_reqs = 0; @@ -213,7 +212,7 @@ static int pm_qos_dbg_show_requests(struct seq_file *s, void *unused) } /* Lock to ensure we have a snapshot */ - spin_lock_irqsave(&pm_qos_lock, flags); + spin_lock(&pm_qos_lock); if (plist_head_empty(&c->list)) { seq_puts(s, "Empty!\n"); goto out; @@ -249,7 +248,7 @@ static int pm_qos_dbg_show_requests(struct seq_file *s, void *unused) type, pm_qos_get_value(c), active_reqs, tot_reqs); out: - spin_unlock_irqrestore(&pm_qos_lock, flags); + spin_unlock(&pm_qos_lock); return 0; } @@ -334,12 +333,11 @@ static inline int pm_qos_set_value_for_cpus(struct pm_qos_constraints *c, int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node, enum pm_qos_req_action action, int value, bool dev_req) { - unsigned long flags; int prev_value, curr_value, new_value; struct cpumask cpus; int ret; - spin_lock_irqsave(&pm_qos_lock, flags); + spin_lock(&pm_qos_lock); prev_value = pm_qos_get_value(c); if (value == PM_QOS_DEFAULT_VALUE) new_value = c->default_value; @@ -372,7 +370,7 @@ int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node, pm_qos_set_value(c, curr_value); ret = pm_qos_set_value_for_cpus(c, dev_req, &cpus); - spin_unlock_irqrestore(&pm_qos_lock, flags); + spin_unlock(&pm_qos_lock); trace_pm_qos_update_target(action, prev_value, curr_value); @@ -425,10 +423,9 @@ bool pm_qos_update_flags(struct pm_qos_flags *pqf, struct pm_qos_flags_request *req, enum pm_qos_req_action action, s32 val) { - unsigned long irqflags; s32 prev_value, curr_value; - spin_lock_irqsave(&pm_qos_lock, irqflags); + spin_lock(&pm_qos_lock); prev_value = list_empty(&pqf->list) ? 0 : pqf->effective_flags; @@ -452,7 +449,7 @@ bool pm_qos_update_flags(struct pm_qos_flags *pqf, curr_value = list_empty(&pqf->list) ? 
0 : pqf->effective_flags; - spin_unlock_irqrestore(&pm_qos_lock, irqflags); + spin_unlock(&pm_qos_lock); trace_pm_qos_update_flags(action, prev_value, curr_value); return prev_value != curr_value; @@ -487,12 +484,11 @@ EXPORT_SYMBOL_GPL(pm_qos_request_active); int pm_qos_request_for_cpumask(int pm_qos_class, struct cpumask *mask) { - unsigned long irqflags; int cpu; struct pm_qos_constraints *c = NULL; int val; - spin_lock_irqsave(&pm_qos_lock, irqflags); + spin_lock(&pm_qos_lock); c = pm_qos_array[pm_qos_class]->constraints; val = c->default_value; @@ -511,7 +507,7 @@ int pm_qos_request_for_cpumask(int pm_qos_class, struct cpumask *mask) break; } } - spin_unlock_irqrestore(&pm_qos_lock, irqflags); + spin_unlock(&pm_qos_lock); return val; } @@ -884,7 +880,6 @@ static ssize_t pm_qos_power_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos) { s32 value; - unsigned long flags; struct pm_qos_request *req = filp->private_data; if (!req) @@ -892,9 +887,9 @@ static ssize_t pm_qos_power_read(struct file *filp, char __user *buf, if (!pm_qos_request_active(req)) return -EINVAL; - spin_lock_irqsave(&pm_qos_lock, flags); + spin_lock(&pm_qos_lock); value = pm_qos_get_value(pm_qos_array[req->pm_qos_class]->constraints); - spin_unlock_irqrestore(&pm_qos_lock, flags); + spin_unlock(&pm_qos_lock); return simple_read_from_buffer(buf, count, f_pos, &value, sizeof(s32)); } @@ -905,6 +900,9 @@ static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf, s32 value; struct pm_qos_request *req; + /* Don't let userspace impose restrictions on CPU idle levels */ + return count; + if (count == sizeof(s32)) { if (copy_from_user(&value, buf, sizeof(s32))) return -EFAULT; diff --git a/kernel/sched/core.c b/kernel/sched/core.c index d355f782a8d9..d9eb33051fc0 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -4982,7 +4982,8 @@ static void __setscheduler_params(struct task_struct *p, if (policy == SETPARAM_POLICY) policy = p->policy; - p->policy = policy; + /* Replace SCHED_FIFO with SCHED_RR to reduce latency */ + p->policy = policy == SCHED_FIFO ? 
SCHED_RR : policy; if (dl_policy(policy)) __setparam_dl(p, attr); diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index 2fc7d4f61bf6..8dfefc5dbe20 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c @@ -1091,7 +1091,7 @@ static void sugov_policy_free(struct sugov_policy *sg_policy) static int sugov_kthread_create(struct sugov_policy *sg_policy) { struct task_struct *thread; - struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO / 2 }; + struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO - 1 }; struct cpufreq_policy *policy = sg_policy->policy; int ret; diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index b8d4a6968eec..14ef3d2f14c7 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -595,8 +595,7 @@ config HAVE_DEBUG_KMEMLEAK config DEBUG_KMEMLEAK bool "Kernel memory leak detector" - depends on DEBUG_KERNEL && HAVE_DEBUG_KMEMLEAK - select DEBUG_FS + depends on HAVE_DEBUG_KMEMLEAK select STACKTRACE if STACKTRACE_SUPPORT select KALLSYMS select CRC32 diff --git a/lib/scatterlist.c b/lib/scatterlist.c index 3b859201f84c..cabcf0848b30 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c @@ -150,31 +150,12 @@ EXPORT_SYMBOL(sg_init_one); */ static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask) { - if (nents == SG_MAX_SINGLE_ALLOC) { - /* - * Kmemleak doesn't track page allocations as they are not - * commonly used (in a raw form) for kernel data structures. - * As we chain together a list of pages and then a normal - * kmalloc (tracked by kmemleak), in order to for that last - * allocation not to become decoupled (and thus a - * false-positive) we need to inform kmemleak of all the - * intermediate allocations. - */ - void *ptr = (void *) __get_free_page(gfp_mask); - kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask); - return ptr; - } else - return kmalloc_array(nents, sizeof(struct scatterlist), - gfp_mask); + return kmalloc_array(nents, sizeof(struct scatterlist), gfp_mask); } static void sg_kfree(struct scatterlist *sg, unsigned int nents) { - if (nents == SG_MAX_SINGLE_ALLOC) { - kmemleak_free(sg); - free_page((unsigned long) sg); - } else - kfree(sg); + kfree(sg); } /** diff --git a/mm/Makefile b/mm/Makefile index 0b2820ecf4a5..35006a7f1543 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -22,11 +22,12 @@ KCOV_INSTRUMENT_mmzone.o := n KCOV_INSTRUMENT_vmstat.o := n mmu-y := nommu.o -mmu-$(CONFIG_MMU) := gup.o highmem.o memory.o mincore.o \ +mmu-$(CONFIG_MMU) := gup.o memory.o mincore.o \ mlock.o mmap.o mprotect.o mremap.o msync.o \ page_vma_mapped.o pagewalk.o pgtable-generic.o \ rmap.o vmalloc.o +mmu-$(CONFIG_HIGHMEM) += highmem.o ifdef CONFIG_MM_EVENT_STAT mmu-$(CONFIG_MMU) += mm_event.o @@ -108,3 +109,5 @@ obj-$(CONFIG_HARDENED_USERCOPY) += usercopy.o obj-$(CONFIG_PERCPU_STATS) += percpu-stats.o obj-$(CONFIG_HMM) += hmm.o obj-$(CONFIG_MEMFD_CREATE) += memfd.o + +CFLAGS_kmemleak.o += -DCONFIG_DEBUG_FS diff --git a/mm/compaction.c b/mm/compaction.c index 012c38ceb0c0..ac56bdae390f 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -1218,7 +1218,7 @@ typedef enum { * Allow userspace to control policy on scanning the unevictable LRU for * compactable pages. 
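
The default flip in the hunk below remains runtime-tunable; a minimal sketch reading it back through the standard procfs path:

/* Illustrative only: read back vm.compact_unevictable_allowed, whose
 * built-in default changes from 1 to 0 below. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/vm/compact_unevictable_allowed", "r");
	int val;

	if (!f)
		return 1;
	if (fscanf(f, "%d", &val) == 1)
		printf("compact_unevictable_allowed = %d\n", val);
	fclose(f);
	return 0;
}
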
*/ -int sysctl_compact_unevictable_allowed __read_mostly = 1; +int sysctl_compact_unevictable_allowed __read_mostly = 0; /* * Isolate all pages that can be migrated from the first suitable block, diff --git a/mm/kmemleak.c b/mm/kmemleak.c index 740cba23f4a3..73680678efde 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -563,11 +563,10 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size, struct kmemleak_object *object, *parent; struct rb_node **link, *rb_parent; - object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp)); - if (!object) { - pr_warn("Cannot allocate a kmemleak_object structure\n"); - kmemleak_disable(); - return NULL; + while (1) { + object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp)); + if (object) + break; } INIT_LIST_HEAD(&object->object_list); diff --git a/mm/mmap.c b/mm/mmap.c index ec3874bbbe2c..483139c649f6 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -3666,11 +3666,6 @@ subsys_initcall(init_user_reserve); */ static int init_admin_reserve(void) { - unsigned long free_kbytes; - - free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10); - - sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13); return 0; } subsys_initcall(init_admin_reserve); diff --git a/mm/util.c b/mm/util.c index a47ac1ae7135..47741c67b2a8 100644 --- a/mm/util.c +++ b/mm/util.c @@ -615,7 +615,7 @@ int sysctl_overcommit_ratio __read_mostly = 50; unsigned long sysctl_overcommit_kbytes __read_mostly; int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT; unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */ -unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */ +unsigned long sysctl_admin_reserve_kbytes __read_mostly; /* 0MB */ int overcommit_ratio_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 3a57f9227572..422a3b0f6067 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -32,6 +32,7 @@ #include <linux/list.h> #include <linux/notifier.h> #include <linux/rbtree.h> +#include <linux/rbtree_augmented.h> #include <linux/radix-tree.h> #include <linux/rcupdate.h> @@ -332,14 +333,67 @@ static DEFINE_SPINLOCK(vmap_area_lock); LIST_HEAD(vmap_area_list); static LLIST_HEAD(vmap_purge_list); static struct rb_root vmap_area_root = RB_ROOT; +static bool vmap_initialized __read_mostly; -/* The vmap cache globals are protected by vmap_area_lock */ -static struct rb_node *free_vmap_cache; -static unsigned long cached_hole_size; -static unsigned long cached_vstart; -static unsigned long cached_align; +/* + * This kmem_cache is used for vmap_area objects. Instead of + * allocating from slab we reuse an object from this cache to + * make things faster, especially for "no edge" splits of a + * free block. + */ +static struct kmem_cache *vmap_area_cachep; + +/* + * This linked list is used in tandem with free_vmap_area_root. + * It gives O(1) access to prev/next to perform fast coalescing. + */ +static LIST_HEAD(free_vmap_area_list); + +/* + * This augmented red-black tree represents the free vmap space. + * All vmap_area objects in this tree are sorted by va->va_start + * address. It is used for allocation and merging when a vmap + * object is released. + * + * Each vmap_area node stores the maximum available free-block + * size in its sub-tree, right or left. Therefore it is possible + * to find the lowest match of a free area. 
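
The comment above describes the core trick of the new allocator: each node caches the largest free-block size in its subtree, so a first-fit search can prune whole subtrees in O(1). A toy, array-backed model of that descent (purely illustrative; no balancing, invented addresses):

/* Illustrative only: a simplified model of the subtree_max_size pruning
 * that find_vmap_lowest_match() performs below. */
#include <stdio.h>

struct range { unsigned long start, size; int l, r; unsigned long max; };

/* address-sorted tree:        [0x3000, 0x100)
 *                            /               \
 *               [0x1000, 0x400)           [0x8000, 0x2000)   */
static struct range t[] = {
	{ 0x3000, 0x100,   1,  2, 0 },
	{ 0x1000, 0x400,  -1, -1, 0 },
	{ 0x8000, 0x2000, -1, -1, 0 },
};

static unsigned long fixup(int i)	/* compute cached subtree maxima */
{
	unsigned long m, c;

	if (i < 0)
		return 0;
	m = t[i].size;
	c = fixup(t[i].l);
	if (c > m)
		m = c;
	c = fixup(t[i].r);
	if (c > m)
		m = c;
	return t[i].max = m;
}

static struct range *find_lowest(int i, unsigned long size)
{
	if (i < 0 || t[i].max < size)
		return NULL;	/* whole subtree pruned via the cached max */
	struct range *hit = find_lowest(t[i].l, size);
	if (hit)
		return hit;
	if (t[i].size >= size)
		return &t[i];
	return find_lowest(t[i].r, size);
}

int main(void)
{
	fixup(0);
	struct range *r = find_lowest(0, 0x1000);
	if (r)	/* expected: 0x8000, the lowest block that fits 0x1000 */
		printf("lowest fit at 0x%lx\n", r->start);
	return 0;
}
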
+ */ +static struct rb_root free_vmap_area_root = RB_ROOT; + +static __always_inline unsigned long +va_size(struct vmap_area *va) +{ + return (va->va_end - va->va_start); +} + +static __always_inline unsigned long +get_subtree_max_size(struct rb_node *node) +{ + struct vmap_area *va; + + va = rb_entry_safe(node, struct vmap_area, rb_node); + return va ? va->subtree_max_size : 0; +} + +/* + * Gets called when the node is removed or rotated. + */ +static __always_inline unsigned long +compute_subtree_max_size(struct vmap_area *va) +{ + return max3(va_size(va), + get_subtree_max_size(va->rb_node.rb_left), + get_subtree_max_size(va->rb_node.rb_right)); +} -static unsigned long vmap_area_pcpu_hole; +RB_DECLARE_CALLBACKS(static, free_vmap_area_rb_augment_cb, + struct vmap_area, rb_node, unsigned long, subtree_max_size, + compute_subtree_max_size) + +static void purge_vmap_area_lazy(void); +static BLOCKING_NOTIFIER_HEAD(vmap_notify_list); +static unsigned long lazy_max_pages(void); static atomic_long_t nr_vmalloc_pages; @@ -367,41 +421,522 @@ static struct vmap_area *__find_vmap_area(unsigned long addr) return NULL; } -static void __insert_vmap_area(struct vmap_area *va) -{ - struct rb_node **p = &vmap_area_root.rb_node; - struct rb_node *parent = NULL; - struct rb_node *tmp; +/* + * This function returns the address of the parent node + * and its left or right link for further processing. + */ +static __always_inline struct rb_node ** +find_va_links(struct vmap_area *va, + struct rb_root *root, struct rb_node *from, + struct rb_node **parent) +{ + struct vmap_area *tmp_va; + struct rb_node **link; + + if (root) { + link = &root->rb_node; + if (unlikely(!*link)) { + *parent = NULL; + return link; + } + } else { + link = &from; + } - while (*p) { - struct vmap_area *tmp_va; + /* + * Go to the bottom of the tree. When we hit the last point, + * we end up with the parent rb_node and the correct direction, + * named "link" here, where the new va->rb_node will be attached. + */ + do { + tmp_va = rb_entry(*link, struct vmap_area, rb_node); - parent = *p; - tmp_va = rb_entry(parent, struct vmap_area, rb_node); - if (va->va_start < tmp_va->va_end) - p = &(*p)->rb_left; - else if (va->va_end > tmp_va->va_start) - p = &(*p)->rb_right; + /* + * During the traversal we also do some sanity checks. + * Trigger a BUG() if there is a partial (left/right) + * or full overlap. + */ + if (va->va_start < tmp_va->va_end && + va->va_end <= tmp_va->va_start) + link = &(*link)->rb_left; + else if (va->va_end > tmp_va->va_start && + va->va_start >= tmp_va->va_end) + link = &(*link)->rb_right; else BUG(); + } while (*link); + + *parent = &tmp_va->rb_node; + return link; +} + +static __always_inline struct list_head * +get_va_next_sibling(struct rb_node *parent, struct rb_node **link) +{ + struct list_head *list; + + if (unlikely(!parent)) + /* + * The red-black tree where we try to find VA neighbors + * before merging or inserting is empty, i.e. it means + * there is no free vmap space. Normally it does not + * happen but we handle this case anyway. + */ + return NULL; + + list = &rb_entry(parent, struct vmap_area, rb_node)->list; + return (&parent->rb_right == link ? list->next : list); +} + +static __always_inline void +link_va(struct vmap_area *va, struct rb_root *root, + struct rb_node *parent, struct rb_node **link, struct list_head *head) +{ + /* + * VA is not yet in the list, but we can + * identify its future previous list_head node. 
+ */ + if (likely(parent)) { + head = &rb_entry(parent, struct vmap_area, rb_node)->list; + if (&parent->rb_right != link) + head = head->prev; } - rb_link_node(&va->rb_node, parent, p); - rb_insert_color(&va->rb_node, &vmap_area_root); + /* Insert to the rb-tree */ + rb_link_node(&va->rb_node, parent, link); + if (root == &free_vmap_area_root) { + /* + * Just perform a simple insertion into the tree. We do + * not set va->subtree_max_size to its current size before + * calling rb_insert_augmented(). This is because we + * populate the tree from the bottom up, towards the parent + * levels, once the node _is_ in the tree. + * + * Therefore we set subtree_max_size to zero after insertion, + * to let __augment_tree_propagate_from() put everything in + * the correct order later on. + */ + rb_insert_augmented(&va->rb_node, + root, &free_vmap_area_rb_augment_cb); + va->subtree_max_size = 0; + } else { + rb_insert_color(&va->rb_node, root); + } - /* address-sort this list */ - tmp = rb_prev(&va->rb_node); - if (tmp) { - struct vmap_area *prev; - prev = rb_entry(tmp, struct vmap_area, rb_node); - list_add_rcu(&va->list, &prev->list); - } else - list_add_rcu(&va->list, &vmap_area_list); + /* Address-sort this list */ + list_add(&va->list, head); } -static void purge_vmap_area_lazy(void); +static __always_inline void +unlink_va(struct vmap_area *va, struct rb_root *root) +{ + /* + * During merging a VA node can be empty, therefore + * linked with neither the tree nor the list. Just check it. + */ + if (!RB_EMPTY_NODE(&va->rb_node)) { + if (root == &free_vmap_area_root) + rb_erase_augmented(&va->rb_node, + root, &free_vmap_area_rb_augment_cb); + else + rb_erase(&va->rb_node, root); -static BLOCKING_NOTIFIER_HEAD(vmap_notify_list); + list_del(&va->list); + RB_CLEAR_NODE(&va->rb_node); + } +} + +/* + * This function populates subtree_max_size from the bottom up, + * starting from the VA's node. The propagation must be done + * when the VA size is modified by changing its va_start/va_end, + * or when a new VA is inserted into the tree. + * + * It means that __augment_tree_propagate_from() must be called: + * - After VA has been inserted into the tree (free path); + * - After VA has been shrunk (allocation path); + * - After VA has been grown (merging path). + * + * Note that this does not mean that upper parent nodes + * and their subtree_max_size are recalculated all the time up + * to the root node. + * + * 4--8 + * /\ + * / \ + * / \ + * 2--2 8--8 + * + * For example if we modify the node 4, shrinking it to 2, then + * no modification is required. If we shrink the node 2 to 1, + * only its subtree_max_size is updated, and set to 1. If we shrink + * the node 8 to 6, then its subtree_max_size is set to 6 and the + * parent node becomes 4--6. + */ +static __always_inline void +augment_tree_propagate_from(struct vmap_area *va) +{ + struct rb_node *node = &va->rb_node; + unsigned long new_va_sub_max_size; + + while (node) { + va = rb_entry(node, struct vmap_area, rb_node); + new_va_sub_max_size = compute_subtree_max_size(va); + + /* + * If the newly calculated maximum available size of the + * subtree is equal to the current one, then it means that + * the tree is already propagated correctly, so we can stop + * at this point to save cycles. 
+ */ + if (va->subtree_max_size == new_va_sub_max_size) + break; + + va->subtree_max_size = new_va_sub_max_size; + node = rb_parent(&va->rb_node); + } +} + +static void +insert_vmap_area(struct vmap_area *va, + struct rb_root *root, struct list_head *head) +{ + struct rb_node **link; + struct rb_node *parent; + + link = find_va_links(va, root, NULL, &parent); + link_va(va, root, parent, link, head); +} + +static void +insert_vmap_area_augment(struct vmap_area *va, + struct rb_node *from, struct rb_root *root, + struct list_head *head) +{ + struct rb_node **link; + struct rb_node *parent; + + if (from) + link = find_va_links(va, NULL, from, &parent); + else + link = find_va_links(va, root, NULL, &parent); + + link_va(va, root, parent, link, head); + augment_tree_propagate_from(va); +} + +/* + * Merge a de-allocated chunk of VA memory with the previous + * and next free blocks. If no coalescing is done, a new + * free area is inserted. If the VA has been merged, it is + * freed. + */ +static __always_inline void +merge_or_add_vmap_area(struct vmap_area *va, + struct rb_root *root, struct list_head *head) +{ + struct vmap_area *sibling; + struct list_head *next; + struct rb_node **link; + struct rb_node *parent; + bool merged = false; + + /* + * Find a place in the tree where VA potentially will be + * inserted, unless it is merged with its sibling/siblings. + */ + link = find_va_links(va, root, NULL, &parent); + + /* + * Get the next node of VA to check if merging can be done. + */ + next = get_va_next_sibling(parent, link); + if (unlikely(next == NULL)) + goto insert; + + /* + * start end + * | | + * |<------VA------>|<-----Next----->| + * | | + * start end + */ + if (next != head) { + sibling = list_entry(next, struct vmap_area, list); + if (sibling->va_start == va->va_end) { + sibling->va_start = va->va_start; + + /* Check and update the tree if needed. */ + augment_tree_propagate_from(sibling); + + /* Remove this VA, it has been merged. */ + unlink_va(va, root); + + /* Free vmap_area object. */ + kmem_cache_free(vmap_area_cachep, va); + + /* Point to the new merged area. */ + va = sibling; + merged = true; + } + } + + /* + * start end + * | | + * |<-----Prev----->|<------VA------>| + * | | + * start end + */ + if (next->prev != head) { + sibling = list_entry(next->prev, struct vmap_area, list); + if (sibling->va_end == va->va_start) { + sibling->va_end = va->va_end; + + /* Check and update the tree if needed. */ + augment_tree_propagate_from(sibling); + + /* Remove this VA, it has been merged. */ + unlink_va(va, root); + + /* Free vmap_area object. */ + kmem_cache_free(vmap_area_cachep, va); + + return; + } + } + +insert: + if (!merged) { + link_va(va, root, parent, link, head); + augment_tree_propagate_from(va); + } +} + +static __always_inline bool +is_within_this_va(struct vmap_area *va, unsigned long size, + unsigned long align, unsigned long vstart) +{ + unsigned long nva_start_addr; + + if (va->va_start > vstart) + nva_start_addr = ALIGN(va->va_start, align); + else + nva_start_addr = ALIGN(vstart, align); + + /* Can overflow due to a big size or alignment. */ + if (nva_start_addr + size < nva_start_addr || + nva_start_addr < vstart) + return false; + + return (nva_start_addr + size <= va->va_end); +} + +/* + * Find the first free block (lowest start address) in the tree + * that can satisfy the request described by the passed + * parameters.
+ */ +static __always_inline struct vmap_area * +find_vmap_lowest_match(unsigned long size, + unsigned long align, unsigned long vstart) +{ + struct vmap_area *va; + struct rb_node *node; + unsigned long length; + + /* Start from the root. */ + node = free_vmap_area_root.rb_node; + + /* Adjust the search size for alignment overhead. */ + length = size + align - 1; + + while (node) { + va = rb_entry(node, struct vmap_area, rb_node); + + if (get_subtree_max_size(node->rb_left) >= length && + vstart < va->va_start) { + node = node->rb_left; + } else { + if (is_within_this_va(va, size, align, vstart)) + return va; + + /* + * It does not make sense to go deeper into the right + * sub-tree if it does not have a free block that is + * equal to or bigger than the requested search length. + */ + if (get_subtree_max_size(node->rb_right) >= length) { + node = node->rb_right; + continue; + } + + /* + * OK. We roll back and find the first right sub-tree + * that satisfies the search criteria. Due to the + * "vstart" restriction this can happen only once. + */ + while ((node = rb_parent(node))) { + va = rb_entry(node, struct vmap_area, rb_node); + if (is_within_this_va(va, size, align, vstart)) + return va; + + if (get_subtree_max_size(node->rb_right) >= length && + vstart <= va->va_start) { + node = node->rb_right; + break; + } + } + } + } + + return NULL; +} + +enum fit_type { + NOTHING_FIT = 0, + FL_FIT_TYPE = 1, /* full fit */ + LE_FIT_TYPE = 2, /* left edge fit */ + RE_FIT_TYPE = 3, /* right edge fit */ + NE_FIT_TYPE = 4 /* no edge fit */ +}; + +static __always_inline enum fit_type +classify_va_fit_type(struct vmap_area *va, + unsigned long nva_start_addr, unsigned long size) +{ + enum fit_type type; + + /* Check if it is within VA. */ + if (nva_start_addr < va->va_start || + nva_start_addr + size > va->va_end) + return NOTHING_FIT; + + /* Now classify. */ + if (va->va_start == nva_start_addr) { + if (va->va_end == nva_start_addr + size) + type = FL_FIT_TYPE; + else + type = LE_FIT_TYPE; + } else if (va->va_end == nva_start_addr + size) { + type = RE_FIT_TYPE; + } else { + type = NE_FIT_TYPE; + } + + return type; +} + +static __always_inline int +adjust_va_to_fit_type(struct vmap_area *va, + unsigned long nva_start_addr, unsigned long size, + enum fit_type type) +{ + struct vmap_area *lva; + + if (type == FL_FIT_TYPE) { + /* + * No need to split VA, it fully fits. + * + * | | + * V NVA V + * |---------------| + */ + unlink_va(va, &free_vmap_area_root); + kmem_cache_free(vmap_area_cachep, va); + } else if (type == LE_FIT_TYPE) { + /* + * Split the left edge of the fit VA. + * + * | | + * V NVA V R + * |-------|-------| + */ + va->va_start += size; + } else if (type == RE_FIT_TYPE) { + /* + * Split the right edge of the fit VA. + * + * | | + * L V NVA V + * |-------|-------| + */ + va->va_end = nva_start_addr; + } else if (type == NE_FIT_TYPE) { + /* + * Split no edge of the fit VA. + * + * | | + * L V NVA V R + * |---|-------|---| + */ + lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT); + if (unlikely(!lva)) + return -1; + + /* + * Build the remainder. + */ + lva->va_start = va->va_start; + lva->va_end = nva_start_addr; + + /* + * Shrink this VA to the remaining size. + */ + va->va_start = nva_start_addr + size; + } else { + return -1; + } + + if (type != FL_FIT_TYPE) { + augment_tree_propagate_from(va); + + if (type == NE_FIT_TYPE) + insert_vmap_area_augment(lva, &va->rb_node, + &free_vmap_area_root, &free_vmap_area_list); + } + + return 0; +} + +/* + * Returns the start address of the newly allocated area on success.
+ * Otherwise a vend is returned that indicates failure. + */ +static __always_inline unsigned long +__alloc_vmap_area(unsigned long size, unsigned long align, + unsigned long vstart, unsigned long vend, int node) +{ + unsigned long nva_start_addr; + struct vmap_area *va; + enum fit_type type; + int ret; + + va = find_vmap_lowest_match(size, align, vstart); + if (unlikely(!va)) + return vend; + + if (va->va_start > vstart) + nva_start_addr = ALIGN(va->va_start, align); + else + nva_start_addr = ALIGN(vstart, align); + + /* Check the "vend" restriction. */ + if (nva_start_addr + size > vend) + return vend; + + /* Classify what we have found. */ + type = classify_va_fit_type(va, nva_start_addr, size); + if (WARN_ON_ONCE(type == NOTHING_FIT)) + return vend; + + /* Update the free vmap_area. */ + ret = adjust_va_to_fit_type(va, nva_start_addr, size, type); + if (ret) + return vend; + + return nva_start_addr; +} /* * Allocate a region of KVA of the specified size and alignment, within the @@ -413,18 +948,19 @@ static struct vmap_area *alloc_vmap_area(unsigned long size, int node, gfp_t gfp_mask) { struct vmap_area *va; - struct rb_node *n; unsigned long addr; int purged = 0; - struct vmap_area *first; BUG_ON(!size); BUG_ON(offset_in_page(size)); BUG_ON(!is_power_of_2(align)); + if (unlikely(!vmap_initialized)) + return ERR_PTR(-EBUSY); + might_sleep(); - va = kmalloc_node(sizeof(struct vmap_area), + va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask & GFP_RECLAIM_MASK, node); if (unlikely(!va)) return ERR_PTR(-ENOMEM); @@ -437,87 +973,20 @@ static struct vmap_area *alloc_vmap_area(unsigned long size, retry: spin_lock(&vmap_area_lock); - /* - * Invalidate cache if we have more permissive parameters. - * cached_hole_size notes the largest hole noticed _below_ - * the vmap_area cached in free_vmap_cache: if size fits - * into that hole, we want to scan from vstart to reuse - * the hole instead of allocating above free_vmap_cache. - * Note that __free_vmap_area may update free_vmap_cache - * without updating cached_hole_size or cached_align. - */ - if (!free_vmap_cache || - size < cached_hole_size || - vstart < cached_vstart || - align < cached_align) { -nocache: - cached_hole_size = 0; - free_vmap_cache = NULL; - } - /* record if we encounter less permissive parameters */ - cached_vstart = vstart; - cached_align = align; - - /* find starting point for our search */ - if (free_vmap_cache) { - first = rb_entry(free_vmap_cache, struct vmap_area, rb_node); - addr = ALIGN(first->va_end, align); - if (addr < vstart) - goto nocache; - if (addr + size < addr) - goto overflow; - - } else { - addr = ALIGN(vstart, align); - if (addr + size < addr) - goto overflow; - - n = vmap_area_root.rb_node; - first = NULL; - - while (n) { - struct vmap_area *tmp; - tmp = rb_entry(n, struct vmap_area, rb_node); - if (tmp->va_end >= addr) { - first = tmp; - if (tmp->va_start <= addr) - break; - n = n->rb_left; - } else - n = n->rb_right; - } - - if (!first) - goto found; - } - - /* from the starting point, walk areas until a suitable hole is found */ - while (addr + size > first->va_start && addr + size <= vend) { - if (addr + cached_hole_size < first->va_start) - cached_hole_size = first->va_start - addr; - addr = ALIGN(first->va_end, align); - if (addr + size < addr) - goto overflow; - - if (list_is_last(&first->list, &vmap_area_list)) - goto found; - - first = list_next_entry(first, list); - } -found: /* - * Check also calculated address against the vstart, - * because it can be 0 because of big align request. 
+ * If an allocation fails, the "vend" address is + * returned. Therefore trigger the overflow path. */ - if (addr + size > vend || addr < vstart) + addr = __alloc_vmap_area(size, align, vstart, vend, node); + if (unlikely(addr == vend)) goto overflow; va->va_start = addr; va->va_end = addr + size; va->flags = 0; - __insert_vmap_area(va); - free_vmap_cache = &va->rb_node; + insert_vmap_area(va, &vmap_area_root, &vmap_area_list); + spin_unlock(&vmap_area_lock); BUG_ON(!IS_ALIGNED(va->va_start, align)); @@ -546,7 +1015,8 @@ static struct vmap_area *alloc_vmap_area(unsigned long size, if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) pr_warn("vmap allocation for size %lu failed: use vmalloc= to increase size\n", size); - kfree(va); + + kmem_cache_free(vmap_area_cachep, va); return ERR_PTR(-EBUSY); } @@ -566,35 +1036,16 @@ static void __free_vmap_area(struct vmap_area *va) { BUG_ON(RB_EMPTY_NODE(&va->rb_node)); - if (free_vmap_cache) { - if (va->va_end < cached_vstart) { - free_vmap_cache = NULL; - } else { - struct vmap_area *cache; - cache = rb_entry(free_vmap_cache, struct vmap_area, rb_node); - if (va->va_start <= cache->va_start) { - free_vmap_cache = rb_prev(&va->rb_node); - /* - * We don't try to update cached_hole_size or - * cached_align, but it won't go very wrong. - */ - } - } - } - rb_erase(&va->rb_node, &vmap_area_root); - RB_CLEAR_NODE(&va->rb_node); - list_del_rcu(&va->list); - /* - * Track the highest possible candidate for pcpu area - * allocation. Areas outside of vmalloc area can be returned - * here too, consider only end addresses which fall inside - * vmalloc area proper. + * Remove from the busy tree/list. */ - if (va->va_end > VMALLOC_START && va->va_end <= VMALLOC_END) - vmap_area_pcpu_hole = max(vmap_area_pcpu_hole, va->va_end); + unlink_va(va, &vmap_area_root); - kfree_rcu(va, rcu_head); + /* + * Merge VA with its neighbors, otherwise just add it. + */ + merge_or_add_vmap_area(va, + &free_vmap_area_root, &free_vmap_area_list); } /* @@ -640,7 +1091,7 @@ static unsigned long lazy_max_pages(void) return log * (32UL * 1024 * 1024 / PAGE_SIZE); } -static atomic_t vmap_lazy_nr = ATOMIC_INIT(0); +static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0); /* * Serialize vmap purging. There is no actual critical section protected @@ -658,7 +1109,7 @@ static void purge_fragmented_blocks_allcpus(void); */ void set_iounmap_nonlazy(void) { - atomic_set(&vmap_lazy_nr, lazy_max_pages()+1); + atomic_long_set(&vmap_lazy_nr, lazy_max_pages()+1); } /* @@ -666,34 +1117,40 @@ void set_iounmap_nonlazy(void) */ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end) { + unsigned long resched_threshold; struct llist_node *valist; struct vmap_area *va; struct vmap_area *n_va; - bool do_free = false; lockdep_assert_held(&vmap_purge_lock); valist = llist_del_all(&vmap_purge_list); + if (unlikely(valist == NULL)) + return false; + + /* + * TODO: calculate the flush range without looping. + * The list can be up to lazy_max_pages() elements.
+ */ llist_for_each_entry(va, valist, purge_list) { if (va->va_start < start) start = va->va_start; if (va->va_end > end) end = va->va_end; - do_free = true; } - if (!do_free) - return false; - flush_tlb_kernel_range(start, end); + resched_threshold = lazy_max_pages() << 1; spin_lock(&vmap_area_lock); llist_for_each_entry_safe(va, n_va, valist, purge_list) { - int nr = (va->va_end - va->va_start) >> PAGE_SHIFT; + unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT; __free_vmap_area(va); - atomic_sub(nr, &vmap_lazy_nr); - cond_resched_lock(&vmap_area_lock); + atomic_long_sub(nr, &vmap_lazy_nr); + + if (atomic_long_read(&vmap_lazy_nr) < resched_threshold) + cond_resched_lock(&vmap_area_lock); } spin_unlock(&vmap_area_lock); return true; @@ -729,10 +1186,10 @@ static void purge_vmap_area_lazy(void) */ static void free_vmap_area_noflush(struct vmap_area *va) { - int nr_lazy; + unsigned long nr_lazy; - nr_lazy = atomic_add_return((va->va_end - va->va_start) >> PAGE_SHIFT, - &vmap_lazy_nr); + nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >> + PAGE_SHIFT, &vmap_lazy_nr); /* After this point, we may free va at any time */ llist_add(&va->purge_list, &vmap_purge_list); @@ -795,8 +1252,6 @@ static struct vmap_area *find_vmap_area(unsigned long addr) #define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE) -static bool vmap_initialized __read_mostly = false; - struct vmap_block_queue { spinlock_t lock; struct list_head free; @@ -1250,12 +1705,58 @@ void __init vm_area_register_early(struct vm_struct *vm, size_t align) vm_area_add_early(vm); } +static void vmap_init_free_space(void) +{ + unsigned long vmap_start = 1; + const unsigned long vmap_end = ULONG_MAX; + struct vmap_area *busy, *free; + + /* + * B F B B B F + * -|-----|.....|-----|-----|-----|.....|- + * | The KVA space | + * |<--------------------------------->| + */ + list_for_each_entry(busy, &vmap_area_list, list) { + if (busy->va_start - vmap_start > 0) { + free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); + if (!WARN_ON_ONCE(!free)) { + free->va_start = vmap_start; + free->va_end = busy->va_start; + + insert_vmap_area_augment(free, NULL, + &free_vmap_area_root, + &free_vmap_area_list); + } + } + + vmap_start = busy->va_end; + } + + if (vmap_end - vmap_start > 0) { + free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); + if (!WARN_ON_ONCE(!free)) { + free->va_start = vmap_start; + free->va_end = vmap_end; + + insert_vmap_area_augment(free, NULL, + &free_vmap_area_root, + &free_vmap_area_list); + } + } +} + void __init vmalloc_init(void) { struct vmap_area *va; struct vm_struct *tmp; int i; + /* + * Create the cache for vmap_area objects. + */ + vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC); + for_each_possible_cpu(i) { struct vmap_block_queue *vbq; struct vfree_deferred *p; @@ -1270,16 +1771,21 @@ void __init vmalloc_init(void) /* Import existing vmlist entries. */ for (tmp = vmlist; tmp; tmp = tmp->next) { - va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT); + va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); + if (WARN_ON_ONCE(!va)) + continue; + va->flags = VM_VM_AREA; va->va_start = (unsigned long)tmp->addr; va->va_end = va->va_start + tmp->size; va->vm = tmp; - __insert_vmap_area(va); + insert_vmap_area(va, &vmap_area_root, &vmap_area_list); } - vmap_area_pcpu_hole = VMALLOC_END; - + /* + * Now we can initialize a free vmap space. 
+ */ + vmap_init_free_space(); vmap_initialized = true; } @@ -2401,81 +2907,64 @@ static struct vmap_area *node_to_va(struct rb_node *n) } /** - * pvm_find_next_prev - find the next and prev vmap_area surrounding @end - * @end: target address - * @pnext: out arg for the next vmap_area - * @pprev: out arg for the previous vmap_area + * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to + * @addr: target address * - * Returns: %true if either or both of next and prev are found, - * %false if no vmap_area exists - * - * Find vmap_areas end addresses of which enclose @end. ie. if not - * NULL, *pnext->va_end > @end and *pprev->va_end <= @end. + * Returns: the vmap_area if it is found. If there is no such area, + * the highest vmap_area that precedes @addr is returned, + * i.e. va->va_start < addr && va->va_end < addr, or NULL + * if there is no area before @addr. */ -static bool pvm_find_next_prev(unsigned long end, - struct vmap_area **pnext, - struct vmap_area **pprev) +static struct vmap_area * +pvm_find_va_enclose_addr(unsigned long addr) { - struct rb_node *n = vmap_area_root.rb_node; - struct vmap_area *va = NULL; + struct vmap_area *va, *tmp; + struct rb_node *n; + + n = free_vmap_area_root.rb_node; + va = NULL; while (n) { - va = rb_entry(n, struct vmap_area, rb_node); - if (end < va->va_end) - n = n->rb_left; - else if (end > va->va_end) + tmp = rb_entry(n, struct vmap_area, rb_node); + if (tmp->va_start <= addr) { + va = tmp; + if (tmp->va_end >= addr) + break; + n = n->rb_right; - else - break; + } else { + n = n->rb_left; + } } - if (!va) - return false; - - if (va->va_end > end) { - *pnext = va; - *pprev = node_to_va(rb_prev(&(*pnext)->rb_node)); - } else { - *pprev = va; - *pnext = node_to_va(rb_next(&(*pprev)->rb_node)); - } - return true; + return va; } /** - * pvm_determine_end - find the highest aligned address between two vmap_areas - * @pnext: in/out arg for the next vmap_area - * @pprev: in/out arg for the previous vmap_area - * @align: alignment - * - * Returns: determined end address - * - * Find the highest aligned address between *@pnext and *@pprev below - * VMALLOC_END. *@pnext and *@pprev are adjusted so that the aligned - * down address is between the end addresses of the two vmap_areas. + * pvm_determine_end_from_reverse - find the highest aligned address + * of a free block below VMALLOC_END + * @va: + * in - the VA we start the search from (reverse order); + * out - the VA with the highest aligned end address. * - * Please note that the address returned by this function may fall - * inside *@pnext vmap_area. The caller is responsible for checking - * that.
+ * Returns: determined end address within vmap_area */ -static unsigned long pvm_determine_end(struct vmap_area **pnext, - struct vmap_area **pprev, - unsigned long align) +static unsigned long +pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align) { - const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); + unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); unsigned long addr; - if (*pnext) - addr = min((*pnext)->va_start & ~(align - 1), vmalloc_end); - else - addr = vmalloc_end; - - while (*pprev && (*pprev)->va_end > addr) { - *pnext = *pprev; - *pprev = node_to_va(rb_prev(&(*pnext)->rb_node)); + if (likely(*va)) { + list_for_each_entry_from_reverse((*va), + &free_vmap_area_list, list) { + addr = min((*va)->va_end & ~(align - 1), vmalloc_end); + if ((*va)->va_start < addr) + return addr; + } } - return addr; + return 0; } /** @@ -2495,12 +2984,12 @@ static unsigned long pvm_determine_end(struct vmap_area **pnext, * to gigabytes. To avoid interacting with regular vmallocs, these * areas are allocated from top. * - * Despite its complicated look, this allocator is rather simple. It - * does everything top-down and scans areas from the end looking for - * matching slot. While scanning, if any of the areas overlaps with - * existing vmap_area, the base address is pulled down to fit the - * area. Scanning is repeated till all the areas fit and then all - * necessary data structures are inserted and the result is returned. + * Despite its complicated look, this allocator is rather simple. It + * does everything top-down and scans free blocks from the end looking + * for matching base. While scanning, if any of the areas do not fit the + * base address is pulled down to fit the area. Scanning is repeated till + * all the areas fit and then all necessary data structures are inserted + * and the result is returned. */ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, const size_t *sizes, int nr_vms, @@ -2508,11 +2997,12 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, { const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align); const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); - struct vmap_area **vas, *prev, *next; + struct vmap_area **vas, *va; struct vm_struct **vms; int area, area2, last_area, term_area; - unsigned long base, start, end, last_end; + unsigned long base, start, size, end, last_end; bool purged = false; + enum fit_type type; /* verify parameters and allocate data structures */ BUG_ON(offset_in_page(align) || !is_power_of_2(align)); @@ -2548,7 +3038,7 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, goto err_free2; for (area = 0; area < nr_vms; area++) { - vas[area] = kzalloc(sizeof(struct vmap_area), GFP_KERNEL); + vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL); vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL); if (!vas[area] || !vms[area]) goto err_free; @@ -2561,49 +3051,29 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, start = offsets[area]; end = start + sizes[area]; - if (!pvm_find_next_prev(vmap_area_pcpu_hole, &next, &prev)) { - base = vmalloc_end - last_end; - goto found; - } - base = pvm_determine_end(&next, &prev, align) - end; + va = pvm_find_va_enclose_addr(vmalloc_end); + base = pvm_determine_end_from_reverse(&va, align) - end; while (true) { - BUG_ON(next && next->va_end <= base + end); - BUG_ON(prev && prev->va_end > base + end); - /* * base might have underflowed, add last_end before * comparing. 
*/ - if (base + last_end < vmalloc_start + last_end) { - spin_unlock(&vmap_area_lock); - if (!purged) { - purge_vmap_area_lazy(); - purged = true; - goto retry; - } - goto err_free; - } + if (base + last_end < vmalloc_start + last_end) + goto overflow; /* - * If next overlaps, move base downwards so that it's - * right below next and then recheck. + * Fitting base has not been found. */ - if (next && next->va_start < base + end) { - base = pvm_determine_end(&next, &prev, align) - end; - term_area = area; - continue; - } + if (va == NULL) + goto overflow; /* - * If prev overlaps, shift down next and prev and move - * base so that it's right below new next and then - * recheck. + * If this VA does not fit, move base downwards and recheck. */ - if (prev && prev->va_end > base + start) { - next = prev; - prev = node_to_va(rb_prev(&next->rb_node)); - base = pvm_determine_end(&next, &prev, align) - end; + if (base + start < va->va_start || base + end > va->va_end) { + va = node_to_va(rb_prev(&va->rb_node)); + base = pvm_determine_end_from_reverse(&va, align) - end; term_area = area; continue; } @@ -2615,21 +3085,40 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, area = (area + nr_vms - 1) % nr_vms; if (area == term_area) break; + start = offsets[area]; end = start + sizes[area]; - pvm_find_next_prev(base + end, &next, &prev); + va = pvm_find_va_enclose_addr(base + end); } -found: + /* we've found a fitting base, insert all va's */ for (area = 0; area < nr_vms; area++) { - struct vmap_area *va = vas[area]; + int ret; - va->va_start = base + offsets[area]; - va->va_end = va->va_start + sizes[area]; - __insert_vmap_area(va); - } + start = base + offsets[area]; + size = sizes[area]; - vmap_area_pcpu_hole = base + offsets[last_area]; + va = pvm_find_va_enclose_addr(start); + if (WARN_ON_ONCE(va == NULL)) + /* It is a BUG(), but trigger recovery instead. */ + goto recovery; + + type = classify_va_fit_type(va, start, size); + if (WARN_ON_ONCE(type == NOTHING_FIT)) + /* It is a BUG(), but trigger recovery instead. */ + goto recovery; + + ret = adjust_va_to_fit_type(va, start, size, type); + if (unlikely(ret)) + goto recovery; + + /* Allocated area. */ + va = vas[area]; + va->va_start = start; + va->va_end = start + size; + + insert_vmap_area(va, &vmap_area_root, &vmap_area_list); + } spin_unlock(&vmap_area_lock); @@ -2641,9 +3130,38 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, kfree(vas); return vms; +recovery: + /* Remove previously inserted areas. */ + while (area--) { + __free_vmap_area(vas[area]); + vas[area] = NULL; + } + +overflow: + spin_unlock(&vmap_area_lock); + if (!purged) { + purge_vmap_area_lazy(); + purged = true; + + /* Before "retry", check if we recover. 
*/ + for (area = 0; area < nr_vms; area++) { + if (vas[area]) + continue; + + vas[area] = kmem_cache_zalloc( + vmap_area_cachep, GFP_KERNEL); + if (!vas[area]) + goto err_free; + } + + goto retry; + } + err_free: for (area = 0; area < nr_vms; area++) { - kfree(vas[area]); + if (vas[area]) + kmem_cache_free(vmap_area_cachep, vas[area]); + kfree(vms[area]); } err_free2: diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 57e2316529d0..583ae6d1e79a 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -140,7 +140,8 @@ static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT; static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie); static unsigned int ipv4_default_advmss(const struct dst_entry *dst); static unsigned int ipv4_mtu(const struct dst_entry *dst); -static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst); +static void ipv4_negative_advice(struct sock *sk, + struct dst_entry *dst); static void ipv4_link_failure(struct sk_buff *skb); static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb, u32 mtu, @@ -848,22 +849,15 @@ static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buf __ip_do_redirect(rt, skb, &fl4, true); } -static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst) +static void ipv4_negative_advice(struct sock *sk, + struct dst_entry *dst) { struct rtable *rt = (struct rtable *)dst; - struct dst_entry *ret = dst; - if (rt) { - if (dst->obsolete > 0) { - ip_rt_put(rt); - ret = NULL; - } else if ((rt->rt_flags & RTCF_REDIRECTED) || - rt->dst.expires) { - ip_rt_put(rt); - ret = NULL; - } - } - return ret; + if ((dst->obsolete > 0) || + (rt->rt_flags & RTCF_REDIRECTED) || + rt->dst.expires) + sk_dst_reset(sk); } /* diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index ee5a598a6301..cd16084d8ec6 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -2558,7 +2558,7 @@ static int __net_init tcp_sk_init(struct net *net) *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk; } - net->ipv4.sysctl_tcp_ecn = 2; + net->ipv4.sysctl_tcp_ecn = 1; net->ipv4.sysctl_tcp_ecn_fallback = 1; net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS; diff --git a/net/ipv6/route.c b/net/ipv6/route.c index b5b3fa3e33da..3b0aaa90e074 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -88,7 +88,8 @@ enum rt6_nud_state { static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie); static unsigned int ip6_default_advmss(const struct dst_entry *dst); static unsigned int ip6_mtu(const struct dst_entry *dst); -static struct dst_entry *ip6_negative_advice(struct dst_entry *); +static void ip6_negative_advice(struct sock *sk, + struct dst_entry *dst); static void ip6_dst_destroy(struct dst_entry *); static void ip6_dst_ifdown(struct dst_entry *, struct net_device *dev, int how); @@ -2281,24 +2282,24 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie) return dst_ret; } -static struct dst_entry *ip6_negative_advice(struct dst_entry *dst) +static void ip6_negative_advice(struct sock *sk, + struct dst_entry *dst) { struct rt6_info *rt = (struct rt6_info *) dst; - if (rt) { - if (rt->rt6i_flags & RTF_CACHE) { - rcu_read_lock(); - if (rt6_check_expired(rt)) { - rt6_remove_exception_rt(rt); - dst = NULL; - } - rcu_read_unlock(); - } else { - dst_release(dst); - dst = NULL; + if (rt->rt6i_flags & RTF_CACHE) { + rcu_read_lock(); + if (rt6_check_expired(rt)) { + /* counteract the dst_release() in sk_dst_reset() */ + dst_hold(dst); + sk_dst_reset(sk); + + rt6_remove_exception_rt(rt); 
} + rcu_read_unlock(); + return; } - return dst; + sk_dst_reset(sk); } static void ip6_link_failure(struct sk_buff *skb) diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 6fe578773a51..c3fbd839384e 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -2558,15 +2558,10 @@ static void xfrm_link_failure(struct sk_buff *skb) /* Impossible. Such dst must be popped before reaches point of failure. */ } -static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst) +static void xfrm_negative_advice(struct sock *sk, struct dst_entry *dst) { - if (dst) { - if (dst->obsolete) { - dst_release(dst); - dst = NULL; - } - } - return dst; + if (dst->obsolete) + sk_dst_reset(sk); } static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr) diff --git a/scripts/setlocalversion b/scripts/setlocalversion index 5ca29cbf2fa1..6c67617f4381 100755 --- a/scripts/setlocalversion +++ b/scripts/setlocalversion @@ -81,48 +81,12 @@ scm_version() echo "+" return fi - # If we are past a tagged commit (like - # "v2.6.30-rc5-302-g72357d5"), we pretty print it. - # - # Ensure the abbreviated sha1 has exactly 12 - # hex characters, to make the output - # independent of git version, local - # core.abbrev settings and/or total number of - # objects in the current repository - passing - # --abbrev=12 ensures a minimum of 12, and the - # awk substr() then picks the 'g' and first 12 - # hex chars. - if atag="$(git describe --abbrev=12 2>/dev/null)"; then - echo "$atag" | awk -F- '{printf("-%05d-%s", $(NF-1),substr($(NF),0,13))}' - - # If we don't have a tag at all we print -g{commitish}, - # again using exactly 12 hex chars. - else - head="$(echo $head | cut -c1-12)" - printf '%s%s' -g $head - fi fi # Is this git on svn? if git config --get svn-remote.svn.url >/dev/null; then printf -- '-svn%s' "`git svn find-rev $head`" fi - - # Check for uncommitted changes. - # First, with git-status, but --no-optional-locks is only - # supported in git >= 2.14, so fall back to git-diff-index if - # it fails. Note that git-diff-index does not refresh the - # index, so it may give misleading results. See - # git-update-index(1), git-diff-index(1), and git-status(1). - if { - git --no-optional-locks status -uno --porcelain 2>/dev/null || - git diff-index --name-only HEAD - } | grep -qvE '^(.. )?scripts/package'; then - printf '%s' -dirty - fi - - # All done with git - return fi # Check for mercurial and a mercurial repo. 
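The three negative_advice conversions above (ipv4, ipv6 and xfrm) follow a single pattern: instead of each dst_ops implementation dropping its own reference and handing a replacement pointer back for the core to install, the hook now receives the socket and resets sk->sk_dst_cache itself through sk_dst_reset(), which performs the swap atomically. Note that the ipv6 variant takes an extra dst_hold() first, to counteract the dst_release() done inside sk_dst_reset() while rt6_remove_exception_rt() still needs the entry. The matching caller-side change in include/net/sock.h is not part of this diff; a minimal sketch of what it presumably looks like, inferred only from the new hook signature:

static inline void __dst_negative_advice(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);

	/*
	 * The per-protocol implementation decides whether the cached
	 * route is stale and, if so, resets sk->sk_dst_cache itself;
	 * no replacement dst is returned to this caller anymore.
	 */
	if (dst && dst->ops->negative_advice)
		dst->ops->negative_advice(sk, dst);
}

This closes the race in the old contract, where the cached dst was replaced and released without the atomic exchange that sk_dst_reset() provides.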
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index 12dec5f0ad55..5b4fdf8e9a5f 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -1167,6 +1167,14 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval, } break; + case USB_ID(0x05ac, 0x110a): /* Apple USB C to 3.5mm adapter */ + if(!strcmp(kctl->id.name, "PCM Playback Volume")){ + /* Set PCM Playback Volume for channel 1 and 2 to maximum supported volume */ + snd_usb_set_cur_mix_value(cval, 1, 0, cval->max); + snd_usb_set_cur_mix_value(cval, 2, 1, cval->max); + } + break; + case USB_ID(0x046d, 0x0807): /* Logitech Webcam C500 */ case USB_ID(0x046d, 0x0808): case USB_ID(0x046d, 0x0809): diff --git a/techpack/audio/asoc/Kbuild b/techpack/audio/asoc/Kbuild index a0d8de1f66e7..08c40b0598ed 100644 --- a/techpack/audio/asoc/Kbuild +++ b/techpack/audio/asoc/Kbuild @@ -286,6 +286,3 @@ machine_dlkm-y := $(MACHINE_OBJS) obj-$(CONFIG_SND_SOC_CPE) += cpe_lsm_dlkm.o cpe_lsm_dlkm-y := $(CPE_LSM_OBJS) - -# inject some build related information -DEFINES += -DBUILD_TIMESTAMP=\"$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')\" diff --git a/techpack/audio/asoc/codecs/Kbuild b/techpack/audio/asoc/codecs/Kbuild index 91149d22b73e..f0c0df0cb9b8 100644 --- a/techpack/audio/asoc/codecs/Kbuild +++ b/techpack/audio/asoc/codecs/Kbuild @@ -267,6 +267,3 @@ mbhc_dlkm-y := $(MBHC_OBJS) obj-$(CONFIG_SND_SOC_MSM_HDMI_CODEC_RX) += hdmi_dlkm.o hdmi_dlkm-y := $(HDMICODEC_OBJS) - -# inject some build related information -DEFINES += -DBUILD_TIMESTAMP=\"$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')\" diff --git a/techpack/audio/asoc/codecs/aqt1000/Kbuild b/techpack/audio/asoc/codecs/aqt1000/Kbuild index d83955f0ad37..c7f8b895c6c6 100644 --- a/techpack/audio/asoc/codecs/aqt1000/Kbuild +++ b/techpack/audio/asoc/codecs/aqt1000/Kbuild @@ -115,6 +115,3 @@ endif # Module information used by KBuild framework obj-$(CONFIG_SND_SOC_AQT1000) += aqt1000_cdc_dlkm.o aqt1000_cdc_dlkm-y := $(AQT1000_CDC_OBJS) - -# inject some build related information -DEFINES += -DBUILD_TIMESTAMP=\"$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')\" diff --git a/techpack/audio/asoc/codecs/bolero/Kbuild b/techpack/audio/asoc/codecs/bolero/Kbuild index 15e5dc576da3..9042ebfa8e1e 100644 --- a/techpack/audio/asoc/codecs/bolero/Kbuild +++ b/techpack/audio/asoc/codecs/bolero/Kbuild @@ -154,6 +154,3 @@ tx_macro_dlkm-y := $(TX_OBJS) obj-$(CONFIG_RX_MACRO) += rx_macro_dlkm.o rx_macro_dlkm-y := $(RX_OBJS) - -# inject some build related information -DEFINES += -DBUILD_TIMESTAMP=\"$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')\" diff --git a/techpack/audio/asoc/codecs/csra66x0/Kbuild b/techpack/audio/asoc/codecs/csra66x0/Kbuild index ef599622dd25..fe77d62718f8 100644 --- a/techpack/audio/asoc/codecs/csra66x0/Kbuild +++ b/techpack/audio/asoc/codecs/csra66x0/Kbuild @@ -100,6 +100,3 @@ endif # Module information used by KBuild framework obj-$(CONFIG_SND_SOC_CSRA66X0) += csra66x0_dlkm.o csra66x0_dlkm-y := $(CSRA66X0_OBJS) - -# inject some build related information -DEFINES += -DBUILD_TIMESTAMP=\"$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')\" diff --git a/techpack/audio/asoc/codecs/ep92/Kbuild b/techpack/audio/asoc/codecs/ep92/Kbuild index fc46a72f775d..ee4db7f39caf 100644 --- a/techpack/audio/asoc/codecs/ep92/Kbuild +++ b/techpack/audio/asoc/codecs/ep92/Kbuild @@ -101,6 +101,3 @@ endif # Module information used by KBuild framework obj-$(CONFIG_SND_SOC_EP92) += ep92_dlkm.o ep92_dlkm-y := $(EP92_OBJS) - -# inject some build related information -DEFINES += -DBUILD_TIMESTAMP=\"$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')\" diff --git 
a/techpack/audio/asoc/codecs/rouleur/Kbuild b/techpack/audio/asoc/codecs/rouleur/Kbuild index b59bcb1194ef..97965788b5e6 100644 --- a/techpack/audio/asoc/codecs/rouleur/Kbuild +++ b/techpack/audio/asoc/codecs/rouleur/Kbuild @@ -115,6 +115,3 @@ rouleur_slave_dlkm-y := $(ROULEUR_SLAVE_OBJS) obj-$(CONFIG_PM2250_SPMI) += pm2250_spmi_dlkm.o pm2250_spmi_dlkm-y := $(PM2250_SPMI_OBJS) - -# inject some build related information -DEFINES += -DBUILD_TIMESTAMP=\"$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')\" diff --git a/techpack/audio/asoc/codecs/wcd934x/Kbuild b/techpack/audio/asoc/codecs/wcd934x/Kbuild index d372ed762cef..404cc8dca141 100644 --- a/techpack/audio/asoc/codecs/wcd934x/Kbuild +++ b/techpack/audio/asoc/codecs/wcd934x/Kbuild @@ -121,6 +121,3 @@ endif # Module information used by KBuild framework obj-$(CONFIG_SND_SOC_WCD934X) += wcd934x_dlkm.o wcd934x_dlkm-y := $(WCD934X_OBJS) - -# inject some build related information -DEFINES += -DBUILD_TIMESTAMP=\"$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')\" diff --git a/techpack/audio/asoc/codecs/wcd937x/Kbuild b/techpack/audio/asoc/codecs/wcd937x/Kbuild index 5bbbad7cbdf5..b81fed3efd34 100644 --- a/techpack/audio/asoc/codecs/wcd937x/Kbuild +++ b/techpack/audio/asoc/codecs/wcd937x/Kbuild @@ -123,6 +123,3 @@ wcd937x_dlkm-y := $(WCD937X_OBJS) obj-$(CONFIG_SND_SOC_WCD937X_SLAVE) += wcd937x_slave_dlkm.o wcd937x_slave_dlkm-y := $(WCD937X_SLAVE_OBJS) - -# inject some build related information -DEFINES += -DBUILD_TIMESTAMP=\"$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')\" diff --git a/techpack/audio/asoc/codecs/wcd938x/Kbuild b/techpack/audio/asoc/codecs/wcd938x/Kbuild index 3dfe142bcfc2..bc5284355db2 100644 --- a/techpack/audio/asoc/codecs/wcd938x/Kbuild +++ b/techpack/audio/asoc/codecs/wcd938x/Kbuild @@ -111,6 +111,3 @@ wcd938x_dlkm-y := $(WCD938X_OBJS) obj-$(CONFIG_SND_SOC_WCD938X_SLAVE) += wcd938x_slave_dlkm.o wcd938x_slave_dlkm-y := $(WCD938X_SLAVE_OBJS) - -# inject some build related information -DEFINES += -DBUILD_TIMESTAMP=\"$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')\" diff --git a/techpack/audio/asoc/codecs/wsa883x/Kbuild b/techpack/audio/asoc/codecs/wsa883x/Kbuild index ac608b46c6a3..a7013e40ea4a 100644 --- a/techpack/audio/asoc/codecs/wsa883x/Kbuild +++ b/techpack/audio/asoc/codecs/wsa883x/Kbuild @@ -102,6 +102,3 @@ endif # Module information used by KBuild framework obj-$(CONFIG_SND_SOC_WSA883X) += wsa883x_dlkm.o wsa883x_dlkm-y := $(WSA883X_OBJS) - -# inject some build related information -DEFINES += -DBUILD_TIMESTAMP=\"$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')\" diff --git a/techpack/audio/dsp/Kbuild b/techpack/audio/dsp/Kbuild index 490b458bfc2b..09c77a2f9171 100644 --- a/techpack/audio/dsp/Kbuild +++ b/techpack/audio/dsp/Kbuild @@ -262,6 +262,3 @@ q6_pdr_dlkm-y := $(QDSP6_PDR_OBJS) obj-$(CONFIG_MSM_QDSP6_NOTIFIER) += q6_notifier_dlkm.o q6_notifier_dlkm-y := $(QDSP6_NOTIFIER_OBJS) - -# inject some build related information -DEFINES += -DBUILD_TIMESTAMP=\"$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')\" diff --git a/techpack/audio/dsp/codecs/Kbuild b/techpack/audio/dsp/codecs/Kbuild index 808bfe22a076..6a6a3da96ccc 100644 --- a/techpack/audio/dsp/codecs/Kbuild +++ b/techpack/audio/dsp/codecs/Kbuild @@ -161,6 +161,3 @@ endif # Module information used by KBuild framework obj-$(CONFIG_MSM_QDSP6V2_CODECS) += native_dlkm.o native_dlkm-y := $(NATIVE_OBJS) - -# inject some build related information -DEFINES += -DBUILD_TIMESTAMP=\"$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')\" diff --git a/techpack/audio/ipc/Kbuild b/techpack/audio/ipc/Kbuild index 755d6d8eacd5..d63930a06391 100644 --- 
a/techpack/audio/ipc/Kbuild +++ b/techpack/audio/ipc/Kbuild @@ -190,6 +190,3 @@ apr_dlkm-y := $(APRV_GLINK) obj-$(CONFIG_WCD_DSP_GLINK) += wglink_dlkm.o wglink_dlkm-y := $(WDSP_GLINK) - -# inject some build related information -CDEFINES += -DBUILD_TIMESTAMP=\"$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')\" diff --git a/techpack/audio/soc/Kbuild b/techpack/audio/soc/Kbuild index 4fedbddcf460..d7d0c32a52fb 100644 --- a/techpack/audio/soc/Kbuild +++ b/techpack/audio/soc/Kbuild @@ -211,6 +211,3 @@ swr_ctrl_dlkm-y := $(SWR_CTRL_OBJS) obj-$(CONFIG_WCD_SPI_AC) += wcd_spi_acc_ctl_dlkm.o wcd_spi_acc_ctl_dlkm-y := $(WCD_SPI_ACC_CTL_OBJS) - -# inject some build related information -DEFINES += -DBUILD_TIMESTAMP=\"$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')\" diff --git a/techpack/camera/drivers/cam_smmu/cam_smmu_api.c b/techpack/camera/drivers/cam_smmu/cam_smmu_api.c index cc8650a39c8e..5ee39ed7f232 100644 --- a/techpack/camera/drivers/cam_smmu/cam_smmu_api.c +++ b/techpack/camera/drivers/cam_smmu/cam_smmu_api.c @@ -2666,14 +2666,13 @@ static int cam_smmu_map_stage2_buffer_and_add_to_list(int idx, int ion_fd, /* add to the list */ list_add(&mapping_info->list, &iommu_cb_set.cb_info[idx].smmu_buf_list); - return 0; - err_unmap_sg: dma_buf_unmap_attachment(attach, table, dma_dir); err_detach: dma_buf_detach(dmabuf, attach); err_put: - dma_buf_put(dmabuf); + if (rc) + dma_buf_put(dmabuf); err_out: return rc; } diff --git a/techpack/camera/drivers/cam_utils/Makefile b/techpack/camera/drivers/cam_utils/Makefile index f691779b9cd1..34c17f244344 100644 --- a/techpack/camera/drivers/cam_utils/Makefile +++ b/techpack/camera/drivers/cam_utils/Makefile @@ -8,4 +8,3 @@ ccflags-y += -I$(srctree)/techpack/camera/drivers/cam_smmu/ obj-$(CONFIG_SPECTRA_CAMERA) += cam_utils.o cam_utils-objs += cam_soc_util.o cam_io_util.o cam_packet_util.o cam_utils-objs += cam_trace.o cam_common_util.o cam_cx_ipeak.o -obj-$(CONFIG_SPECTRA_CAMERA) += cam_debug_util.o diff --git a/techpack/camera/drivers/cam_utils/cam_debug_util.h b/techpack/camera/drivers/cam_utils/cam_debug_util.h index 181a1558a904..caf95d1dcb41 100644 --- a/techpack/camera/drivers/cam_utils/cam_debug_util.h +++ b/techpack/camera/drivers/cam_utils/cam_debug_util.h @@ -55,8 +55,10 @@ * @fmt : Formatted string which needs to be print in the log * */ -void cam_debug_log(unsigned int module_id, const char *func, const int line, - const char *fmt, ...); +static inline void cam_debug_log(unsigned int module_id, const char *func, + const int line, const char *fmt, ...) +{ +} /* * cam_get_module_name() @@ -65,7 +67,10 @@ void cam_debug_log(unsigned int module_id, const char *func, const int line, * * @module_id : Module ID which is using this function */ -const char *cam_get_module_name(unsigned int module_id); +static inline const char *cam_get_module_name(unsigned int module_id) +{ + return NULL; +} /* * CAM_ERR @@ -75,9 +80,9 @@ const char *cam_get_module_name(unsigned int module_id); * @fmt : Formatted string which needs to be print in log * @args : Arguments which needs to be print in log */ -#define CAM_ERR(__module, fmt, args...) \ - pr_info("CAM_ERR: %s: %s: %d " fmt "\n", \ - cam_get_module_name(__module), __func__, __LINE__, ##args) +#define CAM_ERR(__module, fmt, args...) 
\ + cam_debug_log(__module, __func__, __LINE__, fmt, ##args) + /* * CAM_WARN * @brief : This Macro will print warning logs @@ -86,9 +91,9 @@ const char *cam_get_module_name(unsigned int module_id); * @fmt : Formatted string which needs to be print in log * @args : Arguments which needs to be print in log */ -#define CAM_WARN(__module, fmt, args...) \ - pr_info("CAM_WARN: %s: %s: %d " fmt "\n", \ - cam_get_module_name(__module), __func__, __LINE__, ##args) +#define CAM_WARN(__module, fmt, args...) \ + cam_debug_log(__module, __func__, __LINE__, fmt, ##args) + /* * CAM_INFO * @brief : This Macro will print Information logs @@ -97,9 +102,8 @@ const char *cam_get_module_name(unsigned int module_id); * @fmt : Formatted string which needs to be print in log * @args : Arguments which needs to be print in log */ -#define CAM_INFO(__module, fmt, args...) \ - pr_info("CAM_INFO: %s: %s: %d " fmt "\n", \ - cam_get_module_name(__module), __func__, __LINE__, ##args) +#define CAM_INFO(__module, fmt, args...) \ + cam_debug_log(__module, __func__, __LINE__, fmt, ##args) /* * CAM_INFO_RATE_LIMIT @@ -109,9 +113,8 @@ const char *cam_get_module_name(unsigned int module_id); * @fmt : Formatted string which needs to be print in log * @args : Arguments which needs to be print in log */ -#define CAM_INFO_RATE_LIMIT(__module, fmt, args...) \ - pr_info_ratelimited("CAM_INFO: %s: %s: %d " fmt "\n", \ - cam_get_module_name(__module), __func__, __LINE__, ##args) +#define CAM_INFO_RATE_LIMIT(__module, fmt, args...) \ + cam_debug_log(__module, __func__, __LINE__, fmt, ##args) /* * CAM_DBG @@ -121,16 +124,16 @@ const char *cam_get_module_name(unsigned int module_id); * @fmt : Formatted string which needs to be print in log * @args : Arguments which needs to be print in log */ -#define CAM_DBG(__module, fmt, args...) \ +#define CAM_DBG(__module, fmt, args...) \ cam_debug_log(__module, __func__, __LINE__, fmt, ##args) /* * CAM_ERR_RATE_LIMIT * @brief : This Macro will print error print logs with ratelimit */ -#define CAM_ERR_RATE_LIMIT(__module, fmt, args...) \ - pr_info_ratelimited("CAM_ERR: %s: %s: %d " fmt "\n", \ - cam_get_module_name(__module), __func__, __LINE__, ##args) +#define CAM_ERR_RATE_LIMIT(__module, fmt, args...) \ + cam_debug_log(__module, __func__, __LINE__, fmt, ##args) + /* * CAM_WARN_RATE_LIMIT * @brief : This Macro will print warning logs with ratelimit @@ -139,9 +142,8 @@ const char *cam_get_module_name(unsigned int module_id); * @fmt : Formatted string which needs to be print in log * @args : Arguments which needs to be print in log */ -#define CAM_WARN_RATE_LIMIT(__module, fmt, args...) \ - pr_info_ratelimited("CAM_WARN: %s: %s: %d " fmt "\n", \ - cam_get_module_name(__module), __func__, __LINE__, ##args) +#define CAM_WARN_RATE_LIMIT(__module, fmt, args...) \ + cam_debug_log(__module, __func__, __LINE__, fmt, ##args) /* * CAM_WARN_RATE_LIMIT_CUSTOM @@ -153,17 +155,7 @@ const char *cam_get_module_name(unsigned int module_id); * @fmt : Formatted string which needs to be print in log * @args : Arguments which needs to be print in log */ -#define CAM_WARN_RATE_LIMIT_CUSTOM(__module, interval, burst, fmt, args...) \ - ({ \ - static DEFINE_RATELIMIT_STATE(_rs, \ - (interval * HZ), \ - burst); \ - if (__ratelimit(&_rs)) \ - pr_info( \ - "CAM_WARN: %s: %s: %d " fmt "\n", \ - cam_get_module_name(__module), __func__, \ - __LINE__, ##args); \ - }) +#define CAM_WARN_RATE_LIMIT_CUSTOM(__module, interval, burst, fmt, args...) 
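With the empty expansion above, an invocation such as CAM_WARN_RATE_LIMIT_CUSTOM(CAM_ISP, 1, 5, "x=%d", x); now compiles down to a bare semicolon: the arguments are neither evaluated nor type-checked against the format string. A sketch of an alternative stub that would keep the compile-time format checking at zero runtime cost, using no_printk() from <linux/printk.h> (illustrative only; the module id and the choice of stub are assumptions, not what this patch does):

#define CAM_WARN_RATE_LIMIT_CUSTOM(__module, interval, burst, fmt, args...) \
	no_printk(fmt, ##args)	/* type-checks fmt/args, emits no code */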
/* * CAM_INFO_RATE_LIMIT_CUSTOM @@ -175,17 +167,7 @@ const char *cam_get_module_name(unsigned int module_id); * @fmt : Formatted string which needs to be print in log * @args : Arguments which needs to be print in log */ -#define CAM_INFO_RATE_LIMIT_CUSTOM(__module, interval, burst, fmt, args...) \ - ({ \ - static DEFINE_RATELIMIT_STATE(_rs, \ - (interval * HZ), \ - burst); \ - if (__ratelimit(&_rs)) \ - pr_info( \ - "CAM_INFO: %s: %s: %d " fmt "\n", \ - cam_get_module_name(__module), __func__, \ - __LINE__, ##args); \ - }) +#define CAM_INFO_RATE_LIMIT_CUSTOM(__module, interval, burst, fmt, args...) /* * CAM_ERR_RATE_LIMIT_CUSTOM @@ -197,16 +179,6 @@ const char *cam_get_module_name(unsigned int module_id); * @fmt : Formatted string which needs to be print in log * @args : Arguments which needs to be print in log */ -#define CAM_ERR_RATE_LIMIT_CUSTOM(__module, interval, burst, fmt, args...) \ - ({ \ - static DEFINE_RATELIMIT_STATE(_rs, \ - (interval * HZ), \ - burst); \ - if (__ratelimit(&_rs)) \ - pr_info( \ - "CAM_ERR: %s: %s: %d " fmt "\n", \ - cam_get_module_name(__module), __func__, \ - __LINE__, ##args); \ - }) +#define CAM_ERR_RATE_LIMIT_CUSTOM(__module, interval, burst, fmt, args...) #endif /* _CAM_DEBUG_UTIL_H_ */ diff --git a/techpack/display/config/saipdisp.conf b/techpack/display/config/saipdisp.conf index dbbf3c847dbb..b225d21642ec 100644 --- a/techpack/display/config/saipdisp.conf +++ b/techpack/display/config/saipdisp.conf @@ -2,12 +2,9 @@ export CONFIG_DRM_MSM=y export CONFIG_DRM_MSM_SDE=y export CONFIG_SYNC_FILE=y export CONFIG_DRM_MSM_DSI=y -export CONFIG_DRM_MSM_DP=y export CONFIG_QCOM_MDSS_DP_PLL=y export CONFIG_DSI_PARSER=y export CONFIG_DRM_SDE_WB=y -export CONFIG_DRM_MSM_REGISTER_LOGGING=y export CONFIG_QCOM_MDSS_PLL=y export CONFIG_MSM_SDE_ROTATOR=y -export CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y export CONFIG_DRM_SDE_RSC=y diff --git a/techpack/display/config/saipdispconf.h b/techpack/display/config/saipdispconf.h index 049024839701..f3e3d1773232 100644 --- a/techpack/display/config/saipdispconf.h +++ b/techpack/display/config/saipdispconf.h @@ -7,13 +7,9 @@ #define CONFIG_DRM_MSM_SDE 1 #define CONFIG_SYNC_FILE 1 #define CONFIG_DRM_MSM_DSI 1 -#define CONFIG_DRM_MSM_DP 1 #define CONFIG_QCOM_MDSS_DP_PLL 1 #define CONFIG_DSI_PARSER 1 #define CONFIG_DRM_SDE_WB 1 -#define CONFIG_DRM_MSM_REGISTER_LOGGING 1 -#define CONFIG_DRM_SDE_EVTLOG_DEBUG 1 #define CONFIG_QCOM_MDSS_PLL 1 #define CONFIG_MSM_SDE_ROTATOR 1 -#define CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG 1 #define CONFIG_DRM_SDE_RSC 1 diff --git a/techpack/display/msm/dsi/dsi_backlight.c b/techpack/display/msm/dsi/dsi_backlight.c index e4cb20c00c6e..faf76d019078 100644 --- a/techpack/display/msm/dsi/dsi_backlight.c +++ b/techpack/display/msm/dsi/dsi_backlight.c @@ -690,7 +690,7 @@ static int dsi_backlight_update_status(struct backlight_device *bd) dsi_backlight_hbm_dimming_restart(bl); if (dsi_panel_initialized(panel) && bl->update_bl) { - pr_info("req:%d bl:%d state:0x%x\n", + pr_debug("req:%d bl:%d state:0x%x\n", bd->props.brightness, bl_lvl, bd->props.state); dsi_panel_bl_elvss_update(bd, ELVSS_PRE_UPDATE); @@ -1153,7 +1153,7 @@ int dsi_backlight_early_dpms(struct dsi_backlight_config *bl, int power_mode) if (!bd) return 0; - pr_info("power_mode:%d state:0x%0x\n", power_mode, bd->props.state); + pr_debug("power_mode:%d state:0x%0x\n", power_mode, bd->props.state); mutex_lock(&bl->state_lock); state = get_state_after_dpms(bl, power_mode); @@ -1208,7 +1208,7 @@ int dsi_backlight_late_dpms(struct dsi_backlight_config *bl, int 
power_mode) backlight_update_status(bd); sysfs_notify(&bd->dev.kobj, NULL, "state"); - pr_info("sysfs_notify state:0x%0x\n", bd->props.state); + pr_debug("sysfs_notify state:0x%0x\n", bd->props.state); return 0; } diff --git a/techpack/display/msm/dsi/dsi_display.c b/techpack/display/msm/dsi/dsi_display.c index 2cf726b92d1c..df4681e40738 100644 --- a/techpack/display/msm/dsi/dsi_display.c +++ b/techpack/display/msm/dsi/dsi_display.c @@ -52,6 +52,8 @@ static inline bool is_lp_mode(int power_mode) power_mode == SDE_MODE_DPMS_LP2; } +static unsigned int cur_refresh_rate = 60; + static void dsi_display_mask_ctrl_error_interrupts(struct dsi_display *display, u32 mask, bool enable) { @@ -6732,9 +6734,9 @@ int dsi_display_set_mode(struct dsi_display *display, goto error; } - DSI_INFO("mdp_transfer_time_us=%d us\n", + DSI_DEBUG("mdp_transfer_time_us=%d us\n", adj_mode.priv_info->mdp_transfer_time_us); - DSI_INFO("hactive= %d,vactive= %d,fps=%d\n", + DSI_DEBUG("hactive= %d,vactive= %d,fps=%d\n", timing.h_active, timing.v_active, timing.refresh_rate); @@ -7533,6 +7535,12 @@ int dsi_display_pre_commit(void *display, return rc; } +unsigned int dsi_panel_get_refresh_rate(void) +{ + return READ_ONCE(cur_refresh_rate); +} +EXPORT_SYMBOL(dsi_panel_get_refresh_rate); + int dsi_display_enable(struct dsi_display *display) { int rc = 0; @@ -7572,6 +7580,7 @@ int dsi_display_enable(struct dsi_display *display) mutex_lock(&display->display_lock); mode = display->panel->cur_mode; + WRITE_ONCE(cur_refresh_rate, mode->timing.refresh_rate); if (mode->dsi_mode_flags & DSI_MODE_FLAG_DMS) { rc = dsi_panel_post_switch(display->panel); diff --git a/techpack/display/msm/sde/sde_crtc.c b/techpack/display/msm/sde/sde_crtc.c index 889980f62ec1..14dc93e56502 100644 --- a/techpack/display/msm/sde/sde_crtc.c +++ b/techpack/display/msm/sde/sde_crtc.c @@ -1640,9 +1640,11 @@ static void _sde_crtc_blend_setup(struct drm_crtc *crtc, mixer[i].hw_ctl); /* clear dim_layer settings */ - lm = mixer[i].hw_lm; - if (lm->ops.clear_dim_layer) - lm->ops.clear_dim_layer(lm); + if (sde_crtc_state->num_dim_layers > 0) { + lm = mixer[i].hw_lm; + if (lm->ops.clear_dim_layer) + lm->ops.clear_dim_layer(lm); + } } _sde_crtc_swap_mixers_for_right_partial_update(crtc); @@ -6333,7 +6335,7 @@ static void __sde_crtc_early_wakeup_work(struct kthread_work *work) } if (!sde_crtc->enabled) { - SDE_INFO("sde crtc is not enabled\n"); + SDE_DEBUG("sde crtc is not enabled\n"); return; } diff --git a/techpack/display/msm/sde/sde_hw_catalog.c b/techpack/display/msm/sde/sde_hw_catalog.c index 6a20a15c55d0..f5e4ccbd99ac 100644 --- a/techpack/display/msm/sde/sde_hw_catalog.c +++ b/techpack/display/msm/sde/sde_hw_catalog.c @@ -1627,6 +1627,7 @@ static int sde_sspp_parse_dt(struct device_node *np, sde_cfg->mdp[j].clk_ctrls[sspp->clk_ctrl].bit_off = PROP_BITVALUE_ACCESS(prop_value, SSPP_CLK_CTRL, i, 1); + sde_cfg->mdp[j].clk_ctrls[sspp->clk_ctrl].val = -1; } SDE_DEBUG( @@ -2140,6 +2141,7 @@ static int sde_wb_parse_dt(struct device_node *np, struct sde_mdss_cfg *sde_cfg) sde_cfg->mdp[j].clk_ctrls[wb->clk_ctrl].bit_off = PROP_BITVALUE_ACCESS(prop_value, WB_CLK_CTRL, i, 1); + sde_cfg->mdp[j].clk_ctrls[wb->clk_ctrl].val = -1; } wb->format_list = sde_cfg->wb_formats; @@ -3636,6 +3638,7 @@ static int sde_parse_reg_dma_dt(struct device_node *np, sde_cfg->mdp[i].clk_ctrls[sde_cfg->dma_cfg.clk_ctrl].bit_off = PROP_BITVALUE_ACCESS(prop_value, REG_DMA_CLK_CTRL, 0, 1); + sde_cfg->mdp[i].clk_ctrls[sde_cfg->dma_cfg.clk_ctrl].val = -1; } end: diff --git 
a/techpack/display/msm/sde/sde_hw_catalog.h b/techpack/display/msm/sde/sde_hw_catalog.h index 8a4d1930c1ef..00a12d498e3e 100644 --- a/techpack/display/msm/sde/sde_hw_catalog.h +++ b/techpack/display/msm/sde/sde_hw_catalog.h @@ -739,10 +739,12 @@ enum sde_clk_ctrl_type { /* struct sde_clk_ctrl_reg : Clock control register * @reg_off: register offset * @bit_off: bit offset + * @val: current bit value */ struct sde_clk_ctrl_reg { u32 reg_off; u32 bit_off; + int val; }; /* struct sde_mdp_cfg : MDP TOP-BLK instance info diff --git a/techpack/display/msm/sde/sde_hw_mdss.h b/techpack/display/msm/sde/sde_hw_mdss.h index 15f0b91d3c2a..62a75c65d29f 100644 --- a/techpack/display/msm/sde/sde_hw_mdss.h +++ b/techpack/display/msm/sde/sde_hw_mdss.h @@ -85,11 +85,11 @@ enum sde_format_flags { #define SDE_VSYNC_SOURCE_INTF_1 4 #define SDE_VSYNC_SOURCE_INTF_2 5 #define SDE_VSYNC_SOURCE_INTF_3 6 -#define SDE_VSYNC_SOURCE_WD_TIMER_4 11 -#define SDE_VSYNC_SOURCE_WD_TIMER_3 12 -#define SDE_VSYNC_SOURCE_WD_TIMER_2 13 -#define SDE_VSYNC_SOURCE_WD_TIMER_1 14 -#define SDE_VSYNC_SOURCE_WD_TIMER_0 15 +#define SDE_VSYNC_SOURCE_WD_TIMER_4 0x11 +#define SDE_VSYNC_SOURCE_WD_TIMER_3 0x12 +#define SDE_VSYNC_SOURCE_WD_TIMER_2 0x13 +#define SDE_VSYNC_SOURCE_WD_TIMER_1 0x14 +#define SDE_VSYNC_SOURCE_WD_TIMER_0 0x15 enum sde_hw_blk_type { SDE_HW_BLK_TOP = 0, diff --git a/techpack/display/msm/sde/sde_hw_top.c b/techpack/display/msm/sde/sde_hw_top.c index b02cd17cbb28..5af23dff8bc5 100644 --- a/techpack/display/msm/sde/sde_hw_top.c +++ b/techpack/display/msm/sde/sde_hw_top.c @@ -177,6 +177,7 @@ static void sde_hw_setup_cdm_output(struct sde_hw_mdp *mdp, static bool sde_hw_setup_clk_force_ctrl(struct sde_hw_mdp *mdp, enum sde_clk_ctrl_type clk_ctrl, bool enable) { + struct sde_clk_ctrl_reg *ctrl_reg; struct sde_hw_blk_reg_map *c; u32 reg_off, bit_off; u32 reg_val, new_val; @@ -190,8 +191,12 @@ static bool sde_hw_setup_clk_force_ctrl(struct sde_hw_mdp *mdp, if (clk_ctrl <= SDE_CLK_CTRL_NONE || clk_ctrl >= SDE_CLK_CTRL_MAX) return false; - reg_off = mdp->caps->clk_ctrls[clk_ctrl].reg_off; - bit_off = mdp->caps->clk_ctrls[clk_ctrl].bit_off; + ctrl_reg = (struct sde_clk_ctrl_reg *)&mdp->caps->clk_ctrls[clk_ctrl]; + if (cmpxchg(&ctrl_reg->val, !enable, enable) == enable) + return enable; + + reg_off = ctrl_reg->reg_off; + bit_off = ctrl_reg->bit_off; reg_val = SDE_REG_READ(c, reg_off); diff --git a/techpack/video/msm/vidc/msm_vidc_debug.c b/techpack/video/msm/vidc/msm_vidc_debug.c index aea639869f3c..5baba12f3108 100644 --- a/techpack/video/msm/vidc/msm_vidc_debug.c +++ b/techpack/video/msm/vidc/msm_vidc_debug.c @@ -10,8 +10,7 @@ #include "vidc_hfi_api.h" #include -int msm_vidc_debug = VIDC_ERR | VIDC_PRINTK | - FW_ERROR | FW_FATAL | FW_FTRACE; +int msm_vidc_debug = 0; EXPORT_SYMBOL(msm_vidc_debug); bool msm_vidc_lossless_encode = !true;
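The sde_hw_setup_clk_force_ctrl() change works together with the new val field, which sde_hw_catalog.c seeds to -1 for every parsed clock control: cmpxchg(&ctrl_reg->val, !enable, enable) atomically checks whether the force-clock bit is already in the requested state and claims the transition when it is not, so a repeated request returns early without an MMIO read-modify-write. A minimal sketch of the idiom; the tail of the patched function is not shown in this diff, so the write-back of the programmed state and the surrounding serialization are assumptions:

static bool set_clk_force_cached(struct sde_hw_blk_reg_map *c,
				 struct sde_clk_ctrl_reg *ctrl_reg,
				 bool enable)
{
	u32 reg_val, new_val;
	bool prev;

	/*
	 * cmpxchg() returns the old value: if it already equals 'enable',
	 * the bit was programmed by an earlier call and the register
	 * access is skipped. An old value of -1 (never programmed) or
	 * !enable falls through to the real read-modify-write.
	 */
	if (cmpxchg(&ctrl_reg->val, !enable, enable) == enable)
		return enable;

	reg_val = SDE_REG_READ(c, ctrl_reg->reg_off);
	prev = !!(reg_val & BIT(ctrl_reg->bit_off));

	new_val = enable ? reg_val | BIT(ctrl_reg->bit_off)
			 : reg_val & ~BIT(ctrl_reg->bit_off);
	SDE_REG_WRITE(c, ctrl_reg->reg_off, new_val);

	/* Publish the programmed state so the initial -1 case caches too. */
	WRITE_ONCE(ctrl_reg->val, enable);

	return prev;
}

Worth noting in the same group of hunks: the SDE_VSYNC_SOURCE_WD_TIMER_* selectors change from decimal 11..15 to hex 0x11..0x15, i.e. different register values rather than a formatting cleanup.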