kernel/generic: Restore kernel files for v6.6

This is an automatically generated commit which aids following kernel patch
history; without it, git would see the move and copy as a rename, thus
defeating the purpose.

For the original discussion see:
https://lists.openwrt.org/pipermail/openwrt-devel/2023-October/041673.html

Signed-off-by: Mieczyslaw Nalewaj <namiltd@yahoo.com>
Link: https://github.com/openwrt/openwrt/pull/16547
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
Mieczyslaw Nalewaj 2025-04-30 00:02:32 +02:00 committed by Christian Marangi
parent 480537ed36
commit 456b7a5d48
627 changed files with 108805 additions and 0 deletions


@@ -0,0 +1,56 @@
From 4c8a49854130da0117a0fdb858551824919a2389 Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo@kernel.org>
Date: Tue, 27 Feb 2024 09:58:15 +0100
Subject: [PATCH] smp: Avoid 'setup_max_cpus' namespace collision/shadowing
bringup_nonboot_cpus() gets passed the 'setup_max_cpus'
variable in init/main.c - which is also the name of the parameter,
shadowing the name.
To reduce confusion and to allow the 'setup_max_cpus' value
to be #defined in the <linux/smp.h> header, use the 'max_cpus'
name for the function parameter name.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
---
include/linux/cpu.h | 2 +-
kernel/cpu.c | 6 +++---
2 files changed, 4 insertions(+), 4 deletions(-)
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -109,7 +109,7 @@ void notify_cpu_starting(unsigned int cp
extern void cpu_maps_update_begin(void);
extern void cpu_maps_update_done(void);
int bringup_hibernate_cpu(unsigned int sleep_cpu);
-void bringup_nonboot_cpus(unsigned int setup_max_cpus);
+void bringup_nonboot_cpus(unsigned int max_cpus);
#else /* CONFIG_SMP */
#define cpuhp_tasks_frozen 0
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -1905,17 +1905,17 @@ static bool __init cpuhp_bringup_cpus_pa
static inline bool cpuhp_bringup_cpus_parallel(unsigned int ncpus) { return false; }
#endif /* CONFIG_HOTPLUG_PARALLEL */
-void __init bringup_nonboot_cpus(unsigned int setup_max_cpus)
+void __init bringup_nonboot_cpus(unsigned int max_cpus)
{
- if (!setup_max_cpus)
+ if (!max_cpus)
return;
/* Try parallel bringup optimization if enabled */
- if (cpuhp_bringup_cpus_parallel(setup_max_cpus))
+ if (cpuhp_bringup_cpus_parallel(max_cpus))
return;
/* Full per CPU serialized bringup */
- cpuhp_bringup_mask(cpu_present_mask, setup_max_cpus, CPUHP_ONLINE);
+ cpuhp_bringup_mask(cpu_present_mask, max_cpus, CPUHP_ONLINE);
}
#ifdef CONFIG_PM_SLEEP_SMP
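
As a side note, the shadowing described above is easy to reproduce outside the kernel; the following is a minimal user-space sketch (invented names, not code from this patch) showing how a parameter of the same name hides a file-scope variable, which is exactly what -Wshadow complains about:

```c
/* shadow_demo.c - illustrative only; build with: gcc -Wshadow shadow_demo.c */
#include <stdio.h>

static unsigned int setup_max_cpus = 8;	/* hypothetical file-scope value */

/* The parameter shadows the file-scope name, so the two can silently
 * diverge inside the function body. */
static void bringup(unsigned int setup_max_cpus)
{
	printf("parameter value: %u\n", setup_max_cpus);
}

int main(void)
{
	bringup(2);
	printf("file-scope value: %u\n", setup_max_cpus);
	return 0;
}
```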


@@ -0,0 +1,58 @@
From 443df175be581618d6ff781dc3af3aa1a9ba789d Mon Sep 17 00:00:00 2001
From: Tony Ambardar <Tony.Ambardar@gmail.com>
Date: Fri, 31 May 2024 23:55:55 -0700
Subject: [PATCH 1/2] compiler_types.h: Define __retain for
__attribute__((__retain__))
Some code includes the __used macro to prevent functions and data from
being optimized out. This macro implements __attribute__((__used__)), which
operates at the compiler and IR-level, and so still allows a linker to
remove objects intended to be kept.
Compilers supporting __attribute__((__retain__)) can address this gap by
setting the flag SHF_GNU_RETAIN on the section of a function/variable,
indicating to the linker the object should be retained. This attribute is
available since gcc 11, clang 13, and binutils 2.36.
Provide a __retain macro implementing __attribute__((__retain__)), whose
first user will be the '__bpf_kfunc' tag.
Link: https://lore.kernel.org/bpf/ZlmGoT9KiYLZd91S@krava/T/
Cc: stable@vger.kernel.org # v6.6+
Signed-off-by: Tony Ambardar <Tony.Ambardar@gmail.com>
---
include/linux/compiler_types.h | 23 +++++++++++++++++++++++
1 file changed, 23 insertions(+)
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -145,6 +145,29 @@ static inline void __chk_io_ptr(const vo
#define __has_builtin(x) (0)
#endif
+/*
+ * Annotating a function/variable with __retain tells the compiler to place
+ * the object in its own section and set the flag SHF_GNU_RETAIN. This flag
+ * instructs the linker to retain the object during garbage-cleanup or LTO
+ * phases.
+ *
+ * Note that the __used macro is also used to prevent functions or data
+ * being optimized out, but operates at the compiler/IR-level and may still
+ * allow unintended removal of objects during linking.
+ *
+ * Optional: only supported since gcc >= 11, clang >= 13
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-retain-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#retain
+ */
+#if __has_attribute(__retain__) && \
+ (defined(CONFIG_LD_DEAD_CODE_DATA_ELIMINATION) || \
+ defined(CONFIG_LTO_CLANG))
+# define __retain __attribute__((__retain__))
+#else
+# define __retain
+#endif
+
/* Compiler specific macros. */
#ifdef __clang__
#include <linux/compiler-clang.h>
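
To see the difference between the two attributes outside the kernel build, here is a hedged user-space sketch (not kernel code; assumes gcc >= 11 or clang >= 13 with binutils >= 2.36, as noted in the patch):

```c
/*
 * retain_demo.c - illustrative only.
 * Build: gcc -O2 -ffunction-sections -Wl,--gc-sections retain_demo.c -o retain_demo
 */
#include <stdio.h>

/* Emitted by the compiler, but --gc-sections may still drop its section. */
__attribute__((__used__))
static int only_used(void)
{
	return 1;
}

/* Additionally marks the section SHF_GNU_RETAIN, so the linker keeps it. */
__attribute__((__used__, __retain__))
static int used_and_retained(void)
{
	return 2;
}

int main(void)
{
	/* Neither helper is referenced; compare the surviving symbols with
	 * `nm retain_demo` and look for the 'R' flag in `readelf -SW`. */
	printf("built\n");
	return 0;
}
```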


@@ -0,0 +1,65 @@
From ac507ed9882fd91a94657d68fe9ceac04b957103 Mon Sep 17 00:00:00 2001
From: Tony Ambardar <Tony.Ambardar@gmail.com>
Date: Sat, 1 Jun 2024 00:00:21 -0700
Subject: [PATCH 2/2] bpf: Harden __bpf_kfunc tag against linker kfunc removal
BPF kfuncs are often not directly referenced and may be inadvertently
removed by optimization steps during kernel builds, thus the __bpf_kfunc
tag mitigates against this removal by including the __used macro. However,
this macro alone does not prevent removal during linking, and may still
yield build warnings (e.g. on mips64el):
LD vmlinux
BTFIDS vmlinux
WARN: resolve_btfids: unresolved symbol bpf_verify_pkcs7_signature
WARN: resolve_btfids: unresolved symbol bpf_lookup_user_key
WARN: resolve_btfids: unresolved symbol bpf_lookup_system_key
WARN: resolve_btfids: unresolved symbol bpf_key_put
WARN: resolve_btfids: unresolved symbol bpf_iter_task_next
WARN: resolve_btfids: unresolved symbol bpf_iter_css_task_new
WARN: resolve_btfids: unresolved symbol bpf_get_file_xattr
WARN: resolve_btfids: unresolved symbol bpf_ct_insert_entry
WARN: resolve_btfids: unresolved symbol bpf_cgroup_release
WARN: resolve_btfids: unresolved symbol bpf_cgroup_from_id
WARN: resolve_btfids: unresolved symbol bpf_cgroup_acquire
WARN: resolve_btfids: unresolved symbol bpf_arena_free_pages
NM System.map
SORTTAB vmlinux
OBJCOPY vmlinux.32
Update the __bpf_kfunc tag to better guard against linker optimization by
including the new __retain compiler macro, which fixes the warnings above.
Verify the __retain macro with readelf by checking object flags for 'R':
$ readelf -Wa kernel/trace/bpf_trace.o
Section Headers:
[Nr] Name Type Address Off Size ES Flg Lk Inf Al
...
[178] .text.bpf_key_put PROGBITS 00000000 6420 0050 00 AXR 0 0 8
...
Key to Flags:
...
R (retain), D (mbind), p (processor specific)
Link: https://lore.kernel.org/bpf/ZlmGoT9KiYLZd91S@krava/T/
Reported-by: kernel test robot <lkp@intel.com>
Closes: https://lore.kernel.org/r/202401211357.OCX9yllM-lkp@intel.com/
Fixes: 57e7c169cd6a ("bpf: Add __bpf_kfunc tag for marking kernel functions as kfuncs")
Cc: stable@vger.kernel.org # v6.6+
Signed-off-by: Tony Ambardar <Tony.Ambardar@gmail.com>
---
include/linux/btf.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -81,7 +81,7 @@
* as to avoid issues such as the compiler inlining or eliding either a static
* kfunc, or a global kfunc in an LTO build.
*/
-#define __bpf_kfunc __used noinline
+#define __bpf_kfunc __used __retain noinline
/*
* Return the name of the passed struct, if exists, or halt the build if for


@@ -0,0 +1,59 @@
From fab45b962749184e1a1a57c7c583782b78fad539 Mon Sep 17 00:00:00 2001
From: Sam James <sam@gentoo.org>
Date: Tue, 13 Aug 2024 20:49:06 +0100
Subject: [PATCH] libbpf: Workaround -Wmaybe-uninitialized false positive
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
In `elf_close`, we get this with GCC 15 -O3 (at least):
```
In function elf_close,
inlined from elf_close at elf.c:53:6,
inlined from elf_find_func_offset_from_file at elf.c:384:2:
elf.c:57:9: warning: elf_fd.elf may be used uninitialized [-Wmaybe-uninitialized]
57 | elf_end(elf_fd->elf);
| ^~~~~~~~~~~~~~~~~~~~
elf.c: In function elf_find_func_offset_from_file:
elf.c:377:23: note: elf_fd.elf was declared here
377 | struct elf_fd elf_fd;
| ^~~~~~
In function elf_close,
inlined from elf_close at elf.c:53:6,
inlined from elf_find_func_offset_from_file at elf.c:384:2:
elf.c:58:9: warning: elf_fd.fd may be used uninitialized [-Wmaybe-uninitialized]
58 | close(elf_fd->fd);
| ^~~~~~~~~~~~~~~~~
elf.c: In function elf_find_func_offset_from_file:
elf.c:377:23: note: elf_fd.fd was declared here
377 | struct elf_fd elf_fd;
| ^~~~~~
```
In reality, our use is fine, it's just that GCC doesn't model errno
here (see linked GCC bug). Suppress -Wmaybe-uninitialized accordingly
by initializing elf_fd.fd to -1 and elf_fd.elf to NULL.
I've done this in two other functions as well given it could easily
occur there too (same access/use pattern).
Signed-off-by: Sam James <sam@gentoo.org>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://gcc.gnu.org/PR114952
Link: https://lore.kernel.org/bpf/14ec488a1cac02794c2fa2b83ae0cef1bce2cb36.1723578546.git.sam@gentoo.org
---
tools/lib/bpf/elf.c | 3 +++
1 file changed, 3 insertions(+)
--- a/tools/lib/bpf/elf.c
+++ b/tools/lib/bpf/elf.c
@@ -16,6 +16,9 @@ int elf_open(const char *binary_path, st
int fd, ret;
Elf *elf;
+ elf_fd->elf = NULL;
+ elf_fd->fd = -1;
+
if (elf_version(EV_CURRENT) == EV_NONE) {
pr_warn("elf: failed to init libelf for %s\n", binary_path);
return -LIBBPF_ERRNO__LIBELF;
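
The change is an instance of the more general "initialize the handle before any early return" pattern; a small stand-alone sketch of the same idea (invented names, not libbpf code):

```c
/* handle_demo.c - illustrative only; mirrors the elf_fd initialization above. */
#include <stdio.h>

struct handle {
	FILE *fp;
	int fd;
};

static int handle_open(const char *path, struct handle *h)
{
	/* Define both members up front so every early-return path (and the
	 * compiler's flow analysis) sees initialized values. */
	h->fp = NULL;
	h->fd = -1;

	h->fp = fopen(path, "rb");
	if (!h->fp)
		return -1;

	h->fd = fileno(h->fp);
	return 0;
}

static void handle_close(struct handle *h)
{
	/* Safe to call even if handle_open() failed early. */
	if (h->fp)
		fclose(h->fp);
	h->fp = NULL;
	h->fd = -1;
}

int main(void)
{
	struct handle h;

	if (handle_open("/etc/hostname", &h) == 0)
		handle_close(&h);
	return 0;
}
```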


@@ -0,0 +1,172 @@
From ed0f941022515ff40473ea5335769a5dc2524a3f Mon Sep 17 00:00:00 2001
From: Yuntao Liu <liuyuntao12@huawei.com>
Date: Mon, 3 Jun 2024 16:37:50 +0100
Subject: [PATCH] ARM: 9404/1: arm32: enable HAVE_LD_DEAD_CODE_DATA_ELIMINATION
The current arm32 architecture does not yet support the
HAVE_LD_DEAD_CODE_DATA_ELIMINATION feature. arm32 is widely used in
embedded scenarios, and enabling this feature would be beneficial for
reducing the size of the kernel image.
In order to make this work, we keep the necessary tables by annotating
them with KEEP; it also requires further changes to the linker script to KEEP
some tables and to wildcard compiler-generated sections into the right place.
When using ld.lld for linking, KEEP is not recognized within the OVERLAY
command, and Ard proposed a concise method to solve this problem.
It boots normally with defconfig, vexpress_defconfig and tinyconfig.
The size comparison of zImage is as follows:
           defconfig   vexpress_defconfig   tinyconfig
no dce       5137712              5138024       424192
dce          5032560              4997824       298384
shrink          2.0%                 2.7%        29.7%
When using a smaller config file, there is a significant reduction in the
size of the zImage.
We also tested this patch on a commercially available single-board
computer, and the comparison is as follows:
a15eb_config
2161384 no dce
2092240 dce
3.2% shrink
The zImage size has been reduced by approximately 3.2%, which is 70KB on
2.1M.
Signed-off-by: Yuntao Liu <liuyuntao12@huawei.com>
Tested-by: Arnd Bergmann <arnd@arndb.de>
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
---
arch/arm/Kconfig | 1 +
arch/arm/boot/compressed/vmlinux.lds.S | 2 +-
arch/arm/include/asm/vmlinux.lds.h | 2 +-
arch/arm/kernel/entry-armv.S | 3 +++
arch/arm/kernel/vmlinux-xip.lds.S | 4 ++--
arch/arm/kernel/vmlinux.lds.S | 6 +++---
drivers/firmware/efi/libstub/Makefile | 4 ++++
7 files changed, 15 insertions(+), 7 deletions(-)
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -111,6 +111,7 @@ config ARM
select HAVE_KERNEL_XZ
select HAVE_KPROBES if !XIP_KERNEL && !CPU_ENDIAN_BE32 && !CPU_V7M
select HAVE_KRETPROBES if HAVE_KPROBES
+ select HAVE_LD_DEAD_CODE_DATA_ELIMINATION
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI
select HAVE_OPTPROBES if !THUMB2_KERNEL
--- a/arch/arm/boot/compressed/vmlinux.lds.S
+++ b/arch/arm/boot/compressed/vmlinux.lds.S
@@ -125,7 +125,7 @@ SECTIONS
. = BSS_START;
__bss_start = .;
- .bss : { *(.bss) }
+ .bss : { *(.bss .bss.*) }
_end = .;
. = ALIGN(8); /* the stack must be 64-bit aligned */
--- a/arch/arm/include/asm/vmlinux.lds.h
+++ b/arch/arm/include/asm/vmlinux.lds.h
@@ -42,7 +42,7 @@
#define PROC_INFO \
. = ALIGN(4); \
__proc_info_begin = .; \
- *(.proc.info.init) \
+ KEEP(*(.proc.info.init)) \
__proc_info_end = .;
#define IDMAP_TEXT \
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -1073,6 +1073,7 @@ vector_addrexcptn:
.globl vector_fiq
.section .vectors, "ax", %progbits
+ .reloc .text, R_ARM_NONE, .
W(b) vector_rst
W(b) vector_und
ARM( .reloc ., R_ARM_LDR_PC_G0, .L__vector_swi )
@@ -1086,6 +1087,7 @@ THUMB( .reloc ., R_ARM_THM_PC12, .L__vec
#ifdef CONFIG_HARDEN_BRANCH_HISTORY
.section .vectors.bhb.loop8, "ax", %progbits
+ .reloc .text, R_ARM_NONE, .
W(b) vector_rst
W(b) vector_bhb_loop8_und
ARM( .reloc ., R_ARM_LDR_PC_G0, .L__vector_bhb_loop8_swi )
@@ -1098,6 +1100,7 @@ THUMB( .reloc ., R_ARM_THM_PC12, .L__vec
W(b) vector_bhb_loop8_fiq
.section .vectors.bhb.bpiall, "ax", %progbits
+ .reloc .text, R_ARM_NONE, .
W(b) vector_rst
W(b) vector_bhb_bpiall_und
ARM( .reloc ., R_ARM_LDR_PC_G0, .L__vector_bhb_bpiall_swi )
--- a/arch/arm/kernel/vmlinux-xip.lds.S
+++ b/arch/arm/kernel/vmlinux-xip.lds.S
@@ -63,7 +63,7 @@ SECTIONS
. = ALIGN(4);
__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
__start___ex_table = .;
- ARM_MMU_KEEP(*(__ex_table))
+ ARM_MMU_KEEP(KEEP(*(__ex_table)))
__stop___ex_table = .;
}
@@ -83,7 +83,7 @@ SECTIONS
}
.init.arch.info : {
__arch_info_begin = .;
- *(.arch.info.init)
+ KEEP(*(.arch.info.init))
__arch_info_end = .;
}
.init.tagtable : {
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -74,7 +74,7 @@ SECTIONS
. = ALIGN(4);
__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
__start___ex_table = .;
- ARM_MMU_KEEP(*(__ex_table))
+ ARM_MMU_KEEP(KEEP(*(__ex_table)))
__stop___ex_table = .;
}
@@ -99,7 +99,7 @@ SECTIONS
}
.init.arch.info : {
__arch_info_begin = .;
- *(.arch.info.init)
+ KEEP(*(.arch.info.init))
__arch_info_end = .;
}
.init.tagtable : {
@@ -116,7 +116,7 @@ SECTIONS
#endif
.init.pv_table : {
__pv_table_begin = .;
- *(.pv_table)
+ KEEP(*(.pv_table))
__pv_table_end = .;
}
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -67,6 +67,10 @@ OBJECT_FILES_NON_STANDARD := y
# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
KCOV_INSTRUMENT := n
+# The .data section would be renamed to .data.efistub, therefore, remove
+# `-fdata-sections` flag from KBUILD_CFLAGS_KERNEL
+KBUILD_CFLAGS_KERNEL := $(filter-out -fdata-sections, $(KBUILD_CFLAGS_KERNEL))
+
lib-y := efi-stub-helper.o gop.o secureboot.o tpm.o \
file.o mem.o random.o randomalloc.o pci.o \
skip_spaces.o lib-cmdline.o lib-ctype.o \


@@ -0,0 +1,51 @@
From 65033574ade97afccba074d837fd269903a83a9a Mon Sep 17 00:00:00 2001
From: Catalin Marinas <catalin.marinas@arm.com>
Date: Thu, 5 Oct 2023 16:40:30 +0100
Subject: [PATCH] arm64: swiotlb: Reduce the default size if no ZONE_DMA
bouncing needed
With CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC enabled, the arm64 kernel still
allocates the default SWIOTLB buffer (64MB) even if ZONE_DMA is disabled
or all the RAM fits into this zone. However, this potentially wastes a
non-negligible amount of memory on platforms with little RAM.
Reduce the SWIOTLB size to 1MB per 1GB of RAM if only needed for
kmalloc() buffer bouncing.
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Suggested-by: Ross Burton <ross.burton@arm.com>
Cc: Ross Burton <ross.burton@arm.com>
Cc: Will Deacon <will@kernel.org>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
---
arch/arm64/mm/init.c | 11 ++++++++++-
1 file changed, 10 insertions(+), 1 deletion(-)
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -16,6 +16,7 @@
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/gfp.h>
+#include <linux/math.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/of.h>
@@ -493,8 +494,16 @@ void __init mem_init(void)
{
bool swiotlb = max_pfn > PFN_DOWN(arm64_dma_phys_limit);
- if (IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC))
+ if (IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC) && !swiotlb) {
+ /*
+ * If no bouncing needed for ZONE_DMA, reduce the swiotlb
+ * buffer for kmalloc() bouncing to 1MB per 1GB of RAM.
+ */
+ unsigned long size =
+ DIV_ROUND_UP(memblock_phys_mem_size(), 1024);
+ swiotlb_adjust_size(min(swiotlb_size_or_default(), size));
swiotlb = true;
+ }
swiotlb_init(swiotlb, SWIOTLB_VERBOSE);
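
To make the sizing concrete: DIV_ROUND_UP(mem_size, 1024) is 1 MiB of bounce buffer per 1 GiB of RAM, capped by the 64 MiB default via min(). A back-of-the-envelope check (user-space sketch, not kernel code, assuming 2 GiB of RAM):

```c
/* swiotlb_size_demo.c - back-of-the-envelope check of the 1MB-per-1GB rule. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long long ram   = 2ULL << 30;	/* assume 2 GiB of RAM */
	unsigned long long dflt  = 64ULL << 20;	/* 64 MiB SWIOTLB default */
	unsigned long long size  = DIV_ROUND_UP(ram, 1024);
	unsigned long long final = size < dflt ? size : dflt;

	/* 2 GiB / 1024 = 2 MiB, i.e. a 62 MiB saving over the default. */
	printf("swiotlb buffer: %llu MiB (default %llu MiB)\n",
	       final >> 20, dflt >> 20);
	return 0;
}
```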


@@ -0,0 +1,161 @@
From 66a5c40f60f5d88ad8d47ba6a4ba05892853fa1f Mon Sep 17 00:00:00 2001
From: Tanzir Hasan <tanzirh@google.com>
Date: Tue, 26 Dec 2023 18:00:00 +0000
Subject: [PATCH] kernel.h: removed REPEAT_BYTE from kernel.h
This patch creates wordpart.h and includes it in asm/word-at-a-time.h
for all architectures. WORD_AT_A_TIME_CONSTANTS depends on kernel.h
because of REPEAT_BYTE. Moving this to another header and including it
where necessary allows us to not include the bloated kernel.h. Making
this implicit dependency on REPEAT_BYTE explicit allows for later
improvements in the lib/string.c inclusion list.
Suggested-by: Al Viro <viro@zeniv.linux.org.uk>
Suggested-by: Andy Shevchenko <andy.shevchenko@gmail.com>
Signed-off-by: Tanzir Hasan <tanzirh@google.com>
Reviewed-by: Andy Shevchenko <andy.shevchenko@gmail.com>
Link: https://lore.kernel.org/r/20231226-libstringheader-v6-1-80aa08c7652c@google.com
Signed-off-by: Kees Cook <keescook@chromium.org>
---
arch/arm/include/asm/word-at-a-time.h | 3 ++-
arch/arm64/include/asm/word-at-a-time.h | 3 ++-
arch/powerpc/include/asm/word-at-a-time.h | 4 ++--
arch/riscv/include/asm/word-at-a-time.h | 3 ++-
arch/s390/include/asm/word-at-a-time.h | 3 ++-
arch/sh/include/asm/word-at-a-time.h | 2 ++
arch/x86/include/asm/word-at-a-time.h | 3 ++-
arch/x86/kvm/mmu/mmu.c | 1 +
fs/namei.c | 2 +-
include/asm-generic/word-at-a-time.h | 3 ++-
include/linux/kernel.h | 8 --------
include/linux/wordpart.h | 13 +++++++++++++
12 files changed, 31 insertions(+), 17 deletions(-)
create mode 100644 include/linux/wordpart.h
--- a/arch/arm/include/asm/word-at-a-time.h
+++ b/arch/arm/include/asm/word-at-a-time.h
@@ -8,7 +8,8 @@
* Little-endian word-at-a-time zero byte handling.
* Heavily based on the x86 algorithm.
*/
-#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/wordpart.h>
struct word_at_a_time {
const unsigned long one_bits, high_bits;
--- a/arch/arm64/include/asm/word-at-a-time.h
+++ b/arch/arm64/include/asm/word-at-a-time.h
@@ -9,7 +9,8 @@
#ifndef __AARCH64EB__
-#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/wordpart.h>
struct word_at_a_time {
const unsigned long one_bits, high_bits;
--- a/arch/powerpc/include/asm/word-at-a-time.h
+++ b/arch/powerpc/include/asm/word-at-a-time.h
@@ -4,8 +4,8 @@
/*
* Word-at-a-time interfaces for PowerPC.
*/
-
-#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/wordpart.h>
#include <asm/asm-compat.h>
#include <asm/extable.h>
--- a/arch/sh/include/asm/word-at-a-time.h
+++ b/arch/sh/include/asm/word-at-a-time.h
@@ -5,6 +5,8 @@
#ifdef CONFIG_CPU_BIG_ENDIAN
# include <asm-generic/word-at-a-time.h>
#else
+#include <linux/bitops.h>
+#include <linux/wordpart.h>
/*
* Little-endian version cribbed from x86.
*/
--- a/arch/x86/include/asm/word-at-a-time.h
+++ b/arch/x86/include/asm/word-at-a-time.h
@@ -2,7 +2,8 @@
#ifndef _ASM_WORD_AT_A_TIME_H
#define _ASM_WORD_AT_A_TIME_H
-#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/wordpart.h>
/*
* This is largely generic for little-endian machines, but the
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -47,6 +47,7 @@
#include <linux/kern_levels.h>
#include <linux/kstrtox.h>
#include <linux/kthread.h>
+#include <linux/wordpart.h>
#include <asm/page.h>
#include <asm/memtype.h>
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -17,8 +17,8 @@
#include <linux/init.h>
#include <linux/export.h>
-#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/wordpart.h>
#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/namei.h>
--- a/include/asm-generic/word-at-a-time.h
+++ b/include/asm-generic/word-at-a-time.h
@@ -2,7 +2,8 @@
#ifndef _ASM_WORD_AT_A_TIME_H
#define _ASM_WORD_AT_A_TIME_H
-#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/wordpart.h>
#include <asm/byteorder.h>
#ifdef __BIG_ENDIAN
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -38,14 +38,6 @@
#define STACK_MAGIC 0xdeadbeef
-/**
- * REPEAT_BYTE - repeat the value @x multiple times as an unsigned long value
- * @x: value to repeat
- *
- * NOTE: @x is not checked for > 0xff; larger values produce odd results.
- */
-#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x))
-
/* generic data direction definitions */
#define READ 0
#define WRITE 1
--- /dev/null
+++ b/include/linux/wordpart.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_WORDPART_H
+#define _LINUX_WORDPART_H
+/**
+ * REPEAT_BYTE - repeat the value @x multiple times as an unsigned long value
+ * @x: value to repeat
+ *
+ * NOTE: @x is not checked for > 0xff; larger values produce odd results.
+ */
+#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x))
+
+#endif // _LINUX_WORDPART_H
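
For a quick sense of what the relocated macro produces: on a 64-bit target ~0ul / 0xff is 0x0101010101010101, so REPEAT_BYTE() broadcasts one byte across the whole word. A user-space sketch (local copy of the macro, not kernel code):

```c
/* repeat_byte_demo.c - illustrative only; assumes a 64-bit unsigned long. */
#include <stdio.h>

#define REPEAT_BYTE(x)	((~0ul / 0xff) * (x))

int main(void)
{
	printf("%016lx\n", REPEAT_BYTE(0x7f));	/* 7f7f7f7f7f7f7f7f */
	printf("%016lx\n", REPEAT_BYTE(0x20));	/* 2020202020202020 */
	return 0;
}
```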


@@ -0,0 +1,107 @@
From adeb04362d74188c1e22ccb824b15a0a7b3de2f4 Mon Sep 17 00:00:00 2001
From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Date: Wed, 14 Feb 2024 19:26:32 +0200
Subject: [PATCH] kernel.h: Move upper_*_bits() and lower_*_bits() to
wordpart.h
The wordpart.h header is collecting APIs related to the handling
parts of the word (usually in byte granularity). The upper_*_bits()
and lower_*_bits() are good candidates to be moved to there.
This helps to clean up header dependency hell with regard to kernel.h
as the latter gathers completely unrelated stuff together and slows
down compilation (especially when it's included into other header).
Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Link: https://lore.kernel.org/r/20240214172752.3605073-1-andriy.shevchenko@linux.intel.com
Reviewed-by: Randy Dunlap <rdunlap@infradead.org>
Signed-off-by: Kees Cook <keescook@chromium.org>
---
include/linux/kernel.h | 30 ++----------------------------
include/linux/wordpart.h | 29 +++++++++++++++++++++++++++++
2 files changed, 31 insertions(+), 28 deletions(-)
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -32,6 +32,8 @@
#include <linux/sprintf.h>
#include <linux/static_call_types.h>
#include <linux/instruction_pointer.h>
+#include <linux/wordpart.h>
+
#include <asm/byteorder.h>
#include <uapi/linux/kernel.h>
@@ -57,34 +59,6 @@
} \
)
-/**
- * upper_32_bits - return bits 32-63 of a number
- * @n: the number we're accessing
- *
- * A basic shift-right of a 64- or 32-bit quantity. Use this to suppress
- * the "right shift count >= width of type" warning when that quantity is
- * 32-bits.
- */
-#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
-
-/**
- * lower_32_bits - return bits 0-31 of a number
- * @n: the number we're accessing
- */
-#define lower_32_bits(n) ((u32)((n) & 0xffffffff))
-
-/**
- * upper_16_bits - return bits 16-31 of a number
- * @n: the number we're accessing
- */
-#define upper_16_bits(n) ((u16)((n) >> 16))
-
-/**
- * lower_16_bits - return bits 0-15 of a number
- * @n: the number we're accessing
- */
-#define lower_16_bits(n) ((u16)((n) & 0xffff))
-
struct completion;
struct user;
--- a/include/linux/wordpart.h
+++ b/include/linux/wordpart.h
@@ -2,6 +2,35 @@
#ifndef _LINUX_WORDPART_H
#define _LINUX_WORDPART_H
+
+/**
+ * upper_32_bits - return bits 32-63 of a number
+ * @n: the number we're accessing
+ *
+ * A basic shift-right of a 64- or 32-bit quantity. Use this to suppress
+ * the "right shift count >= width of type" warning when that quantity is
+ * 32-bits.
+ */
+#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
+
+/**
+ * lower_32_bits - return bits 0-31 of a number
+ * @n: the number we're accessing
+ */
+#define lower_32_bits(n) ((u32)((n) & 0xffffffff))
+
+/**
+ * upper_16_bits - return bits 16-31 of a number
+ * @n: the number we're accessing
+ */
+#define upper_16_bits(n) ((u16)((n) >> 16))
+
+/**
+ * lower_16_bits - return bits 0-15 of a number
+ * @n: the number we're accessing
+ */
+#define lower_16_bits(n) ((u16)((n) & 0xffff))
+
/**
* REPEAT_BYTE - repeat the value @x multiple times as an unsigned long value
* @x: value to repeat
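
A short sanity check of the helpers being moved, using local copies of the macros and stdint types in place of the kernel's u32/u16 (user-space sketch, not kernel code):

```c
/* wordpart_demo.c - illustrative only; local copies of the moved macros. */
#include <stdio.h>
#include <stdint.h>

#define upper_32_bits(n)	((uint32_t)(((n) >> 16) >> 16))
#define lower_32_bits(n)	((uint32_t)((n) & 0xffffffff))
#define upper_16_bits(n)	((uint16_t)((n) >> 16))
#define lower_16_bits(n)	((uint16_t)((n) & 0xffff))

int main(void)
{
	uint64_t dma_addr = 0x0000001234abcdefULL;

	/* Typical use: split a 64-bit address into two 32-bit register writes. */
	printf("hi=%08x lo=%08x\n",
	       upper_32_bits(dma_addr), lower_32_bits(dma_addr));	/* hi=00000012 lo=34abcdef */
	printf("hi16=%04x lo16=%04x\n",
	       upper_16_bits(lower_32_bits(dma_addr)),
	       lower_16_bits(dma_addr));				/* hi16=34ab lo16=cdef */
	return 0;
}
```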


@@ -0,0 +1,206 @@
From 8cd2accb71f5eb8e92d775fc1978d3779875c2e5 Mon Sep 17 00:00:00 2001
From: Baoquan He <bhe@redhat.com>
Date: Fri, 8 Dec 2023 15:30:34 +0800
Subject: [PATCH] mips, kexec: fix the incorrect ifdeffery and dependency of
CONFIG_KEXEC
The select of KEXEC for CRASH_DUMP in kernel/Kconfig.kexec will be
dropped; compile errors will then be triggered if the config items below are
set:
===
CONFIG_CRASH_CORE=y
CONFIG_KEXEC_CORE=y
CONFIG_CRASH_DUMP=y
===
--------------------------------------------------------------------
mipsel-linux-ld: kernel/kexec_core.o: in function `kimage_free':
kernel/kexec_core.c:(.text+0x2200): undefined reference to `machine_kexec_cleanup'
mipsel-linux-ld: kernel/kexec_core.o: in function `__crash_kexec':
kernel/kexec_core.c:(.text+0x2480): undefined reference to `machine_crash_shutdown'
mipsel-linux-ld: kernel/kexec_core.c:(.text+0x2488): undefined reference to `machine_kexec'
mipsel-linux-ld: kernel/kexec_core.o: in function `kernel_kexec':
kernel/kexec_core.c:(.text+0x29b8): undefined reference to `machine_shutdown'
mipsel-linux-ld: kernel/kexec_core.c:(.text+0x29c0): undefined reference to `machine_kexec'
--------------------------------------------------------------------
Here, change the dependency of building kexec_core related object files,
and the ifdeffery in mips from CONFIG_KEXEC to CONFIG_KEXEC_CORE.
Link: https://lkml.kernel.org/r/20231208073036.7884-4-bhe@redhat.com
Signed-off-by: Baoquan He <bhe@redhat.com>
Reported-by: kernel test robot <lkp@intel.com>
Closes: https://lore.kernel.org/oe-kbuild-all/202311302042.sn8cDPIX-lkp@intel.com/
Cc: Eric DeVolder <eric_devolder@yahoo.com>
Cc: Ignat Korchagin <ignat@cloudflare.com>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
arch/mips/cavium-octeon/smp.c | 4 ++--
arch/mips/include/asm/kexec.h | 2 +-
arch/mips/include/asm/smp-ops.h | 2 +-
arch/mips/include/asm/smp.h | 2 +-
arch/mips/kernel/Makefile | 2 +-
arch/mips/kernel/smp-bmips.c | 4 ++--
arch/mips/kernel/smp-cps.c | 10 +++++-----
arch/mips/loongson64/reset.c | 4 ++--
arch/mips/loongson64/smp.c | 2 +-
9 files changed, 16 insertions(+), 16 deletions(-)
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -422,7 +422,7 @@ static const struct plat_smp_ops octeon_
.cpu_disable = octeon_cpu_disable,
.cpu_die = octeon_cpu_die,
#endif
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
.kexec_nonboot_cpu = kexec_nonboot_cpu_jump,
#endif
};
@@ -502,7 +502,7 @@ static const struct plat_smp_ops octeon_
.cpu_disable = octeon_cpu_disable,
.cpu_die = octeon_cpu_die,
#endif
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
.kexec_nonboot_cpu = kexec_nonboot_cpu_jump,
#endif
};
--- a/arch/mips/include/asm/kexec.h
+++ b/arch/mips/include/asm/kexec.h
@@ -31,7 +31,7 @@ static inline void crash_setup_regs(stru
prepare_frametrace(newregs);
}
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
struct kimage;
extern unsigned long kexec_args[4];
extern int (*_machine_kexec_prepare)(struct kimage *);
--- a/arch/mips/include/asm/smp-ops.h
+++ b/arch/mips/include/asm/smp-ops.h
@@ -35,7 +35,7 @@ struct plat_smp_ops {
void (*cpu_die)(unsigned int cpu);
void (*cleanup_dead_cpu)(unsigned cpu);
#endif
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
void (*kexec_nonboot_cpu)(void);
#endif
};
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -93,7 +93,7 @@ static inline void __cpu_die(unsigned in
extern void __noreturn play_dead(void);
#endif
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
static inline void kexec_nonboot_cpu(void)
{
extern const struct plat_smp_ops *mp_ops; /* private */
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -90,7 +90,7 @@ obj-$(CONFIG_GPIO_TXX9) += gpio_txx9.o
obj-$(CONFIG_RELOCATABLE) += relocate.o
-obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o crash.o
+obj-$(CONFIG_KEXEC_CORE) += machine_kexec.o relocate_kernel.o crash.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_EARLY_PRINTK_8250) += early_printk_8250.o
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -434,7 +434,7 @@ const struct plat_smp_ops bmips43xx_smp_
.cpu_disable = bmips_cpu_disable,
.cpu_die = bmips_cpu_die,
#endif
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
.kexec_nonboot_cpu = kexec_nonboot_cpu_jump,
#endif
};
@@ -451,7 +451,7 @@ const struct plat_smp_ops bmips5000_smp_
.cpu_disable = bmips_cpu_disable,
.cpu_die = bmips_cpu_die,
#endif
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
.kexec_nonboot_cpu = kexec_nonboot_cpu_jump,
#endif
};
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -395,7 +395,7 @@ static void cps_smp_finish(void)
local_irq_enable();
}
-#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_KEXEC)
+#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_KEXEC_CORE)
enum cpu_death {
CPU_DEATH_HALT,
@@ -432,7 +432,7 @@ static void cps_shutdown_this_cpu(enum c
}
}
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
static void cps_kexec_nonboot_cpu(void)
{
@@ -442,9 +442,9 @@ static void cps_kexec_nonboot_cpu(void)
cps_shutdown_this_cpu(CPU_DEATH_POWER);
}
-#endif /* CONFIG_KEXEC */
+#endif /* CONFIG_KEXEC_CORE */
-#endif /* CONFIG_HOTPLUG_CPU || CONFIG_KEXEC */
+#endif /* CONFIG_HOTPLUG_CPU || CONFIG_KEXEC_CORE */
#ifdef CONFIG_HOTPLUG_CPU
@@ -613,7 +613,7 @@ static const struct plat_smp_ops cps_smp
.cpu_die = cps_cpu_die,
.cleanup_dead_cpu = cps_cleanup_dead_cpu,
#endif
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
.kexec_nonboot_cpu = cps_kexec_nonboot_cpu,
#endif
};
--- a/arch/mips/loongson64/reset.c
+++ b/arch/mips/loongson64/reset.c
@@ -39,7 +39,7 @@ static int firmware_poweroff(struct sys_
return NOTIFY_DONE;
}
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
/* 0X80000000~0X80200000 is safe */
#define MAX_ARGS 64
@@ -152,7 +152,7 @@ static int __init mips_reboot_setup(void
firmware_poweroff, NULL);
}
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
kexec_argv = kmalloc(KEXEC_ARGV_SIZE, GFP_KERNEL);
if (WARN_ON(!kexec_argv))
return -ENOMEM;
--- a/arch/mips/loongson64/smp.c
+++ b/arch/mips/loongson64/smp.c
@@ -883,7 +883,7 @@ const struct plat_smp_ops loongson3_smp_
.cpu_disable = loongson3_cpu_disable,
.cpu_die = loongson3_cpu_die,
#endif
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
.kexec_nonboot_cpu = kexec_nonboot_cpu_jump,
#endif
};


@@ -0,0 +1,171 @@
From a5c05453a13ab324ad8719e8a23dfb6af01f3652 Mon Sep 17 00:00:00 2001
From: Christian Marangi <ansuelsmth@gmail.com>
Date: Thu, 20 Jun 2024 17:26:42 +0200
Subject: [PATCH 1/4] mips: bmips: rework and cache CBR addr handling
Rework the handling of the CBR address and cache it. This address
doesn't change, so it can be cached instead of reading the register every
time.
This is in preparation for permitting the CBR address to be tweaked in DT
on devices with a broken SoC or bootloader.
bmips_cbr_addr is defined in setup.c for each arch to keep compatibility
with legacy brcm47xx/brcm63xx and generic BMIPS target.
Acked-by: Florian Fainelli <florian.fainelli@broadcom.com>
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
---
arch/mips/bcm47xx/prom.c | 3 +++
arch/mips/bcm47xx/setup.c | 4 ++++
arch/mips/bcm63xx/prom.c | 3 +++
arch/mips/bcm63xx/setup.c | 4 ++++
arch/mips/bmips/dma.c | 2 +-
arch/mips/bmips/setup.c | 7 ++++++-
arch/mips/include/asm/bmips.h | 1 +
arch/mips/kernel/smp-bmips.c | 4 ++--
8 files changed, 24 insertions(+), 4 deletions(-)
--- a/arch/mips/bcm47xx/prom.c
+++ b/arch/mips/bcm47xx/prom.c
@@ -32,6 +32,7 @@
#include <linux/ssb/ssb_driver_chipcommon.h>
#include <linux/ssb/ssb_regs.h>
#include <linux/smp.h>
+#include <asm/bmips.h>
#include <asm/bootinfo.h>
#include <bcm47xx.h>
#include <bcm47xx_board.h>
@@ -109,6 +110,8 @@ static __init void prom_init_mem(void)
void __init prom_init(void)
{
+ /* Cache CBR addr before CPU/DMA setup */
+ bmips_cbr_addr = BMIPS_GET_CBR();
prom_init_mem();
setup_8250_early_printk_port(CKSEG1ADDR(BCM47XX_SERIAL_ADDR), 0, 0);
}
--- a/arch/mips/bcm47xx/setup.c
+++ b/arch/mips/bcm47xx/setup.c
@@ -37,6 +37,7 @@
#include <linux/ssb/ssb.h>
#include <linux/ssb/ssb_embedded.h>
#include <linux/bcma/bcma_soc.h>
+#include <asm/bmips.h>
#include <asm/bootinfo.h>
#include <asm/idle.h>
#include <asm/prom.h>
@@ -45,6 +46,9 @@
#include <bcm47xx.h>
#include <bcm47xx_board.h>
+/* CBR addr doesn't change and we can cache it */
+void __iomem *bmips_cbr_addr __read_mostly;
+
union bcm47xx_bus bcm47xx_bus;
EXPORT_SYMBOL(bcm47xx_bus);
--- a/arch/mips/bcm63xx/prom.c
+++ b/arch/mips/bcm63xx/prom.c
@@ -22,6 +22,9 @@ void __init prom_init(void)
{
u32 reg, mask;
+ /* Cache CBR addr before CPU/DMA setup */
+ bmips_cbr_addr = BMIPS_GET_CBR();
+
bcm63xx_cpu_init();
/* stop any running watchdog */
--- a/arch/mips/bcm63xx/setup.c
+++ b/arch/mips/bcm63xx/setup.c
@@ -12,6 +12,7 @@
#include <linux/memblock.h>
#include <linux/ioport.h>
#include <linux/pm.h>
+#include <asm/bmips.h>
#include <asm/bootinfo.h>
#include <asm/time.h>
#include <asm/reboot.h>
@@ -22,6 +23,9 @@
#include <bcm63xx_io.h>
#include <bcm63xx_gpio.h>
+/* CBR addr doesn't change and we can cache it */
+void __iomem *bmips_cbr_addr __read_mostly;
+
void bcm63xx_machine_halt(void)
{
pr_info("System halted\n");
--- a/arch/mips/bmips/dma.c
+++ b/arch/mips/bmips/dma.c
@@ -9,7 +9,7 @@ bool bmips_rac_flush_disable;
void arch_sync_dma_for_cpu_all(void)
{
- void __iomem *cbr = BMIPS_GET_CBR();
+ void __iomem *cbr = bmips_cbr_addr;
u32 cfg;
if (boot_cpu_type() != CPU_BMIPS3300 &&
--- a/arch/mips/bmips/setup.c
+++ b/arch/mips/bmips/setup.c
@@ -34,6 +34,9 @@
#define REG_BCM6328_OTP ((void __iomem *)CKSEG1ADDR(0x1000062c))
#define BCM6328_TP1_DISABLED BIT(9)
+/* CBR addr doesn't change and we can cache it */
+void __iomem *bmips_cbr_addr __read_mostly;
+
extern bool bmips_rac_flush_disable;
static const unsigned long kbase = VMLINUX_LOAD_ADDRESS & 0xfff00000;
@@ -111,7 +114,7 @@ static void bcm6358_quirks(void)
* because the bootloader is not initializing it properly.
*/
bmips_rac_flush_disable = !!(read_c0_brcm_cmt_local() & (1 << 31)) ||
- !!BMIPS_GET_CBR();
+ !!bmips_cbr_addr;
}
static void bcm6368_quirks(void)
@@ -144,6 +147,8 @@ static void __init bmips_init_cfe(void)
void __init prom_init(void)
{
+ /* Cache CBR addr before CPU/DMA setup */
+ bmips_cbr_addr = BMIPS_GET_CBR();
bmips_init_cfe();
bmips_cpu_setup();
register_bmips_smp_ops();
--- a/arch/mips/include/asm/bmips.h
+++ b/arch/mips/include/asm/bmips.h
@@ -81,6 +81,7 @@ extern char bmips_smp_movevec[];
extern char bmips_smp_int_vec[];
extern char bmips_smp_int_vec_end[];
+extern void __iomem *bmips_cbr_addr;
extern int bmips_smp_enabled;
extern int bmips_cpu_offset;
extern cpumask_t bmips_booted_mask;
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -518,7 +518,7 @@ static void bmips_set_reset_vec(int cpu,
info.val = val;
bmips_set_reset_vec_remote(&info);
} else {
- void __iomem *cbr = BMIPS_GET_CBR();
+ void __iomem *cbr = bmips_cbr_addr;
if (cpu == 0)
__raw_writel(val, cbr + BMIPS_RELO_VECTOR_CONTROL_0);
@@ -591,7 +591,7 @@ asmlinkage void __weak plat_wired_tlb_se
void bmips_cpu_setup(void)
{
- void __iomem __maybe_unused *cbr = BMIPS_GET_CBR();
+ void __iomem __maybe_unused *cbr = bmips_cbr_addr;
u32 __maybe_unused cfg;
switch (current_cpu_type()) {
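
The pattern here is simply "read the register once during early boot, then serve every later user from a cached global"; a minimal user-space analogue (invented names, not kernel code):

```c
/* cbr_cache_demo.c - illustrative analogue of caching bmips_cbr_addr. */
#include <stdio.h>

static char fake_cbr_block[64];		/* stand-in for the real register block */

static void *cbr_addr;			/* cached once, read many times */

static void *read_cbr_register(void)	/* stand-in for BMIPS_GET_CBR() */
{
	return fake_cbr_block;
}

static void early_init(void)
{
	/* Cache the address before CPU/DMA setup, as prom_init() does above. */
	cbr_addr = read_cbr_register();
}

static void dma_sync(void)
{
	/* Later users consume the cached pointer instead of re-reading. */
	printf("using CBR block at %p\n", cbr_addr);
}

int main(void)
{
	early_init();
	dma_sync();
	return 0;
}
```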


@@ -0,0 +1,111 @@
From b95b30e50aed225d26e20737873ae2404941901c Mon Sep 17 00:00:00 2001
From: Christian Marangi <ansuelsmth@gmail.com>
Date: Thu, 20 Jun 2024 17:26:44 +0200
Subject: [PATCH 3/4] mips: bmips: setup: make CBR address configurable
Add support for providing the CBR address from DT to handle a broken
SoC/bootloader that doesn't correctly init it. This permits using the
RAC flush even in that condition.
To provide a CBR address from DT, the property "brcm,bmips-cbr-reg"
needs to be set in the "cpus" node. On DT init, this property presence
will be checked and will set the bmips_cbr_addr value accordingly. Also
bmips_rac_flush_disable will be set to false as RAC flush can be
correctly supported.
The CBR address from DT will overwrite the cached one and the
one set in the CBR register will be ignored.
The DT-provided CBR address is also validated to be outside the DRAM window.
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
Acked-by: Florian Fainelli <florian.fainelli@broadcom.com>
Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
---
arch/mips/bcm47xx/setup.c | 6 +++++-
arch/mips/bcm63xx/setup.c | 6 +++++-
arch/mips/bmips/setup.c | 30 ++++++++++++++++++++++++++++--
3 files changed, 38 insertions(+), 4 deletions(-)
--- a/arch/mips/bcm47xx/setup.c
+++ b/arch/mips/bcm47xx/setup.c
@@ -46,7 +46,11 @@
#include <bcm47xx.h>
#include <bcm47xx_board.h>
-/* CBR addr doesn't change and we can cache it */
+/*
+ * CBR addr doesn't change and we can cache it.
+ * For broken SoC/Bootloader CBR addr might also be provided via DT
+ * with "brcm,bmips-cbr-reg" in the "cpus" node.
+ */
void __iomem *bmips_cbr_addr __read_mostly;
union bcm47xx_bus bcm47xx_bus;
--- a/arch/mips/bcm63xx/setup.c
+++ b/arch/mips/bcm63xx/setup.c
@@ -23,7 +23,11 @@
#include <bcm63xx_io.h>
#include <bcm63xx_gpio.h>
-/* CBR addr doesn't change and we can cache it */
+/*
+ * CBR addr doesn't change and we can cache it.
+ * For broken SoC/Bootloader CBR addr might also be provided via DT
+ * with "brcm,bmips-cbr-reg" in the "cpus" node.
+ */
void __iomem *bmips_cbr_addr __read_mostly;
void bcm63xx_machine_halt(void)
--- a/arch/mips/bmips/setup.c
+++ b/arch/mips/bmips/setup.c
@@ -34,7 +34,11 @@
#define REG_BCM6328_OTP ((void __iomem *)CKSEG1ADDR(0x1000062c))
#define BCM6328_TP1_DISABLED BIT(9)
-/* CBR addr doesn't change and we can cache it */
+/*
+ * CBR addr doesn't change and we can cache it.
+ * For broken SoC/Bootloader CBR addr might also be provided via DT
+ * with "brcm,bmips-cbr-reg" in the "cpus" node.
+ */
void __iomem *bmips_cbr_addr __read_mostly;
extern bool bmips_rac_flush_disable;
@@ -208,13 +212,35 @@ void __init plat_mem_setup(void)
void __init device_tree_init(void)
{
struct device_node *np;
+ u32 addr;
unflatten_and_copy_device_tree();
/* Disable SMP boot unless both CPUs are listed in DT and !disabled */
np = of_find_node_by_name(NULL, "cpus");
- if (np && of_get_available_child_count(np) <= 1)
+ if (!np)
+ return;
+
+ if (of_get_available_child_count(np) <= 1)
bmips_smp_enabled = 0;
+
+ /* Check if DT provide a CBR address */
+ if (of_property_read_u32(np, "brcm,bmips-cbr-reg", &addr))
+ goto exit;
+
+ /* Make sure CBR address is outside DRAM window */
+ if (addr >= (u32)memblock_start_of_DRAM() &&
+ addr < (u32)memblock_end_of_DRAM()) {
+ WARN(1, "DT CBR %x inside DRAM window. Ignoring DT CBR.\n",
+ addr);
+ goto exit;
+ }
+
+ bmips_cbr_addr = (void __iomem *)addr;
+ /* Since CBR is provided by DT, enable RAC flush */
+ bmips_rac_flush_disable = false;
+
+exit:
of_node_put(np);
}


@@ -0,0 +1,57 @@
From 04f38d1a4db017f17e82442727b91ce03dd72759 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Daniel=20Gonz=C3=A1lez=20Cabanelas?= <dgcbueu@gmail.com>
Date: Thu, 20 Jun 2024 17:26:45 +0200
Subject: [PATCH 4/4] mips: bmips: enable RAC on BMIPS4350
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
The data RAC is left disabled by the bootloader in some SoCs, at least in
the core it boots from.
Enabling this feature increases the performance up to +30% depending on the
task.
Signed-off-by: Daniel González Cabanelas <dgcbueu@gmail.com>
Signed-off-by: Álvaro Fernández Rojas <noltari@gmail.com>
[ rework code and reduce code duplication ]
Acked-by: Florian Fainelli <florian.fainelli@broadcom.com>
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
---
arch/mips/kernel/smp-bmips.c | 18 ++++++++++++++++++
1 file changed, 18 insertions(+)
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -592,6 +592,7 @@ asmlinkage void __weak plat_wired_tlb_se
void bmips_cpu_setup(void)
{
void __iomem __maybe_unused *cbr = bmips_cbr_addr;
+ u32 __maybe_unused rac_addr;
u32 __maybe_unused cfg;
switch (current_cpu_type()) {
@@ -620,6 +621,23 @@ void bmips_cpu_setup(void)
__raw_readl(cbr + BMIPS_RAC_ADDRESS_RANGE);
break;
+ case CPU_BMIPS4350:
+ rac_addr = BMIPS_RAC_CONFIG_1;
+
+ if (!(read_c0_brcm_cmt_local() & (1 << 31)))
+ rac_addr = BMIPS_RAC_CONFIG;
+
+ /* Enable data RAC */
+ cfg = __raw_readl(cbr + rac_addr);
+ __raw_writel(cfg | 0xf, cbr + rac_addr);
+ __raw_readl(cbr + rac_addr);
+
+ /* Flush stale data out of the readahead cache */
+ cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG);
+ __raw_writel(cfg | 0x100, cbr + BMIPS_RAC_CONFIG);
+ __raw_readl(cbr + BMIPS_RAC_CONFIG);
+ break;
+
case CPU_BMIPS4380:
/* CBG workaround for early BMIPS4380 CPUs */
switch (read_c0_prid()) {


@@ -0,0 +1,36 @@
From 8e7daa85641c9559c113f6b217bdc923397de77c Mon Sep 17 00:00:00 2001
From: William Zhang <william.zhang@broadcom.com>
Date: Thu, 22 Feb 2024 19:47:58 -0800
Subject: [PATCH] mtd: rawnand: brcmnand: Support write protection setting from
dts
The write protection feature is controlled by the module parameter wp_on,
with the default set to enabled. But not all boards use this feature,
especially BCMBCA broadband boards, and a module parameter is not
sufficient as different boards can have different options. Add a device
tree property and allow this feature to be configured through the board
dts on a per-board basis.
Signed-off-by: William Zhang <william.zhang@broadcom.com>
Reviewed-by: Florian Fainelli <florian.fainelli@broadcom.com>
Reviewed-by: Kamal Dasu <kamal.dasu@broadcom.com>
Reviewed-by: David Regan <dregan@broadcom.com>
Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
Link: https://lore.kernel.org/linux-mtd/20240223034758.13753-14-william.zhang@broadcom.com
---
drivers/mtd/nand/raw/brcmnand/brcmnand.c | 4 ++++
1 file changed, 4 insertions(+)
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
@@ -3194,6 +3194,10 @@ int brcmnand_probe(struct platform_devic
/* Disable XOR addressing */
brcmnand_rmw_reg(ctrl, BRCMNAND_CS_XOR, 0xff, 0, 0);
+ /* Check if the board connects the WP pin */
+ if (of_property_read_bool(dn, "brcm,wp-not-connected"))
+ wp_on = 0;
+
if (ctrl->features & BRCMNAND_HAS_WP) {
/* Permanently disable write protection */
if (wp_on == 2)


@@ -0,0 +1,123 @@
From 25d88bfd35bac3196eafa666e3b05033b46ffa21 Mon Sep 17 00:00:00 2001
From: Daniel Golle <daniel@makrotopia.org>
Date: Tue, 19 Dec 2023 02:32:00 +0000
Subject: [PATCH 1/8] dt-bindings: mtd: add basic bindings for UBI
Add basic bindings for UBI devices and volumes.
Signed-off-by: Daniel Golle <daniel@makrotopia.org>
Reviewed-by: Rob Herring <robh@kernel.org>
Signed-off-by: Richard Weinberger <richard@nod.at>
---
.../bindings/mtd/partitions/linux,ubi.yaml | 65 +++++++++++++++++++
.../bindings/mtd/partitions/ubi-volume.yaml | 35 ++++++++++
2 files changed, 100 insertions(+)
create mode 100644 Documentation/devicetree/bindings/mtd/partitions/linux,ubi.yaml
create mode 100644 Documentation/devicetree/bindings/mtd/partitions/ubi-volume.yaml
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/partitions/linux,ubi.yaml
@@ -0,0 +1,65 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mtd/partitions/linux,ubi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Unsorted Block Images
+
+description: |
+ UBI ("Unsorted Block Images") is a volume management system for raw
+ flash devices which manages multiple logical volumes on a single
+ physical flash device and spreads the I/O load (i.e wear-leveling)
+ across the whole flash chip.
+
+maintainers:
+ - Daniel Golle <daniel@makrotopia.org>
+
+allOf:
+ - $ref: partition.yaml#
+
+properties:
+ compatible:
+ const: linux,ubi
+
+ volumes:
+ type: object
+ description: UBI Volumes
+
+ patternProperties:
+ "^ubi-volume-.*$":
+ $ref: /schemas/mtd/partitions/ubi-volume.yaml#
+
+ unevaluatedProperties: false
+
+required:
+ - compatible
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ reg = <0x0 0x100000>;
+ label = "bootloader";
+ read-only;
+ };
+
+ partition@100000 {
+ reg = <0x100000 0x1ff00000>;
+ label = "ubi";
+ compatible = "linux,ubi";
+
+ volumes {
+ ubi-volume-caldata {
+ volid = <2>;
+ volname = "rf";
+ };
+ };
+ };
+ };
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/partitions/ubi-volume.yaml
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mtd/partitions/ubi-volume.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: UBI volume
+
+description: |
+ This binding describes a single UBI volume. Volumes can be matches either
+ by their ID or their name, or both.
+
+maintainers:
+ - Daniel Golle <daniel@makrotopia.org>
+
+properties:
+ volid:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Match UBI volume ID
+
+ volname:
+ $ref: /schemas/types.yaml#/definitions/string
+ description:
+ Match UBI volume ID
+
+anyOf:
+ - required:
+ - volid
+
+ - required:
+ - volname
+
+# This is a generic file other binding inherit from and extend
+additionalProperties: true


@@ -0,0 +1,50 @@
From 95b113222b5164ac0887eb5c514ff3970a0136f0 Mon Sep 17 00:00:00 2001
From: Daniel Golle <daniel@makrotopia.org>
Date: Tue, 19 Dec 2023 02:32:11 +0000
Subject: [PATCH 2/8] dt-bindings: mtd: ubi-volume: allow UBI volumes to
provide NVMEM
UBI volumes may be used to contain NVMEM bits, typically device MAC
addresses or wireless radio calibration data.
Signed-off-by: Daniel Golle <daniel@makrotopia.org>
Reviewed-by: Rob Herring <robh@kernel.org>
Signed-off-by: Richard Weinberger <richard@nod.at>
---
.../devicetree/bindings/mtd/partitions/linux,ubi.yaml | 10 ++++++++++
.../devicetree/bindings/mtd/partitions/ubi-volume.yaml | 5 +++++
2 files changed, 15 insertions(+)
--- a/Documentation/devicetree/bindings/mtd/partitions/linux,ubi.yaml
+++ b/Documentation/devicetree/bindings/mtd/partitions/linux,ubi.yaml
@@ -59,6 +59,16 @@ examples:
ubi-volume-caldata {
volid = <2>;
volname = "rf";
+
+ nvmem-layout {
+ compatible = "fixed-layout";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ eeprom@0 {
+ reg = <0x0 0x1000>;
+ };
+ };
};
};
};
--- a/Documentation/devicetree/bindings/mtd/partitions/ubi-volume.yaml
+++ b/Documentation/devicetree/bindings/mtd/partitions/ubi-volume.yaml
@@ -24,6 +24,11 @@ properties:
description:
Match UBI volume ID
+ nvmem-layout:
+ $ref: /schemas/nvmem/layouts/nvmem-layout.yaml#
+ description:
+ This container may reference an NVMEM layout parser.
+
anyOf:
- required:
- volid


@@ -0,0 +1,285 @@
From 2bba1cdcfcd2907d0696cc0139f1bd078d36ee81 Mon Sep 17 00:00:00 2001
From: Daniel Golle <daniel@makrotopia.org>
Date: Tue, 19 Dec 2023 02:32:35 +0000
Subject: [PATCH 3/8] mtd: ubi: block: use notifier to create ubiblock from
parameter
Use the UBI_VOLUME_ADDED notification to create ubiblock devices specified
on the kernel cmdline or via module parameter.
This makes things simpler and has the advantage that ubiblock devices
on volumes which are not present at the time the ubi module is probed
will still be created.
Suggested-by: Zhihao Cheng <chengzhihao1@huawei.com>
Signed-off-by: Daniel Golle <daniel@makrotopia.org>
Signed-off-by: Richard Weinberger <richard@nod.at>
---
drivers/mtd/ubi/block.c | 136 ++++++++++++++++++++--------------------
drivers/mtd/ubi/kapi.c | 54 +++++++++++-----
drivers/mtd/ubi/ubi.h | 1 +
3 files changed, 106 insertions(+), 85 deletions(-)
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -65,10 +65,10 @@ struct ubiblock_pdu {
};
/* Numbers of elements set in the @ubiblock_param array */
-static int ubiblock_devs __initdata;
+static int ubiblock_devs;
/* MTD devices specification parameters */
-static struct ubiblock_param ubiblock_param[UBIBLOCK_MAX_DEVICES] __initdata;
+static struct ubiblock_param ubiblock_param[UBIBLOCK_MAX_DEVICES];
struct ubiblock {
struct ubi_volume_desc *desc;
@@ -532,6 +532,70 @@ static int ubiblock_resize(struct ubi_vo
return 0;
}
+static bool
+match_volume_desc(struct ubi_volume_info *vi, const char *name, int ubi_num, int vol_id)
+{
+ int err, len, cur_ubi_num, cur_vol_id;
+
+ if (ubi_num == -1) {
+ /* No ubi num, name must be a vol device path */
+ err = ubi_get_num_by_path(name, &cur_ubi_num, &cur_vol_id);
+ if (err || vi->ubi_num != cur_ubi_num || vi->vol_id != cur_vol_id)
+ return false;
+
+ return true;
+ }
+
+ if (vol_id == -1) {
+ /* Got ubi_num, but no vol_id, name must be volume name */
+ if (vi->ubi_num != ubi_num)
+ return false;
+
+ len = strnlen(name, UBI_VOL_NAME_MAX + 1);
+ if (len < 1 || vi->name_len != len)
+ return false;
+
+ if (strcmp(name, vi->name))
+ return false;
+
+ return true;
+ }
+
+ if (vi->ubi_num != ubi_num)
+ return false;
+
+ if (vi->vol_id != vol_id)
+ return false;
+
+ return true;
+}
+
+static void
+ubiblock_create_from_param(struct ubi_volume_info *vi)
+{
+ int i, ret = 0;
+ struct ubiblock_param *p;
+
+ /*
+ * Iterate over ubiblock cmdline parameters. If a parameter matches the
+ * newly added volume create the ubiblock device for it.
+ */
+ for (i = 0; i < ubiblock_devs; i++) {
+ p = &ubiblock_param[i];
+
+ if (!match_volume_desc(vi, p->name, p->ubi_num, p->vol_id))
+ continue;
+
+ ret = ubiblock_create(vi);
+ if (ret) {
+ pr_err(
+ "UBI: block: can't add '%s' volume on ubi%d_%d, err=%d\n",
+ vi->name, p->ubi_num, p->vol_id, ret);
+ }
+ break;
+ }
+}
+
static int ubiblock_notify(struct notifier_block *nb,
unsigned long notification_type, void *ns_ptr)
{
@@ -539,10 +603,7 @@ static int ubiblock_notify(struct notifi
switch (notification_type) {
case UBI_VOLUME_ADDED:
- /*
- * We want to enforce explicit block device creation for
- * volumes, so when a volume is added we do nothing.
- */
+ ubiblock_create_from_param(&nt->vi);
break;
case UBI_VOLUME_REMOVED:
ubiblock_remove(&nt->vi);
@@ -568,56 +629,6 @@ static struct notifier_block ubiblock_no
.notifier_call = ubiblock_notify,
};
-static struct ubi_volume_desc * __init
-open_volume_desc(const char *name, int ubi_num, int vol_id)
-{
- if (ubi_num == -1)
- /* No ubi num, name must be a vol device path */
- return ubi_open_volume_path(name, UBI_READONLY);
- else if (vol_id == -1)
- /* No vol_id, must be vol_name */
- return ubi_open_volume_nm(ubi_num, name, UBI_READONLY);
- else
- return ubi_open_volume(ubi_num, vol_id, UBI_READONLY);
-}
-
-static void __init ubiblock_create_from_param(void)
-{
- int i, ret = 0;
- struct ubiblock_param *p;
- struct ubi_volume_desc *desc;
- struct ubi_volume_info vi;
-
- /*
- * If there is an error creating one of the ubiblocks, continue on to
- * create the following ubiblocks. This helps in a circumstance where
- * the kernel command-line specifies multiple block devices and some
- * may be broken, but we still want the working ones to come up.
- */
- for (i = 0; i < ubiblock_devs; i++) {
- p = &ubiblock_param[i];
-
- desc = open_volume_desc(p->name, p->ubi_num, p->vol_id);
- if (IS_ERR(desc)) {
- pr_err(
- "UBI: block: can't open volume on ubi%d_%d, err=%ld\n",
- p->ubi_num, p->vol_id, PTR_ERR(desc));
- continue;
- }
-
- ubi_get_volume_info(desc, &vi);
- ubi_close_volume(desc);
-
- ret = ubiblock_create(&vi);
- if (ret) {
- pr_err(
- "UBI: block: can't add '%s' volume on ubi%d_%d, err=%d\n",
- vi.name, p->ubi_num, p->vol_id, ret);
- continue;
- }
- }
-}
-
static void ubiblock_remove_all(void)
{
struct ubiblock *next;
@@ -643,18 +654,7 @@ int __init ubiblock_init(void)
if (ubiblock_major < 0)
return ubiblock_major;
- /*
- * Attach block devices from 'block=' module param.
- * Even if one block device in the param list fails to come up,
- * still allow the module to load and leave any others up.
- */
- ubiblock_create_from_param();
-
- /*
- * Block devices are only created upon user requests, so we ignore
- * existing volumes.
- */
- ret = ubi_register_volume_notifier(&ubiblock_notifier, 1);
+ ret = ubi_register_volume_notifier(&ubiblock_notifier, 0);
if (ret)
goto err_unreg;
return 0;
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -280,6 +280,41 @@ struct ubi_volume_desc *ubi_open_volume_
EXPORT_SYMBOL_GPL(ubi_open_volume_nm);
/**
+ * ubi_get_num_by_path - get UBI device and volume number from device path
+ * @pathname: volume character device node path
+ * @ubi_num: pointer to UBI device number to be set
+ * @vol_id: pointer to UBI volume ID to be set
+ *
+ * Returns 0 on success and sets ubi_num and vol_id, returns error otherwise.
+ */
+int ubi_get_num_by_path(const char *pathname, int *ubi_num, int *vol_id)
+{
+ int error;
+ struct path path;
+ struct kstat stat;
+
+ error = kern_path(pathname, LOOKUP_FOLLOW, &path);
+ if (error)
+ return error;
+
+ error = vfs_getattr(&path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT);
+ path_put(&path);
+ if (error)
+ return error;
+
+ if (!S_ISCHR(stat.mode))
+ return -EINVAL;
+
+ *ubi_num = ubi_major2num(MAJOR(stat.rdev));
+ *vol_id = MINOR(stat.rdev) - 1;
+
+ if (*vol_id < 0 || *ubi_num < 0)
+ return -ENODEV;
+
+ return 0;
+}
+
+/**
* ubi_open_volume_path - open UBI volume by its character device node path.
* @pathname: volume character device node path
* @mode: open mode
@@ -290,32 +325,17 @@ EXPORT_SYMBOL_GPL(ubi_open_volume_nm);
struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode)
{
int error, ubi_num, vol_id;
- struct path path;
- struct kstat stat;
dbg_gen("open volume %s, mode %d", pathname, mode);
if (!pathname || !*pathname)
return ERR_PTR(-EINVAL);
- error = kern_path(pathname, LOOKUP_FOLLOW, &path);
- if (error)
- return ERR_PTR(error);
-
- error = vfs_getattr(&path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT);
- path_put(&path);
+ error = ubi_get_num_by_path(pathname, &ubi_num, &vol_id);
if (error)
return ERR_PTR(error);
- if (!S_ISCHR(stat.mode))
- return ERR_PTR(-EINVAL);
-
- ubi_num = ubi_major2num(MAJOR(stat.rdev));
- vol_id = MINOR(stat.rdev) - 1;
-
- if (vol_id >= 0 && ubi_num >= 0)
- return ubi_open_volume(ubi_num, vol_id, mode);
- return ERR_PTR(-ENODEV);
+ return ubi_open_volume(ubi_num, vol_id, mode);
}
EXPORT_SYMBOL_GPL(ubi_open_volume_path);
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -956,6 +956,7 @@ void ubi_free_internal_volumes(struct ub
void ubi_do_get_device_info(struct ubi_device *ubi, struct ubi_device_info *di);
void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol,
struct ubi_volume_info *vi);
+int ubi_get_num_by_path(const char *pathname, int *ubi_num, int *vol_id);
/* scan.c */
int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
int pnum, const struct ubi_vid_hdr *vid_hdr);


@@ -0,0 +1,205 @@
From 6e331888643887ce85657527bc03f97d46235e71 Mon Sep 17 00:00:00 2001
From: Daniel Golle <daniel@makrotopia.org>
Date: Tue, 19 Dec 2023 02:33:14 +0000
Subject: [PATCH 4/8] mtd: ubi: attach from device tree
Introduce the device tree compatible 'linux,ubi' and attach compatible MTD
devices using the MTD add notifier. This is needed for a UBI device to
be available early at boot (and not only after late_initcall), so
volumes on it can be used e.g. as NVMEM providers for other drivers.
Signed-off-by: Daniel Golle <daniel@makrotopia.org>
Signed-off-by: Richard Weinberger <richard@nod.at>
---
drivers/mtd/ubi/build.c | 135 ++++++++++++++++++++++++++++------------
1 file changed, 96 insertions(+), 39 deletions(-)
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -27,6 +27,7 @@
#include <linux/log2.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
+#include <linux/of.h>
#include <linux/slab.h>
#include <linux/major.h>
#include "ubi.h"
@@ -1214,43 +1215,43 @@ static struct mtd_info * __init open_mtd
return mtd;
}
-static int __init ubi_init(void)
+static void ubi_notify_add(struct mtd_info *mtd)
{
- int err, i, k;
+ struct device_node *np = mtd_get_of_node(mtd);
+ int err;
- /* Ensure that EC and VID headers have correct size */
- BUILD_BUG_ON(sizeof(struct ubi_ec_hdr) != 64);
- BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64);
+ if (!of_device_is_compatible(np, "linux,ubi"))
+ return;
- if (mtd_devs > UBI_MAX_DEVICES) {
- pr_err("UBI error: too many MTD devices, maximum is %d\n",
- UBI_MAX_DEVICES);
- return -EINVAL;
- }
+ /*
+ * we are already holding &mtd_table_mutex, but still need
+ * to bump refcount
+ */
+ err = __get_mtd_device(mtd);
+ if (err)
+ return;
- /* Create base sysfs directory and sysfs files */
- err = class_register(&ubi_class);
+ /* called while holding mtd_table_mutex */
+ mutex_lock_nested(&ubi_devices_mutex, SINGLE_DEPTH_NESTING);
+ err = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO, 0, 0, false);
+ mutex_unlock(&ubi_devices_mutex);
if (err < 0)
- return err;
-
- err = misc_register(&ubi_ctrl_cdev);
- if (err) {
- pr_err("UBI error: cannot register device\n");
- goto out;
- }
+ __put_mtd_device(mtd);
+}
- ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
- sizeof(struct ubi_wl_entry),
- 0, 0, NULL);
- if (!ubi_wl_entry_slab) {
- err = -ENOMEM;
- goto out_dev_unreg;
- }
+static void ubi_notify_remove(struct mtd_info *mtd)
+{
+ /* do nothing for now */
+}
- err = ubi_debugfs_init();
- if (err)
- goto out_slab;
+static struct mtd_notifier ubi_mtd_notifier = {
+ .add = ubi_notify_add,
+ .remove = ubi_notify_remove,
+};
+static int __init ubi_init_attach(void)
+{
+ int err, i, k;
/* Attach MTD devices */
for (i = 0; i < mtd_devs; i++) {
@@ -1298,25 +1299,79 @@ static int __init ubi_init(void)
}
}
+ return 0;
+
+out_detach:
+ for (k = 0; k < i; k++)
+ if (ubi_devices[k]) {
+ mutex_lock(&ubi_devices_mutex);
+ ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1);
+ mutex_unlock(&ubi_devices_mutex);
+ }
+ return err;
+}
+#ifndef CONFIG_MTD_UBI_MODULE
+late_initcall(ubi_init_attach);
+#endif
+
+static int __init ubi_init(void)
+{
+ int err;
+
+ /* Ensure that EC and VID headers have correct size */
+ BUILD_BUG_ON(sizeof(struct ubi_ec_hdr) != 64);
+ BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64);
+
+ if (mtd_devs > UBI_MAX_DEVICES) {
+ pr_err("UBI error: too many MTD devices, maximum is %d\n",
+ UBI_MAX_DEVICES);
+ return -EINVAL;
+ }
+
+ /* Create base sysfs directory and sysfs files */
+ err = class_register(&ubi_class);
+ if (err < 0)
+ return err;
+
+ err = misc_register(&ubi_ctrl_cdev);
+ if (err) {
+ pr_err("UBI error: cannot register device\n");
+ goto out;
+ }
+
+ ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
+ sizeof(struct ubi_wl_entry),
+ 0, 0, NULL);
+ if (!ubi_wl_entry_slab) {
+ err = -ENOMEM;
+ goto out_dev_unreg;
+ }
+
+ err = ubi_debugfs_init();
+ if (err)
+ goto out_slab;
+
err = ubiblock_init();
if (err) {
pr_err("UBI error: block: cannot initialize, error %d\n", err);
/* See comment above re-ubi_is_module(). */
if (ubi_is_module())
- goto out_detach;
+ goto out_slab;
+ }
+
+ register_mtd_user(&ubi_mtd_notifier);
+
+ if (ubi_is_module()) {
+ err = ubi_init_attach();
+ if (err)
+ goto out_mtd_notifier;
}
return 0;
-out_detach:
- for (k = 0; k < i; k++)
- if (ubi_devices[k]) {
- mutex_lock(&ubi_devices_mutex);
- ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1);
- mutex_unlock(&ubi_devices_mutex);
- }
- ubi_debugfs_exit();
+out_mtd_notifier:
+ unregister_mtd_user(&ubi_mtd_notifier);
out_slab:
kmem_cache_destroy(ubi_wl_entry_slab);
out_dev_unreg:
@@ -1326,13 +1381,15 @@ out:
pr_err("UBI error: cannot initialize UBI, error %d\n", err);
return err;
}
-late_initcall(ubi_init);
+device_initcall(ubi_init);
+
static void __exit ubi_exit(void)
{
int i;
ubiblock_exit();
+ unregister_mtd_user(&ubi_mtd_notifier);
for (i = 0; i < UBI_MAX_DEVICES; i++)
if (ubi_devices[i]) {

View file

@ -0,0 +1,180 @@
From 924731fbed3247e3b82b8ab17db587ee28c2e781 Mon Sep 17 00:00:00 2001
From: Daniel Golle <daniel@makrotopia.org>
Date: Tue, 19 Dec 2023 02:33:24 +0000
Subject: [PATCH 5/8] mtd: ubi: introduce pre-removal notification for UBI
volumes
Introduce a new notification type UBI_VOLUME_SHUTDOWN to inform users
that a volume is just about to be removed.
This is needed because users (such as the NVMEM subsystem) expect that
at the time their removal function is called, the parent device is
still available (for removal of sysfs nodes, for example, in the case of
NVMEM, which otherwise WARNs on volume removal).
Signed-off-by: Daniel Golle <daniel@makrotopia.org>
Signed-off-by: Richard Weinberger <richard@nod.at>
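A rough sketch of the intended consumer pattern (the handler name is
hypothetical; the NVMEM glue added later in this series is the real
in-tree user): a UBI notifier tears down anything tied to the volume's
device when UBI_VOLUME_SHUTDOWN fires, rather than waiting for
UBI_VOLUME_REMOVED, because at that point the parent device is still
registered.

static int example_ubi_notify(struct notifier_block *nb, unsigned long action,
			      void *data)
{
	struct ubi_notification *nt = data;

	pr_debug("UBI event %lu on %d:%d\n", action,
		 nt->vi.ubi_num, nt->vi.vol_id);

	switch (action) {
	case UBI_VOLUME_ADDED:
		/* allocate per-volume state here */
		break;
	case UBI_VOLUME_SHUTDOWN:
		/* the volume device is still registered: tear down users now */
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

The block would be registered with ubi_register_volume_notifier(), like
any other UBI volume notifier.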
---
drivers/mtd/ubi/build.c | 19 ++++++++++++++-----
drivers/mtd/ubi/kapi.c | 2 +-
drivers/mtd/ubi/ubi.h | 2 ++
drivers/mtd/ubi/vmt.c | 17 +++++++++++++++--
include/linux/mtd/ubi.h | 2 ++
5 files changed, 34 insertions(+), 8 deletions(-)
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -91,7 +91,7 @@ static struct ubi_device *ubi_devices[UB
/* Serializes UBI devices creations and removals */
DEFINE_MUTEX(ubi_devices_mutex);
-/* Protects @ubi_devices and @ubi->ref_count */
+/* Protects @ubi_devices, @ubi->ref_count and @ubi->is_dead */
static DEFINE_SPINLOCK(ubi_devices_lock);
/* "Show" method for files in '/<sysfs>/class/ubi/' */
@@ -259,6 +259,9 @@ struct ubi_device *ubi_get_device(int ub
spin_lock(&ubi_devices_lock);
ubi = ubi_devices[ubi_num];
+ if (ubi && ubi->is_dead)
+ ubi = NULL;
+
if (ubi) {
ubi_assert(ubi->ref_count >= 0);
ubi->ref_count += 1;
@@ -296,7 +299,7 @@ struct ubi_device *ubi_get_by_major(int
spin_lock(&ubi_devices_lock);
for (i = 0; i < UBI_MAX_DEVICES; i++) {
ubi = ubi_devices[i];
- if (ubi && MAJOR(ubi->cdev.dev) == major) {
+ if (ubi && !ubi->is_dead && MAJOR(ubi->cdev.dev) == major) {
ubi_assert(ubi->ref_count >= 0);
ubi->ref_count += 1;
get_device(&ubi->dev);
@@ -325,7 +328,7 @@ int ubi_major2num(int major)
for (i = 0; i < UBI_MAX_DEVICES; i++) {
struct ubi_device *ubi = ubi_devices[i];
- if (ubi && MAJOR(ubi->cdev.dev) == major) {
+ if (ubi && !ubi->is_dead && MAJOR(ubi->cdev.dev) == major) {
ubi_num = ubi->ubi_num;
break;
}
@@ -512,7 +515,7 @@ static void ubi_free_volumes_from(struct
int i;
for (i = from; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
- if (!ubi->volumes[i])
+ if (!ubi->volumes[i] || ubi->volumes[i]->is_dead)
continue;
ubi_eba_replace_table(ubi->volumes[i], NULL);
ubi_fastmap_destroy_checkmap(ubi->volumes[i]);
@@ -1094,7 +1097,6 @@ int ubi_detach_mtd_dev(int ubi_num, int
return -EINVAL;
spin_lock(&ubi_devices_lock);
- put_device(&ubi->dev);
ubi->ref_count -= 1;
if (ubi->ref_count) {
if (!anyway) {
@@ -1105,6 +1107,13 @@ int ubi_detach_mtd_dev(int ubi_num, int
ubi_err(ubi, "%s reference count %d, destroy anyway",
ubi->ubi_name, ubi->ref_count);
}
+ ubi->is_dead = true;
+ spin_unlock(&ubi_devices_lock);
+
+ ubi_notify_all(ubi, UBI_VOLUME_SHUTDOWN, NULL);
+
+ spin_lock(&ubi_devices_lock);
+ put_device(&ubi->dev);
ubi_devices[ubi_num] = NULL;
spin_unlock(&ubi_devices_lock);
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -152,7 +152,7 @@ struct ubi_volume_desc *ubi_open_volume(
spin_lock(&ubi->volumes_lock);
vol = ubi->volumes[vol_id];
- if (!vol)
+ if (!vol || vol->is_dead)
goto out_unlock;
err = -EBUSY;
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -345,6 +345,7 @@ struct ubi_volume {
int writers;
int exclusive;
int metaonly;
+ bool is_dead;
int reserved_pebs;
int vol_type;
@@ -564,6 +565,7 @@ struct ubi_device {
spinlock_t volumes_lock;
int ref_count;
int image_seq;
+ bool is_dead;
int rsvd_pebs;
int avail_pebs;
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -59,7 +59,7 @@ static ssize_t vol_attribute_show(struct
struct ubi_device *ubi = vol->ubi;
spin_lock(&ubi->volumes_lock);
- if (!ubi->volumes[vol->vol_id]) {
+ if (!ubi->volumes[vol->vol_id] || ubi->volumes[vol->vol_id]->is_dead) {
spin_unlock(&ubi->volumes_lock);
return -ENODEV;
}
@@ -189,7 +189,7 @@ int ubi_create_volume(struct ubi_device
/* Ensure that the name is unique */
for (i = 0; i < ubi->vtbl_slots; i++)
- if (ubi->volumes[i] &&
+ if (ubi->volumes[i] && !ubi->volumes[i]->is_dead &&
ubi->volumes[i]->name_len == req->name_len &&
!strcmp(ubi->volumes[i]->name, req->name)) {
ubi_err(ubi, "volume \"%s\" exists (ID %d)",
@@ -352,6 +352,19 @@ int ubi_remove_volume(struct ubi_volume_
err = -EBUSY;
goto out_unlock;
}
+
+ /*
+ * Mark volume as dead at this point to prevent that anyone
+ * can take a reference to the volume from now on.
+ * This is necessary as we have to release the spinlock before
+ * calling ubi_volume_notify.
+ */
+ vol->is_dead = true;
+ spin_unlock(&ubi->volumes_lock);
+
+ ubi_volume_notify(ubi, vol, UBI_VOLUME_SHUTDOWN);
+
+ spin_lock(&ubi->volumes_lock);
ubi->volumes[vol_id] = NULL;
spin_unlock(&ubi->volumes_lock);
--- a/include/linux/mtd/ubi.h
+++ b/include/linux/mtd/ubi.h
@@ -192,6 +192,7 @@ struct ubi_device_info {
* or a volume was removed)
* @UBI_VOLUME_RESIZED: a volume has been re-sized
* @UBI_VOLUME_RENAMED: a volume has been re-named
+ * @UBI_VOLUME_SHUTDOWN: a volume is going to be removed, shut down users
* @UBI_VOLUME_UPDATED: data has been written to a volume
*
* These constants define which type of event has happened when a volume
@@ -202,6 +203,7 @@ enum {
UBI_VOLUME_REMOVED,
UBI_VOLUME_RESIZED,
UBI_VOLUME_RENAMED,
+ UBI_VOLUME_SHUTDOWN,
UBI_VOLUME_UPDATED,
};

View file

@ -0,0 +1,66 @@
From 1c54542170819e36baa43c17ca55bb3d7da89a53 Mon Sep 17 00:00:00 2001
From: Daniel Golle <daniel@makrotopia.org>
Date: Tue, 19 Dec 2023 02:33:38 +0000
Subject: [PATCH 6/8] mtd: ubi: populate ubi volume fwnode
Look for the 'volumes' subnode of an MTD partition attached to a UBI
device and attach matching child nodes to UBI volumes.
This allows UBI volumes to be referenced in device tree, e.g. for use
as NVMEM providers.
Signed-off-by: Daniel Golle <daniel@makrotopia.org>
Signed-off-by: Richard Weinberger <richard@nod.at>
---
drivers/mtd/ubi/vmt.c | 27 +++++++++++++++++++++++++++
1 file changed, 27 insertions(+)
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -124,6 +124,31 @@ static void vol_release(struct device *d
kfree(vol);
}
+static struct fwnode_handle *find_volume_fwnode(struct ubi_volume *vol)
+{
+ struct fwnode_handle *fw_vols, *fw_vol;
+ const char *volname;
+ u32 volid;
+
+ fw_vols = device_get_named_child_node(vol->dev.parent->parent, "volumes");
+ if (!fw_vols)
+ return NULL;
+
+ fwnode_for_each_child_node(fw_vols, fw_vol) {
+ if (!fwnode_property_read_string(fw_vol, "volname", &volname) &&
+ strncmp(volname, vol->name, vol->name_len))
+ continue;
+
+ if (!fwnode_property_read_u32(fw_vol, "volid", &volid) &&
+ vol->vol_id != volid)
+ continue;
+
+ return fw_vol;
+ }
+
+ return NULL;
+}
+
/**
* ubi_create_volume - create volume.
* @ubi: UBI device description object
@@ -223,6 +248,7 @@ int ubi_create_volume(struct ubi_device
vol->name_len = req->name_len;
memcpy(vol->name, req->name, vol->name_len);
vol->ubi = ubi;
+ device_set_node(&vol->dev, find_volume_fwnode(vol));
/*
* Finish all pending erases because there may be some LEBs belonging
@@ -605,6 +631,7 @@ int ubi_add_volume(struct ubi_device *ub
vol->dev.class = &ubi_class;
vol->dev.groups = volume_dev_groups;
dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id);
+ device_set_node(&vol->dev, find_volume_fwnode(vol));
err = device_register(&vol->dev);
if (err) {
cdev_del(&vol->cdev);

View file

@ -0,0 +1,244 @@
From 15fc7dc926c91c871f6c0305b2938dbdeb14203b Mon Sep 17 00:00:00 2001
From: Daniel Golle <daniel@makrotopia.org>
Date: Tue, 19 Dec 2023 02:33:48 +0000
Subject: [PATCH 7/8] mtd: ubi: provide NVMEM layer over UBI volumes
In an ideal world we would like UBI to be used wherever possible on a
NAND chip. And with UBI support in ARM Trusted Firmware and U-Boot it
is possible to achieve an (almost-)all-UBI flash layout. Hence the need
for a way to also use UBI volumes to store board-level constants, such
as MAC addresses and calibration data of wireless interfaces.
Add UBI volume NVMEM driver module exposing UBI volumes as NVMEM
providers. Allow UBI devices to have a "volumes" firmware subnode with
volumes which may be compatible with "nvmem-cells".
Access to UBI volumes via the NVMEM interface at this point is
read-only, and it is slow, opening and closing the UBI volume for each
access due to limitations of the NVMEM provider API.
Signed-off-by: Daniel Golle <daniel@makrotopia.org>
Signed-off-by: Richard Weinberger <richard@nod.at>
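A hedged sketch of the consumer side (the device wiring and the
"mac-address" cell name are assumptions, not part of this patch): once a
cell is described under the volume's nvmem-layout node, another driver
reads it through the ordinary NVMEM consumer API, never knowing that the
backing store is a UBI volume.

#include <linux/nvmem-consumer.h>
#include <linux/slab.h>

static int example_read_mac(struct device *dev, u8 mac[6])
{
	struct nvmem_cell *cell;
	size_t len;
	void *buf;

	cell = nvmem_cell_get(dev, "mac-address");
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);	/* caller must kfree() */
	nvmem_cell_put(cell);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	if (len == 6)
		memcpy(mac, buf, 6);
	kfree(buf);

	return len == 6 ? 0 : -EINVAL;
}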
---
drivers/mtd/ubi/Kconfig | 12 +++
drivers/mtd/ubi/Makefile | 1 +
drivers/mtd/ubi/nvmem.c | 188 +++++++++++++++++++++++++++++++++++++++
3 files changed, 201 insertions(+)
create mode 100644 drivers/mtd/ubi/nvmem.c
--- a/drivers/mtd/ubi/Kconfig
+++ b/drivers/mtd/ubi/Kconfig
@@ -104,4 +104,16 @@ config MTD_UBI_BLOCK
If in doubt, say "N".
+config MTD_UBI_NVMEM
+ tristate "UBI virtual NVMEM"
+ default n
+ depends on NVMEM
+ help
+ This option enables an additional driver exposing UBI volumes as NVMEM
+ providers, intended for platforms where UBI is part of the firmware
+ specification and used to store also e.g. MAC addresses or board-
+ specific Wi-Fi calibration data.
+
+ If in doubt, say "N".
+
endif # MTD_UBI
--- a/drivers/mtd/ubi/Makefile
+++ b/drivers/mtd/ubi/Makefile
@@ -7,3 +7,4 @@ ubi-$(CONFIG_MTD_UBI_FASTMAP) += fastmap
ubi-$(CONFIG_MTD_UBI_BLOCK) += block.o
obj-$(CONFIG_MTD_UBI_GLUEBI) += gluebi.o
+obj-$(CONFIG_MTD_UBI_NVMEM) += nvmem.o
--- /dev/null
+++ b/drivers/mtd/ubi/nvmem.c
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2023 Daniel Golle <daniel@makrotopia.org>
+ */
+
+/* UBI NVMEM provider */
+#include "ubi.h"
+#include <linux/nvmem-provider.h>
+#include <asm/div64.h>
+
+/* List of all NVMEM devices */
+static LIST_HEAD(nvmem_devices);
+static DEFINE_MUTEX(devices_mutex);
+
+struct ubi_nvmem {
+ struct nvmem_device *nvmem;
+ int ubi_num;
+ int vol_id;
+ int usable_leb_size;
+ struct list_head list;
+};
+
+static int ubi_nvmem_reg_read(void *priv, unsigned int from,
+ void *val, size_t bytes)
+{
+ int err = 0, lnum = from, offs, bytes_left = bytes, to_read;
+ struct ubi_nvmem *unv = priv;
+ struct ubi_volume_desc *desc;
+
+ desc = ubi_open_volume(unv->ubi_num, unv->vol_id, UBI_READONLY);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ offs = do_div(lnum, unv->usable_leb_size);
+ while (bytes_left) {
+ to_read = unv->usable_leb_size - offs;
+
+ if (to_read > bytes_left)
+ to_read = bytes_left;
+
+ err = ubi_read(desc, lnum, val, offs, to_read);
+ if (err)
+ break;
+
+ lnum += 1;
+ offs = 0;
+ bytes_left -= to_read;
+ val += to_read;
+ }
+ ubi_close_volume(desc);
+
+ if (err)
+ return err;
+
+ return bytes_left == 0 ? 0 : -EIO;
+}
+
+static int ubi_nvmem_add(struct ubi_volume_info *vi)
+{
+ struct device_node *np = dev_of_node(vi->dev);
+ struct nvmem_config config = {};
+ struct ubi_nvmem *unv;
+ int ret;
+
+ if (!np)
+ return 0;
+
+ if (!of_get_child_by_name(np, "nvmem-layout"))
+ return 0;
+
+ if (WARN_ON_ONCE(vi->usable_leb_size <= 0) ||
+ WARN_ON_ONCE(vi->size <= 0))
+ return -EINVAL;
+
+ unv = kzalloc(sizeof(struct ubi_nvmem), GFP_KERNEL);
+ if (!unv)
+ return -ENOMEM;
+
+ config.id = NVMEM_DEVID_NONE;
+ config.dev = vi->dev;
+ config.name = dev_name(vi->dev);
+ config.owner = THIS_MODULE;
+ config.priv = unv;
+ config.reg_read = ubi_nvmem_reg_read;
+ config.size = vi->usable_leb_size * vi->size;
+ config.word_size = 1;
+ config.stride = 1;
+ config.read_only = true;
+ config.root_only = true;
+ config.ignore_wp = true;
+ config.of_node = np;
+
+ unv->ubi_num = vi->ubi_num;
+ unv->vol_id = vi->vol_id;
+ unv->usable_leb_size = vi->usable_leb_size;
+ unv->nvmem = nvmem_register(&config);
+ if (IS_ERR(unv->nvmem)) {
+ ret = dev_err_probe(vi->dev, PTR_ERR(unv->nvmem),
+ "Failed to register NVMEM device\n");
+ kfree(unv);
+ return ret;
+ }
+
+ mutex_lock(&devices_mutex);
+ list_add_tail(&unv->list, &nvmem_devices);
+ mutex_unlock(&devices_mutex);
+
+ return 0;
+}
+
+static void ubi_nvmem_remove(struct ubi_volume_info *vi)
+{
+ struct ubi_nvmem *unv_c, *unv = NULL;
+
+ mutex_lock(&devices_mutex);
+ list_for_each_entry(unv_c, &nvmem_devices, list)
+ if (unv_c->ubi_num == vi->ubi_num && unv_c->vol_id == vi->vol_id) {
+ unv = unv_c;
+ break;
+ }
+
+ if (!unv) {
+ mutex_unlock(&devices_mutex);
+ return;
+ }
+
+ list_del(&unv->list);
+ mutex_unlock(&devices_mutex);
+ nvmem_unregister(unv->nvmem);
+ kfree(unv);
+}
+
+/**
+ * nvmem_notify - UBI notification handler.
+ * @nb: registered notifier block
+ * @l: notification type
+ * @ns_ptr: pointer to the &struct ubi_notification object
+ */
+static int nvmem_notify(struct notifier_block *nb, unsigned long l,
+ void *ns_ptr)
+{
+ struct ubi_notification *nt = ns_ptr;
+
+ switch (l) {
+ case UBI_VOLUME_RESIZED:
+ ubi_nvmem_remove(&nt->vi);
+ fallthrough;
+ case UBI_VOLUME_ADDED:
+ ubi_nvmem_add(&nt->vi);
+ break;
+ case UBI_VOLUME_SHUTDOWN:
+ ubi_nvmem_remove(&nt->vi);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block nvmem_notifier = {
+ .notifier_call = nvmem_notify,
+};
+
+static int __init ubi_nvmem_init(void)
+{
+ return ubi_register_volume_notifier(&nvmem_notifier, 0);
+}
+
+static void __exit ubi_nvmem_exit(void)
+{
+ struct ubi_nvmem *unv, *tmp;
+
+ mutex_lock(&devices_mutex);
+ list_for_each_entry_safe(unv, tmp, &nvmem_devices, list) {
+ nvmem_unregister(unv->nvmem);
+ list_del(&unv->list);
+ kfree(unv);
+ }
+ mutex_unlock(&devices_mutex);
+
+ ubi_unregister_volume_notifier(&nvmem_notifier);
+}
+
+module_init(ubi_nvmem_init);
+module_exit(ubi_nvmem_exit);
+MODULE_DESCRIPTION("NVMEM layer over UBI volumes");
+MODULE_AUTHOR("Daniel Golle");
+MODULE_LICENSE("GPL");

View file

@ -0,0 +1,34 @@
From 04231c61dcd51db0f12061e49bb761b197109f2f Mon Sep 17 00:00:00 2001
From: Daniel Golle <daniel@makrotopia.org>
Date: Thu, 29 Feb 2024 03:47:24 +0000
Subject: [PATCH 8/8] mtd: ubi: fix NVMEM over UBI volumes on 32-bit systems
A compiler warning related to sizeof(int) != 8 when calling do_div()
is triggered when building on 32-bit platforms.
Address this by using integer types having a well-defined size.
Fixes: 3ce485803da1 ("mtd: ubi: provide NVMEM layer over UBI volumes")
Signed-off-by: Daniel Golle <daniel@makrotopia.org>
Reviewed-by: Zhihao Cheng <chengzhihao1@huawei.com>
Tested-by: Randy Dunlap <rdunlap@infradead.org>
Signed-off-by: Richard Weinberger <richard@nod.at>
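For context, a minimal sketch of the constraint being fixed (the helper
name is hypothetical): do_div() updates its 64-bit dividend in place and
returns the remainder, so the dividend must genuinely be a u64 for the
macro to expand correctly on 32-bit builds.

#include <asm/div64.h>

/* Split a byte offset into an LEB index (updated in place) and an in-LEB offset. */
static u32 example_split_offset(u64 *lnum, u32 usable_leb_size)
{
	return do_div(*lnum, usable_leb_size);
}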
---
drivers/mtd/ubi/nvmem.c | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
--- a/drivers/mtd/ubi/nvmem.c
+++ b/drivers/mtd/ubi/nvmem.c
@@ -23,9 +23,12 @@ struct ubi_nvmem {
static int ubi_nvmem_reg_read(void *priv, unsigned int from,
void *val, size_t bytes)
{
- int err = 0, lnum = from, offs, bytes_left = bytes, to_read;
+ size_t to_read, bytes_left = bytes;
struct ubi_nvmem *unv = priv;
struct ubi_volume_desc *desc;
+ uint32_t offs;
+ uint64_t lnum = from;
+ int err = 0;
desc = ubi_open_volume(unv->ubi_num, unv->vol_id, UBI_READONLY);
if (IS_ERR(desc))

View file

@ -0,0 +1,53 @@
From 03cb793b26834ddca170ba87057c8f883772dd45 Mon Sep 17 00:00:00 2001
From: Christian Marangi <ansuelsmth@gmail.com>
Date: Thu, 3 Oct 2024 00:11:41 +0200
Subject: [PATCH 1/5] block: add support for defining read-only partitions
Add support for defining read-only partitions and complete the support
for it in the cmdline partition parser, where the additional "ro" flag
after a partition is already scanned but never actually applied.
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20241002221306.4403-2-ansuelsmth@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
block/blk.h | 1 +
block/partitions/cmdline.c | 3 +++
block/partitions/core.c | 3 +++
3 files changed, 7 insertions(+)
--- a/block/blk.h
+++ b/block/blk.h
@@ -424,6 +424,7 @@ void blk_free_ext_minor(unsigned int min
#define ADDPART_FLAG_NONE 0
#define ADDPART_FLAG_RAID 1
#define ADDPART_FLAG_WHOLEDISK 2
+#define ADDPART_FLAG_READONLY 4
int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
sector_t length);
int bdev_del_partition(struct gendisk *disk, int partno);
--- a/block/partitions/cmdline.c
+++ b/block/partitions/cmdline.c
@@ -237,6 +237,9 @@ static int add_part(int slot, struct cmd
put_partition(state, slot, subpart->from >> 9,
subpart->size >> 9);
+ if (subpart->flags & PF_RDONLY)
+ state->parts[slot].flags |= ADDPART_FLAG_READONLY;
+
info = &state->parts[slot].info;
strscpy(info->volname, subpart->name, sizeof(info->volname));
--- a/block/partitions/core.c
+++ b/block/partitions/core.c
@@ -392,6 +392,9 @@ static struct block_device *add_partitio
goto out_del;
}
+ if (flags & ADDPART_FLAG_READONLY)
+ bdev->bd_read_only = true;
+
/* everything is up and running, commence */
err = xa_insert(&disk->part_tbl, partno, bdev, GFP_KERNEL);
if (err)

View file

@ -0,0 +1,94 @@
From e5f587242b6072ffab4f4a084a459a59f3035873 Mon Sep 17 00:00:00 2001
From: Christian Marangi <ansuelsmth@gmail.com>
Date: Thu, 3 Oct 2024 00:11:43 +0200
Subject: [PATCH 3/5] block: introduce add_disk_fwnode()
Introduce add_disk_fwnode() as a replacement for device_add_disk() that
permits passing and attaching a fwnode to the disk device.
This variant can be useful for eMMC devices that might have the partition
table for the disk defined in DT. A parser can later make use of the
attached fwnode to parse the related table and initialize the hardcoded
partitions for the disk.
device_add_disk() is converted to a simple wrapper of add_disk_fwnode()
with the fwnode entry set as NULL.
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20241002221306.4403-4-ansuelsmth@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
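A hedged caller sketch (the driver context and the "partitions"
child-node name are assumptions; the mmc change later in this series is
the real user): a block driver looks up a firmware node describing the
disk and hands it to the block layer, and a NULL fwnode simply falls back
to the old device_add_disk() behaviour.

#include <linux/blkdev.h>
#include <linux/property.h>

static int example_register_disk(struct device *parent, struct gendisk *disk,
				 const struct attribute_group **groups)
{
	/* May be NULL; add_disk_fwnode() only attaches it when present */
	struct fwnode_handle *fwnode =
		device_get_named_child_node(parent, "partitions");

	return add_disk_fwnode(parent, disk, groups, fwnode);
}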
---
block/genhd.c | 28 ++++++++++++++++++++++++----
include/linux/blkdev.h | 3 +++
2 files changed, 27 insertions(+), 4 deletions(-)
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -383,16 +383,18 @@ int disk_scan_partitions(struct gendisk
}
/**
- * device_add_disk - add disk information to kernel list
+ * add_disk_fwnode - add disk information to kernel list with fwnode
* @parent: parent device for the disk
* @disk: per-device partitioning information
* @groups: Additional per-device sysfs groups
+ * @fwnode: attached disk fwnode
*
* This function registers the partitioning information in @disk
- * with the kernel.
+ * with the kernel. Also attach a fwnode to the disk device.
*/
-int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
- const struct attribute_group **groups)
+int __must_check add_disk_fwnode(struct device *parent, struct gendisk *disk,
+ const struct attribute_group **groups,
+ struct fwnode_handle *fwnode)
{
struct device *ddev = disk_to_dev(disk);
@@ -451,6 +453,8 @@ int __must_check device_add_disk(struct
ddev->parent = parent;
ddev->groups = groups;
dev_set_name(ddev, "%s", disk->disk_name);
+ if (fwnode)
+ device_set_node(ddev, fwnode);
if (!(disk->flags & GENHD_FL_HIDDEN))
ddev->devt = MKDEV(disk->major, disk->first_minor);
ret = device_add(ddev);
@@ -552,6 +556,22 @@ out_exit_elevator:
elevator_exit(disk->queue);
return ret;
}
+EXPORT_SYMBOL_GPL(add_disk_fwnode);
+
+/**
+ * device_add_disk - add disk information to kernel list
+ * @parent: parent device for the disk
+ * @disk: per-device partitioning information
+ * @groups: Additional per-device sysfs groups
+ *
+ * This function registers the partitioning information in @disk
+ * with the kernel.
+ */
+int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
+ const struct attribute_group **groups)
+{
+ return add_disk_fwnode(parent, disk, groups, NULL);
+}
EXPORT_SYMBOL(device_add_disk);
static void blk_report_disk_dead(struct gendisk *disk, bool surprise)
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -741,6 +741,9 @@ static inline unsigned int blk_queue_dep
#define for_each_bio(_bio) \
for (; _bio; _bio = _bio->bi_next)
+int __must_check add_disk_fwnode(struct device *parent, struct gendisk *disk,
+ const struct attribute_group **groups,
+ struct fwnode_handle *fwnode);
int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
const struct attribute_group **groups);
static inline int __must_check add_disk(struct gendisk *disk)

View file

@ -0,0 +1,104 @@
From 45ff6c340ddfc2dade74d5b7a8962c778ab7042c Mon Sep 17 00:00:00 2001
From: Christian Marangi <ansuelsmth@gmail.com>
Date: Thu, 3 Oct 2024 00:11:44 +0200
Subject: [PATCH 4/5] mmc: block: attach partitions fwnode if found in mmc-card
Attach partitions fwnode if found in mmc-card and register disk with it.
This permits the block partition code to reference the node and register
a partition table defined in DT, for the special case of embedded devices
that don't have a partition table flashed but have a hardcoded partition
table passed from the system.
JEDEC BOOT partitions boot0/boot1 are supported, but in DT they are
referred to by the JEDEC names boot1 and boot2 to better adhere to the
documentation.
JEDEC GP partitions gp0/1/2/3 are likewise supported, but in DT they are
referred to by the JEDEC names gp1/2/3/4 to better adhere to the
documentation.
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
Link: https://lore.kernel.org/r/20241002221306.4403-5-ansuelsmth@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
drivers/mmc/core/block.c | 55 +++++++++++++++++++++++++++++++++++++++-
1 file changed, 54 insertions(+), 1 deletion(-)
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -2455,6 +2455,56 @@ static inline int mmc_blk_readonly(struc
!(card->csd.cmdclass & CCC_BLOCK_WRITE);
}
+/*
+ * Search for a declared partitions node for the disk in mmc-card related node.
+ *
+ * This is to permit support for partition table defined in DT in special case
+ * where a partition table is not written in the disk and is expected to be
+ * passed from the running system.
+ *
+ * For the user disk, "partitions" node is searched.
+ * For the special HW disk, "partitions-" node with the appended name is used
+ * following this conversion table (to adhere to JEDEC naming)
+ * - boot0 -> partitions-boot1
+ * - boot1 -> partitions-boot2
+ * - gp0 -> partitions-gp1
+ * - gp1 -> partitions-gp2
+ * - gp2 -> partitions-gp3
+ * - gp3 -> partitions-gp4
+ */
+static struct fwnode_handle *mmc_blk_get_partitions_node(struct device *mmc_dev,
+ const char *subname)
+{
+ const char *node_name = "partitions";
+
+ if (subname) {
+ mmc_dev = mmc_dev->parent;
+
+ /*
+ * Check if we are allocating a BOOT disk boot0/1 disk.
+ * In DT we use the JEDEC naming boot1/2.
+ */
+ if (!strcmp(subname, "boot0"))
+ node_name = "partitions-boot1";
+ if (!strcmp(subname, "boot1"))
+ node_name = "partitions-boot2";
+ /*
+ * Check if we are allocating a GP disk gp0/1/2/3 disk.
+ * In DT we use the JEDEC naming gp1/2/3/4.
+ */
+ if (!strcmp(subname, "gp0"))
+ node_name = "partitions-gp1";
+ if (!strcmp(subname, "gp1"))
+ node_name = "partitions-gp2";
+ if (!strcmp(subname, "gp2"))
+ node_name = "partitions-gp3";
+ if (!strcmp(subname, "gp3"))
+ node_name = "partitions-gp4";
+ }
+
+ return device_get_named_child_node(mmc_dev, node_name);
+}
+
static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
struct device *parent,
sector_t size,
@@ -2463,6 +2513,7 @@ static struct mmc_blk_data *mmc_blk_allo
int area_type,
unsigned int part_type)
{
+ struct fwnode_handle *disk_fwnode;
struct mmc_blk_data *md;
int devidx, ret;
char cap_str[10];
@@ -2568,7 +2619,9 @@ static struct mmc_blk_data *mmc_blk_allo
/* used in ->open, must be set before add_disk: */
if (area_type == MMC_BLK_DATA_AREA_MAIN)
dev_set_drvdata(&card->dev, md);
- ret = device_add_disk(md->parent, md->disk, mmc_disk_attr_groups);
+ disk_fwnode = mmc_blk_get_partitions_node(parent, subname);
+ ret = add_disk_fwnode(md->parent, md->disk, mmc_disk_attr_groups,
+ disk_fwnode);
if (ret)
goto err_put_disk;
return md;

View file

@ -0,0 +1,200 @@
From 884555b557e5e6d41c866e2cd8d7b32f50ec974b Mon Sep 17 00:00:00 2001
From: Christian Marangi <ansuelsmth@gmail.com>
Date: Thu, 3 Oct 2024 00:11:45 +0200
Subject: [PATCH 5/5] block: add support for partition table defined in OF
Add support for partition table defined in Device Tree. Similar to how
it's done with MTD, add support for defining a fixed partition table in
device tree.
A common scenario for this is embedded devices with fixed block storage
(eMMC) that have no MBR or GPT partition table, to save storage space.
The bootloader accesses the block device using absolute data addresses.
This completes the functionality alongside the equivalent implementation
that provides the partition table via bootargs, for cases where the
bootargs can't be modified and tweaking the Device Tree is the only
solution to have a usable partition table.
The implementation follows the fixed-partitions parser used on MTD
devices: a "partitions" node is expected to be declared with the
"fixed-partitions" compatible in the OF node of the disk device
(mmc-card for eMMC, for example), and each child node declares a label
and a reg with offset and size. If label is not declared, the node name
is used as a fallback. Optionally, the read-only property can be
declared to flag the partition as read-only.
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20241002221306.4403-6-ansuelsmth@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
block/partitions/Kconfig | 9 ++++
block/partitions/Makefile | 1 +
block/partitions/check.h | 1 +
block/partitions/core.c | 3 ++
block/partitions/of.c | 110 ++++++++++++++++++++++++++++++++++++++
5 files changed, 124 insertions(+)
create mode 100644 block/partitions/of.c
--- a/block/partitions/Kconfig
+++ b/block/partitions/Kconfig
@@ -270,4 +270,13 @@ config CMDLINE_PARTITION
Say Y here if you want to read the partition table from bootargs.
The format for the command line is just like mtdparts.
+config OF_PARTITION
+ bool "Device Tree partition support" if PARTITION_ADVANCED
+ depends on OF
+ help
+ Say Y here if you want to enable support for partition table
+ defined in Device Tree. (mainly for eMMC)
+ The format for the device tree node is just like MTD fixed-partition
+ schema.
+
endmenu
--- a/block/partitions/Makefile
+++ b/block/partitions/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_CMDLINE_PARTITION) += cmdli
obj-$(CONFIG_MAC_PARTITION) += mac.o
obj-$(CONFIG_LDM_PARTITION) += ldm.o
obj-$(CONFIG_MSDOS_PARTITION) += msdos.o
+obj-$(CONFIG_OF_PARTITION) += of.o
obj-$(CONFIG_OSF_PARTITION) += osf.o
obj-$(CONFIG_SGI_PARTITION) += sgi.o
obj-$(CONFIG_SUN_PARTITION) += sun.o
--- a/block/partitions/check.h
+++ b/block/partitions/check.h
@@ -62,6 +62,7 @@ int karma_partition(struct parsed_partit
int ldm_partition(struct parsed_partitions *state);
int mac_partition(struct parsed_partitions *state);
int msdos_partition(struct parsed_partitions *state);
+int of_partition(struct parsed_partitions *state);
int osf_partition(struct parsed_partitions *state);
int sgi_partition(struct parsed_partitions *state);
int sun_partition(struct parsed_partitions *state);
--- a/block/partitions/core.c
+++ b/block/partitions/core.c
@@ -43,6 +43,9 @@ static int (*const check_part[])(struct
#ifdef CONFIG_CMDLINE_PARTITION
cmdline_partition,
#endif
+#ifdef CONFIG_OF_PARTITION
+ of_partition, /* cmdline have priority to OF */
+#endif
#ifdef CONFIG_EFI_PARTITION
efi_partition, /* this must come before msdos */
#endif
--- /dev/null
+++ b/block/partitions/of.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/blkdev.h>
+#include <linux/major.h>
+#include <linux/of.h>
+#include <linux/string.h>
+#include "check.h"
+
+static int validate_of_partition(struct device_node *np, int slot)
+{
+ u64 offset, size;
+ int len;
+
+ const __be32 *reg = of_get_property(np, "reg", &len);
+ int a_cells = of_n_addr_cells(np);
+ int s_cells = of_n_size_cells(np);
+
+ /* Make sure reg len match the expected addr and size cells */
+ if (len / sizeof(*reg) != a_cells + s_cells)
+ return -EINVAL;
+
+ /* Validate offset conversion from bytes to sectors */
+ offset = of_read_number(reg, a_cells);
+ if (offset % SECTOR_SIZE)
+ return -EINVAL;
+
+ /* Validate size conversion from bytes to sectors */
+ size = of_read_number(reg + a_cells, s_cells);
+ if (!size || size % SECTOR_SIZE)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void add_of_partition(struct parsed_partitions *state, int slot,
+ struct device_node *np)
+{
+ struct partition_meta_info *info;
+ char tmp[sizeof(info->volname) + 4];
+ const char *partname;
+ int len;
+
+ const __be32 *reg = of_get_property(np, "reg", &len);
+ int a_cells = of_n_addr_cells(np);
+ int s_cells = of_n_size_cells(np);
+
+ /* Convert bytes to sector size */
+ u64 offset = of_read_number(reg, a_cells) / SECTOR_SIZE;
+ u64 size = of_read_number(reg + a_cells, s_cells) / SECTOR_SIZE;
+
+ put_partition(state, slot, offset, size);
+
+ if (of_property_read_bool(np, "read-only"))
+ state->parts[slot].flags |= ADDPART_FLAG_READONLY;
+
+ /*
+ * Follow MTD label logic, search for label property,
+ * fallback to node name if not found.
+ */
+ info = &state->parts[slot].info;
+ partname = of_get_property(np, "label", &len);
+ if (!partname)
+ partname = of_get_property(np, "name", &len);
+ strscpy(info->volname, partname, sizeof(info->volname));
+
+ snprintf(tmp, sizeof(tmp), "(%s)", info->volname);
+ strlcat(state->pp_buf, tmp, PAGE_SIZE);
+}
+
+int of_partition(struct parsed_partitions *state)
+{
+ struct device *ddev = disk_to_dev(state->disk);
+ struct device_node *np;
+ int slot;
+
+ struct device_node *partitions_np = of_node_get(ddev->of_node);
+
+ if (!partitions_np ||
+ !of_device_is_compatible(partitions_np, "fixed-partitions"))
+ return 0;
+
+ slot = 1;
+ /* Validate partition offset and size */
+ for_each_child_of_node(partitions_np, np) {
+ if (validate_of_partition(np, slot)) {
+ of_node_put(np);
+ of_node_put(partitions_np);
+
+ return -1;
+ }
+
+ slot++;
+ }
+
+ slot = 1;
+ for_each_child_of_node(partitions_np, np) {
+ if (slot >= state->limit) {
+ of_node_put(np);
+ break;
+ }
+
+ add_of_partition(state, slot, np);
+
+ slot++;
+ }
+
+ strlcat(state->pp_buf, "\n", PAGE_SIZE);
+
+ return 1;
+}

View file

@ -0,0 +1,146 @@
From 49c8e854869d673df8452f24dfa8989cd0f615a8 Mon Sep 17 00:00:00 2001
From: Martin Kurbanov <mmkurbanov@salutedevices.com>
Date: Mon, 2 Oct 2023 17:04:58 +0300
Subject: [PATCH] mtd: spinand: add support for FORESEE F35SQA002G
Add support for FORESEE F35SQA002G SPI NAND.
Datasheet:
https://www.longsys.com/uploads/LM-00006FORESEEF35SQA002GDatasheet_1650183701.pdf
Signed-off-by: Martin Kurbanov <mmkurbanov@salutedevices.com>
Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
Link: https://lore.kernel.org/linux-mtd/20231002140458.147605-1-mmkurbanov@salutedevices.com
---
drivers/mtd/nand/spi/Makefile | 2 +-
drivers/mtd/nand/spi/core.c | 1 +
drivers/mtd/nand/spi/foresee.c | 95 ++++++++++++++++++++++++++++++++++
include/linux/mtd/spinand.h | 1 +
4 files changed, 98 insertions(+), 1 deletion(-)
create mode 100644 drivers/mtd/nand/spi/foresee.c
--- a/drivers/mtd/nand/spi/Makefile
+++ b/drivers/mtd/nand/spi/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-spinand-objs := core.o alliancememory.o ato.o esmt.o gigadevice.o macronix.o
+spinand-objs := core.o alliancememory.o ato.o esmt.o foresee.o gigadevice.o macronix.o
spinand-objs += micron.o paragon.o toshiba.o winbond.o xtx.o
obj-$(CONFIG_MTD_SPI_NAND) += spinand.o
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -940,6 +940,7 @@ static const struct spinand_manufacturer
&alliancememory_spinand_manufacturer,
&ato_spinand_manufacturer,
&esmt_c8_spinand_manufacturer,
+ &foresee_spinand_manufacturer,
&gigadevice_spinand_manufacturer,
&macronix_spinand_manufacturer,
&micron_spinand_manufacturer,
--- /dev/null
+++ b/drivers/mtd/nand/spi/foresee.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2023, SberDevices. All Rights Reserved.
+ *
+ * Author: Martin Kurbanov <mmkurbanov@salutedevices.com>
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mtd/spinand.h>
+
+#define SPINAND_MFR_FORESEE 0xCD
+
+static SPINAND_OP_VARIANTS(read_cache_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+
+static SPINAND_OP_VARIANTS(write_cache_variants,
+ SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_variants,
+ SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD(false, 0, NULL, 0));
+
+static int f35sqa002g_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ return -ERANGE;
+}
+
+static int f35sqa002g_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section)
+ return -ERANGE;
+
+ /* Reserve 2 bytes for the BBM. */
+ region->offset = 2;
+ region->length = 62;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops f35sqa002g_ooblayout = {
+ .ecc = f35sqa002g_ooblayout_ecc,
+ .free = f35sqa002g_ooblayout_free,
+};
+
+static int f35sqa002g_ecc_get_status(struct spinand_device *spinand, u8 status)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+
+ switch (status & STATUS_ECC_MASK) {
+ case STATUS_ECC_NO_BITFLIPS:
+ return 0;
+
+ case STATUS_ECC_HAS_BITFLIPS:
+ return nanddev_get_ecc_conf(nand)->strength;
+
+ default:
+ break;
+ }
+
+ /* More than 1-bit error was detected in one or more sectors and
+ * cannot be corrected.
+ */
+ return -EBADMSG;
+}
+
+static const struct spinand_info foresee_spinand_table[] = {
+ SPINAND_INFO("F35SQA002G",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x72, 0x72),
+ NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(1, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&f35sqa002g_ooblayout,
+ f35sqa002g_ecc_get_status)),
+};
+
+static const struct spinand_manufacturer_ops foresee_spinand_manuf_ops = {
+};
+
+const struct spinand_manufacturer foresee_spinand_manufacturer = {
+ .id = SPINAND_MFR_FORESEE,
+ .name = "FORESEE",
+ .chips = foresee_spinand_table,
+ .nchips = ARRAY_SIZE(foresee_spinand_table),
+ .ops = &foresee_spinand_manuf_ops,
+};
--- a/include/linux/mtd/spinand.h
+++ b/include/linux/mtd/spinand.h
@@ -263,6 +263,7 @@ struct spinand_manufacturer {
extern const struct spinand_manufacturer alliancememory_spinand_manufacturer;
extern const struct spinand_manufacturer ato_spinand_manufacturer;
extern const struct spinand_manufacturer esmt_c8_spinand_manufacturer;
+extern const struct spinand_manufacturer foresee_spinand_manufacturer;
extern const struct spinand_manufacturer gigadevice_spinand_manufacturer;
extern const struct spinand_manufacturer macronix_spinand_manufacturer;
extern const struct spinand_manufacturer micron_spinand_manufacturer;

View file

@ -0,0 +1,38 @@
From ae461cde5c559675fc4c0ba351c7c31ace705f56 Mon Sep 17 00:00:00 2001
From: Bohdan Chubuk <chbgdn@gmail.com>
Date: Sun, 10 Nov 2024 22:50:47 +0200
Subject: [PATCH] mtd: spinand: add support for FORESEE F35SQA001G
Add support for FORESEE F35SQA001G SPI NAND.
Similar to F35SQA002G, but differs in capacity.
Datasheet:
- https://cdn.ozdisan.com/ETicaret_Dosya/704795_871495.pdf
Tested on Xiaomi AX3000T flashed with OpenWRT.
Signed-off-by: Bohdan Chubuk <chbgdn@gmail.com>
Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
---
drivers/mtd/nand/spi/foresee.c | 10 ++++++++++
1 file changed, 10 insertions(+)
--- a/drivers/mtd/nand/spi/foresee.c
+++ b/drivers/mtd/nand/spi/foresee.c
@@ -81,6 +81,16 @@ static const struct spinand_info foresee
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&f35sqa002g_ooblayout,
f35sqa002g_ecc_get_status)),
+ SPINAND_INFO("F35SQA001G",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x71, 0x71),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(1, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&f35sqa002g_ooblayout,
+ f35sqa002g_ecc_get_status)),
};
static const struct spinand_manufacturer_ops foresee_spinand_manuf_ops = {

View file

@ -0,0 +1,880 @@
From 1d479f5b345e0c3650fec4dddeef9fc6fab30c8b Mon Sep 17 00:00:00 2001
From: Md Sadre Alam <quic_mdalam@quicinc.com>
Date: Wed, 20 Nov 2024 14:45:01 +0530
Subject: [PATCH 2/4] mtd: rawnand: qcom: Add qcom prefix to common api
Add the qcom prefix to all the APIs which will be commonly
used by the SPI NAND driver and the raw NAND driver.
Reviewed-by: Konrad Dybcio <konrad.dybcio@oss.qualcomm.com>
Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
---
drivers/mtd/nand/raw/qcom_nandc.c | 320 +++++++++++++++---------------
1 file changed, 160 insertions(+), 160 deletions(-)
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -53,7 +53,7 @@
#define NAND_READ_LOCATION_LAST_CW_2 0xf48
#define NAND_READ_LOCATION_LAST_CW_3 0xf4c
-/* dummy register offsets, used by write_reg_dma */
+/* dummy register offsets, used by qcom_write_reg_dma */
#define NAND_DEV_CMD1_RESTORE 0xdead
#define NAND_DEV_CMD_VLD_RESTORE 0xbeef
@@ -211,7 +211,7 @@
/*
* Flags used in DMA descriptor preparation helper functions
- * (i.e. read_reg_dma/write_reg_dma/read_data_dma/write_data_dma)
+ * (i.e. qcom_read_reg_dma/qcom_write_reg_dma/qcom_read_data_dma/qcom_write_data_dma)
*/
/* Don't set the EOT in current tx BAM sgl */
#define NAND_BAM_NO_EOT BIT(0)
@@ -550,7 +550,7 @@ struct qcom_nandc_props {
};
/* Frees the BAM transaction memory */
-static void free_bam_transaction(struct qcom_nand_controller *nandc)
+static void qcom_free_bam_transaction(struct qcom_nand_controller *nandc)
{
struct bam_transaction *bam_txn = nandc->bam_txn;
@@ -559,7 +559,7 @@ static void free_bam_transaction(struct
/* Allocates and Initializes the BAM transaction */
static struct bam_transaction *
-alloc_bam_transaction(struct qcom_nand_controller *nandc)
+qcom_alloc_bam_transaction(struct qcom_nand_controller *nandc)
{
struct bam_transaction *bam_txn;
size_t bam_txn_size;
@@ -595,7 +595,7 @@ alloc_bam_transaction(struct qcom_nand_c
}
/* Clears the BAM transaction indexes */
-static void clear_bam_transaction(struct qcom_nand_controller *nandc)
+static void qcom_clear_bam_transaction(struct qcom_nand_controller *nandc)
{
struct bam_transaction *bam_txn = nandc->bam_txn;
@@ -614,7 +614,7 @@ static void clear_bam_transaction(struct
}
/* Callback for DMA descriptor completion */
-static void qpic_bam_dma_done(void *data)
+static void qcom_qpic_bam_dma_done(void *data)
{
struct bam_transaction *bam_txn = data;
@@ -644,7 +644,7 @@ static void nandc_write(struct qcom_nand
iowrite32(val, nandc->base + offset);
}
-static void nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu)
+static void qcom_nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu)
{
if (!nandc->props->supports_bam)
return;
@@ -824,9 +824,9 @@ static void update_rw_regs(struct qcom_n
* for BAM. This descriptor will be added in the NAND DMA descriptor queue
* which will be submitted to DMA engine.
*/
-static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
- struct dma_chan *chan,
- unsigned long flags)
+static int qcom_prepare_bam_async_desc(struct qcom_nand_controller *nandc,
+ struct dma_chan *chan,
+ unsigned long flags)
{
struct desc_info *desc;
struct scatterlist *sgl;
@@ -903,9 +903,9 @@ static int prepare_bam_async_desc(struct
* NAND_BAM_NEXT_SGL will be used for starting the separate SGL
* after the current command element.
*/
-static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
- int reg_off, const void *vaddr,
- int size, unsigned int flags)
+static int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
+ int reg_off, const void *vaddr,
+ int size, unsigned int flags)
{
int bam_ce_size;
int i, ret;
@@ -943,9 +943,9 @@ static int prep_bam_dma_desc_cmd(struct
bam_txn->bam_ce_start = bam_txn->bam_ce_pos;
if (flags & NAND_BAM_NWD) {
- ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
- DMA_PREP_FENCE |
- DMA_PREP_CMD);
+ ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
+ DMA_PREP_FENCE |
+ DMA_PREP_CMD);
if (ret)
return ret;
}
@@ -958,9 +958,8 @@ static int prep_bam_dma_desc_cmd(struct
* Prepares the data descriptor for BAM DMA which will be used for NAND
* data reads and writes.
*/
-static int prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
- const void *vaddr,
- int size, unsigned int flags)
+static int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
+ const void *vaddr, int size, unsigned int flags)
{
int ret;
struct bam_transaction *bam_txn = nandc->bam_txn;
@@ -979,8 +978,8 @@ static int prep_bam_dma_desc_data(struct
* is not set, form the DMA descriptor
*/
if (!(flags & NAND_BAM_NO_EOT)) {
- ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
- DMA_PREP_INTERRUPT);
+ ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
+ DMA_PREP_INTERRUPT);
if (ret)
return ret;
}
@@ -989,9 +988,9 @@ static int prep_bam_dma_desc_data(struct
return 0;
}
-static int prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
- int reg_off, const void *vaddr, int size,
- bool flow_control)
+static int qcom_prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
+ int reg_off, const void *vaddr, int size,
+ bool flow_control)
{
struct desc_info *desc;
struct dma_async_tx_descriptor *dma_desc;
@@ -1069,15 +1068,15 @@ err:
}
/*
- * read_reg_dma: prepares a descriptor to read a given number of
+ * qcom_read_reg_dma: prepares a descriptor to read a given number of
* contiguous registers to the reg_read_buf pointer
*
* @first: offset of the first register in the contiguous block
* @num_regs: number of registers to read
* @flags: flags to control DMA descriptor preparation
*/
-static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
- int num_regs, unsigned int flags)
+static int qcom_read_reg_dma(struct qcom_nand_controller *nandc, int first,
+ int num_regs, unsigned int flags)
{
bool flow_control = false;
void *vaddr;
@@ -1089,18 +1088,18 @@ static int read_reg_dma(struct qcom_nand
first = dev_cmd_reg_addr(nandc, first);
if (nandc->props->supports_bam)
- return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
+ return qcom_prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
num_regs, flags);
if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
flow_control = true;
- return prep_adm_dma_desc(nandc, true, first, vaddr,
+ return qcom_prep_adm_dma_desc(nandc, true, first, vaddr,
num_regs * sizeof(u32), flow_control);
}
/*
- * write_reg_dma: prepares a descriptor to write a given number of
+ * qcom_write_reg_dma: prepares a descriptor to write a given number of
* contiguous registers
*
* @vaddr: contiguous memory from where register value will
@@ -1109,8 +1108,8 @@ static int read_reg_dma(struct qcom_nand
* @num_regs: number of registers to write
* @flags: flags to control DMA descriptor preparation
*/
-static int write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
- int first, int num_regs, unsigned int flags)
+static int qcom_write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
+ int first, int num_regs, unsigned int flags)
{
bool flow_control = false;
@@ -1124,18 +1123,18 @@ static int write_reg_dma(struct qcom_nan
first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
if (nandc->props->supports_bam)
- return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
+ return qcom_prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
num_regs, flags);
if (first == NAND_FLASH_CMD)
flow_control = true;
- return prep_adm_dma_desc(nandc, false, first, vaddr,
+ return qcom_prep_adm_dma_desc(nandc, false, first, vaddr,
num_regs * sizeof(u32), flow_control);
}
/*
- * read_data_dma: prepares a DMA descriptor to transfer data from the
+ * qcom_read_data_dma: prepares a DMA descriptor to transfer data from the
* controller's internal buffer to the buffer 'vaddr'
*
* @reg_off: offset within the controller's data buffer
@@ -1143,17 +1142,17 @@ static int write_reg_dma(struct qcom_nan
* @size: DMA transaction size in bytes
* @flags: flags to control DMA descriptor preparation
*/
-static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
- const u8 *vaddr, int size, unsigned int flags)
+static int qcom_read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
+ const u8 *vaddr, int size, unsigned int flags)
{
if (nandc->props->supports_bam)
- return prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
+ return qcom_prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
- return prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
+ return qcom_prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
}
/*
- * write_data_dma: prepares a DMA descriptor to transfer data from
+ * qcom_write_data_dma: prepares a DMA descriptor to transfer data from
* 'vaddr' to the controller's internal buffer
*
* @reg_off: offset within the controller's data buffer
@@ -1161,13 +1160,13 @@ static int read_data_dma(struct qcom_nan
* @size: DMA transaction size in bytes
* @flags: flags to control DMA descriptor preparation
*/
-static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
- const u8 *vaddr, int size, unsigned int flags)
+static int qcom_write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
+ const u8 *vaddr, int size, unsigned int flags)
{
if (nandc->props->supports_bam)
- return prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
+ return qcom_prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
- return prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
+ return qcom_prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
}
/*
@@ -1178,14 +1177,14 @@ static void config_nand_page_read(struct
{
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
- write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
+ qcom_write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
+ qcom_write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
if (!nandc->props->qpic_version2)
- write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1, 0);
- write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_clr,
- NAND_ERASED_CW_DETECT_CFG, 1, 0);
- write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_set,
- NAND_ERASED_CW_DETECT_CFG, 1, NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1, 0);
+ qcom_write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_clr,
+ NAND_ERASED_CW_DETECT_CFG, 1, 0);
+ qcom_write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_set,
+ NAND_ERASED_CW_DETECT_CFG, 1, NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
}
/*
@@ -1204,17 +1203,17 @@ config_nand_cw_read(struct nand_chip *ch
reg = &nandc->regs->read_location_last0;
if (nandc->props->supports_bam)
- write_reg_dma(nandc, reg, NAND_READ_LOCATION_0, 4, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, reg, NAND_READ_LOCATION_0, 4, NAND_BAM_NEXT_SGL);
- write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
- write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
if (use_ecc) {
- read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
- read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
- NAND_BAM_NEXT_SGL);
+ qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
+ qcom_read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
+ NAND_BAM_NEXT_SGL);
} else {
- read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+ qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
}
}
@@ -1238,11 +1237,11 @@ static void config_nand_page_write(struc
{
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
- write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
+ qcom_write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
+ qcom_write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
if (!nandc->props->qpic_version2)
- write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1,
- NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1,
+ NAND_BAM_NEXT_SGL);
}
/*
@@ -1253,17 +1252,18 @@ static void config_nand_cw_write(struct
{
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
- write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
- read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+ qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
- write_reg_dma(nandc, &nandc->regs->clrflashstatus, NAND_FLASH_STATUS, 1, 0);
- write_reg_dma(nandc, &nandc->regs->clrreadstatus, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->clrflashstatus, NAND_FLASH_STATUS, 1, 0);
+ qcom_write_reg_dma(nandc, &nandc->regs->clrreadstatus, NAND_READ_STATUS, 1,
+ NAND_BAM_NEXT_SGL);
}
/* helpers to submit/free our list of dma descriptors */
-static int submit_descs(struct qcom_nand_controller *nandc)
+static int qcom_submit_descs(struct qcom_nand_controller *nandc)
{
struct desc_info *desc, *n;
dma_cookie_t cookie = 0;
@@ -1272,21 +1272,21 @@ static int submit_descs(struct qcom_nand
if (nandc->props->supports_bam) {
if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
- ret = prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
+ ret = qcom_prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
if (ret)
goto err_unmap_free_desc;
}
if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
- ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
- DMA_PREP_INTERRUPT);
+ ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
+ DMA_PREP_INTERRUPT);
if (ret)
goto err_unmap_free_desc;
}
if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
- ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
- DMA_PREP_CMD);
+ ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
+ DMA_PREP_CMD);
if (ret)
goto err_unmap_free_desc;
}
@@ -1296,7 +1296,7 @@ static int submit_descs(struct qcom_nand
cookie = dmaengine_submit(desc->dma_desc);
if (nandc->props->supports_bam) {
- bam_txn->last_cmd_desc->callback = qpic_bam_dma_done;
+ bam_txn->last_cmd_desc->callback = qcom_qpic_bam_dma_done;
bam_txn->last_cmd_desc->callback_param = bam_txn;
dma_async_issue_pending(nandc->tx_chan);
@@ -1314,7 +1314,7 @@ static int submit_descs(struct qcom_nand
err_unmap_free_desc:
/*
* Unmap the dma sg_list and free the desc allocated by both
- * prepare_bam_async_desc() and prep_adm_dma_desc() functions.
+ * qcom_prepare_bam_async_desc() and qcom_prep_adm_dma_desc() functions.
*/
list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
list_del(&desc->node);
@@ -1333,10 +1333,10 @@ err_unmap_free_desc:
}
/* reset the register read buffer for next NAND operation */
-static void clear_read_regs(struct qcom_nand_controller *nandc)
+static void qcom_clear_read_regs(struct qcom_nand_controller *nandc)
{
nandc->reg_read_pos = 0;
- nandc_dev_to_mem(nandc, false);
+ qcom_nandc_dev_to_mem(nandc, false);
}
/*
@@ -1400,7 +1400,7 @@ static int check_flash_errors(struct qco
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
int i;
- nandc_dev_to_mem(nandc, true);
+ qcom_nandc_dev_to_mem(nandc, true);
for (i = 0; i < cw_cnt; i++) {
u32 flash = le32_to_cpu(nandc->reg_read_buf[i]);
@@ -1427,13 +1427,13 @@ qcom_nandc_read_cw_raw(struct mtd_info *
nand_read_page_op(chip, page, 0, NULL, 0);
nandc->buf_count = 0;
nandc->buf_start = 0;
- clear_read_regs(nandc);
+ qcom_clear_read_regs(nandc);
host->use_ecc = false;
if (nandc->props->qpic_version2)
raw_cw = ecc->steps - 1;
- clear_bam_transaction(nandc);
+ qcom_clear_bam_transaction(nandc);
set_address(host, host->cw_size * cw, page);
update_rw_regs(host, 1, true, raw_cw);
config_nand_page_read(chip);
@@ -1466,18 +1466,18 @@ qcom_nandc_read_cw_raw(struct mtd_info *
config_nand_cw_read(chip, false, raw_cw);
- read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
+ qcom_read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
reg_off += data_size1;
- read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
+ qcom_read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
reg_off += oob_size1;
- read_data_dma(nandc, reg_off, data_buf + data_size1, data_size2, 0);
+ qcom_read_data_dma(nandc, reg_off, data_buf + data_size1, data_size2, 0);
reg_off += data_size2;
- read_data_dma(nandc, reg_off, oob_buf + oob_size1, oob_size2, 0);
+ qcom_read_data_dma(nandc, reg_off, oob_buf + oob_size1, oob_size2, 0);
- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure to read raw cw %d\n", cw);
return ret;
@@ -1575,7 +1575,7 @@ static int parse_read_errors(struct qcom
u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;
buf = (struct read_stats *)nandc->reg_read_buf;
- nandc_dev_to_mem(nandc, true);
+ qcom_nandc_dev_to_mem(nandc, true);
for (i = 0; i < ecc->steps; i++, buf++) {
u32 flash, buffer, erased_cw;
@@ -1704,8 +1704,8 @@ static int read_page_ecc(struct qcom_nan
config_nand_cw_read(chip, true, i);
if (data_buf)
- read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
- data_size, 0);
+ qcom_read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
+ data_size, 0);
/*
* when ecc is enabled, the controller doesn't read the real
@@ -1720,8 +1720,8 @@ static int read_page_ecc(struct qcom_nan
for (j = 0; j < host->bbm_size; j++)
*oob_buf++ = 0xff;
- read_data_dma(nandc, FLASH_BUF_ACC + data_size,
- oob_buf, oob_size, 0);
+ qcom_read_data_dma(nandc, FLASH_BUF_ACC + data_size,
+ oob_buf, oob_size, 0);
}
if (data_buf)
@@ -1730,7 +1730,7 @@ static int read_page_ecc(struct qcom_nan
oob_buf += oob_size;
}
- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure to read page/oob\n");
return ret;
@@ -1751,7 +1751,7 @@ static int copy_last_cw(struct qcom_nand
int size;
int ret;
- clear_read_regs(nandc);
+ qcom_clear_read_regs(nandc);
size = host->use_ecc ? host->cw_data : host->cw_size;
@@ -1763,9 +1763,9 @@ static int copy_last_cw(struct qcom_nand
config_nand_single_cw_page_read(chip, host->use_ecc, ecc->steps - 1);
- read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);
+ qcom_read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);
- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
if (ret)
dev_err(nandc->dev, "failed to copy last codeword\n");
@@ -1851,14 +1851,14 @@ static int qcom_nandc_read_page(struct n
nandc->buf_count = 0;
nandc->buf_start = 0;
host->use_ecc = true;
- clear_read_regs(nandc);
+ qcom_clear_read_regs(nandc);
set_address(host, 0, page);
update_rw_regs(host, ecc->steps, true, 0);
data_buf = buf;
oob_buf = oob_required ? chip->oob_poi : NULL;
- clear_bam_transaction(nandc);
+ qcom_clear_bam_transaction(nandc);
return read_page_ecc(host, data_buf, oob_buf, page);
}
@@ -1899,8 +1899,8 @@ static int qcom_nandc_read_oob(struct na
if (host->nr_boot_partitions)
qcom_nandc_codeword_fixup(host, page);
- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);
host->use_ecc = true;
set_address(host, 0, page);
@@ -1927,8 +1927,8 @@ static int qcom_nandc_write_page(struct
set_address(host, 0, page);
nandc->buf_count = 0;
nandc->buf_start = 0;
- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);
data_buf = (u8 *)buf;
oob_buf = chip->oob_poi;
@@ -1949,8 +1949,8 @@ static int qcom_nandc_write_page(struct
oob_size = ecc->bytes;
}
- write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
- i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);
+ qcom_write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
+ i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);
/*
* when ECC is enabled, we don't really need to write anything
@@ -1962,8 +1962,8 @@ static int qcom_nandc_write_page(struct
if (qcom_nandc_is_last_cw(ecc, i)) {
oob_buf += host->bbm_size;
- write_data_dma(nandc, FLASH_BUF_ACC + data_size,
- oob_buf, oob_size, 0);
+ qcom_write_data_dma(nandc, FLASH_BUF_ACC + data_size,
+ oob_buf, oob_size, 0);
}
config_nand_cw_write(chip);
@@ -1972,7 +1972,7 @@ static int qcom_nandc_write_page(struct
oob_buf += oob_size;
}
- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure to write page\n");
return ret;
@@ -1997,8 +1997,8 @@ static int qcom_nandc_write_page_raw(str
qcom_nandc_codeword_fixup(host, page);
nand_prog_page_begin_op(chip, page, 0, NULL, 0);
- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);
data_buf = (u8 *)buf;
oob_buf = chip->oob_poi;
@@ -2024,28 +2024,28 @@ static int qcom_nandc_write_page_raw(str
oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
}
- write_data_dma(nandc, reg_off, data_buf, data_size1,
- NAND_BAM_NO_EOT);
+ qcom_write_data_dma(nandc, reg_off, data_buf, data_size1,
+ NAND_BAM_NO_EOT);
reg_off += data_size1;
data_buf += data_size1;
- write_data_dma(nandc, reg_off, oob_buf, oob_size1,
- NAND_BAM_NO_EOT);
+ qcom_write_data_dma(nandc, reg_off, oob_buf, oob_size1,
+ NAND_BAM_NO_EOT);
reg_off += oob_size1;
oob_buf += oob_size1;
- write_data_dma(nandc, reg_off, data_buf, data_size2,
- NAND_BAM_NO_EOT);
+ qcom_write_data_dma(nandc, reg_off, data_buf, data_size2,
+ NAND_BAM_NO_EOT);
reg_off += data_size2;
data_buf += data_size2;
- write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
+ qcom_write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
oob_buf += oob_size2;
config_nand_cw_write(chip);
}
- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure to write raw page\n");
return ret;
@@ -2075,7 +2075,7 @@ static int qcom_nandc_write_oob(struct n
qcom_nandc_codeword_fixup(host, page);
host->use_ecc = true;
- clear_bam_transaction(nandc);
+ qcom_clear_bam_transaction(nandc);
/* calculate the data and oob size for the last codeword/step */
data_size = ecc->size - ((ecc->steps - 1) << 2);
@@ -2090,11 +2090,11 @@ static int qcom_nandc_write_oob(struct n
update_rw_regs(host, 1, false, 0);
config_nand_page_write(chip);
- write_data_dma(nandc, FLASH_BUF_ACC,
- nandc->data_buffer, data_size + oob_size, 0);
+ qcom_write_data_dma(nandc, FLASH_BUF_ACC,
+ nandc->data_buffer, data_size + oob_size, 0);
config_nand_cw_write(chip);
- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure to write oob\n");
return ret;
@@ -2121,7 +2121,7 @@ static int qcom_nandc_block_bad(struct n
*/
host->use_ecc = false;
- clear_bam_transaction(nandc);
+ qcom_clear_bam_transaction(nandc);
ret = copy_last_cw(host, page);
if (ret)
goto err;
@@ -2148,8 +2148,8 @@ static int qcom_nandc_block_markbad(stru
struct nand_ecc_ctrl *ecc = &chip->ecc;
int page, ret;
- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);
/*
* to mark the BBM as bad, we flash the entire last codeword with 0s.
@@ -2166,11 +2166,11 @@ static int qcom_nandc_block_markbad(stru
update_rw_regs(host, 1, false, ecc->steps - 1);
config_nand_page_write(chip);
- write_data_dma(nandc, FLASH_BUF_ACC,
- nandc->data_buffer, host->cw_size, 0);
+ qcom_write_data_dma(nandc, FLASH_BUF_ACC,
+ nandc->data_buffer, host->cw_size, 0);
config_nand_cw_write(chip);
- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure to update BBM\n");
return ret;
@@ -2410,14 +2410,14 @@ static int qcom_nand_attach_chip(struct
mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);
/* Free the initially allocated BAM transaction for reading the ONFI params */
if (nandc->props->supports_bam)
- free_bam_transaction(nandc);
+ qcom_free_bam_transaction(nandc);
nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
cwperpage);
/* Now allocate the BAM transaction based on updated max_cwperpage */
if (nandc->props->supports_bam) {
- nandc->bam_txn = alloc_bam_transaction(nandc);
+ nandc->bam_txn = qcom_alloc_bam_transaction(nandc);
if (!nandc->bam_txn) {
dev_err(nandc->dev,
"failed to allocate bam transaction\n");
@@ -2617,7 +2617,7 @@ static int qcom_wait_rdy_poll(struct nan
unsigned long start = jiffies + msecs_to_jiffies(time_ms);
u32 flash;
- nandc_dev_to_mem(nandc, true);
+ qcom_nandc_dev_to_mem(nandc, true);
do {
flash = le32_to_cpu(nandc->reg_read_buf[0]);
@@ -2657,23 +2657,23 @@ static int qcom_read_status_exec(struct
nandc->buf_start = 0;
host->use_ecc = false;
- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);
nandc->regs->cmd = q_op.cmd_reg;
nandc->regs->exec = cpu_to_le32(1);
- write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
- write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
- read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure in submitting status descriptor\n");
goto err_out;
}
- nandc_dev_to_mem(nandc, true);
+ qcom_nandc_dev_to_mem(nandc, true);
for (i = 0; i < num_cw; i++) {
flash_status = le32_to_cpu(nandc->reg_read_buf[i]);
@@ -2714,8 +2714,8 @@ static int qcom_read_id_type_exec(struct
nandc->buf_start = 0;
host->use_ecc = false;
- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);
nandc->regs->cmd = q_op.cmd_reg;
nandc->regs->addr0 = q_op.addr1_reg;
@@ -2723,12 +2723,12 @@ static int qcom_read_id_type_exec(struct
nandc->regs->chip_sel = cpu_to_le32(nandc->props->supports_bam ? 0 : DM_EN);
nandc->regs->exec = cpu_to_le32(1);
- write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
- write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
- read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
+ qcom_read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure in submitting read id descriptor\n");
goto err_out;
@@ -2738,7 +2738,7 @@ static int qcom_read_id_type_exec(struct
op_id = q_op.data_instr_idx;
len = nand_subop_get_data_len(subop, op_id);
- nandc_dev_to_mem(nandc, true);
+ qcom_nandc_dev_to_mem(nandc, true);
memcpy(instr->ctx.data.buf.in, nandc->reg_read_buf, len);
err_out:
@@ -2774,20 +2774,20 @@ static int qcom_misc_cmd_type_exec(struc
nandc->buf_start = 0;
host->use_ecc = false;
- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);
nandc->regs->cmd = q_op.cmd_reg;
nandc->regs->exec = cpu_to_le32(1);
- write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
if (q_op.cmd_reg == cpu_to_le32(OP_BLOCK_ERASE))
- write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
- write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
- read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure in submitting misc descriptor\n");
goto err_out;
@@ -2820,8 +2820,8 @@ static int qcom_param_page_type_exec(str
nandc->buf_count = 0;
nandc->buf_start = 0;
host->use_ecc = false;
- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);
nandc->regs->cmd = q_op.cmd_reg;
nandc->regs->addr0 = 0;
@@ -2864,8 +2864,8 @@ static int qcom_param_page_type_exec(str
nandc_set_read_loc(chip, 0, 0, 0, len, 1);
if (!nandc->props->qpic_version2) {
- write_reg_dma(nandc, &nandc->regs->vld, NAND_DEV_CMD_VLD, 1, 0);
- write_reg_dma(nandc, &nandc->regs->cmd1, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->vld, NAND_DEV_CMD_VLD, 1, 0);
+ qcom_write_reg_dma(nandc, &nandc->regs->cmd1, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
}
nandc->buf_count = len;
@@ -2873,17 +2873,17 @@ static int qcom_param_page_type_exec(str
config_nand_single_cw_page_read(chip, false, 0);
- read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
- nandc->buf_count, 0);
+ qcom_read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
+ nandc->buf_count, 0);
/* restore CMD1 and VLD regs */
if (!nandc->props->qpic_version2) {
- write_reg_dma(nandc, &nandc->regs->orig_cmd1, NAND_DEV_CMD1_RESTORE, 1, 0);
- write_reg_dma(nandc, &nandc->regs->orig_vld, NAND_DEV_CMD_VLD_RESTORE, 1,
- NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->orig_cmd1, NAND_DEV_CMD1_RESTORE, 1, 0);
+ qcom_write_reg_dma(nandc, &nandc->regs->orig_vld, NAND_DEV_CMD_VLD_RESTORE, 1,
+ NAND_BAM_NEXT_SGL);
}
- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure in submitting param page descriptor\n");
goto err_out;
@@ -3067,7 +3067,7 @@ static int qcom_nandc_alloc(struct qcom_
* maximum codeword size
*/
nandc->max_cwperpage = 1;
- nandc->bam_txn = alloc_bam_transaction(nandc);
+ nandc->bam_txn = qcom_alloc_bam_transaction(nandc);
if (!nandc->bam_txn) {
dev_err(nandc->dev,
"failed to allocate bam transaction\n");

View file

@ -0,0 +1,198 @@
From 0c08080fd71cd5dd59643104b39d3c89d793ab3c Mon Sep 17 00:00:00 2001
From: Md Sadre Alam <quic_mdalam@quicinc.com>
Date: Wed, 20 Nov 2024 14:45:03 +0530
Subject: [PATCH 4/4] mtd: rawnand: qcom: use FIELD_PREP and GENMASK
Use the bitfield macros FIELD_PREP() and GENMASK() to
do the shift and mask in one go. This makes the code
more readable.
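As an editorial aside (not part of the patch), a minimal sketch of the transformation, with local copies of the CW_PER_PAGE definitions used in the hunks below:

  #include <linux/bitfield.h>
  #include <linux/bits.h>
  #include <linux/types.h>

  #define CW_PER_PAGE		6		/* old style: a shift amount */
  #define CW_PER_PAGE_MASK	GENMASK(8, 6)	/* new style: a field mask   */

  static u32 encode_cw_per_page(u32 cwperpage)
  {
  	/* before: (cwperpage - 1) << CW_PER_PAGE, shift kept in sync by hand
  	 * after:  FIELD_PREP() derives the shift from the mask, and for
  	 *         constant values it also checks at build time that they
  	 *         fit the field
  	 */
  	return FIELD_PREP(CW_PER_PAGE_MASK, cwperpage - 1);
  }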
Reviewed-by: Konrad Dybcio <konrad.dybcio@oss.qualcomm.com>
Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
---
drivers/mtd/nand/raw/qcom_nandc.c | 97 ++++++++++++++--------------
include/linux/mtd/nand-qpic-common.h | 31 +++++----
2 files changed, 67 insertions(+), 61 deletions(-)
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -281,7 +281,7 @@ static void update_rw_regs(struct qcom_n
(num_cw - 1) << CW_PER_PAGE);
cfg1 = cpu_to_le32(host->cfg1_raw);
- ecc_bch_cfg = cpu_to_le32(1 << ECC_CFG_ECC_DISABLE);
+ ecc_bch_cfg = cpu_to_le32(ECC_CFG_ECC_DISABLE);
}
nandc->regs->cmd = cmd;
@@ -1494,42 +1494,41 @@ static int qcom_nand_attach_chip(struct
host->cw_size = host->cw_data + ecc->bytes;
bad_block_byte = mtd->writesize - host->cw_size * (cwperpage - 1) + 1;
- host->cfg0 = (cwperpage - 1) << CW_PER_PAGE
- | host->cw_data << UD_SIZE_BYTES
- | 0 << DISABLE_STATUS_AFTER_WRITE
- | 5 << NUM_ADDR_CYCLES
- | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_RS
- | 0 << STATUS_BFR_READ
- | 1 << SET_RD_MODE_AFTER_STATUS
- | host->spare_bytes << SPARE_SIZE_BYTES;
-
- host->cfg1 = 7 << NAND_RECOVERY_CYCLES
- | 0 << CS_ACTIVE_BSY
- | bad_block_byte << BAD_BLOCK_BYTE_NUM
- | 0 << BAD_BLOCK_IN_SPARE_AREA
- | 2 << WR_RD_BSY_GAP
- | wide_bus << WIDE_FLASH
- | host->bch_enabled << ENABLE_BCH_ECC;
-
- host->cfg0_raw = (cwperpage - 1) << CW_PER_PAGE
- | host->cw_size << UD_SIZE_BYTES
- | 5 << NUM_ADDR_CYCLES
- | 0 << SPARE_SIZE_BYTES;
-
- host->cfg1_raw = 7 << NAND_RECOVERY_CYCLES
- | 0 << CS_ACTIVE_BSY
- | 17 << BAD_BLOCK_BYTE_NUM
- | 1 << BAD_BLOCK_IN_SPARE_AREA
- | 2 << WR_RD_BSY_GAP
- | wide_bus << WIDE_FLASH
- | 1 << DEV0_CFG1_ECC_DISABLE;
-
- host->ecc_bch_cfg = !host->bch_enabled << ECC_CFG_ECC_DISABLE
- | 0 << ECC_SW_RESET
- | host->cw_data << ECC_NUM_DATA_BYTES
- | 1 << ECC_FORCE_CLK_OPEN
- | ecc_mode << ECC_MODE
- | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;
+ host->cfg0 = FIELD_PREP(CW_PER_PAGE_MASK, (cwperpage - 1)) |
+ FIELD_PREP(UD_SIZE_BYTES_MASK, host->cw_data) |
+ FIELD_PREP(DISABLE_STATUS_AFTER_WRITE, 0) |
+ FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) |
+ FIELD_PREP(ECC_PARITY_SIZE_BYTES_RS, host->ecc_bytes_hw) |
+ FIELD_PREP(STATUS_BFR_READ, 0) |
+ FIELD_PREP(SET_RD_MODE_AFTER_STATUS, 1) |
+ FIELD_PREP(SPARE_SIZE_BYTES_MASK, host->spare_bytes);
+
+ host->cfg1 = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) |
+ FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, bad_block_byte) |
+ FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 0) |
+ FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) |
+ FIELD_PREP(WIDE_FLASH, wide_bus) |
+ FIELD_PREP(ENABLE_BCH_ECC, host->bch_enabled);
+
+ host->cfg0_raw = FIELD_PREP(CW_PER_PAGE_MASK, (cwperpage - 1)) |
+ FIELD_PREP(UD_SIZE_BYTES_MASK, host->cw_size) |
+ FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) |
+ FIELD_PREP(SPARE_SIZE_BYTES_MASK, 0);
+
+ host->cfg1_raw = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) |
+ FIELD_PREP(CS_ACTIVE_BSY, 0) |
+ FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, 17) |
+ FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 1) |
+ FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) |
+ FIELD_PREP(WIDE_FLASH, wide_bus) |
+ FIELD_PREP(DEV0_CFG1_ECC_DISABLE, 1);
+
+ host->ecc_bch_cfg = FIELD_PREP(ECC_CFG_ECC_DISABLE, !host->bch_enabled) |
+ FIELD_PREP(ECC_SW_RESET, 0) |
+ FIELD_PREP(ECC_NUM_DATA_BYTES_MASK, host->cw_data) |
+ FIELD_PREP(ECC_FORCE_CLK_OPEN, 1) |
+ FIELD_PREP(ECC_MODE_MASK, ecc_mode) |
+ FIELD_PREP(ECC_PARITY_SIZE_BYTES_BCH_MASK, host->ecc_bytes_hw);
if (!nandc->props->qpic_version2)
host->ecc_buf_cfg = 0x203 << NUM_STEPS;
@@ -1882,21 +1881,21 @@ static int qcom_param_page_type_exec(str
nandc->regs->addr0 = 0;
nandc->regs->addr1 = 0;
- nandc->regs->cfg0 = cpu_to_le32(0 << CW_PER_PAGE |
- 512 << UD_SIZE_BYTES |
- 5 << NUM_ADDR_CYCLES |
- 0 << SPARE_SIZE_BYTES);
-
- nandc->regs->cfg1 = cpu_to_le32(7 << NAND_RECOVERY_CYCLES |
- 0 << CS_ACTIVE_BSY |
- 17 << BAD_BLOCK_BYTE_NUM |
- 1 << BAD_BLOCK_IN_SPARE_AREA |
- 2 << WR_RD_BSY_GAP |
- 0 << WIDE_FLASH |
- 1 << DEV0_CFG1_ECC_DISABLE);
+ host->cfg0 = FIELD_PREP(CW_PER_PAGE_MASK, 0) |
+ FIELD_PREP(UD_SIZE_BYTES_MASK, 512) |
+ FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) |
+ FIELD_PREP(SPARE_SIZE_BYTES_MASK, 0);
+
+ host->cfg1 = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) |
+ FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, 17) |
+ FIELD_PREP(CS_ACTIVE_BSY, 0) |
+ FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 1) |
+ FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) |
+ FIELD_PREP(WIDE_FLASH, 0) |
+ FIELD_PREP(DEV0_CFG1_ECC_DISABLE, 1);
if (!nandc->props->qpic_version2)
- nandc->regs->ecc_buf_cfg = cpu_to_le32(1 << ECC_CFG_ECC_DISABLE);
+ nandc->regs->ecc_buf_cfg = cpu_to_le32(ECC_CFG_ECC_DISABLE);
/* configure CMD1 and VLD for ONFI param probing in QPIC v1 */
if (!nandc->props->qpic_version2) {
--- a/include/linux/mtd/nand-qpic-common.h
+++ b/include/linux/mtd/nand-qpic-common.h
@@ -70,35 +70,42 @@
#define BS_CORRECTABLE_ERR_MSK 0x1f
/* NAND_DEVn_CFG0 bits */
-#define DISABLE_STATUS_AFTER_WRITE 4
+#define DISABLE_STATUS_AFTER_WRITE BIT(4)
#define CW_PER_PAGE 6
+#define CW_PER_PAGE_MASK GENMASK(8, 6)
#define UD_SIZE_BYTES 9
#define UD_SIZE_BYTES_MASK GENMASK(18, 9)
-#define ECC_PARITY_SIZE_BYTES_RS 19
+#define ECC_PARITY_SIZE_BYTES_RS GENMASK(22, 19)
#define SPARE_SIZE_BYTES 23
#define SPARE_SIZE_BYTES_MASK GENMASK(26, 23)
#define NUM_ADDR_CYCLES 27
-#define STATUS_BFR_READ 30
-#define SET_RD_MODE_AFTER_STATUS 31
+#define NUM_ADDR_CYCLES_MASK GENMASK(29, 27)
+#define STATUS_BFR_READ BIT(30)
+#define SET_RD_MODE_AFTER_STATUS BIT(31)
/* NAND_DEVn_CFG0 bits */
-#define DEV0_CFG1_ECC_DISABLE 0
-#define WIDE_FLASH 1
+#define DEV0_CFG1_ECC_DISABLE BIT(0)
+#define WIDE_FLASH BIT(1)
#define NAND_RECOVERY_CYCLES 2
-#define CS_ACTIVE_BSY 5
+#define NAND_RECOVERY_CYCLES_MASK GENMASK(4, 2)
+#define CS_ACTIVE_BSY BIT(5)
#define BAD_BLOCK_BYTE_NUM 6
-#define BAD_BLOCK_IN_SPARE_AREA 16
+#define BAD_BLOCK_BYTE_NUM_MASK GENMASK(15, 6)
+#define BAD_BLOCK_IN_SPARE_AREA BIT(16)
#define WR_RD_BSY_GAP 17
-#define ENABLE_BCH_ECC 27
+#define WR_RD_BSY_GAP_MASK GENMASK(22, 17)
+#define ENABLE_BCH_ECC BIT(27)
/* NAND_DEV0_ECC_CFG bits */
-#define ECC_CFG_ECC_DISABLE 0
-#define ECC_SW_RESET 1
+#define ECC_CFG_ECC_DISABLE BIT(0)
+#define ECC_SW_RESET BIT(1)
#define ECC_MODE 4
+#define ECC_MODE_MASK GENMASK(5, 4)
#define ECC_PARITY_SIZE_BYTES_BCH 8
+#define ECC_PARITY_SIZE_BYTES_BCH_MASK GENMASK(12, 8)
#define ECC_NUM_DATA_BYTES 16
#define ECC_NUM_DATA_BYTES_MASK GENMASK(25, 16)
-#define ECC_FORCE_CLK_OPEN 30
+#define ECC_FORCE_CLK_OPEN BIT(30)
/* NAND_DEV_CMD1 bits */
#define READ_ADDR 0

View file

@ -0,0 +1,64 @@
From 9d4ffbcfde283f2a87ea45128ddf7e6651facdd9 Mon Sep 17 00:00:00 2001
From: Christian Marangi <ansuelsmth@gmail.com>
Date: Fri, 7 Feb 2025 20:42:38 +0100
Subject: [PATCH] mtd: rawnand: qcom: fix broken config in
qcom_param_page_type_exec
Fix broken config in qcom_param_page_type_exec caused by copy-paste error
from commit 0c08080fd71c ("mtd: rawnand: qcom: use FIELD_PREP and GENMASK")
In qcom_param_page_type_exec the value needs to be set to
nandc->regs->cfg0 instead of host->cfg0. This wrong configuration caused
the Qcom NANDC driver to malfunction on any device that makes use of it
(IPQ806x, IPQ40xx, IPQ807x, IPQ60xx) with the following error:
[ 0.885369] nand: device found, Manufacturer ID: 0x2c, Chip ID: 0xaa
[ 0.885909] nand: Micron NAND 256MiB 1,8V 8-bit
[ 0.892499] nand: 256 MiB, SLC, erase size: 128 KiB, page size: 2048, OOB size: 64
[ 0.896823] nand: ECC (step, strength) = (512, 8) does not fit in OOB
[ 0.896836] qcom-nandc 79b0000.nand-controller: No valid ECC settings possible
[ 0.910996] bam-dma-engine 7984000.dma-controller: Cannot free busy channel
[ 0.918070] qcom-nandc: probe of 79b0000.nand-controller failed with error -28
Restoring the original configuration fixes the problem and makes the driver
work again.
Cc: stable@vger.kernel.org
Fixes: 0c08080fd71c ("mtd: rawnand: qcom: use FIELD_PREP and GENMASK")
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
---
drivers/mtd/nand/raw/qcom_nandc.c | 24 ++++++++++++------------
1 file changed, 12 insertions(+), 12 deletions(-)
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -1881,18 +1881,18 @@ static int qcom_param_page_type_exec(str
nandc->regs->addr0 = 0;
nandc->regs->addr1 = 0;
- host->cfg0 = FIELD_PREP(CW_PER_PAGE_MASK, 0) |
- FIELD_PREP(UD_SIZE_BYTES_MASK, 512) |
- FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) |
- FIELD_PREP(SPARE_SIZE_BYTES_MASK, 0);
+ nandc->regs->cfg0 = FIELD_PREP(CW_PER_PAGE_MASK, 0) |
+ FIELD_PREP(UD_SIZE_BYTES_MASK, 512) |
+ FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) |
+ FIELD_PREP(SPARE_SIZE_BYTES_MASK, 0);
- host->cfg1 = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) |
- FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, 17) |
- FIELD_PREP(CS_ACTIVE_BSY, 0) |
- FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 1) |
- FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) |
- FIELD_PREP(WIDE_FLASH, 0) |
- FIELD_PREP(DEV0_CFG1_ECC_DISABLE, 1);
+ nandc->regs->cfg1 = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) |
+ FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, 17) |
+ FIELD_PREP(CS_ACTIVE_BSY, 0) |
+ FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 1) |
+ FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) |
+ FIELD_PREP(WIDE_FLASH, 0) |
+ FIELD_PREP(DEV0_CFG1_ECC_DISABLE, 1);
if (!nandc->props->qpic_version2)
nandc->regs->ecc_buf_cfg = cpu_to_le32(ECC_CFG_ECC_DISABLE);

View file

@ -0,0 +1,77 @@
From b9371866799d67a80be0ea9e01bd41987db22f26 Mon Sep 17 00:00:00 2001
From: Md Sadre Alam <quic_mdalam@quicinc.com>
Date: Mon, 6 Jan 2025 18:45:58 +0530
Subject: [PATCH] mtd: rawnand: qcom: Fix build issue on x86 architecture
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Fix a buffer overflow issue in qcom_clear_bam_transaction by using
struct_group to group related fields and avoid FORTIFY_SOURCE warnings.
On x86 architecture, the following error occurs due to warnings being
treated as errors:
In function fortify_memset_chk,
inlined from qcom_clear_bam_transaction at
drivers/mtd/nand/qpic_common.c:88:2:
./include/linux/fortify-string.h:480:25: error: call to __write_overflow_field
declared with attribute warning: detected write beyond size of field
(1st parameter); maybe use struct_group()? [-Werror=attribute-warning]
480 | __write_overflow_field(p_size_field, size);
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
LD [M] drivers/mtd/nand/nandcore.o
CC [M] drivers/w1/masters/mxc_w1.o
cc1: all warnings being treated as errors
This patch addresses the issue by grouping the related fields in
struct bam_transaction using struct_group and updating the memset call
accordingly.
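As an editorial aside, a minimal sketch of the struct_group()/memset() pattern the patch adopts, with illustrative field names rather than the driver's own:

  #include <linux/stddef.h>
  #include <linux/string.h>
  #include <linux/types.h>

  struct txn_state {
  	int id;				/* not cleared between operations */
  	struct_group(positions,		/* fields cleared as one unit */
  		u32 ce_pos;
  		u32 ce_start;
  		u32 sgl_pos;
  		u32 sgl_start;
  	);
  };

  static void txn_reset(struct txn_state *t)
  {
  	/* The write is now bounded by the group, not by its first member,
  	 * so FORTIFY_SOURCE no longer flags it as a field overflow.
  	 */
  	memset(&t->positions, 0, sizeof(t->positions));
  }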
Fixes: 8c52932da5e6 ("mtd: rawnand: qcom: cleanup qcom_nandc driver")
Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
---
drivers/mtd/nand/qpic_common.c | 2 +-
include/linux/mtd/nand-qpic-common.h | 19 +++++++++++--------
2 files changed, 12 insertions(+), 9 deletions(-)
--- a/drivers/mtd/nand/qpic_common.c
+++ b/drivers/mtd/nand/qpic_common.c
@@ -85,7 +85,7 @@ void qcom_clear_bam_transaction(struct q
if (!nandc->props->supports_bam)
return;
- memset(&bam_txn->bam_ce_pos, 0, sizeof(u32) * 8);
+ memset(&bam_txn->bam_positions, 0, sizeof(bam_txn->bam_positions));
bam_txn->last_data_desc = NULL;
sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
--- a/include/linux/mtd/nand-qpic-common.h
+++ b/include/linux/mtd/nand-qpic-common.h
@@ -254,14 +254,17 @@ struct bam_transaction {
struct dma_async_tx_descriptor *last_data_desc;
struct dma_async_tx_descriptor *last_cmd_desc;
struct completion txn_done;
- u32 bam_ce_pos;
- u32 bam_ce_start;
- u32 cmd_sgl_pos;
- u32 cmd_sgl_start;
- u32 tx_sgl_pos;
- u32 tx_sgl_start;
- u32 rx_sgl_pos;
- u32 rx_sgl_start;
+ struct_group(bam_positions,
+ u32 bam_ce_pos;
+ u32 bam_ce_start;
+ u32 cmd_sgl_pos;
+ u32 cmd_sgl_start;
+ u32 tx_sgl_pos;
+ u32 tx_sgl_start;
+ u32 rx_sgl_pos;
+ u32 rx_sgl_start;
+
+ );
};
/*

View file

@ -0,0 +1,28 @@
From cf1ba3cb245020459f2ca446b7a7b199839f5d83 Mon Sep 17 00:00:00 2001
From: Dan Carpenter <dan.carpenter@linaro.org>
Date: Thu, 6 Mar 2025 12:40:01 +0300
Subject: [PATCH] spi: spi-qpic-snand: Fix ECC_CFG_ECC_DISABLE shift in
qcom_spi_read_last_cw()
The ECC_CFG_ECC_DISABLE define is BIT(0). It's supposed to be used
directly instead of used as a shifter.
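For illustration only (the function name is hypothetical), a minimal sketch of the bug class: once a flag is defined with BIT(), using it as a shift count silently targets the wrong bit.

  #include <linux/bits.h>
  #include <linux/types.h>

  #define ECC_CFG_ECC_DISABLE	BIT(0)	/* == 0x1, already a mask */

  static u32 ecc_bch_cfg_value(void)
  {
  	/* wrong: 1 << ECC_CFG_ECC_DISABLE == 1 << 1 == 0x2, sets bit 1 */
  	/* right: use the define directly, as the hunk below does */
  	return ECC_CFG_ECC_DISABLE;
  }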
Fixes: 7304d1909080 ("spi: spi-qpic: add driver for QCOM SPI NAND flash Interface")
Signed-off-by: Dan Carpenter <dan.carpenter@linaro.org>
Link: https://patch.msgid.link/2f4b0a0b-2c03-41c0-8a4a-3d789a83832d@stanley.mountain
Signed-off-by: Mark Brown <broonie@kernel.org>
---
drivers/spi/spi-qpic-snand.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--- a/drivers/spi/spi-qpic-snand.c
+++ b/drivers/spi/spi-qpic-snand.c
@@ -514,7 +514,7 @@ static int qcom_spi_read_last_cw(struct
cfg0 = (ecc_cfg->cfg0_raw & ~(7U << CW_PER_PAGE)) |
0 << CW_PER_PAGE;
cfg1 = ecc_cfg->cfg1_raw;
- ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
+ ecc_bch_cfg = ECC_CFG_ECC_DISABLE;
snandc->regs->cmd = snandc->qspi->cmd;
snandc->regs->cfg0 = cpu_to_le32(cfg0);

View file

@ -0,0 +1,35 @@
From d450cdd9c4398add1f2aa7200f2c95f1e3b9f9fa Mon Sep 17 00:00:00 2001
From: Gabor Juhos <j4g8y7@gmail.com>
Date: Thu, 13 Mar 2025 19:31:21 +0100
Subject: [PATCH] spi: spi-qpic-snand: avoid memleak in
qcom_spi_ecc_init_ctx_pipelined()
When the allocation of the OOB buffer fails, the
qcom_spi_ecc_init_ctx_pipelined() function returns without freeing
the memory allocated for 'ecc_cfg', thus causing a memory leak.
Call kfree() to free 'ecc_cfg' before returning from the function
to avoid that.
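As an editorial aside, a minimal sketch (with an illustrative stand-in struct, not the driver's types) of the goto-based unwind commonly used for this kind of error path; the patch below achieves the same by freeing 'ecc_cfg' directly in the failure branch.

  #include <linux/slab.h>
  #include <linux/types.h>

  struct bufs {			/* illustrative stand-in only */
  	void *ecc_cfg;
  	u8 *oob_buf;
  };

  static int bufs_init(struct bufs *b, size_t oob_len)
  {
  	b->ecc_cfg = kzalloc(128, GFP_KERNEL);
  	if (!b->ecc_cfg)
  		return -ENOMEM;

  	b->oob_buf = kmalloc(oob_len, GFP_KERNEL);
  	if (!b->oob_buf)
  		goto err_free_ecc_cfg;	/* unwind everything allocated so far */

  	return 0;

  err_free_ecc_cfg:
  	kfree(b->ecc_cfg);
  	b->ecc_cfg = NULL;
  	return -ENOMEM;
  }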
Fixes: 7304d1909080 ("spi: spi-qpic: add driver for QCOM SPI NAND flash Interface")
Signed-off-by: Gabor Juhos <j4g8y7@gmail.com>
Link: https://patch.msgid.link/20250313-qpic-snand-memleak-fix-v1-1-e54e78d1da3a@gmail.com
Signed-off-by: Mark Brown <broonie@kernel.org>
---
drivers/spi/spi-qpic-snand.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
--- a/drivers/spi/spi-qpic-snand.c
+++ b/drivers/spi/spi-qpic-snand.c
@@ -263,8 +263,10 @@ static int qcom_spi_ecc_init_ctx_pipelin
return -ENOMEM;
snandc->qspi->oob_buf = kzalloc(mtd->writesize + mtd->oobsize,
GFP_KERNEL);
- if (!snandc->qspi->oob_buf)
+ if (!snandc->qspi->oob_buf) {
+ kfree(ecc_cfg);
return -ENOMEM;
+ }
memset(snandc->qspi->oob_buf, 0xff, mtd->writesize + mtd->oobsize);

View file

@ -0,0 +1,49 @@
From d32c4e58545f17caaa854415f854691e32d42075 Mon Sep 17 00:00:00 2001
From: Geert Uytterhoeven <geert+renesas@glider.be>
Date: Wed, 26 Mar 2025 15:22:19 +0100
Subject: [PATCH] spi: SPI_QPIC_SNAND should be tristate and depend on MTD
SPI_QPIC_SNAND is the only driver that selects MTD instead of depending
on it, which could lead to circular dependencies. Moreover, as
SPI_QPIC_SNAND is bool, this forces MTD (and various related symbols) to
be built-in, as can be seen in an allmodconfig kernel.
Except for a missing semicolon, there is no reason why SPI_QPIC_SNAND
cannot be tristate; all MODULE_*() boilerplate is already present.
Hence make SPI_QPIC_SNAND tristate, let it depend on MTD, and add the
missing semicolon.
Fixes: 7304d1909080ef0c ("spi: spi-qpic: add driver for QCOM SPI NAND flash Interface")
Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be>
Link: https://patch.msgid.link/b63db431cbf35223a4400e44c296293d32c4543c.1742998909.git.geert+renesas@glider.be
Signed-off-by: Mark Brown <broonie@kernel.org>
---
drivers/spi/Kconfig | 4 ++--
drivers/spi/spi-qpic-snand.c | 2 +-
2 files changed, 3 insertions(+), 3 deletions(-)
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -871,9 +871,9 @@ config SPI_QCOM_QSPI
QSPI(Quad SPI) driver for Qualcomm QSPI controller.
config SPI_QPIC_SNAND
- bool "QPIC SNAND controller"
+ tristate "QPIC SNAND controller"
depends on ARCH_QCOM || COMPILE_TEST
- select MTD
+ depends on MTD
help
QPIC_SNAND (QPIC SPI NAND) driver for Qualcomm QPIC controller.
QPIC controller supports both parallel nand and serial nand.
--- a/drivers/spi/spi-qpic-snand.c
+++ b/drivers/spi/spi-qpic-snand.c
@@ -1614,7 +1614,7 @@ static const struct of_device_id qcom_sn
.data = &ipq9574_snandc_props,
},
{}
-}
+};
MODULE_DEVICE_TABLE(of, qcom_snandc_of_match);
static struct platform_driver qcom_spi_driver = {

View file

@ -0,0 +1,29 @@
From f48d80503504257682e493dc17408f2f0b47bcfa Mon Sep 17 00:00:00 2001
From: Gabor Juhos <j4g8y7@gmail.com>
Date: Thu, 20 Mar 2025 19:11:59 +0100
Subject: [PATCH] spi: spi-qpic-snand: use kmalloc() for OOB buffer allocation
The qcom_spi_ecc_init_ctx_pipelined() function allocates zeroed
memory for the OOB buffer, then it fills the buffer with '0xff'
bytes right after the allocation. In this case zeroing the memory
during allocation is superfluous, so use kmalloc() instead of
kzalloc() to avoid that.
Signed-off-by: Gabor Juhos <j4g8y7@gmail.com>
Link: https://patch.msgid.link/20250320-qpic-snand-kmalloc-v1-1-94e267550675@gmail.com
Signed-off-by: Mark Brown <broonie@kernel.org>
---
drivers/spi/spi-qpic-snand.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--- a/drivers/spi/spi-qpic-snand.c
+++ b/drivers/spi/spi-qpic-snand.c
@@ -261,7 +261,7 @@ static int qcom_spi_ecc_init_ctx_pipelin
ecc_cfg = kzalloc(sizeof(*ecc_cfg), GFP_KERNEL);
if (!ecc_cfg)
return -ENOMEM;
- snandc->qspi->oob_buf = kzalloc(mtd->writesize + mtd->oobsize,
+ snandc->qspi->oob_buf = kmalloc(mtd->writesize + mtd->oobsize,
GFP_KERNEL);
if (!snandc->qspi->oob_buf) {
kfree(ecc_cfg);

View file

@ -0,0 +1,31 @@
From 1824520e7477bedf76bd08c32261c755e6405cd9 Mon Sep 17 00:00:00 2001
From: Daniel Golle <daniel@makrotopia.org>
Date: Mon, 12 Aug 2024 02:56:41 +0100
Subject: [PATCH] mtd: spinand: set bitflip_threshold to 75% of ECC strength
Reporting an unclean read from SPI-NAND only when the maximum number
of correctable bitflip errors has been hit seems a bit late.
UBI LEB scrubbing, which depends on the lower MTD device reporting
correctable bitflips, then only kicks in when it's almost too late.
Set bitflip_threshold to 75% of the ECC strength, which is also the
default for raw NAND.
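For example, applying the formula from the hunk below to a typical 8-bit-per-step ECC: DIV_ROUND_UP(8 * 3, 4) = 6, so the MTD layer reports -EUCLEAN once 6 bitflips are corrected within a single ECC step, giving UBI room to scrub well before the full 8-bit correction limit is reached.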
Signed-off-by: Daniel Golle <daniel@makrotopia.org>
Reviewed-by: Frieder Schrempf <frieder.schrempf@kontron.de>
Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
Link: https://lore.kernel.org/linux-mtd/2117e387260b0a96f95b8e1652ff79e0e2d71d53.1723427450.git.daniel@makrotopia.org
---
drivers/mtd/nand/spi/core.c | 1 +
1 file changed, 1 insertion(+)
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -1287,6 +1287,7 @@ static int spinand_init(struct spinand_d
/* Propagate ECC information to mtd_info */
mtd->ecc_strength = nanddev_get_ecc_conf(nand)->strength;
mtd->ecc_step_size = nanddev_get_ecc_conf(nand)->step_size;
+ mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);
ret = spinand_create_dirmaps(spinand);
if (ret) {

View file

@ -0,0 +1,65 @@
From e2a9fcb36e851adb5b25c4acea53a290fd48a636 Mon Sep 17 00:00:00 2001
From: Robert Marko <robimarko@gmail.com>
Date: Mon, 5 Aug 2024 19:51:02 +0200
Subject: [PATCH] mtd: spinand: winbond: add support for W25N01KV
Add support for Winbond W25N01KV 1Gbit SPI-NAND.
It has 4-bit on-die ECC.
Signed-off-by: Robert Marko <robimarko@gmail.com>
Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
Link: https://lore.kernel.org/linux-mtd/20240805175125.6658-1-robimarko@gmail.com
---
drivers/mtd/nand/spi/winbond.c | 26 ++++++++++++++++++++++++++
1 file changed, 26 insertions(+)
--- a/drivers/mtd/nand/spi/winbond.c
+++ b/drivers/mtd/nand/spi/winbond.c
@@ -74,6 +74,18 @@ static int w25m02gv_select_target(struct
return spi_mem_exec_op(spinand->spimem, &op);
}
+static int w25n01kv_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 3)
+ return -ERANGE;
+
+ region->offset = 64 + (8 * section);
+ region->length = 7;
+
+ return 0;
+}
+
static int w25n02kv_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
@@ -98,6 +110,11 @@ static int w25n02kv_ooblayout_free(struc
return 0;
}
+static const struct mtd_ooblayout_ops w25n01kv_ooblayout = {
+ .ecc = w25n01kv_ooblayout_ecc,
+ .free = w25n02kv_ooblayout_free,
+};
+
static const struct mtd_ooblayout_ops w25n02kv_ooblayout = {
.ecc = w25n02kv_ooblayout_ecc,
.free = w25n02kv_ooblayout_free,
@@ -160,6 +177,15 @@ static const struct spinand_info winbond
&update_cache_variants),
0,
SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL)),
+ SPINAND_INFO("W25N01KV",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xae, 0x21),
+ NAND_MEMORG(1, 2048, 96, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(4, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&w25n01kv_ooblayout, w25n02kv_ecc_get_status)),
SPINAND_INFO("W25N02KV",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xaa, 0x22),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),

View file

@ -0,0 +1,75 @@
From 56364c910691f6d10ba88c964c9041b9ab777bd6 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Mon, 25 Mar 2024 08:40:28 +0100
Subject: [PATCH 1/4] net: Remove conditional threaded-NAPI wakeup based on
task state.
A NAPI thread is scheduled by first setting the NAPI_STATE_SCHED bit. If
successful (the bit was not yet set) then NAPI_STATE_SCHED_THREADED
is set as well, but only if the thread's state is not TASK_INTERRUPTIBLE
(i.e. it is TASK_RUNNING), followed by a task wakeup.
If the task is idle (TASK_INTERRUPTIBLE) then the
NAPI_STATE_SCHED_THREADED bit is not set. The thread is not relying on
the bit but always leaves the wait-loop after returning from schedule()
because there must have been a wakeup.
The smpboot-threads implementation for per-CPU threads requires an
explicit condition and does not support "if we get out of schedule()
then there must be something to do".
Removing this optimisation simplifies the following integration.
Set NAPI_STATE_SCHED_THREADED unconditionally on wakeup and rely on it
in the wait path by removing the `woken' condition.
Acked-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
---
net/core/dev.c | 14 ++------------
1 file changed, 2 insertions(+), 12 deletions(-)
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4514,13 +4514,7 @@ static inline void ____napi_schedule(str
*/
thread = READ_ONCE(napi->thread);
if (thread) {
- /* Avoid doing set_bit() if the thread is in
- * INTERRUPTIBLE state, cause napi_thread_wait()
- * makes sure to proceed with napi polling
- * if the thread is explicitly woken from here.
- */
- if (READ_ONCE(thread->__state) != TASK_INTERRUPTIBLE)
- set_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
+ set_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
wake_up_process(thread);
return;
}
@@ -6676,8 +6670,6 @@ static int napi_poll(struct napi_struct
static int napi_thread_wait(struct napi_struct *napi)
{
- bool woken = false;
-
set_current_state(TASK_INTERRUPTIBLE);
while (!kthread_should_stop()) {
@@ -6686,15 +6678,13 @@ static int napi_thread_wait(struct napi_
* Testing SCHED bit is not enough because SCHED bit might be
* set by some other busy poll thread or by napi_disable().
*/
- if (test_bit(NAPI_STATE_SCHED_THREADED, &napi->state) || woken) {
+ if (test_bit(NAPI_STATE_SCHED_THREADED, &napi->state)) {
WARN_ON(!list_empty(&napi->poll_list));
__set_current_state(TASK_RUNNING);
return 0;
}
schedule();
- /* woken being true indicates this thread owns this napi. */
- woken = true;
set_current_state(TASK_INTERRUPTIBLE);
}
__set_current_state(TASK_RUNNING);

View file

@ -0,0 +1,330 @@
From dad6b97702639fba27a2bd3e986982ad6f0db3a7 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Mon, 25 Mar 2024 08:40:29 +0100
Subject: [PATCH 2/4] net: Allow to use SMP threads for backlog NAPI.
Backlog NAPI is a per-CPU NAPI struct only (with no device behind it)
used by drivers which don't do NAPI themselves, by RPS, and by parts of
the stack which need to avoid recursive deadlocks while processing a packet.
Non-NAPI drivers use the CPU-local backlog NAPI. If RPS is enabled
then a flow for the skb is computed and based on the flow the skb can be
enqueued on a remote CPU. Scheduling/raising the softirq (for backlog's
NAPI) on the remote CPU isn't trivial because the softirq is only
scheduled on the local CPU and performed after the hardirq is done.
In order to schedule a softirq on the remote CPU, an IPI is sent to the
remote CPU which schedules the backlog-NAPI on the then local CPU.
On PREEMPT_RT interrupts are force-threaded. The soft interrupts are
raised within the interrupt thread and processed after the interrupt
handler completed still within the context of the interrupt thread. The
softirq is handled in the context where it originated.
With force-threaded interrupts enabled, ksoftirqd is woken up if a
softirq is raised from hardirq context. This is the case if it is raised
from an IPI. Additionally there is a warning on PREEMPT_RT if the
softirq is raised from the idle thread.
This was done for two reasons:
- With threaded interrupts the processing should happen in thread
context (where it originated) and ksoftirqd is the only thread for
this context if raised from hardirq. Using the currently running task
instead would "punish" a random task.
- Once ksoftirqd is active it consumes all further softirqs until it
stops running. This changed recently and is no longer the case.
Instead of keeping the backlog NAPI in ksoftirqd (in force-threaded/
PREEMPT_RT setups) I am proposing NAPI-threads for backlog.
The "proper" setup with threaded-NAPI is not doable because the threads
are not pinned to an individual CPU and can be modified by the user.
Additionally a dummy network device would have to be assigned. Also
CPU-hotplug has to be considered if additional CPUs show up.
All this can probably be done/solved, but the smpboot-threads already
provide this infrastructure.
Sending UDP packets over loopback expects that the packet is processed
within the call. Delaying it by handing it over to the thread hurts
performance. It is not beneficial to the outcome if the context switch
happens immediately after enqueue or after a while to process a few
packets in a batch.
There is no need to always use the thread if the backlog NAPI is
requested on the local CPU. This restores the loopback throughput. The
performance drops mostly to the same value after enabling RPS on the
loopback, comparing the IPI and the thread result.
Create NAPI-threads for backlog if requested during boot. The thread runs
the inner loop from napi_threaded_poll(); the wait part is different. It
checks for NAPI_STATE_SCHED (the backlog NAPI cannot be disabled).
The NAPI threads for backlog are optional; they have to be enabled via the boot
argument "thread_backlog_napi". This is mandatory for PREEMPT_RT to avoid the
wakeup of ksoftirqd from the IPI.
Acked-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
---
net/core/dev.c | 148 +++++++++++++++++++++++++++++++++++++------------
1 file changed, 113 insertions(+), 35 deletions(-)
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -78,6 +78,7 @@
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
+#include <linux/smpboot.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/string.h>
@@ -217,6 +218,31 @@ static inline struct hlist_head *dev_ind
return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}
+#ifndef CONFIG_PREEMPT_RT
+
+static DEFINE_STATIC_KEY_FALSE(use_backlog_threads_key);
+
+static int __init setup_backlog_napi_threads(char *arg)
+{
+ static_branch_enable(&use_backlog_threads_key);
+ return 0;
+}
+early_param("thread_backlog_napi", setup_backlog_napi_threads);
+
+static bool use_backlog_threads(void)
+{
+ return static_branch_unlikely(&use_backlog_threads_key);
+}
+
+#else
+
+static bool use_backlog_threads(void)
+{
+ return true;
+}
+
+#endif
+
static inline void rps_lock_irqsave(struct softnet_data *sd,
unsigned long *flags)
{
@@ -4482,6 +4508,7 @@ EXPORT_SYMBOL(__dev_direct_xmit);
/*************************************************************************
* Receiver routines
*************************************************************************/
+static DEFINE_PER_CPU(struct task_struct *, backlog_napi);
int netdev_max_backlog __read_mostly = 1000;
EXPORT_SYMBOL(netdev_max_backlog);
@@ -4514,12 +4541,16 @@ static inline void ____napi_schedule(str
*/
thread = READ_ONCE(napi->thread);
if (thread) {
+ if (use_backlog_threads() && thread == raw_cpu_read(backlog_napi))
+ goto use_local_napi;
+
set_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
wake_up_process(thread);
return;
}
}
+use_local_napi:
list_add_tail(&napi->poll_list, &sd->poll_list);
WRITE_ONCE(napi->list_owner, smp_processor_id());
/* If not called from net_rx_action()
@@ -4765,6 +4796,11 @@ static void napi_schedule_rps(struct sof
#ifdef CONFIG_RPS
if (sd != mysd) {
+ if (use_backlog_threads()) {
+ __napi_schedule_irqoff(&sd->backlog);
+ return;
+ }
+
sd->rps_ipi_next = mysd->rps_ipi_list;
mysd->rps_ipi_list = sd;
@@ -5988,7 +6024,7 @@ static void net_rps_action_and_irq_enabl
#ifdef CONFIG_RPS
struct softnet_data *remsd = sd->rps_ipi_list;
- if (remsd) {
+ if (!use_backlog_threads() && remsd) {
sd->rps_ipi_list = NULL;
local_irq_enable();
@@ -6003,7 +6039,7 @@ static void net_rps_action_and_irq_enabl
static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
- return sd->rps_ipi_list != NULL;
+ return !use_backlog_threads() && sd->rps_ipi_list;
#else
return false;
#endif
@@ -6047,7 +6083,7 @@ static int process_backlog(struct napi_s
* We can use a plain write instead of clear_bit(),
* and we dont need an smp_mb() memory barrier.
*/
- napi->state = 0;
+ napi->state &= NAPIF_STATE_THREADED;
again = false;
} else {
skb_queue_splice_tail_init(&sd->input_pkt_queue,
@@ -6713,43 +6749,48 @@ static void skb_defer_free_flush(struct
}
}
-static int napi_threaded_poll(void *data)
+static void napi_threaded_poll_loop(struct napi_struct *napi)
{
- struct napi_struct *napi = data;
struct softnet_data *sd;
- void *have;
+ unsigned long last_qs = jiffies;
- while (!napi_thread_wait(napi)) {
- unsigned long last_qs = jiffies;
+ for (;;) {
+ bool repoll = false;
+ void *have;
- for (;;) {
- bool repoll = false;
+ local_bh_disable();
+ sd = this_cpu_ptr(&softnet_data);
+ sd->in_napi_threaded_poll = true;
- local_bh_disable();
- sd = this_cpu_ptr(&softnet_data);
- sd->in_napi_threaded_poll = true;
-
- have = netpoll_poll_lock(napi);
- __napi_poll(napi, &repoll);
- netpoll_poll_unlock(have);
-
- sd->in_napi_threaded_poll = false;
- barrier();
-
- if (sd_has_rps_ipi_waiting(sd)) {
- local_irq_disable();
- net_rps_action_and_irq_enable(sd);
- }
- skb_defer_free_flush(sd);
- local_bh_enable();
+ have = netpoll_poll_lock(napi);
+ __napi_poll(napi, &repoll);
+ netpoll_poll_unlock(have);
+
+ sd->in_napi_threaded_poll = false;
+ barrier();
+
+ if (sd_has_rps_ipi_waiting(sd)) {
+ local_irq_disable();
+ net_rps_action_and_irq_enable(sd);
+ }
+ skb_defer_free_flush(sd);
+ local_bh_enable();
- if (!repoll)
- break;
+ if (!repoll)
+ break;
- rcu_softirq_qs_periodic(last_qs);
- cond_resched();
- }
+ rcu_softirq_qs_periodic(last_qs);
+ cond_resched();
}
+}
+
+static int napi_threaded_poll(void *data)
+{
+ struct napi_struct *napi = data;
+
+ while (!napi_thread_wait(napi))
+ napi_threaded_poll_loop(napi);
+
return 0;
}
@@ -11334,7 +11375,7 @@ static int dev_cpu_dead(unsigned int old
list_del_init(&napi->poll_list);
if (napi->poll == process_backlog)
- napi->state = 0;
+ napi->state &= NAPIF_STATE_THREADED;
else
____napi_schedule(sd, napi);
}
@@ -11342,12 +11383,14 @@ static int dev_cpu_dead(unsigned int old
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_enable();
+ if (!use_backlog_threads()) {
#ifdef CONFIG_RPS
- remsd = oldsd->rps_ipi_list;
- oldsd->rps_ipi_list = NULL;
+ remsd = oldsd->rps_ipi_list;
+ oldsd->rps_ipi_list = NULL;
#endif
- /* send out pending IPI's on offline CPU */
- net_rps_send_ipi(remsd);
+ /* send out pending IPI's on offline CPU */
+ net_rps_send_ipi(remsd);
+ }
/* Process offline CPU's input_pkt_queue */
while ((skb = __skb_dequeue(&oldsd->process_queue))) {
@@ -11610,6 +11653,38 @@ static struct pernet_operations __net_in
*
*/
+static int backlog_napi_should_run(unsigned int cpu)
+{
+ struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu);
+ struct napi_struct *napi = &sd->backlog;
+
+ return test_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
+}
+
+static void run_backlog_napi(unsigned int cpu)
+{
+ struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu);
+
+ napi_threaded_poll_loop(&sd->backlog);
+}
+
+static void backlog_napi_setup(unsigned int cpu)
+{
+ struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu);
+ struct napi_struct *napi = &sd->backlog;
+
+ napi->thread = this_cpu_read(backlog_napi);
+ set_bit(NAPI_STATE_THREADED, &napi->state);
+}
+
+static struct smp_hotplug_thread backlog_threads = {
+ .store = &backlog_napi,
+ .thread_should_run = backlog_napi_should_run,
+ .thread_fn = run_backlog_napi,
+ .thread_comm = "backlog_napi/%u",
+ .setup = backlog_napi_setup,
+};
+
/*
* This is called single threaded during boot, so no need
* to take the rtnl semaphore.
@@ -11660,7 +11735,10 @@ static int __init net_dev_init(void)
init_gro_hash(&sd->backlog);
sd->backlog.poll = process_backlog;
sd->backlog.weight = weight_p;
+ INIT_LIST_HEAD(&sd->backlog.poll_list);
}
+ if (use_backlog_threads())
+ smpboot_register_percpu_thread(&backlog_threads);
dev_boot_phase = 0;

View file

@ -0,0 +1,121 @@
From 80d2eefcb4c84aa9018b2a997ab3a4c567bc821a Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Mon, 25 Mar 2024 08:40:30 +0100
Subject: [PATCH 3/4] net: Use backlog-NAPI to clean up the defer_list.
The defer_list is a per-CPU list which is used to free skbs outside of
the socket lock and on the CPU on which they have been allocated.
The list is processed during NAPI callbacks so ideally the list is
cleaned up.
Should the number of skbs on the list exceed a certain watermark, the
softirq is triggered remotely on the target CPU by invoking a remote
function call. Raising the softirq via a remote function call
leads to waking ksoftirqd on PREEMPT_RT, which is undesired.
The backlog-NAPI threads already provide the infrastructure which can be
utilized to perform the cleanup of the defer_list.
The NAPI state is updated with the input_pkt_queue.lock acquired. In
order not to break the state, the backlog-NAPI thread also needs to be
woken with the lock held. This requires taking the lock in
rps_lock_irq*() if the backlog-NAPI threads are used, even with RPS
disabled.
Move the logic of remotely starting softirqs to clean up the defer_list
into kick_defer_list_purge(). Make sure a lock is held in
rps_lock_irq*() if backlog-NAPI threads are used. Schedule backlog-NAPI
for defer_list cleanup if backlog-NAPI is available.
Acked-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
---
include/linux/netdevice.h | 1 +
net/core/dev.c | 25 +++++++++++++++++++++----
net/core/skbuff.c | 4 ++--
3 files changed, 24 insertions(+), 6 deletions(-)
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3308,6 +3308,7 @@ static inline void dev_xmit_recursion_de
__this_cpu_dec(softnet_data.xmit.recursion);
}
+void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu);
void __netif_schedule(struct Qdisc *q);
void netif_schedule_queue(struct netdev_queue *txq);
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -246,7 +246,7 @@ static bool use_backlog_threads(void)
static inline void rps_lock_irqsave(struct softnet_data *sd,
unsigned long *flags)
{
- if (IS_ENABLED(CONFIG_RPS))
+ if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags);
else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
local_irq_save(*flags);
@@ -254,7 +254,7 @@ static inline void rps_lock_irqsave(stru
static inline void rps_lock_irq_disable(struct softnet_data *sd)
{
- if (IS_ENABLED(CONFIG_RPS))
+ if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
spin_lock_irq(&sd->input_pkt_queue.lock);
else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
local_irq_disable();
@@ -263,7 +263,7 @@ static inline void rps_lock_irq_disable(
static inline void rps_unlock_irq_restore(struct softnet_data *sd,
unsigned long *flags)
{
- if (IS_ENABLED(CONFIG_RPS))
+ if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags);
else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
local_irq_restore(*flags);
@@ -271,7 +271,7 @@ static inline void rps_unlock_irq_restor
static inline void rps_unlock_irq_enable(struct softnet_data *sd)
{
- if (IS_ENABLED(CONFIG_RPS))
+ if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
spin_unlock_irq(&sd->input_pkt_queue.lock);
else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
local_irq_enable();
@@ -4815,6 +4815,23 @@ static void napi_schedule_rps(struct sof
__napi_schedule_irqoff(&mysd->backlog);
}
+void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu)
+{
+ unsigned long flags;
+
+ if (use_backlog_threads()) {
+ rps_lock_irqsave(sd, &flags);
+
+ if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state))
+ __napi_schedule_irqoff(&sd->backlog);
+
+ rps_unlock_irq_restore(sd, &flags);
+
+ } else if (!cmpxchg(&sd->defer_ipi_scheduled, 0, 1)) {
+ smp_call_function_single_async(cpu, &sd->defer_csd);
+ }
+}
+
#ifdef CONFIG_NET_FLOW_LIMIT
int netdev_flow_limit_table_len __read_mostly = (1 << 12);
#endif
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -6863,8 +6863,8 @@ nodefer: __kfree_skb(skb);
/* Make sure to trigger NET_RX_SOFTIRQ on the remote CPU
* if we are unlucky enough (this seems very unlikely).
*/
- if (unlikely(kick) && !cmpxchg(&sd->defer_ipi_scheduled, 0, 1))
- smp_call_function_single_async(cpu, &sd->defer_csd);
+ if (unlikely(kick))
+ kick_defer_list_purge(sd, cpu);
}
static void skb_splice_csum_page(struct sk_buff *skb, struct page *page,

View file

@ -0,0 +1,164 @@
From 765b11f8f4e20b7433e4ba4a3e9106a0d59501ed Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Mon, 25 Mar 2024 08:40:31 +0100
Subject: [PATCH 4/4] net: Rename rps_lock to backlog_lock.
The rps_lock.*() functions use the inner lock of a sk_buff_head for
locking. This lock is used if RPS is enabled, otherwise the list is
accessed locklessly and disabling interrupts is enough for the
synchronisation because it is only accessed CPU-locally. Not only the
list but also the NAPI state is protected.
With the addition of backlog threads, the lock is also needed because of
the cross CPU access even without RPS. The clean up of the defer_list
list is also done via backlog threads (if enabled).
It has been suggested to rename the locking function since it is no
longer just RPS.
Rename the rps_lock*() functions to backlog_lock*().
Suggested-by: Jakub Kicinski <kuba@kernel.org>
Acked-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
---
net/core/dev.c | 34 +++++++++++++++++-----------------
1 file changed, 17 insertions(+), 17 deletions(-)
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -243,8 +243,8 @@ static bool use_backlog_threads(void)
#endif
-static inline void rps_lock_irqsave(struct softnet_data *sd,
- unsigned long *flags)
+static inline void backlog_lock_irq_save(struct softnet_data *sd,
+ unsigned long *flags)
{
if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags);
@@ -252,7 +252,7 @@ static inline void rps_lock_irqsave(stru
local_irq_save(*flags);
}
-static inline void rps_lock_irq_disable(struct softnet_data *sd)
+static inline void backlog_lock_irq_disable(struct softnet_data *sd)
{
if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
spin_lock_irq(&sd->input_pkt_queue.lock);
@@ -260,8 +260,8 @@ static inline void rps_lock_irq_disable(
local_irq_disable();
}
-static inline void rps_unlock_irq_restore(struct softnet_data *sd,
- unsigned long *flags)
+static inline void backlog_unlock_irq_restore(struct softnet_data *sd,
+ unsigned long *flags)
{
if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags);
@@ -269,7 +269,7 @@ static inline void rps_unlock_irq_restor
local_irq_restore(*flags);
}
-static inline void rps_unlock_irq_enable(struct softnet_data *sd)
+static inline void backlog_unlock_irq_enable(struct softnet_data *sd)
{
if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
spin_unlock_irq(&sd->input_pkt_queue.lock);
@@ -4820,12 +4820,12 @@ void kick_defer_list_purge(struct softne
unsigned long flags;
if (use_backlog_threads()) {
- rps_lock_irqsave(sd, &flags);
+ backlog_lock_irq_save(sd, &flags);
if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state))
__napi_schedule_irqoff(&sd->backlog);
- rps_unlock_irq_restore(sd, &flags);
+ backlog_unlock_irq_restore(sd, &flags);
} else if (!cmpxchg(&sd->defer_ipi_scheduled, 0, 1)) {
smp_call_function_single_async(cpu, &sd->defer_csd);
@@ -4887,7 +4887,7 @@ static int enqueue_to_backlog(struct sk_
reason = SKB_DROP_REASON_NOT_SPECIFIED;
sd = &per_cpu(softnet_data, cpu);
- rps_lock_irqsave(sd, &flags);
+ backlog_lock_irq_save(sd, &flags);
if (!netif_running(skb->dev))
goto drop;
qlen = skb_queue_len(&sd->input_pkt_queue);
@@ -4896,7 +4896,7 @@ static int enqueue_to_backlog(struct sk_
enqueue:
__skb_queue_tail(&sd->input_pkt_queue, skb);
input_queue_tail_incr_save(sd, qtail);
- rps_unlock_irq_restore(sd, &flags);
+ backlog_unlock_irq_restore(sd, &flags);
return NET_RX_SUCCESS;
}
@@ -4911,7 +4911,7 @@ enqueue:
drop:
sd->dropped++;
- rps_unlock_irq_restore(sd, &flags);
+ backlog_unlock_irq_restore(sd, &flags);
dev_core_stats_rx_dropped_inc(skb->dev);
kfree_skb_reason(skb, reason);
@@ -5942,7 +5942,7 @@ static void flush_backlog(struct work_st
local_bh_disable();
sd = this_cpu_ptr(&softnet_data);
- rps_lock_irq_disable(sd);
+ backlog_lock_irq_disable(sd);
skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
if (skb->dev->reg_state == NETREG_UNREGISTERING) {
__skb_unlink(skb, &sd->input_pkt_queue);
@@ -5950,7 +5950,7 @@ static void flush_backlog(struct work_st
input_queue_head_incr(sd);
}
}
- rps_unlock_irq_enable(sd);
+ backlog_unlock_irq_enable(sd);
skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
if (skb->dev->reg_state == NETREG_UNREGISTERING) {
@@ -5968,14 +5968,14 @@ static bool flush_required(int cpu)
struct softnet_data *sd = &per_cpu(softnet_data, cpu);
bool do_flush;
- rps_lock_irq_disable(sd);
+ backlog_lock_irq_disable(sd);
/* as insertion into process_queue happens with the rps lock held,
* process_queue access may race only with dequeue
*/
do_flush = !skb_queue_empty(&sd->input_pkt_queue) ||
!skb_queue_empty_lockless(&sd->process_queue);
- rps_unlock_irq_enable(sd);
+ backlog_unlock_irq_enable(sd);
return do_flush;
#endif
@@ -6090,7 +6090,7 @@ static int process_backlog(struct napi_s
}
- rps_lock_irq_disable(sd);
+ backlog_lock_irq_disable(sd);
if (skb_queue_empty(&sd->input_pkt_queue)) {
/*
* Inline a custom version of __napi_complete().
@@ -6106,7 +6106,7 @@ static int process_backlog(struct napi_s
skb_queue_splice_tail_init(&sd->input_pkt_queue,
&sd->process_queue);
}
- rps_unlock_irq_enable(sd);
+ backlog_unlock_irq_enable(sd);
}
return work;

View file

@@ -0,0 +1,30 @@
From 6c06c88fa838fcc1b7e5380facd086f57fd9d1c4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Marek=20Beh=C3=BAn?= <kabel@kernel.org>
Date: Sun, 4 Feb 2024 15:16:46 +0100
Subject: [PATCH] net: mdio: add 2.5g and 5g related PMA speed constants
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Add constants indicating 2.5g and 5g ability in the MMD PMA speed
register.
Signed-off-by: Marek Behún <kabel@kernel.org>
Signed-off-by: Heiner Kallweit <hkallweit1@gmail.com>
Link: https://lore.kernel.org/r/98e15038-d96c-442f-93e4-410100d27866@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
include/uapi/linux/mdio.h | 2 ++
1 file changed, 2 insertions(+)
--- a/include/uapi/linux/mdio.h
+++ b/include/uapi/linux/mdio.h
@@ -138,6 +138,8 @@
#define MDIO_PMA_SPEED_1000 0x0010 /* 1000M capable */
#define MDIO_PMA_SPEED_100 0x0020 /* 100M capable */
#define MDIO_PMA_SPEED_10 0x0040 /* 10M capable */
+#define MDIO_PMA_SPEED_2_5G 0x2000 /* 2.5G capable */
+#define MDIO_PMA_SPEED_5G 0x4000 /* 5G capable */
#define MDIO_PCS_SPEED_10P2B 0x0002 /* 10PASS-TS/2BASE-TL capable */
#define MDIO_PCS_SPEED_2_5G 0x0040 /* 2.5G capable */
#define MDIO_PCS_SPEED_5G 0x0080 /* 5G capable */

View file

@@ -0,0 +1,94 @@
From: Jakub Sitnicki <jakub@cloudflare.com>
Date: Wed, 26 Jun 2024 19:51:26 +0200
Subject: [PATCH] udp: Allow GSO transmit from devices with no checksum offload
Today sending a UDP GSO packet from a TUN device results in an EIO error:
import fcntl, os, struct
from socket import *
TUNSETIFF = 0x400454CA
IFF_TUN = 0x0001
IFF_NO_PI = 0x1000
UDP_SEGMENT = 103
tun_fd = os.open("/dev/net/tun", os.O_RDWR)
ifr = struct.pack("16sH", b"tun0", IFF_TUN | IFF_NO_PI)
fcntl.ioctl(tun_fd, TUNSETIFF, ifr)
os.system("ip addr add 192.0.2.1/24 dev tun0")
os.system("ip link set dev tun0 up")
s = socket(AF_INET, SOCK_DGRAM)
s.setsockopt(SOL_UDP, UDP_SEGMENT, 1200)
s.sendto(b"x" * 3000, ("192.0.2.2", 9)) # EIO
This is due to a check in the UDP stack for whether the egress device offers
checksum offload. TUN/TAP devices, by default, don't advertise this
capability because it requires support from the TUN/TAP reader.
However, the GSO stack has a software fallback for checksum calculation,
which we can use. This way we don't force UDP_SEGMENT users to handle the
EIO error and implement a segmentation fallback.
Lift the restriction so that UDP_SEGMENT can be used with any egress
device. We also need to adjust the UDP GSO code to match the GSO stack
expectation about ip_summed field, as set in commit 8d63bee643f1 ("net:
avoid skb_warn_bad_offload false positives on UFO"). Otherwise we will hit
the bad offload check.
Users should, however, expect a potential performance impact when
batch-sending packets with UDP_SEGMENT without checksum offload on the
egress device. In such case the packet payload is read twice: first during
the sendmsg syscall when copying data from user memory, and then in the GSO
stack for checksum computation. This double memory read can be less
efficient than a regular sendmsg where the checksum is calculated during
the initial data copy from user memory.
Signed-off-by: Jakub Sitnicki <jakub@cloudflare.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Link: https://patch.msgid.link/20240626-linux-udpgso-v2-1-422dfcbd6b48@cloudflare.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -942,8 +942,7 @@ static int udp_send_skb(struct sk_buff *
kfree_skb(skb);
return -EINVAL;
}
- if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
- dst_xfrm(skb_dst(skb))) {
+ if (is_udplite || dst_xfrm(skb_dst(skb))) {
kfree_skb(skb);
return -EIO;
}
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -384,6 +384,14 @@ struct sk_buff *__udp_gso_segment(struct
else
uh->check = gso_make_checksum(seg, ~check) ? : CSUM_MANGLED_0;
+ /* On the TX path, CHECKSUM_NONE and CHECKSUM_UNNECESSARY have the same
+ * meaning. However, check for bad offloads in the GSO stack expects the
+ * latter, if the checksum was calculated in software. To vouch for the
+ * segment skbs we actually need to set it on the gso_skb.
+ */
+ if (gso_skb->ip_summed == CHECKSUM_NONE)
+ gso_skb->ip_summed = CHECKSUM_UNNECESSARY;
+
/* update refcount for the packet */
if (copy_dtor) {
int delta = sum_truesize - gso_skb->truesize;
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1258,8 +1258,7 @@ static int udp_v6_send_skb(struct sk_buf
kfree_skb(skb);
return -EINVAL;
}
- if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
- dst_xfrm(skb_dst(skb))) {
+ if (is_udplite || dst_xfrm(skb_dst(skb))) {
kfree_skb(skb);
return -EIO;
}

View file

@@ -0,0 +1,69 @@
From: Jakub Sitnicki <jakub@cloudflare.com>
Date: Thu, 8 Aug 2024 11:56:21 +0200
Subject: [PATCH] net: Make USO depend on CSUM offload
UDP segmentation offload inherently depends on checksum offload. It should
not be possible to disable checksum offload while leaving USO enabled.
Enforce this dependency in code.
There is a single tx-udp-segmentation feature flag to indicate support for
both IPv4/6, hence the devices wishing to support USO must offer checksum
offload for both IP versions.
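As a minimal illustration (device pointer and placement are hypothetical; the
feature flags and netdev_fix_features() behaviour come from the patch below),
a driver that wants USO should advertise a matching checksum offload,
otherwise the core now silently clears it:
/* Illustrative register-time feature setup for a hypothetical driver.
 * With this patch, netdev_fix_features() drops NETIF_F_GSO_UDP_L4 unless
 * the device also offers NETIF_F_HW_CSUM (or both NETIF_F_IP_CSUM and
 * NETIF_F_IPV6_CSUM).
 */
dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_GSO_UDP_L4;
dev->features |= dev->hw_features;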
Fixes: 10154dbded6d ("udp: Allow GSO transmit from devices with no checksum offload")
Suggested-by: Willem de Bruijn <willemdebruijn.kernel@gmail.com>
Signed-off-by: Jakub Sitnicki <jakub@cloudflare.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Link: https://patch.msgid.link/20240808-udp-gso-egress-from-tunnel-v4-1-f5c5b4149ab9@cloudflare.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -9796,6 +9796,15 @@ static void netdev_sync_lower_features(s
}
}
+static bool netdev_has_ip_or_hw_csum(netdev_features_t features)
+{
+ netdev_features_t ip_csum_mask = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ bool ip_csum = (features & ip_csum_mask) == ip_csum_mask;
+ bool hw_csum = features & NETIF_F_HW_CSUM;
+
+ return ip_csum || hw_csum;
+}
+
static netdev_features_t netdev_fix_features(struct net_device *dev,
netdev_features_t features)
{
@@ -9877,15 +9886,9 @@ static netdev_features_t netdev_fix_feat
features &= ~NETIF_F_LRO;
}
- if (features & NETIF_F_HW_TLS_TX) {
- bool ip_csum = (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) ==
- (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
- bool hw_csum = features & NETIF_F_HW_CSUM;
-
- if (!ip_csum && !hw_csum) {
- netdev_dbg(dev, "Dropping TLS TX HW offload feature since no CSUM feature.\n");
- features &= ~NETIF_F_HW_TLS_TX;
- }
+ if ((features & NETIF_F_HW_TLS_TX) && !netdev_has_ip_or_hw_csum(features)) {
+ netdev_dbg(dev, "Dropping TLS TX HW offload feature since no CSUM feature.\n");
+ features &= ~NETIF_F_HW_TLS_TX;
}
if ((features & NETIF_F_HW_TLS_RX) && !(features & NETIF_F_RXCSUM)) {
@@ -9893,6 +9896,11 @@ static netdev_features_t netdev_fix_feat
features &= ~NETIF_F_HW_TLS_RX;
}
+ if ((features & NETIF_F_GSO_UDP_L4) && !netdev_has_ip_or_hw_csum(features)) {
+ netdev_dbg(dev, "Dropping USO feature since no CSUM feature.\n");
+ features &= ~NETIF_F_GSO_UDP_L4;
+ }
+
return features;
}

View file

@@ -0,0 +1,86 @@
From: Jakub Sitnicki <jakub@cloudflare.com>
Date: Thu, 8 Aug 2024 11:56:22 +0200
Subject: [PATCH] udp: Fall back to software USO if IPv6 extension headers are
present
In commit 10154dbded6d ("udp: Allow GSO transmit from devices with no
checksum offload") we have intentionally allowed UDP GSO packets marked
CHECKSUM_NONE to pass to the GSO stack, so that they can be segmented and
checksummed by a software fallback when the egress device lacks these
features.
What was not taken into consideration is that a CHECKSUM_NONE skb can be
handed over to the GSO stack even when the egress device advertises the
tx-udp-segmentation / NETIF_F_GSO_UDP_L4 feature.
This will happen when there are IPv6 extension headers present, which we
check for in __ip6_append_data(). Syzbot has discovered this scenario,
producing a warning as below:
ip6tnl0: caps=(0x00000006401d7869, 0x00000006401d7869)
WARNING: CPU: 0 PID: 5112 at net/core/dev.c:3293 skb_warn_bad_offload+0x166/0x1a0 net/core/dev.c:3291
Modules linked in:
CPU: 0 PID: 5112 Comm: syz-executor391 Not tainted 6.10.0-rc7-syzkaller-01603-g80ab5445da62 #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 06/07/2024
RIP: 0010:skb_warn_bad_offload+0x166/0x1a0 net/core/dev.c:3291
[...]
Call Trace:
<TASK>
__skb_gso_segment+0x3be/0x4c0 net/core/gso.c:127
skb_gso_segment include/net/gso.h:83 [inline]
validate_xmit_skb+0x585/0x1120 net/core/dev.c:3661
__dev_queue_xmit+0x17a4/0x3e90 net/core/dev.c:4415
neigh_output include/net/neighbour.h:542 [inline]
ip6_finish_output2+0xffa/0x1680 net/ipv6/ip6_output.c:137
ip6_finish_output+0x41e/0x810 net/ipv6/ip6_output.c:222
ip6_send_skb+0x112/0x230 net/ipv6/ip6_output.c:1958
udp_v6_send_skb+0xbf5/0x1870 net/ipv6/udp.c:1292
udpv6_sendmsg+0x23b3/0x3270 net/ipv6/udp.c:1588
sock_sendmsg_nosec net/socket.c:730 [inline]
__sock_sendmsg+0xef/0x270 net/socket.c:745
____sys_sendmsg+0x525/0x7d0 net/socket.c:2585
___sys_sendmsg net/socket.c:2639 [inline]
__sys_sendmmsg+0x3b2/0x740 net/socket.c:2725
__do_sys_sendmmsg net/socket.c:2754 [inline]
__se_sys_sendmmsg net/socket.c:2751 [inline]
__x64_sys_sendmmsg+0xa0/0xb0 net/socket.c:2751
do_syscall_x64 arch/x86/entry/common.c:52 [inline]
do_syscall_64+0xf3/0x230 arch/x86/entry/common.c:83
entry_SYSCALL_64_after_hwframe+0x77/0x7f
[...]
</TASK>
We are hitting the bad offload warning because when an egress device is
capable of handling segmentation offload requested by
skb_shinfo(skb)->gso_type, the chain of gso_segment callbacks won't produce
any segment skbs and return NULL. See the skb_gso_ok() branch in
{__udp,tcp,sctp}_gso_segment helpers.
To fix it, force a fallback to software USO when processing a packet with
IPv6 extension headers, since we don't know if these can be checksummed by
all devices which offer USO.
Fixes: 10154dbded6d ("udp: Allow GSO transmit from devices with no checksum offload")
Reported-by: syzbot+e15b7e15b8a751a91d9a@syzkaller.appspotmail.com
Closes: https://lore.kernel.org/all/000000000000e1609a061d5330ce@google.com/
Reviewed-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: Jakub Sitnicki <jakub@cloudflare.com>
Link: https://patch.msgid.link/20240808-udp-gso-egress-from-tunnel-v4-2-f5c5b4149ab9@cloudflare.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -283,6 +283,12 @@ struct sk_buff *__udp_gso_segment(struct
!(skb_shinfo(gso_skb)->gso_type & SKB_GSO_FRAGLIST)))
return ERR_PTR(-EINVAL);
+ /* We don't know if egress device can segment and checksum the packet
+ * when IPv6 extension headers are present. Fall back to software GSO.
+ */
+ if (gso_skb->ip_summed != CHECKSUM_PARTIAL)
+ features &= ~(NETIF_F_GSO_UDP_L4 | NETIF_F_CSUM_MASK);
+
if (skb_gso_ok(gso_skb, features | NETIF_F_GSO_ROBUST)) {
/* Packet is from an untrusted source, reset gso_segs. */
skb_shinfo(gso_skb)->gso_segs = DIV_ROUND_UP(gso_skb->len - sizeof(*uh),

View file

@@ -0,0 +1,29 @@
From: Breno Leitao <leitao@debian.org>
Date: Wed, 28 Feb 2024 03:31:21 -0800
Subject: [PATCH] net: get stats64 if device if driver is configured
If the network driver relies on the net core to do stats allocation,
then we want to call dev_get_tstats64() instead of netdev_stats_to_stats64(),
since there are per-cpu stats that need to be taken into consideration.
This will also simplify the drivers with regard to statistics. Once the
driver sets NETDEV_PCPU_STAT_TSTATS, it no longer needs to allocate the
stats itself, nor does it need to set `.ndo_get_stats64 = dev_get_tstats64`
for the generic stats collection function anymore.
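A minimal sketch of what this means for a driver (driver and function names
hypothetical; NETDEV_PCPU_STAT_TSTATS and dev_get_tstats64() are taken from
the patch below):
/* Hypothetical driver setup: the core allocates the per-cpu tstats and
 * dev_get_stats() now falls back to dev_get_tstats64() automatically.
 */
static void example_driver_setup(struct net_device *dev)
{
	dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
	/* no private stats allocation and no .ndo_get_stats64 hook needed */
}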
Signed-off-by: Breno Leitao <leitao@debian.org>
Reviewed-by: Simon Horman <horms@kernel.org>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
---
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -10703,6 +10703,8 @@ struct rtnl_link_stats64 *dev_get_stats(
ops->ndo_get_stats64(dev, storage);
} else if (ops->ndo_get_stats) {
netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
+ } else if (dev->pcpu_stat_type == NETDEV_PCPU_STAT_TSTATS) {
+ dev_get_tstats64(dev, storage);
} else {
netdev_stats_to_stats64(storage, &dev->stats);
}

View file

@@ -0,0 +1,139 @@
From: Yunsheng Lin <linyunsheng@huawei.com>
Date: Fri, 13 Oct 2023 14:48:21 +0800
Subject: [PATCH] page_pool: fragment API support for 32-bit arch with 64-bit
DMA
Currently page_pool_alloc_frag() is not supported on 32-bit
arches with 64-bit DMA because of the overlap between
pp_frag_count and dma_addr_upper in 'struct page' on those
arches. Such configurations seem to be quite common, see [1],
which means a driver may need to handle this case when using
the fragment API.
It is assumed that the combination of the above arches with an
address space >16TB does not exist; as all those arches have a
64-bit equivalent, it seems logical to use the 64-bit version
for a system with a large address space. It is also assumed
that the DMA address is page aligned when we are DMA-mapping a
page-aligned buffer, see [2].
That means we're storing 12 zero bits at the lower end of a DMA
address, so we can reuse those bits for the above arches to
support 32b+12b, which is 16TB of memory.
If we make a wrong assumption, a warning is emitted so that the
user can report it to us.
1. https://lore.kernel.org/all/20211117075652.58299-1-linyunsheng@huawei.com/
2. https://lore.kernel.org/all/20230818145145.4b357c89@kernel.org/
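A minimal sketch of the address 'compression' described above (helper name
hypothetical; the shift-by-PAGE_SHIFT scheme matches what the patch below
implements in page_pool_set_dma_addr()/page_pool_get_dma_addr()):
/* On a 32-bit arch with 64-bit DMA, store a page-aligned DMA address in an
 * unsigned long by dropping the always-zero low PAGE_SHIFT bits; read it
 * back by shifting up again. If the value does not round-trip, the address
 * is not representable and the caller must unmap and bail out (the patch
 * emits a warning in that case).
 */
static inline bool pp_store_dma_addr(struct page *page, dma_addr_t addr)
{
	page->dma_addr = addr >> PAGE_SHIFT;
	return addr != (dma_addr_t)page->dma_addr << PAGE_SHIFT; /* true = failed */
}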
Tested-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
CC: Lorenzo Bianconi <lorenzo@kernel.org>
CC: Alexander Duyck <alexander.duyck@gmail.com>
CC: Liang Chen <liangchen.linux@gmail.com>
CC: Guillaume Tucker <guillaume.tucker@collabora.com>
CC: Matthew Wilcox <willy@infradead.org>
CC: Linux-MM <linux-mm@kvack.org>
Link: https://lore.kernel.org/r/20231013064827.61135-2-linyunsheng@huawei.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -125,18 +125,7 @@ struct page {
struct page_pool *pp;
unsigned long _pp_mapping_pad;
unsigned long dma_addr;
- union {
- /**
- * dma_addr_upper: might require a 64-bit
- * value on 32-bit architectures.
- */
- unsigned long dma_addr_upper;
- /**
- * For frag page support, not supported in
- * 32-bit architectures with 64-bit DMA.
- */
- atomic_long_t pp_frag_count;
- };
+ atomic_long_t pp_frag_count;
};
struct { /* Tail pages of compound page */
unsigned long compound_head; /* Bit zero is set */
--- a/include/net/page_pool/helpers.h
+++ b/include/net/page_pool/helpers.h
@@ -197,7 +197,7 @@ static inline void page_pool_recycle_dir
page_pool_put_full_page(pool, page, true);
}
-#define PAGE_POOL_DMA_USE_PP_FRAG_COUNT \
+#define PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA \
(sizeof(dma_addr_t) > sizeof(unsigned long))
/**
@@ -211,17 +211,25 @@ static inline dma_addr_t page_pool_get_d
{
dma_addr_t ret = page->dma_addr;
- if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
- ret |= (dma_addr_t)page->dma_addr_upper << 16 << 16;
+ if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA)
+ ret <<= PAGE_SHIFT;
return ret;
}
-static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
+static inline bool page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
{
+ if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA) {
+ page->dma_addr = addr >> PAGE_SHIFT;
+
+ /* We assume page alignment to shave off bottom bits,
+ * if this "compression" doesn't work we need to drop.
+ */
+ return addr != (dma_addr_t)page->dma_addr << PAGE_SHIFT;
+ }
+
page->dma_addr = addr;
- if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
- page->dma_addr_upper = upper_32_bits(addr);
+ return false;
}
static inline bool page_pool_put(struct page_pool *pool)
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -211,10 +211,6 @@ static int page_pool_init(struct page_po
*/
}
- if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT &&
- pool->p.flags & PP_FLAG_PAGE_FRAG)
- return -EINVAL;
-
#ifdef CONFIG_PAGE_POOL_STATS
pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
if (!pool->recycle_stats)
@@ -363,12 +359,20 @@ static bool page_pool_dma_map(struct pag
if (dma_mapping_error(pool->p.dev, dma))
return false;
- page_pool_set_dma_addr(page, dma);
+ if (page_pool_set_dma_addr(page, dma))
+ goto unmap_failed;
if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
return true;
+
+unmap_failed:
+ WARN_ON_ONCE("unexpected DMA address, please report to netdev@");
+ dma_unmap_page_attrs(pool->p.dev, dma,
+ PAGE_SIZE << pool->p.order, pool->p.dma_dir,
+ DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
+ return false;
}
static void page_pool_set_pp_info(struct page_pool *pool,

View file

@@ -0,0 +1,183 @@
From: Yunsheng Lin <linyunsheng@huawei.com>
Date: Fri, 20 Oct 2023 17:59:48 +0800
Subject: [PATCH] page_pool: unify frag_count handling in
page_pool_is_last_frag()
Currently, when page_pool_create() is called with the
PP_FLAG_PAGE_FRAG flag, page_pool_alloc_pages() is only
allowed to be called under the below constraints:
1. page_pool_fragment_page() needs to be called to set up
page->pp_frag_count immediately.
2. page_pool_defrag_page() often needs to be called to drain
page->pp_frag_count when no user will be holding on to
that page anymore.
Those constraints exist in order to support splitting a page
into multiple fragments, and they have some overhead because
of the cache line dirtying/bouncing and the atomic update.
Those constraints are unavoidable when we need a page to be
split into more than one fragment, but there are also cases
where we want to avoid the above constraints and their
overhead because a page can't be split, as it only holds the
single fragment requested by the user. The relevant use cases
are:
use case 1: allocate a page without page splitting.
use case 2: allocate a page with page splitting.
use case 3: allocate a page with or without page splitting
depending on the fragment size.
Currently the page pool only provides the page_pool_alloc_pages()
and page_pool_alloc_frag() APIs to enable use cases 1 & 2
separately, so we cannot use a combination of 1 & 2 to enable 3;
it is not possible yet because of the per-page_pool flag
PP_FLAG_PAGE_FRAG.
So in order to allow allocating an unsplit page without the
overhead of a split page, while still allowing split pages, we
need to remove the per-page_pool flag from
page_pool_is_last_frag(). As best as I can tell, there are two
methods to do so:
1. Add a per-page flag/bit to indicate whether a page is split
or not, which means we might need to update that flag/bit
every time the page is recycled, dirtying the cache line
of 'struct page' for use case 1.
2. Unify the page->pp_frag_count handling for both split and
unsplit pages by assuming all pages in the page pool are
initially split into one big fragment.
Since the page pool already supports use case 1 without
dirtying the cache line of 'struct page' whenever a page is
recycled, we need to support the above use case 3 with minimal
overhead, especially without adding any noticeable overhead for
use case 1. As we are already optimizing by not updating
pp_frag_count in page_pool_defrag_page() for the last fragment
user, this patch chooses to unify the pp_frag_count handling
to support the above use case 3.
A micro-benchmark in [1] shows no noticeable performance
degradation with this patch applied and provides some
justification for unifying the frag_count handling.
1. https://lore.kernel.org/all/bf2591f8-7b3c-4480-bb2c-31dc9da1d6ac@huawei.com/
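A minimal sketch of the resulting invariant (call sites simplified; both
helpers appear in the patch below): every page leaves the allocator as one
big fragment, so the last-user test is the same for split and unsplit pages
and no longer needs the per-pool PP_FLAG_PAGE_FRAG check:
/* At allocation time every pool page is set up as a single fragment... */
page_pool_fragment_page(page, 1);

/* ...so on release the last-user check is uniform. */
if (page_pool_defrag_page(page, 1) == 0) {
	/* we were the last user: the page can be recycled */
}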
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
CC: Lorenzo Bianconi <lorenzo@kernel.org>
CC: Alexander Duyck <alexander.duyck@gmail.com>
CC: Liang Chen <liangchen.linux@gmail.com>
CC: Alexander Lobakin <aleksander.lobakin@intel.com>
Link: https://lore.kernel.org/r/20231020095952.11055-2-linyunsheng@huawei.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
--- a/include/net/page_pool/helpers.h
+++ b/include/net/page_pool/helpers.h
@@ -115,28 +115,49 @@ static inline long page_pool_defrag_page
long ret;
/* If nr == pp_frag_count then we have cleared all remaining
- * references to the page. No need to actually overwrite it, instead
- * we can leave this to be overwritten by the calling function.
+ * references to the page:
+ * 1. 'n == 1': no need to actually overwrite it.
+ * 2. 'n != 1': overwrite it with one, which is the rare case
+ * for pp_frag_count draining.
*
- * The main advantage to doing this is that an atomic_read is
- * generally a much cheaper operation than an atomic update,
- * especially when dealing with a page that may be partitioned
- * into only 2 or 3 pieces.
+ * The main advantage to doing this is that not only we avoid a atomic
+ * update, as an atomic_read is generally a much cheaper operation than
+ * an atomic update, especially when dealing with a page that may be
+ * partitioned into only 2 or 3 pieces; but also unify the pp_frag_count
+ * handling by ensuring all pages have partitioned into only 1 piece
+ * initially, and only overwrite it when the page is partitioned into
+ * more than one piece.
*/
- if (atomic_long_read(&page->pp_frag_count) == nr)
+ if (atomic_long_read(&page->pp_frag_count) == nr) {
+ /* As we have ensured nr is always one for constant case using
+ * the BUILD_BUG_ON(), only need to handle the non-constant case
+ * here for pp_frag_count draining, which is a rare case.
+ */
+ BUILD_BUG_ON(__builtin_constant_p(nr) && nr != 1);
+ if (!__builtin_constant_p(nr))
+ atomic_long_set(&page->pp_frag_count, 1);
+
return 0;
+ }
ret = atomic_long_sub_return(nr, &page->pp_frag_count);
WARN_ON(ret < 0);
+
+ /* We are the last user here too, reset pp_frag_count back to 1 to
+ * ensure all pages have been partitioned into 1 piece initially,
+ * this should be the rare case when the last two fragment users call
+ * page_pool_defrag_page() currently.
+ */
+ if (unlikely(!ret))
+ atomic_long_set(&page->pp_frag_count, 1);
+
return ret;
}
-static inline bool page_pool_is_last_frag(struct page_pool *pool,
- struct page *page)
+static inline bool page_pool_is_last_frag(struct page *page)
{
- /* If fragments aren't enabled or count is 0 we were the last user */
- return !(pool->p.flags & PP_FLAG_PAGE_FRAG) ||
- (page_pool_defrag_page(page, 1) == 0);
+ /* If page_pool_defrag_page() returns 0, we were the last user */
+ return page_pool_defrag_page(page, 1) == 0;
}
/**
@@ -161,7 +182,7 @@ static inline void page_pool_put_page(st
* allow registering MEM_TYPE_PAGE_POOL, but shield linker.
*/
#ifdef CONFIG_PAGE_POOL
- if (!page_pool_is_last_frag(pool, page))
+ if (!page_pool_is_last_frag(page))
return;
page_pool_put_defragged_page(pool, page, dma_sync_size, allow_direct);
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -380,6 +380,14 @@ static void page_pool_set_pp_info(struct
{
page->pp = pool;
page->pp_magic |= PP_SIGNATURE;
+
+ /* Ensuring all pages have been split into one fragment initially:
+ * page_pool_set_pp_info() is only called once for every page when it
+ * is allocated from the page allocator and page_pool_fragment_page()
+ * is dirtying the same cache line as the page->pp_magic above, so
+ * the overhead is negligible.
+ */
+ page_pool_fragment_page(page, 1);
if (pool->p.init_callback)
pool->p.init_callback(page, pool->p.init_arg);
}
@@ -676,7 +684,7 @@ void page_pool_put_page_bulk(struct page
struct page *page = virt_to_head_page(data[i]);
/* It is not the last user for the page frag case */
- if (!page_pool_is_last_frag(pool, page))
+ if (!page_pool_is_last_frag(page))
continue;
page = __page_pool_put_page(pool, page, -1, false);
@@ -752,8 +760,7 @@ struct page *page_pool_alloc_frag(struct
unsigned int max_size = PAGE_SIZE << pool->p.order;
struct page *page = pool->frag_page;
- if (WARN_ON(!(pool->p.flags & PP_FLAG_PAGE_FRAG) ||
- size > max_size))
+ if (WARN_ON(size > max_size))
return NULL;
size = ALIGN(size, dma_get_cache_alignment());

View file

@@ -0,0 +1,101 @@
From 8928756d53d5b99dcd18073dc7738b8ebdbe7d96 Mon Sep 17 00:00:00 2001
From: Felix Fietkau <nbd@nbd.name>
Date: Thu, 2 May 2024 10:44:42 +0200
Subject: [PATCH 1/6] net: move skb_gro_receive_list from udp to core
This helper function will be used for TCP fraglist GRO support
Acked-by: Paolo Abeni <pabeni@redhat.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Felix Fietkau <nbd@nbd.name>
Reviewed-by: David Ahern <dsahern@kernel.org>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
---
include/net/gro.h | 1 +
net/core/gro.c | 27 +++++++++++++++++++++++++++
net/ipv4/udp_offload.c | 27 ---------------------------
3 files changed, 28 insertions(+), 27 deletions(-)
--- a/include/net/gro.h
+++ b/include/net/gro.h
@@ -439,6 +439,7 @@ static inline __wsum ip6_gro_compute_pse
}
int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
+int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb);
/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
static inline void gro_normal_list(struct napi_struct *napi)
--- a/net/core/gro.c
+++ b/net/core/gro.c
@@ -228,6 +228,33 @@ done:
return 0;
}
+int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb)
+{
+ if (unlikely(p->len + skb->len >= 65536))
+ return -E2BIG;
+
+ if (NAPI_GRO_CB(p)->last == p)
+ skb_shinfo(p)->frag_list = skb;
+ else
+ NAPI_GRO_CB(p)->last->next = skb;
+
+ skb_pull(skb, skb_gro_offset(skb));
+
+ NAPI_GRO_CB(p)->last = skb;
+ NAPI_GRO_CB(p)->count++;
+ p->data_len += skb->len;
+
+ /* sk ownership - if any - completely transferred to the aggregated packet */
+ skb->destructor = NULL;
+ skb->sk = NULL;
+ p->truesize += skb->truesize;
+ p->len += skb->len;
+
+ NAPI_GRO_CB(skb)->same_flow = 1;
+
+ return 0;
+}
+
static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
{
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -474,33 +474,6 @@ out:
return segs;
}
-static int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb)
-{
- if (unlikely(p->len + skb->len >= 65536))
- return -E2BIG;
-
- if (NAPI_GRO_CB(p)->last == p)
- skb_shinfo(p)->frag_list = skb;
- else
- NAPI_GRO_CB(p)->last->next = skb;
-
- skb_pull(skb, skb_gro_offset(skb));
-
- NAPI_GRO_CB(p)->last = skb;
- NAPI_GRO_CB(p)->count++;
- p->data_len += skb->len;
-
- /* sk ownership - if any - completely transferred to the aggregated packet */
- skb->destructor = NULL;
- skb->sk = NULL;
- p->truesize += skb->truesize;
- p->len += skb->len;
-
- NAPI_GRO_CB(skb)->same_flow = 1;
-
- return 0;
-}
-
#define UDP_GRO_CNT_MAX 64
static struct sk_buff *udp_gro_receive_segment(struct list_head *head,

View file

@@ -0,0 +1,177 @@
From bee88cd5bd83d40b8aec4d6cb729378f707f6197 Mon Sep 17 00:00:00 2001
From: Felix Fietkau <nbd@nbd.name>
Date: Thu, 2 May 2024 10:44:43 +0200
Subject: [PATCH 2/6] net: add support for segmenting TCP fraglist GSO packets
Preparation for adding TCP fraglist GRO support. It expects packets to be
combined in a similar way as UDP fraglist GSO packets.
For IPv4 packets, NAT is handled in the same way as UDP fraglist GSO.
Acked-by: Paolo Abeni <pabeni@redhat.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Felix Fietkau <nbd@nbd.name>
Reviewed-by: David Ahern <dsahern@kernel.org>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
---
net/ipv4/tcp_offload.c | 67 ++++++++++++++++++++++++++++++++++++++++
net/ipv6/tcpv6_offload.c | 58 ++++++++++++++++++++++++++++++++++
2 files changed, 125 insertions(+)
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -31,6 +31,70 @@ static void tcp_gso_tstamp(struct sk_buf
}
}
+static void __tcpv4_gso_segment_csum(struct sk_buff *seg,
+ __be32 *oldip, __be32 newip,
+ __be16 *oldport, __be16 newport)
+{
+ struct tcphdr *th;
+ struct iphdr *iph;
+
+ if (*oldip == newip && *oldport == newport)
+ return;
+
+ th = tcp_hdr(seg);
+ iph = ip_hdr(seg);
+
+ inet_proto_csum_replace4(&th->check, seg, *oldip, newip, true);
+ inet_proto_csum_replace2(&th->check, seg, *oldport, newport, false);
+ *oldport = newport;
+
+ csum_replace4(&iph->check, *oldip, newip);
+ *oldip = newip;
+}
+
+static struct sk_buff *__tcpv4_gso_segment_list_csum(struct sk_buff *segs)
+{
+ const struct tcphdr *th;
+ const struct iphdr *iph;
+ struct sk_buff *seg;
+ struct tcphdr *th2;
+ struct iphdr *iph2;
+
+ seg = segs;
+ th = tcp_hdr(seg);
+ iph = ip_hdr(seg);
+ th2 = tcp_hdr(seg->next);
+ iph2 = ip_hdr(seg->next);
+
+ if (!(*(const u32 *)&th->source ^ *(const u32 *)&th2->source) &&
+ iph->daddr == iph2->daddr && iph->saddr == iph2->saddr)
+ return segs;
+
+ while ((seg = seg->next)) {
+ th2 = tcp_hdr(seg);
+ iph2 = ip_hdr(seg);
+
+ __tcpv4_gso_segment_csum(seg,
+ &iph2->saddr, iph->saddr,
+ &th2->source, th->source);
+ __tcpv4_gso_segment_csum(seg,
+ &iph2->daddr, iph->daddr,
+ &th2->dest, th->dest);
+ }
+
+ return segs;
+}
+
+static struct sk_buff *__tcp4_gso_segment_list(struct sk_buff *skb,
+ netdev_features_t features)
+{
+ skb = skb_segment_list(skb, features, skb_mac_header_len(skb));
+ if (IS_ERR(skb))
+ return skb;
+
+ return __tcpv4_gso_segment_list_csum(skb);
+}
+
static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
netdev_features_t features)
{
@@ -40,6 +104,9 @@ static struct sk_buff *tcp4_gso_segment(
if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
return ERR_PTR(-EINVAL);
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)
+ return __tcp4_gso_segment_list(skb, features);
+
if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
const struct iphdr *iph = ip_hdr(skb);
struct tcphdr *th = tcp_hdr(skb);
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -40,6 +40,61 @@ INDIRECT_CALLABLE_SCOPE int tcp6_gro_com
return 0;
}
+static void __tcpv6_gso_segment_csum(struct sk_buff *seg,
+ __be16 *oldport, __be16 newport)
+{
+ struct tcphdr *th;
+
+ if (*oldport == newport)
+ return;
+
+ th = tcp_hdr(seg);
+ inet_proto_csum_replace2(&th->check, seg, *oldport, newport, false);
+ *oldport = newport;
+}
+
+static struct sk_buff *__tcpv6_gso_segment_list_csum(struct sk_buff *segs)
+{
+ const struct tcphdr *th;
+ const struct ipv6hdr *iph;
+ struct sk_buff *seg;
+ struct tcphdr *th2;
+ struct ipv6hdr *iph2;
+
+ seg = segs;
+ th = tcp_hdr(seg);
+ iph = ipv6_hdr(seg);
+ th2 = tcp_hdr(seg->next);
+ iph2 = ipv6_hdr(seg->next);
+
+ if (!(*(const u32 *)&th->source ^ *(const u32 *)&th2->source) &&
+ ipv6_addr_equal(&iph->saddr, &iph2->saddr) &&
+ ipv6_addr_equal(&iph->daddr, &iph2->daddr))
+ return segs;
+
+ while ((seg = seg->next)) {
+ th2 = tcp_hdr(seg);
+ iph2 = ipv6_hdr(seg);
+
+ iph2->saddr = iph->saddr;
+ iph2->daddr = iph->daddr;
+ __tcpv6_gso_segment_csum(seg, &th2->source, th->source);
+ __tcpv6_gso_segment_csum(seg, &th2->dest, th->dest);
+ }
+
+ return segs;
+}
+
+static struct sk_buff *__tcp6_gso_segment_list(struct sk_buff *skb,
+ netdev_features_t features)
+{
+ skb = skb_segment_list(skb, features, skb_mac_header_len(skb));
+ if (IS_ERR(skb))
+ return skb;
+
+ return __tcpv6_gso_segment_list_csum(skb);
+}
+
static struct sk_buff *tcp6_gso_segment(struct sk_buff *skb,
netdev_features_t features)
{
@@ -51,6 +106,9 @@ static struct sk_buff *tcp6_gso_segment(
if (!pskb_may_pull(skb, sizeof(*th)))
return ERR_PTR(-EINVAL);
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)
+ return __tcp6_gso_segment_list(skb, features);
+
if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
struct tcphdr *th = tcp_hdr(skb);

View file

@@ -0,0 +1,75 @@
From 8d95dc474f85481652a0e422d2f1f079de81f63c Mon Sep 17 00:00:00 2001
From: Felix Fietkau <nbd@nbd.name>
Date: Thu, 2 May 2024 10:44:44 +0200
Subject: [PATCH 3/6] net: add code for TCP fraglist GRO
This implements fraglist GRO similarly to how it's handled in UDP; however,
no functional changes are added yet. The next change adds a heuristic for
using fraglist GRO instead of regular GRO.
Acked-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: Felix Fietkau <nbd@nbd.name>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: David Ahern <dsahern@kernel.org>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
---
net/ipv4/tcp_offload.c | 21 +++++++++++++++++++++
net/ipv6/tcpv6_offload.c | 9 +++++++++
2 files changed, 30 insertions(+)
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -342,6 +342,18 @@ found:
flush |= p->decrypted ^ skb->decrypted;
#endif
+ if (unlikely(NAPI_GRO_CB(p)->is_flist)) {
+ flush |= (__force int)(flags ^ tcp_flag_word(th2));
+ flush |= skb->ip_summed != p->ip_summed;
+ flush |= skb->csum_level != p->csum_level;
+ flush |= NAPI_GRO_CB(p)->count >= 64;
+
+ if (flush || skb_gro_receive_list(p, skb))
+ mss = 1;
+
+ goto out_check_final;
+ }
+
if (flush || skb_gro_receive(p, skb)) {
mss = 1;
goto out_check_final;
@@ -406,6 +418,15 @@ INDIRECT_CALLABLE_SCOPE int tcp4_gro_com
const struct iphdr *iph = ip_hdr(skb);
struct tcphdr *th = tcp_hdr(skb);
+ if (unlikely(NAPI_GRO_CB(skb)->is_flist)) {
+ skb_shinfo(skb)->gso_type |= SKB_GSO_FRAGLIST | SKB_GSO_TCPV4;
+ skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
+
+ __skb_incr_checksum_unnecessary(skb);
+
+ return 0;
+ }
+
th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
iph->daddr, 0);
skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -32,6 +32,15 @@ INDIRECT_CALLABLE_SCOPE int tcp6_gro_com
const struct ipv6hdr *iph = ipv6_hdr(skb);
struct tcphdr *th = tcp_hdr(skb);
+ if (unlikely(NAPI_GRO_CB(skb)->is_flist)) {
+ skb_shinfo(skb)->gso_type |= SKB_GSO_FRAGLIST | SKB_GSO_TCPV6;
+ skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
+
+ __skb_incr_checksum_unnecessary(skb);
+
+ return 0;
+ }
+
th->check = ~tcp_v6_check(skb->len - thoff, &iph->saddr,
&iph->daddr, 0);
skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;

View file

@@ -0,0 +1,88 @@
From 80e85fbdf19ecc4dfa31ecf639adb55555db02fe Mon Sep 17 00:00:00 2001
From: Felix Fietkau <nbd@nbd.name>
Date: Thu, 2 May 2024 10:44:45 +0200
Subject: [PATCH 4/6] net: create tcp_gro_lookup helper function
This pulls the flow port matching out of tcp_gro_receive, so that it can be
reused for the next change, which adds the TCP fraglist GRO heuristic.
Acked-by: Paolo Abeni <pabeni@redhat.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Felix Fietkau <nbd@nbd.name>
Reviewed-by: David Ahern <dsahern@kernel.org>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
---
include/net/tcp.h | 1 +
net/ipv4/tcp_offload.c | 41 +++++++++++++++++++++++++----------------
2 files changed, 26 insertions(+), 16 deletions(-)
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -2101,6 +2101,7 @@ void tcp_v4_destroy_sock(struct sock *sk
struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
netdev_features_t features);
+struct sk_buff *tcp_gro_lookup(struct list_head *head, struct tcphdr *th);
struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb);
INDIRECT_CALLABLE_DECLARE(int tcp4_gro_complete(struct sk_buff *skb, int thoff));
INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb));
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -251,6 +251,27 @@ out:
return segs;
}
+struct sk_buff *tcp_gro_lookup(struct list_head *head, struct tcphdr *th)
+{
+ struct tcphdr *th2;
+ struct sk_buff *p;
+
+ list_for_each_entry(p, head, list) {
+ if (!NAPI_GRO_CB(p)->same_flow)
+ continue;
+
+ th2 = tcp_hdr(p);
+ if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
+ NAPI_GRO_CB(p)->same_flow = 0;
+ continue;
+ }
+
+ return p;
+ }
+
+ return NULL;
+}
+
struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
{
struct sk_buff *pp = NULL;
@@ -288,24 +309,12 @@ struct sk_buff *tcp_gro_receive(struct l
len = skb_gro_len(skb);
flags = tcp_flag_word(th);
- list_for_each_entry(p, head, list) {
- if (!NAPI_GRO_CB(p)->same_flow)
- continue;
-
- th2 = tcp_hdr(p);
-
- if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
- NAPI_GRO_CB(p)->same_flow = 0;
- continue;
- }
-
- goto found;
- }
- p = NULL;
- goto out_check_final;
+ p = tcp_gro_lookup(head, th);
+ if (!p)
+ goto out_check_final;
-found:
/* Include the IP ID check below from the inner most IP hdr */
+ th2 = tcp_hdr(p);
flush = NAPI_GRO_CB(p)->flush;
flush |= (__force int)(flags & TCP_FLAG_CWR);
flush |= (__force int)((flags ^ tcp_flag_word(th2)) &

View file

@@ -0,0 +1,166 @@
From 7516b27c555c1711ec17a5d891befb6986e573a3 Mon Sep 17 00:00:00 2001
From: Felix Fietkau <nbd@nbd.name>
Date: Thu, 2 May 2024 10:44:46 +0200
Subject: [PATCH 5/6] net: create tcp_gro_header_pull helper function
Pull the code out of tcp_gro_receive in order to access the tcp header
from tcp4/6_gro_receive.
Acked-by: Paolo Abeni <pabeni@redhat.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Felix Fietkau <nbd@nbd.name>
Reviewed-by: David Ahern <dsahern@kernel.org>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
---
include/net/tcp.h | 4 ++-
net/ipv4/tcp_offload.c | 55 +++++++++++++++++++++++++---------------
net/ipv6/tcpv6_offload.c | 18 +++++++++----
3 files changed, 50 insertions(+), 27 deletions(-)
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -2101,8 +2101,10 @@ void tcp_v4_destroy_sock(struct sock *sk
struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
netdev_features_t features);
+struct tcphdr *tcp_gro_pull_header(struct sk_buff *skb);
struct sk_buff *tcp_gro_lookup(struct list_head *head, struct tcphdr *th);
-struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb);
+struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb,
+ struct tcphdr *th);
INDIRECT_CALLABLE_DECLARE(int tcp4_gro_complete(struct sk_buff *skb, int thoff));
INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb));
INDIRECT_CALLABLE_DECLARE(int tcp6_gro_complete(struct sk_buff *skb, int thoff));
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -272,40 +272,46 @@ struct sk_buff *tcp_gro_lookup(struct li
return NULL;
}
-struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
+struct tcphdr *tcp_gro_pull_header(struct sk_buff *skb)
{
- struct sk_buff *pp = NULL;
- struct sk_buff *p;
+ unsigned int thlen, hlen, off;
struct tcphdr *th;
- struct tcphdr *th2;
- unsigned int len;
- unsigned int thlen;
- __be32 flags;
- unsigned int mss = 1;
- unsigned int hlen;
- unsigned int off;
- int flush = 1;
- int i;
off = skb_gro_offset(skb);
hlen = off + sizeof(*th);
th = skb_gro_header(skb, hlen, off);
if (unlikely(!th))
- goto out;
+ return NULL;
thlen = th->doff * 4;
if (thlen < sizeof(*th))
- goto out;
+ return NULL;
hlen = off + thlen;
if (skb_gro_header_hard(skb, hlen)) {
th = skb_gro_header_slow(skb, hlen, off);
if (unlikely(!th))
- goto out;
+ return NULL;
}
skb_gro_pull(skb, thlen);
+ return th;
+}
+
+struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb,
+ struct tcphdr *th)
+{
+ unsigned int thlen = th->doff * 4;
+ struct sk_buff *pp = NULL;
+ struct sk_buff *p;
+ struct tcphdr *th2;
+ unsigned int len;
+ __be32 flags;
+ unsigned int mss = 1;
+ int flush = 1;
+ int i;
+
len = skb_gro_len(skb);
flags = tcp_flag_word(th);
@@ -384,7 +390,6 @@ out_check_final:
if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
pp = p;
-out:
NAPI_GRO_CB(skb)->flush |= (flush != 0);
return pp;
@@ -411,15 +416,23 @@ EXPORT_SYMBOL(tcp_gro_complete);
INDIRECT_CALLABLE_SCOPE
struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
{
+ struct tcphdr *th;
+
/* Don't bother verifying checksum if we're going to flush anyway. */
if (!NAPI_GRO_CB(skb)->flush &&
skb_gro_checksum_validate(skb, IPPROTO_TCP,
- inet_gro_compute_pseudo)) {
- NAPI_GRO_CB(skb)->flush = 1;
- return NULL;
- }
+ inet_gro_compute_pseudo))
+ goto flush;
- return tcp_gro_receive(head, skb);
+ th = tcp_gro_pull_header(skb);
+ if (!th)
+ goto flush;
+
+ return tcp_gro_receive(head, skb, th);
+
+flush:
+ NAPI_GRO_CB(skb)->flush = 1;
+ return NULL;
}
INDIRECT_CALLABLE_SCOPE int tcp4_gro_complete(struct sk_buff *skb, int thoff)
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -16,15 +16,23 @@
INDIRECT_CALLABLE_SCOPE
struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb)
{
+ struct tcphdr *th;
+
/* Don't bother verifying checksum if we're going to flush anyway. */
if (!NAPI_GRO_CB(skb)->flush &&
skb_gro_checksum_validate(skb, IPPROTO_TCP,
- ip6_gro_compute_pseudo)) {
- NAPI_GRO_CB(skb)->flush = 1;
- return NULL;
- }
+ ip6_gro_compute_pseudo))
+ goto flush;
+
+ th = tcp_gro_pull_header(skb);
+ if (!th)
+ goto flush;
+
+ return tcp_gro_receive(head, skb, th);
- return tcp_gro_receive(head, skb);
+flush:
+ NAPI_GRO_CB(skb)->flush = 1;
+ return NULL;
}
INDIRECT_CALLABLE_SCOPE int tcp6_gro_complete(struct sk_buff *skb, int thoff)

View file

@@ -0,0 +1,140 @@
From c9d1d23e5239f41700be69133a5769ac5ebc88a8 Mon Sep 17 00:00:00 2001
From: Felix Fietkau <nbd@nbd.name>
Date: Thu, 2 May 2024 10:44:47 +0200
Subject: [PATCH 6/6] net: add heuristic for enabling TCP fraglist GRO
When forwarding TCP after GRO, software segmentation is very expensive,
especially when the checksum needs to be recalculated.
One case where that's currently unavoidable is when routing packets over
PPPoE. Performance improves significantly when using fraglist GRO
implemented in the same way as for UDP.
When NETIF_F_GRO_FRAGLIST is enabled, perform a lookup for an established
socket in the same netns as the receiving device. While this may not
cover all relevant use cases in multi-netns configurations, it should be
good enough for most configurations that need this.
Here's a measurement of running 2 TCP streams through a MediaTek MT7622
device (2-core Cortex-A53), which runs NAT with flow offload enabled from
one ethernet port to PPPoE on another ethernet port + cake qdisc set to
1Gbps.
rx-gro-list off: 630 Mbit/s, CPU 35% idle
rx-gro-list on: 770 Mbit/s, CPU 40% idle
Acked-by: Paolo Abeni <pabeni@redhat.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Felix Fietkau <nbd@nbd.name>
Reviewed-by: David Ahern <dsahern@kernel.org>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
---
net/ipv4/tcp_offload.c | 32 ++++++++++++++++++++++++++++++++
net/ipv6/tcpv6_offload.c | 35 +++++++++++++++++++++++++++++++++++
2 files changed, 67 insertions(+)
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -413,6 +413,36 @@ void tcp_gro_complete(struct sk_buff *sk
}
EXPORT_SYMBOL(tcp_gro_complete);
+static void tcp4_check_fraglist_gro(struct list_head *head, struct sk_buff *skb,
+ struct tcphdr *th)
+{
+ const struct iphdr *iph;
+ struct sk_buff *p;
+ struct sock *sk;
+ struct net *net;
+ int iif, sdif;
+
+ if (likely(!(skb->dev->features & NETIF_F_GRO_FRAGLIST)))
+ return;
+
+ p = tcp_gro_lookup(head, th);
+ if (p) {
+ NAPI_GRO_CB(skb)->is_flist = NAPI_GRO_CB(p)->is_flist;
+ return;
+ }
+
+ inet_get_iif_sdif(skb, &iif, &sdif);
+ iph = skb_gro_network_header(skb);
+ net = dev_net(skb->dev);
+ sk = __inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
+ iph->saddr, th->source,
+ iph->daddr, ntohs(th->dest),
+ iif, sdif);
+ NAPI_GRO_CB(skb)->is_flist = !sk;
+ if (sk)
+ sock_put(sk);
+}
+
INDIRECT_CALLABLE_SCOPE
struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
{
@@ -428,6 +458,8 @@ struct sk_buff *tcp4_gro_receive(struct
if (!th)
goto flush;
+ tcp4_check_fraglist_gro(head, skb, th);
+
return tcp_gro_receive(head, skb, th);
flush:
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -7,12 +7,45 @@
*/
#include <linux/indirect_call_wrapper.h>
#include <linux/skbuff.h>
+#include <net/inet6_hashtables.h>
#include <net/gro.h>
#include <net/protocol.h>
#include <net/tcp.h>
#include <net/ip6_checksum.h>
#include "ip6_offload.h"
+static void tcp6_check_fraglist_gro(struct list_head *head, struct sk_buff *skb,
+ struct tcphdr *th)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ const struct ipv6hdr *hdr;
+ struct sk_buff *p;
+ struct sock *sk;
+ struct net *net;
+ int iif, sdif;
+
+ if (likely(!(skb->dev->features & NETIF_F_GRO_FRAGLIST)))
+ return;
+
+ p = tcp_gro_lookup(head, th);
+ if (p) {
+ NAPI_GRO_CB(skb)->is_flist = NAPI_GRO_CB(p)->is_flist;
+ return;
+ }
+
+ inet6_get_iif_sdif(skb, &iif, &sdif);
+ hdr = skb_gro_network_header(skb);
+ net = dev_net(skb->dev);
+ sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
+ &hdr->saddr, th->source,
+ &hdr->daddr, ntohs(th->dest),
+ iif, sdif);
+ NAPI_GRO_CB(skb)->is_flist = !sk;
+ if (sk)
+ sock_put(sk);
+#endif /* IS_ENABLED(CONFIG_IPV6) */
+}
+
INDIRECT_CALLABLE_SCOPE
struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb)
{
@@ -28,6 +61,8 @@ struct sk_buff *tcp6_gro_receive(struct
if (!th)
goto flush;
+ tcp6_check_fraglist_gro(head, skb, th);
+
return tcp_gro_receive(head, skb, th);
flush:

View file

@@ -0,0 +1,80 @@
From 17bd3bd82f9f79f3feba15476c2b2c95a9b11ff8 Mon Sep 17 00:00:00 2001
From: Felix Fietkau <nbd@nbd.name>
Date: Thu, 26 Sep 2024 10:53:14 +0200
Subject: [PATCH] net: gso: fix tcp fraglist segmentation after pull from
frag_list
Detect tcp gso fraglist skbs with corrupted geometry (see below) and
pass these to skb_segment instead of skb_segment_list, as the first
can segment them correctly.
Valid SKB_GSO_FRAGLIST skbs
- consist of two or more segments
- the head_skb holds the protocol headers plus first gso_size
- one or more frag_list skbs hold exactly one segment
- all but the last must be gso_size
Optional datapath hooks such as NAT and BPF (bpf_skb_pull_data) can
modify these skbs, breaking these invariants.
In extreme cases they pull all data into skb linear. For TCP, this
causes a NULL ptr deref in __tcpv4_gso_segment_list_csum at
tcp_hdr(seg->next).
Detect invalid geometry due to pull, by checking head_skb size.
Don't just drop, as this may blackhole a destination. Convert to be
able to pass to regular skb_segment.
Approach and description based on a patch by Willem de Bruijn.
Link: https://lore.kernel.org/netdev/20240428142913.18666-1-shiming.cheng@mediatek.com/
Link: https://lore.kernel.org/netdev/20240922150450.3873767-1-willemdebruijn.kernel@gmail.com/
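A minimal sketch of the geometry test described above (helper name
hypothetical; the expression mirrors the check added in the patch below):
/* A fraglist GSO skb still has valid geometry only if the head skb's
 * linear + page data carries exactly the protocol headers plus one
 * gso_size worth of payload; anything else means a pull has reshaped it
 * and it must go through the regular segmenter instead.
 */
static bool tcp_fraglist_geometry_ok(const struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);

	return skb_pagelen(skb) - th->doff * 4 == skb_shinfo(skb)->gso_size;
}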
Fixes: bee88cd5bd83 ("net: add support for segmenting TCP fraglist GSO packets")
Cc: stable@vger.kernel.org
Signed-off-by: Felix Fietkau <nbd@nbd.name>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Link: https://patch.msgid.link/20240926085315.51524-1-nbd@nbd.name
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
net/ipv4/tcp_offload.c | 10 ++++++++--
net/ipv6/tcpv6_offload.c | 10 ++++++++--
2 files changed, 16 insertions(+), 4 deletions(-)
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -104,8 +104,14 @@ static struct sk_buff *tcp4_gso_segment(
if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
return ERR_PTR(-EINVAL);
- if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)
- return __tcp4_gso_segment_list(skb, features);
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST) {
+ struct tcphdr *th = tcp_hdr(skb);
+
+ if (skb_pagelen(skb) - th->doff * 4 == skb_shinfo(skb)->gso_size)
+ return __tcp4_gso_segment_list(skb, features);
+
+ skb->ip_summed = CHECKSUM_NONE;
+ }
if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
const struct iphdr *iph = ip_hdr(skb);
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -158,8 +158,14 @@ static struct sk_buff *tcp6_gso_segment(
if (!pskb_may_pull(skb, sizeof(*th)))
return ERR_PTR(-EINVAL);
- if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)
- return __tcp6_gso_segment_list(skb, features);
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST) {
+ struct tcphdr *th = tcp_hdr(skb);
+
+ if (skb_pagelen(skb) - th->doff * 4 == skb_shinfo(skb)->gso_size)
+ return __tcp6_gso_segment_list(skb, features);
+
+ skb->ip_summed = CHECKSUM_NONE;
+ }
if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
const struct ipv6hdr *ipv6h = ipv6_hdr(skb);

View file

@@ -0,0 +1,59 @@
From daa624d3c2ddffdcbad140a9625a4064371db44f Mon Sep 17 00:00:00 2001
From: Felix Fietkau <nbd@nbd.name>
Date: Tue, 11 Mar 2025 22:25:30 +0100
Subject: [PATCH] net: ipv6: fix TCP GSO segmentation with NAT
When updating the source/destination address, the TCP/UDP checksum needs to
be updated as well.
Fixes: bee88cd5bd83 ("net: add support for segmenting TCP fraglist GSO packets")
Signed-off-by: Felix Fietkau <nbd@nbd.name>
Link: https://patch.msgid.link/20250311212530.91519-1-nbd@nbd.name
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
---
net/ipv6/tcpv6_offload.c | 21 +++++++++++++++------
1 file changed, 15 insertions(+), 6 deletions(-)
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -93,14 +93,23 @@ INDIRECT_CALLABLE_SCOPE int tcp6_gro_com
}
static void __tcpv6_gso_segment_csum(struct sk_buff *seg,
+ struct in6_addr *oldip,
+ const struct in6_addr *newip,
__be16 *oldport, __be16 newport)
{
- struct tcphdr *th;
+ struct tcphdr *th = tcp_hdr(seg);
+
+ if (!ipv6_addr_equal(oldip, newip)) {
+ inet_proto_csum_replace16(&th->check, seg,
+ oldip->s6_addr32,
+ newip->s6_addr32,
+ true);
+ *oldip = *newip;
+ }
if (*oldport == newport)
return;
- th = tcp_hdr(seg);
inet_proto_csum_replace2(&th->check, seg, *oldport, newport, false);
*oldport = newport;
}
@@ -128,10 +137,10 @@ static struct sk_buff *__tcpv6_gso_segme
th2 = tcp_hdr(seg);
iph2 = ipv6_hdr(seg);
- iph2->saddr = iph->saddr;
- iph2->daddr = iph->daddr;
- __tcpv6_gso_segment_csum(seg, &th2->source, th->source);
- __tcpv6_gso_segment_csum(seg, &th2->dest, th->dest);
+ __tcpv6_gso_segment_csum(seg, &iph2->saddr, &iph->saddr,
+ &th2->source, th->source);
+ __tcpv6_gso_segment_csum(seg, &iph2->daddr, &iph->daddr,
+ &th2->dest, th->dest);
}
return segs;

View file

@@ -0,0 +1,135 @@
From c661050f93d3fd37a33c06041bb18a89688de7d2 Mon Sep 17 00:00:00 2001
From: Breno Leitao <leitao@debian.org>
Date: Mon, 22 Apr 2024 05:38:56 -0700
Subject: [PATCH] net: create a dummy net_device allocator
It is impossible to use init_dummy_netdev() together with alloc_netdev()
as the 'setup' argument.
This is because alloc_netdev() initializes some fields in the net_device
structure, and init_dummy_netdev() later zeroes them all. This causes
some problems, as reported here:
https://lore.kernel.org/all/20240322082336.49f110cc@kernel.org/
Split the init_dummy_netdev() function in two. Create a new function called
init_dummy_netdev_core() that does not zero the net_device structure.
Then have init_dummy_netdev() do the memset and call
init_dummy_netdev_core(), keeping the old behaviour.
init_dummy_netdev_core() is the new function that can be passed as the
'setup' argument to alloc_netdev().
Also, create a helper to allocate and initialize dummy net devices,
leveraging init_dummy_netdev_core() as the setup argument. This function
simplifies the allocation of dummy devices by allocating and
initializing them. Freeing the device continues to be done through
free_netdev()
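A minimal usage sketch (surrounding driver code hypothetical;
alloc_netdev_dummy() and free_netdev() are the interfaces described above):
/* Allocate a dummy netdev purely for NAPI scheduling and release it with
 * free_netdev() as before.
 */
struct net_device *dummy = alloc_netdev_dummy(0);

if (!dummy)
	return -ENOMEM;
/* ... netif_napi_add(dummy, &priv->napi, example_poll); ... */
free_netdev(dummy);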
Suggested-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Breno Leitao <leitao@debian.org>
Reviewed-by: Ido Schimmel <idosch@nvidia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
include/linux/netdevice.h | 3 +++
net/core/dev.c | 56 ++++++++++++++++++++++++++-------------
2 files changed, 41 insertions(+), 18 deletions(-)
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -4569,6 +4569,9 @@ static inline void netif_addr_unlock_bh(
void ether_setup(struct net_device *dev);
+/* Allocate dummy net_device */
+struct net_device *alloc_netdev_dummy(int sizeof_priv);
+
/* Support for loadable net-drivers */
struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
unsigned char name_assign_type,
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -10390,25 +10390,12 @@ err_free_name:
}
EXPORT_SYMBOL(register_netdevice);
-/**
- * init_dummy_netdev - init a dummy network device for NAPI
- * @dev: device to init
- *
- * This takes a network device structure and initialize the minimum
- * amount of fields so it can be used to schedule NAPI polls without
- * registering a full blown interface. This is to be used by drivers
- * that need to tie several hardware interfaces to a single NAPI
- * poll scheduler due to HW limitations.
+/* Initialize the core of a dummy net device.
+ * This is useful if you are calling this function after alloc_netdev(),
+ * since it does not memset the net_device fields.
*/
-int init_dummy_netdev(struct net_device *dev)
+static void init_dummy_netdev_core(struct net_device *dev)
{
- /* Clear everything. Note we don't initialize spinlocks
- * are they aren't supposed to be taken by any of the
- * NAPI code and this dummy netdev is supposed to be
- * only ever used for NAPI polls
- */
- memset(dev, 0, sizeof(struct net_device));
-
/* make sure we BUG if trying to hit standard
* register/unregister code path
*/
@@ -10428,12 +10415,32 @@ int init_dummy_netdev(struct net_device
* because users of this 'device' dont need to change
* its refcount.
*/
+}
+
+/**
+ * init_dummy_netdev - init a dummy network device for NAPI
+ * @dev: device to init
+ *
+ * This takes a network device structure and initializes the minimum
+ * amount of fields so it can be used to schedule NAPI polls without
+ * registering a full blown interface. This is to be used by drivers
+ * that need to tie several hardware interfaces to a single NAPI
+ * poll scheduler due to HW limitations.
+ */
+int init_dummy_netdev(struct net_device *dev)
+{
+ /* Clear everything. Note we don't initialize spinlocks
+ * as they aren't supposed to be taken by any of the
+ * NAPI code and this dummy netdev is supposed to be
+ * only ever used for NAPI polls
+ */
+ memset(dev, 0, sizeof(struct net_device));
+ init_dummy_netdev_core(dev);
return 0;
}
EXPORT_SYMBOL_GPL(init_dummy_netdev);
-
/**
* register_netdev - register a network device
* @dev: device to register
@@ -11027,6 +11034,19 @@ void free_netdev(struct net_device *dev)
EXPORT_SYMBOL(free_netdev);
/**
+ * alloc_netdev_dummy - Allocate and initialize a dummy net device.
+ * @sizeof_priv: size of private data to allocate space for
+ *
+ * Return: the allocated net_device on success, NULL otherwise
+ */
+struct net_device *alloc_netdev_dummy(int sizeof_priv)
+{
+ return alloc_netdev(sizeof_priv, "dummy#", NET_NAME_UNKNOWN,
+ init_dummy_netdev_core);
+}
+EXPORT_SYMBOL_GPL(alloc_netdev_dummy);
+
+/**
* synchronize_net - Synchronize with packet receive processing
*
* Wait for packets currently being received to be done.

View file

@@ -0,0 +1,29 @@
From 84443741faab9045d53f022a9ac6a6633067a481 Mon Sep 17 00:00:00 2001
From: Felix Fietkau <nbd@nbd.name>
Date: Wed, 14 Feb 2024 15:42:35 +0100
Subject: [PATCH] netfilter: nf_tables: fix bidirectional offload regression
Commit 8f84780b84d6 ("netfilter: flowtable: allow unidirectional rules")
made unidirectional flow offload possible, while completely ignoring (and
breaking) bidirectional flow offload for nftables.
Add the missing flag that was left out as an exercise for the reader :)
Cc: Vlad Buslov <vladbu@nvidia.com>
Fixes: 8f84780b84d6 ("netfilter: flowtable: allow unidirectional rules")
Reported-by: Daniel Golle <daniel@makrotopia.org>
Signed-off-by: Felix Fietkau <nbd@nbd.name>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
---
net/netfilter/nft_flow_offload.c | 1 +
1 file changed, 1 insertion(+)
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -367,6 +367,7 @@ static void nft_flow_offload_eval(const
if (tcph)
flow_offload_ct_tcp(ct);
+ __set_bit(NF_FLOW_HW_BIDIRECTIONAL, &flow->flags);
ret = flow_offload_add(flowtable, flow);
if (ret < 0)
goto err_flow_add;

View file

@@ -0,0 +1,183 @@
From e1fbfa4a995d42e02e22b0dff2f8b4fdee1504b3 Mon Sep 17 00:00:00 2001
From: Christian Marangi <ansuelsmth@gmail.com>
Date: Tue, 14 Nov 2023 15:08:42 +0100
Subject: [PATCH 2/3] net: phy: aquantia: move MMD_VEND define to header
Move the MMD_VEND defines to the header to clean things up and in
preparation for firmware loading support, which requires some defines
currently placed in aquantia_main.
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
Reviewed-by: Andrew Lunn <andrew@lunn.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/phy/aquantia/aquantia.h | 69 +++++++++++++++++++++++
drivers/net/phy/aquantia/aquantia_hwmon.c | 14 -----
drivers/net/phy/aquantia/aquantia_main.c | 55 ------------------
3 files changed, 69 insertions(+), 69 deletions(-)
--- a/drivers/net/phy/aquantia/aquantia.h
+++ b/drivers/net/phy/aquantia/aquantia.h
@@ -9,6 +9,75 @@
#include <linux/device.h>
#include <linux/phy.h>
+/* Vendor specific 1, MDIO_MMD_VEND1 */
+#define VEND1_GLOBAL_FW_ID 0x0020
+#define VEND1_GLOBAL_FW_ID_MAJOR GENMASK(15, 8)
+#define VEND1_GLOBAL_FW_ID_MINOR GENMASK(7, 0)
+
+/* The following registers all have similar layouts; first the registers... */
+#define VEND1_GLOBAL_CFG_10M 0x0310
+#define VEND1_GLOBAL_CFG_100M 0x031b
+#define VEND1_GLOBAL_CFG_1G 0x031c
+#define VEND1_GLOBAL_CFG_2_5G 0x031d
+#define VEND1_GLOBAL_CFG_5G 0x031e
+#define VEND1_GLOBAL_CFG_10G 0x031f
+/* ...and now the fields */
+#define VEND1_GLOBAL_CFG_RATE_ADAPT GENMASK(8, 7)
+#define VEND1_GLOBAL_CFG_RATE_ADAPT_NONE 0
+#define VEND1_GLOBAL_CFG_RATE_ADAPT_USX 1
+#define VEND1_GLOBAL_CFG_RATE_ADAPT_PAUSE 2
+
+/* Vendor specific 1, MDIO_MMD_VEND2 */
+#define VEND1_THERMAL_PROV_HIGH_TEMP_FAIL 0xc421
+#define VEND1_THERMAL_PROV_LOW_TEMP_FAIL 0xc422
+#define VEND1_THERMAL_PROV_HIGH_TEMP_WARN 0xc423
+#define VEND1_THERMAL_PROV_LOW_TEMP_WARN 0xc424
+#define VEND1_THERMAL_STAT1 0xc820
+#define VEND1_THERMAL_STAT2 0xc821
+#define VEND1_THERMAL_STAT2_VALID BIT(0)
+#define VEND1_GENERAL_STAT1 0xc830
+#define VEND1_GENERAL_STAT1_HIGH_TEMP_FAIL BIT(14)
+#define VEND1_GENERAL_STAT1_LOW_TEMP_FAIL BIT(13)
+#define VEND1_GENERAL_STAT1_HIGH_TEMP_WARN BIT(12)
+#define VEND1_GENERAL_STAT1_LOW_TEMP_WARN BIT(11)
+
+#define VEND1_GLOBAL_GEN_STAT2 0xc831
+#define VEND1_GLOBAL_GEN_STAT2_OP_IN_PROG BIT(15)
+
+#define VEND1_GLOBAL_RSVD_STAT1 0xc885
+#define VEND1_GLOBAL_RSVD_STAT1_FW_BUILD_ID GENMASK(7, 4)
+#define VEND1_GLOBAL_RSVD_STAT1_PROV_ID GENMASK(3, 0)
+
+#define VEND1_GLOBAL_RSVD_STAT9 0xc88d
+#define VEND1_GLOBAL_RSVD_STAT9_MODE GENMASK(7, 0)
+#define VEND1_GLOBAL_RSVD_STAT9_1000BT2 0x23
+
+#define VEND1_GLOBAL_INT_STD_STATUS 0xfc00
+#define VEND1_GLOBAL_INT_VEND_STATUS 0xfc01
+
+#define VEND1_GLOBAL_INT_STD_MASK 0xff00
+#define VEND1_GLOBAL_INT_STD_MASK_PMA1 BIT(15)
+#define VEND1_GLOBAL_INT_STD_MASK_PMA2 BIT(14)
+#define VEND1_GLOBAL_INT_STD_MASK_PCS1 BIT(13)
+#define VEND1_GLOBAL_INT_STD_MASK_PCS2 BIT(12)
+#define VEND1_GLOBAL_INT_STD_MASK_PCS3 BIT(11)
+#define VEND1_GLOBAL_INT_STD_MASK_PHY_XS1 BIT(10)
+#define VEND1_GLOBAL_INT_STD_MASK_PHY_XS2 BIT(9)
+#define VEND1_GLOBAL_INT_STD_MASK_AN1 BIT(8)
+#define VEND1_GLOBAL_INT_STD_MASK_AN2 BIT(7)
+#define VEND1_GLOBAL_INT_STD_MASK_GBE BIT(6)
+#define VEND1_GLOBAL_INT_STD_MASK_ALL BIT(0)
+
+#define VEND1_GLOBAL_INT_VEND_MASK 0xff01
+#define VEND1_GLOBAL_INT_VEND_MASK_PMA BIT(15)
+#define VEND1_GLOBAL_INT_VEND_MASK_PCS BIT(14)
+#define VEND1_GLOBAL_INT_VEND_MASK_PHY_XS BIT(13)
+#define VEND1_GLOBAL_INT_VEND_MASK_AN BIT(12)
+#define VEND1_GLOBAL_INT_VEND_MASK_GBE BIT(11)
+#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL1 BIT(2)
+#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL2 BIT(1)
+#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3 BIT(0)
+
#if IS_REACHABLE(CONFIG_HWMON)
int aqr_hwmon_probe(struct phy_device *phydev);
#else
--- a/drivers/net/phy/aquantia/aquantia_hwmon.c
+++ b/drivers/net/phy/aquantia/aquantia_hwmon.c
@@ -13,20 +13,6 @@
#include "aquantia.h"
-/* Vendor specific 1, MDIO_MMD_VEND2 */
-#define VEND1_THERMAL_PROV_HIGH_TEMP_FAIL 0xc421
-#define VEND1_THERMAL_PROV_LOW_TEMP_FAIL 0xc422
-#define VEND1_THERMAL_PROV_HIGH_TEMP_WARN 0xc423
-#define VEND1_THERMAL_PROV_LOW_TEMP_WARN 0xc424
-#define VEND1_THERMAL_STAT1 0xc820
-#define VEND1_THERMAL_STAT2 0xc821
-#define VEND1_THERMAL_STAT2_VALID BIT(0)
-#define VEND1_GENERAL_STAT1 0xc830
-#define VEND1_GENERAL_STAT1_HIGH_TEMP_FAIL BIT(14)
-#define VEND1_GENERAL_STAT1_LOW_TEMP_FAIL BIT(13)
-#define VEND1_GENERAL_STAT1_HIGH_TEMP_WARN BIT(12)
-#define VEND1_GENERAL_STAT1_LOW_TEMP_WARN BIT(11)
-
#if IS_REACHABLE(CONFIG_HWMON)
static umode_t aqr_hwmon_is_visible(const void *data,
--- a/drivers/net/phy/aquantia/aquantia_main.c
+++ b/drivers/net/phy/aquantia/aquantia_main.c
@@ -91,61 +91,6 @@
#define MDIO_C22EXT_STAT_SGMII_TX_FRAME_ALIGN_ERR 0xd31a
#define MDIO_C22EXT_STAT_SGMII_TX_RUNT_FRAMES 0xd31b
-/* Vendor specific 1, MDIO_MMD_VEND1 */
-#define VEND1_GLOBAL_FW_ID 0x0020
-#define VEND1_GLOBAL_FW_ID_MAJOR GENMASK(15, 8)
-#define VEND1_GLOBAL_FW_ID_MINOR GENMASK(7, 0)
-
-#define VEND1_GLOBAL_GEN_STAT2 0xc831
-#define VEND1_GLOBAL_GEN_STAT2_OP_IN_PROG BIT(15)
-
-/* The following registers all have similar layouts; first the registers... */
-#define VEND1_GLOBAL_CFG_10M 0x0310
-#define VEND1_GLOBAL_CFG_100M 0x031b
-#define VEND1_GLOBAL_CFG_1G 0x031c
-#define VEND1_GLOBAL_CFG_2_5G 0x031d
-#define VEND1_GLOBAL_CFG_5G 0x031e
-#define VEND1_GLOBAL_CFG_10G 0x031f
-/* ...and now the fields */
-#define VEND1_GLOBAL_CFG_RATE_ADAPT GENMASK(8, 7)
-#define VEND1_GLOBAL_CFG_RATE_ADAPT_NONE 0
-#define VEND1_GLOBAL_CFG_RATE_ADAPT_USX 1
-#define VEND1_GLOBAL_CFG_RATE_ADAPT_PAUSE 2
-
-#define VEND1_GLOBAL_RSVD_STAT1 0xc885
-#define VEND1_GLOBAL_RSVD_STAT1_FW_BUILD_ID GENMASK(7, 4)
-#define VEND1_GLOBAL_RSVD_STAT1_PROV_ID GENMASK(3, 0)
-
-#define VEND1_GLOBAL_RSVD_STAT9 0xc88d
-#define VEND1_GLOBAL_RSVD_STAT9_MODE GENMASK(7, 0)
-#define VEND1_GLOBAL_RSVD_STAT9_1000BT2 0x23
-
-#define VEND1_GLOBAL_INT_STD_STATUS 0xfc00
-#define VEND1_GLOBAL_INT_VEND_STATUS 0xfc01
-
-#define VEND1_GLOBAL_INT_STD_MASK 0xff00
-#define VEND1_GLOBAL_INT_STD_MASK_PMA1 BIT(15)
-#define VEND1_GLOBAL_INT_STD_MASK_PMA2 BIT(14)
-#define VEND1_GLOBAL_INT_STD_MASK_PCS1 BIT(13)
-#define VEND1_GLOBAL_INT_STD_MASK_PCS2 BIT(12)
-#define VEND1_GLOBAL_INT_STD_MASK_PCS3 BIT(11)
-#define VEND1_GLOBAL_INT_STD_MASK_PHY_XS1 BIT(10)
-#define VEND1_GLOBAL_INT_STD_MASK_PHY_XS2 BIT(9)
-#define VEND1_GLOBAL_INT_STD_MASK_AN1 BIT(8)
-#define VEND1_GLOBAL_INT_STD_MASK_AN2 BIT(7)
-#define VEND1_GLOBAL_INT_STD_MASK_GBE BIT(6)
-#define VEND1_GLOBAL_INT_STD_MASK_ALL BIT(0)
-
-#define VEND1_GLOBAL_INT_VEND_MASK 0xff01
-#define VEND1_GLOBAL_INT_VEND_MASK_PMA BIT(15)
-#define VEND1_GLOBAL_INT_VEND_MASK_PCS BIT(14)
-#define VEND1_GLOBAL_INT_VEND_MASK_PHY_XS BIT(13)
-#define VEND1_GLOBAL_INT_VEND_MASK_AN BIT(12)
-#define VEND1_GLOBAL_INT_VEND_MASK_GBE BIT(11)
-#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL1 BIT(2)
-#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL2 BIT(1)
-#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3 BIT(0)
-
/* Sleep and timeout for checking if the Processor-Intensive
* MDIO operation is finished
*/


@ -0,0 +1,504 @@
From e93984ebc1c82bd34f7a1b3391efaceee0a8ae96 Mon Sep 17 00:00:00 2001
From: Robert Marko <robimarko@gmail.com>
Date: Tue, 14 Nov 2023 15:08:43 +0100
Subject: [PATCH 3/3] net: phy: aquantia: add firmware load support
Aquantia PHYs require firmware to be loaded before they start operating.
It can be loaded automatically when there is a SPI-NOR connected to the
Aquantia PHY, or it can be loaded from the host via MDIO.
This patch adds support for loading the firmware via MDIO, as in most
cases no SPI-NOR is used in order to save on cost.
The firmware loading code itself is ported from mainline U-Boot with cleanups.
The firmware mixes values in both big and little endian. The PHY core
itself is big-endian but it expects values to be written in little-endian.
The firmware is little-endian but its CRC-16 is stored at the end of the
firmware in big-endian.
It seems the PHY internally converts the little-endian firmware to its
big-endian representation when using the mailbox, but the mailbox returns
a big-endian CRC-16 to verify the integrity of the written data.
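
To illustrate that mixed-endianness handshake, here is a minimal userspace
sketch (not kernel code) of how the expected mailbox CRC can be computed:
each firmware word is read as stored (little-endian), byte-swapped to the
big-endian view the PHY uses, and fed into a local CRC-16/CCITT-FALSE
routine standing in for the kernel's crc_ccitt_false(). The firmware bytes
are purely hypothetical and a little-endian host is assumed.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Bitwise CRC-16/CCITT-FALSE (poly 0x1021, MSB first), seeded with 0 just
 * like the driver seeds crc_ccitt_false() while streaming mailbox words.
 */
static uint16_t crc16_ccitt_false(uint16_t crc, const uint8_t *buf, size_t len)
{
	size_t i;
	int bit;

	for (i = 0; i < len; i++) {
		crc ^= (uint16_t)buf[i] << 8;
		for (bit = 0; bit < 8; bit++)
			crc = (crc & 0x8000) ? (uint16_t)((crc << 1) ^ 0x1021)
					     : (uint16_t)(crc << 1);
	}

	return crc;
}

int main(void)
{
	/* Hypothetical firmware fragment: two 32-bit words as stored in the
	 * image file, i.e. in little-endian byte order.
	 */
	const uint8_t fw[8] = { 0x78, 0x56, 0x34, 0x12, 0xf0, 0xde, 0xbc, 0x9a };
	uint16_t crc = 0;
	size_t pos;

	for (pos = 0; pos < sizeof(fw); pos += 4) {
		/* The PHY accumulates its CRC over the big-endian view of each
		 * word, so swap the stored little-endian bytes before updating.
		 */
		const uint8_t be[4] = {
			fw[pos + 3], fw[pos + 2], fw[pos + 1], fw[pos + 0],
		};

		crc = crc16_ccitt_false(crc, be, sizeof(be));
	}

	/* This is the value VEND1_GLOBAL_MAILBOX_INTERFACE2 should report. */
	printf("expected mailbox CRC: 0x%04x\n", crc);

	return 0;
}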
Co-developed-by: Christian Marangi <ansuelsmth@gmail.com>
Signed-off-by: Robert Marko <robimarko@gmail.com>
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
Reviewed-by: Andrew Lunn <andrew@lunn.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/phy/aquantia/Kconfig | 1 +
drivers/net/phy/aquantia/Makefile | 2 +-
drivers/net/phy/aquantia/aquantia.h | 32 ++
drivers/net/phy/aquantia/aquantia_firmware.c | 370 +++++++++++++++++++
drivers/net/phy/aquantia/aquantia_main.c | 6 +
5 files changed, 410 insertions(+), 1 deletion(-)
create mode 100644 drivers/net/phy/aquantia/aquantia_firmware.c
--- a/drivers/net/phy/aquantia/Kconfig
+++ b/drivers/net/phy/aquantia/Kconfig
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
config AQUANTIA_PHY
tristate "Aquantia PHYs"
+ select CRC_CCITT
help
Currently supports the Aquantia AQ1202, AQ2104, AQR105, AQR405
--- a/drivers/net/phy/aquantia/Makefile
+++ b/drivers/net/phy/aquantia/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-aquantia-objs += aquantia_main.o
+aquantia-objs += aquantia_main.o aquantia_firmware.o
ifdef CONFIG_HWMON
aquantia-objs += aquantia_hwmon.o
endif
--- a/drivers/net/phy/aquantia/aquantia.h
+++ b/drivers/net/phy/aquantia/aquantia.h
@@ -10,10 +10,35 @@
#include <linux/phy.h>
/* Vendor specific 1, MDIO_MMD_VEND1 */
+#define VEND1_GLOBAL_SC 0x0
+#define VEND1_GLOBAL_SC_SOFT_RESET BIT(15)
+#define VEND1_GLOBAL_SC_LOW_POWER BIT(11)
+
#define VEND1_GLOBAL_FW_ID 0x0020
#define VEND1_GLOBAL_FW_ID_MAJOR GENMASK(15, 8)
#define VEND1_GLOBAL_FW_ID_MINOR GENMASK(7, 0)
+#define VEND1_GLOBAL_MAILBOX_INTERFACE1 0x0200
+#define VEND1_GLOBAL_MAILBOX_INTERFACE1_EXECUTE BIT(15)
+#define VEND1_GLOBAL_MAILBOX_INTERFACE1_WRITE BIT(14)
+#define VEND1_GLOBAL_MAILBOX_INTERFACE1_CRC_RESET BIT(12)
+#define VEND1_GLOBAL_MAILBOX_INTERFACE1_BUSY BIT(8)
+
+#define VEND1_GLOBAL_MAILBOX_INTERFACE2 0x0201
+#define VEND1_GLOBAL_MAILBOX_INTERFACE3 0x0202
+#define VEND1_GLOBAL_MAILBOX_INTERFACE3_MSW_ADDR_MASK GENMASK(15, 0)
+#define VEND1_GLOBAL_MAILBOX_INTERFACE3_MSW_ADDR(x) FIELD_PREP(VEND1_GLOBAL_MAILBOX_INTERFACE3_MSW_ADDR_MASK, (u16)((x) >> 16))
+#define VEND1_GLOBAL_MAILBOX_INTERFACE4 0x0203
+#define VEND1_GLOBAL_MAILBOX_INTERFACE4_LSW_ADDR_MASK GENMASK(15, 2)
+#define VEND1_GLOBAL_MAILBOX_INTERFACE4_LSW_ADDR(x) FIELD_PREP(VEND1_GLOBAL_MAILBOX_INTERFACE4_LSW_ADDR_MASK, (u16)(x))
+
+#define VEND1_GLOBAL_MAILBOX_INTERFACE5 0x0204
+#define VEND1_GLOBAL_MAILBOX_INTERFACE5_MSW_DATA_MASK GENMASK(15, 0)
+#define VEND1_GLOBAL_MAILBOX_INTERFACE5_MSW_DATA(x) FIELD_PREP(VEND1_GLOBAL_MAILBOX_INTERFACE5_MSW_DATA_MASK, (u16)((x) >> 16))
+#define VEND1_GLOBAL_MAILBOX_INTERFACE6 0x0205
+#define VEND1_GLOBAL_MAILBOX_INTERFACE6_LSW_DATA_MASK GENMASK(15, 0)
+#define VEND1_GLOBAL_MAILBOX_INTERFACE6_LSW_DATA(x) FIELD_PREP(VEND1_GLOBAL_MAILBOX_INTERFACE6_LSW_DATA_MASK, (u16)(x))
+
/* The following registers all have similar layouts; first the registers... */
#define VEND1_GLOBAL_CFG_10M 0x0310
#define VEND1_GLOBAL_CFG_100M 0x031b
@@ -28,6 +53,11 @@
#define VEND1_GLOBAL_CFG_RATE_ADAPT_PAUSE 2
/* Vendor specific 1, MDIO_MMD_VEND2 */
+#define VEND1_GLOBAL_CONTROL2 0xc001
+#define VEND1_GLOBAL_CONTROL2_UP_RUN_STALL_RST BIT(15)
+#define VEND1_GLOBAL_CONTROL2_UP_RUN_STALL_OVD BIT(6)
+#define VEND1_GLOBAL_CONTROL2_UP_RUN_STALL BIT(0)
+
#define VEND1_THERMAL_PROV_HIGH_TEMP_FAIL 0xc421
#define VEND1_THERMAL_PROV_LOW_TEMP_FAIL 0xc422
#define VEND1_THERMAL_PROV_HIGH_TEMP_WARN 0xc423
@@ -83,3 +113,5 @@ int aqr_hwmon_probe(struct phy_device *p
#else
static inline int aqr_hwmon_probe(struct phy_device *phydev) { return 0; }
#endif
+
+int aqr_firmware_load(struct phy_device *phydev);
--- /dev/null
+++ b/drivers/net/phy/aquantia/aquantia_firmware.c
@@ -0,0 +1,370 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bitfield.h>
+#include <linux/of.h>
+#include <linux/firmware.h>
+#include <linux/crc-ccitt.h>
+#include <linux/nvmem-consumer.h>
+
+#include <asm/unaligned.h>
+
+#include "aquantia.h"
+
+#define UP_RESET_SLEEP 100
+
+/* addresses of memory segments in the phy */
+#define DRAM_BASE_ADDR 0x3FFE0000
+#define IRAM_BASE_ADDR 0x40000000
+
+/* firmware image format constants */
+#define VERSION_STRING_SIZE 0x40
+#define VERSION_STRING_OFFSET 0x0200
+/* primary offset is written at an offset from the start of the fw blob */
+#define PRIMARY_OFFSET_OFFSET 0x8
+/* primary offset needs to be then added to a base offset */
+#define PRIMARY_OFFSET_SHIFT 12
+#define PRIMARY_OFFSET(x) ((x) << PRIMARY_OFFSET_SHIFT)
+#define HEADER_OFFSET 0x300
+
+struct aqr_fw_header {
+ u32 padding;
+ u8 iram_offset[3];
+ u8 iram_size[3];
+ u8 dram_offset[3];
+ u8 dram_size[3];
+} __packed;
+
+enum aqr_fw_src {
+ AQR_FW_SRC_NVMEM = 0,
+ AQR_FW_SRC_FS,
+};
+
+static const char * const aqr_fw_src_string[] = {
+ [AQR_FW_SRC_NVMEM] = "NVMEM",
+ [AQR_FW_SRC_FS] = "FS",
+};
+
+/* AQR firmware doesn't have fixed offsets for the iram and dram sections
+ * but instead provides a header with the offsets to use when reading
+ * and parsing the firmware.
+ *
+ * AQR firmware can't be trusted and each offset is validated to be
+ * non-negative and within the size of the firmware itself.
+ */
+static bool aqr_fw_validate_get(size_t size, size_t offset, size_t get_size)
+{
+ return offset + get_size <= size;
+}
+
+static int aqr_fw_get_be16(const u8 *data, size_t offset, size_t size, u16 *value)
+{
+ if (!aqr_fw_validate_get(size, offset, sizeof(u16)))
+ return -EINVAL;
+
+ *value = get_unaligned_be16(data + offset);
+
+ return 0;
+}
+
+static int aqr_fw_get_le16(const u8 *data, size_t offset, size_t size, u16 *value)
+{
+ if (!aqr_fw_validate_get(size, offset, sizeof(u16)))
+ return -EINVAL;
+
+ *value = get_unaligned_le16(data + offset);
+
+ return 0;
+}
+
+static int aqr_fw_get_le24(const u8 *data, size_t offset, size_t size, u32 *value)
+{
+ if (!aqr_fw_validate_get(size, offset, sizeof(u8) * 3))
+ return -EINVAL;
+
+ *value = get_unaligned_le24(data + offset);
+
+ return 0;
+}
+
+/* load data into the phy's memory */
+static int aqr_fw_load_memory(struct phy_device *phydev, u32 addr,
+ const u8 *data, size_t len)
+{
+ u16 crc = 0, up_crc;
+ size_t pos;
+
+ /* PHY expect addr in LE */
+ addr = (__force u32)cpu_to_le32(addr);
+
+ phy_write_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_GLOBAL_MAILBOX_INTERFACE1,
+ VEND1_GLOBAL_MAILBOX_INTERFACE1_CRC_RESET);
+ phy_write_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_GLOBAL_MAILBOX_INTERFACE3,
+ VEND1_GLOBAL_MAILBOX_INTERFACE3_MSW_ADDR(addr));
+ phy_write_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_GLOBAL_MAILBOX_INTERFACE4,
+ VEND1_GLOBAL_MAILBOX_INTERFACE4_LSW_ADDR(addr));
+
+ /* We assume and enforce the size to be word aligned.
+ * If a firmware that is not word aligned is found, please report upstream.
+ */
+ for (pos = 0; pos < len; pos += sizeof(u32)) {
+ u32 word;
+
+ /* FW data is always stored in little-endian */
+ word = get_unaligned((const u32 *)(data + pos));
+
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_MAILBOX_INTERFACE5,
+ VEND1_GLOBAL_MAILBOX_INTERFACE5_MSW_DATA(word));
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_MAILBOX_INTERFACE6,
+ VEND1_GLOBAL_MAILBOX_INTERFACE6_LSW_DATA(word));
+
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_MAILBOX_INTERFACE1,
+ VEND1_GLOBAL_MAILBOX_INTERFACE1_EXECUTE |
+ VEND1_GLOBAL_MAILBOX_INTERFACE1_WRITE);
+
+ /* calculate CRC as we load data to the mailbox.
+ * We convert word to big-endian as PHY is BE and mailbox will
+ * return a BE CRC.
+ */
+ word = (__force u32)cpu_to_be32(word);
+ crc = crc_ccitt_false(crc, (u8 *)&word, sizeof(word));
+ }
+
+ up_crc = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_MAILBOX_INTERFACE2);
+ if (crc != up_crc) {
+ phydev_err(phydev, "CRC mismatch: calculated 0x%04x PHY 0x%04x\n",
+ crc, up_crc);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int aqr_fw_boot(struct phy_device *phydev, const u8 *data, size_t size,
+ enum aqr_fw_src fw_src)
+{
+ u16 calculated_crc, read_crc, read_primary_offset;
+ u32 iram_offset = 0, iram_size = 0;
+ u32 dram_offset = 0, dram_size = 0;
+ char version[VERSION_STRING_SIZE];
+ u32 primary_offset = 0;
+ int ret;
+
+ /* extract saved CRC at the end of the fw
+ * CRC is saved in big-endian as PHY is BE
+ */
+ ret = aqr_fw_get_be16(data, size - sizeof(u16), size, &read_crc);
+ if (ret) {
+ phydev_err(phydev, "bad firmware CRC in firmware\n");
+ return ret;
+ }
+ calculated_crc = crc_ccitt_false(0, data, size - sizeof(u16));
+ if (read_crc != calculated_crc) {
+ phydev_err(phydev, "bad firmware CRC: file 0x%04x calculated 0x%04x\n",
+ read_crc, calculated_crc);
+ return -EINVAL;
+ }
+
+ /* Get the primary offset to extract DRAM and IRAM sections. */
+ ret = aqr_fw_get_le16(data, PRIMARY_OFFSET_OFFSET, size, &read_primary_offset);
+ if (ret) {
+ phydev_err(phydev, "bad primary offset in firmware\n");
+ return ret;
+ }
+ primary_offset = PRIMARY_OFFSET(read_primary_offset);
+
+ /* Find the DRAM and IRAM sections within the firmware file.
+ * Make sure the fw_header is correctly in the firmware.
+ */
+ if (!aqr_fw_validate_get(size, primary_offset + HEADER_OFFSET,
+ sizeof(struct aqr_fw_header))) {
+ phydev_err(phydev, "bad fw_header in firmware\n");
+ return -EINVAL;
+ }
+
+ /* offsets are in LE and values need to be converted to cpu endian */
+ ret = aqr_fw_get_le24(data, primary_offset + HEADER_OFFSET +
+ offsetof(struct aqr_fw_header, iram_offset),
+ size, &iram_offset);
+ if (ret) {
+ phydev_err(phydev, "bad iram offset in firmware\n");
+ return ret;
+ }
+ ret = aqr_fw_get_le24(data, primary_offset + HEADER_OFFSET +
+ offsetof(struct aqr_fw_header, iram_size),
+ size, &iram_size);
+ if (ret) {
+ phydev_err(phydev, "invalid iram size in firmware\n");
+ return ret;
+ }
+ ret = aqr_fw_get_le24(data, primary_offset + HEADER_OFFSET +
+ offsetof(struct aqr_fw_header, dram_offset),
+ size, &dram_offset);
+ if (ret) {
+ phydev_err(phydev, "bad dram offset in firmware\n");
+ return ret;
+ }
+ ret = aqr_fw_get_le24(data, primary_offset + HEADER_OFFSET +
+ offsetof(struct aqr_fw_header, dram_size),
+ size, &dram_size);
+ if (ret) {
+ phydev_err(phydev, "invalid dram size in firmware\n");
+ return ret;
+ }
+
+ /* Increment the offset with the primary offset.
+ * Validate iram/dram offset and size.
+ */
+ iram_offset += primary_offset;
+ if (iram_size % sizeof(u32)) {
+ phydev_err(phydev, "iram size is not aligned to word size. Please report this upstream!\n");
+ return -EINVAL;
+ }
+ if (!aqr_fw_validate_get(size, iram_offset, iram_size)) {
+ phydev_err(phydev, "invalid iram offset for iram size\n");
+ return -EINVAL;
+ }
+
+ dram_offset += primary_offset;
+ if (dram_size % sizeof(u32)) {
+ phydev_err(phydev, "dram size is not aligned to word size. Please report this upstream!\n");
+ return -EINVAL;
+ }
+ if (!aqr_fw_validate_get(size, dram_offset, dram_size)) {
+ phydev_err(phydev, "invalid dram offset for dram size\n");
+ return -EINVAL;
+ }
+
+ phydev_dbg(phydev, "primary %d IRAM offset=%d size=%d DRAM offset=%d size=%d\n",
+ primary_offset, iram_offset, iram_size, dram_offset, dram_size);
+
+ if (!aqr_fw_validate_get(size, dram_offset + VERSION_STRING_OFFSET,
+ VERSION_STRING_SIZE)) {
+ phydev_err(phydev, "invalid version in firmware\n");
+ return -EINVAL;
+ }
+ strscpy(version, (char *)data + dram_offset + VERSION_STRING_OFFSET,
+ VERSION_STRING_SIZE);
+ if (version[0] == '\0') {
+ phydev_err(phydev, "invalid version in firmware\n");
+ return -EINVAL;
+ }
+ phydev_info(phydev, "loading firmware version '%s' from '%s'\n", version,
+ aqr_fw_src_string[fw_src]);
+
+ /* stall the microprocessor */
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_CONTROL2,
+ VEND1_GLOBAL_CONTROL2_UP_RUN_STALL | VEND1_GLOBAL_CONTROL2_UP_RUN_STALL_OVD);
+
+ phydev_dbg(phydev, "loading DRAM 0x%08x from offset=%d size=%d\n",
+ DRAM_BASE_ADDR, dram_offset, dram_size);
+ ret = aqr_fw_load_memory(phydev, DRAM_BASE_ADDR, data + dram_offset,
+ dram_size);
+ if (ret)
+ return ret;
+
+ phydev_dbg(phydev, "loading IRAM 0x%08x from offset=%d size=%d\n",
+ IRAM_BASE_ADDR, iram_offset, iram_size);
+ ret = aqr_fw_load_memory(phydev, IRAM_BASE_ADDR, data + iram_offset,
+ iram_size);
+ if (ret)
+ return ret;
+
+ /* make sure soft reset and low power mode are clear */
+ phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_SC,
+ VEND1_GLOBAL_SC_SOFT_RESET | VEND1_GLOBAL_SC_LOW_POWER);
+
+ /* Release the microprocessor. UP_RESET must be held for 100 usec. */
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_CONTROL2,
+ VEND1_GLOBAL_CONTROL2_UP_RUN_STALL |
+ VEND1_GLOBAL_CONTROL2_UP_RUN_STALL_OVD |
+ VEND1_GLOBAL_CONTROL2_UP_RUN_STALL_RST);
+ usleep_range(UP_RESET_SLEEP, UP_RESET_SLEEP * 2);
+
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_CONTROL2,
+ VEND1_GLOBAL_CONTROL2_UP_RUN_STALL_OVD);
+
+ return 0;
+}
+
+static int aqr_firmware_load_nvmem(struct phy_device *phydev)
+{
+ struct nvmem_cell *cell;
+ size_t size;
+ u8 *buf;
+ int ret;
+
+ cell = nvmem_cell_get(&phydev->mdio.dev, "firmware");
+ if (IS_ERR(cell))
+ return PTR_ERR(cell);
+
+ buf = nvmem_cell_read(cell, &size);
+ if (IS_ERR(buf)) {
+ ret = PTR_ERR(buf);
+ goto exit;
+ }
+
+ ret = aqr_fw_boot(phydev, buf, size, AQR_FW_SRC_NVMEM);
+ if (ret)
+ phydev_err(phydev, "firmware loading failed: %d\n", ret);
+
+ kfree(buf);
+exit:
+ nvmem_cell_put(cell);
+
+ return ret;
+}
+
+static int aqr_firmware_load_fs(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ const struct firmware *fw;
+ const char *fw_name;
+ int ret;
+
+ ret = of_property_read_string(dev->of_node, "firmware-name",
+ &fw_name);
+ if (ret)
+ return ret;
+
+ ret = request_firmware(&fw, fw_name, dev);
+ if (ret) {
+ phydev_err(phydev, "failed to find FW file %s (%d)\n",
+ fw_name, ret);
+ return ret;
+ }
+
+ ret = aqr_fw_boot(phydev, fw->data, fw->size, AQR_FW_SRC_FS);
+ if (ret)
+ phydev_err(phydev, "firmware loading failed: %d\n", ret);
+
+ release_firmware(fw);
+
+ return ret;
+}
+
+int aqr_firmware_load(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Check if the firmware is already loaded by polling
+ * the current version returned by the PHY. If 0 is returned,
+ * no firmware is loaded.
+ */
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_FW_ID);
+ if (ret > 0)
+ goto exit;
+
+ ret = aqr_firmware_load_nvmem(phydev);
+ if (!ret)
+ goto exit;
+
+ ret = aqr_firmware_load_fs(phydev);
+ if (ret)
+ return ret;
+
+exit:
+ return 0;
+}
--- a/drivers/net/phy/aquantia/aquantia_main.c
+++ b/drivers/net/phy/aquantia/aquantia_main.c
@@ -658,11 +658,17 @@ static int aqr107_resume(struct phy_devi
static int aqr107_probe(struct phy_device *phydev)
{
+ int ret;
+
phydev->priv = devm_kzalloc(&phydev->mdio.dev,
sizeof(struct aqr107_priv), GFP_KERNEL);
if (!phydev->priv)
return -ENOMEM;
+ ret = aqr_firmware_load(phydev);
+ if (ret)
+ return ret;
+
return aqr_hwmon_probe(phydev);
}


@ -0,0 +1,69 @@
From 6a3b8c573b5a152a6aa7a0b54c5e18b84c6ba6f5 Mon Sep 17 00:00:00 2001
From: Christian Marangi <ansuelsmth@gmail.com>
Date: Fri, 8 Dec 2023 15:51:49 +0100
Subject: [PATCH 02/13] net: phy: at803x: move disable WOL to specific at8031
probe
Move the WOL disable call to specific at8031 probe to make at803x_probe
more generic and drop the extra check for the PHY ID.
Keep the same previous behaviour by first calling at803x_probe and then
disabling WOL.
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
Reviewed-by: Andrew Lunn <andrew@lunn.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/phy/at803x.c | 27 +++++++++++++++++----------
1 file changed, 17 insertions(+), 10 deletions(-)
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -886,15 +886,6 @@ static int at803x_probe(struct phy_devic
priv->is_fiber = true;
break;
}
-
- /* Disable WoL in 1588 register which is enabled
- * by default
- */
- ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
- AT803X_PHY_MMD3_WOL_CTRL,
- AT803X_WOL_EN, 0);
- if (ret)
- return ret;
}
return 0;
@@ -1591,6 +1582,22 @@ static int at803x_cable_test_start(struc
return 0;
}
+static int at8031_probe(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = at803x_probe(phydev);
+ if (ret)
+ return ret;
+
+ /* Disable WoL in 1588 register which is enabled
+ * by default
+ */
+ return phy_modify_mmd(phydev, MDIO_MMD_PCS,
+ AT803X_PHY_MMD3_WOL_CTRL,
+ AT803X_WOL_EN, 0);
+}
+
static int qca83xx_config_init(struct phy_device *phydev)
{
u8 switch_revision;
@@ -2092,7 +2099,7 @@ static struct phy_driver at803x_driver[]
PHY_ID_MATCH_EXACT(ATH8031_PHY_ID),
.name = "Qualcomm Atheros AR8031/AR8033",
.flags = PHY_POLL_CABLE_TEST,
- .probe = at803x_probe,
+ .probe = at8031_probe,
.config_init = at803x_config_init,
.config_aneg = at803x_config_aneg,
.soft_reset = genphy_soft_reset,


@ -0,0 +1,129 @@
From 07b1ad83b9ed6db1735ba10baf67b7a565ac0cef Mon Sep 17 00:00:00 2001
From: Christian Marangi <ansuelsmth@gmail.com>
Date: Fri, 8 Dec 2023 15:51:50 +0100
Subject: [PATCH 03/13] net: phy: at803x: rename hw_stats functions to qca83xx
specific name
The functions and the struct related to hw_stats are specific to the qca83xx
PHY but were named following the driver's convention of prefixing everything
with at803x.
To better organize the code, rename these functions with a more specific name
to better describe that they are specific to the 83xx PHY family.
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
Reviewed-by: Andrew Lunn <andrew@lunn.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/phy/at803x.c | 44 ++++++++++++++++++++--------------------
1 file changed, 22 insertions(+), 22 deletions(-)
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -295,7 +295,7 @@ struct at803x_hw_stat {
enum stat_access_type access_type;
};
-static struct at803x_hw_stat at803x_hw_stats[] = {
+static struct at803x_hw_stat qca83xx_hw_stats[] = {
{ "phy_idle_errors", 0xa, GENMASK(7, 0), PHY},
{ "phy_receive_errors", 0x15, GENMASK(15, 0), PHY},
{ "eee_wake_errors", 0x16, GENMASK(15, 0), MMD},
@@ -311,7 +311,7 @@ struct at803x_priv {
bool is_1000basex;
struct regulator_dev *vddio_rdev;
struct regulator_dev *vddh_rdev;
- u64 stats[ARRAY_SIZE(at803x_hw_stats)];
+ u64 stats[ARRAY_SIZE(qca83xx_hw_stats)];
};
struct at803x_context {
@@ -529,24 +529,24 @@ static void at803x_get_wol(struct phy_de
wol->wolopts |= WAKE_MAGIC;
}
-static int at803x_get_sset_count(struct phy_device *phydev)
+static int qca83xx_get_sset_count(struct phy_device *phydev)
{
- return ARRAY_SIZE(at803x_hw_stats);
+ return ARRAY_SIZE(qca83xx_hw_stats);
}
-static void at803x_get_strings(struct phy_device *phydev, u8 *data)
+static void qca83xx_get_strings(struct phy_device *phydev, u8 *data)
{
int i;
- for (i = 0; i < ARRAY_SIZE(at803x_hw_stats); i++) {
+ for (i = 0; i < ARRAY_SIZE(qca83xx_hw_stats); i++) {
strscpy(data + i * ETH_GSTRING_LEN,
- at803x_hw_stats[i].string, ETH_GSTRING_LEN);
+ qca83xx_hw_stats[i].string, ETH_GSTRING_LEN);
}
}
-static u64 at803x_get_stat(struct phy_device *phydev, int i)
+static u64 qca83xx_get_stat(struct phy_device *phydev, int i)
{
- struct at803x_hw_stat stat = at803x_hw_stats[i];
+ struct at803x_hw_stat stat = qca83xx_hw_stats[i];
struct at803x_priv *priv = phydev->priv;
int val;
u64 ret;
@@ -567,13 +567,13 @@ static u64 at803x_get_stat(struct phy_de
return ret;
}
-static void at803x_get_stats(struct phy_device *phydev,
- struct ethtool_stats *stats, u64 *data)
+static void qca83xx_get_stats(struct phy_device *phydev,
+ struct ethtool_stats *stats, u64 *data)
{
int i;
- for (i = 0; i < ARRAY_SIZE(at803x_hw_stats); i++)
- data[i] = at803x_get_stat(phydev, i);
+ for (i = 0; i < ARRAY_SIZE(qca83xx_hw_stats); i++)
+ data[i] = qca83xx_get_stat(phydev, i);
}
static int at803x_suspend(struct phy_device *phydev)
@@ -2175,9 +2175,9 @@ static struct phy_driver at803x_driver[]
.flags = PHY_IS_INTERNAL,
.config_init = qca83xx_config_init,
.soft_reset = genphy_soft_reset,
- .get_sset_count = at803x_get_sset_count,
- .get_strings = at803x_get_strings,
- .get_stats = at803x_get_stats,
+ .get_sset_count = qca83xx_get_sset_count,
+ .get_strings = qca83xx_get_strings,
+ .get_stats = qca83xx_get_stats,
.suspend = qca83xx_suspend,
.resume = qca83xx_resume,
}, {
@@ -2191,9 +2191,9 @@ static struct phy_driver at803x_driver[]
.flags = PHY_IS_INTERNAL,
.config_init = qca83xx_config_init,
.soft_reset = genphy_soft_reset,
- .get_sset_count = at803x_get_sset_count,
- .get_strings = at803x_get_strings,
- .get_stats = at803x_get_stats,
+ .get_sset_count = qca83xx_get_sset_count,
+ .get_strings = qca83xx_get_strings,
+ .get_stats = qca83xx_get_stats,
.suspend = qca83xx_suspend,
.resume = qca83xx_resume,
}, {
@@ -2207,9 +2207,9 @@ static struct phy_driver at803x_driver[]
.flags = PHY_IS_INTERNAL,
.config_init = qca83xx_config_init,
.soft_reset = genphy_soft_reset,
- .get_sset_count = at803x_get_sset_count,
- .get_strings = at803x_get_strings,
- .get_stats = at803x_get_stats,
+ .get_sset_count = qca83xx_get_sset_count,
+ .get_strings = qca83xx_get_strings,
+ .get_stats = qca83xx_get_stats,
.suspend = qca83xx_suspend,
.resume = qca83xx_resume,
}, {


@ -0,0 +1,155 @@
From d43cff3f82336c0bd965ea552232d9f4ddac71a6 Mon Sep 17 00:00:00 2001
From: Christian Marangi <ansuelsmth@gmail.com>
Date: Fri, 8 Dec 2023 15:51:51 +0100
Subject: [PATCH 04/13] net: phy: at803x: move qca83xx specific check in
dedicated functions
Rework the qca83xx specific checks into dedicated functions to tidy things up
and drop the useless phy_id check.
Also drop the useless link_change_notify for QCA8337 as it did nothing and
returned early.
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
Reviewed-by: Andrew Lunn <andrew@lunn.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/phy/at803x.c | 68 ++++++++++++++++++++++------------------
1 file changed, 37 insertions(+), 31 deletions(-)
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -1623,27 +1623,26 @@ static int qca83xx_config_init(struct ph
break;
}
+ /* Following original QCA sourcecode set port to prefer master */
+ phy_set_bits(phydev, MII_CTRL1000, CTL1000_PREFER_MASTER);
+
+ return 0;
+}
+
+static int qca8327_config_init(struct phy_device *phydev)
+{
/* QCA8327 require DAC amplitude adjustment for 100m set to +6%.
* Disable on init and enable only with 100m speed following
* qca original source code.
*/
- if (phydev->drv->phy_id == QCA8327_A_PHY_ID ||
- phydev->drv->phy_id == QCA8327_B_PHY_ID)
- at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
- QCA8327_DEBUG_MANU_CTRL_EN, 0);
+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
+ QCA8327_DEBUG_MANU_CTRL_EN, 0);
- /* Following original QCA sourcecode set port to prefer master */
- phy_set_bits(phydev, MII_CTRL1000, CTL1000_PREFER_MASTER);
-
- return 0;
+ return qca83xx_config_init(phydev);
}
static void qca83xx_link_change_notify(struct phy_device *phydev)
{
- /* QCA8337 doesn't require DAC Amplitude adjustement */
- if (phydev->drv->phy_id == QCA8337_PHY_ID)
- return;
-
/* Set DAC Amplitude adjustment to +6% for 100m on link running */
if (phydev->state == PHY_RUNNING) {
if (phydev->speed == SPEED_100)
@@ -1686,19 +1685,6 @@ static int qca83xx_resume(struct phy_dev
static int qca83xx_suspend(struct phy_device *phydev)
{
- u16 mask = 0;
-
- /* Only QCA8337 support actual suspend.
- * QCA8327 cause port unreliability when phy suspend
- * is set.
- */
- if (phydev->drv->phy_id == QCA8337_PHY_ID) {
- genphy_suspend(phydev);
- } else {
- mask |= ~(BMCR_SPEED1000 | BMCR_FULLDPLX);
- phy_modify(phydev, MII_BMCR, mask, 0);
- }
-
at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_GREEN,
AT803X_DEBUG_GATE_CLK_IN1000, 0);
@@ -1709,6 +1695,27 @@ static int qca83xx_suspend(struct phy_de
return 0;
}
+static int qca8337_suspend(struct phy_device *phydev)
+{
+ /* Only QCA8337 support actual suspend. */
+ genphy_suspend(phydev);
+
+ return qca83xx_suspend(phydev);
+}
+
+static int qca8327_suspend(struct phy_device *phydev)
+{
+ u16 mask = 0;
+
+ /* QCA8327 cause port unreliability when phy suspend
+ * is set.
+ */
+ mask |= ~(BMCR_SPEED1000 | BMCR_FULLDPLX);
+ phy_modify(phydev, MII_BMCR, mask, 0);
+
+ return qca83xx_suspend(phydev);
+}
+
static int qca808x_phy_fast_retrain_config(struct phy_device *phydev)
{
int ret;
@@ -2170,7 +2177,6 @@ static struct phy_driver at803x_driver[]
.phy_id_mask = QCA8K_PHY_ID_MASK,
.name = "Qualcomm Atheros 8337 internal PHY",
/* PHY_GBIT_FEATURES */
- .link_change_notify = qca83xx_link_change_notify,
.probe = at803x_probe,
.flags = PHY_IS_INTERNAL,
.config_init = qca83xx_config_init,
@@ -2178,7 +2184,7 @@ static struct phy_driver at803x_driver[]
.get_sset_count = qca83xx_get_sset_count,
.get_strings = qca83xx_get_strings,
.get_stats = qca83xx_get_stats,
- .suspend = qca83xx_suspend,
+ .suspend = qca8337_suspend,
.resume = qca83xx_resume,
}, {
/* QCA8327-A from switch QCA8327-AL1A */
@@ -2189,12 +2195,12 @@ static struct phy_driver at803x_driver[]
.link_change_notify = qca83xx_link_change_notify,
.probe = at803x_probe,
.flags = PHY_IS_INTERNAL,
- .config_init = qca83xx_config_init,
+ .config_init = qca8327_config_init,
.soft_reset = genphy_soft_reset,
.get_sset_count = qca83xx_get_sset_count,
.get_strings = qca83xx_get_strings,
.get_stats = qca83xx_get_stats,
- .suspend = qca83xx_suspend,
+ .suspend = qca8327_suspend,
.resume = qca83xx_resume,
}, {
/* QCA8327-B from switch QCA8327-BL1A */
@@ -2205,12 +2211,12 @@ static struct phy_driver at803x_driver[]
.link_change_notify = qca83xx_link_change_notify,
.probe = at803x_probe,
.flags = PHY_IS_INTERNAL,
- .config_init = qca83xx_config_init,
+ .config_init = qca8327_config_init,
.soft_reset = genphy_soft_reset,
.get_sset_count = qca83xx_get_sset_count,
.get_strings = qca83xx_get_strings,
.get_stats = qca83xx_get_stats,
- .suspend = qca83xx_suspend,
+ .suspend = qca8327_suspend,
.resume = qca83xx_resume,
}, {
/* Qualcomm QCA8081 */


@ -0,0 +1,94 @@
From 900eef75cc5018e149c52fe305c9c3fe424c52a7 Mon Sep 17 00:00:00 2001
From: Christian Marangi <ansuelsmth@gmail.com>
Date: Fri, 8 Dec 2023 15:51:52 +0100
Subject: [PATCH 05/13] net: phy: at803x: move specific DT option for at8031 to
specific probe
Move specific DT options for at8031 to specific probe to tidy things up
and make at803x_parse_dt more generic.
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
Reviewed-by: Andrew Lunn <andrew@lunn.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/phy/at803x.c | 55 ++++++++++++++++++++++------------------
1 file changed, 31 insertions(+), 24 deletions(-)
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -825,30 +825,6 @@ static int at803x_parse_dt(struct phy_de
}
}
- /* Only supported on AR8031/AR8033, the AR8030/AR8035 use strapping
- * options.
- */
- if (phydev->drv->phy_id == ATH8031_PHY_ID) {
- if (of_property_read_bool(node, "qca,keep-pll-enabled"))
- priv->flags |= AT803X_KEEP_PLL_ENABLED;
-
- ret = at8031_register_regulators(phydev);
- if (ret < 0)
- return ret;
-
- ret = devm_regulator_get_enable_optional(&phydev->mdio.dev,
- "vddio");
- if (ret) {
- phydev_err(phydev, "failed to get VDDIO regulator\n");
- return ret;
- }
-
- /* Only AR8031/8033 support 1000Base-X for SFP modules */
- ret = phy_sfp_probe(phydev, &at803x_sfp_ops);
- if (ret < 0)
- return ret;
- }
-
return 0;
}
@@ -1582,6 +1558,30 @@ static int at803x_cable_test_start(struc
return 0;
}
+static int at8031_parse_dt(struct phy_device *phydev)
+{
+ struct device_node *node = phydev->mdio.dev.of_node;
+ struct at803x_priv *priv = phydev->priv;
+ int ret;
+
+ if (of_property_read_bool(node, "qca,keep-pll-enabled"))
+ priv->flags |= AT803X_KEEP_PLL_ENABLED;
+
+ ret = at8031_register_regulators(phydev);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_regulator_get_enable_optional(&phydev->mdio.dev,
+ "vddio");
+ if (ret) {
+ phydev_err(phydev, "failed to get VDDIO regulator\n");
+ return ret;
+ }
+
+ /* Only AR8031/8033 support 1000Base-X for SFP modules */
+ return phy_sfp_probe(phydev, &at803x_sfp_ops);
+}
+
static int at8031_probe(struct phy_device *phydev)
{
int ret;
@@ -1590,6 +1590,13 @@ static int at8031_probe(struct phy_devic
if (ret)
return ret;
+ /* Only supported on AR8031/AR8033, the AR8030/AR8035 use strapping
+ * options.
+ */
+ ret = at8031_parse_dt(phydev);
+ if (ret)
+ return ret;
+
/* Disable WoL in 1588 register which is enabled
* by default
*/


@ -0,0 +1,78 @@
From 25d2ba94005fac18fe68878cddff59a67e115554 Mon Sep 17 00:00:00 2001
From: Christian Marangi <ansuelsmth@gmail.com>
Date: Fri, 8 Dec 2023 15:51:53 +0100
Subject: [PATCH 06/13] net: phy: at803x: move specific at8031 probe mode check
to dedicated probe
Move specific at8031 probe mode check to dedicated probe to make
at803x_probe more generic and keep code tidy.
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
Reviewed-by: Andrew Lunn <andrew@lunn.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/phy/at803x.c | 39 +++++++++++++++++++--------------------
1 file changed, 19 insertions(+), 20 deletions(-)
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -844,26 +844,6 @@ static int at803x_probe(struct phy_devic
if (ret)
return ret;
- if (phydev->drv->phy_id == ATH8031_PHY_ID) {
- int ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG);
- int mode_cfg;
-
- if (ccr < 0)
- return ccr;
- mode_cfg = ccr & AT803X_MODE_CFG_MASK;
-
- switch (mode_cfg) {
- case AT803X_MODE_CFG_BX1000_RGMII_50OHM:
- case AT803X_MODE_CFG_BX1000_RGMII_75OHM:
- priv->is_1000basex = true;
- fallthrough;
- case AT803X_MODE_CFG_FX100_RGMII_50OHM:
- case AT803X_MODE_CFG_FX100_RGMII_75OHM:
- priv->is_fiber = true;
- break;
- }
- }
-
return 0;
}
@@ -1584,6 +1564,9 @@ static int at8031_parse_dt(struct phy_de
static int at8031_probe(struct phy_device *phydev)
{
+ struct at803x_priv *priv = phydev->priv;
+ int mode_cfg;
+ int ccr;
int ret;
ret = at803x_probe(phydev);
@@ -1597,6 +1580,22 @@ static int at8031_probe(struct phy_devic
if (ret)
return ret;
+ ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG);
+ if (ccr < 0)
+ return ccr;
+ mode_cfg = ccr & AT803X_MODE_CFG_MASK;
+
+ switch (mode_cfg) {
+ case AT803X_MODE_CFG_BX1000_RGMII_50OHM:
+ case AT803X_MODE_CFG_BX1000_RGMII_75OHM:
+ priv->is_1000basex = true;
+ fallthrough;
+ case AT803X_MODE_CFG_FX100_RGMII_50OHM:
+ case AT803X_MODE_CFG_FX100_RGMII_75OHM:
+ priv->is_fiber = true;
+ break;
+ }
+
/* Disable WoL in 1588 register which is enabled
* by default
*/


@ -0,0 +1,86 @@
From 3ae3bc426eaf57ca8f53d75777d9a5ef779bc7b7 Mon Sep 17 00:00:00 2001
From: Christian Marangi <ansuelsmth@gmail.com>
Date: Fri, 8 Dec 2023 15:51:54 +0100
Subject: [PATCH 07/13] net: phy: at803x: move specific at8031 config_init to
dedicated function
Move specific at8031 config_init to dedicated function to make
at803x_config_init more generic and tidy things up.
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
Reviewed-by: Andrew Lunn <andrew@lunn.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/phy/at803x.c | 45 ++++++++++++++++++++++------------------
1 file changed, 25 insertions(+), 20 deletions(-)
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -951,27 +951,8 @@ static int at803x_hibernation_mode_confi
static int at803x_config_init(struct phy_device *phydev)
{
- struct at803x_priv *priv = phydev->priv;
int ret;
- if (phydev->drv->phy_id == ATH8031_PHY_ID) {
- /* Some bootloaders leave the fiber page selected.
- * Switch to the appropriate page (fiber or copper), as otherwise we
- * read the PHY capabilities from the wrong page.
- */
- phy_lock_mdio_bus(phydev);
- ret = at803x_write_page(phydev,
- priv->is_fiber ? AT803X_PAGE_FIBER :
- AT803X_PAGE_COPPER);
- phy_unlock_mdio_bus(phydev);
- if (ret)
- return ret;
-
- ret = at8031_pll_config(phydev);
- if (ret < 0)
- return ret;
- }
-
/* The RX and TX delay default is:
* after HW reset: RX delay enabled and TX delay disabled
* after SW reset: RX delay enabled, while TX delay retains the
@@ -1604,6 +1585,30 @@ static int at8031_probe(struct phy_devic
AT803X_WOL_EN, 0);
}
+static int at8031_config_init(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+ int ret;
+
+ /* Some bootloaders leave the fiber page selected.
+ * Switch to the appropriate page (fiber or copper), as otherwise we
+ * read the PHY capabilities from the wrong page.
+ */
+ phy_lock_mdio_bus(phydev);
+ ret = at803x_write_page(phydev,
+ priv->is_fiber ? AT803X_PAGE_FIBER :
+ AT803X_PAGE_COPPER);
+ phy_unlock_mdio_bus(phydev);
+ if (ret)
+ return ret;
+
+ ret = at8031_pll_config(phydev);
+ if (ret < 0)
+ return ret;
+
+ return at803x_config_init(phydev);
+}
+
static int qca83xx_config_init(struct phy_device *phydev)
{
u8 switch_revision;
@@ -2113,7 +2118,7 @@ static struct phy_driver at803x_driver[]
.name = "Qualcomm Atheros AR8031/AR8033",
.flags = PHY_POLL_CABLE_TEST,
.probe = at8031_probe,
- .config_init = at803x_config_init,
+ .config_init = at8031_config_init,
.config_aneg = at803x_config_aneg,
.soft_reset = genphy_soft_reset,
.set_wol = at803x_set_wol,


@ -0,0 +1,92 @@
From 27b89c9dc1b0393090d68d651b82f30ad2696baa Mon Sep 17 00:00:00 2001
From: Christian Marangi <ansuelsmth@gmail.com>
Date: Fri, 8 Dec 2023 15:51:55 +0100
Subject: [PATCH 08/13] net: phy: at803x: move specific at8031 WOL bits to
dedicated function
Move specific at8031 WOL enable/disable to dedicated function to make
at803x_set_wol more generic.
This is needed in preparation for the PHY driver split as qca8081 shares
the same function to toggle WOL settings.
In this new implementation the WOL module in at8031 is enabled after the
generic interrupt is set up. This should not cause any problem as the
WOL_INT has a separate implementation and only relies on MAC bits.
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/phy/at803x.c | 42 ++++++++++++++++++++++++----------------
1 file changed, 25 insertions(+), 17 deletions(-)
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -466,27 +466,11 @@ static int at803x_set_wol(struct phy_dev
phy_write_mmd(phydev, MDIO_MMD_PCS, offsets[i],
mac[(i * 2) + 1] | (mac[(i * 2)] << 8));
- /* Enable WOL function for 1588 */
- if (phydev->drv->phy_id == ATH8031_PHY_ID) {
- ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
- AT803X_PHY_MMD3_WOL_CTRL,
- 0, AT803X_WOL_EN);
- if (ret)
- return ret;
- }
/* Enable WOL interrupt */
ret = phy_modify(phydev, AT803X_INTR_ENABLE, 0, AT803X_INTR_ENABLE_WOL);
if (ret)
return ret;
} else {
- /* Disable WoL function for 1588 */
- if (phydev->drv->phy_id == ATH8031_PHY_ID) {
- ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
- AT803X_PHY_MMD3_WOL_CTRL,
- AT803X_WOL_EN, 0);
- if (ret)
- return ret;
- }
/* Disable WOL interrupt */
ret = phy_modify(phydev, AT803X_INTR_ENABLE, AT803X_INTR_ENABLE_WOL, 0);
if (ret)
@@ -1609,6 +1593,30 @@ static int at8031_config_init(struct phy
return at803x_config_init(phydev);
}
+static int at8031_set_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+{
+ int ret;
+
+ /* First setup MAC address and enable WOL interrupt */
+ ret = at803x_set_wol(phydev, wol);
+ if (ret)
+ return ret;
+
+ if (wol->wolopts & WAKE_MAGIC)
+ /* Enable WOL function for 1588 */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
+ AT803X_PHY_MMD3_WOL_CTRL,
+ 0, AT803X_WOL_EN);
+ else
+ /* Disable WoL function for 1588 */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
+ AT803X_PHY_MMD3_WOL_CTRL,
+ AT803X_WOL_EN, 0);
+
+ return ret;
+}
+
static int qca83xx_config_init(struct phy_device *phydev)
{
u8 switch_revision;
@@ -2121,7 +2129,7 @@ static struct phy_driver at803x_driver[]
.config_init = at8031_config_init,
.config_aneg = at803x_config_aneg,
.soft_reset = genphy_soft_reset,
- .set_wol = at803x_set_wol,
+ .set_wol = at8031_set_wol,
.get_wol = at803x_get_wol,
.suspend = at803x_suspend,
.resume = at803x_resume,


@ -0,0 +1,78 @@
From 30dd62191d3dd97c08f7f9dc9ce77ffab457e4fb Mon Sep 17 00:00:00 2001
From: Christian Marangi <ansuelsmth@gmail.com>
Date: Fri, 8 Dec 2023 15:51:56 +0100
Subject: [PATCH 09/13] net: phy: at803x: move specific at8031 config_intr to
dedicated function
Move specific at8031 config_intr bits to dedicated function to make
at803x_config_intr more generic.
This is needed in preparation for the PHY driver split as qca8081 shares the
same function to setup interrupts.
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/phy/at803x.c | 30 ++++++++++++++++++++++++------
1 file changed, 24 insertions(+), 6 deletions(-)
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -990,7 +990,6 @@ static int at803x_ack_interrupt(struct p
static int at803x_config_intr(struct phy_device *phydev)
{
- struct at803x_priv *priv = phydev->priv;
int err;
int value;
@@ -1007,10 +1006,6 @@ static int at803x_config_intr(struct phy
value |= AT803X_INTR_ENABLE_DUPLEX_CHANGED;
value |= AT803X_INTR_ENABLE_LINK_FAIL;
value |= AT803X_INTR_ENABLE_LINK_SUCCESS;
- if (priv->is_fiber) {
- value |= AT803X_INTR_ENABLE_LINK_FAIL_BX;
- value |= AT803X_INTR_ENABLE_LINK_SUCCESS_BX;
- }
err = phy_write(phydev, AT803X_INTR_ENABLE, value);
} else {
@@ -1617,6 +1612,29 @@ static int at8031_set_wol(struct phy_dev
return ret;
}
+static int at8031_config_intr(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+ int err, value = 0;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED &&
+ priv->is_fiber) {
+ /* Clear any pending interrupts */
+ err = at803x_ack_interrupt(phydev);
+ if (err)
+ return err;
+
+ value |= AT803X_INTR_ENABLE_LINK_FAIL_BX;
+ value |= AT803X_INTR_ENABLE_LINK_SUCCESS_BX;
+
+ err = phy_set_bits(phydev, AT803X_INTR_ENABLE, value);
+ if (err)
+ return err;
+ }
+
+ return at803x_config_intr(phydev);
+}
+
static int qca83xx_config_init(struct phy_device *phydev)
{
u8 switch_revision;
@@ -2137,7 +2155,7 @@ static struct phy_driver at803x_driver[]
.write_page = at803x_write_page,
.get_features = at803x_get_features,
.read_status = at803x_read_status,
- .config_intr = at803x_config_intr,
+ .config_intr = at8031_config_intr,
.handle_interrupt = at803x_handle_interrupt,
.get_tunable = at803x_get_tunable,
.set_tunable = at803x_set_tunable,


@ -0,0 +1,78 @@
From a5ab9d8e7ae0da8328ac1637a9755311508dc8ab Mon Sep 17 00:00:00 2001
From: Christian Marangi <ansuelsmth@gmail.com>
Date: Fri, 8 Dec 2023 15:51:57 +0100
Subject: [PATCH 10/13] net: phy: at803x: make at8031 related DT function names
more specific
Rename the at8031 related DT function names to more specific names,
indicating they are related only to at8031 and not to the generic
at803x PHY family.
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/phy/at803x.c | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -583,7 +583,7 @@ static int at803x_resume(struct phy_devi
return phy_modify(phydev, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE, 0);
}
-static int at803x_rgmii_reg_set_voltage_sel(struct regulator_dev *rdev,
+static int at8031_rgmii_reg_set_voltage_sel(struct regulator_dev *rdev,
unsigned int selector)
{
struct phy_device *phydev = rdev_get_drvdata(rdev);
@@ -596,7 +596,7 @@ static int at803x_rgmii_reg_set_voltage_
AT803X_DEBUG_RGMII_1V8, 0);
}
-static int at803x_rgmii_reg_get_voltage_sel(struct regulator_dev *rdev)
+static int at8031_rgmii_reg_get_voltage_sel(struct regulator_dev *rdev)
{
struct phy_device *phydev = rdev_get_drvdata(rdev);
int val;
@@ -610,8 +610,8 @@ static int at803x_rgmii_reg_get_voltage_
static const struct regulator_ops vddio_regulator_ops = {
.list_voltage = regulator_list_voltage_table,
- .set_voltage_sel = at803x_rgmii_reg_set_voltage_sel,
- .get_voltage_sel = at803x_rgmii_reg_get_voltage_sel,
+ .set_voltage_sel = at8031_rgmii_reg_set_voltage_sel,
+ .get_voltage_sel = at8031_rgmii_reg_get_voltage_sel,
};
static const unsigned int vddio_voltage_table[] = {
@@ -666,7 +666,7 @@ static int at8031_register_regulators(st
return 0;
}
-static int at803x_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
+static int at8031_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
{
struct phy_device *phydev = upstream;
__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_support);
@@ -710,10 +710,10 @@ static int at803x_sfp_insert(void *upstr
return 0;
}
-static const struct sfp_upstream_ops at803x_sfp_ops = {
+static const struct sfp_upstream_ops at8031_sfp_ops = {
.attach = phy_sfp_attach,
.detach = phy_sfp_detach,
- .module_insert = at803x_sfp_insert,
+ .module_insert = at8031_sfp_insert,
};
static int at803x_parse_dt(struct phy_device *phydev)
@@ -1519,7 +1519,7 @@ static int at8031_parse_dt(struct phy_de
}
/* Only AR8031/8033 support 1000Base-X for SFP modules */
- return phy_sfp_probe(phydev, &at803x_sfp_ops);
+ return phy_sfp_probe(phydev, &at8031_sfp_ops);
}
static int at8031_probe(struct phy_device *phydev)


@ -0,0 +1,297 @@
From f932a6dc8bae0dae9645b5b1b4c65aed8a8acb2a Mon Sep 17 00:00:00 2001
From: Christian Marangi <ansuelsmth@gmail.com>
Date: Fri, 8 Dec 2023 15:51:58 +0100
Subject: [PATCH 11/13] net: phy: at803x: move at8031 functions in dedicated
section
Move the at8031 functions into a dedicated section with dedicated at8031
parse_dt and probe functions.
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/phy/at803x.c | 266 +++++++++++++++++++--------------------
1 file changed, 133 insertions(+), 133 deletions(-)
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -583,139 +583,6 @@ static int at803x_resume(struct phy_devi
return phy_modify(phydev, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE, 0);
}
-static int at8031_rgmii_reg_set_voltage_sel(struct regulator_dev *rdev,
- unsigned int selector)
-{
- struct phy_device *phydev = rdev_get_drvdata(rdev);
-
- if (selector)
- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
- 0, AT803X_DEBUG_RGMII_1V8);
- else
- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
- AT803X_DEBUG_RGMII_1V8, 0);
-}
-
-static int at8031_rgmii_reg_get_voltage_sel(struct regulator_dev *rdev)
-{
- struct phy_device *phydev = rdev_get_drvdata(rdev);
- int val;
-
- val = at803x_debug_reg_read(phydev, AT803X_DEBUG_REG_1F);
- if (val < 0)
- return val;
-
- return (val & AT803X_DEBUG_RGMII_1V8) ? 1 : 0;
-}
-
-static const struct regulator_ops vddio_regulator_ops = {
- .list_voltage = regulator_list_voltage_table,
- .set_voltage_sel = at8031_rgmii_reg_set_voltage_sel,
- .get_voltage_sel = at8031_rgmii_reg_get_voltage_sel,
-};
-
-static const unsigned int vddio_voltage_table[] = {
- 1500000,
- 1800000,
-};
-
-static const struct regulator_desc vddio_desc = {
- .name = "vddio",
- .of_match = of_match_ptr("vddio-regulator"),
- .n_voltages = ARRAY_SIZE(vddio_voltage_table),
- .volt_table = vddio_voltage_table,
- .ops = &vddio_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
-};
-
-static const struct regulator_ops vddh_regulator_ops = {
-};
-
-static const struct regulator_desc vddh_desc = {
- .name = "vddh",
- .of_match = of_match_ptr("vddh-regulator"),
- .n_voltages = 1,
- .fixed_uV = 2500000,
- .ops = &vddh_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
-};
-
-static int at8031_register_regulators(struct phy_device *phydev)
-{
- struct at803x_priv *priv = phydev->priv;
- struct device *dev = &phydev->mdio.dev;
- struct regulator_config config = { };
-
- config.dev = dev;
- config.driver_data = phydev;
-
- priv->vddio_rdev = devm_regulator_register(dev, &vddio_desc, &config);
- if (IS_ERR(priv->vddio_rdev)) {
- phydev_err(phydev, "failed to register VDDIO regulator\n");
- return PTR_ERR(priv->vddio_rdev);
- }
-
- priv->vddh_rdev = devm_regulator_register(dev, &vddh_desc, &config);
- if (IS_ERR(priv->vddh_rdev)) {
- phydev_err(phydev, "failed to register VDDH regulator\n");
- return PTR_ERR(priv->vddh_rdev);
- }
-
- return 0;
-}
-
-static int at8031_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
-{
- struct phy_device *phydev = upstream;
- __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_support);
- __ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_support);
- DECLARE_PHY_INTERFACE_MASK(interfaces);
- phy_interface_t iface;
-
- linkmode_zero(phy_support);
- phylink_set(phy_support, 1000baseX_Full);
- phylink_set(phy_support, 1000baseT_Full);
- phylink_set(phy_support, Autoneg);
- phylink_set(phy_support, Pause);
- phylink_set(phy_support, Asym_Pause);
-
- linkmode_zero(sfp_support);
- sfp_parse_support(phydev->sfp_bus, id, sfp_support, interfaces);
- /* Some modules support 10G modes as well as others we support.
- * Mask out non-supported modes so the correct interface is picked.
- */
- linkmode_and(sfp_support, phy_support, sfp_support);
-
- if (linkmode_empty(sfp_support)) {
- dev_err(&phydev->mdio.dev, "incompatible SFP module inserted\n");
- return -EINVAL;
- }
-
- iface = sfp_select_interface(phydev->sfp_bus, sfp_support);
-
- /* Only 1000Base-X is supported by AR8031/8033 as the downstream SerDes
- * interface for use with SFP modules.
- * However, some copper modules detected as having a preferred SGMII
- * interface do default to and function in 1000Base-X mode, so just
- * print a warning and allow such modules, as they may have some chance
- * of working.
- */
- if (iface == PHY_INTERFACE_MODE_SGMII)
- dev_warn(&phydev->mdio.dev, "module may not function if 1000Base-X not supported\n");
- else if (iface != PHY_INTERFACE_MODE_1000BASEX)
- return -EINVAL;
-
- return 0;
-}
-
-static const struct sfp_upstream_ops at8031_sfp_ops = {
- .attach = phy_sfp_attach,
- .detach = phy_sfp_detach,
- .module_insert = at8031_sfp_insert,
-};
-
static int at803x_parse_dt(struct phy_device *phydev)
{
struct device_node *node = phydev->mdio.dev.of_node;
@@ -1498,6 +1365,139 @@ static int at803x_cable_test_start(struc
return 0;
}
+static int at8031_rgmii_reg_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ struct phy_device *phydev = rdev_get_drvdata(rdev);
+
+ if (selector)
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
+ 0, AT803X_DEBUG_RGMII_1V8);
+ else
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
+ AT803X_DEBUG_RGMII_1V8, 0);
+}
+
+static int at8031_rgmii_reg_get_voltage_sel(struct regulator_dev *rdev)
+{
+ struct phy_device *phydev = rdev_get_drvdata(rdev);
+ int val;
+
+ val = at803x_debug_reg_read(phydev, AT803X_DEBUG_REG_1F);
+ if (val < 0)
+ return val;
+
+ return (val & AT803X_DEBUG_RGMII_1V8) ? 1 : 0;
+}
+
+static const struct regulator_ops vddio_regulator_ops = {
+ .list_voltage = regulator_list_voltage_table,
+ .set_voltage_sel = at8031_rgmii_reg_set_voltage_sel,
+ .get_voltage_sel = at8031_rgmii_reg_get_voltage_sel,
+};
+
+static const unsigned int vddio_voltage_table[] = {
+ 1500000,
+ 1800000,
+};
+
+static const struct regulator_desc vddio_desc = {
+ .name = "vddio",
+ .of_match = of_match_ptr("vddio-regulator"),
+ .n_voltages = ARRAY_SIZE(vddio_voltage_table),
+ .volt_table = vddio_voltage_table,
+ .ops = &vddio_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+};
+
+static const struct regulator_ops vddh_regulator_ops = {
+};
+
+static const struct regulator_desc vddh_desc = {
+ .name = "vddh",
+ .of_match = of_match_ptr("vddh-regulator"),
+ .n_voltages = 1,
+ .fixed_uV = 2500000,
+ .ops = &vddh_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+};
+
+static int at8031_register_regulators(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+ struct device *dev = &phydev->mdio.dev;
+ struct regulator_config config = { };
+
+ config.dev = dev;
+ config.driver_data = phydev;
+
+ priv->vddio_rdev = devm_regulator_register(dev, &vddio_desc, &config);
+ if (IS_ERR(priv->vddio_rdev)) {
+ phydev_err(phydev, "failed to register VDDIO regulator\n");
+ return PTR_ERR(priv->vddio_rdev);
+ }
+
+ priv->vddh_rdev = devm_regulator_register(dev, &vddh_desc, &config);
+ if (IS_ERR(priv->vddh_rdev)) {
+ phydev_err(phydev, "failed to register VDDH regulator\n");
+ return PTR_ERR(priv->vddh_rdev);
+ }
+
+ return 0;
+}
+
+static int at8031_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
+{
+ struct phy_device *phydev = upstream;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_support);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_support);
+ DECLARE_PHY_INTERFACE_MASK(interfaces);
+ phy_interface_t iface;
+
+ linkmode_zero(phy_support);
+ phylink_set(phy_support, 1000baseX_Full);
+ phylink_set(phy_support, 1000baseT_Full);
+ phylink_set(phy_support, Autoneg);
+ phylink_set(phy_support, Pause);
+ phylink_set(phy_support, Asym_Pause);
+
+ linkmode_zero(sfp_support);
+ sfp_parse_support(phydev->sfp_bus, id, sfp_support, interfaces);
+ /* Some modules support 10G modes as well as others we support.
+ * Mask out non-supported modes so the correct interface is picked.
+ */
+ linkmode_and(sfp_support, phy_support, sfp_support);
+
+ if (linkmode_empty(sfp_support)) {
+ dev_err(&phydev->mdio.dev, "incompatible SFP module inserted\n");
+ return -EINVAL;
+ }
+
+ iface = sfp_select_interface(phydev->sfp_bus, sfp_support);
+
+ /* Only 1000Base-X is supported by AR8031/8033 as the downstream SerDes
+ * interface for use with SFP modules.
+ * However, some copper modules detected as having a preferred SGMII
+ * interface do default to and function in 1000Base-X mode, so just
+ * print a warning and allow such modules, as they may have some chance
+ * of working.
+ */
+ if (iface == PHY_INTERFACE_MODE_SGMII)
+ dev_warn(&phydev->mdio.dev, "module may not function if 1000Base-X not supported\n");
+ else if (iface != PHY_INTERFACE_MODE_1000BASEX)
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct sfp_upstream_ops at8031_sfp_ops = {
+ .attach = phy_sfp_attach,
+ .detach = phy_sfp_detach,
+ .module_insert = at8031_sfp_insert,
+};
+
static int at8031_parse_dt(struct phy_device *phydev)
{
struct device_node *node = phydev->mdio.dev.of_node;
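The two VDDIO regulator ops above reduce to toggling one bit of debug register 0x1F. Below is a minimal user-space sketch of that mapping, not part of the patch; the AT803X_DEBUG_RGMII_1V8 bit position is an assumption taken from the driver headers and is used purely for illustration.

/* Illustrative sketch, not part of the patch: selector <-> RGMII 1.8V bit
 * <-> microvolts, mirroring at8031_rgmii_reg_{set,get}_voltage_sel().
 * The bit position is an assumption; the driver touches it in debug
 * register 0x1F through at803x_debug_reg_mask()/at803x_debug_reg_read().
 */
#include <stdint.h>
#include <stdio.h>

#define AT803X_DEBUG_RGMII_1V8	(1U << 3)	/* assumed bit position */

static const unsigned int vddio_voltage_table[] = { 1500000, 1800000 };

/* selector 1 -> set the 1.8V bit, selector 0 -> clear it (1.5V) */
static uint16_t set_voltage_sel(uint16_t reg, int selector)
{
	if (selector)
		return reg | AT803X_DEBUG_RGMII_1V8;
	return reg & ~AT803X_DEBUG_RGMII_1V8;
}

/* bit set -> selector 1 (1.8V), otherwise selector 0 (1.5V) */
static int get_voltage_sel(uint16_t reg)
{
	return (reg & AT803X_DEBUG_RGMII_1V8) ? 1 : 0;
}

int main(void)
{
	uint16_t reg = 0;
	int sel;

	reg = set_voltage_sel(reg, 1);		/* request 1.8V */
	sel = get_voltage_sel(reg);
	printf("selector %d -> %u uV\n", sel, vddio_voltage_table[sel]);

	reg = set_voltage_sel(reg, 0);		/* back to 1.5V */
	sel = get_voltage_sel(reg);
	printf("selector %d -> %u uV\n", sel, vddio_voltage_table[sel]);

	return 0;
}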

View file

@ -0,0 +1,114 @@
From 21a2802a8365cfa82cc02187c1f95136d85592ad Mon Sep 17 00:00:00 2001
From: Christian Marangi <ansuelsmth@gmail.com>
Date: Fri, 8 Dec 2023 15:51:59 +0100
Subject: [PATCH 12/13] net: phy: at803x: move at8035 specific DT parse to
dedicated probe
Move the at8035-specific DT parsing for clock out frequency to a dedicated
probe to make the at803x probe function more generic.
This is to tidy the code; no behaviour change is intended.
The detection logic is changed: we check whether the clk 25m mask is set,
and if it is not zero we assume the qca,clk-out-frequency property is set.
The property is checked in the generic at803x_parse_dt called by
at803x_probe.
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
Reviewed-by: Andrew Lunn <andrew@lunn.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/phy/at803x.c | 60 +++++++++++++++++++++++++++-------------
1 file changed, 41 insertions(+), 19 deletions(-)
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -638,23 +638,6 @@ static int at803x_parse_dt(struct phy_de
priv->clk_25m_reg |= FIELD_PREP(AT803X_CLK_OUT_MASK, sel);
priv->clk_25m_mask |= AT803X_CLK_OUT_MASK;
-
- /* Fixup for the AR8030/AR8035. This chip has another mask and
- * doesn't support the DSP reference. Eg. the lowest bit of the
- * mask. The upper two bits select the same frequencies. Mask
- * the lowest bit here.
- *
- * Warning:
- * There was no datasheet for the AR8030 available so this is
- * just a guess. But the AR8035 is listed as pin compatible
- * to the AR8030 so there might be a good chance it works on
- * the AR8030 too.
- */
- if (phydev->drv->phy_id == ATH8030_PHY_ID ||
- phydev->drv->phy_id == ATH8035_PHY_ID) {
- priv->clk_25m_reg &= AT8035_CLK_OUT_MASK;
- priv->clk_25m_mask &= AT8035_CLK_OUT_MASK;
- }
}
ret = of_property_read_u32(node, "qca,clk-out-strength", &strength);
@@ -1635,6 +1618,45 @@ static int at8031_config_intr(struct phy
return at803x_config_intr(phydev);
}
+static int at8035_parse_dt(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+
+ /* Mask is set by the generic at803x_parse_dt
+ * if property is set. Assume property is set
+ * with the mask not zero.
+ */
+ if (priv->clk_25m_mask) {
+ /* Fixup for the AR8030/AR8035. This chip has another mask and
+ * doesn't support the DSP reference. Eg. the lowest bit of the
+ * mask. The upper two bits select the same frequencies. Mask
+ * the lowest bit here.
+ *
+ * Warning:
+ * There was no datasheet for the AR8030 available so this is
+ * just a guess. But the AR8035 is listed as pin compatible
+ * to the AR8030 so there might be a good chance it works on
+ * the AR8030 too.
+ */
+ priv->clk_25m_reg &= AT8035_CLK_OUT_MASK;
+ priv->clk_25m_mask &= AT8035_CLK_OUT_MASK;
+ }
+
+ return 0;
+}
+
+/* AR8030 and AR8035 shared the same special mask for clk_25m */
+static int at8035_probe(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = at803x_probe(phydev);
+ if (ret)
+ return ret;
+
+ return at8035_parse_dt(phydev);
+}
+
static int qca83xx_config_init(struct phy_device *phydev)
{
u8 switch_revision;
@@ -2107,7 +2129,7 @@ static struct phy_driver at803x_driver[]
PHY_ID_MATCH_EXACT(ATH8035_PHY_ID),
.name = "Qualcomm Atheros AR8035",
.flags = PHY_POLL_CABLE_TEST,
- .probe = at803x_probe,
+ .probe = at8035_probe,
.config_aneg = at803x_config_aneg,
.config_init = at803x_config_init,
.soft_reset = genphy_soft_reset,
@@ -2128,7 +2150,7 @@ static struct phy_driver at803x_driver[]
.phy_id = ATH8030_PHY_ID,
.name = "Qualcomm Atheros AR8030",
.phy_id_mask = AT8030_PHY_ID_MASK,
- .probe = at803x_probe,
+ .probe = at8035_probe,
.config_init = at803x_config_init,
.link_change_notify = at803x_link_change_notify,
.set_wol = at803x_set_wol,

View file

@ -0,0 +1,219 @@
From ef9df47b449e32e06501a11272809be49019bdb6 Mon Sep 17 00:00:00 2001
From: Christian Marangi <ansuelsmth@gmail.com>
Date: Fri, 8 Dec 2023 15:52:00 +0100
Subject: [PATCH 13/13] net: phy: at803x: drop specific PHY ID check from cable
test functions
Drop the specific PHY ID check from the cable test functions for at803x. This
is done to make the functions more generic. While at it, better describe what
the functions do by using more symbolic function names.
PHYs that require setting an additional reg are moved to a specific function
calling the more generic one.
cdt_start and cdt_wait_for_completion are changed to take an additional
arg to pass values specific to the PHY.
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/phy/at803x.c | 95 +++++++++++++++++++++-------------------
1 file changed, 50 insertions(+), 45 deletions(-)
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -1222,31 +1222,16 @@ static int at803x_cdt_fault_length(u16 s
return (dt * 824) / 10;
}
-static int at803x_cdt_start(struct phy_device *phydev, int pair)
+static int at803x_cdt_start(struct phy_device *phydev,
+ u32 cdt_start)
{
- u16 cdt;
-
- /* qca8081 takes the different bit 15 to enable CDT test */
- if (phydev->drv->phy_id == QCA8081_PHY_ID)
- cdt = QCA808X_CDT_ENABLE_TEST |
- QCA808X_CDT_LENGTH_UNIT |
- QCA808X_CDT_INTER_CHECK_DIS;
- else
- cdt = FIELD_PREP(AT803X_CDT_MDI_PAIR_MASK, pair) |
- AT803X_CDT_ENABLE_TEST;
-
- return phy_write(phydev, AT803X_CDT, cdt);
+ return phy_write(phydev, AT803X_CDT, cdt_start);
}
-static int at803x_cdt_wait_for_completion(struct phy_device *phydev)
+static int at803x_cdt_wait_for_completion(struct phy_device *phydev,
+ u32 cdt_en)
{
int val, ret;
- u16 cdt_en;
-
- if (phydev->drv->phy_id == QCA8081_PHY_ID)
- cdt_en = QCA808X_CDT_ENABLE_TEST;
- else
- cdt_en = AT803X_CDT_ENABLE_TEST;
/* One test run takes about 25ms */
ret = phy_read_poll_timeout(phydev, AT803X_CDT, val,
@@ -1266,11 +1251,13 @@ static int at803x_cable_test_one_pair(st
};
int ret, val;
- ret = at803x_cdt_start(phydev, pair);
+ val = FIELD_PREP(AT803X_CDT_MDI_PAIR_MASK, pair) |
+ AT803X_CDT_ENABLE_TEST;
+ ret = at803x_cdt_start(phydev, val);
if (ret)
return ret;
- ret = at803x_cdt_wait_for_completion(phydev);
+ ret = at803x_cdt_wait_for_completion(phydev, AT803X_CDT_ENABLE_TEST);
if (ret)
return ret;
@@ -1292,19 +1279,11 @@ static int at803x_cable_test_one_pair(st
}
static int at803x_cable_test_get_status(struct phy_device *phydev,
- bool *finished)
+ bool *finished, unsigned long pair_mask)
{
- unsigned long pair_mask;
int retries = 20;
int pair, ret;
- if (phydev->phy_id == ATH9331_PHY_ID ||
- phydev->phy_id == ATH8032_PHY_ID ||
- phydev->phy_id == QCA9561_PHY_ID)
- pair_mask = 0x3;
- else
- pair_mask = 0xf;
-
*finished = false;
/* According to the datasheet the CDT can be performed when
@@ -1331,7 +1310,7 @@ static int at803x_cable_test_get_status(
return 0;
}
-static int at803x_cable_test_start(struct phy_device *phydev)
+static void at803x_cable_test_autoneg(struct phy_device *phydev)
{
/* Enable auto-negotiation, but advertise no capabilities, no link
* will be established. A restart of the auto-negotiation is not
@@ -1339,11 +1318,11 @@ static int at803x_cable_test_start(struc
*/
phy_write(phydev, MII_BMCR, BMCR_ANENABLE);
phy_write(phydev, MII_ADVERTISE, ADVERTISE_CSMA);
- if (phydev->phy_id != ATH9331_PHY_ID &&
- phydev->phy_id != ATH8032_PHY_ID &&
- phydev->phy_id != QCA9561_PHY_ID)
- phy_write(phydev, MII_CTRL1000, 0);
+}
+static int at803x_cable_test_start(struct phy_device *phydev)
+{
+ at803x_cable_test_autoneg(phydev);
/* we do all the (time consuming) work later */
return 0;
}
@@ -1618,6 +1597,29 @@ static int at8031_config_intr(struct phy
return at803x_config_intr(phydev);
}
+/* AR8031 and AR8035 share the same cable test get status reg */
+static int at8031_cable_test_get_status(struct phy_device *phydev,
+ bool *finished)
+{
+ return at803x_cable_test_get_status(phydev, finished, 0xf);
+}
+
+/* AR8031 and AR8035 share the same cable test start logic */
+static int at8031_cable_test_start(struct phy_device *phydev)
+{
+ at803x_cable_test_autoneg(phydev);
+ phy_write(phydev, MII_CTRL1000, 0);
+ /* we do all the (time consuming) work later */
+ return 0;
+}
+
+/* AR8032, AR9331 and QCA9561 share the same cable test get status reg */
+static int at8032_cable_test_get_status(struct phy_device *phydev,
+ bool *finished)
+{
+ return at803x_cable_test_get_status(phydev, finished, 0x3);
+}
+
static int at8035_parse_dt(struct phy_device *phydev)
{
struct at803x_priv *priv = phydev->priv;
@@ -2041,11 +2043,14 @@ static int qca808x_cable_test_get_status
*finished = false;
- ret = at803x_cdt_start(phydev, 0);
+ val = QCA808X_CDT_ENABLE_TEST |
+ QCA808X_CDT_LENGTH_UNIT |
+ QCA808X_CDT_INTER_CHECK_DIS;
+ ret = at803x_cdt_start(phydev, val);
if (ret)
return ret;
- ret = at803x_cdt_wait_for_completion(phydev);
+ ret = at803x_cdt_wait_for_completion(phydev, QCA808X_CDT_ENABLE_TEST);
if (ret)
return ret;
@@ -2143,8 +2148,8 @@ static struct phy_driver at803x_driver[]
.handle_interrupt = at803x_handle_interrupt,
.get_tunable = at803x_get_tunable,
.set_tunable = at803x_set_tunable,
- .cable_test_start = at803x_cable_test_start,
- .cable_test_get_status = at803x_cable_test_get_status,
+ .cable_test_start = at8031_cable_test_start,
+ .cable_test_get_status = at8031_cable_test_get_status,
}, {
/* Qualcomm Atheros AR8030 */
.phy_id = ATH8030_PHY_ID,
@@ -2181,8 +2186,8 @@ static struct phy_driver at803x_driver[]
.handle_interrupt = at803x_handle_interrupt,
.get_tunable = at803x_get_tunable,
.set_tunable = at803x_set_tunable,
- .cable_test_start = at803x_cable_test_start,
- .cable_test_get_status = at803x_cable_test_get_status,
+ .cable_test_start = at8031_cable_test_start,
+ .cable_test_get_status = at8031_cable_test_get_status,
}, {
/* Qualcomm Atheros AR8032 */
PHY_ID_MATCH_EXACT(ATH8032_PHY_ID),
@@ -2197,7 +2202,7 @@ static struct phy_driver at803x_driver[]
.config_intr = at803x_config_intr,
.handle_interrupt = at803x_handle_interrupt,
.cable_test_start = at803x_cable_test_start,
- .cable_test_get_status = at803x_cable_test_get_status,
+ .cable_test_get_status = at8032_cable_test_get_status,
}, {
/* ATHEROS AR9331 */
PHY_ID_MATCH_EXACT(ATH9331_PHY_ID),
@@ -2210,7 +2215,7 @@ static struct phy_driver at803x_driver[]
.config_intr = at803x_config_intr,
.handle_interrupt = at803x_handle_interrupt,
.cable_test_start = at803x_cable_test_start,
- .cable_test_get_status = at803x_cable_test_get_status,
+ .cable_test_get_status = at8032_cable_test_get_status,
.read_status = at803x_read_status,
.soft_reset = genphy_soft_reset,
.config_aneg = at803x_config_aneg,
@@ -2226,7 +2231,7 @@ static struct phy_driver at803x_driver[]
.config_intr = at803x_config_intr,
.handle_interrupt = at803x_handle_interrupt,
.cable_test_start = at803x_cable_test_start,
- .cable_test_get_status = at803x_cable_test_get_status,
+ .cable_test_get_status = at8032_cable_test_get_status,
.read_status = at803x_read_status,
.soft_reset = genphy_soft_reset,
.config_aneg = at803x_config_aneg,
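With the PHY ID checks gone, each caller now builds its own CDT control word and hands it to the shared at803x_cdt_start(). A standalone sketch of the two compositions, not part of the patch; the AT803X pair-mask/enable bit positions are assumptions, while the QCA808X bits match the defines visible later in this series.

/* Illustrative sketch, not part of the patch: composing the CDT word per
 * caller. A tiny stand-in for the kernel's FIELD_PREP() is used below.
 */
#include <stdint.h>
#include <stdio.h>

#define AT803X_CDT_MDI_PAIR_MASK	0x0300U	/* assumed: GENMASK(9, 8) */
#define AT803X_CDT_ENABLE_TEST		0x0001U	/* assumed: BIT(0) */

#define QCA808X_CDT_ENABLE_TEST		(1U << 15)
#define QCA808X_CDT_INTER_CHECK_DIS	(1U << 13)
#define QCA808X_CDT_LENGTH_UNIT		(1U << 10)

/* minimal FIELD_PREP(): shift 'val' into the position described by 'mask' */
static uint16_t field_prep(uint16_t mask, uint16_t val)
{
	return (val * (mask & -mask)) & mask;
}

int main(void)
{
	/* at803x_cable_test_one_pair(): select pair 2 and start the test */
	uint16_t at803x_word = field_prep(AT803X_CDT_MDI_PAIR_MASK, 2) |
			       AT803X_CDT_ENABLE_TEST;

	/* qca808x_cable_test_get_status(): different enable/config bits */
	uint16_t qca808x_word = QCA808X_CDT_ENABLE_TEST |
				QCA808X_CDT_LENGTH_UNIT |
				QCA808X_CDT_INTER_CHECK_DIS;

	printf("at803x cdt word:  0x%04x\n", at803x_word);
	printf("qca808x cdt word: 0x%04x\n", qca808x_word);
	return 0;
}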

View file

@ -0,0 +1,116 @@
From 8e732f1c6f2dc5e18f766d0f1b11df9db2dd044a Mon Sep 17 00:00:00 2001
From: Christian Marangi <ansuelsmth@gmail.com>
Date: Thu, 14 Dec 2023 01:44:31 +0100
Subject: [PATCH 1/2] net: phy: at803x: move specific qca808x config_aneg to
dedicated function
Move the qca808x-specific config_aneg to a dedicated function to permit an
easier split of the qca808x portion from the at803x driver.
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/phy/at803x.c | 66 ++++++++++++++++++++++++----------------
1 file changed, 40 insertions(+), 26 deletions(-)
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -1045,9 +1045,8 @@ static int at803x_config_mdix(struct phy
FIELD_PREP(AT803X_SFC_MDI_CROSSOVER_MODE_M, val));
}
-static int at803x_config_aneg(struct phy_device *phydev)
+static int at803x_prepare_config_aneg(struct phy_device *phydev)
{
- struct at803x_priv *priv = phydev->priv;
int ret;
ret = at803x_config_mdix(phydev, phydev->mdix_ctrl);
@@ -1064,33 +1063,22 @@ static int at803x_config_aneg(struct phy
return ret;
}
- if (priv->is_1000basex)
- return genphy_c37_config_aneg(phydev);
-
- /* Do not restart auto-negotiation by setting ret to 0 defautly,
- * when calling __genphy_config_aneg later.
- */
- ret = 0;
-
- if (phydev->drv->phy_id == QCA8081_PHY_ID) {
- int phy_ctrl = 0;
+ return 0;
+}
- /* The reg MII_BMCR also needs to be configured for force mode, the
- * genphy_config_aneg is also needed.
- */
- if (phydev->autoneg == AUTONEG_DISABLE)
- genphy_c45_pma_setup_forced(phydev);
+static int at803x_config_aneg(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+ int ret;
- if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->advertising))
- phy_ctrl = MDIO_AN_10GBT_CTRL_ADV2_5G;
+ ret = at803x_prepare_config_aneg(phydev);
+ if (ret)
+ return ret;
- ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
- MDIO_AN_10GBT_CTRL_ADV2_5G, phy_ctrl);
- if (ret < 0)
- return ret;
- }
+ if (priv->is_1000basex)
+ return genphy_c37_config_aneg(phydev);
- return __genphy_config_aneg(phydev, ret);
+ return genphy_config_aneg(phydev);
}
static int at803x_get_downshift(struct phy_device *phydev, u8 *d)
@@ -2118,6 +2106,32 @@ static int qca808x_get_features(struct p
return 0;
}
+static int qca808x_config_aneg(struct phy_device *phydev)
+{
+ int phy_ctrl = 0;
+ int ret;
+
+ ret = at803x_prepare_config_aneg(phydev);
+ if (ret)
+ return ret;
+
+ /* The reg MII_BMCR also needs to be configured for force mode, the
+ * genphy_config_aneg is also needed.
+ */
+ if (phydev->autoneg == AUTONEG_DISABLE)
+ genphy_c45_pma_setup_forced(phydev);
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->advertising))
+ phy_ctrl = MDIO_AN_10GBT_CTRL_ADV2_5G;
+
+ ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
+ MDIO_AN_10GBT_CTRL_ADV2_5G, phy_ctrl);
+ if (ret < 0)
+ return ret;
+
+ return __genphy_config_aneg(phydev, ret);
+}
+
static void qca808x_link_change_notify(struct phy_device *phydev)
{
/* Assert interface sgmii fifo on link down, deassert it on link up,
@@ -2295,7 +2309,7 @@ static struct phy_driver at803x_driver[]
.set_wol = at803x_set_wol,
.get_wol = at803x_get_wol,
.get_features = qca808x_get_features,
- .config_aneg = at803x_config_aneg,
+ .config_aneg = qca808x_config_aneg,
.suspend = genphy_suspend,
.resume = genphy_resume,
.read_status = qca808x_read_status,

View file

@ -0,0 +1,97 @@
From 38eb804e8458ba181a03a0498ce4bf84eebd1931 Mon Sep 17 00:00:00 2001
From: Christian Marangi <ansuelsmth@gmail.com>
Date: Thu, 14 Dec 2023 01:44:32 +0100
Subject: [PATCH 2/2] net: phy: at803x: make read specific status function more
generic
Rework read specific status function to be more generic. The function
apply different speed mask based on the PHY ID. Make it more generic by
adding an additional arg to pass the specific speed (ss) mask and use
the provided mask to parse the speed value.
This is needed to permit an easier deatch of qca808x code from the
at803x driver.
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/phy/at803x.c | 26 ++++++++++++++++++--------
1 file changed, 18 insertions(+), 8 deletions(-)
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -301,6 +301,11 @@ static struct at803x_hw_stat qca83xx_hw_
{ "eee_wake_errors", 0x16, GENMASK(15, 0), MMD},
};
+struct at803x_ss_mask {
+ u16 speed_mask;
+ u8 speed_shift;
+};
+
struct at803x_priv {
int flags;
u16 clk_25m_reg;
@@ -921,7 +926,8 @@ static void at803x_link_change_notify(st
}
}
-static int at803x_read_specific_status(struct phy_device *phydev)
+static int at803x_read_specific_status(struct phy_device *phydev,
+ struct at803x_ss_mask ss_mask)
{
int ss;
@@ -940,11 +946,8 @@ static int at803x_read_specific_status(s
if (sfc < 0)
return sfc;
- /* qca8081 takes the different bits for speed value from at803x */
- if (phydev->drv->phy_id == QCA8081_PHY_ID)
- speed = FIELD_GET(QCA808X_SS_SPEED_MASK, ss);
- else
- speed = FIELD_GET(AT803X_SS_SPEED_MASK, ss);
+ speed = ss & ss_mask.speed_mask;
+ speed >>= ss_mask.speed_shift;
switch (speed) {
case AT803X_SS_SPEED_10:
@@ -989,6 +992,7 @@ static int at803x_read_specific_status(s
static int at803x_read_status(struct phy_device *phydev)
{
struct at803x_priv *priv = phydev->priv;
+ struct at803x_ss_mask ss_mask = { 0 };
int err, old_link = phydev->link;
if (priv->is_1000basex)
@@ -1012,7 +1016,9 @@ static int at803x_read_status(struct phy
if (err < 0)
return err;
- err = at803x_read_specific_status(phydev);
+ ss_mask.speed_mask = AT803X_SS_SPEED_MASK;
+ ss_mask.speed_shift = __bf_shf(AT803X_SS_SPEED_MASK);
+ err = at803x_read_specific_status(phydev, ss_mask);
if (err < 0)
return err;
@@ -1869,6 +1875,7 @@ static int qca808x_config_init(struct ph
static int qca808x_read_status(struct phy_device *phydev)
{
+ struct at803x_ss_mask ss_mask = { 0 };
int ret;
ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_STAT);
@@ -1882,7 +1889,10 @@ static int qca808x_read_status(struct ph
if (ret)
return ret;
- ret = at803x_read_specific_status(phydev);
+ /* qca8081 takes the different bits for speed value from at803x */
+ ss_mask.speed_mask = QCA808X_SS_SPEED_MASK;
+ ss_mask.speed_shift = __bf_shf(QCA808X_SS_SPEED_MASK);
+ ret = at803x_read_specific_status(phydev, ss_mask);
if (ret < 0)
return ret;
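A standalone sketch, not part of the patch, of how the new at803x_ss_mask is used: the caller fills in a PHY-specific mask plus shift (the kernel derives the shift with __bf_shf()), and the shared code extracts the speed field with it. The mask positions below are assumptions for illustration only.

/* Illustrative sketch, not part of the patch. */
#include <stdint.h>
#include <stdio.h>

struct at803x_ss_mask {
	uint16_t speed_mask;
	uint8_t speed_shift;
};

/* stand-in for the kernel's __bf_shf(): index of the lowest set bit */
static unsigned int bf_shf(uint16_t mask)
{
	return __builtin_ctz(mask);
}

/* mirrors the masking done in at803x_read_specific_status() */
static unsigned int extract_speed(uint16_t ss, struct at803x_ss_mask m)
{
	return (ss & m.speed_mask) >> m.speed_shift;
}

int main(void)
{
	uint16_t fake_ss = 0x8123;	/* made-up specific-status value */

	/* at803x_read_status(): assumed GENMASK(15, 14) */
	struct at803x_ss_mask at803x = { 0xc000, bf_shf(0xc000) };
	/* qca808x_read_status(): assumed GENMASK(9, 7) */
	struct at803x_ss_mask qca808x = { 0x0380, bf_shf(0x0380) };

	printf("at803x speed field:  %u\n", extract_speed(fake_ss, at803x));
	printf("qca808x speed field: %u\n", extract_speed(fake_ss, qca808x));
	return 0;
}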

View file

@ -0,0 +1,27 @@
From fc9d7264ddc32eaa647d6bfcdc25cdf9f786fde0 Mon Sep 17 00:00:00 2001
From: Christian Marangi <ansuelsmth@gmail.com>
Date: Mon, 18 Dec 2023 00:27:39 +0100
Subject: [PATCH 1/2] net: phy: at803x: remove extra space after cast
Remove extra space after cast as reported by checkpatch to keep code
clean.
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
Reviewed-by: Andrew Lunn <andrew@lunn.ch>
Link: https://lore.kernel.org/r/20231217232739.27065-1-ansuelsmth@gmail.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
---
drivers/net/phy/at803x.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -462,7 +462,7 @@ static int at803x_set_wol(struct phy_dev
if (!ndev)
return -ENODEV;
- mac = (const u8 *) ndev->dev_addr;
+ mac = (const u8 *)ndev->dev_addr;
if (!is_valid_ether_addr(mac))
return -EINVAL;

View file

@ -0,0 +1,38 @@
From 3ab5720881a924fb6405d9e6a3b09f1026467c47 Mon Sep 17 00:00:00 2001
From: Christian Marangi <ansuelsmth@gmail.com>
Date: Mon, 18 Dec 2023 00:25:08 +0100
Subject: [PATCH 2/2] net: phy: at803x: replace msleep(1) with usleep_range
Replace msleep(1) with usleep_range as suggested by the timers-howto guide.
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
Reviewed-by: Andrew Lunn <andrew@lunn.ch>
Link: https://lore.kernel.org/r/20231217232508.26470-1-ansuelsmth@gmail.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
---
drivers/net/phy/at803x.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -916,9 +916,9 @@ static void at803x_link_change_notify(st
at803x_context_save(phydev, &context);
phy_device_reset(phydev, 1);
- msleep(1);
+ usleep_range(1000, 2000);
phy_device_reset(phydev, 0);
- msleep(1);
+ usleep_range(1000, 2000);
at803x_context_restore(phydev, &context);
@@ -1733,7 +1733,7 @@ static int qca83xx_resume(struct phy_dev
if (ret)
return ret;
- msleep(1);
+ usleep_range(1000, 2000);
return 0;
}

View file

@ -0,0 +1,152 @@
From 7961ef1fa10ec35ad6923fb5751877116e4b035b Mon Sep 17 00:00:00 2001
From: Christian Marangi <ansuelsmth@gmail.com>
Date: Tue, 19 Dec 2023 21:21:24 +0100
Subject: [PATCH] net: phy: at803x: better align function variables to open
 parenthesis
Better align function variables to the open parenthesis, as suggested by the
checkpatch script, for qca808x functions to make the code cleaner.
For the cable_test_get_status function some additional rework was needed to
handle lines that were too long.
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
Reviewed-by: Andrew Lunn <andrew@lunn.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/phy/at803x.c | 67 ++++++++++++++++++++++------------------
1 file changed, 37 insertions(+), 30 deletions(-)
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -1781,27 +1781,27 @@ static int qca808x_phy_fast_retrain_conf
return ret;
phy_write_mmd(phydev, MDIO_MMD_AN, QCA808X_PHY_MMD7_TOP_OPTION1,
- QCA808X_TOP_OPTION1_DATA);
+ QCA808X_TOP_OPTION1_DATA);
phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_20DB,
- QCA808X_MSE_THRESHOLD_20DB_VALUE);
+ QCA808X_MSE_THRESHOLD_20DB_VALUE);
phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_17DB,
- QCA808X_MSE_THRESHOLD_17DB_VALUE);
+ QCA808X_MSE_THRESHOLD_17DB_VALUE);
phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_27DB,
- QCA808X_MSE_THRESHOLD_27DB_VALUE);
+ QCA808X_MSE_THRESHOLD_27DB_VALUE);
phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_28DB,
- QCA808X_MSE_THRESHOLD_28DB_VALUE);
+ QCA808X_MSE_THRESHOLD_28DB_VALUE);
phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_1,
- QCA808X_MMD3_DEBUG_1_VALUE);
+ QCA808X_MMD3_DEBUG_1_VALUE);
phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_4,
- QCA808X_MMD3_DEBUG_4_VALUE);
+ QCA808X_MMD3_DEBUG_4_VALUE);
phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_5,
- QCA808X_MMD3_DEBUG_5_VALUE);
+ QCA808X_MMD3_DEBUG_5_VALUE);
phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_3,
- QCA808X_MMD3_DEBUG_3_VALUE);
+ QCA808X_MMD3_DEBUG_3_VALUE);
phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_6,
- QCA808X_MMD3_DEBUG_6_VALUE);
+ QCA808X_MMD3_DEBUG_6_VALUE);
phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_2,
- QCA808X_MMD3_DEBUG_2_VALUE);
+ QCA808X_MMD3_DEBUG_2_VALUE);
return 0;
}
@@ -1838,13 +1838,14 @@ static int qca808x_config_init(struct ph
/* Active adc&vga on 802.3az for the link 1000M and 100M */
ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_ADDR_CLD_CTRL7,
- QCA808X_8023AZ_AFE_CTRL_MASK, QCA808X_8023AZ_AFE_EN);
+ QCA808X_8023AZ_AFE_CTRL_MASK, QCA808X_8023AZ_AFE_EN);
if (ret)
return ret;
/* Adjust the threshold on 802.3az for the link 1000M */
ret = phy_write_mmd(phydev, MDIO_MMD_PCS,
- QCA808X_PHY_MMD3_AZ_TRAINING_CTRL, QCA808X_MMD3_AZ_TRAINING_VAL);
+ QCA808X_PHY_MMD3_AZ_TRAINING_CTRL,
+ QCA808X_MMD3_AZ_TRAINING_VAL);
if (ret)
return ret;
@@ -1870,7 +1871,8 @@ static int qca808x_config_init(struct ph
/* Configure adc threshold as 100mv for the link 10M */
return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_ADC_THRESHOLD,
- QCA808X_ADC_THRESHOLD_MASK, QCA808X_ADC_THRESHOLD_100MV);
+ QCA808X_ADC_THRESHOLD_MASK,
+ QCA808X_ADC_THRESHOLD_100MV);
}
static int qca808x_read_status(struct phy_device *phydev)
@@ -1883,7 +1885,7 @@ static int qca808x_read_status(struct ph
return ret;
linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->lp_advertising,
- ret & MDIO_AN_10GBT_STAT_LP2_5G);
+ ret & MDIO_AN_10GBT_STAT_LP2_5G);
ret = genphy_read_status(phydev);
if (ret)
@@ -1913,7 +1915,7 @@ static int qca808x_read_status(struct ph
*/
if (qca808x_has_fast_retrain_or_slave_seed(phydev)) {
if (phydev->master_slave_state == MASTER_SLAVE_STATE_ERR ||
- qca808x_is_prefer_master(phydev)) {
+ qca808x_is_prefer_master(phydev)) {
qca808x_phy_ms_seed_enable(phydev, false);
} else {
qca808x_phy_ms_seed_enable(phydev, true);
@@ -2070,18 +2072,22 @@ static int qca808x_cable_test_get_status
ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_D,
qca808x_cable_test_result_trans(pair_d));
- if (qca808x_cdt_fault_length_valid(pair_a))
- ethnl_cable_test_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_A,
- qca808x_cdt_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_A));
- if (qca808x_cdt_fault_length_valid(pair_b))
- ethnl_cable_test_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_B,
- qca808x_cdt_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_B));
- if (qca808x_cdt_fault_length_valid(pair_c))
- ethnl_cable_test_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_C,
- qca808x_cdt_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_C));
- if (qca808x_cdt_fault_length_valid(pair_d))
- ethnl_cable_test_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_D,
- qca808x_cdt_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_D));
+ if (qca808x_cdt_fault_length_valid(pair_a)) {
+ val = qca808x_cdt_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_A);
+ ethnl_cable_test_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_A, val);
+ }
+ if (qca808x_cdt_fault_length_valid(pair_b)) {
+ val = qca808x_cdt_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_B);
+ ethnl_cable_test_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_B, val);
+ }
+ if (qca808x_cdt_fault_length_valid(pair_c)) {
+ val = qca808x_cdt_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_C);
+ ethnl_cable_test_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_C, val);
+ }
+ if (qca808x_cdt_fault_length_valid(pair_d)) {
+ val = qca808x_cdt_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_D);
+ ethnl_cable_test_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_D, val);
+ }
*finished = true;
@@ -2148,8 +2154,9 @@ static void qca808x_link_change_notify(s
* the interface device address is always phy address added by 1.
*/
mdiobus_c45_modify_changed(phydev->mdio.bus, phydev->mdio.addr + 1,
- MDIO_MMD_PMAPMD, QCA8081_PHY_SERDES_MMD1_FIFO_CTRL,
- QCA8081_PHY_FIFO_RSTN, phydev->link ? QCA8081_PHY_FIFO_RSTN : 0);
+ MDIO_MMD_PMAPMD, QCA8081_PHY_SERDES_MMD1_FIFO_CTRL,
+ QCA8081_PHY_FIFO_RSTN,
+ phydev->link ? QCA8081_PHY_FIFO_RSTN : 0);
}
static struct phy_driver at803x_driver[] = {

View file

@ -0,0 +1,62 @@
From 22eb276098da820d9440fad22901f9b74ed4d659 Mon Sep 17 00:00:00 2001
From: Christian Marangi <ansuelsmth@gmail.com>
Date: Thu, 4 Jan 2024 22:30:38 +0100
Subject: [PATCH 1/4] net: phy: at803x: generalize cdt fault length function
Generalize the cable test fault length functions since they are all based on
the same magic values (already reverse engineered to understand their
meaning) to have consistent values on every PHY.
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
Reviewed-by: Simon Horman <horms@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/phy/at803x.c | 13 ++++++-------
1 file changed, 6 insertions(+), 7 deletions(-)
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -1192,10 +1192,8 @@ static bool at803x_cdt_fault_length_vali
return false;
}
-static int at803x_cdt_fault_length(u16 status)
+static int at803x_cdt_fault_length(int dt)
{
- int dt;
-
/* According to the datasheet the distance to the fault is
* DELTA_TIME * 0.824 meters.
*
@@ -1211,8 +1209,6 @@ static int at803x_cdt_fault_length(u16 s
* With a VF of 0.69 we get the factor 0.824 mentioned in the
* datasheet.
*/
- dt = FIELD_GET(AT803X_CDT_STATUS_DELTA_TIME_MASK, status);
-
return (dt * 824) / 10;
}
@@ -1265,9 +1261,11 @@ static int at803x_cable_test_one_pair(st
ethnl_cable_test_result(phydev, ethtool_pair[pair],
at803x_cable_test_result_trans(val));
- if (at803x_cdt_fault_length_valid(val))
+ if (at803x_cdt_fault_length_valid(val)) {
+ val = FIELD_GET(AT803X_CDT_STATUS_DELTA_TIME_MASK, val);
ethnl_cable_test_fault_length(phydev, ethtool_pair[pair],
at803x_cdt_fault_length(val));
+ }
return 1;
}
@@ -1992,7 +1990,8 @@ static int qca808x_cdt_fault_length(stru
if (val < 0)
return val;
- return (FIELD_GET(QCA808X_CDT_DIAG_LENGTH, val) * 824) / 10;
+ val = FIELD_GET(QCA808X_CDT_DIAG_LENGTH, val);
+ return at803x_cdt_fault_length(val);
}
static int qca808x_cable_test_start(struct phy_device *phydev)
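The shared conversion is plain integer arithmetic: the datasheet gives 0.824 m per delta-time unit, so (dt * 824) / 10 is the fault distance in centimeters, which is what ethnl_cable_test_fault_length() reports. A runnable sketch, not part of the patch:

/* Illustrative sketch, not part of the patch. */
#include <stdio.h>

/* same arithmetic as the reworked at803x_cdt_fault_length() */
static int cdt_fault_length(int dt)
{
	return (dt * 824) / 10;
}

int main(void)
{
	/* e.g. a delta time of 25 units maps to a fault ~20.6 m away */
	int dt = 25;

	printf("dt=%d -> fault length = %d cm (%.1f m)\n",
	       dt, cdt_fault_length(dt), cdt_fault_length(dt) / 100.0);
	return 0;
}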

View file

@ -0,0 +1,118 @@
From e0e9ada1df6133513249861c1d91c1dbefd9383b Mon Sep 17 00:00:00 2001
From: Christian Marangi <ansuelsmth@gmail.com>
Date: Thu, 4 Jan 2024 22:30:39 +0100
Subject: [PATCH 2/4] net: phy: at803x: refactor qca808x cable test get status
function
Refactor qca808x cable test get status function to remove code
duplication and clean things up.
The same logic is applied to each pair hence it can be generalized and
moved to a common function.
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
Reviewed-by: Simon Horman <horms@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/phy/at803x.c | 80 ++++++++++++++++++++++++----------------
1 file changed, 49 insertions(+), 31 deletions(-)
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -2035,10 +2035,43 @@ static int qca808x_cable_test_start(stru
return 0;
}
+static int qca808x_cable_test_get_pair_status(struct phy_device *phydev, u8 pair,
+ u16 status)
+{
+ u16 pair_code;
+ int length;
+
+ switch (pair) {
+ case ETHTOOL_A_CABLE_PAIR_A:
+ pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_A, status);
+ break;
+ case ETHTOOL_A_CABLE_PAIR_B:
+ pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_B, status);
+ break;
+ case ETHTOOL_A_CABLE_PAIR_C:
+ pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_C, status);
+ break;
+ case ETHTOOL_A_CABLE_PAIR_D:
+ pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_D, status);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ethnl_cable_test_result(phydev, pair,
+ qca808x_cable_test_result_trans(pair_code));
+
+ if (qca808x_cdt_fault_length_valid(pair_code)) {
+ length = qca808x_cdt_fault_length(phydev, pair);
+ ethnl_cable_test_fault_length(phydev, pair, length);
+ }
+
+ return 0;
+}
+
static int qca808x_cable_test_get_status(struct phy_device *phydev, bool *finished)
{
int ret, val;
- int pair_a, pair_b, pair_c, pair_d;
*finished = false;
@@ -2057,36 +2090,21 @@ static int qca808x_cable_test_get_status
if (val < 0)
return val;
- pair_a = FIELD_GET(QCA808X_CDT_CODE_PAIR_A, val);
- pair_b = FIELD_GET(QCA808X_CDT_CODE_PAIR_B, val);
- pair_c = FIELD_GET(QCA808X_CDT_CODE_PAIR_C, val);
- pair_d = FIELD_GET(QCA808X_CDT_CODE_PAIR_D, val);
-
- ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
- qca808x_cable_test_result_trans(pair_a));
- ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_B,
- qca808x_cable_test_result_trans(pair_b));
- ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_C,
- qca808x_cable_test_result_trans(pair_c));
- ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_D,
- qca808x_cable_test_result_trans(pair_d));
-
- if (qca808x_cdt_fault_length_valid(pair_a)) {
- val = qca808x_cdt_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_A);
- ethnl_cable_test_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_A, val);
- }
- if (qca808x_cdt_fault_length_valid(pair_b)) {
- val = qca808x_cdt_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_B);
- ethnl_cable_test_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_B, val);
- }
- if (qca808x_cdt_fault_length_valid(pair_c)) {
- val = qca808x_cdt_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_C);
- ethnl_cable_test_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_C, val);
- }
- if (qca808x_cdt_fault_length_valid(pair_d)) {
- val = qca808x_cdt_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_D);
- ethnl_cable_test_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_D, val);
- }
+ ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_A, val);
+ if (ret)
+ return ret;
+
+ ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_B, val);
+ if (ret)
+ return ret;
+
+ ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_C, val);
+ if (ret)
+ return ret;
+
+ ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_D, val);
+ if (ret)
+ return ret;
*finished = true;

View file

@ -0,0 +1,182 @@
From ea73e5ea442ee2aade67b1fb1233ccb3cbea2ceb Mon Sep 17 00:00:00 2001
From: Christian Marangi <ansuelsmth@gmail.com>
Date: Thu, 4 Jan 2024 22:30:40 +0100
Subject: [PATCH 3/4] net: phy: at803x: add support for cdt cross short test
for qca808x
The QCA808x PHY family also supports Cable Diagnostic Test for Cross Pair
Short.
Add all the defines needed to enable and support these additional tests.
The Cross Short test was previously disabled by default; it is now enabled
by default. In this mode the mask changes slightly and the length is
shifted based on the fault condition.
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
Reviewed-by: Simon Horman <horms@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/phy/at803x.c | 86 ++++++++++++++++++++++++++++++++--------
1 file changed, 69 insertions(+), 17 deletions(-)
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -254,6 +254,7 @@
#define QCA808X_CDT_ENABLE_TEST BIT(15)
#define QCA808X_CDT_INTER_CHECK_DIS BIT(13)
+#define QCA808X_CDT_STATUS BIT(11)
#define QCA808X_CDT_LENGTH_UNIT BIT(10)
#define QCA808X_MMD3_CDT_STATUS 0x8064
@@ -261,16 +262,44 @@
#define QCA808X_MMD3_CDT_DIAG_PAIR_B 0x8066
#define QCA808X_MMD3_CDT_DIAG_PAIR_C 0x8067
#define QCA808X_MMD3_CDT_DIAG_PAIR_D 0x8068
-#define QCA808X_CDT_DIAG_LENGTH GENMASK(7, 0)
+#define QCA808X_CDT_DIAG_LENGTH_SAME_SHORT GENMASK(15, 8)
+#define QCA808X_CDT_DIAG_LENGTH_CROSS_SHORT GENMASK(7, 0)
#define QCA808X_CDT_CODE_PAIR_A GENMASK(15, 12)
#define QCA808X_CDT_CODE_PAIR_B GENMASK(11, 8)
#define QCA808X_CDT_CODE_PAIR_C GENMASK(7, 4)
#define QCA808X_CDT_CODE_PAIR_D GENMASK(3, 0)
-#define QCA808X_CDT_STATUS_STAT_FAIL 0
-#define QCA808X_CDT_STATUS_STAT_NORMAL 1
-#define QCA808X_CDT_STATUS_STAT_OPEN 2
-#define QCA808X_CDT_STATUS_STAT_SHORT 3
+
+#define QCA808X_CDT_STATUS_STAT_TYPE GENMASK(1, 0)
+#define QCA808X_CDT_STATUS_STAT_FAIL FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 0)
+#define QCA808X_CDT_STATUS_STAT_NORMAL FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 1)
+#define QCA808X_CDT_STATUS_STAT_SAME_OPEN FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 2)
+#define QCA808X_CDT_STATUS_STAT_SAME_SHORT FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 3)
+
+#define QCA808X_CDT_STATUS_STAT_MDI GENMASK(3, 2)
+#define QCA808X_CDT_STATUS_STAT_MDI1 FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 1)
+#define QCA808X_CDT_STATUS_STAT_MDI2 FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 2)
+#define QCA808X_CDT_STATUS_STAT_MDI3 FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 3)
+
+/* NORMAL are MDI with type set to 0 */
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL QCA808X_CDT_STATUS_STAT_MDI1
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN (QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
+ QCA808X_CDT_STATUS_STAT_MDI1)
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT (QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
+ QCA808X_CDT_STATUS_STAT_MDI1)
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL QCA808X_CDT_STATUS_STAT_MDI2
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN (QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
+ QCA808X_CDT_STATUS_STAT_MDI2)
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT (QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
+ QCA808X_CDT_STATUS_STAT_MDI2)
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL QCA808X_CDT_STATUS_STAT_MDI3
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN (QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
+ QCA808X_CDT_STATUS_STAT_MDI3)
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT (QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
+ QCA808X_CDT_STATUS_STAT_MDI3)
+
+/* Added for reference of existence but should be handled by wait_for_completion already */
+#define QCA808X_CDT_STATUS_STAT_BUSY (BIT(1) | BIT(3))
/* QCA808X 1G chip type */
#define QCA808X_PHY_MMD7_CHIP_TYPE 0x901d
@@ -1941,8 +1970,17 @@ static int qca808x_soft_reset(struct phy
static bool qca808x_cdt_fault_length_valid(int cdt_code)
{
switch (cdt_code) {
- case QCA808X_CDT_STATUS_STAT_SHORT:
- case QCA808X_CDT_STATUS_STAT_OPEN:
+ case QCA808X_CDT_STATUS_STAT_SAME_SHORT:
+ case QCA808X_CDT_STATUS_STAT_SAME_OPEN:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT:
return true;
default:
return false;
@@ -1954,17 +1992,28 @@ static int qca808x_cable_test_result_tra
switch (cdt_code) {
case QCA808X_CDT_STATUS_STAT_NORMAL:
return ETHTOOL_A_CABLE_RESULT_CODE_OK;
- case QCA808X_CDT_STATUS_STAT_SHORT:
+ case QCA808X_CDT_STATUS_STAT_SAME_SHORT:
return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
- case QCA808X_CDT_STATUS_STAT_OPEN:
+ case QCA808X_CDT_STATUS_STAT_SAME_OPEN:
return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT:
+ return ETHTOOL_A_CABLE_RESULT_CODE_CROSS_SHORT;
case QCA808X_CDT_STATUS_STAT_FAIL:
default:
return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
}
}
-static int qca808x_cdt_fault_length(struct phy_device *phydev, int pair)
+static int qca808x_cdt_fault_length(struct phy_device *phydev, int pair,
+ int result)
{
int val;
u32 cdt_length_reg = 0;
@@ -1990,7 +2039,11 @@ static int qca808x_cdt_fault_length(stru
if (val < 0)
return val;
- val = FIELD_GET(QCA808X_CDT_DIAG_LENGTH, val);
+ if (result == ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT)
+ val = FIELD_GET(QCA808X_CDT_DIAG_LENGTH_SAME_SHORT, val);
+ else
+ val = FIELD_GET(QCA808X_CDT_DIAG_LENGTH_CROSS_SHORT, val);
+
return at803x_cdt_fault_length(val);
}
@@ -2038,8 +2091,8 @@ static int qca808x_cable_test_start(stru
static int qca808x_cable_test_get_pair_status(struct phy_device *phydev, u8 pair,
u16 status)
{
+ int length, result;
u16 pair_code;
- int length;
switch (pair) {
case ETHTOOL_A_CABLE_PAIR_A:
@@ -2058,11 +2111,11 @@ static int qca808x_cable_test_get_pair_s
return -EINVAL;
}
- ethnl_cable_test_result(phydev, pair,
- qca808x_cable_test_result_trans(pair_code));
+ result = qca808x_cable_test_result_trans(pair_code);
+ ethnl_cable_test_result(phydev, pair, result);
if (qca808x_cdt_fault_length_valid(pair_code)) {
- length = qca808x_cdt_fault_length(phydev, pair);
+ length = qca808x_cdt_fault_length(phydev, pair, result);
ethnl_cable_test_fault_length(phydev, pair, length);
}
@@ -2076,8 +2129,7 @@ static int qca808x_cable_test_get_status
*finished = false;
val = QCA808X_CDT_ENABLE_TEST |
- QCA808X_CDT_LENGTH_UNIT |
- QCA808X_CDT_INTER_CHECK_DIS;
+ QCA808X_CDT_LENGTH_UNIT;
ret = at803x_cdt_start(phydev, val);
if (ret)
return ret;
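The per-pair CDT code is now interpreted as two sub-fields: bits 1:0 carry the same-pair result and bits 3:2 the cross-short MDI indicator, exactly as the FIELD_PREP_CONST() combinations above encode. A small sketch, not part of the patch, computing a few of the resulting codes:

/* Illustrative sketch, not part of the patch. */
#include <stdio.h>

#define STAT_TYPE_NORMAL	1
#define STAT_TYPE_SAME_OPEN	2
#define STAT_TYPE_SAME_SHORT	3

static unsigned int pair_code(unsigned int mdi, unsigned int type)
{
	return (mdi << 2) | type;	/* GENMASK(3, 2) | GENMASK(1, 0) */
}

int main(void)
{
	/* cross short with MDI1 while the pair itself looks open: 0x6 */
	printf("MDI1 + same open  -> 0x%x\n", pair_code(1, STAT_TYPE_SAME_OPEN));
	/* cross short with MDI3 while the pair itself is shorted: 0xf */
	printf("MDI3 + same short -> 0x%x\n", pair_code(3, STAT_TYPE_SAME_SHORT));
	/* no cross short, pair normal: 0x1 */
	printf("normal            -> 0x%x\n", pair_code(0, STAT_TYPE_NORMAL));
	return 0;
}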

View file

@ -0,0 +1,62 @@
From c34d9452d4e5d98a655d7b625e85466320885416 Mon Sep 17 00:00:00 2001
From: Christian Marangi <ansuelsmth@gmail.com>
Date: Thu, 4 Jan 2024 22:30:41 +0100
Subject: [PATCH 4/4] net: phy: at803x: make read_status more generic
Make read_status more generic in preparation for moving it to a shared
library, as other PHY family drivers will have the exact same
implementation.
The only specific part was a check for AR8031/33 on whether 1000basex was
used. The check is moved to a dedicated function specific to those PHYs.
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
Reviewed-by: Simon Horman <horms@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/phy/at803x.c | 17 ++++++++++++-----
1 file changed, 12 insertions(+), 5 deletions(-)
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -1020,13 +1020,9 @@ static int at803x_read_specific_status(s
static int at803x_read_status(struct phy_device *phydev)
{
- struct at803x_priv *priv = phydev->priv;
struct at803x_ss_mask ss_mask = { 0 };
int err, old_link = phydev->link;
- if (priv->is_1000basex)
- return genphy_c37_read_status(phydev);
-
/* Update the link, but return if there was an error */
err = genphy_update_link(phydev);
if (err)
@@ -1618,6 +1614,17 @@ static int at8031_config_intr(struct phy
return at803x_config_intr(phydev);
}
+/* AR8031 and AR8033 share the same read status logic */
+static int at8031_read_status(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+
+ if (priv->is_1000basex)
+ return genphy_c37_read_status(phydev);
+
+ return at803x_read_status(phydev);
+}
+
/* AR8031 and AR8035 share the same cable test get status reg */
static int at8031_cable_test_get_status(struct phy_device *phydev,
bool *finished)
@@ -2281,7 +2288,7 @@ static struct phy_driver at803x_driver[]
.read_page = at803x_read_page,
.write_page = at803x_write_page,
.get_features = at803x_get_features,
- .read_status = at803x_read_status,
+ .read_status = at8031_read_status,
.config_intr = at8031_config_intr,
.handle_interrupt = at803x_handle_interrupt,
.get_tunable = at803x_get_tunable,

View file

@ -0,0 +1,408 @@
From 7196062b64ee470b91015f3d2e82d225948258ea Mon Sep 17 00:00:00 2001
From: Christian Marangi <ansuelsmth@gmail.com>
Date: Thu, 25 Jan 2024 21:37:01 +0100
Subject: [PATCH 5/5] net: phy: at803x: add LED support for qca808x
Add LED support for QCA8081 PHY.
Documentation for this PHY's LEDs is very scarce, even with NDA access
to documentation for OEMs. Only the blink patterns are documented and they
are very confusing most of the time. No documentation is present about
forcing the LED on/off or making it always blink.
These settings were reverse engineered by poking the regs and trying to find
the correct bits to trigger these modes. Some bit modes are not clear and
maybe the documented options are not 100% correct. For the sake of LED
support, the reverse-engineered options are enough to add support for the
current LED APIs.
Supported HW control modes are:
- tx
- rx
- link_10
- link_100
- link_1000
- link_2500
- half_duplex
- full_duplex
Also add support for setting the LED polarity to active high or low.
QSDK sets this value to high by default, but the PHY reset value doesn't
have this enabled by default.
QSDK also sets 2 additional bits, but their usage is not clear; info about
this is added in the header. It was verified that, when active high is
needed, only BIT 6 is required for correct function of the LED.
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
Reviewed-by: Andrew Lunn <andrew@lunn.ch>
Link: https://lore.kernel.org/r/20240125203702.4552-6-ansuelsmth@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
drivers/net/phy/at803x.c | 327 +++++++++++++++++++++++++++++++++++++++
1 file changed, 327 insertions(+)
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -301,6 +301,87 @@
/* Added for reference of existence but should be handled by wait_for_completion already */
#define QCA808X_CDT_STATUS_STAT_BUSY (BIT(1) | BIT(3))
+#define QCA808X_MMD7_LED_GLOBAL 0x8073
+#define QCA808X_LED_BLINK_1 GENMASK(11, 6)
+#define QCA808X_LED_BLINK_2 GENMASK(5, 0)
+/* Values are the same for both BLINK_1 and BLINK_2 */
+#define QCA808X_LED_BLINK_FREQ_MASK GENMASK(5, 3)
+#define QCA808X_LED_BLINK_FREQ_2HZ FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x0)
+#define QCA808X_LED_BLINK_FREQ_4HZ FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x1)
+#define QCA808X_LED_BLINK_FREQ_8HZ FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x2)
+#define QCA808X_LED_BLINK_FREQ_16HZ FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x3)
+#define QCA808X_LED_BLINK_FREQ_32HZ FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x4)
+#define QCA808X_LED_BLINK_FREQ_64HZ FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x5)
+#define QCA808X_LED_BLINK_FREQ_128HZ FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x6)
+#define QCA808X_LED_BLINK_FREQ_256HZ FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x7)
+#define QCA808X_LED_BLINK_DUTY_MASK GENMASK(2, 0)
+#define QCA808X_LED_BLINK_DUTY_50_50 FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x0)
+#define QCA808X_LED_BLINK_DUTY_75_25 FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x1)
+#define QCA808X_LED_BLINK_DUTY_25_75 FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x2)
+#define QCA808X_LED_BLINK_DUTY_33_67 FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x3)
+#define QCA808X_LED_BLINK_DUTY_67_33 FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x4)
+#define QCA808X_LED_BLINK_DUTY_17_83 FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x5)
+#define QCA808X_LED_BLINK_DUTY_83_17 FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x6)
+#define QCA808X_LED_BLINK_DUTY_8_92 FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x7)
+
+#define QCA808X_MMD7_LED2_CTRL 0x8074
+#define QCA808X_MMD7_LED2_FORCE_CTRL 0x8075
+#define QCA808X_MMD7_LED1_CTRL 0x8076
+#define QCA808X_MMD7_LED1_FORCE_CTRL 0x8077
+#define QCA808X_MMD7_LED0_CTRL 0x8078
+#define QCA808X_MMD7_LED_CTRL(x) (0x8078 - ((x) * 2))
+
+/* LED hw control pattern is the same for every LED */
+#define QCA808X_LED_PATTERN_MASK GENMASK(15, 0)
+#define QCA808X_LED_SPEED2500_ON BIT(15)
+#define QCA808X_LED_SPEED2500_BLINK BIT(14)
+/* Follow blink trigger even if duplex or speed condition doesn't match */
+#define QCA808X_LED_BLINK_CHECK_BYPASS BIT(13)
+#define QCA808X_LED_FULL_DUPLEX_ON BIT(12)
+#define QCA808X_LED_HALF_DUPLEX_ON BIT(11)
+#define QCA808X_LED_TX_BLINK BIT(10)
+#define QCA808X_LED_RX_BLINK BIT(9)
+#define QCA808X_LED_TX_ON_10MS BIT(8)
+#define QCA808X_LED_RX_ON_10MS BIT(7)
+#define QCA808X_LED_SPEED1000_ON BIT(6)
+#define QCA808X_LED_SPEED100_ON BIT(5)
+#define QCA808X_LED_SPEED10_ON BIT(4)
+#define QCA808X_LED_COLLISION_BLINK BIT(3)
+#define QCA808X_LED_SPEED1000_BLINK BIT(2)
+#define QCA808X_LED_SPEED100_BLINK BIT(1)
+#define QCA808X_LED_SPEED10_BLINK BIT(0)
+
+#define QCA808X_MMD7_LED0_FORCE_CTRL 0x8079
+#define QCA808X_MMD7_LED_FORCE_CTRL(x) (0x8079 - ((x) * 2))
+
+/* LED force ctrl is the same for every LED
+ * No documentation exist for this, not even internal one
+ * with NDA as QCOM gives only info about configuring
+ * hw control pattern rules and doesn't indicate any way
+ * to force the LED to specific mode.
+ * These define comes from reverse and testing and maybe
+ * lack of some info or some info are not entirely correct.
+ * For the basic LED control and hw control these finding
+ * are enough to support LED control in all the required APIs.
+ *
+ * On doing some comparison with implementation with qca807x,
+ * it was found that it's 1:1 equal to it and confirms all the
+ * reverse done. It was also found further specification with the
+ * force mode and the blink modes.
+ */
+#define QCA808X_LED_FORCE_EN BIT(15)
+#define QCA808X_LED_FORCE_MODE_MASK GENMASK(14, 13)
+#define QCA808X_LED_FORCE_BLINK_1 FIELD_PREP(QCA808X_LED_FORCE_MODE_MASK, 0x3)
+#define QCA808X_LED_FORCE_BLINK_2 FIELD_PREP(QCA808X_LED_FORCE_MODE_MASK, 0x2)
+#define QCA808X_LED_FORCE_ON FIELD_PREP(QCA808X_LED_FORCE_MODE_MASK, 0x1)
+#define QCA808X_LED_FORCE_OFF FIELD_PREP(QCA808X_LED_FORCE_MODE_MASK, 0x0)
+
+#define QCA808X_MMD7_LED_POLARITY_CTRL 0x901a
+/* QSDK sets by default 0x46 to this reg that sets BIT 6 for
+ * LED to active high. It's not clear what BIT 3 and BIT 4 does.
+ */
+#define QCA808X_LED_ACTIVE_HIGH BIT(6)
+
/* QCA808X 1G chip type */
#define QCA808X_PHY_MMD7_CHIP_TYPE 0x901d
#define QCA808X_PHY_CHIP_TYPE_1G BIT(0)
@@ -346,6 +427,7 @@ struct at803x_priv {
struct regulator_dev *vddio_rdev;
struct regulator_dev *vddh_rdev;
u64 stats[ARRAY_SIZE(qca83xx_hw_stats)];
+ int led_polarity_mode;
};
struct at803x_context {
@@ -706,6 +788,9 @@ static int at803x_probe(struct phy_devic
if (!priv)
return -ENOMEM;
+ /* Init LED polarity mode to -1 */
+ priv->led_polarity_mode = -1;
+
phydev->priv = priv;
ret = at803x_parse_dt(phydev);
@@ -2235,6 +2320,242 @@ static void qca808x_link_change_notify(s
phydev->link ? QCA8081_PHY_FIFO_RSTN : 0);
}
+static int qca808x_led_parse_netdev(struct phy_device *phydev, unsigned long rules,
+ u16 *offload_trigger)
+{
+ /* Parsing specific to netdev trigger */
+ if (test_bit(TRIGGER_NETDEV_TX, &rules))
+ *offload_trigger |= QCA808X_LED_TX_BLINK;
+ if (test_bit(TRIGGER_NETDEV_RX, &rules))
+ *offload_trigger |= QCA808X_LED_RX_BLINK;
+ if (test_bit(TRIGGER_NETDEV_LINK_10, &rules))
+ *offload_trigger |= QCA808X_LED_SPEED10_ON;
+ if (test_bit(TRIGGER_NETDEV_LINK_100, &rules))
+ *offload_trigger |= QCA808X_LED_SPEED100_ON;
+ if (test_bit(TRIGGER_NETDEV_LINK_1000, &rules))
+ *offload_trigger |= QCA808X_LED_SPEED1000_ON;
+ if (test_bit(TRIGGER_NETDEV_LINK_2500, &rules))
+ *offload_trigger |= QCA808X_LED_SPEED2500_ON;
+ if (test_bit(TRIGGER_NETDEV_HALF_DUPLEX, &rules))
+ *offload_trigger |= QCA808X_LED_HALF_DUPLEX_ON;
+ if (test_bit(TRIGGER_NETDEV_FULL_DUPLEX, &rules))
+ *offload_trigger |= QCA808X_LED_FULL_DUPLEX_ON;
+
+ if (rules && !*offload_trigger)
+ return -EOPNOTSUPP;
+
+ /* Enable BLINK_CHECK_BYPASS by default to make the LED
+ * blink even with duplex or speed mode not enabled.
+ */
+ *offload_trigger |= QCA808X_LED_BLINK_CHECK_BYPASS;
+
+ return 0;
+}
+
+static int qca808x_led_hw_control_enable(struct phy_device *phydev, u8 index)
+{
+ u16 reg;
+
+ if (index > 2)
+ return -EINVAL;
+
+ reg = QCA808X_MMD7_LED_FORCE_CTRL(index);
+
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_AN, reg,
+ QCA808X_LED_FORCE_EN);
+}
+
+static int qca808x_led_hw_is_supported(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ u16 offload_trigger = 0;
+
+ if (index > 2)
+ return -EINVAL;
+
+ return qca808x_led_parse_netdev(phydev, rules, &offload_trigger);
+}
+
+static int qca808x_led_hw_control_set(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ u16 reg, offload_trigger = 0;
+ int ret;
+
+ if (index > 2)
+ return -EINVAL;
+
+ reg = QCA808X_MMD7_LED_CTRL(index);
+
+ ret = qca808x_led_parse_netdev(phydev, rules, &offload_trigger);
+ if (ret)
+ return ret;
+
+ ret = qca808x_led_hw_control_enable(phydev, index);
+ if (ret)
+ return ret;
+
+ return phy_modify_mmd(phydev, MDIO_MMD_AN, reg,
+ QCA808X_LED_PATTERN_MASK,
+ offload_trigger);
+}
+
+static bool qca808x_led_hw_control_status(struct phy_device *phydev, u8 index)
+{
+ u16 reg;
+ int val;
+
+ if (index > 2)
+ return false;
+
+ reg = QCA808X_MMD7_LED_FORCE_CTRL(index);
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, reg);
+
+ return !(val & QCA808X_LED_FORCE_EN);
+}
+
+static int qca808x_led_hw_control_get(struct phy_device *phydev, u8 index,
+ unsigned long *rules)
+{
+ u16 reg;
+ int val;
+
+ if (index > 2)
+ return -EINVAL;
+
+ /* Check if we have hw control enabled */
+ if (qca808x_led_hw_control_status(phydev, index))
+ return -EINVAL;
+
+ reg = QCA808X_MMD7_LED_CTRL(index);
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, reg);
+ if (val & QCA808X_LED_TX_BLINK)
+ set_bit(TRIGGER_NETDEV_TX, rules);
+ if (val & QCA808X_LED_RX_BLINK)
+ set_bit(TRIGGER_NETDEV_RX, rules);
+ if (val & QCA808X_LED_SPEED10_ON)
+ set_bit(TRIGGER_NETDEV_LINK_10, rules);
+ if (val & QCA808X_LED_SPEED100_ON)
+ set_bit(TRIGGER_NETDEV_LINK_100, rules);
+ if (val & QCA808X_LED_SPEED1000_ON)
+ set_bit(TRIGGER_NETDEV_LINK_1000, rules);
+ if (val & QCA808X_LED_SPEED2500_ON)
+ set_bit(TRIGGER_NETDEV_LINK_2500, rules);
+ if (val & QCA808X_LED_HALF_DUPLEX_ON)
+ set_bit(TRIGGER_NETDEV_HALF_DUPLEX, rules);
+ if (val & QCA808X_LED_FULL_DUPLEX_ON)
+ set_bit(TRIGGER_NETDEV_FULL_DUPLEX, rules);
+
+ return 0;
+}
+
+static int qca808x_led_hw_control_reset(struct phy_device *phydev, u8 index)
+{
+ u16 reg;
+
+ if (index > 2)
+ return -EINVAL;
+
+ reg = QCA808X_MMD7_LED_CTRL(index);
+
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_AN, reg,
+ QCA808X_LED_PATTERN_MASK);
+}
+
+static int qca808x_led_brightness_set(struct phy_device *phydev,
+ u8 index, enum led_brightness value)
+{
+ u16 reg;
+ int ret;
+
+ if (index > 2)
+ return -EINVAL;
+
+ if (!value) {
+ ret = qca808x_led_hw_control_reset(phydev, index);
+ if (ret)
+ return ret;
+ }
+
+ reg = QCA808X_MMD7_LED_FORCE_CTRL(index);
+
+ return phy_modify_mmd(phydev, MDIO_MMD_AN, reg,
+ QCA808X_LED_FORCE_EN | QCA808X_LED_FORCE_MODE_MASK,
+ QCA808X_LED_FORCE_EN | value ? QCA808X_LED_FORCE_ON :
+ QCA808X_LED_FORCE_OFF);
+}
+
+static int qca808x_led_blink_set(struct phy_device *phydev, u8 index,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ int ret;
+ u16 reg;
+
+ if (index > 2)
+ return -EINVAL;
+
+ reg = QCA808X_MMD7_LED_FORCE_CTRL(index);
+
+ /* Set blink to 50% off, 50% on at 4Hz by default */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_AN, QCA808X_MMD7_LED_GLOBAL,
+ QCA808X_LED_BLINK_FREQ_MASK | QCA808X_LED_BLINK_DUTY_MASK,
+ QCA808X_LED_BLINK_FREQ_4HZ | QCA808X_LED_BLINK_DUTY_50_50);
+ if (ret)
+ return ret;
+
+ /* We use BLINK_1 for normal blinking */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_AN, reg,
+ QCA808X_LED_FORCE_EN | QCA808X_LED_FORCE_MODE_MASK,
+ QCA808X_LED_FORCE_EN | QCA808X_LED_FORCE_BLINK_1);
+ if (ret)
+ return ret;
+
+ /* We set blink to 4Hz, aka 250ms */
+ *delay_on = 250 / 2;
+ *delay_off = 250 / 2;
+
+ return 0;
+}
+
+static int qca808x_led_polarity_set(struct phy_device *phydev, int index,
+ unsigned long modes)
+{
+ struct at803x_priv *priv = phydev->priv;
+ bool active_low = false;
+ u32 mode;
+
+ for_each_set_bit(mode, &modes, __PHY_LED_MODES_NUM) {
+ switch (mode) {
+ case PHY_LED_ACTIVE_LOW:
+ active_low = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ /* PHY polarity is global and can't be set per LED.
+ * To detect this, check if last requested polarity mode
+ * match the new one.
+ */
+ if (priv->led_polarity_mode >= 0 &&
+ priv->led_polarity_mode != active_low) {
+ phydev_err(phydev, "PHY polarity is global. Mismatched polarity on different LED\n");
+ return -EINVAL;
+ }
+
+ /* Save the last PHY polarity mode */
+ priv->led_polarity_mode = active_low;
+
+ return phy_modify_mmd(phydev, MDIO_MMD_AN,
+ QCA808X_MMD7_LED_POLARITY_CTRL,
+ QCA808X_LED_ACTIVE_HIGH,
+ active_low ? 0 : QCA808X_LED_ACTIVE_HIGH);
+}
+
static struct phy_driver at803x_driver[] = {
{
/* Qualcomm Atheros AR8035 */
@@ -2411,6 +2732,12 @@ static struct phy_driver at803x_driver[]
.cable_test_start = qca808x_cable_test_start,
.cable_test_get_status = qca808x_cable_test_get_status,
.link_change_notify = qca808x_link_change_notify,
+ .led_brightness_set = qca808x_led_brightness_set,
+ .led_blink_set = qca808x_led_blink_set,
+ .led_hw_is_supported = qca808x_led_hw_is_supported,
+ .led_hw_control_set = qca808x_led_hw_control_set,
+ .led_hw_control_get = qca808x_led_hw_control_get,
+ .led_polarity_set = qca808x_led_polarity_set,
}, };
module_phy_driver(at803x_driver);
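Two details of the LED patch that are easy to miss: the per-LED control/force registers step downward from LED0 by two, and the default blink programmed by qca808x_led_blink_set() is 4 Hz with a 50/50 duty cycle, i.e. 125 ms on / 125 ms off. A standalone sketch, not part of the patch, reproducing both:

/* Illustrative sketch, not part of the patch. */
#include <stdio.h>

#define QCA808X_MMD7_LED_CTRL(x)	(0x8078 - ((x) * 2))
#define QCA808X_MMD7_LED_FORCE_CTRL(x)	(0x8079 - ((x) * 2))

int main(void)
{
	unsigned long period_ms = 1000 / 4;	/* 4 Hz blink */
	unsigned long delay_on = period_ms / 2;
	unsigned long delay_off = period_ms / 2;

	for (int i = 0; i < 3; i++)
		printf("LED%d: ctrl=0x%04x force=0x%04x\n", i,
		       QCA808X_MMD7_LED_CTRL(i),
		       QCA808X_MMD7_LED_FORCE_CTRL(i));

	printf("default blink: %lu ms on / %lu ms off\n", delay_on, delay_off);
	return 0;
}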

View file

@ -0,0 +1,243 @@
From 6fb760972c49490b03f3db2ad64cf30bdd28c54a Mon Sep 17 00:00:00 2001
From: Christian Marangi <ansuelsmth@gmail.com>
Date: Mon, 29 Jan 2024 15:15:20 +0100
Subject: [PATCH 2/5] net: phy: qcom: create and move functions to shared
library
Create and move functions to a shared library in preparation for the qca83xx
PHY family to be detached from the at803x driver.
Only the shared defines are moved to the shared qcom.h header.
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
Reviewed-by: Andrew Lunn <andrew@lunn.ch>
Link: https://lore.kernel.org/r/20240129141600.2592-3-ansuelsmth@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
drivers/net/phy/qcom/Kconfig | 4 ++
drivers/net/phy/qcom/Makefile | 1 +
drivers/net/phy/qcom/at803x.c | 69 +----------------------------
drivers/net/phy/qcom/qcom-phy-lib.c | 53 ++++++++++++++++++++++
drivers/net/phy/qcom/qcom.h | 34 ++++++++++++++
5 files changed, 94 insertions(+), 67 deletions(-)
create mode 100644 drivers/net/phy/qcom/qcom-phy-lib.c
create mode 100644 drivers/net/phy/qcom/qcom.h
--- a/drivers/net/phy/qcom/Kconfig
+++ b/drivers/net/phy/qcom/Kconfig
@@ -1,6 +1,10 @@
# SPDX-License-Identifier: GPL-2.0-only
+config QCOM_NET_PHYLIB
+ tristate
+
config AT803X_PHY
tristate "Qualcomm Atheros AR803X PHYs and QCA833x PHYs"
+ select QCOM_NET_PHYLIB
depends on REGULATOR
help
Currently supports the AR8030, AR8031, AR8033, AR8035 and internal
--- a/drivers/net/phy/qcom/Makefile
+++ b/drivers/net/phy/qcom/Makefile
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_QCOM_NET_PHYLIB) += qcom-phy-lib.o
obj-$(CONFIG_AT803X_PHY) += at803x.o
--- a/drivers/net/phy/qcom/at803x.c
+++ b/drivers/net/phy/qcom/at803x.c
@@ -22,6 +22,8 @@
#include <linux/sfp.h>
#include <dt-bindings/net/qca-ar803x.h>
+#include "qcom.h"
+
#define AT803X_SPECIFIC_FUNCTION_CONTROL 0x10
#define AT803X_SFC_ASSERT_CRS BIT(11)
#define AT803X_SFC_FORCE_LINK BIT(10)
@@ -84,9 +86,6 @@
#define AT803X_REG_CHIP_CONFIG 0x1f
#define AT803X_BT_BX_REG_SEL 0x8000
-#define AT803X_DEBUG_ADDR 0x1D
-#define AT803X_DEBUG_DATA 0x1E
-
#define AT803X_MODE_CFG_MASK 0x0F
#define AT803X_MODE_CFG_BASET_RGMII 0x00
#define AT803X_MODE_CFG_BASET_SGMII 0x01
@@ -103,19 +102,6 @@
#define AT803X_PSSR 0x11 /*PHY-Specific Status Register*/
#define AT803X_PSSR_MR_AN_COMPLETE 0x0200
-#define AT803X_DEBUG_ANALOG_TEST_CTRL 0x00
-#define QCA8327_DEBUG_MANU_CTRL_EN BIT(2)
-#define QCA8337_DEBUG_MANU_CTRL_EN GENMASK(3, 2)
-#define AT803X_DEBUG_RX_CLK_DLY_EN BIT(15)
-
-#define AT803X_DEBUG_SYSTEM_CTRL_MODE 0x05
-#define AT803X_DEBUG_TX_CLK_DLY_EN BIT(8)
-
-#define AT803X_DEBUG_REG_HIB_CTRL 0x0b
-#define AT803X_DEBUG_HIB_CTRL_SEL_RST_80U BIT(10)
-#define AT803X_DEBUG_HIB_CTRL_EN_ANY_CHANGE BIT(13)
-#define AT803X_DEBUG_HIB_CTRL_PS_HIB_EN BIT(15)
-
#define AT803X_DEBUG_REG_3C 0x3C
#define AT803X_DEBUG_REG_GREEN 0x3D
@@ -393,18 +379,6 @@ MODULE_DESCRIPTION("Qualcomm Atheros AR8
MODULE_AUTHOR("Matus Ujhelyi");
MODULE_LICENSE("GPL");
-enum stat_access_type {
- PHY,
- MMD
-};
-
-struct at803x_hw_stat {
- const char *string;
- u8 reg;
- u32 mask;
- enum stat_access_type access_type;
-};
-
static struct at803x_hw_stat qca83xx_hw_stats[] = {
{ "phy_idle_errors", 0xa, GENMASK(7, 0), PHY},
{ "phy_receive_errors", 0x15, GENMASK(15, 0), PHY},
@@ -439,45 +413,6 @@ struct at803x_context {
u16 led_control;
};
-static int at803x_debug_reg_write(struct phy_device *phydev, u16 reg, u16 data)
-{
- int ret;
-
- ret = phy_write(phydev, AT803X_DEBUG_ADDR, reg);
- if (ret < 0)
- return ret;
-
- return phy_write(phydev, AT803X_DEBUG_DATA, data);
-}
-
-static int at803x_debug_reg_read(struct phy_device *phydev, u16 reg)
-{
- int ret;
-
- ret = phy_write(phydev, AT803X_DEBUG_ADDR, reg);
- if (ret < 0)
- return ret;
-
- return phy_read(phydev, AT803X_DEBUG_DATA);
-}
-
-static int at803x_debug_reg_mask(struct phy_device *phydev, u16 reg,
- u16 clear, u16 set)
-{
- u16 val;
- int ret;
-
- ret = at803x_debug_reg_read(phydev, reg);
- if (ret < 0)
- return ret;
-
- val = ret & 0xffff;
- val &= ~clear;
- val |= set;
-
- return phy_write(phydev, AT803X_DEBUG_DATA, val);
-}
-
static int at803x_write_page(struct phy_device *phydev, int page)
{
int mask;
--- /dev/null
+++ b/drivers/net/phy/qcom/qcom-phy-lib.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/phy.h>
+#include <linux/module.h>
+
+#include "qcom.h"
+
+MODULE_DESCRIPTION("Qualcomm PHY driver Common Functions");
+MODULE_AUTHOR("Matus Ujhelyi");
+MODULE_AUTHOR("Christian Marangi <ansuelsmth@gmail.com>");
+MODULE_LICENSE("GPL");
+
+int at803x_debug_reg_read(struct phy_device *phydev, u16 reg)
+{
+ int ret;
+
+ ret = phy_write(phydev, AT803X_DEBUG_ADDR, reg);
+ if (ret < 0)
+ return ret;
+
+ return phy_read(phydev, AT803X_DEBUG_DATA);
+}
+EXPORT_SYMBOL_GPL(at803x_debug_reg_read);
+
+int at803x_debug_reg_mask(struct phy_device *phydev, u16 reg,
+ u16 clear, u16 set)
+{
+ u16 val;
+ int ret;
+
+ ret = at803x_debug_reg_read(phydev, reg);
+ if (ret < 0)
+ return ret;
+
+ val = ret & 0xffff;
+ val &= ~clear;
+ val |= set;
+
+ return phy_write(phydev, AT803X_DEBUG_DATA, val);
+}
+EXPORT_SYMBOL_GPL(at803x_debug_reg_mask);
+
+int at803x_debug_reg_write(struct phy_device *phydev, u16 reg, u16 data)
+{
+ int ret;
+
+ ret = phy_write(phydev, AT803X_DEBUG_ADDR, reg);
+ if (ret < 0)
+ return ret;
+
+ return phy_write(phydev, AT803X_DEBUG_DATA, data);
+}
+EXPORT_SYMBOL_GPL(at803x_debug_reg_write);
--- /dev/null
+++ b/drivers/net/phy/qcom/qcom.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#define AT803X_DEBUG_ADDR 0x1D
+#define AT803X_DEBUG_DATA 0x1E
+
+#define AT803X_DEBUG_ANALOG_TEST_CTRL 0x00
+#define QCA8327_DEBUG_MANU_CTRL_EN BIT(2)
+#define QCA8337_DEBUG_MANU_CTRL_EN GENMASK(3, 2)
+#define AT803X_DEBUG_RX_CLK_DLY_EN BIT(15)
+
+#define AT803X_DEBUG_SYSTEM_CTRL_MODE 0x05
+#define AT803X_DEBUG_TX_CLK_DLY_EN BIT(8)
+
+#define AT803X_DEBUG_REG_HIB_CTRL 0x0b
+#define AT803X_DEBUG_HIB_CTRL_SEL_RST_80U BIT(10)
+#define AT803X_DEBUG_HIB_CTRL_EN_ANY_CHANGE BIT(13)
+#define AT803X_DEBUG_HIB_CTRL_PS_HIB_EN BIT(15)
+
+enum stat_access_type {
+ PHY,
+ MMD
+};
+
+struct at803x_hw_stat {
+ const char *string;
+ u8 reg;
+ u32 mask;
+ enum stat_access_type access_type;
+};
+
+int at803x_debug_reg_read(struct phy_device *phydev, u16 reg);
+int at803x_debug_reg_mask(struct phy_device *phydev, u16 reg,
+ u16 clear, u16 set);
+int at803x_debug_reg_write(struct phy_device *phydev, u16 reg, u16 data);
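
For illustration only (this sketch is not part of the patch above): a PHY
driver linking against qcom-phy-lib could toggle bits in the vendor
debug-register space through the exported helpers roughly as below. The
example_* function names are invented; the register and bit defines are the
ones declared in qcom.h.

#include <linux/phy.h>

#include "qcom.h"

/* Enable the RGMII RX clock delay: set the delay bit, leave the rest
 * of the analog test control register untouched.
 */
static int example_enable_rx_delay(struct phy_device *phydev)
{
	return at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
				     0, AT803X_DEBUG_RX_CLK_DLY_EN);
}

/* Disable the RGMII TX clock delay in the system control mode register. */
static int example_disable_tx_delay(struct phy_device *phydev)
{
	return at803x_debug_reg_mask(phydev, AT803X_DEBUG_SYSTEM_CTRL_MODE,
				     AT803X_DEBUG_TX_CLK_DLY_EN, 0);
}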

View file

@ -0,0 +1,638 @@
From 2e45d404d99d43bb7127b74b5dea8818df64996c Mon Sep 17 00:00:00 2001
From: Christian Marangi <ansuelsmth@gmail.com>
Date: Mon, 29 Jan 2024 15:15:21 +0100
Subject: [PATCH 3/5] net: phy: qcom: detach qca83xx PHY driver from at803x
Detach the qca83xx PHY driver from at803x.
The QCA83xx PHYs implement specific functions and don't use the generic
at803x ones, so they can be detached from that driver and moved to a
dedicated one.
The probe function and priv struct are reimplemented to allocate and use
only the qca83xx specific data; unused fields are dropped from the
at803x priv struct.
This makes for slimmer PHY drivers instead of including lots of bloat
that would never be used on a specific SoC.
A new Kconfig flag QCA83XX_PHY is introduced to compile the newly
introduced PHY driver.
As the Kconfig name starts with Qualcomm the same order is kept.
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
Reviewed-by: Andrew Lunn <andrew@lunn.ch>
Link: https://lore.kernel.org/r/20240129141600.2592-4-ansuelsmth@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
drivers/net/phy/qcom/Kconfig | 11 +-
drivers/net/phy/qcom/Makefile | 1 +
drivers/net/phy/qcom/at803x.c | 235 ----------------------------
drivers/net/phy/qcom/qca83xx.c | 275 +++++++++++++++++++++++++++++++++
4 files changed, 284 insertions(+), 238 deletions(-)
create mode 100644 drivers/net/phy/qcom/qca83xx.c
--- a/drivers/net/phy/qcom/Kconfig
+++ b/drivers/net/phy/qcom/Kconfig
@@ -3,9 +3,14 @@ config QCOM_NET_PHYLIB
tristate
config AT803X_PHY
- tristate "Qualcomm Atheros AR803X PHYs and QCA833x PHYs"
+ tristate "Qualcomm Atheros AR803X PHYs"
select QCOM_NET_PHYLIB
depends on REGULATOR
help
- Currently supports the AR8030, AR8031, AR8033, AR8035 and internal
- QCA8337(Internal qca8k PHY) model
+ Currently supports the AR8030, AR8031, AR8033, AR8035 model
+
+config QCA83XX_PHY
+ tristate "Qualcomm Atheros QCA833x PHYs"
+ select QCOM_NET_PHYLIB
+ help
+ Currently supports the internal QCA8337(Internal qca8k PHY) model
--- a/drivers/net/phy/qcom/Makefile
+++ b/drivers/net/phy/qcom/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_QCOM_NET_PHYLIB) += qcom-phy-lib.o
obj-$(CONFIG_AT803X_PHY) += at803x.o
+obj-$(CONFIG_QCA83XX_PHY) += qca83xx.o
--- a/drivers/net/phy/qcom/at803x.c
+++ b/drivers/net/phy/qcom/at803x.c
@@ -102,17 +102,10 @@
#define AT803X_PSSR 0x11 /*PHY-Specific Status Register*/
#define AT803X_PSSR_MR_AN_COMPLETE 0x0200
-#define AT803X_DEBUG_REG_3C 0x3C
-
-#define AT803X_DEBUG_REG_GREEN 0x3D
-#define AT803X_DEBUG_GATE_CLK_IN1000 BIT(6)
-
#define AT803X_DEBUG_REG_1F 0x1F
#define AT803X_DEBUG_PLL_ON BIT(2)
#define AT803X_DEBUG_RGMII_1V8 BIT(3)
-#define MDIO_AZ_DEBUG 0x800D
-
/* AT803x supports either the XTAL input pad, an internal PLL or the
* DSP as clock reference for the clock output pad. The XTAL reference
* is only used for 25 MHz output, all other frequencies need the PLL.
@@ -163,13 +156,7 @@
#define QCA8081_PHY_ID 0x004dd101
-#define QCA8327_A_PHY_ID 0x004dd033
-#define QCA8327_B_PHY_ID 0x004dd034
-#define QCA8337_PHY_ID 0x004dd036
#define QCA9561_PHY_ID 0x004dd042
-#define QCA8K_PHY_ID_MASK 0xffffffff
-
-#define QCA8K_DEVFLAGS_REVISION_MASK GENMASK(2, 0)
#define AT803X_PAGE_FIBER 0
#define AT803X_PAGE_COPPER 1
@@ -379,12 +366,6 @@ MODULE_DESCRIPTION("Qualcomm Atheros AR8
MODULE_AUTHOR("Matus Ujhelyi");
MODULE_LICENSE("GPL");
-static struct at803x_hw_stat qca83xx_hw_stats[] = {
- { "phy_idle_errors", 0xa, GENMASK(7, 0), PHY},
- { "phy_receive_errors", 0x15, GENMASK(15, 0), PHY},
- { "eee_wake_errors", 0x16, GENMASK(15, 0), MMD},
-};
-
struct at803x_ss_mask {
u16 speed_mask;
u8 speed_shift;
@@ -400,7 +381,6 @@ struct at803x_priv {
bool is_1000basex;
struct regulator_dev *vddio_rdev;
struct regulator_dev *vddh_rdev;
- u64 stats[ARRAY_SIZE(qca83xx_hw_stats)];
int led_polarity_mode;
};
@@ -564,53 +544,6 @@ static void at803x_get_wol(struct phy_de
wol->wolopts |= WAKE_MAGIC;
}
-static int qca83xx_get_sset_count(struct phy_device *phydev)
-{
- return ARRAY_SIZE(qca83xx_hw_stats);
-}
-
-static void qca83xx_get_strings(struct phy_device *phydev, u8 *data)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(qca83xx_hw_stats); i++) {
- strscpy(data + i * ETH_GSTRING_LEN,
- qca83xx_hw_stats[i].string, ETH_GSTRING_LEN);
- }
-}
-
-static u64 qca83xx_get_stat(struct phy_device *phydev, int i)
-{
- struct at803x_hw_stat stat = qca83xx_hw_stats[i];
- struct at803x_priv *priv = phydev->priv;
- int val;
- u64 ret;
-
- if (stat.access_type == MMD)
- val = phy_read_mmd(phydev, MDIO_MMD_PCS, stat.reg);
- else
- val = phy_read(phydev, stat.reg);
-
- if (val < 0) {
- ret = U64_MAX;
- } else {
- val = val & stat.mask;
- priv->stats[i] += val;
- ret = priv->stats[i];
- }
-
- return ret;
-}
-
-static void qca83xx_get_stats(struct phy_device *phydev,
- struct ethtool_stats *stats, u64 *data)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(qca83xx_hw_stats); i++)
- data[i] = qca83xx_get_stat(phydev, i);
-}
-
static int at803x_suspend(struct phy_device *phydev)
{
int value;
@@ -1707,124 +1640,6 @@ static int at8035_probe(struct phy_devic
return at8035_parse_dt(phydev);
}
-static int qca83xx_config_init(struct phy_device *phydev)
-{
- u8 switch_revision;
-
- switch_revision = phydev->dev_flags & QCA8K_DEVFLAGS_REVISION_MASK;
-
- switch (switch_revision) {
- case 1:
- /* For 100M waveform */
- at803x_debug_reg_write(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL, 0x02ea);
- /* Turn on Gigabit clock */
- at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_GREEN, 0x68a0);
- break;
-
- case 2:
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0x0);
- fallthrough;
- case 4:
- phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_AZ_DEBUG, 0x803f);
- at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_GREEN, 0x6860);
- at803x_debug_reg_write(phydev, AT803X_DEBUG_SYSTEM_CTRL_MODE, 0x2c46);
- at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_3C, 0x6000);
- break;
- }
-
- /* Following original QCA sourcecode set port to prefer master */
- phy_set_bits(phydev, MII_CTRL1000, CTL1000_PREFER_MASTER);
-
- return 0;
-}
-
-static int qca8327_config_init(struct phy_device *phydev)
-{
- /* QCA8327 require DAC amplitude adjustment for 100m set to +6%.
- * Disable on init and enable only with 100m speed following
- * qca original source code.
- */
- at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
- QCA8327_DEBUG_MANU_CTRL_EN, 0);
-
- return qca83xx_config_init(phydev);
-}
-
-static void qca83xx_link_change_notify(struct phy_device *phydev)
-{
- /* Set DAC Amplitude adjustment to +6% for 100m on link running */
- if (phydev->state == PHY_RUNNING) {
- if (phydev->speed == SPEED_100)
- at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
- QCA8327_DEBUG_MANU_CTRL_EN,
- QCA8327_DEBUG_MANU_CTRL_EN);
- } else {
- /* Reset DAC Amplitude adjustment */
- at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
- QCA8327_DEBUG_MANU_CTRL_EN, 0);
- }
-}
-
-static int qca83xx_resume(struct phy_device *phydev)
-{
- int ret, val;
-
- /* Skip reset if not suspended */
- if (!phydev->suspended)
- return 0;
-
- /* Reinit the port, reset values set by suspend */
- qca83xx_config_init(phydev);
-
- /* Reset the port on port resume */
- phy_set_bits(phydev, MII_BMCR, BMCR_RESET | BMCR_ANENABLE);
-
- /* On resume from suspend the switch execute a reset and
- * restart auto-negotiation. Wait for reset to complete.
- */
- ret = phy_read_poll_timeout(phydev, MII_BMCR, val, !(val & BMCR_RESET),
- 50000, 600000, true);
- if (ret)
- return ret;
-
- usleep_range(1000, 2000);
-
- return 0;
-}
-
-static int qca83xx_suspend(struct phy_device *phydev)
-{
- at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_GREEN,
- AT803X_DEBUG_GATE_CLK_IN1000, 0);
-
- at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_HIB_CTRL,
- AT803X_DEBUG_HIB_CTRL_EN_ANY_CHANGE |
- AT803X_DEBUG_HIB_CTRL_SEL_RST_80U, 0);
-
- return 0;
-}
-
-static int qca8337_suspend(struct phy_device *phydev)
-{
- /* Only QCA8337 support actual suspend. */
- genphy_suspend(phydev);
-
- return qca83xx_suspend(phydev);
-}
-
-static int qca8327_suspend(struct phy_device *phydev)
-{
- u16 mask = 0;
-
- /* QCA8327 cause port unreliability when phy suspend
- * is set.
- */
- mask |= ~(BMCR_SPEED1000 | BMCR_FULLDPLX);
- phy_modify(phydev, MII_BMCR, mask, 0);
-
- return qca83xx_suspend(phydev);
-}
-
static int qca808x_phy_fast_retrain_config(struct phy_device *phydev)
{
int ret;
@@ -2599,53 +2414,6 @@ static struct phy_driver at803x_driver[]
.soft_reset = genphy_soft_reset,
.config_aneg = at803x_config_aneg,
}, {
- /* QCA8337 */
- .phy_id = QCA8337_PHY_ID,
- .phy_id_mask = QCA8K_PHY_ID_MASK,
- .name = "Qualcomm Atheros 8337 internal PHY",
- /* PHY_GBIT_FEATURES */
- .probe = at803x_probe,
- .flags = PHY_IS_INTERNAL,
- .config_init = qca83xx_config_init,
- .soft_reset = genphy_soft_reset,
- .get_sset_count = qca83xx_get_sset_count,
- .get_strings = qca83xx_get_strings,
- .get_stats = qca83xx_get_stats,
- .suspend = qca8337_suspend,
- .resume = qca83xx_resume,
-}, {
- /* QCA8327-A from switch QCA8327-AL1A */
- .phy_id = QCA8327_A_PHY_ID,
- .phy_id_mask = QCA8K_PHY_ID_MASK,
- .name = "Qualcomm Atheros 8327-A internal PHY",
- /* PHY_GBIT_FEATURES */
- .link_change_notify = qca83xx_link_change_notify,
- .probe = at803x_probe,
- .flags = PHY_IS_INTERNAL,
- .config_init = qca8327_config_init,
- .soft_reset = genphy_soft_reset,
- .get_sset_count = qca83xx_get_sset_count,
- .get_strings = qca83xx_get_strings,
- .get_stats = qca83xx_get_stats,
- .suspend = qca8327_suspend,
- .resume = qca83xx_resume,
-}, {
- /* QCA8327-B from switch QCA8327-BL1A */
- .phy_id = QCA8327_B_PHY_ID,
- .phy_id_mask = QCA8K_PHY_ID_MASK,
- .name = "Qualcomm Atheros 8327-B internal PHY",
- /* PHY_GBIT_FEATURES */
- .link_change_notify = qca83xx_link_change_notify,
- .probe = at803x_probe,
- .flags = PHY_IS_INTERNAL,
- .config_init = qca8327_config_init,
- .soft_reset = genphy_soft_reset,
- .get_sset_count = qca83xx_get_sset_count,
- .get_strings = qca83xx_get_strings,
- .get_stats = qca83xx_get_stats,
- .suspend = qca8327_suspend,
- .resume = qca83xx_resume,
-}, {
/* Qualcomm QCA8081 */
PHY_ID_MATCH_EXACT(QCA8081_PHY_ID),
.name = "Qualcomm QCA8081",
@@ -2683,9 +2451,6 @@ static struct mdio_device_id __maybe_unu
{ PHY_ID_MATCH_EXACT(ATH8032_PHY_ID) },
{ PHY_ID_MATCH_EXACT(ATH8035_PHY_ID) },
{ PHY_ID_MATCH_EXACT(ATH9331_PHY_ID) },
- { PHY_ID_MATCH_EXACT(QCA8337_PHY_ID) },
- { PHY_ID_MATCH_EXACT(QCA8327_A_PHY_ID) },
- { PHY_ID_MATCH_EXACT(QCA8327_B_PHY_ID) },
{ PHY_ID_MATCH_EXACT(QCA9561_PHY_ID) },
{ PHY_ID_MATCH_EXACT(QCA8081_PHY_ID) },
{ }
--- /dev/null
+++ b/drivers/net/phy/qcom/qca83xx.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/phy.h>
+#include <linux/module.h>
+
+#include "qcom.h"
+
+#define AT803X_DEBUG_REG_3C 0x3C
+
+#define AT803X_DEBUG_REG_GREEN 0x3D
+#define AT803X_DEBUG_GATE_CLK_IN1000 BIT(6)
+
+#define MDIO_AZ_DEBUG 0x800D
+
+#define QCA8327_A_PHY_ID 0x004dd033
+#define QCA8327_B_PHY_ID 0x004dd034
+#define QCA8337_PHY_ID 0x004dd036
+#define QCA8K_PHY_ID_MASK 0xffffffff
+
+#define QCA8K_DEVFLAGS_REVISION_MASK GENMASK(2, 0)
+
+static struct at803x_hw_stat qca83xx_hw_stats[] = {
+ { "phy_idle_errors", 0xa, GENMASK(7, 0), PHY},
+ { "phy_receive_errors", 0x15, GENMASK(15, 0), PHY},
+ { "eee_wake_errors", 0x16, GENMASK(15, 0), MMD},
+};
+
+struct qca83xx_priv {
+ u64 stats[ARRAY_SIZE(qca83xx_hw_stats)];
+};
+
+MODULE_DESCRIPTION("Qualcomm Atheros QCA83XX PHY driver");
+MODULE_AUTHOR("Matus Ujhelyi");
+MODULE_AUTHOR("Christian Marangi <ansuelsmth@gmail.com>");
+MODULE_LICENSE("GPL");
+
+static int qca83xx_get_sset_count(struct phy_device *phydev)
+{
+ return ARRAY_SIZE(qca83xx_hw_stats);
+}
+
+static void qca83xx_get_strings(struct phy_device *phydev, u8 *data)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qca83xx_hw_stats); i++) {
+ strscpy(data + i * ETH_GSTRING_LEN,
+ qca83xx_hw_stats[i].string, ETH_GSTRING_LEN);
+ }
+}
+
+static u64 qca83xx_get_stat(struct phy_device *phydev, int i)
+{
+ struct at803x_hw_stat stat = qca83xx_hw_stats[i];
+ struct qca83xx_priv *priv = phydev->priv;
+ int val;
+ u64 ret;
+
+ if (stat.access_type == MMD)
+ val = phy_read_mmd(phydev, MDIO_MMD_PCS, stat.reg);
+ else
+ val = phy_read(phydev, stat.reg);
+
+ if (val < 0) {
+ ret = U64_MAX;
+ } else {
+ val = val & stat.mask;
+ priv->stats[i] += val;
+ ret = priv->stats[i];
+ }
+
+ return ret;
+}
+
+static void qca83xx_get_stats(struct phy_device *phydev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qca83xx_hw_stats); i++)
+ data[i] = qca83xx_get_stat(phydev, i);
+}
+
+static int qca83xx_probe(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct qca83xx_priv *priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ phydev->priv = priv;
+
+ return 0;
+}
+
+static int qca83xx_config_init(struct phy_device *phydev)
+{
+ u8 switch_revision;
+
+ switch_revision = phydev->dev_flags & QCA8K_DEVFLAGS_REVISION_MASK;
+
+ switch (switch_revision) {
+ case 1:
+ /* For 100M waveform */
+ at803x_debug_reg_write(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL, 0x02ea);
+ /* Turn on Gigabit clock */
+ at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_GREEN, 0x68a0);
+ break;
+
+ case 2:
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0x0);
+ fallthrough;
+ case 4:
+ phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_AZ_DEBUG, 0x803f);
+ at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_GREEN, 0x6860);
+ at803x_debug_reg_write(phydev, AT803X_DEBUG_SYSTEM_CTRL_MODE, 0x2c46);
+ at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_3C, 0x6000);
+ break;
+ }
+
+ /* Following original QCA sourcecode set port to prefer master */
+ phy_set_bits(phydev, MII_CTRL1000, CTL1000_PREFER_MASTER);
+
+ return 0;
+}
+
+static int qca8327_config_init(struct phy_device *phydev)
+{
+ /* QCA8327 require DAC amplitude adjustment for 100m set to +6%.
+ * Disable on init and enable only with 100m speed following
+ * qca original source code.
+ */
+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
+ QCA8327_DEBUG_MANU_CTRL_EN, 0);
+
+ return qca83xx_config_init(phydev);
+}
+
+static void qca83xx_link_change_notify(struct phy_device *phydev)
+{
+ /* Set DAC Amplitude adjustment to +6% for 100m on link running */
+ if (phydev->state == PHY_RUNNING) {
+ if (phydev->speed == SPEED_100)
+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
+ QCA8327_DEBUG_MANU_CTRL_EN,
+ QCA8327_DEBUG_MANU_CTRL_EN);
+ } else {
+ /* Reset DAC Amplitude adjustment */
+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
+ QCA8327_DEBUG_MANU_CTRL_EN, 0);
+ }
+}
+
+static int qca83xx_resume(struct phy_device *phydev)
+{
+ int ret, val;
+
+ /* Skip reset if not suspended */
+ if (!phydev->suspended)
+ return 0;
+
+ /* Reinit the port, reset values set by suspend */
+ qca83xx_config_init(phydev);
+
+ /* Reset the port on port resume */
+ phy_set_bits(phydev, MII_BMCR, BMCR_RESET | BMCR_ANENABLE);
+
+ /* On resume from suspend the switch execute a reset and
+ * restart auto-negotiation. Wait for reset to complete.
+ */
+ ret = phy_read_poll_timeout(phydev, MII_BMCR, val, !(val & BMCR_RESET),
+ 50000, 600000, true);
+ if (ret)
+ return ret;
+
+ usleep_range(1000, 2000);
+
+ return 0;
+}
+
+static int qca83xx_suspend(struct phy_device *phydev)
+{
+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_GREEN,
+ AT803X_DEBUG_GATE_CLK_IN1000, 0);
+
+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_HIB_CTRL,
+ AT803X_DEBUG_HIB_CTRL_EN_ANY_CHANGE |
+ AT803X_DEBUG_HIB_CTRL_SEL_RST_80U, 0);
+
+ return 0;
+}
+
+static int qca8337_suspend(struct phy_device *phydev)
+{
+ /* Only QCA8337 support actual suspend. */
+ genphy_suspend(phydev);
+
+ return qca83xx_suspend(phydev);
+}
+
+static int qca8327_suspend(struct phy_device *phydev)
+{
+ u16 mask = 0;
+
+ /* QCA8327 cause port unreliability when phy suspend
+ * is set.
+ */
+ mask |= ~(BMCR_SPEED1000 | BMCR_FULLDPLX);
+ phy_modify(phydev, MII_BMCR, mask, 0);
+
+ return qca83xx_suspend(phydev);
+}
+
+static struct phy_driver qca83xx_driver[] = {
+{
+ /* QCA8337 */
+ .phy_id = QCA8337_PHY_ID,
+ .phy_id_mask = QCA8K_PHY_ID_MASK,
+ .name = "Qualcomm Atheros 8337 internal PHY",
+ /* PHY_GBIT_FEATURES */
+ .probe = qca83xx_probe,
+ .flags = PHY_IS_INTERNAL,
+ .config_init = qca83xx_config_init,
+ .soft_reset = genphy_soft_reset,
+ .get_sset_count = qca83xx_get_sset_count,
+ .get_strings = qca83xx_get_strings,
+ .get_stats = qca83xx_get_stats,
+ .suspend = qca8337_suspend,
+ .resume = qca83xx_resume,
+}, {
+ /* QCA8327-A from switch QCA8327-AL1A */
+ .phy_id = QCA8327_A_PHY_ID,
+ .phy_id_mask = QCA8K_PHY_ID_MASK,
+ .name = "Qualcomm Atheros 8327-A internal PHY",
+ /* PHY_GBIT_FEATURES */
+ .link_change_notify = qca83xx_link_change_notify,
+ .probe = qca83xx_probe,
+ .flags = PHY_IS_INTERNAL,
+ .config_init = qca8327_config_init,
+ .soft_reset = genphy_soft_reset,
+ .get_sset_count = qca83xx_get_sset_count,
+ .get_strings = qca83xx_get_strings,
+ .get_stats = qca83xx_get_stats,
+ .suspend = qca8327_suspend,
+ .resume = qca83xx_resume,
+}, {
+ /* QCA8327-B from switch QCA8327-BL1A */
+ .phy_id = QCA8327_B_PHY_ID,
+ .phy_id_mask = QCA8K_PHY_ID_MASK,
+ .name = "Qualcomm Atheros 8327-B internal PHY",
+ /* PHY_GBIT_FEATURES */
+ .link_change_notify = qca83xx_link_change_notify,
+ .probe = qca83xx_probe,
+ .flags = PHY_IS_INTERNAL,
+ .config_init = qca8327_config_init,
+ .soft_reset = genphy_soft_reset,
+ .get_sset_count = qca83xx_get_sset_count,
+ .get_strings = qca83xx_get_strings,
+ .get_stats = qca83xx_get_stats,
+ .suspend = qca8327_suspend,
+ .resume = qca83xx_resume,
+}, };
+
+module_phy_driver(qca83xx_driver);
+
+static struct mdio_device_id __maybe_unused qca83xx_tbl[] = {
+ { PHY_ID_MATCH_EXACT(QCA8337_PHY_ID) },
+ { PHY_ID_MATCH_EXACT(QCA8327_A_PHY_ID) },
+ { PHY_ID_MATCH_EXACT(QCA8327_B_PHY_ID) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, qca83xx_tbl);

View file

@ -0,0 +1,28 @@
From ebb30ccbbdbd6fae5177b676da4f4ac92bb4f635 Mon Sep 17 00:00:00 2001
From: Christian Marangi <ansuelsmth@gmail.com>
Date: Fri, 15 Dec 2023 14:15:31 +0100
Subject: [PATCH 1/4] net: phy: make addr type u8 in phy_package_shared struct
Switch addr type in phy_package_shared struct to u8.
The value is already checked to be non-negative and less than
PHY_MAX_ADDR, hence u8 is better suited than int.
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
Reviewed-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
include/linux/phy.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -338,7 +338,7 @@ struct mdio_bus_stats {
* phy_package_leave().
*/
struct phy_package_shared {
- int addr;
+ u8 addr;
refcount_t refcnt;
unsigned long flags;
size_t priv_size;

View file

@ -0,0 +1,341 @@
From 9eea577eb1155fe4a183bc5e7bf269b0b2e7a6ba Mon Sep 17 00:00:00 2001
From: Christian Marangi <ansuelsmth@gmail.com>
Date: Fri, 15 Dec 2023 14:15:32 +0100
Subject: [PATCH 2/4] net: phy: extend PHY package API to support multiple
global address
Current API for PHY package are limited to single address to configure
global settings for the PHY package.
It was found that some PHY package (for example the qca807x, a PHY
package that is shipped with a bundle of 5 PHY) requires multiple PHY
address to configure global settings. An example scenario is a PHY that
have a dedicated PHY for PSGMII/serdes calibrarion and have a specific
PHY in the package where the global PHY mode is set and affects every
other PHY in the package.
Change the API in the following way:
- Change phy_package_join() to take the base addr of the PHY package
instead of the global PHY addr.
- Make __/phy_package_write/read() require an additional arg that
select what global PHY address to use by passing the offset from the
base addr passed on phy_package_join().
Each user of this API is updated to follow this new implementation
following a pattern where an enum is defined to declare the offset of the
addr.
We also drop the check for shared being defined, as any user of
phy_package_read/write is expected to call phy_package_join first. Misuse
will correctly trigger a kernel panic on a NULL pointer
dereference.
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/phy/bcm54140.c | 16 ++++++--
drivers/net/phy/mscc/mscc.h | 5 +++
drivers/net/phy/mscc/mscc_main.c | 4 +-
drivers/net/phy/phy_device.c | 35 +++++++++--------
include/linux/phy.h | 64 +++++++++++++++++++++-----------
5 files changed, 80 insertions(+), 44 deletions(-)
--- a/drivers/net/phy/bcm54140.c
+++ b/drivers/net/phy/bcm54140.c
@@ -128,6 +128,10 @@
#define BCM54140_DEFAULT_DOWNSHIFT 5
#define BCM54140_MAX_DOWNSHIFT 9
+enum bcm54140_global_phy {
+ BCM54140_BASE_ADDR = 0,
+};
+
struct bcm54140_priv {
int port;
int base_addr;
@@ -429,11 +433,13 @@ static int bcm54140_base_read_rdb(struct
int ret;
phy_lock_mdio_bus(phydev);
- ret = __phy_package_write(phydev, MII_BCM54XX_RDB_ADDR, rdb);
+ ret = __phy_package_write(phydev, BCM54140_BASE_ADDR,
+ MII_BCM54XX_RDB_ADDR, rdb);
if (ret < 0)
goto out;
- ret = __phy_package_read(phydev, MII_BCM54XX_RDB_DATA);
+ ret = __phy_package_read(phydev, BCM54140_BASE_ADDR,
+ MII_BCM54XX_RDB_DATA);
out:
phy_unlock_mdio_bus(phydev);
@@ -446,11 +452,13 @@ static int bcm54140_base_write_rdb(struc
int ret;
phy_lock_mdio_bus(phydev);
- ret = __phy_package_write(phydev, MII_BCM54XX_RDB_ADDR, rdb);
+ ret = __phy_package_write(phydev, BCM54140_BASE_ADDR,
+ MII_BCM54XX_RDB_ADDR, rdb);
if (ret < 0)
goto out;
- ret = __phy_package_write(phydev, MII_BCM54XX_RDB_DATA, val);
+ ret = __phy_package_write(phydev, BCM54140_BASE_ADDR,
+ MII_BCM54XX_RDB_DATA, val);
out:
phy_unlock_mdio_bus(phydev);
--- a/drivers/net/phy/mscc/mscc.h
+++ b/drivers/net/phy/mscc/mscc.h
@@ -416,6 +416,11 @@ struct vsc8531_private {
* gpio_lock: used for PHC operations. Common for all PHYs as the load/save GPIO
* is shared.
*/
+
+enum vsc85xx_global_phy {
+ VSC88XX_BASE_ADDR = 0,
+};
+
struct vsc85xx_shared_private {
struct mutex gpio_lock;
};
--- a/drivers/net/phy/mscc/mscc_main.c
+++ b/drivers/net/phy/mscc/mscc_main.c
@@ -711,7 +711,7 @@ int phy_base_write(struct phy_device *ph
dump_stack();
}
- return __phy_package_write(phydev, regnum, val);
+ return __phy_package_write(phydev, VSC88XX_BASE_ADDR, regnum, val);
}
/* phydev->bus->mdio_lock should be locked when using this function */
@@ -722,7 +722,7 @@ int phy_base_read(struct phy_device *phy
dump_stack();
}
- return __phy_package_read(phydev, regnum);
+ return __phy_package_read(phydev, VSC88XX_BASE_ADDR, regnum);
}
u32 vsc85xx_csr_read(struct phy_device *phydev,
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1658,20 +1658,22 @@ EXPORT_SYMBOL_GPL(phy_driver_is_genphy_1
/**
* phy_package_join - join a common PHY group
* @phydev: target phy_device struct
- * @addr: cookie and PHY address for global register access
+ * @base_addr: cookie and base PHY address of PHY package for offset
+ * calculation of global register access
* @priv_size: if non-zero allocate this amount of bytes for private data
*
* This joins a PHY group and provides a shared storage for all phydevs in
* this group. This is intended to be used for packages which contain
* more than one PHY, for example a quad PHY transceiver.
*
- * The addr parameter serves as a cookie which has to have the same value
- * for all members of one group and as a PHY address to access generic
- * registers of a PHY package. Usually, one of the PHY addresses of the
- * different PHYs in the package provides access to these global registers.
+ * The base_addr parameter serves as cookie which has to have the same values
+ * for all members of one group and as the base PHY address of the PHY package
+ * for offset calculation to access generic registers of a PHY package.
+ * Usually, one of the PHY addresses of the different PHYs in the package
+ * provides access to these global registers.
* The address which is given here, will be used in the phy_package_read()
- * and phy_package_write() convenience functions. If your PHY doesn't have
- * global registers you can just pick any of the PHY addresses.
+ * and phy_package_write() convenience functions as base and added to the
+ * passed offset in those functions.
*
* This will set the shared pointer of the phydev to the shared storage.
* If this is the first call for a this cookie the shared storage will be
@@ -1681,17 +1683,17 @@ EXPORT_SYMBOL_GPL(phy_driver_is_genphy_1
* Returns < 1 on error, 0 on success. Esp. calling phy_package_join()
* with the same cookie but a different priv_size is an error.
*/
-int phy_package_join(struct phy_device *phydev, int addr, size_t priv_size)
+int phy_package_join(struct phy_device *phydev, int base_addr, size_t priv_size)
{
struct mii_bus *bus = phydev->mdio.bus;
struct phy_package_shared *shared;
int ret;
- if (addr < 0 || addr >= PHY_MAX_ADDR)
+ if (base_addr < 0 || base_addr >= PHY_MAX_ADDR)
return -EINVAL;
mutex_lock(&bus->shared_lock);
- shared = bus->shared[addr];
+ shared = bus->shared[base_addr];
if (!shared) {
ret = -ENOMEM;
shared = kzalloc(sizeof(*shared), GFP_KERNEL);
@@ -1703,9 +1705,9 @@ int phy_package_join(struct phy_device *
goto err_free;
shared->priv_size = priv_size;
}
- shared->addr = addr;
+ shared->base_addr = base_addr;
refcount_set(&shared->refcnt, 1);
- bus->shared[addr] = shared;
+ bus->shared[base_addr] = shared;
} else {
ret = -EINVAL;
if (priv_size && priv_size != shared->priv_size)
@@ -1743,7 +1745,7 @@ void phy_package_leave(struct phy_device
return;
if (refcount_dec_and_mutex_lock(&shared->refcnt, &bus->shared_lock)) {
- bus->shared[shared->addr] = NULL;
+ bus->shared[shared->base_addr] = NULL;
mutex_unlock(&bus->shared_lock);
kfree(shared->priv);
kfree(shared);
@@ -1762,7 +1764,8 @@ static void devm_phy_package_leave(struc
* devm_phy_package_join - resource managed phy_package_join()
* @dev: device that is registering this PHY package
* @phydev: target phy_device struct
- * @addr: cookie and PHY address for global register access
+ * @base_addr: cookie and base PHY address of PHY package for offset
+ * calculation of global register access
* @priv_size: if non-zero allocate this amount of bytes for private data
*
* Managed phy_package_join(). Shared storage fetched by this function,
@@ -1770,7 +1773,7 @@ static void devm_phy_package_leave(struc
* phy_package_join() for more information.
*/
int devm_phy_package_join(struct device *dev, struct phy_device *phydev,
- int addr, size_t priv_size)
+ int base_addr, size_t priv_size)
{
struct phy_device **ptr;
int ret;
@@ -1780,7 +1783,7 @@ int devm_phy_package_join(struct device
if (!ptr)
return -ENOMEM;
- ret = phy_package_join(phydev, addr, priv_size);
+ ret = phy_package_join(phydev, base_addr, priv_size);
if (!ret) {
*ptr = phydev;
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -327,7 +327,8 @@ struct mdio_bus_stats {
/**
* struct phy_package_shared - Shared information in PHY packages
- * @addr: Common PHY address used to combine PHYs in one package
+ * @base_addr: Base PHY address of PHY package used to combine PHYs
+ * in one package and for offset calculation of phy_package_read/write
* @refcnt: Number of PHYs connected to this shared data
* @flags: Initialization of PHY package
* @priv_size: Size of the shared private data @priv
@@ -338,7 +339,7 @@ struct mdio_bus_stats {
* phy_package_leave().
*/
struct phy_package_shared {
- u8 addr;
+ u8 base_addr;
refcount_t refcnt;
unsigned long flags;
size_t priv_size;
@@ -1969,10 +1970,10 @@ int phy_ethtool_get_link_ksettings(struc
int phy_ethtool_set_link_ksettings(struct net_device *ndev,
const struct ethtool_link_ksettings *cmd);
int phy_ethtool_nway_reset(struct net_device *ndev);
-int phy_package_join(struct phy_device *phydev, int addr, size_t priv_size);
+int phy_package_join(struct phy_device *phydev, int base_addr, size_t priv_size);
void phy_package_leave(struct phy_device *phydev);
int devm_phy_package_join(struct device *dev, struct phy_device *phydev,
- int addr, size_t priv_size);
+ int base_addr, size_t priv_size);
int __init mdio_bus_init(void);
void mdio_bus_exit(void);
@@ -1995,46 +1996,65 @@ int __phy_hwtstamp_set(struct phy_device
struct kernel_hwtstamp_config *config,
struct netlink_ext_ack *extack);
-static inline int phy_package_read(struct phy_device *phydev, u32 regnum)
+static inline int phy_package_address(struct phy_device *phydev,
+ unsigned int addr_offset)
{
struct phy_package_shared *shared = phydev->shared;
+ u8 base_addr = shared->base_addr;
- if (!shared)
+ if (addr_offset >= PHY_MAX_ADDR - base_addr)
return -EIO;
- return mdiobus_read(phydev->mdio.bus, shared->addr, regnum);
+ /* we know that addr will be in the range 0..31 and thus the
+ * implicit cast to a signed int is not a problem.
+ */
+ return base_addr + addr_offset;
}
-static inline int __phy_package_read(struct phy_device *phydev, u32 regnum)
+static inline int phy_package_read(struct phy_device *phydev,
+ unsigned int addr_offset, u32 regnum)
{
- struct phy_package_shared *shared = phydev->shared;
+ int addr = phy_package_address(phydev, addr_offset);
- if (!shared)
- return -EIO;
+ if (addr < 0)
+ return addr;
+
+ return mdiobus_read(phydev->mdio.bus, addr, regnum);
+}
+
+static inline int __phy_package_read(struct phy_device *phydev,
+ unsigned int addr_offset, u32 regnum)
+{
+ int addr = phy_package_address(phydev, addr_offset);
+
+ if (addr < 0)
+ return addr;
- return __mdiobus_read(phydev->mdio.bus, shared->addr, regnum);
+ return __mdiobus_read(phydev->mdio.bus, addr, regnum);
}
static inline int phy_package_write(struct phy_device *phydev,
- u32 regnum, u16 val)
+ unsigned int addr_offset, u32 regnum,
+ u16 val)
{
- struct phy_package_shared *shared = phydev->shared;
+ int addr = phy_package_address(phydev, addr_offset);
- if (!shared)
- return -EIO;
+ if (addr < 0)
+ return addr;
- return mdiobus_write(phydev->mdio.bus, shared->addr, regnum, val);
+ return mdiobus_write(phydev->mdio.bus, addr, regnum, val);
}
static inline int __phy_package_write(struct phy_device *phydev,
- u32 regnum, u16 val)
+ unsigned int addr_offset, u32 regnum,
+ u16 val)
{
- struct phy_package_shared *shared = phydev->shared;
+ int addr = phy_package_address(phydev, addr_offset);
- if (!shared)
- return -EIO;
+ if (addr < 0)
+ return addr;
- return __mdiobus_write(phydev->mdio.bus, shared->addr, regnum, val);
+ return __mdiobus_write(phydev->mdio.bus, addr, regnum, val);
}
static inline bool __phy_package_set_once(struct phy_device *phydev,
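
To make the new calling convention concrete, here is a minimal, hypothetical
sketch of the offset-enum pattern the users above follow (the driver name,
offsets and register number are invented for illustration):

/* Hypothetical package layout: global regs sit 3 addresses above base. */
enum example_global_phy {
	EXAMPLE_COMBO_CTRL_ADDR = 3,
	EXAMPLE_PSGMII_ADDR = 4,
};

static int example_probe(struct phy_device *phydev)
{
	int ret;

	/* Join the package using the base PHY address of the package */
	ret = phy_package_join(phydev, 0 /* base addr */, 0);
	if (ret)
		return ret;

	/* Global register access is now expressed as base + offset */
	return phy_package_read(phydev, EXAMPLE_COMBO_CTRL_ADDR,
				0x1f /* example register */);
}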

View file

@ -0,0 +1,116 @@
From 028672bd1d73cf65249a420c1de75e8d2acd2f6a Mon Sep 17 00:00:00 2001
From: Christian Marangi <ansuelsmth@gmail.com>
Date: Fri, 15 Dec 2023 14:15:33 +0100
Subject: [PATCH 3/4] net: phy: restructure __phy_write/read_mmd to helper and
phydev user
Restructure phy_write_mmd and phy_read_mmd to implement generic helpers
for direct mdiobus MMD access and use these helpers for the phydev users.
This is needed in preparation for the PHY package API, which requires
generic access to the mdiobus detached from the phydev struct, instead
addressing the bus based on the PHY package base_addr and offsets.
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
Reviewed-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/phy/phy-core.c | 64 ++++++++++++++++++--------------------
1 file changed, 30 insertions(+), 34 deletions(-)
--- a/drivers/net/phy/phy-core.c
+++ b/drivers/net/phy/phy-core.c
@@ -540,6 +540,28 @@ static void mmd_phy_indirect(struct mii_
devad | MII_MMD_CTRL_NOINCR);
}
+static int mmd_phy_read(struct mii_bus *bus, int phy_addr, bool is_c45,
+ int devad, u32 regnum)
+{
+ if (is_c45)
+ return __mdiobus_c45_read(bus, phy_addr, devad, regnum);
+
+ mmd_phy_indirect(bus, phy_addr, devad, regnum);
+ /* Read the content of the MMD's selected register */
+ return __mdiobus_read(bus, phy_addr, MII_MMD_DATA);
+}
+
+static int mmd_phy_write(struct mii_bus *bus, int phy_addr, bool is_c45,
+ int devad, u32 regnum, u16 val)
+{
+ if (is_c45)
+ return __mdiobus_c45_write(bus, phy_addr, devad, regnum, val);
+
+ mmd_phy_indirect(bus, phy_addr, devad, regnum);
+ /* Write the data into MMD's selected register */
+ return __mdiobus_write(bus, phy_addr, MII_MMD_DATA, val);
+}
+
/**
* __phy_read_mmd - Convenience function for reading a register
* from an MMD on a given PHY.
@@ -551,26 +573,14 @@ static void mmd_phy_indirect(struct mii_
*/
int __phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum)
{
- int val;
-
if (regnum > (u16)~0 || devad > 32)
return -EINVAL;
- if (phydev->drv && phydev->drv->read_mmd) {
- val = phydev->drv->read_mmd(phydev, devad, regnum);
- } else if (phydev->is_c45) {
- val = __mdiobus_c45_read(phydev->mdio.bus, phydev->mdio.addr,
- devad, regnum);
- } else {
- struct mii_bus *bus = phydev->mdio.bus;
- int phy_addr = phydev->mdio.addr;
-
- mmd_phy_indirect(bus, phy_addr, devad, regnum);
-
- /* Read the content of the MMD's selected register */
- val = __mdiobus_read(bus, phy_addr, MII_MMD_DATA);
- }
- return val;
+ if (phydev->drv && phydev->drv->read_mmd)
+ return phydev->drv->read_mmd(phydev, devad, regnum);
+
+ return mmd_phy_read(phydev->mdio.bus, phydev->mdio.addr,
+ phydev->is_c45, devad, regnum);
}
EXPORT_SYMBOL(__phy_read_mmd);
@@ -607,28 +617,14 @@ EXPORT_SYMBOL(phy_read_mmd);
*/
int __phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val)
{
- int ret;
-
if (regnum > (u16)~0 || devad > 32)
return -EINVAL;
- if (phydev->drv && phydev->drv->write_mmd) {
- ret = phydev->drv->write_mmd(phydev, devad, regnum, val);
- } else if (phydev->is_c45) {
- ret = __mdiobus_c45_write(phydev->mdio.bus, phydev->mdio.addr,
- devad, regnum, val);
- } else {
- struct mii_bus *bus = phydev->mdio.bus;
- int phy_addr = phydev->mdio.addr;
+ if (phydev->drv && phydev->drv->write_mmd)
+ return phydev->drv->write_mmd(phydev, devad, regnum, val);
- mmd_phy_indirect(bus, phy_addr, devad, regnum);
-
- /* Write the data into MMD's selected register */
- __mdiobus_write(bus, phy_addr, MII_MMD_DATA, val);
-
- ret = 0;
- }
- return ret;
+ return mmd_phy_write(phydev->mdio.bus, phydev->mdio.addr,
+ phydev->is_c45, devad, regnum, val);
}
EXPORT_SYMBOL(__phy_write_mmd);
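
For context, the driver-facing calls keep their existing signatures after
this refactor; a hedged example of a caller (the register choice is borrowed
from the qca83xx code elsewhere in this series):

/* Read the EEE advertisement register in the AN MMD and clear it.
 * Only the internals moved into mmd_phy_read()/mmd_phy_write(); the
 * phy_read_mmd()/phy_write_mmd() entry points are unchanged.
 */
static int example_clear_eee_adv(struct phy_device *phydev)
{
	int val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV);

	if (val < 0)
		return val;

	return phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0x0);
}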

View file

@ -0,0 +1,196 @@
From d63710fc0f1a501fd75a7025e3070a96ffa1645f Mon Sep 17 00:00:00 2001
From: Christian Marangi <ansuelsmth@gmail.com>
Date: Fri, 15 Dec 2023 14:15:34 +0100
Subject: [PATCH 4/4] net: phy: add support for PHY package MMD read/write
Some PHYs in a PHY package may require reading/writing MMD regs to
correctly configure the PHY package.
Add support for these additionally required functions in both locked and
lockless variants.
It's assumed that the entire PHY package is either C22 or C45. We use the
C22 or C45 way of writing/reading MMD regs based on whether the passed
phydev is C22 or C45.
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/phy/phy-core.c | 140 +++++++++++++++++++++++++++++++++++++
include/linux/phy.h | 16 +++++
2 files changed, 156 insertions(+)
--- a/drivers/net/phy/phy-core.c
+++ b/drivers/net/phy/phy-core.c
@@ -651,6 +651,146 @@ int phy_write_mmd(struct phy_device *phy
EXPORT_SYMBOL(phy_write_mmd);
/**
+ * __phy_package_read_mmd - read MMD reg relative to PHY package base addr
+ * @phydev: The phy_device struct
+ * @addr_offset: The offset to be added to PHY package base_addr
+ * @devad: The MMD to read from
+ * @regnum: The register on the MMD to read
+ *
+ * Convenience helper for reading a register of an MMD on a given PHY
+ * using the PHY package base address. The base address is added to
+ * the addr_offset value.
+ *
+ * Same calling rules as for __phy_read();
+ *
+ * NOTE: It's assumed that the entire PHY package is either C22 or C45.
+ */
+int __phy_package_read_mmd(struct phy_device *phydev,
+ unsigned int addr_offset, int devad,
+ u32 regnum)
+{
+ int addr = phy_package_address(phydev, addr_offset);
+
+ if (addr < 0)
+ return addr;
+
+ if (regnum > (u16)~0 || devad > 32)
+ return -EINVAL;
+
+ return mmd_phy_read(phydev->mdio.bus, addr, phydev->is_c45, devad,
+ regnum);
+}
+EXPORT_SYMBOL(__phy_package_read_mmd);
+
+/**
+ * phy_package_read_mmd - read MMD reg relative to PHY package base addr
+ * @phydev: The phy_device struct
+ * @addr_offset: The offset to be added to PHY package base_addr
+ * @devad: The MMD to read from
+ * @regnum: The register on the MMD to read
+ *
+ * Convenience helper for reading a register of an MMD on a given PHY
+ * using the PHY package base address. The base address is added to
+ * the addr_offset value.
+ *
+ * Same calling rules as for phy_read();
+ *
+ * NOTE: It's assumed that the entire PHY package is either C22 or C45.
+ */
+int phy_package_read_mmd(struct phy_device *phydev,
+ unsigned int addr_offset, int devad,
+ u32 regnum)
+{
+ int addr = phy_package_address(phydev, addr_offset);
+ int val;
+
+ if (addr < 0)
+ return addr;
+
+ if (regnum > (u16)~0 || devad > 32)
+ return -EINVAL;
+
+ phy_lock_mdio_bus(phydev);
+ val = mmd_phy_read(phydev->mdio.bus, addr, phydev->is_c45, devad,
+ regnum);
+ phy_unlock_mdio_bus(phydev);
+
+ return val;
+}
+EXPORT_SYMBOL(phy_package_read_mmd);
+
+/**
+ * __phy_package_write_mmd - write MMD reg relative to PHY package base addr
+ * @phydev: The phy_device struct
+ * @addr_offset: The offset to be added to PHY package base_addr
+ * @devad: The MMD to write to
+ * @regnum: The register on the MMD to write
+ * @val: value to write to @regnum
+ *
+ * Convenience helper for writing a register of an MMD on a given PHY
+ * using the PHY package base address. The base address is added to
+ * the addr_offset value.
+ *
+ * Same calling rules as for __phy_write();
+ *
+ * NOTE: It's assumed that the entire PHY package is either C22 or C45.
+ */
+int __phy_package_write_mmd(struct phy_device *phydev,
+ unsigned int addr_offset, int devad,
+ u32 regnum, u16 val)
+{
+ int addr = phy_package_address(phydev, addr_offset);
+
+ if (addr < 0)
+ return addr;
+
+ if (regnum > (u16)~0 || devad > 32)
+ return -EINVAL;
+
+ return mmd_phy_write(phydev->mdio.bus, addr, phydev->is_c45, devad,
+ regnum, val);
+}
+EXPORT_SYMBOL(__phy_package_write_mmd);
+
+/**
+ * phy_package_write_mmd - write MMD reg relative to PHY package base addr
+ * @phydev: The phy_device struct
+ * @addr_offset: The offset to be added to PHY package base_addr
+ * @devad: The MMD to write to
+ * @regnum: The register on the MMD to write
+ * @val: value to write to @regnum
+ *
+ * Convenience helper for writing a register of an MMD on a given PHY
+ * using the PHY package base address. The base address is added to
+ * the addr_offset value.
+ *
+ * Same calling rules as for phy_write();
+ *
+ * NOTE: It's assumed that the entire PHY package is either C22 or C45.
+ */
+int phy_package_write_mmd(struct phy_device *phydev,
+ unsigned int addr_offset, int devad,
+ u32 regnum, u16 val)
+{
+ int addr = phy_package_address(phydev, addr_offset);
+ int ret;
+
+ if (addr < 0)
+ return addr;
+
+ if (regnum > (u16)~0 || devad > 32)
+ return -EINVAL;
+
+ phy_lock_mdio_bus(phydev);
+ ret = mmd_phy_write(phydev->mdio.bus, addr, phydev->is_c45, devad,
+ regnum, val);
+ phy_unlock_mdio_bus(phydev);
+
+ return ret;
+}
+EXPORT_SYMBOL(phy_package_write_mmd);
+
+/**
* phy_modify_changed - Function for modifying a PHY register
* @phydev: the phy_device struct
* @regnum: register number to modify
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -2057,6 +2057,22 @@ static inline int __phy_package_write(st
return __mdiobus_write(phydev->mdio.bus, addr, regnum, val);
}
+int __phy_package_read_mmd(struct phy_device *phydev,
+ unsigned int addr_offset, int devad,
+ u32 regnum);
+
+int phy_package_read_mmd(struct phy_device *phydev,
+ unsigned int addr_offset, int devad,
+ u32 regnum);
+
+int __phy_package_write_mmd(struct phy_device *phydev,
+ unsigned int addr_offset, int devad,
+ u32 regnum, u16 val);
+
+int phy_package_write_mmd(struct phy_device *phydev,
+ unsigned int addr_offset, int devad,
+ u32 regnum, u16 val);
+
static inline bool __phy_package_set_once(struct phy_device *phydev,
unsigned int b)
{
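
A rough sketch of how a PHY package driver might use the new MMD helpers
(the offset enum, MMD register 0x8000 and the VEND1 device choice are
invented for illustration):

enum example_package_phy {
	EXAMPLE_GLOBAL_ADDR = 0,	/* global regs live at base_addr + 0 */
};

static int example_config_package(struct phy_device *phydev)
{
	int val;

	/* Locked variant: takes the MDIO bus lock internally */
	val = phy_package_read_mmd(phydev, EXAMPLE_GLOBAL_ADDR,
				   MDIO_MMD_VEND1, 0x8000);
	if (val < 0)
		return val;

	return phy_package_write_mmd(phydev, EXAMPLE_GLOBAL_ADDR,
				     MDIO_MMD_VEND1, 0x8000, val | BIT(0));
}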

View file

@ -0,0 +1,36 @@
From f2ec98566775dd4341ec1dcf93aa5859c60de826 Mon Sep 17 00:00:00 2001
From: Christian Marangi <ansuelsmth@gmail.com>
Date: Thu, 1 Feb 2024 14:46:00 +0100
Subject: [PATCH 1/2] net: phy: qcom: qca808x: fix logic error in LED
brightness set
In switching to phy_modify_mmd and a shorter version of the LED ON/OFF
condition in a later revision, a logic error was introduced where
value ? QCA808X_LED_FORCE_ON : QCA808X_LED_FORCE_OFF is always true, as
value is always ORed with QCA808X_LED_FORCE_EN due to missing (),
resulting in the tested condition being QCA808X_LED_FORCE_EN | value.
Add the () to apply the correct condition and restore correct
functionality of the brightness ON/OFF.
Fixes: 7196062b64ee ("net: phy: at803x: add LED support for qca808x")
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/phy/qcom/qca808x.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
--- a/drivers/net/phy/qcom/qca808x.c
+++ b/drivers/net/phy/qcom/qca808x.c
@@ -820,8 +820,8 @@ static int qca808x_led_brightness_set(st
return phy_modify_mmd(phydev, MDIO_MMD_AN, reg,
QCA808X_LED_FORCE_EN | QCA808X_LED_FORCE_MODE_MASK,
- QCA808X_LED_FORCE_EN | value ? QCA808X_LED_FORCE_ON :
- QCA808X_LED_FORCE_OFF);
+ QCA808X_LED_FORCE_EN | (value ? QCA808X_LED_FORCE_ON :
+ QCA808X_LED_FORCE_OFF));
}
static int qca808x_led_blink_set(struct phy_device *phydev, u8 index,
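
The underlying C precedence pitfall, shown in isolation (the macro values
below are placeholders, not the real register bits): bitwise | binds tighter
than ?:, so without parentheses the force-enable bit becomes part of the
condition instead of part of the result.

#define FORCE_EN  0x8000	/* placeholder values for illustration */
#define FORCE_ON  0x0020
#define FORCE_OFF 0x0000

/* Buggy: parsed as (FORCE_EN | value) ? FORCE_ON : FORCE_OFF, which
 * always yields FORCE_ON because FORCE_EN is non-zero.
 */
static unsigned int buggy(unsigned int value)
{
	return FORCE_EN | value ? FORCE_ON : FORCE_OFF;
}

/* Fixed: FORCE_EN is ORed into whichever value the condition selects. */
static unsigned int fixed(unsigned int value)
{
	return FORCE_EN | (value ? FORCE_ON : FORCE_OFF);
}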

View file

@ -0,0 +1,41 @@
From f203c8c77c7616c099647636f4c67d59a45fe8a2 Mon Sep 17 00:00:00 2001
From: Christian Marangi <ansuelsmth@gmail.com>
Date: Thu, 1 Feb 2024 14:46:01 +0100
Subject: [PATCH 2/2] net: phy: qcom: qca808x: default to LED active High if
not set
The qca808x PHY provides support for the led_polarity_set OP to configure
and apply the active-low property, but on PHY reset the Active High bit
is not set, resulting in the LED being driven as active-low.
To fix this, check if active-low is not set in DT and enable Active High
polarity by default to restore correct functionality of the LED.
Fixes: 7196062b64ee ("net: phy: at803x: add LED support for qca808x")
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/phy/qcom/qca808x.c | 10 ++++++++++
1 file changed, 10 insertions(+)
--- a/drivers/net/phy/qcom/qca808x.c
+++ b/drivers/net/phy/qcom/qca808x.c
@@ -290,8 +290,18 @@ static int qca808x_probe(struct phy_devi
static int qca808x_config_init(struct phy_device *phydev)
{
+ struct qca808x_priv *priv = phydev->priv;
int ret;
+ /* Default to LED Active High if active-low not in DT */
+ if (priv->led_polarity_mode == -1) {
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_AN,
+ QCA808X_MMD7_LED_POLARITY_CTRL,
+ QCA808X_LED_ACTIVE_HIGH);
+ if (ret)
+ return ret;
+ }
+
/* Active adc&vga on 802.3az for the link 1000M and 100M */
ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_ADDR_CLD_CTRL7,
QCA808X_8023AZ_AFE_CTRL_MASK, QCA808X_8023AZ_AFE_EN);

View file

@ -0,0 +1,211 @@
From 385ef48f468696d6d172eb367656a3466fa0408d Mon Sep 17 00:00:00 2001
From: Christian Marangi <ansuelsmth@gmail.com>
Date: Tue, 6 Feb 2024 18:31:05 +0100
Subject: [PATCH 02/10] net: phy: add support for scanning PHY in PHY packages
nodes
Add support for scanning PHYs in PHY package nodes. PHY package nodes
are just containers for the actual PHYs on the MDIO bus.
The PHY addresses defined in the PHY package node are absolute and
reflect the address on the MDIO bus.
mdio_bus.c and of_mdio.c are updated to also support and parse PHY
package subnodes by checking if the node name matches
"ethernet-phy-package".
As the PHY package reg is mandatory and each PHY in the PHY package must
have a reg, every invalid PHY package node is ignored and will be
skipped by the autoscan fallback.
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
Reviewed-by: Andrew Lunn <andrew@lunn.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/mdio/of_mdio.c | 79 +++++++++++++++++++++++++++-----------
drivers/net/phy/mdio_bus.c | 44 +++++++++++++++++----
2 files changed, 92 insertions(+), 31 deletions(-)
--- a/drivers/net/mdio/of_mdio.c
+++ b/drivers/net/mdio/of_mdio.c
@@ -138,6 +138,53 @@ bool of_mdiobus_child_is_phy(struct devi
}
EXPORT_SYMBOL(of_mdiobus_child_is_phy);
+static int __of_mdiobus_parse_phys(struct mii_bus *mdio, struct device_node *np,
+ bool *scanphys)
+{
+ struct device_node *child;
+ int addr, rc = 0;
+
+ /* Loop over the child nodes and register a phy_device for each phy */
+ for_each_available_child_of_node(np, child) {
+ if (of_node_name_eq(child, "ethernet-phy-package")) {
+ /* Ignore invalid ethernet-phy-package node */
+ if (!of_find_property(child, "reg", NULL))
+ continue;
+
+ rc = __of_mdiobus_parse_phys(mdio, child, NULL);
+ if (rc && rc != -ENODEV)
+ goto exit;
+
+ continue;
+ }
+
+ addr = of_mdio_parse_addr(&mdio->dev, child);
+ if (addr < 0) {
+ /* Skip scanning for invalid ethernet-phy-package node */
+ if (scanphys)
+ *scanphys = true;
+ continue;
+ }
+
+ if (of_mdiobus_child_is_phy(child))
+ rc = of_mdiobus_register_phy(mdio, child, addr);
+ else
+ rc = of_mdiobus_register_device(mdio, child, addr);
+
+ if (rc == -ENODEV)
+ dev_err(&mdio->dev,
+ "MDIO device at address %d is missing.\n",
+ addr);
+ else if (rc)
+ goto exit;
+ }
+
+ return 0;
+exit:
+ of_node_put(child);
+ return rc;
+}
+
/**
* __of_mdiobus_register - Register mii_bus and create PHYs from the device tree
* @mdio: pointer to mii_bus structure
@@ -179,33 +226,18 @@ int __of_mdiobus_register(struct mii_bus
return rc;
/* Loop over the child nodes and register a phy_device for each phy */
- for_each_available_child_of_node(np, child) {
- addr = of_mdio_parse_addr(&mdio->dev, child);
- if (addr < 0) {
- scanphys = true;
- continue;
- }
-
- if (of_mdiobus_child_is_phy(child))
- rc = of_mdiobus_register_phy(mdio, child, addr);
- else
- rc = of_mdiobus_register_device(mdio, child, addr);
-
- if (rc == -ENODEV)
- dev_err(&mdio->dev,
- "MDIO device at address %d is missing.\n",
- addr);
- else if (rc)
- goto unregister;
- }
+ rc = __of_mdiobus_parse_phys(mdio, np, &scanphys);
+ if (rc)
+ goto unregister;
if (!scanphys)
return 0;
/* auto scan for PHYs with empty reg property */
for_each_available_child_of_node(np, child) {
- /* Skip PHYs with reg property set */
- if (of_property_present(child, "reg"))
+ /* Skip PHYs with reg property set or ethernet-phy-package node */
+ if (of_property_present(child, "reg") ||
+ of_node_name_eq(child, "ethernet-phy-package"))
continue;
for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
@@ -226,15 +258,16 @@ int __of_mdiobus_register(struct mii_bus
if (!rc)
break;
if (rc != -ENODEV)
- goto unregister;
+ goto put_unregister;
}
}
}
return 0;
-unregister:
+put_unregister:
of_node_put(child);
+unregister:
mdiobus_unregister(mdio);
return rc;
}
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -455,19 +455,34 @@ EXPORT_SYMBOL(of_mdio_find_bus);
* found, set the of_node pointer for the mdio device. This allows
* auto-probed phy devices to be supplied with information passed in
* via DT.
+ * If a PHY package is found, PHY is searched also there.
*/
-static void of_mdiobus_link_mdiodev(struct mii_bus *bus,
- struct mdio_device *mdiodev)
+static int of_mdiobus_find_phy(struct device *dev, struct mdio_device *mdiodev,
+ struct device_node *np)
{
- struct device *dev = &mdiodev->dev;
struct device_node *child;
- if (dev->of_node || !bus->dev.of_node)
- return;
-
- for_each_available_child_of_node(bus->dev.of_node, child) {
+ for_each_available_child_of_node(np, child) {
int addr;
+ if (of_node_name_eq(child, "ethernet-phy-package")) {
+ /* Validate PHY package reg presence */
+ if (!of_find_property(child, "reg", NULL)) {
+ of_node_put(child);
+ return -EINVAL;
+ }
+
+ if (!of_mdiobus_find_phy(dev, mdiodev, child)) {
+ /* The refcount for the PHY package will be
+ * incremented later when PHY join the Package.
+ */
+ of_node_put(child);
+ return 0;
+ }
+
+ continue;
+ }
+
addr = of_mdio_parse_addr(dev, child);
if (addr < 0)
continue;
@@ -477,9 +492,22 @@ static void of_mdiobus_link_mdiodev(stru
/* The refcount on "child" is passed to the mdio
* device. Do _not_ use of_node_put(child) here.
*/
- return;
+ return 0;
}
}
+
+ return -ENODEV;
+}
+
+static void of_mdiobus_link_mdiodev(struct mii_bus *bus,
+ struct mdio_device *mdiodev)
+{
+ struct device *dev = &mdiodev->dev;
+
+ if (dev->of_node || !bus->dev.of_node)
+ return;
+
+ of_mdiobus_find_phy(dev, mdiodev, bus->dev.of_node);
}
#else /* !IS_ENABLED(CONFIG_OF_MDIO) */
static inline void of_mdiobus_link_mdiodev(struct mii_bus *mdio,

View file

@ -0,0 +1,185 @@
From 471e8fd3afcef5a9f9089f0bd21965ad9ba35c91 Mon Sep 17 00:00:00 2001
From: Christian Marangi <ansuelsmth@gmail.com>
Date: Tue, 6 Feb 2024 18:31:06 +0100
Subject: [PATCH 03/10] net: phy: add devm/of_phy_package_join helper
Add devm/of_phy_package_join helpers to join PHYs in a PHY package. These
are variants of the manual phy_package_join with the difference that
they use DT nodes to derive the base_addr instead of manually passing a
hardcoded value.
An additional value, "np", is added in phy_package_shared to reference
the PHY package node pointer in PHY-driver-specific probe_once and
config_init_once functions, so they can make use of additional properties
defined in the PHY package node in DT.
The np value is filled only by of_phy_package_join and only if a valid
PHY package node is found. A valid PHY package node must have its node
name set to "ethernet-phy-package".
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
Reviewed-by: Andrew Lunn <andrew@lunn.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/phy/phy_device.c | 96 ++++++++++++++++++++++++++++++++++++
include/linux/phy.h | 6 +++
2 files changed, 102 insertions(+)
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1706,6 +1706,7 @@ int phy_package_join(struct phy_device *
shared->priv_size = priv_size;
}
shared->base_addr = base_addr;
+ shared->np = NULL;
refcount_set(&shared->refcnt, 1);
bus->shared[base_addr] = shared;
} else {
@@ -1729,6 +1730,63 @@ err_unlock:
EXPORT_SYMBOL_GPL(phy_package_join);
/**
+ * of_phy_package_join - join a common PHY group in PHY package
+ * @phydev: target phy_device struct
+ * @priv_size: if non-zero allocate this amount of bytes for private data
+ *
+ * This is a variant of phy_package_join for PHY package defined in DT.
+ *
+ * The parent node of the @phydev is checked as a valid PHY package node
+ * structure (by matching the node name "ethernet-phy-package") and the
+ * base_addr for the PHY package is passed to phy_package_join.
+ *
+ * With this configuration the shared struct will also have the np value
+ * filled to use additional DT defined properties in PHY specific
+ * probe_once and config_init_once PHY package OPs.
+ *
+ * Returns < 0 on error, 0 on success. Esp. calling phy_package_join()
+ * with the same cookie but a different priv_size is an error. Or a parent
+ * node is not detected or is not valid or doesn't match the expected node
+ * name for PHY package.
+ */
+int of_phy_package_join(struct phy_device *phydev, size_t priv_size)
+{
+ struct device_node *node = phydev->mdio.dev.of_node;
+ struct device_node *package_node;
+ u32 base_addr;
+ int ret;
+
+ if (!node)
+ return -EINVAL;
+
+ package_node = of_get_parent(node);
+ if (!package_node)
+ return -EINVAL;
+
+ if (!of_node_name_eq(package_node, "ethernet-phy-package")) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (of_property_read_u32(package_node, "reg", &base_addr)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ret = phy_package_join(phydev, base_addr, priv_size);
+ if (ret)
+ goto exit;
+
+ phydev->shared->np = package_node;
+
+ return 0;
+exit:
+ of_node_put(package_node);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(of_phy_package_join);
+
+/**
* phy_package_leave - leave a common PHY group
* @phydev: target phy_device struct
*
@@ -1744,6 +1802,10 @@ void phy_package_leave(struct phy_device
if (!shared)
return;
+ /* Decrease the node refcount on leave if present */
+ if (shared->np)
+ of_node_put(shared->np);
+
if (refcount_dec_and_mutex_lock(&shared->refcnt, &bus->shared_lock)) {
bus->shared[shared->base_addr] = NULL;
mutex_unlock(&bus->shared_lock);
@@ -1797,6 +1859,40 @@ int devm_phy_package_join(struct device
EXPORT_SYMBOL_GPL(devm_phy_package_join);
/**
+ * devm_of_phy_package_join - resource managed of_phy_package_join()
+ * @dev: device that is registering this PHY package
+ * @phydev: target phy_device struct
+ * @priv_size: if non-zero allocate this amount of bytes for private data
+ *
+ * Managed of_phy_package_join(). Shared storage fetched by this function,
+ * phy_package_leave() is automatically called on driver detach. See
+ * of_phy_package_join() for more information.
+ */
+int devm_of_phy_package_join(struct device *dev, struct phy_device *phydev,
+ size_t priv_size)
+{
+ struct phy_device **ptr;
+ int ret;
+
+ ptr = devres_alloc(devm_phy_package_leave, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ ret = of_phy_package_join(phydev, priv_size);
+
+ if (!ret) {
+ *ptr = phydev;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_of_phy_package_join);
+
+/**
* phy_detach - detach a PHY device from its network device
* @phydev: target phy_device struct
*
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -329,6 +329,7 @@ struct mdio_bus_stats {
* struct phy_package_shared - Shared information in PHY packages
* @base_addr: Base PHY address of PHY package used to combine PHYs
* in one package and for offset calculation of phy_package_read/write
+ * @np: Pointer to the Device Node if PHY package defined in DT
* @refcnt: Number of PHYs connected to this shared data
* @flags: Initialization of PHY package
* @priv_size: Size of the shared private data @priv
@@ -340,6 +341,8 @@ struct mdio_bus_stats {
*/
struct phy_package_shared {
u8 base_addr;
+ /* With PHY package defined in DT this points to the PHY package node */
+ struct device_node *np;
refcount_t refcnt;
unsigned long flags;
size_t priv_size;
@@ -1971,9 +1974,12 @@ int phy_ethtool_set_link_ksettings(struc
const struct ethtool_link_ksettings *cmd);
int phy_ethtool_nway_reset(struct net_device *ndev);
int phy_package_join(struct phy_device *phydev, int base_addr, size_t priv_size);
+int of_phy_package_join(struct phy_device *phydev, size_t priv_size);
void phy_package_leave(struct phy_device *phydev);
int devm_phy_package_join(struct device *dev, struct phy_device *phydev,
int base_addr, size_t priv_size);
+int devm_of_phy_package_join(struct device *dev, struct phy_device *phydev,
+ size_t priv_size);
int __init mdio_bus_init(void);
void mdio_bus_exit(void);

View file

@ -0,0 +1,583 @@
From 737eb75a815f9c08dcbb6631db57f4f4b0540a5b Mon Sep 17 00:00:00 2001
From: Christian Marangi <ansuelsmth@gmail.com>
Date: Tue, 6 Feb 2024 18:31:07 +0100
Subject: [PATCH 04/10] net: phy: qcom: move more functions to shared library

Move more functions to the shared library in preparation for the
introduction of the new PHY family qca807x, which will make use of
functions from both at803x and qca808x, as it is a transition PHY with
some of the implementation coming from at803x and some from the new
qca808x.
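
For reference, a hedged sketch of how such a transition driver could wire
the now-shared helpers into its ops (the PHY ID, driver name and structure
below are placeholders, not taken from this patch; .cable_test_start stays
driver-specific since it is not moved here):

#include <linux/module.h>
#include <linux/phy.h>

#include "qcom.h"

static struct phy_driver example_qca807x_driver[] = {
{
	PHY_ID_MATCH_EXACT(0x004dd100),		/* placeholder PHY ID */
	.name			= "Example QCA807x",
	/* at803x-style status handling, exported by this patch */
	.read_status		= at803x_read_status,
	/* tunables already provided by qcom-phy-lib */
	.get_tunable		= at803x_get_tunable,
	.set_tunable		= at803x_set_tunable,
	/* qca808x-style cable diagnostics, exported by this patch */
	.cable_test_get_status	= qca808x_cable_test_get_status,
},
};
module_phy_driver(example_qca807x_driver);

MODULE_LICENSE("GPL");
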
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
Reviewed-by: Andrew Lunn <andrew@lunn.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
drivers/net/phy/qcom/at803x.c | 35 -----
drivers/net/phy/qcom/qca808x.c | 205 ----------------------------
drivers/net/phy/qcom/qcom-phy-lib.c | 193 ++++++++++++++++++++++++++
drivers/net/phy/qcom/qcom.h | 51 +++++++
4 files changed, 244 insertions(+), 240 deletions(-)
--- a/drivers/net/phy/qcom/at803x.c
+++ b/drivers/net/phy/qcom/at803x.c
@@ -504,41 +504,6 @@ static void at803x_link_change_notify(st
}
}
-static int at803x_read_status(struct phy_device *phydev)
-{
- struct at803x_ss_mask ss_mask = { 0 };
- int err, old_link = phydev->link;
-
- /* Update the link, but return if there was an error */
- err = genphy_update_link(phydev);
- if (err)
- return err;
-
- /* why bother the PHY if nothing can have changed */
- if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link)
- return 0;
-
- phydev->speed = SPEED_UNKNOWN;
- phydev->duplex = DUPLEX_UNKNOWN;
- phydev->pause = 0;
- phydev->asym_pause = 0;
-
- err = genphy_read_lpa(phydev);
- if (err < 0)
- return err;
-
- ss_mask.speed_mask = AT803X_SS_SPEED_MASK;
- ss_mask.speed_shift = __bf_shf(AT803X_SS_SPEED_MASK);
- err = at803x_read_specific_status(phydev, ss_mask);
- if (err < 0)
- return err;
-
- if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete)
- phy_resolve_aneg_pause(phydev);
-
- return 0;
-}
-
static int at803x_config_aneg(struct phy_device *phydev)
{
struct at803x_priv *priv = phydev->priv;
--- a/drivers/net/phy/qcom/qca808x.c
+++ b/drivers/net/phy/qcom/qca808x.c
@@ -2,7 +2,6 @@
#include <linux/phy.h>
#include <linux/module.h>
-#include <linux/ethtool_netlink.h>
#include "qcom.h"
@@ -63,55 +62,6 @@
#define QCA808X_DBG_AN_TEST 0xb
#define QCA808X_HIBERNATION_EN BIT(15)
-#define QCA808X_CDT_ENABLE_TEST BIT(15)
-#define QCA808X_CDT_INTER_CHECK_DIS BIT(13)
-#define QCA808X_CDT_STATUS BIT(11)
-#define QCA808X_CDT_LENGTH_UNIT BIT(10)
-
-#define QCA808X_MMD3_CDT_STATUS 0x8064
-#define QCA808X_MMD3_CDT_DIAG_PAIR_A 0x8065
-#define QCA808X_MMD3_CDT_DIAG_PAIR_B 0x8066
-#define QCA808X_MMD3_CDT_DIAG_PAIR_C 0x8067
-#define QCA808X_MMD3_CDT_DIAG_PAIR_D 0x8068
-#define QCA808X_CDT_DIAG_LENGTH_SAME_SHORT GENMASK(15, 8)
-#define QCA808X_CDT_DIAG_LENGTH_CROSS_SHORT GENMASK(7, 0)
-
-#define QCA808X_CDT_CODE_PAIR_A GENMASK(15, 12)
-#define QCA808X_CDT_CODE_PAIR_B GENMASK(11, 8)
-#define QCA808X_CDT_CODE_PAIR_C GENMASK(7, 4)
-#define QCA808X_CDT_CODE_PAIR_D GENMASK(3, 0)
-
-#define QCA808X_CDT_STATUS_STAT_TYPE GENMASK(1, 0)
-#define QCA808X_CDT_STATUS_STAT_FAIL FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 0)
-#define QCA808X_CDT_STATUS_STAT_NORMAL FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 1)
-#define QCA808X_CDT_STATUS_STAT_SAME_OPEN FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 2)
-#define QCA808X_CDT_STATUS_STAT_SAME_SHORT FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 3)
-
-#define QCA808X_CDT_STATUS_STAT_MDI GENMASK(3, 2)
-#define QCA808X_CDT_STATUS_STAT_MDI1 FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 1)
-#define QCA808X_CDT_STATUS_STAT_MDI2 FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 2)
-#define QCA808X_CDT_STATUS_STAT_MDI3 FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 3)
-
-/* NORMAL are MDI with type set to 0 */
-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL QCA808X_CDT_STATUS_STAT_MDI1
-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN (QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
- QCA808X_CDT_STATUS_STAT_MDI1)
-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT (QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
- QCA808X_CDT_STATUS_STAT_MDI1)
-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL QCA808X_CDT_STATUS_STAT_MDI2
-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN (QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
- QCA808X_CDT_STATUS_STAT_MDI2)
-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT (QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
- QCA808X_CDT_STATUS_STAT_MDI2)
-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL QCA808X_CDT_STATUS_STAT_MDI3
-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN (QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
- QCA808X_CDT_STATUS_STAT_MDI3)
-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT (QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
- QCA808X_CDT_STATUS_STAT_MDI3)
-
-/* Added for reference of existence but should be handled by wait_for_completion already */
-#define QCA808X_CDT_STATUS_STAT_BUSY (BIT(1) | BIT(3))
-
#define QCA808X_MMD7_LED_GLOBAL 0x8073
#define QCA808X_LED_BLINK_1 GENMASK(11, 6)
#define QCA808X_LED_BLINK_2 GENMASK(5, 0)
@@ -406,86 +356,6 @@ static int qca808x_soft_reset(struct phy
return ret;
}
-static bool qca808x_cdt_fault_length_valid(int cdt_code)
-{
- switch (cdt_code) {
- case QCA808X_CDT_STATUS_STAT_SAME_SHORT:
- case QCA808X_CDT_STATUS_STAT_SAME_OPEN:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT:
- return true;
- default:
- return false;
- }
-}
-
-static int qca808x_cable_test_result_trans(int cdt_code)
-{
- switch (cdt_code) {
- case QCA808X_CDT_STATUS_STAT_NORMAL:
- return ETHTOOL_A_CABLE_RESULT_CODE_OK;
- case QCA808X_CDT_STATUS_STAT_SAME_SHORT:
- return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
- case QCA808X_CDT_STATUS_STAT_SAME_OPEN:
- return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT:
- return ETHTOOL_A_CABLE_RESULT_CODE_CROSS_SHORT;
- case QCA808X_CDT_STATUS_STAT_FAIL:
- default:
- return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
- }
-}
-
-static int qca808x_cdt_fault_length(struct phy_device *phydev, int pair,
- int result)
-{
- int val;
- u32 cdt_length_reg = 0;
-
- switch (pair) {
- case ETHTOOL_A_CABLE_PAIR_A:
- cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_A;
- break;
- case ETHTOOL_A_CABLE_PAIR_B:
- cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_B;
- break;
- case ETHTOOL_A_CABLE_PAIR_C:
- cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_C;
- break;
- case ETHTOOL_A_CABLE_PAIR_D:
- cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_D;
- break;
- default:
- return -EINVAL;
- }
-
- val = phy_read_mmd(phydev, MDIO_MMD_PCS, cdt_length_reg);
- if (val < 0)
- return val;
-
- if (result == ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT)
- val = FIELD_GET(QCA808X_CDT_DIAG_LENGTH_SAME_SHORT, val);
- else
- val = FIELD_GET(QCA808X_CDT_DIAG_LENGTH_CROSS_SHORT, val);
-
- return at803x_cdt_fault_length(val);
-}
-
static int qca808x_cable_test_start(struct phy_device *phydev)
{
int ret;
@@ -526,81 +396,6 @@ static int qca808x_cable_test_start(stru
return 0;
}
-
-static int qca808x_cable_test_get_pair_status(struct phy_device *phydev, u8 pair,
- u16 status)
-{
- int length, result;
- u16 pair_code;
-
- switch (pair) {
- case ETHTOOL_A_CABLE_PAIR_A:
- pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_A, status);
- break;
- case ETHTOOL_A_CABLE_PAIR_B:
- pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_B, status);
- break;
- case ETHTOOL_A_CABLE_PAIR_C:
- pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_C, status);
- break;
- case ETHTOOL_A_CABLE_PAIR_D:
- pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_D, status);
- break;
- default:
- return -EINVAL;
- }
-
- result = qca808x_cable_test_result_trans(pair_code);
- ethnl_cable_test_result(phydev, pair, result);
-
- if (qca808x_cdt_fault_length_valid(pair_code)) {
- length = qca808x_cdt_fault_length(phydev, pair, result);
- ethnl_cable_test_fault_length(phydev, pair, length);
- }
-
- return 0;
-}
-
-static int qca808x_cable_test_get_status(struct phy_device *phydev, bool *finished)
-{
- int ret, val;
-
- *finished = false;
-
- val = QCA808X_CDT_ENABLE_TEST |
- QCA808X_CDT_LENGTH_UNIT;
- ret = at803x_cdt_start(phydev, val);
- if (ret)
- return ret;
-
- ret = at803x_cdt_wait_for_completion(phydev, QCA808X_CDT_ENABLE_TEST);
- if (ret)
- return ret;
-
- val = phy_read_mmd(phydev, MDIO_MMD_PCS, QCA808X_MMD3_CDT_STATUS);
- if (val < 0)
- return val;
-
- ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_A, val);
- if (ret)
- return ret;
-
- ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_B, val);
- if (ret)
- return ret;
-
- ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_C, val);
- if (ret)
- return ret;
-
- ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_D, val);
- if (ret)
- return ret;
-
- *finished = true;
-
- return 0;
-}
static int qca808x_get_features(struct phy_device *phydev)
{
--- a/drivers/net/phy/qcom/qcom-phy-lib.c
+++ b/drivers/net/phy/qcom/qcom-phy-lib.c
@@ -5,6 +5,7 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/ethtool_netlink.h>
#include "qcom.h"
@@ -311,6 +312,42 @@ int at803x_prepare_config_aneg(struct ph
}
EXPORT_SYMBOL_GPL(at803x_prepare_config_aneg);
+int at803x_read_status(struct phy_device *phydev)
+{
+ struct at803x_ss_mask ss_mask = { 0 };
+ int err, old_link = phydev->link;
+
+ /* Update the link, but return if there was an error */
+ err = genphy_update_link(phydev);
+ if (err)
+ return err;
+
+ /* why bother the PHY if nothing can have changed */
+ if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link)
+ return 0;
+
+ phydev->speed = SPEED_UNKNOWN;
+ phydev->duplex = DUPLEX_UNKNOWN;
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+
+ err = genphy_read_lpa(phydev);
+ if (err < 0)
+ return err;
+
+ ss_mask.speed_mask = AT803X_SS_SPEED_MASK;
+ ss_mask.speed_shift = __bf_shf(AT803X_SS_SPEED_MASK);
+ err = at803x_read_specific_status(phydev, ss_mask);
+ if (err < 0)
+ return err;
+
+ if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete)
+ phy_resolve_aneg_pause(phydev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(at803x_read_status);
+
static int at803x_get_downshift(struct phy_device *phydev, u8 *d)
{
int val;
@@ -427,3 +464,159 @@ int at803x_cdt_wait_for_completion(struc
return ret < 0 ? ret : 0;
}
EXPORT_SYMBOL_GPL(at803x_cdt_wait_for_completion);
+
+static bool qca808x_cdt_fault_length_valid(int cdt_code)
+{
+ switch (cdt_code) {
+ case QCA808X_CDT_STATUS_STAT_SAME_SHORT:
+ case QCA808X_CDT_STATUS_STAT_SAME_OPEN:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int qca808x_cable_test_result_trans(int cdt_code)
+{
+ switch (cdt_code) {
+ case QCA808X_CDT_STATUS_STAT_NORMAL:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OK;
+ case QCA808X_CDT_STATUS_STAT_SAME_SHORT:
+ return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
+ case QCA808X_CDT_STATUS_STAT_SAME_OPEN:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT:
+ return ETHTOOL_A_CABLE_RESULT_CODE_CROSS_SHORT;
+ case QCA808X_CDT_STATUS_STAT_FAIL:
+ default:
+ return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
+ }
+}
+
+static int qca808x_cdt_fault_length(struct phy_device *phydev, int pair,
+ int result)
+{
+ int val;
+ u32 cdt_length_reg = 0;
+
+ switch (pair) {
+ case ETHTOOL_A_CABLE_PAIR_A:
+ cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_A;
+ break;
+ case ETHTOOL_A_CABLE_PAIR_B:
+ cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_B;
+ break;
+ case ETHTOOL_A_CABLE_PAIR_C:
+ cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_C;
+ break;
+ case ETHTOOL_A_CABLE_PAIR_D:
+ cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_D;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ val = phy_read_mmd(phydev, MDIO_MMD_PCS, cdt_length_reg);
+ if (val < 0)
+ return val;
+
+ if (result == ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT)
+ val = FIELD_GET(QCA808X_CDT_DIAG_LENGTH_SAME_SHORT, val);
+ else
+ val = FIELD_GET(QCA808X_CDT_DIAG_LENGTH_CROSS_SHORT, val);
+
+ return at803x_cdt_fault_length(val);
+}
+
+static int qca808x_cable_test_get_pair_status(struct phy_device *phydev, u8 pair,
+ u16 status)
+{
+ int length, result;
+ u16 pair_code;
+
+ switch (pair) {
+ case ETHTOOL_A_CABLE_PAIR_A:
+ pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_A, status);
+ break;
+ case ETHTOOL_A_CABLE_PAIR_B:
+ pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_B, status);
+ break;
+ case ETHTOOL_A_CABLE_PAIR_C:
+ pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_C, status);
+ break;
+ case ETHTOOL_A_CABLE_PAIR_D:
+ pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_D, status);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ result = qca808x_cable_test_result_trans(pair_code);
+ ethnl_cable_test_result(phydev, pair, result);
+
+ if (qca808x_cdt_fault_length_valid(pair_code)) {
+ length = qca808x_cdt_fault_length(phydev, pair, result);
+ ethnl_cable_test_fault_length(phydev, pair, length);
+ }
+
+ return 0;
+}
+
+int qca808x_cable_test_get_status(struct phy_device *phydev, bool *finished)
+{
+ int ret, val;
+
+ *finished = false;
+
+ val = QCA808X_CDT_ENABLE_TEST |
+ QCA808X_CDT_LENGTH_UNIT;
+ ret = at803x_cdt_start(phydev, val);
+ if (ret)
+ return ret;
+
+ ret = at803x_cdt_wait_for_completion(phydev, QCA808X_CDT_ENABLE_TEST);
+ if (ret)
+ return ret;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_PCS, QCA808X_MMD3_CDT_STATUS);
+ if (val < 0)
+ return val;
+
+ ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_A, val);
+ if (ret)
+ return ret;
+
+ ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_B, val);
+ if (ret)
+ return ret;
+
+ ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_C, val);
+ if (ret)
+ return ret;
+
+ ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_D, val);
+ if (ret)
+ return ret;
+
+ *finished = true;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qca808x_cable_test_get_status);
--- a/drivers/net/phy/qcom/qcom.h
+++ b/drivers/net/phy/qcom/qcom.h
@@ -54,6 +54,55 @@
#define AT803X_CDT_STATUS_STAT_MASK GENMASK(9, 8)
#define AT803X_CDT_STATUS_DELTA_TIME_MASK GENMASK(7, 0)
+#define QCA808X_CDT_ENABLE_TEST BIT(15)
+#define QCA808X_CDT_INTER_CHECK_DIS BIT(13)
+#define QCA808X_CDT_STATUS BIT(11)
+#define QCA808X_CDT_LENGTH_UNIT BIT(10)
+
+#define QCA808X_MMD3_CDT_STATUS 0x8064
+#define QCA808X_MMD3_CDT_DIAG_PAIR_A 0x8065
+#define QCA808X_MMD3_CDT_DIAG_PAIR_B 0x8066
+#define QCA808X_MMD3_CDT_DIAG_PAIR_C 0x8067
+#define QCA808X_MMD3_CDT_DIAG_PAIR_D 0x8068
+#define QCA808X_CDT_DIAG_LENGTH_SAME_SHORT GENMASK(15, 8)
+#define QCA808X_CDT_DIAG_LENGTH_CROSS_SHORT GENMASK(7, 0)
+
+#define QCA808X_CDT_CODE_PAIR_A GENMASK(15, 12)
+#define QCA808X_CDT_CODE_PAIR_B GENMASK(11, 8)
+#define QCA808X_CDT_CODE_PAIR_C GENMASK(7, 4)
+#define QCA808X_CDT_CODE_PAIR_D GENMASK(3, 0)
+
+#define QCA808X_CDT_STATUS_STAT_TYPE GENMASK(1, 0)
+#define QCA808X_CDT_STATUS_STAT_FAIL FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 0)
+#define QCA808X_CDT_STATUS_STAT_NORMAL FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 1)
+#define QCA808X_CDT_STATUS_STAT_SAME_OPEN FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 2)
+#define QCA808X_CDT_STATUS_STAT_SAME_SHORT FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 3)
+
+#define QCA808X_CDT_STATUS_STAT_MDI GENMASK(3, 2)
+#define QCA808X_CDT_STATUS_STAT_MDI1 FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 1)
+#define QCA808X_CDT_STATUS_STAT_MDI2 FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 2)
+#define QCA808X_CDT_STATUS_STAT_MDI3 FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 3)
+
+/* NORMAL are MDI with type set to 0 */
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL QCA808X_CDT_STATUS_STAT_MDI1
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN (QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
+ QCA808X_CDT_STATUS_STAT_MDI1)
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT (QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
+ QCA808X_CDT_STATUS_STAT_MDI1)
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL QCA808X_CDT_STATUS_STAT_MDI2
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN (QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
+ QCA808X_CDT_STATUS_STAT_MDI2)
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT (QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
+ QCA808X_CDT_STATUS_STAT_MDI2)
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL QCA808X_CDT_STATUS_STAT_MDI3
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN (QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
+ QCA808X_CDT_STATUS_STAT_MDI3)
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT (QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
+ QCA808X_CDT_STATUS_STAT_MDI3)
+
+/* Added for reference of existence but should be handled by wait_for_completion already */
+#define QCA808X_CDT_STATUS_STAT_BUSY (BIT(1) | BIT(3))
+
#define AT803X_LOC_MAC_ADDR_0_15_OFFSET 0x804C
#define AT803X_LOC_MAC_ADDR_16_31_OFFSET 0x804B
#define AT803X_LOC_MAC_ADDR_32_47_OFFSET 0x804A
@@ -110,6 +159,7 @@ int at803x_read_specific_status(struct p
struct at803x_ss_mask ss_mask);
int at803x_config_mdix(struct phy_device *phydev, u8 ctrl);
int at803x_prepare_config_aneg(struct phy_device *phydev);
+int at803x_read_status(struct phy_device *phydev);
int at803x_get_tunable(struct phy_device *phydev,
struct ethtool_tunable *tuna, void *data);
int at803x_set_tunable(struct phy_device *phydev,
@@ -118,3 +168,4 @@ int at803x_cdt_fault_length(int dt);
int at803x_cdt_start(struct phy_device *phydev, u32 cdt_start);
int at803x_cdt_wait_for_completion(struct phy_device *phydev,
u32 cdt_en);
+int qca808x_cable_test_get_status(struct phy_device *phydev, bool *finished);
