[Deepin-Kernel-SIG] [linux 6.6-y] [Upstream] Add folio_end_read #690
base: linux-6.6.y
Changes from all commits: 1851613, f68e48b, 6894f96, efe1f38, a5fd89b, ed77f40, 409e640, 274dc49, b04f8fd, 5576c83, daef225, 0efb534, 2006515, 5361dfc, 1d8d227, b30a671, b426260
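In short, this series lets read-completion paths replace the pair "mark the folio uptodate, then unlock it" with a single folio_end_read() call, which updates both flags in one atomic XOR of the folio flags. The caller-side shape of the change, taken from the ext4 diff below:

```c
/* Before: separate atomic flag operations per folio. */
if (bio->bi_status)
	folio_clear_uptodate(folio);
else
	folio_mark_uptodate(folio);
folio_unlock(folio);

/* After: one call; internally a single atomic XOR of the folio flags. */
folio_end_read(folio, bio->bi_status == 0);
```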
arch/mips/include/asm/bitops.h

```diff
@@ -73,7 +73,8 @@ int __mips_test_and_clear_bit(unsigned long nr,
 		volatile unsigned long *addr);
 int __mips_test_and_change_bit(unsigned long nr,
 		volatile unsigned long *addr);
-
+bool __mips_xor_is_negative_byte(unsigned long mask,
+		volatile unsigned long *addr);
 
 /*
  * set_bit - Atomically set a bit in memory
```
```diff
@@ -279,6 +280,28 @@ static inline int test_and_change_bit(unsigned long nr,
 	return res;
 }
 
+static inline bool xor_unlock_is_negative_byte(unsigned long mask,
+		volatile unsigned long *p)
+{
+	unsigned long orig;
+	bool res;
+
+	smp_mb__before_atomic();
+
+	if (!kernel_uses_llsc) {
+		res = __mips_xor_is_negative_byte(mask, p);
+	} else {
+		orig = __test_bit_op(*p, "%0",
+				"xor\t%1, %0, %3",
+				"ir"(mask));
+		res = (orig & BIT(7)) != 0;
+	}
+
+	smp_llsc_mb();
+
+	return res;
+}
+
 #undef __bit_op
 #undef __test_bit_op
```

Review comment — issue (complexity): Consider factoring the conditional into a helper that performs the atomic XOR and returns the old value at a single point, so the bit-7 test appears only once. For example:

```c
static inline unsigned long xor_unlock_atomic(volatile unsigned long *p,
		unsigned long mask)
{
	if (!kernel_uses_llsc)
		/* The C fallback already returns the bit-7 test; widen it back. */
		return __mips_xor_is_negative_byte(mask, p) ? BIT(7) : 0;
	return __test_bit_op(*p, "%0", "xor\t%1, %0, %3", "ir"(mask));
}

static inline bool xor_unlock_is_negative_byte(unsigned long mask,
		volatile unsigned long *p)
{
	unsigned long orig;

	smp_mb__before_atomic();
	orig = xor_unlock_atomic(p, mask);
	smp_llsc_mb();
	return (orig & BIT(7)) != 0;
}
```

This consolidates the conditional logic and removes the duplicated bit-7 handling while retaining the original behavior.
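Not part of this diff: architectures without their own implementation get a generic xor_unlock_is_negative_byte() fallback in the upstream series. A rough sketch of that fallback, assuming the usual atomic_long helpers — the unlock becomes one release-ordered atomic XOR, and bit 7 of the old value (PG_waiters, in the first byte of the folio flags) reports whether waiters must be woken:

```c
/* Rough sketch of the generic fallback (not part of this diff). */
static inline bool xor_unlock_is_negative_byte(unsigned long mask,
		volatile unsigned long *addr)
{
	unsigned long old;

	/* One release-ordered read-modify-write replaces clear + test. */
	old = atomic_long_fetch_xor_release(mask, (atomic_long_t *)addr);
	return (old & BIT(7)) != 0;
}
```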
arch/mips/lib/bitops.c

```diff
@@ -146,3 +146,17 @@ int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 	return res;
 }
 EXPORT_SYMBOL(__mips_test_and_change_bit);
+
+bool __mips_xor_is_negative_byte(unsigned long mask,
+		volatile unsigned long *addr)
+{
+	unsigned long flags;
+	unsigned long data;
+
+	raw_local_irq_save(flags);
+	data = *addr;
+	*addr = data ^ mask;
+	raw_local_irq_restore(flags);
+
+	return (data & BIT(7)) != 0;
+}
```

Suggested change (also export the new symbol):

```diff
 }
+EXPORT_SYMBOL(__mips_xor_is_negative_byte);
```
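Note on the fallback: like the existing __mips_* helpers in this file, __mips_xor_is_negative_byte() serves cores without ll/sc; those are uniprocessor parts, so disabling local interrupts around the read-modify-write is sufficient to make it atomic.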
fs/ext4/readpage.c

```diff
@@ -70,15 +70,8 @@ static void __read_end_io(struct bio *bio)
 {
 	struct folio_iter fi;
 
-	bio_for_each_folio_all(fi, bio) {
-		struct folio *folio = fi.folio;
-
-		if (bio->bi_status)
-			folio_clear_uptodate(folio);
-		else
-			folio_mark_uptodate(folio);
-		folio_unlock(folio);
-	}
+	bio_for_each_folio_all(fi, bio)
+		folio_end_read(fi.folio, bio->bi_status == 0);
 	if (bio->bi_private)
 		mempool_free(bio->bi_private, bio_post_read_ctx_pool);
 	bio_put(bio);
@@ -335,8 +328,7 @@ int ext4_mpage_readpages(struct inode *inode,
 		if (ext4_need_verity(inode, folio->index) &&
 		    !fsverity_verify_folio(folio))
 			goto set_error_page;
-		folio_mark_uptodate(folio);
-		folio_unlock(folio);
+		folio_end_read(folio, true);
```

Suggested change on the ext4_mpage_readpages() hunk, presumably because folio_end_read() sets PG_uptodate with an XOR and so would clear it on a folio that is already uptodate:

```diff
-		folio_end_read(folio, true);
+		if (!folio_test_uptodate(folio))
+			folio_end_read(folio, true);
+		else
+			folio_unlock(folio);
```
Review comment — issue (complexity): Consider using the GNU __atomic builtins, where the platform supports them, to replace the ll/sc inline-assembly loop with a single atomic XOR operation.
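For example, a minimal sketch assuming GCC's __atomic_fetch_xor() may be used in this context (mainline MIPS keeps its own ll/sc helpers instead):

```c
/* Sketch only: assumes the GCC __atomic builtins are usable here. */
static inline bool xor_unlock_is_negative_byte(unsigned long mask,
		volatile unsigned long *p)
{
	/* One release-ordered read-modify-write replaces the ll/sc loop. */
	unsigned long orig = __atomic_fetch_xor(p, mask, __ATOMIC_RELEASE);

	return (orig & BIT(7)) != 0;
}
```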
This preserves the functionality while reducing the low-level complexity and duplicate logic.