mirror of
https://github.com/Evolution-X-Devices/kernel_google_b1c1
synced 2026-01-27 18:24:29 +00:00
This patchset brings some performance improvements and the addition of the LZO-RLE
algorithm to the kernel, also usable in zram (yup, tested, works but LZ4 is still ok for us).
The main performance improvement is for SWAP space: the locking has changed and
the swap cache is now split into 64 MB chunks.
This reduces the median page fault latency by about 73% (a 3.75x speedup), from 15 µs to 4 µs,
and an improvement of 192% on the swap throughput (this includes "virtual" swap
devices, like zRAM!). The real world user experience improvement of this on a mobile
device is seen after a day or two of usage, where it usually starts losing just a little
performance due to the large amount of apps kept open in background: now I cannot
notice any more performance loss and the user experience is now basically the same as
if the phone was in its first 2 hours of boot life.
Other performance improvements include, in short:
UDP v4/v6: 10% more performance on single RX queue
Userspace applications will be faster when checking running time of threads
2-5% improvements on heavy multipliers (yeah, not a lot, but was totally free...)
Improvements on rare conditions during sparsetruncate of about 0.3% to a
way more rare around 20% improvement (that's never gonna happen, but there
is no performance drop anywhere).
Tested on SoMC Tama Akatsuki RoW
This was taken from
Repo:
https://github.com/sonyxperiadev/kernel
PR: 2039 ([2.3.2.r1.4] Performance improvements)
31 lines · 801 B · C
#ifndef _LINUX_SWAP_SLOTS_H
#define _LINUX_SWAP_SLOTS_H

#include <linux/swap.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>

/* Capacity of each per-direction slot array, in swap entries. */
#define SWAP_SLOTS_CACHE_SIZE			SWAP_BATCH
/* Usage threshold above which the slots cache is activated. */
#define THRESHOLD_ACTIVATE_SWAP_SLOTS_CACHE	(5*SWAP_SLOTS_CACHE_SIZE)
/* Usage threshold below which the slots cache is deactivated. */
#define THRESHOLD_DEACTIVATE_SWAP_SLOTS_CACHE	(2*SWAP_SLOTS_CACHE_SIZE)

/*
 * Cache of swap slots, batching allocation and freeing of swap
 * entries. Two independently-locked arrays: @slots holds entries
 * ready to hand out, @slots_ret collects entries being returned.
 */
struct swap_slots_cache {
	bool		lock_initialized;	/* true once locks below are set up */
	struct mutex	alloc_lock;	/* protects slots, nr, cur */
	swp_entry_t	*slots;		/* entries available for allocation */
	int		nr;		/* number of entries left in slots[] */
	int		cur;		/* index of next entry to hand out */
	spinlock_t	free_lock;	/* protects slots_ret, n_ret */
	swp_entry_t	*slots_ret;	/* entries waiting to be freed in batch */
	int		n_ret;		/* number of entries in slots_ret[] */
};

void disable_swap_slots_cache_lock(void);
void reenable_swap_slots_cache_unlock(void);
int enable_swap_slots_cache(void);
int free_swap_slot(swp_entry_t entry);

/* Global flag: whether the swap slots cache is currently enabled. */
extern bool swap_slot_cache_enabled;

#endif /* _LINUX_SWAP_SLOTS_H */