Diffstat (limited to 'target/linux/goldfish/patches-2.6.30/0055-mm-Add-min_free_order_shift-tunable.patch')
-rw-r--r--  target/linux/goldfish/patches-2.6.30/0055-mm-Add-min_free_order_shift-tunable.patch  62
1 file changed, 62 insertions, 0 deletions
diff --git a/target/linux/goldfish/patches-2.6.30/0055-mm-Add-min_free_order_shift-tunable.patch b/target/linux/goldfish/patches-2.6.30/0055-mm-Add-min_free_order_shift-tunable.patch
new file mode 100644
index 000000000..aec689108
--- /dev/null
+++ b/target/linux/goldfish/patches-2.6.30/0055-mm-Add-min_free_order_shift-tunable.patch
@@ -0,0 +1,62 @@
+From d620f695290e4ffb1586420ba1dbbb5b2c8c075d Mon Sep 17 00:00:00 2001
+From: =?utf-8?q?Arve=20Hj=C3=B8nnev=C3=A5g?= <arve@android.com>
+Date: Tue, 17 Feb 2009 14:51:02 -0800
+Subject: [PATCH 055/134] mm: Add min_free_order_shift tunable.
+MIME-Version: 1.0
+Content-Type: text/plain; charset=utf-8
+Content-Transfer-Encoding: 8bit
+
+By default the kernel tries to keep half as much memory free at each
+order as it does for one order below. This can be too aggressive when
+running without swap.
+
+Signed-off-by: Arve Hjønnevåg <arve@android.com>
+---
+ kernel/sysctl.c | 9 +++++++++
+ mm/page_alloc.c | 3 ++-
+ 2 files changed, 11 insertions(+), 1 deletions(-)
+
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -77,6 +77,7 @@ extern int suid_dumpable;
+ extern char core_pattern[];
+ extern int pid_max;
+ extern int min_free_kbytes;
++extern int min_free_order_shift;
+ extern int pid_max_min, pid_max_max;
+ extern int sysctl_drop_caches;
+ extern int percpu_pagelist_fraction;
+@@ -1138,6 +1139,14 @@ static struct ctl_table vm_table[] = {
+ .extra1 = &zero,
+ },
+ {
++ .ctl_name = CTL_UNNUMBERED,
++ .procname = "min_free_order_shift",
++ .data = &min_free_order_shift,
++ .maxlen = sizeof(min_free_order_shift),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec
++ },
++ {
+ .ctl_name = VM_PERCPU_PAGELIST_FRACTION,
+ .procname = "percpu_pagelist_fraction",
+ .data = &percpu_pagelist_fraction,
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -119,6 +119,7 @@ static char * const zone_names[MAX_NR_ZO
+ };
+
+ int min_free_kbytes = 1024;
++int min_free_order_shift = 1;
+
+ unsigned long __meminitdata nr_kernel_pages;
+ unsigned long __meminitdata nr_all_pages;
+@@ -1258,7 +1259,7 @@ int zone_watermark_ok(struct zone *z, in
+ free_pages -= z->free_area[o].nr_free << o;
+
+ /* Require fewer higher order pages to be free */
+- min >>= 1;
++ min >>= min_free_order_shift;
+
+ if (free_pages <= min)
+ return 0;
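
For reference, the following stand-alone sketch mimics the zone_watermark_ok() loop touched by the last hunk above. It is an illustration, not kernel code: the per-order free counts, the watermark of 1024 pages and the order-5 request are invented inputs chosen so the effect of the shift is visible; only the "min >>= shift" step mirrors the patched line.

/*
 * Minimal user-space sketch of the zone_watermark_ok() loop changed by
 * the last hunk above.  The free-block counts, the watermark of 1024
 * pages and the order-5 request are invented illustration data; only
 * the "min >>= shift" step mirrors the patched kernel line.
 */
#include <stdio.h>

#define MAX_ORDER 11

/* Hypothetical per-order free block counts for an imaginary zone. */
static const unsigned long nr_free[MAX_ORDER] = {
	1024, 256, 64, 16, 4, 1, 0, 0, 0, 0, 0
};

/* Simplified watermark check: returns 1 if an allocation at 'order' may proceed. */
static int watermark_ok(int order, long min, long free_pages, int shift)
{
	int o;

	for (o = 0; o < order; o++) {
		/* Blocks at this order are too small to satisfy the request. */
		free_pages -= nr_free[o] << o;

		/*
		 * 2.6.30 always halved min here (shift == 1); the patch
		 * makes the shift a sysctl so swapless systems can keep
		 * fewer pages free at higher orders.
		 */
		min >>= shift;

		if (free_pages <= min)
			return 0;
	}
	return 1;
}

int main(void)
{
	long free_pages = 0;
	int o, shift;

	for (o = 0; o < MAX_ORDER; o++)
		free_pages += nr_free[o] << o;

	for (shift = 1; shift <= 4; shift++)
		printf("order-5 request, min_free_order_shift=%d: %s\n",
		       shift,
		       watermark_ok(5, 1024, free_pages, shift) ? "ok" : "fail");
	return 0;
}

With these made-up numbers the order-5 check fails at the default shift of 1 but passes once the shift is raised. After the patch is applied the value can be tuned at run time by writing to /proc/sys/vm/min_free_order_shift; the default of 1 preserves the previous halving behaviour.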