xen/tmem: Don't over-write tmem_frontswap_poolid after tmem_frontswap_init set it.
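
frontswap_register_ops() can end up calling the new ops' ->init hook right
away, for swap areas that were already enabled before the backend showed up,
and tmem_frontswap_init() stores the freshly created pool id in
tmem_frontswap_poolid; resetting the variable to -1 only after registration
therefore threw that id away. The fix below performs the reset before
registering. A minimal userspace sketch of the hazard (the frontswap API is
stubbed out and the pool id value is made up, so this is illustrative only,
not the driver code):

#include <stdio.h>

static int tmem_frontswap_poolid = -1;

/* Stand-in for tmem_frontswap_init(): the real callback creates a
 * persistent tmem pool and records its id here. */
static void tmem_frontswap_init(unsigned type)
{
	(void)type;
	tmem_frontswap_poolid = 42;	/* pretend the hypervisor handed back pool 42 */
}

/* Stand-in for frontswap_register_ops(): registration may invoke ->init
 * for swap areas that were activated before the backend registered. */
static void frontswap_register_ops(void (*init)(unsigned))
{
	init(0);
}

int main(void)
{
	/* Old order: register first, reset afterwards -- the reset
	 * clobbers the pool id that ->init just stored. */
	frontswap_register_ops(tmem_frontswap_init);
	tmem_frontswap_poolid = -1;
	printf("old order: poolid = %d\n", tmem_frontswap_poolid);

	/* New order (what the patch does): reset first, then register. */
	tmem_frontswap_poolid = -1;
	frontswap_register_ops(tmem_frontswap_init);
	printf("new order: poolid = %d\n", tmem_frontswap_poolid);
	return 0;
}
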
diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c
index e3600be..0f0493c 100644
--- a/drivers/xen/tmem.c
+++ b/drivers/xen/tmem.c
 #include <linux/init.h>
 #include <linux/pagemap.h>
 #include <linux/cleancache.h>
-
-/* temporary ifdef until include/linux/frontswap.h is upstream */
-#ifdef CONFIG_FRONTSWAP
 #include <linux/frontswap.h>
-#endif
 
 #include <xen/xen.h>
 #include <xen/interface/xen.h>
 #include <asm/xen/hypervisor.h>
 #include <xen/tmem.h>
 
+#ifndef CONFIG_XEN_TMEM_MODULE
+bool __read_mostly tmem_enabled = false;
+
+static int __init enable_tmem(char *s)
+{
+       tmem_enabled = true;
+       return 1;
+}
+__setup("tmem", enable_tmem);
+#endif
+
+#ifdef CONFIG_CLEANCACHE
+static bool cleancache __read_mostly = true;
+module_param(cleancache, bool, S_IRUGO);
+static bool selfballooning __read_mostly = true;
+module_param(selfballooning, bool, S_IRUGO);
+#endif /* CONFIG_CLEANCACHE */
+
+#ifdef CONFIG_FRONTSWAP
+static bool frontswap __read_mostly = true;
+module_param(frontswap, bool, S_IRUGO);
+#else /* CONFIG_FRONTSWAP */
+#define frontswap (0)
+#endif /* CONFIG_FRONTSWAP */
+
+#ifdef CONFIG_XEN_SELFBALLOONING
+static bool selfshrinking __read_mostly = true;
+module_param(selfshrinking, bool, S_IRUGO);
+#endif /* CONFIG_XEN_SELFBALLOONING */
+
 #define TMEM_CONTROL               0
 #define TMEM_NEW_POOL              1
 #define TMEM_DESTROY_POOL          2
@@ -129,16 +155,6 @@ static int xen_tmem_flush_object(u32 pool_id, struct tmem_oid oid)
        return xen_tmem_op(TMEM_FLUSH_OBJECT, pool_id, oid, 0, 0, 0, 0, 0);
 }
 
-#ifndef CONFIG_XEN_TMEM_MODULE
-bool __read_mostly tmem_enabled = false;
-
-static int __init enable_tmem(char *s)
-{
-       tmem_enabled = true;
-       return 1;
-}
-__setup("tmem", enable_tmem);
-#endif
 
 #ifdef CONFIG_CLEANCACHE
 static int xen_tmem_destroy_pool(u32 pool_id)
@@ -230,20 +246,6 @@ static int tmem_cleancache_init_shared_fs(char *uuid, size_t pagesize)
        return xen_tmem_new_pool(shared_uuid, TMEM_POOL_SHARED, pagesize);
 }
 
-static bool disable_cleancache __read_mostly;
-static bool disable_selfballooning __read_mostly;
-#ifdef CONFIG_XEN_TMEM_MODULE
-module_param(disable_cleancache, bool, S_IRUGO);
-module_param(disable_selfballooning, bool, S_IRUGO);
-#else
-static int __init no_cleancache(char *s)
-{
-       disable_cleancache = true;
-       return 1;
-}
-__setup("nocleancache", no_cleancache);
-#endif
-
 static struct cleancache_ops tmem_cleancache_ops = {
        .put_page = tmem_cleancache_put_page,
        .get_page = tmem_cleancache_get_page,
@@ -361,20 +363,6 @@ static void tmem_frontswap_init(unsigned ignored)
                    xen_tmem_new_pool(private, TMEM_POOL_PERSIST, PAGE_SIZE);
 }
 
-static bool disable_frontswap __read_mostly;
-static bool disable_frontswap_selfshrinking __read_mostly;
-#ifdef CONFIG_XEN_TMEM_MODULE
-module_param(disable_frontswap, bool, S_IRUGO);
-module_param(disable_frontswap_selfshrinking, bool, S_IRUGO);
-#else
-static int __init no_frontswap(char *s)
-{
-       disable_frontswap = true;
-       return 1;
-}
-__setup("nofrontswap", no_frontswap);
-#endif
-
 static struct frontswap_ops tmem_frontswap_ops = {
        .store = tmem_frontswap_store,
        .load = tmem_frontswap_load,
@@ -382,8 +370,6 @@ static struct frontswap_ops tmem_frontswap_ops = {
        .invalidate_area = tmem_frontswap_flush_area,
        .init = tmem_frontswap_init
 };
-#else  /* CONFIG_FRONTSWAP */
-#define disable_frontswap_selfshrinking 1
 #endif
 
 static int xen_tmem_init(void)
@@ -391,12 +377,12 @@ static int xen_tmem_init(void)
        if (!xen_domain())
                return 0;
 #ifdef CONFIG_FRONTSWAP
-       if (tmem_enabled && !disable_frontswap) {
+       if (tmem_enabled && frontswap) {
                char *s = "";
-               struct frontswap_ops *old_ops =
-                       frontswap_register_ops(&tmem_frontswap_ops);
+               struct frontswap_ops *old_ops;
 
                tmem_frontswap_poolid = -1;
+               old_ops = frontswap_register_ops(&tmem_frontswap_ops);
                if (IS_ERR(old_ops) || old_ops) {
                        if (IS_ERR(old_ops))
                                return PTR_ERR(old_ops);
@@ -408,7 +394,7 @@ static int xen_tmem_init(void)
 #endif
 #ifdef CONFIG_CLEANCACHE
        BUG_ON(sizeof(struct cleancache_filekey) != sizeof(struct tmem_oid));
-       if (tmem_enabled && !disable_cleancache) {
+       if (tmem_enabled && cleancache) {
                char *s = "";
                struct cleancache_ops *old_ops =
                        cleancache_register_ops(&tmem_cleancache_ops);
@@ -419,8 +405,15 @@ static int xen_tmem_init(void)
        }
 #endif
 #ifdef CONFIG_XEN_SELFBALLOONING
-       xen_selfballoon_init(!disable_selfballooning,
-                               !disable_frontswap_selfshrinking);
+       /*
+        * There is no point in driving pages to the swap system if they
+        * aren't going anywhere in the tmem universe.
+        */
+       if (!frontswap) {
+               selfshrinking = false;
+               selfballooning = false;
+       }
+       xen_selfballoon_init(selfballooning, selfshrinking);
 #endif
        return 0;
 }
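
For completeness: the patch also flips the old negative "disable_*" knobs into
positive booleans, so turning a feature off now means passing 0 instead of a
"no*"/"disable_*" option. Illustrative invocations only (the parameter names
come from the module_param() calls above; the module name "tmem" and the
"tmem." command-line prefix for the built-in case are assumptions based on the
object being tmem.o):

	modprobe tmem frontswap=0 selfshrinking=0

	tmem tmem.cleancache=0 tmem.selfballooning=0

The bare "tmem" token in the second (kernel command line) example is the
__setup("tmem", enable_tmem) boot option that enables the shim when it is
built in.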