Skip to content

Instantly share code, notes, and snippets.

@laoar
Created August 11, 2020 10:48
Show Gist options
  • Save laoar/08824b6356b54c5fa9cede8a886e204b to your computer and use it in GitHub Desktop.
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 9b3c5df75..16fc88b3f 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -416,6 +416,8 @@ struct address_space {
unsigned long nrpages; /* number of total pages */
/* number of shadow or DAX exceptional entries */
unsigned long nrexceptional;
+ /* number of this mapping's pages on the active/inactive file LRUs */
+ /*
+  * NOTE(review): these counters are updated with no mapping-level
+  * locking. One mapping's pages can sit on different NUMA nodes'
+  * LRU lists, serialized by *different* pgdat lru_locks, so the
+  * read-modify-write below can race — confirm the locking story
+  * (atomic_long_t or a per-mapping lock may be needed).
+  */
+ unsigned long nractive;
+ unsigned long nrinactive;
pgoff_t writeback_index;/* writeback starts here */
const struct address_space_operations *a_ops; /* methods */
unsigned long flags; /* error bits */
@@ -553,6 +555,15 @@ static inline void mapping_allow_writable(struct address_space *mapping)
atomic_inc(&mapping->i_mmap_writable);
}
+/*
+ * Fold an LRU size delta into the per-mapping file-LRU counters.
+ * Only the two file LRUs are tracked; deltas for the anon and
+ * unevictable lists are deliberately ignored.
+ *
+ * Contract: @mapping must be a genuine struct address_space. Callers
+ * that pass page->mapping directly must filter out anonymous pages,
+ * whose ->mapping field is an anon_vma pointer tagged with
+ * PAGE_MAPPING_ANON, not an address_space.
+ */
+static inline void mapping_update_lru_size(struct address_space *mapping,
+ enum lru_list lru,
+ long nr)
+{
+ if (lru == LRU_ACTIVE_FILE)
+ mapping->nractive += nr;
+ else if (lru == LRU_INACTIVE_FILE)
+ mapping->nrinactive += nr;
+}
/*
* Use sequence counter to get consistent i_size on 32-bit processors.
*/
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 10191c28f..08f05488e 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -47,22 +47,34 @@ static __always_inline void update_lru_size(struct lruvec *lruvec,
static __always_inline void add_page_to_lru_list(struct page *page,
struct lruvec *lruvec, enum lru_list lru)
{
- update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
+ long nr = hpage_nr_pages(page);
+
+ /*
+  * For anonymous pages page->mapping is an anon_vma pointer with
+  * PAGE_MAPPING_ANON set, not an address_space — and MADV_FREE puts
+  * clean anon pages on the inactive *file* LRU, so without the
+  * PageAnon() filter we would scribble through a bogus pointer.
+  */
+ if (page->mapping && !PageAnon(page))
+ mapping_update_lru_size(page->mapping, lru, nr);
+ update_lru_size(lruvec, lru, page_zonenum(page), nr);
list_add(&page->lru, &lruvec->lists[lru]);
}
static __always_inline void add_page_to_lru_list_tail(struct page *page,
struct lruvec *lruvec, enum lru_list lru)
{
- update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
+ long nr = hpage_nr_pages(page);
+
+ /*
+  * Skip anonymous pages: their ->mapping is a tagged anon_vma
+  * pointer, not an address_space (see add_page_to_lru_list()),
+  * and MADV_FREE can place them on the inactive file LRU.
+  */
+ if (page->mapping && !PageAnon(page))
+ mapping_update_lru_size(page->mapping, lru, nr);
+ update_lru_size(lruvec, lru, page_zonenum(page), nr);
list_add_tail(&page->lru, &lruvec->lists[lru]);
}
static __always_inline void del_page_from_lru_list(struct page *page,
struct lruvec *lruvec, enum lru_list lru)
{
+ /* signed: nr is negated below, so it must not be unsigned long */
+ long nr = hpage_nr_pages(page);
+
+ /*
+  * Skip anonymous pages — their ->mapping is a tagged anon_vma
+  * pointer, not an address_space.
+  * NOTE(review): truncation/reclaim NULLs page->mapping (in
+  * __delete_from_page_cache()) while the page may still be on an
+  * LRU, so this decrement can be skipped and the per-mapping
+  * counters drift upward — needs a hook on page-cache removal.
+  */
+ if (page->mapping && !PageAnon(page))
+ mapping_update_lru_size(page->mapping, lru, -nr);
list_del(&page->lru);
- update_lru_size(lruvec, lru, page_zonenum(page), -hpage_nr_pages(page));
+ update_lru_size(lruvec, lru, page_zonenum(page), -nr);
}
}
/**
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 8e36afbb3..17e006d07 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1689,6 +1689,8 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
nr_taken += nr_pages;
nr_zone_taken[page_zonenum(page)] += nr_pages;
list_move(&page->lru, dst);
+ /*
+  * Isolation takes the page off the LRU, so drop the per-mapping
+  * counters too. Skip anon pages: page->mapping is a tagged
+  * anon_vma pointer for them, not an address_space.
+  */
+ if (page->mapping && !PageAnon(page))
+ mapping_update_lru_size(page->mapping, lru, -nr_pages);
break;
case -EBUSY:
@@ -2036,6 +2038,8 @@ static unsigned move_active_pages_to_lru(struct lruvec *lruvec,
SetPageLRU(page);
nr_pages = hpage_nr_pages(page);
+ /*
+  * This path moves anon pages as well as file pages; for anon
+  * pages ->mapping is a tagged anon_vma pointer, not an
+  * address_space, so they must be filtered out here.
+  */
+ if (page->mapping && !PageAnon(page))
+ mapping_update_lru_size(page->mapping, lru, nr_pages);
update_lru_size(lruvec, lru, page_zonenum(page), nr_pages);
list_move(&page->lru, &lruvec->lists[lru]);
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment