for (i = 0; i < pagevec_count(&pvec); ++i) {
struct page *page = pvec.pages[i];
- bool rsv_on_error;
u32 hash;
/*
* We must free the huge page and remove from page
* cache (remove_huge_page) BEFORE removing the
* region/reserve map (hugetlb_unreserve_pages). In
* rare out of memory conditions, removal of the
- * region/reserve map could fail. Before free'ing
- * the page, note PagePrivate which is used in case
- * of error.
+ * region/reserve map could fail. Correspondingly,
+ * the subpool and global reserve usage counts may
+ * need to be adjusted.
*/
- rsv_on_error = !PagePrivate(page);
+ VM_BUG_ON(PagePrivate(page));
remove_huge_page(page);
freed++;
if (!truncate_op) {
if (unlikely(hugetlb_unreserve_pages(inode,
next, next + 1, 1)))
- hugetlb_fix_reserve_counts(inode,
- rsv_on_error);
+ hugetlb_fix_reserve_counts(inode);
}
unlock_page(page);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
void free_huge_page(struct page *page);
-void hugetlb_fix_reserve_counts(struct inode *inode, bool restore_reserve);
+void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
struct vm_area_struct *vma,
* appear as a "reserved" entry instead of simply dangling with incorrect
* counts.
*/
-void hugetlb_fix_reserve_counts(struct inode *inode, bool restore_reserve)
+void hugetlb_fix_reserve_counts(struct inode *inode)
{
struct hugepage_subpool *spool = subpool_inode(inode);
long rsv_adjust;
rsv_adjust = hugepage_subpool_get_pages(spool, 1);
- if (restore_reserve && rsv_adjust) {
+ if (rsv_adjust) {
struct hstate *h = hstate_inode(inode);
hugetlb_acct_memory(h, 1);
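
For reference, a minimal sketch of how hugetlb_fix_reserve_counts() reads once this change is applied; the hunk above ends before the closing braces, and the sketch only assumes the remaining lines close the if block and the function. All helpers used (subpool_inode, hugepage_subpool_get_pages, hstate_inode, hugetlb_acct_memory) are the existing hugetlb routines already referenced above.

void hugetlb_fix_reserve_counts(struct inode *inode)
{
	struct hugepage_subpool *spool = subpool_inode(inode);
	long rsv_adjust;

	/*
	 * Re-take one page's worth of usage from the subpool so the entry
	 * left behind in the reserve map is backed by real accounting
	 * rather than dangling with incorrect counts.
	 * hugepage_subpool_get_pages() returns the number of pages by
	 * which the global pools must also be adjusted.
	 */
	rsv_adjust = hugepage_subpool_get_pages(spool, 1);
	if (rsv_adjust) {
		struct hstate *h = hstate_inode(inode);

		/* Charge the global reserve so the counts stay consistent. */
		hugetlb_acct_memory(h, 1);
	}
}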