--- zzzz-none-000/linux-2.6.28.10/mm/truncate.c	2009-05-02 18:54:43.000000000 +0000
+++ puma5-6360-529/linux-2.6.28.10/mm/truncate.c	2010-04-07 09:56:53.000000000 +0000
@@ -314,7 +314,66 @@
 	}
 	return ret;
 }
+#ifdef CONFIG_FUSIV_USB_OPTIMIZATION
+/* Derived from __invalidate_mapping_pages.  At most 32 pages are
+   invalidated per call; the resume index is stored back in the mapping. */
+unsigned long fusiv_invalidate_mapping_pages(struct address_space *mapping, pgoff_t end)
+{
+	struct pagevec pvec;
+	pgoff_t next;
+	unsigned long ret = 0, error = 0;
+	struct page *page;
+	int i;
+	next = mapping->page_tree.last_reclaim_index;
+	pagevec_init(&pvec, 0);
+	while (next <= end && ret < 32 && !error &&
+		pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
+		for (i = 0; i < pagevec_count(&pvec); i++) {
+			pgoff_t index;
+			page = pvec.pages[i];
+
+			if (!trylock_page(page)) {
+				error = 1;
+				break;
+			}
+
+			/*
+			 * We really shouldn't be looking at the ->index of an
+			 * unlocked page.  But we're not allowed to lock these
+			 * pages.  So we rely upon nobody altering the ->index
+			 * of this (pinned-by-us) page.
+			 */
+			index = page->index;
+			if (index > next)
+				next = index;
+			next++;
+
+			if (!(PageDirty(page) || PageWriteback(page) ||
+			      page_mapped(page)))
+				ret += invalidate_complete_page(mapping, page);
+			else
+				error = 1;
+			unlock_page(page);
+			if (next > end || error || ret >= 32)
+				break;
+		}
+		pagevec_release(&pvec);
+	}
+	if (!ret) {	/* nothing freed: the reclaim index may be stale */
+		if (!error) {
+			if (!(page = find_get_page(mapping, mapping->page_tree.last_reclaim_index)))
+				mapping->page_tree.last_reclaim_index = 0;	/* restart from offset 0 */
+			else
+				page_cache_release(page);	/* drop our extra reference */
+		}
+	}
+	else
+		mapping->page_tree.last_reclaim_index = next;
+	return ret;
+}
+EXPORT_SYMBOL(fusiv_invalidate_mapping_pages);
+#endif
 
 /**
  * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate