本文共 2353 字,大约阅读时间需要 7 分钟。
boolzone_watermark_ok_safe(struct zone *z, unsigned int order,
unsigned long mark, intclasszone_idx)
{
long free_pages = zone_page_state(z,NR_FREE_PAGES);
if (z->percpu_drift_mark &&free_pages < z->percpu_drift_mark)
free_pages = zone_page_state_snapshot(z,NR_FREE_PAGES);
return__zone_watermark_ok(z, order, mark, classzone_idx, 0,
free_pages);
}
boolzone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, int classzone_idx, unsigned intalloc_flags)
{
return __zone_watermark_ok(z, order,mark, classzone_idx, alloc_flags,
zone_page_state(z,NR_FREE_PAGES));
}
static inlinebool zone_watermark_fast(struct zone *z, unsigned int order,
unsigned long mark, intclasszone_idx, unsigned int alloc_flags)
{
long free_pages = zone_page_state(z,NR_FREE_PAGES);
long cma_pages = 0;
#ifdef CONFIG_CMA
/* If allocation can't use CMA areasdon't use free CMA pages */
if (!(alloc_flags & ALLOC_CMA))
cma_pages = zone_page_state(z,NR_FREE_CMA_PAGES);
#endif
if (!order && (free_pages -cma_pages) > mark + z->lowmem_reserve[classzone_idx])
return true;
return__zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
free_pages);
}
/*
 * __zone_watermark_ok - core watermark check.
 *
 * Return true if, after reserving @mark pages (adjusted by @alloc_flags)
 * plus the lowmem reserve for @classzone_idx, at least one page of the
 * requested @order would remain free, AND (for high-order requests) a
 * suitably sized free block actually exists on a usable migratetype list.
 *
 * @free_pages is supplied by the caller so callers can choose between
 * the cached counter and an exact snapshot.  It may go negative during
 * the adjustments below - that is expected and handled by the signed
 * comparison.
 */
bool __zone_watermark_ok(struct zone *z, unsigned int order,
			 unsigned long mark, int classzone_idx,
			 unsigned int alloc_flags, long free_pages)
{
	long min = mark;
	int o;
	const bool alloc_harder = (alloc_flags & ALLOC_HARDER);

	/*
	 * Discount all but one page of the requested block up front:
	 * the check below then asks whether a full order-@order block's
	 * worth of pages remains above the reserve.
	 * free_pages may go negative - that's OK.
	 */
	free_pages -= (1 << order) - 1;

	/* ALLOC_HIGH callers may dip halfway into the watermark. */
	if (alloc_flags & ALLOC_HIGH)
		min -= min / 2;

	/*
	 * Non-ALLOC_HARDER allocations may not touch the highatomic
	 * reserve, so subtract it from what they can see; ALLOC_HARDER
	 * callers instead get a further quarter off the watermark.
	 */
	if (likely(!alloc_harder))
		free_pages -= z->nr_reserved_highatomic;
	else
		min -= min / 4;

#ifdef CONFIG_CMA
	/* If allocation can't use CMA areas don't use free CMA pages */
	if (!(alloc_flags & ALLOC_CMA))
		free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
#endif

	/*
	 * Check watermarks for an order-0 allocation request. The
	 * lowmem reserve protects lower zones from being eaten by
	 * allocations that could have come from a higher zone.
	 */
	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
		return false;

	/* If this is an order-0 request then the watermark is fine */
	if (!order)
		return true;

	/*
	 * For a high-order request, check at least one suitable page is
	 * free: enough total pages does not guarantee a contiguous block.
	 */
	for (o = order; o < MAX_ORDER; o++) {
		struct free_area *area = &z->free_area[o];
		int mt;

		if (!area->nr_free)
			continue;

		/*
		 * ALLOC_HARDER callers may take a block of any
		 * migratetype, so any non-empty order suffices.
		 */
		if (alloc_harder)
			return true;

		for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
			if (!list_empty(&area->free_list[mt]))
				return true;
		}

#ifdef CONFIG_CMA
		if ((alloc_flags & ALLOC_CMA) &&
		    !list_empty(&area->free_list[MIGRATE_CMA])) {
			return true;
		}
#endif
	}

	/* No suitable free block of sufficient order was found. */
	return false;
}
转载地址:http://baqti.baihongyu.com/