【Linux Memory Management】The Buddy Allocator (4)

This installment picks up the page-freeing path that the previous parts left unexamined and analyzes in detail how the buddy allocator releases pages. The entry point for freeing a page is __free_page(), which is in fact just a macro.

Its definition:

【file:/include/linux/gfp.h】
#define __free_page(page) __free_pages((page), 0)


And the implementation of __free_pages():

【file:/mm/page_alloc.c】
void __free_pages(struct page *page, unsigned int order)
{
    if (put_page_testzero(page)) {
        if (order == 0)
            free_hot_cold_page(page, 0);
        else
            __free_pages_ok(page, order);
    }
}


Here put_page_testzero() atomically decrements the page's _count reference count and tests the result, i.e. it checks whether the page is still in use and lets the free proceed only when the count drops to zero. The order parameter encodes the number of pages (2^order). When a single page is freed (order 0), free_hot_cold_page() returns it to the per-CPU page cache rather than to the buddy system; the release into the buddy allocator proper is done by __free_pages_ok(), which also covers multi-page frees.
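
As an aside, put_page_testzero() boils down to an atomic decrement-and-test on page->_count. Below is a minimal userspace model of the same semantics; the naming is mine, and the real kernel helper additionally carries a VM_BUG_ON sanity check:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Userspace model of put_page_testzero(): atomically drop one
 * reference and report whether the count reached zero, i.e. whether
 * the caller is now responsible for freeing the page. */
static bool put_page_testzero_model(atomic_int *count)
{
    /* atomic_fetch_sub() returns the value held *before* subtracting */
    return atomic_fetch_sub(count, 1) == 1;
}

int main(void)
{
    atomic_int refcount = 2;

    printf("%d\n", put_page_testzero_model(&refcount)); /* 0: still in use */
    printf("%d\n", put_page_testzero_model(&refcount)); /* 1: free it now */
    return 0;
}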

The analysis continues with free_hot_cold_page():

【file:/mm/page_alloc.c】
/*
 * Free a 0-order page
 * cold == 1 ? free a cold page : free a hot page
 */
void free_hot_cold_page(struct page *page, int cold)
{
    struct zone *zone = page_zone(page);
    struct per_cpu_pages *pcp;
    unsigned long flags;
    int migratetype;

    if (!free_pages_prepare(page, 0))
        return;

    migratetype = get_pageblock_migratetype(page);
    set_freepage_migratetype(page, migratetype);
    local_irq_save(flags);
    __count_vm_event(PGFREE);

    /*
     * We only track unmovable, reclaimable and movable on pcp lists.
     * Free ISOLATE pages back to the allocator because they are being
     * offlined but treat RESERVE as movable pages so we can get those
     * areas back if necessary. Otherwise, we may have to free
     * excessively into the page allocator
     */
    if (migratetype >= MIGRATE_PCPTYPES) {
        if (unlikely(is_migrate_isolate(migratetype))) {
            free_one_page(zone, page, 0, migratetype);
            goto out;
        }
        migratetype = MIGRATE_MOVABLE;
    }

    pcp = &this_cpu_ptr(zone->pageset)->pcp;
    if (cold)
        list_add_tail(&page->lru, &pcp->lists[migratetype]);
    else
        list_add(&page->lru, &pcp->lists[migratetype]);
    pcp->count++;
    if (pcp->count >= pcp->high) {
        unsigned long batch = ACCESS_ONCE(pcp->batch);
        free_pcppages_bulk(zone, batch, pcp);
        pcp->count -= batch;
    }

out:
    local_irq_restore(flags);
}


First, a look at the implementation of free_pages_prepare():

【file:/mm/page_alloc.c】
static bool free_pages_prepare(struct page *page, unsigned int order)
{
    int i;
    int bad = 0;

    trace_mm_page_free(page, order);
    kmemcheck_free_shadow(page, order);

    if (PageAnon(page))
        page->mapping = NULL;
    for (i = 0; i < (1 << order); i++)
        bad += free_pages_check(page + i);
    if (bad)
        return false;

    if (!PageHighMem(page)) {
        debug_check_no_locks_freed(page_address(page),
                       PAGE_SIZE << order);
        debug_check_no_obj_freed(page_address(page),
                       PAGE_SIZE << order);
    }
    arch_free_page(page, order);
    kernel_map_pages(page, 1 << order, 0);

    return true;
}


Here trace_mm_page_free() serves the trace infrastructure, while kmemcheck_free_shadow() supports the kmemcheck memory checker and compiles to an empty function when CONFIG_KMEMCHECK is not set. The PageAnon() test and the checks that follow examine the page's state to decide whether it may legitimately be freed, guarding against erroneous releases. In short, this function exists for checking and debugging.
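
For completeness, the per-page test it relies on is free_pages_check() from the same file. Quoted from memory for this kernel generation, so treat the exact body as approximate; it declares a page "bad" if any state that must be clean at free time is not:

static inline int free_pages_check(struct page *page)
{
    if (unlikely(page_mapcount(page) |
        (page->mapping != NULL)  |
        (atomic_read(&page->_count) != 0) |
        (page->flags & PAGE_FLAGS_CHECK_AT_FREE) |
        (mem_cgroup_bad_page_check(page)))) {
        bad_page(page);
        return 1;
    }
    page_cpupid_reset_last(page);
    if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
        page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
    return 0;
}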

Returning to free_hot_cold_page(): get_pageblock_migratetype() and set_freepage_migratetype() respectively fetch and record the page's migrate type (the latter stores it into page->index), while local_irq_save() and the local_irq_restore() at the end save and restore the interrupt flags.
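
For reference, the freepage pair of helpers is trivial. Quoting from memory from include/linux/mm.h of this kernel generation (so the exact form is approximate), they simply park the migrate type in page->index; get_pageblock_migratetype(), by contrast, reads the type bits from the zone's pageblock bitmap:

static inline void set_freepage_migratetype(struct page *page, int migratetype)
{
    page->index = migratetype;
}

static inline int get_freepage_migratetype(struct page *page)
{
    return page->index;
}

Back in free_hot_cold_page(), the next fragment decides whether the page is allowed onto a pcp list at all: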

    if (migratetype >= MIGRATE_PCPTYPES) {
        if (unlikely(is_migrate_isolate(migratetype))) {
            free_one_page(zone, page, 0, migratetype);
            goto out;
        }
        migratetype = MIGRATE_MOVABLE;
    }

MIGRATE_PCPTYPES here is the number of migrate-type lists maintained in the per-CPU page cache. A migrate type greater than or equal to MIGRATE_PCPTYPES is therefore not tracked on the pcp lists: if it is MIGRATE_ISOLATE, the page is handed straight back to the buddy allocator via free_one_page() (such pages are being offlined); anything else, such as MIGRATE_RESERVE, is treated as MIGRATE_MOVABLE so those areas can be recovered later if necessary.
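
To make the comparison concrete, the migrate-type enumeration of this kernel generation (include/linux/mmzone.h, quoted from memory, members varying with configuration) looks roughly as follows; only the first three types get pcp lists:

enum {
    MIGRATE_UNMOVABLE,
    MIGRATE_RECLAIMABLE,
    MIGRATE_MOVABLE,
    MIGRATE_PCPTYPES,   /* the number of types on the pcp lists */
    MIGRATE_RESERVE = MIGRATE_PCPTYPES,
#ifdef CONFIG_CMA
    MIGRATE_CMA,
#endif
#ifdef CONFIG_MEMORY_ISOLATION
    MIGRATE_ISOLATE,    /* can't allocate from here */
#endif
    MIGRATE_TYPES
};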

The closing part:

    pcp = &this_cpu_ptr(zone->pageset)->pcp;
    if (cold)
        list_add_tail(&page->lru, &pcp->lists[migratetype]);
    else
        list_add(&page->lru, &pcp->lists[migratetype]);
    pcp->count++;
    if (pcp->count >= pcp->high) {
        unsigned long batch = ACCESS_ONCE(pcp->batch);
        free_pcppages_bulk(zone, batch, pcp);
        pcp->count -= batch;
    }

Here pcp is the zone's per-CPU page-cache management structure and cold distinguishes cold pages from hot ones: a cold page is appended to the tail of the list for its migrate type, while a hot page goes on the head. The test if (pcp->count >= pcp->high) is worth noting: once the pages cached for this CPU exceed the high watermark, a batch of pcp->batch pages is released in bulk into the buddy allocator.
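
A toy model helps picture the watermark behaviour; the numbers below are made up for illustration (the real high and batch values are tuned per zone at boot):

#include <stdio.h>

/* Toy model of the per-CPU page cache watermark: frees pile up on the
 * pcp lists until 'high' is reached, then 'batch' pages are drained
 * back to the buddy allocator in one go. */
int main(void)
{
    int count = 0, high = 186, batch = 31; /* hypothetical tuning */
    int freed;

    for (freed = 1; freed <= 400; freed++) {
        count++;                  /* free_hot_cold_page: pcp->count++ */
        if (count >= high) {
            count -= batch;       /* free_pcppages_bulk(zone, batch, pcp) */
            printf("after %d frees: drained %d, %d still cached\n",
                   freed, batch, count);
        }
    }
    return 0;
}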

Next, a concrete look at free_pcppages_bulk(), which performs the actual bulk release into the buddy allocator:

【file:/mm/page_alloc.c】
/*
 * Frees a number of pages from the PCP lists
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pcppages_bulk(struct zone *zone, int count,
                    struct per_cpu_pages *pcp)
{
    int migratetype = 0;
    int batch_free = 0;
    int to_free = count;

    spin_lock(&zone->lock);
    zone->pages_scanned = 0;

    while (to_free) {
        struct page *page;
        struct list_head *list;

        /*
         * Remove pages from lists in a round-robin fashion. A
         * batch_free count is maintained that is incremented when an
         * empty list is encountered.  This is so more pages are freed
         * off fuller lists instead of spinning excessively around empty
         * lists
         */
        do {
            batch_free++;
            if (++migratetype == MIGRATE_PCPTYPES)
                migratetype = 0;
            list = &pcp->lists[migratetype];
        } while (list_empty(list));

        /* This is the only non-empty list. Free them all. */
        if (batch_free == MIGRATE_PCPTYPES)
            batch_free = to_free;

        do {
            int mt;	/* migratetype of the to-be-freed page */

            page = list_entry(list->prev, struct page, lru);
            /* must delete as __free_one_page list manipulates */
            list_del(&page->lru);
            mt = get_freepage_migratetype(page);
            /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
            __free_one_page(page, zone, 0, mt);
            trace_mm_page_pcpu_drain(page, 0, mt);
            if (likely(!is_migrate_isolate_page(page))) {
                __mod_zone_page_state(zone, NR_FREE_PAGES, 1);
                if (is_migrate_cma(mt))
                    __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 1);
            }
        } while (--to_free && --batch_free && !list_empty(list));
    }
    spin_unlock(&zone->lock);
}


The outer while loop runs until the requested count of pages has been freed. Draining starts from the MIGRATE_UNMOVABLE list and cycles through the pcp migrate types, wrapping around at MIGRATE_PCPTYPES; batch_free grows by one for every list examined, so fuller lists give up more pages instead of the loop spinning over empty ones:

        do {
            batch_free++;
            if (++migratetype == MIGRATE_PCPTYPES)
                migratetype = 0;
            list = &pcp->lists[migratetype];
        } while (list_empty(list));

If batch_free climbs to MIGRATE_PCPTYPES, every other list was found empty, so the list just located is the only non-empty one and all the remaining pages are freed from it (batch_free is set to to_free).

In the do{}while() that follows, each page is first unlinked from the list via its lru member, its recorded migrate type is read back with get_freepage_migratetype(), the page is released through __free_one_page(), and __mod_zone_page_state() finally updates the zone's free-page statistics.

Now for a focused analysis of the __free_one_page() implementation:

【file:/mm/page_alloc.c】
/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of continuous
 * free pages of length of (1 << order) and marked with _mapcount
 * PAGE_BUDDY_MAPCOUNT_VALUE. Page's order is recorded in page_private(page)
 * field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- nyc
 */

static inline void __free_one_page(struct page *page,
        struct zone *zone, unsigned int order,
        int migratetype)
{
    unsigned long page_idx;
    unsigned long combined_idx;
    unsigned long uninitialized_var(buddy_idx);
    struct page *buddy;

    VM_BUG_ON(!zone_is_initialized(zone));

    if (unlikely(PageCompound(page)))
        if (unlikely(destroy_compound_page(page, order)))
            return;

    VM_BUG_ON(migratetype == -1);

    page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);

    VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page);
    VM_BUG_ON_PAGE(bad_range(zone, page), page);

    while (order < MAX_ORDER-1) {
        buddy_idx = __find_buddy_index(page_idx, order);
        buddy = page + (buddy_idx - page_idx);
        if (!page_is_buddy(page, buddy, order))
            break;
        /*
         * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
         * merge with it and move up one order.
         */
        if (page_is_guard(buddy)) {
            clear_page_guard_flag(buddy);
            set_page_private(page, 0);
            __mod_zone_freepage_state(zone, 1 << order,
                          migratetype);
        } else {
            list_del(&buddy->lru);
            zone->free_area[order].nr_free--;
            rmv_page_order(buddy);
        }
        combined_idx = buddy_idx & page_idx;
        page = page + (combined_idx - page_idx);
        page_idx = combined_idx;
        order++;
    }
    set_page_order(page, order);

    /*
     * If this is not the largest possible page, check if the buddy
     * of the next-highest order is free. If it is, it's possible
     * that pages are being freed that will coalesce soon. In case,
     * that is happening, add the free page to the tail of the list
     * so it's less likely to be used soon and more likely to be merged
     * as a higher order page
     */
    if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
        struct page *higher_page, *higher_buddy;
        combined_idx = buddy_idx & page_idx;
        higher_page = page + (combined_idx - page_idx);
        buddy_idx = __find_buddy_index(combined_idx, order + 1);
        higher_buddy = higher_page + (buddy_idx - combined_idx);
        if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
            list_add_tail(&page->lru,
                &zone->free_area[order].free_list[migratetype]);
            goto out;
        }
    }

    list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
out:
    zone->free_area[order].nr_free++;
}


The code preceding while (order < MAX_ORDER-1) mainly performs checks and validation on the page being freed. Inside the loop, __find_buddy_index() computes the index of the buddy block at the current order, from which the buddy's page address is derived; page_is_buddy() then verifies whether the two blocks can be merged, and the loop breaks if they cannot. The if (page_is_guard(buddy)) branch examines the buddy's debug_flags; since CONFIG_DEBUG_PAGEALLOC is not configured here, page_is_guard() always returns false, so what remains is simply to unhook the buddy from its free list, merge the two blocks, and raise the order by one.
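
The buddy arithmetic deserves spelling out: __find_buddy_index() is simply page_idx ^ (1 << order), since the two buddies of a 2^order block differ in exactly that bit, and combined_idx = buddy_idx & page_idx clears the bit to give the start of the merged block. A standalone check of the arithmetic:

#include <assert.h>
#include <stdio.h>

/* Same arithmetic as the kernel's __find_buddy_index(): the buddy of
 * a 2^order-sized block is found by flipping bit 'order' of its index. */
static unsigned long find_buddy_index(unsigned long page_idx, unsigned int order)
{
    return page_idx ^ (1UL << order);
}

int main(void)
{
    assert(find_buddy_index(4, 0) == 5);  /* page 4 pairs with page 5 */
    assert(find_buddy_index(4, 1) == 6);  /* order-1 block 4 pairs with block 6 */
    assert((find_buddy_index(4, 0) & 4UL) == 4); /* combined_idx: merged start */
    printf("buddy index arithmetic checks out\n");
    return 0;
}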

After the while loop exits, set_page_order() records the final order the merged block has reached. Then, provided the block is not already near the maximum order (order < MAX_ORDER-2), the code checks whether the buddy at the next-higher order is free; if it is, nearby pages are probably about to coalesce, so this block is added at the tail of the free list to make it less likely to be allocated soon and more likely to merge into a still larger block. Finally, nr_free of the order the block lands on is incremented.

That completes the page-freeing path of the buddy allocator.

As for __free_pages_ok(), its call chain when freeing pages is:

__free_pages_ok()
    —> free_one_page()
        —> __free_one_page()

All roads converge: the final release again goes through __free_one_page(), so there is no need to repeat the analysis.
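
For the curious, __free_pages_ok() itself is short. Quoted from memory for this kernel generation (treat the details as approximate), it is essentially the multi-page counterpart of the single-page path above:

static void __free_pages_ok(struct page *page, unsigned int order)
{
    unsigned long flags;
    int migratetype;

    if (!free_pages_prepare(page, order))
        return;

    local_irq_save(flags);
    __count_vm_events(PGFREE, 1 << order);
    migratetype = get_pageblock_migratetype(page);
    set_freepage_migratetype(page, migratetype);
    free_one_page(page_zone(page), page, order, migratetype);
    local_irq_restore(flags);
}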

【Side Note】

Where trace_mm_page_free() is actually defined:

【file:/include/trace/events/kmem.h】
TRACE_EVENT(mm_page_free,

    TP_PROTO(struct page *page, unsigned int order),

    TP_ARGS(page, order),

    TP_STRUCT__entry(
        __field(	struct page *,	page		)
        __field(	unsigned int,	order		)
    ),

    TP_fast_assign(
        __entry->page		= page;
        __entry->order		= order;
    ),

    TP_printk("page=%p pfn=%lu order=%d",
            __entry->page,
            page_to_pfn(__entry->page),
            __entry->order)
);


TRACE_EVENT() is itself a macro; its definition:

【file:/include/linux/tracepoint.h】
#define TRACE_EVENT(name, proto, args, struct, assign, print)	\
    DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))


This leads on to the DECLARE_TRACE() macro definition:

【file:/include/linux/tracepoint.h】
#define DECLARE_TRACE(name, proto, args)				\
        __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), 1,	\
                PARAMS(void *__data, proto),		\
                PARAMS(__data, args))


which finally expands through __DECLARE_TRACE():

【file:/include/linux/tracepoint.h】
#define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \
    extern struct tracepoint __tracepoint_##name;			\
    static inline void trace_##name(proto)				\
    {								\
        if (static_key_false(&__tracepoint_##name.key))		\
            __DO_TRACE(&__tracepoint_##name,		\
                TP_PROTO(data_proto),			\
                TP_ARGS(data_args),			\
                TP_CONDITION(cond),,);			\
    }								\
    __DECLARE_TRACE_RCU(name, PARAMS(proto), PARAMS(args),		\
        PARAMS(cond), PARAMS(data_proto), PARAMS(data_args))	\
    static inline int						\
    register_trace_##name(void (*probe)(data_proto), void *data)	\
    {								\
        return tracepoint_probe_register(#name, (void *)probe,	\
                         data);			\
    }								\
    static inline int						\
    unregister_trace_##name(void (*probe)(data_proto), void *data)	\
    {								\
        return tracepoint_probe_unregister(#name, (void *)probe, \
                           data);		\
    }								\
    static inline void						\
    check_trace_callback_type_##name(void (*cb)(data_proto))	\
    {								\
    }


In C, the double pound sign "##" inside a macro is the token-pasting operator: a preprocessing operator that joins two preprocessing tokens into a single token. Thus trace and name are concatenated to yield trace_mm_page_free. Definitions of this kind abound in the kernel; most trace functions come into existence this way. Note that __DECLARE_TRACE() defines not only the trace function itself but also its register and unregister counterparts.
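
A toy demonstration of the same trick, as standalone C with hypothetical names:

#include <stdio.h>

/* Miniature version of the pattern: ## glues "trace_" and the event
 * name into a single identifier, while # stringizes the name. */
#define DECLARE_TRACE_DEMO(name)                  \
    static void trace_##name(void)                \
    {                                             \
        printf("hit tracepoint: %s\n", #name);    \
    }

DECLARE_TRACE_DEMO(mm_page_free) /* defines trace_mm_page_free() */

int main(void)
{
    trace_mm_page_free();
    return 0;
}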

Other functions, trace_mm_page_pcpu_drain among them, arise from exactly the same machinery.