Linux release 4.15: lib/list_sort.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/list_sort.h>
#include <linux/list.h>

#define MAX_LIST_LENGTH_BITS 20
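/*
 * MAX_LIST_LENGTH_BITS bounds the part[] array in list_sort() below:
 * lists of up to 2^20 elements merge at full efficiency; longer lists
 * still sort correctly, but trigger the "list too long for efficiency"
 * warning and lose some merge balance.
 */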
/*
 * Returns a list organized in an intermediate format suited
 * to chaining of merge() calls: null-terminated, no reserved or
 * sentinel head node, "prev" links not maintained.
 */
static struct list_head *merge(void *priv,
				int (*cmp)(void *priv, struct list_head *a,
					struct list_head *b),
				struct list_head *a, struct list_head *b)
{
	struct list_head head, *tail = &head;

	while (a && b) {
		/* if equal, take 'a' -- important for sort stability */
		if ((*cmp)(priv, a, b) <= 0) {
			tail->next = a;
			a = a->next;
		} else {
			tail->next = b;
			b = b->next;
		}
		tail = tail->next;
	}
	tail->next = a?:b;
	return head.next;
}
Contributors
Person | Tokens | Token Share | Commits | Commit Share |
David Chinner | 79 | 62.20% | 1 | 50.00% |
Don Mullis | 48 | 37.80% | 1 | 50.00% |
Total | 127 | 100.00% | 2 | 100.00% |
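For concreteness, here is a minimal hypothetical sketch of the conversion that list_sort() performs inline before its first merge() call. The helper name to_merge_format() is illustrative only and does not appear in the kernel source:

/*
 * Hypothetical sketch, not part of lib/list_sort.c: convert a
 * standard circular doubly-linked list into the intermediate format
 * merge() expects.  list_sort() below performs exactly these two
 * steps inline before its first merge() call.
 */
static struct list_head *to_merge_format(struct list_head *head)
{
	head->prev->next = NULL;	/* null-terminate: break the cycle */
	return head->next;		/* skip the sentinel head node */
}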
/*
 * Combine final list merge with restoration of standard doubly-linked
 * list structure.  This approach duplicates code from merge(), but
 * runs faster than the tidier alternatives of either a separate final
 * prev-link restoration pass, or maintaining the prev links
 * throughout.
 */
static void merge_and_restore_back_links(void *priv,
				int (*cmp)(void *priv, struct list_head *a,
					struct list_head *b),
				struct list_head *head,
				struct list_head *a, struct list_head *b)
{
	struct list_head *tail = head;
	u8 count = 0;

	while (a && b) {
		/* if equal, take 'a' -- important for sort stability */
		if ((*cmp)(priv, a, b) <= 0) {
			tail->next = a;
			a->prev = tail;
			a = a->next;
		} else {
			tail->next = b;
			b->prev = tail;
			b = b->next;
		}
		tail = tail->next;
	}
	tail->next = a ? : b;

	do {
		/*
		 * In worst cases this loop may run many iterations.
		 * Continue callbacks to the client even though no
		 * element comparison is needed, so the client's cmp()
		 * routine can invoke cond_resched() periodically.
		 */
		if (unlikely(!(++count)))
			(*cmp)(priv, tail->next, tail->next);

		tail->next->prev = tail;
		tail = tail->next;
	} while (tail->next);

	tail->next = head;
	head->prev = tail;
}
Contributors
Person | Tokens | Token Share | Commits | Commit Share |
Don Mullis | 187 | 92.12% | 2 | 66.67% |
Rasmus Villemoes | 16 | 7.88% | 1 | 33.33% |
Total | 203 | 100.00% | 3 | 100.00% |
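As the loop comment above explains, the restoration pass keeps calling cmp() with two identical arguments whenever the u8 counter wraps (every 256 nodes), purely so the client can reschedule. A hedged sketch of such a client callback follows; struct my_item and my_cmp() are illustrative names, not part of this file, and the cond_resched() call assumes the sort runs in process context:

/*
 * Hypothetical client callback (struct my_item and my_cmp are
 * illustrative names, not part of lib/list_sort.c).  Because
 * list_sort() keeps invoking cmp() even during back-link restoration,
 * a client sorting a very long list can yield the CPU from here.
 * Needs <linux/sched.h> for cond_resched() and <linux/kernel.h> for
 * container_of().
 */
struct my_item {
	struct list_head list;
	int key;
};

static int my_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct my_item *ia = container_of(a, struct my_item, list);
	struct my_item *ib = container_of(b, struct my_item, list);

	cond_resched();		/* assumes process context, where sleeping is allowed */

	if (ia->key < ib->key)
		return -1;
	if (ia->key > ib->key)
		return 1;
	return 0;		/* equal: returning 0 preserves original order (stable) */
}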
/**
 * list_sort - sort a list
 * @priv: private data, opaque to list_sort(), passed to @cmp
 * @head: the list to sort
 * @cmp: the elements comparison function
 *
 * This function implements "merge sort", which has O(n log(n))
 * complexity.
 *
 * The comparison function @cmp must return a negative value if @a
 * should sort before @b, and a positive value if @a should sort after
 * @b. If @a and @b are equivalent, and their original relative
 * ordering is to be preserved, @cmp must return 0.
 */
void list_sort(void *priv, struct list_head *head,
		int (*cmp)(void *priv, struct list_head *a,
			struct list_head *b))
{
	struct list_head *part[MAX_LIST_LENGTH_BITS+1]; /* sorted partial lists
						-- last slot is a sentinel */
	int lev;  /* index into part[] */
	int max_lev = 0;
	struct list_head *list;

	if (list_empty(head))
		return;

	memset(part, 0, sizeof(part));

	head->prev->next = NULL;
	list = head->next;

	while (list) {
		struct list_head *cur = list;
		list = list->next;
		cur->next = NULL;

		for (lev = 0; part[lev]; lev++) {
			cur = merge(priv, cmp, part[lev], cur);
			part[lev] = NULL;
		}
		if (lev > max_lev) {
			if (unlikely(lev >= ARRAY_SIZE(part)-1)) {
				printk_once(KERN_DEBUG "list too long for efficiency\n");
				lev--;
			}
			max_lev = lev;
		}
		part[lev] = cur;
	}

	for (lev = 0; lev < max_lev; lev++)
		if (part[lev])
			list = merge(priv, cmp, part[lev], list);

	merge_and_restore_back_links(priv, cmp, head, part[max_lev], list);
}
Contributors
Person | Tokens | Token Share | Commits | Commit Share |
Don Mullis | 253 | 99.61% | 1 | 50.00% |
Andrew Morton | 1 | 0.39% | 1 | 50.00% |
Total | 254 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL(list_sort);
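For reference, a minimal hypothetical usage sketch reusing struct my_item and my_cmp() from the sketch above; sort_my_items() and my_list are illustrative names, not part of this file:

/*
 * Hypothetical usage sketch (not part of lib/list_sort.c): sort a
 * list of struct my_item by key with the my_cmp() callback shown
 * earlier, then walk it in order.  Requires <linux/list.h> and
 * <linux/list_sort.h>.
 */
static LIST_HEAD(my_list);

static void sort_my_items(void)
{
	struct my_item *item;

	/* assume my_list has been populated with list_add_tail() */
	list_sort(NULL, &my_list, my_cmp);	/* priv is unused here */

	list_for_each_entry(item, &my_list, list)
		pr_info("key=%d\n", item->key);
}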
Overall Contributors
Person | Tokens | Token Share | Commits | Commit Share |
Don Mullis | 500 | 80.91% | 3 | 37.50% |
David Chinner | 91 | 14.72% | 1 | 12.50% |
Rasmus Villemoes | 25 | 4.05% | 2 | 25.00% |
Andrew Morton | 1 | 0.16% | 1 | 12.50% |
Greg Kroah-Hartman | 1 | 0.16% | 1 | 12.50% |
Total | 618 | 100.00% | 8 | 100.00% |