lib/sort.c (Linux release 4.17)
// SPDX-License-Identifier: GPL-2.0
/*
 * A fast, small, non-recursive O(n log n) sort for the Linux kernel
 *
 * Jan 23 2005  Matt Mackall <mpm@selenic.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/types.h>
#include <linux/export.h>
#include <linux/sort.h>
/* Return true if base is aligned to align (a power of two), or if the
 * architecture handles unaligned accesses efficiently anyway. */
static int alignment_ok(const void *base, int align)
{
        return IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
                ((unsigned long)base & (align - 1)) == 0;
}
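The (align - 1) mask test relies on align being a power of two: an address
aligned to align has all zeros in its low log2(align) bits. A minimal
userspace sketch of the same check (the is_aligned() helper and the buffer
are illustrative only, not part of the kernel source):

#include <stdio.h>

/* Same mask trick as alignment_ok(): for a power-of-two align,
 * an aligned address has zeros in its low bits. */
static int is_aligned(const void *p, unsigned long align)
{
        return ((unsigned long)p & (align - 1)) == 0;
}

int main(void)
{
        char buf[16] __attribute__((aligned(8)));

        printf("%d\n", is_aligned(buf, 8));     /* 1: aligned */
        printf("%d\n", is_aligned(buf + 1, 8)); /* 0: off by one byte */
        return 0;
}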
/* Swap two 4-byte elements with single loads and stores. */
static void u32_swap(void *a, void *b, int size)
{
        u32 t = *(u32 *)a;
        *(u32 *)a = *(u32 *)b;
        *(u32 *)b = t;
}
/* Swap two 8-byte elements with single loads and stores. */
static void u64_swap(void *a, void *b, int size)
{
        u64 t = *(u64 *)a;
        *(u64 *)a = *(u64 *)b;
        *(u64 *)b = t;
}
/* Fallback: swap the elements one byte at a time. The do-while
 * assumes size >= 1; incrementing a void pointer here relies on
 * GCC's sizeof(void) == 1 extension. */
static void generic_swap(void *a, void *b, int size)
{
        char t;

        do {
                t = *(char *)a;
                *(char *)a++ = *(char *)b;
                *(char *)b++ = t;
        } while (--size > 0);
}
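Because *(char *)a++ advances the void pointer itself, the loop above is
GCC-specific. A strictly portable rendering of the same byte-wise swap,
shown here only for illustration (byte_swap is not a kernel symbol):

/* Portable equivalent of generic_swap(): walk both objects
 * one byte at a time, swapping as we go. */
static void byte_swap(void *a, void *b, int size)
{
        char *pa = a, *pb = b;

        do {
                char t = *pa;
                *pa++ = *pb;
                *pb++ = t;
        } while (--size > 0);
}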
/**
 * sort - sort an array of elements
 * @base: pointer to data to sort
 * @num: number of elements
 * @size: size of each element
 * @cmp_func: pointer to comparison function
 * @swap_func: pointer to swap function or NULL
 *
 * This function does a heapsort on the given array. You may provide a
 * swap_func function optimized to your element type.
 *
 * Sorting time is O(n log n) both on average and worst-case. While
 * qsort is about 20% faster on average, it suffers from exploitable
 * O(n*n) worst-case behavior and extra memory requirements that make
 * it less suitable for kernel use.
 */
void sort(void *base, size_t num, size_t size,
          int (*cmp_func)(const void *, const void *),
          void (*swap_func)(void *, void *, int size))
{
        /* pre-scale counters for performance */
        int i = (num/2 - 1) * size, n = num * size, c, r;

        if (!swap_func) {
                if (size == 4 && alignment_ok(base, 4))
                        swap_func = u32_swap;
                else if (size == 8 && alignment_ok(base, 8))
                        swap_func = u64_swap;
                else
                        swap_func = generic_swap;
        }

        /*
         * heapify: i, n, c and r are all byte offsets, so the children
         * of the node at offset r sit at 2*r + size and 2*r + 2*size
         * (the byte-scaled form of indices 2*k + 1 and 2*k + 2).
         */
        for ( ; i >= 0; i -= size) {
                for (r = i; r * 2 + size < n; r = c) {
                        c = r * 2 + size;
                        if (c < n - size &&
                            cmp_func(base + c, base + c + size) < 0)
                                c += size;  /* pick the larger child */
                        if (cmp_func(base + r, base + c) >= 0)
                                break;
                        swap_func(base + r, base + c, size);
                }
        }

        /* sort: pop the max to the end, then sift down the new root */
        for (i = n - size; i > 0; i -= size) {
                swap_func(base, base + i, size);
                for (r = 0; r * 2 + size < i; r = c) {
                        c = r * 2 + size;
                        if (c < i - size &&
                            cmp_func(base + c, base + c + size) < 0)
                                c += size;
                        if (cmp_func(base + r, base + c) >= 0)
                                break;
                        swap_func(base + r, base + c, size);
                }
        }
}
EXPORT_SYMBOL(sort);
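A minimal caller sketch (the cmp_int() helper and the values are
illustrative, not from the kernel source). With swap_func left NULL,
sort() notices the 4-byte, aligned elements and picks u32_swap:

#include <linux/kernel.h>
#include <linux/sort.h>

/* Illustrative comparison: ascending ints. Like memcmp(), it must
 * return <0, 0 or >0; (x > y) - (x < y) avoids the overflow that a
 * plain x - y could hit on extreme values. */
static int cmp_int(const void *a, const void *b)
{
        int x = *(const int *)a, y = *(const int *)b;

        return (x > y) - (x < y);
}

static void sort_example(void)
{
        int v[] = { 3, 1, 2 };

        sort(v, ARRAY_SIZE(v), sizeof(v[0]), cmp_int, NULL);
        /* v is now { 1, 2, 3 } */
}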