From 77801ff0202e96fa3a22bdfbdb6a82641f3b179b Mon Sep 17 00:00:00 2001 From: Bobby Bingham Date: Mon, 18 Aug 2014 22:59:59 -0500 Subject: Optimize rotate Previously, we used three reversals. This was simple, but was aggressively inlined by gcc at higher optimization levels, massively bloating the code. Now, we take the front of the array, which is to be rotated to the end, and using a series of swaps, move it progressively closer to the end. When we can no longer perform a swap of the required size because we're too close to the end, we view the remaining operation as a rotate to the right with a smaller shift size. We repeat as necessary. This generates somewhat smaller code than the three reverses, even at -Os. It also shows up to a 25% overall performance improvement for grailsort on some inputs. --- wikisort.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'wikisort.c') diff --git a/wikisort.c b/wikisort.c index b04b3ff..65af111 100644 --- a/wikisort.c +++ b/wikisort.c @@ -85,7 +85,7 @@ void wikisort(void *unsorted, size_t nmel, size_t width, cmpfun cmp) /* if blocks in b are all less than those in a, just need a rotate */ if (cmp(base + (nmel - 1) * width, base) <= 0) { - rotate(base, nmel, width, a.nmel * bnmel + buf.nmel); + rotate(base, nmel * width, (a.nmel * bnmel + buf.nmel) * width); return; } @@ -102,7 +102,7 @@ void wikisort(void *unsorted, size_t nmel, size_t width, cmpfun cmp) pendb_nmel += rolled_blocks * bnmel; if (pendb_nmel) { size_t offset = pendb_nmel - binary_search(a.base, a.base - pendb_nmel * width, pendb_nmel, width, cmp); - rotate(a.base - offset * width, bnmel + offset, width, offset); + rotate(a.base - offset * width, (bnmel + offset) * width, offset * width); pendb_nmel -= offset; if (penda.nmel && pendb_nmel) { -- cgit v1.2.3