Quake III inverse square root hack only outperforms 1/sqrt(x) when compiled with optimizations

So I just discovered the very interesting Quake III inverse square root hack. After learning how it works, I decided to test it. I found that the hack only outperformed math.h's 1/sqrt(x) when compiled with optimizations enabled.

The hack's implementation:

float q_sqrt(float x) {
    float x2 = x * 0.5F;
    int i = *( int* )&x;                  // evil floating point bit hack
    i = 0x5f3759df - (i >> 1);            // what the fuck?
    x = *( float* )&i;
    x = x * ( 1.5F - ( (x2 * x * x) ) );  //1st iteration
  //x = x * ( 1.5F - ( (x2 * x * x) ) );  //2nd iteration, can be removed
    return x;
}
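
For reference, the magic constant produces a rough initial guess and the last line is one Newton-Raphson refinement step for y = 1/sqrt(x), i.e. y_new = y * (1.5 - 0.5 * x * y * y); the second, commented-out step would just tighten the accuracy. A quick way to eyeball how close the approximation gets is a small standalone check (assuming the q_sqrt definition above is pasted into the same file):

#include <stdio.h>
#include <math.h>

int main(void) {
    //Compare the approximation against the libm reference for a few inputs
    for (float x = 1.0f; x <= 100.0f; x *= 10.0f) {
        float approx = q_sqrt(x);
        float exact  = 1.0f / sqrtf(x);
        fprintf(stdout, "x=%7.2f  q_sqrt=%.6f  1/sqrtf=%.6f  rel.err=%.4f%%\n",
                x, approx, exact, 100.0f * fabsf(approx - exact) / exact);
    }
    return 0;
}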

To test how fast 1/sqrt(x) runs compared to q_sqrt(x):

//qtest.c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>

/*
Implementation of 1/sqrt(x) used in the Quake III game
*/
float q_sqrt(float x) {
    float x2 = x * 0.5F;
    int i = *( int* )&x;                  // evil floating point bit hack
    i = 0x5f3759df - (i >> 1);            // what the fuck?
    x = *( float* )&i;
    x = x * ( 1.5F - ( (x2 * x * x) ) );  //1st iteration
  //x = x * ( 1.5F - ( (x2 * x * x) ) );  //2nd iteration, can be removed
    return x;
}


int main(int argc, char *argv[]) {
    struct timespec start, stop;
    //Will work on floats in the range [0,100]
    float maxn = 100;
    //Work on 10000 random floats or as many as user provides
    size_t num = 10000;
    //Bogus
    float ans = 0;
    //Measure nanoseconds
    size_t ns = 0;
    if (argc > 1)
        num = atoll(argv[1]);
    if (num <= 0) return -1;
    //Compute "num" random floats 
    float *vecs = malloc(num * sizeof(float));
    if (!vecs) return -1;
    for (size_t i = 0; i < num; i++)
        vecs[i] = maxn * ( (float)rand() / (float)RAND_MAX );

    fprintf(stderr, "Measuring 1/sqrt(x)\n");
    clock_gettime( CLOCK_REALTIME, &start);
    for (size_t i = 0; i < num; i++)
        ans += 1 / sqrt(vecs[i]);
    clock_gettime( CLOCK_REALTIME, &stop);
    ns = ( stop.tv_sec - start.tv_sec ) * 1E9 + ( stop.tv_nsec - start.tv_nsec );
    fprintf(stderr, "1/sqrt(x) took %.6f nanoseconds per call\n", (double)ns/num );


    fprintf(stderr, "Measuring q_sqrt(x)\n");
    clock_gettime( CLOCK_REALTIME, &start);
    for (size_t i = 0; i < num; i++)
        ans += q_sqrt(vecs[i]);
    clock_gettime( CLOCK_REALTIME, &stop);
    ns = ( stop.tv_sec - start.tv_sec ) * 1E9 + ( stop.tv_nsec - start.tv_nsec );
    fprintf(stderr, "q_sqrt(x) took %.6f nanoseconds per call\n", (double)ns/num );

    //Side by side
  //for (size_t i = 0; i < num; i++)
  //    fprintf(stdout, "%.6f\t%.6f\n", 1/sqrt(vecs[i]),q_sqrt(vecs[i]));
    free(vecs);
}

On my system (Ryzen 3700X) I get:

gcc -Wall -pedantic -o qtest qtest.c -lm
./qtest
Measuring 1/sqrt(x)
1/sqrt(x) took 4.470000 nanoseconds per call
Measuring q_sqrt(x)
q_sqrt(x) took 4.859000 nanoseconds per call


gcc -Wall -pedantic -O1 -o qtest qtest.c -lm
./qtest
Measuring 1/sqrt(x)
1/sqrt(x) took 0.378000 nanoseconds per call
Measuring q_sqrt(x)
q_sqrt(x) took 0.497000 nanoseconds per call


gcc -Wall -pedantic -O2 -o qtest qtest.c -lm
qtest.c: In function ‘q_sqrt’:
qtest.c:11:14: warning: dereferencing type-punned pointer will break strict-aliasing rules [-Wstrict-aliasing]
  11 |     int i = *( int* )&x;                  // evil floating point bit hack
     |
qtest.c:13:10: warning: dereferencing type-punned pointer will break strict-aliasing rules [-Wstrict-aliasing]
  13 |     x = *( float* )&i;
     |
./qtest
Measuring 1/sqrt(x)
1/sqrt(x) took 0.500000 nanoseconds per call
Measuring q_sqrt(x)
q_sqrt(x) took 0.002000 nanoseconds per call
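
As an aside, the -O2 warnings come from the pointer casts, which formally violate strict aliasing. Doing the bit copy through memcpy keeps the same trick and silences the warning, since GCC and Clang compile a small fixed-size memcpy down to a plain register move. A sketch (q_sqrt_memcpy is just an illustrative name):

#include <string.h>

//Same bit trick, but the type punning goes through memcpy instead of pointer casts
float q_sqrt_memcpy(float x) {
    float x2 = x * 0.5F;
    int i;
    memcpy(&i, &x, sizeof i);             // bit-copy float -> int
    i = 0x5f3759df - (i >> 1);            // same magic constant
    memcpy(&x, &i, sizeof i);             // bit-copy int -> float
    x = x * ( 1.5F - ( (x2 * x * x) ) );  // one Newton-Raphson step
    return x;
}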

My expectation was that q_sqrt(x) would beat 1/sqrt(x) out of the box. After reading some more, I now know that either libm is far better optimized than I thought or my CPU has a hardware instruction for sqrt(x). After all, CPUs have changed by leaps and bounds since the fast inverse square root hack was developed.
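
For what it's worth, x86 does expose an approximate hardware reciprocal square root instruction, rsqrtss, with roughly 12 bits of precision. A minimal sketch using the SSE intrinsic (hw_rsqrt is just an illustrative name):

#include <xmmintrin.h>   //SSE intrinsics, x86 only

//Approximate 1/sqrt(x) with the hardware rsqrtss instruction (about 12-bit precision)
static inline float hw_rsqrt(float x) {
    return _mm_cvtss_f32(_mm_rsqrt_ss(_mm_set_ss(x)));
}

I believe GCC can emit something similar itself for 1.0f/sqrtf(x), rsqrtss followed by a Newton-Raphson step, when -ffast-math is enabled.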

What I don't understand is what kind of optimizations the compiler is applying to make it so much faster. Or is my benchmark simply ill-conceived?
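
On that last point, it may be worth noting that ans is never printed or otherwise used after the loops, so the optimizer is allowed to treat the timed loops as dead code, which could explain a figure like 0.002 nanoseconds per call. A minimal way to rule that out would be to make the result observable, for example at the end of main, before free(vecs):

    //Print the accumulated result so the optimizer cannot discard the timed loops
    fprintf(stderr, "checksum: %f\n", ans);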

Thanks for any help!!

There are 2 answers

Guillaume Petitjean (accepted answer):

As you said, most modern CPUs include a floating point unit (FPU) that usually provides a hardware instruction to compute the square root. FPUs also provide division instructions, so I would expect your processor (although I don't know it) to be able to compute an inverse square root in just a few instructions. Your results are a bit surprising: you should check whether the FPU is really used. I don't know Ryzen, but on ARM processors you can compile your software to use either hardware floating point instructions or software libraries.
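
One quick way to check this on x86-64 is to look at the assembly GCC actually generates and search for a hardware square-root instruction (sqrtss/sqrtsd) versus a library call, for example:

gcc -O2 -S -o - qtest.c | grep -E 'sqrt|call'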

Now to answer your question: GCC optimizations are a complex story, and it is usually impossible to predict precisely the effect of a given level on performance. So run some tests, as you did, or look at how GCC documents its optimization levels for the theory.
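
If you want the concrete list, GCC can print which optimizations each level enables, which makes it easy to diff two levels:

gcc -Q -O1 --help=optimizers > o1.txt
gcc -Q -O2 --help=optimizers > o2.txt
diff o1.txt o2.txt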

Morten Jensen:

Here is the concrete difference with Clang/LLVM.

Without optimization (-O0):

q_sqrt(float):                             # @q_sqrt(float)
        push    rbp
        mov     rbp, rsp
        movss   dword ptr [rbp - 4], xmm0
        movss   xmm0, dword ptr [rip + .LCPI0_1] # xmm0 = mem[0],zero,zero,zero
        mulss   xmm0, dword ptr [rbp - 4]
        movss   dword ptr [rbp - 8], xmm0
        mov     eax, dword ptr [rbp - 4]
        mov     dword ptr [rbp - 12], eax
        mov     ecx, dword ptr [rbp - 12]
        sar     ecx, 1
        mov     eax, 1597463007
        sub     eax, ecx
        mov     dword ptr [rbp - 12], eax
        movss   xmm0, dword ptr [rbp - 12]      # xmm0 = mem[0],zero,zero,zero
        movss   dword ptr [rbp - 4], xmm0
        movss   xmm0, dword ptr [rbp - 4]       # xmm0 = mem[0],zero,zero,zero
        movss   xmm2, dword ptr [rbp - 8]       # xmm2 = mem[0],zero,zero,zero
        mulss   xmm2, dword ptr [rbp - 4]
        mulss   xmm2, dword ptr [rbp - 4]
        movss   xmm1, dword ptr [rip + .LCPI0_0] # xmm1 = mem[0],zero,zero,zero
        subss   xmm1, xmm2
        mulss   xmm0, xmm1
        movss   dword ptr [rbp - 4], xmm0
        movss   xmm0, dword ptr [rbp - 4]       # xmm0 = mem[0],zero,zero,zero
        pop     rbp
        ret

With optimization (-Ofast):

q_sqrt(float):                             # @q_sqrt(float)
        movd    eax, xmm0
        sar     eax
        mov     ecx, 1597463007
        sub     ecx, eax
        movd    xmm1, ecx
        mulss   xmm0, dword ptr [rip + .LCPI0_0]
        movdqa  xmm2, xmm1
        mulss   xmm2, xmm1
        mulss   xmm0, xmm2
        addss   xmm0, dword ptr [rip + .LCPI0_1]
        mulss   xmm0, xmm1
        ret

You can use https://godbolt.org/ to inspect your compiler's assembly output with various flags and see how they affect the generated code.
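
The same before/after comparison can be reproduced locally by asking the compiler for the assembly (the output file names here are arbitrary):

clang -O0    -S -o q_sqrt_O0.s    qtest.c
clang -Ofast -S -o q_sqrt_Ofast.s qtest.c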