Inspired by this article. Neat tricks for speeding up integer computations.
Note: cin.sync_with_stdio(false); disables synchronization between the C++ streams and C stdio and gives you a performance boost. If you use it, you should only read input through cin (don't mix cin and scanf when sync is disabled, for example) or you will get unexpected results.
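A minimal sketch of how this is typically set up at the top of main; the cin.tie(nullptr) line is a common companion optimization, not part of the note above:

#include <iostream>

int main() {
    std::ios::sync_with_stdio(false); // stop syncing C++ streams with C stdio
    std::cin.tie(nullptr);            // optional: don't flush cout before every read from cin
    int n;
    std::cin >> n;                    // from here on use only cin/cout, never scanf/printf
    std::cout << n * 2 << '\n';
}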
x = x << 1; // x = x * 2
x = x << 6; // x = x * 64
x = x >> 1; // x = x / 2
x = x >> 3; // x = x / 8
a ^= b; // int temp = b
b ^= a; // b = a
a ^= b; // a = temp
i = -~i; // i++
i = ~-i; // i--
i = ~i + 1; // or
i = (i ^ -1) + 1; // i = -i
x = 131 & (4 - 1); // x = 131 % 4 (works only when the modulus is a power of 2)
(i & 1) == 0; // (i % 2) == 0
(a^b) == 0; // a == b
x < 0 ? -x : x; // abs(x)
(x ^ (x >> 31)) - (x >> 31) // abs(x)
(a ^ b) >= 0; // a and b have the same sign (so a * b >= 0, ignoring overflow)
// JavaScript: >> coerces to a 32-bit integer, so these only work for non-negative numbers in 32-bit range
(x + 0.5) >> 0; // round(x)
(x + 1) >> 0;   // ceil(x) (for non-integer x)
x >> 0;         // floor(x)
x | (1<<n)  // set the n-th bit
x & ~(1<<n) // clear the n-th bit
x ^ (1<<n)  // toggle the n-th bit
// Round up to the next power of 2 (e.g. 37 -> 64); only works if v is a 32-bit unsigned int
unsigned int v;
v--;
v |= v >> 1;
v |= v >> 2;
v |= v >> 4;
v |= v >> 8;
v |= v >> 16;
v++;
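As a usage sketch, the same sequence wrapped in a function (the name next_pow2 is my own):

#include <cstdint>

// Round v up to the next power of 2 (returns v unchanged if it is already a power of 2).
uint32_t next_pow2(uint32_t v) {
    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    return v + 1;
}
// next_pow2(37) == 64, next_pow2(64) == 64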
n >> 0;      // truncate n to a 32-bit integer (JavaScript)
5.7812 >> 0; // 5
// maximum 32-bit signed integer (JavaScript; several of these shifts are undefined behavior in C/C++)
int maxInt = ~(1 << 31);
int maxInt = (1 << 31) - 1;
int maxInt = (1 << -1) - 1;
int maxInt = -1u >> 1;
// minimum 32-bit signed integer
int minInt = 1 << 31;
int minInt = 1 << -1;
long maxLong = ((long)1 << 63) - 1; // maximum 64-bit signed long
n << 1; // n*2
n >> 1; // n/2
n << m; // n * 2^m
n >> m; // n / 2^m
Shifting like this is reportedly about 35% faster than multiplication and division in JavaScript.
(a ^ b) == 0; // a == b
!(a ^ b)      // same test, for use in an if
(n & 1) == 1; // n is odd
// version 1
a ^= b;
b ^= a;
a ^= b;

// version 2 (relies on left-to-right evaluation; fine in JavaScript, undefined behavior in C/C++)
a = a ^ b ^ (b = a);
// version 1
x < 0 ? -x : x;
// version 2 (assumes 32-bit int)
(x ^ (x >> 31)) - (x >> 31);
// max(a, b) without branching (assumes 32-bit int and that a - b does not overflow)
b & ((a-b) >> 31) | a & (~(a-b) >> 31);
// min(a, b) without branching
a & ((a-b) >> 31) | b & (~(a-b) >> 31);
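A minimal sketch wrapping the two expressions above as functions (the names max_branchless and min_branchless are my own; assumes 32-bit two's-complement int, no overflow in a - b, and an arithmetic right shift of negative values):

#include <cstdint>

int32_t max_branchless(int32_t a, int32_t b) {
    return (b & ((a - b) >> 31)) | (a & (~(a - b) >> 31));
}
int32_t min_branchless(int32_t a, int32_t b) {
    return (a & ((a - b) >> 31)) | (b & (~(a - b) >> 31));
}
// max_branchless(3, 7) == 7, min_branchless(3, 7) == 3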
(x ^ y) >= 0; // x and y have the same sign
i = ~i + 1;       // or
i = (i ^ -1) + 1; // i = -i
1 << n; // 2^n
n > 0 && (n & (n - 1)) == 0; // n is a power of 2
m & ((1 << n) - 1); // m % 2^n
(x + y) >> 1;             // average of x and y
((x ^ y) >> 1) + (x & y); // average that doesn't overflow the intermediate sum
(n >> (m-1)) & 1;  // get the m-th bit of n (1-based)
n & ~(1 << (m-1)); // clear the m-th bit of n (1-based)
if (x & (1<<n)) {
  // n-th bit is set
} else {
  // n-th bit is not set
}
x & (-x)   // isolate the lowest set bit
~x & (x+1) // isolate the lowest clear bit (as a 1 in that position)
x | (x+1)  // set the lowest clear bit
x & (x-1)  // clear the lowest set bit
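As a usage sketch (my own framing), the classic loop that visits every set bit by combining two of the tricks above:

#include <cstdint>
#include <iostream>

// Visit every set bit of x by repeatedly isolating and then clearing the lowest one.
void print_set_bits(uint32_t x) {
    while (x) {
        uint32_t lowest = x & (-x); // isolate the lowest set bit
        std::cout << lowest << '\n';
        x &= x - 1;                 // clear the lowest set bit
    }
}
// print_set_bits(0b10110) prints 2, 4, 16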
-~n // n + 1
~-n // n - 1
~n + 1;       // -n
(n ^ -1) + 1; // -n

// toggle x between the two values a and b:
if (x == a) x = b;
else if (x == b) x = a;
// branch-free equivalent:
x = a ^ b ^ x;
((n & 0xAA) >> 1) | ((n & 0x55) << 1) // swap adjacent (even and odd) bits of a byte; for 32 bits use 0xAAAAAAAA and 0x55555555
(n^m) & -(n^m)     // returns 2^x where x is the position of the lowest differing bit (0-based)
~(n^m) & ((n^m)+1) // returns 2^x where x is the position of the lowest bit in which n and m agree (0-based)
These are techniques inspired by the fast inverse square root method. Most of these are original.
#include <stdint.h>

typedef union { float flt; uint32_t bits; } lens_t;

uint32_t f2i(float x) { return ((lens_t) {.flt = x}).bits; }
float i2f(uint32_t x) { return ((lens_t) {.bits = x}).flt; }

Caveat: type punning through a union like this is fine in C but undefined behavior in C++; use std::memcpy instead.
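A sketch of the std::memcpy alternative mentioned in the caveat (the names f2i_safe and i2f_safe are my own):

#include <cstdint>
#include <cstring>

uint32_t f2i_safe(float x) {
    uint32_t bits;
    std::memcpy(&bits, &x, sizeof bits); // well-defined in both C and C++
    return bits;
}

float i2f_safe(uint32_t bits) {
    float x;
    std::memcpy(&x, &bits, sizeof x);
    return x;
}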
frexp gives the base-2 decomposition of a number, so that man, exp = frexp(x) means that man * 2^exp = x and 0.5 <= man < 1.
man, exp = frexp(x);
return (uint32_t)((2 * man + exp + 125) * 0x800000);
Caveat: This will have at most 2^-16 relative error, since adding the integer part (exp + 125) clobbers the last 8 bits of the mantissa, keeping only its first 16 bits.
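A compilable sketch of the idea above, done in single precision so the stated precision loss applies (the name approx_f2i and the use of std::frexp are my additions):

#include <cmath>
#include <cstdint>

// Approximate the IEEE-754 bit pattern of a positive float via frexp,
// using bits = (2*man + exp + 125) * 2^23.
uint32_t approx_f2i(float x) {
    int exp;
    float man = std::frexp(x, &exp); // x == man * 2^exp, 0.5 <= man < 1
    return (uint32_t)((2.0f * man + (float)(exp + 125)) * 0x800000);
}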
OR by space => (x | ' ') Result is always lowercase even if letter is already lowercase eg. ('a' | ' ') => 'a' ; ('A' | ' ') => 'a'
AND by underscore => (x & '_') Result is always uppercase even if the letter is already uppercase eg. ('a' & '_') => 'A' ; ('A' & '_') => 'A'
XOR by space toggles the case => (x ^ ' ') eg. ('a' ^ ' ') => 'A' ; ('A' ^ ' ') => 'a'
AND by chr(31)/binary('11111')/hex('1F') => (x & '\x1F') Result is in the 1..26 range, letter case does not matter eg. ('a' & '\x1F') => 1 ; ('B' & '\x1F') => 2
AND by ? => (x & '?') or XOR by @ => (x ^ '@') eg. ('C' & '?') => 3 ; ('Z' ^ '@') => 26
XOR by backtick/chr(96)/binary('1100000')/hex('60') => (x ^ '`') eg. ('d' ^ '`') => 4 ; ('x' ^ '`') => 24
Note: using anything other than the English letters will produce garbage results

Expanding a 5-bit color channel (e.g. RGB555) to 8 bits by replicating its top bits:
R8 = (R5 << 3) | (R5 >> 2)
G8 = (G5 << 3) | (G5 >> 2)
B8 = (B5 << 3) | (B5 >> 2)
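A sketch applying the expansion to a packed 16-bit pixel; the RGB555 bit layout (xRRRRRGGGGGBBBBB) and the function name are my own assumptions, adjust to your format:

#include <cstdint>

// Expand a packed RGB555 pixel (xRRRRRGGGGGBBBBB) to 8-bit-per-channel values.
void rgb555_to_rgb888(uint16_t pixel, uint8_t* r8, uint8_t* g8, uint8_t* b8) {
    uint8_t r5 = (pixel >> 10) & 0x1F;
    uint8_t g5 = (pixel >> 5)  & 0x1F;
    uint8_t b5 = pixel & 0x1F;
    *r8 = (uint8_t)((r5 << 3) | (r5 >> 2)); // replicate the top bits into the low bits
    *g8 = (uint8_t)((g5 << 3) | (g5 >> 2));
    *b8 = (uint8_t)((b5 << 3) | (b5 >> 2));
}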