murmur3.c (7524B)
```
//-----------------------------------------------------------------------------
// MurmurHash3 was written by Austin Appleby, and is placed in the public
// domain. The author hereby disclaims copyright to this source code.

// Note - The x86 and x64 versions do _not_ produce the same results, as the
// algorithms are optimized for their respective platforms. You can still
// compile and run any of them on any platform, but your performance with the
// non-native version will be less than optimal.

#include "murmur3.h"

//-----------------------------------------------------------------------------
// Platform-specific functions and macros

#ifdef __GNUC__
#define FORCE_INLINE __attribute__((always_inline)) inline
#else
#define FORCE_INLINE inline
#endif

static FORCE_INLINE uint32_t rotl32 ( uint32_t x, int8_t r )
{
  return (x << r) | (x >> (32 - r));
}

static FORCE_INLINE uint64_t rotl64 ( uint64_t x, int8_t r )
{
  return (x << r) | (x >> (64 - r));
}

#define ROTL32(x,y) rotl32(x,y)
#define ROTL64(x,y) rotl64(x,y)

#define BIG_CONSTANT(x) (x##LLU)

//-----------------------------------------------------------------------------
// Block read - if your platform needs to do endian-swapping or can only
// handle aligned reads, do the conversion here

#define getblock(p, i) (p[i])

//-----------------------------------------------------------------------------
// Finalization mix - force all bits of a hash block to avalanche

static FORCE_INLINE uint32_t fmix32 ( uint32_t h )
{
  h ^= h >> 16;
  h *= 0x85ebca6b;
  h ^= h >> 13;
  h *= 0xc2b2ae35;
  h ^= h >> 16;

  return h;
}

//----------

static FORCE_INLINE uint64_t fmix64 ( uint64_t k )
{
  k ^= k >> 33;
  k *= BIG_CONSTANT(0xff51afd7ed558ccd);
  k ^= k >> 33;
  k *= BIG_CONSTANT(0xc4ceb9fe1a85ec53);
  k ^= k >> 33;

  return k;
}

//-----------------------------------------------------------------------------

void murmurHash3_x86_32 ( const void * key, int len,
                          uint32_t seed, void * out )
{
  const uint8_t * data = (const uint8_t*)key;
  const int nblocks = len / 4;
  int i;

  uint32_t h1 = seed;

  uint32_t c1 = 0xcc9e2d51;
  uint32_t c2 = 0x1b873593;

  //----------
  // body

  const uint32_t * blocks = (const uint32_t *)(data + nblocks*4);

  for(i = -nblocks; i; i++)
  {
    uint32_t k1 = getblock(blocks,i);

    k1 *= c1;
    k1 = ROTL32(k1,15);
    k1 *= c2;

    h1 ^= k1;
    h1 = ROTL32(h1,13);
    h1 = h1*5+0xe6546b64;
  }

  //----------
  // tail

  const uint8_t * tail = (const uint8_t*)(data + nblocks*4);

  uint32_t k1 = 0;

  switch(len & 3)
  {
  case 3: k1 ^= tail[2] << 16;
  case 2: k1 ^= tail[1] << 8;
  case 1: k1 ^= tail[0];
          k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
  };

  //----------
  // finalization

  h1 ^= len;

  h1 = fmix32(h1);

  *(uint32_t*)out = h1;
}

//-----------------------------------------------------------------------------

void murmurHash3_x86_128 ( const void * key, const int len,
                           uint32_t seed, void * out )
{
  const uint8_t * data = (const uint8_t*)key;
  const int nblocks = len / 16;
  int i;

  uint32_t h1 = seed;
  uint32_t h2 = seed;
  uint32_t h3 = seed;
  uint32_t h4 = seed;

  uint32_t c1 = 0x239b961b;
  uint32_t c2 = 0xab0e9789;
  uint32_t c3 = 0x38b34ae5;
  uint32_t c4 = 0xa1e38b93;

  //----------
  // body

  const uint32_t * blocks = (const uint32_t *)(data + nblocks*16);

  for(i = -nblocks; i; i++)
  {
    uint32_t k1 = getblock(blocks,i*4+0);
    uint32_t k2 = getblock(blocks,i*4+1);
    uint32_t k3 = getblock(blocks,i*4+2);
    uint32_t k4 = getblock(blocks,i*4+3);

    k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;

    h1 = ROTL32(h1,19); h1 += h2; h1 = h1*5+0x561ccd1b;

    k2 *= c2; k2 = ROTL32(k2,16); k2 *= c3; h2 ^= k2;

    h2 = ROTL32(h2,17); h2 += h3; h2 = h2*5+0x0bcaa747;

    k3 *= c3; k3 = ROTL32(k3,17); k3 *= c4; h3 ^= k3;

    h3 = ROTL32(h3,15); h3 += h4; h3 = h3*5+0x96cd1c35;

    k4 *= c4; k4 = ROTL32(k4,18); k4 *= c1; h4 ^= k4;

    h4 = ROTL32(h4,13); h4 += h1; h4 = h4*5+0x32ac3b17;
  }

  //----------
  // tail

  const uint8_t * tail = (const uint8_t*)(data + nblocks*16);

  uint32_t k1 = 0;
  uint32_t k2 = 0;
  uint32_t k3 = 0;
  uint32_t k4 = 0;

  switch(len & 15)
  {
  case 15: k4 ^= tail[14] << 16;
  case 14: k4 ^= tail[13] << 8;
  case 13: k4 ^= tail[12] << 0;
           k4 *= c4; k4 = ROTL32(k4,18); k4 *= c1; h4 ^= k4;

  case 12: k3 ^= tail[11] << 24;
  case 11: k3 ^= tail[10] << 16;
  case 10: k3 ^= tail[ 9] << 8;
  case  9: k3 ^= tail[ 8] << 0;
           k3 *= c3; k3 = ROTL32(k3,17); k3 *= c4; h3 ^= k3;

  case  8: k2 ^= tail[ 7] << 24;
  case  7: k2 ^= tail[ 6] << 16;
  case  6: k2 ^= tail[ 5] << 8;
  case  5: k2 ^= tail[ 4] << 0;
           k2 *= c2; k2 = ROTL32(k2,16); k2 *= c3; h2 ^= k2;

  case  4: k1 ^= tail[ 3] << 24;
  case  3: k1 ^= tail[ 2] << 16;
  case  2: k1 ^= tail[ 1] << 8;
  case  1: k1 ^= tail[ 0] << 0;
           k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
  };

  //----------
  // finalization

  h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len;

  h1 += h2; h1 += h3; h1 += h4;
  h2 += h1; h3 += h1; h4 += h1;

  h1 = fmix32(h1);
  h2 = fmix32(h2);
  h3 = fmix32(h3);
  h4 = fmix32(h4);

  h1 += h2; h1 += h3; h1 += h4;
  h2 += h1; h3 += h1; h4 += h1;

  ((uint32_t*)out)[0] = h1;
  ((uint32_t*)out)[1] = h2;
  ((uint32_t*)out)[2] = h3;
  ((uint32_t*)out)[3] = h4;
}

//-----------------------------------------------------------------------------

void murmurHash3_x64_128 ( const void * key, const int len,
                           const uint32_t seed, void * out )
{
  const uint8_t * data = (const uint8_t*)key;
  const int nblocks = len / 16;
  int i;

  uint64_t h1 = seed;
  uint64_t h2 = seed;

  uint64_t c1 = BIG_CONSTANT(0x87c37b91114253d5);
  uint64_t c2 = BIG_CONSTANT(0x4cf5ad432745937f);

  //----------
  // body

  const uint64_t * blocks = (const uint64_t *)(data);

  for(i = 0; i < nblocks; i++)
  {
    uint64_t k1 = getblock(blocks,i*2+0);
    uint64_t k2 = getblock(blocks,i*2+1);

    k1 *= c1; k1 = ROTL64(k1,31); k1 *= c2; h1 ^= k1;

    h1 = ROTL64(h1,27); h1 += h2; h1 = h1*5+0x52dce729;

    k2 *= c2; k2 = ROTL64(k2,33); k2 *= c1; h2 ^= k2;

    h2 = ROTL64(h2,31); h2 += h1; h2 = h2*5+0x38495ab5;
  }

  //----------
  // tail

  const uint8_t * tail = (const uint8_t*)(data + nblocks*16);

  uint64_t k1 = 0;
  uint64_t k2 = 0;

  switch(len & 15)
  {
  case 15: k2 ^= (uint64_t)(tail[14]) << 48;
  case 14: k2 ^= (uint64_t)(tail[13]) << 40;
  case 13: k2 ^= (uint64_t)(tail[12]) << 32;
  case 12: k2 ^= (uint64_t)(tail[11]) << 24;
  case 11: k2 ^= (uint64_t)(tail[10]) << 16;
  case 10: k2 ^= (uint64_t)(tail[ 9]) << 8;
  case  9: k2 ^= (uint64_t)(tail[ 8]) << 0;
           k2 *= c2; k2 = ROTL64(k2,33); k2 *= c1; h2 ^= k2;

  case  8: k1 ^= (uint64_t)(tail[ 7]) << 56;
  case  7: k1 ^= (uint64_t)(tail[ 6]) << 48;
  case  6: k1 ^= (uint64_t)(tail[ 5]) << 40;
  case  5: k1 ^= (uint64_t)(tail[ 4]) << 32;
  case  4: k1 ^= (uint64_t)(tail[ 3]) << 24;
  case  3: k1 ^= (uint64_t)(tail[ 2]) << 16;
  case  2: k1 ^= (uint64_t)(tail[ 1]) << 8;
  case  1: k1 ^= (uint64_t)(tail[ 0]) << 0;
           k1 *= c1; k1 = ROTL64(k1,31); k1 *= c2; h1 ^= k1;
  };

  //----------
  // finalization

  h1 ^= len; h2 ^= len;

  h1 += h2;
  h2 += h1;

  h1 = fmix64(h1);
  h2 = fmix64(h2);

  h1 += h2;
  h2 += h1;

  ((uint64_t*)out)[0] = h1;
  ((uint64_t*)out)[1] = h2;
}

//-----------------------------------------------------------------------------
```
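A minimal usage sketch, not part of the archived file: assuming murmur3.h declares the three functions with the signatures defined above, each one writes its digest through the out pointer, so the caller must supply a buffer of the right width — 4 bytes for murmurHash3_x86_32 and 16 bytes for either 128-bit variant.

```
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "murmur3.h"

int main(void)
{
  const char *msg = "hello";
  const int len = (int)strlen(msg);
  const uint32_t seed = 42;  /* arbitrary example seed */

  uint32_t h32;          /* 32-bit digest */
  uint32_t h128x86[4];   /* 128-bit digest, x86 variant */
  uint64_t h128x64[2];   /* 128-bit digest, x64 variant */

  murmurHash3_x86_32 (msg, len, seed, &h32);
  murmurHash3_x86_128(msg, len, seed, h128x86);
  murmurHash3_x64_128(msg, len, seed, h128x64);

  printf("x86_32:  %08" PRIx32 "\n", h32);
  printf("x86_128: %08" PRIx32 " %08" PRIx32 " %08" PRIx32 " %08" PRIx32 "\n",
         h128x86[0], h128x86[1], h128x86[2], h128x86[3]);
  printf("x64_128: %016" PRIx64 " %016" PRIx64 "\n",
         h128x64[0], h128x64[1]);
  return 0;
}
```

As the block-read comment in the file notes, getblock assumes unaligned little-endian loads are safe; on platforms where they are not, the conversion belongs in that macro. A hypothetical alignment-safe 32-bit read (a sketch under that assumption, not the upstream definition) could look like:

```
#include <stdint.h>
#include <string.h>

/* Hypothetical replacement for getblock on 32-bit blocks: memcpy avoids
   unaligned loads; a byte swap would be added here on big-endian targets.
   Negative indices work the same way as with the upstream macro. */
static inline uint32_t getblock32_portable ( const uint32_t * p, int i )
{
  uint32_t k;
  memcpy(&k, (const uint8_t *)p + (long)i * 4, sizeof k);
  return k;
}
```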