So I'm trying to make a super light, deliberately memory heavy, yet very fast hashtable for very fast lookups where I don't care about memory usage and I don't care if it makes a rare mistake.
Basically it just creates a ginormous array (yes array, not slice), hashes a string using a modified FNV-1a hash (modified to give only a hash within the array bounds) and then saves or looks up the value using the hash as the array index. In theory this should be the fastest possible way to store and retrieve a key=>value pair.
This is my benchmark:
package main
import (
"fmt"
"time"
)
// dicsize250 is the number of slots in the lookup array.
// NOTE(review): 2097152000 slots × 2 bytes (uint16) ≈ 3.9 GiB per value of
// this type. Random-index probes into an array this large likely thrash the
// TLB and CPU caches, which would explain it losing to the builtin map —
// confirm with profiling before trusting the benchmark numbers.
const dicsize250 = 2097152000 // tested 115 collisions

// Dictionary250_uint16 is a direct-indexed hash "table": the hash IS the
// array index. There is no probing or chaining, so colliding keys silently
// overwrite each other (the stated, accepted trade-off).
type Dictionary250_uint16 struct {
	dictionary [dicsize250]uint16
}
// Add stores v in the slot selected by s's hash.
// A stored value of 0 is indistinguishable from "absent" (see Exists and
// Delete), so callers should not Add a zero value.
func (d *Dictionary250_uint16) Add(s string, v uint16) {
	d.dictionary[id(s, dicsize250)] = v
}
// Delete clears the slot selected by s's hash by writing the sentinel 0.
// A colliding key stored in the same slot is wiped as well.
func (d *Dictionary250_uint16) Delete(s string) {
	d.dictionary[id(s, dicsize250)] = 0
}
// Exists reports whether a non-zero value is stored at s's hash slot.
// False positives are possible when a different key hashed to the same
// slot (the design's accepted "rare mistake").
func (d *Dictionary250_uint16) Exists(s string) bool {
	return d.dictionary[id(s, dicsize250)] != 0
}
// Find returns the value stored at s's hash slot; 0 means "absent"
// (or that 0 was stored, which this design cannot distinguish).
func (d *Dictionary250_uint16) Find(s string) uint16 {
	return d.dictionary[id(s, dicsize250)]
}
// id is an FNV-1a style hash of s, reduced to the range [0, dicsize).
//
// Fix: FNV-1a is defined over bytes, but `for _, c := range s` decodes the
// string as UTF-8 and yields runes — that both deviates from the reference
// algorithm for non-ASCII input and pays a per-character decode cost on the
// hot path. Indexing the string directly iterates raw bytes, which is
// faster and matches FNV-1a. For ASCII keys the output is unchanged.
//
// NOTE(review): 2166136261 / 16777619 are the 32-bit FNV offset basis and
// prime widened to uint64; the true 64-bit FNV-1a uses
// 14695981039346656037 / 1099511628211 — consider switching for better
// distribution over a 2-billion-slot table.
func id(s string, dicsize uint64) uint64 {
	var hash uint64 = 2166136261
	for i := 0; i < len(s); i++ {
		hash = (hash ^ uint64(s[i])) * 16777619
	}
	return hash % dicsize
}
// donothing is a package-level sink written inside the benchmark loops so
// the compiler cannot dead-code-eliminate the lookups being measured.
var donothing bool
// main benchmarks 50M hit and miss lookups against the giant-array
// dictionary and against a builtin map, printing elapsed nanoseconds.
func main() {
	// ~3.9 GiB zeroed allocation — dominated by OS page faults on first touch.
	dic := new(Dictionary250_uint16)
	dic.Add(`test1`, 10)
	dic.Add(`test2`, 20)
	dic.Add(`test3`, 30)
	dic.Add(`test4`, 40)
	dic.Add(`test5`, 50)

	// Map comparison target. Only key existence is tested below, so the
	// values being all 10 does not affect the benchmark.
	mc := make(map[string]uint16)
	mc[`test1`] = 10
	mc[`test2`] = 10
	mc[`test3`] = 10
	mc[`test4`] = 10
	mc[`test5`] = 10

	donothing = true

	// Dic hit — use time.Since instead of hand-differencing UnixNano values.
	start := time.Now()
	for i := 0; i < 50000000; i++ {
		if dic.Exists(`test4`) {
			donothing = true
		}
	}
	fmt.Println("Dic (hit) took ", uint(time.Since(start)))

	// Dic miss
	start = time.Now()
	for i := 0; i < 50000000; i++ {
		if dic.Exists(`whate`) {
			donothing = true
		}
	}
	fmt.Println("Dic (miss) took ", uint(time.Since(start)))

	// Map hit
	start = time.Now()
	for i := 0; i < 50000000; i++ {
		if _, ok := mc[`test4`]; ok {
			donothing = true
		}
	}
	fmt.Println("Map (hit) took ", uint(time.Since(start)))

	// Map miss
	start = time.Now()
	for i := 0; i < 50000000; i++ {
		if _, ok := mc[`whate`]; ok {
			donothing = true
		}
	}
	fmt.Println("Map (miss) took ", uint(time.Since(start)))

	// Reset the sink (preserves the original program's final write).
	donothing = false
}
The results I get are:
Dic (hit) took 2,858,604,059
Dic (miss) took 2,457,173,526
Map (hit) took 1,574,306,146
Map (miss) took 2,525,206,080
Basically my hashtable implementation is a lot slower, especially on hits, than just using a map. I don't see how this is possible since map
is a heavy implementation (in comparison) which doesn't ever have any collisions, and does a lot more calculations. Whereas my implementation is super simple and relies on having a massive array of all possible indices.
What am I doing wrong?