I wrote a simple program that ORs all the values contained in a huge go slice. When I use a 10 times bigger slice, I would expect a 10x performance drop. However when executing the provided test, there is a huge performance gap. The program output is the following :
oadam@oadam-Latitude-E6510:~/$ go test -bench .
testing: warning: no tests to run
PASS
BenchmarkLittle 2000000000 0.11 ns/op
BenchmarkBig 1 2417869962 ns/op
ok _/home/oadam/ 5.048s
And the code
package main
import (
"math/rand"
"testing"
)
const (
little = 5000000
big = 50000000
)
var a = make([]uint32, big)
func benchOR(b *testing.B, l int) {
for i := 0; i < l; i++ {
a[i] = rand.Uint32()
}
var result uint32
for i := 0; i < l; i++ {
result |= a[i]
}
}
func BenchmarkLittle(b *testing.B) {
benchOR(b, little)
}
func BenchmarkBig(b *testing.B) {
benchOR(b, big)
}
EDIT: must be a bug in go test -bench. Using a manual timing I don't reproduce
package main
import (
"log"
"math/rand"
"time"
)
const (
little = 5000000
big = 50000000
)
var a = make([]uint32, big)
func initA(l int) {
for i := 0; i < l; i++ {
a[i] = rand.Uint32()
}
}
func test(l int) uint32 {
var result uint32
for i := 0; i < l; i++ {
result |= a[i]
}
return result
}
func main() {
initA(little)
var before = time.Now()
test(little)
log.Println(time.Since(before))
initA(big)
var before2 = time.Now()
test(big)
log.Println(time.Since(before2))
}