doufeiqiong3515 2014-08-07 05:53

Excessive memory usage, memory leak

I am very, very careful about memory, as I have to write programs that cope with massive datasets.

Currently my application quickly reaches 32GB of memory, starts swapping, and then gets killed by the system.

I do not understand how this can be, since all variables are collectable (created inside functions and quickly released) except Tokens (a TokensStruct) and TokensCount in the Trainer struct. TokensCount is just a uint. Tokens is a 1,000,000-row slice of [5]uint32 plus a string key, which means 20 bytes + the string, call it a maximum of 50 bytes per record. 50 * 1,000,000 = 50MB of memory required. So this script should not use much more than 50MB + overhead + temporary collectable variables in the functions (maybe another 50MB max). The maximum potential size of Tokens is 5,000,000 rows, since that is the size of the dictionary, but even then it would be only 250MB of memory. dictionary is a map and apparently uses around 600MB of memory, as that is what the app starts at, but this is not an issue because dictionary is only loaded once and never written to again.
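To sanity-check that arithmetic, here is a throwaway snippet (sizes assume a 64-bit build; the example is mine, not part of the program below):

    package main

    import (
        "fmt"
        "unsafe"
    )

    func main() {
        fmt.Println(unsafe.Sizeof([5]uint32{})) // 20 bytes per value
        fmt.Println(unsafe.Sizeof(""))          // 16 bytes for the string header; the key bytes are extra
        // 1,000,000 * (20 + 16 + ~14 bytes of key data) ≈ 50MB, nowhere near 32GB
    }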

Instead it uses 32GB of memory and then dies. Given the speed at which it does this, I expect it would happily reach 1TB of memory if it could. The memory increases linearly with the size of the files being loaded, which suggests it never clears any memory at all: everything that enters the app is allocated memory, and that memory is never freed.

I tried calling runtime.GC() in case the garbage collector wasn't running often enough, but this made no difference.
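As I understand it, runtime/debug.FreeOSMemory() goes a step further than runtime.GC(): it forces a collection and then asks the runtime to return as much freed memory as possible to the operating system, which would at least separate "GC not running" from "memory genuinely still referenced". A minimal sketch (my own, not from the program below):

    package main

    import "runtime/debug"

    func main() {
        // Forces a garbage collection, then returns freed pages to the OS.
        // If resident memory still climbs after this, the data is still referenced.
        debug.FreeOSMemory()
    }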

Since the memory usage increases linearly, this would imply that there is a memory leak in getTokens() or LoadZip(). I don't know how this could be, since they are both functions that do one task and then return. It could also be that the tokens variable in Start() is the cause of the leak. Basically it looks like every file that is loaded and parsed is never released from memory, as that is the only way the memory could fill up linearly and keep rising to 32GB++.
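The only mechanism I can think of that would pin whole files is substring retention: as far as I know, the strings returned by strings.Fields share the backing array of the input string, so keeping even one token alive keeps the entire file's buffer alive. A minimal sketch of what I mean (the names here are hypothetical, not taken from the program below):

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        big := strings.Repeat("word ", 1<<20) // stand-in for a whole decompressed file (~5MB)
        tokens := strings.Fields(big)         // each token is a view into big's backing array
        keep := tokens[0]                     // retaining one token keeps all ~5MB reachable
        copied := string([]byte(tokens[0]))   // an explicit copy retains only len(token) bytes
        fmt.Println(keep, copied)
    }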

Absolute nightmare! What's wrong with Go? Any way to fix this?

package main

import (
    "bytes"
    "code.google.com/p/go.text/transform"
    "code.google.com/p/go.text/unicode/norm"
    "compress/zlib"
    "encoding/gob"
    "fmt"
    "github.com/AlasdairF/BinSearch"
    "io/ioutil"
    "os"
    "regexp"
    "runtime"
    "strings"
    "unicode"
    "unicode/utf8"
)

type TokensStruct struct {
    binsearch.Key_string
    Value [][5]uint32
}

type Trainer struct {
    Tokens      TokensStruct
    TokensCount uint
}

func checkErr(err error) {
    if err == nil {
        return
    }
    fmt.Println(`Some Error:`, err)
    panic(err)
}

// Local helper function for normalization of UTF8 strings.
func isMn(r rune) bool {
    return unicode.Is(unicode.Mn, r) // Mn: nonspacing marks
}

// This map is used by removeAccentsBytesDashes to transliterate characters that NFD does not decompose into a base letter plus combining marks.
var transliterations = map[rune]string{'Æ': "E", 'Ð': "D", 'Ł': "L", 'Ø': "OE", 'Þ': "Th", 'ß': "ss", 'æ': "e", 'ð': "d", 'ł': "l", 'ø': "oe", 'þ': "th", 'Œ': "OE", 'œ': "oe"}

// removeAccentsBytesDashes converts accented UTF8 characters in a []byte into their non-accented equivalents and replaces dashes with spaces.
func removeAccentsBytesDashes(b []byte) ([]byte, error) {
    mnBuf := make([]byte, len(b))
    t := transform.Chain(norm.NFD, transform.RemoveFunc(isMn), norm.NFC)
    n, _, err := t.Transform(mnBuf, b, true)
    if err != nil {
        return nil, err
    }
    mnBuf = mnBuf[:n]
    tlBuf := bytes.NewBuffer(make([]byte, 0, len(mnBuf)*2))
    for i, w := 0, 0; i < len(mnBuf); i += w {
        r, width := utf8.DecodeRune(mnBuf[i:])
        if r == '-' {
            tlBuf.WriteByte(' ')
        } else {
            if d, ok := transliterations[r]; ok {
                tlBuf.WriteString(d)
            } else {
                tlBuf.WriteRune(r)
            }
        }
        w = width
    }
    return tlBuf.Bytes(), nil
}

func LoadZip(filename string) ([]byte, error) {
    // Open file for reading
    fi, err := os.Open(filename)
    if err != nil {
        return nil, err
    }
    defer fi.Close()
    // Attach ZIP reader
    fz, err := zlib.NewReader(fi)
    if err != nil {
        return nil, err
    }
    defer fz.Close()
    // Pull
    data, err := ioutil.ReadAll(fz)
    if err != nil {
        return nil, err
    }
    return norm.NFC.Bytes(data), nil // return normalized
}

func getTokens(pibn string) []string {
    var data []byte
    var err error
    data, err = LoadZip(`/storedir/` + pibn + `/text.zip`)
    checkErr(err)
    data, err = removeAccentsBytesDashes(data)
    checkErr(err)
    data = bytes.ToLower(data)
    data = reg2.ReplaceAll(data, []byte("$2")) // remove contractions
    data = reg.ReplaceAllLiteral(data, nil)
    tokens := strings.Fields(string(data))
    return tokens
}

func (t *Trainer) Start() {
    data, err := ioutil.ReadFile(`list.txt`)
    checkErr(err)
    pibns := bytes.Fields(data)
    for i, pibn := range pibns {
        tokens := getTokens(string(pibn))
        t.addTokens(tokens)
        if i%100 == 0 {
            runtime.GC() // I added this just to try to stop the memory craziness, but it makes no difference
        }
    }
}

func (t *Trainer) addTokens(tokens []string) {
    for _, tok := range tokens {
        if _, ok := dictionary[tok]; ok {
            if indx, ok2 := t.Tokens.Find(tok); ok2 {
                ar := t.Tokens.Value[indx]
                ar[0]++
                t.Tokens.Value[indx] = ar
                t.TokensCount++
            } else {
                t.Tokens.AddKeyAt(tok, indx)
                t.Tokens.Value = append(t.Tokens.Value, [5]uint32{0, 0, 0, 0, 0})
                copy(t.Tokens.Value[indx+1:], t.Tokens.Value[indx:])
                t.Tokens.Value[indx] = [5]uint32{1, 0, 0, 0, 0}
                t.TokensCount++
            }
        }
    }
}

func LoadDictionary() {
    dictionary = make(map[string]bool)
    data, err := ioutil.ReadFile(`dictionary`)
    checkErr(err)
    words := bytes.Fields(data)
    for _, word := range words {
        strword := string(word)
        dictionary[strword] = false
    }
}

var reg = regexp.MustCompile(`[^a-z0-9\s]`)
var reg2 = regexp.MustCompile(`\b(c|l|all|dall|dell|nell|sull|coll|pell|gl|agl|dagl|degl|negl|sugl|un|m|t|s|v|d|qu|n|j)'([a-z])`) //contractions
var dictionary map[string]bool

func main() {
    trainer := new(Trainer)
    LoadDictionary()
    trainer.Start()
}

2 answers

  • dongqi8863 2014-08-07 07:25

1. How large are "list.txt" and "dictionary"? If they are huge, it is no wonder the memory usage is huge.

     pibns := bytes.Fields(data)
    

What is len(pibns)?

2. Enable the GC trace (run GODEBUG="gctrace=1" ./yourprogram) to see whether any garbage collections are happening at all.

3. Take some heap profiles along the way, for example like this (this assumes imports of "log", "os", "runtime", "runtime/pprof", "strconv" and "time"):

    func lookupMem() {
        timestamp := strconv.FormatInt(time.Now().Unix(), 10)
        if f, err := os.Create("mem_prof" + timestamp); err != nil {
            log.Printf("record memory profile failed: %v", err)
        } else {
            runtime.GC() // collect first so the profile reflects only live memory
            pprof.WriteHeapProfile(f)
            f.Close()
        }
        if f, err := os.Create("heap_prof" + "." + timestamp); err != nil {
            log.Printf("heap profile failed: %v", err)
        } else {
            p := pprof.Lookup("heap")
            p.WriteTo(f, 2)
            f.Close()
        }
    }
    
    func (t *Trainer) Start() {
        .......
        if i%1000 == 0 {
            // if len(pibns) is not very large, record some meminfo
            lookupMem()
        }
        .......
    }
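Once a few profiles have been written, inspect them with the pprof tool to see which allocation sites hold the live memory (assuming the binary is named yourprogram; substitute the actual timestamp suffix of the file):

    go tool pprof ./yourprogram mem_prof<timestamp>

Typing top at the pprof prompt lists the functions responsible for the most retained memory.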
    