temporarily switching to the glog library

This commit is contained in:
Chris Lu
2013-08-08 23:57:22 -07:00
parent b27947b355
commit ed154053c8
34 changed files with 199 additions and 167 deletions

View File

@@ -1,7 +1,7 @@
package storage
import (
"log"
"code.google.com/p/weed-fs/go/glog"
"math/rand"
"os"
"runtime"
@@ -39,7 +39,7 @@ func TestCdbMap1Mem(t *testing.T) {
t.Fatalf("error opening cdb: %s", err)
}
b := getMemStats()
log.Printf("opening cdb consumed %d bytes", b-a)
glog.V(0).Infof("opening cdb consumed %d bytes", b-a)
defer nm.Close()
a = getMemStats()
@@ -47,7 +47,7 @@ func TestCdbMap1Mem(t *testing.T) {
t.Fatalf("error visiting %s: %s", nm, err)
}
b = getMemStats()
log.Printf("visit cdb %d consumed %d bytes", i, b-a)
glog.V(0).Infof("visit cdb %d consumed %d bytes", i, b-a)
nm.Close()
indexFile, err := os.Open(testIndexFilename)
@@ -61,7 +61,7 @@ func TestCdbMap1Mem(t *testing.T) {
}
defer nm.Close()
b = getMemStats()
log.Printf("opening idx consumed %d bytes", b-a)
glog.V(0).Infof("opening idx consumed %d bytes", b-a)
i = 0
a = getMemStats()
@@ -69,7 +69,7 @@ func TestCdbMap1Mem(t *testing.T) {
t.Fatalf("error visiting %s: %s", nm, err)
}
b = getMemStats()
log.Printf("visit idx %d consumed %d bytes", i, b-a)
glog.V(0).Infof("visit idx %d consumed %d bytes", i, b-a)
}
func BenchmarkCdbMap9List(t *testing.B) {
@@ -88,7 +88,7 @@ func BenchmarkCdbMap9List(t *testing.B) {
}
defer idx.Close()
b := getMemStats()
log.Printf("LoadNeedleMap consumed %d bytes", b-a)
glog.V(0).Infof("LoadNeedleMap consumed %d bytes", b-a)
cdbFn := testIndexFilename + ".cdb"
a = getMemStats()
@@ -99,10 +99,10 @@ func BenchmarkCdbMap9List(t *testing.B) {
}
defer m.Close()
b = getMemStats()
log.Printf("OpenCdbMap consumed %d bytes", b-a)
glog.V(0).Infof("OpenCdbMap consumed %d bytes", b-a)
i := 0
log.Printf("checking whether the cdb contains every key")
glog.V(0).Infoln("checking whether the cdb contains every key")
t.StartTimer()
err = idx.Visit(func(nv NeedleValue) error {
if i > t.N || rand.Intn(10) < 9 {
@@ -110,7 +110,7 @@ func BenchmarkCdbMap9List(t *testing.B) {
}
i++
if i%1000 == 0 {
log.Printf("%d. %s", i, nv)
glog.V(0).Infof("%d. %s", i, nv)
}
if nv2, ok := m.Get(uint64(nv.Key)); !ok || nv2 == nil {
t.Errorf("%s in index, not in cdb", nv.Key)
@@ -130,7 +130,7 @@ func BenchmarkCdbMap9List(t *testing.B) {
}
i = 0
log.Printf("checking wheter the cdb contains no stray keys")
glog.V(0).Infoln("checking wheter the cdb contains no stray keys")
t.StartTimer()
err = m.Visit(func(nv NeedleValue) error {
if i > t.N || rand.Intn(10) < 9 {
@@ -147,7 +147,7 @@ func BenchmarkCdbMap9List(t *testing.B) {
}
i++
if i%1000 == 0 {
log.Printf("%d. %s", i, nv)
glog.V(0).Infof("%d. %s", i, nv)
}
t.SetBytes(int64(nv.Size))
return nil

View File

@@ -38,13 +38,13 @@ func (cs *CompactSection) Set(key Key, offset uint32, size uint32) uint32 {
}
if i := cs.binarySearchValues(key); i >= 0 {
ret = cs.values[i].Size
//println("key", key, "old size", ret)
//glog.V(4).Infoln("key", key, "old size", ret)
cs.values[i].Offset, cs.values[i].Size = offset, size
} else {
needOverflow := cs.counter >= batch
needOverflow = needOverflow || cs.counter > 0 && cs.values[cs.counter-1].Key > key
if needOverflow {
//println("start", cs.start, "counter", cs.counter, "key", key)
//glog.V(4).Infoln("start", cs.start, "counter", cs.counter, "key", key)
if oldValue, found := cs.overflow[key]; found {
ret = oldValue.Size
}
@@ -52,7 +52,7 @@ func (cs *CompactSection) Set(key Key, offset uint32, size uint32) uint32 {
} else {
p := &cs.values[cs.counter]
p.Key, p.Offset, p.Size = key, offset, size
//println("added index", cs.counter, "key", key, cs.values[cs.counter].Key)
//glog.V(4).Infoln("added index", cs.counter, "key", key, cs.values[cs.counter].Key)
cs.counter++
}
}
@@ -88,16 +88,16 @@ func (cs *CompactSection) binarySearchValues(key Key) int {
if h >= 0 && cs.values[h].Key < key {
return -2
}
//println("looking for key", key)
//glog.V(4).Infoln("looking for key", key)
for l <= h {
m := (l + h) / 2
//println("mid", m, "key", cs.values[m].Key, cs.values[m].Offset, cs.values[m].Size)
//glog.V(4).Infoln("mid", m, "key", cs.values[m].Key, cs.values[m].Offset, cs.values[m].Size)
if cs.values[m].Key < key {
l = m + 1
} else if key < cs.values[m].Key {
h = m - 1
} else {
//println("found", m)
//glog.V(4).Infoln("found", m)
return m
}
}
@@ -116,7 +116,7 @@ func NewCompactMap() CompactMap {
func (cm *CompactMap) Set(key Key, offset uint32, size uint32) uint32 {
x := cm.binarySearchCompactSection(key)
if x < 0 {
//println(x, "creating", len(cm.list), "section1, starting", key)
//glog.V(4).Infoln(x, "creating", len(cm.list), "section1, starting", key)
cm.list = append(cm.list, NewCompactSection(key))
x = len(cm.list) - 1
}

View File

@@ -2,7 +2,7 @@ package storage
import (
"code.google.com/p/weed-fs/go/util"
"log"
"code.google.com/p/weed-fs/go/glog"
"os"
"testing"
)
@@ -23,7 +23,7 @@ func LoadNewNeedleMap(file *os.File) CompactMap {
count, e := file.Read(bytes)
if count > 0 {
fstat, _ := file.Stat()
log.Println("Loading index file", fstat.Name(), "size", fstat.Size())
glog.V(0).Infoln("Loading index file", fstat.Name(), "size", fstat.Size())
}
for count > 0 && e == nil {
for i := 0; i < count; i += 16 {

View File

@@ -20,7 +20,7 @@ func TestXYZ(t *testing.T) {
// for i := uint32(0); i < 100; i++ {
// if v := m.Get(Key(i)); v != nil {
// println(i, "=", v.Key, v.Offset, v.Size)
// glog.V(4).Infoln(i, "=", v.Key, v.Offset, v.Size)
// }
// }

View File

@@ -4,6 +4,7 @@ import (
"bytes"
"compress/flate"
"compress/gzip"
"code.google.com/p/weed-fs/go/glog"
"io/ioutil"
"strings"
)
@@ -36,11 +37,11 @@ func GzipData(input []byte) ([]byte, error) {
buf := new(bytes.Buffer)
w, _ := gzip.NewWriterLevel(buf, flate.BestCompression)
if _, err := w.Write(input); err != nil {
println("error compressing data:", err)
glog.V(4).Infoln("error compressing data:", err)
return nil, err
}
if err := w.Close(); err != nil {
println("error closing compressed data:", err)
glog.V(4).Infoln("error closing compressed data:", err)
return nil, err
}
return buf.Bytes(), nil
@@ -51,7 +52,7 @@ func UnGzipData(input []byte) ([]byte, error) {
defer r.Close()
output, err := ioutil.ReadAll(r)
if err != nil {
println("error uncompressing data:", err)
glog.V(4).Infoln("error uncompressing data:", err)
}
return output, err
}

View File

@@ -4,6 +4,7 @@ import (
"code.google.com/p/weed-fs/go/util"
"encoding/hex"
"strings"
"code.google.com/p/weed-fs/go/glog"
)
type FileId struct {
@@ -18,7 +19,7 @@ func NewFileId(VolumeId VolumeId, Key uint64, Hashcode uint32) *FileId {
func ParseFileId(fid string) *FileId {
a := strings.Split(fid, ",")
if len(a) != 2 {
println("Invalid fid", fid, ", split length", len(a))
glog.V(4).Infoln("Invalid fid", fid, ", split length", len(a))
return nil
}
vid_string, key_hash_string := a[0], a[1]

View File

@@ -5,7 +5,7 @@ import (
"encoding/hex"
"errors"
"io/ioutil"
"log"
"code.google.com/p/weed-fs/go/glog"
"mime"
"net/http"
"path"
@@ -41,13 +41,13 @@ type Needle struct {
func ParseUpload(r *http.Request) (fileName string, data []byte, mimeType string, isGzipped bool, modifiedTime uint64, e error) {
form, fe := r.MultipartReader()
if fe != nil {
log.Println("MultipartReader [ERROR]", fe)
glog.V(0).Infoln("MultipartReader [ERROR]", fe)
e = fe
return
}
part, fe := form.NextPart()
if fe != nil {
log.Println("Reading Multi part [ERROR]", fe)
glog.V(0).Infoln("Reading Multi part [ERROR]", fe)
e = fe
return
}
@@ -60,7 +60,7 @@ func ParseUpload(r *http.Request) (fileName string, data []byte, mimeType string
}
data, e = ioutil.ReadAll(part)
if e != nil {
log.Println("Reading Content [ERROR]", e)
glog.V(0).Infoln("Reading Content [ERROR]", e)
return
}
dotIndex := strings.LastIndex(fileName, ".")
@@ -131,7 +131,7 @@ func (n *Needle) ParsePath(fid string) {
length := len(fid)
if length <= 8 {
if length > 0 {
log.Println("Invalid fid", fid, "length", length)
glog.V(0).Infoln("Invalid fid", fid, "length", length)
}
return
}
@@ -153,7 +153,7 @@ func ParseKeyHash(key_hash_string string) (uint64, uint32) {
key_hash_bytes, khe := hex.DecodeString(key_hash_string)
key_hash_len := len(key_hash_bytes)
if khe != nil || key_hash_len <= 4 {
log.Println("Invalid key_hash", key_hash_string, "length:", key_hash_len, "error", khe)
glog.V(0).Infoln("Invalid key_hash", key_hash_string, "length:", key_hash_len, "error", khe)
return 0, 0
}
key := util.BytesToUint64(key_hash_bytes[0 : key_hash_len-4])

View File

@@ -57,14 +57,14 @@ func LoadNeedleMap(file *os.File) (*NeedleMap, error) {
nm.FileByteCounter = nm.FileByteCounter + uint64(size)
if offset > 0 {
oldSize := nm.m.Set(Key(key), offset, size)
//log.Println("reading key", key, "offset", offset, "size", size, "oldSize", oldSize)
//glog.V(0).Infoln("reading key", key, "offset", offset, "size", size, "oldSize", oldSize)
if oldSize > 0 {
nm.DeletionCounter++
nm.DeletionByteCounter = nm.DeletionByteCounter + uint64(oldSize)
}
} else {
oldSize := nm.m.Delete(Key(key))
//log.Println("removing key", key, "offset", offset, "size", size, "oldSize", oldSize)
//glog.V(0).Infoln("removing key", key, "offset", offset, "size", size, "oldSize", oldSize)
nm.DeletionCounter++
nm.DeletionByteCounter = nm.DeletionByteCounter + uint64(oldSize)
}

View File

@@ -5,7 +5,7 @@ import (
"errors"
"fmt"
"io"
"log"
"code.google.com/p/weed-fs/go/glog"
"os"
)
@@ -27,12 +27,12 @@ func (n *Needle) Append(w io.Writer, version Version) (size uint32, err error) {
defer func(s io.Seeker, off int64) {
if err != nil {
if _, e = s.Seek(off, 0); e != nil {
log.Printf("Failed to seek %s back to %d with error: %s\n", w, off, e)
glog.V(0).Infof("Failed to seek %s back to %d with error: %s", w, off, e.Error())
}
}
}(s, end)
} else {
err = fmt.Errorf("Cnnot Read Current Volume Position: %s", e)
err = fmt.Errorf("Cnnot Read Current Volume Position: %s", e.Error())
return
}
}

View File

@@ -5,7 +5,7 @@ import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"code.google.com/p/weed-fs/go/glog"
"net/url"
"strconv"
"strings"
@@ -95,7 +95,7 @@ func (s *Store) addVolume(vid VolumeId, replicationType ReplicationType) error {
return fmt.Errorf("Volume Id %s already exists!", vid)
}
if location := s.findFreeLocation(); location != nil {
log.Println("In dir", location.directory, "adds volume =", vid, ", replicationType =", replicationType)
glog.V(0).Infoln("In dir", location.directory, "adds volume =", vid, ", replicationType =", replicationType)
if volume, err := NewVolume(location.directory, vid, replicationType); err == nil {
location.volumes[vid] = volume
return nil
@@ -163,14 +163,14 @@ func (l *DiskLocation) loadExistingVolumes() {
if l.volumes[vid] == nil {
if v, e := NewVolume(l.directory, vid, CopyNil); e == nil {
l.volumes[vid] = v
log.Println("In dir", l.directory, "read volume =", vid, "replicationType =", v.ReplicaType, "version =", v.Version(), "size =", v.Size())
glog.V(0).Infoln("In dir", l.directory, "read volume =", vid, "replicationType =", v.ReplicaType, "version =", v.Version(), "size =", v.Size())
}
}
}
}
}
}
log.Println("Store started on dir:", l.directory, "with", len(l.volumes), "volumes", "max", l.maxVolumeCount)
glog.V(0).Infoln("Store started on dir:", l.directory, "with", len(l.volumes), "volumes", "max", l.maxVolumeCount)
}
func (s *Store) Status() []*VolumeInfo {
var stats []*VolumeInfo
@@ -259,15 +259,15 @@ func (s *Store) Write(i VolumeId, n *Needle) (size uint32, err error) {
err = fmt.Errorf("Volume Size Limit %d Exceeded! Current size is %d", s.volumeSizeLimit, v.ContentSize())
}
if err != nil && s.volumeSizeLimit < v.ContentSize()+uint64(size) && s.volumeSizeLimit >= v.ContentSize() {
log.Println("volume", i, "size is", v.ContentSize(), "close to", s.volumeSizeLimit)
glog.V(0).Infoln("volume", i, "size is", v.ContentSize(), "close to", s.volumeSizeLimit)
if err = s.Join(); err != nil {
log.Printf("error with Join: %s", err)
glog.V(0).Infoln("error with Join:", err)
}
}
}
return
}
log.Println("volume", i, "not found!")
glog.V(0).Infoln("volume", i, "not found!")
err = fmt.Errorf("Volume %s not found!", i)
return
}

View File

@@ -2,10 +2,10 @@ package storage
import (
"bytes"
"code.google.com/p/weed-fs/go/glog"
"errors"
"fmt"
"io"
"log"
"os"
"path"
"sync"
@@ -45,7 +45,7 @@ func NewVolume(dirname string, id VolumeId, replicationType ReplicationType) (v
e = v.load(true)
return
}
func LoadVolumeOnly(dirname string, id VolumeId) (v *Volume, e error) {
func loadVolumeWithoutIndex(dirname string, id VolumeId) (v *Volume, e error) {
v = &Volume{dir: dirname, Id: id}
v.SuperBlock = SuperBlock{ReplicaType: CopyNil}
e = v.load(false)
@@ -57,12 +57,12 @@ func (v *Volume) load(alsoLoadIndex bool) error {
v.dataFile, e = os.OpenFile(fileName+".dat", os.O_RDWR|os.O_CREATE, 0644)
if e != nil {
if !os.IsPermission(e) {
return fmt.Errorf("cannot create Volume Data %s.dat: %s", fileName, e)
return fmt.Errorf("cannot create Volume Data %s.dat: %s", fileName, e.Error())
}
if v.dataFile, e = os.Open(fileName + ".dat"); e != nil {
return fmt.Errorf("cannot open Volume Data %s.dat: %s", fileName, e)
return fmt.Errorf("cannot open Volume Data %s.dat: %s", fileName, e.Error())
}
log.Printf("opening " + fileName + ".dat in READONLY mode")
glog.V(0).Infoln("opening " + fileName + ".dat in READONLY mode")
v.readOnly = true
}
if v.ReplicaType == CopyNil {
@@ -73,28 +73,40 @@ func (v *Volume) load(alsoLoadIndex bool) error {
if e == nil && alsoLoadIndex {
var indexFile *os.File
if v.readOnly {
glog.V(4).Infoln("opening file", fileName+".idx")
if indexFile, e = os.Open(fileName + ".idx"); e != nil && !os.IsNotExist(e) {
return fmt.Errorf("cannot open index file %s.idx: %s", fileName, e)
return fmt.Errorf("cannot open index file %s.idx: %s", fileName, e.Error())
}
if indexFile != nil {
log.Printf("converting %s.idx to %s.cdb", fileName, fileName)
if e = ConvertIndexToCdb(fileName+".cdb", indexFile); e != nil {
log.Printf("error converting %s.idx to %s.cdb: %s", fileName, fileName)
} else {
indexFile.Close()
os.Remove(indexFile.Name())
indexFile = nil
glog.V(4).Infoln("check file", fileName+".cdb")
if _, err := os.Stat(fileName + ".cdb"); os.IsNotExist(err) {
glog.V(0).Infof("converting %s.idx to %s.cdb", fileName, fileName)
if e = ConvertIndexToCdb(fileName+".cdb", indexFile); e != nil {
glog.V(0).Infof("error converting %s.idx to %s.cdb: %s", fileName, e.Error())
} else {
indexFile.Close()
indexFile = nil
}
}
}
v.nm, e = OpenCdbMap(fileName + ".cdb")
return e
} else {
indexFile, e = os.OpenFile(fileName+".idx", os.O_RDWR|os.O_CREATE, 0644)
if e != nil {
return fmt.Errorf("cannot create Volume Data %s.dat: %s", fileName, e)
glog.V(4).Infoln("open file", fileName+".cdb")
if v.nm, e = OpenCdbMap(fileName + ".cdb"); e != nil {
if os.IsNotExist(e) {
glog.V(0).Infof("Failed to read cdb file :%s, fall back to normal readonly mode.", fileName)
} else {
glog.V(0).Infof("%s.cdb open errro:%s", fileName, e.Error())
return e
}
}
}
glog.V(4).Infoln("open to write file", fileName+".idx")
indexFile, e = os.OpenFile(fileName+".idx", os.O_RDWR|os.O_CREATE, 0644)
if e != nil {
return fmt.Errorf("cannot create Volume Data %s.dat: %s", fileName, e.Error())
}
glog.V(4).Infoln("loading file", fileName+".idx")
v.nm, e = LoadNeedleMap(indexFile)
glog.V(4).Infoln("loading error:", e)
}
return e
}
@@ -108,7 +120,7 @@ func (v *Volume) Size() int64 {
if e == nil {
return stat.Size()
}
log.Printf("Failed to read file size %s %s\n", v.dataFile.Name(), e.Error())
glog.V(0).Infof("Failed to read file size %s %s", v.dataFile.Name(), e.Error())
return -1
}
func (v *Volume) Close() {
@@ -120,7 +132,7 @@ func (v *Volume) Close() {
func (v *Volume) maybeWriteSuperBlock() error {
stat, e := v.dataFile.Stat()
if e != nil {
log.Printf("failed to stat datafile %s: %s", v.dataFile, e)
glog.V(0).Infof("failed to stat datafile %s: %s", v.dataFile, e.Error())
return e
}
if stat.Size() == 0 {
@@ -221,10 +233,10 @@ func (v *Volume) delete(n *Needle) (uint32, error) {
//fmt.Println("key", n.Id, "volume offset", nv.Offset, "data_size", n.Size, "cached size", nv.Size)
if ok {
size := nv.Size
if err:= v.nm.Delete(n.Id); err != nil {
if err := v.nm.Delete(n.Id); err != nil {
return size, err
}
if _, err:= v.dataFile.Seek(0, 2); err != nil {
if _, err := v.dataFile.Seek(0, 2); err != nil {
return size, err
}
n.Data = make([]byte, 0)
@@ -286,7 +298,7 @@ func (v *Volume) freeze() error {
defer v.accessLock.Unlock()
bn, _ := nakeFilename(v.dataFile.Name())
cdbFn := bn + ".cdb"
log.Printf("converting %s to %s", nm.indexFile.Name(), cdbFn)
glog.V(0).Infof("converting %s to %s", nm.indexFile.Name(), cdbFn)
err := DumpNeedleMapToCdb(cdbFn, nm)
if err != nil {
return err
@@ -304,7 +316,7 @@ func ScanVolumeFile(dirname string, id VolumeId,
visitSuperBlock func(SuperBlock) error,
visitNeedle func(n *Needle, offset uint32) error) (err error) {
var v *Volume
if v, err = LoadVolumeOnly(dirname, id); err != nil {
if v, err = loadVolumeWithoutIndex(dirname, id); err != nil {
return
}
if err = visitSuperBlock(v.SuperBlock); err != nil {
@@ -361,7 +373,7 @@ func (v *Volume) copyDataAndGenerateIndexFile(dstName, idxName string) (err erro
return err
}, func(n *Needle, offset uint32) error {
nv, ok := v.nm.Get(n.Id)
//log.Println("file size is", n.Size, "rest", rest)
//glog.V(0).Infoln("file size is", n.Size, "rest", rest)
if ok && nv.Offset*NeedlePaddingSize == offset {
if nv.Size > 0 {
if _, err = nm.Put(n.Id, new_offset/NeedlePaddingSize, n.Size); err != nil {
@@ -371,7 +383,7 @@ func (v *Volume) copyDataAndGenerateIndexFile(dstName, idxName string) (err erro
return fmt.Errorf("cannot append needle: %s", err)
}
new_offset += n.DiskSize()
//log.Println("saving key", n.Id, "volume offset", old_offset, "=>", new_offset, "data_size", n.Size, "rest", rest)
//glog.V(0).Infoln("saving key", n.Id, "volume offset", old_offset, "=>", new_offset, "data_size", n.Size, "rest", rest)
}
}
return nil