refactor filer_pb.Entry and filer.Entry to use GetChunks()
for later locking on reading chunks
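The change is mechanical: every direct read of the Chunks field goes through a GetChunks() accessor, so that a later change can guard chunk access with a lock without touching call sites again. Below is a minimal sketch of what such a follow-up could look like. It assumes the existing weed/filer package context (filer_pb and sync imported); the chunksLock field and the locking are hypothetical and not part of this commit, where GetChunks() simply returns entry.Chunks.

    // Hypothetical follow-up (not in this commit): guard chunk reads behind
    // the accessor introduced here. Fields other than Chunks are omitted.
    type Entry struct {
        Chunks     []*filer_pb.FileChunk
        chunksLock sync.RWMutex // hypothetical lock serializing chunk access
    }

    func (entry *Entry) GetChunks() []*filer_pb.FileChunk {
        entry.chunksLock.RLock()
        defer entry.chunksLock.RUnlock()
        return entry.Chunks
    }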
@@ -158,7 +158,7 @@ func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer.Ent
         return fmt.Errorf("encode %s: %s", entry.FullPath, err)
     }

-    if len(entry.Chunks) > filer.CountEntryChunksForGzip {
+    if len(entry.GetChunks()) > filer.CountEntryChunksForGzip {
         meta = util.MaybeGzipData(meta)
     }

@@ -157,7 +157,7 @@ func (store *ArangodbStore) InsertEntry(ctx context.Context, entry *filer.Entry)
         return fmt.Errorf("encode %s: %s", entry.FullPath, err)
     }

-    if len(entry.Chunks) > filer.CountEntryChunksForGzip {
+    if len(entry.GetChunks()) > filer.CountEntryChunksForGzip {
         meta = util.MaybeGzipData(meta)
     }
     model := &Model{
@@ -196,7 +196,7 @@ func (store *ArangodbStore) UpdateEntry(ctx context.Context, entry *filer.Entry)
         return fmt.Errorf("encode %s: %s", entry.FullPath, err)
     }

-    if len(entry.Chunks) > filer.CountEntryChunksForGzip {
+    if len(entry.GetChunks()) > filer.CountEntryChunksForGzip {
         meta = util.MaybeGzipData(meta)
     }
     model := &Model{

@@ -100,7 +100,7 @@ func (store *CassandraStore) InsertEntry(ctx context.Context, entry *filer.Entry
         return fmt.Errorf("encode %s: %s", entry.FullPath, err)
     }

-    if len(entry.Chunks) > filer.CountEntryChunksForGzip {
+    if len(entry.GetChunks()) > filer.CountEntryChunksForGzip {
         meta = util.MaybeGzipData(meta)
     }

@@ -46,7 +46,7 @@ type Entry struct {
 }

 func (entry *Entry) Size() uint64 {
-    return maxUint64(maxUint64(TotalSize(entry.Chunks), entry.FileSize), uint64(len(entry.Content)))
+    return maxUint64(maxUint64(TotalSize(entry.GetChunks()), entry.FileSize), uint64(len(entry.Content)))
 }

 func (entry *Entry) Timestamp() time.Time {
@@ -91,7 +91,7 @@ func (entry *Entry) ToExistingProtoEntry(message *filer_pb.Entry) {
     }
     message.IsDirectory = entry.IsDirectory()
     message.Attributes = EntryAttributeToPb(entry)
-    message.Chunks = entry.Chunks
+    message.Chunks = entry.GetChunks()
     message.Extended = entry.Extended
     message.HardLinkId = entry.HardLinkId
     message.HardLinkCounter = entry.HardLinkCounter
@@ -123,6 +123,10 @@ func (entry *Entry) ToProtoFullEntry() *filer_pb.FullEntry {
     }
 }

+func (entry *Entry) GetChunks() []*filer_pb.FileChunk {
+    return entry.Chunks
+}
+
 func FromPbEntry(dir string, entry *filer_pb.Entry) *Entry {
     t := &Entry{}
     t.FullPath = util.NewFullPath(dir, entry.Name)
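For context (not part of the diff itself): the accessor added above is what the remaining hunks switch filer.Entry call sites to; for *filer_pb.Entry values the protobuf-generated GetChunks() getter is used instead, which also tolerates a nil receiver. A typical call site changes like this (illustrative snippet, not a line from this commit):

    size := filer.TotalSize(entry.Chunks)      // before: direct field read
    size := filer.TotalSize(entry.GetChunks()) // after: read through the accessor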
@@ -82,7 +82,7 @@ func (store *EtcdStore) InsertEntry(ctx context.Context, entry *filer.Entry) (er
         return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
     }

-    if len(entry.Chunks) > filer.CountEntryChunksForGzip {
+    if len(entry.GetChunks()) > filer.CountEntryChunksForGzip {
         meta = weed_util.MaybeGzipData(meta)
     }

@@ -31,19 +31,19 @@ func FileSize(entry *filer_pb.Entry) (size uint64) {
             fileSize = maxUint64(fileSize, uint64(entry.RemoteEntry.RemoteSize))
         }
     }
-    return maxUint64(TotalSize(entry.Chunks), fileSize)
+    return maxUint64(TotalSize(entry.GetChunks()), fileSize)
 }

 func ETag(entry *filer_pb.Entry) (etag string) {
     if entry.Attributes == nil || entry.Attributes.Md5 == nil {
-        return ETagChunks(entry.Chunks)
+        return ETagChunks(entry.GetChunks())
     }
     return fmt.Sprintf("%x", entry.Attributes.Md5)
 }

 func ETagEntry(entry *Entry) (etag string) {
     if entry.Attr.Md5 == nil {
-        return ETagChunks(entry.Chunks)
+        return ETagChunks(entry.GetChunks())
     }
     return fmt.Sprintf("%x", entry.Attr.Md5)
 }

@@ -75,7 +75,7 @@ func (fc *FilerConf) loadFromFiler(filer *Filer) (err error) {
         return fc.LoadFromBytes(entry.Content)
     }

-    return fc.loadFromChunks(filer, entry.Content, entry.Chunks, entry.Size())
+    return fc.loadFromChunks(filer, entry.Content, entry.GetChunks(), entry.Size())
 }

 func (fc *FilerConf) loadFromChunks(filer *Filer, content []byte, chunks []*filer_pb.FileChunk, size uint64) (err error) {

@@ -48,7 +48,7 @@ func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p util.FullPath, isR
     }

     if shouldDeleteChunks && !isDeleteCollection {
-        f.DirectDeleteChunks(entry.Chunks)
+        f.DirectDeleteChunks(entry.GetChunks())
     }

     // delete the file or folder
@@ -93,7 +93,7 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry
                 // hard link chunk data are deleted separately
                 err = onHardLinkIdsFn([]HardLinkId{sub.HardLinkId})
             } else {
-                err = onChunksFn(sub.Chunks)
+                err = onChunksFn(sub.GetChunks())
             }
         }
         if err != nil && !ignoreRecursiveError {
@@ -143,17 +143,17 @@ func (f *Filer) deleteChunksIfNotNew(oldEntry, newEntry *Entry) {
         return
     }
     if newEntry == nil {
-        f.DeleteChunks(oldEntry.Chunks)
+        f.DeleteChunks(oldEntry.GetChunks())
         return
     }

     var toDelete []*filer_pb.FileChunk
     newChunkIds := make(map[string]bool)
     newDataChunks, newManifestChunks, err := ResolveChunkManifest(f.MasterClient.GetLookupFileIdFunction(),
-        newEntry.Chunks, 0, math.MaxInt64)
+        newEntry.GetChunks(), 0, math.MaxInt64)
     if err != nil {
         glog.Errorf("Failed to resolve new entry chunks when delete old entry chunks. new: %s, old: %s",
-            newEntry.Chunks, oldEntry.Chunks)
+            newEntry.GetChunks(), oldEntry.Chunks)
         return
     }
     for _, newChunk := range newDataChunks {
@@ -164,10 +164,10 @@ func (f *Filer) deleteChunksIfNotNew(oldEntry, newEntry *Entry) {
     }

     oldDataChunks, oldManifestChunks, err := ResolveChunkManifest(f.MasterClient.GetLookupFileIdFunction(),
-        oldEntry.Chunks, 0, math.MaxInt64)
+        oldEntry.GetChunks(), 0, math.MaxInt64)
     if err != nil {
         glog.Errorf("Failed to resolve old entry chunks when delete old entry chunks. new: %s, old: %s",
-            newEntry.Chunks, oldEntry.Chunks)
+            newEntry.GetChunks(), oldEntry.GetChunks())
         return
     }
     for _, oldChunk := range oldDataChunks {

@@ -153,7 +153,7 @@ func (f *Filer) ReadPersistedLogBuffer(startTime time.Time, stopTsNs int64, each
         }
     }
     // println("processing", hourMinuteEntry.FullPath)
-    chunkedFileReader := NewChunkStreamReaderFromFiler(f.MasterClient, hourMinuteEntry.Chunks)
+    chunkedFileReader := NewChunkStreamReaderFromFiler(f.MasterClient, hourMinuteEntry.GetChunks())
     if lastTsNs, err = ReadEachLogEntry(chunkedFileReader, sizeBuf, startTsNs, stopTsNs, eachLogEntryFn); err != nil {
         chunkedFileReader.Close()
         if err == io.EOF {

@@ -36,11 +36,11 @@ func (f *Filer) appendToFile(targetFile string, data []byte) error {
     } else if err != nil {
         return fmt.Errorf("find %s: %v", fullpath, err)
     } else {
-        offset = int64(TotalSize(entry.Chunks))
+        offset = int64(TotalSize(entry.GetChunks()))
     }

     // append to existing chunks
-    entry.Chunks = append(entry.Chunks, uploadResult.ToPbFileChunk(assignResult.Fid, offset))
+    entry.Chunks = append(entry.GetChunks(), uploadResult.ToPbFileChunk(assignResult.Fid, offset))

     // update the entry
     err = f.CreateEntry(context.Background(), entry, false, false, nil, false)

@@ -44,7 +44,7 @@ func TestProtoMarshal(t *testing.T) {
     notification2 := &filer_pb.EventNotification{}
     proto.Unmarshal(text, notification2)

-    if notification2.OldEntry.Chunks[0].SourceFileId != notification.OldEntry.Chunks[0].SourceFileId {
+    if notification2.OldEntry.GetChunks()[0].SourceFileId != notification.OldEntry.GetChunks()[0].SourceFileId {
         t.Fatalf("marshal/unmarshal error: %s", text)
     }

@@ -60,7 +60,7 @@ func (f *Filer) readEntry(chunks []*filer_pb.FileChunk, size uint64) ([]byte, er

 func (f *Filer) reloadFilerConfiguration(entry *filer_pb.Entry) {
     fc := NewFilerConf()
-    err := fc.loadFromChunks(f, entry.Content, entry.Chunks, FileSize(entry))
+    err := fc.loadFromChunks(f, entry.Content, entry.GetChunks(), FileSize(entry))
     if err != nil {
         glog.Errorf("read filer conf chunks: %v", err)
         return

@@ -118,7 +118,7 @@ func (fsw *FilerStoreWrapper) InsertEntry(ctx context.Context, entry *Entry) err
         stats.FilerStoreHistogram.WithLabelValues(actualStore.GetName(), "insert").Observe(time.Since(start).Seconds())
     }()

-    filer_pb.BeforeEntrySerialization(entry.Chunks)
+    filer_pb.BeforeEntrySerialization(entry.GetChunks())
     if entry.Mime == "application/octet-stream" {
         entry.Mime = ""
     }
@@ -139,7 +139,7 @@ func (fsw *FilerStoreWrapper) UpdateEntry(ctx context.Context, entry *Entry) err
         stats.FilerStoreHistogram.WithLabelValues(actualStore.GetName(), "update").Observe(time.Since(start).Seconds())
     }()

-    filer_pb.BeforeEntrySerialization(entry.Chunks)
+    filer_pb.BeforeEntrySerialization(entry.GetChunks())
     if entry.Mime == "application/octet-stream" {
         entry.Mime = ""
     }
@@ -168,7 +168,7 @@ func (fsw *FilerStoreWrapper) FindEntry(ctx context.Context, fp util.FullPath) (

     fsw.maybeReadHardLink(ctx, entry)

-    filer_pb.AfterEntryDeserialization(entry.Chunks)
+    filer_pb.AfterEntryDeserialization(entry.GetChunks())
     return
 }

@@ -239,7 +239,7 @@ func (fsw *FilerStoreWrapper) ListDirectoryEntries(ctx context.Context, dirPath
     // glog.V(4).Infof("ListDirectoryEntries %s from %s limit %d", dirPath, startFileName, limit)
     return actualStore.ListDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit, func(entry *Entry) bool {
         fsw.maybeReadHardLink(ctx, entry)
-        filer_pb.AfterEntryDeserialization(entry.Chunks)
+        filer_pb.AfterEntryDeserialization(entry.GetChunks())
         return eachEntryFunc(entry)
     })
 }
@@ -257,7 +257,7 @@ func (fsw *FilerStoreWrapper) ListDirectoryPrefixedEntries(ctx context.Context,
     // glog.V(4).Infof("ListDirectoryPrefixedEntries %s from %s prefix %s limit %d", dirPath, startFileName, prefix, limit)
     adjustedEntryFunc := func(entry *Entry) bool {
         fsw.maybeReadHardLink(ctx, entry)
-        filer_pb.AfterEntryDeserialization(entry.Chunks)
+        filer_pb.AfterEntryDeserialization(entry.GetChunks())
         return eachEntryFunc(entry)
     }
     lastFileName, err = actualStore.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, prefix, adjustedEntryFunc)

@@ -75,7 +75,7 @@ func (store *HbaseStore) InsertEntry(ctx context.Context, entry *filer.Entry) er
     if err != nil {
         return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
     }
-    if len(entry.Chunks) > filer.CountEntryChunksForGzip {
+    if len(entry.GetChunks()) > filer.CountEntryChunksForGzip {
         value = util.MaybeGzipData(value)
     }

@@ -86,7 +86,7 @@ func (store *LevelDBStore) InsertEntry(ctx context.Context, entry *filer.Entry)
         return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
     }

-    if len(entry.Chunks) > filer.CountEntryChunksForGzip {
+    if len(entry.GetChunks()) > filer.CountEntryChunksForGzip {
         value = weed_util.MaybeGzipData(value)
     }

@@ -96,7 +96,7 @@ func (store *LevelDBStore) InsertEntry(ctx context.Context, entry *filer.Entry)
         return fmt.Errorf("persisting %s : %v", entry.FullPath, err)
     }

-    // println("saved", entry.FullPath, "chunks", len(entry.Chunks))
+    // println("saved", entry.FullPath, "chunks", len(entry.GetChunks()))

     return nil
 }
@@ -126,7 +126,7 @@ func (store *LevelDBStore) FindEntry(ctx context.Context, fullpath weed_util.Ful
         return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
     }

-    // println("read", entry.FullPath, "chunks", len(entry.Chunks), "data", len(data), string(data))
+    // println("read", entry.FullPath, "chunks", len(entry.GetChunks()), "data", len(data), string(data))

     return entry, nil
 }

@@ -88,7 +88,7 @@ func (store *LevelDB2Store) InsertEntry(ctx context.Context, entry *filer.Entry)
         return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
     }

-    if len(entry.Chunks) > filer.CountEntryChunksForGzip {
+    if len(entry.GetChunks()) > filer.CountEntryChunksForGzip {
         value = weed_util.MaybeGzipData(value)
     }

@@ -98,7 +98,7 @@ func (store *LevelDB2Store) InsertEntry(ctx context.Context, entry *filer.Entry)
         return fmt.Errorf("persisting %s : %v", entry.FullPath, err)
     }

-    // println("saved", entry.FullPath, "chunks", len(entry.Chunks))
+    // println("saved", entry.FullPath, "chunks", len(entry.GetChunks()))

     return nil
 }
@@ -129,7 +129,7 @@ func (store *LevelDB2Store) FindEntry(ctx context.Context, fullpath weed_util.Fu
         return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
     }

-    // println("read", entry.FullPath, "chunks", len(entry.Chunks), "data", len(data), string(data))
+    // println("read", entry.FullPath, "chunks", len(entry.GetChunks()), "data", len(data), string(data))

     return entry, nil
 }
@@ -208,7 +208,7 @@ func (store *LevelDB2Store) ListDirectoryPrefixedEntries(ctx context.Context, di
             FullPath: weed_util.NewFullPath(string(dirPath), fileName),
         }

-        // println("list", entry.FullPath, "chunks", len(entry.Chunks))
+        // println("list", entry.FullPath, "chunks", len(entry.GetChunks()))
         if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(iter.Value())); decodeErr != nil {
             err = decodeErr
             glog.V(0).Infof("list %s : %v", entry.FullPath, err)

@@ -185,7 +185,7 @@ func (store *LevelDB3Store) InsertEntry(ctx context.Context, entry *filer.Entry)
         return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
     }

-    if len(entry.Chunks) > filer.CountEntryChunksForGzip {
+    if len(entry.GetChunks()) > filer.CountEntryChunksForGzip {
         value = weed_util.MaybeGzipData(value)
     }

@@ -195,7 +195,7 @@ func (store *LevelDB3Store) InsertEntry(ctx context.Context, entry *filer.Entry)
         return fmt.Errorf("persisting %s : %v", entry.FullPath, err)
     }

-    // println("saved", entry.FullPath, "chunks", len(entry.Chunks))
+    // println("saved", entry.FullPath, "chunks", len(entry.GetChunks()))

     return nil
 }
@@ -232,7 +232,7 @@ func (store *LevelDB3Store) FindEntry(ctx context.Context, fullpath weed_util.Fu
         return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
     }

-    // println("read", entry.FullPath, "chunks", len(entry.Chunks), "data", len(data), string(data))
+    // println("read", entry.FullPath, "chunks", len(entry.GetChunks()), "data", len(data), string(data))

     return entry, nil
 }
@@ -336,7 +336,7 @@ func (store *LevelDB3Store) ListDirectoryPrefixedEntries(ctx context.Context, di
             FullPath: weed_util.NewFullPath(string(dirPath), fileName),
         }

-        // println("list", entry.FullPath, "chunks", len(entry.Chunks))
+        // println("list", entry.FullPath, "chunks", len(entry.GetChunks()))
         if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(iter.Value())); decodeErr != nil {
             err = decodeErr
             glog.V(0).Infof("list %s : %v", entry.FullPath, err)

@@ -107,7 +107,7 @@ func (store *MongodbStore) UpdateEntry(ctx context.Context, entry *filer.Entry)
         return fmt.Errorf("encode %s: %s", entry.FullPath, err)
     }

-    if len(entry.Chunks) > filer.CountEntryChunksForGzip {
+    if len(entry.GetChunks()) > filer.CountEntryChunksForGzip {
         meta = util.MaybeGzipData(meta)
     }

@@ -8,7 +8,7 @@ import (
 )

 func (entry *Entry) IsInRemoteOnly() bool {
-    return len(entry.Chunks) == 0 && entry.Remote != nil && entry.Remote.RemoteSize > 0
+    return len(entry.GetChunks()) == 0 && entry.Remote != nil && entry.Remote.RemoteSize > 0
 }

 func MapFullPathToRemoteStorageLocation(localMountedDir util.FullPath, remoteMountedLocation *remote_pb.RemoteStorageLocation, fp util.FullPath) *remote_pb.RemoteStorageLocation {

@@ -23,7 +23,7 @@ func ReadEntry(masterClient *wdclient.MasterClient, filerClient filer_pb.Seaweed
         return err
     }

-    return StreamContent(masterClient, byteBuffer, respLookupEntry.Entry.Chunks, 0, int64(FileSize(respLookupEntry.Entry)))
+    return StreamContent(masterClient, byteBuffer, respLookupEntry.Entry.GetChunks(), 0, int64(FileSize(respLookupEntry.Entry)))

 }

@@ -56,7 +56,7 @@ func (store *UniversalRedisStore) doInsertEntry(ctx context.Context, entry *file
         return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
     }

-    if len(entry.Chunks) > filer.CountEntryChunksForGzip {
+    if len(entry.GetChunks()) > filer.CountEntryChunksForGzip {
         value = util.MaybeGzipData(value)
     }

@@ -71,7 +71,7 @@ func (store *UniversalRedis2Store) doInsertEntry(ctx context.Context, entry *fil
         return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
     }

-    if len(entry.Chunks) > filer.CountEntryChunksForGzip {
+    if len(entry.GetChunks()) > filer.CountEntryChunksForGzip {
         value = util.MaybeGzipData(value)
     }

@@ -56,7 +56,7 @@ func (store *UniversalRedis3Store) doInsertEntry(ctx context.Context, entry *fil
         return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
     }

-    if len(entry.Chunks) > filer.CountEntryChunksForGzip {
+    if len(entry.GetChunks()) > filer.CountEntryChunksForGzip {
         value = util.MaybeGzipData(value)
     }

@@ -53,7 +53,7 @@ func (store *UniversalRedisLuaStore) InsertEntry(ctx context.Context, entry *fil
         return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
     }

-    if len(entry.Chunks) > filer.CountEntryChunksForGzip {
+    if len(entry.GetChunks()) > filer.CountEntryChunksForGzip {
         value = util.MaybeGzipData(value)
     }

@@ -108,7 +108,7 @@ func (store *RocksDBStore) InsertEntry(ctx context.Context, entry *filer.Entry)
         return fmt.Errorf("persisting %s : %v", entry.FullPath, err)
     }

-    // println("saved", entry.FullPath, "chunks", len(entry.Chunks))
+    // println("saved", entry.FullPath, "chunks", len(entry.GetChunks()))

     return nil
 }
@@ -140,7 +140,7 @@ func (store *RocksDBStore) FindEntry(ctx context.Context, fullpath weed_util.Ful
         return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
     }

-    // println("read", entry.FullPath, "chunks", len(entry.Chunks), "data", len(data), string(data))
+    // println("read", entry.FullPath, "chunks", len(entry.GetChunks()), "data", len(data), string(data))

     return entry, nil
 }
@@ -259,7 +259,7 @@ func (store *RocksDBStore) ListDirectoryPrefixedEntries(ctx context.Context, dir
         }
         lastFileName = fileName

-        // println("list", entry.FullPath, "chunks", len(entry.Chunks))
+        // println("list", entry.FullPath, "chunks", len(entry.GetChunks()))
         if decodeErr := entry.DecodeAttributesAndChunks(value); decodeErr != nil {
             err = decodeErr
             glog.V(0).Infof("list %s : %v", entry.FullPath, err)

@@ -30,7 +30,7 @@ func HasData(entry *filer_pb.Entry) bool {
         return true
     }

-    return len(entry.Chunks) > 0
+    return len(entry.GetChunks()) > 0
 }

 func IsSameData(a, b *filer_pb.Entry) bool {
@@ -64,7 +64,7 @@ func NewFileReader(filerClient filer_pb.FilerClient, entry *filer_pb.Entry) io.R
     if len(entry.Content) > 0 {
         return bytes.NewReader(entry.Content)
     }
-    return NewChunkStreamReader(filerClient, entry.Chunks)
+    return NewChunkStreamReader(filerClient, entry.GetChunks())
 }

 func StreamContent(masterClient wdclient.HasLookupFileIdFunction, writer io.Writer, chunks []*filer_pb.FileChunk, offset int64, size int64) error {

@@ -144,7 +144,7 @@ func (store *YdbStore) insertOrUpdateEntry(ctx context.Context, entry *filer.Ent
         return fmt.Errorf("encode %s: %s", entry.FullPath, err)
     }

-    if len(entry.Chunks) > filer.CountEntryChunksForGzip {
+    if len(entry.GetChunks()) > filer.CountEntryChunksForGzip {
         meta = util.MaybeGzipData(meta)
     }
     tablePathPrefix, shortDir := store.getPrefix(ctx, &dir)
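Most of the store-backend hunks above touch the same guard: when an entry carries many chunks, its serialized metadata is gzip-compressed before being persisted. A condensed sketch of that shared pattern, with names taken from the diff and the surrounding function bodies abbreviated:

    meta, err := entry.EncodeAttributesAndChunks()
    if err != nil {
        return fmt.Errorf("encode %s: %s", entry.FullPath, err)
    }
    if len(entry.GetChunks()) > filer.CountEntryChunksForGzip {
        // compress metadata for entries with long chunk lists
        meta = util.MaybeGzipData(meta)
    }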