the `cp file` command now works

1. consolidate to filer_pb.FileChunk
2. dir add file, mkdir
3. file flush, write

updates still have issues
This commit is contained in:
Chris Lu
2018-05-16 00:08:44 -07:00
parent c7a71d35b0
commit b303a02461
14 changed files with 619 additions and 102 deletions

View File

@@ -9,6 +9,8 @@ import (
"github.com/chrislusf/seaweedfs/weed/filer2"
"path/filepath"
"github.com/chrislusf/seaweedfs/weed/glog"
"time"
"os"
)
func (fs *FilerServer) LookupDirectoryEntry(ctx context.Context, req *filer_pb.LookupDirectoryEntryRequest) (*filer_pb.LookupDirectoryEntryResponse, error) {
@@ -21,16 +23,11 @@ func (fs *FilerServer) LookupDirectoryEntry(ctx context.Context, req *filer_pb.L
return nil, fmt.Errorf("%s not found under %s", req.Name, req.Directory)
}
var fileId string
if !entry.IsDirectory() && len(entry.Chunks) > 0 {
fileId = string(entry.Chunks[0].Fid)
}
return &filer_pb.LookupDirectoryEntryResponse{
Entry: &filer_pb.Entry{
Name: req.Name,
IsDirectory: entry.IsDirectory(),
FileId: fileId,
Chunks: entry.Chunks,
},
}, nil
}
@@ -44,16 +41,12 @@ func (fs *FilerServer) ListEntries(ctx context.Context, req *filer_pb.ListEntrie
resp := &filer_pb.ListEntriesResponse{}
for _, entry := range entries {
var fileId string
if !entry.IsDirectory() && len(entry.Chunks) > 0 {
fileId = string(entry.Chunks[0].Fid)
}
glog.V(0).Infof("%s attr=%v size=%d", entry.Name(), entry.Attr, filer2.Chunks(entry.Chunks).TotalSize())
resp.Entries = append(resp.Entries, &filer_pb.Entry{
Name: entry.Name(),
IsDirectory: entry.IsDirectory(),
FileId: fileId,
Chunks: entry.Chunks,
Attributes: &filer_pb.FuseAttributes{
FileSize: filer2.Chunks(entry.Chunks).TotalSize(),
Mtime: entry.Mtime.Unix(),
@@ -106,15 +99,63 @@ func (fs *FilerServer) GetFileContent(ctx context.Context, req *filer_pb.GetFile
}, nil
}
// CreateEntry creates a new file or directory entry in the filer store at
// the full path formed by joining the request's directory and entry name,
// copying the request attributes (mtime, mode, uid, gid) onto the entry.
//
// NOTE(review): Crtime is populated from the request's Mtime — presumably
// because the request carries no separate creation time; confirm upstream.
func (fs *FilerServer) CreateEntry(ctx context.Context, req *filer_pb.CreateEntryRequest) (resp *filer_pb.CreateEntryResponse, err error) {
	err = fs.filer.CreateEntry(&filer2.Entry{
		FullPath: filer2.FullPath(filepath.Join(req.Directory, req.Entry.Name)),
		Attr: filer2.Attr{
			Mtime:  time.Unix(req.Entry.Attributes.Mtime, 0),
			Crtime: time.Unix(req.Entry.Attributes.Mtime, 0),
			Mode:   os.FileMode(req.Entry.Attributes.FileMode),
			Uid:    req.Entry.Attributes.Uid,
			Gid:    req.Entry.Attributes.Gid,
		},
	})
	// The original's empty `if err == nil { }` block was dead code and has
	// been removed; any store error is simply propagated to the caller.
	return &filer_pb.CreateEntryResponse{}, err
}
// AppendFileChunks appends the chunks carried in the request entry to the
// file identified by the request's directory plus entry name, delegating
// the actual store mutation to the filer.
func (fs *FilerServer) AppendFileChunks(ctx context.Context, req *filer_pb.AppendFileChunksRequest) (*filer_pb.AppendFileChunksResponse, error) {
	fullPath := filer2.FullPath(filepath.Join(req.Directory, req.Entry.Name))
	appendErr := fs.filer.AppendFileChunk(fullPath, req.Entry.Chunks)
	return &filer_pb.AppendFileChunksResponse{}, appendErr
}
func (fs *FilerServer) DeleteEntry(ctx context.Context, req *filer_pb.DeleteEntryRequest) (resp *filer_pb.DeleteEntryResponse, err error) {
entry, err := fs.filer.DeleteEntry(filer2.FullPath(filepath.Join(req.Directory, req.Name)))
if err == nil {
for _, chunk := range entry.Chunks {
fid := string(chunk.Fid)
if err = operation.DeleteFile(fs.getMasterNode(), fid, fs.jwt(fid)); err != nil {
glog.V(0).Infof("deleting file %s: %v", fid, err)
if err = operation.DeleteFile(fs.getMasterNode(), chunk.FileId, fs.jwt(chunk.FileId)); err != nil {
glog.V(0).Infof("deleting file %s: %v", chunk.FileId, err)
}
}
}
return &filer_pb.DeleteEntryResponse{}, err
}
// AssignVolume asks the master to assign file ids for the requested count,
// replication, and collection, and relays the assignment (fid, count, url,
// public url) back to the gRPC caller.
func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVolumeRequest) (resp *filer_pb.AssignVolumeResponse, err error) {
	assignRequest := &operation.VolumeAssignRequest{
		Count:       uint64(req.Count),
		Replication: req.Replication,
		Collection:  req.Collection,
	}

	assignment, assignErr := operation.Assign(fs.master, assignRequest)
	if assignErr != nil {
		return nil, fmt.Errorf("assign volume: %v", assignErr)
	}
	// The master can report a failure inside an otherwise successful reply.
	if assignment.Error != "" {
		return nil, fmt.Errorf("assign volume result: %v", assignment.Error)
	}

	resp = &filer_pb.AssignVolumeResponse{
		FileId:    assignment.Fid,
		Count:     int32(assignment.Count),
		Url:       assignment.Url,
		PublicUrl: assignment.PublicUrl,
	}
	return resp, nil
}

View File

@@ -6,6 +6,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/filer2"
"strconv"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)
func (fs *FilerServer) registerHandler(w http.ResponseWriter, r *http.Request) {
@@ -22,9 +23,9 @@ func (fs *FilerServer) registerHandler(w http.ResponseWriter, r *http.Request) {
Attr: filer2.Attr{
Mode: 0660,
},
Chunks: []filer2.FileChunk{{
Fid: filer2.FileId(fileId),
Size: fileSize,
Chunks: []*filer_pb.FileChunk{{
FileId: fileId,
Size: fileSize,
}},
}
err = fs.filer.CreateEntry(entry)

View File

@@ -98,7 +98,7 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request,
}
// FIXME pick the right fid
fileId := string(entry.Chunks[0].Fid)
fileId := entry.Chunks[0].FileId
urlLocation, err := operation.LookupFileId(fs.getMasterNode(), fileId)
if err != nil {

View File

@@ -23,6 +23,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/storage"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)
type FilerPostResult struct {
@@ -80,7 +81,7 @@ func (fs *FilerServer) queryFileInfoByPath(w http.ResponseWriter, r *http.Reques
glog.V(0).Infoln("failing to find path in filer store", path, err.Error())
writeJsonError(w, r, http.StatusInternalServerError, err)
} else if found {
fileId = string(entry.Chunks[0].Fid)
fileId = entry.Chunks[0].FileId
urlLocation, err = operation.LookupFileId(fs.getMasterNode(), fileId)
if err != nil {
glog.V(1).Infoln("operation LookupFileId %s failed, err is %s", fileId, err.Error())
@@ -318,7 +319,7 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) {
// also delete the old fid unless PUT operation
if r.Method != "PUT" {
if found, entry, err := fs.filer.FindEntry(filer2.FullPath(path)); err == nil && found {
oldFid := string(entry.Chunks[0].Fid)
oldFid := entry.Chunks[0].FileId
operation.DeleteFile(fs.getMasterNode(), oldFid, fs.jwt(oldFid))
} else if err != nil && err != filer.ErrNotFound {
glog.V(0).Infof("error %v occur when finding %s in filer store", err, path)
@@ -331,9 +332,9 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) {
Attr: filer2.Attr{
Mode: 0660,
},
Chunks: []filer2.FileChunk{{
Fid: filer2.FileId(fileId),
Size: uint64(r.ContentLength),
Chunks: []*filer_pb.FileChunk{{
FileId: fileId,
Size: uint64(r.ContentLength),
}},
}
if db_err := fs.filer.CreateEntry(entry); db_err != nil {
@@ -415,7 +416,7 @@ func (fs *FilerServer) doAutoChunk(w http.ResponseWriter, r *http.Request, conte
fileName = path.Base(fileName)
}
var fileChunks []filer2.FileChunk
var fileChunks []*filer_pb.FileChunk
totalBytesRead := int64(0)
tmpBufferSize := int32(1024 * 1024)
@@ -455,8 +456,8 @@ func (fs *FilerServer) doAutoChunk(w http.ResponseWriter, r *http.Request, conte
// Save to chunk manifest structure
fileChunks = append(fileChunks,
filer2.FileChunk{
Fid: filer2.FileId(fileId),
&filer_pb.FileChunk{
FileId: fileId,
Offset: chunkOffset,
Size: uint64(chunkBufOffset),
},
@@ -483,7 +484,7 @@ func (fs *FilerServer) doAutoChunk(w http.ResponseWriter, r *http.Request, conte
if r.Method != "PUT" {
if found, entry, err := fs.filer.FindEntry(filer2.FullPath(path)); found && err == nil {
for _, chunk := range entry.Chunks {
oldFid := string(chunk.Fid)
oldFid := chunk.FileId
operation.DeleteFile(fs.getMasterNode(), oldFid, fs.jwt(oldFid))
}
} else if err != nil {
@@ -535,7 +536,7 @@ func (fs *FilerServer) DeleteHandler(w http.ResponseWriter, r *http.Request) {
if entry != nil && !entry.IsDirectory() {
for _, chunk := range entry.Chunks {
oldFid := string(chunk.Fid)
oldFid := chunk.FileId
operation.DeleteFile(fs.getMasterNode(), oldFid, fs.jwt(oldFid))
}
}