* Fix: prevent panic when swap file creation fails

* weed mount: fix race condition in swap file initialization

  Ensure thread-safe access to sf.file and other state in NewSwapFileChunk and FreeResource by using sf.chunkTrackingLock consistently. Also set sf.file to nil after closing to prevent reuse.

* weed mount: improve swap directory creation logic

  - Check the error from os.MkdirAll and log it if it fails.
  - Use 0700 permissions for the swap directory for better security.
  - Improve error logging context.

* weed mount: add unit tests for swap file creation

  Add tests to verify:
  - Concurrent initialization of the swap file.
  - Correct directory permissions (0700).
  - Automatic directory recreation if deleted.

* weed mount: fix thread-safety in swap file unit tests

  Use atomic.Uint32 to track failures within goroutines in TestSwapFile_NewSwapFileChunk_Concurrent to avoid unsafe calls to t.Errorf from multiple goroutines.

* weed mount: simplify swap file creation logic

  Refactor the directory check and retry logic for better readability and to avoid reusing the main error variable for directory creation errors. Remove redundant error logging.

* weed mount: improve error checking in swap file tests

  Explicitly check whether NewSwapFileChunk returns nil to provide more informative failures.

* weed mount: update DirtyPages interface to return error

  Propagate errors from SaveDataAt when swap file creation fails. This prevents potential panics in the write path.

* weed mount: handle AddPage errors in write paths

  Update ChunkedDirtyPages and PageWriter to propagate errors, and update WFS.Write and WFS.CopyFileRange to return fuse.EIO on failure.

* weed mount: update swap directory creation error message

  Change "recreate" to "create/recreate" to better reflect that this path is also taken during the initial creation of the swap directory.

---------

Co-authored-by: lixiang58 <lixiang58@lenovo.com>
Co-authored-by: Chris Lu <chris.lu@gmail.com>
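The core of the swap-file hardening described above is a lazy, lock-protected initialization that reports failures instead of panicking. The sketch below only illustrates that pattern; the swapFile type and the newChunk/free methods are hypothetical stand-ins for the real NewSwapFileChunk/FreeResource code in the page_writer package, which guards its state with sf.chunkTrackingLock.

package swapsketch

import (
	"fmt"
	"os"
	"sync"
)

// swapFile is a hypothetical stand-in for the real swap file state.
type swapFile struct {
	dir  string
	lock sync.Mutex // plays the role of sf.chunkTrackingLock
	file *os.File
}

// newChunk lazily creates the swap file under the lock and returns an error
// instead of panicking when the directory or file cannot be created.
func (sf *swapFile) newChunk() (*os.File, error) {
	sf.lock.Lock()
	defer sf.lock.Unlock()
	if sf.file == nil {
		// create/recreate the swap directory; 0700 keeps it private
		if err := os.MkdirAll(sf.dir, 0700); err != nil {
			return nil, fmt.Errorf("create/recreate swap dir %s: %v", sf.dir, err)
		}
		f, err := os.CreateTemp(sf.dir, "swap")
		if err != nil {
			return nil, fmt.Errorf("create swap file in %s: %v", sf.dir, err)
		}
		sf.file = f
	}
	return sf.file, nil
}

// free closes and removes the swap file and resets the handle to nil so a
// later newChunk call cannot reuse a closed file.
func (sf *swapFile) free() {
	sf.lock.Lock()
	defer sf.lock.Unlock()
	if sf.file != nil {
		name := sf.file.Name()
		sf.file.Close()
		os.Remove(name)
		sf.file = nil
	}
}

In the write path, a failed swap file is no longer swallowed: AddPage in the file below returns the error, PageWriter propagates it, and WFS.Write and WFS.CopyFileRange map it to fuse.EIO.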
package mount

import (
	"fmt"
	"io"
	"sync"

	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/mount/page_writer"
	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
)

// ChunkedDirtyPages buffers dirty page writes for a single file handle and
// hands them to an UploadPipeline that flushes chunks to storage.
type ChunkedDirtyPages struct {
	fh             *FileHandle
	writeWaitGroup sync.WaitGroup
	lastErr        error
	collection     string
	replication    string
	uploadPipeline *page_writer.UploadPipeline
	hasWrites      bool
}

// compile-time check that ChunkedDirtyPages implements page_writer.DirtyPages
var (
	_ = page_writer.DirtyPages(&ChunkedDirtyPages{})
)

// newMemoryChunkPages creates dirty pages for a file handle, backed by an
// upload pipeline whose swap files live under the unique write cache directory.
func newMemoryChunkPages(fh *FileHandle, chunkSize int64) *ChunkedDirtyPages {

	dirtyPages := &ChunkedDirtyPages{
		fh: fh,
	}

	swapFileDir := fh.wfs.option.getUniqueCacheDirForWrite()

	dirtyPages.uploadPipeline = page_writer.NewUploadPipeline(fh.wfs.concurrentWriters, chunkSize,
		dirtyPages.saveChunkedFileIntervalToStorage, fh.wfs.option.ConcurrentWriters, swapFileDir)

	return dirtyPages
}

// AddPage buffers data written at offset and returns any error from the upload
// pipeline (e.g. when swap file creation fails) so callers can surface it.
func (pages *ChunkedDirtyPages) AddPage(offset int64, data []byte, isSequential bool, tsNs int64) error {
	pages.hasWrites = true

	glog.V(4).Infof("%v memory AddPage [%d, %d)", pages.fh.fh, offset, offset+int64(len(data)))
	_, err := pages.uploadPipeline.SaveDataAt(data, offset, isSequential, tsNs)

	return err
}

// FlushData forces all buffered pages to storage and reports the last upload error, if any.
func (pages *ChunkedDirtyPages) FlushData() error {
	if !pages.hasWrites {
		return nil
	}
	pages.uploadPipeline.FlushAll()
	if pages.lastErr != nil {
		return fmt.Errorf("flush data: %v", pages.lastErr)
	}
	return nil
}

// ReadDirtyDataAt fills data with any not-yet-flushed content at startOffset
// and returns the stop offset of the dirty data that was read.
func (pages *ChunkedDirtyPages) ReadDirtyDataAt(data []byte, startOffset int64, tsNs int64) (maxStop int64) {
	if !pages.hasWrites {
		return
	}
	return pages.uploadPipeline.MaybeReadDataAt(data, startOffset, tsNs)
}

// saveChunkedFileIntervalToStorage uploads one interval of dirty data as a chunk;
// on failure it records the error in lastErr for FlushData to report.
func (pages *ChunkedDirtyPages) saveChunkedFileIntervalToStorage(reader io.Reader, offset int64, size int64, modifiedTsNs int64, cleanupFn func()) {

	defer cleanupFn()

	fileFullPath := pages.fh.FullPath()
	fileName := fileFullPath.Name()
	chunk, err := pages.fh.wfs.saveDataAsChunk(fileFullPath)(reader, fileName, offset, modifiedTsNs)
	if err != nil {
		glog.V(0).Infof("%v saveToStorage [%d,%d): %v", fileFullPath, offset, offset+size, err)
		pages.lastErr = err
		return
	}
	pages.fh.AddChunks([]*filer_pb.FileChunk{chunk})
	pages.fh.entryChunkGroup.AddChunk(chunk)
	glog.V(3).Infof("%v saveToStorage %s [%d,%d)", fileFullPath, chunk.FileId, offset, offset+size)
}

func (pages *ChunkedDirtyPages) Destroy() {
	pages.uploadPipeline.Shutdown()
}

func (pages *ChunkedDirtyPages) LockForRead(startOffset, stopOffset int64) {
	pages.uploadPipeline.LockForRead(startOffset, stopOffset)
}

func (pages *ChunkedDirtyPages) UnlockForRead(startOffset, stopOffset int64) {
	pages.uploadPipeline.UnlockForRead(startOffset, stopOffset)
}