* test: restore maintenance mode coverage in TestVolumeMarkReadonlyWritableErrorPaths

  PR #8360 removed the maintenance mode assertions because the refactored check ordering (volume lookup before maintenance check) caused the original test to hit "not found" instead of "maintenance mode", since the test used a non-existent volume ID. Restore coverage by allocating a real volume, then verifying:

  - existing volume in maintenance mode returns "maintenance mode"
  - non-existent volume in maintenance mode still returns "not found" (validating the new check ordering)

* test: add coverage for ScrubVolume MarkBrokenVolumesReadonly flag

  PR #8360 added the mark_broken_volumes_readonly field to ScrubVolumeRequest but no tests exercised the new logic paths. Add three integration tests:

  - HealthyVolume: the flag is a no-op when scrub finds no broken volumes
  - CorruptVolume: a corrupted .idx triggers broken detection; without the flag the volume stays writable, with the flag it becomes read-only
  - MaintenanceMode: makeVolumeReadonly fails under maintenance and ScrubVolume propagates the error via errors.Join

* refactor: extract CorruptIndexFile and EnableMaintenanceMode test helpers

  Move the duplicated idx corruption and maintenance mode setup into framework.CorruptIndexFile() and framework.EnableMaintenanceMode() helpers. Use defer for the file close in the corruption helper.
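
For reference, a minimal sketch of what the extracted corruption helper could look like. This is a hypothetical shape, not the framework source: only the helper names and the EnableMaintenanceMode call shape `(t, ctx, client)` are visible in this file, so the signature and body below are assumptions.

```go
// CorruptIndexFile sketch: overwrite the volume's .idx file in place so a
// later scrub flags the volume as broken. Each idx entry is 16 bytes
// (8-byte needle key, 4-byte offset, 4-byte size), so a short, misaligned
// junk write at offset 0 makes the index unparsable. Note the defer for the
// file close, as called out in the commit message.
func CorruptIndexFile(t *testing.T, idxPath string) {
	t.Helper()
	f, err := os.OpenFile(idxPath, os.O_WRONLY, 0)
	if err != nil {
		t.Fatalf("open idx file %s: %v", idxPath, err)
	}
	defer f.Close()
	if _, err := f.WriteAt([]byte("corrupted-index"), 0); err != nil {
		t.Fatalf("corrupt idx file %s: %v", idxPath, err)
	}
}
```

The new scrub flag would then be exercised roughly as follows; the Go field name MarkBrokenVolumesReadonly is derived from the proto field mark_broken_volumes_readonly, and the other request fields are assumptions:

```go
// with the flag set, a scrub that detects a broken volume also marks it
// read-only instead of leaving it writable
_, err := grpcClient.ScrubVolume(ctx, &volume_server_pb.ScrubVolumeRequest{
	VolumeId:                  volumeID,
	MarkBrokenVolumesReadonly: true,
})
```
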
package volume_server_grpc_test

import (
	"context"
	"strings"
	"testing"
	"time"

	"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
	"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
	"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
)

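// TestVolumeMarkReadonlyAndWritableLifecycle covers the happy path: a freshly
// allocated volume is marked read-only (Persist=false), VolumeStatus reports
// IsReadOnly=true, and marking it writable again flips the status back.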
func TestVolumeMarkReadonlyAndWritableLifecycle(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()

	const volumeID = uint32(72)
	framework.AllocateVolume(t, grpcClient, volumeID, "")

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	_, err := grpcClient.VolumeMarkReadonly(ctx, &volume_server_pb.VolumeMarkReadonlyRequest{
		VolumeId: volumeID,
		Persist:  false,
	})
	if err != nil {
		t.Fatalf("VolumeMarkReadonly failed: %v", err)
	}

	readOnlyStatus, err := grpcClient.VolumeStatus(ctx, &volume_server_pb.VolumeStatusRequest{VolumeId: volumeID})
	if err != nil {
		t.Fatalf("VolumeStatus after readonly failed: %v", err)
	}
	if !readOnlyStatus.GetIsReadOnly() {
		t.Fatalf("VolumeStatus expected readonly=true after VolumeMarkReadonly")
	}

	_, err = grpcClient.VolumeMarkWritable(ctx, &volume_server_pb.VolumeMarkWritableRequest{VolumeId: volumeID})
	if err != nil {
		t.Fatalf("VolumeMarkWritable failed: %v", err)
	}

	writableStatus, err := grpcClient.VolumeStatus(ctx, &volume_server_pb.VolumeStatusRequest{VolumeId: volumeID})
	if err != nil {
		t.Fatalf("VolumeStatus after writable failed: %v", err)
	}
	if writableStatus.GetIsReadOnly() {
		t.Fatalf("VolumeStatus expected readonly=false after VolumeMarkWritable")
	}
}

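// TestVolumeMarkReadonlyPersistTrue verifies that marking a volume read-only
// with Persist=true also succeeds and is reflected by VolumeStatus.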
func TestVolumeMarkReadonlyPersistTrue(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()

	const volumeID = uint32(74)
	framework.AllocateVolume(t, grpcClient, volumeID, "")

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	_, err := grpcClient.VolumeMarkReadonly(ctx, &volume_server_pb.VolumeMarkReadonlyRequest{
		VolumeId: volumeID,
		Persist:  true,
	})
	if err != nil {
		t.Fatalf("VolumeMarkReadonly persist=true failed: %v", err)
	}

	statusResp, err := grpcClient.VolumeStatus(ctx, &volume_server_pb.VolumeStatusRequest{VolumeId: volumeID})
	if err != nil {
		t.Fatalf("VolumeStatus after persist readonly failed: %v", err)
	}
	if !statusResp.GetIsReadOnly() {
		t.Fatalf("VolumeStatus expected readonly=true after persist readonly")
	}
}

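// TestVolumeMarkReadonlyWritableErrorPaths exercises the check ordering from
// PR #8360: the volume lookup runs before the maintenance-mode check, so a
// missing volume returns "not found" with or without maintenance mode, while
// an existing volume under maintenance returns a "maintenance mode" error.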
func TestVolumeMarkReadonlyWritableErrorPaths(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()

	const volumeID = uint32(75)
	framework.AllocateVolume(t, grpcClient, volumeID, "")

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// non-existent volumes should return "not found"
	_, err := grpcClient.VolumeMarkReadonly(ctx, &volume_server_pb.VolumeMarkReadonlyRequest{VolumeId: 98771, Persist: true})
	if err == nil || !strings.Contains(err.Error(), "not found") {
		t.Fatalf("VolumeMarkReadonly missing-volume error mismatch: %v", err)
	}

	_, err = grpcClient.VolumeMarkWritable(ctx, &volume_server_pb.VolumeMarkWritableRequest{VolumeId: 98772})
	if err == nil || !strings.Contains(err.Error(), "not found") {
		t.Fatalf("VolumeMarkWritable missing-volume error mismatch: %v", err)
	}

	// enter maintenance mode
	framework.EnableMaintenanceMode(t, ctx, grpcClient)

	// an existing volume in maintenance mode should return a "maintenance mode" error
	_, err = grpcClient.VolumeMarkReadonly(ctx, &volume_server_pb.VolumeMarkReadonlyRequest{VolumeId: volumeID, Persist: true})
	if err == nil || !strings.Contains(err.Error(), "maintenance mode") {
		t.Fatalf("VolumeMarkReadonly maintenance error mismatch: %v", err)
	}

	_, err = grpcClient.VolumeMarkWritable(ctx, &volume_server_pb.VolumeMarkWritableRequest{VolumeId: volumeID})
	if err == nil || !strings.Contains(err.Error(), "maintenance mode") {
		t.Fatalf("VolumeMarkWritable maintenance error mismatch: %v", err)
	}

	// a non-existent volume in maintenance mode should still return "not found"
	// (the volume lookup happens before the maintenance check)
	_, err = grpcClient.VolumeMarkReadonly(ctx, &volume_server_pb.VolumeMarkReadonlyRequest{VolumeId: 98773, Persist: true})
	if err == nil || !strings.Contains(err.Error(), "not found") {
		t.Fatalf("VolumeMarkReadonly missing-volume in maintenance error mismatch: %v", err)
	}

	_, err = grpcClient.VolumeMarkWritable(ctx, &volume_server_pb.VolumeMarkWritableRequest{VolumeId: 98774})
	if err == nil || !strings.Contains(err.Error(), "not found") {
		t.Fatalf("VolumeMarkWritable missing-volume in maintenance error mismatch: %v", err)
	}
}

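// TestDeleteCollectionRemovesVolumeAndIsIdempotent verifies that deleting a
// collection removes its volume (VolumeStatus subsequently fails with
// "not found volume") and that repeating DeleteCollection is not an error.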
func TestDeleteCollectionRemovesVolumeAndIsIdempotent(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()

	const volumeID = uint32(73)
	const collection = "it-delete-collection"

	framework.AllocateVolume(t, grpcClient, volumeID, collection)

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	_, err := grpcClient.VolumeStatus(ctx, &volume_server_pb.VolumeStatusRequest{VolumeId: volumeID})
	if err != nil {
		t.Fatalf("VolumeStatus before DeleteCollection failed: %v", err)
	}

	_, err = grpcClient.DeleteCollection(ctx, &volume_server_pb.DeleteCollectionRequest{Collection: collection})
	if err != nil {
		t.Fatalf("DeleteCollection existing collection failed: %v", err)
	}

	_, err = grpcClient.VolumeStatus(ctx, &volume_server_pb.VolumeStatusRequest{VolumeId: volumeID})
	if err == nil {
		t.Fatalf("VolumeStatus should fail after collection delete")
	}
	if !strings.Contains(err.Error(), "not found volume") {
		t.Fatalf("VolumeStatus after DeleteCollection error mismatch: %v", err)
	}

	_, err = grpcClient.DeleteCollection(ctx, &volume_server_pb.DeleteCollectionRequest{Collection: collection})
	if err != nil {
		t.Fatalf("DeleteCollection idempotent retry failed: %v", err)
	}
}