Add volume server integration test suite and CI workflow (#8322)
* docs(volume_server): add integration test development plan
* test(volume_server): add integration harness and profile matrix
* test(volume_server/http): add admin and options integration coverage
* test(volume_server/grpc): add state and status integration coverage
* test(volume_server): auto-build weed binary and harden cluster startup
* test(volume_server/http): add upload read range head delete coverage
* test(volume_server/grpc): expand admin lifecycle and state coverage
* docs(volume_server): update progress tracker for implemented tests
* test(volume_server/http): cover if-none-match and invalid-range branches
* test(volume_server/grpc): add batch delete integration coverage
* docs(volume_server): log latest HTTP and gRPC test coverage
* ci(volume_server): run volume server integration tests in github actions
* test(volume_server/grpc): add needle status configure ping and leave coverage
* docs(volume_server): record additional grpc coverage progress
* test(volume_server/grpc): add vacuum integration coverage
* docs(volume_server): record vacuum test coverage progress
* test(volume_server/grpc): add read and write needle blob error-path coverage
* docs(volume_server): record data rw grpc coverage progress
* test(volume_server/http): add jwt auth integration coverage
* test(volume_server/grpc): add sync copy and stream error-path coverage
* docs(volume_server): record jwt and sync/copy test coverage
* test(volume_server/grpc): add scrub and query integration coverage
* test(volume_server/grpc): add volume tail sender and receiver coverage
* docs(volume_server): record scrub query and tail test progress
* test(volume_server/grpc): add readonly writable and collection lifecycle coverage
* test(volume_server/http): add public-port cors and method parity coverage
* test(volume_server/grpc): add blob meta and read-all success path coverage
* test(volume_server/grpc): expand scrub and query variation coverage
* test(volume_server/grpc): add tiering and remote fetch error-path coverage
* test(volume_server/http): add unchanged write and delete edge-case coverage
* test(volume_server/grpc): add ping unknown and unreachable target coverage
* test(volume_server/grpc): add volume delete only-empty variation coverage
* test(volume_server/http): add jwt fid-mismatch auth coverage
* test(volume_server/grpc): add scrub ec auto-select empty coverage
* test(volume_server/grpc): stabilize ping timestamp assertion
* docs(volume_server): update integration coverage progress log
* test(volume_server/grpc): add tier remote backend and config variation coverage
* docs(volume_server): record tier remote variation progress
* test(volume_server/grpc): add incremental copy and receive-file protocol coverage
* test(volume_server/http): add read path shape and if-modified-since coverage
* test(volume_server/grpc): add copy-file compaction and receive-file success coverage
* test(volume_server/http): add passthrough headers and static asset coverage
* test(volume_server/grpc): add ping filer unreachable coverage
* docs(volume_server): record copy receive and http variant progress
* test(volume_server/grpc): add erasure coding maintenance and missing-path coverage
* docs(volume_server): record initial erasure coding rpc coverage
* test(volume_server/http): add multi-range multipart response coverage
* docs(volume_server): record multi-range http coverage progress
* test(volume_server/grpc): add query empty-stripe no-match coverage
* docs(volume_server): record query no-match stream behavior coverage
* test(volume_server/http): add upload throttling timeout and replicate bypass coverage
* docs(volume_server): record upload throttling coverage progress
* test(volume_server/http): add download throttling timeout coverage
* docs(volume_server): record download throttling coverage progress
* test(volume_server/http): add jwt wrong-cookie fid mismatch coverage
* docs(volume_server): record jwt wrong-cookie mismatch coverage
* test(volume_server/http): add jwt expired-token rejection coverage
* docs(volume_server): record jwt expired-token coverage
* test(volume_server/http): add jwt query and cookie transport coverage
* docs(volume_server): record jwt token transport coverage
* test(volume_server/http): add jwt token-source precedence coverage
* docs(volume_server): record jwt token-source precedence coverage
* test(volume_server/http): add jwt header-over-cookie precedence coverage
* docs(volume_server): record jwt header cookie precedence coverage
* test(volume_server/http): add jwt query-over-cookie precedence coverage
* docs(volume_server): record jwt query cookie precedence coverage
* test(volume_server/grpc): add setstate version mismatch and nil-state coverage
* docs(volume_server): record setstate validation coverage
* test(volume_server/grpc): add readonly persist-true lifecycle coverage
* docs(volume_server): record readonly persist variation coverage
* test(volume_server/http): add options origin cors header coverage
* docs(volume_server): record options origin cors coverage
* test(volume_server/http): add trace unsupported-method parity coverage
* docs(volume_server): record trace method parity coverage
* test(volume_server/grpc): add batch delete cookie-check variation coverage
* docs(volume_server): record batch delete cookie-check coverage
* test(volume_server/grpc): add admin lifecycle missing and maintenance variants
* docs(volume_server): record admin lifecycle edge-case coverage
* test(volume_server/grpc): add mixed batch delete status matrix coverage
* docs(volume_server): record mixed batch delete matrix coverage
* test(volume_server/http): add jwt-profile ui access gating coverage
* docs(volume_server): record jwt ui-gating http coverage
* test(volume_server/http): add propfind unsupported-method parity coverage
* docs(volume_server): record propfind method parity coverage
* test(volume_server/grpc): add volume configure success and rollback-path coverage
* docs(volume_server): record volume configure branch coverage
* test(volume_server/grpc): add volume needle status missing-path coverage
* docs(volume_server): record volume needle status error-path coverage
* test(volume_server/http): add readDeleted query behavior coverage
* docs(volume_server): record readDeleted http behavior coverage
* test(volume_server/http): add delete ts override parity coverage
* docs(volume_server): record delete ts parity coverage
* test(volume_server/grpc): add invalid blob/meta offset coverage
* docs(volume_server): record invalid blob/meta offset coverage
* test(volume_server/grpc): add read-all mixed volume abort coverage
* docs(volume_server): record read-all mixed-volume abort coverage
* test(volume_server/http): assert head response body parity
* docs(volume_server): record head body parity assertion
* test(volume_server/grpc): assert status state and memory payload completeness
* docs(volume_server): record volume server status payload coverage
* test(volume_server/grpc): add batch delete chunk-manifest rejection coverage
* docs(volume_server): record batch delete chunk-manifest coverage
* test(volume_server/grpc): add query cookie-mismatch eof parity coverage
* docs(volume_server): record query cookie-mismatch parity coverage
* test(volume_server/grpc): add ping master success target coverage
* docs(volume_server): record ping master success coverage
* test(volume_server/http): add head if-none-match conditional parity
* docs(volume_server): record head if-none-match parity coverage
* test(volume_server/http): add head if-modified-since parity coverage
* docs(volume_server): record head if-modified-since parity coverage
* test(volume_server/http): add connect unsupported-method parity coverage
* docs(volume_server): record connect method parity coverage
* test(volume_server/http): assert options allow-headers cors parity
* docs(volume_server): record options allow-headers coverage
* test(volume_server/framework): add dual volume cluster integration harness
* test(volume_server/http): add missing-local read mode proxy redirect local coverage
* docs(volume_server): record read mode missing-local matrix coverage
* test(volume_server/http): add download over-limit replica proxy fallback coverage
* docs(volume_server): record download replica fallback coverage
* test(volume_server/http): add missing-local readDeleted proxy redirect parity coverage
* docs(volume_server): record missing-local readDeleted mode coverage
* test(volume_server/framework): add single-volume cluster with filer harness
* test(volume_server/grpc): add ping filer success target coverage
* docs(volume_server): record ping filer success coverage
* test(volume_server/http): add proxied-loop guard download timeout coverage
* docs(volume_server): record proxied-loop download coverage
* test(volume_server/http): add disabled upload and download limit coverage
* docs(volume_server): record disabled throttling path coverage
* test(volume_server/grpc): add idempotent volume server leave coverage
* docs(volume_server): record leave idempotence coverage
* test(volume_server/http): add redirect collection query preservation coverage
* docs(volume_server): record redirect collection query coverage
* test(volume_server/http): assert admin server headers on status and health
* docs(volume_server): record admin server header coverage
* test(volume_server/http): assert healthz request-id echo parity
* docs(volume_server): record healthz request-id parity coverage
* test(volume_server/http): add over-limit invalid-vid download branch coverage
* docs(volume_server): record over-limit invalid-vid branch coverage
* test(volume_server/http): add public-port static asset coverage
* docs(volume_server): record public static endpoint coverage
* test(volume_server/http): add public head method parity coverage
* docs(volume_server): record public head parity coverage
* test(volume_server/http): add throttling wait-then-proceed path coverage
* docs(volume_server): record throttling wait-then-proceed coverage
* test(volume_server/http): add read cookie-mismatch not-found coverage
* docs(volume_server): record read cookie-mismatch coverage
* test(volume_server/http): add throttling timeout-recovery coverage
* docs(volume_server): record throttling timeout-recovery coverage
* test(volume_server/grpc): add ec generate mount info unmount lifecycle coverage
* docs(volume_server): record ec positive lifecycle coverage
* test(volume_server/grpc): add ec shard read and blob delete lifecycle coverage
* docs(volume_server): record ec shard read/blob delete lifecycle coverage
* test(volume_server/grpc): add ec rebuild and to-volume error branch coverage
* docs(volume_server): record ec rebuild and to-volume branch coverage
* test(volume_server/grpc): add ec shards-to-volume success roundtrip coverage
* docs(volume_server): record ec shards-to-volume success coverage
* test(volume_server/grpc): add ec receive and copy-file missing-source coverage
* docs(volume_server): record ec receive and copy-file coverage
* test(volume_server/grpc): add ec last-shard delete cleanup coverage
* docs(volume_server): record ec last-shard delete cleanup coverage
* test(volume_server/grpc): add volume copy success path coverage
* docs(volume_server): record volume copy success coverage
* test(volume_server/grpc): add volume copy overwrite-destination coverage
* docs(volume_server): record volume copy overwrite coverage
* test(volume_server/http): add write error-path variant coverage
* docs(volume_server): record http write error-path coverage
* test(volume_server/http): add conditional header precedence coverage
* docs(volume_server): record conditional header precedence coverage
* test(volume_server/http): add oversized combined range guard coverage
* docs(volume_server): record oversized range guard coverage
* test(volume_server/http): add image resize and crop read coverage
* docs(volume_server): record image transform coverage
* test(volume_server/http): add chunk-manifest expansion and bypass coverage
* docs(volume_server): record chunk-manifest read coverage
* test(volume_server/http): add compressed read encoding matrix coverage
* docs(volume_server): record compressed read matrix coverage
* test(volume_server/grpc): add tail receiver source replication coverage
* docs(volume_server): record tail receiver replication coverage
* test(volume_server/grpc): add tail sender large-needle chunking coverage
* docs(volume_server): record tail sender chunking coverage
* test(volume_server/grpc): add ec-backed volume needle status coverage
* docs(volume_server): record ec-backed needle status coverage
* test(volume_server/grpc): add ec shard copy from peer success coverage
* docs(volume_server): record ec shard copy success coverage
* test(volume_server/http): add chunk-manifest delete child cleanup coverage
* docs(volume_server): record chunk-manifest delete cleanup coverage
* test(volume_server/http): add chunk-manifest delete failure-path coverage
* docs(volume_server): record chunk-manifest delete failure coverage
* test(volume_server/grpc): add ec shard copy source-unavailable coverage
* docs(volume_server): record ec shard copy source-unavailable coverage
* parallel
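All of the tests in this commit guard on testing.Short(), so the suite runs only in full (non-short) mode. A minimal local invocation might look like the following; the package path comes from the file paths in this diff, while the timeout value is an assumption rather than a documented project setting:

    # run the volume server integration suite (-short would skip every test below)
    go test -count=1 -timeout 30m ./test/volume_server/...

    # regular short runs skip these integration tests entirely
    go test -short ./test/volume_server/...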
test/volume_server/grpc/admin_extra_test.go (new file, 445 lines)
@@ -0,0 +1,445 @@
package volume_server_grpc_test

import (
	"context"
	"net/http"
	"strings"
	"testing"
	"time"

	"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
	"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
	"github.com/seaweedfs/seaweedfs/weed/cluster"
	"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
)

func TestVolumeNeedleStatusForUploadedFile(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()

	const volumeID = uint32(21)
	const needleID = uint64(778899)
	const cookie = uint32(0xA1B2C3D4)
	framework.AllocateVolume(t, grpcClient, volumeID, "")

	fid := framework.NewFileID(volumeID, needleID, cookie)
	client := framework.NewHTTPClient()
	payload := []byte("needle-status-payload")
	uploadResp := framework.UploadBytes(t, client, clusterHarness.VolumeAdminURL(), fid, payload)
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("upload status: expected 201, got %d", uploadResp.StatusCode)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	statusResp, err := grpcClient.VolumeNeedleStatus(ctx, &volume_server_pb.VolumeNeedleStatusRequest{
		VolumeId: volumeID,
		NeedleId: needleID,
	})
	if err != nil {
		t.Fatalf("VolumeNeedleStatus failed: %v", err)
	}
	if statusResp.GetNeedleId() != needleID {
		t.Fatalf("needle id mismatch: got %d want %d", statusResp.GetNeedleId(), needleID)
	}
	if statusResp.GetCookie() != cookie {
		t.Fatalf("cookie mismatch: got %d want %d", statusResp.GetCookie(), cookie)
	}
	if statusResp.GetSize() == 0 {
		t.Fatalf("expected non-zero needle size")
	}
}

func TestVolumeNeedleStatusViaEcShardsWhenNormalVolumeUnmounted(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()

	const volumeID = uint32(26)
	const needleID = uint64(778900)
	const cookie = uint32(0xA1B2C3D5)
	framework.AllocateVolume(t, grpcClient, volumeID, "")

	client := framework.NewHTTPClient()
	fid := framework.NewFileID(volumeID, needleID, cookie)
	payload := []byte("needle-status-ec-path-payload")
	uploadResp := framework.UploadBytes(t, client, clusterHarness.VolumeAdminURL(), fid, payload)
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("upload status: expected 201, got %d", uploadResp.StatusCode)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	_, err := grpcClient.VolumeEcShardsGenerate(ctx, &volume_server_pb.VolumeEcShardsGenerateRequest{
		VolumeId:   volumeID,
		Collection: "",
	})
	if err != nil {
		t.Fatalf("VolumeEcShardsGenerate failed: %v", err)
	}

	_, err = grpcClient.VolumeEcShardsMount(ctx, &volume_server_pb.VolumeEcShardsMountRequest{
		VolumeId:   volumeID,
		Collection: "",
		ShardIds:   []uint32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
	})
	if err != nil {
		t.Fatalf("VolumeEcShardsMount data shards failed: %v", err)
	}

	_, err = grpcClient.VolumeUnmount(ctx, &volume_server_pb.VolumeUnmountRequest{
		VolumeId: volumeID,
	})
	if err != nil {
		t.Fatalf("VolumeUnmount failed: %v", err)
	}

	statusResp, err := grpcClient.VolumeNeedleStatus(ctx, &volume_server_pb.VolumeNeedleStatusRequest{
		VolumeId: volumeID,
		NeedleId: needleID,
	})
	if err != nil {
		t.Fatalf("VolumeNeedleStatus via EC shards failed: %v", err)
	}
	if statusResp.GetNeedleId() != needleID {
		t.Fatalf("needle id mismatch: got %d want %d", statusResp.GetNeedleId(), needleID)
	}
	if statusResp.GetCookie() != cookie {
		t.Fatalf("cookie mismatch: got %d want %d", statusResp.GetCookie(), cookie)
	}
	if statusResp.GetSize() == 0 {
		t.Fatalf("expected non-zero needle size from EC-backed needle status")
	}

	_, err = grpcClient.VolumeNeedleStatus(ctx, &volume_server_pb.VolumeNeedleStatusRequest{
		VolumeId: volumeID,
		NeedleId: needleID + 999999,
	})
	if err == nil || !strings.Contains(strings.ToLower(err.Error()), "not found") {
		t.Fatalf("VolumeNeedleStatus via EC shards missing-needle error mismatch: %v", err)
	}
}

func TestVolumeNeedleStatusMissingVolumeAndNeedle(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()

	const volumeID = uint32(25)
	framework.AllocateVolume(t, grpcClient, volumeID, "")

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	_, err := grpcClient.VolumeNeedleStatus(ctx, &volume_server_pb.VolumeNeedleStatusRequest{
		VolumeId: 99925,
		NeedleId: 1,
	})
	if err == nil {
		t.Fatalf("VolumeNeedleStatus should fail for missing volume")
	}
	if !strings.Contains(strings.ToLower(err.Error()), "volume not found") {
		t.Fatalf("VolumeNeedleStatus missing-volume error mismatch: %v", err)
	}

	_, err = grpcClient.VolumeNeedleStatus(ctx, &volume_server_pb.VolumeNeedleStatusRequest{
		VolumeId: volumeID,
		NeedleId: 123456789,
	})
	if err == nil {
		t.Fatalf("VolumeNeedleStatus should fail for missing needle")
	}
	if !strings.Contains(strings.ToLower(err.Error()), "not found") {
		t.Fatalf("VolumeNeedleStatus missing-needle error mismatch: %v", err)
	}
}

func mustNewRequest(t testing.TB, method, url string) *http.Request {
	t.Helper()
	req, err := http.NewRequest(method, url, nil)
	if err != nil {
		t.Fatalf("create request %s %s: %v", method, url, err)
	}
	return req
}

func TestVolumeConfigureInvalidReplication(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()

	const volumeID = uint32(22)
	framework.AllocateVolume(t, grpcClient, volumeID, "")

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	resp, err := grpcClient.VolumeConfigure(ctx, &volume_server_pb.VolumeConfigureRequest{
		VolumeId:    volumeID,
		Replication: "bad-replication",
	})
	if err != nil {
		t.Fatalf("VolumeConfigure returned grpc error: %v", err)
	}
	if resp.GetError() == "" {
		t.Fatalf("VolumeConfigure expected response error for invalid replication")
	}
	if !strings.Contains(strings.ToLower(resp.GetError()), "replication") {
		t.Fatalf("VolumeConfigure error should mention replication, got: %q", resp.GetError())
	}
}

func TestVolumeConfigureSuccessAndMissingRollbackPath(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()

	const volumeID = uint32(24)
	framework.AllocateVolume(t, grpcClient, volumeID, "")

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	successResp, err := grpcClient.VolumeConfigure(ctx, &volume_server_pb.VolumeConfigureRequest{
		VolumeId:    volumeID,
		Replication: "000",
	})
	if err != nil {
		t.Fatalf("VolumeConfigure success path returned grpc error: %v", err)
	}
	if successResp.GetError() != "" {
		t.Fatalf("VolumeConfigure success path expected empty response error, got: %q", successResp.GetError())
	}

	statusResp, err := grpcClient.VolumeStatus(ctx, &volume_server_pb.VolumeStatusRequest{VolumeId: volumeID})
	if err != nil {
		t.Fatalf("VolumeStatus after successful configure failed: %v", err)
	}
	if statusResp.GetIsReadOnly() {
		t.Fatalf("VolumeStatus after configure expected writable volume")
	}

	missingResp, err := grpcClient.VolumeConfigure(ctx, &volume_server_pb.VolumeConfigureRequest{
		VolumeId:    99024,
		Replication: "000",
	})
	if err != nil {
		t.Fatalf("VolumeConfigure missing-volume branch should return response error, got grpc error: %v", err)
	}
	if missingResp.GetError() == "" {
		t.Fatalf("VolumeConfigure missing-volume expected non-empty response error")
	}
	lower := strings.ToLower(missingResp.GetError())
	if !strings.Contains(lower, "not found on disk") {
		t.Fatalf("VolumeConfigure missing-volume error should mention not found on disk, got: %q", missingResp.GetError())
	}
	if !strings.Contains(lower, "failed to restore mount") {
		t.Fatalf("VolumeConfigure missing-volume error should include remount rollback failure, got: %q", missingResp.GetError())
	}
}

func TestPingVolumeTargetAndLeaveAffectsHealthz(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	pingResp, err := grpcClient.Ping(ctx, &volume_server_pb.PingRequest{
		TargetType: cluster.VolumeServerType,
		Target:     clusterHarness.VolumeServerAddress(),
	})
	if err != nil {
		t.Fatalf("Ping target volume server failed: %v", err)
	}
	if pingResp.GetRemoteTimeNs() == 0 {
		t.Fatalf("expected remote timestamp from ping target volume server")
	}

	if _, err = grpcClient.VolumeServerLeave(ctx, &volume_server_pb.VolumeServerLeaveRequest{}); err != nil {
		t.Fatalf("VolumeServerLeave failed: %v", err)
	}

	client := framework.NewHTTPClient()
	healthURL := clusterHarness.VolumeAdminURL() + "/healthz"
	deadline := time.Now().Add(5 * time.Second)
	for {
		resp := framework.DoRequest(t, client, mustNewRequest(t, http.MethodGet, healthURL))
		_ = framework.ReadAllAndClose(t, resp)
		if resp.StatusCode == http.StatusServiceUnavailable {
			return
		}
		if time.Now().After(deadline) {
			t.Fatalf("expected healthz to return 503 after leave, got %d", resp.StatusCode)
		}
		time.Sleep(100 * time.Millisecond)
	}
}

func TestVolumeServerLeaveIsIdempotent(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	if _, err := grpcClient.VolumeServerLeave(ctx, &volume_server_pb.VolumeServerLeaveRequest{}); err != nil {
		t.Fatalf("first VolumeServerLeave failed: %v", err)
	}
	if _, err := grpcClient.VolumeServerLeave(ctx, &volume_server_pb.VolumeServerLeaveRequest{}); err != nil {
		t.Fatalf("second VolumeServerLeave should be idempotent success, got: %v", err)
	}

	client := framework.NewHTTPClient()
	healthURL := clusterHarness.VolumeAdminURL() + "/healthz"
	deadline := time.Now().Add(5 * time.Second)
	for {
		resp := framework.DoRequest(t, client, mustNewRequest(t, http.MethodGet, healthURL))
		_ = framework.ReadAllAndClose(t, resp)
		if resp.StatusCode == http.StatusServiceUnavailable {
			return
		}
		if time.Now().After(deadline) {
			t.Fatalf("expected healthz to stay 503 after repeated leave, got %d", resp.StatusCode)
		}
		time.Sleep(100 * time.Millisecond)
	}
}

func TestPingUnknownAndUnreachableTargetPaths(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	unknownResp, err := grpcClient.Ping(ctx, &volume_server_pb.PingRequest{
		TargetType: "unknown-type",
		Target:     "127.0.0.1:12345",
	})
	if err != nil {
		t.Fatalf("Ping unknown target type should not return grpc error, got: %v", err)
	}
	if unknownResp.GetRemoteTimeNs() != 0 {
		t.Fatalf("Ping unknown target type expected remote_time_ns=0, got %d", unknownResp.GetRemoteTimeNs())
	}
	if unknownResp.GetStopTimeNs() < unknownResp.GetStartTimeNs() {
		t.Fatalf("Ping unknown target type expected stop_time_ns >= start_time_ns")
	}

	_, err = grpcClient.Ping(ctx, &volume_server_pb.PingRequest{
		TargetType: cluster.MasterType,
		Target:     "127.0.0.1:1",
	})
	if err == nil {
		t.Fatalf("Ping master target should fail when target is unreachable")
	}
	if !strings.Contains(err.Error(), "ping master") {
		t.Fatalf("Ping master unreachable error mismatch: %v", err)
	}

	_, err = grpcClient.Ping(ctx, &volume_server_pb.PingRequest{
		TargetType: cluster.FilerType,
		Target:     "127.0.0.1:1",
	})
	if err == nil {
		t.Fatalf("Ping filer target should fail when target is unreachable")
	}
	if !strings.Contains(err.Error(), "ping filer") {
		t.Fatalf("Ping filer unreachable error mismatch: %v", err)
	}
}

func TestPingMasterTargetSuccess(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	resp, err := grpcClient.Ping(ctx, &volume_server_pb.PingRequest{
		TargetType: cluster.MasterType,
		Target:     clusterHarness.MasterAddress(),
	})
	if err != nil {
		t.Fatalf("Ping master target success path failed: %v", err)
	}
	if resp.GetRemoteTimeNs() == 0 {
		t.Fatalf("Ping master target expected non-zero remote time")
	}
	if resp.GetStopTimeNs() < resp.GetStartTimeNs() {
		t.Fatalf("Ping master target expected stop >= start, got start=%d stop=%d", resp.GetStartTimeNs(), resp.GetStopTimeNs())
	}
}

func TestPingFilerTargetSuccess(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	clusterHarness := framework.StartSingleVolumeClusterWithFiler(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	resp, err := grpcClient.Ping(ctx, &volume_server_pb.PingRequest{
		TargetType: cluster.FilerType,
		Target:     clusterHarness.FilerServerAddress(),
	})
	if err != nil {
		t.Fatalf("Ping filer target success path failed: %v", err)
	}
	if resp.GetRemoteTimeNs() == 0 {
		t.Fatalf("Ping filer target expected non-zero remote time")
	}
	if resp.GetStopTimeNs() < resp.GetStartTimeNs() {
		t.Fatalf("Ping filer target expected stop >= start, got start=%d stop=%d", resp.GetStartTimeNs(), resp.GetStopTimeNs())
	}
}
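The two leave tests above share an identical /healthz polling loop. A shared helper could fold that duplication; a minimal sketch using only the framework helpers already visible in this file (the name waitForHealthzStatus is hypothetical, not part of the committed framework):

	// waitForHealthzStatus polls baseURL/healthz until it returns wantStatus,
	// failing the test if the timeout elapses first.
	func waitForHealthzStatus(t *testing.T, baseURL string, wantStatus int, timeout time.Duration) {
		t.Helper()
		client := framework.NewHTTPClient()
		healthURL := baseURL + "/healthz"
		deadline := time.Now().Add(timeout)
		for {
			resp := framework.DoRequest(t, client, mustNewRequest(t, http.MethodGet, healthURL))
			_ = framework.ReadAllAndClose(t, resp)
			if resp.StatusCode == wantStatus {
				return
			}
			if time.Now().After(deadline) {
				t.Fatalf("healthz: expected status %d within %v, last got %d", wantStatus, timeout, resp.StatusCode)
			}
			time.Sleep(100 * time.Millisecond)
		}
	}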
test/volume_server/grpc/admin_lifecycle_test.go (new file, 215 lines)
@@ -0,0 +1,215 @@
package volume_server_grpc_test

import (
	"context"
	"net/http"
	"strings"
	"testing"
	"time"

	"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
	"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
	"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func TestVolumeAdminLifecycleRPCs(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	cluster := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, client := framework.DialVolumeServer(t, cluster.VolumeGRPCAddress())
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	const volumeID = uint32(11)
	framework.AllocateVolume(t, client, volumeID, "")

	statusResp, err := client.VolumeStatus(ctx, &volume_server_pb.VolumeStatusRequest{VolumeId: volumeID})
	if err != nil {
		t.Fatalf("VolumeStatus failed: %v", err)
	}
	if statusResp.GetFileCount() != 0 {
		t.Fatalf("new volume should be empty, got file_count=%d", statusResp.GetFileCount())
	}

	if _, err = client.VolumeUnmount(ctx, &volume_server_pb.VolumeUnmountRequest{VolumeId: volumeID}); err != nil {
		t.Fatalf("VolumeUnmount failed: %v", err)
	}
	if _, err = client.VolumeMount(ctx, &volume_server_pb.VolumeMountRequest{VolumeId: volumeID}); err != nil {
		t.Fatalf("VolumeMount failed: %v", err)
	}

	if _, err = client.VolumeDelete(ctx, &volume_server_pb.VolumeDeleteRequest{VolumeId: volumeID, OnlyEmpty: true}); err != nil {
		t.Fatalf("VolumeDelete failed: %v", err)
	}

	_, err = client.VolumeStatus(ctx, &volume_server_pb.VolumeStatusRequest{VolumeId: volumeID})
	if err == nil {
		t.Fatalf("VolumeStatus should fail after delete")
	}
	if st, ok := status.FromError(err); !ok || st.Code() == codes.OK {
		t.Fatalf("VolumeStatus error should be a non-OK grpc status, got: %v", err)
	}
}

func TestVolumeDeleteOnlyEmptyVariants(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()

	const volumeID = uint32(13)
	framework.AllocateVolume(t, grpcClient, volumeID, "")

	client := framework.NewHTTPClient()
	fid := framework.NewFileID(volumeID, 66001, 0x11223344)
	uploadResp := framework.UploadBytes(t, client, clusterHarness.VolumeAdminURL(), fid, []byte("volume-delete-only-empty"))
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("upload expected 201, got %d", uploadResp.StatusCode)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	_, err := grpcClient.VolumeDelete(ctx, &volume_server_pb.VolumeDeleteRequest{VolumeId: volumeID, OnlyEmpty: true})
	if err == nil || !strings.Contains(err.Error(), "volume not empty") {
		t.Fatalf("VolumeDelete only_empty=true expected volume-not-empty error, got: %v", err)
	}

	_, err = grpcClient.VolumeDelete(ctx, &volume_server_pb.VolumeDeleteRequest{VolumeId: volumeID, OnlyEmpty: false})
	if err != nil {
		t.Fatalf("VolumeDelete only_empty=false failed: %v", err)
	}

	_, err = grpcClient.VolumeStatus(ctx, &volume_server_pb.VolumeStatusRequest{VolumeId: volumeID})
	if err == nil {
		t.Fatalf("VolumeStatus should fail after non-empty delete with only_empty=false")
	}
}

func TestMaintenanceModeRejectsAllocateVolume(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	cluster := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, client := framework.DialVolumeServer(t, cluster.VolumeGRPCAddress())
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	stateResp, err := client.GetState(ctx, &volume_server_pb.GetStateRequest{})
	if err != nil {
		t.Fatalf("GetState failed: %v", err)
	}
	_, err = client.SetState(ctx, &volume_server_pb.SetStateRequest{
		State: &volume_server_pb.VolumeServerState{Maintenance: true, Version: stateResp.GetState().GetVersion()},
	})
	if err != nil {
		t.Fatalf("SetState maintenance=true failed: %v", err)
	}

	_, err = client.AllocateVolume(ctx, &volume_server_pb.AllocateVolumeRequest{VolumeId: 12, Replication: "000"})
	if err == nil {
		t.Fatalf("AllocateVolume should fail when maintenance mode is enabled")
	}
	if !strings.Contains(err.Error(), "maintenance mode") {
		t.Fatalf("expected maintenance mode error, got: %v", err)
	}
}

func TestAllocateDuplicateAndMountUnmountMissingVariants(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	cluster := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, client := framework.DialVolumeServer(t, cluster.VolumeGRPCAddress())
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	const missingVolumeID = uint32(99331)
	const volumeID = uint32(14)

	if _, err := client.VolumeUnmount(ctx, &volume_server_pb.VolumeUnmountRequest{VolumeId: missingVolumeID}); err != nil {
		t.Fatalf("VolumeUnmount missing volume should be idempotent success, got: %v", err)
	}

	_, err := client.VolumeMount(ctx, &volume_server_pb.VolumeMountRequest{VolumeId: missingVolumeID})
	if err == nil {
		t.Fatalf("VolumeMount missing volume should fail")
	}
	if !strings.Contains(err.Error(), "not found on disk") {
		t.Fatalf("VolumeMount missing volume error mismatch: %v", err)
	}

	framework.AllocateVolume(t, client, volumeID, "")

	_, err = client.AllocateVolume(ctx, &volume_server_pb.AllocateVolumeRequest{
		VolumeId:    volumeID,
		Replication: "000",
	})
	if err == nil {
		t.Fatalf("AllocateVolume duplicate should fail")
	}
	if !strings.Contains(strings.ToLower(err.Error()), "already exists") {
		t.Fatalf("AllocateVolume duplicate error mismatch: %v", err)
	}

	if _, err = client.VolumeUnmount(ctx, &volume_server_pb.VolumeUnmountRequest{VolumeId: volumeID}); err != nil {
		t.Fatalf("VolumeUnmount existing volume failed: %v", err)
	}
	if _, err = client.VolumeUnmount(ctx, &volume_server_pb.VolumeUnmountRequest{VolumeId: volumeID}); err != nil {
		t.Fatalf("VolumeUnmount already-unmounted volume should be idempotent success, got: %v", err)
	}
	if _, err = client.VolumeMount(ctx, &volume_server_pb.VolumeMountRequest{VolumeId: volumeID}); err != nil {
		t.Fatalf("VolumeMount remount failed: %v", err)
	}
}

func TestMaintenanceModeRejectsVolumeDelete(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	cluster := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, client := framework.DialVolumeServer(t, cluster.VolumeGRPCAddress())
	defer conn.Close()

	const volumeID = uint32(15)
	framework.AllocateVolume(t, client, volumeID, "")

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	stateResp, err := client.GetState(ctx, &volume_server_pb.GetStateRequest{})
	if err != nil {
		t.Fatalf("GetState failed: %v", err)
	}
	_, err = client.SetState(ctx, &volume_server_pb.SetStateRequest{
		State: &volume_server_pb.VolumeServerState{Maintenance: true, Version: stateResp.GetState().GetVersion()},
	})
	if err != nil {
		t.Fatalf("SetState maintenance=true failed: %v", err)
	}

	_, err = client.VolumeDelete(ctx, &volume_server_pb.VolumeDeleteRequest{VolumeId: volumeID, OnlyEmpty: true})
	if err == nil {
		t.Fatalf("VolumeDelete should fail when maintenance mode is enabled")
	}
	if !strings.Contains(err.Error(), "maintenance mode") {
		t.Fatalf("expected maintenance mode error, got: %v", err)
	}
}
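The maintenance-mode tests above repeat the same GetState/SetState round-trip, passing the observed Version back so the write lands against a known state; the version field evidently serves as an optimistic-concurrency token (the commit list also mentions setstate version-mismatch coverage). A sketch of a shared helper; the name is hypothetical, and it assumes the generated client interface is volume_server_pb.VolumeServerClient:

	// enterMaintenanceMode flips the server into maintenance mode using the
	// read-version-then-write pattern the tests above use inline.
	func enterMaintenanceMode(ctx context.Context, t *testing.T, client volume_server_pb.VolumeServerClient) {
		t.Helper()
		stateResp, err := client.GetState(ctx, &volume_server_pb.GetStateRequest{})
		if err != nil {
			t.Fatalf("GetState failed: %v", err)
		}
		_, err = client.SetState(ctx, &volume_server_pb.SetStateRequest{
			State: &volume_server_pb.VolumeServerState{Maintenance: true, Version: stateResp.GetState().GetVersion()},
		})
		if err != nil {
			t.Fatalf("SetState maintenance=true failed: %v", err)
		}
	}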
test/volume_server/grpc/admin_readonly_collection_test.go (new file, 177 lines)
@@ -0,0 +1,177 @@
package volume_server_grpc_test

import (
	"context"
	"strings"
	"testing"
	"time"

	"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
	"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
	"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
)

func TestVolumeMarkReadonlyAndWritableLifecycle(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()

	const volumeID = uint32(72)
	framework.AllocateVolume(t, grpcClient, volumeID, "")

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	_, err := grpcClient.VolumeMarkReadonly(ctx, &volume_server_pb.VolumeMarkReadonlyRequest{
		VolumeId: volumeID,
		Persist:  false,
	})
	if err != nil {
		t.Fatalf("VolumeMarkReadonly failed: %v", err)
	}

	readOnlyStatus, err := grpcClient.VolumeStatus(ctx, &volume_server_pb.VolumeStatusRequest{VolumeId: volumeID})
	if err != nil {
		t.Fatalf("VolumeStatus after readonly failed: %v", err)
	}
	if !readOnlyStatus.GetIsReadOnly() {
		t.Fatalf("VolumeStatus expected readonly=true after VolumeMarkReadonly")
	}

	_, err = grpcClient.VolumeMarkWritable(ctx, &volume_server_pb.VolumeMarkWritableRequest{VolumeId: volumeID})
	if err != nil {
		t.Fatalf("VolumeMarkWritable failed: %v", err)
	}

	writableStatus, err := grpcClient.VolumeStatus(ctx, &volume_server_pb.VolumeStatusRequest{VolumeId: volumeID})
	if err != nil {
		t.Fatalf("VolumeStatus after writable failed: %v", err)
	}
	if writableStatus.GetIsReadOnly() {
		t.Fatalf("VolumeStatus expected readonly=false after VolumeMarkWritable")
	}
}

func TestVolumeMarkReadonlyPersistTrue(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()

	const volumeID = uint32(74)
	framework.AllocateVolume(t, grpcClient, volumeID, "")

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	_, err := grpcClient.VolumeMarkReadonly(ctx, &volume_server_pb.VolumeMarkReadonlyRequest{
		VolumeId: volumeID,
		Persist:  true,
	})
	if err != nil {
		t.Fatalf("VolumeMarkReadonly persist=true failed: %v", err)
	}

	statusResp, err := grpcClient.VolumeStatus(ctx, &volume_server_pb.VolumeStatusRequest{VolumeId: volumeID})
	if err != nil {
		t.Fatalf("VolumeStatus after persist readonly failed: %v", err)
	}
	if !statusResp.GetIsReadOnly() {
		t.Fatalf("VolumeStatus expected readonly=true after persist readonly")
	}
}

func TestVolumeMarkReadonlyWritableErrorPaths(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	_, err := grpcClient.VolumeMarkReadonly(ctx, &volume_server_pb.VolumeMarkReadonlyRequest{VolumeId: 98771, Persist: true})
	if err == nil || !strings.Contains(err.Error(), "not found") {
		t.Fatalf("VolumeMarkReadonly missing-volume error mismatch: %v", err)
	}

	_, err = grpcClient.VolumeMarkWritable(ctx, &volume_server_pb.VolumeMarkWritableRequest{VolumeId: 98772})
	if err == nil || !strings.Contains(err.Error(), "not found") {
		t.Fatalf("VolumeMarkWritable missing-volume error mismatch: %v", err)
	}

	stateResp, err := grpcClient.GetState(ctx, &volume_server_pb.GetStateRequest{})
	if err != nil {
		t.Fatalf("GetState failed: %v", err)
	}
	_, err = grpcClient.SetState(ctx, &volume_server_pb.SetStateRequest{
		State: &volume_server_pb.VolumeServerState{
			Maintenance: true,
			Version:     stateResp.GetState().GetVersion(),
		},
	})
	if err != nil {
		t.Fatalf("SetState maintenance=true failed: %v", err)
	}

	_, err = grpcClient.VolumeMarkReadonly(ctx, &volume_server_pb.VolumeMarkReadonlyRequest{VolumeId: 1, Persist: true})
	if err == nil || !strings.Contains(err.Error(), "maintenance mode") {
		t.Fatalf("VolumeMarkReadonly maintenance error mismatch: %v", err)
	}

	_, err = grpcClient.VolumeMarkWritable(ctx, &volume_server_pb.VolumeMarkWritableRequest{VolumeId: 1})
	if err == nil || !strings.Contains(err.Error(), "maintenance mode") {
		t.Fatalf("VolumeMarkWritable maintenance error mismatch: %v", err)
	}
}

func TestDeleteCollectionRemovesVolumeAndIsIdempotent(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()

	const volumeID = uint32(73)
	const collection = "it-delete-collection"

	framework.AllocateVolume(t, grpcClient, volumeID, collection)

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	_, err := grpcClient.VolumeStatus(ctx, &volume_server_pb.VolumeStatusRequest{VolumeId: volumeID})
	if err != nil {
		t.Fatalf("VolumeStatus before DeleteCollection failed: %v", err)
	}

	_, err = grpcClient.DeleteCollection(ctx, &volume_server_pb.DeleteCollectionRequest{Collection: collection})
	if err != nil {
		t.Fatalf("DeleteCollection existing collection failed: %v", err)
	}

	_, err = grpcClient.VolumeStatus(ctx, &volume_server_pb.VolumeStatusRequest{VolumeId: volumeID})
	if err == nil {
		t.Fatalf("VolumeStatus should fail after collection delete")
	}
	if !strings.Contains(err.Error(), "not found volume") {
		t.Fatalf("VolumeStatus after DeleteCollection error mismatch: %v", err)
	}

	_, err = grpcClient.DeleteCollection(ctx, &volume_server_pb.DeleteCollectionRequest{Collection: collection})
	if err != nil {
		t.Fatalf("DeleteCollection idempotent retry failed: %v", err)
	}
}
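TestVolumeMarkReadonlyPersistTrue checks the in-memory flag but stops short of verifying that persist=true survives a remount. A natural follow-up assertion, sketched with the same RPCs used above; whether the persisted flag actually survives an unmount/mount cycle is an assumption about the persist semantics, not something this diff demonstrates:

	// Hypothetical extension of TestVolumeMarkReadonlyPersistTrue: bounce the
	// volume and confirm the readonly flag was persisted, not just in memory.
	if _, err := grpcClient.VolumeUnmount(ctx, &volume_server_pb.VolumeUnmountRequest{VolumeId: volumeID}); err != nil {
		t.Fatalf("VolumeUnmount failed: %v", err)
	}
	if _, err := grpcClient.VolumeMount(ctx, &volume_server_pb.VolumeMountRequest{VolumeId: volumeID}); err != nil {
		t.Fatalf("VolumeMount failed: %v", err)
	}
	remounted, err := grpcClient.VolumeStatus(ctx, &volume_server_pb.VolumeStatusRequest{VolumeId: volumeID})
	if err != nil {
		t.Fatalf("VolumeStatus after remount failed: %v", err)
	}
	if !remounted.GetIsReadOnly() {
		t.Fatalf("expected persisted readonly flag to survive remount")
	}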
test/volume_server/grpc/batch_delete_test.go (new file, 264 lines)
@@ -0,0 +1,264 @@
|
||||
package volume_server_grpc_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
|
||||
"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
|
||||
"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
|
||||
)
|
||||
|
||||
func TestBatchDeleteInvalidFidAndMaintenanceMode(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("skipping integration test in short mode")
|
||||
}
|
||||
|
||||
cluster := framework.StartSingleVolumeCluster(t, matrix.P1())
|
||||
conn, client := framework.DialVolumeServer(t, cluster.VolumeGRPCAddress())
|
||||
defer conn.Close()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
resp, err := client.BatchDelete(ctx, &volume_server_pb.BatchDeleteRequest{FileIds: []string{"bad-fid"}})
|
||||
if err != nil {
|
||||
t.Fatalf("BatchDelete invalid fid should return response, got error: %v", err)
|
||||
}
|
||||
if len(resp.GetResults()) != 1 {
|
||||
t.Fatalf("expected one batch delete result, got %d", len(resp.GetResults()))
|
||||
}
|
||||
if got := resp.GetResults()[0].GetStatus(); got != 400 {
|
||||
t.Fatalf("invalid fid expected status 400, got %d", got)
|
||||
}
|
||||
|
||||
stateResp, err := client.GetState(ctx, &volume_server_pb.GetStateRequest{})
|
||||
if err != nil {
|
||||
t.Fatalf("GetState failed: %v", err)
|
||||
}
|
||||
_, err = client.SetState(ctx, &volume_server_pb.SetStateRequest{
|
||||
State: &volume_server_pb.VolumeServerState{Maintenance: true, Version: stateResp.GetState().GetVersion()},
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("SetState maintenance=true failed: %v", err)
|
||||
}
|
||||
|
||||
_, err = client.BatchDelete(ctx, &volume_server_pb.BatchDeleteRequest{FileIds: []string{"1,1234567890ab"}})
|
||||
if err == nil {
|
||||
t.Fatalf("BatchDelete should fail when maintenance mode is enabled")
|
||||
}
|
||||
if !strings.Contains(err.Error(), "maintenance mode") {
|
||||
t.Fatalf("expected maintenance mode error, got: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBatchDeleteCookieMismatchAndSkipCheck(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("skipping integration test in short mode")
|
||||
}
|
||||
|
||||
cluster := framework.StartSingleVolumeCluster(t, matrix.P1())
|
||||
conn, client := framework.DialVolumeServer(t, cluster.VolumeGRPCAddress())
|
||||
defer conn.Close()
|
||||
|
||||
const volumeID = uint32(31)
|
||||
const needleID = uint64(900001)
|
||||
const correctCookie = uint32(0x1122AABB)
|
||||
const wrongCookie = uint32(0x1122AABC)
|
||||
framework.AllocateVolume(t, client, volumeID, "")
|
||||
|
||||
httpClient := framework.NewHTTPClient()
|
||||
fid := framework.NewFileID(volumeID, needleID, correctCookie)
|
||||
uploadResp := framework.UploadBytes(t, httpClient, cluster.VolumeAdminURL(), fid, []byte("batch-delete-cookie-check"))
|
||||
_ = framework.ReadAllAndClose(t, uploadResp)
|
||||
if uploadResp.StatusCode != http.StatusCreated {
|
||||
t.Fatalf("upload expected 201, got %d", uploadResp.StatusCode)
|
||||
}
|
||||
|
||||
wrongCookieFid := framework.NewFileID(volumeID, needleID, wrongCookie)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
mismatchResp, err := client.BatchDelete(ctx, &volume_server_pb.BatchDeleteRequest{
|
||||
FileIds: []string{wrongCookieFid},
|
||||
SkipCookieCheck: false,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("BatchDelete with cookie check failed: %v", err)
|
||||
}
|
||||
if len(mismatchResp.GetResults()) != 1 {
|
||||
t.Fatalf("BatchDelete cookie mismatch expected 1 result, got %d", len(mismatchResp.GetResults()))
|
||||
}
|
||||
if mismatchResp.GetResults()[0].GetStatus() != http.StatusBadRequest {
|
||||
t.Fatalf("BatchDelete cookie mismatch expected status 400, got %d", mismatchResp.GetResults()[0].GetStatus())
|
||||
}
|
||||
|
||||
skipCheckResp, err := client.BatchDelete(ctx, &volume_server_pb.BatchDeleteRequest{
|
||||
FileIds: []string{wrongCookieFid},
|
||||
SkipCookieCheck: true,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("BatchDelete skip cookie check failed: %v", err)
|
||||
}
|
||||
if len(skipCheckResp.GetResults()) != 1 {
|
||||
t.Fatalf("BatchDelete skip check expected 1 result, got %d", len(skipCheckResp.GetResults()))
|
||||
}
|
||||
if skipCheckResp.GetResults()[0].GetStatus() != http.StatusAccepted {
|
||||
t.Fatalf("BatchDelete skip check expected status 202, got %d", skipCheckResp.GetResults()[0].GetStatus())
|
||||
}
|
||||
|
||||
readAfterDelete := framework.ReadBytes(t, httpClient, cluster.VolumeAdminURL(), fid)
|
||||
_ = framework.ReadAllAndClose(t, readAfterDelete)
|
||||
if readAfterDelete.StatusCode != http.StatusNotFound {
|
||||
t.Fatalf("read after skip-check batch delete expected 404, got %d", readAfterDelete.StatusCode)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBatchDeleteMixedStatusesAndMismatchStopsProcessing(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("skipping integration test in short mode")
|
||||
}
|
||||
|
||||
cluster := framework.StartSingleVolumeCluster(t, matrix.P1())
|
||||
conn, client := framework.DialVolumeServer(t, cluster.VolumeGRPCAddress())
|
||||
defer conn.Close()
|
||||
|
||||
const volumeID = uint32(32)
|
||||
framework.AllocateVolume(t, client, volumeID, "")
|
||||
|
||||
const needleA = uint64(910001)
|
||||
const needleB = uint64(910002)
|
||||
const needleC = uint64(910003)
|
||||
const cookieA = uint32(0x11111111)
|
||||
const cookieB = uint32(0x22222222)
|
||||
const cookieC = uint32(0x33333333)
|
||||
|
||||
httpClient := framework.NewHTTPClient()
|
||||
fidA := framework.NewFileID(volumeID, needleA, cookieA)
|
||||
fidB := framework.NewFileID(volumeID, needleB, cookieB)
|
||||
fidC := framework.NewFileID(volumeID, needleC, cookieC)
|
||||
|
||||
for _, tc := range []struct {
|
||||
fid string
|
||||
body string
|
||||
}{
|
||||
{fid: fidA, body: "batch-delete-mixed-a"},
|
||||
{fid: fidB, body: "batch-delete-mixed-b"},
|
||||
{fid: fidC, body: "batch-delete-mixed-c"},
|
||||
} {
|
||||
uploadResp := framework.UploadBytes(t, httpClient, cluster.VolumeAdminURL(), tc.fid, []byte(tc.body))
|
||||
_ = framework.ReadAllAndClose(t, uploadResp)
|
||||
if uploadResp.StatusCode != http.StatusCreated {
|
||||
t.Fatalf("upload %s expected 201, got %d", tc.fid, uploadResp.StatusCode)
|
||||
}
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
missingFid := framework.NewFileID(volumeID, 919999, 0x44444444)
|
||||
mixedResp, err := client.BatchDelete(ctx, &volume_server_pb.BatchDeleteRequest{
|
||||
FileIds: []string{"bad-fid", fidA, missingFid},
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("BatchDelete mixed status request failed: %v", err)
|
||||
}
|
||||
if len(mixedResp.GetResults()) != 3 {
|
||||
t.Fatalf("BatchDelete mixed status expected 3 results, got %d", len(mixedResp.GetResults()))
|
||||
}
|
||||
if mixedResp.GetResults()[0].GetStatus() != http.StatusBadRequest {
|
||||
t.Fatalf("BatchDelete mixed result[0] expected 400, got %d", mixedResp.GetResults()[0].GetStatus())
|
||||
}
|
||||
if mixedResp.GetResults()[1].GetStatus() != http.StatusAccepted {
|
||||
t.Fatalf("BatchDelete mixed result[1] expected 202, got %d", mixedResp.GetResults()[1].GetStatus())
|
||||
}
|
||||
if mixedResp.GetResults()[2].GetStatus() != http.StatusNotFound {
|
||||
t.Fatalf("BatchDelete mixed result[2] expected 404, got %d", mixedResp.GetResults()[2].GetStatus())
|
||||
}
|
||||
|
||||
readDeletedA := framework.ReadBytes(t, httpClient, cluster.VolumeAdminURL(), fidA)
|
||||
_ = framework.ReadAllAndClose(t, readDeletedA)
|
||||
if readDeletedA.StatusCode != http.StatusNotFound {
|
||||
t.Fatalf("fidA should be deleted after batch delete, got status %d", readDeletedA.StatusCode)
|
||||
}
|
||||
|
||||
wrongCookieB := framework.NewFileID(volumeID, needleB, cookieB+1)
|
||||
stopResp, err := client.BatchDelete(ctx, &volume_server_pb.BatchDeleteRequest{
|
||||
FileIds: []string{wrongCookieB, fidC},
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("BatchDelete mismatch-stop request failed: %v", err)
|
||||
}
|
||||
if len(stopResp.GetResults()) != 1 {
|
||||
t.Fatalf("BatchDelete mismatch-stop expected 1 result due early break, got %d", len(stopResp.GetResults()))
|
||||
}
|
||||
if stopResp.GetResults()[0].GetStatus() != http.StatusBadRequest {
|
||||
t.Fatalf("BatchDelete mismatch-stop expected 400, got %d", stopResp.GetResults()[0].GetStatus())
|
||||
}
|
||||
|
||||
readB := framework.ReadBytes(t, httpClient, cluster.VolumeAdminURL(), fidB)
|
||||
_ = framework.ReadAllAndClose(t, readB)
|
||||
if readB.StatusCode != http.StatusOK {
|
||||
t.Fatalf("fidB should remain after cookie mismatch path, got %d", readB.StatusCode)
|
||||
}
|
||||
|
||||
readC := framework.ReadBytes(t, httpClient, cluster.VolumeAdminURL(), fidC)
|
||||
_ = framework.ReadAllAndClose(t, readC)
|
||||
if readC.StatusCode != http.StatusOK {
|
||||
t.Fatalf("fidC should remain when batch processing stops on mismatch, got %d", readC.StatusCode)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBatchDeleteRejectsChunkManifestNeedles(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	cluster := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, client := framework.DialVolumeServer(t, cluster.VolumeGRPCAddress())
	defer conn.Close()

	const volumeID = uint32(33)
	framework.AllocateVolume(t, client, volumeID, "")

	httpClient := framework.NewHTTPClient()
	fid := framework.NewFileID(volumeID, 920001, 0x5555AAAA)
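	// The cm=true query parameter marks the upload as a chunk manifest, which
	// BatchDelete refuses to delete directly.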
	req, err := http.NewRequest(http.MethodPost, cluster.VolumeAdminURL()+"/"+fid+"?cm=true", bytes.NewReader([]byte("manifest-placeholder-payload")))
	if err != nil {
		t.Fatalf("create chunk manifest upload request: %v", err)
	}
	req.Header.Set("Content-Type", "application/octet-stream")
	uploadResp := framework.DoRequest(t, httpClient, req)
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("chunk manifest upload expected 201, got %d", uploadResp.StatusCode)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	resp, err := client.BatchDelete(ctx, &volume_server_pb.BatchDeleteRequest{FileIds: []string{fid}})
	if err != nil {
		t.Fatalf("BatchDelete chunk manifest should return response, got grpc error: %v", err)
	}
	if len(resp.GetResults()) != 1 {
		t.Fatalf("BatchDelete chunk manifest expected one result, got %d", len(resp.GetResults()))
	}
	if resp.GetResults()[0].GetStatus() != http.StatusNotAcceptable {
		t.Fatalf("BatchDelete chunk manifest expected status 406, got %d", resp.GetResults()[0].GetStatus())
	}
	if !strings.Contains(resp.GetResults()[0].GetError(), "ChunkManifest") {
		t.Fatalf("BatchDelete chunk manifest expected error mentioning ChunkManifest, got %q", resp.GetResults()[0].GetError())
	}

	readResp := framework.ReadBytes(t, httpClient, cluster.VolumeAdminURL(), fid)
	_ = framework.ReadAllAndClose(t, readResp)
	if readResp.StatusCode != http.StatusOK {
		t.Fatalf("chunk manifest should not be deleted by BatchDelete reject path, got %d", readResp.StatusCode)
	}
}

test/volume_server/grpc/copy_receive_variants_test.go (new file, 431 lines)
@@ -0,0 +1,431 @@
package volume_server_grpc_test

import (
	"context"
	"io"
	"math"
	"strings"
	"testing"
	"time"

	"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
	"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
	"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
)

func TestVolumeIncrementalCopyDataAndNoDataPaths(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()

	const volumeID = uint32(91)
	framework.AllocateVolume(t, grpcClient, volumeID, "")

	client := framework.NewHTTPClient()
	fid := framework.NewFileID(volumeID, 770001, 0x1122AABB)
	uploadResp := framework.UploadBytes(t, client, clusterHarness.VolumeAdminURL(), fid, []byte("incremental-copy-content"))
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != 201 {
		t.Fatalf("upload expected 201, got %d", uploadResp.StatusCode)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	dataStream, err := grpcClient.VolumeIncrementalCopy(ctx, &volume_server_pb.VolumeIncrementalCopyRequest{
		VolumeId: volumeID,
		SinceNs:  0,
	})
	if err != nil {
		t.Fatalf("VolumeIncrementalCopy start failed: %v", err)
	}

	totalBytes := 0
	for {
		msg, recvErr := dataStream.Recv()
		if recvErr == io.EOF {
			break
		}
		if recvErr != nil {
			t.Fatalf("VolumeIncrementalCopy recv failed: %v", recvErr)
		}
		totalBytes += len(msg.GetFileContent())
	}
	if totalBytes == 0 {
		t.Fatalf("VolumeIncrementalCopy expected streamed bytes for since_ns=0")
	}

	noDataStream, err := grpcClient.VolumeIncrementalCopy(ctx, &volume_server_pb.VolumeIncrementalCopyRequest{
		VolumeId: volumeID,
		SinceNs:  math.MaxUint64,
	})
	if err != nil {
		t.Fatalf("VolumeIncrementalCopy no-data start failed: %v", err)
	}
	_, err = noDataStream.Recv()
	if err != io.EOF {
		t.Fatalf("VolumeIncrementalCopy no-data expected EOF, got: %v", err)
	}
}
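
// Note: since_ns appears to act as a change cutoff for the incremental copy;
// with since_ns=math.MaxUint64 the server has nothing newer to send and the
// stream ends immediately with EOF.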
func TestCopyFileIgnoreNotFoundAndStopOffsetZeroPaths(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()

	const volumeID = uint32(92)
	framework.AllocateVolume(t, grpcClient, volumeID, "")

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	missingNoIgnore, err := grpcClient.CopyFile(ctx, &volume_server_pb.CopyFileRequest{
		VolumeId:                 volumeID,
		Ext:                      ".definitely-missing",
		CompactionRevision:       math.MaxUint32,
		StopOffset:               1,
		IgnoreSourceFileNotFound: false,
	})
	if err == nil {
		_, err = missingNoIgnore.Recv()
	}
	if err == nil {
		t.Fatalf("CopyFile should fail for missing source file when ignore_source_file_not_found=false")
	}

	missingIgnored, err := grpcClient.CopyFile(ctx, &volume_server_pb.CopyFileRequest{
		VolumeId:                 volumeID,
		Ext:                      ".definitely-missing",
		CompactionRevision:       math.MaxUint32,
		StopOffset:               1,
		IgnoreSourceFileNotFound: true,
	})
	if err != nil {
		t.Fatalf("CopyFile ignore-not-found start failed: %v", err)
	}
	_, err = missingIgnored.Recv()
	if err != io.EOF {
		t.Fatalf("CopyFile ignore-not-found expected EOF, got: %v", err)
	}

	stopZeroStream, err := grpcClient.CopyFile(ctx, &volume_server_pb.CopyFileRequest{
		VolumeId:                 volumeID,
		Ext:                      ".definitely-missing",
		CompactionRevision:       math.MaxUint32,
		StopOffset:               0,
		IgnoreSourceFileNotFound: false,
	})
	if err != nil {
		t.Fatalf("CopyFile stop_offset=0 start failed: %v", err)
	}
	_, err = stopZeroStream.Recv()
	if err != io.EOF {
		t.Fatalf("CopyFile stop_offset=0 expected EOF, got: %v", err)
	}
}

func TestCopyFileCompactionRevisionMismatch(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()

	const volumeID = uint32(94)
	framework.AllocateVolume(t, grpcClient, volumeID, "")

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	stream, err := grpcClient.CopyFile(ctx, &volume_server_pb.CopyFileRequest{
		VolumeId:           volumeID,
		Ext:                ".idx",
		CompactionRevision: 1, // fresh volume starts at revision 0
		StopOffset:         1,
	})
	if err == nil {
		_, err = stream.Recv()
	}
	if err == nil || !strings.Contains(err.Error(), "is compacted") {
		t.Fatalf("CopyFile compaction mismatch error mismatch: %v", err)
	}
}

func TestReceiveFileProtocolViolationResponses(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	contentFirstStream, err := grpcClient.ReceiveFile(ctx)
	if err != nil {
		t.Fatalf("ReceiveFile stream create failed: %v", err)
	}
	if err = contentFirstStream.Send(&volume_server_pb.ReceiveFileRequest{
		Data: &volume_server_pb.ReceiveFileRequest_FileContent{
			FileContent: []byte("content-before-info"),
		},
	}); err != nil {
		t.Fatalf("ReceiveFile send content-first failed: %v", err)
	}
	contentFirstResp, err := contentFirstStream.CloseAndRecv()
	if err != nil {
		t.Fatalf("ReceiveFile content-first close failed: %v", err)
	}
	if !strings.Contains(contentFirstResp.GetError(), "file info must be sent first") {
		t.Fatalf("ReceiveFile content-first response mismatch: %+v", contentFirstResp)
	}

	unknownTypeStream, err := grpcClient.ReceiveFile(ctx)
	if err != nil {
		t.Fatalf("ReceiveFile stream create for unknown-type failed: %v", err)
	}
	if err = unknownTypeStream.Send(&volume_server_pb.ReceiveFileRequest{}); err != nil {
		t.Fatalf("ReceiveFile send unknown-type request failed: %v", err)
	}
	unknownTypeResp, err := unknownTypeStream.CloseAndRecv()
	if err != nil {
		t.Fatalf("ReceiveFile unknown-type close failed: %v", err)
	}
	if !strings.Contains(unknownTypeResp.GetError(), "unknown message type") {
		t.Fatalf("ReceiveFile unknown-type response mismatch: %+v", unknownTypeResp)
	}
}
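
// Note: ReceiveFile is a client-streaming RPC whose first message must carry
// ReceiveFileInfo (volume id, extension, size), followed by FileContent
// chunks; protocol violations come back in the response Error field rather
// than as a gRPC error, as the checks above show.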
func TestReceiveFileSuccessForRegularVolume(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()

	const volumeID = uint32(95)
	framework.AllocateVolume(t, grpcClient, volumeID, "")

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	payloadA := []byte("receive-file-chunk-a:")
	payloadB := []byte("receive-file-chunk-b")
	expected := append(append([]byte{}, payloadA...), payloadB...)

	receiveStream, err := grpcClient.ReceiveFile(ctx)
	if err != nil {
		t.Fatalf("ReceiveFile stream create failed: %v", err)
	}

	if err = receiveStream.Send(&volume_server_pb.ReceiveFileRequest{
		Data: &volume_server_pb.ReceiveFileRequest_Info{
			Info: &volume_server_pb.ReceiveFileInfo{
				VolumeId:   volumeID,
				Ext:        ".tmprecv",
				Collection: "",
				IsEcVolume: false,
				FileSize:   uint64(len(expected)),
			},
		},
	}); err != nil {
		t.Fatalf("ReceiveFile send info failed: %v", err)
	}
	if err = receiveStream.Send(&volume_server_pb.ReceiveFileRequest{
		Data: &volume_server_pb.ReceiveFileRequest_FileContent{FileContent: payloadA},
	}); err != nil {
		t.Fatalf("ReceiveFile send payloadA failed: %v", err)
	}
	if err = receiveStream.Send(&volume_server_pb.ReceiveFileRequest{
		Data: &volume_server_pb.ReceiveFileRequest_FileContent{FileContent: payloadB},
	}); err != nil {
		t.Fatalf("ReceiveFile send payloadB failed: %v", err)
	}

	resp, err := receiveStream.CloseAndRecv()
	if err != nil {
		t.Fatalf("ReceiveFile close failed: %v", err)
	}
	if resp.GetError() != "" {
		t.Fatalf("ReceiveFile unexpected error response: %+v", resp)
	}
	if resp.GetBytesWritten() != uint64(len(expected)) {
		t.Fatalf("ReceiveFile bytes_written mismatch: got %d want %d", resp.GetBytesWritten(), len(expected))
	}

	copyStream, err := grpcClient.CopyFile(ctx, &volume_server_pb.CopyFileRequest{
		VolumeId:           volumeID,
		Ext:                ".tmprecv",
		CompactionRevision: math.MaxUint32,
		StopOffset:         uint64(len(expected)),
	})
	if err != nil {
		t.Fatalf("CopyFile for received data start failed: %v", err)
	}

	var copied []byte
	for {
		msg, recvErr := copyStream.Recv()
		if recvErr == io.EOF {
			break
		}
		if recvErr != nil {
			t.Fatalf("CopyFile for received data recv failed: %v", recvErr)
		}
		copied = append(copied, msg.GetFileContent()...)
	}

	if string(copied) != string(expected) {
		t.Fatalf("received file data mismatch: got %q want %q", string(copied), string(expected))
	}
}

func TestReceiveFileSuccessForEcVolume(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	const volumeID = uint32(96)
	const collection = "ec-receive-success"
	const ext = ".ec00"

	payloadA := []byte("receive-ec-file-chunk-a:")
	payloadB := []byte("receive-ec-file-chunk-b")
	expected := append(append([]byte{}, payloadA...), payloadB...)

	receiveStream, err := grpcClient.ReceiveFile(ctx)
	if err != nil {
		t.Fatalf("ReceiveFile stream create failed: %v", err)
	}

	if err = receiveStream.Send(&volume_server_pb.ReceiveFileRequest{
		Data: &volume_server_pb.ReceiveFileRequest_Info{
			Info: &volume_server_pb.ReceiveFileInfo{
				VolumeId:   volumeID,
				Ext:        ext,
				Collection: collection,
				IsEcVolume: true,
				ShardId:    0,
				FileSize:   uint64(len(expected)),
			},
		},
	}); err != nil {
		t.Fatalf("ReceiveFile send EC info failed: %v", err)
	}
	if err = receiveStream.Send(&volume_server_pb.ReceiveFileRequest{
		Data: &volume_server_pb.ReceiveFileRequest_FileContent{FileContent: payloadA},
	}); err != nil {
		t.Fatalf("ReceiveFile send EC payloadA failed: %v", err)
	}
	if err = receiveStream.Send(&volume_server_pb.ReceiveFileRequest{
		Data: &volume_server_pb.ReceiveFileRequest_FileContent{FileContent: payloadB},
	}); err != nil {
		t.Fatalf("ReceiveFile send EC payloadB failed: %v", err)
	}

	resp, err := receiveStream.CloseAndRecv()
	if err != nil {
		t.Fatalf("ReceiveFile EC close failed: %v", err)
	}
	if resp.GetError() != "" {
		t.Fatalf("ReceiveFile EC unexpected error response: %+v", resp)
	}
	if resp.GetBytesWritten() != uint64(len(expected)) {
		t.Fatalf("ReceiveFile EC bytes_written mismatch: got %d want %d", resp.GetBytesWritten(), len(expected))
	}

	copyStream, err := grpcClient.CopyFile(ctx, &volume_server_pb.CopyFileRequest{
		VolumeId:           volumeID,
		Collection:         collection,
		IsEcVolume:         true,
		Ext:                ext,
		CompactionRevision: math.MaxUint32,
		StopOffset:         uint64(len(expected)),
	})
	if err != nil {
		t.Fatalf("CopyFile for received EC data start failed: %v", err)
	}

	var copied []byte
	for {
		msg, recvErr := copyStream.Recv()
		if recvErr == io.EOF {
			break
		}
		if recvErr != nil {
			t.Fatalf("CopyFile for received EC data recv failed: %v", recvErr)
		}
		copied = append(copied, msg.GetFileContent()...)
	}

	if string(copied) != string(expected) {
		t.Fatalf("received EC file data mismatch: got %q want %q", string(copied), string(expected))
	}
}

func TestCopyFileEcVolumeIgnoreMissingSourcePaths(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	streamNoIgnore, err := grpcClient.CopyFile(ctx, &volume_server_pb.CopyFileRequest{
		VolumeId:                 99601,
		Collection:               "ec-copy-missing",
		IsEcVolume:               true,
		Ext:                      ".ec00",
		CompactionRevision:       math.MaxUint32,
		StopOffset:               1,
		IgnoreSourceFileNotFound: false,
	})
	if err == nil {
		_, err = streamNoIgnore.Recv()
	}
	if err == nil || !strings.Contains(err.Error(), "not found ec volume id") {
		t.Fatalf("CopyFile EC missing source error mismatch: %v", err)
	}

	streamIgnore, err := grpcClient.CopyFile(ctx, &volume_server_pb.CopyFileRequest{
		VolumeId:                 99602,
		Collection:               "ec-copy-missing",
		IsEcVolume:               true,
		Ext:                      ".ec00",
		CompactionRevision:       math.MaxUint32,
		StopOffset:               1,
		IgnoreSourceFileNotFound: true,
	})
	if err != nil {
		t.Fatalf("CopyFile EC ignore-missing start failed: %v", err)
	}
	_, err = streamIgnore.Recv()
	if err != io.EOF {
		t.Fatalf("CopyFile EC ignore-missing expected EOF, got: %v", err)
	}
}

test/volume_server/grpc/copy_sync_test.go (new file, 284 lines)
@@ -0,0 +1,284 @@
package volume_server_grpc_test

import (
	"context"
	"io"
	"net/http"
	"strings"
	"testing"
	"time"

	"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
	"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
	"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
)

func TestVolumeSyncStatusAndReadVolumeFileStatus(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()

	const volumeID = uint32(41)
	framework.AllocateVolume(t, grpcClient, volumeID, "")

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	syncResp, err := grpcClient.VolumeSyncStatus(ctx, &volume_server_pb.VolumeSyncStatusRequest{VolumeId: volumeID})
	if err != nil {
		t.Fatalf("VolumeSyncStatus failed: %v", err)
	}
	if syncResp.GetVolumeId() != volumeID {
		t.Fatalf("VolumeSyncStatus volume id mismatch: got %d want %d", syncResp.GetVolumeId(), volumeID)
	}

	statusResp, err := grpcClient.ReadVolumeFileStatus(ctx, &volume_server_pb.ReadVolumeFileStatusRequest{VolumeId: volumeID})
	if err != nil {
		t.Fatalf("ReadVolumeFileStatus failed: %v", err)
	}
	if statusResp.GetVolumeId() != volumeID {
		t.Fatalf("ReadVolumeFileStatus volume id mismatch: got %d want %d", statusResp.GetVolumeId(), volumeID)
	}
	if statusResp.GetVersion() == 0 {
		t.Fatalf("ReadVolumeFileStatus expected non-zero version")
	}
}

func TestCopyAndStreamMethodsMissingVolumePaths(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	_, err := grpcClient.VolumeSyncStatus(ctx, &volume_server_pb.VolumeSyncStatusRequest{VolumeId: 98761})
	if err == nil {
		t.Fatalf("VolumeSyncStatus should fail for missing volume")
	}

	incrementalStream, err := grpcClient.VolumeIncrementalCopy(ctx, &volume_server_pb.VolumeIncrementalCopyRequest{VolumeId: 98762, SinceNs: 0})
	if err == nil {
		_, err = incrementalStream.Recv()
	}
	if err == nil || !strings.Contains(err.Error(), "not found volume") {
		t.Fatalf("VolumeIncrementalCopy missing-volume error mismatch: %v", err)
	}

	readAllStream, err := grpcClient.ReadAllNeedles(ctx, &volume_server_pb.ReadAllNeedlesRequest{VolumeIds: []uint32{98763}})
	if err == nil {
		_, err = readAllStream.Recv()
	}
	if err == nil || !strings.Contains(err.Error(), "not found volume") {
		t.Fatalf("ReadAllNeedles missing-volume error mismatch: %v", err)
	}

	copyFileStream, err := grpcClient.CopyFile(ctx, &volume_server_pb.CopyFileRequest{VolumeId: 98764, Ext: ".dat", StopOffset: 1})
	if err == nil {
		_, err = copyFileStream.Recv()
	}
	if err == nil || !strings.Contains(err.Error(), "not found volume") {
		t.Fatalf("CopyFile missing-volume error mismatch: %v", err)
	}

	_, err = grpcClient.ReadVolumeFileStatus(ctx, &volume_server_pb.ReadVolumeFileStatusRequest{VolumeId: 98765})
	if err == nil || !strings.Contains(err.Error(), "not found volume") {
		t.Fatalf("ReadVolumeFileStatus missing-volume error mismatch: %v", err)
	}
}

func TestVolumeCopyAndReceiveFileMaintenanceRejection(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	stateResp, err := grpcClient.GetState(ctx, &volume_server_pb.GetStateRequest{})
	if err != nil {
		t.Fatalf("GetState failed: %v", err)
	}
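	// Assumption: SetState echoes back the version obtained from GetState as an
	// optimistic-concurrency guard, so stale state updates would be rejected.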
	_, err = grpcClient.SetState(ctx, &volume_server_pb.SetStateRequest{
		State: &volume_server_pb.VolumeServerState{Maintenance: true, Version: stateResp.GetState().GetVersion()},
	})
	if err != nil {
		t.Fatalf("SetState maintenance=true failed: %v", err)
	}

	copyStream, err := grpcClient.VolumeCopy(ctx, &volume_server_pb.VolumeCopyRequest{VolumeId: 1, SourceDataNode: "127.0.0.1:1234"})
	if err == nil {
		_, err = copyStream.Recv()
	}
	if err == nil || !strings.Contains(err.Error(), "maintenance mode") {
		t.Fatalf("VolumeCopy maintenance error mismatch: %v", err)
	}

	receiveClient, err := grpcClient.ReceiveFile(ctx)
	if err != nil {
		t.Fatalf("ReceiveFile client creation failed: %v", err)
	}
	_ = receiveClient.Send(&volume_server_pb.ReceiveFileRequest{
		Data: &volume_server_pb.ReceiveFileRequest_Info{
			Info: &volume_server_pb.ReceiveFileInfo{VolumeId: 1, Ext: ".dat"},
		},
	})
	_, err = receiveClient.CloseAndRecv()
	if err == nil || !strings.Contains(err.Error(), "maintenance mode") {
		t.Fatalf("ReceiveFile maintenance error mismatch: %v", err)
	}
}

func TestVolumeCopySuccessFromPeerAndMountsDestination(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	clusterHarness := framework.StartDualVolumeCluster(t, matrix.P1())
	sourceConn, sourceClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress(0))
	defer sourceConn.Close()
	destConn, destClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress(1))
	defer destConn.Close()

	const volumeID = uint32(42)
	framework.AllocateVolume(t, sourceClient, volumeID, "")

	httpClient := framework.NewHTTPClient()
	fid := framework.NewFileID(volumeID, 880001, 0x12345678)
	payload := []byte("volume-copy-success-payload")
	uploadResp := framework.UploadBytes(t, httpClient, clusterHarness.VolumeAdminURL(0), fid, payload)
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("upload to source expected 201, got %d", uploadResp.StatusCode)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	copyStream, err := destClient.VolumeCopy(ctx, &volume_server_pb.VolumeCopyRequest{
		VolumeId:   volumeID,
		Collection: "",
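		// Assumption: a server address can carry an explicit gRPC port in the
		// "host:port.grpcPort" form; the harness composes that form for the
		// source node below.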
		SourceDataNode: clusterHarness.VolumeAdminAddress(0) + "." + strings.Split(clusterHarness.VolumeGRPCAddress(0), ":")[1],
	})
	if err != nil {
		t.Fatalf("VolumeCopy start failed: %v", err)
	}

	sawFinalAppendTimestamp := false
	for {
		msg, recvErr := copyStream.Recv()
		if recvErr == io.EOF {
			break
		}
		if recvErr != nil {
			t.Fatalf("VolumeCopy recv failed: %v", recvErr)
		}
		if msg.GetLastAppendAtNs() > 0 {
			sawFinalAppendTimestamp = true
		}
	}
	if !sawFinalAppendTimestamp {
		t.Fatalf("VolumeCopy expected final response with last_append_at_ns")
	}

	destReadResp := framework.ReadBytes(t, httpClient, clusterHarness.VolumeAdminURL(1), fid)
	destReadBody := framework.ReadAllAndClose(t, destReadResp)
	if destReadResp.StatusCode != http.StatusOK {
		t.Fatalf("read from copied destination expected 200, got %d", destReadResp.StatusCode)
	}
	if string(destReadBody) != string(payload) {
		t.Fatalf("destination copied payload mismatch: got %q want %q", string(destReadBody), string(payload))
	}
}

func TestVolumeCopyOverwritesExistingDestinationVolume(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	clusterHarness := framework.StartDualVolumeCluster(t, matrix.P1())
	sourceConn, sourceClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress(0))
	defer sourceConn.Close()
	destConn, destClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress(1))
	defer destConn.Close()

	const volumeID = uint32(43)
	framework.AllocateVolume(t, sourceClient, volumeID, "")
	framework.AllocateVolume(t, destClient, volumeID, "")

	httpClient := framework.NewHTTPClient()
	fid := framework.NewFileID(volumeID, 880002, 0x23456789)
	sourcePayload := []byte("volume-copy-overwrite-source")
	destPayload := []byte("volume-copy-overwrite-destination-old")

	sourceUploadResp := framework.UploadBytes(t, httpClient, clusterHarness.VolumeAdminURL(0), fid, sourcePayload)
	_ = framework.ReadAllAndClose(t, sourceUploadResp)
	if sourceUploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("upload to source expected 201, got %d", sourceUploadResp.StatusCode)
	}

	destUploadResp := framework.UploadBytes(t, httpClient, clusterHarness.VolumeAdminURL(1), fid, destPayload)
	_ = framework.ReadAllAndClose(t, destUploadResp)
	if destUploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("upload to destination expected 201, got %d", destUploadResp.StatusCode)
	}

	destReadBeforeResp := framework.ReadBytes(t, httpClient, clusterHarness.VolumeAdminURL(1), fid)
	destReadBeforeBody := framework.ReadAllAndClose(t, destReadBeforeResp)
	if destReadBeforeResp.StatusCode != http.StatusOK {
		t.Fatalf("destination pre-copy read expected 200, got %d", destReadBeforeResp.StatusCode)
	}
	if string(destReadBeforeBody) != string(destPayload) {
		t.Fatalf("destination pre-copy payload mismatch: got %q want %q", string(destReadBeforeBody), string(destPayload))
	}

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	copyStream, err := destClient.VolumeCopy(ctx, &volume_server_pb.VolumeCopyRequest{
		VolumeId:       volumeID,
		Collection:     "",
		SourceDataNode: clusterHarness.VolumeAdminAddress(0) + "." + strings.Split(clusterHarness.VolumeGRPCAddress(0), ":")[1],
	})
	if err != nil {
		t.Fatalf("VolumeCopy overwrite start failed: %v", err)
	}

	sawFinalAppendTimestamp := false
	for {
		msg, recvErr := copyStream.Recv()
		if recvErr == io.EOF {
			break
		}
		if recvErr != nil {
			t.Fatalf("VolumeCopy overwrite recv failed: %v", recvErr)
		}
		if msg.GetLastAppendAtNs() > 0 {
			sawFinalAppendTimestamp = true
		}
	}
	if !sawFinalAppendTimestamp {
		t.Fatalf("VolumeCopy overwrite expected final response with last_append_at_ns")
	}

	destReadAfterResp := framework.ReadBytes(t, httpClient, clusterHarness.VolumeAdminURL(1), fid)
	destReadAfterBody := framework.ReadAllAndClose(t, destReadAfterResp)
	if destReadAfterResp.StatusCode != http.StatusOK {
		t.Fatalf("destination post-copy read expected 200, got %d", destReadAfterResp.StatusCode)
	}
	if string(destReadAfterBody) != string(sourcePayload) {
		t.Fatalf("destination post-copy payload mismatch: got %q want %q", string(destReadAfterBody), string(sourcePayload))
	}
}

test/volume_server/grpc/data_rw_test.go (new file, 146 lines)
@@ -0,0 +1,146 @@
package volume_server_grpc_test

import (
	"context"
	"strings"
	"testing"
	"time"

	"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
	"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
	"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
)

func TestReadNeedleBlobAndMetaMissingVolume(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	_, err := grpcClient.ReadNeedleBlob(ctx, &volume_server_pb.ReadNeedleBlobRequest{
		VolumeId: 99111,
		Offset:   0,
		Size:     16,
	})
	if err == nil {
		t.Fatalf("ReadNeedleBlob should fail for missing volume")
	}
	if !strings.Contains(err.Error(), "not found volume") {
		t.Fatalf("ReadNeedleBlob missing volume error mismatch: %v", err)
	}

	_, err = grpcClient.ReadNeedleMeta(ctx, &volume_server_pb.ReadNeedleMetaRequest{
		VolumeId: 99112,
		NeedleId: 1,
		Offset:   0,
		Size:     16,
	})
	if err == nil {
		t.Fatalf("ReadNeedleMeta should fail for missing volume")
	}
	if !strings.Contains(err.Error(), "not found volume") {
		t.Fatalf("ReadNeedleMeta missing volume error mismatch: %v", err)
	}
}

func TestWriteNeedleBlobMaintenanceAndMissingVolume(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	_, err := grpcClient.WriteNeedleBlob(ctx, &volume_server_pb.WriteNeedleBlobRequest{
		VolumeId:   99113,
		NeedleId:   1,
		NeedleBlob: []byte("abc"),
		Size:       3,
	})
	if err == nil {
		t.Fatalf("WriteNeedleBlob should fail for missing volume")
	}
	if !strings.Contains(err.Error(), "not found volume") {
		t.Fatalf("WriteNeedleBlob missing volume error mismatch: %v", err)
	}

	stateResp, err := grpcClient.GetState(ctx, &volume_server_pb.GetStateRequest{})
	if err != nil {
		t.Fatalf("GetState failed: %v", err)
	}
	_, err = grpcClient.SetState(ctx, &volume_server_pb.SetStateRequest{
		State: &volume_server_pb.VolumeServerState{Maintenance: true, Version: stateResp.GetState().GetVersion()},
	})
	if err != nil {
		t.Fatalf("SetState maintenance=true failed: %v", err)
	}

	_, err = grpcClient.WriteNeedleBlob(ctx, &volume_server_pb.WriteNeedleBlobRequest{
		VolumeId:   1,
		NeedleId:   2,
		NeedleBlob: []byte("def"),
		Size:       3,
	})
	if err == nil {
		t.Fatalf("WriteNeedleBlob should fail in maintenance mode")
	}
	if !strings.Contains(err.Error(), "maintenance mode") {
		t.Fatalf("WriteNeedleBlob maintenance mode error mismatch: %v", err)
	}
}
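
// Note: ReadNeedleBlob addresses a needle by raw (offset, size) taken from the
// volume's .idx entries, so the out-of-range offsets in the next test fail at
// read time even though the volume itself exists.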
func TestReadNeedleBlobAndMetaInvalidOffsets(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()

	const volumeID = uint32(92)
	framework.AllocateVolume(t, grpcClient, volumeID, "")

	httpClient := framework.NewHTTPClient()
	fid := framework.NewFileID(volumeID, 880001, 0xCCDD1122)
	uploadResp := framework.UploadBytes(t, httpClient, clusterHarness.VolumeAdminURL(), fid, []byte("invalid-offset-check"))
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != 201 {
		t.Fatalf("upload expected 201, got %d", uploadResp.StatusCode)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	_, err := grpcClient.ReadNeedleBlob(ctx, &volume_server_pb.ReadNeedleBlobRequest{
		VolumeId: volumeID,
		Offset:   1 << 40,
		Size:     64,
	})
	if err == nil {
		t.Fatalf("ReadNeedleBlob should fail for invalid offset")
	}
	if !strings.Contains(strings.ToLower(err.Error()), "read needle blob") {
		t.Fatalf("ReadNeedleBlob invalid offset error mismatch: %v", err)
	}

	_, err = grpcClient.ReadNeedleMeta(ctx, &volume_server_pb.ReadNeedleMetaRequest{
		VolumeId: volumeID,
		NeedleId: 880001,
		Offset:   1 << 40,
		Size:     64,
	})
	if err == nil {
		t.Fatalf("ReadNeedleMeta should fail for invalid offset")
	}
}

test/volume_server/grpc/data_stream_success_test.go (new file, 273 lines)
@@ -0,0 +1,273 @@
package volume_server_grpc_test

import (
	"context"
	"io"
	"strings"
	"testing"
	"time"

	"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
	"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
	"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
	"github.com/seaweedfs/seaweedfs/weed/storage/idx"
	"github.com/seaweedfs/seaweedfs/weed/storage/types"
)

func TestReadWriteNeedleBlobAndMetaRoundTrip(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()

	const volumeID = uint32(83)
	const sourceNeedleID = uint64(333333)
	const sourceCookie = uint32(0xABCD0102)
	const clonedNeedleID = uint64(333334)

	framework.AllocateVolume(t, grpcClient, volumeID, "")

	client := framework.NewHTTPClient()
	payload := []byte("blob-roundtrip-content")
	fid := framework.NewFileID(volumeID, sourceNeedleID, sourceCookie)
	uploadResp := framework.UploadBytes(t, client, clusterHarness.VolumeAdminURL(), fid, payload)
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != 201 {
		t.Fatalf("upload expected 201, got %d", uploadResp.StatusCode)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	fileStatus, err := grpcClient.ReadVolumeFileStatus(ctx, &volume_server_pb.ReadVolumeFileStatusRequest{VolumeId: volumeID})
	if err != nil {
		t.Fatalf("ReadVolumeFileStatus failed: %v", err)
	}
	if fileStatus.GetIdxFileSize() == 0 {
		t.Fatalf("expected non-zero idx file size after upload")
	}

	idxBytes := copyFileBytes(t, grpcClient, &volume_server_pb.CopyFileRequest{
		VolumeId:           volumeID,
		Ext:                ".idx",
		CompactionRevision: fileStatus.GetCompactionRevision(),
		StopOffset:         fileStatus.GetIdxFileSize(),
	})
	offset, size := findNeedleOffsetAndSize(t, idxBytes, sourceNeedleID)

	blobResp, err := grpcClient.ReadNeedleBlob(ctx, &volume_server_pb.ReadNeedleBlobRequest{
		VolumeId: volumeID,
		Offset:   offset,
		Size:     size,
	})
	if err != nil {
		t.Fatalf("ReadNeedleBlob failed: %v", err)
	}
	if len(blobResp.GetNeedleBlob()) == 0 {
		t.Fatalf("ReadNeedleBlob returned empty blob")
	}

	metaResp, err := grpcClient.ReadNeedleMeta(ctx, &volume_server_pb.ReadNeedleMetaRequest{
		VolumeId: volumeID,
		NeedleId: sourceNeedleID,
		Offset:   offset,
		Size:     size,
	})
	if err != nil {
		t.Fatalf("ReadNeedleMeta failed: %v", err)
	}
	if metaResp.GetCookie() != sourceCookie {
		t.Fatalf("ReadNeedleMeta cookie mismatch: got %d want %d", metaResp.GetCookie(), sourceCookie)
	}

	_, err = grpcClient.WriteNeedleBlob(ctx, &volume_server_pb.WriteNeedleBlobRequest{
		VolumeId:   volumeID,
		NeedleId:   clonedNeedleID,
		Size:       size,
		NeedleBlob: blobResp.GetNeedleBlob(),
	})
	if err != nil {
		t.Fatalf("WriteNeedleBlob failed: %v", err)
	}

	clonedStatus, err := grpcClient.VolumeNeedleStatus(ctx, &volume_server_pb.VolumeNeedleStatusRequest{
		VolumeId: volumeID,
		NeedleId: clonedNeedleID,
	})
	if err != nil {
		t.Fatalf("VolumeNeedleStatus for cloned needle failed: %v", err)
	}
	if clonedStatus.GetNeedleId() != sourceNeedleID {
		t.Fatalf("cloned needle status id mismatch: got %d want %d", clonedStatus.GetNeedleId(), sourceNeedleID)
	}
	if clonedStatus.GetCookie() != sourceCookie {
		t.Fatalf("cloned needle cookie mismatch: got %d want %d", clonedStatus.GetCookie(), sourceCookie)
	}

	clonedReadResp := framework.ReadBytes(t, client, clusterHarness.VolumeAdminURL(), framework.NewFileID(volumeID, clonedNeedleID, sourceCookie))
	clonedReadBody := framework.ReadAllAndClose(t, clonedReadResp)
	if clonedReadResp.StatusCode != 200 {
		t.Fatalf("cloned needle GET expected 200, got %d", clonedReadResp.StatusCode)
	}
	if string(clonedReadBody) != string(payload) {
		t.Fatalf("cloned needle body mismatch: got %q want %q", string(clonedReadBody), string(payload))
	}
}
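
// Note: the blob returned by ReadNeedleBlob carries the needle's own header
// (id and cookie), so writing it back under a new NeedleId keys a second index
// entry while the embedded id stays the source's; that is why the cloned
// needle's status above still reports the original id and cookie.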
func TestReadAllNeedlesStreamsUploadedRecords(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()

	const volumeID = uint32(84)
	framework.AllocateVolume(t, grpcClient, volumeID, "")

	client := framework.NewHTTPClient()
	expected := map[uint64]string{
		444441: "read-all-needle-one",
		444442: "read-all-needle-two",
	}
	for key, body := range expected {
		resp := framework.UploadBytes(t, client, clusterHarness.VolumeAdminURL(), framework.NewFileID(volumeID, key, 0xA0B0C0D0), []byte(body))
		_ = framework.ReadAllAndClose(t, resp)
		if resp.StatusCode != 201 {
			t.Fatalf("upload for key %d expected 201, got %d", key, resp.StatusCode)
		}
	}

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	stream, err := grpcClient.ReadAllNeedles(ctx, &volume_server_pb.ReadAllNeedlesRequest{VolumeIds: []uint32{volumeID}})
	if err != nil {
		t.Fatalf("ReadAllNeedles start failed: %v", err)
	}

	seen := map[uint64]string{}
	for {
		msg, recvErr := stream.Recv()
		if recvErr == io.EOF {
			break
		}
		if recvErr != nil {
			t.Fatalf("ReadAllNeedles recv failed: %v", recvErr)
		}
		if _, wanted := expected[msg.GetNeedleId()]; wanted {
			seen[msg.GetNeedleId()] = string(msg.GetNeedleBlob())
		}
	}

	for key, body := range expected {
		got, found := seen[key]
		if !found {
			t.Fatalf("ReadAllNeedles missing key %d in stream", key)
		}
		if got != body {
			t.Fatalf("ReadAllNeedles body mismatch for key %d: got %q want %q", key, got, body)
		}
	}
}

func TestReadAllNeedlesExistingThenMissingVolumeAbortsStream(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()

	const existingVolumeID = uint32(85)
	const missingVolumeID = uint32(98585)
	const needleID = uint64(445551)
	framework.AllocateVolume(t, grpcClient, existingVolumeID, "")

	client := framework.NewHTTPClient()
	fid := framework.NewFileID(existingVolumeID, needleID, 0xAA11BB22)
	payload := "read-all-existing-then-missing"
	uploadResp := framework.UploadBytes(t, client, clusterHarness.VolumeAdminURL(), fid, []byte(payload))
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != 201 {
		t.Fatalf("upload expected 201, got %d", uploadResp.StatusCode)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	stream, err := grpcClient.ReadAllNeedles(ctx, &volume_server_pb.ReadAllNeedlesRequest{
		VolumeIds: []uint32{existingVolumeID, missingVolumeID},
	})
	if err != nil {
		t.Fatalf("ReadAllNeedles start failed: %v", err)
	}

	seenUploadedNeedle := false
	for {
		msg, recvErr := stream.Recv()
		if recvErr == io.EOF {
			t.Fatalf("ReadAllNeedles expected stream error for missing volume, got EOF")
		}
		if recvErr != nil {
			if !strings.Contains(recvErr.Error(), "not found volume id") {
				t.Fatalf("ReadAllNeedles missing-volume error mismatch: %v", recvErr)
			}
			break
		}
		if msg.GetNeedleId() == needleID && string(msg.GetNeedleBlob()) == payload {
			seenUploadedNeedle = true
		}
	}

	if !seenUploadedNeedle {
		t.Fatalf("ReadAllNeedles should stream entries from existing volume before missing-volume abort")
	}
}

func copyFileBytes(t testing.TB, grpcClient volume_server_pb.VolumeServerClient, req *volume_server_pb.CopyFileRequest) []byte {
	t.Helper()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	stream, err := grpcClient.CopyFile(ctx, req)
	if err != nil {
		t.Fatalf("CopyFile start failed: %v", err)
	}

	var out []byte
	for {
		msg, recvErr := stream.Recv()
		if recvErr == io.EOF {
			return out
		}
		if recvErr != nil {
			t.Fatalf("CopyFile recv failed: %v", recvErr)
		}
		out = append(out, msg.GetFileContent()...)
	}
}
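
// Note: a .idx file is a dense array of fixed-size (key, offset, size)
// entries; the helper below parses them with idx.IdxFileEntry and skips
// tombstones (zero offset or non-positive size).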
func findNeedleOffsetAndSize(t testing.TB, idxBytes []byte, needleID uint64) (offset int64, size int32) {
	t.Helper()

	for i := 0; i+types.NeedleMapEntrySize <= len(idxBytes); i += types.NeedleMapEntrySize {
		key, entryOffset, entrySize := idx.IdxFileEntry(idxBytes[i : i+types.NeedleMapEntrySize])
		if uint64(key) != needleID {
			continue
		}
		if entryOffset.IsZero() || entrySize <= 0 {
			continue
		}
		return entryOffset.ToActualOffset(), int32(entrySize)
	}

	t.Fatalf("needle id %d not found in idx entries", needleID)
	return 0, 0
}

test/volume_server/grpc/erasure_coding_test.go (new file, 777 lines)
@@ -0,0 +1,777 @@
package volume_server_grpc_test

import (
	"context"
	"io"
	"math"
	"net/http"
	"strings"
	"testing"
	"time"

	"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
	"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
	"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
	"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func TestEcMaintenanceModeRejections(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	stateResp, err := grpcClient.GetState(ctx, &volume_server_pb.GetStateRequest{})
	if err != nil {
		t.Fatalf("GetState failed: %v", err)
	}
	_, err = grpcClient.SetState(ctx, &volume_server_pb.SetStateRequest{
		State: &volume_server_pb.VolumeServerState{
			Maintenance: true,
			Version:     stateResp.GetState().GetVersion(),
		},
	})
	if err != nil {
		t.Fatalf("SetState maintenance=true failed: %v", err)
	}

	_, err = grpcClient.VolumeEcShardsGenerate(ctx, &volume_server_pb.VolumeEcShardsGenerateRequest{VolumeId: 1, Collection: ""})
	if err == nil || !strings.Contains(err.Error(), "maintenance mode") {
		t.Fatalf("VolumeEcShardsGenerate maintenance error mismatch: %v", err)
	}

	_, err = grpcClient.VolumeEcShardsCopy(ctx, &volume_server_pb.VolumeEcShardsCopyRequest{
		VolumeId:       1,
		Collection:     "",
		SourceDataNode: "127.0.0.1:1",
		ShardIds:       []uint32{0},
	})
	if err == nil || !strings.Contains(err.Error(), "maintenance mode") {
		t.Fatalf("VolumeEcShardsCopy maintenance error mismatch: %v", err)
	}

	_, err = grpcClient.VolumeEcShardsDelete(ctx, &volume_server_pb.VolumeEcShardsDeleteRequest{
		VolumeId:   1,
		Collection: "",
		ShardIds:   []uint32{0},
	})
	if err == nil || !strings.Contains(err.Error(), "maintenance mode") {
		t.Fatalf("VolumeEcShardsDelete maintenance error mismatch: %v", err)
	}

	_, err = grpcClient.VolumeEcBlobDelete(ctx, &volume_server_pb.VolumeEcBlobDeleteRequest{
		VolumeId:   1,
		Collection: "",
		FileKey:    1,
		Version:    3,
	})
	if err == nil || !strings.Contains(err.Error(), "maintenance mode") {
		t.Fatalf("VolumeEcBlobDelete maintenance error mismatch: %v", err)
	}

	_, err = grpcClient.VolumeEcShardsToVolume(ctx, &volume_server_pb.VolumeEcShardsToVolumeRequest{
		VolumeId:   1,
		Collection: "",
	})
	if err == nil || !strings.Contains(err.Error(), "maintenance mode") {
		t.Fatalf("VolumeEcShardsToVolume maintenance error mismatch: %v", err)
	}
}

func TestEcMissingInvalidAndNoopPaths(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	_, err := grpcClient.VolumeEcShardsGenerate(ctx, &volume_server_pb.VolumeEcShardsGenerateRequest{
		VolumeId:   98791,
		Collection: "",
	})
	if err == nil || !strings.Contains(err.Error(), "not found") {
		t.Fatalf("VolumeEcShardsGenerate missing-volume error mismatch: %v", err)
	}

	rebuildResp, err := grpcClient.VolumeEcShardsRebuild(ctx, &volume_server_pb.VolumeEcShardsRebuildRequest{
		VolumeId:   98792,
		Collection: "ec-rebuild",
	})
	if err != nil {
		t.Fatalf("VolumeEcShardsRebuild missing-volume should return empty success, got: %v", err)
	}
	if len(rebuildResp.GetRebuiltShardIds()) != 0 {
		t.Fatalf("VolumeEcShardsRebuild expected no rebuilt shards for missing volume, got %v", rebuildResp.GetRebuiltShardIds())
	}

	_, err = grpcClient.VolumeEcShardsCopy(ctx, &volume_server_pb.VolumeEcShardsCopyRequest{
		VolumeId:       98793,
		Collection:     "ec-copy",
		SourceDataNode: "127.0.0.1:1",
		ShardIds:       []uint32{0},
		DiskId:         99,
	})
	if err == nil || !strings.Contains(err.Error(), "invalid disk_id") {
		t.Fatalf("VolumeEcShardsCopy invalid-disk error mismatch: %v", err)
	}

	_, err = grpcClient.VolumeEcShardsDelete(ctx, &volume_server_pb.VolumeEcShardsDeleteRequest{
		VolumeId:   98794,
		Collection: "ec-delete",
		ShardIds:   []uint32{0, 1},
	})
	if err != nil {
		t.Fatalf("VolumeEcShardsDelete missing-volume should be no-op success, got: %v", err)
	}

	_, err = grpcClient.VolumeEcShardsMount(ctx, &volume_server_pb.VolumeEcShardsMountRequest{
		VolumeId:   98795,
		Collection: "ec-mount",
		ShardIds:   []uint32{0},
	})
	if err == nil {
		t.Fatalf("VolumeEcShardsMount should fail for missing EC shards")
	}

	_, err = grpcClient.VolumeEcShardsUnmount(ctx, &volume_server_pb.VolumeEcShardsUnmountRequest{
		VolumeId: 98796,
		ShardIds: []uint32{0},
	})
	if err != nil {
		t.Fatalf("VolumeEcShardsUnmount missing shards should be no-op success, got: %v", err)
	}

	readStream, err := grpcClient.VolumeEcShardRead(ctx, &volume_server_pb.VolumeEcShardReadRequest{
		VolumeId: 98797,
		ShardId:  0,
		Offset:   0,
		Size:     1,
	})
	if err == nil {
		_, err = readStream.Recv()
	}
	if err == nil || err == io.EOF {
		t.Fatalf("VolumeEcShardRead should fail for missing EC volume")
	}

	_, err = grpcClient.VolumeEcBlobDelete(ctx, &volume_server_pb.VolumeEcBlobDeleteRequest{
		VolumeId:   98798,
		Collection: "ec-blob",
		FileKey:    1,
		Version:    3,
	})
	if err != nil {
		t.Fatalf("VolumeEcBlobDelete missing local EC volume should be no-op success, got: %v", err)
	}

	_, err = grpcClient.VolumeEcShardsToVolume(ctx, &volume_server_pb.VolumeEcShardsToVolumeRequest{
		VolumeId:   98799,
		Collection: "ec-to-volume",
	})
	if err == nil || !strings.Contains(err.Error(), "not found") {
		t.Fatalf("VolumeEcShardsToVolume missing-volume error mismatch: %v", err)
	}

	_, err = grpcClient.VolumeEcShardsInfo(ctx, &volume_server_pb.VolumeEcShardsInfoRequest{
		VolumeId: 98800,
	})
	if err == nil || !strings.Contains(err.Error(), "not found") {
		t.Fatalf("VolumeEcShardsInfo missing-volume error mismatch: %v", err)
	}
}

func TestEcGenerateMountInfoUnmountLifecycle(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()

	const volumeID = uint32(115)
	framework.AllocateVolume(t, grpcClient, volumeID, "")

	httpClient := framework.NewHTTPClient()
	fid := framework.NewFileID(volumeID, 990001, 0x1234ABCD)
	uploadResp := framework.UploadBytes(t, httpClient, clusterHarness.VolumeAdminURL(), fid, []byte("ec-generate-lifecycle-content"))
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("upload expected 201, got %d", uploadResp.StatusCode)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	_, err := grpcClient.VolumeEcShardsGenerate(ctx, &volume_server_pb.VolumeEcShardsGenerateRequest{
		VolumeId:   volumeID,
		Collection: "",
	})
	if err != nil {
		t.Fatalf("VolumeEcShardsGenerate success path failed: %v", err)
	}

	_, err = grpcClient.VolumeEcShardsMount(ctx, &volume_server_pb.VolumeEcShardsMountRequest{
		VolumeId:   volumeID,
		Collection: "",
		ShardIds:   []uint32{0},
	})
	if err != nil {
		t.Fatalf("VolumeEcShardsMount success path failed: %v", err)
	}

	infoResp, err := grpcClient.VolumeEcShardsInfo(ctx, &volume_server_pb.VolumeEcShardsInfoRequest{
		VolumeId: volumeID,
	})
	if err != nil {
		t.Fatalf("VolumeEcShardsInfo after mount failed: %v", err)
	}
	if len(infoResp.GetEcShardInfos()) == 0 {
		t.Fatalf("VolumeEcShardsInfo expected non-empty shard infos after mount")
	}
	if infoResp.GetVolumeSize() == 0 {
		t.Fatalf("VolumeEcShardsInfo expected non-zero volume size after mount")
	}

	_, err = grpcClient.VolumeEcShardsUnmount(ctx, &volume_server_pb.VolumeEcShardsUnmountRequest{
		VolumeId: volumeID,
		ShardIds: []uint32{0},
	})
	if err != nil {
		t.Fatalf("VolumeEcShardsUnmount success path failed: %v", err)
	}

	_, err = grpcClient.VolumeEcShardsInfo(ctx, &volume_server_pb.VolumeEcShardsInfoRequest{
		VolumeId: volumeID,
	})
	if err == nil || !strings.Contains(err.Error(), "not found") {
		t.Fatalf("VolumeEcShardsInfo after unmount expected not-found error, got: %v", err)
	}
}
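
// Assumption: VolumeEcBlobDelete takes the needle-format version (here taken
// from needle.GetCurrentVersion()) so the server can interpret the blob
// header; the next test also checks that repeating the delete is idempotent.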
func TestEcShardReadAndBlobDeleteLifecycle(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()

	const volumeID = uint32(116)
	const fileKey = uint64(990002)
	framework.AllocateVolume(t, grpcClient, volumeID, "")

	httpClient := framework.NewHTTPClient()
	fid := framework.NewFileID(volumeID, fileKey, 0x2233CCDD)
	uploadResp := framework.UploadBytes(t, httpClient, clusterHarness.VolumeAdminURL(), fid, []byte("ec-shard-read-delete-content"))
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("upload expected 201, got %d", uploadResp.StatusCode)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	_, err := grpcClient.VolumeEcShardsGenerate(ctx, &volume_server_pb.VolumeEcShardsGenerateRequest{
		VolumeId:   volumeID,
		Collection: "",
	})
	if err != nil {
		t.Fatalf("VolumeEcShardsGenerate failed: %v", err)
	}

	_, err = grpcClient.VolumeEcShardsMount(ctx, &volume_server_pb.VolumeEcShardsMountRequest{
		VolumeId:   volumeID,
		Collection: "",
		ShardIds:   []uint32{0},
	})
	if err != nil {
		t.Fatalf("VolumeEcShardsMount failed: %v", err)
	}

	readStream, err := grpcClient.VolumeEcShardRead(ctx, &volume_server_pb.VolumeEcShardReadRequest{
		VolumeId: volumeID,
		ShardId:  0,
		Offset:   0,
		Size:     1,
	})
	if err != nil {
		t.Fatalf("VolumeEcShardRead start failed: %v", err)
	}
	firstChunk, err := readStream.Recv()
	if err != nil {
		t.Fatalf("VolumeEcShardRead recv failed: %v", err)
	}
	if len(firstChunk.GetData()) == 0 {
		t.Fatalf("VolumeEcShardRead expected non-empty data chunk before deletion")
	}

	_, err = grpcClient.VolumeEcBlobDelete(ctx, &volume_server_pb.VolumeEcBlobDeleteRequest{
		VolumeId:   volumeID,
		Collection: "",
		FileKey:    fileKey,
		Version:    uint32(needle.GetCurrentVersion()),
	})
	if err != nil {
		t.Fatalf("VolumeEcBlobDelete first delete failed: %v", err)
	}

	_, err = grpcClient.VolumeEcBlobDelete(ctx, &volume_server_pb.VolumeEcBlobDeleteRequest{
		VolumeId:   volumeID,
		Collection: "",
		FileKey:    fileKey,
		Version:    uint32(needle.GetCurrentVersion()),
	})
	if err != nil {
		t.Fatalf("VolumeEcBlobDelete second delete should be idempotent success, got: %v", err)
	}

	deletedStream, err := grpcClient.VolumeEcShardRead(ctx, &volume_server_pb.VolumeEcShardReadRequest{
		VolumeId: volumeID,
		ShardId:  0,
		FileKey:  fileKey,
		Offset:   0,
		Size:     1,
	})
	if err != nil {
		t.Fatalf("VolumeEcShardRead deleted-check start failed: %v", err)
	}
	deletedMsg, err := deletedStream.Recv()
	if err != nil {
		t.Fatalf("VolumeEcShardRead deleted-check recv failed: %v", err)
	}
	if !deletedMsg.GetIsDeleted() {
		t.Fatalf("VolumeEcShardRead expected IsDeleted=true after blob delete")
	}
	_, err = deletedStream.Recv()
	if err != io.EOF {
		t.Fatalf("VolumeEcShardRead deleted-check expected EOF after deleted marker, got: %v", err)
	}
}
|
||||
|
||||
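// TestEcRebuildMissingShardLifecycle deletes shard 0, confirms it can no
// longer be mounted, rebuilds it from the surviving shards, and mounts the
// rebuilt shard again.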
func TestEcRebuildMissingShardLifecycle(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping integration test in short mode")
    }

    clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
    conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
    defer conn.Close()

    const volumeID = uint32(117)
    framework.AllocateVolume(t, grpcClient, volumeID, "")

    httpClient := framework.NewHTTPClient()
    fid := framework.NewFileID(volumeID, 990003, 0x3344DDEE)
    uploadResp := framework.UploadBytes(t, httpClient, clusterHarness.VolumeAdminURL(), fid, []byte("ec-rebuild-shard-content"))
    _ = framework.ReadAllAndClose(t, uploadResp)
    if uploadResp.StatusCode != http.StatusCreated {
        t.Fatalf("upload expected 201, got %d", uploadResp.StatusCode)
    }

    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()

    _, err := grpcClient.VolumeEcShardsGenerate(ctx, &volume_server_pb.VolumeEcShardsGenerateRequest{
        VolumeId:   volumeID,
        Collection: "",
    })
    if err != nil {
        t.Fatalf("VolumeEcShardsGenerate failed: %v", err)
    }

    _, err = grpcClient.VolumeEcShardsDelete(ctx, &volume_server_pb.VolumeEcShardsDeleteRequest{
        VolumeId:   volumeID,
        Collection: "",
        ShardIds:   []uint32{0},
    })
    if err != nil {
        t.Fatalf("VolumeEcShardsDelete shard 0 failed: %v", err)
    }

    _, err = grpcClient.VolumeEcShardsMount(ctx, &volume_server_pb.VolumeEcShardsMountRequest{
        VolumeId:   volumeID,
        Collection: "",
        ShardIds:   []uint32{0},
    })
    if err == nil {
        t.Fatalf("VolumeEcShardsMount should fail when shard 0 has been deleted")
    }

    rebuildResp, err := grpcClient.VolumeEcShardsRebuild(ctx, &volume_server_pb.VolumeEcShardsRebuildRequest{
        VolumeId:   volumeID,
        Collection: "",
    })
    if err != nil {
        t.Fatalf("VolumeEcShardsRebuild failed: %v", err)
    }
    if len(rebuildResp.GetRebuiltShardIds()) == 0 {
        t.Fatalf("VolumeEcShardsRebuild expected rebuilt shard ids")
    }
    foundShard0 := false
    for _, shardID := range rebuildResp.GetRebuiltShardIds() {
        if shardID == 0 {
            foundShard0 = true
            break
        }
    }
    if !foundShard0 {
        t.Fatalf("VolumeEcShardsRebuild expected shard 0 to be rebuilt, got %v", rebuildResp.GetRebuiltShardIds())
    }

    _, err = grpcClient.VolumeEcShardsMount(ctx, &volume_server_pb.VolumeEcShardsMountRequest{
        VolumeId:   volumeID,
        Collection: "",
        ShardIds:   []uint32{0},
    })
    if err != nil {
        t.Fatalf("VolumeEcShardsMount shard 0 after rebuild failed: %v", err)
    }
}

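// TestEcShardsToVolumeMissingShardAndNoLiveEntries covers two
// VolumeEcShardsToVolume failure paths: a missing data shard, and an EC
// volume whose only needle was deleted before encoding.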
func TestEcShardsToVolumeMissingShardAndNoLiveEntries(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping integration test in short mode")
    }

    clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
    conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
    defer conn.Close()

    httpClient := framework.NewHTTPClient()
    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()

    t.Run("missing shard returns error", func(t *testing.T) {
        const volumeID = uint32(118)
        framework.AllocateVolume(t, grpcClient, volumeID, "")

        fid := framework.NewFileID(volumeID, 990004, 0x4455EEFF)
        uploadResp := framework.UploadBytes(t, httpClient, clusterHarness.VolumeAdminURL(), fid, []byte("ec-to-volume-missing-shard-content"))
        _ = framework.ReadAllAndClose(t, uploadResp)
        if uploadResp.StatusCode != http.StatusCreated {
            t.Fatalf("upload expected 201, got %d", uploadResp.StatusCode)
        }

        _, err := grpcClient.VolumeEcShardsGenerate(ctx, &volume_server_pb.VolumeEcShardsGenerateRequest{
            VolumeId:   volumeID,
            Collection: "",
        })
        if err != nil {
            t.Fatalf("VolumeEcShardsGenerate failed: %v", err)
        }

        _, err = grpcClient.VolumeEcShardsDelete(ctx, &volume_server_pb.VolumeEcShardsDeleteRequest{
            VolumeId:   volumeID,
            Collection: "",
            ShardIds:   []uint32{0},
        })
        if err != nil {
            t.Fatalf("VolumeEcShardsDelete shard 0 failed: %v", err)
        }

        _, err = grpcClient.VolumeEcShardsMount(ctx, &volume_server_pb.VolumeEcShardsMountRequest{
            VolumeId:   volumeID,
            Collection: "",
            ShardIds:   []uint32{1},
        })
        if err != nil {
            t.Fatalf("VolumeEcShardsMount shard 1 failed: %v", err)
        }

        _, err = grpcClient.VolumeEcShardsToVolume(ctx, &volume_server_pb.VolumeEcShardsToVolumeRequest{
            VolumeId:   volumeID,
            Collection: "",
        })
        if err == nil || !strings.Contains(err.Error(), "missing shard 0") {
            t.Fatalf("VolumeEcShardsToVolume missing-shard error mismatch: %v", err)
        }
    })

    t.Run("no live entries returns failed precondition", func(t *testing.T) {
        const volumeID = uint32(119)
        const needleID = uint64(990005)
        const cookie = uint32(0x5566FF11)
        framework.AllocateVolume(t, grpcClient, volumeID, "")

        fid := framework.NewFileID(volumeID, needleID, cookie)
        uploadResp := framework.UploadBytes(t, httpClient, clusterHarness.VolumeAdminURL(), fid, []byte("ec-no-live-entries-content"))
        _ = framework.ReadAllAndClose(t, uploadResp)
        if uploadResp.StatusCode != http.StatusCreated {
            t.Fatalf("upload expected 201, got %d", uploadResp.StatusCode)
        }

        deleteResp := framework.DoRequest(t, httpClient, mustNewRequest(t, http.MethodDelete, clusterHarness.VolumeAdminURL()+"/"+fid))
        _ = framework.ReadAllAndClose(t, deleteResp)
        if deleteResp.StatusCode != http.StatusAccepted {
            t.Fatalf("delete expected 202, got %d", deleteResp.StatusCode)
        }

        _, err := grpcClient.VolumeEcShardsGenerate(ctx, &volume_server_pb.VolumeEcShardsGenerateRequest{
            VolumeId:   volumeID,
            Collection: "",
        })
        if err != nil {
            t.Fatalf("VolumeEcShardsGenerate failed: %v", err)
        }

        _, err = grpcClient.VolumeEcShardsMount(ctx, &volume_server_pb.VolumeEcShardsMountRequest{
            VolumeId:   volumeID,
            Collection: "",
            ShardIds:   []uint32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
        })
        if err != nil {
            t.Fatalf("VolumeEcShardsMount data shards failed: %v", err)
        }

        _, err = grpcClient.VolumeEcShardsToVolume(ctx, &volume_server_pb.VolumeEcShardsToVolumeRequest{
            VolumeId:   volumeID,
            Collection: "",
        })
        if err == nil {
            t.Fatalf("VolumeEcShardsToVolume expected failed-precondition error when no live entries")
        }
        if status.Code(err) != codes.FailedPrecondition {
            t.Fatalf("VolumeEcShardsToVolume no-live-entries expected FailedPrecondition, got %v (%v)", status.Code(err), err)
        }
        if !strings.Contains(err.Error(), erasure_coding.EcNoLiveEntriesSubstring) {
            t.Fatalf("VolumeEcShardsToVolume no-live-entries error should mention %q, got %v", erasure_coding.EcNoLiveEntriesSubstring, err)
        }
    })
}

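// TestEcShardsToVolumeSuccessRoundTrip converts an EC volume back into a
// normal volume and verifies the original payload is still readable over
// HTTP afterwards.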
func TestEcShardsToVolumeSuccessRoundTrip(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping integration test in short mode")
    }

    clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
    conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
    defer conn.Close()

    const volumeID = uint32(120)
    const needleID = uint64(990006)
    const cookie = uint32(0x66771122)
    framework.AllocateVolume(t, grpcClient, volumeID, "")

    httpClient := framework.NewHTTPClient()
    fid := framework.NewFileID(volumeID, needleID, cookie)
    payload := []byte("ec-shards-to-volume-success-roundtrip-content")
    uploadResp := framework.UploadBytes(t, httpClient, clusterHarness.VolumeAdminURL(), fid, payload)
    _ = framework.ReadAllAndClose(t, uploadResp)
    if uploadResp.StatusCode != http.StatusCreated {
        t.Fatalf("upload expected 201, got %d", uploadResp.StatusCode)
    }

    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()

    _, err := grpcClient.VolumeEcShardsGenerate(ctx, &volume_server_pb.VolumeEcShardsGenerateRequest{
        VolumeId:   volumeID,
        Collection: "",
    })
    if err != nil {
        t.Fatalf("VolumeEcShardsGenerate failed: %v", err)
    }

    _, err = grpcClient.VolumeEcShardsMount(ctx, &volume_server_pb.VolumeEcShardsMountRequest{
        VolumeId:   volumeID,
        Collection: "",
        ShardIds:   []uint32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
    })
    if err != nil {
        t.Fatalf("VolumeEcShardsMount data shards failed: %v", err)
    }

    _, err = grpcClient.VolumeEcShardsToVolume(ctx, &volume_server_pb.VolumeEcShardsToVolumeRequest{
        VolumeId:   volumeID,
        Collection: "",
    })
    if err != nil {
        t.Fatalf("VolumeEcShardsToVolume success path failed: %v", err)
    }

    readResp := framework.ReadBytes(t, httpClient, clusterHarness.VolumeAdminURL(), fid)
    readBody := framework.ReadAllAndClose(t, readResp)
    if readResp.StatusCode != http.StatusOK {
        t.Fatalf("post-conversion read expected 200, got %d", readResp.StatusCode)
    }
    if string(readBody) != string(payload) {
        t.Fatalf("post-conversion payload mismatch: got %q want %q", string(readBody), string(payload))
    }
}

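// TestEcShardsDeleteLastShardRemovesEcx checks that deleting every shard of
// an EC volume also removes the .ecx index file, using CopyFile as a probe
// for sidecar file existence.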
func TestEcShardsDeleteLastShardRemovesEcx(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping integration test in short mode")
    }

    clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
    conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
    defer conn.Close()

    const volumeID = uint32(121)
    framework.AllocateVolume(t, grpcClient, volumeID, "")

    httpClient := framework.NewHTTPClient()
    fid := framework.NewFileID(volumeID, 990007, 0x77882233)
    uploadResp := framework.UploadBytes(t, httpClient, clusterHarness.VolumeAdminURL(), fid, []byte("ec-delete-all-shards-content"))
    _ = framework.ReadAllAndClose(t, uploadResp)
    if uploadResp.StatusCode != http.StatusCreated {
        t.Fatalf("upload expected 201, got %d", uploadResp.StatusCode)
    }

    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()

    _, err := grpcClient.VolumeEcShardsGenerate(ctx, &volume_server_pb.VolumeEcShardsGenerateRequest{
        VolumeId:   volumeID,
        Collection: "",
    })
    if err != nil {
        t.Fatalf("VolumeEcShardsGenerate failed: %v", err)
    }

    // Verify .ecx is present before deleting all shards.
    ecxBeforeDelete, err := grpcClient.CopyFile(ctx, &volume_server_pb.CopyFileRequest{
        VolumeId:           volumeID,
        Collection:         "",
        IsEcVolume:         true,
        Ext:                ".ecx",
        CompactionRevision: math.MaxUint32,
        StopOffset:         1,
    })
    if err != nil {
        t.Fatalf("CopyFile .ecx before shard deletion start failed: %v", err)
    }
    if _, err = ecxBeforeDelete.Recv(); err != nil {
        t.Fatalf("CopyFile .ecx before shard deletion recv failed: %v", err)
    }

    _, err = grpcClient.VolumeEcShardsDelete(ctx, &volume_server_pb.VolumeEcShardsDeleteRequest{
        VolumeId:   volumeID,
        Collection: "",
        ShardIds:   []uint32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13},
    })
    if err != nil {
        t.Fatalf("VolumeEcShardsDelete all shards failed: %v", err)
    }

    ecxAfterDelete, err := grpcClient.CopyFile(ctx, &volume_server_pb.CopyFileRequest{
        VolumeId:           volumeID,
        Collection:         "",
        IsEcVolume:         true,
        Ext:                ".ecx",
        CompactionRevision: math.MaxUint32,
        StopOffset:         1,
    })
    if err == nil {
        _, err = ecxAfterDelete.Recv()
    }
    if err == nil || !strings.Contains(err.Error(), "not found ec volume id") {
        t.Fatalf("CopyFile .ecx after deleting all shards should fail not-found, got: %v", err)
    }
}

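// TestEcShardsCopyFromPeerSuccess encodes a volume on one server and copies
// shard 0 plus the .ecx and .vif sidecar files to a peer, then confirms the
// copies exist on the destination.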
func TestEcShardsCopyFromPeerSuccess(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping integration test in short mode")
    }

    clusterHarness := framework.StartDualVolumeCluster(t, matrix.P1())
    sourceConn, sourceClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress(0))
    defer sourceConn.Close()
    destConn, destClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress(1))
    defer destConn.Close()

    const volumeID = uint32(122)
    framework.AllocateVolume(t, sourceClient, volumeID, "")

    httpClient := framework.NewHTTPClient()
    fid := framework.NewFileID(volumeID, 990008, 0x88993344)
    uploadResp := framework.UploadBytes(t, httpClient, clusterHarness.VolumeAdminURL(0), fid, []byte("ec-copy-from-peer-content"))
    _ = framework.ReadAllAndClose(t, uploadResp)
    if uploadResp.StatusCode != http.StatusCreated {
        t.Fatalf("source upload expected 201, got %d", uploadResp.StatusCode)
    }

    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()

    _, err := sourceClient.VolumeEcShardsGenerate(ctx, &volume_server_pb.VolumeEcShardsGenerateRequest{
        VolumeId:   volumeID,
        Collection: "",
    })
    if err != nil {
        t.Fatalf("source VolumeEcShardsGenerate failed: %v", err)
    }

    // Build the source address in the "host:port.grpcPort" server-address
    // form so the destination can reach the source's gRPC endpoint.
    sourceDataNode := clusterHarness.VolumeAdminAddress(0) + "." + strings.Split(clusterHarness.VolumeGRPCAddress(0), ":")[1]
    _, err = destClient.VolumeEcShardsCopy(ctx, &volume_server_pb.VolumeEcShardsCopyRequest{
        VolumeId:       volumeID,
        Collection:     "",
        SourceDataNode: sourceDataNode,
        ShardIds:       []uint32{0},
        CopyEcxFile:    true,
        CopyVifFile:    true,
    })
    if err != nil {
        t.Fatalf("destination VolumeEcShardsCopy success path failed: %v", err)
    }

    for _, ext := range []string{".ec00", ".ecx", ".vif"} {
        copyStream, copyErr := destClient.CopyFile(ctx, &volume_server_pb.CopyFileRequest{
            VolumeId:           volumeID,
            Collection:         "",
            IsEcVolume:         true,
            Ext:                ext,
            CompactionRevision: math.MaxUint32,
            StopOffset:         1,
        })
        if copyErr != nil {
            t.Fatalf("destination CopyFile %s start failed: %v", ext, copyErr)
        }
        if _, copyErr = copyStream.Recv(); copyErr != nil {
            t.Fatalf("destination CopyFile %s recv failed: %v", ext, copyErr)
        }
    }
}

func TestEcShardsCopyFailsWhenSourceUnavailable(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping integration test in short mode")
    }

    clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
    conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
    defer conn.Close()

    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    _, err := grpcClient.VolumeEcShardsCopy(ctx, &volume_server_pb.VolumeEcShardsCopyRequest{
        VolumeId:       12345,
        Collection:     "",
        SourceDataNode: "127.0.0.1:1.1",
        ShardIds:       []uint32{0},
        CopyEcxFile:    true,
    })
    if err == nil || !strings.Contains(err.Error(), "VolumeEcShardsCopy volume") {
        t.Fatalf("VolumeEcShardsCopy source-unavailable error mismatch: %v", err)
    }
}
test/volume_server/grpc/health_state_test.go (new file, 139 lines)
@@ -0,0 +1,139 @@
package volume_server_grpc_test

import (
    "context"
    "strings"
    "testing"
    "time"

    "github.com/seaweedfs/seaweedfs/test/volume_server/framework"
    "github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
    "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
)

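// TestStateAndStatusRPCs exercises GetState, SetState, VolumeServerStatus,
// and Ping against a running volume server, toggling maintenance mode
// through the versioned state round trip.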
func TestStateAndStatusRPCs(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping integration test in short mode")
    }

    cluster := framework.StartSingleVolumeCluster(t, matrix.P1())
    conn, client := framework.DialVolumeServer(t, cluster.VolumeGRPCAddress())
    defer conn.Close()

    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    initialState, err := client.GetState(ctx, &volume_server_pb.GetStateRequest{})
    if err != nil {
        t.Fatalf("GetState failed: %v", err)
    }
    if initialState.GetState() == nil {
        t.Fatalf("GetState returned nil state")
    }

    setResp, err := client.SetState(ctx, &volume_server_pb.SetStateRequest{
        State: &volume_server_pb.VolumeServerState{
            Maintenance: true,
            Version:     initialState.GetState().GetVersion(),
        },
    })
    if err != nil {
        t.Fatalf("SetState(maintenance=true) failed: %v", err)
    }
    if !setResp.GetState().GetMaintenance() {
        t.Fatalf("expected maintenance=true after SetState")
    }

    setResp, err = client.SetState(ctx, &volume_server_pb.SetStateRequest{
        State: &volume_server_pb.VolumeServerState{
            Maintenance: false,
            Version:     setResp.GetState().GetVersion(),
        },
    })
    if err != nil {
        t.Fatalf("SetState(maintenance=false) failed: %v", err)
    }
    if setResp.GetState().GetMaintenance() {
        t.Fatalf("expected maintenance=false after SetState")
    }

    statusResp, err := client.VolumeServerStatus(ctx, &volume_server_pb.VolumeServerStatusRequest{})
    if err != nil {
        t.Fatalf("VolumeServerStatus failed: %v", err)
    }
    if statusResp.GetVersion() == "" {
        t.Fatalf("VolumeServerStatus returned empty version")
    }
    if len(statusResp.GetDiskStatuses()) == 0 {
        t.Fatalf("VolumeServerStatus returned no disk statuses")
    }
    if statusResp.GetState() == nil {
        t.Fatalf("VolumeServerStatus returned nil state")
    }
    if statusResp.GetMemoryStatus() == nil {
        t.Fatalf("VolumeServerStatus returned nil memory status")
    }
    if statusResp.GetMemoryStatus().GetGoroutines() <= 0 {
        t.Fatalf("VolumeServerStatus memory status should report goroutines, got %d", statusResp.GetMemoryStatus().GetGoroutines())
    }

    pingResp, err := client.Ping(ctx, &volume_server_pb.PingRequest{})
    if err != nil {
        t.Fatalf("Ping failed: %v", err)
    }
    if pingResp.GetStartTimeNs() == 0 || pingResp.GetStopTimeNs() == 0 {
        t.Fatalf("Ping timestamps should be non-zero: %+v", pingResp)
    }
    if pingResp.GetStopTimeNs() < pingResp.GetStartTimeNs() {
        t.Fatalf("Ping stop time should be >= start time: %+v", pingResp)
    }
}

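// TestSetStateVersionMismatchAndNilStateNoop verifies the optimistic
// concurrency contract of SetState: a stale version is rejected without
// mutating server state, and a nil state is a no-op success.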
func TestSetStateVersionMismatchAndNilStateNoop(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping integration test in short mode")
    }

    cluster := framework.StartSingleVolumeCluster(t, matrix.P1())
    conn, client := framework.DialVolumeServer(t, cluster.VolumeGRPCAddress())
    defer conn.Close()

    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    initialState, err := client.GetState(ctx, &volume_server_pb.GetStateRequest{})
    if err != nil {
        t.Fatalf("GetState failed: %v", err)
    }
    initialVersion := initialState.GetState().GetVersion()

    staleResp, err := client.SetState(ctx, &volume_server_pb.SetStateRequest{
        State: &volume_server_pb.VolumeServerState{
            Maintenance: true,
            Version:     initialVersion + 1,
        },
    })
    if err == nil {
        t.Fatalf("SetState with stale version should fail")
    }
    if !strings.Contains(err.Error(), "version mismatch") {
        t.Fatalf("SetState stale version error mismatch: %v", err)
    }
    if staleResp.GetState().GetVersion() != initialVersion {
        t.Fatalf("SetState stale version should not mutate server version: got %d want %d", staleResp.GetState().GetVersion(), initialVersion)
    }
    if staleResp.GetState().GetMaintenance() != initialState.GetState().GetMaintenance() {
        t.Fatalf("SetState stale version should not mutate maintenance flag")
    }

    nilResp, err := client.SetState(ctx, &volume_server_pb.SetStateRequest{})
    if err != nil {
        t.Fatalf("SetState nil-state request should be no-op success: %v", err)
    }
    if nilResp.GetState().GetVersion() != initialVersion {
        t.Fatalf("SetState nil-state should keep version unchanged: got %d want %d", nilResp.GetState().GetVersion(), initialVersion)
    }
    if nilResp.GetState().GetMaintenance() != initialState.GetState().GetMaintenance() {
        t.Fatalf("SetState nil-state should keep maintenance unchanged")
    }
}
test/volume_server/grpc/scrub_query_test.go (new file, 385 lines)
@@ -0,0 +1,385 @@
package volume_server_grpc_test

import (
    "context"
    "io"
    "strings"
    "testing"
    "time"

    "github.com/seaweedfs/seaweedfs/test/volume_server/framework"
    "github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
    "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
)

func TestScrubVolumeIndexAndUnsupportedMode(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping integration test in short mode")
    }

    clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
    conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
    defer conn.Close()

    const volumeID = uint32(61)
    framework.AllocateVolume(t, grpcClient, volumeID, "")

    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    indexResp, err := grpcClient.ScrubVolume(ctx, &volume_server_pb.ScrubVolumeRequest{
        VolumeIds: []uint32{volumeID},
        Mode:      volume_server_pb.VolumeScrubMode_INDEX,
    })
    if err != nil {
        t.Fatalf("ScrubVolume index mode failed: %v", err)
    }
    if indexResp.GetTotalVolumes() != 1 {
        t.Fatalf("ScrubVolume expected total_volumes=1, got %d", indexResp.GetTotalVolumes())
    }

    _, err = grpcClient.ScrubVolume(ctx, &volume_server_pb.ScrubVolumeRequest{
        VolumeIds: []uint32{volumeID},
        Mode:      volume_server_pb.VolumeScrubMode(99),
    })
    if err == nil {
        t.Fatalf("ScrubVolume should fail for unsupported mode")
    }
    if !strings.Contains(err.Error(), "unsupported volume scrub mode") {
        t.Fatalf("ScrubVolume unsupported mode error mismatch: %v", err)
    }
}

func TestScrubEcVolumeMissingVolume(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping integration test in short mode")
    }

    clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
    conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
    defer conn.Close()

    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    _, err := grpcClient.ScrubEcVolume(ctx, &volume_server_pb.ScrubEcVolumeRequest{
        VolumeIds: []uint32{98765},
        Mode:      volume_server_pb.VolumeScrubMode_INDEX,
    })
    if err == nil {
        t.Fatalf("ScrubEcVolume should fail for missing EC volume")
    }
    if !strings.Contains(err.Error(), "EC volume id") {
        t.Fatalf("ScrubEcVolume missing-volume error mismatch: %v", err)
    }
}

func TestScrubEcVolumeAutoSelectNoEcVolumes(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping integration test in short mode")
    }

    clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
    conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
    defer conn.Close()

    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    resp, err := grpcClient.ScrubEcVolume(ctx, &volume_server_pb.ScrubEcVolumeRequest{
        Mode: volume_server_pb.VolumeScrubMode_INDEX,
    })
    if err != nil {
        t.Fatalf("ScrubEcVolume auto-select failed: %v", err)
    }
    if resp.GetTotalVolumes() != 0 {
        t.Fatalf("ScrubEcVolume auto-select expected total_volumes=0 without EC data, got %d", resp.GetTotalVolumes())
    }
    if len(resp.GetBrokenVolumeIds()) != 0 {
        t.Fatalf("ScrubEcVolume auto-select expected no broken volumes, got %v", resp.GetBrokenVolumeIds())
    }
}

func TestQueryInvalidAndMissingFileIDPaths(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping integration test in short mode")
    }

    clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
    conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
    defer conn.Close()

    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    invalidStream, err := grpcClient.Query(ctx, &volume_server_pb.QueryRequest{
        FromFileIds: []string{"bad-fid"},
        Selections:  []string{"name"},
        Filter:      &volume_server_pb.QueryRequest_Filter{},
        InputSerialization: &volume_server_pb.QueryRequest_InputSerialization{
            JsonInput: &volume_server_pb.QueryRequest_InputSerialization_JSONInput{},
        },
    })
    if err == nil {
        _, err = invalidStream.Recv()
    }
    if err == nil {
        t.Fatalf("Query should fail for invalid file id")
    }

    missingFid := framework.NewFileID(98766, 1, 1)
    missingStream, err := grpcClient.Query(ctx, &volume_server_pb.QueryRequest{
        FromFileIds: []string{missingFid},
        Selections:  []string{"name"},
        Filter:      &volume_server_pb.QueryRequest_Filter{},
        InputSerialization: &volume_server_pb.QueryRequest_InputSerialization{
            JsonInput: &volume_server_pb.QueryRequest_InputSerialization_JSONInput{},
        },
    })
    if err == nil {
        _, err = missingStream.Recv()
    }
    if err == nil {
        t.Fatalf("Query should fail for missing file id volume")
    }
}

func TestScrubVolumeAutoSelectAndNotImplementedModes(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping integration test in short mode")
    }

    clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
    conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
    defer conn.Close()

    const volumeIDA = uint32(62)
    const volumeIDB = uint32(63)
    framework.AllocateVolume(t, grpcClient, volumeIDA, "")
    framework.AllocateVolume(t, grpcClient, volumeIDB, "")

    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    autoResp, err := grpcClient.ScrubVolume(ctx, &volume_server_pb.ScrubVolumeRequest{
        Mode: volume_server_pb.VolumeScrubMode_INDEX,
    })
    if err != nil {
        t.Fatalf("ScrubVolume auto-select failed: %v", err)
    }
    if autoResp.GetTotalVolumes() < 2 {
        t.Fatalf("ScrubVolume auto-select expected at least 2 volumes, got %d", autoResp.GetTotalVolumes())
    }

    localResp, err := grpcClient.ScrubVolume(ctx, &volume_server_pb.ScrubVolumeRequest{
        VolumeIds: []uint32{volumeIDA},
        Mode:      volume_server_pb.VolumeScrubMode_LOCAL,
    })
    if err != nil {
        t.Fatalf("ScrubVolume local mode failed: %v", err)
    }
    if localResp.GetTotalVolumes() != 1 {
        t.Fatalf("ScrubVolume local mode expected total_volumes=1, got %d", localResp.GetTotalVolumes())
    }
    if len(localResp.GetBrokenVolumeIds()) != 1 || localResp.GetBrokenVolumeIds()[0] != volumeIDA {
        t.Fatalf("ScrubVolume local mode expected broken volume %d, got %v", volumeIDA, localResp.GetBrokenVolumeIds())
    }
    if len(localResp.GetDetails()) == 0 || !strings.Contains(strings.Join(localResp.GetDetails(), " "), "not implemented") {
        t.Fatalf("ScrubVolume local mode expected not-implemented details, got %v", localResp.GetDetails())
    }

    fullResp, err := grpcClient.ScrubVolume(ctx, &volume_server_pb.ScrubVolumeRequest{
        VolumeIds: []uint32{volumeIDA},
        Mode:      volume_server_pb.VolumeScrubMode_FULL,
    })
    if err != nil {
        t.Fatalf("ScrubVolume full mode failed: %v", err)
    }
    if fullResp.GetTotalVolumes() != 1 {
        t.Fatalf("ScrubVolume full mode expected total_volumes=1, got %d", fullResp.GetTotalVolumes())
    }
    if len(fullResp.GetDetails()) == 0 || !strings.Contains(strings.Join(fullResp.GetDetails(), " "), "not implemented") {
        t.Fatalf("ScrubVolume full mode expected not-implemented details, got %v", fullResp.GetDetails())
    }
}

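// TestQueryJsonSuccessAndCsvNoOutput uploads JSON lines and queries them
// with a numeric filter, then confirms the same needle yields no rows when
// parsed as CSV.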
func TestQueryJsonSuccessAndCsvNoOutput(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping integration test in short mode")
    }

    clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
    conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
    defer conn.Close()

    const volumeID = uint32(64)
    const needleID = uint64(777001)
    const cookie = uint32(0xAABBCCDD)
    framework.AllocateVolume(t, grpcClient, volumeID, "")

    jsonLines := []byte("{\"score\":3}\n{\"score\":12}\n{\"score\":18}\n")
    httpClient := framework.NewHTTPClient()
    fid := framework.NewFileID(volumeID, needleID, cookie)
    uploadResp := framework.UploadBytes(t, httpClient, clusterHarness.VolumeAdminURL(), fid, jsonLines)
    _ = framework.ReadAllAndClose(t, uploadResp)
    if uploadResp.StatusCode != 201 {
        t.Fatalf("upload expected 201, got %d", uploadResp.StatusCode)
    }

    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    queryStream, err := grpcClient.Query(ctx, &volume_server_pb.QueryRequest{
        FromFileIds: []string{fid},
        Selections:  []string{"score"},
        Filter: &volume_server_pb.QueryRequest_Filter{
            Field:   "score",
            Operand: ">",
            Value:   "10",
        },
        InputSerialization: &volume_server_pb.QueryRequest_InputSerialization{
            JsonInput: &volume_server_pb.QueryRequest_InputSerialization_JSONInput{Type: "LINES"},
        },
    })
    if err != nil {
        t.Fatalf("Query json start failed: %v", err)
    }

    firstStripe, err := queryStream.Recv()
    if err != nil {
        t.Fatalf("Query json recv failed: %v", err)
    }
    records := string(firstStripe.GetRecords())
    if !strings.Contains(records, "score:12") || !strings.Contains(records, "score:18") {
        t.Fatalf("Query json records missing expected filtered scores: %q", records)
    }
    if strings.Contains(records, "score:3") {
        t.Fatalf("Query json records should not include filtered-out score: %q", records)
    }
    _, err = queryStream.Recv()
    if err != io.EOF {
        t.Fatalf("Query json expected EOF after first stripe, got: %v", err)
    }

    csvStream, err := grpcClient.Query(ctx, &volume_server_pb.QueryRequest{
        FromFileIds: []string{fid},
        Selections:  []string{"score"},
        Filter:      &volume_server_pb.QueryRequest_Filter{},
        InputSerialization: &volume_server_pb.QueryRequest_InputSerialization{
            CsvInput: &volume_server_pb.QueryRequest_InputSerialization_CSVInput{},
        },
    })
    if err != nil {
        t.Fatalf("Query csv start failed: %v", err)
    }
    _, err = csvStream.Recv()
    if err != io.EOF {
        t.Fatalf("Query csv expected EOF with no rows, got: %v", err)
    }
}

func TestQueryJsonNoMatchReturnsEmptyStripe(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping integration test in short mode")
    }

    clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
    conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
    defer conn.Close()

    const volumeID = uint32(65)
    const needleID = uint64(777002)
    const cookie = uint32(0xABABCDCD)
    framework.AllocateVolume(t, grpcClient, volumeID, "")

    jsonLines := []byte("{\"score\":1}\n{\"score\":2}\n")
    httpClient := framework.NewHTTPClient()
    fid := framework.NewFileID(volumeID, needleID, cookie)
    uploadResp := framework.UploadBytes(t, httpClient, clusterHarness.VolumeAdminURL(), fid, jsonLines)
    _ = framework.ReadAllAndClose(t, uploadResp)
    if uploadResp.StatusCode != 201 {
        t.Fatalf("upload expected 201, got %d", uploadResp.StatusCode)
    }

    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    queryStream, err := grpcClient.Query(ctx, &volume_server_pb.QueryRequest{
        FromFileIds: []string{fid},
        Selections:  []string{"score"},
        Filter: &volume_server_pb.QueryRequest_Filter{
            Field:   "score",
            Operand: ">",
            Value:   "100",
        },
        InputSerialization: &volume_server_pb.QueryRequest_InputSerialization{
            JsonInput: &volume_server_pb.QueryRequest_InputSerialization_JSONInput{Type: "LINES"},
        },
    })
    if err != nil {
        t.Fatalf("Query json no-match start failed: %v", err)
    }

    firstStripe, err := queryStream.Recv()
    if err != nil {
        t.Fatalf("Query json no-match recv failed: %v", err)
    }
    if len(firstStripe.GetRecords()) != 0 {
        t.Fatalf("Query json no-match expected empty records stripe, got: %q", string(firstStripe.GetRecords()))
    }

    _, err = queryStream.Recv()
    if err != io.EOF {
        t.Fatalf("Query json no-match expected EOF after first empty stripe, got: %v", err)
    }
}

func TestQueryCookieMismatchReturnsEOFNoResults(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping integration test in short mode")
    }

    clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
    conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
    defer conn.Close()

    const volumeID = uint32(66)
    const needleID = uint64(777003)
    const cookie = uint32(0xCDCDABAB)
    framework.AllocateVolume(t, grpcClient, volumeID, "")

    jsonLines := []byte("{\"score\":7}\n{\"score\":8}\n")
    httpClient := framework.NewHTTPClient()
    fid := framework.NewFileID(volumeID, needleID, cookie)
    uploadResp := framework.UploadBytes(t, httpClient, clusterHarness.VolumeAdminURL(), fid, jsonLines)
    _ = framework.ReadAllAndClose(t, uploadResp)
    if uploadResp.StatusCode != 201 {
        t.Fatalf("upload expected 201, got %d", uploadResp.StatusCode)
    }

    wrongCookieFid := framework.NewFileID(volumeID, needleID, cookie+1)

    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    stream, err := grpcClient.Query(ctx, &volume_server_pb.QueryRequest{
        FromFileIds: []string{wrongCookieFid},
        Selections:  []string{"score"},
        Filter: &volume_server_pb.QueryRequest_Filter{
            Field:   "score",
            Operand: ">",
            Value:   "0",
        },
        InputSerialization: &volume_server_pb.QueryRequest_InputSerialization{
            JsonInput: &volume_server_pb.QueryRequest_InputSerialization_JSONInput{Type: "LINES"},
        },
    })
    if err != nil {
        t.Fatalf("Query start for cookie mismatch should not fail immediately, got: %v", err)
    }

    _, err = stream.Recv()
    if err != io.EOF {
        t.Fatalf("Query cookie mismatch expected EOF with no streamed records, got: %v", err)
    }
}
test/volume_server/grpc/tail_test.go (new file, 206 lines)
@@ -0,0 +1,206 @@
package volume_server_grpc_test

import (
    "bytes"
    "context"
    "io"
    "net/http"
    "strings"
    "testing"
    "time"

    "github.com/seaweedfs/seaweedfs/test/volume_server/framework"
    "github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
    "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
)

func TestVolumeTailSenderMissingVolume(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping integration test in short mode")
    }

    clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
    conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
    defer conn.Close()

    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    stream, err := grpcClient.VolumeTailSender(ctx, &volume_server_pb.VolumeTailSenderRequest{VolumeId: 77777, SinceNs: 0, IdleTimeoutSeconds: 1})
    if err == nil {
        _, err = stream.Recv()
    }
    if err == nil || !strings.Contains(err.Error(), "not found volume") {
        t.Fatalf("VolumeTailSender missing-volume error mismatch: %v", err)
    }
}

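// TestVolumeTailSenderHeartbeatThenEOF tails an empty volume and expects a
// heartbeat message flagged IsLastChunk, then EOF once the one-second idle
// timeout drains the stream.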
func TestVolumeTailSenderHeartbeatThenEOF(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping integration test in short mode")
    }

    clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
    conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
    defer conn.Close()

    const volumeID = uint32(71)
    framework.AllocateVolume(t, grpcClient, volumeID, "")

    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    stream, err := grpcClient.VolumeTailSender(ctx, &volume_server_pb.VolumeTailSenderRequest{
        VolumeId:           volumeID,
        SinceNs:            0,
        IdleTimeoutSeconds: 1,
    })
    if err != nil {
        t.Fatalf("VolumeTailSender start failed: %v", err)
    }

    msg, err := stream.Recv()
    if err != nil {
        t.Fatalf("VolumeTailSender first recv failed: %v", err)
    }
    if !msg.GetIsLastChunk() {
        t.Fatalf("expected first tail message to be heartbeat IsLastChunk=true")
    }

    _, err = stream.Recv()
    if err != io.EOF {
        t.Fatalf("expected EOF after idle timeout drain, got: %v", err)
    }
}

func TestVolumeTailReceiverMissingVolume(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping integration test in short mode")
    }

    clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
    conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
    defer conn.Close()

    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    _, err := grpcClient.VolumeTailReceiver(ctx, &volume_server_pb.VolumeTailReceiverRequest{VolumeId: 88888, SourceVolumeServer: clusterHarness.VolumeServerAddress(), SinceNs: 0, IdleTimeoutSeconds: 1})
    if err == nil || !strings.Contains(err.Error(), "receiver not found volume") {
        t.Fatalf("VolumeTailReceiver missing-volume error mismatch: %v", err)
    }
}

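// TestVolumeTailReceiverReplicatesSourceUpdates uploads to a source server,
// has the destination pull the update via VolumeTailReceiver, and then reads
// the replicated needle back from the destination over HTTP.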
func TestVolumeTailReceiverReplicatesSourceUpdates(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping integration test in short mode")
    }

    clusterHarness := framework.StartDualVolumeCluster(t, matrix.P1())
    sourceConn, sourceClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress(0))
    defer sourceConn.Close()
    destConn, destClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress(1))
    defer destConn.Close()

    const volumeID = uint32(72)
    framework.AllocateVolume(t, sourceClient, volumeID, "")
    framework.AllocateVolume(t, destClient, volumeID, "")

    httpClient := framework.NewHTTPClient()
    fid := framework.NewFileID(volumeID, 880003, 0x3456789A)
    payload := []byte("tail-receiver-replicates-source-updates")

    sourceUploadResp := framework.UploadBytes(t, httpClient, clusterHarness.VolumeAdminURL(0), fid, payload)
    _ = framework.ReadAllAndClose(t, sourceUploadResp)
    if sourceUploadResp.StatusCode != http.StatusCreated {
        t.Fatalf("source upload expected 201, got %d", sourceUploadResp.StatusCode)
    }

    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()

    _, err := destClient.VolumeTailReceiver(ctx, &volume_server_pb.VolumeTailReceiverRequest{
        VolumeId:           volumeID,
        SourceVolumeServer: clusterHarness.VolumeAdminAddress(0) + "." + strings.Split(clusterHarness.VolumeGRPCAddress(0), ":")[1],
        SinceNs:            0,
        IdleTimeoutSeconds: 1,
    })
    if err != nil {
        t.Fatalf("VolumeTailReceiver success path failed: %v", err)
    }

    destReadResp := framework.ReadBytes(t, httpClient, clusterHarness.VolumeAdminURL(1), fid)
    destReadBody := framework.ReadAllAndClose(t, destReadResp)
    if destReadResp.StatusCode != http.StatusOK {
        t.Fatalf("destination read after tail receive expected 200, got %d", destReadResp.StatusCode)
    }
    if string(destReadBody) != string(payload) {
        t.Fatalf("destination tail-received payload mismatch: got %q want %q", string(destReadBody), string(payload))
    }
}

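// TestVolumeTailSenderLargeNeedleChunking uploads a needle larger than 2 MiB
// and asserts the tail stream splits it across multiple chunks, with only
// the final data chunk marked IsLastChunk.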
func TestVolumeTailSenderLargeNeedleChunking(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping integration test in short mode")
    }

    clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
    conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
    defer conn.Close()

    const volumeID = uint32(73)
    framework.AllocateVolume(t, grpcClient, volumeID, "")

    httpClient := framework.NewHTTPClient()
    fid := framework.NewFileID(volumeID, 880004, 0x456789AB)
    largePayload := bytes.Repeat([]byte("L"), 2*1024*1024+128*1024)
    uploadResp := framework.UploadBytes(t, httpClient, clusterHarness.VolumeAdminURL(), fid, largePayload)
    _ = framework.ReadAllAndClose(t, uploadResp)
    if uploadResp.StatusCode != http.StatusCreated {
        t.Fatalf("large upload expected 201, got %d", uploadResp.StatusCode)
    }

    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()

    stream, err := grpcClient.VolumeTailSender(ctx, &volume_server_pb.VolumeTailSenderRequest{
        VolumeId:           volumeID,
        SinceNs:            0,
        IdleTimeoutSeconds: 1,
    })
    if err != nil {
        t.Fatalf("VolumeTailSender start failed: %v", err)
    }

    dataChunkCount := 0
    sawNonLastDataChunk := false
    sawLastDataChunk := false
    for {
        msg, recvErr := stream.Recv()
        if recvErr == io.EOF {
            break
        }
        if recvErr != nil {
            t.Fatalf("VolumeTailSender recv failed: %v", recvErr)
        }
        if len(msg.GetNeedleBody()) == 0 {
            continue
        }
        dataChunkCount++
        if msg.GetIsLastChunk() {
            sawLastDataChunk = true
        } else {
            sawNonLastDataChunk = true
        }
    }

    if dataChunkCount < 2 {
        t.Fatalf("VolumeTailSender expected multiple chunks for large needle, got %d", dataChunkCount)
    }
    if !sawNonLastDataChunk {
        t.Fatalf("VolumeTailSender expected at least one non-last data chunk")
    }
    if !sawLastDataChunk {
        t.Fatalf("VolumeTailSender expected a final data chunk marked IsLastChunk=true")
    }
}
test/volume_server/grpc/tiering_remote_test.go (new file, 236 lines)
@@ -0,0 +1,236 @@
package volume_server_grpc_test

import (
    "context"
    "strings"
    "testing"
    "time"

    "github.com/seaweedfs/seaweedfs/test/volume_server/framework"
    "github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
    "github.com/seaweedfs/seaweedfs/weed/pb/remote_pb"
    "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
)

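// TestFetchAndWriteNeedleMaintenanceAndMissingVolume checks that the tiering
// fetch path rejects unknown volume ids and refuses writes while the server
// is in maintenance mode.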
func TestFetchAndWriteNeedleMaintenanceAndMissingVolume(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping integration test in short mode")
    }

    clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
    conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
    defer conn.Close()

    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    _, err := grpcClient.FetchAndWriteNeedle(ctx, &volume_server_pb.FetchAndWriteNeedleRequest{
        VolumeId: 98781,
        NeedleId: 1,
    })
    if err == nil || !strings.Contains(err.Error(), "not found volume id") {
        t.Fatalf("FetchAndWriteNeedle missing-volume error mismatch: %v", err)
    }

    stateResp, err := grpcClient.GetState(ctx, &volume_server_pb.GetStateRequest{})
    if err != nil {
        t.Fatalf("GetState failed: %v", err)
    }
    _, err = grpcClient.SetState(ctx, &volume_server_pb.SetStateRequest{
        State: &volume_server_pb.VolumeServerState{
            Maintenance: true,
            Version:     stateResp.GetState().GetVersion(),
        },
    })
    if err != nil {
        t.Fatalf("SetState maintenance=true failed: %v", err)
    }

    _, err = grpcClient.FetchAndWriteNeedle(ctx, &volume_server_pb.FetchAndWriteNeedleRequest{
        VolumeId: 1,
        NeedleId: 1,
    })
    if err == nil || !strings.Contains(err.Error(), "maintenance mode") {
        t.Fatalf("FetchAndWriteNeedle maintenance error mismatch: %v", err)
    }
}

func TestFetchAndWriteNeedleInvalidRemoteConfig(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping integration test in short mode")
    }

    clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
    conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
    defer conn.Close()

    const volumeID = uint32(88)
    framework.AllocateVolume(t, grpcClient, volumeID, "")

    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    _, err := grpcClient.FetchAndWriteNeedle(ctx, &volume_server_pb.FetchAndWriteNeedleRequest{
        VolumeId: volumeID,
        NeedleId: 1,
        Cookie:   1,
        Size:     1,
        RemoteConf: &remote_pb.RemoteConf{
            Name: "it-invalid-remote",
            Type: "does-not-exist",
        },
        RemoteLocation: &remote_pb.RemoteStorageLocation{
            Name: "it-invalid-remote",
            Path: "/test",
        },
    })
    if err == nil || !strings.Contains(err.Error(), "get remote client") {
        t.Fatalf("FetchAndWriteNeedle invalid-remote error mismatch: %v", err)
    }
}

func TestVolumeTierMoveDatToRemoteErrorPaths(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping integration test in short mode")
    }

    clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
    conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
    defer conn.Close()

    const volumeID = uint32(85)
    const collection = "tier-collection"
    framework.AllocateVolume(t, grpcClient, volumeID, collection)

    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    missingStream, err := grpcClient.VolumeTierMoveDatToRemote(ctx, &volume_server_pb.VolumeTierMoveDatToRemoteRequest{
        VolumeId:               98782,
        Collection:             collection,
        DestinationBackendName: "dummy",
    })
    if err == nil {
        _, err = missingStream.Recv()
    }
    if err == nil || !strings.Contains(err.Error(), "not found") {
        t.Fatalf("VolumeTierMoveDatToRemote missing-volume error mismatch: %v", err)
    }

    mismatchStream, err := grpcClient.VolumeTierMoveDatToRemote(ctx, &volume_server_pb.VolumeTierMoveDatToRemoteRequest{
        VolumeId:               volumeID,
        Collection:             "wrong-collection",
        DestinationBackendName: "dummy",
    })
    if err == nil {
        _, err = mismatchStream.Recv()
    }
    if err == nil || !strings.Contains(err.Error(), "unexpected input") {
        t.Fatalf("VolumeTierMoveDatToRemote collection mismatch error mismatch: %v", err)
    }

    stateResp, err := grpcClient.GetState(ctx, &volume_server_pb.GetStateRequest{})
    if err != nil {
        t.Fatalf("GetState failed: %v", err)
    }
    _, err = grpcClient.SetState(ctx, &volume_server_pb.SetStateRequest{
        State: &volume_server_pb.VolumeServerState{
            Maintenance: true,
            Version:     stateResp.GetState().GetVersion(),
        },
    })
    if err != nil {
        t.Fatalf("SetState maintenance=true failed: %v", err)
    }

    maintenanceStream, err := grpcClient.VolumeTierMoveDatToRemote(ctx, &volume_server_pb.VolumeTierMoveDatToRemoteRequest{
        VolumeId:               volumeID,
        Collection:             collection,
        DestinationBackendName: "dummy",
    })
    if err == nil {
        _, err = maintenanceStream.Recv()
    }
    if err == nil || !strings.Contains(err.Error(), "maintenance mode") {
        t.Fatalf("VolumeTierMoveDatToRemote maintenance error mismatch: %v", err)
    }
}

func TestVolumeTierMoveDatToRemoteMissingBackend(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping integration test in short mode")
    }

    clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
    conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
    defer conn.Close()

    const volumeID = uint32(89)
    const collection = "tier-missing-backend"
    framework.AllocateVolume(t, grpcClient, volumeID, collection)

    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    stream, err := grpcClient.VolumeTierMoveDatToRemote(ctx, &volume_server_pb.VolumeTierMoveDatToRemoteRequest{
        VolumeId:               volumeID,
        Collection:             collection,
        DestinationBackendName: "definitely-missing-backend",
    })
    if err == nil {
        _, err = stream.Recv()
    }
    if err == nil || !strings.Contains(err.Error(), "destination definitely-missing-backend not found") {
        t.Fatalf("VolumeTierMoveDatToRemote missing-backend error mismatch: %v", err)
    }
}

func TestVolumeTierMoveDatFromRemoteErrorPaths(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping integration test in short mode")
    }

    clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
    conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
    defer conn.Close()

    const volumeID = uint32(86)
    const collection = "tier-download-collection"
    framework.AllocateVolume(t, grpcClient, volumeID, collection)

    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    missingStream, err := grpcClient.VolumeTierMoveDatFromRemote(ctx, &volume_server_pb.VolumeTierMoveDatFromRemoteRequest{
        VolumeId:   98783,
        Collection: collection,
    })
    if err == nil {
        _, err = missingStream.Recv()
    }
    if err == nil || !strings.Contains(err.Error(), "not found") {
        t.Fatalf("VolumeTierMoveDatFromRemote missing-volume error mismatch: %v", err)
    }

    mismatchStream, err := grpcClient.VolumeTierMoveDatFromRemote(ctx, &volume_server_pb.VolumeTierMoveDatFromRemoteRequest{
        VolumeId:   volumeID,
        Collection: "wrong-collection",
    })
    if err == nil {
        _, err = mismatchStream.Recv()
    }
    if err == nil || !strings.Contains(err.Error(), "unexpected input") {
        t.Fatalf("VolumeTierMoveDatFromRemote collection mismatch error mismatch: %v", err)
    }

    localDiskStream, err := grpcClient.VolumeTierMoveDatFromRemote(ctx, &volume_server_pb.VolumeTierMoveDatFromRemoteRequest{
        VolumeId:   volumeID,
        Collection: collection,
    })
    if err == nil {
        _, err = localDiskStream.Recv()
    }
    if err == nil || !strings.Contains(err.Error(), "already on local disk") {
        t.Fatalf("VolumeTierMoveDatFromRemote local-disk error mismatch: %v", err)
    }
}
test/volume_server/grpc/vacuum_test.go (new file, 87 lines)
@@ -0,0 +1,87 @@
package volume_server_grpc_test

import (
	"context"
	"strings"
	"testing"
	"time"

	"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
	"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
	"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
)

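// TestVacuumVolumeCheckSuccessAndMissingVolume checks that VacuumVolumeCheck
// reports a garbage ratio in [0, 1] for an allocated volume and returns an
// error for a volume id that was never allocated.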
func TestVacuumVolumeCheckSuccessAndMissingVolume(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()

	const volumeID = uint32(31)
	framework.AllocateVolume(t, grpcClient, volumeID, "")

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	resp, err := grpcClient.VacuumVolumeCheck(ctx, &volume_server_pb.VacuumVolumeCheckRequest{VolumeId: volumeID})
	if err != nil {
		t.Fatalf("VacuumVolumeCheck existing volume failed: %v", err)
	}
	if resp.GetGarbageRatio() < 0 || resp.GetGarbageRatio() > 1 {
		t.Fatalf("unexpected garbage ratio: %f", resp.GetGarbageRatio())
	}

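	// A volume id that was never allocated should make the check fail.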
	_, err = grpcClient.VacuumVolumeCheck(ctx, &volume_server_pb.VacuumVolumeCheckRequest{VolumeId: 99999})
	if err == nil {
		t.Fatalf("VacuumVolumeCheck should fail for missing volume")
	}
}

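// TestVacuumMaintenanceModeRejections flips the server into maintenance mode
// via SetState, echoing the state version returned by GetState, and asserts
// that the compact, commit, and cleanup vacuum RPCs are all rejected with a
// "maintenance mode" error.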
func TestVacuumMaintenanceModeRejections(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	stateResp, err := grpcClient.GetState(ctx, &volume_server_pb.GetStateRequest{})
	if err != nil {
		t.Fatalf("GetState failed: %v", err)
	}
	_, err = grpcClient.SetState(ctx, &volume_server_pb.SetStateRequest{
		State: &volume_server_pb.VolumeServerState{Maintenance: true, Version: stateResp.GetState().GetVersion()},
	})
	if err != nil {
		t.Fatalf("SetState maintenance=true failed: %v", err)
	}

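	// Shared assertion: every vacuum RPC attempted below must fail, and the
	// failure must name maintenance mode as the cause.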
	assertMaintenanceErr := func(name string, err error) {
		t.Helper()
		if err == nil {
			t.Fatalf("%s should fail in maintenance mode", name)
		}
		if !strings.Contains(err.Error(), "maintenance mode") {
			t.Fatalf("%s expected maintenance mode error, got: %v", name, err)
		}
	}

	compactStream, err := grpcClient.VacuumVolumeCompact(ctx, &volume_server_pb.VacuumVolumeCompactRequest{VolumeId: 31})
	if err == nil {
		_, err = compactStream.Recv()
	}
	assertMaintenanceErr("VacuumVolumeCompact", err)

	_, err = grpcClient.VacuumVolumeCommit(ctx, &volume_server_pb.VacuumVolumeCommitRequest{VolumeId: 31})
	assertMaintenanceErr("VacuumVolumeCommit", err)

	_, err = grpcClient.VacuumVolumeCleanup(ctx, &volume_server_pb.VacuumVolumeCleanupRequest{VolumeId: 31})
	assertMaintenanceErr("VacuumVolumeCleanup", err)
}