* docs(volume_server): add integration test development plan * test(volume_server): add integration harness and profile matrix * test(volume_server/http): add admin and options integration coverage * test(volume_server/grpc): add state and status integration coverage * test(volume_server): auto-build weed binary and harden cluster startup * test(volume_server/http): add upload read range head delete coverage * test(volume_server/grpc): expand admin lifecycle and state coverage * docs(volume_server): update progress tracker for implemented tests * test(volume_server/http): cover if-none-match and invalid-range branches * test(volume_server/grpc): add batch delete integration coverage * docs(volume_server): log latest HTTP and gRPC test coverage * ci(volume_server): run volume server integration tests in github actions * test(volume_server/grpc): add needle status configure ping and leave coverage * docs(volume_server): record additional grpc coverage progress * test(volume_server/grpc): add vacuum integration coverage * docs(volume_server): record vacuum test coverage progress * test(volume_server/grpc): add read and write needle blob error-path coverage * docs(volume_server): record data rw grpc coverage progress * test(volume_server/http): add jwt auth integration coverage * test(volume_server/grpc): add sync copy and stream error-path coverage * docs(volume_server): record jwt and sync/copy test coverage * test(volume_server/grpc): add scrub and query integration coverage * test(volume_server/grpc): add volume tail sender and receiver coverage * docs(volume_server): record scrub query and tail test progress * test(volume_server/grpc): add readonly writable and collection lifecycle coverage * test(volume_server/http): add public-port cors and method parity coverage * test(volume_server/grpc): add blob meta and read-all success path coverage * test(volume_server/grpc): expand scrub and query variation coverage * test(volume_server/grpc): add tiering and remote 
fetch error-path coverage * test(volume_server/http): add unchanged write and delete edge-case coverage * test(volume_server/grpc): add ping unknown and unreachable target coverage * test(volume_server/grpc): add volume delete only-empty variation coverage * test(volume_server/http): add jwt fid-mismatch auth coverage * test(volume_server/grpc): add scrub ec auto-select empty coverage * test(volume_server/grpc): stabilize ping timestamp assertion * docs(volume_server): update integration coverage progress log * test(volume_server/grpc): add tier remote backend and config variation coverage * docs(volume_server): record tier remote variation progress * test(volume_server/grpc): add incremental copy and receive-file protocol coverage * test(volume_server/http): add read path shape and if-modified-since coverage * test(volume_server/grpc): add copy-file compaction and receive-file success coverage * test(volume_server/http): add passthrough headers and static asset coverage * test(volume_server/grpc): add ping filer unreachable coverage * docs(volume_server): record copy receive and http variant progress * test(volume_server/grpc): add erasure coding maintenance and missing-path coverage * docs(volume_server): record initial erasure coding rpc coverage * test(volume_server/http): add multi-range multipart response coverage * docs(volume_server): record multi-range http coverage progress * test(volume_server/grpc): add query empty-stripe no-match coverage * docs(volume_server): record query no-match stream behavior coverage * test(volume_server/http): add upload throttling timeout and replicate bypass coverage * docs(volume_server): record upload throttling coverage progress * test(volume_server/http): add download throttling timeout coverage * docs(volume_server): record download throttling coverage progress * test(volume_server/http): add jwt wrong-cookie fid mismatch coverage * docs(volume_server): record jwt wrong-cookie mismatch coverage * 
test(volume_server/http): add jwt expired-token rejection coverage * docs(volume_server): record jwt expired-token coverage * test(volume_server/http): add jwt query and cookie transport coverage * docs(volume_server): record jwt token transport coverage * test(volume_server/http): add jwt token-source precedence coverage * docs(volume_server): record jwt token-source precedence coverage * test(volume_server/http): add jwt header-over-cookie precedence coverage * docs(volume_server): record jwt header cookie precedence coverage * test(volume_server/http): add jwt query-over-cookie precedence coverage * docs(volume_server): record jwt query cookie precedence coverage * test(volume_server/grpc): add setstate version mismatch and nil-state coverage * docs(volume_server): record setstate validation coverage * test(volume_server/grpc): add readonly persist-true lifecycle coverage * docs(volume_server): record readonly persist variation coverage * test(volume_server/http): add options origin cors header coverage * docs(volume_server): record options origin cors coverage * test(volume_server/http): add trace unsupported-method parity coverage * docs(volume_server): record trace method parity coverage * test(volume_server/grpc): add batch delete cookie-check variation coverage * docs(volume_server): record batch delete cookie-check coverage * test(volume_server/grpc): add admin lifecycle missing and maintenance variants * docs(volume_server): record admin lifecycle edge-case coverage * test(volume_server/grpc): add mixed batch delete status matrix coverage * docs(volume_server): record mixed batch delete matrix coverage * test(volume_server/http): add jwt-profile ui access gating coverage * docs(volume_server): record jwt ui-gating http coverage * test(volume_server/http): add propfind unsupported-method parity coverage * docs(volume_server): record propfind method parity coverage * test(volume_server/grpc): add volume configure success and rollback-path coverage * 
docs(volume_server): record volume configure branch coverage * test(volume_server/grpc): add volume needle status missing-path coverage * docs(volume_server): record volume needle status error-path coverage * test(volume_server/http): add readDeleted query behavior coverage * docs(volume_server): record readDeleted http behavior coverage * test(volume_server/http): add delete ts override parity coverage * docs(volume_server): record delete ts parity coverage * test(volume_server/grpc): add invalid blob/meta offset coverage * docs(volume_server): record invalid blob/meta offset coverage * test(volume_server/grpc): add read-all mixed volume abort coverage * docs(volume_server): record read-all mixed-volume abort coverage * test(volume_server/http): assert head response body parity * docs(volume_server): record head body parity assertion * test(volume_server/grpc): assert status state and memory payload completeness * docs(volume_server): record volume server status payload coverage * test(volume_server/grpc): add batch delete chunk-manifest rejection coverage * docs(volume_server): record batch delete chunk-manifest coverage * test(volume_server/grpc): add query cookie-mismatch eof parity coverage * docs(volume_server): record query cookie-mismatch parity coverage * test(volume_server/grpc): add ping master success target coverage * docs(volume_server): record ping master success coverage * test(volume_server/http): add head if-none-match conditional parity * docs(volume_server): record head if-none-match parity coverage * test(volume_server/http): add head if-modified-since parity coverage * docs(volume_server): record head if-modified-since parity coverage * test(volume_server/http): add connect unsupported-method parity coverage * docs(volume_server): record connect method parity coverage * test(volume_server/http): assert options allow-headers cors parity * docs(volume_server): record options allow-headers coverage * test(volume_server/framework): add dual volume 
cluster integration harness * test(volume_server/http): add missing-local read mode proxy redirect local coverage * docs(volume_server): record read mode missing-local matrix coverage * test(volume_server/http): add download over-limit replica proxy fallback coverage * docs(volume_server): record download replica fallback coverage * test(volume_server/http): add missing-local readDeleted proxy redirect parity coverage * docs(volume_server): record missing-local readDeleted mode coverage * test(volume_server/framework): add single-volume cluster with filer harness * test(volume_server/grpc): add ping filer success target coverage * docs(volume_server): record ping filer success coverage * test(volume_server/http): add proxied-loop guard download timeout coverage * docs(volume_server): record proxied-loop download coverage * test(volume_server/http): add disabled upload and download limit coverage * docs(volume_server): record disabled throttling path coverage * test(volume_server/grpc): add idempotent volume server leave coverage * docs(volume_server): record leave idempotence coverage * test(volume_server/http): add redirect collection query preservation coverage * docs(volume_server): record redirect collection query coverage * test(volume_server/http): assert admin server headers on status and health * docs(volume_server): record admin server header coverage * test(volume_server/http): assert healthz request-id echo parity * docs(volume_server): record healthz request-id parity coverage * test(volume_server/http): add over-limit invalid-vid download branch coverage * docs(volume_server): record over-limit invalid-vid branch coverage * test(volume_server/http): add public-port static asset coverage * docs(volume_server): record public static endpoint coverage * test(volume_server/http): add public head method parity coverage * docs(volume_server): record public head parity coverage * test(volume_server/http): add throttling wait-then-proceed path coverage * 
docs(volume_server): record throttling wait-then-proceed coverage * test(volume_server/http): add read cookie-mismatch not-found coverage * docs(volume_server): record read cookie-mismatch coverage * test(volume_server/http): add throttling timeout-recovery coverage * docs(volume_server): record throttling timeout-recovery coverage * test(volume_server/grpc): add ec generate mount info unmount lifecycle coverage * docs(volume_server): record ec positive lifecycle coverage * test(volume_server/grpc): add ec shard read and blob delete lifecycle coverage * docs(volume_server): record ec shard read/blob delete lifecycle coverage * test(volume_server/grpc): add ec rebuild and to-volume error branch coverage * docs(volume_server): record ec rebuild and to-volume branch coverage * test(volume_server/grpc): add ec shards-to-volume success roundtrip coverage * docs(volume_server): record ec shards-to-volume success coverage * test(volume_server/grpc): add ec receive and copy-file missing-source coverage * docs(volume_server): record ec receive and copy-file coverage * test(volume_server/grpc): add ec last-shard delete cleanup coverage * docs(volume_server): record ec last-shard delete cleanup coverage * test(volume_server/grpc): add volume copy success path coverage * docs(volume_server): record volume copy success coverage * test(volume_server/grpc): add volume copy overwrite-destination coverage * docs(volume_server): record volume copy overwrite coverage * test(volume_server/http): add write error-path variant coverage * docs(volume_server): record http write error-path coverage * test(volume_server/http): add conditional header precedence coverage * docs(volume_server): record conditional header precedence coverage * test(volume_server/http): add oversized combined range guard coverage * docs(volume_server): record oversized range guard coverage * test(volume_server/http): add image resize and crop read coverage * docs(volume_server): record image transform coverage * 
test(volume_server/http): add chunk-manifest expansion and bypass coverage * docs(volume_server): record chunk-manifest read coverage * test(volume_server/http): add compressed read encoding matrix coverage * docs(volume_server): record compressed read matrix coverage * test(volume_server/grpc): add tail receiver source replication coverage * docs(volume_server): record tail receiver replication coverage * test(volume_server/grpc): add tail sender large-needle chunking coverage * docs(volume_server): record tail sender chunking coverage * test(volume_server/grpc): add ec-backed volume needle status coverage * docs(volume_server): record ec-backed needle status coverage * test(volume_server/grpc): add ec shard copy from peer success coverage * docs(volume_server): record ec shard copy success coverage * test(volume_server/http): add chunk-manifest delete child cleanup coverage * docs(volume_server): record chunk-manifest delete cleanup coverage * test(volume_server/http): add chunk-manifest delete failure-path coverage * docs(volume_server): record chunk-manifest delete failure coverage * test(volume_server/grpc): add ec shard copy source-unavailable coverage * docs(volume_server): record ec shard copy source-unavailable coverage * parallel
446 lines
15 KiB
Go
446 lines
15 KiB
Go
package volume_server_grpc_test
|
|
|
|
import (
|
|
"context"
|
|
"net/http"
|
|
"strings"
|
|
"testing"
|
|
"time"
|
|
|
|
"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
|
|
"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
|
|
"github.com/seaweedfs/seaweedfs/weed/cluster"
|
|
"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
|
|
)
|
|
|
|
func TestVolumeNeedleStatusForUploadedFile(t *testing.T) {
|
|
if testing.Short() {
|
|
t.Skip("skipping integration test in short mode")
|
|
}
|
|
|
|
clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
|
|
conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
|
|
defer conn.Close()
|
|
|
|
const volumeID = uint32(21)
|
|
const needleID = uint64(778899)
|
|
const cookie = uint32(0xA1B2C3D4)
|
|
framework.AllocateVolume(t, grpcClient, volumeID, "")
|
|
|
|
fid := framework.NewFileID(volumeID, needleID, cookie)
|
|
client := framework.NewHTTPClient()
|
|
payload := []byte("needle-status-payload")
|
|
uploadResp := framework.UploadBytes(t, client, clusterHarness.VolumeAdminURL(), fid, payload)
|
|
_ = framework.ReadAllAndClose(t, uploadResp)
|
|
if uploadResp.StatusCode != http.StatusCreated {
|
|
t.Fatalf("upload status: expected 201, got %d", uploadResp.StatusCode)
|
|
}
|
|
|
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
|
defer cancel()
|
|
|
|
statusResp, err := grpcClient.VolumeNeedleStatus(ctx, &volume_server_pb.VolumeNeedleStatusRequest{
|
|
VolumeId: volumeID,
|
|
NeedleId: needleID,
|
|
})
|
|
if err != nil {
|
|
t.Fatalf("VolumeNeedleStatus failed: %v", err)
|
|
}
|
|
if statusResp.GetNeedleId() != needleID {
|
|
t.Fatalf("needle id mismatch: got %d want %d", statusResp.GetNeedleId(), needleID)
|
|
}
|
|
if statusResp.GetCookie() != cookie {
|
|
t.Fatalf("cookie mismatch: got %d want %d", statusResp.GetCookie(), cookie)
|
|
}
|
|
if statusResp.GetSize() == 0 {
|
|
t.Fatalf("expected non-zero needle size")
|
|
}
|
|
}
|
|
|
|
// TestVolumeNeedleStatusViaEcShardsWhenNormalVolumeUnmounted verifies that
// VolumeNeedleStatus falls back to mounted EC shards once the regular volume
// is unmounted: the previously uploaded needle still resolves, and a needle
// that was never written reports "not found" on the EC path.
func TestVolumeNeedleStatusViaEcShardsWhenNormalVolumeUnmounted(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()

	const volumeID = uint32(26)
	const needleID = uint64(778900)
	const cookie = uint32(0xA1B2C3D5)
	framework.AllocateVolume(t, grpcClient, volumeID, "")

	// Seed one needle over the admin HTTP endpoint so EC generation has data.
	client := framework.NewHTTPClient()
	fid := framework.NewFileID(volumeID, needleID, cookie)
	payload := []byte("needle-status-ec-path-payload")
	uploadResp := framework.UploadBytes(t, client, clusterHarness.VolumeAdminURL(), fid, payload)
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("upload status: expected 201, got %d", uploadResp.StatusCode)
	}

	// EC shard generation is slower than a plain rpc; use a wider timeout
	// than the other tests in this file.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Step 1: generate EC shards for the volume.
	_, err := grpcClient.VolumeEcShardsGenerate(ctx, &volume_server_pb.VolumeEcShardsGenerateRequest{
		VolumeId:   volumeID,
		Collection: "",
	})
	if err != nil {
		t.Fatalf("VolumeEcShardsGenerate failed: %v", err)
	}

	// Step 2: mount shard ids 0-9 (presumably the data shards of the EC
	// layout — TODO confirm against the server's data/parity shard split).
	_, err = grpcClient.VolumeEcShardsMount(ctx, &volume_server_pb.VolumeEcShardsMountRequest{
		VolumeId:   volumeID,
		Collection: "",
		ShardIds:   []uint32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
	})
	if err != nil {
		t.Fatalf("VolumeEcShardsMount data shards failed: %v", err)
	}

	// Step 3: unmount the normal volume so lookups can only be served by
	// the mounted EC shards.
	_, err = grpcClient.VolumeUnmount(ctx, &volume_server_pb.VolumeUnmountRequest{
		VolumeId: volumeID,
	})
	if err != nil {
		t.Fatalf("VolumeUnmount failed: %v", err)
	}

	// The seeded needle must still resolve with its original identity.
	statusResp, err := grpcClient.VolumeNeedleStatus(ctx, &volume_server_pb.VolumeNeedleStatusRequest{
		VolumeId: volumeID,
		NeedleId: needleID,
	})
	if err != nil {
		t.Fatalf("VolumeNeedleStatus via EC shards failed: %v", err)
	}
	if statusResp.GetNeedleId() != needleID {
		t.Fatalf("needle id mismatch: got %d want %d", statusResp.GetNeedleId(), needleID)
	}
	if statusResp.GetCookie() != cookie {
		t.Fatalf("cookie mismatch: got %d want %d", statusResp.GetCookie(), cookie)
	}
	if statusResp.GetSize() == 0 {
		t.Fatalf("expected non-zero needle size from EC-backed needle status")
	}

	// A needle id that was never written must yield a "not found" error
	// even on the EC-backed path.
	_, err = grpcClient.VolumeNeedleStatus(ctx, &volume_server_pb.VolumeNeedleStatusRequest{
		VolumeId: volumeID,
		NeedleId: needleID + 999999,
	})
	if err == nil || !strings.Contains(strings.ToLower(err.Error()), "not found") {
		t.Fatalf("VolumeNeedleStatus via EC shards missing-needle error mismatch: %v", err)
	}
}
|
|
|
|
func TestVolumeNeedleStatusMissingVolumeAndNeedle(t *testing.T) {
|
|
if testing.Short() {
|
|
t.Skip("skipping integration test in short mode")
|
|
}
|
|
|
|
clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
|
|
conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
|
|
defer conn.Close()
|
|
|
|
const volumeID = uint32(25)
|
|
framework.AllocateVolume(t, grpcClient, volumeID, "")
|
|
|
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
|
defer cancel()
|
|
|
|
_, err := grpcClient.VolumeNeedleStatus(ctx, &volume_server_pb.VolumeNeedleStatusRequest{
|
|
VolumeId: 99925,
|
|
NeedleId: 1,
|
|
})
|
|
if err == nil {
|
|
t.Fatalf("VolumeNeedleStatus should fail for missing volume")
|
|
}
|
|
if !strings.Contains(strings.ToLower(err.Error()), "volume not found") {
|
|
t.Fatalf("VolumeNeedleStatus missing-volume error mismatch: %v", err)
|
|
}
|
|
|
|
_, err = grpcClient.VolumeNeedleStatus(ctx, &volume_server_pb.VolumeNeedleStatusRequest{
|
|
VolumeId: volumeID,
|
|
NeedleId: 123456789,
|
|
})
|
|
if err == nil {
|
|
t.Fatalf("VolumeNeedleStatus should fail for missing needle")
|
|
}
|
|
if !strings.Contains(strings.ToLower(err.Error()), "not found") {
|
|
t.Fatalf("VolumeNeedleStatus missing-needle error mismatch: %v", err)
|
|
}
|
|
}
|
|
|
|
func mustNewRequest(t testing.TB, method, url string) *http.Request {
|
|
t.Helper()
|
|
req, err := http.NewRequest(method, url, nil)
|
|
if err != nil {
|
|
t.Fatalf("create request %s %s: %v", method, url, err)
|
|
}
|
|
return req
|
|
}
|
|
|
|
func TestVolumeConfigureInvalidReplication(t *testing.T) {
|
|
if testing.Short() {
|
|
t.Skip("skipping integration test in short mode")
|
|
}
|
|
|
|
clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
|
|
conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
|
|
defer conn.Close()
|
|
|
|
const volumeID = uint32(22)
|
|
framework.AllocateVolume(t, grpcClient, volumeID, "")
|
|
|
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
|
defer cancel()
|
|
|
|
resp, err := grpcClient.VolumeConfigure(ctx, &volume_server_pb.VolumeConfigureRequest{
|
|
VolumeId: volumeID,
|
|
Replication: "bad-replication",
|
|
})
|
|
if err != nil {
|
|
t.Fatalf("VolumeConfigure returned grpc error: %v", err)
|
|
}
|
|
if resp.GetError() == "" {
|
|
t.Fatalf("VolumeConfigure expected response error for invalid replication")
|
|
}
|
|
if !strings.Contains(strings.ToLower(resp.GetError()), "replication") {
|
|
t.Fatalf("VolumeConfigure error should mention replication, got: %q", resp.GetError())
|
|
}
|
|
}
|
|
|
|
func TestVolumeConfigureSuccessAndMissingRollbackPath(t *testing.T) {
|
|
if testing.Short() {
|
|
t.Skip("skipping integration test in short mode")
|
|
}
|
|
|
|
clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
|
|
conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
|
|
defer conn.Close()
|
|
|
|
const volumeID = uint32(24)
|
|
framework.AllocateVolume(t, grpcClient, volumeID, "")
|
|
|
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
|
defer cancel()
|
|
|
|
successResp, err := grpcClient.VolumeConfigure(ctx, &volume_server_pb.VolumeConfigureRequest{
|
|
VolumeId: volumeID,
|
|
Replication: "000",
|
|
})
|
|
if err != nil {
|
|
t.Fatalf("VolumeConfigure success path returned grpc error: %v", err)
|
|
}
|
|
if successResp.GetError() != "" {
|
|
t.Fatalf("VolumeConfigure success path expected empty response error, got: %q", successResp.GetError())
|
|
}
|
|
|
|
statusResp, err := grpcClient.VolumeStatus(ctx, &volume_server_pb.VolumeStatusRequest{VolumeId: volumeID})
|
|
if err != nil {
|
|
t.Fatalf("VolumeStatus after successful configure failed: %v", err)
|
|
}
|
|
if statusResp.GetIsReadOnly() {
|
|
t.Fatalf("VolumeStatus after configure expected writable volume")
|
|
}
|
|
|
|
missingResp, err := grpcClient.VolumeConfigure(ctx, &volume_server_pb.VolumeConfigureRequest{
|
|
VolumeId: 99024,
|
|
Replication: "000",
|
|
})
|
|
if err != nil {
|
|
t.Fatalf("VolumeConfigure missing-volume branch should return response error, got grpc error: %v", err)
|
|
}
|
|
if missingResp.GetError() == "" {
|
|
t.Fatalf("VolumeConfigure missing-volume expected non-empty response error")
|
|
}
|
|
lower := strings.ToLower(missingResp.GetError())
|
|
if !strings.Contains(lower, "not found on disk") {
|
|
t.Fatalf("VolumeConfigure missing-volume error should mention not found on disk, got: %q", missingResp.GetError())
|
|
}
|
|
if !strings.Contains(lower, "failed to restore mount") {
|
|
t.Fatalf("VolumeConfigure missing-volume error should include remount rollback failure, got: %q", missingResp.GetError())
|
|
}
|
|
}
|
|
|
|
func TestPingVolumeTargetAndLeaveAffectsHealthz(t *testing.T) {
|
|
if testing.Short() {
|
|
t.Skip("skipping integration test in short mode")
|
|
}
|
|
|
|
clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
|
|
conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
|
|
defer conn.Close()
|
|
|
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
|
defer cancel()
|
|
|
|
pingResp, err := grpcClient.Ping(ctx, &volume_server_pb.PingRequest{
|
|
TargetType: cluster.VolumeServerType,
|
|
Target: clusterHarness.VolumeServerAddress(),
|
|
})
|
|
if err != nil {
|
|
t.Fatalf("Ping target volume server failed: %v", err)
|
|
}
|
|
if pingResp.GetRemoteTimeNs() == 0 {
|
|
t.Fatalf("expected remote timestamp from ping target volume server")
|
|
}
|
|
|
|
if _, err = grpcClient.VolumeServerLeave(ctx, &volume_server_pb.VolumeServerLeaveRequest{}); err != nil {
|
|
t.Fatalf("VolumeServerLeave failed: %v", err)
|
|
}
|
|
|
|
client := framework.NewHTTPClient()
|
|
healthURL := clusterHarness.VolumeAdminURL() + "/healthz"
|
|
deadline := time.Now().Add(5 * time.Second)
|
|
for {
|
|
resp := framework.DoRequest(t, client, mustNewRequest(t, http.MethodGet, healthURL))
|
|
_ = framework.ReadAllAndClose(t, resp)
|
|
if resp.StatusCode == http.StatusServiceUnavailable {
|
|
return
|
|
}
|
|
if time.Now().After(deadline) {
|
|
t.Fatalf("expected healthz to return 503 after leave, got %d", resp.StatusCode)
|
|
}
|
|
time.Sleep(100 * time.Millisecond)
|
|
}
|
|
}
|
|
|
|
func TestVolumeServerLeaveIsIdempotent(t *testing.T) {
|
|
if testing.Short() {
|
|
t.Skip("skipping integration test in short mode")
|
|
}
|
|
|
|
clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
|
|
conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
|
|
defer conn.Close()
|
|
|
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
|
defer cancel()
|
|
|
|
if _, err := grpcClient.VolumeServerLeave(ctx, &volume_server_pb.VolumeServerLeaveRequest{}); err != nil {
|
|
t.Fatalf("first VolumeServerLeave failed: %v", err)
|
|
}
|
|
if _, err := grpcClient.VolumeServerLeave(ctx, &volume_server_pb.VolumeServerLeaveRequest{}); err != nil {
|
|
t.Fatalf("second VolumeServerLeave should be idempotent success, got: %v", err)
|
|
}
|
|
|
|
client := framework.NewHTTPClient()
|
|
healthURL := clusterHarness.VolumeAdminURL() + "/healthz"
|
|
deadline := time.Now().Add(5 * time.Second)
|
|
for {
|
|
resp := framework.DoRequest(t, client, mustNewRequest(t, http.MethodGet, healthURL))
|
|
_ = framework.ReadAllAndClose(t, resp)
|
|
if resp.StatusCode == http.StatusServiceUnavailable {
|
|
return
|
|
}
|
|
if time.Now().After(deadline) {
|
|
t.Fatalf("expected healthz to stay 503 after repeated leave, got %d", resp.StatusCode)
|
|
}
|
|
time.Sleep(100 * time.Millisecond)
|
|
}
|
|
}
|
|
|
|
func TestPingUnknownAndUnreachableTargetPaths(t *testing.T) {
|
|
if testing.Short() {
|
|
t.Skip("skipping integration test in short mode")
|
|
}
|
|
|
|
clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
|
|
conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
|
|
defer conn.Close()
|
|
|
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
|
defer cancel()
|
|
|
|
unknownResp, err := grpcClient.Ping(ctx, &volume_server_pb.PingRequest{
|
|
TargetType: "unknown-type",
|
|
Target: "127.0.0.1:12345",
|
|
})
|
|
if err != nil {
|
|
t.Fatalf("Ping unknown target type should not return grpc error, got: %v", err)
|
|
}
|
|
if unknownResp.GetRemoteTimeNs() != 0 {
|
|
t.Fatalf("Ping unknown target type expected remote_time_ns=0, got %d", unknownResp.GetRemoteTimeNs())
|
|
}
|
|
if unknownResp.GetStopTimeNs() < unknownResp.GetStartTimeNs() {
|
|
t.Fatalf("Ping unknown target type expected stop_time_ns >= start_time_ns")
|
|
}
|
|
|
|
_, err = grpcClient.Ping(ctx, &volume_server_pb.PingRequest{
|
|
TargetType: cluster.MasterType,
|
|
Target: "127.0.0.1:1",
|
|
})
|
|
if err == nil {
|
|
t.Fatalf("Ping master target should fail when target is unreachable")
|
|
}
|
|
if !strings.Contains(err.Error(), "ping master") {
|
|
t.Fatalf("Ping master unreachable error mismatch: %v", err)
|
|
}
|
|
|
|
_, err = grpcClient.Ping(ctx, &volume_server_pb.PingRequest{
|
|
TargetType: cluster.FilerType,
|
|
Target: "127.0.0.1:1",
|
|
})
|
|
if err == nil {
|
|
t.Fatalf("Ping filer target should fail when target is unreachable")
|
|
}
|
|
if !strings.Contains(err.Error(), "ping filer") {
|
|
t.Fatalf("Ping filer unreachable error mismatch: %v", err)
|
|
}
|
|
}
|
|
|
|
func TestPingMasterTargetSuccess(t *testing.T) {
|
|
if testing.Short() {
|
|
t.Skip("skipping integration test in short mode")
|
|
}
|
|
|
|
clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
|
|
conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
|
|
defer conn.Close()
|
|
|
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
|
defer cancel()
|
|
|
|
resp, err := grpcClient.Ping(ctx, &volume_server_pb.PingRequest{
|
|
TargetType: cluster.MasterType,
|
|
Target: clusterHarness.MasterAddress(),
|
|
})
|
|
if err != nil {
|
|
t.Fatalf("Ping master target success path failed: %v", err)
|
|
}
|
|
if resp.GetRemoteTimeNs() == 0 {
|
|
t.Fatalf("Ping master target expected non-zero remote time")
|
|
}
|
|
if resp.GetStopTimeNs() < resp.GetStartTimeNs() {
|
|
t.Fatalf("Ping master target expected stop >= start, got start=%d stop=%d", resp.GetStartTimeNs(), resp.GetStopTimeNs())
|
|
}
|
|
}
|
|
|
|
func TestPingFilerTargetSuccess(t *testing.T) {
|
|
if testing.Short() {
|
|
t.Skip("skipping integration test in short mode")
|
|
}
|
|
|
|
clusterHarness := framework.StartSingleVolumeClusterWithFiler(t, matrix.P1())
|
|
conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
|
|
defer conn.Close()
|
|
|
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
|
defer cancel()
|
|
|
|
resp, err := grpcClient.Ping(ctx, &volume_server_pb.PingRequest{
|
|
TargetType: cluster.FilerType,
|
|
Target: clusterHarness.FilerServerAddress(),
|
|
})
|
|
if err != nil {
|
|
t.Fatalf("Ping filer target success path failed: %v", err)
|
|
}
|
|
if resp.GetRemoteTimeNs() == 0 {
|
|
t.Fatalf("Ping filer target expected non-zero remote time")
|
|
}
|
|
if resp.GetStopTimeNs() < resp.GetStartTimeNs() {
|
|
t.Fatalf("Ping filer target expected stop >= start, got start=%d stop=%d", resp.GetStartTimeNs(), resp.GetStopTimeNs())
|
|
}
|
|
}
|