Add volume server integration test suite and CI workflow (#8322)

* docs(volume_server): add integration test development plan

* test(volume_server): add integration harness and profile matrix

* test(volume_server/http): add admin and options integration coverage

* test(volume_server/grpc): add state and status integration coverage

* test(volume_server): auto-build weed binary and harden cluster startup

* test(volume_server/http): add upload read range head delete coverage

* test(volume_server/grpc): expand admin lifecycle and state coverage

* docs(volume_server): update progress tracker for implemented tests

* test(volume_server/http): cover if-none-match and invalid-range branches

* test(volume_server/grpc): add batch delete integration coverage

* docs(volume_server): log latest HTTP and gRPC test coverage

* ci(volume_server): run volume server integration tests in github actions

* test(volume_server/grpc): add needle status configure ping and leave coverage

* docs(volume_server): record additional grpc coverage progress

* test(volume_server/grpc): add vacuum integration coverage

* docs(volume_server): record vacuum test coverage progress

* test(volume_server/grpc): add read and write needle blob error-path coverage

* docs(volume_server): record data rw grpc coverage progress

* test(volume_server/http): add jwt auth integration coverage

* test(volume_server/grpc): add sync copy and stream error-path coverage

* docs(volume_server): record jwt and sync/copy test coverage

* test(volume_server/grpc): add scrub and query integration coverage

* test(volume_server/grpc): add volume tail sender and receiver coverage

* docs(volume_server): record scrub query and tail test progress

* test(volume_server/grpc): add readonly writable and collection lifecycle coverage

* test(volume_server/http): add public-port cors and method parity coverage

* test(volume_server/grpc): add blob meta and read-all success path coverage

* test(volume_server/grpc): expand scrub and query variation coverage

* test(volume_server/grpc): add tiering and remote fetch error-path coverage

* test(volume_server/http): add unchanged write and delete edge-case coverage

* test(volume_server/grpc): add ping unknown and unreachable target coverage

* test(volume_server/grpc): add volume delete only-empty variation coverage

* test(volume_server/http): add jwt fid-mismatch auth coverage

* test(volume_server/grpc): add scrub ec auto-select empty coverage

* test(volume_server/grpc): stabilize ping timestamp assertion

* docs(volume_server): update integration coverage progress log

* test(volume_server/grpc): add tier remote backend and config variation coverage

* docs(volume_server): record tier remote variation progress

* test(volume_server/grpc): add incremental copy and receive-file protocol coverage

* test(volume_server/http): add read path shape and if-modified-since coverage

* test(volume_server/grpc): add copy-file compaction and receive-file success coverage

* test(volume_server/http): add passthrough headers and static asset coverage

* test(volume_server/grpc): add ping filer unreachable coverage

* docs(volume_server): record copy receive and http variant progress

* test(volume_server/grpc): add erasure coding maintenance and missing-path coverage

* docs(volume_server): record initial erasure coding rpc coverage

* test(volume_server/http): add multi-range multipart response coverage

* docs(volume_server): record multi-range http coverage progress

* test(volume_server/grpc): add query empty-stripe no-match coverage

* docs(volume_server): record query no-match stream behavior coverage

* test(volume_server/http): add upload throttling timeout and replicate bypass coverage

* docs(volume_server): record upload throttling coverage progress

* test(volume_server/http): add download throttling timeout coverage

* docs(volume_server): record download throttling coverage progress

* test(volume_server/http): add jwt wrong-cookie fid mismatch coverage

* docs(volume_server): record jwt wrong-cookie mismatch coverage

* test(volume_server/http): add jwt expired-token rejection coverage

* docs(volume_server): record jwt expired-token coverage

* test(volume_server/http): add jwt query and cookie transport coverage

* docs(volume_server): record jwt token transport coverage

* test(volume_server/http): add jwt token-source precedence coverage

* docs(volume_server): record jwt token-source precedence coverage

* test(volume_server/http): add jwt header-over-cookie precedence coverage

* docs(volume_server): record jwt header cookie precedence coverage

* test(volume_server/http): add jwt query-over-cookie precedence coverage

* docs(volume_server): record jwt query cookie precedence coverage

* test(volume_server/grpc): add setstate version mismatch and nil-state coverage

* docs(volume_server): record setstate validation coverage

* test(volume_server/grpc): add readonly persist-true lifecycle coverage

* docs(volume_server): record readonly persist variation coverage

* test(volume_server/http): add options origin cors header coverage

* docs(volume_server): record options origin cors coverage

* test(volume_server/http): add trace unsupported-method parity coverage

* docs(volume_server): record trace method parity coverage

* test(volume_server/grpc): add batch delete cookie-check variation coverage

* docs(volume_server): record batch delete cookie-check coverage

* test(volume_server/grpc): add admin lifecycle missing and maintenance variants

* docs(volume_server): record admin lifecycle edge-case coverage

* test(volume_server/grpc): add mixed batch delete status matrix coverage

* docs(volume_server): record mixed batch delete matrix coverage

* test(volume_server/http): add jwt-profile ui access gating coverage

* docs(volume_server): record jwt ui-gating http coverage

* test(volume_server/http): add propfind unsupported-method parity coverage

* docs(volume_server): record propfind method parity coverage

* test(volume_server/grpc): add volume configure success and rollback-path coverage

* docs(volume_server): record volume configure branch coverage

* test(volume_server/grpc): add volume needle status missing-path coverage

* docs(volume_server): record volume needle status error-path coverage

* test(volume_server/http): add readDeleted query behavior coverage

* docs(volume_server): record readDeleted http behavior coverage

* test(volume_server/http): add delete ts override parity coverage

* docs(volume_server): record delete ts parity coverage

* test(volume_server/grpc): add invalid blob/meta offset coverage

* docs(volume_server): record invalid blob/meta offset coverage

* test(volume_server/grpc): add read-all mixed volume abort coverage

* docs(volume_server): record read-all mixed-volume abort coverage

* test(volume_server/http): assert head response body parity

* docs(volume_server): record head body parity assertion

* test(volume_server/grpc): assert status state and memory payload completeness

* docs(volume_server): record volume server status payload coverage

* test(volume_server/grpc): add batch delete chunk-manifest rejection coverage

* docs(volume_server): record batch delete chunk-manifest coverage

* test(volume_server/grpc): add query cookie-mismatch eof parity coverage

* docs(volume_server): record query cookie-mismatch parity coverage

* test(volume_server/grpc): add ping master success target coverage

* docs(volume_server): record ping master success coverage

* test(volume_server/http): add head if-none-match conditional parity

* docs(volume_server): record head if-none-match parity coverage

* test(volume_server/http): add head if-modified-since parity coverage

* docs(volume_server): record head if-modified-since parity coverage

* test(volume_server/http): add connect unsupported-method parity coverage

* docs(volume_server): record connect method parity coverage

* test(volume_server/http): assert options allow-headers cors parity

* docs(volume_server): record options allow-headers coverage

* test(volume_server/framework): add dual volume cluster integration harness

* test(volume_server/http): add missing-local read mode proxy redirect local coverage

* docs(volume_server): record read mode missing-local matrix coverage

* test(volume_server/http): add download over-limit replica proxy fallback coverage

* docs(volume_server): record download replica fallback coverage

* test(volume_server/http): add missing-local readDeleted proxy redirect parity coverage

* docs(volume_server): record missing-local readDeleted mode coverage

* test(volume_server/framework): add single-volume cluster with filer harness

* test(volume_server/grpc): add ping filer success target coverage

* docs(volume_server): record ping filer success coverage

* test(volume_server/http): add proxied-loop guard download timeout coverage

* docs(volume_server): record proxied-loop download coverage

* test(volume_server/http): add disabled upload and download limit coverage

* docs(volume_server): record disabled throttling path coverage

* test(volume_server/grpc): add idempotent volume server leave coverage

* docs(volume_server): record leave idempotence coverage

* test(volume_server/http): add redirect collection query preservation coverage

* docs(volume_server): record redirect collection query coverage

* test(volume_server/http): assert admin server headers on status and health

* docs(volume_server): record admin server header coverage

* test(volume_server/http): assert healthz request-id echo parity

* docs(volume_server): record healthz request-id parity coverage

* test(volume_server/http): add over-limit invalid-vid download branch coverage

* docs(volume_server): record over-limit invalid-vid branch coverage

* test(volume_server/http): add public-port static asset coverage

* docs(volume_server): record public static endpoint coverage

* test(volume_server/http): add public head method parity coverage

* docs(volume_server): record public head parity coverage

* test(volume_server/http): add throttling wait-then-proceed path coverage

* docs(volume_server): record throttling wait-then-proceed coverage

* test(volume_server/http): add read cookie-mismatch not-found coverage

* docs(volume_server): record read cookie-mismatch coverage

* test(volume_server/http): add throttling timeout-recovery coverage

* docs(volume_server): record throttling timeout-recovery coverage

* test(volume_server/grpc): add ec generate mount info unmount lifecycle coverage

* docs(volume_server): record ec positive lifecycle coverage

* test(volume_server/grpc): add ec shard read and blob delete lifecycle coverage

* docs(volume_server): record ec shard read/blob delete lifecycle coverage

* test(volume_server/grpc): add ec rebuild and to-volume error branch coverage

* docs(volume_server): record ec rebuild and to-volume branch coverage

* test(volume_server/grpc): add ec shards-to-volume success roundtrip coverage

* docs(volume_server): record ec shards-to-volume success coverage

* test(volume_server/grpc): add ec receive and copy-file missing-source coverage

* docs(volume_server): record ec receive and copy-file coverage

* test(volume_server/grpc): add ec last-shard delete cleanup coverage

* docs(volume_server): record ec last-shard delete cleanup coverage

* test(volume_server/grpc): add volume copy success path coverage

* docs(volume_server): record volume copy success coverage

* test(volume_server/grpc): add volume copy overwrite-destination coverage

* docs(volume_server): record volume copy overwrite coverage

* test(volume_server/http): add write error-path variant coverage

* docs(volume_server): record http write error-path coverage

* test(volume_server/http): add conditional header precedence coverage

* docs(volume_server): record conditional header precedence coverage

* test(volume_server/http): add oversized combined range guard coverage

* docs(volume_server): record oversized range guard coverage

* test(volume_server/http): add image resize and crop read coverage

* docs(volume_server): record image transform coverage

* test(volume_server/http): add chunk-manifest expansion and bypass coverage

* docs(volume_server): record chunk-manifest read coverage

* test(volume_server/http): add compressed read encoding matrix coverage

* docs(volume_server): record compressed read matrix coverage

* test(volume_server/grpc): add tail receiver source replication coverage

* docs(volume_server): record tail receiver replication coverage

* test(volume_server/grpc): add tail sender large-needle chunking coverage

* docs(volume_server): record tail sender chunking coverage

* test(volume_server/grpc): add ec-backed volume needle status coverage

* docs(volume_server): record ec-backed needle status coverage

* test(volume_server/grpc): add ec shard copy from peer success coverage

* docs(volume_server): record ec shard copy success coverage

* test(volume_server/http): add chunk-manifest delete child cleanup coverage

* docs(volume_server): record chunk-manifest delete cleanup coverage

* test(volume_server/http): add chunk-manifest delete failure-path coverage

* docs(volume_server): record chunk-manifest delete failure coverage

* test(volume_server/grpc): add ec shard copy source-unavailable coverage

* docs(volume_server): record ec shard copy source-unavailable coverage

* parallel
This commit is contained in:
Chris Lu
2026-02-13 00:40:56 -08:00
committed by GitHub
parent c433fee36a
commit beeb375a88
41 changed files with 9459 additions and 0 deletions

View File

@@ -0,0 +1,174 @@
package volume_server_http_test
import (
"encoding/json"
"net/http"
"strings"
"testing"
"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
"github.com/seaweedfs/seaweedfs/weed/util/request_id"
)
// TestAdminStatusAndHealthz verifies the admin-port endpoints of a single
// volume server: /status returns 200 with an identifying Server header, an
// echoed request id, and the expected JSON payload fields; /healthz mirrors
// the same header behavior; and /ui/index.html serves the status UI page.
func TestAdminStatusAndHealthz(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	cluster := framework.StartSingleVolumeCluster(t, matrix.P1())
	client := framework.NewHTTPClient()

	// /status: build the request via the shared mustNewRequest helper for
	// consistency with the rest of this file, and tag it with a request id so
	// the echo behavior can be asserted.
	statusReq := mustNewRequest(t, http.MethodGet, cluster.VolumeAdminURL()+"/status")
	statusReq.Header.Set(request_id.AmzRequestIDHeader, "test-request-id-1")
	statusResp := framework.DoRequest(t, client, statusReq)
	statusBody := framework.ReadAllAndClose(t, statusResp)
	if statusResp.StatusCode != http.StatusOK {
		t.Fatalf("expected /status code 200, got %d, body: %s", statusResp.StatusCode, string(statusBody))
	}
	if got := statusResp.Header.Get("Server"); !strings.Contains(got, "SeaweedFS Volume") {
		t.Fatalf("expected /status Server header to contain SeaweedFS Volume, got %q", got)
	}
	if got := statusResp.Header.Get(request_id.AmzRequestIDHeader); got != "test-request-id-1" {
		t.Fatalf("expected echoed request id, got %q", got)
	}
	var payload map[string]interface{}
	if err := json.Unmarshal(statusBody, &payload); err != nil {
		t.Fatalf("decode status response: %v", err)
	}
	// Spot-check the core fields of the status document without pinning its
	// full schema.
	for _, field := range []string{"Version", "DiskStatuses", "Volumes"} {
		if _, found := payload[field]; !found {
			t.Fatalf("status payload missing field %q", field)
		}
	}

	// /healthz: liveness endpoint should expose the same Server and
	// request-id header behavior as /status.
	healthReq := mustNewRequest(t, http.MethodGet, cluster.VolumeAdminURL()+"/healthz")
	healthReq.Header.Set(request_id.AmzRequestIDHeader, "test-request-id-2")
	healthResp := framework.DoRequest(t, client, healthReq)
	_ = framework.ReadAllAndClose(t, healthResp)
	if healthResp.StatusCode != http.StatusOK {
		t.Fatalf("expected /healthz code 200, got %d", healthResp.StatusCode)
	}
	if got := healthResp.Header.Get("Server"); !strings.Contains(got, "SeaweedFS Volume") {
		t.Fatalf("expected /healthz Server header to contain SeaweedFS Volume, got %q", got)
	}
	if got := healthResp.Header.Get(request_id.AmzRequestIDHeader); got != "test-request-id-2" {
		t.Fatalf("expected /healthz echoed request id, got %q", got)
	}

	// /ui/index.html: a loose content check — the page should at least
	// mention "volume" somewhere in its HTML.
	uiResp := framework.DoRequest(t, client, mustNewRequest(t, http.MethodGet, cluster.VolumeAdminURL()+"/ui/index.html"))
	uiBody := framework.ReadAllAndClose(t, uiResp)
	if uiResp.StatusCode != http.StatusOK {
		t.Fatalf("expected /ui/index.html code 200, got %d, body: %s", uiResp.StatusCode, string(uiBody))
	}
	if !strings.Contains(strings.ToLower(string(uiBody)), "volume") {
		t.Fatalf("ui page does not look like volume status page")
	}
}
// TestOptionsMethodsByPort asserts that CORS preflight advertisements differ
// by port: the admin port allows the full read/write method set while the
// public port advertises only read-oriented methods; both return wildcard
// Access-Control-Allow-Headers.
func TestOptionsMethodsByPort(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	harness := framework.StartSingleVolumeCluster(t, matrix.P2())
	httpClient := framework.NewHTTPClient()

	// Admin port preflight: every mutating method plus GET/OPTIONS must appear.
	adminPreflight := framework.DoRequest(t, httpClient, mustNewRequest(t, http.MethodOptions, harness.VolumeAdminURL()+"/"))
	_ = framework.ReadAllAndClose(t, adminPreflight)
	if adminPreflight.StatusCode != http.StatusOK {
		t.Fatalf("admin OPTIONS expected 200, got %d", adminPreflight.StatusCode)
	}
	adminMethods := adminPreflight.Header.Get("Access-Control-Allow-Methods")
	for _, want := range []string{"PUT", "POST", "GET", "DELETE", "OPTIONS"} {
		if !strings.Contains(adminMethods, want) {
			t.Fatalf("admin allow methods missing %q, got %q", want, adminMethods)
		}
	}
	if got := adminPreflight.Header.Get("Access-Control-Allow-Headers"); got != "*" {
		t.Fatalf("admin allow headers expected '*', got %q", got)
	}

	// Public port preflight: read methods only — POST must be absent.
	publicPreflight := framework.DoRequest(t, httpClient, mustNewRequest(t, http.MethodOptions, harness.VolumePublicURL()+"/"))
	_ = framework.ReadAllAndClose(t, publicPreflight)
	if publicPreflight.StatusCode != http.StatusOK {
		t.Fatalf("public OPTIONS expected 200, got %d", publicPreflight.StatusCode)
	}
	publicMethods := publicPreflight.Header.Get("Access-Control-Allow-Methods")
	if !strings.Contains(publicMethods, "GET") || !strings.Contains(publicMethods, "OPTIONS") {
		t.Fatalf("public allow methods expected GET and OPTIONS, got %q", publicMethods)
	}
	if strings.Contains(publicMethods, "POST") {
		t.Fatalf("public allow methods should not include POST, got %q", publicMethods)
	}
	if got := publicPreflight.Header.Get("Access-Control-Allow-Headers"); got != "*" {
		t.Fatalf("public allow headers expected '*', got %q", got)
	}
}
// TestOptionsWithOriginIncludesCorsHeaders asserts that an OPTIONS request
// carrying an Origin header receives permissive CORS response headers
// (wildcard allow-origin, credentials allowed) on both the admin port and the
// public port.
func TestOptionsWithOriginIncludesCorsHeaders(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	harness := framework.StartSingleVolumeCluster(t, matrix.P2())
	httpClient := framework.NewHTTPClient()

	// Admin port preflight with an explicit Origin header.
	adminPreflight := mustNewRequest(t, http.MethodOptions, harness.VolumeAdminURL()+"/")
	adminPreflight.Header.Set("Origin", "https://example.com")
	adminResult := framework.DoRequest(t, httpClient, adminPreflight)
	_ = framework.ReadAllAndClose(t, adminResult)
	if adminResult.StatusCode != http.StatusOK {
		t.Fatalf("admin OPTIONS expected 200, got %d", adminResult.StatusCode)
	}
	if got := adminResult.Header.Get("Access-Control-Allow-Origin"); got != "*" {
		t.Fatalf("admin OPTIONS expected Access-Control-Allow-Origin=*, got %q", got)
	}
	if got := adminResult.Header.Get("Access-Control-Allow-Credentials"); got != "true" {
		t.Fatalf("admin OPTIONS expected Access-Control-Allow-Credentials=true, got %q", got)
	}

	// Public port preflight with the same Origin header.
	publicPreflight := mustNewRequest(t, http.MethodOptions, harness.VolumePublicURL()+"/")
	publicPreflight.Header.Set("Origin", "https://example.com")
	publicResult := framework.DoRequest(t, httpClient, publicPreflight)
	_ = framework.ReadAllAndClose(t, publicResult)
	if publicResult.StatusCode != http.StatusOK {
		t.Fatalf("public OPTIONS expected 200, got %d", publicResult.StatusCode)
	}
	if got := publicResult.Header.Get("Access-Control-Allow-Origin"); got != "*" {
		t.Fatalf("public OPTIONS expected Access-Control-Allow-Origin=*, got %q", got)
	}
	if got := publicResult.Header.Get("Access-Control-Allow-Credentials"); got != "true" {
		t.Fatalf("public OPTIONS expected Access-Control-Allow-Credentials=true, got %q", got)
	}
}
// TestUiIndexNotExposedWhenJwtSigningEnabled verifies that when the cluster
// runs under the JWT-signing profile (matrix.P3), the status UI page is gated
// by authentication and an unauthenticated request is rejected with 401.
func TestUiIndexNotExposedWhenJwtSigningEnabled(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	harness := framework.StartSingleVolumeCluster(t, matrix.P3())
	httpClient := framework.NewHTTPClient()
	uiReq := mustNewRequest(t, http.MethodGet, harness.VolumeAdminURL()+"/ui/index.html")
	uiResp := framework.DoRequest(t, httpClient, uiReq)
	uiBody := framework.ReadAllAndClose(t, uiResp)
	if uiResp.StatusCode != http.StatusUnauthorized {
		t.Fatalf("expected /ui/index.html to be gated by auth under JWT profile (401), got %d body=%s", uiResp.StatusCode, string(uiBody))
	}
}
// mustNewRequest builds a body-less HTTP request for the given method and URL,
// failing the test immediately if the request cannot be constructed.
func mustNewRequest(t testing.TB, method, url string) *http.Request {
	t.Helper()
	request, buildErr := http.NewRequest(method, url, nil)
	if buildErr != nil {
		t.Fatalf("create request %s %s: %v", method, url, buildErr)
	}
	return request
}

View File

@@ -0,0 +1,419 @@
package volume_server_http_test
import (
"bytes"
"net/http"
"testing"
"time"
jwt "github.com/golang-jwt/jwt/v5"
"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
"github.com/seaweedfs/seaweedfs/weed/security"
)
// TestJWTAuthForWriteAndRead exercises JWT enforcement on the admin port under
// the JWT-enabled profile (matrix.P3): writes without a token or with a
// malformed token are rejected with 401; a write token signed with the
// profile's signing key is accepted (201); reads without a token are rejected
// with 401; and a read token signed with the read key returns the stored bytes.
func TestJWTAuthForWriteAndRead(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	profile := matrix.P3()
	clusterHarness := framework.StartSingleVolumeCluster(t, profile)
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()
	// Fixed volume/needle/cookie values make the fid deterministic so JWTs
	// can be minted for exactly this file id.
	const volumeID = uint32(51)
	const needleID = uint64(123456)
	const cookie = uint32(0xABCDEF12)
	framework.AllocateVolume(t, grpcClient, volumeID, "")
	fid := framework.NewFileID(volumeID, needleID, cookie)
	payload := []byte("jwt-protected-content")
	client := framework.NewHTTPClient()
	// Write without any token: must be rejected.
	unauthWrite := newUploadRequest(t, clusterHarness.VolumeAdminURL()+"/"+fid, payload)
	unauthWriteResp := framework.DoRequest(t, client, unauthWrite)
	_ = framework.ReadAllAndClose(t, unauthWriteResp)
	if unauthWriteResp.StatusCode != http.StatusUnauthorized {
		t.Fatalf("unauthorized write expected 401, got %d", unauthWriteResp.StatusCode)
	}
	// Write with a syntactically invalid bearer token: must be rejected.
	invalidWrite := newUploadRequest(t, clusterHarness.VolumeAdminURL()+"/"+fid, payload)
	invalidWrite.Header.Set("Authorization", "Bearer invalid")
	invalidWriteResp := framework.DoRequest(t, client, invalidWrite)
	_ = framework.ReadAllAndClose(t, invalidWriteResp)
	if invalidWriteResp.StatusCode != http.StatusUnauthorized {
		t.Fatalf("invalid write token expected 401, got %d", invalidWriteResp.StatusCode)
	}
	// Write with a valid token minted for this fid using the signing key.
	writeToken := security.GenJwtForVolumeServer(security.SigningKey([]byte(profile.JWTSigningKey)), 60, fid)
	authWrite := newUploadRequest(t, clusterHarness.VolumeAdminURL()+"/"+fid, payload)
	authWrite.Header.Set("Authorization", "Bearer "+string(writeToken))
	authWriteResp := framework.DoRequest(t, client, authWrite)
	_ = framework.ReadAllAndClose(t, authWriteResp)
	if authWriteResp.StatusCode != http.StatusCreated {
		t.Fatalf("authorized write expected 201, got %d", authWriteResp.StatusCode)
	}
	// Read without any token: must also be rejected under this profile.
	unauthReadReq := mustNewRequest(t, http.MethodGet, clusterHarness.VolumeAdminURL()+"/"+fid)
	unauthReadResp := framework.DoRequest(t, client, unauthReadReq)
	_ = framework.ReadAllAndClose(t, unauthReadResp)
	if unauthReadResp.StatusCode != http.StatusUnauthorized {
		t.Fatalf("unauthorized read expected 401, got %d", unauthReadResp.StatusCode)
	}
	// Read with a valid token minted using the read key: the returned content
	// must match what was written above.
	readToken := security.GenJwtForVolumeServer(security.SigningKey([]byte(profile.JWTReadKey)), 60, fid)
	authReadReq := mustNewRequest(t, http.MethodGet, clusterHarness.VolumeAdminURL()+"/"+fid)
	authReadReq.Header.Set("Authorization", "Bearer "+string(readToken))
	authReadResp := framework.DoRequest(t, client, authReadReq)
	authReadBody := framework.ReadAllAndClose(t, authReadResp)
	if authReadResp.StatusCode != http.StatusOK {
		t.Fatalf("authorized read expected 200, got %d", authReadResp.StatusCode)
	}
	if string(authReadBody) != string(payload) {
		t.Fatalf("authorized read content mismatch: got %q want %q", string(authReadBody), string(payload))
	}
}
// TestJWTAuthRejectsFidMismatch verifies that volume-server JWTs are bound to
// a specific file id: a token minted for a different fid, or for the same
// needle with a different cookie, must not authorize a write or read of the
// target fid — while a token minted for the exact fid is accepted.
func TestJWTAuthRejectsFidMismatch(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	profile := matrix.P3()
	clusterHarness := framework.StartSingleVolumeCluster(t, profile)
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()
	// Target fid plus two "wrong" variants: a fully different needle/cookie
	// pair, and the same needle with only the cookie altered by one bit.
	const volumeID = uint32(52)
	const needleID = uint64(223344)
	const cookie = uint32(0x10203040)
	const otherNeedleID = uint64(223345)
	const otherCookie = uint32(0x50607080)
	const wrongCookie = uint32(0x10203041)
	framework.AllocateVolume(t, grpcClient, volumeID, "")
	fid := framework.NewFileID(volumeID, needleID, cookie)
	otherFid := framework.NewFileID(volumeID, otherNeedleID, otherCookie)
	payload := []byte("jwt-fid-mismatch-content")
	client := framework.NewHTTPClient()
	// Write using a token minted for a different fid: must be rejected.
	writeTokenForOtherFid := security.GenJwtForVolumeServer(security.SigningKey([]byte(profile.JWTSigningKey)), 60, otherFid)
	mismatchedWrite := newUploadRequest(t, clusterHarness.VolumeAdminURL()+"/"+fid, payload)
	mismatchedWrite.Header.Set("Authorization", "Bearer "+string(writeTokenForOtherFid))
	mismatchedWriteResp := framework.DoRequest(t, client, mismatchedWrite)
	_ = framework.ReadAllAndClose(t, mismatchedWriteResp)
	if mismatchedWriteResp.StatusCode != http.StatusUnauthorized {
		t.Fatalf("write with mismatched fid token expected 401, got %d", mismatchedWriteResp.StatusCode)
	}
	// Write using a token for the right needle but the wrong cookie: the
	// cookie is part of the fid, so this must also be rejected.
	wrongCookieFid := framework.NewFileID(volumeID, needleID, wrongCookie)
	writeTokenWrongCookie := security.GenJwtForVolumeServer(security.SigningKey([]byte(profile.JWTSigningKey)), 60, wrongCookieFid)
	wrongCookieWrite := newUploadRequest(t, clusterHarness.VolumeAdminURL()+"/"+fid, payload)
	wrongCookieWrite.Header.Set("Authorization", "Bearer "+string(writeTokenWrongCookie))
	wrongCookieWriteResp := framework.DoRequest(t, client, wrongCookieWrite)
	_ = framework.ReadAllAndClose(t, wrongCookieWriteResp)
	if wrongCookieWriteResp.StatusCode != http.StatusUnauthorized {
		t.Fatalf("write with wrong-cookie fid token expected 401, got %d", wrongCookieWriteResp.StatusCode)
	}
	// Write using a token minted for the exact fid: must succeed, which also
	// seeds content for the read-side mismatch checks below.
	writeTokenForFid := security.GenJwtForVolumeServer(security.SigningKey([]byte(profile.JWTSigningKey)), 60, fid)
	validWrite := newUploadRequest(t, clusterHarness.VolumeAdminURL()+"/"+fid, payload)
	validWrite.Header.Set("Authorization", "Bearer "+string(writeTokenForFid))
	validWriteResp := framework.DoRequest(t, client, validWrite)
	_ = framework.ReadAllAndClose(t, validWriteResp)
	if validWriteResp.StatusCode != http.StatusCreated {
		t.Fatalf("authorized write expected 201, got %d", validWriteResp.StatusCode)
	}
	// Read using a read token minted for a different fid: must be rejected.
	readTokenForOtherFid := security.GenJwtForVolumeServer(security.SigningKey([]byte(profile.JWTReadKey)), 60, otherFid)
	mismatchedReadReq := mustNewRequest(t, http.MethodGet, clusterHarness.VolumeAdminURL()+"/"+fid)
	mismatchedReadReq.Header.Set("Authorization", "Bearer "+string(readTokenForOtherFid))
	mismatchedReadResp := framework.DoRequest(t, client, mismatchedReadReq)
	_ = framework.ReadAllAndClose(t, mismatchedReadResp)
	if mismatchedReadResp.StatusCode != http.StatusUnauthorized {
		t.Fatalf("read with mismatched fid token expected 401, got %d", mismatchedReadResp.StatusCode)
	}
	// Read using a read token for the right needle but the wrong cookie: must
	// be rejected as well.
	readTokenWrongCookie := security.GenJwtForVolumeServer(security.SigningKey([]byte(profile.JWTReadKey)), 60, wrongCookieFid)
	wrongCookieReadReq := mustNewRequest(t, http.MethodGet, clusterHarness.VolumeAdminURL()+"/"+fid)
	wrongCookieReadReq.Header.Set("Authorization", "Bearer "+string(readTokenWrongCookie))
	wrongCookieReadResp := framework.DoRequest(t, client, wrongCookieReadReq)
	_ = framework.ReadAllAndClose(t, wrongCookieReadResp)
	if wrongCookieReadResp.StatusCode != http.StatusUnauthorized {
		t.Fatalf("read with wrong-cookie fid token expected 401, got %d", wrongCookieReadResp.StatusCode)
	}
}
// newUploadRequest builds a POST request carrying payload as an
// application/octet-stream body, failing the test if construction fails.
func newUploadRequest(t testing.TB, url string, payload []byte) *http.Request {
	t.Helper()
	body := bytes.NewReader(payload)
	upload, buildErr := http.NewRequest(http.MethodPost, url, body)
	if buildErr != nil {
		t.Fatalf("create upload request %s: %v", url, buildErr)
	}
	upload.Header.Set("Content-Type", "application/octet-stream")
	return upload
}
// TestJWTAuthRejectsExpiredTokens verifies that an expired JWT is rejected
// with 401 on both the write and read paths, even when the token is otherwise
// correctly signed for the target fid.
// NOTE(review): mustGenExpiredToken is defined elsewhere in this package;
// presumably it mints a token whose expiry timestamp is already in the past —
// confirm against its definition.
func TestJWTAuthRejectsExpiredTokens(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	profile := matrix.P3()
	clusterHarness := framework.StartSingleVolumeCluster(t, profile)
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()
	// Fixed ids keep the fid deterministic so tokens can be minted for it.
	const volumeID = uint32(53)
	const needleID = uint64(334455)
	const cookie = uint32(0x22334455)
	framework.AllocateVolume(t, grpcClient, volumeID, "")
	fid := framework.NewFileID(volumeID, needleID, cookie)
	payload := []byte("expired-token-content")
	client := framework.NewHTTPClient()
	// Write with an expired token signed by the write (signing) key: 401.
	expiredWriteToken := mustGenExpiredToken(t, []byte(profile.JWTSigningKey), fid)
	writeReq := newUploadRequest(t, clusterHarness.VolumeAdminURL()+"/"+fid, payload)
	writeReq.Header.Set("Authorization", "Bearer "+expiredWriteToken)
	writeResp := framework.DoRequest(t, client, writeReq)
	_ = framework.ReadAllAndClose(t, writeResp)
	if writeResp.StatusCode != http.StatusUnauthorized {
		t.Fatalf("expired write token expected 401, got %d", writeResp.StatusCode)
	}
	// Seed data with a valid token so read auth path can be exercised against existing content.
	validWriteToken := security.GenJwtForVolumeServer(security.SigningKey([]byte(profile.JWTSigningKey)), 60, fid)
	validWriteReq := newUploadRequest(t, clusterHarness.VolumeAdminURL()+"/"+fid, payload)
	validWriteReq.Header.Set("Authorization", "Bearer "+string(validWriteToken))
	validWriteResp := framework.DoRequest(t, client, validWriteReq)
	_ = framework.ReadAllAndClose(t, validWriteResp)
	if validWriteResp.StatusCode != http.StatusCreated {
		t.Fatalf("valid write expected 201, got %d", validWriteResp.StatusCode)
	}
	// Read with an expired token signed by the read key: 401.
	expiredReadToken := mustGenExpiredToken(t, []byte(profile.JWTReadKey), fid)
	readReq := mustNewRequest(t, http.MethodGet, clusterHarness.VolumeAdminURL()+"/"+fid)
	readReq.Header.Set("Authorization", "Bearer "+expiredReadToken)
	readResp := framework.DoRequest(t, client, readReq)
	_ = framework.ReadAllAndClose(t, readResp)
	if readResp.StatusCode != http.StatusUnauthorized {
		t.Fatalf("expired read token expected 401, got %d", readResp.StatusCode)
	}
}
// TestJWTAuthViaQueryParamAndCookie verifies the two alternative JWT
// transports: a write token passed via the ?jwt= query parameter and a read
// token passed via the AT cookie, both accepted by the admin port.
func TestJWTAuthViaQueryParamAndCookie(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	jwtProfile := matrix.P3()
	harness := framework.StartSingleVolumeCluster(t, jwtProfile)
	conn, volumeClient := framework.DialVolumeServer(t, harness.VolumeGRPCAddress())
	defer conn.Close()
	// Fixed ids keep the fid deterministic so tokens can be minted for it.
	const (
		volumeID = uint32(54)
		needleID = uint64(445566)
		cookie   = uint32(0x31415926)
	)
	framework.AllocateVolume(t, volumeClient, volumeID, "")
	fid := framework.NewFileID(volumeID, needleID, cookie)
	payload := []byte("jwt-query-cookie-content")
	httpClient := framework.NewHTTPClient()

	// Upload with the write token carried in the query string.
	writeToken := security.GenJwtForVolumeServer(security.SigningKey([]byte(jwtProfile.JWTSigningKey)), 60, fid)
	uploadReq := newUploadRequest(t, harness.VolumeAdminURL()+"/"+fid+"?jwt="+string(writeToken), payload)
	uploadResp := framework.DoRequest(t, httpClient, uploadReq)
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("query-jwt write expected 201, got %d", uploadResp.StatusCode)
	}

	// Fetch with the read token carried in the AT cookie.
	readToken := security.GenJwtForVolumeServer(security.SigningKey([]byte(jwtProfile.JWTReadKey)), 60, fid)
	fetchReq := mustNewRequest(t, http.MethodGet, harness.VolumeAdminURL()+"/"+fid)
	fetchReq.AddCookie(&http.Cookie{Name: "AT", Value: string(readToken)})
	fetchResp := framework.DoRequest(t, httpClient, fetchReq)
	fetchBody := framework.ReadAllAndClose(t, fetchResp)
	if fetchResp.StatusCode != http.StatusOK {
		t.Fatalf("cookie-jwt read expected 200, got %d", fetchResp.StatusCode)
	}
	if string(fetchBody) != string(payload) {
		t.Fatalf("cookie-jwt read body mismatch: got %q want %q", string(fetchBody), string(payload))
	}
}
// TestJWTTokenSourcePrecedenceQueryOverHeader verifies that a token in the
// ?jwt= query parameter wins over one in the Authorization header: a bad
// query token must yield 401 even when the header token is valid.
func TestJWTTokenSourcePrecedenceQueryOverHeader(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	profile := matrix.P3()
	harness := framework.StartSingleVolumeCluster(t, profile)
	grpcConn, vsClient := framework.DialVolumeServer(t, harness.VolumeGRPCAddress())
	defer grpcConn.Close()
	const (
		volumeID = uint32(55)
		needleID = uint64(556677)
		cookie   = uint32(0x99887766)
	)
	framework.AllocateVolume(t, vsClient, volumeID, "")
	fid := framework.NewFileID(volumeID, needleID, cookie)
	// Tokens minted for a different fid are well-formed but unauthorized here.
	otherFID := framework.NewFileID(volumeID, needleID+1, cookie+1)
	payload := []byte("jwt-precedence-content")
	httpClient := framework.NewHTTPClient()
	goodWriteToken := security.GenJwtForVolumeServer(security.SigningKey([]byte(profile.JWTSigningKey)), 60, fid)
	badQueryWriteToken := security.GenJwtForVolumeServer(security.SigningKey([]byte(profile.JWTSigningKey)), 60, otherFID)
	// Write: bad query token plus good header token must be rejected.
	uploadReq := newUploadRequest(t, harness.VolumeAdminURL()+"/"+fid+"?jwt="+string(badQueryWriteToken), payload)
	uploadReq.Header.Set("Authorization", "Bearer "+string(goodWriteToken))
	uploadResp := framework.DoRequest(t, httpClient, uploadReq)
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != http.StatusUnauthorized {
		t.Fatalf("query token should take precedence over header token for write, expected 401 got %d", uploadResp.StatusCode)
	}
	// Seed data with valid write token, then exercise read precedence.
	seedReq := newUploadRequest(t, harness.VolumeAdminURL()+"/"+fid, payload)
	seedReq.Header.Set("Authorization", "Bearer "+string(goodWriteToken))
	seedResp := framework.DoRequest(t, httpClient, seedReq)
	_ = framework.ReadAllAndClose(t, seedResp)
	if seedResp.StatusCode != http.StatusCreated {
		t.Fatalf("seed write expected 201, got %d", seedResp.StatusCode)
	}
	goodReadToken := security.GenJwtForVolumeServer(security.SigningKey([]byte(profile.JWTReadKey)), 60, fid)
	badQueryReadToken := security.GenJwtForVolumeServer(security.SigningKey([]byte(profile.JWTReadKey)), 60, otherFID)
	// Read: bad query token plus good header token must be rejected.
	getReq := mustNewRequest(t, http.MethodGet, harness.VolumeAdminURL()+"/"+fid+"?jwt="+string(badQueryReadToken))
	getReq.Header.Set("Authorization", "Bearer "+string(goodReadToken))
	getResp := framework.DoRequest(t, httpClient, getReq)
	_ = framework.ReadAllAndClose(t, getResp)
	if getResp.StatusCode != http.StatusUnauthorized {
		t.Fatalf("query token should take precedence over header token for read, expected 401 got %d", getResp.StatusCode)
	}
}
// TestJWTTokenSourcePrecedenceHeaderOverCookie verifies that a token in the
// Authorization header wins over one in the "AT" cookie: a good header token
// must succeed even when the cookie carries a token for the wrong fid.
func TestJWTTokenSourcePrecedenceHeaderOverCookie(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	profile := matrix.P3()
	harness := framework.StartSingleVolumeCluster(t, profile)
	grpcConn, vsClient := framework.DialVolumeServer(t, harness.VolumeGRPCAddress())
	defer grpcConn.Close()
	const (
		volumeID = uint32(56)
		needleID = uint64(667788)
		cookie   = uint32(0x11229988)
	)
	framework.AllocateVolume(t, vsClient, volumeID, "")
	fid := framework.NewFileID(volumeID, needleID, cookie)
	otherFID := framework.NewFileID(volumeID, needleID+1, cookie+1)
	payload := []byte("jwt-precedence-header-cookie")
	httpClient := framework.NewHTTPClient()
	// Write: good Authorization header plus bad cookie token must succeed.
	goodWriteToken := security.GenJwtForVolumeServer(security.SigningKey([]byte(profile.JWTSigningKey)), 60, fid)
	badCookieWriteToken := security.GenJwtForVolumeServer(security.SigningKey([]byte(profile.JWTSigningKey)), 60, otherFID)
	uploadReq := newUploadRequest(t, harness.VolumeAdminURL()+"/"+fid, payload)
	uploadReq.Header.Set("Authorization", "Bearer "+string(goodWriteToken))
	uploadReq.AddCookie(&http.Cookie{Name: "AT", Value: string(badCookieWriteToken)})
	uploadResp := framework.DoRequest(t, httpClient, uploadReq)
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("header token should take precedence over cookie token for write, expected 201 got %d", uploadResp.StatusCode)
	}
	// Read: good header token plus bad cookie token must also succeed.
	goodReadToken := security.GenJwtForVolumeServer(security.SigningKey([]byte(profile.JWTReadKey)), 60, fid)
	badCookieReadToken := security.GenJwtForVolumeServer(security.SigningKey([]byte(profile.JWTReadKey)), 60, otherFID)
	getReq := mustNewRequest(t, http.MethodGet, harness.VolumeAdminURL()+"/"+fid)
	getReq.Header.Set("Authorization", "Bearer "+string(goodReadToken))
	getReq.AddCookie(&http.Cookie{Name: "AT", Value: string(badCookieReadToken)})
	getResp := framework.DoRequest(t, httpClient, getReq)
	body := framework.ReadAllAndClose(t, getResp)
	if getResp.StatusCode != http.StatusOK {
		t.Fatalf("header token should take precedence over cookie token for read, expected 200 got %d", getResp.StatusCode)
	}
	if string(body) != string(payload) {
		t.Fatalf("header-over-cookie read body mismatch: got %q want %q", string(body), string(payload))
	}
}
// TestJWTTokenSourcePrecedenceQueryOverCookie verifies that a token in the
// ?jwt= query parameter wins over one in the "AT" cookie, in both the
// negative (bad query token rejects) and positive (good query token accepts)
// directions.
func TestJWTTokenSourcePrecedenceQueryOverCookie(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	profile := matrix.P3()
	harness := framework.StartSingleVolumeCluster(t, profile)
	grpcConn, vsClient := framework.DialVolumeServer(t, harness.VolumeGRPCAddress())
	defer grpcConn.Close()
	const (
		volumeID = uint32(57)
		needleID = uint64(778899)
		cookie   = uint32(0x88776655)
	)
	framework.AllocateVolume(t, vsClient, volumeID, "")
	fid := framework.NewFileID(volumeID, needleID, cookie)
	otherFID := framework.NewFileID(volumeID, needleID+1, cookie+1)
	payload := []byte("jwt-precedence-query-cookie")
	httpClient := framework.NewHTTPClient()
	goodWriteToken := security.GenJwtForVolumeServer(security.SigningKey([]byte(profile.JWTSigningKey)), 60, fid)
	badQueryWriteToken := security.GenJwtForVolumeServer(security.SigningKey([]byte(profile.JWTSigningKey)), 60, otherFID)
	// Write: bad query token must override the good cookie token.
	uploadReq := newUploadRequest(t, harness.VolumeAdminURL()+"/"+fid+"?jwt="+string(badQueryWriteToken), payload)
	uploadReq.AddCookie(&http.Cookie{Name: "AT", Value: string(goodWriteToken)})
	uploadResp := framework.DoRequest(t, httpClient, uploadReq)
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != http.StatusUnauthorized {
		t.Fatalf("query token should take precedence over cookie token for write, expected 401 got %d", uploadResp.StatusCode)
	}
	// Seed data with valid write token so read precedence can be exercised.
	seedReq := newUploadRequest(t, harness.VolumeAdminURL()+"/"+fid, payload)
	seedReq.Header.Set("Authorization", "Bearer "+string(goodWriteToken))
	seedResp := framework.DoRequest(t, httpClient, seedReq)
	_ = framework.ReadAllAndClose(t, seedResp)
	if seedResp.StatusCode != http.StatusCreated {
		t.Fatalf("seed write expected 201, got %d", seedResp.StatusCode)
	}
	goodReadToken := security.GenJwtForVolumeServer(security.SigningKey([]byte(profile.JWTReadKey)), 60, fid)
	badQueryReadToken := security.GenJwtForVolumeServer(security.SigningKey([]byte(profile.JWTReadKey)), 60, otherFID)
	// Read: bad query token must override the good cookie token.
	getReq := mustNewRequest(t, http.MethodGet, harness.VolumeAdminURL()+"/"+fid+"?jwt="+string(badQueryReadToken))
	getReq.AddCookie(&http.Cookie{Name: "AT", Value: string(goodReadToken)})
	getResp := framework.DoRequest(t, httpClient, getReq)
	_ = framework.ReadAllAndClose(t, getResp)
	if getResp.StatusCode != http.StatusUnauthorized {
		t.Fatalf("query token should take precedence over cookie token for read, expected 401 got %d", getResp.StatusCode)
	}
	// Validate positive path: valid query token should succeed even if cookie token is invalid.
	positiveReq := mustNewRequest(t, http.MethodGet, harness.VolumeAdminURL()+"/"+fid+"?jwt="+string(goodReadToken))
	badCookieReadToken := security.GenJwtForVolumeServer(security.SigningKey([]byte(profile.JWTReadKey)), 60, otherFID)
	positiveReq.AddCookie(&http.Cookie{Name: "AT", Value: string(badCookieReadToken)})
	positiveResp := framework.DoRequest(t, httpClient, positiveReq)
	positiveBody := framework.ReadAllAndClose(t, positiveResp)
	if positiveResp.StatusCode != http.StatusOK {
		t.Fatalf("valid query token should succeed over invalid cookie token, expected 200 got %d", positiveResp.StatusCode)
	}
	if string(positiveBody) != string(payload) {
		t.Fatalf("query-over-cookie read body mismatch: got %q want %q", string(positiveBody), string(payload))
	}
}
func mustGenExpiredToken(t testing.TB, key []byte, fid string) string {
t.Helper()
claims := security.SeaweedFileIdClaims{
Fid: fid,
RegisteredClaims: jwt.RegisteredClaims{
ExpiresAt: jwt.NewNumericDate(time.Now().Add(-1 * time.Minute)),
},
}
token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
signed, err := token.SignedString(key)
if err != nil {
t.Fatalf("sign expired token: %v", err)
}
return signed
}

View File

@@ -0,0 +1,232 @@
package volume_server_http_test
import (
"bytes"
"encoding/json"
"net/http"
"testing"
"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
"github.com/seaweedfs/seaweedfs/weed/operation"
)
// TestChunkManifestExpansionAndBypass verifies that a chunk manifest uploaded
// with cm=true is expanded into its chunk content on a default read (with
// X-File-Store=chunked), while cm=false returns the raw manifest JSON.
func TestChunkManifestExpansionAndBypass(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	harness := framework.StartSingleVolumeCluster(t, matrix.P1())
	grpcConn, vsClient := framework.DialVolumeServer(t, harness.VolumeGRPCAddress())
	defer grpcConn.Close()
	const volumeID = uint32(102)
	framework.AllocateVolume(t, vsClient, volumeID, "")
	httpClient := framework.NewHTTPClient()
	// Store the raw chunk the manifest will point at.
	chunkFID := framework.NewFileID(volumeID, 772005, 0x5E6F7081)
	chunkPayload := []byte("chunk-manifest-expanded-content")
	chunkResp := framework.UploadBytes(t, httpClient, harness.VolumeAdminURL(), chunkFID, chunkPayload)
	_ = framework.ReadAllAndClose(t, chunkResp)
	if chunkResp.StatusCode != http.StatusCreated {
		t.Fatalf("chunk upload expected 201, got %d", chunkResp.StatusCode)
	}
	manifestBytes, err := json.Marshal(&operation.ChunkManifest{
		Name: "manifest.bin",
		Mime: "application/octet-stream",
		Size: int64(len(chunkPayload)),
		Chunks: []*operation.ChunkInfo{
			{Fid: chunkFID, Offset: 0, Size: int64(len(chunkPayload))},
		},
	})
	if err != nil {
		t.Fatalf("marshal chunk manifest: %v", err)
	}
	// Upload the manifest with cm=true so the server flags it as chunked.
	manifestFID := framework.NewFileID(volumeID, 772006, 0x6F708192)
	manifestReq, err := http.NewRequest(http.MethodPost, harness.VolumeAdminURL()+"/"+manifestFID+"?cm=true", bytes.NewReader(manifestBytes))
	if err != nil {
		t.Fatalf("create manifest upload request: %v", err)
	}
	manifestReq.Header.Set("Content-Type", "application/json")
	manifestResp := framework.DoRequest(t, httpClient, manifestReq)
	_ = framework.ReadAllAndClose(t, manifestResp)
	if manifestResp.StatusCode != http.StatusCreated {
		t.Fatalf("manifest upload expected 201, got %d", manifestResp.StatusCode)
	}
	// Default read: manifest expands to the chunk content.
	expandedResp := framework.ReadBytes(t, httpClient, harness.VolumeAdminURL(), manifestFID)
	expandedBody := framework.ReadAllAndClose(t, expandedResp)
	if expandedResp.StatusCode != http.StatusOK {
		t.Fatalf("manifest expanded read expected 200, got %d", expandedResp.StatusCode)
	}
	if string(expandedBody) != string(chunkPayload) {
		t.Fatalf("manifest expanded read mismatch: got %q want %q", string(expandedBody), string(chunkPayload))
	}
	if expandedResp.Header.Get("X-File-Store") != "chunked" {
		t.Fatalf("manifest expanded read expected X-File-Store=chunked, got %q", expandedResp.Header.Get("X-File-Store"))
	}
	// cm=false read: manifest JSON is returned verbatim, without the chunked marker.
	bypassResp := framework.DoRequest(t, httpClient, mustNewRequest(t, http.MethodGet, harness.VolumeAdminURL()+"/"+manifestFID+"?cm=false"))
	bypassBody := framework.ReadAllAndClose(t, bypassResp)
	if bypassResp.StatusCode != http.StatusOK {
		t.Fatalf("manifest bypass read expected 200, got %d", bypassResp.StatusCode)
	}
	if bypassResp.Header.Get("X-File-Store") != "" {
		t.Fatalf("manifest bypass read expected empty X-File-Store header, got %q", bypassResp.Header.Get("X-File-Store"))
	}
	var gotManifest operation.ChunkManifest
	if err = json.Unmarshal(bypassBody, &gotManifest); err != nil {
		t.Fatalf("manifest bypass read expected JSON payload, got decode error: %v body=%q", err, string(bypassBody))
	}
	if len(gotManifest.Chunks) != 1 || gotManifest.Chunks[0].Fid != chunkFID {
		t.Fatalf("manifest bypass read payload mismatch: %+v", gotManifest)
	}
}
// TestChunkManifestDeleteRemovesChildChunks verifies that deleting a chunk
// manifest cascades: both the manifest and the chunk it references return
// 404 afterwards, and the delete response reports the manifest's size.
func TestChunkManifestDeleteRemovesChildChunks(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	harness := framework.StartSingleVolumeCluster(t, matrix.P1())
	grpcConn, vsClient := framework.DialVolumeServer(t, harness.VolumeGRPCAddress())
	defer grpcConn.Close()
	const volumeID = uint32(104)
	framework.AllocateVolume(t, vsClient, volumeID, "")
	httpClient := framework.NewHTTPClient()
	// Store the child chunk the manifest will reference.
	chunkFID := framework.NewFileID(volumeID, 772008, 0x8192A3B4)
	chunkPayload := []byte("chunk-manifest-delete-content")
	chunkResp := framework.UploadBytes(t, httpClient, harness.VolumeAdminURL(), chunkFID, chunkPayload)
	_ = framework.ReadAllAndClose(t, chunkResp)
	if chunkResp.StatusCode != http.StatusCreated {
		t.Fatalf("chunk upload expected 201, got %d", chunkResp.StatusCode)
	}
	manifestBytes, err := json.Marshal(&operation.ChunkManifest{
		Name: "manifest-delete.bin",
		Mime: "application/octet-stream",
		Size: int64(len(chunkPayload)),
		Chunks: []*operation.ChunkInfo{
			{Fid: chunkFID, Offset: 0, Size: int64(len(chunkPayload))},
		},
	})
	if err != nil {
		t.Fatalf("marshal chunk manifest: %v", err)
	}
	manifestFID := framework.NewFileID(volumeID, 772009, 0x92A3B4C5)
	manifestReq, err := http.NewRequest(http.MethodPost, harness.VolumeAdminURL()+"/"+manifestFID+"?cm=true", bytes.NewReader(manifestBytes))
	if err != nil {
		t.Fatalf("create manifest upload request: %v", err)
	}
	manifestReq.Header.Set("Content-Type", "application/json")
	manifestResp := framework.DoRequest(t, httpClient, manifestReq)
	_ = framework.ReadAllAndClose(t, manifestResp)
	if manifestResp.StatusCode != http.StatusCreated {
		t.Fatalf("manifest upload expected 201, got %d", manifestResp.StatusCode)
	}
	// Delete the manifest; the response body reports the reclaimed size.
	deleteResp := framework.DoRequest(t, httpClient, mustNewRequest(t, http.MethodDelete, harness.VolumeAdminURL()+"/"+manifestFID))
	deleteBody := framework.ReadAllAndClose(t, deleteResp)
	if deleteResp.StatusCode != http.StatusAccepted {
		t.Fatalf("manifest delete expected 202, got %d", deleteResp.StatusCode)
	}
	var deleteResult map[string]int64
	if err = json.Unmarshal(deleteBody, &deleteResult); err != nil {
		t.Fatalf("decode manifest delete response: %v body=%q", err, string(deleteBody))
	}
	if deleteResult["size"] != int64(len(chunkPayload)) {
		t.Fatalf("manifest delete expected size=%d, got %d", len(chunkPayload), deleteResult["size"])
	}
	// Both the manifest and its referenced chunk must now be gone.
	manifestAfter := framework.ReadBytes(t, httpClient, harness.VolumeAdminURL(), manifestFID)
	_ = framework.ReadAllAndClose(t, manifestAfter)
	if manifestAfter.StatusCode != http.StatusNotFound {
		t.Fatalf("manifest read after delete expected 404, got %d", manifestAfter.StatusCode)
	}
	chunkAfter := framework.ReadBytes(t, httpClient, harness.VolumeAdminURL(), chunkFID)
	_ = framework.ReadAllAndClose(t, chunkAfter)
	if chunkAfter.StatusCode != http.StatusNotFound {
		t.Fatalf("chunk read after manifest delete expected 404, got %d", chunkAfter.StatusCode)
	}
}
// TestChunkManifestDeleteFailsWhenChildDeletionFails verifies that a manifest
// whose child fid cannot be parsed makes the cascading delete fail with 500,
// and that the manifest itself survives the failed delete.
func TestChunkManifestDeleteFailsWhenChildDeletionFails(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	harness := framework.StartSingleVolumeCluster(t, matrix.P1())
	grpcConn, vsClient := framework.DialVolumeServer(t, harness.VolumeGRPCAddress())
	defer grpcConn.Close()
	const volumeID = uint32(105)
	framework.AllocateVolume(t, vsClient, volumeID, "")
	httpClient := framework.NewHTTPClient()
	// The manifest references an unparsable child fid on purpose.
	manifestBytes, err := json.Marshal(&operation.ChunkManifest{
		Name: "manifest-delete-failure.bin",
		Mime: "application/octet-stream",
		Size: 1,
		Chunks: []*operation.ChunkInfo{
			{Fid: "not-a-valid-fid", Offset: 0, Size: 1},
		},
	})
	if err != nil {
		t.Fatalf("marshal chunk manifest: %v", err)
	}
	manifestFID := framework.NewFileID(volumeID, 772010, 0xA3B4C5D6)
	manifestReq, err := http.NewRequest(http.MethodPost, harness.VolumeAdminURL()+"/"+manifestFID+"?cm=true", bytes.NewReader(manifestBytes))
	if err != nil {
		t.Fatalf("create manifest upload request: %v", err)
	}
	manifestReq.Header.Set("Content-Type", "application/json")
	manifestResp := framework.DoRequest(t, httpClient, manifestReq)
	_ = framework.ReadAllAndClose(t, manifestResp)
	if manifestResp.StatusCode != http.StatusCreated {
		t.Fatalf("manifest upload expected 201, got %d", manifestResp.StatusCode)
	}
	deleteResp := framework.DoRequest(t, httpClient, mustNewRequest(t, http.MethodDelete, harness.VolumeAdminURL()+"/"+manifestFID))
	deleteBody := framework.ReadAllAndClose(t, deleteResp)
	if deleteResp.StatusCode != http.StatusInternalServerError {
		t.Fatalf("manifest delete with invalid child fid expected 500, got %d body=%q", deleteResp.StatusCode, string(deleteBody))
	}
	// The manifest must still be readable via cm=false after the failed delete.
	bypassResp := framework.DoRequest(t, httpClient, mustNewRequest(t, http.MethodGet, harness.VolumeAdminURL()+"/"+manifestFID+"?cm=false"))
	bypassBody := framework.ReadAllAndClose(t, bypassResp)
	if bypassResp.StatusCode != http.StatusOK {
		t.Fatalf("manifest bypass read after failed delete expected 200, got %d", bypassResp.StatusCode)
	}
	var gotManifest operation.ChunkManifest
	if err = json.Unmarshal(bypassBody, &gotManifest); err != nil {
		t.Fatalf("manifest bypass read expected JSON payload, got decode error: %v body=%q", err, string(bypassBody))
	}
	if len(gotManifest.Chunks) != 1 || gotManifest.Chunks[0].Fid != "not-a-valid-fid" {
		t.Fatalf("manifest payload mismatch after failed delete: %+v", gotManifest)
	}
}

View File

@@ -0,0 +1,97 @@
package volume_server_http_test
import (
"bytes"
"compress/gzip"
"io"
"net/http"
"testing"
"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
)
func gzipData(t testing.TB, data []byte) []byte {
t.Helper()
var buf bytes.Buffer
zw := gzip.NewWriter(&buf)
if _, err := zw.Write(data); err != nil {
t.Fatalf("gzip write: %v", err)
}
if err := zw.Close(); err != nil {
t.Fatalf("gzip close: %v", err)
}
return buf.Bytes()
}
func gunzipData(t testing.TB, data []byte) []byte {
t.Helper()
zr, err := gzip.NewReader(bytes.NewReader(data))
if err != nil {
t.Fatalf("gunzip new reader: %v", err)
}
defer zr.Close()
out, err := io.ReadAll(zr)
if err != nil {
t.Fatalf("gunzip read: %v", err)
}
return out
}
// TestCompressedReadAcceptEncodingMatrix uploads gzip-encoded content and
// verifies the server's Accept-Encoding handling: gzip-accepting clients get
// a gzip body, identity-only clients get the plain payload.
func TestCompressedReadAcceptEncodingMatrix(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	harness := framework.StartSingleVolumeCluster(t, matrix.P1())
	grpcConn, vsClient := framework.DialVolumeServer(t, harness.VolumeGRPCAddress())
	defer grpcConn.Close()
	const volumeID = uint32(103)
	framework.AllocateVolume(t, vsClient, volumeID, "")
	httpClient := framework.NewHTTPClient()
	fid := framework.NewFileID(volumeID, 772007, 0x708192A3)
	plainPayload := []byte("compressed-read-accept-encoding-matrix-content-compressed-read-accept-encoding-matrix-content")
	// Upload pre-gzipped bytes, declaring the encoding.
	uploadReq, err := http.NewRequest(http.MethodPost, harness.VolumeAdminURL()+"/"+fid, bytes.NewReader(gzipData(t, plainPayload)))
	if err != nil {
		t.Fatalf("create compressed upload request: %v", err)
	}
	uploadReq.Header.Set("Content-Type", "text/plain")
	uploadReq.Header.Set("Content-Encoding", "gzip")
	uploadResp := framework.DoRequest(t, httpClient, uploadReq)
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("compressed upload expected 201, got %d", uploadResp.StatusCode)
	}
	// gzip-accepting client: body arrives gzip-encoded and gunzips to the payload.
	gzipReq := mustNewRequest(t, http.MethodGet, harness.VolumeAdminURL()+"/"+fid)
	gzipReq.Header.Set("Accept-Encoding", "gzip")
	gzipResp := framework.DoRequest(t, httpClient, gzipReq)
	gzipBody := framework.ReadAllAndClose(t, gzipResp)
	if gzipResp.StatusCode != http.StatusOK {
		t.Fatalf("gzip-accepted read expected 200, got %d", gzipResp.StatusCode)
	}
	if gzipResp.Header.Get("Content-Encoding") != "gzip" {
		t.Fatalf("gzip-accepted read expected Content-Encoding=gzip, got %q", gzipResp.Header.Get("Content-Encoding"))
	}
	if string(gunzipData(t, gzipBody)) != string(plainPayload) {
		t.Fatalf("gzip-accepted read body mismatch after gunzip")
	}
	// identity-only client: body arrives decompressed with no Content-Encoding.
	identityReq := mustNewRequest(t, http.MethodGet, harness.VolumeAdminURL()+"/"+fid)
	identityReq.Header.Set("Accept-Encoding", "identity")
	identityResp := framework.DoRequest(t, httpClient, identityReq)
	identityBody := framework.ReadAllAndClose(t, identityResp)
	if identityResp.StatusCode != http.StatusOK {
		t.Fatalf("identity read expected 200, got %d", identityResp.StatusCode)
	}
	if identityResp.Header.Get("Content-Encoding") != "" {
		t.Fatalf("identity read expected no Content-Encoding header, got %q", identityResp.Header.Get("Content-Encoding"))
	}
	if string(identityBody) != string(plainPayload) {
		t.Fatalf("identity read body mismatch: got %q want %q", string(identityBody), string(plainPayload))
	}
}

View File

@@ -0,0 +1,102 @@
package volume_server_http_test
import (
"fmt"
"net/http"
"strings"
"testing"
"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
)
// TestReadPassthroughHeadersAndDownloadDisposition verifies that the
// response-content-type / response-cache-control query overrides are applied
// to the read response and that dl=true forces an attachment disposition.
func TestReadPassthroughHeadersAndDownloadDisposition(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	harness := framework.StartSingleVolumeCluster(t, matrix.P1())
	grpcConn, vsClient := framework.DialVolumeServer(t, harness.VolumeGRPCAddress())
	defer grpcConn.Close()
	const volumeID = uint32(96)
	framework.AllocateVolume(t, vsClient, volumeID, "")
	httpClient := framework.NewHTTPClient()
	fileID := framework.NewFileID(volumeID, 661122, 0x55667788)
	uploadResp := framework.UploadBytes(t, httpClient, harness.VolumeAdminURL(), fileID, []byte("passthrough-header-content"))
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("upload expected 201, got %d", uploadResp.StatusCode)
	}
	// Use the /volumeId/fid/filename URL form so the overrides apply.
	parts := strings.SplitN(fileID, ",", 2)
	if len(parts) != 2 {
		t.Fatalf("unexpected file id format: %q", fileID)
	}
	readURL := fmt.Sprintf("%s/%d/%s/%s?response-content-type=text/plain&response-cache-control=no-store&dl=true",
		harness.VolumeAdminURL(),
		volumeID,
		parts[1],
		"report.txt",
	)
	readResp := framework.DoRequest(t, httpClient, mustNewRequest(t, http.MethodGet, readURL))
	_ = framework.ReadAllAndClose(t, readResp)
	if readResp.StatusCode != http.StatusOK {
		t.Fatalf("passthrough read expected 200, got %d", readResp.StatusCode)
	}
	if readResp.Header.Get("Content-Type") != "text/plain" {
		t.Fatalf("response-content-type override mismatch: %q", readResp.Header.Get("Content-Type"))
	}
	if readResp.Header.Get("Cache-Control") != "no-store" {
		t.Fatalf("response-cache-control override mismatch: %q", readResp.Header.Get("Cache-Control"))
	}
	// dl=true must yield an attachment disposition carrying the filename.
	disposition := readResp.Header.Get("Content-Disposition")
	if !strings.Contains(disposition, "attachment") || !strings.Contains(disposition, "report.txt") {
		t.Fatalf("download disposition header mismatch: %q", disposition)
	}
}
// TestStaticAssetEndpoints verifies the admin port serves the built-in
// favicon and the embedded static asset tree.
func TestStaticAssetEndpoints(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	harness := framework.StartSingleVolumeCluster(t, matrix.P1())
	httpClient := framework.NewHTTPClient()
	iconResp := framework.DoRequest(t, httpClient, mustNewRequest(t, http.MethodGet, harness.VolumeAdminURL()+"/favicon.ico"))
	_ = framework.ReadAllAndClose(t, iconResp)
	if iconResp.StatusCode != http.StatusOK {
		t.Fatalf("/favicon.ico expected 200, got %d", iconResp.StatusCode)
	}
	assetResp := framework.DoRequest(t, httpClient, mustNewRequest(t, http.MethodGet, harness.VolumeAdminURL()+"/seaweedfsstatic/seaweed50x50.png"))
	_ = framework.ReadAllAndClose(t, assetResp)
	if assetResp.StatusCode != http.StatusOK {
		t.Fatalf("/seaweedfsstatic/seaweed50x50.png expected 200, got %d", assetResp.StatusCode)
	}
}
// TestStaticAssetEndpointsOnPublicPort verifies the same static assets are
// also served from the public read port (profile P2 enables it).
func TestStaticAssetEndpointsOnPublicPort(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	harness := framework.StartSingleVolumeCluster(t, matrix.P2())
	httpClient := framework.NewHTTPClient()
	iconResp := framework.DoRequest(t, httpClient, mustNewRequest(t, http.MethodGet, harness.VolumePublicURL()+"/favicon.ico"))
	_ = framework.ReadAllAndClose(t, iconResp)
	if iconResp.StatusCode != http.StatusOK {
		t.Fatalf("public /favicon.ico expected 200, got %d", iconResp.StatusCode)
	}
	assetResp := framework.DoRequest(t, httpClient, mustNewRequest(t, http.MethodGet, harness.VolumePublicURL()+"/seaweedfsstatic/seaweed50x50.png"))
	_ = framework.ReadAllAndClose(t, assetResp)
	if assetResp.StatusCode != http.StatusOK {
		t.Fatalf("public /seaweedfsstatic/seaweed50x50.png expected 200, got %d", assetResp.StatusCode)
	}
}

View File

@@ -0,0 +1,92 @@
package volume_server_http_test
import (
"bytes"
"fmt"
"image"
"image/color"
"image/png"
"net/http"
"strings"
"testing"
"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
)
func makePNGFixture(t testing.TB, width, height int) []byte {
t.Helper()
img := image.NewRGBA(image.Rect(0, 0, width, height))
for y := 0; y < height; y++ {
for x := 0; x < width; x++ {
img.Set(x, y, color.RGBA{R: uint8(x * 20), G: uint8(y * 20), B: 200, A: 255})
}
}
var buf bytes.Buffer
if err := png.Encode(&buf, img); err != nil {
t.Fatalf("encode png fixture: %v", err)
}
return buf.Bytes()
}
func decodeImageConfig(t testing.TB, data []byte) image.Config {
t.Helper()
cfg, _, err := image.DecodeConfig(bytes.NewReader(data))
if err != nil {
t.Fatalf("decode image config: %v", err)
}
return cfg
}
// TestImageResizeAndCropReadVariants verifies server-side image processing on
// reads: width/height query parameters resize, crop_* parameters return the
// selected sub-rectangle.
func TestImageResizeAndCropReadVariants(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	harness := framework.StartSingleVolumeCluster(t, matrix.P1())
	grpcConn, vsClient := framework.DialVolumeServer(t, harness.VolumeGRPCAddress())
	defer grpcConn.Close()
	const volumeID = uint32(101)
	framework.AllocateVolume(t, vsClient, volumeID, "")
	httpClient := framework.NewHTTPClient()
	fileID := framework.NewFileID(volumeID, 772004, 0x4D5E6F70)
	// Upload a 6x4 PNG so both resize and crop have room to operate.
	uploadReq := newUploadRequest(t, harness.VolumeAdminURL()+"/"+fileID, makePNGFixture(t, 6, 4))
	uploadReq.Header.Set("Content-Type", "image/png")
	uploadResp := framework.DoRequest(t, httpClient, uploadReq)
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("image upload expected 201, got %d", uploadResp.StatusCode)
	}
	parts := strings.SplitN(fileID, ",", 2)
	if len(parts) != 2 {
		t.Fatalf("unexpected file id format: %q", fileID)
	}
	fidOnly := parts[1]
	// Resize: the result must fit within the requested 2x1 bounding box.
	resizeURL := fmt.Sprintf("%s/%d/%s/%s?width=2&height=1", harness.VolumeAdminURL(), volumeID, fidOnly, "fixture.png")
	resizeResp := framework.DoRequest(t, httpClient, mustNewRequest(t, http.MethodGet, resizeURL))
	resizeBody := framework.ReadAllAndClose(t, resizeResp)
	if resizeResp.StatusCode != http.StatusOK {
		t.Fatalf("image resize read expected 200, got %d", resizeResp.StatusCode)
	}
	resizeCfg := decodeImageConfig(t, resizeBody)
	if resizeCfg.Width > 2 || resizeCfg.Height > 1 {
		t.Fatalf("image resize expected dimensions <= 2x1, got %dx%d", resizeCfg.Width, resizeCfg.Height)
	}
	// Crop: (1,1)-(4,3) selects a 3x2 sub-rectangle.
	cropURL := fmt.Sprintf("%s/%d/%s/%s?crop_x1=1&crop_y1=1&crop_x2=4&crop_y2=3", harness.VolumeAdminURL(), volumeID, fidOnly, "fixture.png")
	cropResp := framework.DoRequest(t, httpClient, mustNewRequest(t, http.MethodGet, cropURL))
	cropBody := framework.ReadAllAndClose(t, cropResp)
	if cropResp.StatusCode != http.StatusOK {
		t.Fatalf("image crop read expected 200, got %d", cropResp.StatusCode)
	}
	cropCfg := decodeImageConfig(t, cropBody)
	if cropCfg.Width != 3 || cropCfg.Height != 2 {
		t.Fatalf("image crop expected 3x2, got %dx%d", cropCfg.Width, cropCfg.Height)
	}
}

View File

@@ -0,0 +1,287 @@
package volume_server_http_test
import (
"bytes"
"net/http"
"strconv"
"testing"
"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
)
// TestPublicPortReadOnlyMethodBehavior verifies that the public port serves
// reads, while POST and DELETE against it return 200 without actually
// mutating stored data (confirmed by re-reading through the admin port).
func TestPublicPortReadOnlyMethodBehavior(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	harness := framework.StartSingleVolumeCluster(t, matrix.P2())
	grpcConn, vsClient := framework.DialVolumeServer(t, harness.VolumeGRPCAddress())
	defer grpcConn.Close()
	const volumeID = uint32(81)
	framework.AllocateVolume(t, vsClient, volumeID, "")
	fid := framework.NewFileID(volumeID, 123321, 0x01020304)
	seedData := []byte("public-port-original")
	overwriteData := []byte("public-port-replacement")
	httpClient := framework.NewHTTPClient()
	// Seed through the admin port.
	uploadResp := framework.UploadBytes(t, httpClient, harness.VolumeAdminURL(), fid, seedData)
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("admin upload expected 201, got %d", uploadResp.StatusCode)
	}
	// Reads work on the public port.
	publicGetResp := framework.ReadBytes(t, httpClient, harness.VolumePublicURL(), fid)
	publicGetBody := framework.ReadAllAndClose(t, publicGetResp)
	if publicGetResp.StatusCode != http.StatusOK {
		t.Fatalf("public GET expected 200, got %d", publicGetResp.StatusCode)
	}
	if string(publicGetBody) != string(seedData) {
		t.Fatalf("public GET body mismatch: got %q want %q", string(publicGetBody), string(seedData))
	}
	// POST and DELETE on the public port respond 200 but must not write.
	publicPostResp := framework.DoRequest(t, httpClient, newUploadRequest(t, harness.VolumePublicURL()+"/"+fid, overwriteData))
	_ = framework.ReadAllAndClose(t, publicPostResp)
	if publicPostResp.StatusCode != http.StatusOK {
		t.Fatalf("public POST expected passthrough 200, got %d", publicPostResp.StatusCode)
	}
	publicDeleteResp := framework.DoRequest(t, httpClient, mustNewRequest(t, http.MethodDelete, harness.VolumePublicURL()+"/"+fid))
	_ = framework.ReadAllAndClose(t, publicDeleteResp)
	if publicDeleteResp.StatusCode != http.StatusOK {
		t.Fatalf("public DELETE expected passthrough 200, got %d", publicDeleteResp.StatusCode)
	}
	// Admin read proves the data survived both attempted mutations.
	adminGetResp := framework.ReadBytes(t, httpClient, harness.VolumeAdminURL(), fid)
	adminGetBody := framework.ReadAllAndClose(t, adminGetResp)
	if adminGetResp.StatusCode != http.StatusOK {
		t.Fatalf("admin GET after public POST/DELETE expected 200, got %d", adminGetResp.StatusCode)
	}
	if string(adminGetBody) != string(seedData) {
		t.Fatalf("public port should not mutate data: got %q want %q", string(adminGetBody), string(seedData))
	}
}
// TestCorsAndUnsupportedMethodBehavior verifies that GET responses on both
// the admin and public ports carry permissive CORS headers, and that an
// unsupported PATCH is rejected with 400 on the admin port while the public
// port passes it through with 200.
func TestCorsAndUnsupportedMethodBehavior(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	harness := framework.StartSingleVolumeCluster(t, matrix.P2())
	grpcConn, volumeClient := framework.DialVolumeServer(t, harness.VolumeGRPCAddress())
	defer grpcConn.Close()
	const volumeID = uint32(82)
	framework.AllocateVolume(t, volumeClient, volumeID, "")
	fid := framework.NewFileID(volumeID, 789789, 0x0A0B0C0D)
	httpClient := framework.NewHTTPClient()
	putResp := framework.UploadBytes(t, httpClient, harness.VolumeAdminURL(), fid, []byte("cors-check"))
	_ = framework.ReadAllAndClose(t, putResp)
	if putResp.StatusCode != http.StatusCreated {
		t.Fatalf("admin upload expected 201, got %d", putResp.StatusCode)
	}
	// Issue a GET with an Origin header and hand back the drained response so
	// the caller can inspect the CORS headers.
	sendWithOrigin := func(baseURL string) *http.Response {
		req := mustNewRequest(t, http.MethodGet, baseURL+"/"+fid)
		req.Header.Set("Origin", "https://example.com")
		resp := framework.DoRequest(t, httpClient, req)
		_ = framework.ReadAllAndClose(t, resp)
		return resp
	}
	adminCORS := sendWithOrigin(harness.VolumeAdminURL())
	if got := adminCORS.Header.Get("Access-Control-Allow-Origin"); got != "*" {
		t.Fatalf("admin GET origin header mismatch: %q", got)
	}
	if got := adminCORS.Header.Get("Access-Control-Allow-Credentials"); got != "true" {
		t.Fatalf("admin GET credentials header mismatch: %q", got)
	}
	publicCORS := sendWithOrigin(harness.VolumePublicURL())
	if got := publicCORS.Header.Get("Access-Control-Allow-Origin"); got != "*" {
		t.Fatalf("public GET origin header mismatch: %q", got)
	}
	if got := publicCORS.Header.Get("Access-Control-Allow-Credentials"); got != "true" {
		t.Fatalf("public GET credentials header mismatch: %q", got)
	}
	// PATCH with a small body; the two ports are expected to disagree.
	sendPatch := func(baseURL, portName string) *http.Response {
		req, err := http.NewRequest(http.MethodPatch, baseURL+"/"+fid, bytes.NewReader([]byte("patch")))
		if err != nil {
			t.Fatalf("create "+portName+" PATCH request: %v", err)
		}
		resp := framework.DoRequest(t, httpClient, req)
		_ = framework.ReadAllAndClose(t, resp)
		return resp
	}
	if resp := sendPatch(harness.VolumeAdminURL(), "admin"); resp.StatusCode != http.StatusBadRequest {
		t.Fatalf("admin PATCH expected 400, got %d", resp.StatusCode)
	}
	if resp := sendPatch(harness.VolumePublicURL(), "public"); resp.StatusCode != http.StatusOK {
		t.Fatalf("public PATCH expected passthrough 200, got %d", resp.StatusCode)
	}
}
// TestUnsupportedMethodTraceParity checks that TRACE is rejected with 400 on
// the admin port while the public port passes it through with 200.
func TestUnsupportedMethodTraceParity(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	harness := framework.StartSingleVolumeCluster(t, matrix.P2())
	grpcConn, volumeClient := framework.DialVolumeServer(t, harness.VolumeGRPCAddress())
	defer grpcConn.Close()
	const volumeID = uint32(83)
	framework.AllocateVolume(t, volumeClient, volumeID, "")
	fid := framework.NewFileID(volumeID, 123999, 0x01010101)
	httpClient := framework.NewHTTPClient()
	putResp := framework.UploadBytes(t, httpClient, harness.VolumeAdminURL(), fid, []byte("trace-method-check"))
	_ = framework.ReadAllAndClose(t, putResp)
	if putResp.StatusCode != http.StatusCreated {
		t.Fatalf("upload expected 201, got %d", putResp.StatusCode)
	}
	// Send TRACE to the given port and report only the status code.
	trace := func(baseURL string) int {
		resp := framework.DoRequest(t, httpClient, mustNewRequest(t, http.MethodTrace, baseURL+"/"+fid))
		_ = framework.ReadAllAndClose(t, resp)
		return resp.StatusCode
	}
	if got := trace(harness.VolumeAdminURL()); got != http.StatusBadRequest {
		t.Fatalf("admin TRACE expected 400, got %d", got)
	}
	if got := trace(harness.VolumePublicURL()); got != http.StatusOK {
		t.Fatalf("public TRACE expected passthrough 200, got %d", got)
	}
}
// TestUnsupportedMethodPropfindParity checks that PROPFIND is rejected with
// 400 on the admin port, passes through with 200 on the public port, and
// leaves the stored needle data untouched.
func TestUnsupportedMethodPropfindParity(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	harness := framework.StartSingleVolumeCluster(t, matrix.P2())
	grpcConn, volumeClient := framework.DialVolumeServer(t, harness.VolumeGRPCAddress())
	defer grpcConn.Close()
	const volumeID = uint32(84)
	framework.AllocateVolume(t, volumeClient, volumeID, "")
	fid := framework.NewFileID(volumeID, 124000, 0x02020202)
	httpClient := framework.NewHTTPClient()
	putResp := framework.UploadBytes(t, httpClient, harness.VolumeAdminURL(), fid, []byte("propfind-method-check"))
	_ = framework.ReadAllAndClose(t, putResp)
	if putResp.StatusCode != http.StatusCreated {
		t.Fatalf("upload expected 201, got %d", putResp.StatusCode)
	}
	// Send PROPFIND to the given port and report only the status code.
	propfind := func(baseURL string) int {
		resp := framework.DoRequest(t, httpClient, mustNewRequest(t, "PROPFIND", baseURL+"/"+fid))
		_ = framework.ReadAllAndClose(t, resp)
		return resp.StatusCode
	}
	if got := propfind(harness.VolumeAdminURL()); got != http.StatusBadRequest {
		t.Fatalf("admin PROPFIND expected 400, got %d", got)
	}
	if got := propfind(harness.VolumePublicURL()); got != http.StatusOK {
		t.Fatalf("public PROPFIND expected passthrough 200, got %d", got)
	}
	// Neither request should have mutated the stored needle.
	checkResp := framework.ReadBytes(t, httpClient, harness.VolumeAdminURL(), fid)
	checkBody := framework.ReadAllAndClose(t, checkResp)
	if checkResp.StatusCode != http.StatusOK {
		t.Fatalf("verify GET expected 200, got %d", checkResp.StatusCode)
	}
	if string(checkBody) != "propfind-method-check" {
		t.Fatalf("PROPFIND should not mutate data, got %q", string(checkBody))
	}
}
// TestUnsupportedMethodConnectParity checks that CONNECT is rejected with
// 400 on the admin port, passes through with 200 on the public port, and
// leaves the stored needle data untouched.
func TestUnsupportedMethodConnectParity(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	harness := framework.StartSingleVolumeCluster(t, matrix.P2())
	grpcConn, volumeClient := framework.DialVolumeServer(t, harness.VolumeGRPCAddress())
	defer grpcConn.Close()
	const volumeID = uint32(85)
	framework.AllocateVolume(t, volumeClient, volumeID, "")
	fid := framework.NewFileID(volumeID, 124001, 0x03030303)
	httpClient := framework.NewHTTPClient()
	putResp := framework.UploadBytes(t, httpClient, harness.VolumeAdminURL(), fid, []byte("connect-method-check"))
	_ = framework.ReadAllAndClose(t, putResp)
	if putResp.StatusCode != http.StatusCreated {
		t.Fatalf("upload expected 201, got %d", putResp.StatusCode)
	}
	// Send CONNECT to the given port and report only the status code.
	connect := func(baseURL string) int {
		resp := framework.DoRequest(t, httpClient, mustNewRequest(t, "CONNECT", baseURL+"/"+fid))
		_ = framework.ReadAllAndClose(t, resp)
		return resp.StatusCode
	}
	if got := connect(harness.VolumeAdminURL()); got != http.StatusBadRequest {
		t.Fatalf("admin CONNECT expected 400, got %d", got)
	}
	if got := connect(harness.VolumePublicURL()); got != http.StatusOK {
		t.Fatalf("public CONNECT expected passthrough 200, got %d", got)
	}
	// Neither request should have mutated the stored needle.
	checkResp := framework.ReadBytes(t, httpClient, harness.VolumeAdminURL(), fid)
	checkBody := framework.ReadAllAndClose(t, checkResp)
	if checkResp.StatusCode != http.StatusOK {
		t.Fatalf("verify GET expected 200, got %d", checkResp.StatusCode)
	}
	if string(checkBody) != "connect-method-check" {
		t.Fatalf("CONNECT should not mutate data, got %q", string(checkBody))
	}
}
// TestPublicPortHeadReadParity verifies that a HEAD on the public port for a
// needle uploaded via the admin port returns 200 with the correct
// Content-Length header and an empty body.
func TestPublicPortHeadReadParity(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	harness := framework.StartSingleVolumeCluster(t, matrix.P2())
	grpcConn, volumeClient := framework.DialVolumeServer(t, harness.VolumeGRPCAddress())
	defer grpcConn.Close()
	const volumeID = uint32(86)
	framework.AllocateVolume(t, volumeClient, volumeID, "")
	fid := framework.NewFileID(volumeID, 124002, 0x04040404)
	content := []byte("public-head-parity-content")
	httpClient := framework.NewHTTPClient()
	putResp := framework.UploadBytes(t, httpClient, harness.VolumeAdminURL(), fid, content)
	_ = framework.ReadAllAndClose(t, putResp)
	if putResp.StatusCode != http.StatusCreated {
		t.Fatalf("upload expected 201, got %d", putResp.StatusCode)
	}
	headReq := mustNewRequest(t, http.MethodHead, harness.VolumePublicURL()+"/"+fid)
	headResp := framework.DoRequest(t, httpClient, headReq)
	body := framework.ReadAllAndClose(t, headResp)
	if headResp.StatusCode != http.StatusOK {
		t.Fatalf("public HEAD expected 200, got %d", headResp.StatusCode)
	}
	if got := headResp.Header.Get("Content-Length"); got != strconv.Itoa(len(content)) {
		t.Fatalf("public HEAD content-length mismatch: got %q want %d", got, len(content))
	}
	if len(body) != 0 {
		t.Fatalf("public HEAD body should be empty, got %d bytes", len(body))
	}
}

View File

@@ -0,0 +1,82 @@
package volume_server_http_test
import (
"net/http"
"strings"
"testing"
"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
)
// TestMultiRangeReadReturnsMultipartPayload verifies that a GET asking for
// two byte ranges answers 206 with a multipart/byteranges body containing
// both requested segments.
func TestMultiRangeReadReturnsMultipartPayload(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	harness := framework.StartSingleVolumeCluster(t, matrix.P1())
	grpcConn, volumeClient := framework.DialVolumeServer(t, harness.VolumeGRPCAddress())
	defer grpcConn.Close()
	const volumeID = uint32(97)
	framework.AllocateVolume(t, volumeClient, volumeID, "")
	httpClient := framework.NewHTTPClient()
	fid := framework.NewFileID(volumeID, 771999, 0x0A1B2C3D)
	content := []byte("0123456789abcdef")
	putResp := framework.UploadBytes(t, httpClient, harness.VolumeAdminURL(), fid, content)
	_ = framework.ReadAllAndClose(t, putResp)
	if putResp.StatusCode != http.StatusCreated {
		t.Fatalf("upload expected 201, got %d", putResp.StatusCode)
	}
	rangedReq := mustNewRequest(t, http.MethodGet, harness.VolumeAdminURL()+"/"+fid)
	rangedReq.Header.Set("Range", "bytes=0-1,4-5")
	rangedResp := framework.DoRequest(t, httpClient, rangedReq)
	rangedBody := framework.ReadAllAndClose(t, rangedResp)
	if rangedResp.StatusCode != http.StatusPartialContent {
		t.Fatalf("multi-range expected 206, got %d", rangedResp.StatusCode)
	}
	if contentType := rangedResp.Header.Get("Content-Type"); !strings.Contains(contentType, "multipart/byteranges") {
		t.Fatalf("multi-range content-type mismatch: %q", contentType)
	}
	// Both requested segments ("01" from 0-1 and "45" from 4-5) must appear
	// somewhere in the multipart payload.
	if text := string(rangedBody); !strings.Contains(text, "01") || !strings.Contains(text, "45") {
		t.Fatalf("multi-range body missing expected segments: %q", text)
	}
}
// TestOversizedCombinedRangesAreIgnored verifies that when the combined size
// of the requested ranges exceeds the stored payload, the server ignores the
// Range header and answers 200 with an empty body.
func TestOversizedCombinedRangesAreIgnored(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	harness := framework.StartSingleVolumeCluster(t, matrix.P1())
	grpcConn, volumeClient := framework.DialVolumeServer(t, harness.VolumeGRPCAddress())
	defer grpcConn.Close()
	const volumeID = uint32(100)
	framework.AllocateVolume(t, volumeClient, volumeID, "")
	httpClient := framework.NewHTTPClient()
	fid := framework.NewFileID(volumeID, 772003, 0x3C4D5E6F)
	content := []byte("0123456789abcdef")
	putResp := framework.UploadBytes(t, httpClient, harness.VolumeAdminURL(), fid, content)
	_ = framework.ReadAllAndClose(t, putResp)
	if putResp.StatusCode != http.StatusCreated {
		t.Fatalf("upload expected 201, got %d", putResp.StatusCode)
	}
	// The two ranges sum to 22 bytes (> payload size 16), which exercises the
	// oversized-range guard path.
	rangedReq := mustNewRequest(t, http.MethodGet, harness.VolumeAdminURL()+"/"+fid)
	rangedReq.Header.Set("Range", "bytes=0-10,5-15")
	rangedResp := framework.DoRequest(t, httpClient, rangedReq)
	rangedBody := framework.ReadAllAndClose(t, rangedResp)
	if rangedResp.StatusCode != http.StatusOK {
		t.Fatalf("oversized combined range expected 200, got %d", rangedResp.StatusCode)
	}
	if len(rangedBody) != 0 {
		t.Fatalf("oversized combined range expected empty body, got %d bytes", len(rangedBody))
	}
}

View File

@@ -0,0 +1,54 @@
package volume_server_http_test
import (
"net/http"
"testing"
"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
)
// TestReadDeletedQueryReturnsDeletedNeedleData verifies that after a delete
// a plain read answers 404 but a read with ?readDeleted=true still returns
// the original payload.
func TestReadDeletedQueryReturnsDeletedNeedleData(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	harness := framework.StartSingleVolumeCluster(t, matrix.P1())
	grpcConn, volumeClient := framework.DialVolumeServer(t, harness.VolumeGRPCAddress())
	defer grpcConn.Close()
	const volumeID = uint32(94)
	framework.AllocateVolume(t, volumeClient, volumeID, "")
	httpClient := framework.NewHTTPClient()
	fid := framework.NewFileID(volumeID, 551234, 0xCAFE1234)
	content := []byte("read-deleted-needle-payload")
	putResp := framework.UploadBytes(t, httpClient, harness.VolumeAdminURL(), fid, content)
	_ = framework.ReadAllAndClose(t, putResp)
	if putResp.StatusCode != http.StatusCreated {
		t.Fatalf("upload expected 201, got %d", putResp.StatusCode)
	}
	delResp := framework.DoRequest(t, httpClient, mustNewRequest(t, http.MethodDelete, harness.VolumeAdminURL()+"/"+fid))
	_ = framework.ReadAllAndClose(t, delResp)
	if delResp.StatusCode != http.StatusAccepted {
		t.Fatalf("delete expected 202, got %d", delResp.StatusCode)
	}
	plainResp := framework.ReadBytes(t, httpClient, harness.VolumeAdminURL(), fid)
	_ = framework.ReadAllAndClose(t, plainResp)
	if plainResp.StatusCode != http.StatusNotFound {
		t.Fatalf("normal read after delete expected 404, got %d", plainResp.StatusCode)
	}
	// The readDeleted escape hatch should still reach the tombstoned needle.
	deletedReq := mustNewRequest(t, http.MethodGet, harness.VolumeAdminURL()+"/"+fid+"?readDeleted=true")
	deletedResp := framework.DoRequest(t, httpClient, deletedReq)
	deletedBody := framework.ReadAllAndClose(t, deletedResp)
	if deletedResp.StatusCode != http.StatusOK {
		t.Fatalf("read with readDeleted=true expected 200, got %d", deletedResp.StatusCode)
	}
	if string(deletedBody) != string(content) {
		t.Fatalf("readDeleted body mismatch: got %q want %q", string(deletedBody), string(content))
	}
}

View File

@@ -0,0 +1,319 @@
package volume_server_http_test
import (
"net/http"
"strings"
"testing"
"time"
"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
)
// TestReadModeProxyMissingLocalVolume verifies that in read-mode "proxy" a
// volume server that does not host the volume forwards the read to the
// owning server and returns the payload with 200.
func TestReadModeProxyMissingLocalVolume(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	profile := matrix.P1()
	profile.ReadMode = "proxy"
	harness := framework.StartDualVolumeCluster(t, profile)
	ownerConn, ownerClient := framework.DialVolumeServer(t, harness.VolumeGRPCAddress(0))
	defer ownerConn.Close()
	const volumeID = uint32(101)
	framework.AllocateVolume(t, ownerClient, volumeID, "")
	httpClient := framework.NewHTTPClient()
	fid := framework.NewFileID(volumeID, 120001, 0x0102ABCD)
	content := []byte("proxy-read-mode-forwarded-content")
	putResp := framework.UploadBytes(t, httpClient, harness.VolumeAdminURL(0), fid, content)
	_ = framework.ReadAllAndClose(t, putResp)
	if putResp.StatusCode != http.StatusCreated {
		t.Fatalf("upload expected 201, got %d", putResp.StatusCode)
	}
	// Read via server 1, which does not host the volume.
	proxiedURL := harness.VolumeAdminURL(1) + "/" + fid
	var proxiedBody []byte
	ok := waitForHTTPStatus(t, httpClient, proxiedURL, http.StatusOK, 10*time.Second, func(resp *http.Response) {
		proxiedBody = framework.ReadAllAndClose(t, resp)
	})
	if !ok {
		t.Fatalf("proxy read mode did not return 200 from non-owning volume server within deadline")
	}
	if string(proxiedBody) != string(content) {
		t.Fatalf("proxy read mode body mismatch: got %q want %q", string(proxiedBody), string(content))
	}
}
// TestReadModeRedirectMissingLocalVolume verifies that in read-mode
// "redirect" a non-owning volume server answers 301 with a Location that
// carries proxied=true, and that following the redirect yields the payload.
func TestReadModeRedirectMissingLocalVolume(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	profile := matrix.P1()
	profile.ReadMode = "redirect"
	harness := framework.StartDualVolumeCluster(t, profile)
	ownerConn, ownerClient := framework.DialVolumeServer(t, harness.VolumeGRPCAddress(0))
	defer ownerConn.Close()
	const volumeID = uint32(102)
	framework.AllocateVolume(t, ownerClient, volumeID, "")
	httpClient := framework.NewHTTPClient()
	fid := framework.NewFileID(volumeID, 120002, 0x0102DCBA)
	content := []byte("redirect-read-mode-content")
	putResp := framework.UploadBytes(t, httpClient, harness.VolumeAdminURL(0), fid, content)
	_ = framework.ReadAllAndClose(t, putResp)
	if putResp.StatusCode != http.StatusCreated {
		t.Fatalf("upload expected 201, got %d", putResp.StatusCode)
	}
	// A client that surfaces the 301 instead of following it automatically.
	manualRedirectClient := &http.Client{
		Timeout: 10 * time.Second,
		CheckRedirect: func(req *http.Request, via []*http.Request) error {
			return http.ErrUseLastResponse
		},
	}
	redirectURL := harness.VolumeAdminURL(1) + "/" + fid
	var location string
	ok := waitForHTTPStatus(t, manualRedirectClient, redirectURL, http.StatusMovedPermanently, 10*time.Second, func(resp *http.Response) {
		location = resp.Header.Get("Location")
		_ = framework.ReadAllAndClose(t, resp)
	})
	if !ok {
		t.Fatalf("redirect read mode did not return 301 from non-owning volume server within deadline")
	}
	if location == "" {
		t.Fatalf("redirect response missing Location header")
	}
	if !strings.Contains(location, "proxied=true") {
		t.Fatalf("redirect Location should include proxied=true, got %q", location)
	}
	followResp := framework.DoRequest(t, httpClient, mustNewRequest(t, http.MethodGet, location))
	followBody := framework.ReadAllAndClose(t, followResp)
	if followResp.StatusCode != http.StatusOK {
		t.Fatalf("following redirect expected 200, got %d", followResp.StatusCode)
	}
	if string(followBody) != string(content) {
		t.Fatalf("redirect-follow body mismatch: got %q want %q", string(followBody), string(content))
	}
}
// TestReadModeLocalMissingLocalVolumeReturnsNotFound verifies that in
// read-mode "local" a volume server that does not host the volume answers
// 404 instead of proxying or redirecting.
func TestReadModeLocalMissingLocalVolumeReturnsNotFound(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	profile := matrix.P1()
	profile.ReadMode = "local"
	harness := framework.StartDualVolumeCluster(t, profile)
	ownerConn, ownerClient := framework.DialVolumeServer(t, harness.VolumeGRPCAddress(0))
	defer ownerConn.Close()
	const volumeID = uint32(103)
	framework.AllocateVolume(t, ownerClient, volumeID, "")
	httpClient := framework.NewHTTPClient()
	fid := framework.NewFileID(volumeID, 120003, 0x0102BEEF)
	putResp := framework.UploadBytes(t, httpClient, harness.VolumeAdminURL(0), fid, []byte("local-read-mode-content"))
	_ = framework.ReadAllAndClose(t, putResp)
	if putResp.StatusCode != http.StatusCreated {
		t.Fatalf("upload expected 201, got %d", putResp.StatusCode)
	}
	missResp := framework.ReadBytes(t, httpClient, harness.VolumeAdminURL(1), fid)
	_ = framework.ReadAllAndClose(t, missResp)
	if missResp.StatusCode != http.StatusNotFound {
		t.Fatalf("local read mode expected 404 on non-owning server, got %d", missResp.StatusCode)
	}
}
// TestReadDeletedProxyModeOnMissingLocalVolume verifies that a
// ?readDeleted=true read sent to a non-owning server in read-mode "proxy"
// is forwarded to the owner and still returns the deleted needle's payload.
func TestReadDeletedProxyModeOnMissingLocalVolume(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	profile := matrix.P1()
	profile.ReadMode = "proxy"
	harness := framework.StartDualVolumeCluster(t, profile)
	ownerConn, ownerClient := framework.DialVolumeServer(t, harness.VolumeGRPCAddress(0))
	defer ownerConn.Close()
	const volumeID = uint32(104)
	framework.AllocateVolume(t, ownerClient, volumeID, "")
	httpClient := framework.NewHTTPClient()
	fid := framework.NewFileID(volumeID, 120004, 0x0102CAFE)
	content := []byte("proxy-readDeleted-missing-local-content")
	putResp := framework.UploadBytes(t, httpClient, harness.VolumeAdminURL(0), fid, content)
	_ = framework.ReadAllAndClose(t, putResp)
	if putResp.StatusCode != http.StatusCreated {
		t.Fatalf("upload expected 201, got %d", putResp.StatusCode)
	}
	delResp := framework.DoRequest(t, httpClient, mustNewRequest(t, http.MethodDelete, harness.VolumeAdminURL(0)+"/"+fid))
	_ = framework.ReadAllAndClose(t, delResp)
	if delResp.StatusCode != http.StatusAccepted {
		t.Fatalf("delete expected 202, got %d", delResp.StatusCode)
	}
	proxiedURL := harness.VolumeAdminURL(1) + "/" + fid + "?readDeleted=true"
	var body []byte
	ok := waitForHTTPStatus(t, httpClient, proxiedURL, http.StatusOK, 10*time.Second, func(resp *http.Response) {
		body = framework.ReadAllAndClose(t, resp)
	})
	if !ok {
		t.Fatalf("proxy readDeleted path did not return 200 from non-owning volume server within deadline")
	}
	if string(body) != string(content) {
		t.Fatalf("proxy readDeleted body mismatch: got %q want %q", string(body), string(content))
	}
}
// TestReadDeletedRedirectModeDropsQueryParameterParity pins the current
// redirect-mode behavior for deleted needles: the 301 Location carries
// proxied=true but DROPS the readDeleted=true query, so following the
// redirect yields 404 for the tombstoned needle.
func TestReadDeletedRedirectModeDropsQueryParameterParity(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	profile := matrix.P1()
	profile.ReadMode = "redirect"
	harness := framework.StartDualVolumeCluster(t, profile)
	ownerConn, ownerClient := framework.DialVolumeServer(t, harness.VolumeGRPCAddress(0))
	defer ownerConn.Close()
	const volumeID = uint32(105)
	framework.AllocateVolume(t, ownerClient, volumeID, "")
	httpClient := framework.NewHTTPClient()
	fid := framework.NewFileID(volumeID, 120005, 0x0102FACE)
	content := []byte("redirect-readDeleted-query-drop-parity")
	putResp := framework.UploadBytes(t, httpClient, harness.VolumeAdminURL(0), fid, content)
	_ = framework.ReadAllAndClose(t, putResp)
	if putResp.StatusCode != http.StatusCreated {
		t.Fatalf("upload expected 201, got %d", putResp.StatusCode)
	}
	delResp := framework.DoRequest(t, httpClient, mustNewRequest(t, http.MethodDelete, harness.VolumeAdminURL(0)+"/"+fid))
	_ = framework.ReadAllAndClose(t, delResp)
	if delResp.StatusCode != http.StatusAccepted {
		t.Fatalf("delete expected 202, got %d", delResp.StatusCode)
	}
	// A client that surfaces the 301 instead of following it automatically.
	manualRedirectClient := &http.Client{
		Timeout: 10 * time.Second,
		CheckRedirect: func(req *http.Request, via []*http.Request) error {
			return http.ErrUseLastResponse
		},
	}
	redirectURL := harness.VolumeAdminURL(1) + "/" + fid + "?readDeleted=true"
	var location string
	ok := waitForHTTPStatus(t, manualRedirectClient, redirectURL, http.StatusMovedPermanently, 10*time.Second, func(resp *http.Response) {
		location = resp.Header.Get("Location")
		_ = framework.ReadAllAndClose(t, resp)
	})
	if !ok {
		t.Fatalf("redirect readDeleted path did not return 301 from non-owning volume server within deadline")
	}
	if location == "" {
		t.Fatalf("redirect readDeleted response missing Location header")
	}
	if !strings.Contains(location, "proxied=true") {
		t.Fatalf("redirect readDeleted Location should include proxied=true, got %q", location)
	}
	if strings.Contains(location, "readDeleted=true") {
		t.Fatalf("redirect readDeleted Location should reflect current query-drop behavior, got %q", location)
	}
	followResp := framework.DoRequest(t, httpClient, mustNewRequest(t, http.MethodGet, location))
	_ = framework.ReadAllAndClose(t, followResp)
	if followResp.StatusCode != http.StatusNotFound {
		t.Fatalf("redirect-follow without readDeleted query expected 404 for deleted needle, got %d", followResp.StatusCode)
	}
}
// TestReadModeRedirectPreservesCollectionQuery verifies that in read-mode
// "redirect" the 301 Location keeps the collection query parameter (and adds
// proxied=true), so following the redirect returns the payload.
func TestReadModeRedirectPreservesCollectionQuery(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	profile := matrix.P1()
	profile.ReadMode = "redirect"
	harness := framework.StartDualVolumeCluster(t, profile)
	ownerConn, ownerClient := framework.DialVolumeServer(t, harness.VolumeGRPCAddress(0))
	defer ownerConn.Close()
	const (
		volumeID   = uint32(109)
		collection = "redirect-collection"
	)
	framework.AllocateVolume(t, ownerClient, volumeID, collection)
	httpClient := framework.NewHTTPClient()
	fid := framework.NewFileID(volumeID, 120006, 0x0102F00D)
	content := []byte("redirect-collection-preserve-content")
	putResp := framework.UploadBytes(t, httpClient, harness.VolumeAdminURL(0), fid, content)
	_ = framework.ReadAllAndClose(t, putResp)
	if putResp.StatusCode != http.StatusCreated {
		t.Fatalf("upload expected 201, got %d", putResp.StatusCode)
	}
	// A client that surfaces the 301 instead of following it automatically.
	manualRedirectClient := &http.Client{
		Timeout: 10 * time.Second,
		CheckRedirect: func(req *http.Request, via []*http.Request) error {
			return http.ErrUseLastResponse
		},
	}
	redirectURL := harness.VolumeAdminURL(1) + "/" + fid + "?collection=" + collection
	var location string
	ok := waitForHTTPStatus(t, manualRedirectClient, redirectURL, http.StatusMovedPermanently, 10*time.Second, func(resp *http.Response) {
		location = resp.Header.Get("Location")
		_ = framework.ReadAllAndClose(t, resp)
	})
	if !ok {
		t.Fatalf("redirect collection path did not return 301 from non-owning volume server within deadline")
	}
	if location == "" {
		t.Fatalf("redirect collection response missing Location header")
	}
	if !strings.Contains(location, "proxied=true") {
		t.Fatalf("redirect collection Location should include proxied=true, got %q", location)
	}
	if !strings.Contains(location, "collection="+collection) {
		t.Fatalf("redirect collection Location should preserve collection query, got %q", location)
	}
	followResp := framework.DoRequest(t, httpClient, mustNewRequest(t, http.MethodGet, location))
	followBody := framework.ReadAllAndClose(t, followResp)
	if followResp.StatusCode != http.StatusOK {
		t.Fatalf("redirect-follow expected 200, got %d", followResp.StatusCode)
	}
	if string(followBody) != string(content) {
		t.Fatalf("redirect-follow body mismatch: got %q want %q", string(followBody), string(content))
	}
}
// waitForHTTPStatus polls url with GET until the response status equals
// expectedStatus or timeout elapses. On the first match it hands the live
// response to onMatch (which takes ownership of closing the body) and
// returns true; non-matching responses are drained and closed here.
//
// Unlike a naive deadline loop, this always makes at least one attempt even
// for a non-positive timeout (or when the scheduler delays the goroutine past
// the deadline before the first poll), and it never sleeps beyond the
// deadline, so the final attempt is not skipped.
func waitForHTTPStatus(t testing.TB, client *http.Client, url string, expectedStatus int, timeout time.Duration, onMatch func(resp *http.Response)) bool {
	t.Helper()
	const pollInterval = 200 * time.Millisecond
	deadline := time.Now().Add(timeout)
	for {
		resp := framework.DoRequest(t, client, mustNewRequest(t, http.MethodGet, url))
		if resp.StatusCode == expectedStatus {
			onMatch(resp)
			return true
		}
		_ = framework.ReadAllAndClose(t, resp)
		remaining := time.Until(deadline)
		if remaining <= 0 {
			return false
		}
		// Sleep only as long as the deadline allows before the next attempt.
		if remaining < pollInterval {
			time.Sleep(remaining)
		} else {
			time.Sleep(pollInterval)
		}
	}
}

View File

@@ -0,0 +1,191 @@
package volume_server_http_test
import (
"fmt"
"net/http"
"strings"
"testing"
"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
)
// TestReadPathShapesAndIfModifiedSince covers the alternate read URL shapes
// (/{vid}/{fid} and /{vid}/{fid}/{filename}) and 304 Not Modified handling
// for If-Modified-Since on both GET and HEAD.
func TestReadPathShapesAndIfModifiedSince(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()
	const volumeID = uint32(93)
	framework.AllocateVolume(t, grpcClient, volumeID, "")
	client := framework.NewHTTPClient()
	fullFileID := framework.NewFileID(volumeID, 771234, 0xBEEFCACE)
	uploadPayload := []byte("read-path-shape-content")
	uploadResp := framework.UploadBytes(t, client, clusterHarness.VolumeAdminURL(), fullFileID, uploadPayload)
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("upload expected 201, got %d", uploadResp.StatusCode)
	}
	// The full file id has the form "vid,fid"; split it so the path-shaped
	// URLs below can be built from the fid portion alone.
	parts := strings.SplitN(fullFileID, ",", 2)
	if len(parts) != 2 {
		t.Fatalf("unexpected file id format: %q", fullFileID)
	}
	fidOnly := parts[1]
	// Shape 1: GET /{vid}/{fid} must serve the uploaded bytes.
	readByVidFid := framework.DoRequest(t, client, mustNewRequest(t, http.MethodGet, fmt.Sprintf("%s/%d/%s", clusterHarness.VolumeAdminURL(), volumeID, fidOnly)))
	readByVidFidBody := framework.ReadAllAndClose(t, readByVidFid)
	if readByVidFid.StatusCode != http.StatusOK {
		t.Fatalf("GET /{vid}/{fid} expected 200, got %d", readByVidFid.StatusCode)
	}
	if string(readByVidFidBody) != string(uploadPayload) {
		t.Fatalf("GET /{vid}/{fid} body mismatch: got %q want %q", string(readByVidFidBody), string(uploadPayload))
	}
	// Shape 2: a trailing filename segment ("named.bin") is accepted and the
	// same bytes are served.
	readWithFilename := framework.DoRequest(t, client, mustNewRequest(t, http.MethodGet, fmt.Sprintf("%s/%d/%s/%s", clusterHarness.VolumeAdminURL(), volumeID, fidOnly, "named.bin")))
	readWithFilenameBody := framework.ReadAllAndClose(t, readWithFilename)
	if readWithFilename.StatusCode != http.StatusOK {
		t.Fatalf("GET /{vid}/{fid}/(unknown) expected 200, got %d", readWithFilename.StatusCode)
	}
	if string(readWithFilenameBody) != string(uploadPayload) {
		t.Fatalf("GET /{vid}/{fid}/(unknown) body mismatch: got %q want %q", string(readWithFilenameBody), string(uploadPayload))
	}
	// Capture the server-reported Last-Modified value so it can be echoed
	// back verbatim in the conditional requests below.
	lastModified := readWithFilename.Header.Get("Last-Modified")
	if lastModified == "" {
		t.Fatalf("expected Last-Modified header on read response")
	}
	// GET with a matching If-Modified-Since must short-circuit to 304.
	ifModifiedSinceReq := mustNewRequest(t, http.MethodGet, clusterHarness.VolumeAdminURL()+"/"+fullFileID)
	ifModifiedSinceReq.Header.Set("If-Modified-Since", lastModified)
	ifModifiedSinceResp := framework.DoRequest(t, client, ifModifiedSinceReq)
	_ = framework.ReadAllAndClose(t, ifModifiedSinceResp)
	if ifModifiedSinceResp.StatusCode != http.StatusNotModified {
		t.Fatalf("If-Modified-Since expected 304, got %d", ifModifiedSinceResp.StatusCode)
	}
	// HEAD must honor If-Modified-Since the same way, with an empty body.
	headIfModifiedSinceReq := mustNewRequest(t, http.MethodHead, clusterHarness.VolumeAdminURL()+"/"+fullFileID)
	headIfModifiedSinceReq.Header.Set("If-Modified-Since", lastModified)
	headIfModifiedSinceResp := framework.DoRequest(t, client, headIfModifiedSinceReq)
	headIfModifiedSinceBody := framework.ReadAllAndClose(t, headIfModifiedSinceResp)
	if headIfModifiedSinceResp.StatusCode != http.StatusNotModified {
		t.Fatalf("HEAD If-Modified-Since expected 304, got %d", headIfModifiedSinceResp.StatusCode)
	}
	if len(headIfModifiedSinceBody) != 0 {
		t.Fatalf("HEAD If-Modified-Since expected empty body, got %d bytes", len(headIfModifiedSinceBody))
	}
}
// TestMalformedVidFidPathReturnsBadRequest verifies that a read path whose
// volume-id and file-id segments are not parseable is rejected with 400.
func TestMalformedVidFidPathReturnsBadRequest(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	harness := framework.StartSingleVolumeCluster(t, matrix.P1())
	httpClient := framework.NewHTTPClient()
	badPathReq := mustNewRequest(t, http.MethodGet, harness.VolumeAdminURL()+"/not-a-vid/not-a-fid")
	badPathResp := framework.DoRequest(t, httpClient, badPathReq)
	_ = framework.ReadAllAndClose(t, badPathResp)
	if badPathResp.StatusCode != http.StatusBadRequest {
		t.Fatalf("malformed /{vid}/{fid} expected 400, got %d", badPathResp.StatusCode)
	}
}
// TestReadWrongCookieReturnsNotFound verifies that GET and HEAD with a file
// id whose cookie does not match the stored needle both answer 404, and that
// the HEAD response carries no body.
func TestReadWrongCookieReturnsNotFound(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	harness := framework.StartSingleVolumeCluster(t, matrix.P1())
	grpcConn, volumeClient := framework.DialVolumeServer(t, harness.VolumeGRPCAddress())
	defer grpcConn.Close()
	const (
		volumeID = uint32(95)
		needleID = uint64(771235)
		cookie   = uint32(0xBEEFCACF)
	)
	framework.AllocateVolume(t, volumeClient, volumeID, "")
	httpClient := framework.NewHTTPClient()
	goodFid := framework.NewFileID(volumeID, needleID, cookie)
	putResp := framework.UploadBytes(t, httpClient, harness.VolumeAdminURL(), goodFid, []byte("read-cookie-mismatch-content"))
	_ = framework.ReadAllAndClose(t, putResp)
	if putResp.StatusCode != http.StatusCreated {
		t.Fatalf("upload expected 201, got %d", putResp.StatusCode)
	}
	// Same volume and needle, cookie off by one.
	badFid := framework.NewFileID(volumeID, needleID, cookie+1)
	getResp := framework.ReadBytes(t, httpClient, harness.VolumeAdminURL(), badFid)
	_ = framework.ReadAllAndClose(t, getResp)
	if getResp.StatusCode != http.StatusNotFound {
		t.Fatalf("GET with wrong cookie expected 404, got %d", getResp.StatusCode)
	}
	headResp := framework.DoRequest(t, httpClient, mustNewRequest(t, http.MethodHead, harness.VolumeAdminURL()+"/"+badFid))
	headBody := framework.ReadAllAndClose(t, headResp)
	if headResp.StatusCode != http.StatusNotFound {
		t.Fatalf("HEAD with wrong cookie expected 404, got %d", headResp.StatusCode)
	}
	if len(headBody) != 0 {
		t.Fatalf("HEAD wrong-cookie response body should be empty, got %d bytes", len(headBody))
	}
}
// TestConditionalHeaderPrecedenceAndInvalidIfModifiedSince asserts the
// server's conditional-read behavior: a matching If-Modified-Since yields
// 304 even alongside a mismatching If-None-Match, while an unparsable
// If-Modified-Since date is ignored and the full body is served.
func TestConditionalHeaderPrecedenceAndInvalidIfModifiedSince(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	const volumeID = uint32(99)
	cluster := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, volumeClient := framework.DialVolumeServer(t, cluster.VolumeGRPCAddress())
	defer conn.Close()
	framework.AllocateVolume(t, volumeClient, volumeID, "")
	httpClient := framework.NewHTTPClient()
	fid := framework.NewFileID(volumeID, 772002, 0x2B3C4D5E)
	payload := []byte("conditional-precedence-content")
	uploadResp := framework.UploadBytes(t, httpClient, cluster.VolumeAdminURL(), fid, payload)
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("upload expected 201, got %d", uploadResp.StatusCode)
	}
	// Baseline read to capture the server-issued Last-Modified value.
	baseline := framework.ReadBytes(t, httpClient, cluster.VolumeAdminURL(), fid)
	_ = framework.ReadAllAndClose(t, baseline)
	if baseline.StatusCode != http.StatusOK {
		t.Fatalf("baseline read expected 200, got %d", baseline.StatusCode)
	}
	lastModified := baseline.Header.Get("Last-Modified")
	if lastModified == "" {
		t.Fatalf("baseline read expected Last-Modified header")
	}
	readURL := cluster.VolumeAdminURL() + "/" + fid
	// Valid If-Modified-Since plus mismatching If-None-Match: 304 wins.
	req := mustNewRequest(t, http.MethodGet, readURL)
	req.Header.Set("If-Modified-Since", lastModified)
	req.Header.Set("If-None-Match", "\"definitely-different-etag\"")
	resp := framework.DoRequest(t, httpClient, req)
	body := framework.ReadAllAndClose(t, resp)
	if resp.StatusCode != http.StatusNotModified {
		t.Fatalf("conditional precedence expected 304, got %d", resp.StatusCode)
	}
	if len(body) != 0 {
		t.Fatalf("conditional precedence expected empty body, got %d bytes", len(body))
	}
	// Malformed If-Modified-Since date: fall back to a full 200 response.
	req = mustNewRequest(t, http.MethodGet, readURL)
	req.Header.Set("If-Modified-Since", "not-a-valid-http-date")
	req.Header.Set("If-None-Match", "\"definitely-different-etag\"")
	resp = framework.DoRequest(t, httpClient, req)
	body = framework.ReadAllAndClose(t, resp)
	if resp.StatusCode != http.StatusOK {
		t.Fatalf("invalid If-Modified-Since with mismatched etag expected 200, got %d", resp.StatusCode)
	}
	if string(body) != string(payload) {
		t.Fatalf("invalid If-Modified-Since fallback body mismatch: got %q want %q", string(body), string(payload))
	}
}

View File

@@ -0,0 +1,123 @@
package volume_server_http_test
import (
"net/http"
"strconv"
"testing"
"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
)
// TestUploadReadRangeHeadDeleteRoundTrip walks a needle through its full
// HTTP lifecycle on a single volume server: upload, full read, conditional
// read (If-None-Match), range read (valid and unsatisfiable), HEAD (plain
// and conditional), delete, and read-after-delete.
func TestUploadReadRangeHeadDeleteRoundTrip(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	cluster := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, cluster.VolumeGRPCAddress())
	defer conn.Close()
	const volumeID = uint32(7)
	framework.AllocateVolume(t, grpcClient, volumeID, "")
	fid := framework.NewFileID(volumeID, 123456, 0xA1B2C3D4)
	data := []byte("hello-volume-server-integration")
	client := framework.NewHTTPClient()
	// Upload and expect 201 Created.
	uploadResp := framework.UploadBytes(t, client, cluster.VolumeAdminURL(), fid, data)
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("upload status: expected 201, got %d", uploadResp.StatusCode)
	}
	// Full read must return the exact uploaded bytes plus an ETag.
	getResp := framework.ReadBytes(t, client, cluster.VolumeAdminURL(), fid)
	getBody := framework.ReadAllAndClose(t, getResp)
	if getResp.StatusCode != http.StatusOK {
		t.Fatalf("get status: expected 200, got %d", getResp.StatusCode)
	}
	if string(getBody) != string(data) {
		t.Fatalf("get body mismatch: got %q want %q", string(getBody), string(data))
	}
	etag := getResp.Header.Get("ETag")
	if etag == "" {
		t.Fatalf("expected ETag header from GET response")
	}
	// A matching If-None-Match must short-circuit to 304.
	notModifiedReq := mustNewRequest(t, http.MethodGet, cluster.VolumeAdminURL()+"/"+fid)
	notModifiedReq.Header.Set("If-None-Match", etag)
	notModifiedResp := framework.DoRequest(t, client, notModifiedReq)
	_ = framework.ReadAllAndClose(t, notModifiedResp)
	if notModifiedResp.StatusCode != http.StatusNotModified {
		t.Fatalf("if-none-match expected 304, got %d", notModifiedResp.StatusCode)
	}
	// A satisfiable byte range returns 206 with only the requested slice.
	rangeReq := mustNewRequest(t, http.MethodGet, cluster.VolumeAdminURL()+"/"+fid)
	rangeReq.Header.Set("Range", "bytes=0-4")
	rangeResp := framework.DoRequest(t, client, rangeReq)
	rangeBody := framework.ReadAllAndClose(t, rangeResp)
	if rangeResp.StatusCode != http.StatusPartialContent {
		t.Fatalf("range status: expected 206, got %d", rangeResp.StatusCode)
	}
	if got, want := string(rangeBody), "hello"; got != want {
		t.Fatalf("range body mismatch: got %q want %q", got, want)
	}
	// A range entirely past the end of the needle must yield 416.
	invalidRangeReq := mustNewRequest(t, http.MethodGet, cluster.VolumeAdminURL()+"/"+fid)
	invalidRangeReq.Header.Set("Range", "bytes=9999-10000")
	invalidRangeResp := framework.DoRequest(t, client, invalidRangeReq)
	_ = framework.ReadAllAndClose(t, invalidRangeResp)
	if invalidRangeResp.StatusCode != http.StatusRequestedRangeNotSatisfiable {
		t.Fatalf("invalid range expected 416, got %d", invalidRangeResp.StatusCode)
	}
	// HEAD reports the stored length but carries no body.
	headResp := framework.DoRequest(t, client, mustNewRequest(t, http.MethodHead, cluster.VolumeAdminURL()+"/"+fid))
	headBody := framework.ReadAllAndClose(t, headResp)
	if headResp.StatusCode != http.StatusOK {
		t.Fatalf("head status: expected 200, got %d", headResp.StatusCode)
	}
	if got := headResp.Header.Get("Content-Length"); got != strconv.Itoa(len(data)) {
		t.Fatalf("head content-length mismatch: got %q want %d", got, len(data))
	}
	if len(headBody) != 0 {
		t.Fatalf("head body should be empty, got %d bytes", len(headBody))
	}
	// Conditional HEAD behaves like conditional GET: 304 with empty body.
	headNotModifiedReq := mustNewRequest(t, http.MethodHead, cluster.VolumeAdminURL()+"/"+fid)
	headNotModifiedReq.Header.Set("If-None-Match", etag)
	headNotModifiedResp := framework.DoRequest(t, client, headNotModifiedReq)
	headNotModifiedBody := framework.ReadAllAndClose(t, headNotModifiedResp)
	if headNotModifiedResp.StatusCode != http.StatusNotModified {
		t.Fatalf("head if-none-match expected 304, got %d", headNotModifiedResp.StatusCode)
	}
	if len(headNotModifiedBody) != 0 {
		t.Fatalf("head if-none-match body should be empty, got %d bytes", len(headNotModifiedBody))
	}
	// Delete is acknowledged with 202, after which reads return 404.
	deleteResp := framework.DoRequest(t, client, mustNewRequest(t, http.MethodDelete, cluster.VolumeAdminURL()+"/"+fid))
	_ = framework.ReadAllAndClose(t, deleteResp)
	if deleteResp.StatusCode != http.StatusAccepted {
		t.Fatalf("delete status: expected 202, got %d", deleteResp.StatusCode)
	}
	notFoundResp := framework.ReadBytes(t, client, cluster.VolumeAdminURL(), fid)
	_ = framework.ReadAllAndClose(t, notFoundResp)
	if notFoundResp.StatusCode != http.StatusNotFound {
		t.Fatalf("read after delete: expected 404, got %d", notFoundResp.StatusCode)
	}
}
// TestInvalidReadPathReturnsBadRequest verifies that a read path whose
// volume/needle segment cannot be parsed is rejected with 400.
func TestInvalidReadPathReturnsBadRequest(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	cluster := framework.StartSingleVolumeCluster(t, matrix.P1())
	badURL := cluster.VolumeAdminURL() + "/invalid,needle"
	resp := framework.DoRequest(t, framework.NewHTTPClient(), mustNewRequest(t, http.MethodGet, badURL))
	_ = framework.ReadAllAndClose(t, resp)
	if resp.StatusCode != http.StatusBadRequest {
		t.Fatalf("invalid read expected 400, got %d", resp.StatusCode)
	}
}

View File

@@ -0,0 +1,730 @@
package volume_server_http_test
import (
"bytes"
"context"
"io"
"net/http"
"testing"
"time"
"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
"github.com/seaweedfs/seaweedfs/weed/storage/needle"
)
type pausableReader struct {
remaining int64
pauseAfter int64
paused bool
unblock <-chan struct{}
}
func (r *pausableReader) Read(p []byte) (int, error) {
if r.remaining <= 0 {
return 0, io.EOF
}
if !r.paused && r.pauseAfter > 0 {
n := int64(len(p))
if n > r.pauseAfter {
n = r.pauseAfter
}
for i := int64(0); i < n; i++ {
p[i] = 'a'
}
r.remaining -= n
r.pauseAfter -= n
if r.pauseAfter == 0 {
r.paused = true
}
return int(n), nil
}
if r.paused {
<-r.unblock
r.paused = false
}
n := int64(len(p))
if n > r.remaining {
n = r.remaining
}
for i := int64(0); i < n; i++ {
p[i] = 'b'
}
r.remaining -= n
return int(n), nil
}
// TestUploadLimitTimeoutAndReplicateBypass occupies the P8 concurrent-upload
// limit with one blocked oversized upload, then asserts that a
// ?type=replicate upload bypasses the limit (201) while a normal upload is
// rejected with 429.
func TestUploadLimitTimeoutAndReplicateBypass(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P8())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()
	const volumeID = uint32(98)
	framework.AllocateVolume(t, grpcClient, volumeID, "")
	const blockedUploadSize = 2 * 1024 * 1024 // over 1MB P8 upload limit
	unblockFirstUpload := make(chan struct{})
	firstUploadDone := make(chan error, 1)
	firstFID := framework.NewFileID(volumeID, 880001, 0x1A2B3C4D)
	// Start an upload whose body pauses after one byte, holding the server's
	// upload-limit slot until unblockFirstUpload is closed.
	go func() {
		req, err := http.NewRequest(http.MethodPost, clusterHarness.VolumeAdminURL()+"/"+firstFID, &pausableReader{
			remaining:  blockedUploadSize,
			pauseAfter: 1,
			unblock:    unblockFirstUpload,
		})
		if err != nil {
			firstUploadDone <- err
			return
		}
		req.Header.Set("Content-Type", "application/octet-stream")
		req.ContentLength = blockedUploadSize
		resp, err := (&http.Client{}).Do(req)
		if resp != nil {
			// Drain and close so the connection can be reused.
			_, _ = io.Copy(io.Discard, resp.Body)
			_ = resp.Body.Close()
		}
		firstUploadDone <- err
	}()
	// Give the first upload time to pass limit checks and block in body processing.
	time.Sleep(300 * time.Millisecond)
	// Replication traffic (?type=replicate) must not be throttled.
	replicateFID := framework.NewFileID(volumeID, 880002, 0x5E6F7A8B)
	replicateReq, err := http.NewRequest(http.MethodPost, clusterHarness.VolumeAdminURL()+"/"+replicateFID+"?type=replicate", bytes.NewReader([]byte("replicate")))
	if err != nil {
		t.Fatalf("create replicate request: %v", err)
	}
	replicateReq.Header.Set("Content-Type", "application/octet-stream")
	replicateReq.ContentLength = int64(len("replicate"))
	replicateResp, err := framework.NewHTTPClient().Do(replicateReq)
	if err != nil {
		t.Fatalf("replicate request failed: %v", err)
	}
	_ = framework.ReadAllAndClose(t, replicateResp)
	if replicateResp.StatusCode != http.StatusCreated {
		t.Fatalf("replicate request expected 201 bypassing limit, got %d", replicateResp.StatusCode)
	}
	// A normal client upload must be rejected while the limit is held.
	normalFID := framework.NewFileID(volumeID, 880003, 0x9C0D1E2F)
	normalReq, err := http.NewRequest(http.MethodPost, clusterHarness.VolumeAdminURL()+"/"+normalFID, bytes.NewReader([]byte("normal")))
	if err != nil {
		t.Fatalf("create normal request: %v", err)
	}
	normalReq.Header.Set("Content-Type", "application/octet-stream")
	normalReq.ContentLength = int64(len("normal"))
	timeoutClient := &http.Client{Timeout: 10 * time.Second}
	normalResp, err := timeoutClient.Do(normalReq)
	if err != nil {
		t.Fatalf("normal upload request failed: %v", err)
	}
	_ = framework.ReadAllAndClose(t, normalResp)
	if normalResp.StatusCode != http.StatusTooManyRequests {
		t.Fatalf("normal upload expected 429 while limit blocked, got %d", normalResp.StatusCode)
	}
	// Release the blocked upload and wait for its goroutine to finish.
	close(unblockFirstUpload)
	select {
	case <-firstUploadDone:
	case <-time.After(5 * time.Second):
		t.Fatalf("timed out waiting for blocked upload to finish")
	}
}
// TestUploadLimitWaitThenProceed holds the P8 upload limit with a blocked
// upload, starts a second small upload that must queue behind it, then
// releases the first and asserts the queued upload completes with 201.
func TestUploadLimitWaitThenProceed(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P8())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()
	const volumeID = uint32(111)
	framework.AllocateVolume(t, grpcClient, volumeID, "")
	const blockedUploadSize = 2 * 1024 * 1024
	unblockFirstUpload := make(chan struct{})
	firstUploadDone := make(chan error, 1)
	firstFID := framework.NewFileID(volumeID, 880601, 0x6A2B3C4D)
	// First upload pauses after one byte, occupying the limit slot.
	go func() {
		req, err := http.NewRequest(http.MethodPost, clusterHarness.VolumeAdminURL()+"/"+firstFID, &pausableReader{
			remaining:  blockedUploadSize,
			pauseAfter: 1,
			unblock:    unblockFirstUpload,
		})
		if err != nil {
			firstUploadDone <- err
			return
		}
		req.Header.Set("Content-Type", "application/octet-stream")
		req.ContentLength = blockedUploadSize
		resp, err := (&http.Client{}).Do(req)
		if resp != nil {
			_ = framework.ReadAllAndClose(t, resp)
		}
		firstUploadDone <- err
	}()
	time.Sleep(300 * time.Millisecond)
	type uploadResult struct {
		resp *http.Response
		err  error
	}
	secondUploadDone := make(chan uploadResult, 1)
	secondFID := framework.NewFileID(volumeID, 880602, 0x6A2B3C4E)
	// Second upload should queue behind the held limit slot.
	go func() {
		req, err := http.NewRequest(http.MethodPost, clusterHarness.VolumeAdminURL()+"/"+secondFID, bytes.NewReader([]byte("wait-then-proceed")))
		if err != nil {
			secondUploadDone <- uploadResult{err: err}
			return
		}
		req.Header.Set("Content-Type", "application/octet-stream")
		req.ContentLength = int64(len("wait-then-proceed"))
		resp, err := (&http.Client{Timeout: 10 * time.Second}).Do(req)
		secondUploadDone <- uploadResult{resp: resp, err: err}
	}()
	// Let the second upload start waiting, then free the slot.
	time.Sleep(500 * time.Millisecond)
	close(unblockFirstUpload)
	select {
	case firstErr := <-firstUploadDone:
		if firstErr != nil {
			t.Fatalf("first blocked upload failed: %v", firstErr)
		}
	case <-time.After(5 * time.Second):
		t.Fatalf("timed out waiting for first upload completion")
	}
	select {
	case result := <-secondUploadDone:
		if result.err != nil {
			t.Fatalf("second upload failed: %v", result.err)
		}
		_ = framework.ReadAllAndClose(t, result.resp)
		if result.resp.StatusCode != http.StatusCreated {
			t.Fatalf("second upload expected 201 after waiting for slot, got %d", result.resp.StatusCode)
		}
	case <-time.After(5 * time.Second):
		t.Fatalf("timed out waiting for second upload completion")
	}
}
// TestUploadLimitTimeoutThenRecovery holds the P8 upload limit, asserts a
// second upload is rejected with 429, then releases the held slot and
// asserts a fresh upload succeeds (the limiter recovers cleanly).
func TestUploadLimitTimeoutThenRecovery(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P8())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()
	const volumeID = uint32(113)
	framework.AllocateVolume(t, grpcClient, volumeID, "")
	const blockedUploadSize = 2 * 1024 * 1024
	unblockFirstUpload := make(chan struct{})
	firstUploadDone := make(chan error, 1)
	firstFID := framework.NewFileID(volumeID, 880801, 0x7A2B3C4D)
	// Blocked oversized upload keeps the limit slot busy.
	go func() {
		req, err := http.NewRequest(http.MethodPost, clusterHarness.VolumeAdminURL()+"/"+firstFID, &pausableReader{
			remaining:  blockedUploadSize,
			pauseAfter: 1,
			unblock:    unblockFirstUpload,
		})
		if err != nil {
			firstUploadDone <- err
			return
		}
		req.Header.Set("Content-Type", "application/octet-stream")
		req.ContentLength = blockedUploadSize
		resp, err := (&http.Client{}).Do(req)
		if resp != nil {
			_ = framework.ReadAllAndClose(t, resp)
		}
		firstUploadDone <- err
	}()
	time.Sleep(300 * time.Millisecond)
	// While the slot is held, a second upload must be throttled.
	timeoutFID := framework.NewFileID(volumeID, 880802, 0x7A2B3C4E)
	timeoutResp := framework.UploadBytes(t, &http.Client{Timeout: 10 * time.Second}, clusterHarness.VolumeAdminURL(), timeoutFID, []byte("should-timeout"))
	_ = framework.ReadAllAndClose(t, timeoutResp)
	if timeoutResp.StatusCode != http.StatusTooManyRequests {
		t.Fatalf("second upload under blocked pressure expected 429, got %d", timeoutResp.StatusCode)
	}
	close(unblockFirstUpload)
	select {
	case firstErr := <-firstUploadDone:
		if firstErr != nil {
			t.Fatalf("first blocked upload failed: %v", firstErr)
		}
	case <-time.After(5 * time.Second):
		t.Fatalf("timed out waiting for first upload completion")
	}
	// Once the slot is free, uploads succeed again.
	recoveryFID := framework.NewFileID(volumeID, 880803, 0x7A2B3C4F)
	recoveryResp := framework.UploadBytes(t, framework.NewHTTPClient(), clusterHarness.VolumeAdminURL(), recoveryFID, []byte("recovered-upload"))
	_ = framework.ReadAllAndClose(t, recoveryResp)
	if recoveryResp.StatusCode != http.StatusCreated {
		t.Fatalf("recovery upload expected 201, got %d", recoveryResp.StatusCode)
	}
}
// TestDownloadLimitTimeoutReturnsTooManyRequests uploads a payload larger
// than the P8 download limit, leaves one GET's body unread so the server's
// write path stays in-flight, and asserts a second GET is throttled (429).
func TestDownloadLimitTimeoutReturnsTooManyRequests(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P8())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()
	const volumeID = uint32(99)
	framework.AllocateVolume(t, grpcClient, volumeID, "")
	largePayload := make([]byte, 12*1024*1024) // over 1MB P8 download limit
	for i := range largePayload {
		largePayload[i] = byte(i % 251) // deterministic non-repeating-ish fill
	}
	downloadFID := framework.NewFileID(volumeID, 880101, 0x10203040)
	uploadResp := framework.UploadBytes(t, framework.NewHTTPClient(), clusterHarness.VolumeAdminURL(), downloadFID, largePayload)
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("large upload expected 201, got %d", uploadResp.StatusCode)
	}
	firstResp, err := (&http.Client{}).Do(mustNewRequest(t, http.MethodGet, clusterHarness.VolumeAdminURL()+"/"+downloadFID))
	if err != nil {
		t.Fatalf("first GET failed: %v", err)
	}
	if firstResp.StatusCode != http.StatusOK {
		_ = framework.ReadAllAndClose(t, firstResp)
		t.Fatalf("first GET expected 200, got %d", firstResp.StatusCode)
	}
	defer firstResp.Body.Close()
	// Keep first response body unread so server write path stays in-flight.
	time.Sleep(300 * time.Millisecond)
	secondClient := &http.Client{Timeout: 10 * time.Second}
	secondResp, err := secondClient.Do(mustNewRequest(t, http.MethodGet, clusterHarness.VolumeAdminURL()+"/"+downloadFID))
	if err != nil {
		t.Fatalf("second GET failed: %v", err)
	}
	_ = framework.ReadAllAndClose(t, secondResp)
	if secondResp.StatusCode != http.StatusTooManyRequests {
		t.Fatalf("second GET expected 429 while first download holds limit, got %d", secondResp.StatusCode)
	}
}
// TestDownloadLimitWaitThenProceedWithoutReplica holds the P8 download
// limit with an unread GET body, starts a second GET that must wait for
// the slot, then closes the first body and asserts the second GET
// completes with 200 and the full payload (no replica to proxy to).
func TestDownloadLimitWaitThenProceedWithoutReplica(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P8())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()
	const volumeID = uint32(112)
	framework.AllocateVolume(t, grpcClient, volumeID, "")
	largePayload := make([]byte, 12*1024*1024)
	for i := range largePayload {
		largePayload[i] = byte(i % 251)
	}
	fid := framework.NewFileID(volumeID, 880701, 0x60708090)
	uploadResp := framework.UploadBytes(t, framework.NewHTTPClient(), clusterHarness.VolumeAdminURL(), fid, largePayload)
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("large upload expected 201, got %d", uploadResp.StatusCode)
	}
	// First GET is left unread to hold the download slot.
	firstResp, err := (&http.Client{}).Do(mustNewRequest(t, http.MethodGet, clusterHarness.VolumeAdminURL()+"/"+fid))
	if err != nil {
		t.Fatalf("first GET failed: %v", err)
	}
	if firstResp.StatusCode != http.StatusOK {
		_ = framework.ReadAllAndClose(t, firstResp)
		t.Fatalf("first GET expected 200, got %d", firstResp.StatusCode)
	}
	type readResult struct {
		resp *http.Response
		err  error
	}
	secondReadDone := make(chan readResult, 1)
	// Second GET should queue behind the held slot.
	go func() {
		resp, readErr := (&http.Client{Timeout: 10 * time.Second}).Do(mustNewRequest(t, http.MethodGet, clusterHarness.VolumeAdminURL()+"/"+fid))
		secondReadDone <- readResult{resp: resp, err: readErr}
	}()
	// Let it start waiting, then release the slot by closing the first body.
	time.Sleep(500 * time.Millisecond)
	_ = firstResp.Body.Close()
	select {
	case result := <-secondReadDone:
		if result.err != nil {
			t.Fatalf("second GET failed: %v", result.err)
		}
		secondBody := framework.ReadAllAndClose(t, result.resp)
		if result.resp.StatusCode != http.StatusOK {
			t.Fatalf("second GET expected 200 after waiting for slot, got %d", result.resp.StatusCode)
		}
		if len(secondBody) != len(largePayload) {
			t.Fatalf("second GET body size mismatch: got %d want %d", len(secondBody), len(largePayload))
		}
	case <-time.After(5 * time.Second):
		t.Fatalf("timed out waiting for second GET completion")
	}
}
// TestDownloadLimitTimeoutThenRecovery holds the P8 download limit with an
// unread GET body, asserts a second GET is throttled with 429, then closes
// the first body and asserts a fresh GET returns the full payload.
func TestDownloadLimitTimeoutThenRecovery(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P8())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()
	const volumeID = uint32(114)
	framework.AllocateVolume(t, grpcClient, volumeID, "")
	largePayload := make([]byte, 12*1024*1024)
	for i := range largePayload {
		largePayload[i] = byte(i % 251)
	}
	fid := framework.NewFileID(volumeID, 880901, 0x708090A0)
	uploadResp := framework.UploadBytes(t, framework.NewHTTPClient(), clusterHarness.VolumeAdminURL(), fid, largePayload)
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("large upload expected 201, got %d", uploadResp.StatusCode)
	}
	// First GET body stays unread to hold the download slot.
	firstResp, err := (&http.Client{}).Do(mustNewRequest(t, http.MethodGet, clusterHarness.VolumeAdminURL()+"/"+fid))
	if err != nil {
		t.Fatalf("first GET failed: %v", err)
	}
	if firstResp.StatusCode != http.StatusOK {
		_ = framework.ReadAllAndClose(t, firstResp)
		t.Fatalf("first GET expected 200, got %d", firstResp.StatusCode)
	}
	time.Sleep(300 * time.Millisecond)
	timeoutResp := framework.ReadBytes(t, &http.Client{Timeout: 10 * time.Second}, clusterHarness.VolumeAdminURL(), fid)
	_ = framework.ReadAllAndClose(t, timeoutResp)
	if timeoutResp.StatusCode != http.StatusTooManyRequests {
		t.Fatalf("second GET under blocked pressure expected 429, got %d", timeoutResp.StatusCode)
	}
	// Release the slot; subsequent reads must succeed again.
	_ = firstResp.Body.Close()
	recoveryResp := framework.ReadBytes(t, framework.NewHTTPClient(), clusterHarness.VolumeAdminURL(), fid)
	recoveryBody := framework.ReadAllAndClose(t, recoveryResp)
	if recoveryResp.StatusCode != http.StatusOK {
		t.Fatalf("recovery GET expected 200, got %d", recoveryResp.StatusCode)
	}
	if len(recoveryBody) != len(largePayload) {
		t.Fatalf("recovery GET body size mismatch: got %d want %d", len(recoveryBody), len(largePayload))
	}
}
// TestDownloadLimitOverageProxiesToReplica runs a two-node cluster with
// replication "001" and readMode=proxy, holds the download limit on node0,
// and asserts a second GET against node0 still returns 200 with the full
// payload by being proxied to the replica on node1.
func TestDownloadLimitOverageProxiesToReplica(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	profile := matrix.P8()
	profile.ReadMode = "proxy"
	clusterHarness := framework.StartDualVolumeCluster(t, profile)
	conn0, grpc0 := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress(0))
	defer conn0.Close()
	conn1, grpc1 := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress(1))
	defer conn1.Close()
	const volumeID = uint32(100)
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	// Allocate the same replicated volume on both nodes.
	req := &volume_server_pb.AllocateVolumeRequest{
		VolumeId:    volumeID,
		Replication: "001",
		Version:     uint32(needle.GetCurrentVersion()),
	}
	if _, err := grpc0.AllocateVolume(ctx, req); err != nil {
		t.Fatalf("allocate replicated volume on node0: %v", err)
	}
	if _, err := grpc1.AllocateVolume(ctx, req); err != nil {
		t.Fatalf("allocate replicated volume on node1: %v", err)
	}
	largePayload := make([]byte, 12*1024*1024)
	for i := range largePayload {
		largePayload[i] = byte(i % 251)
	}
	fid := framework.NewFileID(volumeID, 880201, 0x0A0B0C0D)
	uploadResp := framework.UploadBytes(t, framework.NewHTTPClient(), clusterHarness.VolumeAdminURL(0), fid, largePayload)
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("replicated large upload expected 201, got %d", uploadResp.StatusCode)
	}
	// Wait until the replica on node1 can serve the needle.
	replicaReadURL := clusterHarness.VolumeAdminURL(1) + "/" + fid
	if !waitForHTTPStatus(t, framework.NewHTTPClient(), replicaReadURL, http.StatusOK, 10*time.Second, func(resp *http.Response) {
		_ = framework.ReadAllAndClose(t, resp)
	}) {
		t.Fatalf("replica did not become readable within deadline")
	}
	// Hold node0's download slot by leaving this body unread.
	firstResp, err := (&http.Client{}).Do(mustNewRequest(t, http.MethodGet, clusterHarness.VolumeAdminURL(0)+"/"+fid))
	if err != nil {
		t.Fatalf("first GET failed: %v", err)
	}
	if firstResp.StatusCode != http.StatusOK {
		_ = framework.ReadAllAndClose(t, firstResp)
		t.Fatalf("first GET expected 200, got %d", firstResp.StatusCode)
	}
	defer firstResp.Body.Close()
	time.Sleep(300 * time.Millisecond)
	secondResp, err := framework.NewHTTPClient().Do(mustNewRequest(t, http.MethodGet, clusterHarness.VolumeAdminURL(0)+"/"+fid))
	if err != nil {
		t.Fatalf("second GET failed: %v", err)
	}
	secondBody := framework.ReadAllAndClose(t, secondResp)
	if secondResp.StatusCode != http.StatusOK {
		t.Fatalf("second GET expected 200 via replica proxy fallback, got %d", secondResp.StatusCode)
	}
	if len(secondBody) != len(largePayload) {
		t.Fatalf("second GET proxied body size mismatch: got %d want %d", len(secondBody), len(largePayload))
	}
}
// TestDownloadLimitProxiedRequestSkipsReplicaFallbackAndTimesOut mirrors
// TestDownloadLimitOverageProxiesToReplica, but sends the second GET with
// ?proxied=true so the server must NOT re-proxy to the replica and instead
// hits the wait/timeout branch, returning 429.
func TestDownloadLimitProxiedRequestSkipsReplicaFallbackAndTimesOut(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	profile := matrix.P8()
	profile.ReadMode = "proxy"
	clusterHarness := framework.StartDualVolumeCluster(t, profile)
	conn0, grpc0 := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress(0))
	defer conn0.Close()
	conn1, grpc1 := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress(1))
	defer conn1.Close()
	const volumeID = uint32(106)
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	// Allocate the same replicated volume on both nodes.
	req := &volume_server_pb.AllocateVolumeRequest{
		VolumeId:    volumeID,
		Replication: "001",
		Version:     uint32(needle.GetCurrentVersion()),
	}
	if _, err := grpc0.AllocateVolume(ctx, req); err != nil {
		t.Fatalf("allocate replicated volume on node0: %v", err)
	}
	if _, err := grpc1.AllocateVolume(ctx, req); err != nil {
		t.Fatalf("allocate replicated volume on node1: %v", err)
	}
	largePayload := make([]byte, 12*1024*1024)
	for i := range largePayload {
		largePayload[i] = byte(i % 251)
	}
	fid := framework.NewFileID(volumeID, 880202, 0x0A0B0D0E)
	uploadResp := framework.UploadBytes(t, framework.NewHTTPClient(), clusterHarness.VolumeAdminURL(0), fid, largePayload)
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("replicated large upload expected 201, got %d", uploadResp.StatusCode)
	}
	// Ensure replica path is actually available, so a non-proxied request would proxy.
	replicaReadURL := clusterHarness.VolumeAdminURL(1) + "/" + fid
	if !waitForHTTPStatus(t, framework.NewHTTPClient(), replicaReadURL, http.StatusOK, 10*time.Second, func(resp *http.Response) {
		_ = framework.ReadAllAndClose(t, resp)
	}) {
		t.Fatalf("replica did not become readable within deadline")
	}
	// Hold node0's download slot by leaving this body unread.
	firstResp, err := (&http.Client{}).Do(mustNewRequest(t, http.MethodGet, clusterHarness.VolumeAdminURL(0)+"/"+fid))
	if err != nil {
		t.Fatalf("first GET failed: %v", err)
	}
	if firstResp.StatusCode != http.StatusOK {
		_ = framework.ReadAllAndClose(t, firstResp)
		t.Fatalf("first GET expected 200, got %d", firstResp.StatusCode)
	}
	defer firstResp.Body.Close()
	time.Sleep(300 * time.Millisecond)
	// proxied=true should bypass replica fallback and hit wait/timeout branch.
	secondResp, err := framework.NewHTTPClient().Do(mustNewRequest(t, http.MethodGet, clusterHarness.VolumeAdminURL(0)+"/"+fid+"?proxied=true"))
	if err != nil {
		t.Fatalf("second GET failed: %v", err)
	}
	_ = framework.ReadAllAndClose(t, secondResp)
	if secondResp.StatusCode != http.StatusTooManyRequests {
		t.Fatalf("second GET with proxied=true expected 429 timeout path, got %d", secondResp.StatusCode)
	}
}
// TestUploadLimitDisabledAllowsConcurrentUploads uses the P1 profile (no
// upload limit) and asserts a second upload succeeds with 201 even while a
// first oversized upload is blocked mid-body.
func TestUploadLimitDisabledAllowsConcurrentUploads(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()
	const volumeID = uint32(107)
	framework.AllocateVolume(t, grpcClient, volumeID, "")
	const blockedUploadSize = 2 * 1024 * 1024
	unblockFirstUpload := make(chan struct{})
	firstUploadDone := make(chan error, 1)
	firstFID := framework.NewFileID(volumeID, 880301, 0x1A2B3C5D)
	// First upload pauses after one byte and stays in-flight.
	go func() {
		req, err := http.NewRequest(http.MethodPost, clusterHarness.VolumeAdminURL()+"/"+firstFID, &pausableReader{
			remaining:  blockedUploadSize,
			pauseAfter: 1,
			unblock:    unblockFirstUpload,
		})
		if err != nil {
			firstUploadDone <- err
			return
		}
		req.Header.Set("Content-Type", "application/octet-stream")
		req.ContentLength = blockedUploadSize
		resp, err := (&http.Client{}).Do(req)
		if resp != nil {
			_ = framework.ReadAllAndClose(t, resp)
		}
		firstUploadDone <- err
	}()
	time.Sleep(300 * time.Millisecond)
	// With no limit configured, this must not be throttled.
	secondFID := framework.NewFileID(volumeID, 880302, 0x1A2B3C5E)
	secondResp := framework.UploadBytes(t, framework.NewHTTPClient(), clusterHarness.VolumeAdminURL(), secondFID, []byte("no-limit-second-upload"))
	_ = framework.ReadAllAndClose(t, secondResp)
	if secondResp.StatusCode != http.StatusCreated {
		t.Fatalf("second upload with disabled limit expected 201, got %d", secondResp.StatusCode)
	}
	close(unblockFirstUpload)
	select {
	case <-firstUploadDone:
	case <-time.After(5 * time.Second):
		t.Fatalf("timed out waiting for first upload completion")
	}
}
// TestDownloadLimitDisabledAllowsConcurrentDownloads uses the P1 profile
// (no download limit) and asserts a second GET returns the full payload
// even while a first GET's body is still unread.
func TestDownloadLimitDisabledAllowsConcurrentDownloads(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()
	const volumeID = uint32(108)
	framework.AllocateVolume(t, grpcClient, volumeID, "")
	largePayload := make([]byte, 12*1024*1024)
	for i := range largePayload {
		largePayload[i] = byte(i % 251)
	}
	fid := framework.NewFileID(volumeID, 880401, 0x20304050)
	uploadResp := framework.UploadBytes(t, framework.NewHTTPClient(), clusterHarness.VolumeAdminURL(), fid, largePayload)
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("large upload expected 201, got %d", uploadResp.StatusCode)
	}
	// First GET body stays unread; with no limit this must not block others.
	firstResp, err := (&http.Client{}).Do(mustNewRequest(t, http.MethodGet, clusterHarness.VolumeAdminURL()+"/"+fid))
	if err != nil {
		t.Fatalf("first GET failed: %v", err)
	}
	if firstResp.StatusCode != http.StatusOK {
		_ = framework.ReadAllAndClose(t, firstResp)
		t.Fatalf("first GET expected 200, got %d", firstResp.StatusCode)
	}
	defer firstResp.Body.Close()
	time.Sleep(300 * time.Millisecond)
	secondResp := framework.ReadBytes(t, framework.NewHTTPClient(), clusterHarness.VolumeAdminURL(), fid)
	secondBody := framework.ReadAllAndClose(t, secondResp)
	if secondResp.StatusCode != http.StatusOK {
		t.Fatalf("second GET with disabled limit expected 200, got %d", secondResp.StatusCode)
	}
	if len(secondBody) != len(largePayload) {
		t.Fatalf("second GET body size mismatch: got %d want %d", len(secondBody), len(largePayload))
	}
}
// TestDownloadLimitInvalidVidWhileOverLimitReturnsBadRequest holds the P8
// download limit with an unread GET body and asserts that a request with
// an unparsable volume id is still rejected with 400 (request validation
// runs before the limiter's throttle response).
func TestDownloadLimitInvalidVidWhileOverLimitReturnsBadRequest(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	clusterHarness := framework.StartSingleVolumeCluster(t, matrix.P8())
	conn, grpcClient := framework.DialVolumeServer(t, clusterHarness.VolumeGRPCAddress())
	defer conn.Close()
	const volumeID = uint32(110)
	framework.AllocateVolume(t, grpcClient, volumeID, "")
	largePayload := make([]byte, 12*1024*1024)
	for i := range largePayload {
		largePayload[i] = byte(i % 251)
	}
	fid := framework.NewFileID(volumeID, 880501, 0x50607080)
	uploadResp := framework.UploadBytes(t, framework.NewHTTPClient(), clusterHarness.VolumeAdminURL(), fid, largePayload)
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != http.StatusCreated {
		t.Fatalf("large upload expected 201, got %d", uploadResp.StatusCode)
	}
	// First GET body stays unread to hold the download slot.
	firstResp, err := (&http.Client{}).Do(mustNewRequest(t, http.MethodGet, clusterHarness.VolumeAdminURL()+"/"+fid))
	if err != nil {
		t.Fatalf("first GET failed: %v", err)
	}
	if firstResp.StatusCode != http.StatusOK {
		_ = framework.ReadAllAndClose(t, firstResp)
		t.Fatalf("first GET expected 200, got %d", firstResp.StatusCode)
	}
	defer firstResp.Body.Close()
	time.Sleep(300 * time.Millisecond)
	invalidReq := mustNewRequest(t, http.MethodGet, clusterHarness.VolumeAdminURL()+"/not-a-vid,1234567890ab")
	invalidResp := framework.DoRequest(t, framework.NewHTTPClient(), invalidReq)
	_ = framework.ReadAllAndClose(t, invalidResp)
	if invalidResp.StatusCode != http.StatusBadRequest {
		t.Fatalf("invalid vid while over limit expected 400, got %d", invalidResp.StatusCode)
	}
}

View File

@@ -0,0 +1,118 @@
package volume_server_http_test
import (
"encoding/json"
"net/http"
"testing"
"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
)
// TestWriteUnchangedAndDeleteEdgeVariants covers write/delete edge cases:
// re-uploading identical content returns 204 with an ETag, deleting with a
// mismatched cookie returns 400, and deleting a missing needle returns 404
// with a JSON body reporting size 0.
func TestWriteUnchangedAndDeleteEdgeVariants(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	const (
		volumeID = uint32(87)
		key      = uint64(999001)
		cookie   = uint32(0xDEADBEEF)
	)
	cluster := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, volumeClient := framework.DialVolumeServer(t, cluster.VolumeGRPCAddress())
	defer conn.Close()
	framework.AllocateVolume(t, volumeClient, volumeID, "")
	httpClient := framework.NewHTTPClient()
	fid := framework.NewFileID(volumeID, key, cookie)
	uploadURL := cluster.VolumeAdminURL() + "/" + fid
	payload := []byte("unchanged-write-content")
	first := framework.DoRequest(t, httpClient, newUploadRequest(t, uploadURL, payload))
	_ = framework.ReadAllAndClose(t, first)
	if first.StatusCode != http.StatusCreated {
		t.Fatalf("first upload expected 201, got %d", first.StatusCode)
	}
	// Re-uploading the same bytes is treated as an unchanged write.
	second := framework.DoRequest(t, httpClient, newUploadRequest(t, uploadURL, payload))
	_ = framework.ReadAllAndClose(t, second)
	if second.StatusCode != http.StatusNoContent {
		t.Fatalf("second unchanged upload expected 204, got %d", second.StatusCode)
	}
	if second.Header.Get("ETag") == "" {
		t.Fatalf("second unchanged upload expected ETag header")
	}
	// Delete with a mismatched cookie is rejected outright.
	mismatchFid := framework.NewFileID(volumeID, key, cookie+1)
	mismatchResp := framework.DoRequest(t, httpClient, mustNewRequest(t, http.MethodDelete, cluster.VolumeAdminURL()+"/"+mismatchFid))
	_ = framework.ReadAllAndClose(t, mismatchResp)
	if mismatchResp.StatusCode != http.StatusBadRequest {
		t.Fatalf("delete with mismatched cookie expected 400, got %d", mismatchResp.StatusCode)
	}
	// Delete of a needle that was never written: 404 with size=0 JSON.
	missingFid := framework.NewFileID(volumeID, key+1, cookie)
	missingResp := framework.DoRequest(t, httpClient, mustNewRequest(t, http.MethodDelete, cluster.VolumeAdminURL()+"/"+missingFid))
	missingBody := framework.ReadAllAndClose(t, missingResp)
	if missingResp.StatusCode != http.StatusNotFound {
		t.Fatalf("delete missing needle expected 404, got %d", missingResp.StatusCode)
	}
	var decoded map[string]int64
	if err := json.Unmarshal(missingBody, &decoded); err != nil {
		t.Fatalf("decode delete missing response: %v", err)
	}
	if decoded["size"] != 0 {
		t.Fatalf("delete missing needle expected size=0, got %d", decoded["size"])
	}
}
// TestDeleteTimestampOverrideKeepsReadDeletedLastModifiedParity writes a
// needle, deletes it with an explicit ?ts= override, and verifies that a
// subsequent ?readDeleted=true read still serves the needle with the same
// Last-Modified header the live needle had before the delete.
func TestDeleteTimestampOverrideKeepsReadDeletedLastModifiedParity(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	// Start a cluster, allocate a volume, and write one needle into it.
	harness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, harness.VolumeGRPCAddress())
	defer conn.Close()
	const volumeID = uint32(88)
	framework.AllocateVolume(t, grpcClient, volumeID, "")

	httpClient := framework.NewHTTPClient()
	fid := framework.NewFileID(volumeID, 999002, 0xABCD1234)
	writeResp := framework.UploadBytes(t, httpClient, harness.VolumeAdminURL(), fid, []byte("delete-ts-override"))
	_ = framework.ReadAllAndClose(t, writeResp)
	if writeResp.StatusCode != http.StatusCreated {
		t.Fatalf("upload expected 201, got %d", writeResp.StatusCode)
	}

	// Record Last-Modified before the delete for the parity check below.
	preDelete := framework.ReadBytes(t, httpClient, harness.VolumeAdminURL(), fid)
	_ = framework.ReadAllAndClose(t, preDelete)
	if preDelete.StatusCode != http.StatusOK {
		t.Fatalf("pre-delete read expected 200, got %d", preDelete.StatusCode)
	}
	wantLastModified := preDelete.Header.Get("Last-Modified")
	if wantLastModified == "" {
		t.Fatalf("expected Last-Modified before delete")
	}

	// Delete with an explicit timestamp override in the query string.
	delResp := framework.DoRequest(t, httpClient, mustNewRequest(t, http.MethodDelete, harness.VolumeAdminURL()+"/"+fid+"?ts=1700000000"))
	_ = framework.ReadAllAndClose(t, delResp)
	if delResp.StatusCode != http.StatusAccepted {
		t.Fatalf("delete with ts override expected 202, got %d", delResp.StatusCode)
	}

	// readDeleted=true must still serve the needle and keep its Last-Modified.
	readDeleted := framework.DoRequest(t, httpClient, mustNewRequest(t, http.MethodGet, harness.VolumeAdminURL()+"/"+fid+"?readDeleted=true"))
	_ = framework.ReadAllAndClose(t, readDeleted)
	if readDeleted.StatusCode != http.StatusOK {
		t.Fatalf("readDeleted after ts override expected 200, got %d", readDeleted.StatusCode)
	}
	gotLastModified := readDeleted.Header.Get("Last-Modified")
	if gotLastModified == "" {
		t.Fatalf("expected Last-Modified header on readDeleted response")
	}
	if gotLastModified != wantLastModified {
		t.Fatalf("expected readDeleted Last-Modified parity with pre-delete header, got %q want %q", gotLastModified, wantLastModified)
	}
}

View File

@@ -0,0 +1,74 @@
package volume_server_http_test
import (
"net/http"
"strings"
"testing"
"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
)
// TestWriteInvalidVidAndFidReturnBadRequest verifies that uploads addressed
// to an unparseable volume id or an unparseable file id are rejected with 400
// before any data is written.
func TestWriteInvalidVidAndFidReturnBadRequest(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	harness := framework.StartSingleVolumeCluster(t, matrix.P1())
	httpClient := framework.NewHTTPClient()

	// A non-numeric volume id segment in the path must be rejected.
	badVidResp := framework.DoRequest(t, httpClient, newUploadRequest(t, harness.VolumeAdminURL()+"/invalid,12345678", []byte("x")))
	_ = framework.ReadAllAndClose(t, badVidResp)
	if badVidResp.StatusCode != http.StatusBadRequest {
		t.Fatalf("write with invalid vid expected 400, got %d", badVidResp.StatusCode)
	}

	// A valid vid with an unparseable key/cookie part must also be rejected.
	badFidResp := framework.DoRequest(t, httpClient, newUploadRequest(t, harness.VolumeAdminURL()+"/1,bad", []byte("x")))
	_ = framework.ReadAllAndClose(t, badFidResp)
	if badFidResp.StatusCode != http.StatusBadRequest {
		t.Fatalf("write with invalid fid expected 400, got %d", badFidResp.StatusCode)
	}
}
// TestWriteMalformedMultipartAndMD5Mismatch exercises two write rejection
// paths: a multipart body whose Content-Type carries no boundary parameter,
// and an upload whose Content-MD5 header does not match the payload. Both
// must come back as 400 with a diagnostic body.
func TestWriteMalformedMultipartAndMD5Mismatch(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	harness := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, grpcClient := framework.DialVolumeServer(t, harness.VolumeGRPCAddress())
	defer conn.Close()
	const volumeID = uint32(98)
	framework.AllocateVolume(t, grpcClient, volumeID, "")

	httpClient := framework.NewHTTPClient()
	fid := framework.NewFileID(volumeID, 772001, 0x1A2B3C4D)

	// Deliberately omit the boundary parameter so multipart parsing fails.
	badMultipart, err := http.NewRequest(http.MethodPost, harness.VolumeAdminURL()+"/"+fid, strings.NewReader("not-a-valid-multipart-body"))
	if err != nil {
		t.Fatalf("create malformed multipart request: %v", err)
	}
	badMultipart.Header.Set("Content-Type", "multipart/form-data")
	badMultipartResp := framework.DoRequest(t, httpClient, badMultipart)
	badMultipartBody := framework.ReadAllAndClose(t, badMultipartResp)
	if badMultipartResp.StatusCode != http.StatusBadRequest {
		t.Fatalf("malformed multipart write expected 400, got %d", badMultipartResp.StatusCode)
	}
	if !strings.Contains(strings.ToLower(string(badMultipartBody)), "boundary") {
		t.Fatalf("malformed multipart response should mention boundary parse failure, got %q", string(badMultipartBody))
	}

	// An all-zero Content-MD5 cannot match the payload, so the write must fail.
	mismatchReq := newUploadRequest(t, harness.VolumeAdminURL()+"/"+fid, []byte("content-md5-mismatch-body"))
	mismatchReq.Header.Set("Content-MD5", "AAAAAAAAAAAAAAAAAAAAAA==")
	mismatchResp := framework.DoRequest(t, httpClient, mismatchReq)
	mismatchBody := framework.ReadAllAndClose(t, mismatchResp)
	if mismatchResp.StatusCode != http.StatusBadRequest {
		t.Fatalf("content-md5 mismatch write expected 400, got %d", mismatchResp.StatusCode)
	}
	if !strings.Contains(string(mismatchBody), "Content-MD5") {
		t.Fatalf("content-md5 mismatch response should mention Content-MD5, got %q", string(mismatchBody))
	}
}