Add volume server integration test suite and CI workflow (#8322)
* docs(volume_server): add integration test development plan * test(volume_server): add integration harness and profile matrix * test(volume_server/http): add admin and options integration coverage * test(volume_server/grpc): add state and status integration coverage * test(volume_server): auto-build weed binary and harden cluster startup * test(volume_server/http): add upload read range head delete coverage * test(volume_server/grpc): expand admin lifecycle and state coverage * docs(volume_server): update progress tracker for implemented tests * test(volume_server/http): cover if-none-match and invalid-range branches * test(volume_server/grpc): add batch delete integration coverage * docs(volume_server): log latest HTTP and gRPC test coverage * ci(volume_server): run volume server integration tests in github actions * test(volume_server/grpc): add needle status configure ping and leave coverage * docs(volume_server): record additional grpc coverage progress * test(volume_server/grpc): add vacuum integration coverage * docs(volume_server): record vacuum test coverage progress * test(volume_server/grpc): add read and write needle blob error-path coverage * docs(volume_server): record data rw grpc coverage progress * test(volume_server/http): add jwt auth integration coverage * test(volume_server/grpc): add sync copy and stream error-path coverage * docs(volume_server): record jwt and sync/copy test coverage * test(volume_server/grpc): add scrub and query integration coverage * test(volume_server/grpc): add volume tail sender and receiver coverage * docs(volume_server): record scrub query and tail test progress * test(volume_server/grpc): add readonly writable and collection lifecycle coverage * test(volume_server/http): add public-port cors and method parity coverage * test(volume_server/grpc): add blob meta and read-all success path coverage * test(volume_server/grpc): expand scrub and query variation coverage * test(volume_server/grpc): add tiering and remote 
fetch error-path coverage * test(volume_server/http): add unchanged write and delete edge-case coverage * test(volume_server/grpc): add ping unknown and unreachable target coverage * test(volume_server/grpc): add volume delete only-empty variation coverage * test(volume_server/http): add jwt fid-mismatch auth coverage * test(volume_server/grpc): add scrub ec auto-select empty coverage * test(volume_server/grpc): stabilize ping timestamp assertion * docs(volume_server): update integration coverage progress log * test(volume_server/grpc): add tier remote backend and config variation coverage * docs(volume_server): record tier remote variation progress * test(volume_server/grpc): add incremental copy and receive-file protocol coverage * test(volume_server/http): add read path shape and if-modified-since coverage * test(volume_server/grpc): add copy-file compaction and receive-file success coverage * test(volume_server/http): add passthrough headers and static asset coverage * test(volume_server/grpc): add ping filer unreachable coverage * docs(volume_server): record copy receive and http variant progress * test(volume_server/grpc): add erasure coding maintenance and missing-path coverage * docs(volume_server): record initial erasure coding rpc coverage * test(volume_server/http): add multi-range multipart response coverage * docs(volume_server): record multi-range http coverage progress * test(volume_server/grpc): add query empty-stripe no-match coverage * docs(volume_server): record query no-match stream behavior coverage * test(volume_server/http): add upload throttling timeout and replicate bypass coverage * docs(volume_server): record upload throttling coverage progress * test(volume_server/http): add download throttling timeout coverage * docs(volume_server): record download throttling coverage progress * test(volume_server/http): add jwt wrong-cookie fid mismatch coverage * docs(volume_server): record jwt wrong-cookie mismatch coverage * 
test(volume_server/http): add jwt expired-token rejection coverage * docs(volume_server): record jwt expired-token coverage * test(volume_server/http): add jwt query and cookie transport coverage * docs(volume_server): record jwt token transport coverage * test(volume_server/http): add jwt token-source precedence coverage * docs(volume_server): record jwt token-source precedence coverage * test(volume_server/http): add jwt header-over-cookie precedence coverage * docs(volume_server): record jwt header cookie precedence coverage * test(volume_server/http): add jwt query-over-cookie precedence coverage * docs(volume_server): record jwt query cookie precedence coverage * test(volume_server/grpc): add setstate version mismatch and nil-state coverage * docs(volume_server): record setstate validation coverage * test(volume_server/grpc): add readonly persist-true lifecycle coverage * docs(volume_server): record readonly persist variation coverage * test(volume_server/http): add options origin cors header coverage * docs(volume_server): record options origin cors coverage * test(volume_server/http): add trace unsupported-method parity coverage * docs(volume_server): record trace method parity coverage * test(volume_server/grpc): add batch delete cookie-check variation coverage * docs(volume_server): record batch delete cookie-check coverage * test(volume_server/grpc): add admin lifecycle missing and maintenance variants * docs(volume_server): record admin lifecycle edge-case coverage * test(volume_server/grpc): add mixed batch delete status matrix coverage * docs(volume_server): record mixed batch delete matrix coverage * test(volume_server/http): add jwt-profile ui access gating coverage * docs(volume_server): record jwt ui-gating http coverage * test(volume_server/http): add propfind unsupported-method parity coverage * docs(volume_server): record propfind method parity coverage * test(volume_server/grpc): add volume configure success and rollback-path coverage * 
docs(volume_server): record volume configure branch coverage * test(volume_server/grpc): add volume needle status missing-path coverage * docs(volume_server): record volume needle status error-path coverage * test(volume_server/http): add readDeleted query behavior coverage * docs(volume_server): record readDeleted http behavior coverage * test(volume_server/http): add delete ts override parity coverage * docs(volume_server): record delete ts parity coverage * test(volume_server/grpc): add invalid blob/meta offset coverage * docs(volume_server): record invalid blob/meta offset coverage * test(volume_server/grpc): add read-all mixed volume abort coverage * docs(volume_server): record read-all mixed-volume abort coverage * test(volume_server/http): assert head response body parity * docs(volume_server): record head body parity assertion * test(volume_server/grpc): assert status state and memory payload completeness * docs(volume_server): record volume server status payload coverage * test(volume_server/grpc): add batch delete chunk-manifest rejection coverage * docs(volume_server): record batch delete chunk-manifest coverage * test(volume_server/grpc): add query cookie-mismatch eof parity coverage * docs(volume_server): record query cookie-mismatch parity coverage * test(volume_server/grpc): add ping master success target coverage * docs(volume_server): record ping master success coverage * test(volume_server/http): add head if-none-match conditional parity * docs(volume_server): record head if-none-match parity coverage * test(volume_server/http): add head if-modified-since parity coverage * docs(volume_server): record head if-modified-since parity coverage * test(volume_server/http): add connect unsupported-method parity coverage * docs(volume_server): record connect method parity coverage * test(volume_server/http): assert options allow-headers cors parity * docs(volume_server): record options allow-headers coverage * test(volume_server/framework): add dual volume 
cluster integration harness * test(volume_server/http): add missing-local read mode proxy redirect local coverage * docs(volume_server): record read mode missing-local matrix coverage * test(volume_server/http): add download over-limit replica proxy fallback coverage * docs(volume_server): record download replica fallback coverage * test(volume_server/http): add missing-local readDeleted proxy redirect parity coverage * docs(volume_server): record missing-local readDeleted mode coverage * test(volume_server/framework): add single-volume cluster with filer harness * test(volume_server/grpc): add ping filer success target coverage * docs(volume_server): record ping filer success coverage * test(volume_server/http): add proxied-loop guard download timeout coverage * docs(volume_server): record proxied-loop download coverage * test(volume_server/http): add disabled upload and download limit coverage * docs(volume_server): record disabled throttling path coverage * test(volume_server/grpc): add idempotent volume server leave coverage * docs(volume_server): record leave idempotence coverage * test(volume_server/http): add redirect collection query preservation coverage * docs(volume_server): record redirect collection query coverage * test(volume_server/http): assert admin server headers on status and health * docs(volume_server): record admin server header coverage * test(volume_server/http): assert healthz request-id echo parity * docs(volume_server): record healthz request-id parity coverage * test(volume_server/http): add over-limit invalid-vid download branch coverage * docs(volume_server): record over-limit invalid-vid branch coverage * test(volume_server/http): add public-port static asset coverage * docs(volume_server): record public static endpoint coverage * test(volume_server/http): add public head method parity coverage * docs(volume_server): record public head parity coverage * test(volume_server/http): add throttling wait-then-proceed path coverage * 
docs(volume_server): record throttling wait-then-proceed coverage * test(volume_server/http): add read cookie-mismatch not-found coverage * docs(volume_server): record read cookie-mismatch coverage * test(volume_server/http): add throttling timeout-recovery coverage * docs(volume_server): record throttling timeout-recovery coverage * test(volume_server/grpc): add ec generate mount info unmount lifecycle coverage * docs(volume_server): record ec positive lifecycle coverage * test(volume_server/grpc): add ec shard read and blob delete lifecycle coverage * docs(volume_server): record ec shard read/blob delete lifecycle coverage * test(volume_server/grpc): add ec rebuild and to-volume error branch coverage * docs(volume_server): record ec rebuild and to-volume branch coverage * test(volume_server/grpc): add ec shards-to-volume success roundtrip coverage * docs(volume_server): record ec shards-to-volume success coverage * test(volume_server/grpc): add ec receive and copy-file missing-source coverage * docs(volume_server): record ec receive and copy-file coverage * test(volume_server/grpc): add ec last-shard delete cleanup coverage * docs(volume_server): record ec last-shard delete cleanup coverage * test(volume_server/grpc): add volume copy success path coverage * docs(volume_server): record volume copy success coverage * test(volume_server/grpc): add volume copy overwrite-destination coverage * docs(volume_server): record volume copy overwrite coverage * test(volume_server/http): add write error-path variant coverage * docs(volume_server): record http write error-path coverage * test(volume_server/http): add conditional header precedence coverage * docs(volume_server): record conditional header precedence coverage * test(volume_server/http): add oversized combined range guard coverage * docs(volume_server): record oversized range guard coverage * test(volume_server/http): add image resize and crop read coverage * docs(volume_server): record image transform coverage * 
test(volume_server/http): add chunk-manifest expansion and bypass coverage * docs(volume_server): record chunk-manifest read coverage * test(volume_server/http): add compressed read encoding matrix coverage * docs(volume_server): record compressed read matrix coverage * test(volume_server/grpc): add tail receiver source replication coverage * docs(volume_server): record tail receiver replication coverage * test(volume_server/grpc): add tail sender large-needle chunking coverage * docs(volume_server): record tail sender chunking coverage * test(volume_server/grpc): add ec-backed volume needle status coverage * docs(volume_server): record ec-backed needle status coverage * test(volume_server/grpc): add ec shard copy from peer success coverage * docs(volume_server): record ec shard copy success coverage * test(volume_server/http): add chunk-manifest delete child cleanup coverage * docs(volume_server): record chunk-manifest delete cleanup coverage * test(volume_server/http): add chunk-manifest delete failure-path coverage * docs(volume_server): record chunk-manifest delete failure coverage * test(volume_server/grpc): add ec shard copy source-unavailable coverage * docs(volume_server): record ec shard copy source-unavailable coverage * parallel
This commit is contained in:
442
test/volume_server/framework/cluster.go
Normal file
442
test/volume_server/framework/cluster.go
Normal file
@@ -0,0 +1,442 @@
|
||||
package framework
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
|
||||
)
|
||||
|
||||
const (
	// defaultWaitTimeout bounds how long a readiness poll (HTTP or TCP) may run
	// before the harness gives up and fails the test.
	defaultWaitTimeout = 30 * time.Second
	// defaultWaitTick is the pause between consecutive readiness poll attempts.
	defaultWaitTick = 200 * time.Millisecond
	// testVolumeSizeLimitMB is passed to the master's -volumeSizeLimitMB flag;
	// kept small so volume-size-limit code paths can be exercised quickly.
	testVolumeSizeLimitMB = 32
)
|
||||
|
||||
// Cluster is a lightweight SeaweedFS master + one volume server test harness.
type Cluster struct {
	testingTB testing.TB     // owning test; used for cleanup logging and Failed() checks
	profile   matrix.Profile // matrix profile that shaped this cluster's process flags

	weedBinary string // path to the weed executable used for both processes
	baseDir    string // per-cluster temp root holding config/, logs/, and data dirs
	configDir  string // directory containing the generated security.toml
	logsDir    string // directory receiving master.log and volume.log
	keepLogs   bool   // when true, baseDir is preserved after Stop

	masterPort     int // master HTTP port
	masterGrpcPort int // master gRPC port (HTTP port + 10000 by convention)
	volumePort     int // volume server admin HTTP port
	volumeGrpcPort int // volume server gRPC port
	volumePubPort  int // volume server public HTTP port (equals volumePort unless split)

	masterCmd *exec.Cmd // running master process, nil until started
	volumeCmd *exec.Cmd // running volume server process, nil until started

	cleanupOnce sync.Once // guards Stop so teardown runs exactly once
}
|
||||
|
||||
// StartSingleVolumeCluster boots one master and one volume server.
//
// It resolves (or builds) the weed binary, prepares a temp working tree,
// writes the profile's security config, allocates ports, starts both
// processes, and blocks until the master HTTP, volume HTTP, and volume gRPC
// endpoints all answer. Any failure aborts the test via t.Fatalf after
// stopping whatever was already started. Teardown is registered with
// t.Cleanup, so callers normally never call Stop themselves.
func StartSingleVolumeCluster(t testing.TB, profile matrix.Profile) *Cluster {
	t.Helper()

	weedBinary, err := FindOrBuildWeedBinary()
	if err != nil {
		t.Fatalf("resolve weed binary: %v", err)
	}

	baseDir, keepLogs, err := newWorkDir()
	if err != nil {
		t.Fatalf("create temp test directory: %v", err)
	}

	// Per-cluster directory layout: config/, logs/, master/, volume/.
	configDir := filepath.Join(baseDir, "config")
	logsDir := filepath.Join(baseDir, "logs")
	masterDataDir := filepath.Join(baseDir, "master")
	volumeDataDir := filepath.Join(baseDir, "volume")
	for _, dir := range []string{configDir, logsDir, masterDataDir, volumeDataDir} {
		if mkErr := os.MkdirAll(dir, 0o755); mkErr != nil {
			t.Fatalf("create %s: %v", dir, mkErr)
		}
	}

	if err = writeSecurityConfig(configDir, profile); err != nil {
		t.Fatalf("write security config: %v", err)
	}

	masterPort, masterGrpcPort, err := allocateMasterPortPair()
	if err != nil {
		t.Fatalf("allocate master port pair: %v", err)
	}

	// Three ports: volume HTTP, volume gRPC, and an optional dedicated
	// public port used only when the profile splits it off.
	ports, err := allocatePorts(3)
	if err != nil {
		t.Fatalf("allocate ports: %v", err)
	}

	c := &Cluster{
		testingTB:      t,
		profile:        profile,
		weedBinary:     weedBinary,
		baseDir:        baseDir,
		configDir:      configDir,
		logsDir:        logsDir,
		keepLogs:       keepLogs,
		masterPort:     masterPort,
		masterGrpcPort: masterGrpcPort,
		volumePort:     ports[0],
		volumeGrpcPort: ports[1],
		volumePubPort:  ports[0], // public port defaults to the admin port
	}
	if profile.SplitPublicPort {
		c.volumePubPort = ports[2]
	}

	// Start master first; the volume server registers against it.
	if err = c.startMaster(masterDataDir); err != nil {
		c.Stop()
		t.Fatalf("start master: %v", err)
	}
	if err = c.waitForHTTP(c.MasterURL() + "/dir/status"); err != nil {
		masterLog := c.tailLog("master.log")
		c.Stop()
		t.Fatalf("wait for master readiness: %v\nmaster log tail:\n%s", err, masterLog)
	}

	if err = c.startVolume(volumeDataDir); err != nil {
		masterLog := c.tailLog("master.log")
		c.Stop()
		t.Fatalf("start volume: %v\nmaster log tail:\n%s", err, masterLog)
	}
	if err = c.waitForHTTP(c.VolumeAdminURL() + "/status"); err != nil {
		volumeLog := c.tailLog("volume.log")
		c.Stop()
		t.Fatalf("wait for volume readiness: %v\nvolume log tail:\n%s", err, volumeLog)
	}
	// HTTP readiness does not imply the gRPC listener is up; check it separately.
	if err = c.waitForTCP(c.VolumeGRPCAddress()); err != nil {
		volumeLog := c.tailLog("volume.log")
		c.Stop()
		t.Fatalf("wait for volume grpc readiness: %v\nvolume log tail:\n%s", err, volumeLog)
	}

	t.Cleanup(func() {
		c.Stop()
	})

	return c
}
|
||||
|
||||
// Stop terminates all processes and cleans temporary files.
|
||||
func (c *Cluster) Stop() {
|
||||
if c == nil {
|
||||
return
|
||||
}
|
||||
c.cleanupOnce.Do(func() {
|
||||
stopProcess(c.volumeCmd)
|
||||
stopProcess(c.masterCmd)
|
||||
if !c.keepLogs && !c.testingTB.Failed() {
|
||||
_ = os.RemoveAll(c.baseDir)
|
||||
} else if c.baseDir != "" {
|
||||
c.testingTB.Logf("volume server integration logs kept at %s", c.baseDir)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func (c *Cluster) startMaster(dataDir string) error {
|
||||
logFile, err := os.Create(filepath.Join(c.logsDir, "master.log"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
args := []string{
|
||||
"-config_dir=" + c.configDir,
|
||||
"master",
|
||||
"-ip=127.0.0.1",
|
||||
"-port=" + strconv.Itoa(c.masterPort),
|
||||
"-port.grpc=" + strconv.Itoa(c.masterGrpcPort),
|
||||
"-mdir=" + dataDir,
|
||||
"-peers=none",
|
||||
"-volumeSizeLimitMB=" + strconv.Itoa(testVolumeSizeLimitMB),
|
||||
"-defaultReplication=000",
|
||||
}
|
||||
|
||||
c.masterCmd = exec.Command(c.weedBinary, args...)
|
||||
c.masterCmd.Dir = c.baseDir
|
||||
c.masterCmd.Stdout = logFile
|
||||
c.masterCmd.Stderr = logFile
|
||||
return c.masterCmd.Start()
|
||||
}
|
||||
|
||||
func (c *Cluster) startVolume(dataDir string) error {
|
||||
logFile, err := os.Create(filepath.Join(c.logsDir, "volume.log"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
args := []string{
|
||||
"-config_dir=" + c.configDir,
|
||||
"volume",
|
||||
"-ip=127.0.0.1",
|
||||
"-port=" + strconv.Itoa(c.volumePort),
|
||||
"-port.grpc=" + strconv.Itoa(c.volumeGrpcPort),
|
||||
"-port.public=" + strconv.Itoa(c.volumePubPort),
|
||||
"-dir=" + dataDir,
|
||||
"-max=16",
|
||||
"-master=127.0.0.1:" + strconv.Itoa(c.masterPort),
|
||||
"-readMode=" + c.profile.ReadMode,
|
||||
"-concurrentUploadLimitMB=" + strconv.Itoa(c.profile.ConcurrentUploadLimitMB),
|
||||
"-concurrentDownloadLimitMB=" + strconv.Itoa(c.profile.ConcurrentDownloadLimitMB),
|
||||
}
|
||||
if c.profile.InflightUploadTimeout > 0 {
|
||||
args = append(args, "-inflightUploadDataTimeout="+c.profile.InflightUploadTimeout.String())
|
||||
}
|
||||
if c.profile.InflightDownloadTimeout > 0 {
|
||||
args = append(args, "-inflightDownloadDataTimeout="+c.profile.InflightDownloadTimeout.String())
|
||||
}
|
||||
|
||||
c.volumeCmd = exec.Command(c.weedBinary, args...)
|
||||
c.volumeCmd.Dir = c.baseDir
|
||||
c.volumeCmd.Stdout = logFile
|
||||
c.volumeCmd.Stderr = logFile
|
||||
return c.volumeCmd.Start()
|
||||
}
|
||||
|
||||
func (c *Cluster) waitForHTTP(url string) error {
|
||||
client := &http.Client{Timeout: 1 * time.Second}
|
||||
deadline := time.Now().Add(defaultWaitTimeout)
|
||||
for time.Now().Before(deadline) {
|
||||
resp, err := client.Get(url)
|
||||
if err == nil {
|
||||
_, _ = io.Copy(io.Discard, resp.Body)
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode < 500 {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
time.Sleep(defaultWaitTick)
|
||||
}
|
||||
return fmt.Errorf("timed out waiting for %s", url)
|
||||
}
|
||||
|
||||
func (c *Cluster) waitForTCP(addr string) error {
|
||||
deadline := time.Now().Add(defaultWaitTimeout)
|
||||
for time.Now().Before(deadline) {
|
||||
conn, err := net.DialTimeout("tcp", addr, time.Second)
|
||||
if err == nil {
|
||||
_ = conn.Close()
|
||||
return nil
|
||||
}
|
||||
time.Sleep(defaultWaitTick)
|
||||
}
|
||||
return fmt.Errorf("timed out waiting for tcp %s", addr)
|
||||
}
|
||||
|
||||
func stopProcess(cmd *exec.Cmd) {
|
||||
if cmd == nil || cmd.Process == nil {
|
||||
return
|
||||
}
|
||||
|
||||
_ = cmd.Process.Signal(os.Interrupt)
|
||||
done := make(chan error, 1)
|
||||
go func() {
|
||||
done <- cmd.Wait()
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-time.After(10 * time.Second):
|
||||
_ = cmd.Process.Kill()
|
||||
<-done
|
||||
case <-done:
|
||||
}
|
||||
}
|
||||
|
||||
// allocatePorts reserves count distinct ephemeral localhost ports by briefly
// binding listeners and releasing them all before returning. A released port
// could in principle be re-taken before use, but collisions are rare enough
// for test purposes.
func allocatePorts(count int) ([]int, error) {
	listeners := make([]net.Listener, 0, count)
	defer func() {
		for _, l := range listeners {
			_ = l.Close()
		}
	}()

	ports := make([]int, 0, count)
	for len(ports) < count {
		l, err := net.Listen("tcp", "127.0.0.1:0")
		if err != nil {
			return nil, err
		}
		listeners = append(listeners, l)
		ports = append(ports, l.Addr().(*net.TCPAddr).Port)
	}
	return ports, nil
}
|
||||
|
||||
// allocateMasterPortPair scans for a free HTTP port whose gRPC counterpart
// (HTTP port + 10000, matching the weed master's port convention) is also
// free, probing both by binding and immediately releasing listeners.
func allocateMasterPortPair() (int, int, error) {
	for httpPort := 10000; httpPort <= 55535; httpPort++ {
		grpcPort := httpPort + 10000

		httpListener, err := net.Listen("tcp", net.JoinHostPort("127.0.0.1", strconv.Itoa(httpPort)))
		if err != nil {
			continue // HTTP port busy; try the next candidate
		}
		grpcListener, err := net.Listen("tcp", net.JoinHostPort("127.0.0.1", strconv.Itoa(grpcPort)))
		if err != nil {
			// gRPC counterpart busy; release the HTTP port and keep scanning.
			_ = httpListener.Close()
			continue
		}

		_ = grpcListener.Close()
		_ = httpListener.Close()
		return httpPort, grpcPort, nil
	}
	return 0, 0, errors.New("unable to find available master port pair")
}
|
||||
|
||||
func newWorkDir() (dir string, keepLogs bool, err error) {
|
||||
keepLogs = os.Getenv("VOLUME_SERVER_IT_KEEP_LOGS") == "1"
|
||||
dir, err = os.MkdirTemp("", "seaweedfs_volume_server_it_")
|
||||
return dir, keepLogs, err
|
||||
}
|
||||
|
||||
func writeSecurityConfig(configDir string, profile matrix.Profile) error {
|
||||
var b strings.Builder
|
||||
if profile.EnableJWT {
|
||||
if profile.JWTSigningKey == "" || profile.JWTReadKey == "" {
|
||||
return errors.New("jwt profile requires both write and read keys")
|
||||
}
|
||||
b.WriteString("[jwt.signing]\n")
|
||||
b.WriteString("key = \"")
|
||||
b.WriteString(profile.JWTSigningKey)
|
||||
b.WriteString("\"\n")
|
||||
b.WriteString("expires_after_seconds = 60\n\n")
|
||||
|
||||
b.WriteString("[jwt.signing.read]\n")
|
||||
b.WriteString("key = \"")
|
||||
b.WriteString(profile.JWTReadKey)
|
||||
b.WriteString("\"\n")
|
||||
b.WriteString("expires_after_seconds = 60\n")
|
||||
}
|
||||
if b.Len() == 0 {
|
||||
b.WriteString("# optional security config generated for integration tests\n")
|
||||
}
|
||||
return os.WriteFile(filepath.Join(configDir, "security.toml"), []byte(b.String()), 0o644)
|
||||
}
|
||||
|
||||
// FindOrBuildWeedBinary returns an executable weed binary, building one when needed.
|
||||
func FindOrBuildWeedBinary() (string, error) {
|
||||
if fromEnv := os.Getenv("WEED_BINARY"); fromEnv != "" {
|
||||
if isExecutableFile(fromEnv) {
|
||||
return fromEnv, nil
|
||||
}
|
||||
return "", fmt.Errorf("WEED_BINARY is set but not executable: %s", fromEnv)
|
||||
}
|
||||
|
||||
repoRoot := ""
|
||||
if _, file, _, ok := runtime.Caller(0); ok {
|
||||
repoRoot = filepath.Clean(filepath.Join(filepath.Dir(file), "..", "..", ".."))
|
||||
candidate := filepath.Join(repoRoot, "weed", "weed")
|
||||
if isExecutableFile(candidate) {
|
||||
return candidate, nil
|
||||
}
|
||||
}
|
||||
|
||||
if repoRoot == "" {
|
||||
return "", errors.New("unable to detect repository root")
|
||||
}
|
||||
|
||||
binDir := filepath.Join(os.TempDir(), "seaweedfs_volume_server_it_bin")
|
||||
if err := os.MkdirAll(binDir, 0o755); err != nil {
|
||||
return "", fmt.Errorf("create binary directory %s: %w", binDir, err)
|
||||
}
|
||||
binPath := filepath.Join(binDir, "weed")
|
||||
if isExecutableFile(binPath) {
|
||||
return binPath, nil
|
||||
}
|
||||
|
||||
cmd := exec.Command("go", "build", "-o", binPath, ".")
|
||||
cmd.Dir = filepath.Join(repoRoot, "weed")
|
||||
var out bytes.Buffer
|
||||
cmd.Stdout = &out
|
||||
cmd.Stderr = &out
|
||||
if err := cmd.Run(); err != nil {
|
||||
return "", fmt.Errorf("build weed binary: %w\n%s", err, out.String())
|
||||
}
|
||||
if !isExecutableFile(binPath) {
|
||||
return "", fmt.Errorf("built weed binary is not executable: %s", binPath)
|
||||
}
|
||||
return binPath, nil
|
||||
}
|
||||
|
||||
func isExecutableFile(path string) bool {
|
||||
info, err := os.Stat(path)
|
||||
if err != nil || info.IsDir() {
|
||||
return false
|
||||
}
|
||||
mode := info.Mode().Perm()
|
||||
return mode&0o111 != 0
|
||||
}
|
||||
|
||||
func (c *Cluster) tailLog(logName string) string {
|
||||
f, err := os.Open(filepath.Join(c.logsDir, logName))
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
scanner := bufio.NewScanner(f)
|
||||
lines := make([]string, 0, 40)
|
||||
for scanner.Scan() {
|
||||
lines = append(lines, scanner.Text())
|
||||
if len(lines) > 40 {
|
||||
lines = lines[1:]
|
||||
}
|
||||
}
|
||||
return strings.Join(lines, "\n")
|
||||
}
|
||||
|
||||
// MasterAddress returns the master's HTTP host:port on loopback.
func (c *Cluster) MasterAddress() string {
	return net.JoinHostPort("127.0.0.1", strconv.Itoa(c.masterPort))
}
|
||||
|
||||
// VolumeAdminAddress returns the volume server's admin HTTP host:port on loopback.
func (c *Cluster) VolumeAdminAddress() string {
	return net.JoinHostPort("127.0.0.1", strconv.Itoa(c.volumePort))
}
|
||||
|
||||
// VolumePublicAddress returns the volume server's public HTTP host:port on
// loopback (equal to the admin address unless the profile splits the public port).
func (c *Cluster) VolumePublicAddress() string {
	return net.JoinHostPort("127.0.0.1", strconv.Itoa(c.volumePubPort))
}
|
||||
|
||||
// VolumeGRPCAddress returns the volume server's gRPC host:port on loopback.
func (c *Cluster) VolumeGRPCAddress() string {
	return net.JoinHostPort("127.0.0.1", strconv.Itoa(c.volumeGrpcPort))
}
|
||||
|
||||
// VolumeServerAddress returns SeaweedFS server address format: ip:httpPort.grpcPort
// (the admin host:port with the gRPC port appended after a dot).
func (c *Cluster) VolumeServerAddress() string {
	return fmt.Sprintf("%s.%d", c.VolumeAdminAddress(), c.volumeGrpcPort)
}
|
||||
|
||||
// MasterURL returns the master's base HTTP URL.
func (c *Cluster) MasterURL() string {
	return "http://" + c.MasterAddress()
}
|
||||
|
||||
// VolumeAdminURL returns the volume server's admin base HTTP URL.
func (c *Cluster) VolumeAdminURL() string {
	return "http://" + c.VolumeAdminAddress()
}
|
||||
|
||||
// VolumePublicURL returns the volume server's public base HTTP URL.
func (c *Cluster) VolumePublicURL() string {
	return "http://" + c.VolumePublicAddress()
}
|
||||
|
||||
// BaseDir returns the cluster's temporary working directory
// (holding config/, logs/, and data directories).
func (c *Cluster) BaseDir() string {
	return c.baseDir
}
|
||||
293
test/volume_server/framework/cluster_dual.go
Normal file
293
test/volume_server/framework/cluster_dual.go
Normal file
@@ -0,0 +1,293 @@
|
||||
package framework
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
|
||||
)
|
||||
|
||||
// DualVolumeCluster is a SeaweedFS test harness running one master and two
// volume servers, for tests that need replication, proxying, or copy targets.
type DualVolumeCluster struct {
	testingTB testing.TB     // owning test; used for cleanup logging and Failed() checks
	profile   matrix.Profile // matrix profile that shaped this cluster's process flags

	weedBinary string // path to the weed executable used for all processes
	baseDir    string // per-cluster temp root holding config/, logs/, and data dirs
	configDir  string // directory containing the generated security.toml
	logsDir    string // directory receiving master.log, volume0.log, volume1.log
	keepLogs   bool   // when true, baseDir is preserved after Stop

	masterPort     int // master HTTP port
	masterGrpcPort int // master gRPC port (HTTP port + 10000 by convention)

	volumePort0     int // first volume server admin HTTP port
	volumeGrpcPort0 int // first volume server gRPC port
	volumePubPort0  int // first volume server public port (equals admin port unless split)
	volumePort1     int // second volume server admin HTTP port
	volumeGrpcPort1 int // second volume server gRPC port
	volumePubPort1  int // second volume server public port (equals admin port unless split)

	masterCmd  *exec.Cmd // running master process, nil until started
	volumeCmd0 *exec.Cmd // first running volume server process
	volumeCmd1 *exec.Cmd // second running volume server process

	cleanupOnce sync.Once // guards Stop so teardown runs exactly once
}
|
||||
|
||||
// StartDualVolumeCluster boots one master and two volume servers.
//
// It mirrors StartSingleVolumeCluster but provisions two volume data
// directories and two port triples, starting and readiness-checking the
// volume servers sequentially. Any failure aborts via t.Fatalf after stopping
// whatever already started; teardown is registered with t.Cleanup.
func StartDualVolumeCluster(t testing.TB, profile matrix.Profile) *DualVolumeCluster {
	t.Helper()

	weedBinary, err := FindOrBuildWeedBinary()
	if err != nil {
		t.Fatalf("resolve weed binary: %v", err)
	}

	baseDir, keepLogs, err := newWorkDir()
	if err != nil {
		t.Fatalf("create temp test directory: %v", err)
	}

	// Per-cluster directory layout: config/, logs/, master/, volume0/, volume1/.
	configDir := filepath.Join(baseDir, "config")
	logsDir := filepath.Join(baseDir, "logs")
	masterDataDir := filepath.Join(baseDir, "master")
	volumeDataDir0 := filepath.Join(baseDir, "volume0")
	volumeDataDir1 := filepath.Join(baseDir, "volume1")
	for _, dir := range []string{configDir, logsDir, masterDataDir, volumeDataDir0, volumeDataDir1} {
		if mkErr := os.MkdirAll(dir, 0o755); mkErr != nil {
			t.Fatalf("create %s: %v", dir, mkErr)
		}
	}

	if err = writeSecurityConfig(configDir, profile); err != nil {
		t.Fatalf("write security config: %v", err)
	}

	masterPort, masterGrpcPort, err := allocateMasterPortPair()
	if err != nil {
		t.Fatalf("allocate master port pair: %v", err)
	}

	// Six ports: HTTP + gRPC for each volume server, plus two optional
	// dedicated public ports used only when the profile splits them.
	ports, err := allocatePorts(6)
	if err != nil {
		t.Fatalf("allocate volume ports: %v", err)
	}

	c := &DualVolumeCluster{
		testingTB:       t,
		profile:         profile,
		weedBinary:      weedBinary,
		baseDir:         baseDir,
		configDir:       configDir,
		logsDir:         logsDir,
		keepLogs:        keepLogs,
		masterPort:      masterPort,
		masterGrpcPort:  masterGrpcPort,
		volumePort0:     ports[0],
		volumeGrpcPort0: ports[1],
		volumePubPort0:  ports[0], // public ports default to the admin ports
		volumePort1:     ports[2],
		volumeGrpcPort1: ports[3],
		volumePubPort1:  ports[2],
	}
	if profile.SplitPublicPort {
		c.volumePubPort0 = ports[4]
		c.volumePubPort1 = ports[5]
	}

	// Start master first; both volume servers register against it.
	if err = c.startMaster(masterDataDir); err != nil {
		c.Stop()
		t.Fatalf("start master: %v", err)
	}
	if err = c.waitForHTTP(c.MasterURL() + "/dir/status"); err != nil {
		masterLog := c.tailLog("master.log")
		c.Stop()
		t.Fatalf("wait for master readiness: %v\nmaster log tail:\n%s", err, masterLog)
	}

	if err = c.startVolume(0, volumeDataDir0); err != nil {
		masterLog := c.tailLog("master.log")
		c.Stop()
		t.Fatalf("start first volume server: %v\nmaster log tail:\n%s", err, masterLog)
	}
	if err = c.waitForHTTP(c.VolumeAdminURL(0) + "/status"); err != nil {
		volumeLog := c.tailLog("volume0.log")
		c.Stop()
		t.Fatalf("wait for first volume readiness: %v\nvolume log tail:\n%s", err, volumeLog)
	}
	// HTTP readiness does not imply the gRPC listener is up; check separately.
	if err = c.waitForTCP(c.VolumeGRPCAddress(0)); err != nil {
		volumeLog := c.tailLog("volume0.log")
		c.Stop()
		t.Fatalf("wait for first volume grpc readiness: %v\nvolume log tail:\n%s", err, volumeLog)
	}

	if err = c.startVolume(1, volumeDataDir1); err != nil {
		volumeLog := c.tailLog("volume0.log")
		c.Stop()
		t.Fatalf("start second volume server: %v\nfirst volume log tail:\n%s", err, volumeLog)
	}
	if err = c.waitForHTTP(c.VolumeAdminURL(1) + "/status"); err != nil {
		volumeLog := c.tailLog("volume1.log")
		c.Stop()
		t.Fatalf("wait for second volume readiness: %v\nvolume log tail:\n%s", err, volumeLog)
	}
	if err = c.waitForTCP(c.VolumeGRPCAddress(1)); err != nil {
		volumeLog := c.tailLog("volume1.log")
		c.Stop()
		t.Fatalf("wait for second volume grpc readiness: %v\nvolume log tail:\n%s", err, volumeLog)
	}

	t.Cleanup(func() {
		c.Stop()
	})

	return c
}
|
||||
|
||||
func (c *DualVolumeCluster) Stop() {
|
||||
if c == nil {
|
||||
return
|
||||
}
|
||||
c.cleanupOnce.Do(func() {
|
||||
stopProcess(c.volumeCmd1)
|
||||
stopProcess(c.volumeCmd0)
|
||||
stopProcess(c.masterCmd)
|
||||
if !c.keepLogs && !c.testingTB.Failed() {
|
||||
_ = os.RemoveAll(c.baseDir)
|
||||
} else if c.baseDir != "" {
|
||||
c.testingTB.Logf("volume server integration logs kept at %s", c.baseDir)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func (c *DualVolumeCluster) startMaster(dataDir string) error {
|
||||
logFile, err := os.Create(filepath.Join(c.logsDir, "master.log"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
args := []string{
|
||||
"-config_dir=" + c.configDir,
|
||||
"master",
|
||||
"-ip=127.0.0.1",
|
||||
"-port=" + strconv.Itoa(c.masterPort),
|
||||
"-port.grpc=" + strconv.Itoa(c.masterGrpcPort),
|
||||
"-mdir=" + dataDir,
|
||||
"-peers=none",
|
||||
"-volumeSizeLimitMB=" + strconv.Itoa(testVolumeSizeLimitMB),
|
||||
"-defaultReplication=000",
|
||||
}
|
||||
|
||||
c.masterCmd = exec.Command(c.weedBinary, args...)
|
||||
c.masterCmd.Dir = c.baseDir
|
||||
c.masterCmd.Stdout = logFile
|
||||
c.masterCmd.Stderr = logFile
|
||||
return c.masterCmd.Start()
|
||||
}
|
||||
|
||||
func (c *DualVolumeCluster) startVolume(index int, dataDir string) error {
|
||||
logName := fmt.Sprintf("volume%d.log", index)
|
||||
logFile, err := os.Create(filepath.Join(c.logsDir, logName))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
volumePort := c.volumePort0
|
||||
volumeGrpcPort := c.volumeGrpcPort0
|
||||
volumePubPort := c.volumePubPort0
|
||||
if index == 1 {
|
||||
volumePort = c.volumePort1
|
||||
volumeGrpcPort = c.volumeGrpcPort1
|
||||
volumePubPort = c.volumePubPort1
|
||||
}
|
||||
|
||||
args := []string{
|
||||
"-config_dir=" + c.configDir,
|
||||
"volume",
|
||||
"-ip=127.0.0.1",
|
||||
"-port=" + strconv.Itoa(volumePort),
|
||||
"-port.grpc=" + strconv.Itoa(volumeGrpcPort),
|
||||
"-port.public=" + strconv.Itoa(volumePubPort),
|
||||
"-dir=" + dataDir,
|
||||
"-max=16",
|
||||
"-master=127.0.0.1:" + strconv.Itoa(c.masterPort),
|
||||
"-readMode=" + c.profile.ReadMode,
|
||||
"-concurrentUploadLimitMB=" + strconv.Itoa(c.profile.ConcurrentUploadLimitMB),
|
||||
"-concurrentDownloadLimitMB=" + strconv.Itoa(c.profile.ConcurrentDownloadLimitMB),
|
||||
}
|
||||
if c.profile.InflightUploadTimeout > 0 {
|
||||
args = append(args, "-inflightUploadDataTimeout="+c.profile.InflightUploadTimeout.String())
|
||||
}
|
||||
if c.profile.InflightDownloadTimeout > 0 {
|
||||
args = append(args, "-inflightDownloadDataTimeout="+c.profile.InflightDownloadTimeout.String())
|
||||
}
|
||||
|
||||
cmd := exec.Command(c.weedBinary, args...)
|
||||
cmd.Dir = c.baseDir
|
||||
cmd.Stdout = logFile
|
||||
cmd.Stderr = logFile
|
||||
|
||||
if err = cmd.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
if index == 1 {
|
||||
c.volumeCmd1 = cmd
|
||||
} else {
|
||||
c.volumeCmd0 = cmd
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *DualVolumeCluster) waitForHTTP(url string) error {
|
||||
return (&Cluster{}).waitForHTTP(url)
|
||||
}
|
||||
|
||||
func (c *DualVolumeCluster) waitForTCP(addr string) error {
|
||||
return (&Cluster{}).waitForTCP(addr)
|
||||
}
|
||||
|
||||
func (c *DualVolumeCluster) tailLog(logName string) string {
|
||||
return (&Cluster{logsDir: c.logsDir}).tailLog(logName)
|
||||
}
|
||||
|
||||
func (c *DualVolumeCluster) MasterAddress() string {
|
||||
return net.JoinHostPort("127.0.0.1", strconv.Itoa(c.masterPort))
|
||||
}
|
||||
|
||||
func (c *DualVolumeCluster) MasterURL() string {
|
||||
return "http://" + c.MasterAddress()
|
||||
}
|
||||
|
||||
func (c *DualVolumeCluster) VolumeAdminAddress(index int) string {
|
||||
if index == 1 {
|
||||
return net.JoinHostPort("127.0.0.1", strconv.Itoa(c.volumePort1))
|
||||
}
|
||||
return net.JoinHostPort("127.0.0.1", strconv.Itoa(c.volumePort0))
|
||||
}
|
||||
|
||||
func (c *DualVolumeCluster) VolumePublicAddress(index int) string {
|
||||
if index == 1 {
|
||||
return net.JoinHostPort("127.0.0.1", strconv.Itoa(c.volumePubPort1))
|
||||
}
|
||||
return net.JoinHostPort("127.0.0.1", strconv.Itoa(c.volumePubPort0))
|
||||
}
|
||||
|
||||
func (c *DualVolumeCluster) VolumeGRPCAddress(index int) string {
|
||||
if index == 1 {
|
||||
return net.JoinHostPort("127.0.0.1", strconv.Itoa(c.volumeGrpcPort1))
|
||||
}
|
||||
return net.JoinHostPort("127.0.0.1", strconv.Itoa(c.volumeGrpcPort0))
|
||||
}
|
||||
|
||||
func (c *DualVolumeCluster) VolumeAdminURL(index int) string {
|
||||
return "http://" + c.VolumeAdminAddress(index)
|
||||
}
|
||||
|
||||
func (c *DualVolumeCluster) VolumePublicURL(index int) string {
|
||||
return "http://" + c.VolumePublicAddress(index)
|
||||
}
|
||||
91
test/volume_server/framework/cluster_with_filer.go
Normal file
91
test/volume_server/framework/cluster_with_filer.go
Normal file
@@ -0,0 +1,91 @@
|
||||
package framework
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
|
||||
)
|
||||
|
||||
// ClusterWithFiler extends a single-volume Cluster with a weed filer process,
// for tests that need filer endpoints in addition to master and volume server.
type ClusterWithFiler struct {
	*Cluster

	filerCmd      *exec.Cmd // running filer process; stopped via t.Cleanup
	filerPort     int       // filer HTTP port
	filerGrpcPort int       // filer gRPC port
}
|
||||
|
||||
// StartSingleVolumeClusterWithFiler starts a single-volume cluster and then a
// filer attached to its master. The filer is stopped automatically through
// t.Cleanup. Readiness is gated only on the filer's gRPC port accepting TCP
// connections; its HTTP endpoint is not probed here.
func StartSingleVolumeClusterWithFiler(t testing.TB, profile matrix.Profile) *ClusterWithFiler {
	t.Helper()

	// Base cluster registers its own cleanup, so failures below do not leak it.
	baseCluster := StartSingleVolumeCluster(t, profile)

	// Two fresh ports: one HTTP, one gRPC, for the filer.
	ports, err := allocatePorts(2)
	if err != nil {
		t.Fatalf("allocate filer ports: %v", err)
	}

	filerDataDir := filepath.Join(baseCluster.baseDir, "filer")
	if mkErr := os.MkdirAll(filerDataDir, 0o755); mkErr != nil {
		t.Fatalf("create filer data dir: %v", mkErr)
	}

	// Combined stdout/stderr goes to filer.log alongside the other process logs.
	logFile, err := os.Create(filepath.Join(baseCluster.logsDir, "filer.log"))
	if err != nil {
		t.Fatalf("create filer log file: %v", err)
	}

	filerPort := ports[0]
	filerGrpcPort := ports[1]
	args := []string{
		"-config_dir=" + baseCluster.configDir,
		"filer",
		"-master=127.0.0.1:" + strconv.Itoa(baseCluster.masterPort),
		"-ip=127.0.0.1",
		"-port=" + strconv.Itoa(filerPort),
		"-port.grpc=" + strconv.Itoa(filerGrpcPort),
		"-defaultStoreDir=" + filerDataDir,
	}

	filerCmd := exec.Command(baseCluster.weedBinary, args...)
	filerCmd.Dir = baseCluster.baseDir
	filerCmd.Stdout = logFile
	filerCmd.Stderr = logFile
	if err = filerCmd.Start(); err != nil {
		t.Fatalf("start filer: %v", err)
	}

	// Wait for the filer's gRPC listener; on timeout, capture the log tail
	// before killing the process so the failure message is actionable.
	if err = baseCluster.waitForTCP(net.JoinHostPort("127.0.0.1", strconv.Itoa(filerGrpcPort))); err != nil {
		filerLogTail := baseCluster.tailLog("filer.log")
		stopProcess(filerCmd)
		t.Fatalf("wait for filer grpc readiness: %v\nfiler log tail:\n%s", err, filerLogTail)
	}

	t.Cleanup(func() {
		stopProcess(filerCmd)
	})

	return &ClusterWithFiler{
		Cluster:       baseCluster,
		filerCmd:      filerCmd,
		filerPort:     filerPort,
		filerGrpcPort: filerGrpcPort,
	}
}
|
||||
|
||||
func (c *ClusterWithFiler) FilerAddress() string {
|
||||
return net.JoinHostPort("127.0.0.1", strconv.Itoa(c.filerPort))
|
||||
}
|
||||
|
||||
func (c *ClusterWithFiler) FilerGRPCAddress() string {
|
||||
return net.JoinHostPort("127.0.0.1", strconv.Itoa(c.filerGrpcPort))
|
||||
}
|
||||
|
||||
func (c *ClusterWithFiler) FilerServerAddress() string {
|
||||
return fmt.Sprintf("%s.%d", c.FilerAddress(), c.filerGrpcPort)
|
||||
}
|
||||
8
test/volume_server/framework/fault_injection.go
Normal file
8
test/volume_server/framework/fault_injection.go
Normal file
@@ -0,0 +1,8 @@
|
||||
package framework
|
||||
|
||||
// Phase 0 placeholder for future fault injection utilities.
|
||||
//
|
||||
// Planned extensions:
|
||||
// - restart/kill selected processes
|
||||
// - temporary network isolation hooks
|
||||
// - master or peer outage helpers for proxy/replication branch coverage
|
||||
28
test/volume_server/framework/grpc_client.go
Normal file
28
test/volume_server/framework/grpc_client.go
Normal file
@@ -0,0 +1,28 @@
|
||||
package framework
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
)
|
||||
|
||||
// DialVolumeServer opens a plaintext gRPC connection to the volume server at
// address, blocking until the connection is established or a 10-second
// timeout elapses, and fails the test on error. Callers own the returned
// connection and are responsible for closing it.
//
// NOTE(review): grpc.DialContext and grpc.WithBlock are deprecated in newer
// grpc-go releases in favor of grpc.NewClient; the blocking dial is relied on
// here as a readiness gate — confirm behavior before migrating.
func DialVolumeServer(t testing.TB, address string) (*grpc.ClientConn, volume_server_pb.VolumeServerClient) {
	t.Helper()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	conn, err := grpc.DialContext(ctx, address,
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithBlock(),
	)
	if err != nil {
		t.Fatalf("dial volume grpc %s: %v", address, err)
	}

	return conn, volume_server_pb.NewVolumeServerClient(conn)
}
|
||||
34
test/volume_server/framework/http_client.go
Normal file
34
test/volume_server/framework/http_client.go
Normal file
@@ -0,0 +1,34 @@
|
||||
package framework
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net/http"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// NewHTTPClient returns an HTTP client with a 10-second overall timeout,
// suitable for requests against the local test servers.
func NewHTTPClient() *http.Client {
	client := http.Client{Timeout: 10 * time.Second}
	return &client
}
|
||||
|
||||
// DoRequest executes req with client and fails the test on a transport-level
// error; the response (including non-2xx statuses) is returned unchanged.
func DoRequest(t testing.TB, client *http.Client, req *http.Request) *http.Response {
	t.Helper()
	resp, err := client.Do(req)
	if err == nil {
		return resp
	}
	t.Fatalf("http request %s %s: %v", req.Method, req.URL.String(), err)
	return nil // unreachable: Fatalf stops the test goroutine
}
|
||||
|
||||
// ReadAllAndClose drains resp's body, closes it, and returns the bytes read.
// A nil response yields nil so callers can pass optional responses through.
func ReadAllAndClose(t testing.TB, resp *http.Response) []byte {
	t.Helper()
	if resp == nil {
		return nil
	}
	defer resp.Body.Close()
	payload, readErr := io.ReadAll(resp.Body)
	if readErr != nil {
		t.Fatalf("read response body: %v", readErr)
	}
	return payload
}
|
||||
56
test/volume_server/framework/volume_fixture.go
Normal file
56
test/volume_server/framework/volume_fixture.go
Normal file
@@ -0,0 +1,56 @@
|
||||
package framework
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
|
||||
"github.com/seaweedfs/seaweedfs/weed/storage/needle"
|
||||
)
|
||||
|
||||
func AllocateVolume(t testing.TB, client volume_server_pb.VolumeServerClient, volumeID uint32, collection string) {
|
||||
t.Helper()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
_, err := client.AllocateVolume(ctx, &volume_server_pb.AllocateVolumeRequest{
|
||||
VolumeId: volumeID,
|
||||
Collection: collection,
|
||||
Replication: "000",
|
||||
Version: uint32(needle.GetCurrentVersion()),
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("allocate volume %d: %v", volumeID, err)
|
||||
}
|
||||
}
|
||||
|
||||
func NewFileID(volumeID uint32, key uint64, cookie uint32) string {
|
||||
return needle.NewFileId(needle.VolumeId(volumeID), key, cookie).String()
|
||||
}
|
||||
|
||||
func UploadBytes(t testing.TB, client *http.Client, volumeURL, fid string, data []byte) *http.Response {
|
||||
t.Helper()
|
||||
|
||||
req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s/%s", volumeURL, fid), bytes.NewReader(data))
|
||||
if err != nil {
|
||||
t.Fatalf("build upload request: %v", err)
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/octet-stream")
|
||||
req.Header.Set("Content-Length", fmt.Sprintf("%d", len(data)))
|
||||
return DoRequest(t, client, req)
|
||||
}
|
||||
|
||||
func ReadBytes(t testing.TB, client *http.Client, volumeURL, fid string) *http.Response {
|
||||
t.Helper()
|
||||
|
||||
req, err := http.NewRequest(http.MethodGet, fmt.Sprintf("%s/%s", volumeURL, fid), nil)
|
||||
if err != nil {
|
||||
t.Fatalf("build read request: %v", err)
|
||||
}
|
||||
return DoRequest(t, client, req)
|
||||
}
|
||||
Reference in New Issue
Block a user