Files
seaweedFS/test/volume_server/grpc/scrub_query_test.go
Chris Lu beeb375a88 Add volume server integration test suite and CI workflow (#8322)
* docs(volume_server): add integration test development plan

* test(volume_server): add integration harness and profile matrix

* test(volume_server/http): add admin and options integration coverage

* test(volume_server/grpc): add state and status integration coverage

* test(volume_server): auto-build weed binary and harden cluster startup

* test(volume_server/http): add upload read range head delete coverage

* test(volume_server/grpc): expand admin lifecycle and state coverage

* docs(volume_server): update progress tracker for implemented tests

* test(volume_server/http): cover if-none-match and invalid-range branches

* test(volume_server/grpc): add batch delete integration coverage

* docs(volume_server): log latest HTTP and gRPC test coverage

* ci(volume_server): run volume server integration tests in github actions

* test(volume_server/grpc): add needle status configure ping and leave coverage

* docs(volume_server): record additional grpc coverage progress

* test(volume_server/grpc): add vacuum integration coverage

* docs(volume_server): record vacuum test coverage progress

* test(volume_server/grpc): add read and write needle blob error-path coverage

* docs(volume_server): record data rw grpc coverage progress

* test(volume_server/http): add jwt auth integration coverage

* test(volume_server/grpc): add sync copy and stream error-path coverage

* docs(volume_server): record jwt and sync/copy test coverage

* test(volume_server/grpc): add scrub and query integration coverage

* test(volume_server/grpc): add volume tail sender and receiver coverage

* docs(volume_server): record scrub query and tail test progress

* test(volume_server/grpc): add readonly writable and collection lifecycle coverage

* test(volume_server/http): add public-port cors and method parity coverage

* test(volume_server/grpc): add blob meta and read-all success path coverage

* test(volume_server/grpc): expand scrub and query variation coverage

* test(volume_server/grpc): add tiering and remote fetch error-path coverage

* test(volume_server/http): add unchanged write and delete edge-case coverage

* test(volume_server/grpc): add ping unknown and unreachable target coverage

* test(volume_server/grpc): add volume delete only-empty variation coverage

* test(volume_server/http): add jwt fid-mismatch auth coverage

* test(volume_server/grpc): add scrub ec auto-select empty coverage

* test(volume_server/grpc): stabilize ping timestamp assertion

* docs(volume_server): update integration coverage progress log

* test(volume_server/grpc): add tier remote backend and config variation coverage

* docs(volume_server): record tier remote variation progress

* test(volume_server/grpc): add incremental copy and receive-file protocol coverage

* test(volume_server/http): add read path shape and if-modified-since coverage

* test(volume_server/grpc): add copy-file compaction and receive-file success coverage

* test(volume_server/http): add passthrough headers and static asset coverage

* test(volume_server/grpc): add ping filer unreachable coverage

* docs(volume_server): record copy receive and http variant progress

* test(volume_server/grpc): add erasure coding maintenance and missing-path coverage

* docs(volume_server): record initial erasure coding rpc coverage

* test(volume_server/http): add multi-range multipart response coverage

* docs(volume_server): record multi-range http coverage progress

* test(volume_server/grpc): add query empty-stripe no-match coverage

* docs(volume_server): record query no-match stream behavior coverage

* test(volume_server/http): add upload throttling timeout and replicate bypass coverage

* docs(volume_server): record upload throttling coverage progress

* test(volume_server/http): add download throttling timeout coverage

* docs(volume_server): record download throttling coverage progress

* test(volume_server/http): add jwt wrong-cookie fid mismatch coverage

* docs(volume_server): record jwt wrong-cookie mismatch coverage

* test(volume_server/http): add jwt expired-token rejection coverage

* docs(volume_server): record jwt expired-token coverage

* test(volume_server/http): add jwt query and cookie transport coverage

* docs(volume_server): record jwt token transport coverage

* test(volume_server/http): add jwt token-source precedence coverage

* docs(volume_server): record jwt token-source precedence coverage

* test(volume_server/http): add jwt header-over-cookie precedence coverage

* docs(volume_server): record jwt header cookie precedence coverage

* test(volume_server/http): add jwt query-over-cookie precedence coverage

* docs(volume_server): record jwt query cookie precedence coverage

* test(volume_server/grpc): add setstate version mismatch and nil-state coverage

* docs(volume_server): record setstate validation coverage

* test(volume_server/grpc): add readonly persist-true lifecycle coverage

* docs(volume_server): record readonly persist variation coverage

* test(volume_server/http): add options origin cors header coverage

* docs(volume_server): record options origin cors coverage

* test(volume_server/http): add trace unsupported-method parity coverage

* docs(volume_server): record trace method parity coverage

* test(volume_server/grpc): add batch delete cookie-check variation coverage

* docs(volume_server): record batch delete cookie-check coverage

* test(volume_server/grpc): add admin lifecycle missing and maintenance variants

* docs(volume_server): record admin lifecycle edge-case coverage

* test(volume_server/grpc): add mixed batch delete status matrix coverage

* docs(volume_server): record mixed batch delete matrix coverage

* test(volume_server/http): add jwt-profile ui access gating coverage

* docs(volume_server): record jwt ui-gating http coverage

* test(volume_server/http): add propfind unsupported-method parity coverage

* docs(volume_server): record propfind method parity coverage

* test(volume_server/grpc): add volume configure success and rollback-path coverage

* docs(volume_server): record volume configure branch coverage

* test(volume_server/grpc): add volume needle status missing-path coverage

* docs(volume_server): record volume needle status error-path coverage

* test(volume_server/http): add readDeleted query behavior coverage

* docs(volume_server): record readDeleted http behavior coverage

* test(volume_server/http): add delete ts override parity coverage

* docs(volume_server): record delete ts parity coverage

* test(volume_server/grpc): add invalid blob/meta offset coverage

* docs(volume_server): record invalid blob/meta offset coverage

* test(volume_server/grpc): add read-all mixed volume abort coverage

* docs(volume_server): record read-all mixed-volume abort coverage

* test(volume_server/http): assert head response body parity

* docs(volume_server): record head body parity assertion

* test(volume_server/grpc): assert status state and memory payload completeness

* docs(volume_server): record volume server status payload coverage

* test(volume_server/grpc): add batch delete chunk-manifest rejection coverage

* docs(volume_server): record batch delete chunk-manifest coverage

* test(volume_server/grpc): add query cookie-mismatch eof parity coverage

* docs(volume_server): record query cookie-mismatch parity coverage

* test(volume_server/grpc): add ping master success target coverage

* docs(volume_server): record ping master success coverage

* test(volume_server/http): add head if-none-match conditional parity

* docs(volume_server): record head if-none-match parity coverage

* test(volume_server/http): add head if-modified-since parity coverage

* docs(volume_server): record head if-modified-since parity coverage

* test(volume_server/http): add connect unsupported-method parity coverage

* docs(volume_server): record connect method parity coverage

* test(volume_server/http): assert options allow-headers cors parity

* docs(volume_server): record options allow-headers coverage

* test(volume_server/framework): add dual volume cluster integration harness

* test(volume_server/http): add missing-local read mode proxy redirect local coverage

* docs(volume_server): record read mode missing-local matrix coverage

* test(volume_server/http): add download over-limit replica proxy fallback coverage

* docs(volume_server): record download replica fallback coverage

* test(volume_server/http): add missing-local readDeleted proxy redirect parity coverage

* docs(volume_server): record missing-local readDeleted mode coverage

* test(volume_server/framework): add single-volume cluster with filer harness

* test(volume_server/grpc): add ping filer success target coverage

* docs(volume_server): record ping filer success coverage

* test(volume_server/http): add proxied-loop guard download timeout coverage

* docs(volume_server): record proxied-loop download coverage

* test(volume_server/http): add disabled upload and download limit coverage

* docs(volume_server): record disabled throttling path coverage

* test(volume_server/grpc): add idempotent volume server leave coverage

* docs(volume_server): record leave idempotence coverage

* test(volume_server/http): add redirect collection query preservation coverage

* docs(volume_server): record redirect collection query coverage

* test(volume_server/http): assert admin server headers on status and health

* docs(volume_server): record admin server header coverage

* test(volume_server/http): assert healthz request-id echo parity

* docs(volume_server): record healthz request-id parity coverage

* test(volume_server/http): add over-limit invalid-vid download branch coverage

* docs(volume_server): record over-limit invalid-vid branch coverage

* test(volume_server/http): add public-port static asset coverage

* docs(volume_server): record public static endpoint coverage

* test(volume_server/http): add public head method parity coverage

* docs(volume_server): record public head parity coverage

* test(volume_server/http): add throttling wait-then-proceed path coverage

* docs(volume_server): record throttling wait-then-proceed coverage

* test(volume_server/http): add read cookie-mismatch not-found coverage

* docs(volume_server): record read cookie-mismatch coverage

* test(volume_server/http): add throttling timeout-recovery coverage

* docs(volume_server): record throttling timeout-recovery coverage

* test(volume_server/grpc): add ec generate mount info unmount lifecycle coverage

* docs(volume_server): record ec positive lifecycle coverage

* test(volume_server/grpc): add ec shard read and blob delete lifecycle coverage

* docs(volume_server): record ec shard read/blob delete lifecycle coverage

* test(volume_server/grpc): add ec rebuild and to-volume error branch coverage

* docs(volume_server): record ec rebuild and to-volume branch coverage

* test(volume_server/grpc): add ec shards-to-volume success roundtrip coverage

* docs(volume_server): record ec shards-to-volume success coverage

* test(volume_server/grpc): add ec receive and copy-file missing-source coverage

* docs(volume_server): record ec receive and copy-file coverage

* test(volume_server/grpc): add ec last-shard delete cleanup coverage

* docs(volume_server): record ec last-shard delete cleanup coverage

* test(volume_server/grpc): add volume copy success path coverage

* docs(volume_server): record volume copy success coverage

* test(volume_server/grpc): add volume copy overwrite-destination coverage

* docs(volume_server): record volume copy overwrite coverage

* test(volume_server/http): add write error-path variant coverage

* docs(volume_server): record http write error-path coverage

* test(volume_server/http): add conditional header precedence coverage

* docs(volume_server): record conditional header precedence coverage

* test(volume_server/http): add oversized combined range guard coverage

* docs(volume_server): record oversized range guard coverage

* test(volume_server/http): add image resize and crop read coverage

* docs(volume_server): record image transform coverage

* test(volume_server/http): add chunk-manifest expansion and bypass coverage

* docs(volume_server): record chunk-manifest read coverage

* test(volume_server/http): add compressed read encoding matrix coverage

* docs(volume_server): record compressed read matrix coverage

* test(volume_server/grpc): add tail receiver source replication coverage

* docs(volume_server): record tail receiver replication coverage

* test(volume_server/grpc): add tail sender large-needle chunking coverage

* docs(volume_server): record tail sender chunking coverage

* test(volume_server/grpc): add ec-backed volume needle status coverage

* docs(volume_server): record ec-backed needle status coverage

* test(volume_server/grpc): add ec shard copy from peer success coverage

* docs(volume_server): record ec shard copy success coverage

* test(volume_server/http): add chunk-manifest delete child cleanup coverage

* docs(volume_server): record chunk-manifest delete cleanup coverage

* test(volume_server/http): add chunk-manifest delete failure-path coverage

* docs(volume_server): record chunk-manifest delete failure coverage

* test(volume_server/grpc): add ec shard copy source-unavailable coverage

* docs(volume_server): record ec shard copy source-unavailable coverage

* parallel
2026-02-13 00:40:56 -08:00

386 lines
13 KiB
Go

package volume_server_grpc_test
import (
"context"
"io"
"strings"
"testing"
"time"
"github.com/seaweedfs/seaweedfs/test/volume_server/framework"
"github.com/seaweedfs/seaweedfs/test/volume_server/matrix"
"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
)
// TestScrubVolumeIndexAndUnsupportedMode verifies that ScrubVolume succeeds in
// INDEX mode for an explicitly listed volume and that an out-of-range scrub
// mode value is rejected with a descriptive error.
func TestScrubVolumeIndexAndUnsupportedMode(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	cluster := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, client := framework.DialVolumeServer(t, cluster.VolumeGRPCAddress())
	defer conn.Close()

	const volumeID = uint32(61)
	framework.AllocateVolume(t, client, volumeID, "")

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// INDEX mode over a single explicit volume id should scrub exactly one volume.
	resp, err := client.ScrubVolume(ctx, &volume_server_pb.ScrubVolumeRequest{
		VolumeIds: []uint32{volumeID},
		Mode:      volume_server_pb.VolumeScrubMode_INDEX,
	})
	if err != nil {
		t.Fatalf("ScrubVolume index mode failed: %v", err)
	}
	if total := resp.GetTotalVolumes(); total != 1 {
		t.Fatalf("ScrubVolume expected total_volumes=1, got %d", total)
	}

	// A mode value outside the enum must be refused by the server.
	_, err = client.ScrubVolume(ctx, &volume_server_pb.ScrubVolumeRequest{
		VolumeIds: []uint32{volumeID},
		Mode:      volume_server_pb.VolumeScrubMode(99),
	})
	if err == nil {
		t.Fatalf("ScrubVolume should fail for unsupported mode")
	}
	if !strings.Contains(err.Error(), "unsupported volume scrub mode") {
		t.Fatalf("ScrubVolume unsupported mode error mismatch: %v", err)
	}
}
// TestScrubEcVolumeMissingVolume verifies that ScrubEcVolume reports an error
// when asked to scrub an EC volume id that is not present on the server.
func TestScrubEcVolumeMissingVolume(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	cluster := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, client := framework.DialVolumeServer(t, cluster.VolumeGRPCAddress())
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Volume id 98765 was never allocated, so the scrub must be rejected.
	_, err := client.ScrubEcVolume(ctx, &volume_server_pb.ScrubEcVolumeRequest{
		VolumeIds: []uint32{98765},
		Mode:      volume_server_pb.VolumeScrubMode_INDEX,
	})
	if err == nil {
		t.Fatalf("ScrubEcVolume should fail for missing EC volume")
	}
	if !strings.Contains(err.Error(), "EC volume id") {
		t.Fatalf("ScrubEcVolume missing-volume error mismatch: %v", err)
	}
}
// TestScrubEcVolumeAutoSelectNoEcVolumes verifies that ScrubEcVolume with no
// explicit ids auto-selects local EC volumes and, on a server with no EC data,
// reports zero scanned volumes and no broken volumes.
func TestScrubEcVolumeAutoSelectNoEcVolumes(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	cluster := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, client := framework.DialVolumeServer(t, cluster.VolumeGRPCAddress())
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Omitting VolumeIds triggers auto-selection of whatever EC volumes exist.
	scrubResp, err := client.ScrubEcVolume(ctx, &volume_server_pb.ScrubEcVolumeRequest{
		Mode: volume_server_pb.VolumeScrubMode_INDEX,
	})
	if err != nil {
		t.Fatalf("ScrubEcVolume auto-select failed: %v", err)
	}
	if total := scrubResp.GetTotalVolumes(); total != 0 {
		t.Fatalf("ScrubEcVolume auto-select expected total_volumes=0 without EC data, got %d", total)
	}
	if broken := scrubResp.GetBrokenVolumeIds(); len(broken) != 0 {
		t.Fatalf("ScrubEcVolume auto-select expected no broken volumes, got %v", broken)
	}
}
// TestQueryInvalidAndMissingFileIDPaths verifies that Query surfaces an error
// — either at stream creation or on the first Recv — for a malformed file id
// and for a file id whose volume is not hosted by this server.
func TestQueryInvalidAndMissingFileIDPaths(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	cluster := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, client := framework.DialVolumeServer(t, cluster.VolumeGRPCAddress())
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// runQuery issues a JSON-input query for one file id and returns the first
	// error observed, whether it occurs when opening the stream or on the
	// initial Recv (gRPC often defers per-request errors to the first Recv).
	runQuery := func(fileID string) error {
		stream, err := client.Query(ctx, &volume_server_pb.QueryRequest{
			FromFileIds: []string{fileID},
			Selections:  []string{"name"},
			Filter:      &volume_server_pb.QueryRequest_Filter{},
			InputSerialization: &volume_server_pb.QueryRequest_InputSerialization{
				JsonInput: &volume_server_pb.QueryRequest_InputSerialization_JSONInput{},
			},
		})
		if err != nil {
			return err
		}
		_, err = stream.Recv()
		return err
	}

	if runQuery("bad-fid") == nil {
		t.Fatalf("Query should fail for invalid file id")
	}
	if runQuery(framework.NewFileID(98766, 1, 1)) == nil {
		t.Fatalf("Query should fail for missing file id volume")
	}
}
// TestScrubVolumeAutoSelectAndNotImplementedModes verifies that ScrubVolume
// auto-selects every local volume when no ids are given, and that the LOCAL
// and FULL modes each report a "not implemented" detail for the scrubbed
// volume (LOCAL additionally flags it as broken).
func TestScrubVolumeAutoSelectAndNotImplementedModes(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	cluster := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, client := framework.DialVolumeServer(t, cluster.VolumeGRPCAddress())
	defer conn.Close()

	const volumeIDA = uint32(62)
	const volumeIDB = uint32(63)
	framework.AllocateVolume(t, client, volumeIDA, "")
	framework.AllocateVolume(t, client, volumeIDB, "")

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// With no explicit ids, the server scrubs all local volumes.
	autoResp, err := client.ScrubVolume(ctx, &volume_server_pb.ScrubVolumeRequest{
		Mode: volume_server_pb.VolumeScrubMode_INDEX,
	})
	if err != nil {
		t.Fatalf("ScrubVolume auto-select failed: %v", err)
	}
	if autoResp.GetTotalVolumes() < 2 {
		t.Fatalf("ScrubVolume auto-select expected at least 2 volumes, got %d", autoResp.GetTotalVolumes())
	}

	// scrubSingle scrubs volumeIDA in the given mode and asserts the shared
	// expectations: one volume scrubbed and a not-implemented detail present.
	scrubSingle := func(mode volume_server_pb.VolumeScrubMode, label string) *volume_server_pb.ScrubVolumeResponse {
		resp, scrubErr := client.ScrubVolume(ctx, &volume_server_pb.ScrubVolumeRequest{
			VolumeIds: []uint32{volumeIDA},
			Mode:      mode,
		})
		if scrubErr != nil {
			t.Fatalf("ScrubVolume %s mode failed: %v", label, scrubErr)
		}
		if resp.GetTotalVolumes() != 1 {
			t.Fatalf("ScrubVolume %s mode expected total_volumes=1, got %d", label, resp.GetTotalVolumes())
		}
		if len(resp.GetDetails()) == 0 || !strings.Contains(strings.Join(resp.GetDetails(), " "), "not implemented") {
			t.Fatalf("ScrubVolume %s mode expected not-implemented details, got %v", label, resp.GetDetails())
		}
		return resp
	}

	// LOCAL mode also reports the (unimplemented) volume as broken.
	localResp := scrubSingle(volume_server_pb.VolumeScrubMode_LOCAL, "local")
	if len(localResp.GetBrokenVolumeIds()) != 1 || localResp.GetBrokenVolumeIds()[0] != volumeIDA {
		t.Fatalf("ScrubVolume local mode expected broken volume %d, got %v", volumeIDA, localResp.GetBrokenVolumeIds())
	}
	scrubSingle(volume_server_pb.VolumeScrubMode_FULL, "full")
}
// TestQueryJsonSuccessAndCsvNoOutput uploads newline-delimited JSON, runs a
// filtered JSON-lines query that must return only the matching rows in a
// single stripe, then runs a CSV-input query over the same JSON bytes and
// confirms it streams no records before EOF.
func TestQueryJsonSuccessAndCsvNoOutput(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	cluster := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, client := framework.DialVolumeServer(t, cluster.VolumeGRPCAddress())
	defer conn.Close()

	const volumeID = uint32(64)
	const needleID = uint64(777001)
	const cookie = uint32(0xAABBCCDD)
	framework.AllocateVolume(t, client, volumeID, "")

	// Three JSON lines; only the scores above 10 should survive the filter.
	payload := []byte("{\"score\":3}\n{\"score\":12}\n{\"score\":18}\n")
	httpClient := framework.NewHTTPClient()
	fid := framework.NewFileID(volumeID, needleID, cookie)
	uploadResp := framework.UploadBytes(t, httpClient, cluster.VolumeAdminURL(), fid, payload)
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != 201 {
		t.Fatalf("upload expected 201, got %d", uploadResp.StatusCode)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	jsonStream, err := client.Query(ctx, &volume_server_pb.QueryRequest{
		FromFileIds: []string{fid},
		Selections:  []string{"score"},
		Filter: &volume_server_pb.QueryRequest_Filter{
			Field:   "score",
			Operand: ">",
			Value:   "10",
		},
		InputSerialization: &volume_server_pb.QueryRequest_InputSerialization{
			JsonInput: &volume_server_pb.QueryRequest_InputSerialization_JSONInput{Type: "LINES"},
		},
	})
	if err != nil {
		t.Fatalf("Query json start failed: %v", err)
	}
	stripe, err := jsonStream.Recv()
	if err != nil {
		t.Fatalf("Query json recv failed: %v", err)
	}
	got := string(stripe.GetRecords())
	if !strings.Contains(got, "score:12") || !strings.Contains(got, "score:18") {
		t.Fatalf("Query json records missing expected filtered scores: %q", got)
	}
	if strings.Contains(got, "score:3") {
		t.Fatalf("Query json records should not include filtered-out score: %q", got)
	}
	if _, err = jsonStream.Recv(); err != io.EOF {
		t.Fatalf("Query json expected EOF after first stripe, got: %v", err)
	}

	// CSV parsing over JSON content yields no usable rows, so the stream
	// should end immediately without any record stripes.
	csvStream, err := client.Query(ctx, &volume_server_pb.QueryRequest{
		FromFileIds: []string{fid},
		Selections:  []string{"score"},
		Filter:      &volume_server_pb.QueryRequest_Filter{},
		InputSerialization: &volume_server_pb.QueryRequest_InputSerialization{
			CsvInput: &volume_server_pb.QueryRequest_InputSerialization_CSVInput{},
		},
	})
	if err != nil {
		t.Fatalf("Query csv start failed: %v", err)
	}
	if _, err = csvStream.Recv(); err != io.EOF {
		t.Fatalf("Query csv expected EOF with no rows, got: %v", err)
	}
}
// TestQueryJsonNoMatchReturnsEmptyStripe verifies that a JSON-lines query whose
// filter matches no rows still yields one empty record stripe before EOF.
func TestQueryJsonNoMatchReturnsEmptyStripe(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	cluster := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, client := framework.DialVolumeServer(t, cluster.VolumeGRPCAddress())
	defer conn.Close()

	const volumeID = uint32(65)
	const needleID = uint64(777002)
	const cookie = uint32(0xABABCDCD)
	framework.AllocateVolume(t, client, volumeID, "")

	payload := []byte("{\"score\":1}\n{\"score\":2}\n")
	httpClient := framework.NewHTTPClient()
	fid := framework.NewFileID(volumeID, needleID, cookie)
	uploadResp := framework.UploadBytes(t, httpClient, cluster.VolumeAdminURL(), fid, payload)
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != 201 {
		t.Fatalf("upload expected 201, got %d", uploadResp.StatusCode)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// The threshold (100) is above every uploaded score, so nothing matches.
	stream, err := client.Query(ctx, &volume_server_pb.QueryRequest{
		FromFileIds: []string{fid},
		Selections:  []string{"score"},
		Filter: &volume_server_pb.QueryRequest_Filter{
			Field:   "score",
			Operand: ">",
			Value:   "100",
		},
		InputSerialization: &volume_server_pb.QueryRequest_InputSerialization{
			JsonInput: &volume_server_pb.QueryRequest_InputSerialization_JSONInput{Type: "LINES"},
		},
	})
	if err != nil {
		t.Fatalf("Query json no-match start failed: %v", err)
	}
	stripe, err := stream.Recv()
	if err != nil {
		t.Fatalf("Query json no-match recv failed: %v", err)
	}
	if records := stripe.GetRecords(); len(records) != 0 {
		t.Fatalf("Query json no-match expected empty records stripe, got: %q", string(records))
	}
	if _, err = stream.Recv(); err != io.EOF {
		t.Fatalf("Query json no-match expected EOF after first empty stripe, got: %v", err)
	}
}
// TestQueryCookieMismatchReturnsEOFNoResults verifies that querying with the
// correct volume and needle id but a wrong cookie streams no records at all:
// the server skips the mismatched needle and ends the stream with EOF.
func TestQueryCookieMismatchReturnsEOFNoResults(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	cluster := framework.StartSingleVolumeCluster(t, matrix.P1())
	conn, client := framework.DialVolumeServer(t, cluster.VolumeGRPCAddress())
	defer conn.Close()

	const volumeID = uint32(66)
	const needleID = uint64(777003)
	const cookie = uint32(0xCDCDABAB)
	framework.AllocateVolume(t, client, volumeID, "")

	payload := []byte("{\"score\":7}\n{\"score\":8}\n")
	httpClient := framework.NewHTTPClient()
	fid := framework.NewFileID(volumeID, needleID, cookie)
	uploadResp := framework.UploadBytes(t, httpClient, cluster.VolumeAdminURL(), fid, payload)
	_ = framework.ReadAllAndClose(t, uploadResp)
	if uploadResp.StatusCode != 201 {
		t.Fatalf("upload expected 201, got %d", uploadResp.StatusCode)
	}

	// Same volume and needle, but a cookie that does not match the upload.
	wrongCookieFid := framework.NewFileID(volumeID, needleID, cookie+1)

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	stream, err := client.Query(ctx, &volume_server_pb.QueryRequest{
		FromFileIds: []string{wrongCookieFid},
		Selections:  []string{"score"},
		Filter: &volume_server_pb.QueryRequest_Filter{
			Field:   "score",
			Operand: ">",
			Value:   "0",
		},
		InputSerialization: &volume_server_pb.QueryRequest_InputSerialization{
			JsonInput: &volume_server_pb.QueryRequest_InputSerialization_JSONInput{Type: "LINES"},
		},
	})
	if err != nil {
		t.Fatalf("Query start for cookie mismatch should not fail immediately, got: %v", err)
	}
	if _, err = stream.Recv(); err != io.EOF {
		t.Fatalf("Query cookie mismatch expected EOF with no streamed records, got: %v", err)
	}
}