mirror of
https://github.com/gogrlx/nats-server.git
synced 2026-04-02 03:38:42 -07:00
Move new test to new jetstream_cluster_3_test.go file
Since the second batch already ran past the 5-minute mark and was a bit longer than the first batch, this is a good opportunity to add the new test in a new file. Updated runTestsOnTravis and travis.yml accordingly. Signed-off-by: Ivan Kozlovic <ivan@synadia.com>
This commit is contained in:
@@ -27,6 +27,8 @@ jobs:
|
||||
env: TEST_SUITE=js_cluster_tests_1
|
||||
- name: "Run JetStream cluster tests (2)"
|
||||
env: TEST_SUITE=js_cluster_tests_2
|
||||
- name: "Run JetStream cluster tests (3)"
|
||||
env: TEST_SUITE=js_cluster_tests_3
|
||||
- name: "Run JetStream super cluster tests"
|
||||
env: TEST_SUITE=js_super_cluster_tests
|
||||
- name: "Run MQTT tests"
|
||||
|
||||
@@ -32,26 +32,34 @@ elif [ "$1" = "js_tests" ]; then
|
||||
|
||||
# Run JetStream non-clustered tests. By convention, all JS tests start
|
||||
# with `TestJetStream`. We exclude the clustered and super-clustered
|
||||
# tests by using the `skip_js_cluster_tests`, `skip_js_cluster_tests_2`
|
||||
# and `skip_js_super_cluster_tests` build tags.
|
||||
# tests by using the appropriate tags.
|
||||
|
||||
go test -race -v -run=TestJetStream ./server -tags=skip_js_cluster_tests,skip_js_cluster_tests_2,skip_js_super_cluster_tests -count=1 -vet=off -timeout=30m -failfast
|
||||
go test -race -v -run=TestJetStream ./server -tags=skip_js_cluster_tests,skip_js_cluster_tests_2,skip_js_cluster_tests_3,skip_js_super_cluster_tests -count=1 -vet=off -timeout=30m -failfast
|
||||
|
||||
elif [ "$1" = "js_cluster_tests_1" ]; then
|
||||
|
||||
# Run JetStream clustered tests. By convention, all JS cluster tests
|
||||
# start with `TestJetStreamCluster`. Will run the first batch of tests,
|
||||
# excluding the ones with the tag skip_js_cluster_tests_2.
|
||||
# excluding others with use of proper tags.
|
||||
|
||||
go test -race -v -run=TestJetStreamCluster ./server -tags=skip_js_cluster_tests_2 -count=1 -vet=off -timeout=30m -failfast
|
||||
go test -race -v -run=TestJetStreamCluster ./server -tags=skip_js_cluster_tests_2,skip_js_cluster_tests_3 -count=1 -vet=off -timeout=30m -failfast
|
||||
|
||||
elif [ "$1" = "js_cluster_tests_2" ]; then
|
||||
|
||||
# Run JetStream clustered tests. By convention, all JS cluster tests
|
||||
# start with `TestJetStreamCluster`. Will run the second batch of tests,
|
||||
# excluding the ones with the tag skip_js_cluster_tests.
|
||||
# excluding others with use of proper tags.
|
||||
|
||||
go test -race -v -run=TestJetStreamCluster ./server -tags=skip_js_cluster_tests -count=1 -vet=off -timeout=30m -failfast
|
||||
go test -race -v -run=TestJetStreamCluster ./server -tags=skip_js_cluster_tests,skip_js_cluster_tests_3 -count=1 -vet=off -timeout=30m -failfast
|
||||
|
||||
elif [ "$1" = "js_cluster_tests_3" ]; then
|
||||
|
||||
# Run JetStream clustered tests. By convention, all JS cluster tests
|
||||
# start with `TestJetStreamCluster`. Will run the third batch of tests,
|
||||
# excluding others with use of proper tags.
|
||||
#
|
||||
|
||||
go test -race -v -run=TestJetStreamCluster ./server -tags=skip_js_cluster_tests,skip_js_cluster_tests_2 -count=1 -vet=off -timeout=30m -failfast
|
||||
|
||||
elif [ "$1" = "js_super_cluster_tests" ]; then
|
||||
|
||||
|
||||
@@ -7260,85 +7260,7 @@ func TestJetStreamClusterCompressedStreamMessages(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestJetStreamClusterRemovePeerByID(t *testing.T) {
|
||||
c := createJetStreamClusterExplicit(t, "R3S", 3)
|
||||
defer c.shutdown()
|
||||
|
||||
s := c.randomNonLeader()
|
||||
nc, js := jsClientConnect(t, s)
|
||||
defer nc.Close()
|
||||
|
||||
_, err := js.AddStream(&nats.StreamConfig{
|
||||
Name: "TEST",
|
||||
Subjects: []string{"foo", "bar"},
|
||||
Replicas: 3,
|
||||
})
|
||||
require_NoError(t, err)
|
||||
|
||||
// Wait for a leader
|
||||
c.waitOnStreamLeader(globalAccountName, "TEST")
|
||||
|
||||
// Get the name of the one that is not restarted
|
||||
srvName := c.opts[2].ServerName
|
||||
// And its node ID
|
||||
peerID := c.servers[2].Node()
|
||||
|
||||
nc.Close()
|
||||
// Now stop the whole cluster
|
||||
c.stopAll()
|
||||
// Restart all but one
|
||||
for i := 0; i < 2; i++ {
|
||||
opts := c.opts[i]
|
||||
s, o := RunServerWithConfig(opts.ConfigFile)
|
||||
c.servers[i] = s
|
||||
c.opts[i] = o
|
||||
}
|
||||
|
||||
c.waitOnClusterReadyWithNumPeers(2)
|
||||
c.waitOnStreamLeader(globalAccountName, "TEST")
|
||||
|
||||
// Now attempt to remove by name, this should fail because the cluster
|
||||
// was restarted and names are not persisted.
|
||||
ml := c.leader()
|
||||
nc, err = nats.Connect(ml.ClientURL(), nats.UserInfo("admin", "s3cr3t!"))
|
||||
require_NoError(t, err)
|
||||
defer nc.Close()
|
||||
|
||||
req := &JSApiMetaServerRemoveRequest{Server: srvName}
|
||||
jsreq, err := json.Marshal(req)
|
||||
require_NoError(t, err)
|
||||
rmsg, err := nc.Request(JSApiRemoveServer, jsreq, 2*time.Second)
|
||||
require_NoError(t, err)
|
||||
|
||||
var resp JSApiMetaServerRemoveResponse
|
||||
err = json.Unmarshal(rmsg.Data, &resp)
|
||||
require_NoError(t, err)
|
||||
require_True(t, resp.Error != nil)
|
||||
require_True(t, IsNatsErr(resp.Error, JSClusterServerNotMemberErr))
|
||||
|
||||
// Now try by ID, but first with an ID that does not match any peerID
|
||||
req.Peer = "some_bad_id"
|
||||
jsreq, err = json.Marshal(req)
|
||||
require_NoError(t, err)
|
||||
rmsg, err = nc.Request(JSApiRemoveServer, jsreq, 2*time.Second)
|
||||
require_NoError(t, err)
|
||||
|
||||
resp = JSApiMetaServerRemoveResponse{}
|
||||
err = json.Unmarshal(rmsg.Data, &resp)
|
||||
require_NoError(t, err)
|
||||
require_True(t, resp.Error != nil)
|
||||
require_True(t, IsNatsErr(resp.Error, JSClusterServerNotMemberErr))
|
||||
|
||||
// Now with the proper peer ID
|
||||
req.Peer = peerID
|
||||
jsreq, err = json.Marshal(req)
|
||||
require_NoError(t, err)
|
||||
rmsg, err = nc.Request(JSApiRemoveServer, jsreq, 2*time.Second)
|
||||
require_NoError(t, err)
|
||||
|
||||
resp = JSApiMetaServerRemoveResponse{}
|
||||
err = json.Unmarshal(rmsg.Data, &resp)
|
||||
require_NoError(t, err)
|
||||
require_True(t, resp.Error == nil)
|
||||
require_True(t, resp.Success)
|
||||
}
|
||||
//
|
||||
// DO NOT ADD NEW TESTS IN THIS FILE
|
||||
// Add at the end of jetstream_cluster_<n>_test.go, with <n> being the highest value.
|
||||
//
|
||||
|
||||
108 lines added — server/jetstream_cluster_3_test.go (new file)
@@ -0,0 +1,108 @@
|
||||
// Copyright 2022 The NATS Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build !skip_js_tests && !skip_js_cluster_tests_3
|
||||
// +build !skip_js_tests,!skip_js_cluster_tests_3
|
||||
|
||||
package server
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/nats-io/nats.go"
|
||||
)
|
||||
|
||||
func TestJetStreamClusterRemovePeerByID(t *testing.T) {
|
||||
c := createJetStreamClusterExplicit(t, "R3S", 3)
|
||||
defer c.shutdown()
|
||||
|
||||
s := c.randomNonLeader()
|
||||
nc, js := jsClientConnect(t, s)
|
||||
defer nc.Close()
|
||||
|
||||
_, err := js.AddStream(&nats.StreamConfig{
|
||||
Name: "TEST",
|
||||
Subjects: []string{"foo", "bar"},
|
||||
Replicas: 3,
|
||||
})
|
||||
require_NoError(t, err)
|
||||
|
||||
// Wait for a leader
|
||||
c.waitOnStreamLeader(globalAccountName, "TEST")
|
||||
|
||||
// Get the name of the one that is not restarted
|
||||
srvName := c.opts[2].ServerName
|
||||
// And its node ID
|
||||
peerID := c.servers[2].Node()
|
||||
|
||||
nc.Close()
|
||||
// Now stop the whole cluster
|
||||
c.stopAll()
|
||||
// Restart all but one
|
||||
for i := 0; i < 2; i++ {
|
||||
opts := c.opts[i]
|
||||
s, o := RunServerWithConfig(opts.ConfigFile)
|
||||
c.servers[i] = s
|
||||
c.opts[i] = o
|
||||
}
|
||||
|
||||
c.waitOnClusterReadyWithNumPeers(2)
|
||||
c.waitOnStreamLeader(globalAccountName, "TEST")
|
||||
|
||||
// Now attempt to remove by name, this should fail because the cluster
|
||||
// was restarted and names are not persisted.
|
||||
ml := c.leader()
|
||||
nc, err = nats.Connect(ml.ClientURL(), nats.UserInfo("admin", "s3cr3t!"))
|
||||
require_NoError(t, err)
|
||||
defer nc.Close()
|
||||
|
||||
req := &JSApiMetaServerRemoveRequest{Server: srvName}
|
||||
jsreq, err := json.Marshal(req)
|
||||
require_NoError(t, err)
|
||||
rmsg, err := nc.Request(JSApiRemoveServer, jsreq, 2*time.Second)
|
||||
require_NoError(t, err)
|
||||
|
||||
var resp JSApiMetaServerRemoveResponse
|
||||
err = json.Unmarshal(rmsg.Data, &resp)
|
||||
require_NoError(t, err)
|
||||
require_True(t, resp.Error != nil)
|
||||
require_True(t, IsNatsErr(resp.Error, JSClusterServerNotMemberErr))
|
||||
|
||||
// Now try by ID, but first with an ID that does not match any peerID
|
||||
req.Peer = "some_bad_id"
|
||||
jsreq, err = json.Marshal(req)
|
||||
require_NoError(t, err)
|
||||
rmsg, err = nc.Request(JSApiRemoveServer, jsreq, 2*time.Second)
|
||||
require_NoError(t, err)
|
||||
|
||||
resp = JSApiMetaServerRemoveResponse{}
|
||||
err = json.Unmarshal(rmsg.Data, &resp)
|
||||
require_NoError(t, err)
|
||||
require_True(t, resp.Error != nil)
|
||||
require_True(t, IsNatsErr(resp.Error, JSClusterServerNotMemberErr))
|
||||
|
||||
// Now with the proper peer ID
|
||||
req.Peer = peerID
|
||||
jsreq, err = json.Marshal(req)
|
||||
require_NoError(t, err)
|
||||
rmsg, err = nc.Request(JSApiRemoveServer, jsreq, 2*time.Second)
|
||||
require_NoError(t, err)
|
||||
|
||||
resp = JSApiMetaServerRemoveResponse{}
|
||||
err = json.Unmarshal(rmsg.Data, &resp)
|
||||
require_NoError(t, err)
|
||||
require_True(t, resp.Error == nil)
|
||||
require_True(t, resp.Success)
|
||||
}
|
||||
Reference in New Issue
Block a user