Tweak tests due to changes, make test timeouts uniform.

Signed-off-by: Derek Collison <derek@nats.io>
Author: Derek Collison
Date: 2023-03-28 23:40:54 -07:00
parent 52fbac644c
commit e97ddcd14f
4 changed files with 10 additions and 9 deletions

View File

@@ -971,6 +971,7 @@ func TestJetStreamClusterRestoreSingleConsumer(t *testing.T) {
 	c.stopAll()
 	c.restartAll()
 	c.waitOnLeader()
+	c.waitOnStreamLeader("$G", "foo")
 	s = c.randomServer()
 	nc, js = jsClientConnect(t, s)
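A restart can leave the stream briefly leaderless, so connecting a client right after waitOnLeader was racy; the added waitOnStreamLeader call closes that gap. As a rough sketch of what such a helper does, modeled on the waitOn* helpers changed below (the body shown is an assumption, not the repository's exact code):

func (c *cluster) waitOnStreamLeader(account, stream string) {
	c.t.Helper()
	expires := time.Now().Add(30 * time.Second)
	for time.Now().Before(expires) {
		// Poll until some server reports a leader for the stream.
		if leader := c.streamLeader(account, stream); leader != nil {
			return
		}
		time.Sleep(100 * time.Millisecond)
	}
	c.t.Fatalf("Expected a leader for stream %q on account %q, got none", stream, account)
}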

View File

@@ -4322,7 +4322,7 @@ func TestJetStreamClusterStreamReplicaUpdates(t *testing.T) {
 		require_NoError(t, err)
 		c.waitOnStreamLeader("$G", "TEST")
-		checkFor(t, 5*time.Second, 100*time.Millisecond, func() error {
+		checkFor(t, 10*time.Second, 100*time.Millisecond, func() error {
 			si, err = js.StreamInfo("TEST")
 			require_NoError(t, err)
 			if len(si.Cluster.Replicas) != r-1 {
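The checkFor window doubles from 5s to 10s, since replica removal after a stream update can take longer on loaded CI machines. checkFor is the polling assertion used throughout these tests; a minimal sketch assuming the signature implied by the call sites (uses time and testing from the file's existing imports):

func checkFor(t testing.TB, totalWait, sleepDur time.Duration, f func() error) {
	t.Helper()
	expires := time.Now().Add(totalWait)
	var err error
	for time.Now().Before(expires) {
		// Re-run the condition until it succeeds or the budget is spent.
		if err = f(); err == nil {
			return
		}
		time.Sleep(sleepDur)
	}
	t.Fatalf("Timed out waiting on condition: %v", err)
}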

View File

@@ -37,9 +37,9 @@ import (
 func init() {
 	// Speed up raft for tests.
 	hbInterval = 50 * time.Millisecond
-	minElectionTimeout = 250 * time.Millisecond
-	maxElectionTimeout = 1 * time.Second
-	lostQuorumInterval = time.Second
+	minElectionTimeout = 750 * time.Millisecond
+	maxElectionTimeout = 2500 * time.Millisecond
+	lostQuorumInterval = 500 * time.Millisecond
 	lostQuorumCheck = 4 * hbInterval
 }
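Raising the floor to 750ms and widening the min/max spread makes spurious elections less likely while keeping tests fast; randomizing over a wider window also reduces the odds of two candidates timing out together. A hedged sketch of how a raft node typically draws its timeout from these bounds (the helper name is an assumption; uses math/rand):

// Pick a random election timeout in [minElectionTimeout, maxElectionTimeout).
func randElectionTimeout() time.Duration {
	delta := rand.Int63n(int64(maxElectionTimeout - minElectionTimeout))
	return minElectionTimeout + time.Duration(delta)
}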
@@ -509,7 +509,7 @@ func (sc *supercluster) leader() *Server {
 func (sc *supercluster) waitOnLeader() {
 	sc.t.Helper()
-	expires := time.Now().Add(10 * time.Second)
+	expires := time.Now().Add(30 * time.Second)
 	for time.Now().Before(expires) {
 		for _, c := range sc.clusters {
 			if leader := c.leader(); leader != nil {
@@ -548,7 +548,7 @@ func (sc *supercluster) waitOnPeerCount(n int) {
 	sc.t.Helper()
 	sc.waitOnLeader()
 	leader := sc.leader()
-	expires := time.Now().Add(20 * time.Second)
+	expires := time.Now().Add(30 * time.Second)
 	for time.Now().Before(expires) {
 		peers := leader.JetStreamClusterPeers()
 		if len(peers) == n {
@@ -1237,7 +1237,7 @@ func (c *cluster) waitOnPeerCount(n int) {
 func (c *cluster) waitOnConsumerLeader(account, stream, consumer string) {
 	c.t.Helper()
-	expires := time.Now().Add(20 * time.Second)
+	expires := time.Now().Add(30 * time.Second)
 	for time.Now().Before(expires) {
 		if leader := c.consumerLeader(account, stream, consumer); leader != nil {
 			time.Sleep(200 * time.Millisecond)
@@ -1329,7 +1329,7 @@ func (c *cluster) waitOnServerHealthz(s *Server) {
 func (c *cluster) waitOnServerCurrent(s *Server) {
 	c.t.Helper()
-	expires := time.Now().Add(20 * time.Second)
+	expires := time.Now().Add(30 * time.Second)
 	for time.Now().Before(expires) {
 		time.Sleep(100 * time.Millisecond)
 		if !s.JetStreamEnabled() || s.JetStreamIsCurrent() {
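The helpers above (and their supercluster variants) now share one 30-second budget and the same deadline-poll shape. A hypothetical refactor that captures the repeated pattern (the waitFor name is invented here; the repository keeps the loops inline):

// Poll cond at the given interval until it returns true or the deadline passes.
func waitFor(t *testing.T, deadline, interval time.Duration, cond func() bool) {
	t.Helper()
	expires := time.Now().Add(deadline)
	for time.Now().Before(expires) {
		if cond() {
			return
		}
		time.Sleep(interval)
	}
	t.Fatalf("Timed out after %v waiting on condition", deadline)
}

For example, waitOnServerCurrent would reduce to waitFor(c.t, 30*time.Second, 100*time.Millisecond, func() bool { return !s.JetStreamEnabled() || s.JetStreamIsCurrent() }).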

View File

@@ -5267,7 +5267,7 @@ func TestNoRaceJetStreamClusterDirectAccessAllPeersSubs(t *testing.T) {
 		t.Fatalf("Expected to see messages increase, got %d", si.State.Msgs)
 	}
-	checkFor(t, 2*time.Second, 100*time.Millisecond, func() error {
+	checkFor(t, 10*time.Second, 100*time.Millisecond, func() error {
 		// Make sure they are all the same from a state perspective.
 		// Leader will have the expected state.
 		lmset, err := c.streamLeader("$G", "TEST").GlobalAccount().lookupStream("TEST")
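The diff truncates the closure here; it goes on to compare each server's view of the stream against the leader's state, which is why the window grows from 2s to 10s (every peer must converge, not just the leader). A hedged sketch of how such a comparison typically concludes (everything after lmset is an assumption; uses reflect and fmt):

		// Sketch: every server's view of TEST should match the leader's state.
		if err != nil {
			return err
		}
		expected := lmset.state()
		for _, s := range c.servers {
			mset, err := s.GlobalAccount().lookupStream("TEST")
			if err != nil {
				return err
			}
			if state := mset.state(); !reflect.DeepEqual(state, expected) {
				return fmt.Errorf("stream state mismatch: %+v vs %+v", state, expected)
			}
		}
		return nil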