[ADDED] Support for route S2 compression

The new field `compression` in the `cluster{}` block allows specifying
which compression mode to use between servers.

It can be simply specified as a boolean or a string for the
simple modes, or as an object for the "s2_auto" mode where
a list of RTT thresholds can be specified.

By default, if no compression field is specified, the server
will use the s2_auto mode with default RTT thresholds of
10ms, 50ms and 100ms for the "uncompressed", "fast", "better"
and "best" modes.

```
cluster {
..
  # Possible values are "disabled", "off", "enabled", "on",
  # "accept", "s2_fast", "s2_better", "s2_best" or "s2_auto"
  compression: s2_fast
}
```

To specify a different list of thresholds for the s2_auto mode,
here is what it would look like:
```
cluster {
..
  compression: {
    mode: s2_auto
    # This means that for RTT up to 5ms (included), then
    # the compression level will be "uncompressed", then
    # from 5ms+ to 15ms, the mode will switch to "s2_fast",
    # then from 15ms+ to 50ms, the level will switch to
    # "s2_better", and anything above 50ms will result
    # in the "s2_best" compression mode.
    rtt_thresholds: [5ms, 15ms, 50ms]
  }
}
```

Note that the "accept" mode means that a server will accept
compression from a remote and switch to that same compression
mode, but will otherwise not initiate compression. That is,
if 2 servers are configured with "accept", then compression
will actually be "off". If one of the servers had, say, "s2_fast",
then they would both use this mode.

If a server has a compression mode set (other than "off") but
connects to an older server, there will be no compression between
those 2 servers.

Signed-off-by: Ivan Kozlovic <ivan@synadia.com>
This commit is contained in:
Ivan Kozlovic
2023-04-27 17:59:25 -06:00
parent d573b78aee
commit d6fe9d4c2d
21 changed files with 1604 additions and 150 deletions

View File

@@ -29,6 +29,7 @@ import (
"time"
"github.com/nats-io/nats-server/v2/server"
"github.com/nats-io/nats.go"
)
const PERF_PORT = 8422
@@ -735,6 +736,105 @@ func Benchmark___Routed16QueueSub(b *testing.B) {
routeQueue(b, 16, 2)
}
// doS2CompressBench benchmarks routed message delivery between two
// clustered servers configured with the given route compression mode
// (e.g. server.CompressionOff, server.CompressionS2Fast, ...).
// Half of the published payloads are zero-filled (highly compressible),
// the other half random, so the compression code gets a realistic mix.
func doS2CompressBench(b *testing.B, compress string) {
	b.StopTimer()
	// First server of the 2-server cluster, with route pooling disabled
	// and the requested compression mode.
	conf1 := createConfFile(b, []byte(fmt.Sprintf(`
		port: -1
		cluster {
			name: "local"
			port: -1
			pool_size: -1
			compression: %s
		}
	`, compress)))
	s1, o1 := RunServerWithConfig(conf1)
	defer s1.Shutdown()
	// Second server creates a route to the first one.
	conf2 := createConfFile(b, []byte(fmt.Sprintf(`
		port: -1
		cluster {
			name: "local"
			port: -1
			pool_size: -1
			compression: %s
			routes: ["nats://127.0.0.1:%d"]
		}
	`, compress, o1.Cluster.Port)))
	s2, _ := RunServerWithConfig(conf2)
	defer s2.Shutdown()
	checkClusterFormed(b, s1, s2)
	nc2, err := nats.Connect(s2.ClientURL())
	if err != nil {
		b.Fatalf("Error on connect: %v", err)
	}
	defer nc2.Close()
	ch := make(chan struct{}, 1)
	var count int
	nc2.Subscribe("foo", func(_ *nats.Msg) {
		// Non-blocking signal once all expected messages have arrived.
		if count++; count == b.N {
			select {
			case ch <- struct{}{}:
			default:
			}
		}
	})
	// Make sure the subscription interest has propagated to s1 before
	// we start publishing, otherwise messages could be dropped.
	checkSubInterest(b, s1, "$G", "foo", time.Second)
	nc1, err := nats.Connect(s1.ClientURL())
	if err != nil {
		b.Fatalf("Error on connect: %v", err)
	}
	defer nc1.Close()
	// This one is easily compressible.
	payload1 := make([]byte, 128)
	// Make it random so that compression code has more to do.
	payload2 := make([]byte, 256)
	// Fix: iterate over payload2 (the original referenced an undefined
	// "payload" variable here, leaving payload2 unfilled).
	for i := 0; i < len(payload2); i++ {
		payload2[i] = byte(rand.Intn(26) + 'A')
	}
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		if i%2 == 0 {
			nc1.Publish("foo", payload1)
		} else {
			nc1.Publish("foo", payload2)
		}
	}
	// Wait for the subscriber side to report full delivery.
	select {
	case <-ch:
		return
	case <-time.After(10 * time.Second):
		b.Fatal("Timeout waiting to receive all messages")
	}
}
// Benchmark routed delivery with route compression disabled (baseline).
func Benchmark____________RouteCompressOff(b *testing.B) {
	doS2CompressBench(b, server.CompressionOff)
}
// Benchmark routed delivery with S2 framing but no actual compression.
func Benchmark_RouteCompressS2Uncompressed(b *testing.B) {
	doS2CompressBench(b, server.CompressionS2Uncompressed)
}
// Benchmark routed delivery with the "s2_fast" compression level.
func Benchmark_________RouteCompressS2Fast(b *testing.B) {
	doS2CompressBench(b, server.CompressionS2Fast)
}
// Benchmark routed delivery with the "s2_better" compression level.
func Benchmark_______RouteCompressS2Better(b *testing.B) {
	doS2CompressBench(b, server.CompressionS2Better)
}
// Benchmark routed delivery with the "s2_best" compression level.
func Benchmark_________RouteCompressS2Best(b *testing.B) {
	doS2CompressBench(b, server.CompressionS2Best)
}
func doFanout(b *testing.B, numServers, numConnections, subsPerConnection int, subject, payload string) {
var s1, s2 *server.Server
var o1, o2 *server.Options

View File

@@ -74,7 +74,7 @@ func checkExpectedSubs(expected int, servers ...*server.Server) error {
return nil
}
func checkSubInterest(t *testing.T, s *server.Server, accName, subject string, timeout time.Duration) {
func checkSubInterest(t testing.TB, s *server.Server, accName, subject string, timeout time.Duration) {
t.Helper()
checkFor(t, timeout, 15*time.Millisecond, func() error {
acc, err := s.LookupAccount(accName)

View File

@@ -104,6 +104,7 @@ func TestClusterTLSInsecure(t *testing.T) {
name: "xyz"
listen: "127.0.0.1:-1"
pool_size: -1
compression: "disabled"
tls {
cert_file: "./configs/certs/server-noip.pem"
key_file: "./configs/certs/server-key-noip.pem"
@@ -124,6 +125,7 @@ func TestClusterTLSInsecure(t *testing.T) {
name: "xyz"
listen: "127.0.0.1:-1"
pool_size: -1
compression: "disabled"
tls {
cert_file: "./configs/certs/server-noip.pem"
key_file: "./configs/certs/server-key-noip.pem"

View File

@@ -47,6 +47,7 @@ func TestNoRaceRouteSendSubs(t *testing.T) {
cluster {
port: -1
pool_size: -1
compression: disabled
%s
}
no_sys_acc: true

View File

@@ -1206,6 +1206,7 @@ func TestOCSPCluster(t *testing.T) {
cluster {
port: -1
pool_size: -1
compression: "disabled"
name: AB
host: "127.0.0.1"
advertise: 127.0.0.1

View File

@@ -342,6 +342,7 @@ func TestReloadDoesNotWipeAccountsWithOperatorMode(t *testing.T) {
name: "A"
listen: 127.0.0.1:-1
pool_size: -1
compression: "disabled"
authorization {
timeout: 2.2
} %s

View File

@@ -81,6 +81,8 @@ func RunServerCallback(opts *server.Options, callback func(*server.Server)) *ser
opts.Debug = doDebug
// For all tests in the "test" package, we will disable route pooling.
opts.Cluster.PoolSize = -1
// Also disable compression for "test" package.
opts.Cluster.Compression.Mode = server.CompressionOff
s, err := server.NewServer(opts)
if err != nil || s == nil {